/root/src/xen/xen/arch/x86/pv/descriptor-tables.c
Line | Count | Source (jump to first uncovered line) |
1 | | /* |
2 | | * arch/x86/pv/descriptor-tables.c |
3 | | * |
4 | | * Descriptor table manipulation code for PV guests |
5 | | * |
6 | | * Copyright (c) 2002-2005 K A Fraser |
7 | | * Copyright (c) 2004 Christian Limpach |
8 | | * |
9 | | * This program is free software; you can redistribute it and/or |
10 | | * modify it under the terms and conditions of the GNU General Public |
11 | | * License, version 2, as published by the Free Software Foundation. |
12 | | * |
13 | | * This program is distributed in the hope that it will be useful, |
14 | | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
15 | | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU |
16 | | * General Public License for more details. |
17 | | * |
18 | | * You should have received a copy of the GNU General Public |
19 | | * License along with this program; If not, see <http://www.gnu.org/licenses/>. |
20 | | */ |
21 | | |
22 | | #include <xen/guest_access.h> |
23 | | #include <xen/hypercall.h> |
24 | | |
25 | | #include <asm/p2m.h> |
26 | | #include <asm/pv/mm.h> |
27 | | |
28 | | /* Override macros from asm/page.h to make them work with mfn_t */ |
29 | | #undef mfn_to_page |
30 | 0 | #define mfn_to_page(mfn) __mfn_to_page(mfn_x(mfn)) |
31 | | #undef page_to_mfn |
32 | 0 | #define page_to_mfn(pg) _mfn(__page_to_mfn(pg)) |
33 | | |
34 | | /******************* |
35 | | * Descriptor Tables |
36 | | */ |
37 | | |
38 | | void pv_destroy_gdt(struct vcpu *v) |
39 | 0 | { |
40 | 0 | l1_pgentry_t *pl1e; |
41 | 0 | unsigned int i; |
42 | 0 | unsigned long pfn, zero_pfn = PFN_DOWN(__pa(zero_page)); |
43 | 0 |
|
44 | 0 | v->arch.pv_vcpu.gdt_ents = 0; |
45 | 0 | pl1e = pv_gdt_ptes(v); |
46 | 0 | for ( i = 0; i < FIRST_RESERVED_GDT_PAGE; i++ ) |
47 | 0 | { |
48 | 0 | pfn = l1e_get_pfn(pl1e[i]); |
49 | 0 | if ( (l1e_get_flags(pl1e[i]) & _PAGE_PRESENT) && pfn != zero_pfn ) |
50 | 0 | put_page_and_type(mfn_to_page(_mfn(pfn))); |
51 | 0 | l1e_write(&pl1e[i], l1e_from_pfn(zero_pfn, __PAGE_HYPERVISOR_RO)); |
52 | 0 | v->arch.pv_vcpu.gdt_frames[i] = 0; |
53 | 0 | } |
54 | 0 | } |
55 | | |
/*
 * Install a new GDT for vcpu @v.
 *
 * @frames holds GFNs on entry; on success each used slot is rewritten
 * in place with the corresponding MFN.  Returns 0 on success, -EINVAL
 * if @entries exceeds the guest-usable range or any frame fails
 * validation.  Both callers in this file invoke it under domain_lock().
 */
long pv_set_gdt(struct vcpu *v, unsigned long *frames, unsigned int entries)
{
    struct domain *d = v->domain;
    l1_pgentry_t *pl1e;
    /* NB. There are 512 8-byte entries per GDT page. */
    unsigned int i, nr_pages = (entries + 511) / 512;

    if ( entries > FIRST_RESERVED_GDT_ENTRY )
        return -EINVAL;

    /*
     * Check the pages in the new GDT.  Each frame must be translatable
     * and must accept the seg_desc_page type (i.e. contain only
     * descriptors the guest is allowed to load).
     */
    for ( i = 0; i < nr_pages; i++ )
    {
        struct page_info *page;

        page = get_page_from_gfn(d, frames[i], NULL, P2M_ALLOC);
        if ( !page )
            goto fail;
        if ( !get_page_type(page, PGT_seg_desc_page) )
        {
            /* Drop the general ref taken by get_page_from_gfn(). */
            put_page(page);
            goto fail;
        }
        /* From here on, frames[i] is an MFN, not a GFN. */
        frames[i] = mfn_x(page_to_mfn(page));
    }

    /* Tear down the old GDT. */
    pv_destroy_gdt(v);

    /* Install the new GDT.  References are released by pv_destroy_gdt(). */
    v->arch.pv_vcpu.gdt_ents = entries;
    pl1e = pv_gdt_ptes(v);
    for ( i = 0; i < nr_pages; i++ )
    {
        v->arch.pv_vcpu.gdt_frames[i] = frames[i];
        l1e_write(&pl1e[i], l1e_from_pfn(frames[i], __PAGE_HYPERVISOR_RW));
    }

    return 0;

 fail:
    /* Unwind the general and type references taken on frames 0..i-1. */
    while ( i-- > 0 )
    {
        put_page_and_type(mfn_to_page(_mfn(frames[i])));
    }
    return -EINVAL;
}
103 | | |
104 | | long do_set_gdt(XEN_GUEST_HANDLE_PARAM(xen_ulong_t) frame_list, |
105 | | unsigned int entries) |
106 | 0 | { |
107 | 0 | int nr_pages = (entries + 511) / 512; |
108 | 0 | unsigned long frames[16]; |
109 | 0 | struct vcpu *curr = current; |
110 | 0 | long ret; |
111 | 0 |
|
112 | 0 | /* Rechecked in set_gdt, but ensures a sane limit for copy_from_user(). */ |
113 | 0 | if ( entries > FIRST_RESERVED_GDT_ENTRY ) |
114 | 0 | return -EINVAL; |
115 | 0 |
|
116 | 0 | if ( copy_from_guest(frames, frame_list, nr_pages) ) |
117 | 0 | return -EFAULT; |
118 | 0 |
|
119 | 0 | domain_lock(curr->domain); |
120 | 0 |
|
121 | 0 | if ( (ret = pv_set_gdt(curr, frames, entries)) == 0 ) |
122 | 0 | flush_tlb_local(); |
123 | 0 |
|
124 | 0 | domain_unlock(curr->domain); |
125 | 0 |
|
126 | 0 | return ret; |
127 | 0 | } |
128 | | |
/*
 * HYPERVISOR_update_descriptor: validate and atomically write one
 * 8-byte descriptor at guest physical address @pa.
 *
 * Returns 0 on success, -EINVAL if @pa is misaligned, untranslatable,
 * or @desc fails check_descriptor(), or if the page's type reference
 * cannot be acquired.
 */
long do_update_descriptor(uint64_t pa, uint64_t desc)
{
    struct domain *currd = current->domain;
    unsigned long gmfn = pa >> PAGE_SHIFT;
    unsigned long mfn;
    unsigned int offset;
    struct desc_struct *gdt_pent, d;
    struct page_info *page;
    long ret = -EINVAL;

    /* Descriptor index within the page. */
    offset = ((unsigned int)pa & ~PAGE_MASK) / sizeof(struct desc_struct);

    /* Fill d via a raw 64-bit store; written back the same way below. */
    *(uint64_t *)&d = desc;

    page = get_page_from_gfn(currd, gmfn, NULL, P2M_ALLOC);
    if ( (((unsigned int)pa % sizeof(struct desc_struct)) != 0) ||
         !page ||
         !check_descriptor(currd, &d) )
    {
        /* Only a general ref is held at this point, if any. */
        if ( page )
            put_page(page);
        return -EINVAL;
    }
    mfn = mfn_x(page_to_mfn(page));

    /* Check if the given frame is in use in an unsafe context. */
    switch ( page->u.inuse.type_info & PGT_type_mask )
    {
    case PGT_seg_desc_page:
        /* Already a descriptor page: take another seg_desc type ref. */
        if ( unlikely(!get_page_type(page, PGT_seg_desc_page)) )
            goto out;
        break;
    default:
        /* Anything else must be usable as a plain writable page. */
        if ( unlikely(!get_page_type(page, PGT_writable_page)) )
            goto out;
        break;
    }

    paging_mark_dirty(currd, _mfn(mfn));

    /* All is good so make the update. */
    gdt_pent = map_domain_page(_mfn(mfn));
    write_atomic((uint64_t *)&gdt_pent[offset], *(uint64_t *)&d);
    unmap_domain_page(gdt_pent);

    /* Drop the type ref taken above; the general ref goes at out:. */
    put_page_type(page);

    ret = 0; /* success */

 out:
    put_page(page);

    return ret;
}
183 | | |
184 | | int compat_set_gdt(XEN_GUEST_HANDLE_PARAM(uint) frame_list, unsigned int entries) |
185 | 0 | { |
186 | 0 | unsigned int i, nr_pages = (entries + 511) / 512; |
187 | 0 | unsigned long frames[16]; |
188 | 0 | int ret; |
189 | 0 |
|
190 | 0 | /* Rechecked in set_gdt, but ensures a sane limit for copy_from_user(). */ |
191 | 0 | if ( entries > FIRST_RESERVED_GDT_ENTRY ) |
192 | 0 | return -EINVAL; |
193 | 0 |
|
194 | 0 | if ( !guest_handle_okay(frame_list, nr_pages) ) |
195 | 0 | return -EFAULT; |
196 | 0 |
|
197 | 0 | for ( i = 0; i < nr_pages; ++i ) |
198 | 0 | { |
199 | 0 | unsigned int frame; |
200 | 0 |
|
201 | 0 | if ( __copy_from_guest(&frame, frame_list, 1) ) |
202 | 0 | return -EFAULT; |
203 | 0 | frames[i] = frame; |
204 | 0 | guest_handle_add_offset(frame_list, 1); |
205 | 0 | } |
206 | 0 |
|
207 | 0 | domain_lock(current->domain); |
208 | 0 |
|
209 | 0 | if ( (ret = pv_set_gdt(current, frames, entries)) == 0 ) |
210 | 0 | flush_tlb_local(); |
211 | 0 |
|
212 | 0 | domain_unlock(current->domain); |
213 | 0 |
|
214 | 0 | return ret; |
215 | 0 | } |
216 | | |
217 | | int compat_update_descriptor(uint32_t pa_lo, uint32_t pa_hi, |
218 | | uint32_t desc_lo, uint32_t desc_hi) |
219 | 0 | { |
220 | 0 | return do_update_descriptor(pa_lo | ((uint64_t)pa_hi << 32), |
221 | 0 | desc_lo | ((uint64_t)desc_hi << 32)); |
222 | 0 | } |
223 | | |
224 | | /* |
225 | | * Local variables: |
226 | | * mode: C |
227 | | * c-file-style: "BSD" |
228 | | * c-basic-offset: 4 |
229 | | * tab-width: 4 |
230 | | * indent-tabs-mode: nil |
231 | | * End: |
232 | | */ |