debuggers.hg

view xen/arch/x86/x86_64/compat/mm.c @ 0:7d21f7218375

Exact replica of unstable on 051908 + README-this
author Mukesh Rathor
date Mon May 19 15:34:57 2008 -0700 (2008-05-19)
children 5c0bf00e371d
#ifdef CONFIG_COMPAT

#include <xen/event.h>
#include <xen/multicall.h>
#include <compat/memory.h>
#include <compat/xen.h>

int compat_set_gdt(XEN_GUEST_HANDLE(uint) frame_list, unsigned int entries)
{
    unsigned int i, nr_pages = (entries + 511) / 512;
    unsigned long frames[16];
    long ret;

    /* Rechecked in set_gdt, but ensures a sane limit for copy_from_user(). */
    if ( entries > FIRST_RESERVED_GDT_ENTRY )
        return -EINVAL;

    if ( !guest_handle_okay(frame_list, nr_pages) )
        return -EFAULT;

    for ( i = 0; i < nr_pages; ++i )
    {
        unsigned int frame;

        if ( __copy_from_guest(&frame, frame_list, 1) )
            return -EFAULT;
        frames[i] = frame;
        guest_handle_add_offset(frame_list, 1);
    }

    domain_lock(current->domain);

    if ( (ret = set_gdt(current, frames, entries)) == 0 )
        flush_tlb_local();

    domain_unlock(current->domain);

    return ret;
}
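
/*
 * Arithmetic note (illustrative, not part of the original file): each GDT
 * descriptor is 8 bytes, so one 4 KiB page holds 4096 / 8 = 512 entries and
 * (entries + 511) / 512 rounds up to the number of page frames the guest
 * must supply.  For example, entries = 513 gives nr_pages = 2, while the
 * FIRST_RESERVED_GDT_ENTRY check above keeps nr_pages within the 16-slot
 * frames[] array.
 */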
int compat_update_descriptor(u32 pa_lo, u32 pa_hi, u32 desc_lo, u32 desc_hi)
{
    return do_update_descriptor(pa_lo | ((u64)pa_hi << 32),
                                desc_lo | ((u64)desc_hi << 32));
}
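
/*
 * Illustrative sketch (the values are hypothetical, not part of the
 * interface): a 32-bit guest passes each 64-bit quantity as two 32-bit
 * halves, which the compat wrappers above and below reassemble as
 * lo | ((u64)hi << 32).  For example:
 *
 *     u32 pa_lo = 0x0000f000, pa_hi = 0x00000001;
 *     u64 pa = pa_lo | ((u64)pa_hi << 32);    // == 0x10000f000
 *
 * so the native do_update_descriptor() sees a single machine address.
 */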
int compat_arch_memory_op(int op, XEN_GUEST_HANDLE(void) arg)
{
    struct compat_machphys_mfn_list xmml;
    l2_pgentry_t l2e;
    unsigned long v;
    compat_pfn_t mfn;
    unsigned int i;
    int rc = 0;

    switch ( op )
    {
    case XENMEM_add_to_physmap:
    {
        struct compat_add_to_physmap cmp;
        struct xen_add_to_physmap *nat = (void *)COMPAT_ARG_XLAT_VIRT_START(current->vcpu_id);

        if ( copy_from_guest(&cmp, arg, 1) )
            return -EFAULT;

        XLAT_add_to_physmap(nat, &cmp);
        rc = arch_memory_op(op, guest_handle_from_ptr(nat, void));

        break;
    }

    case XENMEM_set_memory_map:
    {
        struct compat_foreign_memory_map cmp;
        struct xen_foreign_memory_map *nat = (void *)COMPAT_ARG_XLAT_VIRT_START(current->vcpu_id);

        if ( copy_from_guest(&cmp, arg, 1) )
            return -EFAULT;

#define XLAT_memory_map_HNDL_buffer(_d_, _s_) \
        guest_from_compat_handle((_d_)->buffer, (_s_)->buffer)
        XLAT_foreign_memory_map(nat, &cmp);
#undef XLAT_memory_map_HNDL_buffer

        rc = arch_memory_op(op, guest_handle_from_ptr(nat, void));

        break;
    }

    case XENMEM_memory_map:
    case XENMEM_machine_memory_map:
    {
        struct compat_memory_map cmp;
        struct xen_memory_map *nat = (void *)COMPAT_ARG_XLAT_VIRT_START(current->vcpu_id);

        if ( copy_from_guest(&cmp, arg, 1) )
            return -EFAULT;

#define XLAT_memory_map_HNDL_buffer(_d_, _s_) \
        guest_from_compat_handle((_d_)->buffer, (_s_)->buffer)
        XLAT_memory_map(nat, &cmp);
#undef XLAT_memory_map_HNDL_buffer

        rc = arch_memory_op(op, guest_handle_from_ptr(nat, void));
        if ( rc < 0 )
            break;

#define XLAT_memory_map_HNDL_buffer(_d_, _s_) ((void)0)
        XLAT_memory_map(&cmp, nat);
#undef XLAT_memory_map_HNDL_buffer
        if ( copy_to_guest(arg, &cmp, 1) )
            rc = -EFAULT;

        break;
    }

    case XENMEM_machphys_mapping:
    {
        struct domain *d = current->domain;
        struct compat_machphys_mapping mapping = {
            .v_start = MACH2PHYS_COMPAT_VIRT_START(d),
            .v_end   = MACH2PHYS_COMPAT_VIRT_END,
            .max_mfn = MACH2PHYS_COMPAT_NR_ENTRIES(d) - 1
        };

        if ( copy_to_guest(arg, &mapping, 1) )
            rc = -EFAULT;

        break;
    }

    case XENMEM_machphys_mfn_list:
        if ( copy_from_guest(&xmml, arg, 1) )
            return -EFAULT;

        for ( i = 0, v = RDWR_COMPAT_MPT_VIRT_START;
              (i != xmml.max_extents) && (v != RDWR_COMPAT_MPT_VIRT_END);
              i++, v += 1 << L2_PAGETABLE_SHIFT )
        {
            l2e = compat_idle_pg_table_l2[l2_table_offset(v)];
            if ( !(l2e_get_flags(l2e) & _PAGE_PRESENT) )
                break;
            mfn = l2e_get_pfn(l2e) + l1_table_offset(v);
            if ( copy_to_compat_offset(xmml.extent_start, i, &mfn, 1) )
                return -EFAULT;
        }

        xmml.nr_extents = i;
        if ( copy_to_guest(arg, &xmml, 1) )
            rc = -EFAULT;

        break;

    default:
        rc = -ENOSYS;
        break;
    }

    return rc;
}
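
/*
 * The cases above all follow the same translate-in / call-native /
 * translate-out shape.  A minimal sketch of that shape (the names foo,
 * compat_foo, xen_foo and XLAT_foo are placeholders, not real interfaces):
 *
 *     struct compat_foo cmp;
 *     struct xen_foo *nat = (void *)COMPAT_ARG_XLAT_VIRT_START(current->vcpu_id);
 *
 *     if ( copy_from_guest(&cmp, arg, 1) )      // 32-bit layout in
 *         return -EFAULT;
 *     XLAT_foo(nat, &cmp);                      // widen fields to native layout
 *     rc = arch_memory_op(op, guest_handle_from_ptr(nat, void));
 *     XLAT_foo(&cmp, nat);                      // narrow any out-fields back
 *     if ( copy_to_guest(arg, &cmp, 1) )        // 32-bit layout out
 *         rc = -EFAULT;
 */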
int compat_update_va_mapping(unsigned int va, u32 lo, u32 hi,
                             unsigned int flags)
{
    return do_update_va_mapping(va, lo | ((u64)hi << 32), flags);
}

int compat_update_va_mapping_otherdomain(unsigned long va, u32 lo, u32 hi,
                                         unsigned long flags,
                                         domid_t domid)
{
    return do_update_va_mapping_otherdomain(va, lo | ((u64)hi << 32), flags, domid);
}
DEFINE_XEN_GUEST_HANDLE(mmuext_op_compat_t);

int compat_mmuext_op(XEN_GUEST_HANDLE(mmuext_op_compat_t) cmp_uops,
                     unsigned int count,
                     XEN_GUEST_HANDLE(uint) pdone,
                     unsigned int foreigndom)
{
    unsigned int i, preempt_mask;
    int rc = 0;
    XEN_GUEST_HANDLE(mmuext_op_t) nat_ops;

    preempt_mask = count & MMU_UPDATE_PREEMPTED;
    count ^= preempt_mask;

    if ( unlikely(!guest_handle_okay(cmp_uops, count)) )
        return -EFAULT;

    set_xen_guest_handle(nat_ops, (void *)COMPAT_ARG_XLAT_VIRT_START(current->vcpu_id));

    for ( ; count; count -= i )
    {
        mmuext_op_t *nat_op = nat_ops.p;
        unsigned int limit = COMPAT_ARG_XLAT_SIZE / sizeof(*nat_op);
        int err;

        for ( i = 0; i < min(limit, count); ++i )
        {
            mmuext_op_compat_t cmp_op;
            enum XLAT_mmuext_op_arg1 arg1;
            enum XLAT_mmuext_op_arg2 arg2;

            if ( unlikely(__copy_from_guest(&cmp_op, cmp_uops, 1) != 0) )
            {
                rc = -EFAULT;
                break;
            }

            switch ( cmp_op.cmd )
            {
            case MMUEXT_PIN_L1_TABLE:
            case MMUEXT_PIN_L2_TABLE:
            case MMUEXT_PIN_L3_TABLE:
            case MMUEXT_PIN_L4_TABLE:
            case MMUEXT_UNPIN_TABLE:
            case MMUEXT_NEW_BASEPTR:
                arg1 = XLAT_mmuext_op_arg1_mfn;
                break;
            default:
                arg1 = XLAT_mmuext_op_arg1_linear_addr;
                break;
            case MMUEXT_NEW_USER_BASEPTR:
                rc = -EINVAL;
                /* fall through */
            case MMUEXT_TLB_FLUSH_LOCAL:
            case MMUEXT_TLB_FLUSH_MULTI:
            case MMUEXT_TLB_FLUSH_ALL:
            case MMUEXT_FLUSH_CACHE:
                arg1 = -1;
                break;
            }

            if ( rc )
                break;

            switch ( cmp_op.cmd )
            {
            case MMUEXT_SET_LDT:
                arg2 = XLAT_mmuext_op_arg2_nr_ents;
                break;
            case MMUEXT_TLB_FLUSH_MULTI:
            case MMUEXT_INVLPG_MULTI:
                arg2 = XLAT_mmuext_op_arg2_vcpumask;
                break;
            default:
                arg2 = -1;
                break;
            }

#define XLAT_mmuext_op_HNDL_arg2_vcpumask(_d_, _s_) \
        do \
        { \
            unsigned int vcpumask; \
            if ( i < --limit ) \
            { \
                (_d_)->arg2.vcpumask.p = (void *)(nat_ops.p + limit); \
                if ( copy_from_compat(&vcpumask, (_s_)->arg2.vcpumask, 1) == 0 ) \
                    *(unsigned long *)(_d_)->arg2.vcpumask.p = vcpumask; \
                else \
                    rc = -EFAULT; \
            } \
        } while(0)
            XLAT_mmuext_op(nat_op, &cmp_op);
#undef XLAT_mmuext_op_HNDL_arg2_vcpumask

            if ( rc || i >= limit )
                break;

            guest_handle_add_offset(cmp_uops, 1);
            ++nat_op;
        }

        err = do_mmuext_op(nat_ops, i | preempt_mask, pdone, foreigndom);

        if ( err )
        {
            BUILD_BUG_ON(__HYPERVISOR_mmuext_op <= 0);
            if ( err == __HYPERVISOR_mmuext_op )
            {
                struct cpu_user_regs *regs = guest_cpu_user_regs();
                struct mc_state *mcs = &this_cpu(mc_state);
                unsigned int arg1 = !test_bit(_MCSF_in_multicall, &mcs->flags)
                                    ? regs->ecx
                                    : mcs->call.args[1];
                unsigned int left = arg1 & ~MMU_UPDATE_PREEMPTED;

                BUG_ON(left == arg1);
                BUG_ON(left > count);
                guest_handle_add_offset(nat_ops, i - left);
                guest_handle_subtract_offset(cmp_uops, left);
                left = 1;
                BUG_ON(!hypercall_xlat_continuation(&left, 0x01, nat_ops, cmp_uops));
                BUG_ON(left != arg1);
                if ( !test_bit(_MCSF_in_multicall, &mcs->flags) )
                    regs->_ecx += count - i;
                else
                    mcs->compat_call.args[1] += count - i;
            }
            else
                BUG_ON(err > 0);
            rc = err;
        }

        if ( rc )
            break;

        /* Force do_mmuext_op() to not start counting from zero again. */
        preempt_mask = MMU_UPDATE_PREEMPTED;
    }

    return rc;
}
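
/*
 * Note on the batching above (a sketch of the shape, not a statement of the
 * exact limits in this tree): the 32-bit ops are translated into the
 * per-vCPU xlat area in batches of at most
 * COMPAT_ARG_XLAT_SIZE / sizeof(mmuext_op_t) entries, and each batch is
 * handed to the native do_mmuext_op().  MMU_UPDATE_PREEMPTED rides in the
 * count argument: it is stripped from 'count' on entry (a resumed
 * continuation carries it) and forced back on for every batch after the
 * first, so the native handler keeps accumulating its "done" count instead
 * of restarting from zero.
 */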
#endif /* CONFIG_COMPAT */

/*
 * Local variables:
 * mode: C
 * c-set-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */