
xen/arch/x86/domctl.c @ 16630:ef83b50fc4a4

Author: Keir Fraser <keir.fraser@citrix.com>
Date:   Wed Dec 12 10:29:35 2007 +0000

vt-d: Test device assignability in xend, but defer actual assignment to qemu-dm.

Signed-off-by: Weidong Han <weidong.han@intel.com>
/******************************************************************************
 * Arch-specific domctl.c
 *
 * Copyright (c) 2002-2006, K A Fraser
 */

#include <xen/config.h>
#include <xen/types.h>
#include <xen/lib.h>
#include <xen/mm.h>
#include <xen/guest_access.h>
#include <xen/compat.h>
#include <public/domctl.h>
#include <xen/sched.h>
#include <xen/domain.h>
#include <xen/event.h>
#include <xen/domain_page.h>
#include <asm/msr.h>
#include <xen/trace.h>
#include <xen/console.h>
#include <xen/iocap.h>
#include <asm/paging.h>
#include <asm/irq.h>
#include <asm/hvm/hvm.h>
#include <asm/hvm/support.h>
#include <asm/hvm/cacheattr.h>
#include <asm/processor.h>
#include <xsm/xsm.h>
#include <asm/iommu.h>

long arch_do_domctl(
    struct xen_domctl *domctl,
    XEN_GUEST_HANDLE(xen_domctl_t) u_domctl)
{
    long ret = 0;

    switch ( domctl->cmd )
    {

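    /* Shadow/paging-mode control: forwarded to paging_domctl(). */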
    case XEN_DOMCTL_shadow_op:
    {
        struct domain *d;
        ret = -ESRCH;
        d = rcu_lock_domain_by_id(domctl->domain);
        if ( d != NULL )
        {
            ret = paging_domctl(d,
                                &domctl->u.shadow_op,
                                guest_handle_cast(u_domctl, void));
            rcu_unlock_domain(d);
            copy_to_guest(u_domctl, domctl, 1);
        }
    }
    break;

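    /* Grant or revoke the domain's access to a range of I/O ports. */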
    case XEN_DOMCTL_ioport_permission:
    {
        struct domain *d;
        unsigned int fp = domctl->u.ioport_permission.first_port;
        unsigned int np = domctl->u.ioport_permission.nr_ports;

        ret = -EINVAL;
        if ( (fp + np) > 65536 )
            break;

        ret = -ESRCH;
        if ( unlikely((d = rcu_lock_domain_by_id(domctl->domain)) == NULL) )
            break;

        ret = xsm_ioport_permission(d, fp,
            domctl->u.ioport_permission.allow_access);
        if ( ret )
        {
            rcu_unlock_domain(d);
            break;
        }

        if ( np == 0 )
            ret = 0;
        else if ( domctl->u.ioport_permission.allow_access )
            ret = ioports_permit_access(d, fp, fp + np - 1);
        else
            ret = ioports_deny_access(d, fp, fp + np - 1);

        rcu_unlock_domain(d);
    }
    break;

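    /* Report the page-table type (L1..L4 or none) of a single machine frame. */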
    case XEN_DOMCTL_getpageframeinfo:
    {
        struct page_info *page;
        unsigned long mfn = domctl->u.getpageframeinfo.gmfn;
        domid_t dom = domctl->domain;
        struct domain *d;

        ret = -EINVAL;

        if ( unlikely(!mfn_valid(mfn)) ||
             unlikely((d = rcu_lock_domain_by_id(dom)) == NULL) )
            break;

        page = mfn_to_page(mfn);

        ret = xsm_getpageframeinfo(page);
        if ( ret )
        {
            rcu_unlock_domain(d);
            break;
        }

        if ( likely(get_page(page, d)) )
        {
            ret = 0;

            domctl->u.getpageframeinfo.type = XEN_DOMCTL_PFINFO_NOTAB;

            if ( (page->u.inuse.type_info & PGT_count_mask) != 0 )
            {
                switch ( page->u.inuse.type_info & PGT_type_mask )
                {
                case PGT_l1_page_table:
                    domctl->u.getpageframeinfo.type = XEN_DOMCTL_PFINFO_L1TAB;
                    break;
                case PGT_l2_page_table:
                    domctl->u.getpageframeinfo.type = XEN_DOMCTL_PFINFO_L2TAB;
                    break;
                case PGT_l3_page_table:
                    domctl->u.getpageframeinfo.type = XEN_DOMCTL_PFINFO_L3TAB;
                    break;
                case PGT_l4_page_table:
                    domctl->u.getpageframeinfo.type = XEN_DOMCTL_PFINFO_L4TAB;
                    break;
                }
            }

            put_page(page);
        }

        rcu_unlock_domain(d);

        copy_to_guest(u_domctl, domctl, 1);
    }
    break;

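    /* Batched page-frame type query for up to 1024 frames supplied in a guest array. */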
    case XEN_DOMCTL_getpageframeinfo2:
    {
        int n,j;
        int num = domctl->u.getpageframeinfo2.num;
        domid_t dom = domctl->domain;
        struct domain *d;
        uint32_t *arr32;
        ret = -ESRCH;

        if ( unlikely((d = rcu_lock_domain_by_id(dom)) == NULL) )
            break;

        if ( unlikely(num > 1024) )
        {
            ret = -E2BIG;
            rcu_unlock_domain(d);
            break;
        }

        arr32 = alloc_xenheap_page();
        if ( !arr32 )
        {
            ret = -ENOMEM;
            put_domain(d);
            break;
        }

        ret = 0;
        for ( n = 0; n < num; )
        {
            int k = PAGE_SIZE / 4;
            if ( (num - n) < k )
                k = num - n;

            if ( copy_from_guest_offset(arr32,
                                        domctl->u.getpageframeinfo2.array,
                                        n, k) )
            {
                ret = -EFAULT;
                break;
            }

            for ( j = 0; j < k; j++ )
            {
                struct page_info *page;
                unsigned long mfn = arr32[j];

                page = mfn_to_page(mfn);

                ret = xsm_getpageframeinfo(page);
                if ( ret )
                    continue;

                if ( likely(mfn_valid(mfn) && get_page(page, d)) )
                {
                    unsigned long type = 0;

                    switch( page->u.inuse.type_info & PGT_type_mask )
                    {
                    case PGT_l1_page_table:
                        type = XEN_DOMCTL_PFINFO_L1TAB;
                        break;
                    case PGT_l2_page_table:
                        type = XEN_DOMCTL_PFINFO_L2TAB;
                        break;
                    case PGT_l3_page_table:
                        type = XEN_DOMCTL_PFINFO_L3TAB;
                        break;
                    case PGT_l4_page_table:
                        type = XEN_DOMCTL_PFINFO_L4TAB;
                        break;
                    }

                    if ( page->u.inuse.type_info & PGT_pinned )
                        type |= XEN_DOMCTL_PFINFO_LPINTAB;
                    arr32[j] |= type;
                    put_page(page);
                }
                else
                    arr32[j] |= XEN_DOMCTL_PFINFO_XTAB;

            }

            if ( copy_to_guest_offset(domctl->u.getpageframeinfo2.array,
                                      n, arr32, k) )
            {
                ret = -EFAULT;
                break;
            }

            n += k;
        }

        free_xenheap_page(arr32);

        rcu_unlock_domain(d);
    }
    break;

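    /* Copy up to max_pfns MFNs from the domain's page list to the caller's buffer. */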
    case XEN_DOMCTL_getmemlist:
    {
        int i;
        struct domain *d = rcu_lock_domain_by_id(domctl->domain);
        unsigned long max_pfns = domctl->u.getmemlist.max_pfns;
        uint64_t mfn;
        struct list_head *list_ent;

        ret = -EINVAL;
        if ( d != NULL )
        {
            ret = xsm_getmemlist(d);
            if ( ret )
            {
                rcu_unlock_domain(d);
                break;
            }

            spin_lock(&d->page_alloc_lock);

            if ( unlikely(d->is_dying) ) {
                spin_unlock(&d->page_alloc_lock);
                goto getmemlist_out;
            }

            ret = 0;
            list_ent = d->page_list.next;
            for ( i = 0; (i < max_pfns) && (list_ent != &d->page_list); i++ )
            {
                mfn = page_to_mfn(list_entry(
                    list_ent, struct page_info, list));
                if ( copy_to_guest_offset(domctl->u.getmemlist.buffer,
                                          i, &mfn, 1) )
                {
                    ret = -EFAULT;
                    break;
                }
                list_ent = mfn_to_page(mfn)->list.next;
            }

            spin_unlock(&d->page_alloc_lock);

            domctl->u.getmemlist.num_pfns = i;
            copy_to_guest(u_domctl, domctl, 1);
        getmemlist_out:
            rcu_unlock_domain(d);
        }
    }
    break;

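    /* Initialise the hypercall transfer page in the guest frame nominated by gmfn. */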
    case XEN_DOMCTL_hypercall_init:
    {
        struct domain *d = rcu_lock_domain_by_id(domctl->domain);
        unsigned long gmfn = domctl->u.hypercall_init.gmfn;
        unsigned long mfn;
        void *hypercall_page;

        ret = -ESRCH;
        if ( unlikely(d == NULL) )
            break;

        ret = xsm_hypercall_init(d);
        if ( ret )
        {
            rcu_unlock_domain(d);
            break;
        }

        mfn = gmfn_to_mfn(d, gmfn);

        ret = -EACCES;
        if ( !mfn_valid(mfn) ||
             !get_page_and_type(mfn_to_page(mfn), d, PGT_writable_page) )
        {
            rcu_unlock_domain(d);
            break;
        }

        ret = 0;

        hypercall_page = map_domain_page(mfn);
        hypercall_page_initialise(d, hypercall_page);
        unmap_domain_page(hypercall_page);

        put_page_and_type(mfn_to_page(mfn));

        rcu_unlock_domain(d);
    }
    break;

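    /* Load a complete HVM guest state image provided by the toolstack. */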
    case XEN_DOMCTL_sethvmcontext:
    {
        struct hvm_domain_context c;
        struct domain *d;

        c.cur = 0;
        c.size = domctl->u.hvmcontext.size;
        c.data = NULL;

        ret = -ESRCH;
        if ( (d = rcu_lock_domain_by_id(domctl->domain)) == NULL )
            break;

        ret = xsm_hvmcontext(d, domctl->cmd);
        if ( ret )
            goto sethvmcontext_out;

        ret = -EINVAL;
        if ( !is_hvm_domain(d) )
            goto sethvmcontext_out;

        ret = -ENOMEM;
        if ( (c.data = xmalloc_bytes(c.size)) == NULL )
            goto sethvmcontext_out;

        ret = -EFAULT;
        if ( copy_from_guest(c.data, domctl->u.hvmcontext.buffer, c.size) != 0)
            goto sethvmcontext_out;

        domain_pause(d);
        ret = hvm_load(d, &c);
        domain_unpause(d);

    sethvmcontext_out:
        if ( c.data != NULL )
            xfree(c.data);

        rcu_unlock_domain(d);
    }
    break;

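    /* Save the HVM guest state; a NULL buffer handle just queries the size needed. */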
    case XEN_DOMCTL_gethvmcontext:
    {
        struct hvm_domain_context c;
        struct domain *d;

        ret = -ESRCH;
        if ( (d = rcu_lock_domain_by_id(domctl->domain)) == NULL )
            break;

        ret = xsm_hvmcontext(d, domctl->cmd);
        if ( ret )
            goto gethvmcontext_out;

        ret = -EINVAL;
        if ( !is_hvm_domain(d) )
            goto gethvmcontext_out;

        c.cur = 0;
        c.size = hvm_save_size(d);
        c.data = NULL;

        if ( guest_handle_is_null(domctl->u.hvmcontext.buffer) )
        {
            /* Client is querying for the correct buffer size */
            domctl->u.hvmcontext.size = c.size;
            ret = 0;
            goto gethvmcontext_out;
        }

        /* Check that the client has a big enough buffer */
        ret = -ENOSPC;
        if ( domctl->u.hvmcontext.size < c.size )
            goto gethvmcontext_out;

        /* Allocate our own marshalling buffer */
        ret = -ENOMEM;
        if ( (c.data = xmalloc_bytes(c.size)) == NULL )
            goto gethvmcontext_out;

        domain_pause(d);
        ret = hvm_save(d, &c);
        domain_unpause(d);

        domctl->u.hvmcontext.size = c.cur;
        if ( copy_to_guest(domctl->u.hvmcontext.buffer, c.data, c.size) != 0 )
            ret = -EFAULT;

    gethvmcontext_out:
        if ( copy_to_guest(u_domctl, domctl, 1) )
            ret = -EFAULT;

        if ( c.data != NULL )
            xfree(c.data);

        rcu_unlock_domain(d);
    }
    break;

    case XEN_DOMCTL_set_address_size:
    {
        struct domain *d;

        ret = -ESRCH;
        if ( (d = rcu_lock_domain_by_id(domctl->domain)) == NULL )
            break;

        ret = xsm_address_size(d, domctl->cmd);
        if ( ret )
        {
            rcu_unlock_domain(d);
            break;
        }

        switch ( domctl->u.address_size.size )
        {
#ifdef CONFIG_COMPAT
        case 32:
            ret = switch_compat(d);
            break;
        case 64:
            ret = switch_native(d);
            break;
#endif
        default:
            ret = (domctl->u.address_size.size == BITS_PER_LONG) ? 0 : -EINVAL;
            break;
        }

        rcu_unlock_domain(d);
    }
    break;

    case XEN_DOMCTL_get_address_size:
    {
        struct domain *d;

        ret = -ESRCH;
        if ( (d = rcu_lock_domain_by_id(domctl->domain)) == NULL )
            break;

        ret = xsm_address_size(d, domctl->cmd);
        if ( ret )
        {
            rcu_unlock_domain(d);
            break;
        }

        domctl->u.address_size.size = BITS_PER_GUEST_LONG(d);

        ret = 0;
        rcu_unlock_domain(d);

        if ( copy_to_guest(u_domctl, domctl, 1) )
            ret = -EFAULT;
    }
    break;

    case XEN_DOMCTL_sendtrigger:
    {
        struct domain *d;
        struct vcpu *v;

        ret = -ESRCH;
        if ( (d = rcu_lock_domain_by_id(domctl->domain)) == NULL )
            break;

        ret = -EINVAL;
        if ( domctl->u.sendtrigger.vcpu >= MAX_VIRT_CPUS )
            goto sendtrigger_out;

        ret = -ESRCH;
        if ( (v = d->vcpu[domctl->u.sendtrigger.vcpu]) == NULL )
            goto sendtrigger_out;

        switch ( domctl->u.sendtrigger.trigger )
        {
        case XEN_DOMCTL_SENDTRIGGER_NMI:
        {
            ret = 0;
            if ( !test_and_set_bool(v->nmi_pending) )
                vcpu_kick(v);
        }
        break;

        default:
            ret = -ENOSYS;
        }

    sendtrigger_out:
        rcu_unlock_domain(d);
    }
    break;

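    /* Check whether a PCI device (bus/devfn) is still unassigned (VT-d only). */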
    case XEN_DOMCTL_test_assign_device:
    {
        u8 bus, devfn;

        ret = -EINVAL;
        if ( !vtd_enabled )
            break;

        bus = (domctl->u.test_assign_device.machine_bdf >> 16) & 0xff;
        devfn = (domctl->u.test_assign_device.machine_bdf >> 8) & 0xff;

        if ( device_assigned(bus, devfn) )
        {
            gdprintk(XENLOG_ERR, "XEN_DOMCTL_test_assign_device: "
                     "%x:%x:%x already assigned\n",
                     bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
            break;
        }
        ret = 0;
    }
    break;

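    /* Assign a PCI device to the domain for VT-d passthrough. */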
    case XEN_DOMCTL_assign_device:
    {
        struct domain *d;
        u8 bus, devfn;

        ret = -EINVAL;
        if ( !vtd_enabled )
            break;

        if ( unlikely((d = get_domain_by_id(domctl->domain)) == NULL) )
        {
            gdprintk(XENLOG_ERR,
                     "XEN_DOMCTL_assign_device: get_domain_by_id() failed\n");
            break;
        }
        bus = (domctl->u.assign_device.machine_bdf >> 16) & 0xff;
        devfn = (domctl->u.assign_device.machine_bdf >> 8) & 0xff;

        if ( device_assigned(bus, devfn) )
        {
            gdprintk(XENLOG_ERR, "XEN_DOMCTL_assign_device: "
                     "%x:%x:%x already assigned\n",
                     bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
            break;
        }

        ret = assign_device(d, bus, devfn);
        gdprintk(XENLOG_INFO, "XEN_DOMCTL_assign_device: bdf = %x:%x:%x\n",
                 bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
        put_domain(d);
    }
    break;

    case XEN_DOMCTL_bind_pt_irq:
    {
        struct domain * d;
        xen_domctl_bind_pt_irq_t * bind;

        ret = -ESRCH;
        if ( (d = rcu_lock_domain_by_id(domctl->domain)) == NULL )
            break;
        bind = &(domctl->u.bind_pt_irq);
        if ( vtd_enabled )
            ret = pt_irq_create_bind_vtd(d, bind);
        if ( ret < 0 )
            gdprintk(XENLOG_ERR, "pt_irq_create_bind failed!\n");
        rcu_unlock_domain(d);
    }
    break;

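    /* Add or remove a machine-frame range in the guest physmap (MMIO mapping). */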
    case XEN_DOMCTL_memory_mapping:
    {
        struct domain *d;
        unsigned long gfn = domctl->u.memory_mapping.first_gfn;
        unsigned long mfn = domctl->u.memory_mapping.first_mfn;
        unsigned long nr_mfns = domctl->u.memory_mapping.nr_mfns;
        int i;

        ret = -EINVAL;
        if ( (mfn + nr_mfns - 1) < mfn ) /* wrap? */
            break;

        ret = -ESRCH;
        if ( unlikely((d = rcu_lock_domain_by_id(domctl->domain)) == NULL) )
            break;

        ret = 0;
        if ( domctl->u.memory_mapping.add_mapping )
        {
            gdprintk(XENLOG_INFO,
                     "memory_map:add: gfn=%lx mfn=%lx nr_mfns=%lx\n",
                     gfn, mfn, nr_mfns);

            ret = iomem_permit_access(d, mfn, mfn + nr_mfns - 1);
            for ( i = 0; i < nr_mfns; i++ )
                set_mmio_p2m_entry(d, gfn+i, _mfn(mfn+i));
        }
        else
        {
            gdprintk(XENLOG_INFO,
                     "memory_map:remove: gfn=%lx mfn=%lx nr_mfns=%lx\n",
                     gfn, mfn, nr_mfns);

            for ( i = 0; i < nr_mfns; i++ )
                clear_mmio_p2m_entry(d, gfn+i);
            ret = iomem_deny_access(d, mfn, mfn + nr_mfns - 1);
        }

        rcu_unlock_domain(d);
    }
    break;

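    /* Add or remove a guest-to-machine I/O port mapping for this domain. */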
    case XEN_DOMCTL_ioport_mapping:
    {
#define MAX_IOPORTS 0x10000
        struct domain *d;
        struct hvm_iommu *hd;
        unsigned int fgp = domctl->u.ioport_mapping.first_gport;
        unsigned int fmp = domctl->u.ioport_mapping.first_mport;
        unsigned int np = domctl->u.ioport_mapping.nr_ports;
        struct g2m_ioport *g2m_ioport;
        int found = 0;

        ret = -EINVAL;
        if ( (np == 0) || (fgp > MAX_IOPORTS) || (fmp > MAX_IOPORTS) ||
             ((fgp + np) > MAX_IOPORTS) || ((fmp + np) > MAX_IOPORTS) )
        {
            gdprintk(XENLOG_ERR,
                     "ioport_map:invalid:gport=%x mport=%x nr_ports=%x\n",
                     fgp, fmp, np);
            break;
        }

        ret = -ESRCH;
        if ( unlikely((d = rcu_lock_domain_by_id(domctl->domain)) == NULL) )
            break;

        hd = domain_hvm_iommu(d);
        if ( domctl->u.ioport_mapping.add_mapping )
        {
            gdprintk(XENLOG_INFO,
                     "ioport_map:add f_gport=%x f_mport=%x np=%x\n",
                     fgp, fmp, np);

            list_for_each_entry(g2m_ioport, &hd->g2m_ioport_list, list)
                if ( g2m_ioport->mport == fmp )
                {
                    g2m_ioport->gport = fgp;
                    g2m_ioport->np = np;
                    found = 1;
                    break;
                }
            if ( !found )
            {
                g2m_ioport = xmalloc(struct g2m_ioport);
                g2m_ioport->gport = fgp;
                g2m_ioport->mport = fmp;
                g2m_ioport->np = np;
                list_add_tail(&g2m_ioport->list, &hd->g2m_ioport_list);
            }
            ret = ioports_permit_access(d, fmp, fmp + np - 1);
        }
        else
        {
            gdprintk(XENLOG_INFO,
                     "ioport_map:remove f_gport=%x f_mport=%x np=%x\n",
                     fgp, fmp, np);
            list_for_each_entry(g2m_ioport, &hd->g2m_ioport_list, list)
                if ( g2m_ioport->mport == fmp )
                {
                    list_del(&g2m_ioport->list);
                    xfree(g2m_ioport);
                    break;
                }
            ret = ioports_deny_access(d, fmp, fmp + np - 1);
        }
        rcu_unlock_domain(d);
    }
    break;

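    /* Pin a cache attribute onto a range of guest physical addresses. */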
    case XEN_DOMCTL_pin_mem_cacheattr:
    {
        struct domain *d;

        ret = -ESRCH;
        d = rcu_lock_domain_by_id(domctl->domain);
        if ( d == NULL )
            break;

        ret = hvm_set_mem_pinned_cacheattr(
            d, domctl->u.pin_mem_cacheattr.start,
            domctl->u.pin_mem_cacheattr.end,
            domctl->u.pin_mem_cacheattr.type);

        rcu_unlock_domain(d);
    }
    break;

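    /* Get or set extended vcpu state: sysenter/syscall32 callback details. */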
    case XEN_DOMCTL_set_ext_vcpucontext:
    case XEN_DOMCTL_get_ext_vcpucontext:
    {
        struct xen_domctl_ext_vcpucontext *evc;
        struct domain *d;
        struct vcpu *v;

        evc = &domctl->u.ext_vcpucontext;

        ret = -ESRCH;
        d = rcu_lock_domain_by_id(domctl->domain);
        if ( d == NULL )
            break;

        ret = -ESRCH;
        if ( (evc->vcpu >= MAX_VIRT_CPUS) ||
             ((v = d->vcpu[evc->vcpu]) == NULL) )
            goto ext_vcpucontext_out;

        if ( domctl->cmd == XEN_DOMCTL_get_ext_vcpucontext )
        {
            evc->size = sizeof(*evc);
#ifdef __x86_64__
            evc->sysenter_callback_cs = v->arch.sysenter_callback_cs;
            evc->sysenter_callback_eip = v->arch.sysenter_callback_eip;
            evc->sysenter_disables_events = v->arch.sysenter_disables_events;
            evc->syscall32_callback_cs = v->arch.syscall32_callback_cs;
            evc->syscall32_callback_eip = v->arch.syscall32_callback_eip;
            evc->syscall32_disables_events = v->arch.syscall32_disables_events;
#else
            evc->sysenter_callback_cs = 0;
            evc->sysenter_callback_eip = 0;
            evc->sysenter_disables_events = 0;
            evc->syscall32_callback_cs = 0;
            evc->syscall32_callback_eip = 0;
            evc->syscall32_disables_events = 0;
#endif
        }
        else
        {
            ret = -EINVAL;
            if ( evc->size != sizeof(*evc) )
                goto ext_vcpucontext_out;
#ifdef __x86_64__
            fixup_guest_code_selector(d, evc->sysenter_callback_cs);
            v->arch.sysenter_callback_cs = evc->sysenter_callback_cs;
            v->arch.sysenter_callback_eip = evc->sysenter_callback_eip;
            v->arch.sysenter_disables_events = evc->sysenter_disables_events;
            fixup_guest_code_selector(d, evc->syscall32_callback_cs);
            v->arch.syscall32_callback_cs = evc->syscall32_callback_cs;
            v->arch.syscall32_callback_eip = evc->syscall32_callback_eip;
            v->arch.syscall32_disables_events = evc->syscall32_disables_events;
#else
            /* We do not support syscall/syscall32/sysenter on 32-bit Xen. */
            if ( (evc->sysenter_callback_cs & ~3) ||
                 evc->sysenter_callback_eip ||
                 (evc->syscall32_callback_cs & ~3) ||
                 evc->syscall32_callback_eip )
                goto ext_vcpucontext_out;
#endif
        }

        ret = 0;

    ext_vcpucontext_out:
        rcu_unlock_domain(d);
        if ( (domctl->cmd == XEN_DOMCTL_get_ext_vcpucontext) &&
             copy_to_guest(u_domctl, domctl, 1) )
            ret = -EFAULT;
    }
    break;

    default:
        ret = -ENOSYS;
        break;
    }

    return ret;
}

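/* Fill in a guest-context structure (native or compat layout) for vcpu v. */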
void arch_get_info_guest(struct vcpu *v, vcpu_guest_context_u c)
{
#ifdef CONFIG_COMPAT
#define c(fld) (!is_pv_32on64_domain(v->domain) ? (c.nat->fld) : (c.cmp->fld))
#else
#define c(fld) (c.nat->fld)
#endif

    if ( !is_pv_32on64_domain(v->domain) )
        memcpy(c.nat, &v->arch.guest_context, sizeof(*c.nat));
#ifdef CONFIG_COMPAT
    else
        XLAT_vcpu_guest_context(c.cmp, &v->arch.guest_context);
#endif

    c(flags &= ~(VGCF_i387_valid|VGCF_in_kernel));
    if ( v->fpu_initialised )
        c(flags |= VGCF_i387_valid);
    if ( !test_bit(_VPF_down, &v->pause_flags) )
        c(flags |= VGCF_online);

    if ( is_hvm_vcpu(v) )
    {
        memset(c.nat->ctrlreg, 0, sizeof(c.nat->ctrlreg));
        c.nat->ctrlreg[0] = v->arch.hvm_vcpu.guest_cr[0];
        c.nat->ctrlreg[2] = v->arch.hvm_vcpu.guest_cr[2];
        c.nat->ctrlreg[3] = v->arch.hvm_vcpu.guest_cr[3];
        c.nat->ctrlreg[4] = v->arch.hvm_vcpu.guest_cr[4];
    }
    else
    {
        /* IOPL privileges are virtualised: merge back into returned eflags. */
        BUG_ON((c(user_regs.eflags) & EF_IOPL) != 0);
        c(user_regs.eflags |= v->arch.iopl << 12);

        if ( !is_pv_32on64_domain(v->domain) )
        {
            c.nat->ctrlreg[3] = xen_pfn_to_cr3(
                pagetable_get_pfn(v->arch.guest_table));
#ifdef __x86_64__
            if ( !pagetable_is_null(v->arch.guest_table_user) )
                c.nat->ctrlreg[1] = xen_pfn_to_cr3(
                    pagetable_get_pfn(v->arch.guest_table_user));
#endif

            /* Merge shadow DR7 bits into real DR7. */
            c.nat->debugreg[7] |= c.nat->debugreg[5];
            c.nat->debugreg[5] = 0;
        }
#ifdef CONFIG_COMPAT
        else
        {
            l4_pgentry_t *l4e = __va(pagetable_get_paddr(v->arch.guest_table));
            c.cmp->ctrlreg[3] = compat_pfn_to_cr3(l4e_get_pfn(*l4e));

            /* Merge shadow DR7 bits into real DR7. */
            c.cmp->debugreg[7] |= c.cmp->debugreg[5];
            c.cmp->debugreg[5] = 0;
        }
#endif

        if ( guest_kernel_mode(v, &v->arch.guest_context.user_regs) )
            c(flags |= VGCF_in_kernel);
    }

    c(vm_assist = v->domain->vm_assist);
#undef c
}

/*
 * Local variables:
 * mode: C
 * c-set-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */