debuggers.hg

view xen/arch/x86/domctl.c @ 20838:0447c5532e9f

x86: add and use XEN_DOMCTL_getpageframeinfo3

To support MFNs wider than 28 bits, add XEN_DOMCTL_getpageframeinfo3
(with the type replacing the passed-in MFN rather than being OR'ed
into it) to properly back xc_get_pfn_type_batch().
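
From the caller's side the new interface is array-based: the tool fills an
array of xen_pfn_t entries with MFNs, and the hypervisor overwrites each entry
with just its XEN_DOMCTL_PFINFO_* type value. A minimal caller-side sketch
(illustrative only; it omits the hypercall-buffer handling a real libxc caller
needs, and dom/mfn0/mfn1 are placeholders):

    xen_pfn_t arr[2] = { mfn0, mfn1 };   /* full-width MFNs, no 28-bit limit */
    struct xen_domctl domctl = { 0 };

    domctl.cmd = XEN_DOMCTL_getpageframeinfo3;
    domctl.domain = dom;
    domctl.u.getpageframeinfo3.num = 2;
    set_xen_guest_handle(domctl.u.getpageframeinfo3.array, arr);
    /* After the hypercall each arr[j] holds only the type, e.g.
     * XEN_DOMCTL_PFINFO_L1TAB or XEN_DOMCTL_PFINFO_XTAB;
     * getpageframeinfo2 instead OR'ed the type into the top bits of a
     * 32-bit entry, which is what imposed the 28-bit MFN limit. */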

With xc_get_pfn_type_batch() used only internally within libxc, move its
prototype from xenctrl.h to xc_private.h.
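
For reference, the libxc-internal declaration is roughly the following (a
sketch of the xc_private.h prototype; the exact parameter types are whatever
the corresponding libxc patch uses and may differ slightly):

    /* On input arr[] holds frame numbers; on output each entry is
     * replaced by its XEN_DOMCTL_PFINFO_* type value. */
    int xc_get_pfn_type_batch(int xc_handle, uint32_t dom,
                              unsigned int num, xen_pfn_t *arr);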

This also fixes a few bugs in pre-existing code:
- the failure path of init_mem_info() leaked minfo->pfn_type,
- one error path of the XEN_DOMCTL_getpageframeinfo2 handler used
put_domain() where rcu_unlock_domain() was meant (see the sketch
below), and
- the XEN_DOMCTL_getpageframeinfo2 handler could call
xsm_getpageframeinfo() with an invalid struct page_info pointer.
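
The second fix is simply a matter of pairing the right unlock with the right
lock; a minimal sketch of the rule being restored (not verbatim from the
patch, and some_check_failed is a placeholder):

    struct domain *d = rcu_lock_domain_by_id(domctl->domain);

    if ( d == NULL )
        return -ESRCH;
    if ( some_check_failed )
    {
        rcu_unlock_domain(d);  /* not put_domain(): no reference was taken */
        return -EINVAL;
    }
    /* ... */
    rcu_unlock_domain(d);      /* put_domain() pairs with get_domain_by_id() */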

Signed-off-by: Jan Beulich <jbeulich@novell.com>
author Keir Fraser <keir.fraser@citrix.com>
date Wed Jan 13 08:14:01 2010 +0000 (2010-01-13)
parents 68e964ec2c7b
children 0edb75cd8126
line source
1 /******************************************************************************
2 * Arch-specific domctl.c
3 *
4 * Copyright (c) 2002-2006, K A Fraser
5 */
7 #include <xen/config.h>
8 #include <xen/types.h>
9 #include <xen/lib.h>
10 #include <xen/mm.h>
11 #include <xen/guest_access.h>
12 #include <xen/compat.h>
13 #include <xen/pci.h>
14 #include <public/domctl.h>
15 #include <xen/sched.h>
16 #include <xen/domain.h>
17 #include <xen/event.h>
18 #include <xen/domain_page.h>
19 #include <asm/msr.h>
20 #include <xen/trace.h>
21 #include <xen/console.h>
22 #include <xen/iocap.h>
23 #include <xen/paging.h>
24 #include <asm/irq.h>
25 #include <asm/hvm/hvm.h>
26 #include <asm/hvm/support.h>
27 #include <asm/hvm/cacheattr.h>
28 #include <asm/processor.h>
29 #include <asm/acpi.h> /* for hvm_acpi_power_button */
30 #include <asm/hypercall.h> /* for arch_do_domctl */
31 #include <xsm/xsm.h>
32 #include <xen/iommu.h>
33 #include <asm/mem_event.h>
34 #include <public/mem_event.h>
35 #include <asm/mem_sharing.h>
37 #ifdef XEN_GDBSX_CONFIG
38 #ifdef XEN_KDB_CONFIG
39 #include "../kdb/include/kdbdefs.h"
40 #include "../kdb/include/kdbproto.h"
41 #else
42 typedef unsigned long kdbva_t;
43 typedef unsigned char kdbbyt_t;
44 extern int dbg_rw_mem(kdbva_t, kdbbyt_t *, int, domid_t, int, uint64_t);
45 #endif
46 static int
47 gdbsx_guest_mem_io(domid_t domid, struct xen_domctl_gdbsx_memio *iop)
48 {
49 ulong l_uva = (ulong)iop->uva;
50 iop->remain = dbg_rw_mem(
51 (kdbva_t)iop->gva, (kdbbyt_t *)l_uva, iop->len, domid,
52 iop->gwr, iop->pgd3val);
53 return (iop->remain ? -EFAULT : 0);
54 }
55 #endif /* XEN_GDBSX_CONFIG */
57 long arch_do_domctl(
58 struct xen_domctl *domctl,
59 XEN_GUEST_HANDLE(xen_domctl_t) u_domctl)
60 {
61 long ret = 0;
63 switch ( domctl->cmd )
64 {
66 case XEN_DOMCTL_shadow_op:
67 {
68 struct domain *d;
69 ret = -ESRCH;
70 d = rcu_lock_domain_by_id(domctl->domain);
71 if ( d != NULL )
72 {
73 ret = paging_domctl(d,
74 &domctl->u.shadow_op,
75 guest_handle_cast(u_domctl, void));
76 rcu_unlock_domain(d);
77 copy_to_guest(u_domctl, domctl, 1);
78 }
79 }
80 break;
82 case XEN_DOMCTL_ioport_permission:
83 {
84 struct domain *d;
85 unsigned int fp = domctl->u.ioport_permission.first_port;
86 unsigned int np = domctl->u.ioport_permission.nr_ports;
88 ret = -EINVAL;
89 if ( (fp + np) > 65536 )
90 break;
92 ret = -ESRCH;
93 if ( unlikely((d = rcu_lock_domain_by_id(domctl->domain)) == NULL) )
94 break;
96 if ( np == 0 )
97 ret = 0;
98 else if ( domctl->u.ioport_permission.allow_access )
99 ret = ioports_permit_access(d, fp, fp + np - 1);
100 else
101 ret = ioports_deny_access(d, fp, fp + np - 1);
103 rcu_unlock_domain(d);
104 }
105 break;
107 case XEN_DOMCTL_getpageframeinfo:
108 {
109 struct page_info *page;
110 unsigned long mfn = domctl->u.getpageframeinfo.gmfn;
111 domid_t dom = domctl->domain;
112 struct domain *d;
114 ret = -EINVAL;
116 if ( unlikely(!mfn_valid(mfn)) ||
117 unlikely((d = rcu_lock_domain_by_id(dom)) == NULL) )
118 break;
120 page = mfn_to_page(mfn);
122 ret = xsm_getpageframeinfo(page);
123 if ( ret )
124 {
125 rcu_unlock_domain(d);
126 break;
127 }
129 if ( likely(get_page(page, d)) )
130 {
131 ret = 0;
133 domctl->u.getpageframeinfo.type = XEN_DOMCTL_PFINFO_NOTAB;
135 if ( (page->u.inuse.type_info & PGT_count_mask) != 0 )
136 {
137 switch ( page->u.inuse.type_info & PGT_type_mask )
138 {
139 case PGT_l1_page_table:
140 domctl->u.getpageframeinfo.type = XEN_DOMCTL_PFINFO_L1TAB;
141 break;
142 case PGT_l2_page_table:
143 domctl->u.getpageframeinfo.type = XEN_DOMCTL_PFINFO_L2TAB;
144 break;
145 case PGT_l3_page_table:
146 domctl->u.getpageframeinfo.type = XEN_DOMCTL_PFINFO_L3TAB;
147 break;
148 case PGT_l4_page_table:
149 domctl->u.getpageframeinfo.type = XEN_DOMCTL_PFINFO_L4TAB;
150 break;
151 }
152 }
154 put_page(page);
155 }
157 rcu_unlock_domain(d);
159 copy_to_guest(u_domctl, domctl, 1);
160 }
161 break;
163 case XEN_DOMCTL_getpageframeinfo3:
164 #ifdef __x86_64__
165 if (!has_32bit_shinfo(current->domain))
166 {
167 unsigned int n, j;
168 unsigned int num = domctl->u.getpageframeinfo3.num;
169 domid_t dom = domctl->domain;
170 struct domain *d;
171 struct page_info *page;
172 xen_pfn_t *arr;
174 ret = -ESRCH;
175 if ( unlikely((d = rcu_lock_domain_by_id(dom)) == NULL) )
176 break;
178 if ( unlikely(num > 1024) ||
179 unlikely(num != domctl->u.getpageframeinfo3.num) )
180 {
181 ret = -E2BIG;
182 rcu_unlock_domain(d);
183 break;
184 }
186 page = alloc_domheap_page(NULL, 0);
187 if ( !page )
188 {
189 ret = -ENOMEM;
190 rcu_unlock_domain(d);
191 break;
192 }
193 arr = page_to_virt(page);
195 for ( n = ret = 0; n < num; )
196 {
197 unsigned int k = min_t(unsigned int, num - n, PAGE_SIZE / 4);
199 if ( copy_from_guest_offset(arr,
200 domctl->u.getpageframeinfo3.array,
201 n, k) )
202 {
203 ret = -EFAULT;
204 break;
205 }
207 for ( j = 0; j < k; j++ )
208 {
209 unsigned long type = 0, mfn = arr[j];
211 page = mfn_to_page(mfn);
213 if ( unlikely(!mfn_valid(mfn)) )
214 type = XEN_DOMCTL_PFINFO_XTAB;
215 else if ( xsm_getpageframeinfo(page) != 0 )
216 ;
217 else if ( likely(get_page(page, d)) )
218 {
219 switch( page->u.inuse.type_info & PGT_type_mask )
220 {
221 case PGT_l1_page_table:
222 type = XEN_DOMCTL_PFINFO_L1TAB;
223 break;
224 case PGT_l2_page_table:
225 type = XEN_DOMCTL_PFINFO_L2TAB;
226 break;
227 case PGT_l3_page_table:
228 type = XEN_DOMCTL_PFINFO_L3TAB;
229 break;
230 case PGT_l4_page_table:
231 type = XEN_DOMCTL_PFINFO_L4TAB;
232 break;
233 }
235 if ( page->u.inuse.type_info & PGT_pinned )
236 type |= XEN_DOMCTL_PFINFO_LPINTAB;
238 put_page(page);
239 }
240 else
241 type = XEN_DOMCTL_PFINFO_XTAB;
243 arr[j] = type;
244 }
246 if ( copy_to_guest_offset(domctl->u.getpageframeinfo3.array,
247 n, arr, k) )
248 {
249 ret = -EFAULT;
250 break;
251 }
253 n += k;
254 }
256 free_domheap_page(virt_to_page(arr));
258 rcu_unlock_domain(d);
259 break;
260 }
261 #endif
262 /* fall thru */
263 case XEN_DOMCTL_getpageframeinfo2:
264 {
265 int n,j;
266 int num = domctl->u.getpageframeinfo2.num;
267 domid_t dom = domctl->domain;
268 struct domain *d;
269 uint32_t *arr32;
270 ret = -ESRCH;
272 if ( unlikely((d = rcu_lock_domain_by_id(dom)) == NULL) )
273 break;
275 if ( unlikely(num > 1024) )
276 {
277 ret = -E2BIG;
278 rcu_unlock_domain(d);
279 break;
280 }
282 arr32 = alloc_xenheap_page();
283 if ( !arr32 )
284 {
285 ret = -ENOMEM;
286 rcu_unlock_domain(d);
287 break;
288 }
290 ret = 0;
291 for ( n = 0; n < num; )
292 {
293 int k = PAGE_SIZE / 4;
294 if ( (num - n) < k )
295 k = num - n;
297 if ( copy_from_guest_offset(arr32,
298 domctl->u.getpageframeinfo2.array,
299 n, k) )
300 {
301 ret = -EFAULT;
302 break;
303 }
305 for ( j = 0; j < k; j++ )
306 {
307 struct page_info *page;
308 unsigned long mfn = arr32[j];
310 page = mfn_to_page(mfn);
312 if ( domctl->cmd == XEN_DOMCTL_getpageframeinfo3)
313 arr32[j] = 0;
315 if ( unlikely(!mfn_valid(mfn)) )
316 arr32[j] |= XEN_DOMCTL_PFINFO_XTAB;
317 else if ( xsm_getpageframeinfo(page) != 0 )
318 continue;
319 else if ( likely(get_page(page, d)) )
320 {
321 unsigned long type = 0;
323 switch( page->u.inuse.type_info & PGT_type_mask )
324 {
325 case PGT_l1_page_table:
326 type = XEN_DOMCTL_PFINFO_L1TAB;
327 break;
328 case PGT_l2_page_table:
329 type = XEN_DOMCTL_PFINFO_L2TAB;
330 break;
331 case PGT_l3_page_table:
332 type = XEN_DOMCTL_PFINFO_L3TAB;
333 break;
334 case PGT_l4_page_table:
335 type = XEN_DOMCTL_PFINFO_L4TAB;
336 break;
337 }
339 if ( page->u.inuse.type_info & PGT_pinned )
340 type |= XEN_DOMCTL_PFINFO_LPINTAB;
341 arr32[j] |= type;
342 put_page(page);
343 }
344 else
345 arr32[j] |= XEN_DOMCTL_PFINFO_XTAB;
347 }
349 if ( copy_to_guest_offset(domctl->u.getpageframeinfo2.array,
350 n, arr32, k) )
351 {
352 ret = -EFAULT;
353 break;
354 }
356 n += k;
357 }
359 free_xenheap_page(arr32);
361 rcu_unlock_domain(d);
362 }
363 break;
365 case XEN_DOMCTL_getmemlist:
366 {
367 int i;
368 struct domain *d = rcu_lock_domain_by_id(domctl->domain);
369 unsigned long max_pfns = domctl->u.getmemlist.max_pfns;
370 uint64_t mfn;
371 struct page_info *page;
373 ret = -EINVAL;
374 if ( d != NULL )
375 {
376 ret = xsm_getmemlist(d);
377 if ( ret )
378 {
379 rcu_unlock_domain(d);
380 break;
381 }
383 spin_lock(&d->page_alloc_lock);
385 if ( unlikely(d->is_dying) ) {
386 spin_unlock(&d->page_alloc_lock);
387 goto getmemlist_out;
388 }
390 ret = i = 0;
391 page_list_for_each(page, &d->page_list)
392 {
393 if ( i >= max_pfns )
394 break;
395 mfn = page_to_mfn(page);
396 if ( copy_to_guest_offset(domctl->u.getmemlist.buffer,
397 i, &mfn, 1) )
398 {
399 ret = -EFAULT;
400 break;
401 }
402 ++i;
403 }
405 spin_unlock(&d->page_alloc_lock);
407 domctl->u.getmemlist.num_pfns = i;
408 copy_to_guest(u_domctl, domctl, 1);
409 getmemlist_out:
410 rcu_unlock_domain(d);
411 }
412 }
413 break;
415 case XEN_DOMCTL_hypercall_init:
416 {
417 struct domain *d = rcu_lock_domain_by_id(domctl->domain);
418 unsigned long gmfn = domctl->u.hypercall_init.gmfn;
419 unsigned long mfn;
420 void *hypercall_page;
422 ret = -ESRCH;
423 if ( unlikely(d == NULL) )
424 break;
426 ret = xsm_hypercall_init(d);
427 if ( ret )
428 {
429 rcu_unlock_domain(d);
430 break;
431 }
433 mfn = gmfn_to_mfn(d, gmfn);
435 ret = -EACCES;
436 if ( !mfn_valid(mfn) ||
437 !get_page_and_type(mfn_to_page(mfn), d, PGT_writable_page) )
438 {
439 rcu_unlock_domain(d);
440 break;
441 }
443 ret = 0;
445 hypercall_page = map_domain_page(mfn);
446 hypercall_page_initialise(d, hypercall_page);
447 unmap_domain_page(hypercall_page);
449 put_page_and_type(mfn_to_page(mfn));
451 rcu_unlock_domain(d);
452 }
453 break;
455 case XEN_DOMCTL_sethvmcontext:
456 {
457 struct hvm_domain_context c = { .size = domctl->u.hvmcontext.size };
458 struct domain *d;
460 ret = -ESRCH;
461 if ( (d = rcu_lock_domain_by_id(domctl->domain)) == NULL )
462 break;
464 ret = xsm_hvmcontext(d, domctl->cmd);
465 if ( ret )
466 goto sethvmcontext_out;
468 ret = -EINVAL;
469 if ( !is_hvm_domain(d) )
470 goto sethvmcontext_out;
472 ret = -ENOMEM;
473 if ( (c.data = xmalloc_bytes(c.size)) == NULL )
474 goto sethvmcontext_out;
476 ret = -EFAULT;
477 if ( copy_from_guest(c.data, domctl->u.hvmcontext.buffer, c.size) != 0)
478 goto sethvmcontext_out;
480 domain_pause(d);
481 ret = hvm_load(d, &c);
482 domain_unpause(d);
484 sethvmcontext_out:
485 if ( c.data != NULL )
486 xfree(c.data);
488 rcu_unlock_domain(d);
489 }
490 break;
492 case XEN_DOMCTL_gethvmcontext:
493 {
494 struct hvm_domain_context c = { 0 };
495 struct domain *d;
497 ret = -ESRCH;
498 if ( (d = rcu_lock_domain_by_id(domctl->domain)) == NULL )
499 break;
501 ret = xsm_hvmcontext(d, domctl->cmd);
502 if ( ret )
503 goto gethvmcontext_out;
505 ret = -EINVAL;
506 if ( !is_hvm_domain(d) )
507 goto gethvmcontext_out;
509 c.size = hvm_save_size(d);
511 if ( guest_handle_is_null(domctl->u.hvmcontext.buffer) )
512 {
513 /* Client is querying for the correct buffer size */
514 domctl->u.hvmcontext.size = c.size;
515 ret = 0;
516 goto gethvmcontext_out;
517 }
519 /* Check that the client has a big enough buffer */
520 ret = -ENOSPC;
521 if ( domctl->u.hvmcontext.size < c.size )
522 goto gethvmcontext_out;
524 /* Allocate our own marshalling buffer */
525 ret = -ENOMEM;
526 if ( (c.data = xmalloc_bytes(c.size)) == NULL )
527 goto gethvmcontext_out;
529 domain_pause(d);
530 ret = hvm_save(d, &c);
531 domain_unpause(d);
533 domctl->u.hvmcontext.size = c.cur;
534 if ( copy_to_guest(domctl->u.hvmcontext.buffer, c.data, c.size) != 0 )
535 ret = -EFAULT;
537 gethvmcontext_out:
538 if ( copy_to_guest(u_domctl, domctl, 1) )
539 ret = -EFAULT;
541 if ( c.data != NULL )
542 xfree(c.data);
544 rcu_unlock_domain(d);
545 }
546 break;
548 case XEN_DOMCTL_gethvmcontext_partial:
549 {
550 struct domain *d;
552 ret = -ESRCH;
553 if ( (d = rcu_lock_domain_by_id(domctl->domain)) == NULL )
554 break;
556 ret = xsm_hvmcontext(d, domctl->cmd);
557 if ( ret )
558 goto gethvmcontext_partial_out;
560 ret = -EINVAL;
561 if ( !is_hvm_domain(d) )
562 goto gethvmcontext_partial_out;
564 domain_pause(d);
565 ret = hvm_save_one(d, domctl->u.hvmcontext_partial.type,
566 domctl->u.hvmcontext_partial.instance,
567 domctl->u.hvmcontext_partial.buffer);
568 domain_unpause(d);
570 gethvmcontext_partial_out:
571 rcu_unlock_domain(d);
572 }
573 break;
576 case XEN_DOMCTL_set_address_size:
577 {
578 struct domain *d;
580 ret = -ESRCH;
581 if ( (d = rcu_lock_domain_by_id(domctl->domain)) == NULL )
582 break;
584 ret = xsm_address_size(d, domctl->cmd);
585 if ( ret )
586 {
587 rcu_unlock_domain(d);
588 break;
589 }
591 switch ( domctl->u.address_size.size )
592 {
593 #ifdef CONFIG_COMPAT
594 case 32:
595 ret = switch_compat(d);
596 break;
597 case 64:
598 ret = switch_native(d);
599 break;
600 #endif
601 default:
602 ret = (domctl->u.address_size.size == BITS_PER_LONG) ? 0 : -EINVAL;
603 break;
604 }
606 rcu_unlock_domain(d);
607 }
608 break;
610 case XEN_DOMCTL_get_address_size:
611 {
612 struct domain *d;
614 ret = -ESRCH;
615 if ( (d = rcu_lock_domain_by_id(domctl->domain)) == NULL )
616 break;
618 ret = xsm_address_size(d, domctl->cmd);
619 if ( ret )
620 {
621 rcu_unlock_domain(d);
622 break;
623 }
625 domctl->u.address_size.size =
626 is_pv_32on64_domain(d) ? 32 : BITS_PER_LONG;
628 ret = 0;
629 rcu_unlock_domain(d);
631 if ( copy_to_guest(u_domctl, domctl, 1) )
632 ret = -EFAULT;
633 }
634 break;
636 case XEN_DOMCTL_set_machine_address_size:
637 {
638 struct domain *d;
640 ret = -ESRCH;
641 if ( (d = rcu_lock_domain_by_id(domctl->domain)) == NULL )
642 break;
644 ret = xsm_machine_address_size(d, domctl->cmd);
645 if ( ret )
646 rcu_unlock_domain(d);
648 ret = -EBUSY;
649 if ( d->tot_pages > 0 )
650 goto set_machine_address_size_out;
652 d->arch.physaddr_bitsize = domctl->u.address_size.size;
654 ret = 0;
655 set_machine_address_size_out:
656 rcu_unlock_domain(d);
657 }
658 break;
660 case XEN_DOMCTL_get_machine_address_size:
661 {
662 struct domain *d;
664 ret = -ESRCH;
665 if ( (d = rcu_lock_domain_by_id(domctl->domain)) == NULL )
666 break;
668 ret = xsm_machine_address_size(d, domctl->cmd);
669 if ( ret )
670 {
671 rcu_unlock_domain(d);
672 break;
673 }
675 domctl->u.address_size.size = d->arch.physaddr_bitsize;
677 ret = 0;
678 rcu_unlock_domain(d);
680 if ( copy_to_guest(u_domctl, domctl, 1) )
681 ret = -EFAULT;
684 }
685 break;
687 case XEN_DOMCTL_sendtrigger:
688 {
689 struct domain *d;
690 struct vcpu *v;
692 ret = -ESRCH;
693 if ( (d = rcu_lock_domain_by_id(domctl->domain)) == NULL )
694 break;
696 ret = xsm_sendtrigger(d);
697 if ( ret )
698 goto sendtrigger_out;
700 ret = -EINVAL;
701 if ( domctl->u.sendtrigger.vcpu >= MAX_VIRT_CPUS )
702 goto sendtrigger_out;
704 ret = -ESRCH;
705 if ( domctl->u.sendtrigger.vcpu >= d->max_vcpus ||
706 (v = d->vcpu[domctl->u.sendtrigger.vcpu]) == NULL )
707 goto sendtrigger_out;
709 switch ( domctl->u.sendtrigger.trigger )
710 {
711 case XEN_DOMCTL_SENDTRIGGER_NMI:
712 {
713 ret = 0;
714 if ( !test_and_set_bool(v->nmi_pending) )
715 vcpu_kick(v);
716 }
717 break;
719 case XEN_DOMCTL_SENDTRIGGER_POWER:
720 {
721 ret = -EINVAL;
722 if ( is_hvm_domain(d) )
723 {
724 ret = 0;
725 hvm_acpi_power_button(d);
726 }
727 }
728 break;
730 default:
731 ret = -ENOSYS;
732 }
734 sendtrigger_out:
735 rcu_unlock_domain(d);
736 }
737 break;
739 case XEN_DOMCTL_get_device_group:
740 {
741 struct domain *d;
742 u32 max_sdevs;
743 u8 bus, devfn;
744 XEN_GUEST_HANDLE_64(uint32) sdevs;
745 int num_sdevs;
747 ret = -ENOSYS;
748 if ( !iommu_enabled )
749 break;
751 ret = -EINVAL;
752 if ( (d = rcu_lock_domain_by_id(domctl->domain)) == NULL )
753 break;
755 bus = (domctl->u.get_device_group.machine_bdf >> 16) & 0xff;
756 devfn = (domctl->u.get_device_group.machine_bdf >> 8) & 0xff;
757 max_sdevs = domctl->u.get_device_group.max_sdevs;
758 sdevs = domctl->u.get_device_group.sdev_array;
760 num_sdevs = iommu_get_device_group(d, bus, devfn, sdevs, max_sdevs);
761 if ( num_sdevs < 0 )
762 {
763 dprintk(XENLOG_ERR, "iommu_get_device_group() failed!\n");
764 ret = -EFAULT;
765 domctl->u.get_device_group.num_sdevs = 0;
766 }
767 else
768 {
769 ret = 0;
770 domctl->u.get_device_group.num_sdevs = num_sdevs;
771 }
772 if ( copy_to_guest(u_domctl, domctl, 1) )
773 ret = -EFAULT;
774 rcu_unlock_domain(d);
775 }
776 break;
778 case XEN_DOMCTL_test_assign_device:
779 {
780 u8 bus, devfn;
782 ret = -ENOSYS;
783 if ( !iommu_enabled )
784 break;
786 ret = xsm_test_assign_device(domctl->u.assign_device.machine_bdf);
787 if ( ret )
788 break;
790 ret = -EINVAL;
791 bus = (domctl->u.assign_device.machine_bdf >> 16) & 0xff;
792 devfn = (domctl->u.assign_device.machine_bdf >> 8) & 0xff;
794 if ( device_assigned(bus, devfn) )
795 {
796 gdprintk(XENLOG_ERR, "XEN_DOMCTL_test_assign_device: "
797 "%x:%x.%x already assigned, or non-existent\n",
798 bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
799 break;
800 }
801 ret = 0;
802 }
803 break;
805 case XEN_DOMCTL_assign_device:
806 {
807 struct domain *d;
808 u8 bus, devfn;
810 ret = -ENOSYS;
811 if ( !iommu_enabled )
812 break;
814 ret = -EINVAL;
815 if ( unlikely((d = get_domain_by_id(domctl->domain)) == NULL) )
816 {
817 gdprintk(XENLOG_ERR,
818 "XEN_DOMCTL_assign_device: get_domain_by_id() failed\n");
819 break;
820 }
822 ret = xsm_assign_device(d, domctl->u.assign_device.machine_bdf);
823 if ( ret )
824 goto assign_device_out;
826 bus = (domctl->u.assign_device.machine_bdf >> 16) & 0xff;
827 devfn = (domctl->u.assign_device.machine_bdf >> 8) & 0xff;
829 if ( !iommu_pv_enabled && !is_hvm_domain(d) )
830 {
831 ret = -ENOSYS;
832 put_domain(d);
833 break;
834 }
836 ret = -EINVAL;
838 ret = assign_device(d, bus, devfn);
839 if ( ret )
840 gdprintk(XENLOG_ERR, "XEN_DOMCTL_assign_device: "
841 "assign device (%x:%x.%x) failed\n",
842 bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
844 assign_device_out:
845 put_domain(d);
846 }
847 break;
849 case XEN_DOMCTL_deassign_device:
850 {
851 struct domain *d;
852 u8 bus, devfn;
854 ret = -ENOSYS;
855 if ( !iommu_enabled )
856 break;
858 ret = -EINVAL;
859 if ( unlikely((d = get_domain_by_id(domctl->domain)) == NULL) )
860 {
861 gdprintk(XENLOG_ERR,
862 "XEN_DOMCTL_deassign_device: get_domain_by_id() failed\n");
863 break;
864 }
866 ret = xsm_assign_device(d, domctl->u.assign_device.machine_bdf);
867 if ( ret )
868 goto deassign_device_out;
870 bus = (domctl->u.assign_device.machine_bdf >> 16) & 0xff;
871 devfn = (domctl->u.assign_device.machine_bdf >> 8) & 0xff;
873 if ( !iommu_pv_enabled && !is_hvm_domain(d) )
874 {
875 ret = -ENOSYS;
876 put_domain(d);
877 break;
878 }
879 ret = 0;
880 spin_lock(&pcidevs_lock);
881 ret = deassign_device(d, bus, devfn);
882 spin_unlock(&pcidevs_lock);
883 gdprintk(XENLOG_INFO, "XEN_DOMCTL_deassign_device: bdf = %x:%x.%x\n",
884 bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
886 deassign_device_out:
887 put_domain(d);
888 }
889 break;
891 case XEN_DOMCTL_bind_pt_irq:
892 {
893 struct domain * d;
894 xen_domctl_bind_pt_irq_t * bind;
896 ret = -ESRCH;
897 if ( (d = rcu_lock_domain_by_id(domctl->domain)) == NULL )
898 break;
899 bind = &(domctl->u.bind_pt_irq);
901 ret = xsm_bind_pt_irq(d, bind);
902 if ( ret )
903 goto bind_out;
905 ret = -EPERM;
906 if ( !IS_PRIV(current->domain) &&
907 !irq_access_permitted(current->domain, bind->machine_irq) )
908 goto bind_out;
910 ret = -ESRCH;
911 if ( iommu_enabled )
912 {
913 spin_lock(&pcidevs_lock);
914 ret = pt_irq_create_bind_vtd(d, bind);
915 spin_unlock(&pcidevs_lock);
916 }
917 if ( ret < 0 )
918 gdprintk(XENLOG_ERR, "pt_irq_create_bind failed!\n");
920 bind_out:
921 rcu_unlock_domain(d);
922 }
923 break;
925 case XEN_DOMCTL_unbind_pt_irq:
926 {
927 struct domain * d;
928 xen_domctl_bind_pt_irq_t * bind;
930 ret = -ESRCH;
931 if ( (d = rcu_lock_domain_by_id(domctl->domain)) == NULL )
932 break;
933 bind = &(domctl->u.bind_pt_irq);
935 ret = -EPERM;
936 if ( !IS_PRIV(current->domain) &&
937 !irq_access_permitted(current->domain, bind->machine_irq) )
938 goto bind_out;
940 if ( iommu_enabled )
941 {
942 spin_lock(&pcidevs_lock);
943 ret = pt_irq_destroy_bind_vtd(d, bind);
944 spin_unlock(&pcidevs_lock);
945 }
946 if ( ret < 0 )
947 gdprintk(XENLOG_ERR, "pt_irq_destroy_bind failed!\n");
948 rcu_unlock_domain(d);
949 }
950 break;
952 case XEN_DOMCTL_memory_mapping:
953 {
954 struct domain *d;
955 unsigned long gfn = domctl->u.memory_mapping.first_gfn;
956 unsigned long mfn = domctl->u.memory_mapping.first_mfn;
957 unsigned long nr_mfns = domctl->u.memory_mapping.nr_mfns;
958 int i;
960 ret = -EINVAL;
961 if ( (mfn + nr_mfns - 1) < mfn ) /* wrap? */
962 break;
964 ret = -ESRCH;
965 if ( unlikely((d = rcu_lock_domain_by_id(domctl->domain)) == NULL) )
966 break;
968 ret = -EPERM;
969 if ( !IS_PRIV(current->domain) &&
970 !iomem_access_permitted(current->domain, mfn, mfn + nr_mfns - 1) )
971 break;
973 ret=0;
974 if ( domctl->u.memory_mapping.add_mapping )
975 {
976 gdprintk(XENLOG_INFO,
977 "memory_map:add: gfn=%lx mfn=%lx nr_mfns=%lx\n",
978 gfn, mfn, nr_mfns);
980 ret = iomem_permit_access(d, mfn, mfn + nr_mfns - 1);
981 for ( i = 0; i < nr_mfns; i++ )
982 set_mmio_p2m_entry(d, gfn+i, _mfn(mfn+i));
983 }
984 else
985 {
986 gdprintk(XENLOG_INFO,
987 "memory_map:remove: gfn=%lx mfn=%lx nr_mfns=%lx\n",
988 gfn, mfn, nr_mfns);
990 for ( i = 0; i < nr_mfns; i++ )
991 clear_mmio_p2m_entry(d, gfn+i);
992 ret = iomem_deny_access(d, mfn, mfn + nr_mfns - 1);
993 }
995 rcu_unlock_domain(d);
996 }
997 break;
999 case XEN_DOMCTL_ioport_mapping:
1000 {
1001 #define MAX_IOPORTS 0x10000
1002 struct domain *d;
1003 struct hvm_iommu *hd;
1004 unsigned int fgp = domctl->u.ioport_mapping.first_gport;
1005 unsigned int fmp = domctl->u.ioport_mapping.first_mport;
1006 unsigned int np = domctl->u.ioport_mapping.nr_ports;
1007 struct g2m_ioport *g2m_ioport;
1008 int found = 0;
1010 ret = -EINVAL;
1011 if ( (np == 0) || (fgp > MAX_IOPORTS) || (fmp > MAX_IOPORTS) ||
1012 ((fgp + np) > MAX_IOPORTS) || ((fmp + np) > MAX_IOPORTS) )
1013 {
1014 gdprintk(XENLOG_ERR,
1015 "ioport_map:invalid:gport=%x mport=%x nr_ports=%x\n",
1016 fgp, fmp, np);
1017 break;
1018 }
1020 ret = -EPERM;
1021 if ( !IS_PRIV(current->domain) &&
1022 !ioports_access_permitted(current->domain, fmp, fmp + np - 1) )
1023 break;
1025 ret = -ESRCH;
1026 if ( unlikely((d = rcu_lock_domain_by_id(domctl->domain)) == NULL) )
1027 break;
1029 hd = domain_hvm_iommu(d);
1030 if ( domctl->u.ioport_mapping.add_mapping )
1031 {
1032 gdprintk(XENLOG_INFO,
1033 "ioport_map:add f_gport=%x f_mport=%x np=%x\n",
1034 fgp, fmp, np);
1036 list_for_each_entry(g2m_ioport, &hd->g2m_ioport_list, list)
1037 if (g2m_ioport->mport == fmp )
1038 {
1039 g2m_ioport->gport = fgp;
1040 g2m_ioport->np = np;
1041 found = 1;
1042 break;
1043 }
1044 if ( !found )
1045 {
1046 g2m_ioport = xmalloc(struct g2m_ioport);
1047 g2m_ioport->gport = fgp;
1048 g2m_ioport->mport = fmp;
1049 g2m_ioport->np = np;
1050 list_add_tail(&g2m_ioport->list, &hd->g2m_ioport_list);
1051 }
1052 ret = ioports_permit_access(d, fmp, fmp + np - 1);
1053 }
1054 else
1055 {
1056 gdprintk(XENLOG_INFO,
1057 "ioport_map:remove f_gport=%x f_mport=%x np=%x\n",
1058 fgp, fmp, np);
1059 list_for_each_entry(g2m_ioport, &hd->g2m_ioport_list, list)
1060 if ( g2m_ioport->mport == fmp )
1061 {
1062 list_del(&g2m_ioport->list);
1063 xfree(g2m_ioport);
1064 break;
1065 }
1066 ret = ioports_deny_access(d, fmp, fmp + np - 1);
1067 }
1068 rcu_unlock_domain(d);
1069 }
1070 break;
1072 case XEN_DOMCTL_pin_mem_cacheattr:
1073 {
1074 struct domain *d;
1076 ret = -ESRCH;
1077 d = rcu_lock_domain_by_id(domctl->domain);
1078 if ( d == NULL )
1079 break;
1081 ret = xsm_pin_mem_cacheattr(d);
1082 if ( ret )
1083 goto pin_out;
1085 ret = hvm_set_mem_pinned_cacheattr(
1086 d, domctl->u.pin_mem_cacheattr.start,
1087 domctl->u.pin_mem_cacheattr.end,
1088 domctl->u.pin_mem_cacheattr.type);
1090 pin_out:
1091 rcu_unlock_domain(d);
1092 }
1093 break;
1095 case XEN_DOMCTL_set_ext_vcpucontext:
1096 case XEN_DOMCTL_get_ext_vcpucontext:
1097 {
1098 struct xen_domctl_ext_vcpucontext *evc;
1099 struct domain *d;
1100 struct vcpu *v;
1102 evc = &domctl->u.ext_vcpucontext;
1104 ret = -ESRCH;
1105 d = rcu_lock_domain_by_id(domctl->domain);
1106 if ( d == NULL )
1107 break;
1109 ret = xsm_ext_vcpucontext(d, domctl->cmd);
1110 if ( ret )
1111 goto ext_vcpucontext_out;
1113 ret = -ESRCH;
1114 if ( (evc->vcpu >= d->max_vcpus) ||
1115 ((v = d->vcpu[evc->vcpu]) == NULL) )
1116 goto ext_vcpucontext_out;
1118 if ( domctl->cmd == XEN_DOMCTL_get_ext_vcpucontext )
1119 {
1120 evc->size = sizeof(*evc);
1121 #ifdef __x86_64__
1122 evc->sysenter_callback_cs = v->arch.sysenter_callback_cs;
1123 evc->sysenter_callback_eip = v->arch.sysenter_callback_eip;
1124 evc->sysenter_disables_events = v->arch.sysenter_disables_events;
1125 evc->syscall32_callback_cs = v->arch.syscall32_callback_cs;
1126 evc->syscall32_callback_eip = v->arch.syscall32_callback_eip;
1127 evc->syscall32_disables_events = v->arch.syscall32_disables_events;
1128 #else
1129 evc->sysenter_callback_cs = 0;
1130 evc->sysenter_callback_eip = 0;
1131 evc->sysenter_disables_events = 0;
1132 evc->syscall32_callback_cs = 0;
1133 evc->syscall32_callback_eip = 0;
1134 evc->syscall32_disables_events = 0;
1135 #endif
1136 }
1137 else
1138 {
1139 ret = -EINVAL;
1140 if ( evc->size != sizeof(*evc) )
1141 goto ext_vcpucontext_out;
1142 #ifdef __x86_64__
1143 fixup_guest_code_selector(d, evc->sysenter_callback_cs);
1144 v->arch.sysenter_callback_cs = evc->sysenter_callback_cs;
1145 v->arch.sysenter_callback_eip = evc->sysenter_callback_eip;
1146 v->arch.sysenter_disables_events = evc->sysenter_disables_events;
1147 fixup_guest_code_selector(d, evc->syscall32_callback_cs);
1148 v->arch.syscall32_callback_cs = evc->syscall32_callback_cs;
1149 v->arch.syscall32_callback_eip = evc->syscall32_callback_eip;
1150 v->arch.syscall32_disables_events = evc->syscall32_disables_events;
1151 #else
1152 /* We do not support syscall/syscall32/sysenter on 32-bit Xen. */
1153 if ( (evc->sysenter_callback_cs & ~3) ||
1154 evc->sysenter_callback_eip ||
1155 (evc->syscall32_callback_cs & ~3) ||
1156 evc->syscall32_callback_eip )
1157 goto ext_vcpucontext_out;
1158 #endif
1159 }
1161 ret = 0;
1163 ext_vcpucontext_out:
1164 rcu_unlock_domain(d);
1165 if ( (domctl->cmd == XEN_DOMCTL_get_ext_vcpucontext) &&
1166 copy_to_guest(u_domctl, domctl, 1) )
1167 ret = -EFAULT;
1168 }
1169 break;
1171 case XEN_DOMCTL_set_cpuid:
1172 {
1173 struct domain *d;
1174 xen_domctl_cpuid_t *ctl = &domctl->u.cpuid;
1175 cpuid_input_t *cpuid = NULL;
1176 int i;
1178 ret = -ESRCH;
1179 d = rcu_lock_domain_by_id(domctl->domain);
1180 if ( d == NULL )
1181 break;
1183 for ( i = 0; i < MAX_CPUID_INPUT; i++ )
1184 {
1185 cpuid = &d->arch.cpuids[i];
1187 if ( cpuid->input[0] == XEN_CPUID_INPUT_UNUSED )
1188 break;
1190 if ( (cpuid->input[0] == ctl->input[0]) &&
1191 ((cpuid->input[1] == XEN_CPUID_INPUT_UNUSED) ||
1192 (cpuid->input[1] == ctl->input[1])) )
1193 break;
1194 }
1196 if ( i == MAX_CPUID_INPUT )
1197 {
1198 ret = -ENOENT;
1199 }
1200 else
1201 {
1202 memcpy(cpuid, ctl, sizeof(cpuid_input_t));
1203 ret = 0;
1204 }
1206 rcu_unlock_domain(d);
1207 }
1208 break;
1210 case XEN_DOMCTL_gettscinfo:
1211 {
1212 struct domain *d;
1213 xen_guest_tsc_info_t info;
1215 ret = -ESRCH;
1216 d = rcu_lock_domain_by_id(domctl->domain);
1217 if ( d == NULL )
1218 break;
1220 domain_pause(d);
1221 tsc_get_info(d, &info.tsc_mode,
1222 &info.elapsed_nsec,
1223 &info.gtsc_khz,
1224 &info.incarnation);
1225 if ( copy_to_guest(domctl->u.tsc_info.out_info, &info, 1) )
1226 ret = -EFAULT;
1227 else
1228 ret = 0;
1229 domain_unpause(d);
1231 rcu_unlock_domain(d);
1232 }
1233 break;
1235 case XEN_DOMCTL_settscinfo:
1236 {
1237 struct domain *d;
1239 ret = -ESRCH;
1240 d = rcu_lock_domain_by_id(domctl->domain);
1241 if ( d == NULL )
1242 break;
1244 domain_pause(d);
1245 tsc_set_info(d, domctl->u.tsc_info.info.tsc_mode,
1246 domctl->u.tsc_info.info.elapsed_nsec,
1247 domctl->u.tsc_info.info.gtsc_khz,
1248 domctl->u.tsc_info.info.incarnation);
1249 domain_unpause(d);
1251 rcu_unlock_domain(d);
1252 ret = 0;
1253 }
1254 break;
1256 case XEN_DOMCTL_suppress_spurious_page_faults:
1257 {
1258 struct domain *d;
1260 ret = -ESRCH;
1261 d = rcu_lock_domain_by_id(domctl->domain);
1262 if ( d != NULL )
1263 {
1264 d->arch.suppress_spurious_page_faults = 1;
1265 rcu_unlock_domain(d);
1266 ret = 0;
1267 }
1268 }
1269 break;
1271 case XEN_DOMCTL_debug_op:
1272 {
1273 struct domain *d;
1274 struct vcpu *v;
1276 ret = -ESRCH;
1277 d = rcu_lock_domain_by_id(domctl->domain);
1278 if ( d == NULL )
1279 break;
1281 ret = -EINVAL;
1282 if ( (domctl->u.debug_op.vcpu >= d->max_vcpus) ||
1283 ((v = d->vcpu[domctl->u.debug_op.vcpu]) == NULL) )
1284 goto debug_op_out;
1286 ret = -EINVAL;
1287 if ( !is_hvm_domain(d))
1288 goto debug_op_out;
1290 ret = hvm_debug_op(v, domctl->u.debug_op.op);
1292 debug_op_out:
1293 rcu_unlock_domain(d);
1294 }
1295 break;
1297 #ifdef XEN_GDBSX_CONFIG
1298 case XEN_DOMCTL_gdbsx_guestmemio:
1299 {
1300 struct domain *d;
1302 ret = -ESRCH;
1303 if ( (d = rcu_lock_domain_by_id(domctl->domain)) == NULL )
1304 break;
1306 domctl->u.gdbsx_guest_memio.remain =
1307 domctl->u.gdbsx_guest_memio.len;
1309 ret = gdbsx_guest_mem_io(domctl->domain, &domctl->u.gdbsx_guest_memio);
1310 if ( !ret && copy_to_guest(u_domctl, domctl, 1) )
1311 ret = -EFAULT;
1313 rcu_unlock_domain(d);
1314 }
1315 break;
1317 case XEN_DOMCTL_gdbsx_pausevcpu:
1318 {
1319 struct domain *d;
1320 struct vcpu *v;
1322 ret = -ESRCH;
1323 if ( (d = rcu_lock_domain_by_id(domctl->domain)) == NULL )
1324 break;
1326 ret = -EBUSY;
1327 if ( !d->is_paused_by_controller )
1328 {
1329 rcu_unlock_domain(d);
1330 break;
1331 }
1332 ret = -EINVAL;
1333 if ( domctl->u.gdbsx_pauseunp_vcpu.vcpu >= MAX_VIRT_CPUS ||
1334 (v = d->vcpu[domctl->u.gdbsx_pauseunp_vcpu.vcpu]) == NULL )
1335 {
1336 rcu_unlock_domain(d);
1337 break;
1338 }
1339 vcpu_pause(v);
1340 ret = 0;
1341 rcu_unlock_domain(d);
1342 }
1343 break;
1345 case XEN_DOMCTL_gdbsx_unpausevcpu:
1346 {
1347 struct domain *d;
1348 struct vcpu *v;
1350 ret = -ESRCH;
1351 if ( (d = rcu_lock_domain_by_id(domctl->domain)) == NULL )
1352 break;
1354 ret = -EBUSY;
1355 if ( !d->is_paused_by_controller )
1356 {
1357 rcu_unlock_domain(d);
1358 break;
1359 }
1360 ret = -EINVAL;
1361 if ( domctl->u.gdbsx_pauseunp_vcpu.vcpu >= MAX_VIRT_CPUS ||
1362 (v = d->vcpu[domctl->u.gdbsx_pauseunp_vcpu.vcpu]) == NULL )
1363 {
1364 rcu_unlock_domain(d);
1365 break;
1366 }
1367 if ( !atomic_read(&v->pause_count) )
1368 printk("WARN: Unpausing vcpu:%d which is not paused\n", v->vcpu_id);
1369 vcpu_unpause(v);
1370 ret = 0;
1371 rcu_unlock_domain(d);
1372 }
1373 break;
1375 case XEN_DOMCTL_gdbsx_domstatus:
1376 {
1377 struct domain *d;
1378 struct vcpu *v;
1380 ret = -ESRCH;
1381 if ( (d = rcu_lock_domain_by_id(domctl->domain)) == NULL )
1382 break;
1384 domctl->u.gdbsx_domstatus.vcpu_id = -1;
1385 domctl->u.gdbsx_domstatus.paused = d->is_paused_by_controller;
1386 if ( domctl->u.gdbsx_domstatus.paused )
1387 {
1388 for_each_vcpu ( d, v )
1389 {
1390 if ( v->arch.gdbsx_vcpu_event )
1391 {
1392 domctl->u.gdbsx_domstatus.vcpu_id = v->vcpu_id;
1393 domctl->u.gdbsx_domstatus.vcpu_ev =
1394 v->arch.gdbsx_vcpu_event;
1395 v->arch.gdbsx_vcpu_event = 0;
1396 break;
1397 }
1398 }
1399 }
1400 ret = 0;
1401 if ( copy_to_guest(u_domctl, domctl, 1) )
1402 ret = -EFAULT;
1403 rcu_unlock_domain(d);
1404 }
1405 break;
1406 #endif /* XEN_GDBSX_CONFIG */
1408 case XEN_DOMCTL_mem_event_op:
1409 {
1410 struct domain *d;
1412 ret = -ESRCH;
1413 d = rcu_lock_domain_by_id(domctl->domain);
1414 if ( d != NULL )
1415 {
1416 ret = mem_event_domctl(d, &domctl->u.mem_event_op,
1417 guest_handle_cast(u_domctl, void));
1418 rcu_unlock_domain(d);
1419 copy_to_guest(u_domctl, domctl, 1);
1420 }
1421 }
1422 break;
1424 case XEN_DOMCTL_mem_sharing_op:
1425 {
1426 struct domain *d;
1428 ret = -ESRCH;
1429 d = rcu_lock_domain_by_id(domctl->domain);
1430 if ( d != NULL )
1431 {
1432 ret = mem_sharing_domctl(d, &domctl->u.mem_sharing_op);
1433 rcu_unlock_domain(d);
1434 copy_to_guest(u_domctl, domctl, 1);
1435 }
1436 }
1437 break;
1439 default:
1440 ret = -ENOSYS;
1441 break;
1442 }
1444 return ret;
1445 }
1447 void arch_get_info_guest(struct vcpu *v, vcpu_guest_context_u c)
1448 {
1449 #ifdef CONFIG_COMPAT
1450 #define c(fld) (!is_pv_32on64_domain(v->domain) ? (c.nat->fld) : (c.cmp->fld))
1451 #else
1452 #define c(fld) (c.nat->fld)
1453 #endif
1455 if ( !is_pv_32on64_domain(v->domain) )
1456 memcpy(c.nat, &v->arch.guest_context, sizeof(*c.nat));
1457 #ifdef CONFIG_COMPAT
1458 else
1459 XLAT_vcpu_guest_context(c.cmp, &v->arch.guest_context);
1460 #endif
1462 c(flags &= ~(VGCF_i387_valid|VGCF_in_kernel));
1463 if ( v->fpu_initialised )
1464 c(flags |= VGCF_i387_valid);
1465 if ( !test_bit(_VPF_down, &v->pause_flags) )
1466 c(flags |= VGCF_online);
1468 if ( is_hvm_vcpu(v) )
1469 {
1470 struct segment_register sreg;
1471 memset(c.nat->ctrlreg, 0, sizeof(c.nat->ctrlreg));
1472 c.nat->ctrlreg[0] = v->arch.hvm_vcpu.guest_cr[0];
1473 c.nat->ctrlreg[2] = v->arch.hvm_vcpu.guest_cr[2];
1474 c.nat->ctrlreg[3] = v->arch.hvm_vcpu.guest_cr[3];
1475 c.nat->ctrlreg[4] = v->arch.hvm_vcpu.guest_cr[4];
1476 hvm_get_segment_register(v, x86_seg_cs, &sreg);
1477 c.nat->user_regs.cs = sreg.sel;
1478 hvm_get_segment_register(v, x86_seg_ss, &sreg);
1479 c.nat->user_regs.ss = sreg.sel;
1480 hvm_get_segment_register(v, x86_seg_ds, &sreg);
1481 c.nat->user_regs.ds = sreg.sel;
1482 hvm_get_segment_register(v, x86_seg_es, &sreg);
1483 c.nat->user_regs.es = sreg.sel;
1484 hvm_get_segment_register(v, x86_seg_fs, &sreg);
1485 c.nat->user_regs.fs = sreg.sel;
1486 hvm_get_segment_register(v, x86_seg_gs, &sreg);
1487 c.nat->user_regs.gs = sreg.sel;
1488 }
1489 else
1490 {
1491 /* IOPL privileges are virtualised: merge back into returned eflags. */
1492 BUG_ON((c(user_regs.eflags) & X86_EFLAGS_IOPL) != 0);
1493 c(user_regs.eflags |= v->arch.iopl << 12);
1495 if ( !is_pv_32on64_domain(v->domain) )
1496 {
1497 c.nat->ctrlreg[3] = xen_pfn_to_cr3(
1498 pagetable_get_pfn(v->arch.guest_table));
1499 #ifdef __x86_64__
1500 c.nat->ctrlreg[1] =
1501 pagetable_is_null(v->arch.guest_table_user) ? 0
1502 : xen_pfn_to_cr3(pagetable_get_pfn(v->arch.guest_table_user));
1503 #endif
1505 /* Merge shadow DR7 bits into real DR7. */
1506 c.nat->debugreg[7] |= c.nat->debugreg[5];
1507 c.nat->debugreg[5] = 0;
1508 }
1509 #ifdef CONFIG_COMPAT
1510 else
1511 {
1512 l4_pgentry_t *l4e = __va(pagetable_get_paddr(v->arch.guest_table));
1513 c.cmp->ctrlreg[3] = compat_pfn_to_cr3(l4e_get_pfn(*l4e));
1515 /* Merge shadow DR7 bits into real DR7. */
1516 c.cmp->debugreg[7] |= c.cmp->debugreg[5];
1517 c.cmp->debugreg[5] = 0;
1518 }
1519 #endif
1521 if ( guest_kernel_mode(v, &v->arch.guest_context.user_regs) )
1522 c(flags |= VGCF_in_kernel);
1523 }
1525 c(vm_assist = v->domain->vm_assist);
1526 #undef c
1527 }
1529 /*
1530 * Local variables:
1531 * mode: C
1532 * c-set-style: "BSD"
1533 * c-basic-offset: 4
1534 * tab-width: 4
1535 * indent-tabs-mode: nil
1536 * End:
1537 */