debuggers.hg

view xen/arch/x86/mm/paging.c @ 21000:b4041e7bbe1b

paging_domctl: Add missing breaks in switch stmt.

From: Tim Deegan <Tim.Deegan@citrix.com>
Signed-off-by: Keir Fraser <keir.fraser@citrix.com>
author Keir Fraser <keir.fraser@citrix.com>
date Wed Feb 17 12:11:13 2010 +0000 (2010-02-17)
parents 257bd5e90294
children a591bf0a9dd6
1 /******************************************************************************
2 * arch/x86/paging.c
3 *
4 * x86 specific paging support
5 * Copyright (c) 2007 Advanced Micro Devices (Wei Huang)
6 * Copyright (c) 2007 XenSource Inc.
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
21 */
23 #include <xen/init.h>
24 #include <asm/paging.h>
25 #include <asm/shadow.h>
26 #include <asm/p2m.h>
27 #include <asm/hap.h>
28 #include <asm/guest_access.h>
29 #include <xen/numa.h>
30 #include <xsm/xsm.h>
32 #define hap_enabled(d) (is_hvm_domain(d) && (d)->arch.hvm_domain.hap_enabled)
34 /* Printouts */
35 #define PAGING_PRINTK(_f, _a...) \
36 debugtrace_printk("pg: %s(): " _f, __func__, ##_a)
37 #define PAGING_ERROR(_f, _a...) \
38 printk("pg error: %s(): " _f, __func__, ##_a)
39 #define PAGING_DEBUG(flag, _f, _a...) \
40 do { \
41 if (PAGING_DEBUG_ ## flag) \
42 debugtrace_printk("pgdebug: %s(): " _f, __func__, ##_a); \
43 } while (0)
45 /************************************************/
46 /* LOG DIRTY SUPPORT */
47 /************************************************/
48 /* Override macros from asm/page.h to make them work with mfn_t */
49 #undef mfn_to_page
50 #define mfn_to_page(_m) __mfn_to_page(mfn_x(_m))
51 #undef mfn_valid
52 #define mfn_valid(_mfn) __mfn_valid(mfn_x(_mfn))
53 #undef page_to_mfn
54 #define page_to_mfn(_pg) _mfn(__page_to_mfn(_pg))
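/* The log-dirty bitmap is kept in a four-level trie rooted at
 * d->arch.paging.log_dirty.top.  The top (L4), L3 and L2 nodes are pages
 * of mfn_t entries (LOGDIRTY_NODE_ENTRIES per page); the L1 leaves are
 * plain bitmap pages holding one bit per guest pfn.  Lower levels are
 * allocated on demand when a pfn in their range is first marked dirty,
 * so a missing entry simply means "no dirty bits in this range". */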
56 /* The log-dirty lock. This protects the log-dirty bitmap from
57 * concurrent accesses (and teardowns, etc).
58 *
59 * Locking discipline: always acquire shadow or HAP lock before this one.
60 *
61 * Because mark_dirty is called from a lot of places, the log-dirty lock
62 * may be acquired with the shadow or HAP locks already held. When the
63 * log-dirty code makes callbacks into HAP or shadow code to reset
64 * various traps that will trigger the mark_dirty calls, it must *not*
65 * have the log-dirty lock held, or it risks deadlock. Because the only
66 * purpose of those calls is to make sure that *guest* actions will
67 * cause mark_dirty to be called (hypervisor actions explicitly call it
68 * anyway), it is safe to release the log-dirty lock before the callback
69 * as long as the domain is paused for the entire operation. */
71 #define log_dirty_lock_init(_d) \
72 do { \
73 spin_lock_init(&(_d)->arch.paging.log_dirty.lock); \
74 (_d)->arch.paging.log_dirty.locker = -1; \
75 (_d)->arch.paging.log_dirty.locker_function = "nobody"; \
76 } while (0)
78 #define log_dirty_lock(_d) \
79 do { \
80 if (unlikely((_d)->arch.paging.log_dirty.locker==current->processor))\
81 { \
82 printk("Error: paging log dirty lock held by %s\n", \
83 (_d)->arch.paging.log_dirty.locker_function); \
84 BUG(); \
85 } \
86 spin_lock(&(_d)->arch.paging.log_dirty.lock); \
87 ASSERT((_d)->arch.paging.log_dirty.locker == -1); \
88 (_d)->arch.paging.log_dirty.locker = current->processor; \
89 (_d)->arch.paging.log_dirty.locker_function = __func__; \
90 } while (0)
92 #define log_dirty_unlock(_d) \
93 do { \
94 ASSERT((_d)->arch.paging.log_dirty.locker == current->processor); \
95 (_d)->arch.paging.log_dirty.locker = -1; \
96 (_d)->arch.paging.log_dirty.locker_function = "nobody"; \
97 spin_unlock(&(_d)->arch.paging.log_dirty.lock); \
98 } while (0)
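/* A minimal sketch of the locking pattern, as used by
 * paging_log_dirty_enable() below: pause the domain, take the log-dirty
 * lock only around the bitmap manipulation, and drop it again before
 * calling back into shadow/HAP code:
 *
 *     domain_pause(d);
 *     log_dirty_lock(d);
 *     ... manipulate d->arch.paging.log_dirty ...
 *     log_dirty_unlock(d);
 *     ... safe to call enable_log_dirty() etc. while still paused ...
 *     domain_unpause(d);
 */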
100 static mfn_t paging_new_log_dirty_page(struct domain *d, void **mapping_p)
101 {
102 struct page_info *page;
104 page = alloc_domheap_page(NULL, MEMF_node(domain_to_node(d)));
105 if ( unlikely(page == NULL) )
106 {
107 d->arch.paging.log_dirty.failed_allocs++;
108 return _mfn(INVALID_MFN);
109 }
111 d->arch.paging.log_dirty.allocs++;
112 *mapping_p = __map_domain_page(page);
114 return page_to_mfn(page);
115 }
117 static mfn_t paging_new_log_dirty_leaf(
118 struct domain *d, unsigned long **leaf_p)
119 {
120 mfn_t mfn = paging_new_log_dirty_page(d, (void **)leaf_p);
121 if ( mfn_valid(mfn) )
122 clear_page(*leaf_p);
123 return mfn;
124 }
126 static mfn_t paging_new_log_dirty_node(struct domain *d, mfn_t **node_p)
127 {
128 int i;
129 mfn_t mfn = paging_new_log_dirty_page(d, (void **)node_p);
130 if ( mfn_valid(mfn) )
131 for ( i = 0; i < LOGDIRTY_NODE_ENTRIES; i++ )
132 (*node_p)[i] = _mfn(INVALID_MFN);
133 return mfn;
134 }
136 int paging_alloc_log_dirty_bitmap(struct domain *d)
137 {
138 mfn_t *mapping;
140 if ( mfn_valid(d->arch.paging.log_dirty.top) )
141 return 0;
143 d->arch.paging.log_dirty.top = paging_new_log_dirty_node(d, &mapping);
144 if ( unlikely(!mfn_valid(d->arch.paging.log_dirty.top)) )
145 {
146 /* Clear error indicator since we're reporting this one */
147 d->arch.paging.log_dirty.failed_allocs = 0;
148 return -ENOMEM;
149 }
150 unmap_domain_page(mapping);
152 return 0;
153 }
155 static void paging_free_log_dirty_page(struct domain *d, mfn_t mfn)
156 {
157 d->arch.paging.log_dirty.allocs--;
158 free_domheap_page(mfn_to_page(mfn));
159 }
161 void paging_free_log_dirty_bitmap(struct domain *d)
162 {
163 mfn_t *l4, *l3, *l2;
164 int i4, i3, i2;
166 if ( !mfn_valid(d->arch.paging.log_dirty.top) )
167 return;
169 dprintk(XENLOG_DEBUG, "%s: used %d pages for domain %d dirty logging\n",
170 __FUNCTION__, d->arch.paging.log_dirty.allocs, d->domain_id);
172 l4 = map_domain_page(mfn_x(d->arch.paging.log_dirty.top));
174 for ( i4 = 0; i4 < LOGDIRTY_NODE_ENTRIES; i4++ )
175 {
176 if ( !mfn_valid(l4[i4]) )
177 continue;
179 l3 = map_domain_page(mfn_x(l4[i4]));
181 for ( i3 = 0; i3 < LOGDIRTY_NODE_ENTRIES; i3++ )
182 {
183 if ( !mfn_valid(l3[i3]) )
184 continue;
186 l2 = map_domain_page(mfn_x(l3[i3]));
188 for ( i2 = 0; i2 < LOGDIRTY_NODE_ENTRIES; i2++ )
189 if ( mfn_valid(l2[i2]) )
190 paging_free_log_dirty_page(d, l2[i2]);
192 unmap_domain_page(l2);
193 paging_free_log_dirty_page(d, l3[i3]);
194 }
196 unmap_domain_page(l3);
197 paging_free_log_dirty_page(d, l4[i4]);
198 }
200 unmap_domain_page(l4);
201 paging_free_log_dirty_page(d, d->arch.paging.log_dirty.top);
203 d->arch.paging.log_dirty.top = _mfn(INVALID_MFN);
204 ASSERT(d->arch.paging.log_dirty.allocs == 0);
205 d->arch.paging.log_dirty.failed_allocs = 0;
206 }
208 int paging_log_dirty_enable(struct domain *d)
209 {
210 int ret;
212 domain_pause(d);
213 log_dirty_lock(d);
215 if ( paging_mode_log_dirty(d) )
216 {
217 ret = -EINVAL;
218 goto out;
219 }
221 ret = paging_alloc_log_dirty_bitmap(d);
222 if ( ret != 0 )
223 {
224 paging_free_log_dirty_bitmap(d);
225 goto out;
226 }
228 log_dirty_unlock(d);
230 /* Safe because the domain is paused. */
231 ret = d->arch.paging.log_dirty.enable_log_dirty(d);
233 /* We may leave the bitmap allocated here, but it will be tidied
234 * up on domain teardown. */
236 domain_unpause(d);
237 return ret;
239 out:
240 log_dirty_unlock(d);
241 domain_unpause(d);
242 return ret;
243 }
245 int paging_log_dirty_disable(struct domain *d)
246 {
247 int ret;
249 domain_pause(d);
250 /* Safe because the domain is paused. */
251 ret = d->arch.paging.log_dirty.disable_log_dirty(d);
252 log_dirty_lock(d);
253 if ( !paging_mode_log_dirty(d) )
254 paging_free_log_dirty_bitmap(d);
255 log_dirty_unlock(d);
256 domain_unpause(d);
258 return ret;
259 }
261 /* Mark a page as dirty */
262 void paging_mark_dirty(struct domain *d, unsigned long guest_mfn)
263 {
264 unsigned long pfn;
265 mfn_t gmfn;
266 int changed;
267 mfn_t mfn, *l4, *l3, *l2;
268 unsigned long *l1;
269 int i1, i2, i3, i4;
271 gmfn = _mfn(guest_mfn);
273 if ( !paging_mode_log_dirty(d) || !mfn_valid(gmfn) ||
274 page_get_owner(mfn_to_page(gmfn)) != d )
275 return;
277 log_dirty_lock(d);
279 ASSERT(mfn_valid(d->arch.paging.log_dirty.top));
281 /* We /really/ mean PFN here, even for non-translated guests. */
282 pfn = get_gpfn_from_mfn(mfn_x(gmfn));
283 /* Shared MFNs should NEVER be marked dirty */
284 BUG_ON(SHARED_M2P(pfn));
286 /*
287 * Values with the MSB set denote MFNs that aren't really part of the
288 * domain's pseudo-physical memory map (e.g., the shared info frame).
289 * Nothing to do here...
290 */
291 if ( unlikely(!VALID_M2P(pfn)) )
292 goto out;
294 i1 = L1_LOGDIRTY_IDX(pfn);
295 i2 = L2_LOGDIRTY_IDX(pfn);
296 i3 = L3_LOGDIRTY_IDX(pfn);
297 i4 = L4_LOGDIRTY_IDX(pfn);
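/* Walk from the top (L4) node down to the L1 leaf, allocating any
 * missing intermediate nodes and the leaf page on demand; each level is
 * unmapped as soon as the next level's mfn has been picked up. */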
299 l4 = map_domain_page(mfn_x(d->arch.paging.log_dirty.top));
300 mfn = l4[i4];
301 if ( !mfn_valid(mfn) )
302 mfn = l4[i4] = paging_new_log_dirty_node(d, &l3);
303 else
304 l3 = map_domain_page(mfn_x(mfn));
305 unmap_domain_page(l4);
306 if ( unlikely(!mfn_valid(mfn)) )
307 goto out;
309 mfn = l3[i3];
310 if ( !mfn_valid(mfn) )
311 mfn = l3[i3] = paging_new_log_dirty_node(d, &l2);
312 else
313 l2 = map_domain_page(mfn_x(mfn));
314 unmap_domain_page(l3);
315 if ( unlikely(!mfn_valid(mfn)) )
316 goto out;
318 mfn = l2[i2];
319 if ( !mfn_valid(mfn) )
320 mfn = l2[i2] = paging_new_log_dirty_leaf(d, &l1);
321 else
322 l1 = map_domain_page(mfn_x(mfn));
323 unmap_domain_page(l2);
324 if ( unlikely(!mfn_valid(mfn)) )
325 goto out;
327 changed = !__test_and_set_bit(i1, l1);
328 unmap_domain_page(l1);
329 if ( changed )
330 {
331 PAGING_DEBUG(LOGDIRTY,
332 "marked mfn %" PRI_mfn " (pfn=%lx), dom %d\n",
333 mfn_x(gmfn), pfn, d->domain_id);
334 d->arch.paging.log_dirty.dirty_count++;
335 }
337 out:
338 log_dirty_unlock(d);
339 }
341 /* Read a domain's log-dirty bitmap and stats. If the operation is a CLEAN,
342 * clear the bitmap and stats as well. */
343 int paging_log_dirty_op(struct domain *d, struct xen_domctl_shadow_op *sc)
344 {
345 int rv = 0, clean = 0, peek = 1;
346 unsigned long pages = 0;
347 mfn_t *l4, *l3, *l2;
348 unsigned long *l1;
349 int i4, i3, i2;
351 domain_pause(d);
352 log_dirty_lock(d);
354 clean = (sc->op == XEN_DOMCTL_SHADOW_OP_CLEAN);
356 PAGING_DEBUG(LOGDIRTY, "log-dirty %s: dom %u faults=%u dirty=%u\n",
357 (clean) ? "clean" : "peek",
358 d->domain_id,
359 d->arch.paging.log_dirty.fault_count,
360 d->arch.paging.log_dirty.dirty_count);
362 sc->stats.fault_count = d->arch.paging.log_dirty.fault_count;
363 sc->stats.dirty_count = d->arch.paging.log_dirty.dirty_count;
365 if ( clean )
366 {
367 d->arch.paging.log_dirty.fault_count = 0;
368 d->arch.paging.log_dirty.dirty_count = 0;
369 }
371 if ( guest_handle_is_null(sc->dirty_bitmap) )
372 /* caller may have wanted just to clean the state or access stats. */
373 peek = 0;
375 if ( (peek || clean) && !mfn_valid(d->arch.paging.log_dirty.top) )
376 {
377 rv = -EINVAL; /* perhaps should be ENOMEM? */
378 goto out;
379 }
381 if ( unlikely(d->arch.paging.log_dirty.failed_allocs) ) {
382 printk("%s: %d failed page allocs while logging dirty pages\n",
383 __FUNCTION__, d->arch.paging.log_dirty.failed_allocs);
384 rv = -ENOMEM;
385 goto out;
386 }
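/* Walk the whole trie in pfn order, copying up to sc->pages bits out to
 * the guest-supplied buffer when peeking.  Missing nodes/leaves are
 * reported via the static all-zero page (no dirty bits there), and leaf
 * pages are cleared after copying when the operation is a CLEAN. */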
388 pages = 0;
389 l4 = (mfn_valid(d->arch.paging.log_dirty.top) ?
390 map_domain_page(mfn_x(d->arch.paging.log_dirty.top)) : NULL);
392 for ( i4 = 0;
393 (pages < sc->pages) && (i4 < LOGDIRTY_NODE_ENTRIES);
394 i4++ )
395 {
396 l3 = (l4 && mfn_valid(l4[i4])) ? map_domain_page(mfn_x(l4[i4])) : NULL;
397 for ( i3 = 0;
398 (pages < sc->pages) && (i3 < LOGDIRTY_NODE_ENTRIES);
399 i3++ )
400 {
401 l2 = ((l3 && mfn_valid(l3[i3])) ?
402 map_domain_page(mfn_x(l3[i3])) : NULL);
403 for ( i2 = 0;
404 (pages < sc->pages) && (i2 < LOGDIRTY_NODE_ENTRIES);
405 i2++ )
406 {
407 static unsigned long zeroes[PAGE_SIZE/BYTES_PER_LONG];
408 unsigned int bytes = PAGE_SIZE;
409 l1 = ((l2 && mfn_valid(l2[i2])) ?
410 map_domain_page(mfn_x(l2[i2])) : zeroes);
411 if ( unlikely(((sc->pages - pages + 7) >> 3) < bytes) )
412 bytes = (unsigned int)((sc->pages - pages + 7) >> 3);
413 if ( likely(peek) )
414 {
415 if ( copy_to_guest_offset(sc->dirty_bitmap, pages >> 3,
416 (uint8_t *)l1, bytes) != 0 )
417 {
418 rv = -EFAULT;
419 goto out;
420 }
421 }
422 if ( clean && l1 != zeroes )
423 clear_page(l1);
424 pages += bytes << 3;
425 if ( l1 != zeroes )
426 unmap_domain_page(l1);
427 }
428 if ( l2 )
429 unmap_domain_page(l2);
430 }
431 if ( l3 )
432 unmap_domain_page(l3);
433 }
434 if ( l4 )
435 unmap_domain_page(l4);
437 if ( pages < sc->pages )
438 sc->pages = pages;
440 log_dirty_unlock(d);
442 if ( clean )
443 {
444 /* We need to further call the clean_dirty_bitmap() function of the
445 * specific paging mode (shadow or hap). Safe because the domain is paused. */
446 d->arch.paging.log_dirty.clean_dirty_bitmap(d);
447 }
448 domain_unpause(d);
449 return rv;
451 out:
452 log_dirty_unlock(d);
453 domain_unpause(d);
454 return rv;
455 }
457 int paging_log_dirty_range(struct domain *d,
458 unsigned long begin_pfn,
459 unsigned long nr,
460 XEN_GUEST_HANDLE_64(uint8) dirty_bitmap)
461 {
462 int rv = 0;
463 unsigned long pages = 0;
464 mfn_t *l4, *l3, *l2;
465 unsigned long *l1;
466 int b1, b2, b3, b4;
467 int i2, i3, i4;
469 d->arch.paging.log_dirty.clean_dirty_bitmap(d);
470 log_dirty_lock(d);
472 PAGING_DEBUG(LOGDIRTY, "log-dirty-range: dom %u faults=%u dirty=%u\n",
473 d->domain_id,
474 d->arch.paging.log_dirty.fault_count,
475 d->arch.paging.log_dirty.dirty_count);
477 if ( !mfn_valid(d->arch.paging.log_dirty.top) )
478 {
479 rv = -EINVAL; /* perhaps should be ENOMEM? */
480 goto out;
481 }
483 if ( unlikely(d->arch.paging.log_dirty.failed_allocs) ) {
484 printk("%s: %d failed page allocs while logging dirty pages\n",
485 __FUNCTION__, d->arch.paging.log_dirty.failed_allocs);
486 rv = -ENOMEM;
487 goto out;
488 }
490 if ( !d->arch.paging.log_dirty.fault_count &&
491 !d->arch.paging.log_dirty.dirty_count ) {
492 int size = (nr + BITS_PER_LONG - 1) / BITS_PER_LONG;
493 unsigned long zeroes[size];
494 memset(zeroes, 0x00, size * BYTES_PER_LONG);
495 rv = 0;
496 if ( copy_to_guest_offset(dirty_bitmap, 0, (uint8_t *) zeroes,
497 size * BYTES_PER_LONG) != 0 )
498 rv = -EFAULT;
499 goto out;
500 }
501 d->arch.paging.log_dirty.fault_count = 0;
502 d->arch.paging.log_dirty.dirty_count = 0;
504 b1 = L1_LOGDIRTY_IDX(begin_pfn);
505 b2 = L2_LOGDIRTY_IDX(begin_pfn);
506 b3 = L3_LOGDIRTY_IDX(begin_pfn);
507 b4 = L4_LOGDIRTY_IDX(begin_pfn);
508 l4 = map_domain_page(mfn_x(d->arch.paging.log_dirty.top));
510 for ( i4 = b4;
511 (pages < nr) && (i4 < LOGDIRTY_NODE_ENTRIES);
512 i4++ )
513 {
514 l3 = mfn_valid(l4[i4]) ? map_domain_page(mfn_x(l4[i4])) : NULL;
515 for ( i3 = b3;
516 (pages < nr) && (i3 < LOGDIRTY_NODE_ENTRIES);
517 i3++ )
518 {
519 l2 = ((l3 && mfn_valid(l3[i3])) ?
520 map_domain_page(mfn_x(l3[i3])) : NULL);
521 for ( i2 = b2;
522 (pages < nr) && (i2 < LOGDIRTY_NODE_ENTRIES);
523 i2++ )
524 {
525 static unsigned long zeroes[PAGE_SIZE/BYTES_PER_LONG];
526 unsigned int bytes = PAGE_SIZE;
527 uint8_t *s;
528 l1 = ((l2 && mfn_valid(l2[i2])) ?
529 map_domain_page(mfn_x(l2[i2])) : zeroes);
531 s = ((uint8_t*)l1) + (b1 >> 3);
532 bytes -= b1 >> 3;
534 if ( likely(((nr - pages + 7) >> 3) < bytes) )
535 bytes = (unsigned int)((nr - pages + 7) >> 3);
537 /* begin_pfn is not 32K aligned (not a multiple of 8 pages), so its bit
538 * is not byte-aligned and we must bit-shift the bitmap when copying */
539 if ( b1 & 0x7 )
540 {
541 int i, j;
542 uint32_t *l = (uint32_t*) s;
543 int bits = b1 & 0x7;
544 int bitmask = (1 << bits) - 1;
545 int size = (bytes + BYTES_PER_LONG - 1) / BYTES_PER_LONG;
546 unsigned long bitmap[size];
547 static unsigned long printed = 0;
549 if ( printed != begin_pfn )
550 {
551 dprintk(XENLOG_DEBUG, "%s: begin_pfn %lx is not 32K aligned!\n",
552 __FUNCTION__, begin_pfn);
553 printed = begin_pfn;
554 }
556 for ( i = 0; i < size - 1; i++, l++ ) {
557 bitmap[i] = ((*l) >> bits) |
558 (((*((uint8_t*)(l + 1))) & bitmask) << (sizeof(*l) * 8 - bits));
559 }
560 s = (uint8_t*) l;
561 size = BYTES_PER_LONG - ((b1 >> 3) & 0x3);
562 bitmap[i] = 0;
563 for ( j = 0; j < size; j++, s++ )
564 bitmap[i] |= (*s) << (j * 8);
565 bitmap[i] = (bitmap[i] >> bits) | (bitmask << (size * 8 - bits));
566 if ( copy_to_guest_offset(dirty_bitmap, (pages >> 3),
567 (uint8_t*) bitmap, bytes) != 0 )
568 {
569 rv = -EFAULT;
570 goto out;
571 }
572 }
573 else
574 {
575 if ( copy_to_guest_offset(dirty_bitmap, pages >> 3,
576 s, bytes) != 0 )
577 {
578 rv = -EFAULT;
579 goto out;
580 }
581 }
583 if ( l1 != zeroes )
584 clear_page(l1);
585 pages += bytes << 3;
586 if ( l1 != zeroes )
587 unmap_domain_page(l1);
588 b1 = b1 & 0x7;
589 }
590 b2 = 0;
591 if ( l2 )
592 unmap_domain_page(l2);
593 }
594 b3 = 0;
595 if ( l3 )
596 unmap_domain_page(l3);
597 }
598 unmap_domain_page(l4);
600 log_dirty_unlock(d);
602 return rv;
604 out:
605 log_dirty_unlock(d);
606 return rv;
607 }
609 /* This function takes three function pointers which callers must supply
610 * for the log-dirty code to call back into the paging mode. It is
611 * usually invoked when a paging mode is enabled; see shadow_enable()
612 * and hap_enable() for reference.
613 *
614 * These function pointers must not be followed with the log-dirty lock held.
615 */
616 void paging_log_dirty_init(struct domain *d,
617 int (*enable_log_dirty)(struct domain *d),
618 int (*disable_log_dirty)(struct domain *d),
619 void (*clean_dirty_bitmap)(struct domain *d))
620 {
621 /* We initialize the log-dirty lock first. */
622 log_dirty_lock_init(d);
624 d->arch.paging.log_dirty.enable_log_dirty = enable_log_dirty;
625 d->arch.paging.log_dirty.disable_log_dirty = disable_log_dirty;
626 d->arch.paging.log_dirty.clean_dirty_bitmap = clean_dirty_bitmap;
627 d->arch.paging.log_dirty.top = _mfn(INVALID_MFN);
628 }
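/* Illustrative only: a paging mode registers its own handlers here, e.g.
 * (the handler names below are placeholders)
 *
 *     paging_log_dirty_init(d, mode_enable_log_dirty,
 *                              mode_disable_log_dirty,
 *                              mode_clean_dirty_bitmap);
 *
 * which is what shadow_enable() and hap_enable() do for their modes. */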
630 /* This function frees log-dirty bitmap resources. */
631 static void paging_log_dirty_teardown(struct domain *d)
632 {
633 log_dirty_lock(d);
634 paging_free_log_dirty_bitmap(d);
635 log_dirty_unlock(d);
636 }
637 /************************************************/
638 /* CODE FOR PAGING SUPPORT */
639 /************************************************/
640 /* Domain paging struct initialization. */
641 int paging_domain_init(struct domain *d, unsigned int domcr_flags)
642 {
643 int rc;
645 if ( (rc = p2m_init(d)) != 0 )
646 return rc;
648 /* The order of the *_init calls below is important, as the later
649 * ones may rewrite some common fields. Shadow pagetables are the
650 * default... */
651 shadow_domain_init(d, domcr_flags);
653 /* ... but we will use hardware assistance if it's available. */
654 if ( hap_enabled(d) )
655 hap_domain_init(d);
657 return 0;
658 }
660 /* vcpu paging struct initialization goes here */
661 void paging_vcpu_init(struct vcpu *v)
662 {
663 if ( hap_enabled(v->domain) )
664 hap_vcpu_init(v);
665 else
666 shadow_vcpu_init(v);
667 }
670 int paging_domctl(struct domain *d, xen_domctl_shadow_op_t *sc,
671 XEN_GUEST_HANDLE(void) u_domctl)
672 {
673 int rc;
675 if ( unlikely(d == current->domain) )
676 {
677 gdprintk(XENLOG_INFO, "Tried to do a paging op on itself.\n");
678 return -EINVAL;
679 }
681 if ( unlikely(d->is_dying) )
682 {
683 gdprintk(XENLOG_INFO, "Ignoring paging op on dying domain %u\n",
684 d->domain_id);
685 return 0;
686 }
688 if ( unlikely(d->vcpu == NULL) || unlikely(d->vcpu[0] == NULL) )
689 {
690 PAGING_ERROR("Paging op on a domain (%u) with no vcpus\n",
691 d->domain_id);
692 return -EINVAL;
693 }
695 rc = xsm_shadow_control(d, sc->op);
696 if ( rc )
697 return rc;
699 /* Code to handle log-dirty. Note that some log-dirty operations
700 * piggy-back on shadow operations. For example, when
701 * XEN_DOMCTL_SHADOW_OP_OFF is called, we first check whether log-dirty
702 * mode is enabled; if it is, we disable log-dirty and continue with the
703 * shadow code. For this reason, we need to further dispatch the domctl
704 * to the next-level paging code (shadow or hap).
705 */
706 switch ( sc->op )
707 {
708 case XEN_DOMCTL_SHADOW_OP_ENABLE_LOGDIRTY:
709 if ( hap_enabled(d) )
710 hap_logdirty_init(d);
711 return paging_log_dirty_enable(d);
713 case XEN_DOMCTL_SHADOW_OP_ENABLE:
714 if ( sc->mode & XEN_DOMCTL_SHADOW_ENABLE_LOG_DIRTY )
715 {
716 if ( hap_enabled(d) )
717 hap_logdirty_init(d);
718 return paging_log_dirty_enable(d);
719 }
720 break;
722 case XEN_DOMCTL_SHADOW_OP_OFF:
723 if ( paging_mode_log_dirty(d) )
724 if ( (rc = paging_log_dirty_disable(d)) != 0 )
725 return rc;
726 break;
728 case XEN_DOMCTL_SHADOW_OP_CLEAN:
729 case XEN_DOMCTL_SHADOW_OP_PEEK:
730 return paging_log_dirty_op(d, sc);
731 }
733 /* Here, dispatch domctl to the appropriate paging code */
734 if ( hap_enabled(d) )
735 return hap_domctl(d, sc, u_domctl);
736 else
737 return shadow_domctl(d, sc, u_domctl);
738 }
740 /* Call when destroying a domain */
741 void paging_teardown(struct domain *d)
742 {
743 if ( hap_enabled(d) )
744 hap_teardown(d);
745 else
746 shadow_teardown(d);
748 /* clean up log dirty resources. */
749 paging_log_dirty_teardown(d);
751 /* Move populate-on-demand cache back to domain_list for destruction */
752 p2m_pod_empty_cache(d);
753 }
755 /* Call once all of the references to the domain have gone away */
756 void paging_final_teardown(struct domain *d)
757 {
758 if ( hap_enabled(d) )
759 hap_final_teardown(d);
760 else
761 shadow_final_teardown(d);
763 p2m_final_teardown(d);
764 }
766 /* Enable an arbitrary paging-assistance mode. Call once at domain
767 * creation. */
768 int paging_enable(struct domain *d, u32 mode)
769 {
770 if ( hap_enabled(d) )
771 return hap_enable(d, mode | PG_HAP_enable);
772 else
773 return shadow_enable(d, mode | PG_SH_enable);
774 }
776 /* Print paging-assistance info to the console */
777 void paging_dump_domain_info(struct domain *d)
778 {
779 if ( paging_mode_enabled(d) )
780 {
781 printk(" paging assistance: ");
782 if ( paging_mode_shadow(d) )
783 printk("shadow ");
784 if ( paging_mode_hap(d) )
785 printk("hap ");
786 if ( paging_mode_refcounts(d) )
787 printk("refcounts ");
788 if ( paging_mode_log_dirty(d) )
789 printk("log_dirty ");
790 if ( paging_mode_translate(d) )
791 printk("translate ");
792 if ( paging_mode_external(d) )
793 printk("external ");
794 printk("\n");
795 }
796 }
798 void paging_dump_vcpu_info(struct vcpu *v)
799 {
800 if ( paging_mode_enabled(v->domain) )
801 {
802 printk(" paging assistance: ");
803 if ( paging_mode_shadow(v->domain) )
804 {
805 if ( v->arch.paging.mode )
806 printk("shadowed %u-on-%u\n",
807 v->arch.paging.mode->guest_levels,
808 v->arch.paging.mode->shadow.shadow_levels);
809 else
810 printk("not shadowed\n");
811 }
812 else if ( paging_mode_hap(v->domain) && v->arch.paging.mode )
813 printk("hap, %u levels\n",
814 v->arch.paging.mode->guest_levels);
815 else
816 printk("none\n");
817 }
818 }
821 /*
822 * Local variables:
823 * mode: C
824 * c-set-style: "BSD"
825 * c-basic-offset: 4
826 * indent-tabs-mode: nil
827 * End:
828 */