debuggers.hg: view of xen/arch/x86/mm/paging.c @ 21002:a591bf0a9dd6

paging: Remove noisy printk

Signed-off-by: Tim Deegan <Tim.Deegan@citrix.com>

author   Keir Fraser <keir.fraser@citrix.com>
date     Mon Feb 22 09:58:48 2010 +0000 (2010-02-22)
parents  b4041e7bbe1b
children c4301c2c727d
/******************************************************************************
 * arch/x86/paging.c
 *
 * x86 specific paging support
 * Copyright (c) 2007 Advanced Micro Devices (Wei Huang)
 * Copyright (c) 2007 XenSource Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <xen/init.h>
#include <asm/paging.h>
#include <asm/shadow.h>
#include <asm/p2m.h>
#include <asm/hap.h>
#include <asm/guest_access.h>
#include <xen/numa.h>
#include <xsm/xsm.h>

#define hap_enabled(d) (is_hvm_domain(d) && (d)->arch.hvm_domain.hap_enabled)

/* Printouts */
#define PAGING_PRINTK(_f, _a...)                                     \
    debugtrace_printk("pg: %s(): " _f, __func__, ##_a)
#define PAGING_ERROR(_f, _a...)                                      \
    printk("pg error: %s(): " _f, __func__, ##_a)
#define PAGING_DEBUG(flag, _f, _a...)                                \
    do {                                                             \
        if (PAGING_DEBUG_ ## flag)                                   \
            debugtrace_printk("pgdebug: %s(): " _f, __func__, ##_a); \
    } while (0)

/************************************************/
/*              LOG DIRTY SUPPORT               */
/************************************************/
/* Override macros from asm/page.h to make them work with mfn_t */
#undef mfn_to_page
#define mfn_to_page(_m) __mfn_to_page(mfn_x(_m))
#undef mfn_valid
#define mfn_valid(_mfn) __mfn_valid(mfn_x(_mfn))
#undef page_to_mfn
#define page_to_mfn(_pg) _mfn(__page_to_mfn(_pg))

/* The log-dirty lock.  This protects the log-dirty bitmap from
 * concurrent accesses (and teardowns, etc).
 *
 * Locking discipline: always acquire the shadow or HAP lock before this one.
 *
 * Because mark_dirty is called from a lot of places, the log-dirty lock
 * may be acquired with the shadow or HAP locks already held.  When the
 * log-dirty code makes callbacks into HAP or shadow code to reset
 * various traps that will trigger the mark_dirty calls, it must *not*
 * have the log-dirty lock held, or it risks deadlock.  Because the only
 * purpose of those calls is to make sure that *guest* actions will
 * cause mark_dirty to be called (hypervisor actions explicitly call it
 * anyway), it is safe to release the log-dirty lock before the callback
 * as long as the domain is paused for the entire operation. */

#define log_dirty_lock_init(_d)                                   \
    do {                                                          \
        spin_lock_init(&(_d)->arch.paging.log_dirty.lock);        \
        (_d)->arch.paging.log_dirty.locker = -1;                  \
        (_d)->arch.paging.log_dirty.locker_function = "nobody";   \
    } while (0)

#define log_dirty_lock(_d)                                                   \
    do {                                                                     \
        if (unlikely((_d)->arch.paging.log_dirty.locker==current->processor))\
        {                                                                    \
            printk("Error: paging log dirty lock held by %s\n",              \
                   (_d)->arch.paging.log_dirty.locker_function);             \
            BUG();                                                           \
        }                                                                    \
        spin_lock(&(_d)->arch.paging.log_dirty.lock);                        \
        ASSERT((_d)->arch.paging.log_dirty.locker == -1);                    \
        (_d)->arch.paging.log_dirty.locker = current->processor;             \
        (_d)->arch.paging.log_dirty.locker_function = __func__;              \
    } while (0)

#define log_dirty_unlock(_d)                                               \
    do {                                                                   \
        ASSERT((_d)->arch.paging.log_dirty.locker == current->processor);  \
        (_d)->arch.paging.log_dirty.locker = -1;                           \
        (_d)->arch.paging.log_dirty.locker_function = "nobody";            \
        spin_unlock(&(_d)->arch.paging.log_dirty.lock);                    \
    } while (0)

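/* A minimal illustrative sketch of the locking discipline described above,
 * assuming the shadow_lock()/shadow_unlock() helpers provided by the shadow
 * code.  The shadow (or HAP) lock, where needed, is always taken first, and
 * the callbacks made by the log-dirty code (enable/disable/clean) run with
 * the log-dirty lock *released* and the domain paused:
 *
 *     shadow_lock(d);          // outer paging-mode lock first ...
 *     log_dirty_lock(d);       // ... then the log-dirty lock
 *     ... update the dirty bitmap ...
 *     log_dirty_unlock(d);
 *     shadow_unlock(d);
 */
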
static mfn_t paging_new_log_dirty_page(struct domain *d, void **mapping_p)
{
    struct page_info *page;

    page = alloc_domheap_page(NULL, MEMF_node(domain_to_node(d)));
    if ( unlikely(page == NULL) )
    {
        d->arch.paging.log_dirty.failed_allocs++;
        return _mfn(INVALID_MFN);
    }

    d->arch.paging.log_dirty.allocs++;
    *mapping_p = __map_domain_page(page);

    return page_to_mfn(page);
}

static mfn_t paging_new_log_dirty_leaf(
    struct domain *d, unsigned long **leaf_p)
{
    mfn_t mfn = paging_new_log_dirty_page(d, (void **)leaf_p);
    if ( mfn_valid(mfn) )
        clear_page(*leaf_p);
    return mfn;
}

static mfn_t paging_new_log_dirty_node(struct domain *d, mfn_t **node_p)
{
    int i;
    mfn_t mfn = paging_new_log_dirty_page(d, (void **)node_p);
    if ( mfn_valid(mfn) )
        for ( i = 0; i < LOGDIRTY_NODE_ENTRIES; i++ )
            (*node_p)[i] = _mfn(INVALID_MFN);
    return mfn;
}

int paging_alloc_log_dirty_bitmap(struct domain *d)
{
    mfn_t *mapping;

    if ( mfn_valid(d->arch.paging.log_dirty.top) )
        return 0;

    d->arch.paging.log_dirty.top = paging_new_log_dirty_node(d, &mapping);
    if ( unlikely(!mfn_valid(d->arch.paging.log_dirty.top)) )
    {
        /* Clear error indicator since we're reporting this one */
        d->arch.paging.log_dirty.failed_allocs = 0;
        return -ENOMEM;
    }
    unmap_domain_page(mapping);

    return 0;
}

static void paging_free_log_dirty_page(struct domain *d, mfn_t mfn)
{
    d->arch.paging.log_dirty.allocs--;
    free_domheap_page(mfn_to_page(mfn));
}

void paging_free_log_dirty_bitmap(struct domain *d)
{
    mfn_t *l4, *l3, *l2;
    int i4, i3, i2;

    if ( !mfn_valid(d->arch.paging.log_dirty.top) )
        return;

    l4 = map_domain_page(mfn_x(d->arch.paging.log_dirty.top));

    for ( i4 = 0; i4 < LOGDIRTY_NODE_ENTRIES; i4++ )
    {
        if ( !mfn_valid(l4[i4]) )
            continue;

        l3 = map_domain_page(mfn_x(l4[i4]));

        for ( i3 = 0; i3 < LOGDIRTY_NODE_ENTRIES; i3++ )
        {
            if ( !mfn_valid(l3[i3]) )
                continue;

            l2 = map_domain_page(mfn_x(l3[i3]));

            for ( i2 = 0; i2 < LOGDIRTY_NODE_ENTRIES; i2++ )
                if ( mfn_valid(l2[i2]) )
                    paging_free_log_dirty_page(d, l2[i2]);

            unmap_domain_page(l2);
            paging_free_log_dirty_page(d, l3[i3]);
        }

        unmap_domain_page(l3);
        paging_free_log_dirty_page(d, l4[i4]);
    }

    unmap_domain_page(l4);
    paging_free_log_dirty_page(d, d->arch.paging.log_dirty.top);

    d->arch.paging.log_dirty.top = _mfn(INVALID_MFN);
    ASSERT(d->arch.paging.log_dirty.allocs == 0);
    d->arch.paging.log_dirty.failed_allocs = 0;
}

int paging_log_dirty_enable(struct domain *d)
{
    int ret;

    domain_pause(d);
    log_dirty_lock(d);

    if ( paging_mode_log_dirty(d) )
    {
        ret = -EINVAL;
        goto out;
    }

    ret = paging_alloc_log_dirty_bitmap(d);
    if ( ret != 0 )
    {
        paging_free_log_dirty_bitmap(d);
        goto out;
    }

    log_dirty_unlock(d);

    /* Safe because the domain is paused. */
    ret = d->arch.paging.log_dirty.enable_log_dirty(d);

    /* We may leave the bitmap allocated here, but it will be tidied up
     * on domain teardown. */

    domain_unpause(d);
    return ret;

 out:
    log_dirty_unlock(d);
    domain_unpause(d);
    return ret;
}

int paging_log_dirty_disable(struct domain *d)
{
    int ret;

    domain_pause(d);
    /* Safe because the domain is paused. */
    ret = d->arch.paging.log_dirty.disable_log_dirty(d);
    log_dirty_lock(d);
    if ( !paging_mode_log_dirty(d) )
        paging_free_log_dirty_bitmap(d);
    log_dirty_unlock(d);
    domain_unpause(d);

    return ret;
}

/* Mark a page as dirty */
void paging_mark_dirty(struct domain *d, unsigned long guest_mfn)
{
    unsigned long pfn;
    mfn_t gmfn;
    int changed;
    mfn_t mfn, *l4, *l3, *l2;
    unsigned long *l1;
    int i1, i2, i3, i4;

    gmfn = _mfn(guest_mfn);

    if ( !paging_mode_log_dirty(d) || !mfn_valid(gmfn) ||
         page_get_owner(mfn_to_page(gmfn)) != d )
        return;

    log_dirty_lock(d);

    ASSERT(mfn_valid(d->arch.paging.log_dirty.top));

    /* We /really/ mean PFN here, even for non-translated guests. */
    pfn = get_gpfn_from_mfn(mfn_x(gmfn));
    /* Shared MFNs should NEVER be marked dirty */
    BUG_ON(SHARED_M2P(pfn));

    /*
     * Values with the MSB set denote MFNs that aren't really part of the
     * domain's pseudo-physical memory map (e.g., the shared info frame).
     * Nothing to do here...
     */
    if ( unlikely(!VALID_M2P(pfn)) )
        goto out;

    i1 = L1_LOGDIRTY_IDX(pfn);
    i2 = L2_LOGDIRTY_IDX(pfn);
    i3 = L3_LOGDIRTY_IDX(pfn);
    i4 = L4_LOGDIRTY_IDX(pfn);

    l4 = map_domain_page(mfn_x(d->arch.paging.log_dirty.top));
    mfn = l4[i4];
    if ( !mfn_valid(mfn) )
        mfn = l4[i4] = paging_new_log_dirty_node(d, &l3);
    else
        l3 = map_domain_page(mfn_x(mfn));
    unmap_domain_page(l4);
    if ( unlikely(!mfn_valid(mfn)) )
        goto out;

    mfn = l3[i3];
    if ( !mfn_valid(mfn) )
        mfn = l3[i3] = paging_new_log_dirty_node(d, &l2);
    else
        l2 = map_domain_page(mfn_x(mfn));
    unmap_domain_page(l3);
    if ( unlikely(!mfn_valid(mfn)) )
        goto out;

    mfn = l2[i2];
    if ( !mfn_valid(mfn) )
        mfn = l2[i2] = paging_new_log_dirty_leaf(d, &l1);
    else
        l1 = map_domain_page(mfn_x(mfn));
    unmap_domain_page(l2);
    if ( unlikely(!mfn_valid(mfn)) )
        goto out;

    changed = !__test_and_set_bit(i1, l1);
    unmap_domain_page(l1);
    if ( changed )
    {
        PAGING_DEBUG(LOGDIRTY,
                     "marked mfn %" PRI_mfn " (pfn=%lx), dom %d\n",
                     mfn_x(gmfn), pfn, d->domain_id);
        d->arch.paging.log_dirty.dirty_count++;
    }

 out:
    log_dirty_unlock(d);
}

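/* A minimal illustrative sketch (hypothetical helper, not part of this file)
 * of the expected caller pattern: whenever the hypervisor writes to a guest
 * frame on the guest's behalf, it marks that frame dirty so that log-dirty
 * clients (e.g. live migration) see the change:
 *
 *     static void example_write_guest_frame(struct domain *d, mfn_t mfn,
 *                                           const void *src, size_t len)
 *     {
 *         void *p = map_domain_page(mfn_x(mfn));   // map the target frame
 *         memcpy(p, src, len);                     // write on guest's behalf
 *         unmap_domain_page(p);
 *         paging_mark_dirty(d, mfn_x(mfn));        // record it in the bitmap
 *     }
 */
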
/* Read a domain's log-dirty bitmap and stats.  If the operation is a CLEAN,
 * clear the bitmap and stats as well. */
int paging_log_dirty_op(struct domain *d, struct xen_domctl_shadow_op *sc)
{
    int rv = 0, clean = 0, peek = 1;
    unsigned long pages = 0;
    mfn_t *l4, *l3, *l2;
    unsigned long *l1;
    int i4, i3, i2;

    domain_pause(d);
    log_dirty_lock(d);

    clean = (sc->op == XEN_DOMCTL_SHADOW_OP_CLEAN);

    PAGING_DEBUG(LOGDIRTY, "log-dirty %s: dom %u faults=%u dirty=%u\n",
                 (clean) ? "clean" : "peek",
                 d->domain_id,
                 d->arch.paging.log_dirty.fault_count,
                 d->arch.paging.log_dirty.dirty_count);

    sc->stats.fault_count = d->arch.paging.log_dirty.fault_count;
    sc->stats.dirty_count = d->arch.paging.log_dirty.dirty_count;

    if ( clean )
    {
        d->arch.paging.log_dirty.fault_count = 0;
        d->arch.paging.log_dirty.dirty_count = 0;
    }

    if ( guest_handle_is_null(sc->dirty_bitmap) )
        /* caller may have wanted just to clean the state or access stats. */
        peek = 0;

    if ( (peek || clean) && !mfn_valid(d->arch.paging.log_dirty.top) )
    {
        rv = -EINVAL; /* perhaps should be ENOMEM? */
        goto out;
    }

    if ( unlikely(d->arch.paging.log_dirty.failed_allocs) ) {
        printk("%s: %d failed page allocs while logging dirty pages\n",
               __FUNCTION__, d->arch.paging.log_dirty.failed_allocs);
        rv = -ENOMEM;
        goto out;
    }

    pages = 0;
    l4 = (mfn_valid(d->arch.paging.log_dirty.top) ?
          map_domain_page(mfn_x(d->arch.paging.log_dirty.top)) : NULL);

    for ( i4 = 0;
          (pages < sc->pages) && (i4 < LOGDIRTY_NODE_ENTRIES);
          i4++ )
    {
        l3 = (l4 && mfn_valid(l4[i4])) ? map_domain_page(mfn_x(l4[i4])) : NULL;
        for ( i3 = 0;
              (pages < sc->pages) && (i3 < LOGDIRTY_NODE_ENTRIES);
              i3++ )
        {
            l2 = ((l3 && mfn_valid(l3[i3])) ?
                  map_domain_page(mfn_x(l3[i3])) : NULL);
            for ( i2 = 0;
                  (pages < sc->pages) && (i2 < LOGDIRTY_NODE_ENTRIES);
                  i2++ )
            {
                static unsigned long zeroes[PAGE_SIZE/BYTES_PER_LONG];
                unsigned int bytes = PAGE_SIZE;
                l1 = ((l2 && mfn_valid(l2[i2])) ?
                      map_domain_page(mfn_x(l2[i2])) : zeroes);
                if ( unlikely(((sc->pages - pages + 7) >> 3) < bytes) )
                    bytes = (unsigned int)((sc->pages - pages + 7) >> 3);
                if ( likely(peek) )
                {
                    if ( copy_to_guest_offset(sc->dirty_bitmap, pages >> 3,
                                              (uint8_t *)l1, bytes) != 0 )
                    {
                        rv = -EFAULT;
                        goto out;
                    }
                }
                if ( clean && l1 != zeroes )
                    clear_page(l1);
                pages += bytes << 3;
                if ( l1 != zeroes )
                    unmap_domain_page(l1);
            }
            if ( l2 )
                unmap_domain_page(l2);
        }
        if ( l3 )
            unmap_domain_page(l3);
    }
    if ( l4 )
        unmap_domain_page(l4);

    if ( pages < sc->pages )
        sc->pages = pages;

    log_dirty_unlock(d);

    if ( clean )
    {
        /* We need to further call clean_dirty_bitmap() functions of specific
         * paging modes (shadow or hap).  Safe because the domain is paused. */
        d->arch.paging.log_dirty.clean_dirty_bitmap(d);
    }
    domain_unpause(d);
    return rv;

 out:
    log_dirty_unlock(d);
    domain_unpause(d);
    return rv;
}

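/* A minimal illustrative sketch (hypothetical, not a real caller in this
 * file) of how paging_log_dirty_op() is driven, e.g. by the domctl path on
 * behalf of a live-migration toolstack.  'buf' is assumed to be a guest
 * handle to a caller-supplied bitmap with one bit per pfn, and the use of
 * domain_get_maximum_gpfn() to size the request is an assumption:
 *
 *     struct xen_domctl_shadow_op sc = {
 *         .op    = XEN_DOMCTL_SHADOW_OP_CLEAN,     // read *and* clear
 *         .pages = domain_get_maximum_gpfn(d) + 1, // pfns to report
 *     };
 *     set_xen_guest_handle(sc.dirty_bitmap, buf);
 *     rc = paging_log_dirty_op(d, &sc);
 *     // On return, sc.pages holds the number of pfns actually reported
 *     // and sc.stats holds the fault/dirty counters sampled above.
 */
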
int paging_log_dirty_range(struct domain *d,
                           unsigned long begin_pfn,
                           unsigned long nr,
                           XEN_GUEST_HANDLE_64(uint8) dirty_bitmap)
{
    int rv = 0;
    unsigned long pages = 0;
    mfn_t *l4, *l3, *l2;
    unsigned long *l1;
    int b1, b2, b3, b4;
    int i2, i3, i4;

    d->arch.paging.log_dirty.clean_dirty_bitmap(d);
    log_dirty_lock(d);

    PAGING_DEBUG(LOGDIRTY, "log-dirty-range: dom %u faults=%u dirty=%u\n",
                 d->domain_id,
                 d->arch.paging.log_dirty.fault_count,
                 d->arch.paging.log_dirty.dirty_count);

    if ( !mfn_valid(d->arch.paging.log_dirty.top) )
    {
        rv = -EINVAL; /* perhaps should be ENOMEM? */
        goto out;
    }

    if ( unlikely(d->arch.paging.log_dirty.failed_allocs) ) {
        printk("%s: %d failed page allocs while logging dirty pages\n",
               __FUNCTION__, d->arch.paging.log_dirty.failed_allocs);
        rv = -ENOMEM;
        goto out;
    }

    if ( !d->arch.paging.log_dirty.fault_count &&
         !d->arch.paging.log_dirty.dirty_count ) {
        int size = (nr + BITS_PER_LONG - 1) / BITS_PER_LONG;
        unsigned long zeroes[size];
        memset(zeroes, 0x00, size * BYTES_PER_LONG);
        rv = 0;
        if ( copy_to_guest_offset(dirty_bitmap, 0, (uint8_t *) zeroes,
                                  size * BYTES_PER_LONG) != 0 )
            rv = -EFAULT;
        goto out;
    }
    d->arch.paging.log_dirty.fault_count = 0;
    d->arch.paging.log_dirty.dirty_count = 0;

    b1 = L1_LOGDIRTY_IDX(begin_pfn);
    b2 = L2_LOGDIRTY_IDX(begin_pfn);
    b3 = L3_LOGDIRTY_IDX(begin_pfn);
    b4 = L4_LOGDIRTY_IDX(begin_pfn);
    l4 = map_domain_page(mfn_x(d->arch.paging.log_dirty.top));

    for ( i4 = b4;
          (pages < nr) && (i4 < LOGDIRTY_NODE_ENTRIES);
          i4++ )
    {
        l3 = mfn_valid(l4[i4]) ? map_domain_page(mfn_x(l4[i4])) : NULL;
        for ( i3 = b3;
              (pages < nr) && (i3 < LOGDIRTY_NODE_ENTRIES);
              i3++ )
        {
            l2 = ((l3 && mfn_valid(l3[i3])) ?
                  map_domain_page(mfn_x(l3[i3])) : NULL);
            for ( i2 = b2;
                  (pages < nr) && (i2 < LOGDIRTY_NODE_ENTRIES);
                  i2++ )
            {
                static unsigned long zeroes[PAGE_SIZE/BYTES_PER_LONG];
                unsigned int bytes = PAGE_SIZE;
                uint8_t *s;
                l1 = ((l2 && mfn_valid(l2[i2])) ?
                      map_domain_page(mfn_x(l2[i2])) : zeroes);

                s = ((uint8_t*)l1) + (b1 >> 3);
                bytes -= b1 >> 3;

                if ( likely(((nr - pages + 7) >> 3) < bytes) )
                    bytes = (unsigned int)((nr - pages + 7) >> 3);

                /* begin_pfn is not 32K aligned (i.e. not a multiple of 8
                 * pfns, which cover 32KB of guest memory), so the range's
                 * first bit is not on a byte boundary and we have to
                 * bit-shift the bitmap. */
                if ( b1 & 0x7 )
                {
                    int i, j;
                    uint32_t *l = (uint32_t*) s;
                    int bits = b1 & 0x7;
                    int bitmask = (1 << bits) - 1;
                    int size = (bytes + BYTES_PER_LONG - 1) / BYTES_PER_LONG;
                    unsigned long bitmap[size];
                    static unsigned long printed = 0;

                    if ( printed != begin_pfn )
                    {
                        dprintk(XENLOG_DEBUG, "%s: begin_pfn %lx is not 32K aligned!\n",
                                __FUNCTION__, begin_pfn);
                        printed = begin_pfn;
                    }

                    for ( i = 0; i < size - 1; i++, l++ ) {
                        bitmap[i] = ((*l) >> bits) |
                            (((*((uint8_t*)(l + 1))) & bitmask) << (sizeof(*l) * 8 - bits));
                    }
                    s = (uint8_t*) l;
                    size = BYTES_PER_LONG - ((b1 >> 3) & 0x3);
                    bitmap[i] = 0;
                    for ( j = 0; j < size; j++, s++ )
                        bitmap[i] |= (*s) << (j * 8);
                    bitmap[i] = (bitmap[i] >> bits) | (bitmask << (size * 8 - bits));
                    if ( copy_to_guest_offset(dirty_bitmap, (pages >> 3),
                                              (uint8_t*) bitmap, bytes) != 0 )
                    {
                        rv = -EFAULT;
                        goto out;
                    }
                }
                else
                {
                    if ( copy_to_guest_offset(dirty_bitmap, pages >> 3,
                                              s, bytes) != 0 )
                    {
                        rv = -EFAULT;
                        goto out;
                    }
                }

                if ( l1 != zeroes )
                    clear_page(l1);
                pages += bytes << 3;
                if ( l1 != zeroes )
                    unmap_domain_page(l1);
                b1 = b1 & 0x7;
            }
            b2 = 0;
            if ( l2 )
                unmap_domain_page(l2);
        }
        b3 = 0;
        if ( l3 )
            unmap_domain_page(l3);
    }
    unmap_domain_page(l4);

    log_dirty_unlock(d);

    return rv;

 out:
    log_dirty_unlock(d);
    return rv;
}

/* Note that this function takes three function pointers.  Callers must
 * supply these functions for the log-dirty code to call.  This function is
 * usually invoked when paging is enabled; see shadow_enable() and
 * hap_enable() for reference.
 *
 * These function pointers must not be followed with the log-dirty lock held.
 */
void paging_log_dirty_init(struct domain *d,
                           int    (*enable_log_dirty)(struct domain *d),
                           int    (*disable_log_dirty)(struct domain *d),
                           void   (*clean_dirty_bitmap)(struct domain *d))
{
    /* We initialize the log-dirty lock first */
    log_dirty_lock_init(d);

    d->arch.paging.log_dirty.enable_log_dirty = enable_log_dirty;
    d->arch.paging.log_dirty.disable_log_dirty = disable_log_dirty;
    d->arch.paging.log_dirty.clean_dirty_bitmap = clean_dirty_bitmap;
    d->arch.paging.log_dirty.top = _mfn(INVALID_MFN);
}

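/* A minimal illustrative sketch (hypothetical names) of how a paging mode
 * registers its log-dirty callbacks via paging_log_dirty_init().  The real
 * registrations live in the shadow and HAP code (see shadow_enable() and
 * hap_logdirty_init()); the callback names below are placeholders only:
 *
 *     static int  mymode_enable_log_dirty(struct domain *d)   { ... }
 *     static int  mymode_disable_log_dirty(struct domain *d)  { ... }
 *     static void mymode_clean_dirty_bitmap(struct domain *d) { ... }
 *
 *     paging_log_dirty_init(d,
 *                           mymode_enable_log_dirty,
 *                           mymode_disable_log_dirty,
 *                           mymode_clean_dirty_bitmap);
 */
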
/* This function frees the log-dirty bitmap resources. */
static void paging_log_dirty_teardown(struct domain *d)
{
    log_dirty_lock(d);
    paging_free_log_dirty_bitmap(d);
    log_dirty_unlock(d);
}
/************************************************/
/*           CODE FOR PAGING SUPPORT            */
/************************************************/
/* Domain paging struct initialization. */
int paging_domain_init(struct domain *d, unsigned int domcr_flags)
{
    int rc;

    if ( (rc = p2m_init(d)) != 0 )
        return rc;

    /* The order of the *_init calls below is important, as the later
     * ones may rewrite some common fields.  Shadow pagetables are the
     * default... */
    shadow_domain_init(d, domcr_flags);

    /* ... but we will use hardware assistance if it's available. */
    if ( hap_enabled(d) )
        hap_domain_init(d);

    return 0;
}

/* vcpu paging struct initialization goes here */
void paging_vcpu_init(struct vcpu *v)
{
    if ( hap_enabled(v->domain) )
        hap_vcpu_init(v);
    else
        shadow_vcpu_init(v);
}

int paging_domctl(struct domain *d, xen_domctl_shadow_op_t *sc,
                  XEN_GUEST_HANDLE(void) u_domctl)
{
    int rc;

    if ( unlikely(d == current->domain) )
    {
        gdprintk(XENLOG_INFO, "Tried to do a paging op on itself.\n");
        return -EINVAL;
    }

    if ( unlikely(d->is_dying) )
    {
        gdprintk(XENLOG_INFO, "Ignoring paging op on dying domain %u\n",
                 d->domain_id);
        return 0;
    }

    if ( unlikely(d->vcpu == NULL) || unlikely(d->vcpu[0] == NULL) )
    {
        PAGING_ERROR("Paging op on a domain (%u) with no vcpus\n",
                     d->domain_id);
        return -EINVAL;
    }

    rc = xsm_shadow_control(d, sc->op);
    if ( rc )
        return rc;

    /* Code to handle log-dirty.  Note that some log-dirty operations
     * piggy-back on shadow operations.  For example, when
     * XEN_DOMCTL_SHADOW_OP_OFF is called, it first checks whether log-dirty
     * mode is enabled.  If it is, we disable log-dirty mode and then continue
     * with the shadow code.  For this reason, we need to further dispatch
     * the domctl to the next-level paging code (shadow or hap).
     */
    switch ( sc->op )
    {
    case XEN_DOMCTL_SHADOW_OP_ENABLE_LOGDIRTY:
        if ( hap_enabled(d) )
            hap_logdirty_init(d);
        return paging_log_dirty_enable(d);

    case XEN_DOMCTL_SHADOW_OP_ENABLE:
        if ( sc->mode & XEN_DOMCTL_SHADOW_ENABLE_LOG_DIRTY )
        {
            if ( hap_enabled(d) )
                hap_logdirty_init(d);
            return paging_log_dirty_enable(d);
        }
        break;

    case XEN_DOMCTL_SHADOW_OP_OFF:
        if ( paging_mode_log_dirty(d) )
            if ( (rc = paging_log_dirty_disable(d)) != 0 )
                return rc;
        break;

    case XEN_DOMCTL_SHADOW_OP_CLEAN:
    case XEN_DOMCTL_SHADOW_OP_PEEK:
        return paging_log_dirty_op(d, sc);
    }

    /* Here, dispatch domctl to the appropriate paging code */
    if ( hap_enabled(d) )
        return hap_domctl(d, sc, u_domctl);
    else
        return shadow_domctl(d, sc, u_domctl);
}

/* Call when destroying a domain */
void paging_teardown(struct domain *d)
{
    if ( hap_enabled(d) )
        hap_teardown(d);
    else
        shadow_teardown(d);

    /* clean up log dirty resources. */
    paging_log_dirty_teardown(d);

    /* Move populate-on-demand cache back to domain_list for destruction */
    p2m_pod_empty_cache(d);
}

/* Call once all of the references to the domain have gone away */
void paging_final_teardown(struct domain *d)
{
    if ( hap_enabled(d) )
        hap_final_teardown(d);
    else
        shadow_final_teardown(d);

    p2m_final_teardown(d);
}

/* Enable an arbitrary paging-assistance mode.  Call once at domain
 * creation. */
int paging_enable(struct domain *d, u32 mode)
{
    if ( hap_enabled(d) )
        return hap_enable(d, mode | PG_HAP_enable);
    else
        return shadow_enable(d, mode | PG_SH_enable);
}

/* Print paging-assistance info to the console */
void paging_dump_domain_info(struct domain *d)
{
    if ( paging_mode_enabled(d) )
    {
        printk(" paging assistance: ");
        if ( paging_mode_shadow(d) )
            printk("shadow ");
        if ( paging_mode_hap(d) )
            printk("hap ");
        if ( paging_mode_refcounts(d) )
            printk("refcounts ");
        if ( paging_mode_log_dirty(d) )
            printk("log_dirty ");
        if ( paging_mode_translate(d) )
            printk("translate ");
        if ( paging_mode_external(d) )
            printk("external ");
        printk("\n");
    }
}

void paging_dump_vcpu_info(struct vcpu *v)
{
    if ( paging_mode_enabled(v->domain) )
    {
        printk(" paging assistance: ");
        if ( paging_mode_shadow(v->domain) )
        {
            if ( v->arch.paging.mode )
                printk("shadowed %u-on-%u\n",
                       v->arch.paging.mode->guest_levels,
                       v->arch.paging.mode->shadow.shadow_levels);
            else
                printk("not shadowed\n");
        }
        else if ( paging_mode_hap(v->domain) && v->arch.paging.mode )
            printk("hap, %u levels\n",
                   v->arch.paging.mode->guest_levels);
        else
            printk("none\n");
    }
}

/*
 * Local variables:
 * mode: C
 * c-set-style: "BSD"
 * c-basic-offset: 4
 * indent-tabs-mode: nil
 * End:
 */