xen/common/memory.c @ 22848:6341fe0f4e5a

Added tag 4.1.0-rc2 for changeset 9dca60d88c63
author Keir Fraser <keir@xen.org>
date Tue Jan 25 14:06:55 2011 +0000 (2011-01-25)
parents c5b42971234a
/******************************************************************************
 * memory.c
 *
 * Code to handle memory-related requests.
 *
 * Copyright (c) 2003-2004, B Dragovic
 * Copyright (c) 2003-2005, K A Fraser
 */

#include <xen/config.h>
#include <xen/types.h>
#include <xen/lib.h>
#include <xen/mm.h>
#include <xen/perfc.h>
#include <xen/sched.h>
#include <xen/event.h>
#include <xen/paging.h>
#include <xen/iocap.h>
#include <xen/guest_access.h>
#include <xen/hypercall.h>
#include <xen/errno.h>
#include <xen/tmem.h>
#include <xen/tmem_xen.h>
#include <asm/current.h>
#include <asm/hardirq.h>
#ifdef CONFIG_X86
# include <asm/p2m.h>
#endif
#include <xen/numa.h>
#include <public/memory.h>
#include <xsm/xsm.h>
#include <xen/trace.h>
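
/*
 * All of the single-domain reservation memops (increase_reservation,
 * decrease_reservation, populate_physmap) are driven by one argument
 * block.  nr_done doubles as the continuation point: when a call is
 * preempted, nr_done is folded back into the hypercall's 'cmd' word
 * (see do_memory_op()) so the guest resumes at the extent it stopped on.
 */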
struct memop_args {
    /* INPUT */
    struct domain *domain;     /* Domain to be affected. */
    XEN_GUEST_HANDLE(xen_pfn_t) extent_list; /* List of extent base addrs. */
    unsigned int nr_extents;   /* Number of extents to allocate or free. */
    unsigned int extent_order; /* Size of each extent. */
    unsigned int memflags;     /* Allocation flags. */

    /* INPUT/OUTPUT */
    unsigned int nr_done;    /* Number of extents processed so far. */
    int          preempted;  /* Was the hypercall preempted? */
};
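
/*
 * XENMEM_increase_reservation: allocate a->nr_extents extents of
 * 2^extent_order pages each from the domain heap and report the new
 * MFNs back through a->extent_list (which may be a NULL handle).
 * Unlike populate_physmap(), no physmap entries are set up here.
 */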
static void increase_reservation(struct memop_args *a)
{
    struct page_info *page;
    unsigned long i;
    xen_pfn_t mfn;
    struct domain *d = a->domain;

    if ( !guest_handle_is_null(a->extent_list) &&
         !guest_handle_subrange_okay(a->extent_list, a->nr_done,
                                     a->nr_extents-1) )
        return;

    if ( !multipage_allocation_permitted(current->domain, a->extent_order) )
        return;

    for ( i = a->nr_done; i < a->nr_extents; i++ )
    {
        if ( hypercall_preempt_check() )
        {
            a->preempted = 1;
            goto out;
        }

        page = alloc_domheap_pages(d, a->extent_order, a->memflags);
        if ( unlikely(page == NULL) )
        {
            gdprintk(XENLOG_INFO, "Could not allocate order=%d extent: "
                     "id=%d memflags=%x (%ld of %d)\n",
                     a->extent_order, d->domain_id, a->memflags,
                     i, a->nr_extents);
            goto out;
        }

        /* Inform the domain of the new page's machine address. */
        if ( !guest_handle_is_null(a->extent_list) )
        {
            mfn = page_to_mfn(page);
            if ( unlikely(__copy_to_guest_offset(a->extent_list, i, &mfn, 1)) )
                goto out;
        }
    }

 out:
    a->nr_done = i;
}
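
/*
 * XENMEM_populate_physmap: for each GPFN the guest supplies in
 * a->extent_list, either allocate a fresh extent and wire it into the
 * physmap at that GPFN, or (with MEMF_populate_on_demand) just mark
 * the range as populate-on-demand so memory is allocated lazily on
 * first access.  For non-translated (PV) domains the new MFN is also
 * recorded in the M2P table and written back to the extent list.
 */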
static void populate_physmap(struct memop_args *a)
{
    struct page_info *page;
    unsigned long i, j;
    xen_pfn_t gpfn, mfn;
    struct domain *d = a->domain;

    if ( !guest_handle_subrange_okay(a->extent_list, a->nr_done,
                                     a->nr_extents-1) )
        return;

    if ( !multipage_allocation_permitted(current->domain, a->extent_order) )
        return;

    for ( i = a->nr_done; i < a->nr_extents; i++ )
    {
        if ( hypercall_preempt_check() )
        {
            a->preempted = 1;
            goto out;
        }

        if ( unlikely(__copy_from_guest_offset(&gpfn, a->extent_list, i, 1)) )
            goto out;

        if ( a->memflags & MEMF_populate_on_demand )
        {
            if ( guest_physmap_mark_populate_on_demand(d, gpfn,
                                                       a->extent_order) < 0 )
                goto out;
        }
        else
        {
            page = alloc_domheap_pages(d, a->extent_order, a->memflags);
            if ( unlikely(page == NULL) )
            {
                if ( !opt_tmem || (a->extent_order != 0) )
                    gdprintk(XENLOG_INFO, "Could not allocate order=%d extent:"
                             " id=%d memflags=%x (%ld of %d)\n",
                             a->extent_order, d->domain_id, a->memflags,
                             i, a->nr_extents);
                goto out;
            }

            mfn = page_to_mfn(page);
            guest_physmap_add_page(d, gpfn, mfn, a->extent_order);

            if ( !paging_mode_translate(d) )
            {
                for ( j = 0; j < (1 << a->extent_order); j++ )
                    set_gpfn_from_mfn(mfn + j, gpfn + j);

                /* Inform the domain of the new page's machine address. */
                if ( unlikely(__copy_to_guest_offset(a->extent_list, i, &mfn, 1)) )
                    goto out;
            }
        }
    }

 out:
    a->nr_done = i;
}
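
/*
 * Drop a single guest page: translate the GPFN to an MFN, strip its
 * 'pinned' and 'allocated' references, and remove the physmap entry.
 * Paged-out and shared pages (x86 only) are special-cased so the
 * paging/sharing subsystems can release their state.  Returns 1 on
 * success, 0 if the GPFN did not map to a valid page owned by d.
 */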
int guest_remove_page(struct domain *d, unsigned long gmfn)
{
    struct page_info *page;
#ifdef CONFIG_X86
    p2m_type_t p2mt;
#endif
    unsigned long mfn;

#ifdef CONFIG_X86
    mfn = mfn_x(gfn_to_mfn(p2m_get_hostp2m(d), gmfn, &p2mt));
    if ( unlikely(p2m_is_paging(p2mt)) )
    {
        guest_physmap_remove_page(d, gmfn, mfn, 0);
        p2m_mem_paging_drop_page(p2m_get_hostp2m(d), gmfn);
        return 1;
    }
#else
    mfn = gmfn_to_mfn(d, gmfn);
#endif
    if ( unlikely(!mfn_valid(mfn)) )
    {
        gdprintk(XENLOG_INFO, "Domain %u page number %lx invalid\n",
                 d->domain_id, gmfn);
        return 0;
    }

    page = mfn_to_page(mfn);
#ifdef CONFIG_X86
    /* If gmfn is shared, just drop the guest reference (which may or may not
     * free the page) */
    if ( p2m_is_shared(p2mt) )
    {
        put_page_and_type(page);
        guest_physmap_remove_page(d, gmfn, mfn, 0);
        return 1;
    }

#endif /* CONFIG_X86 */
    if ( unlikely(!get_page(page, d)) )
    {
        gdprintk(XENLOG_INFO, "Bad page free for domain %u\n", d->domain_id);
        return 0;
    }

    if ( test_and_clear_bit(_PGT_pinned, &page->u.inuse.type_info) )
        put_page_and_type(page);

    if ( test_and_clear_bit(_PGC_allocated, &page->count_info) )
        put_page(page);

    guest_physmap_remove_page(d, gmfn, mfn, 0);

    put_page(page);

    return 1;
}
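
/*
 * XENMEM_decrease_reservation: release the extents named in
 * a->extent_list back to Xen.  Each extent is first offered to the
 * populate-on-demand pool (HVM only); otherwise every page in the
 * extent is torn down individually via guest_remove_page().  A trace
 * record is emitted per extent when tracing is active.
 */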
static void decrease_reservation(struct memop_args *a)
{
    unsigned long i, j;
    xen_pfn_t gmfn;

    if ( !guest_handle_subrange_okay(a->extent_list, a->nr_done,
                                     a->nr_extents-1) )
        return;

    for ( i = a->nr_done; i < a->nr_extents; i++ )
    {
        if ( hypercall_preempt_check() )
        {
            a->preempted = 1;
            goto out;
        }

        if ( unlikely(__copy_from_guest_offset(&gmfn, a->extent_list, i, 1)) )
            goto out;

        if ( tb_init_done )
        {
            struct {
                u64 gfn;
                int d:16,order:16;
            } t;

            t.gfn = gmfn;
            t.d = a->domain->domain_id;
            t.order = a->extent_order;

            __trace_var(TRC_MEM_DECREASE_RESERVATION, 0, sizeof(t), &t);
        }

        /* See if populate-on-demand wants to handle this */
        if ( is_hvm_domain(a->domain)
             && p2m_pod_decrease_reservation(a->domain, gmfn, a->extent_order) )
            continue;

        for ( j = 0; j < (1 << a->extent_order); j++ )
            if ( !guest_remove_page(a->domain, gmfn + j) )
                goto out;
    }

 out:
    a->nr_done = i;
}
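
/*
 * XENMEM_exchange: trade a set of a domain's existing extents for
 * freshly allocated ones with different properties, e.g. contiguous
 * superpages or machine addresses below a limit given in
 * exch.out.mem_flags.  Input pages are stolen from the domain first;
 * only once the replacement pages have been allocated are the old
 * physmap entries destroyed, so a failed chunk can be rolled back.
 */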
static long memory_exchange(XEN_GUEST_HANDLE(xen_memory_exchange_t) arg)
{
    struct xen_memory_exchange exch;
    PAGE_LIST_HEAD(in_chunk_list);
    PAGE_LIST_HEAD(out_chunk_list);
    unsigned long in_chunk_order, out_chunk_order;
    xen_pfn_t     gpfn, gmfn, mfn;
    unsigned long i, j, k;
    unsigned int  memflags = 0;
    long          rc = 0;
    struct domain *d;
    struct page_info *page;

    if ( copy_from_guest(&exch, arg, 1) )
        return -EFAULT;

    /* Various sanity checks. */
    if ( (exch.nr_exchanged > exch.in.nr_extents) ||
         /* Input and output domain identifiers match? */
         (exch.in.domid != exch.out.domid) ||
         /* Sizes of input and output lists do not overflow a long? */
         ((~0UL >> exch.in.extent_order) < exch.in.nr_extents) ||
         ((~0UL >> exch.out.extent_order) < exch.out.nr_extents) ||
         /* Sizes of input and output lists match? */
         ((exch.in.nr_extents << exch.in.extent_order) !=
          (exch.out.nr_extents << exch.out.extent_order)) )
    {
        rc = -EINVAL;
        goto fail_early;
    }

    /* Only privileged guests can allocate multi-page contiguous extents. */
    if ( !multipage_allocation_permitted(current->domain,
                                         exch.in.extent_order) ||
         !multipage_allocation_permitted(current->domain,
                                         exch.out.extent_order) )
    {
        rc = -EPERM;
        goto fail_early;
    }
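
    /*
     * The exchange proceeds in "chunks": a chunk is the smallest unit
     * containing a whole number of both input and output extents.
     * E.g. trading 4kB pages (order 0) for 2MB superpages (order 9)
     * gives in_chunk_order = 9 and out_chunk_order = 0, i.e. one chunk
     * is 512 input extents and 1 output extent.  Working per chunk lets
     * the hypercall be preempted and resumed on a chunk boundary.
     */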
    if ( exch.in.extent_order <= exch.out.extent_order )
    {
        in_chunk_order  = exch.out.extent_order - exch.in.extent_order;
        out_chunk_order = 0;
    }
    else
    {
        in_chunk_order  = 0;
        out_chunk_order = exch.in.extent_order - exch.out.extent_order;
    }

    if ( likely(exch.in.domid == DOMID_SELF) )
    {
        d = rcu_lock_current_domain();
    }
    else
    {
        if ( (d = rcu_lock_domain_by_id(exch.in.domid)) == NULL )
            goto fail_early;

        if ( !IS_PRIV_FOR(current->domain, d) )
        {
            rcu_unlock_domain(d);
            rc = -EPERM;
            goto fail_early;
        }
    }

    memflags |= MEMF_bits(domain_clamp_alloc_bitsize(
        d,
        XENMEMF_get_address_bits(exch.out.mem_flags) ? :
        (BITS_PER_LONG+PAGE_SHIFT)));
    memflags |= MEMF_node(XENMEMF_get_node(exch.out.mem_flags));

    for ( i = (exch.nr_exchanged >> in_chunk_order);
          i < (exch.in.nr_extents >> in_chunk_order);
          i++ )
    {
        if ( hypercall_preempt_check() )
        {
            exch.nr_exchanged = i << in_chunk_order;
            rcu_unlock_domain(d);
            if ( copy_field_to_guest(arg, &exch, nr_exchanged) )
                return -EFAULT;
            return hypercall_create_continuation(
                __HYPERVISOR_memory_op, "lh", XENMEM_exchange, arg);
        }

        /* Steal a chunk's worth of input pages from the domain. */
        for ( j = 0; j < (1UL << in_chunk_order); j++ )
        {
            if ( unlikely(__copy_from_guest_offset(
                &gmfn, exch.in.extent_start, (i<<in_chunk_order)+j, 1)) )
            {
                rc = -EFAULT;
                goto fail;
            }

            for ( k = 0; k < (1UL << exch.in.extent_order); k++ )
            {
#ifdef CONFIG_X86
                p2m_type_t p2mt;

                /* Shared pages cannot be exchanged */
                mfn = mfn_x(gfn_to_mfn_unshare(p2m_get_hostp2m(d), gmfn + k, &p2mt, 0));
                if ( p2m_is_shared(p2mt) )
                {
                    rc = -ENOMEM;
                    goto fail;
                }
#else /* !CONFIG_X86 */
                mfn = gmfn_to_mfn(d, gmfn + k);
#endif
                if ( unlikely(!mfn_valid(mfn)) )
                {
                    rc = -EINVAL;
                    goto fail;
                }

                page = mfn_to_page(mfn);

                if ( unlikely(steal_page(d, page, MEMF_no_refcount)) )
                {
                    rc = -EINVAL;
                    goto fail;
                }

                page_list_add(page, &in_chunk_list);
            }
        }

        /* Allocate a chunk's worth of anonymous output pages. */
        for ( j = 0; j < (1UL << out_chunk_order); j++ )
        {
            page = alloc_domheap_pages(NULL, exch.out.extent_order, memflags);
            if ( unlikely(page == NULL) )
            {
                rc = -ENOMEM;
                goto fail;
            }

            page_list_add(page, &out_chunk_list);
        }

        /*
         * Success! Beyond this point we cannot fail for this chunk.
         */

        /* Destroy final reference to each input page. */
        while ( (page = page_list_remove_head(&in_chunk_list)) )
        {
            unsigned long gfn;

            if ( !test_and_clear_bit(_PGC_allocated, &page->count_info) )
                BUG();
            mfn = page_to_mfn(page);
            gfn = mfn_to_gmfn(d, mfn);
            /* Pages were unshared above */
            BUG_ON(SHARED_M2P(gfn));
            guest_physmap_remove_page(d, gfn, mfn, 0);
            put_page(page);
        }

        /* Assign each output page to the domain. */
        j = 0;
        while ( (page = page_list_remove_head(&out_chunk_list)) )
        {
            if ( assign_pages(d, page, exch.out.extent_order,
                              MEMF_no_refcount) )
            {
                unsigned long dec_count;
                bool_t drop_dom_ref;

                /*
                 * Pages in in_chunk_list are stolen without decreasing
                 * tot_pages.  If the domain is dying when we assign the
                 * output pages, we have to decrease the count here.  Pages
                 * that have already been assigned are covered by
                 * domain_relinquish_resources().
                 */
                dec_count = (((1UL << exch.in.extent_order) *
                              (1UL << in_chunk_order)) -
                             (j * (1UL << exch.out.extent_order)));

                spin_lock(&d->page_alloc_lock);
                d->tot_pages -= dec_count;
                drop_dom_ref = (dec_count && !d->tot_pages);
                spin_unlock(&d->page_alloc_lock);

                if ( drop_dom_ref )
                    put_domain(d);

                free_domheap_pages(page, exch.out.extent_order);
                goto dying;
            }

            /* Note that we ignore errors accessing the output extent list. */
            (void)__copy_from_guest_offset(
                &gpfn, exch.out.extent_start, (i<<out_chunk_order)+j, 1);

            mfn = page_to_mfn(page);
            guest_physmap_add_page(d, gpfn, mfn, exch.out.extent_order);

            if ( !paging_mode_translate(d) )
            {
                for ( k = 0; k < (1UL << exch.out.extent_order); k++ )
                    set_gpfn_from_mfn(mfn + k, gpfn + k);
                (void)__copy_to_guest_offset(
                    exch.out.extent_start, (i<<out_chunk_order)+j, &mfn, 1);
            }
            j++;
        }
        BUG_ON( !(d->is_dying) && (j != (1UL << out_chunk_order)) );
    }

    exch.nr_exchanged = exch.in.nr_extents;
    if ( copy_field_to_guest(arg, &exch, nr_exchanged) )
        rc = -EFAULT;
    rcu_unlock_domain(d);
    return rc;

    /*
     * Failed a chunk! Free any partial chunk work. Tell caller how many
     * chunks succeeded.
     */
 fail:
    /* Reassign any input pages we managed to steal. */
    while ( (page = page_list_remove_head(&in_chunk_list)) )
        if ( assign_pages(d, page, 0, MEMF_no_refcount) )
            BUG();
 dying:
    rcu_unlock_domain(d);
    /* Free any output pages we managed to allocate. */
    while ( (page = page_list_remove_head(&out_chunk_list)) )
        free_domheap_pages(page, exch.out.extent_order);

    exch.nr_exchanged = i << in_chunk_order;

 fail_early:
    if ( copy_field_to_guest(arg, &exch, nr_exchanged) )
        rc = -EFAULT;
    return rc;
}
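
/*
 * Entry point for the XENMEM_* hypercalls.  'cmd' packs the sub-op in
 * its low MEMOP_CMD_MASK bits and, for the reservation sub-ops, the
 * continuation point (start extent) in the bits from MEMOP_EXTENT_SHIFT
 * upwards; a preempted call is therefore resumed simply by re-issuing
 * the hypercall with an updated 'cmd'.  Unknown sub-ops fall through to
 * arch_memory_op().
 */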
long do_memory_op(unsigned long cmd, XEN_GUEST_HANDLE(void) arg)
{
    struct domain *d;
    int rc, op;
    unsigned int address_bits;
    unsigned long start_extent;
    struct xen_memory_reservation reservation;
    struct memop_args args;
    domid_t domid;

    op = cmd & MEMOP_CMD_MASK;

    switch ( op )
    {
    case XENMEM_increase_reservation:
    case XENMEM_decrease_reservation:
    case XENMEM_populate_physmap:
        start_extent = cmd >> MEMOP_EXTENT_SHIFT;

        if ( copy_from_guest(&reservation, arg, 1) )
            return start_extent;

        /* Is size too large for us to encode a continuation? */
        if ( reservation.nr_extents > (ULONG_MAX >> MEMOP_EXTENT_SHIFT) )
            return start_extent;

        if ( unlikely(start_extent >= reservation.nr_extents) )
            return start_extent;

        args.extent_list  = reservation.extent_start;
        args.nr_extents   = reservation.nr_extents;
        args.extent_order = reservation.extent_order;
        args.nr_done      = start_extent;
        args.preempted    = 0;
        args.memflags     = 0;

        address_bits = XENMEMF_get_address_bits(reservation.mem_flags);
        if ( (address_bits != 0) &&
             (address_bits < (get_order_from_pages(max_page) + PAGE_SHIFT)) )
        {
            if ( address_bits <= PAGE_SHIFT )
                return start_extent;
            args.memflags = MEMF_bits(address_bits);
        }

        args.memflags |= MEMF_node(XENMEMF_get_node(reservation.mem_flags));
        if ( reservation.mem_flags & XENMEMF_exact_node_request )
            args.memflags |= MEMF_exact_node;

        if ( op == XENMEM_populate_physmap
             && (reservation.mem_flags & XENMEMF_populate_on_demand) )
            args.memflags |= MEMF_populate_on_demand;

        if ( likely(reservation.domid == DOMID_SELF) )
        {
            d = rcu_lock_current_domain();
        }
        else
        {
            if ( (d = rcu_lock_domain_by_id(reservation.domid)) == NULL )
                return start_extent;
            if ( !IS_PRIV_FOR(current->domain, d) )
            {
                rcu_unlock_domain(d);
                return start_extent;
            }
        }
        args.domain = d;

        rc = xsm_memory_adjust_reservation(current->domain, d);
        if ( rc )
        {
            rcu_unlock_domain(d);
            return rc;
        }

        switch ( op )
        {
        case XENMEM_increase_reservation:
            increase_reservation(&args);
            break;
        case XENMEM_decrease_reservation:
            decrease_reservation(&args);
            break;
        default: /* XENMEM_populate_physmap */
            populate_physmap(&args);
            break;
        }

        rcu_unlock_domain(d);

        rc = args.nr_done;

        if ( args.preempted )
            return hypercall_create_continuation(
                __HYPERVISOR_memory_op, "lh",
                op | (rc << MEMOP_EXTENT_SHIFT), arg);

        break;

    case XENMEM_exchange:
        rc = memory_exchange(guest_handle_cast(arg, xen_memory_exchange_t));
        break;

    case XENMEM_maximum_ram_page:
        rc = max_page;
        break;

    case XENMEM_current_reservation:
    case XENMEM_maximum_reservation:
    case XENMEM_maximum_gpfn:
        if ( copy_from_guest(&domid, arg, 1) )
            return -EFAULT;

        rc = rcu_lock_target_domain_by_id(domid, &d);
        if ( rc )
            return rc;

        rc = xsm_memory_stat_reservation(current->domain, d);
        if ( rc )
        {
            rcu_unlock_domain(d);
            return rc;
        }

        switch ( op )
        {
        case XENMEM_current_reservation:
            rc = d->tot_pages;
            break;
        case XENMEM_maximum_reservation:
            rc = d->max_pages;
            break;
        default:
            ASSERT(op == XENMEM_maximum_gpfn);
            rc = domain_get_maximum_gpfn(d);
            break;
        }

        rcu_unlock_domain(d);

        break;

    default:
        rc = arch_memory_op(op, arg);
        break;
    }

    return rc;
}

/*
 * Local variables:
 * mode: C
 * c-set-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */