xen-vtx-unstable

annotate xen/common/memory.c @ 6776:e7c7196fa329

merge?
author cl349@firebug.cl.cam.ac.uk
date Tue Sep 13 15:46:49 2005 +0000 (2005-09-13)
parents 4d899a738d59 cdfa7dd00c44
children 72e4e2aab342
rev   line source
kaf24@6468 1 /******************************************************************************
kaf24@6468 2 * memory.c
kaf24@6468 3 *
kaf24@6468 4 * Code to handle memory-related requests.
kaf24@6468 5 *
kaf24@6468 6 * Copyright (c) 2003-2004, B Dragovic
kaf24@6468 7 * Copyright (c) 2003-2005, K A Fraser
kaf24@6468 8 */
kaf24@6468 9
kaf24@6468 10 #include <xen/config.h>
kaf24@6468 11 #include <xen/types.h>
kaf24@6468 12 #include <xen/lib.h>
kaf24@6468 13 #include <xen/mm.h>
kaf24@6468 14 #include <xen/perfc.h>
kaf24@6468 15 #include <xen/sched.h>
kaf24@6468 16 #include <xen/event.h>
kaf24@6468 17 #include <xen/shadow.h>
kaf24@6468 18 #include <asm/current.h>
kaf24@6468 19 #include <asm/hardirq.h>
kaf24@6468 20 #include <public/memory.h>
kaf24@6468 21
kaf24@6468 22 static long
kaf24@6468 23 increase_reservation(
kaf24@6468 24 struct domain *d,
kaf24@6468 25 unsigned long *extent_list,
kaf24@6468 26 unsigned int nr_extents,
kaf24@6468 27 unsigned int extent_order,
kaf24@6589 28 unsigned int flags,
kaf24@6589 29 int *preempted)
kaf24@6468 30 {
kaf24@6468 31 struct pfn_info *page;
kaf24@6732 32 unsigned int i;
kaf24@6468 33
kaf24@6683 34 if ( (extent_list != NULL) &&
kaf24@6683 35 !array_access_ok(extent_list, nr_extents, sizeof(*extent_list)) )
kaf24@6468 36 return 0;
kaf24@6468 37
kaf24@6468 38 if ( (extent_order != 0) && !IS_CAPABLE_PHYSDEV(current->domain) )
kaf24@6468 39 {
kaf24@6732 40 DPRINTK("Only I/O-capable domains may allocate multi-page extents.\n");
kaf24@6468 41 return 0;
kaf24@6468 42 }
kaf24@6468 43
kaf24@6468 44 for ( i = 0; i < nr_extents; i++ )
kaf24@6468 45 {
kaf24@6468 46 if ( hypercall_preempt_check() )
kaf24@6589 47 {
kaf24@6589 48 *preempted = 1;
kaf24@6468 49 return i;
kaf24@6589 50 }
kaf24@6468 51
kaf24@6468 52 if ( unlikely((page = alloc_domheap_pages(
kaf24@6468 53 d, extent_order, flags)) == NULL) )
kaf24@6468 54 {
kaf24@6732 55 DPRINTK("Could not allocate order=%d extent: "
kaf24@6732 56 "id=%d flags=%x (%d of %d)\n",
kaf24@6732 57 extent_order, d->domain_id, flags, i, nr_extents);
kaf24@6468 58 return i;
kaf24@6468 59 }
kaf24@6468 60
kaf24@6468 61 /* Inform the domain of the new page's machine address. */
kaf24@6683 62 if ( (extent_list != NULL) &&
kaf24@6683 63 (__put_user(page_to_pfn(page), &extent_list[i]) != 0) )
kaf24@6468 64 return i;
kaf24@6468 65 }
kaf24@6468 66
kaf24@6468 67 return nr_extents;
kaf24@6468 68 }
kaf24@6468 69
kaf24@6468 70 static long
kaf24@6468 71 decrease_reservation(
kaf24@6468 72 struct domain *d,
kaf24@6468 73 unsigned long *extent_list,
kaf24@6468 74 unsigned int nr_extents,
kaf24@6468 75 unsigned int extent_order,
kaf24@6589 76 unsigned int flags,
kaf24@6589 77 int *preempted)
kaf24@6468 78 {
kaf24@6468 79 struct pfn_info *page;
kaf24@6468 80 unsigned long i, j, mpfn;
kaf24@6468 81
kaf24@6468 82 if ( !array_access_ok(extent_list, nr_extents, sizeof(*extent_list)) )
kaf24@6468 83 return 0;
kaf24@6468 84
kaf24@6468 85 for ( i = 0; i < nr_extents; i++ )
kaf24@6468 86 {
kaf24@6468 87 if ( hypercall_preempt_check() )
kaf24@6589 88 {
kaf24@6589 89 *preempted = 1;
kaf24@6468 90 return i;
kaf24@6589 91 }
kaf24@6468 92
kaf24@6468 93 if ( unlikely(__get_user(mpfn, &extent_list[i]) != 0) )
kaf24@6468 94 return i;
kaf24@6468 95
kaf24@6468 96 for ( j = 0; j < (1 << extent_order); j++ )
kaf24@6468 97 {
kaf24@6468 98 if ( unlikely((mpfn + j) >= max_page) )
kaf24@6468 99 {
kaf24@6468 100 DPRINTK("Domain %u page number out of range (%lx >= %lx)\n",
kaf24@6468 101 d->domain_id, mpfn + j, max_page);
kaf24@6468 102 return i;
kaf24@6468 103 }
kaf24@6468 104
kaf24@6468 105 page = &frame_table[mpfn + j];
kaf24@6468 106 if ( unlikely(!get_page(page, d)) )
kaf24@6468 107 {
kaf24@6468 108 DPRINTK("Bad page free for domain %u\n", d->domain_id);
kaf24@6468 109 return i;
kaf24@6468 110 }
kaf24@6468 111
kaf24@6468 112 if ( test_and_clear_bit(_PGT_pinned, &page->u.inuse.type_info) )
kaf24@6468 113 put_page_and_type(page);
kaf24@6468 114
kaf24@6468 115 if ( test_and_clear_bit(_PGC_allocated, &page->count_info) )
kaf24@6468 116 put_page(page);
kaf24@6468 117
kaf24@6468 118 shadow_sync_and_drop_references(d, page);
kaf24@6468 119
kaf24@6468 120 put_page(page);
kaf24@6468 121 }
kaf24@6468 122 }
kaf24@6468 123
kaf24@6468 124 return nr_extents;
kaf24@6468 125 }
kaf24@6468 126
kaf24@6468 127 /*
kaf24@6468 128 * To allow safe resume of do_memory_op() after preemption, we need to know
kaf24@6468 129 * at what point in the page list to resume. For this purpose I steal the
kaf24@6468 130 * high-order bits of the @cmd parameter, which are otherwise unused and zero.
kaf24@6468 131 */
kaf24@6468 132 #define START_EXTENT_SHIFT 4 /* cmd[:4] == start_extent */
kaf24@6468 133
/*
 * Top-level handler for the memory_op hypercall.
 *
 * @cmd: low START_EXTENT_SHIFT bits hold the sub-operation (XENMEM_*);
 *       the high bits carry the resume point (start_extent) when this call
 *       is a continuation of a previously preempted invocation.
 * @arg: guest pointer to the operation's argument structure.
 *
 * Returns: for the reservation ops, the total number of extents processed
 * so far (possibly via a continuation); 0/-errno for the others.
 */
long do_memory_op(int cmd, void *arg)
{
    struct domain *d;
    int rc, start_extent, op, flags = 0, preempted = 0;
    struct xen_memory_reservation reservation;

    /* Strip the continuation bits to recover the sub-operation code. */
    op = cmd & ((1 << START_EXTENT_SHIFT) - 1);

    switch ( op )
    {
    case XENMEM_increase_reservation:
    case XENMEM_decrease_reservation:
        if ( copy_from_user(&reservation, arg, sizeof(reservation)) )
            return -EFAULT;

        /* Resume point encoded in the high bits of @cmd (0 on first call). */
        start_extent = cmd >> START_EXTENT_SHIFT;
        if ( unlikely(start_extent > reservation.nr_extents) )
            return -EINVAL;

        /* Skip the extents already handled before the last preemption. */
        if ( reservation.extent_start != NULL )
            reservation.extent_start += start_extent;
        reservation.nr_extents -= start_extent;

        /*
         * If the guest restricts the allocation below the machine's top
         * frame, honour it by allocating from the DMA pool — but refuse
         * restrictions tighter than 31 bits outright.
         */
        if ( (reservation.address_bits != 0) &&
             (reservation.address_bits <
              (get_order_from_pages(max_page) + PAGE_SHIFT)) )
        {
            if ( reservation.address_bits < 31 )
                return -ENOMEM;
            flags = ALLOC_DOM_DMA;
        }

        /* Operating on a foreign domain requires privilege. */
        if ( likely(reservation.domid == DOMID_SELF) )
            d = current->domain;
        else if ( !IS_PRIV(current->domain) )
            return -EPERM;
        else if ( (d = find_domain_by_id(reservation.domid)) == NULL )
            return -ESRCH;

        rc = ((op == XENMEM_increase_reservation) ?
              increase_reservation : decrease_reservation)(
            d,
            reservation.extent_start,
            reservation.nr_extents,
            reservation.extent_order,
            flags,
            &preempted);

        /* find_domain_by_id() took a reference; drop it for foreign domains. */
        if ( unlikely(reservation.domid != DOMID_SELF) )
            put_domain(d);

        /* Convert per-call extent count into the running total. */
        rc += start_extent;

        /*
         * Preempted: re-issue ourselves with the progress so far encoded
         * back into the high bits of cmd, so the retry resumes at extent rc.
         */
        if ( preempted )
            return hypercall2_create_continuation(
                __HYPERVISOR_memory_op, op | (rc << START_EXTENT_SHIFT), arg);

        break;

    case XENMEM_maximum_ram_page:
        /* Report the highest machine frame number to the guest. */
        if ( put_user(max_page, (unsigned long *)arg) )
            return -EFAULT;
        rc = 0;
        break;

    default:
        rc = -ENOSYS;
        break;
    }

    return rc;
}
kaf24@6468 206
kaf24@6468 207 /*
kaf24@6468 208 * Local variables:
kaf24@6468 209 * mode: C
kaf24@6468 210 * c-set-style: "BSD"
kaf24@6468 211 * c-basic-offset: 4
kaf24@6468 212 * tab-width: 4
kaf24@6468 213 * indent-tabs-mode: nil
kaf24@6468 214 * End:
kaf24@6468 215 */