debuggers.hg

view xen/common/memory.c @ 6644:29808fef9148

merge?
author cl349@firebug.cl.cam.ac.uk
date Sat Sep 03 18:24:46 2005 +0000 (2005-09-03)
parents ec11c5cca195 f27205ea60ef
children b6c98fe62e1a
line source
/******************************************************************************
 * memory.c
 *
 * Code to handle memory-related requests.
 *
 * Copyright (c) 2003-2004, B Dragovic
 * Copyright (c) 2003-2005, K A Fraser
 */

#include <xen/config.h>
#include <xen/types.h>
#include <xen/lib.h>
#include <xen/mm.h>
#include <xen/perfc.h>
#include <xen/sched.h>
#include <xen/event.h>
#include <xen/shadow.h>
#include <asm/current.h>
#include <asm/hardirq.h>
#include <public/memory.h>
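
/*
 * Allocate nr_extents extents, each of 2^extent_order contiguous pages,
 * from the domain heap on behalf of domain d.  If extent_list is non-NULL,
 * the machine frame number of each new extent is reported back to the guest
 * through it.  Returns the number of extents successfully allocated;
 * *preempted is set if hypercall_preempt_check() forced an early return.
 */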
static long
increase_reservation(
    struct domain *d,
    unsigned long *extent_list,
    unsigned int   nr_extents,
    unsigned int   extent_order,
    unsigned int   flags,
    int           *preempted)
{
    struct pfn_info *page;
    unsigned long    i;

    if ( (extent_list != NULL)
         && !array_access_ok(extent_list, nr_extents, sizeof(*extent_list)) )
        return 0;

    if ( (extent_order != 0) && !IS_CAPABLE_PHYSDEV(current->domain) )
    {
        DPRINTK("Only I/O-capable domains may allocate > order-0 memory.\n");
        return 0;
    }

    for ( i = 0; i < nr_extents; i++ )
    {
        if ( hypercall_preempt_check() )
        {
            *preempted = 1;
            return i;
        }

        if ( unlikely((page = alloc_domheap_pages(
            d, extent_order, flags)) == NULL) )
        {
            DPRINTK("Could not allocate a frame\n");
            return i;
        }

        /* Inform the domain of the new page's machine address. */
        if ( (extent_list != NULL)
             && (__put_user(page_to_pfn(page), &extent_list[i]) != 0) )
            return i;
    }

    return nr_extents;
}
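
/*
 * Return to the allocator nr_extents extents, each of 2^extent_order
 * contiguous pages identified by the machine frame numbers supplied in
 * extent_list, on behalf of domain d.  Returns the number of extents
 * successfully released; *preempted is set if hypercall_preempt_check()
 * forced an early return.
 */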
static long
decrease_reservation(
    struct domain *d,
    unsigned long *extent_list,
    unsigned int   nr_extents,
    unsigned int   extent_order,
    unsigned int   flags,
    int           *preempted)
{
    struct pfn_info *page;
    unsigned long    i, j, mpfn;

    if ( !array_access_ok(extent_list, nr_extents, sizeof(*extent_list)) )
        return 0;

    for ( i = 0; i < nr_extents; i++ )
    {
        if ( hypercall_preempt_check() )
        {
            *preempted = 1;
            return i;
        }

        if ( unlikely(__get_user(mpfn, &extent_list[i]) != 0) )
            return i;
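
        /* An extent is 2^extent_order contiguous frames starting at mpfn. */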
        for ( j = 0; j < (1 << extent_order); j++ )
        {
            if ( unlikely((mpfn + j) >= max_page) )
            {
                DPRINTK("Domain %u page number out of range (%lx >= %lx)\n",
                        d->domain_id, mpfn + j, max_page);
                return i;
            }

            page = &frame_table[mpfn + j];
            if ( unlikely(!get_page(page, d)) )
            {
                DPRINTK("Bad page free for domain %u\n", d->domain_id);
                return i;
            }
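
            /*
             * Drop, in turn, the pinned type reference, the 'allocated'
             * reference, any shadow references, and finally the reference
             * taken by get_page() above.
             */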
            if ( test_and_clear_bit(_PGT_pinned, &page->u.inuse.type_info) )
                put_page_and_type(page);

            if ( test_and_clear_bit(_PGC_allocated, &page->count_info) )
                put_page(page);

            shadow_sync_and_drop_references(d, page);

            put_page(page);
        }
    }

    return nr_extents;
}

/*
 * To allow safe resume of do_memory_op() after preemption, we need to know
 * at what point in the page list to resume. For this purpose I steal the
 * high-order bits of the @cmd parameter, which are otherwise unused and zero.
 */
#define START_EXTENT_SHIFT 4 /* cmd[:4] == start_extent */
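
/*
 * For example: a call preempted after completing five extents of
 * XENMEM_increase_reservation is continued with
 *
 *     cmd = XENMEM_increase_reservation | (5 << START_EXTENT_SHIFT);
 *
 * so that the decode at the top of do_memory_op() yields
 *
 *     op           == XENMEM_increase_reservation
 *     start_extent == 5
 *
 * and the operation resumes at the sixth extent.
 */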

long do_memory_op(int cmd, void *arg)
{
    struct domain *d;
    int rc, start_extent, op, flags = 0, preempted = 0;
    struct xen_memory_reservation reservation;

    op = cmd & ((1 << START_EXTENT_SHIFT) - 1);

    switch ( op )
    {
    case XENMEM_increase_reservation:
    case XENMEM_decrease_reservation:
        if ( copy_from_user(&reservation, arg, sizeof(reservation)) )
            return -EFAULT;

        start_extent = cmd >> START_EXTENT_SHIFT;
        if ( unlikely(start_extent > reservation.nr_extents) )
            return -EINVAL;

        if ( reservation.extent_start != NULL )
            reservation.extent_start += start_extent;
        reservation.nr_extents -= start_extent;
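
        /*
         * An address-width limit tighter than the span of machine memory
         * must be served from the DMA pool; limits below 31 bits cannot be
         * honoured at all.
         */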
        if ( unlikely(reservation.address_bits != 0)
             && (reservation.address_bits < (get_order(max_page)+PAGE_SHIFT)) )
        {
            if ( reservation.address_bits < 31 )
                return -ENOMEM;
            flags = ALLOC_DOM_DMA;
        }

        if ( likely(reservation.domid == DOMID_SELF) )
            d = current->domain;
        else if ( !IS_PRIV(current->domain) )
            return -EPERM;
        else if ( (d = find_domain_by_id(reservation.domid)) == NULL )
            return -ESRCH;

        rc = ((op == XENMEM_increase_reservation) ?
              increase_reservation : decrease_reservation)(
            d,
            reservation.extent_start,
            reservation.nr_extents,
            reservation.extent_order,
            flags,
            &preempted);

        if ( unlikely(reservation.domid != DOMID_SELF) )
            put_domain(d);

        rc += start_extent;

        if ( preempted )
            return hypercall2_create_continuation(
                __HYPERVISOR_memory_op, op | (rc << START_EXTENT_SHIFT), arg);

        break;

    case XENMEM_maximum_ram_page:
        if ( put_user(max_page, (unsigned long *)arg) )
            return -EFAULT;
        rc = 0;
        break;

    default:
        rc = -ENOSYS;
        break;
    }

    return rc;
}

/*
 * Local variables:
 * mode: C
 * c-set-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */