debuggers.hg

view xen/common/compat/memory.c @ 21055:a02b1c791c9b

Fix compat mode type checking macros for gcc 4.5

Just like with the __RING_SIZE() macro, the compat mode type checking
macros also need changing in order to work with gcc 4.5.

Signed-off-by: Jan Beulich <jbeulich@novell.com>
author Keir Fraser <keir.fraser@citrix.com>
date Mon Mar 08 19:11:00 2010 +0000 (2010-03-08)
parents bcee82a0e9d6
children 3ffdb094c2c0
line source
1 #include <xen/config.h>
2 #include <xen/types.h>
3 #include <xen/hypercall.h>
4 #include <xen/guest_access.h>
5 #include <xen/sched.h>
6 #include <xen/event.h>
7 #include <asm/current.h>
8 #include <compat/memory.h>
/*
 * Build-time verification that the guest-visible domid type agrees between
 * the native and 32-bit compat ABIs.  The xen_/compat_ aliases are what the
 * CHECK_TYPE() machinery expands against; they are #undef'd immediately so
 * they cannot leak into later translations.
 * NOTE(review): CHECK_TYPE presumably expands to a compile-time size/type
 * assertion from the compat framework headers — confirm against
 * xen/include/xen/compat.h.
 */
10 #define xen_domid_t domid_t
11 #define compat_domid_t domid_compat_t
12 CHECK_TYPE(domid);
13 #undef compat_domid_t
14 #undef xen_domid_t
16 int compat_memory_op(unsigned int cmd, XEN_GUEST_HANDLE(void) compat)
17 {
18 int rc, split, op = cmd & MEMOP_CMD_MASK;
19 unsigned int start_extent = cmd >> MEMOP_EXTENT_SHIFT;
21 do
22 {
23 unsigned int i, end_extent = 0;
24 union {
25 XEN_GUEST_HANDLE(void) hnd;
26 struct xen_memory_reservation *rsrv;
27 struct xen_memory_exchange *xchg;
28 } nat;
29 union {
30 struct compat_memory_reservation rsrv;
31 struct compat_memory_exchange xchg;
32 } cmp;
34 set_xen_guest_handle(nat.hnd, COMPAT_ARG_XLAT_VIRT_BASE);
35 split = 0;
36 switch ( op )
37 {
38 xen_pfn_t *space;
40 case XENMEM_increase_reservation:
41 case XENMEM_decrease_reservation:
42 case XENMEM_populate_physmap:
43 if ( copy_from_guest(&cmp.rsrv, compat, 1) )
44 return start_extent;
46 /* Is size too large for us to encode a continuation? */
47 if ( cmp.rsrv.nr_extents > (UINT_MAX >> MEMOP_EXTENT_SHIFT) )
48 return start_extent;
50 if ( !compat_handle_is_null(cmp.rsrv.extent_start) &&
51 !compat_handle_okay(cmp.rsrv.extent_start, cmp.rsrv.nr_extents) )
52 return start_extent;
54 end_extent = start_extent + (COMPAT_ARG_XLAT_SIZE - sizeof(*nat.rsrv)) /
55 sizeof(*space);
56 if ( end_extent > cmp.rsrv.nr_extents )
57 end_extent = cmp.rsrv.nr_extents;
59 space = (xen_pfn_t *)(nat.rsrv + 1);
60 #define XLAT_memory_reservation_HNDL_extent_start(_d_, _s_) \
61 do \
62 { \
63 if ( !compat_handle_is_null((_s_)->extent_start) ) \
64 { \
65 set_xen_guest_handle((_d_)->extent_start, space - start_extent); \
66 if ( op != XENMEM_increase_reservation ) \
67 { \
68 for ( i = start_extent; i < end_extent; ++i ) \
69 { \
70 compat_pfn_t pfn; \
71 if ( __copy_from_compat_offset(&pfn, (_s_)->extent_start, i, 1) ) \
72 { \
73 end_extent = i; \
74 split = -1; \
75 break; \
76 } \
77 *space++ = pfn; \
78 } \
79 } \
80 } \
81 else \
82 { \
83 set_xen_guest_handle((_d_)->extent_start, NULL); \
84 end_extent = cmp.rsrv.nr_extents; \
85 } \
86 } while (0)
87 XLAT_memory_reservation(nat.rsrv, &cmp.rsrv);
88 #undef XLAT_memory_reservation_HNDL_extent_start
90 if ( end_extent < cmp.rsrv.nr_extents )
91 {
92 nat.rsrv->nr_extents = end_extent;
93 ++split;
94 }
96 break;
98 case XENMEM_exchange:
99 {
100 int order_delta;
102 if ( copy_from_guest(&cmp.xchg, compat, 1) )
103 return -EFAULT;
105 order_delta = cmp.xchg.out.extent_order - cmp.xchg.in.extent_order;
106 /* Various sanity checks. */
107 if ( (cmp.xchg.nr_exchanged > cmp.xchg.in.nr_extents) ||
108 (order_delta > 0 && (cmp.xchg.nr_exchanged & ((1U << order_delta) - 1))) ||
109 /* Sizes of input and output lists do not overflow an int? */
110 ((~0U >> cmp.xchg.in.extent_order) < cmp.xchg.in.nr_extents) ||
111 ((~0U >> cmp.xchg.out.extent_order) < cmp.xchg.out.nr_extents) ||
112 /* Sizes of input and output lists match? */
113 ((cmp.xchg.in.nr_extents << cmp.xchg.in.extent_order) !=
114 (cmp.xchg.out.nr_extents << cmp.xchg.out.extent_order)) )
115 return -EINVAL;
117 start_extent = cmp.xchg.nr_exchanged;
118 end_extent = (COMPAT_ARG_XLAT_SIZE - sizeof(*nat.xchg)) /
119 (((1U << __builtin_abs(order_delta)) + 1) *
120 sizeof(*space));
121 if ( end_extent == 0 )
122 {
123 printk("Cannot translate compatibility mode XENMEM_exchange extents (%u,%u)\n",
124 cmp.xchg.in.extent_order, cmp.xchg.out.extent_order);
125 return -E2BIG;
126 }
127 if ( order_delta > 0 )
128 end_extent <<= order_delta;
129 end_extent += start_extent;
130 if ( end_extent > cmp.xchg.in.nr_extents )
131 end_extent = cmp.xchg.in.nr_extents;
133 space = (xen_pfn_t *)(nat.xchg + 1);
134 /* Code below depends upon .in preceding .out. */
135 BUILD_BUG_ON(offsetof(xen_memory_exchange_t, in) > offsetof(xen_memory_exchange_t, out));
136 #define XLAT_memory_reservation_HNDL_extent_start(_d_, _s_) \
137 do \
138 { \
139 set_xen_guest_handle((_d_)->extent_start, space - start_extent); \
140 for ( i = start_extent; i < end_extent; ++i ) \
141 { \
142 compat_pfn_t pfn; \
143 if ( __copy_from_compat_offset(&pfn, (_s_)->extent_start, i, 1) ) \
144 return -EFAULT; \
145 *space++ = pfn; \
146 } \
147 if ( order_delta > 0 ) \
148 { \
149 start_extent >>= order_delta; \
150 end_extent >>= order_delta; \
151 } \
152 else \
153 { \
154 start_extent <<= -order_delta; \
155 end_extent <<= -order_delta; \
156 } \
157 order_delta = -order_delta; \
158 } while (0)
159 XLAT_memory_exchange(nat.xchg, &cmp.xchg);
160 #undef XLAT_memory_reservation_HNDL_extent_start
162 if ( end_extent < cmp.xchg.in.nr_extents )
163 {
164 nat.xchg->in.nr_extents = end_extent;
165 if ( order_delta >= 0 )
166 nat.xchg->out.nr_extents = end_extent >> order_delta;
167 else
168 nat.xchg->out.nr_extents = end_extent << order_delta;
169 ++split;
170 }
172 break;
173 }
175 case XENMEM_current_reservation:
176 case XENMEM_maximum_reservation:
177 case XENMEM_maximum_gpfn:
178 case XENMEM_maximum_ram_page:
179 nat.hnd = compat;
180 break;
182 default:
183 return compat_arch_memory_op(cmd, compat);
184 }
186 rc = do_memory_op(cmd, nat.hnd);
187 if ( rc < 0 )
188 return rc;
190 cmd = 0;
191 if ( hypercall_xlat_continuation(&cmd, 0x02, nat.hnd, compat) )
192 {
193 BUG_ON(rc != __HYPERVISOR_memory_op);
194 BUG_ON((cmd & MEMOP_CMD_MASK) != op);
195 split = -1;
196 }
198 switch ( op )
199 {
200 case XENMEM_increase_reservation:
201 case XENMEM_decrease_reservation:
202 case XENMEM_populate_physmap:
203 end_extent = split >= 0 ? rc : cmd >> MEMOP_EXTENT_SHIFT;
204 if ( (op != XENMEM_decrease_reservation) &&
205 !guest_handle_is_null(nat.rsrv->extent_start) )
206 {
207 for ( ; start_extent < end_extent; ++start_extent )
208 {
209 compat_pfn_t pfn = nat.rsrv->extent_start.p[start_extent];
211 BUG_ON(pfn != nat.rsrv->extent_start.p[start_extent]);
212 if ( __copy_to_compat_offset(cmp.rsrv.extent_start,
213 start_extent, &pfn, 1) )
214 {
215 if ( split >= 0 )
216 {
217 rc = start_extent;
218 split = 0;
219 }
220 else
221 /*
222 * Short of being able to cancel the continuation,
223 * force it to restart here; eventually we shall
224 * get out of this state.
225 */
226 rc = (start_extent << MEMOP_EXTENT_SHIFT) | op;
227 break;
228 }
229 }
230 }
231 else
232 {
233 start_extent = end_extent;
234 }
235 /* Bail if there was an error. */
236 if ( (split >= 0) && (end_extent != nat.rsrv->nr_extents) )
237 split = 0;
238 break;
240 case XENMEM_exchange:
241 {
242 DEFINE_XEN_GUEST_HANDLE(compat_memory_exchange_t);
243 int order_delta;
245 BUG_ON(split >= 0 && rc);
246 BUG_ON(end_extent < nat.xchg->nr_exchanged);
247 end_extent = nat.xchg->nr_exchanged;
249 order_delta = cmp.xchg.out.extent_order - cmp.xchg.in.extent_order;
250 if ( order_delta > 0 )
251 {
252 start_extent >>= order_delta;
253 BUG_ON(end_extent & ((1U << order_delta) - 1));
254 end_extent >>= order_delta;
255 }
256 else
257 {
258 start_extent <<= -order_delta;
259 end_extent <<= -order_delta;
260 }
262 for ( ; start_extent < end_extent; ++start_extent )
263 {
264 compat_pfn_t pfn = nat.xchg->out.extent_start.p[start_extent];
266 BUG_ON(pfn != nat.xchg->out.extent_start.p[start_extent]);
267 /* Note that we ignore errors accessing the output extent list. */
268 __copy_to_compat_offset(cmp.xchg.out.extent_start, start_extent, &pfn, 1);
269 }
271 cmp.xchg.nr_exchanged = nat.xchg->nr_exchanged;
272 if ( copy_field_to_guest(guest_handle_cast(compat, compat_memory_exchange_t),
273 &cmp.xchg, nr_exchanged) )
274 {
275 if ( split < 0 )
276 /* Cannot cancel the continuation... */
277 domain_crash(current->domain);
278 return -EFAULT;
279 }
280 break;
281 }
283 case XENMEM_maximum_ram_page:
284 case XENMEM_current_reservation:
285 case XENMEM_maximum_reservation:
286 case XENMEM_maximum_gpfn:
287 break;
289 default:
290 domain_crash(current->domain);
291 split = 0;
292 break;
293 }
295 cmd = op | (start_extent << MEMOP_EXTENT_SHIFT);
296 if ( split > 0 && hypercall_preempt_check() )
297 return hypercall_create_continuation(
298 __HYPERVISOR_memory_op, "ih", cmd, compat);
299 } while ( split > 0 );
301 return rc;
302 }