xen/arch/x86/x86_32/usercopy.c @ 3726:88957a238191
author: smh22@tempest.cl.cam.ac.uk
date:   Mon Feb 07 08:19:24 2005 +0000
/*
 * User address space access functions.
 * The non-inlined parts of asm-i386/uaccess.h are here.
 *
 * Copyright 1997 Andi Kleen <ak@muc.de>
 * Copyright 1997 Linus Torvalds
 */
#include <xen/config.h>
#include <xen/mm.h>
#include <asm/uaccess.h>

static inline int __movsl_is_ok(unsigned long a1, unsigned long a2, unsigned long n)
{
#ifdef CONFIG_X86_INTEL_USERCOPY
    if (n >= 64 && ((a1 ^ a2) & movsl_mask.mask))
        return 0;
#endif
    return 1;
}
#define movsl_is_ok(a1,a2,n) \
    __movsl_is_ok((unsigned long)(a1),(unsigned long)(a2),(n))
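
/*
 * Illustrative sketch (an addition, not part of the original file),
 * assuming a hypothetical movsl_mask.mask of 7, i.e. a CPU on which the
 * relative 8-byte alignment of source and destination matters:
 */
#if 0
static void movsl_is_ok_example(void)
{
    char buf[256];
    /* Copies under 64 bytes always pass, whatever the alignment.   */
    ASSERT(movsl_is_ok(&buf[0], &buf[4], 32));
    /* Large copy, same alignment mod 8: fast "rep; movsl" is fine.  */
    ASSERT(movsl_is_ok(&buf[0], &buf[8], 128));
    /* Large copy, mismatched alignment: the caller falls back to    */
    /* the unrolled __copy_user_intel() path instead.                */
    ASSERT(!movsl_is_ok(&buf[0], &buf[4], 128));
}
#endif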

/*
 * Zero Userspace
 */

#define __do_clear_user(addr,size) \
do { \
    int __d0; \
    __asm__ __volatile__( \
        "0: rep; stosl\n" \
        "   movl %2,%0\n" \
        "1: rep; stosb\n" \
        "2:\n" \
        ".section .fixup,\"ax\"\n" \
        "3: lea 0(%2,%0,4),%0\n" \
        "   jmp 2b\n" \
        ".previous\n" \
        ".section __ex_table,\"a\"\n" \
        "   .align 4\n" \
        "   .long 0b,3b\n" \
        "   .long 1b,2b\n" \
        ".previous" \
        : "=&c"(size), "=&D" (__d0) \
        : "r"(size & 3), "0"(size / 4), "1"(addr), "a"(0)); \
} while (0)

/**
 * clear_user: - Zero a block of memory in user space.
 * @to: Destination address, in user space.
 * @n:  Number of bytes to zero.
 *
 * Zero a block of memory in user space.
 *
 * Returns number of bytes that could not be cleared.
 * On success, this will be zero.
 */
unsigned long
clear_user(void __user *to, unsigned long n)
{
    if (access_ok(VERIFY_WRITE, to, n))
        __do_clear_user(to, n);
    return n;
}
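
/*
 * Usage sketch (an addition, not part of the original file; the calling
 * context is hypothetical).  A non-zero return from clear_user() means a
 * fault stopped the clear part-way, leaving that many trailing bytes
 * untouched.
 */
#if 0
static int zero_guest_buffer(void __user *buf, unsigned long len)
{
    unsigned long left = clear_user(buf, len);
    return left ? -EFAULT : 0;
}
#endif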

/**
 * __clear_user: - Zero a block of memory in user space, with less checking.
 * @to: Destination address, in user space.
 * @n:  Number of bytes to zero.
 *
 * Zero a block of memory in user space.  Caller must check
 * the specified block with access_ok() before calling this function.
 *
 * Returns number of bytes that could not be cleared.
 * On success, this will be zero.
 */
unsigned long
__clear_user(void __user *to, unsigned long n)
{
    __do_clear_user(to, n);
    return n;
}

#ifdef CONFIG_X86_INTEL_USERCOPY
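/*
 * Added note: the routine below copies in 64-byte chunks using a fully
 * unrolled load/store sequence.  The loads at 32(%4) and 64(%4) ahead of
 * the main body pull the next cache lines in early (their values are
 * simply discarded), and every faultable instruction has an __ex_table
 * entry whose fixup yields the number of bytes left uncopied.
 */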
static unsigned long
__copy_user_intel(void __user *to, const void *from, unsigned long size)
{
    int d0, d1;
    __asm__ __volatile__(
        " .align 2,0x90\n"
        "1: movl 32(%4), %%eax\n"
        " cmpl $67, %0\n"
        " jbe 3f\n"
        "2: movl 64(%4), %%eax\n"
        " .align 2,0x90\n"
        "3: movl 0(%4), %%eax\n"
        "4: movl 4(%4), %%edx\n"
        "5: movl %%eax, 0(%3)\n"
        "6: movl %%edx, 4(%3)\n"
        "7: movl 8(%4), %%eax\n"
        "8: movl 12(%4),%%edx\n"
        "9: movl %%eax, 8(%3)\n"
        "10: movl %%edx, 12(%3)\n"
        "11: movl 16(%4), %%eax\n"
        "12: movl 20(%4), %%edx\n"
        "13: movl %%eax, 16(%3)\n"
        "14: movl %%edx, 20(%3)\n"
        "15: movl 24(%4), %%eax\n"
        "16: movl 28(%4), %%edx\n"
        "17: movl %%eax, 24(%3)\n"
        "18: movl %%edx, 28(%3)\n"
        "19: movl 32(%4), %%eax\n"
        "20: movl 36(%4), %%edx\n"
        "21: movl %%eax, 32(%3)\n"
        "22: movl %%edx, 36(%3)\n"
        "23: movl 40(%4), %%eax\n"
        "24: movl 44(%4), %%edx\n"
        "25: movl %%eax, 40(%3)\n"
        "26: movl %%edx, 44(%3)\n"
        "27: movl 48(%4), %%eax\n"
        "28: movl 52(%4), %%edx\n"
        "29: movl %%eax, 48(%3)\n"
        "30: movl %%edx, 52(%3)\n"
        "31: movl 56(%4), %%eax\n"
        "32: movl 60(%4), %%edx\n"
        "33: movl %%eax, 56(%3)\n"
        "34: movl %%edx, 60(%3)\n"
        " addl $-64, %0\n"
        " addl $64, %4\n"
        " addl $64, %3\n"
        " cmpl $63, %0\n"
        " ja 1b\n"
        "35: movl %0, %%eax\n"
        " shrl $2, %0\n"
        " andl $3, %%eax\n"
        " cld\n"
        "99: rep; movsl\n"
        "36: movl %%eax, %0\n"
        "37: rep; movsb\n"
        "100:\n"
        ".section .fixup,\"ax\"\n"
        "101: lea 0(%%eax,%0,4),%0\n"
        " jmp 100b\n"
        ".previous\n"
        ".section __ex_table,\"a\"\n"
        " .align 4\n"
        " .long 1b,100b\n"
        " .long 2b,100b\n"
        " .long 3b,100b\n"
        " .long 4b,100b\n"
        " .long 5b,100b\n"
        " .long 6b,100b\n"
        " .long 7b,100b\n"
        " .long 8b,100b\n"
        " .long 9b,100b\n"
        " .long 10b,100b\n"
        " .long 11b,100b\n"
        " .long 12b,100b\n"
        " .long 13b,100b\n"
        " .long 14b,100b\n"
        " .long 15b,100b\n"
        " .long 16b,100b\n"
        " .long 17b,100b\n"
        " .long 18b,100b\n"
        " .long 19b,100b\n"
        " .long 20b,100b\n"
        " .long 21b,100b\n"
        " .long 22b,100b\n"
        " .long 23b,100b\n"
        " .long 24b,100b\n"
        " .long 25b,100b\n"
        " .long 26b,100b\n"
        " .long 27b,100b\n"
        " .long 28b,100b\n"
        " .long 29b,100b\n"
        " .long 30b,100b\n"
        " .long 31b,100b\n"
        " .long 32b,100b\n"
        " .long 33b,100b\n"
        " .long 34b,100b\n"
        " .long 35b,100b\n"
        " .long 36b,100b\n"
        " .long 37b,100b\n"
        " .long 99b,101b\n"
        ".previous"
        : "=&c"(size), "=&D" (d0), "=&S" (d1)
        : "1"(to), "2"(from), "0"(size)
        : "eax", "edx", "memory");
    return size;
}
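
/*
 * Added note: __copy_user_zeroing_intel() is the same unrolled copy as
 * __copy_user_intel(), but it copies *from* user space and its fixup
 * (labels 9/16 below) zero-fills whatever part of the kernel-side
 * destination was not copied before the fault.
 */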
static unsigned long
__copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
{
    int d0, d1;
    __asm__ __volatile__(
        " .align 2,0x90\n"
        "0: movl 32(%4), %%eax\n"
        " cmpl $67, %0\n"
        " jbe 2f\n"
        "1: movl 64(%4), %%eax\n"
        " .align 2,0x90\n"
        "2: movl 0(%4), %%eax\n"
        "21: movl 4(%4), %%edx\n"
        " movl %%eax, 0(%3)\n"
        " movl %%edx, 4(%3)\n"
        "3: movl 8(%4), %%eax\n"
        "31: movl 12(%4),%%edx\n"
        " movl %%eax, 8(%3)\n"
        " movl %%edx, 12(%3)\n"
        "4: movl 16(%4), %%eax\n"
        "41: movl 20(%4), %%edx\n"
        " movl %%eax, 16(%3)\n"
        " movl %%edx, 20(%3)\n"
        "10: movl 24(%4), %%eax\n"
        "51: movl 28(%4), %%edx\n"
        " movl %%eax, 24(%3)\n"
        " movl %%edx, 28(%3)\n"
        "11: movl 32(%4), %%eax\n"
        "61: movl 36(%4), %%edx\n"
        " movl %%eax, 32(%3)\n"
        " movl %%edx, 36(%3)\n"
        "12: movl 40(%4), %%eax\n"
        "71: movl 44(%4), %%edx\n"
        " movl %%eax, 40(%3)\n"
        " movl %%edx, 44(%3)\n"
        "13: movl 48(%4), %%eax\n"
        "81: movl 52(%4), %%edx\n"
        " movl %%eax, 48(%3)\n"
        " movl %%edx, 52(%3)\n"
        "14: movl 56(%4), %%eax\n"
        "91: movl 60(%4), %%edx\n"
        " movl %%eax, 56(%3)\n"
        " movl %%edx, 60(%3)\n"
        " addl $-64, %0\n"
        " addl $64, %4\n"
        " addl $64, %3\n"
        " cmpl $63, %0\n"
        " ja 0b\n"
        "5: movl %0, %%eax\n"
        " shrl $2, %0\n"
        " andl $3, %%eax\n"
        " cld\n"
        "6: rep; movsl\n"
        " movl %%eax,%0\n"
        "7: rep; movsb\n"
        "8:\n"
        ".section .fixup,\"ax\"\n"
        "9: lea 0(%%eax,%0,4),%0\n"
        "16: pushl %0\n"
        " pushl %%eax\n"
        " xorl %%eax,%%eax\n"
        " rep; stosb\n"
        " popl %%eax\n"
        " popl %0\n"
        " jmp 8b\n"
        ".previous\n"
        ".section __ex_table,\"a\"\n"
        " .align 4\n"
        " .long 0b,16b\n"
        " .long 1b,16b\n"
        " .long 2b,16b\n"
        " .long 21b,16b\n"
        " .long 3b,16b\n"
        " .long 31b,16b\n"
        " .long 4b,16b\n"
        " .long 41b,16b\n"
        " .long 10b,16b\n"
        " .long 51b,16b\n"
        " .long 11b,16b\n"
        " .long 61b,16b\n"
        " .long 12b,16b\n"
        " .long 71b,16b\n"
        " .long 13b,16b\n"
        " .long 81b,16b\n"
        " .long 14b,16b\n"
        " .long 91b,16b\n"
        " .long 6b,9b\n"
        " .long 7b,16b\n"
        ".previous"
        : "=&c"(size), "=&D" (d0), "=&S" (d1)
        : "1"(to), "2"(from), "0"(size)
        : "eax", "edx", "memory");
    return size;
}
#else
/*
 * Leave these declared but undefined.  There should be no references to
 * them.
 */
unsigned long
__copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size);
unsigned long
__copy_user_intel(void __user *to, const void *from, unsigned long size);
#endif /* CONFIG_X86_INTEL_USERCOPY */

/* Generic arbitrary-sized copy. */
#define __copy_user(to,from,size) \
do { \
    int __d0, __d1, __d2; \
    __asm__ __volatile__( \
        " cmp $7,%0\n" \
        " jbe 1f\n" \
        " movl %1,%0\n" \
        " negl %0\n" \
        " andl $7,%0\n" \
        " subl %0,%3\n" \
        "4: rep; movsb\n" \
        " movl %3,%0\n" \
        " shrl $2,%0\n" \
        " andl $3,%3\n" \
        " .align 2,0x90\n" \
        "0: rep; movsl\n" \
        " movl %3,%0\n" \
        "1: rep; movsb\n" \
        "2:\n" \
        ".section .fixup,\"ax\"\n" \
        "5: addl %3,%0\n" \
        " jmp 2b\n" \
        "3: lea 0(%3,%0,4),%0\n" \
        " jmp 2b\n" \
        ".previous\n" \
        ".section __ex_table,\"a\"\n" \
        " .align 4\n" \
        " .long 4b,5b\n" \
        " .long 0b,3b\n" \
        " .long 1b,2b\n" \
        ".previous" \
        : "=&c"(size), "=&D" (__d0), "=&S" (__d1), "=r"(__d2) \
        : "3"(size), "0"(size), "1"(to), "2"(from) \
        : "memory"); \
} while (0)
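
/*
 * Plain-C sketch (an addition, not part of the original file) of the
 * copy strategy implemented by __copy_user() above, with the fault
 * handling omitted: align the destination, copy words, then the tail.
 */
#if 0
static void copy_user_shape(char *to, const char *from, unsigned long n)
{
    if (n > 7) {
        /* Byte-copy until the destination is 8-byte aligned. */
        unsigned long head = -(unsigned long)to & 7;
        unsigned long words;
        n -= head;
        while (head--)
            *to++ = *from++;
        /* Bulk of the copy as 32-bit words ("rep; movsl"). */
        for (words = n >> 2; words; words--) {
            *(unsigned int *)to = *(const unsigned int *)from;
            to += 4;
            from += 4;
        }
        n &= 3;
    }
    /* Short copies and the 0-3 byte tail go byte by byte. */
    while (n--)
        *to++ = *from++;
}
#endif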

#define __copy_user_zeroing(to,from,size) \
do { \
    int __d0, __d1, __d2; \
    __asm__ __volatile__( \
        " cmp $7,%0\n" \
        " jbe 1f\n" \
        " movl %1,%0\n" \
        " negl %0\n" \
        " andl $7,%0\n" \
        " subl %0,%3\n" \
        "4: rep; movsb\n" \
        " movl %3,%0\n" \
        " shrl $2,%0\n" \
        " andl $3,%3\n" \
        " .align 2,0x90\n" \
        "0: rep; movsl\n" \
        " movl %3,%0\n" \
        "1: rep; movsb\n" \
        "2:\n" \
        ".section .fixup,\"ax\"\n" \
        "5: addl %3,%0\n" \
        " jmp 6f\n" \
        "3: lea 0(%3,%0,4),%0\n" \
        "6: pushl %0\n" \
        " pushl %%eax\n" \
        " xorl %%eax,%%eax\n" \
        " rep; stosb\n" \
        " popl %%eax\n" \
        " popl %0\n" \
        " jmp 2b\n" \
        ".previous\n" \
        ".section __ex_table,\"a\"\n" \
        " .align 4\n" \
        " .long 4b,5b\n" \
        " .long 0b,3b\n" \
        " .long 1b,6b\n" \
        ".previous" \
        : "=&c"(size), "=&D" (__d0), "=&S" (__d1), "=r"(__d2) \
        : "3"(size), "0"(size), "1"(to), "2"(from) \
        : "memory"); \
} while (0)
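
/*
 * Added note: __copy_user_zeroing() matches __copy_user() except in its
 * fixup path.  If the copy faults while reading from user space, the
 * fixup zero-fills the rest of the kernel-side destination ("rep; stosb"
 * with %eax cleared) before returning the shortfall, so callers never
 * see stale uninitialised bytes.
 */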

unsigned long __copy_to_user_ll(void __user *to, const void *from, unsigned long n)
{
    if (movsl_is_ok(to, from, n))
        __copy_user(to, from, n);
    else
        n = __copy_user_intel(to, from, n);
    return n;
}

unsigned long
__copy_from_user_ll(void *to, const void __user *from, unsigned long n)
{
    if (movsl_is_ok(to, from, n))
        __copy_user_zeroing(to, from, n);
    else
        n = __copy_user_zeroing_intel(to, from, n);
    return n;
}

/**
 * copy_to_user: - Copy a block of data into user space.
 * @to:   Destination address, in user space.
 * @from: Source address, in kernel space.
 * @n:    Number of bytes to copy.
 *
 * Context: User context only.  This function may sleep.
 *
 * Copy data from kernel space to user space.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 */
unsigned long
copy_to_user(void __user *to, const void *from, unsigned long n)
{
    if (access_ok(VERIFY_WRITE, to, n))
        n = __copy_to_user(to, from, n);
    return n;
}
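
/*
 * Usage sketch (an addition, not part of the original file): copying a
 * kernel-side structure out to a guest-supplied pointer.  The struct
 * and function names are hypothetical.
 */
#if 0
struct example_info {
    unsigned long a, b;
};

static int put_guest_info(void __user *uptr, const struct example_info *info)
{
    /* A non-zero return means that many bytes were not written. */
    if (copy_to_user(uptr, info, sizeof(*info)) != 0)
        return -EFAULT;
    return 0;
}
#endif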

/**
 * copy_from_user: - Copy a block of data from user space.
 * @to:   Destination address, in kernel space.
 * @from: Source address, in user space.
 * @n:    Number of bytes to copy.
 *
 * Context: User context only.  This function may sleep.
 *
 * Copy data from user space to kernel space.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 *
 * If some data could not be copied, this function will pad the copied
 * data to the requested size using zero bytes.
 */
unsigned long
copy_from_user(void *to, const void __user *from, unsigned long n)
{
    if (access_ok(VERIFY_READ, from, n))
        n = __copy_from_user(to, from, n);
    else
        memset(to, 0, n);
    return n;
}
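
/*
 * Usage sketch (an addition, not part of the original file): pulling a
 * hypothetical request structure in from guest memory.  On a partial
 * copy the tail of 'req' has already been zero-filled by
 * copy_from_user(), so no uninitialised kernel data is ever exposed.
 */
#if 0
struct example_req {
    unsigned long op, arg;
};

static int get_guest_request(struct example_req *req, const void __user *uptr)
{
    if (copy_from_user(req, uptr, sizeof(*req)) != 0)
        return -EFAULT;
    return 0;
}
#endif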