debuggers.hg

view xen/arch/x86/x86_32/usercopy.c @ 2376:c326283ef029

bitkeeper revision 1.1159.1.99 (412b0f11cFbfdCRdP2-GJYp0ANDfUA)

Merge scramble.cl.cam.ac.uk:/auto/groups/xeno/BK/xeno.bk
into scramble.cl.cam.ac.uk:/local/scratch/kaf24/xeno
author kaf24@scramble.cl.cam.ac.uk
date Tue Aug 24 09:49:05 2004 +0000 (2004-08-24)
parents 0e23f01219c6 3145fa096b1a
children 7db5b671b347 88957a238191
line source
1 /*
2 * User address space access functions.
3 * The non inlined parts of asm-i386/uaccess.h are here.
4 *
5 * Copyright 1997 Andi Kleen <ak@muc.de>
6 * Copyright 1997 Linus Torvalds
7 */
8 #include <xen/config.h>
9 #include <xen/mm.h>
10 #include <asm/uaccess.h>
12 #define might_sleep() ((void)0)
/*
 * Return non-zero if an n-byte copy between addresses a1 and a2 may
 * safely use "rep; movsl".  On CPUs with CONFIG_X86_INTEL_USERCOPY
 * (e.g. P4, where misaligned movsl is slow), large copies (>= 64
 * bytes) whose endpoints differ in alignment w.r.t. movsl_mask are
 * rejected so the unrolled copy routines are used instead; otherwise
 * movsl is always acceptable.
 */
static inline int __movsl_is_ok(unsigned long a1, unsigned long a2, unsigned long n)
{
#ifdef CONFIG_X86_INTEL_USERCOPY
	if (n >= 64 && ((a1 ^ a2) & movsl_mask.mask))
		return 0;
#endif
	return 1;
}
/* Pointer-friendly wrapper: casts both addresses to unsigned long. */
#define movsl_is_ok(a1,a2,n) \
	__movsl_is_ok((unsigned long)(a1),(unsigned long)(a2),(n))
/*
 * Copy a null terminated string from userspace.
 */
/*
 * Copy at most @count bytes of a NUL-terminated string from user
 * address @src to kernel buffer @dst.  Sets @res to the number of
 * bytes copied excluding the NUL, to @count if no NUL was found in
 * the first @count bytes, or to -EFAULT if the user access faulted
 * (the __ex_table entry redirects the faulting lodsb at label 0 to
 * the fixup at label 3).
 */
#define __do_strncpy_from_user(dst,src,count,res) \
do { \
	int __d0, __d1, __d2; \
	__asm__ __volatile__( \
		" testl %1,%1\n" \
		" jz 2f\n" \
		"0: lodsb\n" \
		" stosb\n" \
		" testb %%al,%%al\n" \
		" jz 1f\n" \
		" decl %1\n" \
		" jnz 0b\n" \
		"1: subl %1,%0\n" \
		"2:\n" \
		".section .fixup,\"ax\"\n" \
		"3: movl %5,%0\n" \
		" jmp 2b\n" \
		".previous\n" \
		".section __ex_table,\"a\"\n" \
		" .align 4\n" \
		" .long 0b,3b\n" \
		".previous" \
		: "=d"(res), "=c"(count), "=&a" (__d0), "=&S" (__d1), \
		  "=&D" (__d2) \
		: "i"(-EFAULT), "0"(count), "1"(count), "3"(src), "4"(dst) \
		: "memory"); \
} while (0)
57 /**
58 * __strncpy_from_user: - Copy a NUL terminated string from userspace, with less checking.
59 * @dst: Destination address, in kernel space. This buffer must be at
60 * least @count bytes long.
61 * @src: Source address, in user space.
62 * @count: Maximum number of bytes to copy, including the trailing NUL.
63 *
64 * Copies a NUL-terminated string from userspace to kernel space.
65 * Caller must check the specified block with access_ok() before calling
66 * this function.
67 *
68 * On success, returns the length of the string (not including the trailing
69 * NUL).
70 *
71 * If access to userspace fails, returns -EFAULT (some data may have been
72 * copied).
73 *
74 * If @count is smaller than the length of the string, copies @count bytes
75 * and returns @count.
76 */
77 long
78 __strncpy_from_user(char *dst, const char __user *src, long count)
79 {
80 long res;
81 __do_strncpy_from_user(dst, src, count, res);
82 return res;
83 }
85 /**
86 * strncpy_from_user: - Copy a NUL terminated string from userspace.
87 * @dst: Destination address, in kernel space. This buffer must be at
88 * least @count bytes long.
89 * @src: Source address, in user space.
90 * @count: Maximum number of bytes to copy, including the trailing NUL.
91 *
92 * Copies a NUL-terminated string from userspace to kernel space.
93 *
94 * On success, returns the length of the string (not including the trailing
95 * NUL).
96 *
97 * If access to userspace fails, returns -EFAULT (some data may have been
98 * copied).
99 *
100 * If @count is smaller than the length of the string, copies @count bytes
101 * and returns @count.
102 */
103 long
104 strncpy_from_user(char *dst, const char __user *src, long count)
105 {
106 long res = -EFAULT;
107 if (access_ok(VERIFY_READ, src, 1))
108 __do_strncpy_from_user(dst, src, count, res);
109 return res;
110 }
/*
 * Zero Userspace
 */
/*
 * Zero @size bytes at user address @addr: a dword "rep; stosl" for the
 * bulk followed by a byte "rep; stosb" for the remainder.  On a fault
 * the fixup at label 3 reconstructs the number of bytes NOT cleared
 * (remaining dwords * 4 + tail bytes) into @size, which the caller
 * returns.
 */
#define __do_clear_user(addr,size) \
do { \
	int __d0; \
	__asm__ __volatile__( \
		"0: rep; stosl\n" \
		" movl %2,%0\n" \
		"1: rep; stosb\n" \
		"2:\n" \
		".section .fixup,\"ax\"\n" \
		"3: lea 0(%2,%0,4),%0\n" \
		" jmp 2b\n" \
		".previous\n" \
		".section __ex_table,\"a\"\n" \
		" .align 4\n" \
		" .long 0b,3b\n" \
		" .long 1b,2b\n" \
		".previous" \
		: "=&c"(size), "=&D" (__d0) \
		: "r"(size & 3), "0"(size / 4), "1"(addr), "a"(0)); \
} while (0)
138 /**
139 * clear_user: - Zero a block of memory in user space.
140 * @to: Destination address, in user space.
141 * @n: Number of bytes to zero.
142 *
143 * Zero a block of memory in user space.
144 *
145 * Returns number of bytes that could not be cleared.
146 * On success, this will be zero.
147 */
148 unsigned long
149 clear_user(void __user *to, unsigned long n)
150 {
151 might_sleep();
152 if (access_ok(VERIFY_WRITE, to, n))
153 __do_clear_user(to, n);
154 return n;
155 }
157 /**
158 * __clear_user: - Zero a block of memory in user space, with less checking.
159 * @to: Destination address, in user space.
160 * @n: Number of bytes to zero.
161 *
162 * Zero a block of memory in user space. Caller must check
163 * the specified block with access_ok() before calling this function.
164 *
165 * Returns number of bytes that could not be cleared.
166 * On success, this will be zero.
167 */
168 unsigned long
169 __clear_user(void __user *to, unsigned long n)
170 {
171 __do_clear_user(to, n);
172 return n;
173 }
175 /**
176 * strlen_user: - Get the size of a string in user space.
177 * @s: The string to measure.
178 * @n: The maximum valid length
179 *
180 * Get the size of a NUL-terminated string in user space.
181 *
182 * Returns the size of the string INCLUDING the terminating NUL.
183 * On exception, returns 0.
184 * If the string is too long, returns a value greater than @n.
185 */
186 long strnlen_user(const char __user *s, long n)
187 {
188 unsigned long mask = -__addr_ok(s);
189 unsigned long res, tmp;
191 might_sleep();
193 __asm__ __volatile__(
194 " testl %0, %0\n"
195 " jz 3f\n"
196 " andl %0,%%ecx\n"
197 "0: repne; scasb\n"
198 " setne %%al\n"
199 " subl %%ecx,%0\n"
200 " addl %0,%%eax\n"
201 "1:\n"
202 ".section .fixup,\"ax\"\n"
203 "2: xorl %%eax,%%eax\n"
204 " jmp 1b\n"
205 "3: movb $1,%%al\n"
206 " jmp 1b\n"
207 ".previous\n"
208 ".section __ex_table,\"a\"\n"
209 " .align 4\n"
210 " .long 0b,2b\n"
211 ".previous"
212 :"=r" (n), "=D" (s), "=a" (res), "=c" (tmp)
213 :"0" (n), "1" (s), "2" (0), "3" (mask)
214 :"cc");
215 return res & mask;
216 }
218 #ifdef CONFIG_X86_INTEL_USERCOPY
219 static unsigned long
220 __copy_user_intel(void __user *to, const void *from, unsigned long size)
221 {
222 int d0, d1;
223 __asm__ __volatile__(
224 " .align 2,0x90\n"
225 "1: movl 32(%4), %%eax\n"
226 " cmpl $67, %0\n"
227 " jbe 3f\n"
228 "2: movl 64(%4), %%eax\n"
229 " .align 2,0x90\n"
230 "3: movl 0(%4), %%eax\n"
231 "4: movl 4(%4), %%edx\n"
232 "5: movl %%eax, 0(%3)\n"
233 "6: movl %%edx, 4(%3)\n"
234 "7: movl 8(%4), %%eax\n"
235 "8: movl 12(%4),%%edx\n"
236 "9: movl %%eax, 8(%3)\n"
237 "10: movl %%edx, 12(%3)\n"
238 "11: movl 16(%4), %%eax\n"
239 "12: movl 20(%4), %%edx\n"
240 "13: movl %%eax, 16(%3)\n"
241 "14: movl %%edx, 20(%3)\n"
242 "15: movl 24(%4), %%eax\n"
243 "16: movl 28(%4), %%edx\n"
244 "17: movl %%eax, 24(%3)\n"
245 "18: movl %%edx, 28(%3)\n"
246 "19: movl 32(%4), %%eax\n"
247 "20: movl 36(%4), %%edx\n"
248 "21: movl %%eax, 32(%3)\n"
249 "22: movl %%edx, 36(%3)\n"
250 "23: movl 40(%4), %%eax\n"
251 "24: movl 44(%4), %%edx\n"
252 "25: movl %%eax, 40(%3)\n"
253 "26: movl %%edx, 44(%3)\n"
254 "27: movl 48(%4), %%eax\n"
255 "28: movl 52(%4), %%edx\n"
256 "29: movl %%eax, 48(%3)\n"
257 "30: movl %%edx, 52(%3)\n"
258 "31: movl 56(%4), %%eax\n"
259 "32: movl 60(%4), %%edx\n"
260 "33: movl %%eax, 56(%3)\n"
261 "34: movl %%edx, 60(%3)\n"
262 " addl $-64, %0\n"
263 " addl $64, %4\n"
264 " addl $64, %3\n"
265 " cmpl $63, %0\n"
266 " ja 1b\n"
267 "35: movl %0, %%eax\n"
268 " shrl $2, %0\n"
269 " andl $3, %%eax\n"
270 " cld\n"
271 "99: rep; movsl\n"
272 "36: movl %%eax, %0\n"
273 "37: rep; movsb\n"
274 "100:\n"
275 ".section .fixup,\"ax\"\n"
276 "101: lea 0(%%eax,%0,4),%0\n"
277 " jmp 100b\n"
278 ".previous\n"
279 ".section __ex_table,\"a\"\n"
280 " .align 4\n"
281 " .long 1b,100b\n"
282 " .long 2b,100b\n"
283 " .long 3b,100b\n"
284 " .long 4b,100b\n"
285 " .long 5b,100b\n"
286 " .long 6b,100b\n"
287 " .long 7b,100b\n"
288 " .long 8b,100b\n"
289 " .long 9b,100b\n"
290 " .long 10b,100b\n"
291 " .long 11b,100b\n"
292 " .long 12b,100b\n"
293 " .long 13b,100b\n"
294 " .long 14b,100b\n"
295 " .long 15b,100b\n"
296 " .long 16b,100b\n"
297 " .long 17b,100b\n"
298 " .long 18b,100b\n"
299 " .long 19b,100b\n"
300 " .long 20b,100b\n"
301 " .long 21b,100b\n"
302 " .long 22b,100b\n"
303 " .long 23b,100b\n"
304 " .long 24b,100b\n"
305 " .long 25b,100b\n"
306 " .long 26b,100b\n"
307 " .long 27b,100b\n"
308 " .long 28b,100b\n"
309 " .long 29b,100b\n"
310 " .long 30b,100b\n"
311 " .long 31b,100b\n"
312 " .long 32b,100b\n"
313 " .long 33b,100b\n"
314 " .long 34b,100b\n"
315 " .long 35b,100b\n"
316 " .long 36b,100b\n"
317 " .long 37b,100b\n"
318 " .long 99b,101b\n"
319 ".previous"
320 : "=&c"(size), "=&D" (d0), "=&S" (d1)
321 : "1"(to), "2"(from), "0"(size)
322 : "eax", "edx", "memory");
323 return size;
324 }
326 static unsigned long
327 __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
328 {
329 int d0, d1;
330 __asm__ __volatile__(
331 " .align 2,0x90\n"
332 "0: movl 32(%4), %%eax\n"
333 " cmpl $67, %0\n"
334 " jbe 2f\n"
335 "1: movl 64(%4), %%eax\n"
336 " .align 2,0x90\n"
337 "2: movl 0(%4), %%eax\n"
338 "21: movl 4(%4), %%edx\n"
339 " movl %%eax, 0(%3)\n"
340 " movl %%edx, 4(%3)\n"
341 "3: movl 8(%4), %%eax\n"
342 "31: movl 12(%4),%%edx\n"
343 " movl %%eax, 8(%3)\n"
344 " movl %%edx, 12(%3)\n"
345 "4: movl 16(%4), %%eax\n"
346 "41: movl 20(%4), %%edx\n"
347 " movl %%eax, 16(%3)\n"
348 " movl %%edx, 20(%3)\n"
349 "10: movl 24(%4), %%eax\n"
350 "51: movl 28(%4), %%edx\n"
351 " movl %%eax, 24(%3)\n"
352 " movl %%edx, 28(%3)\n"
353 "11: movl 32(%4), %%eax\n"
354 "61: movl 36(%4), %%edx\n"
355 " movl %%eax, 32(%3)\n"
356 " movl %%edx, 36(%3)\n"
357 "12: movl 40(%4), %%eax\n"
358 "71: movl 44(%4), %%edx\n"
359 " movl %%eax, 40(%3)\n"
360 " movl %%edx, 44(%3)\n"
361 "13: movl 48(%4), %%eax\n"
362 "81: movl 52(%4), %%edx\n"
363 " movl %%eax, 48(%3)\n"
364 " movl %%edx, 52(%3)\n"
365 "14: movl 56(%4), %%eax\n"
366 "91: movl 60(%4), %%edx\n"
367 " movl %%eax, 56(%3)\n"
368 " movl %%edx, 60(%3)\n"
369 " addl $-64, %0\n"
370 " addl $64, %4\n"
371 " addl $64, %3\n"
372 " cmpl $63, %0\n"
373 " ja 0b\n"
374 "5: movl %0, %%eax\n"
375 " shrl $2, %0\n"
376 " andl $3, %%eax\n"
377 " cld\n"
378 "6: rep; movsl\n"
379 " movl %%eax,%0\n"
380 "7: rep; movsb\n"
381 "8:\n"
382 ".section .fixup,\"ax\"\n"
383 "9: lea 0(%%eax,%0,4),%0\n"
384 "16: pushl %0\n"
385 " pushl %%eax\n"
386 " xorl %%eax,%%eax\n"
387 " rep; stosb\n"
388 " popl %%eax\n"
389 " popl %0\n"
390 " jmp 8b\n"
391 ".previous\n"
392 ".section __ex_table,\"a\"\n"
393 " .align 4\n"
394 " .long 0b,16b\n"
395 " .long 1b,16b\n"
396 " .long 2b,16b\n"
397 " .long 21b,16b\n"
398 " .long 3b,16b\n"
399 " .long 31b,16b\n"
400 " .long 4b,16b\n"
401 " .long 41b,16b\n"
402 " .long 10b,16b\n"
403 " .long 51b,16b\n"
404 " .long 11b,16b\n"
405 " .long 61b,16b\n"
406 " .long 12b,16b\n"
407 " .long 71b,16b\n"
408 " .long 13b,16b\n"
409 " .long 81b,16b\n"
410 " .long 14b,16b\n"
411 " .long 91b,16b\n"
412 " .long 6b,9b\n"
413 " .long 7b,16b\n"
414 ".previous"
415 : "=&c"(size), "=&D" (d0), "=&S" (d1)
416 : "1"(to), "2"(from), "0"(size)
417 : "eax", "edx", "memory");
418 return size;
419 }
420 #else
421 /*
422 * Leave these declared but undefined. They should not be any references to
423 * them
424 */
425 unsigned long
426 __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size);
427 unsigned long
428 __copy_user_intel(void __user *to, const void *from, unsigned long size);
429 #endif /* CONFIG_X86_INTEL_USERCOPY */
/*
 * Generic arbitrary sized copy (kernel -> user).  Copies up to 7
 * leading bytes to align the destination, then dwords with movsl,
 * then the tail bytes with movsb.  Leaves the residual (uncopied)
 * byte count in @size; the fixups at labels 5 and 3 reconstruct it
 * after a fault.
 */
#define __copy_user(to,from,size) \
do { \
	int __d0, __d1, __d2; \
	__asm__ __volatile__( \
		" cmp $7,%0\n" \
		" jbe 1f\n" \
		" movl %1,%0\n" \
		" negl %0\n" \
		" andl $7,%0\n" \
		" subl %0,%3\n" \
		"4: rep; movsb\n" \
		" movl %3,%0\n" \
		" shrl $2,%0\n" \
		" andl $3,%3\n" \
		" .align 2,0x90\n" \
		"0: rep; movsl\n" \
		" movl %3,%0\n" \
		"1: rep; movsb\n" \
		"2:\n" \
		".section .fixup,\"ax\"\n" \
		"5: addl %3,%0\n" \
		" jmp 2b\n" \
		"3: lea 0(%3,%0,4),%0\n" \
		" jmp 2b\n" \
		".previous\n" \
		".section __ex_table,\"a\"\n" \
		" .align 4\n" \
		" .long 4b,5b\n" \
		" .long 0b,3b\n" \
		" .long 1b,2b\n" \
		".previous" \
		: "=&c"(size), "=&D" (__d0), "=&S" (__d1), "=r"(__d2) \
		: "3"(size), "0"(size), "1"(to), "2"(from) \
		: "memory"); \
} while (0)
/*
 * Generic arbitrary sized copy (user -> kernel), like __copy_user but
 * on a fault the fixup path (labels 5/3/6) additionally zero-fills the
 * uncopied tail of the kernel destination with "rep; stosb" before
 * leaving the residual byte count in @size.
 */
#define __copy_user_zeroing(to,from,size) \
do { \
	int __d0, __d1, __d2; \
	__asm__ __volatile__( \
		" cmp $7,%0\n" \
		" jbe 1f\n" \
		" movl %1,%0\n" \
		" negl %0\n" \
		" andl $7,%0\n" \
		" subl %0,%3\n" \
		"4: rep; movsb\n" \
		" movl %3,%0\n" \
		" shrl $2,%0\n" \
		" andl $3,%3\n" \
		" .align 2,0x90\n" \
		"0: rep; movsl\n" \
		" movl %3,%0\n" \
		"1: rep; movsb\n" \
		"2:\n" \
		".section .fixup,\"ax\"\n" \
		"5: addl %3,%0\n" \
		" jmp 6f\n" \
		"3: lea 0(%3,%0,4),%0\n" \
		"6: pushl %0\n" \
		" pushl %%eax\n" \
		" xorl %%eax,%%eax\n" \
		" rep; stosb\n" \
		" popl %%eax\n" \
		" popl %0\n" \
		" jmp 2b\n" \
		".previous\n" \
		".section __ex_table,\"a\"\n" \
		" .align 4\n" \
		" .long 4b,5b\n" \
		" .long 0b,3b\n" \
		" .long 1b,6b\n" \
		".previous" \
		: "=&c"(size), "=&D" (__d0), "=&S" (__d1), "=r"(__d2) \
		: "3"(size), "0"(size), "1"(to), "2"(from) \
		: "memory"); \
} while (0)
511 unsigned long __copy_to_user_ll(void __user *to, const void *from, unsigned long n)
512 {
513 if (movsl_is_ok(to, from, n))
514 __copy_user(to, from, n);
515 else
516 n = __copy_user_intel(to, from, n);
517 return n;
518 }
520 unsigned long
521 __copy_from_user_ll(void *to, const void __user *from, unsigned long n)
522 {
523 if (movsl_is_ok(to, from, n))
524 __copy_user_zeroing(to, from, n);
525 else
526 n = __copy_user_zeroing_intel(to, from, n);
527 return n;
528 }
530 /**
531 * copy_to_user: - Copy a block of data into user space.
532 * @to: Destination address, in user space.
533 * @from: Source address, in kernel space.
534 * @n: Number of bytes to copy.
535 *
536 * Context: User context only. This function may sleep.
537 *
538 * Copy data from kernel space to user space.
539 *
540 * Returns number of bytes that could not be copied.
541 * On success, this will be zero.
542 */
543 unsigned long
544 copy_to_user(void __user *to, const void *from, unsigned long n)
545 {
546 might_sleep();
547 if (access_ok(VERIFY_WRITE, to, n))
548 n = __copy_to_user(to, from, n);
549 return n;
550 }
551 EXPORT_SYMBOL(copy_to_user);
553 /**
554 * copy_from_user: - Copy a block of data from user space.
555 * @to: Destination address, in kernel space.
556 * @from: Source address, in user space.
557 * @n: Number of bytes to copy.
558 *
559 * Context: User context only. This function may sleep.
560 *
561 * Copy data from user space to kernel space.
562 *
563 * Returns number of bytes that could not be copied.
564 * On success, this will be zero.
565 *
566 * If some data could not be copied, this function will pad the copied
567 * data to the requested size using zero bytes.
568 */
569 unsigned long
570 copy_from_user(void *to, const void __user *from, unsigned long n)
571 {
572 might_sleep();
573 if (access_ok(VERIFY_READ, from, n))
574 n = __copy_from_user(to, from, n);
575 else
576 memset(to, 0, n);
577 return n;
578 }
579 EXPORT_SYMBOL(copy_from_user);