debuggers.hg

view xen/include/asm-x86/x86_32/uaccess.h @ 3746:9e80fc0dcac5

bitkeeper revision 1.1159.212.121 (42081031Gcfd1G5fgexBl7vd4XfmLQ)

Use the 1:1 pagetables (already used for guest physical mode emulation) for
doing phys_to_machine_mapping as well.

Signed-off-by: Arun Sharma <arun.sharma@intel.com>
Signed-off-by: ian@xensource.com
author iap10@labyrinth.cl.cam.ac.uk
date Tue Feb 08 01:04:49 2005 +0000 (2005-02-08)
parents 7db5b671b347
children f5f2757b3aa2
line source
1 #ifndef __i386_UACCESS_H
2 #define __i386_UACCESS_H
4 /*
5 * User space memory access functions
6 */
7 #include <xen/config.h>
8 #include <xen/errno.h>
9 #include <xen/prefetch.h>
10 #include <xen/string.h>
/* __user is an annotation only: it expands to nothing in this build. */
12 #define __user
/* Access-type arguments for access_ok(); the type is ignored by the
   address checks defined below. */
14 #define VERIFY_READ 0
15 #define VERIFY_WRITE 1
17 /*
18 * movsl can be slow when source and dest are not both 8-byte aligned
19 */
20 #ifdef CONFIG_X86_INTEL_USERCOPY
/* Alignment mask; presumably consulted by the optimized Intel copy
   routines defined elsewhere -- confirm against the usercopy implementation. */
21 extern struct movsl_mask {
22 int mask;
23 } __cacheline_aligned movsl_mask;
24 #endif
/* An address is "user" iff it lies below the hypervisor mapping. */
#define __addr_ok(addr) ((unsigned long)(addr) < HYPERVISOR_VIRT_START)

/*
 * Test whether a block of memory is a valid user space address.
 * Returns 0 if the range is valid, nonzero otherwise.
 *
 * This is equivalent to the following test:
 * (u33)addr + (u33)size >= (u33)HYPERVISOR_VIRT_START
 */
#define __range_not_ok(addr,size) ({ \
	unsigned long flag,sum; \
	asm("addl %3,%1 ; sbbl %0,%0; cmpl %1,%4; sbbl $0,%0" \
	    :"=&r" (flag), "=r" (sum) \
	    :"1" (addr),"g" ((int)(size)),"r" (HYPERVISOR_VIRT_START)); \
	flag; })

#define access_ok(type,addr,size) (likely(__range_not_ok(addr,size) == 0))

/*
 * Array variant: additionally rejects count*size products that would wrap
 * around ~0UL.  The count/size arguments are parenthesized so that
 * expression arguments (e.g. `n + 1`) expand correctly.
 * NOTE(review): `size` must be nonzero (it is a divisor); callers are
 * expected to pass sizeof(...), which is always >= 1 -- confirm.
 */
#define array_access_ok(type,addr,count,size) \
    (likely((count) < (~0UL/(size))) && access_ok(type,addr,(count)*(size)))
/* Never defined anywhere: referencing these for an unsupported access size
   presumably produces a link-time error -- confirm (no definition visible here). */
47 extern long __get_user_bad(void);
48 extern void __put_user_bad(void);
50 /**
51 * get_user: - Get a simple variable from user space.
52 * @x: Variable to store result.
53 * @ptr: Source address, in user space.
54 *
55 * Context: User context only. This function may sleep.
56 *
57 * This macro copies a single simple variable from user space to kernel
58 * space. It supports simple types like char and int, but not larger
59 * data types like structures or arrays.
60 *
61 * @ptr must have pointer-to-simple-variable type, and the result of
62 * dereferencing @ptr must be assignable to @x without a cast.
63 *
64 * Returns zero on success, or -EFAULT on error.
65 * On error, the variable @x is set to zero.
66 */
/* Checked variant: expands to __get_user_check (defined later in this file). */
67 #define get_user(x,ptr) \
68 __get_user_check((x),(ptr),sizeof(*(ptr)))
70 /**
71 * put_user: - Write a simple value into user space.
72 * @x: Value to copy to user space.
73 * @ptr: Destination address, in user space.
74 *
75 * Context: User context only. This function may sleep.
76 *
77 * This macro copies a single simple value from kernel space to user
78 * space. It supports simple types like char and int, but not larger
79 * data types like structures or arrays.
80 *
81 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
82 * to the result of dereferencing @ptr.
83 *
84 * Returns zero on success, or -EFAULT on error.
85 */
/* The __typeof__ cast coerces @x to the pointee type before the size
   dispatch in __put_user_check/__put_user_size. */
86 #define put_user(x,ptr) \
87 __put_user_check((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr)))
90 /**
91 * __get_user: - Get a simple variable from user space, with less checking.
92 * @x: Variable to store result.
93 * @ptr: Source address, in user space.
94 *
95 * Context: User context only. This function may sleep.
96 *
97 * This macro copies a single simple variable from user space to kernel
98 * space. It supports simple types like char and int, but not larger
99 * data types like structures or arrays.
100 *
101 * @ptr must have pointer-to-simple-variable type, and the result of
102 * dereferencing @ptr must be assignable to @x without a cast.
103 *
104 * Caller must check the pointer with access_ok() before calling this
105 * function.
106 *
107 * Returns zero on success, or -EFAULT on error.
108 * On error, the variable @x is set to zero.
109 */
/* Unchecked variant: no __addr_ok() test; faults are caught solely by the
   exception-table fixup inside __get_user_asm. */
110 #define __get_user(x,ptr) \
111 __get_user_nocheck((x),(ptr),sizeof(*(ptr)))
114 /**
115 * __put_user: - Write a simple value into user space, with less checking.
116 * @x: Value to copy to user space.
117 * @ptr: Destination address, in user space.
118 *
119 * Context: User context only. This function may sleep.
120 *
121 * This macro copies a single simple value from kernel space to user
122 * space. It supports simple types like char and int, but not larger
123 * data types like structures or arrays.
124 *
125 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
126 * to the result of dereferencing @ptr.
127 *
128 * Caller must check the pointer with access_ok() before calling this
129 * function.
130 *
131 * Returns zero on success, or -EFAULT on error.
132 */
/* Unchecked variant: no __addr_ok() test; faults are caught solely by the
   exception-table fixup inside __put_user_asm. */
133 #define __put_user(x,ptr) \
134 __put_user_nocheck((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr)))
/* Store x through ptr; the statement expression yields 0 or -EFAULT.
   No address validation is performed here. */
136 #define __put_user_nocheck(x,ptr,size) \
137 ({ \
138 long __pu_err; \
139 __put_user_size((x),(ptr),(size),__pu_err,-EFAULT); \
140 __pu_err; \
141 })
/* Checked variant: only the START address is tested with __addr_ok(); a
   store of `size` bytes that straddles HYPERVISOR_VIRT_START presumably
   relies on the page-fault fixup path instead -- NOTE(review): confirm. */
143 #define __put_user_check(x,ptr,size) \
144 ({ \
145 long __pu_err = -EFAULT; \
146 __typeof__(*(ptr)) __user *__pu_addr = (ptr); \
147 if (__addr_ok(__pu_addr)) \
148 __put_user_size((x),__pu_addr,(size),__pu_err,-EFAULT); \
149 __pu_err; \
150 })
/* 64-bit user store as two 32-bit movl's; the "A" constraint places x in
   edx:eax.  A fault on either half jumps to the .fixup stub (label 4),
   which loads err with operand %3 (-EFAULT) and resumes at label 3. */
152 #define __put_user_u64(x, addr, err) \
153 __asm__ __volatile__( \
154 "1: movl %%eax,0(%2)\n" \
155 "2: movl %%edx,4(%2)\n" \
156 "3:\n" \
157 ".section .fixup,\"ax\"\n" \
158 "4: movl %3,%0\n" \
159 " jmp 3b\n" \
160 ".previous\n" \
161 ".section __ex_table,\"a\"\n" \
162 " .align 4\n" \
163 " .long 1b,4b\n" \
164 " .long 2b,4b\n" \
165 ".previous" \
166 : "=r"(err) \
167 : "A" (x), "r" (addr), "i"(-EFAULT), "0"(err))
169 #ifdef CONFIG_X86_WP_WORKS_OK
/* Size-dispatched user store: retval receives 0 on success or errret on
   fault.  Unsupported sizes reference the undefined __put_user_bad(). */
171 #define __put_user_size(x,ptr,size,retval,errret) \
172 do { \
173 retval = 0; \
174 switch (size) { \
175 case 1: __put_user_asm(x,ptr,retval,"b","b","iq",errret);break; \
176 case 2: __put_user_asm(x,ptr,retval,"w","w","ir",errret);break; \
177 case 4: __put_user_asm(x,ptr,retval,"l","","ir",errret); break; \
178 case 8: __put_user_u64((__typeof__(*ptr))(x),ptr,retval); break;\
179 default: __put_user_bad(); \
180 } \
181 } while (0)
183 #else
/* Fallback for the !CONFIG_X86_WP_WORKS_OK build: route every store through
   the generic copy routine (presumably for CPUs whose WP bit is unreliable
   in supervisor mode -- confirm). */
185 #define __put_user_size(x,ptr,size,retval,errret) \
186 do { \
187 __typeof__(*(ptr)) __pus_tmp = x; \
188 retval = 0; \
189 \
190 if(unlikely(__copy_to_user_ll(ptr, &__pus_tmp, size) != 0)) \
191 retval = errret; \
192 } while (0)
194 #endif
/* Oversized dummy type: casting an address through __m() lets the "m" asm
   operand stand for a large region rather than a single word. */
195 struct __large_struct { unsigned long buf[100]; };
196 #define __m(x) (*(struct __large_struct *)(x))
198 /*
199 * Tell gcc we read from memory instead of writing: this is because
200 * we do not write to any memory gcc knows about, so there are no
201 * aliasing issues.
202 */
/* Single-instruction store with fixup: a fault at label 1 jumps to the
   .fixup stub (label 3), which sets err = errret and resumes at label 2. */
203 #define __put_user_asm(x, addr, err, itype, rtype, ltype, errret) \
204 __asm__ __volatile__( \
205 "1: mov"itype" %"rtype"1,%2\n" \
206 "2:\n" \
207 ".section .fixup,\"ax\"\n" \
208 "3: movl %3,%0\n" \
209 " jmp 2b\n" \
210 ".previous\n" \
211 ".section __ex_table,\"a\"\n" \
212 " .align 4\n" \
213 " .long 1b,3b\n" \
214 ".previous" \
215 : "=r"(err) \
216 : ltype (x), "m"(__m(addr)), "i"(errret), "0"(err))
/* Load from user address ptr into x; yields 0 or -EFAULT.  No address
   validation is performed here. */
219 #define __get_user_nocheck(x,ptr,size) \
220 ({ \
221 long __gu_err, __gu_val; \
222 __get_user_size(__gu_val,(ptr),(size),__gu_err,-EFAULT);\
223 (x) = (__typeof__(*(ptr)))__gu_val; \
224 __gu_err; \
225 })
/* Checked variant.  Note the load runs BEFORE the __addr_ok() test: a bad
   address is first recovered by the exception fixup (which zeroes the value
   -- see __get_user_asm) and then reported as -EFAULT here. */
227 #define __get_user_check(x,ptr,size) \
228 ({ \
229 long __gu_err, __gu_val; \
230 __typeof__(*(ptr)) __user *__gu_addr = (ptr); \
231 __get_user_size(__gu_val,__gu_addr,(size),__gu_err,-EFAULT); \
232 (x) = (__typeof__(*(ptr)))__gu_val; \
233 if (!__addr_ok(__gu_addr)) __gu_err = -EFAULT; \
234 __gu_err; \
235 })
/* Size-dispatched user load.  Unlike __put_user_size there is no 8-byte
   case: 64-bit fetches fall through to the undefined __get_user_bad(). */
237 #define __get_user_size(x,ptr,size,retval,errret) \
238 do { \
239 retval = 0; \
240 switch (size) { \
241 case 1: __get_user_asm(x,ptr,retval,"b","b","=q",errret);break; \
242 case 2: __get_user_asm(x,ptr,retval,"w","w","=r",errret);break; \
243 case 4: __get_user_asm(x,ptr,retval,"l","","=r",errret);break; \
244 default: (x) = __get_user_bad(); \
245 } \
246 } while (0)
/* Single-instruction load with fixup: on a fault the .fixup stub sets
   err = errret and ZEROES the destination register (the xor), matching the
   "on error, @x is set to zero" contract documented for get_user. */
248 #define __get_user_asm(x, addr, err, itype, rtype, ltype, errret) \
249 __asm__ __volatile__( \
250 "1: mov"itype" %2,%"rtype"1\n" \
251 "2:\n" \
252 ".section .fixup,\"ax\"\n" \
253 "3: movl %3,%0\n" \
254 " xor"itype" %"rtype"1,%"rtype"1\n" \
255 " jmp 2b\n" \
256 ".previous\n" \
257 ".section __ex_table,\"a\"\n" \
258 " .align 4\n" \
259 " .long 1b,3b\n" \
260 ".previous" \
261 : "=r"(err), ltype (x) \
262 : "m"(__m(addr)), "i"(errret), "0"(err))
/* Arbitrary-length copy workers, defined elsewhere.  Per the wrappers below,
   both return the number of bytes NOT copied (0 on success). */
265 unsigned long __copy_to_user_ll(void __user *to, const void *from, unsigned long n);
266 unsigned long __copy_from_user_ll(void *to, const void __user *from, unsigned long n);
268 /*
269 * Here we special-case 1, 2 and 4-byte copy_*_user invocations. On a fault
270 * we return the initial request size (1, 2 or 4), as copy_*_user should do.
271 * If a store crosses a page boundary and gets a fault, the x86 will not write
272 * anything, so this is accurate.
273 */
275 /**
276 * __copy_to_user: - Copy a block of data into user space, with less checking.
277 * @to: Destination address, in user space.
278 * @from: Source address, in kernel space.
279 * @n: Number of bytes to copy.
280 *
281 * Context: User context only. This function may sleep.
282 *
283 * Copy data from kernel space to user space. Caller must check
284 * the specified block with access_ok() before calling this function.
285 *
286 * Returns number of bytes that could not be copied.
287 * On success, this will be zero.
288 */
289 static always_inline unsigned long
290 __copy_to_user(void __user *to, const void *from, unsigned long n)
291 {
292 if (__builtin_constant_p(n)) {
293 unsigned long ret;
295 switch (n) {
296 case 1:
297 __put_user_size(*(u8 *)from, (u8 __user *)to, 1, ret, 1);
298 return ret;
299 case 2:
300 __put_user_size(*(u16 *)from, (u16 __user *)to, 2, ret, 2);
301 return ret;
302 case 4:
303 __put_user_size(*(u32 *)from, (u32 __user *)to, 4, ret, 4);
304 return ret;
305 }
306 }
307 return __copy_to_user_ll(to, from, n);
308 }
310 /**
311 * __copy_from_user: - Copy a block of data from user space, with less checking.
312 * @to: Destination address, in kernel space.
313 * @from: Source address, in user space.
314 * @n: Number of bytes to copy.
315 *
316 * Context: User context only. This function may sleep.
317 *
318 * Copy data from user space to kernel space. Caller must check
319 * the specified block with access_ok() before calling this function.
320 *
321 * Returns number of bytes that could not be copied.
322 * On success, this will be zero.
323 *
324 * If some data could not be copied, this function will pad the copied
325 * data to the requested size using zero bytes.
326 */
327 static always_inline unsigned long
328 __copy_from_user(void *to, const void __user *from, unsigned long n)
329 {
330 if (__builtin_constant_p(n)) {
331 unsigned long ret;
333 switch (n) {
334 case 1:
335 __get_user_size(*(u8 *)to, from, 1, ret, 1);
336 return ret;
337 case 2:
338 __get_user_size(*(u16 *)to, from, 2, ret, 2);
339 return ret;
340 case 4:
341 __get_user_size(*(u32 *)to, from, 4, ret, 4);
342 return ret;
343 }
344 }
345 return __copy_from_user_ll(to, from, n);
346 }
/* Fully-checked copy variants and user-memory zeroing helpers, defined
   elsewhere; each presumably returns the number of bytes left un-copied /
   un-cleared (0 on success), mirroring the __copy_* contract -- confirm. */
348 unsigned long copy_to_user(void __user *to, const void *from, unsigned long n);
349 unsigned long copy_from_user(void *to,
350 const void __user *from, unsigned long n);
352 unsigned long clear_user(void __user *mem, unsigned long len);
353 unsigned long __clear_user(void __user *mem, unsigned long len);
355 #endif /* __i386_UACCESS_H */