debuggers.hg

view xen/include/asm-x86/x86_64/uaccess.h @ 3726:88957a238191

bitkeeper revision 1.1159.1.544 (4207248crq3YxiyLWjUehtHv_Yd3tg)

Merge tempest.cl.cam.ac.uk:/auto/groups/xeno-xenod/BK/xeno.bk
into tempest.cl.cam.ac.uk:/local/scratch/smh22/xen-unstable.bk
author smh22@tempest.cl.cam.ac.uk
date Mon Feb 07 08:19:24 2005 +0000 (2005-02-07)
parents dda5ab69e74a 7db5b671b347
children f5f2757b3aa2
line source
1 #ifndef __X86_64_UACCESS_H
2 #define __X86_64_UACCESS_H
4 /*
5 * User space memory access functions
6 */
7 #include <xen/config.h>
8 #include <xen/compiler.h>
9 #include <xen/errno.h>
10 #include <xen/sched.h>
11 #include <xen/prefetch.h>
12 #include <asm/page.h>
/* Xen does not use sparse-style address-space annotations; make it a no-op. */
#define __user

#define VERIFY_READ 0
#define VERIFY_WRITE 1

/* An address is usable iff it lies strictly below the hypervisor area. */
#define __addr_ok(addr) ((unsigned long)(addr) < HYPERVISOR_VIRT_START)
/*
 * Test whether a block of memory is a valid user space address.
 * Returns 0 if the range is valid, nonzero otherwise.
 *
 * This is equivalent to the following test:
 * ((u65)addr >= (u65)HYPERVISOR_VIRT_END) ?
 * (((u65)addr + (u65)size) >= ((u65)1 << 64)) :
 * (((u65)addr + (u65)size) >= ((u65)HYPERVISOR_VIRT_START))
 *
 * The addq/sbbq pairs materialise the 65-bit carry: sbbq %0,%0 turns the
 * carry out of addr+size into an all-ones/all-zeroes flag value.
 */
#define __range_not_ok(addr,size) ({ \
	unsigned long flag,sum; \
	if ((unsigned long)addr >= HYPERVISOR_VIRT_END) \
		/* Above the hypervisor hole: only 64-bit wraparound can be bad. */ \
		asm("addq %3,%1 ; sbbq %0,%0" \
		    :"=&r" (flag), "=r" (sum) \
		    :"1" (addr),"g" ((long)(size))); \
	else \
		/* Below the hole: additionally flag ranges reaching the hole. */ \
		asm("addq %3,%1 ; sbbq %0,%0 ; cmpq %1,%4 ; sbbq $0,%0" \
		    :"=&r" (flag), "=r" (sum) \
		    :"1" (addr),"g" ((long)(size)),"r" (HYPERVISOR_VIRT_START)); \
	flag; })
/* Nonzero iff the range is addressable; 'type' (VERIFY_*) is unused here. */
#define access_ok(type, addr, size) (__range_not_ok(addr,size) == 0)

/* As access_ok(), for 'count' elements of 'size' bytes each.  Counts wider
 * than 32 bits are rejected so that count*size cannot overflow 64 bits. */
#define array_access_ok(type,addr,count,size) \
	(likely(sizeof(count) <= 4) /* disallow 64-bit counts */ && \
	 access_ok(type,addr,(unsigned long)count*(unsigned long)size))

/* Referenced (only) for unsupported access sizes in __get_user_size() /
 * __put_user_size(); presumably left undefined so misuse fails at link
 * time — confirm no definition exists elsewhere. */
extern long __get_user_bad(void);
extern void __put_user_bad(void);
/**
 * get_user: - Get a simple variable from user space.
 * @x: Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only. This function may sleep.
 *
 * This macro copies a single simple variable from user space to kernel
 * space. It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Returns zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */
/* Expands to the checked variant: the address is validated with __addr_ok(). */
#define get_user(x,ptr) \
	__get_user_check((x),(ptr),sizeof(*(ptr)))

/**
 * put_user: - Write a simple value into user space.
 * @x: Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only. This function may sleep.
 *
 * This macro copies a single simple value from kernel space to user
 * space. It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Returns zero on success, or -EFAULT on error.
 */
/* The cast forces @x to the pointee type before the checked store. */
#define put_user(x,ptr) \
	__put_user_check((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr)))
/**
 * __get_user: - Get a simple variable from user space, with less checking.
 * @x: Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only. This function may sleep.
 *
 * This macro copies a single simple variable from user space to kernel
 * space. It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Returns zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */
#define __get_user(x,ptr) \
	__get_user_nocheck((x),(ptr),sizeof(*(ptr)))

/**
 * __put_user: - Write a simple value into user space, with less checking.
 * @x: Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only. This function may sleep.
 *
 * This macro copies a single simple value from kernel space to user
 * space. It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Returns zero on success, or -EFAULT on error.
 */
#define __put_user(x,ptr) \
	__put_user_nocheck((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr)))
/* Implementation of __put_user(): performs the store with no address check;
 * a fault is caught by the fixup path and yields -EFAULT. */
#define __put_user_nocheck(x,ptr,size) \
({ \
	long __pu_err; \
	__put_user_size((x),(ptr),(size),__pu_err,-EFAULT); \
	__pu_err; \
})

/* Implementation of put_user(): skips the store entirely (returning -EFAULT)
 * unless the destination passes __addr_ok(). */
#define __put_user_check(x,ptr,size) \
({ \
	long __pu_err = -EFAULT; \
	__typeof__(*(ptr)) __user *__pu_addr = (ptr); \
	if (__addr_ok(__pu_addr)) \
		__put_user_size((x),__pu_addr,(size),__pu_err,-EFAULT); \
	__pu_err; \
})
/* Dispatch a store of 'size' bytes (1/2/4/8) to the matching mov width.
 * Sets retval to 0 on success or errret on fault; any other size references
 * __put_user_bad(). */
#define __put_user_size(x,ptr,size,retval,errret) \
do { \
	retval = 0; \
	switch (size) { \
	case 1: __put_user_asm(x,ptr,retval,"b","b","iq",errret);break; \
	case 2: __put_user_asm(x,ptr,retval,"w","w","ir",errret);break; \
	case 4: __put_user_asm(x,ptr,retval,"l","k","ir",errret);break; \
	case 8: __put_user_asm(x,ptr,retval,"q","","ir",errret);break; \
	default: __put_user_bad(); \
	} \
} while (0)
struct __large_struct { unsigned long buf[100]; };
/* View an arbitrary address as a large object so the "m" constraint below
 * is treated as covering the accessed memory. */
#define __m(x) (*(struct __large_struct *)(x))

/*
 * Tell gcc we read from memory instead of writing: this is because
 * we do not write to any memory gcc knows about, so there are no
 * aliasing issues.
 */
/* Single guarded store: label 1 is the access; on a fault the __ex_table
 * entry redirects to label 3, which sets err = errret and resumes at 2.
 * itype/rtype select the mov suffix and register width; ltype is the
 * input constraint for the value. */
#define __put_user_asm(x, addr, err, itype, rtype, ltype, errret) \
	__asm__ __volatile__( \
		"1: mov"itype" %"rtype"1,%2\n" \
		"2:\n" \
		".section .fixup,\"ax\"\n" \
		"3: mov %3,%0\n" \
		" jmp 2b\n" \
		".previous\n" \
		".section __ex_table,\"a\"\n" \
		" .align 8\n" \
		" .quad 1b,3b\n" \
		".previous" \
		: "=r"(err) \
		: ltype (x), "m"(__m(addr)), "i"(errret), "0"(err))
/* Implementation of __get_user(): performs the load with no address check;
 * a fault is caught by the fixup path, zeroing the value and returning
 * -EFAULT. */
#define __get_user_nocheck(x,ptr,size) \
({ \
	long __gu_err, __gu_val; \
	__get_user_size(__gu_val,(ptr),(size),__gu_err,-EFAULT);\
	(x) = (__typeof__(*(ptr)))__gu_val; \
	__gu_err; \
})

/* Implementation of get_user(): performs the (fault-guarded) load first,
 * then forces -EFAULT if the address fails __addr_ok().  NOTE(review): the
 * access precedes the range check; presumably the exception-table fixup
 * makes a hypervisor-range access harmless — confirm with the fault
 * handler. */
#define __get_user_check(x,ptr,size) \
({ \
	long __gu_err, __gu_val; \
	__typeof__(*(ptr)) __user *__gu_addr = (ptr); \
	__get_user_size(__gu_val,__gu_addr,(size),__gu_err,-EFAULT); \
	(x) = (__typeof__(*(ptr)))__gu_val; \
	if (!__addr_ok(__gu_addr)) __gu_err = -EFAULT; \
	__gu_err; \
})
/* Dispatch a load of 'size' bytes (1/2/4/8) to the matching mov width.
 * Sets retval to 0 on success or errret on fault; any other size references
 * __get_user_bad(). */
#define __get_user_size(x,ptr,size,retval,errret) \
do { \
	retval = 0; \
	switch (size) { \
	case 1: __get_user_asm(x,ptr,retval,"b","b","=q",errret);break; \
	case 2: __get_user_asm(x,ptr,retval,"w","w","=r",errret);break; \
	case 4: __get_user_asm(x,ptr,retval,"l","k","=r",errret);break; \
	case 8: __get_user_asm(x,ptr,retval,"q","","=r",errret); break; \
	default: (x) = __get_user_bad(); \
	} \
} while (0)
/* Single guarded load: label 1 is the access; on a fault the __ex_table
 * entry redirects to label 3, which sets err = errret, zeroes the
 * destination register, and resumes at 2.  itype/rtype select the mov
 * suffix and register width; ltype is the output constraint for x. */
#define __get_user_asm(x, addr, err, itype, rtype, ltype, errret) \
	__asm__ __volatile__( \
		"1: mov"itype" %2,%"rtype"1\n" \
		"2:\n" \
		".section .fixup,\"ax\"\n" \
		"3: mov %3,%0\n" \
		" xor"itype" %"rtype"1,%"rtype"1\n" \
		" jmp 2b\n" \
		".previous\n" \
		".section __ex_table,\"a\"\n" \
		" .align 8\n" \
		" .quad 1b,3b\n" \
		".previous" \
		: "=r"(err), ltype (x) \
		: "m"(__m(addr)), "i"(errret), "0"(err))
/*
 * Copy To/From Userspace
 */

/* Handles exceptions in both to and from, but doesn't do access_ok */
unsigned long __copy_to_user_ll(void __user *to, const void *from, unsigned n);
unsigned long __copy_from_user_ll(void *to, const void __user *from, unsigned n);

/* Range-checked bulk copies; presumably return the number of bytes left
 * uncopied (0 on success), matching __copy_{to,from}_user below — confirm
 * against the definitions. */
unsigned long copy_to_user(void __user *to, const void *from, unsigned len);
unsigned long copy_from_user(void *to, const void __user *from, unsigned len);
246 static always_inline int __copy_from_user(void *dst, const void __user *src, unsigned size)
247 {
248 int ret = 0;
249 if (!__builtin_constant_p(size))
250 return __copy_from_user_ll(dst,(void *)src,size);
251 switch (size) {
252 case 1:__get_user_asm(*(u8*)dst,(u8 __user *)src,ret,"b","b","=q",1);
253 return ret;
254 case 2:__get_user_asm(*(u16*)dst,(u16 __user *)src,ret,"w","w","=r",2);
255 return ret;
256 case 4:__get_user_asm(*(u32*)dst,(u32 __user *)src,ret,"l","k","=r",4);
257 return ret;
258 case 8:__get_user_asm(*(u64*)dst,(u64 __user *)src,ret,"q","","=r",8);
259 return ret;
260 case 10:
261 __get_user_asm(*(u64*)dst,(u64 __user *)src,ret,"q","","=r",16);
262 if (unlikely(ret)) return ret;
263 __get_user_asm(*(u16*)(8+(char*)dst),(u16 __user *)(8+(char __user *)src),ret,"w","w","=r",2);
264 return ret;
265 case 16:
266 __get_user_asm(*(u64*)dst,(u64 __user *)src,ret,"q","","=r",16);
267 if (unlikely(ret)) return ret;
268 __get_user_asm(*(u64*)(8+(char*)dst),(u64 __user *)(8+(char __user *)src),ret,"q","","=r",8);
269 return ret;
270 default:
271 return __copy_from_user_ll(dst,(void *)src,size);
272 }
273 }
275 static always_inline int __copy_to_user(void __user *dst, const void *src, unsigned size)
276 {
277 int ret = 0;
278 if (!__builtin_constant_p(size))
279 return __copy_to_user_ll((void *)dst,src,size);
280 switch (size) {
281 case 1:__put_user_asm(*(u8*)src,(u8 __user *)dst,ret,"b","b","iq",1);
282 return ret;
283 case 2:__put_user_asm(*(u16*)src,(u16 __user *)dst,ret,"w","w","ir",2);
284 return ret;
285 case 4:__put_user_asm(*(u32*)src,(u32 __user *)dst,ret,"l","k","ir",4);
286 return ret;
287 case 8:__put_user_asm(*(u64*)src,(u64 __user *)dst,ret,"q","","ir",8);
288 return ret;
289 case 10:
290 __put_user_asm(*(u64*)src,(u64 __user *)dst,ret,"q","","ir",10);
291 if (unlikely(ret)) return ret;
292 asm("":::"memory");
293 __put_user_asm(4[(u16*)src],4+(u16 __user *)dst,ret,"w","w","ir",2);
294 return ret;
295 case 16:
296 __put_user_asm(*(u64*)src,(u64 __user *)dst,ret,"q","","ir",16);
297 if (unlikely(ret)) return ret;
298 asm("":::"memory");
299 __put_user_asm(1[(u64*)src],1+(u64 __user *)dst,ret,"q","","ir",8);
300 return ret;
301 default:
302 return __copy_to_user_ll((void *)dst,src,size);
303 }
304 }
/* Zero 'len' bytes of user memory; clear_user() presumably range-checks
 * while __clear_user() does not (mirroring the copy_* pairs above) —
 * confirm against the definitions. */
unsigned long clear_user(void __user *mem, unsigned long len);
unsigned long __clear_user(void __user *mem, unsigned long len);
309 #endif /* __X86_64_UACCESS_H */