
view xen/include/asm-x86/x86_64/uaccess.h @ 19806:67a0ffade665

x86: improve output resulting from sending '0' over serial

While the original logic already assumed that the kernel part of the
guest's address space is identical on all vCPUs (i.e. for all guest
processes), it didn't fully exploit that fact: as long as the
currently active top-level page table is owned by the subject domain
(currently only Dom0), the stack dump can be done without extra
effort.
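
In code terms that precondition is a single ownership check before
touching the live mappings; a minimal sketch, assuming Xen-style
helpers read_cr3() and maddr_get_owner(), with the dump call as a
stand-in rather than the patch's actual diff:

    /* Editor's sketch: dump through the live mappings only when the
     * active top-level page table is owned by the domain whose stack
     * is wanted. */
    if ( maddr_get_owner(read_cr3()) == v->domain )
        show_guest_stack(v, regs);    /* stand-in for the dump routine */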

For x86-64, additionally add page table traversal so that the stack
can be dumped in all cases (unless it's invalid or user space).
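
That traversal is the standard four-level x86-64 page walk. Below is a
condensed sketch using Xen-style accessors (pagetable_get_pfn(),
mfn_to_virt(), the lNe_get_*() helpers); the real do_page_walk()
additionally rejects HVM vCPUs and handles superpage mappings, both
omitted here:

    /* Condensed sketch of a four-level walk (editor's illustration). */
    void *page_walk_sketch(struct vcpu *v, unsigned long addr)
    {
        unsigned long mfn = pagetable_get_pfn(v->arch.guest_table);
        l4_pgentry_t l4e;
        l3_pgentry_t l3e;
        l2_pgentry_t l2e;
        l1_pgentry_t l1e;

        l4e = ((l4_pgentry_t *)mfn_to_virt(mfn))[l4_table_offset(addr)];
        if ( !(l4e_get_flags(l4e) & _PAGE_PRESENT) )
            return NULL;

        l3e = ((l3_pgentry_t *)mfn_to_virt(l4e_get_pfn(l4e)))[l3_table_offset(addr)];
        if ( !(l3e_get_flags(l3e) & _PAGE_PRESENT) )
            return NULL;

        l2e = ((l2_pgentry_t *)mfn_to_virt(l3e_get_pfn(l3e)))[l2_table_offset(addr)];
        if ( !(l2e_get_flags(l2e) & _PAGE_PRESENT) )
            return NULL;

        l1e = ((l1_pgentry_t *)mfn_to_virt(l2e_get_pfn(l2e)))[l1_table_offset(addr)];
        if ( !(l1e_get_flags(l1e) & _PAGE_PRESENT) )
            return NULL;

        /* Map the final frame and add the page offset. */
        return mfn_to_virt(l1e_get_pfn(l1e)) + (addr & ~PAGE_MASK);
    }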

I left the 32-bit variant of do_page_walk() unimplemented for the
moment, as I couldn't convince myself that using map_domain_page()
there is a good idea, and I didn't want to introduce new fixmap
entries either.

Signed-off-by: Jan Beulich <jbeulich@novell.com>
author Keir Fraser <keir.fraser@citrix.com>
date Tue Jun 16 13:57:18 2009 +0100 (2009-06-16)
parents 08fb9a4489f7
children bcee82a0e9d6
line source
#ifndef __X86_64_UACCESS_H
#define __X86_64_UACCESS_H

#define COMPAT_ARG_XLAT_VIRT_BASE this_cpu(compat_arg_xlat)
#define COMPAT_ARG_XLAT_SIZE PAGE_SIZE
DECLARE_PER_CPU(char, compat_arg_xlat[COMPAT_ARG_XLAT_SIZE]);
#define is_compat_arg_xlat_range(addr, size) ({                               \
    unsigned long __off;                                                      \
    __off = (unsigned long)(addr) - (unsigned long)COMPAT_ARG_XLAT_VIRT_BASE; \
    (__off <= COMPAT_ARG_XLAT_SIZE) &&                                        \
    ((__off + (unsigned long)(size)) <= COMPAT_ARG_XLAT_SIZE);                \
})

/*
 * Valid if in +ve half of 48-bit address space, or above Xen-reserved area.
 * This is also valid for range checks (addr, addr+size). As long as the
 * start address is outside the Xen-reserved area then we will access a
 * non-canonical address (and thus fault) before ever reaching VIRT_START.
 */
#define __addr_ok(addr) \
    (((unsigned long)(addr) < (1UL<<48)) || \
     ((unsigned long)(addr) >= HYPERVISOR_VIRT_END))
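
/*
 * Worked example (editor's illustration, not part of the original file):
 * an address like 0x00007ffffffff000 with a 64k size passes __addr_ok()
 * via the first test even though addr+size crosses the canonical
 * boundary; any access past that boundary faults long before a Xen
 * mapping could be reached, which is what the comment above relies on.
 */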

#define access_ok(addr, size) \
    (__addr_ok(addr) || is_compat_arg_xlat_range(addr, size))

#define array_access_ok(addr, count, size) \
    (access_ok(addr, (count)*(size)))
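
/*
 * Typical use (editor's illustration, not part of the original file):
 * validate a guest-supplied array before touching it, e.g.
 *
 *     if ( !array_access_ok(buf, nr, sizeof(*buf)) )
 *         return -EFAULT;
 */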

#define __compat_addr_ok(d, addr) \
    ((unsigned long)(addr) < HYPERVISOR_COMPAT_VIRT_START(d))

#define __compat_access_ok(d, addr, size) \
    __compat_addr_ok(d, (unsigned long)(addr) + ((size) ? (size) - 1 : 0))

#define compat_access_ok(addr, size) \
    __compat_access_ok(current->domain, addr, size)

#define compat_array_access_ok(addr,count,size) \
    (likely((count) < (~0U / (size))) && \
     compat_access_ok(addr, (count) * (size)))
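
/*
 * Editor's note on the guard above: (count) < (~0U / (size)) keeps the
 * 32-bit product (count) * (size) from wrapping. E.g. with size == 8
 * the guard requires count < 0x1fffffff, so the product stays below ~0U.
 */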

#define __put_user_size(x,ptr,size,retval,errret)                      \
do {                                                                   \
    retval = 0;                                                        \
    switch (size) {                                                    \
    case 1: __put_user_asm(x,ptr,retval,"b","b","iq",errret); break;   \
    case 2: __put_user_asm(x,ptr,retval,"w","w","ir",errret); break;   \
    case 4: __put_user_asm(x,ptr,retval,"l","k","ir",errret); break;   \
    case 8: __put_user_asm(x,ptr,retval,"q","","ir",errret); break;    \
    default: __put_user_bad();                                         \
    }                                                                  \
} while (0)

#define __get_user_size(x,ptr,size,retval,errret)                      \
do {                                                                   \
    retval = 0;                                                        \
    switch (size) {                                                    \
    case 1: __get_user_asm(x,ptr,retval,"b","b","=q",errret); break;   \
    case 2: __get_user_asm(x,ptr,retval,"w","w","=r",errret); break;   \
    case 4: __get_user_asm(x,ptr,retval,"l","k","=r",errret); break;   \
    case 8: __get_user_asm(x,ptr,retval,"q","","=r",errret); break;    \
    default: __get_user_bad();                                         \
    }                                                                  \
} while (0)
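
/*
 * Usage sketch (editor's illustration, not part of the original file):
 * fetch a 4-byte value from a guest pointer; rc ends up 0 on success,
 * or -EFAULT if the access faults and the fixup path inside
 * __get_user_asm() runs.
 *
 *     unsigned int val;
 *     int rc;
 *
 *     __get_user_size(val, (unsigned int *)ptr, sizeof(val), rc, -EFAULT);
 *     if ( rc )
 *         return rc;
 */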

#endif /* __X86_64_UACCESS_H */