Coverage Report

Created: 2017-10-25 09:10

/root/src/xen/xen/include/asm/x86_64/uaccess.h
Line  Count  Source
   1         #ifndef __X86_64_UACCESS_H
   2         #define __X86_64_UACCESS_H
   3
   4      0  #define COMPAT_ARG_XLAT_VIRT_BASE ((void *)ARG_XLAT_START(current))
   5      0  #define COMPAT_ARG_XLAT_SIZE      (2*PAGE_SIZE)
   6         struct vcpu;
   7         int setup_compat_arg_xlat(struct vcpu *v);
   8         void free_compat_arg_xlat(struct vcpu *v);
   9      0  #define is_compat_arg_xlat_range(addr, size) ({                               \
  10      0      unsigned long __off;                                                      \
  11      0      __off = (unsigned long)(addr) - (unsigned long)COMPAT_ARG_XLAT_VIRT_BASE; \
  12      0      (__off < COMPAT_ARG_XLAT_SIZE) &&                                         \
  13      0      ((__off + (unsigned long)(size)) <= COMPAT_ARG_XLAT_SIZE);                \
  14      0  })
  15
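Note: is_compat_arg_xlat_range() above rejects out-of-window ranges with a single
offset computation: once the window base is subtracted, one unsigned comparison
catches addresses below the window (the subtraction wraps to a huge value) and a
second catches ranges running past its end. A minimal standalone sketch of the
same pattern, with hypothetical WINDOW_BASE/WINDOW_SIZE constants standing in for
the Xen ones:

    #include <stdbool.h>

    /* Hypothetical stand-ins for COMPAT_ARG_XLAT_VIRT_BASE/_SIZE. */
    #define WINDOW_BASE 0x100000UL
    #define WINDOW_SIZE (2 * 4096UL)

    static bool in_window(unsigned long addr, unsigned long size)
    {
        /* If addr < WINDOW_BASE the subtraction wraps, so the first
         * comparison also rejects addresses below the window. */
        unsigned long off = addr - WINDOW_BASE;

        return (off < WINDOW_SIZE) && (off + size <= WINDOW_SIZE);
    }
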
  16      0  #define xlat_page_start ((unsigned long)COMPAT_ARG_XLAT_VIRT_BASE)
  17         #define xlat_page_size  COMPAT_ARG_XLAT_SIZE
  18         #define xlat_page_left_size(xlat_page_current) \
  19             (xlat_page_start + xlat_page_size - xlat_page_current)
  20
  21      0  #define xlat_malloc_init(xlat_page_current)    do { \
  22      0      xlat_page_current = xlat_page_start; \
  23      0  } while (0)
  24
  25         extern void *xlat_malloc(unsigned long *xlat_page_current, size_t size);
  26
  27      0  #define xlat_malloc_array(_p, _t, _c) ((_t *) xlat_malloc(&_p, sizeof(_t) * _c))
  28
  29         /*
  30          * Valid if in +ve half of 48-bit address space, or above Xen-reserved area.
  31          * This is also valid for range checks (addr, addr+size). As long as the
  32          * start address is outside the Xen-reserved area, sequential accesses
  33          * (starting at addr) will hit a non-canonical address (and thus fault)
  34          * before ever reaching VIRT_START.
  35          */
  36         #define __addr_ok(addr) \
  37      0      (((unsigned long)(addr) < (1UL<<47)) || \
  38      0       ((unsigned long)(addr) >= HYPERVISOR_VIRT_END))
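Note: the comment above relies on the key canonical-addressing fact that, with
48-bit x86-64 addressing, only the low and high 2^47-byte halves of the address
space are canonical, and the Xen-reserved area sits at the bottom of the high
half. So an address that is neither below 1UL<<47 nor at/above
HYPERVISOR_VIRT_END would fault before a sequential access could reach
VIRT_START. A hedged sketch of the predicate with sample inputs (the
HYPERVISOR_VIRT_END value below is illustrative, not Xen's actual constant):

    #include <stdbool.h>
    #include <stdio.h>

    /* Illustrative placeholder; the real value comes from Xen's layout. */
    #define HYPERVISOR_VIRT_END 0xffff880000000000UL

    static bool addr_ok(unsigned long addr)
    {
        return (addr < (1UL << 47)) || (addr >= HYPERVISOR_VIRT_END);
    }

    int main(void)
    {
        printf("%d\n", addr_ok(0x00007fffffffffffUL)); /* 1: top of +ve half   */
        printf("%d\n", addr_ok(0xffff800000000000UL)); /* 0: in reserved area  */
        printf("%d\n", addr_ok(0xffff880000000000UL)); /* 1: above reservation */
        return 0;
    }
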
  39
  40         #define access_ok(addr, size) \
  41      0      (__addr_ok(addr) || is_compat_arg_xlat_range(addr, size))
  42
  43         #define array_access_ok(addr, count, size) \
  44      0      (likely(((count) ?: 0UL) < (~0UL / (size))) && \
  45      0       access_ok(addr, (count) * (size)))
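Note: array_access_ok() refuses to form count * size unless count stays below
~0UL / size, so the multiplication cannot wrap and make an enormous array look
like a small range; the (count) ?: 0UL term appears to be there to force the
comparison to unsigned long width, which also makes negative counts fail it. A
small sketch of the guard in isolation:

    #include <stdbool.h>

    /* The same wrap-around guard array_access_ok() applies before it
     * hands count * size to access_ok(). */
    static bool count_times_size_fits(unsigned long count, unsigned long size)
    {
        return count < (~0UL / size);
    }

    /* Example: 2^61 elements of 16 bytes would wrap a 64-bit product,
     * so count_times_size_fits(1UL << 61, 16) returns false. */
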
  46
  47         #define __compat_addr_ok(d, addr) \
  48      0      ((unsigned long)(addr) < HYPERVISOR_COMPAT_VIRT_START(d))
  49
  50         #define __compat_access_ok(d, addr, size) \
  51      0      __compat_addr_ok(d, (unsigned long)(addr) + ((size) ? (size) - 1 : 0))
  52
  53         #define compat_access_ok(addr, size) \
  54      0      __compat_access_ok(current->domain, addr, size)
  55
  56         #define compat_array_access_ok(addr,count,size) \
  57      0      (likely((count) < (~0U / (size))) && \
  58      0       compat_access_ok(addr, 0 + (count) * (size)))
  59
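Note: __compat_access_ok() bounds the last byte of the range against the compat
boundary rather than the one-past-the-end address, and the (size) ? (size) - 1 : 0
term keeps a zero-length access from underflowing to addr - 1. A standalone
sketch (the boundary constant is a hypothetical stand-in for the per-domain
HYPERVISOR_COMPAT_VIRT_START(d)):

    #include <stdbool.h>

    /* Hypothetical stand-in for HYPERVISOR_COMPAT_VIRT_START(d). */
    #define COMPAT_VIRT_START 0xf5800000UL

    static bool compat_range_ok(unsigned long addr, unsigned long size)
    {
        /* Last byte of the range; a zero-length access degenerates to
         * checking addr itself. */
        unsigned long last = addr + (size ? size - 1 : 0);

        return last < COMPAT_VIRT_START;
    }
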
  60      0  #define __put_user_size(x,ptr,size,retval,errret)                  \
  61      0  do {                                                               \
  62      0      retval = 0;                                                    \
  63      0      switch (size) {                                                \
  64      0      case 1: __put_user_asm(x,ptr,retval,"b","b","iq",errret);break;\
  65      0      case 2: __put_user_asm(x,ptr,retval,"w","w","ir",errret);break;\
  66      0      case 4: __put_user_asm(x,ptr,retval,"l","k","ir",errret);break;\
  67      0      case 8: __put_user_asm(x,ptr,retval,"q","","ir",errret);break; \
  68      0      default: __put_user_bad();                                     \
  69      0      }                                                              \
  70      0  } while (0)
  71
  72      0  #define __get_user_size(x,ptr,size,retval,errret)                  \
  73      0  do {                                                               \
  74      0      retval = 0;                                                    \
  75      0      switch (size) {                                                \
  76      0      case 1: __get_user_asm(x,ptr,retval,"b","b","=q",errret);break;\
  77      0      case 2: __get_user_asm(x,ptr,retval,"w","w","=r",errret);break;\
  78      0      case 4: __get_user_asm(x,ptr,retval,"l","k","=r",errret);break;\
  79      0      case 8: __get_user_asm(x,ptr,retval,"q","","=r",errret);break; \
  80      0      default: __get_user_bad();                                     \
  81      0      }                                                              \
  82      0  } while (0)
  83
  84         #endif /* __X86_64_UACCESS_H */
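
Note: __put_user_size() and __get_user_size() dispatch on the operand width so
that __put_user_asm()/__get_user_asm() (defined elsewhere) emit a single
correctly-sized mov, with the per-case strings ("b"/"w"/"l"/"q", "iq"/"=r", and
so on) selecting the instruction suffix, register form, and asm constraint.
Callers typically drive the switch with sizeof; a hedged sketch of such a
wrapper (not Xen's actual __put_user(), just the usual shape):

    /* Hypothetical wrapper: evaluates to 0 on success or -EFAULT on a
     * faulting store, assuming an errno-style EFAULT constant is in scope. */
    #define put_user_sketch(x, ptr) ({                                   \
        int __pu_err;                                                    \
        __put_user_size((x), (ptr), sizeof(*(ptr)), __pu_err, -EFAULT);  \
        __pu_err;                                                        \
    })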