debuggers.hg

view xen/include/asm-x86/hvm/support.h @ 13651:fde9e1d474b7

hvm: Define a global I/O access bitmap, allowing direct access to port 0x80.
Signed-off-by: Keir Fraser <keir@xensource.com>
author kfraser@localhost.localdomain
date Thu Jan 25 18:20:58 2007 +0000 (2007-01-25)
parents 56228886421d
children 99d36a153024
line source
1 /*
2 * support.h: HVM support routines used by VT-x and SVM.
3 *
4 * Leendert van Doorn, leendert@watson.ibm.com
5 * Copyright (c) 2005, International Business Machines Corporation.
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms and conditions of the GNU General Public License,
9 * version 2, as published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 * more details.
15 *
16 * You should have received a copy of the GNU General Public License along with
17 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
18 * Place - Suite 330, Boston, MA 02111-1307 USA.
19 */
21 #ifndef __ASM_X86_HVM_SUPPORT_H__
22 #define __ASM_X86_HVM_SUPPORT_H__
24 #include <xen/sched.h>
25 #include <asm/types.h>
26 #include <asm/regs.h>
27 #include <asm/processor.h>
/*
 * HVM_DEBUG gates the HVM_DBG_LOG() tracing machinery below: 1 in debug
 * builds, 0 when NDEBUG is defined (release builds).
 * NOTE(review): the original defined HVM_DEBUG as 1 in *both* branches,
 * which made the #ifndef pointless and left debug logging compiled into
 * release builds; the #else branch is now 0 as the conditional intends.
 */
#ifndef NDEBUG
#define HVM_DEBUG 1
#else
#define HVM_DEBUG 0
#endif
35 static inline shared_iopage_t *get_sp(struct domain *d)
36 {
37 return (shared_iopage_t *) d->arch.hvm_domain.shared_page_va;
38 }
40 static inline vcpu_iodata_t *get_vio(struct domain *d, unsigned long cpu)
41 {
42 return &get_sp(d)->vcpu_iodata[cpu];
43 }
/* XXX these are really VMX specific */
/*
 * Access-type encodings for MOV-DR/MOV-CR/CLTS/LMSW VM exits — the value
 * sits in the (1 << 4) position, presumably matching the VMX exit
 * qualification's access-type field (TODO confirm against the SDM).
 */
#define TYPE_MOV_TO_DR      (0 << 4)
#define TYPE_MOV_FROM_DR    (1 << 4)
#define TYPE_MOV_TO_CR      (0 << 4)
#define TYPE_MOV_FROM_CR    (1 << 4)
#define TYPE_CLTS           (2 << 4)
#define TYPE_LMSW           (3 << 4)

enum hval_bitmaps {
    EXCEPTION_BITMAP_TABLE = 0,
};
/*
 * Exception bitmap bits: bit n intercepts exception vector n.
 * Bits 9 and 15 are reserved vectors and deliberately absent.
 */
#define EXCEPTION_BITMAP_DE  (1 << 0)  /* Divide Error */
#define EXCEPTION_BITMAP_DB  (1 << 1)  /* Debug */
#define EXCEPTION_BITMAP_NMI (1 << 2)  /* NMI */
#define EXCEPTION_BITMAP_BP  (1 << 3)  /* Breakpoint */
#define EXCEPTION_BITMAP_OF  (1 << 4)  /* Overflow */
#define EXCEPTION_BITMAP_BR  (1 << 5)  /* BOUND Range Exceeded */
#define EXCEPTION_BITMAP_UD  (1 << 6)  /* Invalid Opcode */
#define EXCEPTION_BITMAP_NM  (1 << 7)  /* Device Not Available */
#define EXCEPTION_BITMAP_DF  (1 << 8)  /* Double Fault */
/* reserved */
#define EXCEPTION_BITMAP_TS  (1 << 10) /* Invalid TSS */
#define EXCEPTION_BITMAP_NP  (1 << 11) /* Segment Not Present */
#define EXCEPTION_BITMAP_SS  (1 << 12) /* Stack-Segment Fault */
#define EXCEPTION_BITMAP_GP  (1 << 13) /* General Protection */
#define EXCEPTION_BITMAP_PG  (1 << 14) /* Page Fault */
#define EXCEPTION_BITMAP_MF  (1 << 16) /* x87 FPU Floating-Point Error (Math Fault) */
#define EXCEPTION_BITMAP_AC  (1 << 17) /* Alignment Check */
#define EXCEPTION_BITMAP_MC  (1 << 18) /* Machine Check */
#define EXCEPTION_BITMAP_XF  (1 << 19) /* SIMD Floating-Point Exception */

/* Pending Debug exceptions */
#define PENDING_DEBUG_EXC_BP (1 << 12) /* break point */
#define PENDING_DEBUG_EXC_BS (1 << 14) /* Single step */
/*
 * Exceptions the monitor intercepts by default.  With XEN_DEBUGGER builds
 * the debug/GP vectors are additionally trapped so the debugger can field
 * them.
 */
#ifdef XEN_DEBUGGER
#define MONITOR_DEFAULT_EXCEPTION_BITMAP        \
    ( EXCEPTION_BITMAP_PG |                     \
      EXCEPTION_BITMAP_DB |                     \
      EXCEPTION_BITMAP_BP |                     \
      EXCEPTION_BITMAP_GP )
#else
#define MONITOR_DEFAULT_EXCEPTION_BITMAP        \
    ( EXCEPTION_BITMAP_PG |                     \
      EXCEPTION_BITMAP_BP )
#endif
/*
 * Pseudo error code meaning "deliver this event without an error code".
 * NOTE(review): parenthesized — the original bare -1 could bind
 * unexpectedly when the macro expands inside a larger expression.
 */
#define VMX_DELIVER_NO_ERROR_CODE  (-1)

#if HVM_DEBUG
/*
 * Bit flags selecting which subsystems' HVM_DBG_LOG() output is emitted;
 * a message is printed when (level & opt_hvm_debug_level) is non-zero.
 */
#define DBG_LEVEL_0                 (1 << 0)
#define DBG_LEVEL_1                 (1 << 1)
#define DBG_LEVEL_2                 (1 << 2)
#define DBG_LEVEL_3                 (1 << 3)
#define DBG_LEVEL_IO                (1 << 4)
#define DBG_LEVEL_VMMU              (1 << 5)
#define DBG_LEVEL_VLAPIC            (1 << 6)
#define DBG_LEVEL_VLAPIC_TIMER      (1 << 7)
#define DBG_LEVEL_VLAPIC_INTERRUPT  (1 << 8)
#define DBG_LEVEL_IOAPIC            (1 << 9)

extern unsigned int opt_hvm_debug_level;
/* Log a printf-style message tagged with the current domain/vcpu/function. */
#define HVM_DBG_LOG(level, _f, _a...)                                         \
    do {                                                                      \
        if ( unlikely((level) & opt_hvm_debug_level) )                        \
            printk("[HVM:%d.%d] <%s> " _f "\n",                               \
                   current->domain->domain_id, current->vcpu_id, __func__,    \
                   ## _a);                                                    \
    } while (0)
#else
/* Release builds: logging compiles away entirely. */
#define HVM_DBG_LOG(level, _f, _a...)
#endif
/*
 * Record @value in the current VCPU's VMEXIT trace slot @index.
 * NOTE(review): @index is now parenthesized — the original unparenthesized
 * use would mis-bind for arguments like `a + b`.
 */
#define TRACE_VMEXIT(index, value)                                  \
    (current->arch.hvm_vcpu.hvm_trace_values[(index)] = (value))
122 /* save/restore support */
124 //#define HVM_DEBUG_SUSPEND
126 extern int hvm_register_savevm(struct domain *d,
127 const char *idstr,
128 int instance_id,
129 int version_id,
130 SaveStateHandler *save_state,
131 LoadStateHandler *load_state,
132 void *opaque);
134 static inline void hvm_ctxt_seek(hvm_domain_context_t *h, unsigned int pos)
135 {
136 h->cur = pos;
137 }
139 static inline uint32_t hvm_ctxt_tell(hvm_domain_context_t *h)
140 {
141 return h->cur;
142 }
144 static inline int hvm_ctxt_end(hvm_domain_context_t *h)
145 {
146 return (h->cur >= h->size || h->cur >= HVM_CTXT_SIZE);
147 }
149 static inline void hvm_put_byte(hvm_domain_context_t *h, unsigned int i)
150 {
151 if (h->cur >= HVM_CTXT_SIZE) {
152 h->cur++;
153 return;
154 }
155 h->data[h->cur++] = (char)i;
156 }
158 static inline void hvm_put_8u(hvm_domain_context_t *h, uint8_t b)
159 {
160 hvm_put_byte(h, b);
161 }
163 static inline void hvm_put_16u(hvm_domain_context_t *h, uint16_t b)
164 {
165 hvm_put_8u(h, b >> 8);
166 hvm_put_8u(h, b);
167 }
169 static inline void hvm_put_32u(hvm_domain_context_t *h, uint32_t b)
170 {
171 hvm_put_16u(h, b >> 16);
172 hvm_put_16u(h, b);
173 }
175 static inline void hvm_put_64u(hvm_domain_context_t *h, uint64_t b)
176 {
177 hvm_put_32u(h, b >> 32);
178 hvm_put_32u(h, b);
179 }
181 static inline void hvm_put_buffer(hvm_domain_context_t *h, const char *buf, int len)
182 {
183 memcpy(&h->data[h->cur], buf, len);
184 h->cur += len;
185 }
187 static inline char hvm_get_byte(hvm_domain_context_t *h)
188 {
189 if (h->cur >= HVM_CTXT_SIZE) {
190 printk("hvm_get_byte overflow.\n");
191 return -1;
192 }
194 if (h->cur >= h->size) {
195 printk("hvm_get_byte exceed data area.\n");
196 return -1;
197 }
199 return h->data[h->cur++];
200 }
202 static inline uint8_t hvm_get_8u(hvm_domain_context_t *h)
203 {
204 return hvm_get_byte(h);
205 }
207 static inline uint16_t hvm_get_16u(hvm_domain_context_t *h)
208 {
209 uint16_t v;
210 v = hvm_get_8u(h) << 8;
211 v |= hvm_get_8u(h);
213 return v;
214 }
216 static inline uint32_t hvm_get_32u(hvm_domain_context_t *h)
217 {
218 uint32_t v;
219 v = hvm_get_16u(h) << 16;
220 v |= hvm_get_16u(h);
222 return v;
223 }
225 static inline uint64_t hvm_get_64u(hvm_domain_context_t *h)
226 {
227 uint64_t v;
228 v = (uint64_t)hvm_get_32u(h) << 32;
229 v |= hvm_get_32u(h);
231 return v;
232 }
234 static inline void hvm_get_buffer(hvm_domain_context_t *h, char *buf, int len)
235 {
236 memcpy(buf, &h->data[h->cur], len);
237 h->cur += len;
238 }
/* Serialize / deserialize a whole struct (by pointer) as raw bytes. */
#define hvm_put_struct(_h, _p) \
    hvm_put_buffer((_h), (char *)(_p), sizeof(*(_p)))
#define hvm_get_struct(_h, _p) \
    hvm_get_buffer((_h), (char *)(_p), sizeof(*(_p)))
245 int hvm_save(struct vcpu*, hvm_domain_context_t *h);
246 int hvm_load(struct vcpu*, hvm_domain_context_t *h);
248 int arch_sethvm_ctxt(struct vcpu *v, struct hvm_domain_context *c);
249 int arch_gethvm_ctxt(struct vcpu *v, struct hvm_domain_context *c);
251 void shpage_init(struct domain *d, shared_iopage_t *sp);
253 extern char hvm_io_bitmap[];
254 extern int hvm_enabled;
256 void hvm_enable(void);
258 int hvm_copy_to_guest_phys(paddr_t paddr, void *buf, int size);
259 int hvm_copy_from_guest_phys(void *buf, paddr_t paddr, int size);
260 int hvm_copy_to_guest_virt(unsigned long vaddr, void *buf, int size);
261 int hvm_copy_from_guest_virt(void *buf, unsigned long vaddr, int size);
263 void hvm_print_line(struct vcpu *v, const char c);
264 void hlt_timer_fn(void *data);
266 void hvm_do_hypercall(struct cpu_user_regs *pregs);
268 void hvm_hlt(unsigned long rflags);
269 void hvm_triple_fault(void);
271 #endif /* __ASM_X86_HVM_SUPPORT_H__ */