debuggers.hg

view tools/ioemu/target-i386-dm/exec-dm.c @ 13688:d1710eb35385

[HVM] Allow HVM guest to request invalidation of foreign mappings via
an I/O port write.
Signed-off-by: Dexuan Cui <dexuan.cui@intel.com>
author kaf24@localhost.localdomain
date Sat Jan 27 13:32:27 2007 +0000 (2007-01-27)
parents bb622907eb89
children 8e76e1b95b12
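
The description above says the guest triggers the invalidation with a plain I/O port write; the handler that services it is registered elsewhere in qemu-dm, not in this file. As a rough, hypothetical sketch of the guest side only (the real port number is not shown on this page, so HVM_INVALIDATE_PORT below is a placeholder):

#include <sys/io.h>                     /* ioperm(), outb() (Linux, x86) */

#define HVM_INVALIDATE_PORT 0x10        /* placeholder, not the port defined by this patch */

static void request_foreign_mapping_invalidation(void)
{
    /* A single byte written to the magic port asks qemu-dm to drop its
       foreign (mapcache) mappings; the value written is irrelevant here. */
    if (ioperm(HVM_INVALIDATE_PORT, 1, 1) == 0)
        outb(0, HVM_INVALIDATE_PORT);
}
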
/*
 * virtual page mapping and translated block handling
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include "config.h"
#ifdef _WIN32
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#endif
#include <stdlib.h>
#include <stdio.h>
#include <stdarg.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>
#include <inttypes.h>

#include <xen/hvm/e820.h>

#include "cpu.h"
#include "exec-all.h"
#include "vl.h"

//#define DEBUG_TB_INVALIDATE
//#define DEBUG_FLUSH
//#define DEBUG_TLB

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK
//#define DEBUG_TLB_CHECK

#ifndef CONFIG_DM
/* threshold to flush the translated code buffer */
#define CODE_GEN_BUFFER_MAX_SIZE (CODE_GEN_BUFFER_SIZE - CODE_GEN_MAX_SIZE)

#define SMC_BITMAP_USE_THRESHOLD 10

#define MMAP_AREA_START 0x00000000
#define MMAP_AREA_END 0xa8000000

TranslationBlock tbs[CODE_GEN_MAX_BLOCKS];
TranslationBlock *tb_hash[CODE_GEN_HASH_SIZE];
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
int nb_tbs;
/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;

uint8_t code_gen_buffer[CODE_GEN_BUFFER_SIZE];
uint8_t *code_gen_ptr;
#endif /* !CONFIG_DM */

uint64_t phys_ram_size;
extern uint64_t ram_size;
int phys_ram_fd;
uint8_t *phys_ram_base;
uint8_t *phys_ram_dirty;

CPUState *first_cpu;
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
CPUState *cpu_single_env;

typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;

typedef struct PhysPageDesc {
    /* offset in host memory of the page + io_index in the low 12 bits */
    unsigned long phys_offset;
} PhysPageDesc;

typedef struct VirtPageDesc {
    /* physical address of code page. It is valid only if 'valid_tag'
       matches 'virt_valid_tag' */
    target_ulong phys_addr;
    unsigned int valid_tag;
#if !defined(CONFIG_SOFTMMU)
    /* original page access rights. It is valid only if 'valid_tag'
       matches 'virt_valid_tag' */
    unsigned int prot;
#endif
} VirtPageDesc;

#define L2_BITS 10
#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)

#define L1_SIZE (1 << L1_BITS)
#define L2_SIZE (1 << L2_BITS)

unsigned long qemu_real_host_page_size;
unsigned long qemu_host_page_bits;
unsigned long qemu_host_page_size;
unsigned long qemu_host_page_mask;

/* io memory support */
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
void *io_mem_opaque[IO_MEM_NB_ENTRIES];
static int io_mem_nb = 1;

/* log support */
char *logfilename = "/tmp/qemu.log";
FILE *logfile;
int loglevel;

#ifdef MAPCACHE
pthread_mutex_t mapcache_mutex;
#endif

void cpu_exec_init(CPUState *env)
{
    CPUState **penv;
    int cpu_index;
#ifdef MAPCACHE
    pthread_mutexattr_t mxattr;
#endif

    env->next_cpu = NULL;
    penv = &first_cpu;
    cpu_index = 0;
    while (*penv != NULL) {
        penv = (CPUState **)&(*penv)->next_cpu;
        cpu_index++;
    }
    env->cpu_index = cpu_index;
    *penv = env;

    /* alloc dirty bits array */
    phys_ram_dirty = qemu_malloc(phys_ram_size >> TARGET_PAGE_BITS);

#ifdef MAPCACHE
    /* setup memory access mutex to protect mapcache */
    pthread_mutexattr_init(&mxattr);
    pthread_mutexattr_settype(&mxattr, PTHREAD_MUTEX_RECURSIVE);
    pthread_mutex_init(&mapcache_mutex, &mxattr);
    pthread_mutexattr_destroy(&mxattr);
#endif
}

/* enable or disable low levels log */
void cpu_set_log(int log_flags)
{
    loglevel = log_flags;
    if (!logfile) {
        logfile = fopen(logfilename, "w");
        if (!logfile) {
            perror(logfilename);
            _exit(1);
        }
#if !defined(CONFIG_SOFTMMU)
        /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
        {
            static uint8_t logfile_buf[4096];
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
        }
#else
        setvbuf(logfile, NULL, _IOLBF, 0);
#endif
        stdout = logfile;
        stderr = logfile;
    }
}

void cpu_set_log_filename(const char *filename)
{
    logfilename = strdup(filename);
}

/* mask must never be zero, except for A20 change call */
void cpu_interrupt(CPUState *env, int mask)
{
    env->interrupt_request |= mask;
}

void cpu_reset_interrupt(CPUState *env, int mask)
{
    env->interrupt_request &= ~mask;
}

CPULogItem cpu_log_items[] = {
    { CPU_LOG_TB_OUT_ASM, "out_asm",
      "show generated host assembly code for each compiled TB" },
    { CPU_LOG_TB_IN_ASM, "in_asm",
      "show target assembly code for each compiled TB" },
    { CPU_LOG_TB_OP, "op",
      "show micro ops for each compiled TB (only usable if 'in_asm' used)" },
#ifdef TARGET_I386
    { CPU_LOG_TB_OP_OPT, "op_opt",
      "show micro ops after optimization for each compiled TB" },
#endif
    { CPU_LOG_INT, "int",
      "show interrupts/exceptions in short format" },
    { CPU_LOG_EXEC, "exec",
      "show trace before each executed TB (lots of logs)" },
    { CPU_LOG_TB_CPU, "cpu",
      "show CPU state before block translation" },
#ifdef TARGET_I386
    { CPU_LOG_PCALL, "pcall",
      "show protected mode far calls/returns/exceptions" },
#endif
#ifdef DEBUG_IOPORT
    { CPU_LOG_IOPORT, "ioport",
      "show all i/o port accesses" },
#endif
    { 0, NULL, NULL },
};

static int cmp1(const char *s1, int n, const char *s2)
{
    if (strlen(s2) != n)
        return 0;
    return memcmp(s1, s2, n) == 0;
}

/* takes a comma separated list of log masks. Return 0 if error. */
int cpu_str_to_log_mask(const char *str)
{
    CPULogItem *item;
    int mask;
    const char *p, *p1;

    p = str;
    mask = 0;
    for(;;) {
        p1 = strchr(p, ',');
        if (!p1)
            p1 = p + strlen(p);
        if(cmp1(p, p1 - p, "all")) {
            for(item = cpu_log_items; item->mask != 0; item++) {
                mask |= item->mask;
            }
        } else {
            for(item = cpu_log_items; item->mask != 0; item++) {
                if (cmp1(p, p1 - p, item->name))
                    goto found;
            }
            return 0;
        }
    found:
        mask |= item->mask;
        if (*p1 != ',')
            break;
        p = p1 + 1;
    }
    return mask;
}
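
/*
 * Illustrative sketch (not in the original source): how a caller such as the
 * command-line option parser might turn a "-d" style string into log output
 * using cpu_str_to_log_mask() and cpu_set_log() above.  The helper name is
 * made up for the example.
 */
#if 0
static void example_enable_logging(const char *arg)
{
    int mask = cpu_str_to_log_mask(arg);   /* e.g. arg = "in_asm,int" */
    if (mask == 0)
        fprintf(stderr, "unknown log item in '%s'\n", arg);
    else
        cpu_set_log(mask);                 /* opens /tmp/qemu.log on first use */
}
#endif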

void cpu_abort(CPUState *env, const char *fmt, ...)
{
    va_list ap;

    va_start(ap, fmt);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    va_end(ap);
    abort();
}

/* XXX: Simple implementation. Fix later */
#define MAX_MMIO 32
struct mmio_space {
    target_phys_addr_t start;
    unsigned long size;
    unsigned long io_index;
} mmio[MAX_MMIO];
unsigned long mmio_cnt;

/* register physical memory. 'size' must be a multiple of the target
   page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
   io memory page */
void cpu_register_physical_memory(target_phys_addr_t start_addr,
                                  unsigned long size,
                                  unsigned long phys_offset)
{
    int i;

    for (i = 0; i < mmio_cnt; i++) {
        if (mmio[i].start == start_addr) {
            mmio[i].io_index = phys_offset;
            mmio[i].size = size;
            return;
        }
    }

    if (mmio_cnt == MAX_MMIO) {
        fprintf(logfile, "too many mmio regions\n");
        exit(-1);
    }

    mmio[mmio_cnt].io_index = phys_offset;
    mmio[mmio_cnt].start = start_addr;
    mmio[mmio_cnt++].size = size;
}

/* mem_read and mem_write are arrays of functions containing the
   function to access byte (index 0), word (index 1) and dword (index
   2). All functions must be supplied. If io_index is non zero, the
   corresponding io zone is modified. If it is zero, a new io zone is
   allocated. The return value can be used with
   cpu_register_physical_memory(). (-1) is returned if error. */
int cpu_register_io_memory(int io_index,
                           CPUReadMemoryFunc **mem_read,
                           CPUWriteMemoryFunc **mem_write,
                           void *opaque)
{
    int i;

    if (io_index <= 0) {
        /* allocating a new slot: fail if the table is already full */
        if (io_mem_nb >= IO_MEM_NB_ENTRIES)
            return -1;
        io_index = io_mem_nb++;
    } else {
        if (io_index >= IO_MEM_NB_ENTRIES)
            return -1;
    }

    for(i = 0; i < 3; i++) {
        io_mem_read[io_index][i] = mem_read[i];
        io_mem_write[io_index][i] = mem_write[i];
    }
    io_mem_opaque[io_index] = opaque;
    return io_index << IO_MEM_SHIFT;
}

CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
{
    return io_mem_write[io_index >> IO_MEM_SHIFT];
}

CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
{
    return io_mem_read[io_index >> IO_MEM_SHIFT];
}
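
/*
 * Illustrative sketch (not in the original source): how a device model is
 * expected to combine cpu_register_io_memory() and
 * cpu_register_physical_memory() above.  The callbacks and the 0x101000 base
 * address are invented for the example.
 */
#if 0
static uint32_t example_mmio_readb(void *opaque, target_phys_addr_t addr)
{
    return 0xff;                               /* byte-wide read */
}

static void example_mmio_writeb(void *opaque, target_phys_addr_t addr,
                                uint32_t val)
{
    /* react to a byte-wide guest write */
}

static CPUReadMemoryFunc *example_mmio_read[3] = {
    example_mmio_readb, example_mmio_readb, example_mmio_readb,
};
static CPUWriteMemoryFunc *example_mmio_write[3] = {
    example_mmio_writeb, example_mmio_writeb, example_mmio_writeb,
};

static void example_register_mmio(void)
{
    /* io_index 0 asks for a fresh slot; the return value is then attached
       to a guest-physical range so iomem_index() can find it later */
    int io_index = cpu_register_io_memory(0, example_mmio_read,
                                          example_mmio_write, NULL);
    cpu_register_physical_memory(0x101000, TARGET_PAGE_SIZE, io_index);
}
#endif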

#ifdef __ia64__
/* IA64 has separate I- and D-caches, with coherence maintained by the DMA
 * controller. To emulate the behaviour the guest OS expects, we need to flush
 * the I/D caches here.
 */
static void sync_icache(unsigned long address, int len)
{
    int l;

    for(l = 0; l < (len + 32); l += 32)
        __ia64_fc(address + l);

    ia64_sync_i();
    ia64_srlz_i();
}
#endif

/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, flags;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return;
            /* copy only up to the end of the current, validated page */
            memcpy((uint8_t *)addr, buf, l);
        } else {
            if (!(flags & PAGE_READ))
                return;
            memcpy(buf, (uint8_t *)addr, l);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
#else

int iomem_index(target_phys_addr_t addr)
{
    int i;

    for (i = 0; i < mmio_cnt; i++) {
        unsigned long start, end;

        start = mmio[i].start;
        end = mmio[i].start + mmio[i].size;

        if ((addr >= start) && (addr < end)) {
            return (mmio[i].io_index >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        }
    }
    return 0;
}

static inline int paddr_is_ram(target_phys_addr_t addr)
{
    /* Is this guest physical address RAM-backed? */
#if defined(CONFIG_DM) && (defined(__i386__) || defined(__x86_64__))
    if (ram_size <= HVM_BELOW_4G_RAM_END)
        /* RAM is contiguous */
        return (addr < ram_size);
    else
        /* There is RAM below and above the MMIO hole */
        return ((addr < HVM_BELOW_4G_MMIO_START) ||
                ((addr >= HVM_BELOW_4G_MMIO_START + HVM_BELOW_4G_MMIO_LENGTH)
                 && (addr < ram_size + HVM_BELOW_4G_MMIO_LENGTH)));
#else
    return (addr < ram_size);
#endif
}

#if defined(__i386__) || defined(__x86_64__)
#define phys_ram_addr(x) (qemu_map_cache(x))
#elif defined(__ia64__)
#define phys_ram_addr(x) (phys_ram_base + (x))
#endif

void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, io_index;
    uint8_t *ptr;
    uint32_t val;

    mapcache_lock();

    while (len > 0) {
        /* How much can we copy before the next page boundary? */
        l = TARGET_PAGE_SIZE - (addr & ~TARGET_PAGE_MASK);
        if (l > len)
            l = len;

        io_index = iomem_index(addr);
        if (is_write) {
            if (io_index) {
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit write access */
                    val = ldl_raw(buf);
                    io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit write access */
                    val = lduw_raw(buf);
                    io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
                    l = 2;
                } else {
                    /* 8 bit write access */
                    val = ldub_raw(buf);
                    io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
                    l = 1;
                }
            } else if (paddr_is_ram(addr)) {
                /* Writing to RAM */
                ptr = phys_ram_addr(addr);
                memcpy(ptr, buf, l);
#ifdef __ia64__
                sync_icache(ptr, l);
#endif
            }
        } else {
            if (io_index) {
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
                    stl_raw(buf, val);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
                    stw_raw(buf, val);
                    l = 2;
                } else {
                    /* 8 bit read access */
                    val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
                    stb_raw(buf, val);
                    l = 1;
                }
            } else if (paddr_is_ram(addr)) {
                /* Reading from RAM */
                ptr = phys_ram_addr(addr);
                memcpy(buf, ptr, l);
            } else {
                /* Neither RAM nor known MMIO space */
                memset(buf, 0xff, len);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }

    mapcache_unlock();
}
#endif
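
/*
 * Illustrative sketch (not in the original source): reading a 32-bit value
 * from guest-physical memory through the slow path above; the helper name is
 * invented for the example.
 */
#if 0
static uint32_t example_read_guest_u32(target_phys_addr_t gpa)
{
    uint32_t val;

    /* is_write = 0: routed through RAM (via the mapcache) or MMIO callbacks */
    cpu_physical_memory_rw(gpa, (uint8_t *)&val, sizeof(val), 0);
    return val;
}
#endif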

/* virtual memory access for debug */
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    target_ulong page, phys_addr;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
                               buf, l, is_write);
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
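
/*
 * Illustrative sketch (not in the original source): a debugger front end
 * (e.g. the gdb stub) would read guest-virtual memory through the wrapper
 * above, which translates each page and falls back to
 * cpu_physical_memory_rw().  The helper name is invented for the example.
 */
#if 0
static int example_peek_guest_u32(CPUState *env, target_ulong vaddr,
                                  uint32_t *out)
{
    /* is_write = 0; returns -1 if the page is not mapped */
    return cpu_memory_rw_debug(env, vaddr, (uint8_t *)out, sizeof(*out), 0);
}
#endif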

void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                     int dirty_flags)
{
    unsigned long length;
    int i, mask, len;
    uint8_t *p;

    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    if (length == 0)
        return;
    mask = ~dirty_flags;
    p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
    len = length >> TARGET_PAGE_BITS;
    for(i = 0; i < len; i++)
        p[i] &= mask;

    return;
}
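
/*
 * Illustrative sketch (not in the original source): how a display-style
 * client could consume the phys_ram_dirty bitmap together with
 * cpu_physical_memory_reset_dirty() above.  EXAMPLE_DIRTY_FLAG stands in for
 * whichever dirty bit the caller actually owns.
 */
#if 0
#define EXAMPLE_DIRTY_FLAG 0x01   /* hypothetical dirty bit for this client */

static void example_scan_and_clear_dirty(ram_addr_t start, ram_addr_t end)
{
    ram_addr_t addr;

    for (addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        if (phys_ram_dirty[addr >> TARGET_PAGE_BITS] & EXAMPLE_DIRTY_FLAG) {
            /* page was written since the last scan: refresh it here */
        }
    }
    /* forget the pages just handled so the next scan only sees new writes */
    cpu_physical_memory_reset_dirty(start, end, EXAMPLE_DIRTY_FLAG);
}
#endif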