debuggers.hg

view tools/ioemu/patches/qemu-target-i386-dm @ 0:7d21f7218375

Exact replica of unstable on 051908 + README-this
author Mukesh Rathor
date Mon May 19 15:34:57 2008 -0700 (2008-05-19)
parents
children
line source
1 Index: ioemu/Makefile.target
2 ===================================================================
3 --- ioemu.orig/Makefile.target 2007-05-11 10:00:33.000000000 +0100
4 +++ ioemu/Makefile.target 2007-05-11 10:04:05.000000000 +0100
5 @@ -65,6 +65,8 @@
6 QEMU_SYSTEM=qemu-fast
7 endif
9 +QEMU_SYSTEM=qemu-dm
10 +
11 ifdef CONFIG_USER_ONLY
12 PROGS=$(QEMU_USER)
13 else
14 @@ -321,6 +323,9 @@
15 OBJS+=gdbstub.o
16 endif
18 +# qemu-dm objects
19 +LIBOBJS=helper2.o exec-dm.o i8259-dm.o
20 +
21 all: $(PROGS)
23 $(QEMU_USER): $(OBJS)
24 @@ -381,7 +386,7 @@
25 ifeq ($(TARGET_BASE_ARCH), i386)
26 # Hardware support
27 VL_OBJS+= ide.o pckbd.o ps2.o vga.o $(SOUND_HW) dma.o $(AUDIODRV)
28 -VL_OBJS+= fdc.o mc146818rtc.o serial.o i8259.o i8254.o pcspk.o pc.o
29 +VL_OBJS+= fdc.o mc146818rtc.o serial.o i8254.o pcspk.o pc.o
30 VL_OBJS+= cirrus_vga.o mixeng.o apic.o parallel.o acpi.o piix_pci.o
31 VL_OBJS+= usb-uhci.o smbus_eeprom.o
32 CPPFLAGS += -DHAS_AUDIO
33 Index: ioemu/configure
34 ===================================================================
35 --- ioemu.orig/configure 2007-05-11 10:00:33.000000000 +0100
36 +++ ioemu/configure 2007-05-11 10:04:04.000000000 +0100
37 @@ -426,6 +426,8 @@
38 if [ "$darwin_user" = "yes" ] ; then
39 target_list="i386-darwin-user ppc-darwin-user $target_list"
40 fi
41 +# the i386-dm target
42 + target_list="i386-dm"
43 else
44 target_list=`echo "$target_list" | sed -e 's/,/ /g'`
45 fi
46 Index: ioemu/monitor.c
47 ===================================================================
48 --- ioemu.orig/monitor.c 2007-05-11 10:00:33.000000000 +0100
49 +++ ioemu/monitor.c 2007-05-11 10:04:06.000000000 +0100
50 @@ -1325,6 +1325,10 @@
51 "", "show which guest mouse is receiving events" },
52 { "vnc", "", do_info_vnc,
53 "", "show the vnc server status"},
54 +#ifdef CONFIG_DM
55 + { "hvmiopage", "", sp_info,
56 + "", "show HVM device model shared page info" },
57 +#endif /* CONFIG_DM */
58 { NULL, NULL, },
59 };
61 Index: ioemu/vl.c
62 ===================================================================
63 --- ioemu.orig/vl.c 2007-05-11 10:00:33.000000000 +0100
64 +++ ioemu/vl.c 2007-05-11 10:04:06.000000000 +0100
65 @@ -88,7 +88,7 @@
67 #include "exec-all.h"
69 -#define DEFAULT_NETWORK_SCRIPT "/etc/qemu-ifup"
70 +#define DEFAULT_NETWORK_SCRIPT "/etc/xen/qemu-ifup"
71 #ifdef __sun__
72 #define SMBD_COMMAND "/usr/sfw/sbin/smbd"
73 #else
74 @@ -5805,7 +5805,7 @@
76 static QEMUResetEntry *first_reset_entry;
77 static int reset_requested;
78 -static int shutdown_requested;
79 +int shutdown_requested;
80 static int powerdown_requested;
82 void qemu_register_reset(QEMUResetHandler *func, void *opaque)
83 @@ -5957,6 +5957,7 @@
84 qemu_get_clock(rt_clock));
85 }
87 +#ifndef CONFIG_DM
88 static CPUState *cur_cpu;
90 int main_loop(void)
91 @@ -6031,6 +6032,7 @@
92 cpu_disable_ticks();
93 return ret;
94 }
95 +#endif /* !CONFIG_DM */
97 void help(void)
98 {
99 Index: ioemu/vl.h
100 ===================================================================
101 --- ioemu.orig/vl.h 2007-05-11 10:00:33.000000000 +0100
102 +++ ioemu/vl.h 2007-05-11 10:04:06.000000000 +0100
103 @@ -37,6 +37,8 @@
104 #include <unistd.h>
105 #include <fcntl.h>
106 #include <sys/stat.h>
107 +#include "xenctrl.h"
108 +#include "xs.h"
110 #ifndef O_LARGEFILE
111 #define O_LARGEFILE 0
112 @@ -144,6 +146,11 @@
114 void main_loop_wait(int timeout);
116 +extern FILE *logfile;
117 +
118 +extern int xc_handle;
119 +extern int domid;
120 +
121 extern int ram_size;
122 extern int bios_size;
123 extern int rtc_utc;
124 @@ -1023,6 +1030,7 @@
125 uint32_t pic_intack_read(PicState2 *s);
126 void pic_info(void);
127 void irq_info(void);
128 +void sp_info(void);
130 /* APIC */
131 typedef struct IOAPICState IOAPICState;
132 Index: ioemu/target-i386-dm/cpu.h
133 ===================================================================
134 --- /dev/null 1970-01-01 00:00:00.000000000 +0000
135 +++ ioemu/target-i386-dm/cpu.h 2007-05-11 10:04:06.000000000 +0100
136 @@ -0,0 +1,84 @@
137 +/*
138 + * i386 virtual CPU header
139 + *
140 + * Copyright (c) 2003 Fabrice Bellard
141 + *
142 + * This library is free software; you can redistribute it and/or
143 + * modify it under the terms of the GNU Lesser General Public
144 + * License as published by the Free Software Foundation; either
145 + * version 2 of the License, or (at your option) any later version.
146 + *
147 + * This library is distributed in the hope that it will be useful,
148 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
149 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
150 + * Lesser General Public License for more details.
151 + *
152 + * You should have received a copy of the GNU Lesser General Public
153 + * License along with this library; if not, write to the Free Software
154 + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
155 + */
156 +#ifndef CPU_I386_H
157 +#define CPU_I386_H
158 +
159 +#include "config.h"
160 +
161 +#ifdef TARGET_X86_64
162 +#define TARGET_LONG_BITS 64
163 +#else
164 +#define TARGET_LONG_BITS 32
165 +#endif
166 +
167 +/* target supports implicit self modifying code */
168 +#define TARGET_HAS_SMC
169 +/* support for self modifying code even if the modified instruction is
170 + close to the modifying instruction */
171 +#define TARGET_HAS_PRECISE_SMC
172 +
173 +#include "cpu-defs.h"
174 +
175 +#include "softfloat.h"
176 +
177 +#if defined(__i386__) && !defined(CONFIG_SOFTMMU)
178 +#define USE_CODE_COPY
179 +#endif
180 +
181 +#ifdef USE_X86LDOUBLE
182 +typedef floatx80 CPU86_LDouble;
183 +#else
184 +typedef float64 CPU86_LDouble;
185 +#endif
186 +
187 +/* Empty for now */
188 +typedef struct CPUX86State {
189 + uint32_t a20_mask;
190 +
191 + int interrupt_request;
192 +
193 + CPU_COMMON
194 +} CPUX86State;
195 +
196 +CPUX86State *cpu_x86_init(void);
197 +int cpu_x86_exec(CPUX86State *s);
198 +void cpu_x86_close(CPUX86State *s);
199 +int cpu_get_pic_interrupt(CPUX86State *s);
200 +/* MSDOS compatibility mode FPU exception support */
201 +void cpu_set_ferr(CPUX86State *s);
202 +
203 +void cpu_x86_set_a20(CPUX86State *env, int a20_state);
204 +
205 +#ifndef IN_OP_I386
206 +void cpu_x86_outb(CPUX86State *env, int addr, int val);
207 +void cpu_x86_outw(CPUX86State *env, int addr, int val);
208 +void cpu_x86_outl(CPUX86State *env, int addr, int val);
209 +int cpu_x86_inb(CPUX86State *env, int addr);
210 +int cpu_x86_inw(CPUX86State *env, int addr);
211 +int cpu_x86_inl(CPUX86State *env, int addr);
212 +#endif
213 +
214 +/* helper2.c */
215 +int main_loop(void);
216 +
217 +#define TARGET_PAGE_BITS 12
218 +#include "cpu-all.h"
219 +
220 +#endif /* CPU_I386_H */
221 Index: ioemu/target-i386-dm/exec-dm.c
222 ===================================================================
223 --- /dev/null 1970-01-01 00:00:00.000000000 +0000
224 +++ ioemu/target-i386-dm/exec-dm.c 2007-05-11 10:04:04.000000000 +0100
225 @@ -0,0 +1,545 @@
226 +/*
227 + * virtual page mapping and translated block handling
228 + *
229 + * Copyright (c) 2003 Fabrice Bellard
230 + *
231 + * This library is free software; you can redistribute it and/or
232 + * modify it under the terms of the GNU Lesser General Public
233 + * License as published by the Free Software Foundation; either
234 + * version 2 of the License, or (at your option) any later version.
235 + *
236 + * This library is distributed in the hope that it will be useful,
237 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
238 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
239 + * Lesser General Public License for more details.
240 + *
241 + * You should have received a copy of the GNU Lesser General Public
242 + * License along with this library; if not, write to the Free Software
243 + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
244 + */
245 +#include "config.h"
246 +#ifdef _WIN32
247 +#include <windows.h>
248 +#else
249 +#include <sys/types.h>
250 +#include <sys/mman.h>
251 +#endif
252 +#include <stdlib.h>
253 +#include <stdio.h>
254 +#include <stdarg.h>
255 +#include <string.h>
256 +#include <errno.h>
257 +#include <unistd.h>
258 +#include <inttypes.h>
259 +
260 +#include <xen/hvm/e820.h>
261 +
262 +#include "cpu.h"
263 +#include "exec-all.h"
264 +
265 +//#define DEBUG_TB_INVALIDATE
266 +//#define DEBUG_FLUSH
267 +//#define DEBUG_TLB
268 +
269 +/* make various TB consistency checks */
270 +//#define DEBUG_TB_CHECK
271 +//#define DEBUG_TLB_CHECK
272 +
273 +#ifndef CONFIG_DM
274 +/* threshold to flush the translated code buffer */
275 +#define CODE_GEN_BUFFER_MAX_SIZE (CODE_GEN_BUFFER_SIZE - CODE_GEN_MAX_SIZE)
276 +
277 +#define SMC_BITMAP_USE_THRESHOLD 10
278 +
279 +#define MMAP_AREA_START 0x00000000
280 +#define MMAP_AREA_END 0xa8000000
281 +
282 +TranslationBlock tbs[CODE_GEN_MAX_BLOCKS];
283 +TranslationBlock *tb_hash[CODE_GEN_HASH_SIZE];
284 +TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
285 +int nb_tbs;
286 +/* any access to the tbs or the page table must use this lock */
287 +spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
288 +
289 +uint8_t code_gen_buffer[CODE_GEN_BUFFER_SIZE];
290 +uint8_t *code_gen_ptr;
291 +#endif /* !CONFIG_DM */
292 +
293 +uint64_t phys_ram_size;
294 +extern uint64_t ram_size;
295 +int phys_ram_fd;
296 +uint8_t *phys_ram_base;
297 +uint8_t *phys_ram_dirty;
298 +
299 +CPUState *first_cpu;
300 +/* current CPU in the current thread. It is only valid inside
301 + cpu_exec() */
302 +CPUState *cpu_single_env;
303 +
304 +typedef struct PageDesc {
305 + /* list of TBs intersecting this ram page */
306 + TranslationBlock *first_tb;
307 + /* in order to optimize self modifying code, we count the number
308 + of lookups we do to a given page to use a bitmap */
309 + unsigned int code_write_count;
310 + uint8_t *code_bitmap;
311 +#if defined(CONFIG_USER_ONLY)
312 + unsigned long flags;
313 +#endif
314 +} PageDesc;
315 +
316 +typedef struct PhysPageDesc {
317 + /* offset in host memory of the page + io_index in the low 12 bits */
318 + unsigned long phys_offset;
319 +} PhysPageDesc;
320 +
321 +typedef struct VirtPageDesc {
322 + /* physical address of code page. It is valid only if 'valid_tag'
323 + matches 'virt_valid_tag' */
324 + target_ulong phys_addr;
325 + unsigned int valid_tag;
326 +#if !defined(CONFIG_SOFTMMU)
327 + /* original page access rights. It is valid only if 'valid_tag'
328 + matches 'virt_valid_tag' */
329 + unsigned int prot;
330 +#endif
331 +} VirtPageDesc;
332 +
333 +#define L2_BITS 10
334 +#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
335 +
336 +#define L1_SIZE (1 << L1_BITS)
337 +#define L2_SIZE (1 << L2_BITS)
338 +
339 +unsigned long qemu_real_host_page_size;
340 +unsigned long qemu_host_page_bits;
341 +unsigned long qemu_host_page_size;
342 +unsigned long qemu_host_page_mask;
343 +
344 +/* io memory support */
345 +CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
346 +CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
347 +void *io_mem_opaque[IO_MEM_NB_ENTRIES];
348 +static int io_mem_nb = 1;
349 +
350 +/* log support */
351 +char *logfilename = "/tmp/qemu.log";
352 +FILE *logfile;
353 +int loglevel;
354 +
355 +void cpu_exec_init(CPUState *env)
356 +{
357 + CPUState **penv;
358 + int cpu_index;
359 +
360 + env->next_cpu = NULL;
361 + penv = &first_cpu;
362 + cpu_index = 0;
363 + while (*penv != NULL) {
364 + penv = (CPUState **)&(*penv)->next_cpu;
365 + cpu_index++;
366 + }
367 + env->cpu_index = cpu_index;
368 + *penv = env;
369 +
370 + /* alloc dirty bits array */
371 + phys_ram_dirty = qemu_malloc(phys_ram_size >> TARGET_PAGE_BITS);
372 +}
373 +
374 +/* enable or disable low levels log */
375 +void cpu_set_log(int log_flags)
376 +{
377 + loglevel = log_flags;
378 + if (!logfile) {
379 + logfile = fopen(logfilename, "w");
380 + if (!logfile) {
381 + perror(logfilename);
382 + _exit(1);
383 + }
384 +#if !defined(CONFIG_SOFTMMU)
385 + /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
386 + {
387 + static uint8_t logfile_buf[4096];
388 + setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
389 + }
390 +#else
391 + setvbuf(logfile, NULL, _IOLBF, 0);
392 +#endif
393 + stdout = logfile;
394 + stderr = logfile;
395 + }
396 +}
397 +
398 +void cpu_set_log_filename(const char *filename)
399 +{
400 + logfilename = strdup(filename);
401 +}
402 +
403 +/* mask must never be zero, except for A20 change call */
404 +void cpu_interrupt(CPUState *env, int mask)
405 +{
406 + env->interrupt_request |= mask;
407 +}
408 +
409 +void cpu_reset_interrupt(CPUState *env, int mask)
410 +{
411 + env->interrupt_request &= ~mask;
412 +}
413 +
414 +CPULogItem cpu_log_items[] = {
415 + { CPU_LOG_TB_OUT_ASM, "out_asm",
416 + "show generated host assembly code for each compiled TB" },
417 + { CPU_LOG_TB_IN_ASM, "in_asm",
418 + "show target assembly code for each compiled TB" },
419 + { CPU_LOG_TB_OP, "op",
420 + "show micro ops for each compiled TB (only usable if 'in_asm' used)" },
421 +#ifdef TARGET_I386
422 + { CPU_LOG_TB_OP_OPT, "op_opt",
423 + "show micro ops after optimization for each compiled TB" },
424 +#endif
425 + { CPU_LOG_INT, "int",
426 + "show interrupts/exceptions in short format" },
427 + { CPU_LOG_EXEC, "exec",
428 + "show trace before each executed TB (lots of logs)" },
429 + { CPU_LOG_TB_CPU, "cpu",
430 + "show CPU state before bloc translation" },
431 +#ifdef TARGET_I386
432 + { CPU_LOG_PCALL, "pcall",
433 + "show protected mode far calls/returns/exceptions" },
434 +#endif
435 +#ifdef DEBUG_IOPORT
436 + { CPU_LOG_IOPORT, "ioport",
437 + "show all i/o ports accesses" },
438 +#endif
439 + { 0, NULL, NULL },
440 +};
441 +
442 +static int cmp1(const char *s1, int n, const char *s2)
443 +{
444 + if (strlen(s2) != n)
445 + return 0;
446 + return memcmp(s1, s2, n) == 0;
447 +}
448 +
449 +/* takes a comma separated list of log masks. Return 0 if error. */
450 +int cpu_str_to_log_mask(const char *str)
451 +{
452 + CPULogItem *item;
453 + int mask;
454 + const char *p, *p1;
455 +
456 + p = str;
457 + mask = 0;
458 + for(;;) {
459 + p1 = strchr(p, ',');
460 + if (!p1)
461 + p1 = p + strlen(p);
462 + if(cmp1(p,p1-p,"all")) {
463 + for(item = cpu_log_items; item->mask != 0; item++) {
464 + mask |= item->mask;
465 + }
466 + } else {
467 + for(item = cpu_log_items; item->mask != 0; item++) {
468 + if (cmp1(p, p1 - p, item->name))
469 + goto found;
470 + }
471 + return 0;
472 + }
473 + found:
474 + mask |= item->mask;
475 + if (*p1 != ',')
476 + break;
477 + p = p1 + 1;
478 + }
479 + return mask;
480 +}
481 +
482 +void cpu_abort(CPUState *env, const char *fmt, ...)
483 +{
484 + va_list ap;
485 +
486 + va_start(ap, fmt);
487 + fprintf(stderr, "qemu: fatal: ");
488 + vfprintf(stderr, fmt, ap);
489 + fprintf(stderr, "\n");
490 + va_end(ap);
491 + abort();
492 +}
493 +
494 +
495 +/* XXX: Simple implementation. Fix later */
496 +#define MAX_MMIO 32
497 +struct mmio_space {
498 + target_phys_addr_t start;
499 + unsigned long size;
500 + unsigned long io_index;
501 +} mmio[MAX_MMIO];
502 +unsigned long mmio_cnt;
503 +
504 +/* register physical memory. 'size' must be a multiple of the target
505 + page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
506 + io memory page */
507 +void cpu_register_physical_memory(target_phys_addr_t start_addr,
508 + unsigned long size,
509 + unsigned long phys_offset)
510 +{
511 + int i;
512 +
513 + for (i = 0; i < mmio_cnt; i++) {
514 + if(mmio[i].start == start_addr) {
515 + mmio[i].io_index = phys_offset;
516 + mmio[i].size = size;
517 + return;
518 + }
519 + }
520 +
521 + if (mmio_cnt == MAX_MMIO) {
522 + fprintf(logfile, "too many mmio regions\n");
523 + exit(-1);
524 + }
525 +
526 + mmio[mmio_cnt].io_index = phys_offset;
527 + mmio[mmio_cnt].start = start_addr;
528 + mmio[mmio_cnt++].size = size;
529 +}
530 +
531 +/* mem_read and mem_write are arrays of functions containing the
532 + function to access byte (index 0), word (index 1) and dword (index
533 + 2). All functions must be supplied. If io_index is non zero, the
534 + corresponding io zone is modified. If it is zero, a new io zone is
535 + allocated. The return value can be used with
536 + cpu_register_physical_memory(). (-1) is returned if error. */
537 +int cpu_register_io_memory(int io_index,
538 + CPUReadMemoryFunc **mem_read,
539 + CPUWriteMemoryFunc **mem_write,
540 + void *opaque)
541 +{
542 + int i;
543 +
544 + if (io_index <= 0) {
545 + if (io_index >= IO_MEM_NB_ENTRIES)
546 + return -1;
547 + io_index = io_mem_nb++;
548 + } else {
549 + if (io_index >= IO_MEM_NB_ENTRIES)
550 + return -1;
551 + }
552 +
553 + for(i = 0;i < 3; i++) {
554 + io_mem_read[io_index][i] = mem_read[i];
555 + io_mem_write[io_index][i] = mem_write[i];
556 + }
557 + io_mem_opaque[io_index] = opaque;
558 + return io_index << IO_MEM_SHIFT;
559 +}
560 +
561 +CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
562 +{
563 + return io_mem_write[io_index >> IO_MEM_SHIFT];
564 +}
565 +
566 +CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
567 +{
568 + return io_mem_read[io_index >> IO_MEM_SHIFT];
569 +}
570 +
571 +#ifdef __ia64__
572 +
573 +#define __ia64_fc(addr) asm volatile ("fc %0" :: "r"(addr) : "memory")
574 +#define ia64_sync_i() asm volatile (";; sync.i" ::: "memory")
575 +#define ia64_srlz_i() asm volatile (";; srlz.i ;;" ::: "memory")
576 +
577 +/* IA64 has separate I/D caches, with coherence maintained by the DMA controller.
578 + * So to emulate the behavior the guest OS assumes, we need to flush the
579 + * I/D cache here.
580 + */
581 +static void sync_icache(unsigned long address, int len)
582 +{
583 + int l;
584 +
585 + for(l = 0; l < (len + 32); l += 32)
586 + __ia64_fc(address + l);
587 +
588 + ia64_sync_i();
589 + ia64_srlz_i();
590 +}
591 +#endif
592 +
593 +/* physical memory access (slow version, mainly for debug) */
594 +#if defined(CONFIG_USER_ONLY)
595 +void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
596 + int len, int is_write)
597 +{
598 + int l, flags;
599 + target_ulong page;
600 +
601 + while (len > 0) {
602 + page = addr & TARGET_PAGE_MASK;
603 + l = (page + TARGET_PAGE_SIZE) - addr;
604 + if (l > len)
605 + l = len;
606 + flags = page_get_flags(page);
607 + if (!(flags & PAGE_VALID))
608 + return;
609 + if (is_write) {
610 + if (!(flags & PAGE_WRITE))
611 + return;
612 + memcpy((uint8_t *)addr, buf, len);
613 + } else {
614 + if (!(flags & PAGE_READ))
615 + return;
616 + memcpy(buf, (uint8_t *)addr, len);
617 + }
618 + len -= l;
619 + buf += l;
620 + addr += l;
621 + }
622 +}
623 +#else
624 +
625 +int iomem_index(target_phys_addr_t addr)
626 +{
627 + int i;
628 +
629 + for (i = 0; i < mmio_cnt; i++) {
630 + unsigned long start, end;
631 +
632 + start = mmio[i].start;
633 + end = mmio[i].start + mmio[i].size;
634 +
635 + if ((addr >= start) && (addr < end)){
636 + return (mmio[i].io_index >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
637 + }
638 + }
639 + return 0;
640 +}
641 +
642 +static inline int paddr_is_ram(target_phys_addr_t addr)
643 +{
644 + /* Is this guest physical address RAM-backed? */
645 +#if defined(CONFIG_DM) && (defined(__i386__) || defined(__x86_64__))
646 + return ((addr < HVM_BELOW_4G_MMIO_START) ||
647 + (addr >= HVM_BELOW_4G_MMIO_START + HVM_BELOW_4G_MMIO_LENGTH));
648 +#else
649 + return (addr < ram_size);
650 +#endif
651 +}
652 +
653 +void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
654 + int len, int is_write)
655 +{
656 + int l, io_index;
657 + uint8_t *ptr;
658 + uint32_t val;
659 +
660 + while (len > 0) {
661 + /* How much can we copy before the next page boundary? */
662 + l = TARGET_PAGE_SIZE - (addr & ~TARGET_PAGE_MASK);
663 + if (l > len)
664 + l = len;
665 +
666 + io_index = iomem_index(addr);
667 + if (is_write) {
668 + if (io_index) {
669 + if (l >= 4 && ((addr & 3) == 0)) {
670 + /* 32 bit write access */
671 + val = ldl_raw(buf);
672 + io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
673 + l = 4;
674 + } else if (l >= 2 && ((addr & 1) == 0)) {
675 + /* 16 bit write access */
676 + val = lduw_raw(buf);
677 + io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
678 + l = 2;
679 + } else {
680 + /* 8 bit access */
681 + val = ldub_raw(buf);
682 + io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
683 + l = 1;
684 + }
685 + } else if (paddr_is_ram(addr)) {
686 + /* Writing to RAM */
687 + memcpy(phys_ram_base + addr, buf, l);
688 +#ifdef __ia64__
689 + sync_icache((unsigned long)(phys_ram_base + addr), l);
690 +#endif
691 + }
692 + } else {
693 + if (io_index) {
694 + if (l >= 4 && ((addr & 3) == 0)) {
695 + /* 32 bit read access */
696 + val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
697 + stl_raw(buf, val);
698 + l = 4;
699 + } else if (l >= 2 && ((addr & 1) == 0)) {
700 + /* 16 bit read access */
701 + val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
702 + stw_raw(buf, val);
703 + l = 2;
704 + } else {
705 + /* 8 bit access */
706 + val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
707 + stb_raw(buf, val);
708 + l = 1;
709 + }
710 + } else if (paddr_is_ram(addr)) {
711 + /* Reading from RAM */
712 + memcpy(buf, phys_ram_base + addr, l);
713 + } else {
714 + /* Neither RAM nor known MMIO space */
715 + memset(buf, 0xff, len);
716 + }
717 + }
718 + len -= l;
719 + buf += l;
720 + addr += l;
721 + }
722 +}
723 +#endif
724 +
725 +/* virtual memory access for debug */
726 +int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
727 + uint8_t *buf, int len, int is_write)
728 +{
729 + int l;
730 + target_ulong page, phys_addr;
731 +
732 + while (len > 0) {
733 + page = addr & TARGET_PAGE_MASK;
734 + phys_addr = cpu_get_phys_page_debug(env, page);
735 + /* if no physical page mapped, return an error */
736 + if (phys_addr == -1)
737 + return -1;
738 + l = (page + TARGET_PAGE_SIZE) - addr;
739 + if (l > len)
740 + l = len;
741 + cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
742 + buf, l, is_write);
743 + len -= l;
744 + buf += l;
745 + addr += l;
746 + }
747 + return 0;
748 +}
749 +
750 +void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
751 + int dirty_flags)
752 +{
753 + unsigned long length;
754 + int i, mask, len;
755 + uint8_t *p;
756 +
757 + start &= TARGET_PAGE_MASK;
758 + end = TARGET_PAGE_ALIGN(end);
759 +
760 + length = end - start;
761 + if (length == 0)
762 + return;
763 + mask = ~dirty_flags;
764 + p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
765 + len = length >> TARGET_PAGE_BITS;
766 + for(i = 0; i < len; i++)
767 + p[i] &= mask;
768 +
769 + return;
770 +}
771 Index: ioemu/target-i386-dm/helper2.c
772 ===================================================================
773 --- /dev/null 1970-01-01 00:00:00.000000000 +0000
774 +++ ioemu/target-i386-dm/helper2.c 2007-05-11 10:04:05.000000000 +0100
775 @@ -0,0 +1,542 @@
776 +/*
777 + * i386 helpers (without register variable usage)
778 + *
779 + * Copyright (c) 2003 Fabrice Bellard
780 + *
781 + * This library is free software; you can redistribute it and/or
782 + * modify it under the terms of the GNU Lesser General Public
783 + * License as published by the Free Software Foundation; either
784 + * version 2 of the License, or (at your option) any later version.
785 + *
786 + * This library is distributed in the hope that it will be useful,
787 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
788 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
789 + * Lesser General Public License for more details.
790 + *
791 + * You should have received a copy of the GNU Lesser General Public
792 + * License along with this library; if not, write to the Free Software
793 + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
794 + */
795 +
796 +/*
797 + * Main cpu loop for handling I/O requests coming from a virtual machine
798 + * Copyright 2004, Intel Corporation.
799 + * Copyright 2005, International Business Machines Corporation.
800 + *
801 + * This program is free software; you can redistribute it and/or modify it
802 + * under the terms and conditions of the GNU Lesser General Public License,
803 + * version 2.1, as published by the Free Software Foundation.
804 + *
805 + * This program is distributed in the hope it will be useful, but WITHOUT
806 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
807 + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
808 + * more details.
809 + *
810 + * You should have received a copy of the GNU Lesser General Public License
811 + * along with this program; if not, write to the Free Software Foundation,
812 + * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307 USA.
813 + */
814 +#include <stdarg.h>
815 +#include <stdlib.h>
816 +#include <stdio.h>
817 +#include <string.h>
818 +#include <inttypes.h>
819 +#include <signal.h>
820 +#include <assert.h>
821 +
822 +#include <limits.h>
823 +#include <fcntl.h>
824 +
825 +#include <xenctrl.h>
826 +#include <xen/hvm/ioreq.h>
827 +
828 +#include "cpu.h"
829 +#include "exec-all.h"
830 +
831 +//#define DEBUG_MMU
832 +
833 +#ifdef USE_CODE_COPY
834 +#include <asm/ldt.h>
835 +#include <linux/unistd.h>
836 +#include <linux/version.h>
837 +
838 +_syscall3(int, modify_ldt, int, func, void *, ptr, unsigned long, bytecount)
839 +
840 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 5, 66)
841 +#define modify_ldt_ldt_s user_desc
842 +#endif
843 +#endif /* USE_CODE_COPY */
844 +
845 +#include "vl.h"
846 +
847 +int domid = -1;
848 +int vcpus = 1;
849 +
850 +int xc_handle;
851 +
852 +shared_iopage_t *shared_page = NULL;
853 +
854 +/* the evtchn fd for polling */
855 +int xce_handle = -1;
856 +
857 +/* which vcpu we are serving */
858 +int send_vcpu = 0;
859 +
860 +//the evtchn ports used to poll for ioreq notifications, one per vcpu
861 +#define NR_CPUS 32
862 +evtchn_port_t ioreq_local_port[NR_CPUS];
863 +
864 +CPUX86State *cpu_x86_init(void)
865 +{
866 + CPUX86State *env;
867 + static int inited;
868 + int i, rc;
869 +
870 + env = qemu_mallocz(sizeof(CPUX86State));
871 + if (!env)
872 + return NULL;
873 + cpu_exec_init(env);
874 +
875 + /* init various static tables */
876 + if (!inited) {
877 + inited = 1;
878 +
879 + cpu_single_env = env;
880 +
881 + xce_handle = xc_evtchn_open();
882 + if (xce_handle == -1) {
883 + perror("open");
884 + return NULL;
885 + }
886 +
887 + /* FIXME: how about if we overflow the page here? */
888 + for (i = 0; i < vcpus; i++) {
889 + rc = xc_evtchn_bind_interdomain(
890 + xce_handle, domid, shared_page->vcpu_iodata[i].vp_eport);
891 + if (rc == -1) {
892 + fprintf(logfile, "bind interdomain ioctl error %d\n", errno);
893 + return NULL;
894 + }
895 + ioreq_local_port[i] = rc;
896 + }
897 + }
898 +
899 + return env;
900 +}
901 +
902 +/* called from main_cpu_reset */
903 +void cpu_reset(CPUX86State *env)
904 +{
905 +}
906 +
907 +void cpu_x86_close(CPUX86State *env)
908 +{
909 + free(env);
910 +}
911 +
912 +
913 +void cpu_dump_state(CPUState *env, FILE *f,
914 + int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
915 + int flags)
916 +{
917 +}
918 +
919 +/***********************************************************/
920 +/* x86 mmu */
921 +/* XXX: add PGE support */
922 +
923 +void cpu_x86_set_a20(CPUX86State *env, int a20_state)
924 +{
925 + a20_state = (a20_state != 0);
926 + if (a20_state != ((env->a20_mask >> 20) & 1)) {
927 +#if defined(DEBUG_MMU)
928 + printf("A20 update: a20=%d\n", a20_state);
929 +#endif
930 + env->a20_mask = 0xffefffff | (a20_state << 20);
931 + }
932 +}
933 +
934 +target_ulong cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
935 +{
936 + return addr;
937 +}
938 +
939 +//some functions to handle the io req packet
940 +void sp_info()
941 +{
942 + ioreq_t *req;
943 + int i;
944 +
945 + for (i = 0; i < vcpus; i++) {
946 + req = &(shared_page->vcpu_iodata[i].vp_ioreq);
947 + term_printf("vcpu %d: event port %d\n", i, ioreq_local_port[i]);
948 + term_printf(" req state: %x, ptr: %x, addr: %"PRIx64", "
949 + "data: %"PRIx64", count: %"PRIx64", size: %"PRIx64"\n",
950 + req->state, req->data_is_ptr, req->addr,
951 + req->data, req->count, req->size);
952 + term_printf(" IO totally occurred on this vcpu: %"PRIx64"\n",
953 + req->io_count);
954 + }
955 +}
956 +
957 +//get the ioreq packet from shared memory
958 +static ioreq_t *__cpu_get_ioreq(int vcpu)
959 +{
960 + ioreq_t *req;
961 +
962 + req = &(shared_page->vcpu_iodata[vcpu].vp_ioreq);
963 +
964 + if (req->state != STATE_IOREQ_READY) {
965 + fprintf(logfile, "I/O request not ready: "
966 + "%x, ptr: %x, port: %"PRIx64", "
967 + "data: %"PRIx64", count: %"PRIx64", size: %"PRIx64"\n",
968 + req->state, req->data_is_ptr, req->addr,
969 + req->data, req->count, req->size);
970 + return NULL;
971 + }
972 +
973 + rmb(); /* see IOREQ_READY /then/ read contents of ioreq */
974 +
975 + req->state = STATE_IOREQ_INPROCESS;
976 + return req;
977 +}
978 +
979 +//poll the event channel for a port notification;
980 +//on success, sets send_vcpu to the vcpu being served
981 +//retval: the pending ioreq packet, or NULL if none is ready
982 +static ioreq_t *cpu_get_ioreq(void)
983 +{
984 + int i;
985 + evtchn_port_t port;
986 +
987 + port = xc_evtchn_pending(xce_handle);
988 + if (port != -1) {
989 + for ( i = 0; i < vcpus; i++ )
990 + if ( ioreq_local_port[i] == port )
991 + break;
992 +
993 + if ( i == vcpus ) {
994 + fprintf(logfile, "Fatal error while trying to get io event!\n");
995 + exit(1);
996 + }
997 +
998 + // unmask the wanted port again
999 + xc_evtchn_unmask(xce_handle, port);
1001 + //get the io packet from shared memory
1002 + send_vcpu = i;
1003 + return __cpu_get_ioreq(i);
1004 + }
1006 + //no port pending: either a read error or nothing to read
1007 + return NULL;
1008 +}
1010 +unsigned long do_inp(CPUState *env, unsigned long addr, unsigned long size)
1011 +{
1012 + switch(size) {
1013 + case 1:
1014 + return cpu_inb(env, addr);
1015 + case 2:
1016 + return cpu_inw(env, addr);
1017 + case 4:
1018 + return cpu_inl(env, addr);
1019 + default:
1020 + fprintf(logfile, "inp: bad size: %lx %lx\n", addr, size);
1021 + exit(-1);
1022 + }
1023 +}
1025 +void do_outp(CPUState *env, unsigned long addr,
1026 + unsigned long size, unsigned long val)
1027 +{
1028 + switch(size) {
1029 + case 1:
1030 + return cpu_outb(env, addr, val);
1031 + case 2:
1032 + return cpu_outw(env, addr, val);
1033 + case 4:
1034 + return cpu_outl(env, addr, val);
1035 + default:
1036 + fprintf(logfile, "outp: bad size: %lx %lx\n", addr, size);
1037 + exit(-1);
1038 + }
1039 +}
1041 +extern void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
1042 + int len, int is_write);
1044 +static inline void read_physical(uint64_t addr, unsigned long size, void *val)
1045 +{
1046 + return cpu_physical_memory_rw((target_phys_addr_t)addr, val, size, 0);
1047 +}
1049 +static inline void write_physical(uint64_t addr, unsigned long size, void *val)
1050 +{
1051 + return cpu_physical_memory_rw((target_phys_addr_t)addr, val, size, 1);
1052 +}
1054 +void cpu_ioreq_pio(CPUState *env, ioreq_t *req)
1055 +{
1056 + int i, sign;
1058 + sign = req->df ? -1 : 1;
1060 + if (req->dir == IOREQ_READ) {
1061 + if (!req->data_is_ptr) {
1062 + req->data = do_inp(env, req->addr, req->size);
1063 + } else {
1064 + unsigned long tmp;
1066 + for (i = 0; i < req->count; i++) {
1067 + tmp = do_inp(env, req->addr, req->size);
1068 + write_physical((target_phys_addr_t) req->data
1069 + + (sign * i * req->size),
1070 + req->size, &tmp);
1071 + }
1072 + }
1073 + } else if (req->dir == IOREQ_WRITE) {
1074 + if (!req->data_is_ptr) {
1075 + do_outp(env, req->addr, req->size, req->data);
1076 + } else {
1077 + for (i = 0; i < req->count; i++) {
1078 + unsigned long tmp;
1080 + read_physical((target_phys_addr_t) req->data
1081 + + (sign * i * req->size),
1082 + req->size, &tmp);
1083 + do_outp(env, req->addr, req->size, tmp);
1084 + }
1085 + }
1086 + }
1087 +}
1089 +void cpu_ioreq_move(CPUState *env, ioreq_t *req)
1090 +{
1091 + int i, sign;
1093 + sign = req->df ? -1 : 1;
1095 + if (!req->data_is_ptr) {
1096 + if (req->dir == IOREQ_READ) {
1097 + for (i = 0; i < req->count; i++) {
1098 + read_physical(req->addr
1099 + + (sign * i * req->size),
1100 + req->size, &req->data);
1101 + }
1102 + } else if (req->dir == IOREQ_WRITE) {
1103 + for (i = 0; i < req->count; i++) {
1104 + write_physical(req->addr
1105 + + (sign * i * req->size),
1106 + req->size, &req->data);
1107 + }
1108 + }
1109 + } else {
1110 + unsigned long tmp;
1112 + if (req->dir == IOREQ_READ) {
1113 + for (i = 0; i < req->count; i++) {
1114 + read_physical(req->addr
1115 + + (sign * i * req->size),
1116 + req->size, &tmp);
1117 + write_physical((target_phys_addr_t )req->data
1118 + + (sign * i * req->size),
1119 + req->size, &tmp);
1120 + }
1121 + } else if (req->dir == IOREQ_WRITE) {
1122 + for (i = 0; i < req->count; i++) {
1123 + read_physical((target_phys_addr_t) req->data
1124 + + (sign * i * req->size),
1125 + req->size, &tmp);
1126 + write_physical(req->addr
1127 + + (sign * i * req->size),
1128 + req->size, &tmp);
1129 + }
1130 + }
1131 + }
1132 +}
1134 +void cpu_ioreq_and(CPUState *env, ioreq_t *req)
1135 +{
1136 + unsigned long tmp1, tmp2;
1138 + if (req->data_is_ptr != 0)
1139 + hw_error("expected scalar value");
1141 + read_physical(req->addr, req->size, &tmp1);
1142 + if (req->dir == IOREQ_WRITE) {
1143 + tmp2 = tmp1 & (unsigned long) req->data;
1144 + write_physical(req->addr, req->size, &tmp2);
1145 + }
1146 + req->data = tmp1;
1147 +}
1149 +void cpu_ioreq_add(CPUState *env, ioreq_t *req)
1150 +{
1151 + unsigned long tmp1, tmp2;
1153 + if (req->data_is_ptr != 0)
1154 + hw_error("expected scalar value");
1156 + read_physical(req->addr, req->size, &tmp1);
1157 + if (req->dir == IOREQ_WRITE) {
1158 + tmp2 = tmp1 + (unsigned long) req->data;
1159 + write_physical(req->addr, req->size, &tmp2);
1160 + }
1161 + req->data = tmp1;
1162 +}
1164 +void cpu_ioreq_sub(CPUState *env, ioreq_t *req)
1165 +{
1166 + unsigned long tmp1, tmp2;
1168 + if (req->data_is_ptr != 0)
1169 + hw_error("expected scalar value");
1171 + read_physical(req->addr, req->size, &tmp1);
1172 + if (req->dir == IOREQ_WRITE) {
1173 + tmp2 = tmp1 - (unsigned long) req->data;
1174 + write_physical(req->addr, req->size, &tmp2);
1175 + }
1176 + req->data = tmp1;
1177 +}
1179 +void cpu_ioreq_or(CPUState *env, ioreq_t *req)
1180 +{
1181 + unsigned long tmp1, tmp2;
1183 + if (req->data_is_ptr != 0)
1184 + hw_error("expected scalar value");
1186 + read_physical(req->addr, req->size, &tmp1);
1187 + if (req->dir == IOREQ_WRITE) {
1188 + tmp2 = tmp1 | (unsigned long) req->data;
1189 + write_physical(req->addr, req->size, &tmp2);
1190 + }
1191 + req->data = tmp1;
1192 +}
1194 +void cpu_ioreq_xor(CPUState *env, ioreq_t *req)
1195 +{
1196 + unsigned long tmp1, tmp2;
1198 + if (req->data_is_ptr != 0)
1199 + hw_error("expected scalar value");
1201 + read_physical(req->addr, req->size, &tmp1);
1202 + if (req->dir == IOREQ_WRITE) {
1203 + tmp2 = tmp1 ^ (unsigned long) req->data;
1204 + write_physical(req->addr, req->size, &tmp2);
1205 + }
1206 + req->data = tmp1;
1207 +}
1209 +void cpu_ioreq_xchg(CPUState *env, ioreq_t *req)
1210 +{
1211 + unsigned long tmp1;
1213 + if (req->data_is_ptr != 0)
1214 + hw_error("expected scalar value");
1216 + read_physical(req->addr, req->size, &tmp1);
1217 + write_physical(req->addr, req->size, &req->data);
1218 + req->data = tmp1;
1219 +}
1221 +void cpu_handle_ioreq(void *opaque)
1222 +{
1223 + extern int vm_running;
1224 + extern int shutdown_requested;
1225 + CPUState *env = opaque;
1226 + ioreq_t *req = cpu_get_ioreq();
1228 + if (req) {
1229 + if ((!req->data_is_ptr) && (req->dir == IOREQ_WRITE)) {
1230 + if (req->size != 4)
1231 + req->data &= (1UL << (8 * req->size))-1;
1232 + }
1234 + switch (req->type) {
1235 + case IOREQ_TYPE_PIO:
1236 + cpu_ioreq_pio(env, req);
1237 + break;
1238 + case IOREQ_TYPE_COPY:
1239 + cpu_ioreq_move(env, req);
1240 + break;
1241 + case IOREQ_TYPE_AND:
1242 + cpu_ioreq_and(env, req);
1243 + break;
1244 + case IOREQ_TYPE_ADD:
1245 + cpu_ioreq_add(env, req);
1246 + break;
1247 + case IOREQ_TYPE_SUB:
1248 + cpu_ioreq_sub(env, req);
1249 + break;
1250 + case IOREQ_TYPE_OR:
1251 + cpu_ioreq_or(env, req);
1252 + break;
1253 + case IOREQ_TYPE_XOR:
1254 + cpu_ioreq_xor(env, req);
1255 + break;
1256 + case IOREQ_TYPE_XCHG:
1257 + cpu_ioreq_xchg(env, req);
1258 + break;
1259 + default:
1260 + hw_error("Invalid ioreq type 0x%x\n", req->type);
1261 + }
1263 + if (req->state != STATE_IOREQ_INPROCESS) {
1264 + fprintf(logfile, "Badness in I/O request ... not in service?!: "
1265 + "%x, ptr: %x, port: %"PRIx64", "
1266 + "data: %"PRIx64", count: %"PRIx64", size: %"PRIx64"\n",
1267 + req->state, req->data_is_ptr, req->addr,
1268 + req->data, req->count, req->size);
1269 + destroy_hvm_domain();
1270 + return;
1271 + }
1273 + wmb(); /* Update ioreq contents /then/ update state. */
1275 + /*
1276 + * We do this before we send the response so that the tools
1277 + * have the opportunity to pick up on the reset before the
1278 + * guest resumes and does a hlt with interrupts disabled which
1279 + * causes Xen to powerdown the domain.
1280 + */
1281 + if (vm_running) {
1282 + if (shutdown_requested) {
1283 + fprintf(logfile, "shutdown requested in cpu_handle_ioreq\n");
1284 + destroy_hvm_domain();
1285 + }
1286 + if (reset_requested) {
1287 + fprintf(logfile, "reset requested in cpu_handle_ioreq.\n");
1288 + qemu_system_reset();
1289 + reset_requested = 0;
1290 + }
1291 + }
1293 + req->state = STATE_IORESP_READY;
1294 + xc_evtchn_notify(xce_handle, ioreq_local_port[send_vcpu]);
1295 + }
1296 +}
1298 +int main_loop(void)
1299 +{
1300 + extern int vm_running;
1301 + extern int shutdown_requested;
1302 + CPUState *env = cpu_single_env;
1303 + int evtchn_fd = xc_evtchn_fd(xce_handle);
1305 + qemu_set_fd_handler(evtchn_fd, cpu_handle_ioreq, NULL, env);
1307 + while (1) {
1308 + if (vm_running) {
1309 + if (shutdown_requested)
1310 + break;
1311 + }
1313 + /* Wait up to 10 msec. */
1314 + main_loop_wait(10);
1315 + }
1316 + return 0;
1317 +}
1318 Index: ioemu/target-i386-dm/i8259-dm.c
1319 ===================================================================
1320 --- /dev/null 1970-01-01 00:00:00.000000000 +0000
1321 +++ ioemu/target-i386-dm/i8259-dm.c 2007-05-11 10:04:04.000000000 +0100
1322 @@ -0,0 +1,67 @@
1323 +/* Xen 8259 stub for interrupt controller emulation
1324 + *
1325 + * Copyright (c) 2003-2004 Fabrice Bellard
1326 + * Copyright (c) 2005 Intel corporation
1327 + *
1328 + * Permission is hereby granted, free of charge, to any person obtaining a copy
1329 + * of this software and associated documentation files (the "Software"), to deal
1330 + * in the Software without restriction, including without limitation the rights
1331 + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
1332 + * copies of the Software, and to permit persons to whom the Software is
1333 + * furnished to do so, subject to the following conditions:
1334 + *
1335 + * The above copyright notice and this permission notice shall be included in
1336 + * all copies or substantial portions of the Software.
1337 + *
1338 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
1339 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
1340 + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
1341 + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
1342 + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
1343 + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
1344 + * THE SOFTWARE.
1345 + */
1346 +#include "vl.h"
1347 +#include "xenctrl.h"
1348 +#include <xen/hvm/ioreq.h>
1349 +#include <stdio.h>
1350 +#include "cpu.h"
1351 +#include "cpu-all.h"
1353 +struct PicState2 {
1354 +};
1356 +void pic_set_irq_new(void *opaque, int irq, int level)
1357 +{
1358 + xc_hvm_set_irq_level(xc_handle, domid, irq, level);
1359 +}
1361 +/* obsolete function */
1362 +void pic_set_irq(int irq, int level)
1363 +{
1364 + pic_set_irq_new(isa_pic, irq, level);
1365 +}
1367 +void irq_info(void)
1368 +{
1369 + term_printf("irq statistic code not compiled.\n");
1370 +}
1372 +void pic_info(void)
1373 +{
1374 + term_printf("pic_info code not compiled.\n");
1375 +}
1377 +PicState2 *pic_init(IRQRequestFunc *irq_request, void *irq_request_opaque)
1378 +{
1379 + PicState2 *s;
1380 + s = qemu_mallocz(sizeof(PicState2));
1381 + if (!s)
1382 + return NULL;
1383 + return s;
1384 +}
1386 +void pic_set_alt_irq_func(PicState2 *s, SetIRQFunc *alt_irq_func,
1387 + void *alt_irq_opaque)
1388 +{
1389 +}
1390 Index: ioemu/target-i386-dm/qemu-dm.debug
1391 ===================================================================
1392 --- /dev/null 1970-01-01 00:00:00.000000000 +0000
1393 +++ ioemu/target-i386-dm/qemu-dm.debug 2007-05-11 10:01:09.000000000 +0100
1394 @@ -0,0 +1,10 @@
1395 +#!/bin/sh
1397 +if [ "`arch`" = "x86_64" ]; then
1398 + LIBDIR="lib64"
1399 +else
1400 + LIBDIR="lib"
1401 +fi
1402 +echo $* > /tmp/args
1403 +echo $DISPLAY >> /tmp/args
1404 +exec /usr/$LIBDIR/xen/bin/qemu-dm $*
1405 Index: ioemu/target-i386-dm/qemu-ifup
1406 ===================================================================
1407 --- /dev/null 1970-01-01 00:00:00.000000000 +0000
1408 +++ ioemu/target-i386-dm/qemu-ifup 2007-06-03 11:50:25.000000000 +1000
1409 @@ -0,0 +1,37 @@
1410 +#!/bin/sh
1412 +#. /etc/rc.d/init.d/functions
1413 +#ulimit -c unlimited
1415 +echo 'config qemu network with xen bridge for ' $*
1417 +bridge=$2
1419 +#
1420 +# Old style bridge setup with netloop, used to have a bridge name
1421 +# of xenbrX, enslaving pethX and vif0.X, and then configuring
1422 +# eth0.
1423 +#
1424 +# New style bridge setup does not use netloop, so the bridge name
1425 +# is ethX and the physical device is enslaved pethX
1426 +#
1427 +# So if...
1428 +#
1429 +# - User asks for xenbrX
1430 +# - AND xenbrX doesn't exist
1431 +# - AND there is a ethX device which is a bridge
1432 +#
1433 +# ..then we translate xenbrX to ethX
1434 +#
1435 +# This lets old config files work without modification
1436 +#
1437 +if [ ! -e "/sys/class/net/$bridge" ] && [ -z "${bridge##xenbr*}" ]
1438 +then
1439 + if [ -e "/sys/class/net/eth${bridge#xenbr}/bridge" ]
1440 + then
1441 + bridge="eth${bridge#xenbr}"
1442 + fi
1443 +fi
1445 +ifconfig $1 0.0.0.0 up
1446 +brctl addif $bridge $1