debuggers.hg

view tools/ioemu/target-i386-dm/helper2.c @ 6644:29808fef9148

merge?
author cl349@firebug.cl.cam.ac.uk
date Sat Sep 03 18:24:46 2005 +0000 (2005-09-03)
parents 0c0d929e787c f27205ea60ef
children b6c98fe62e1a
line source
/*
 * i386 helpers (without register variable usage)
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

/*
 * Main cpu loop for handling I/O requests coming from a virtual machine
 * Copyright 2004, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU Lesser General Public License,
 * version 2.1, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307 USA.
 */
#include <stdarg.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <inttypes.h>
#include <signal.h>
#include <assert.h>

#include <limits.h>
#include <fcntl.h>
#include <sys/ioctl.h>

#include "xenctrl.h"
#include <io/ioreq.h>

#include "cpu.h"
#include "exec-all.h"
#include "vl.h"
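
/*
 * Overview: this file stands in for QEMU's x86 instruction emulation in
 * the Xen device model.  The guest's instructions execute under VMX in
 * the hypervisor, so the CPU-state routines below are largely stubs; only
 * trapped I/O accesses reach this process, one ioreq_t at a time, through
 * a page (shared_iopage_t) shared with Xen.  Completion of a request and
 * interrupt injection are signalled back over a Xen event channel (see
 * main_loop() below).
 */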
shared_iopage_t *shared_page = NULL;
extern int reset_requested;
CPUX86State *cpu_86_init(void)
{
    CPUX86State *env;
    static int inited;

    cpu_exec_init();

    env = malloc(sizeof(CPUX86State));
    if (!env)
        return NULL;
    memset(env, 0, sizeof(CPUX86State));
    /* init various static tables */
    if (!inited) {
        inited = 1;
    }
    cpu_single_env = env;
    cpu_reset(env);
    return env;
}
/* NOTE: must be called outside the CPU execute loop */
void cpu_reset(CPUX86State *env)
{
}

void cpu_x86_close(CPUX86State *env)
{
    free(env);
}

void cpu_dump_state(CPUState *env, FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
                    int flags)
{
}
/***********************************************************/
/* x86 mmu */
/* XXX: add PGE support */

void cpu_x86_set_a20(CPUX86State *env, int a20_state)
{
    a20_state = (a20_state != 0);
    if (a20_state != ((env->a20_mask >> 20) & 1)) {
#if defined(DEBUG_MMU)
        printf("A20 update: a20=%d\n", a20_state);
#endif
        env->a20_mask = 0xffefffff | (a20_state << 20);
    }
}

target_ulong cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
{
    return addr;
}
/* The event-channel file descriptor used for polling. */
int evtchn_fd = -1;
/* The event-channel port to poll for I/O request notifications; passed in
   on the device model's command line. */
u16 ioreq_port = 0;

/* Helpers for inspecting and handling I/O request packets. */
void
sp_info(void)
{
    ioreq_t *req;

    req = &(shared_page->vcpu_iodata[0].vp_ioreq);
    term_printf("event port: %d\n", shared_page->sp_global.eport);
    term_printf("req state: %x, pvalid: %x, addr: %llx, "
                "data: %llx, count: %llx, size: %llx\n",
                req->state, req->pdata_valid, req->addr,
                req->u.data, req->count, req->size);
}
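
/*
 * An ioreq_t moves through a simple state machine: Xen marks it
 * STATE_IOREQ_READY, the fetch below flips it to STATE_IOREQ_INPROCESS,
 * and cpu_handle_ioreq() sets STATE_IORESP_READY once the access has been
 * emulated (unless a hook has already changed the state).  The fields
 * consumed in this file are state, type, dir, df, pdata_valid, addr,
 * size, count and the data/pdata union.
 */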
/* Fetch the pending I/O request packet from the shared memory page. */
ioreq_t* __cpu_get_ioreq(void)
{
    ioreq_t *req;

    req = &(shared_page->vcpu_iodata[0].vp_ioreq);
    if (req->state == STATE_IOREQ_READY) {
        req->state = STATE_IOREQ_INPROCESS;
    } else {
        fprintf(logfile, "False I/O request ... in-service already: "
                "%x, pvalid: %x, port: %llx, data: %llx, count: %llx, size: %llx\n",
                req->state, req->pdata_valid, req->addr,
                req->u.data, req->count, req->size);
        req = NULL;
    }

    return req;
}
/*
 * Read a notification from the event-channel file descriptor; if it is
 * for our port, return the pending I/O request packet, otherwise NULL.
 */
ioreq_t* cpu_get_ioreq(void)
{
    int rc;
    u16 buf[2];

    rc = read(evtchn_fd, buf, 2);
    if (rc == 2 && buf[0] == ioreq_port) { /* got the one 16-bit port index we care about */
        /* unmask the wanted port again */
        write(evtchn_fd, &ioreq_port, 2);

        /* get the I/O packet from shared memory */
        return __cpu_get_ioreq();
    }

    /* read error, or nothing to read */
    return NULL;
}
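
/*
 * Port I/O helpers: dispatch on the access size to QEMU's registered
 * ioport handlers (cpu_inb/inw/inl and cpu_outb/outw/outl).
 */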
unsigned long
do_inp(CPUState *env, unsigned long addr, unsigned long size)
{
    switch(size) {
    case 1:
        return cpu_inb(env, addr);
    case 2:
        return cpu_inw(env, addr);
    case 4:
        return cpu_inl(env, addr);
    default:
        fprintf(logfile, "inp: bad size: %lx %lx\n", addr, size);
        exit(-1);
    }
}

void
do_outp(CPUState *env, unsigned long addr, unsigned long size,
        unsigned long val)
{
    switch(size) {
    case 1:
        return cpu_outb(env, addr, val);
    case 2:
        return cpu_outw(env, addr, val);
    case 4:
        return cpu_outl(env, addr, val);
    default:
        fprintf(logfile, "outp: bad size: %lx %lx\n", addr, size);
        exit(-1);
    }
}
extern void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                                   int len, int is_write);

static inline void
read_physical(u64 addr, unsigned long size, void *val)
{
    return cpu_physical_memory_rw((target_phys_addr_t)addr, val, size, 0);
}

static inline void
write_physical(u64 addr, unsigned long size, void *val)
{
    return cpu_physical_memory_rw((target_phys_addr_t)addr, val, size, 1);
}
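
/*
 * IOREQ_TYPE_PIO: a port access.  For a plain IN/OUT the value travels in
 * req->u.data; for a string (rep) variant pdata_valid is set and the data
 * is copied to/from guest physical memory at req->u.pdata, stepping
 * forwards or backwards according to the direction flag req->df.
 */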
void
cpu_ioreq_pio(CPUState *env, ioreq_t *req)
{
    int i, sign;

    sign = req->df ? -1 : 1;

    if (req->dir == IOREQ_READ) {
        if (!req->pdata_valid) {
            req->u.data = do_inp(env, req->addr, req->size);
        } else {
            unsigned long tmp;

            for (i = 0; i < req->count; i++) {
                tmp = do_inp(env, req->addr, req->size);
                write_physical((target_phys_addr_t) req->u.pdata
                               + (sign * i * req->size),
                               req->size, &tmp);
            }
        }
    } else if (req->dir == IOREQ_WRITE) {
        if (!req->pdata_valid) {
            do_outp(env, req->addr, req->size, req->u.data);
        } else {
            for (i = 0; i < req->count; i++) {
                unsigned long tmp;

                read_physical((target_phys_addr_t) req->u.pdata
                              + (sign * i * req->size),
                              req->size, &tmp);
                do_outp(env, req->addr, req->size, tmp);
            }
        }
    }
}
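
/*
 * IOREQ_TYPE_COPY: a memory-mapped access.  req->addr is the guest
 * physical address being read or written; without pdata_valid the value
 * goes through req->u.data, otherwise it is copied between req->addr and
 * the guest buffer at req->u.pdata, one element of req->size bytes per
 * iteration.
 */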
void
cpu_ioreq_move(CPUState *env, ioreq_t *req)
{
    int i, sign;

    sign = req->df ? -1 : 1;

    if (!req->pdata_valid) {
        if (req->dir == IOREQ_READ) {
            for (i = 0; i < req->count; i++) {
                read_physical(req->addr
                              + (sign * i * req->size),
                              req->size, &req->u.data);
            }
        } else if (req->dir == IOREQ_WRITE) {
            for (i = 0; i < req->count; i++) {
                write_physical(req->addr
                               + (sign * i * req->size),
                               req->size, &req->u.data);
            }
        }
    } else {
        unsigned long tmp;

        if (req->dir == IOREQ_READ) {
            for (i = 0; i < req->count; i++) {
                read_physical(req->addr
                              + (sign * i * req->size),
                              req->size, &tmp);
                write_physical((target_phys_addr_t) req->u.pdata
                               + (sign * i * req->size),
                               req->size, &tmp);
            }
        } else if (req->dir == IOREQ_WRITE) {
            for (i = 0; i < req->count; i++) {
                read_physical((target_phys_addr_t) req->u.pdata
                              + (sign * i * req->size),
                              req->size, &tmp);
                write_physical(req->addr
                               + (sign * i * req->size),
                               req->size, &tmp);
            }
        }
    }
}
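
/*
 * IOREQ_TYPE_AND/OR/XOR: read-modify-write on guest memory.  For a write,
 * the old value at req->addr is combined with req->u.data and written
 * back; in all cases the pre-modification value is returned to the guest
 * in req->u.data.
 */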
void
cpu_ioreq_and(CPUState *env, ioreq_t *req)
{
    unsigned long tmp1, tmp2;

    if (req->pdata_valid != 0)
        hw_error("expected scalar value");

    read_physical(req->addr, req->size, &tmp1);
    if (req->dir == IOREQ_WRITE) {
        tmp2 = tmp1 & (unsigned long) req->u.data;
        write_physical(req->addr, req->size, &tmp2);
    }
    req->u.data = tmp1;
}

void
cpu_ioreq_or(CPUState *env, ioreq_t *req)
{
    unsigned long tmp1, tmp2;

    if (req->pdata_valid != 0)
        hw_error("expected scalar value");

    read_physical(req->addr, req->size, &tmp1);
    if (req->dir == IOREQ_WRITE) {
        tmp2 = tmp1 | (unsigned long) req->u.data;
        write_physical(req->addr, req->size, &tmp2);
    }
    req->u.data = tmp1;
}

void
cpu_ioreq_xor(CPUState *env, ioreq_t *req)
{
    unsigned long tmp1, tmp2;

    if (req->pdata_valid != 0)
        hw_error("expected scalar value");

    read_physical(req->addr, req->size, &tmp1);
    if (req->dir == IOREQ_WRITE) {
        tmp2 = tmp1 ^ (unsigned long) req->u.data;
        write_physical(req->addr, req->size, &tmp2);
    }
    req->u.data = tmp1;
}
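
/*
 * Fetch and dispatch one I/O request, if any is pending.  Scalar write
 * data is first masked down to the request size, the request is handled
 * according to its type, and the response state and env->send_event are
 * set so that main_loop() notifies the hypervisor.
 */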
void
cpu_handle_ioreq(CPUState *env)
{
    ioreq_t *req = cpu_get_ioreq();

    if (req) {
        if ((!req->pdata_valid) && (req->dir == IOREQ_WRITE)) {
            if (req->size != 4)
                req->u.data &= (1UL << (8 * req->size)) - 1;
        }

        switch (req->type) {
        case IOREQ_TYPE_PIO:
            cpu_ioreq_pio(env, req);
            break;
        case IOREQ_TYPE_COPY:
            cpu_ioreq_move(env, req);
            break;
        case IOREQ_TYPE_AND:
            cpu_ioreq_and(env, req);
            break;
        case IOREQ_TYPE_OR:
            cpu_ioreq_or(env, req);
            break;
        case IOREQ_TYPE_XOR:
            cpu_ioreq_xor(env, req);
            break;
        default:
            hw_error("Invalid ioreq type 0x%x", req->type);
        }

        /* No state change if state = STATE_IORESP_HOOK */
        if (req->state == STATE_IOREQ_INPROCESS)
            req->state = STATE_IORESP_READY;
        env->send_event = 1;
    }
}
void
cpu_timer_handler(CPUState *env)
{
    cpu_handle_ioreq(env);
}

int xc_handle;

static __inline__ void atomic_set_bit(long nr, volatile void *addr)
{
    __asm__ __volatile__(
        "lock ; bts %1,%0"
        :"=m" (*(volatile long *)addr)
        :"dIr" (nr));
}
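
/*
 * Inject an external interrupt: atomically set the vector's bit in the
 * PIC interrupt bitmap on the shared page, and flag that an event-channel
 * notification should be sent from main_loop().
 */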
void
do_interrupt(CPUState *env, int vector)
{
    unsigned long *intr;

    // Send a message on the event channel.  Add the vector to the shared
    // mem page.
    intr = (unsigned long *) &(shared_page->sp_global.pic_intr[0]);
    atomic_set_bit(vector, intr);
    if (loglevel & CPU_LOG_INT)
        fprintf(logfile, "injecting vector: %x\n", vector);
    env->send_event = 1;
}
void
destroy_vmx_domain(void)
{
    extern int domid;
    extern FILE* logfile;
    char destroy_cmd[32];   /* room for "xm destroy " plus any int domid */

    sprintf(destroy_cmd, "xm destroy %d", domid);
    if (system(destroy_cmd) == -1)
        fprintf(logfile, "%s failed!\n", destroy_cmd);
}
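
/*
 * Device-model main loop: select() on the event-channel fd with a 100 ms
 * timeout, give QEMU's main_loop_wait() a chance to run, service any
 * pending I/O request, inject PIC interrupts raised by emulated devices,
 * and notify the hypervisor over the event channel if anything was
 * completed or injected.  The loop exits, and the domain is destroyed,
 * once a shutdown has been requested.
 */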
int main_loop(void)
{
    int vector;
    fd_set rfds;
    struct timeval tv;
    extern CPUState *global_env;
    extern int vm_running;
    extern int shutdown_requested;
    CPUState *env = global_env;
    int retval;
    extern void main_loop_wait(int);

    /* Watch the event-channel fd to see when a notification arrives. */
    FD_ZERO(&rfds);

    while (1) {
        if (vm_running) {
            if (shutdown_requested) {
                break;
            }
            if (reset_requested) {
                qemu_system_reset();
                reset_requested = 0;
            }
        }

        /* Wait up to 100 ms. */
        tv.tv_sec = 0;
        tv.tv_usec = 100000;
        FD_SET(evtchn_fd, &rfds);

        env->send_event = 0;
        retval = select(evtchn_fd + 1, &rfds, NULL, NULL, &tv);
        if (retval == -1) {
            perror("select");
            return 0;
        }

#if __WORDSIZE == 32
#define ULONGLONG_MAX 0xffffffffffffffffULL
#else
#define ULONGLONG_MAX ULONG_MAX
#endif

        main_loop_wait(0);
#ifdef APIC_SUPPORT
        ioapic_update_EOI();
#endif
        cpu_timer_handler(env);
        if (env->interrupt_request & CPU_INTERRUPT_HARD) {
            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
            vector = cpu_get_pic_interrupt(env);
            do_interrupt(env, vector);
        }
#ifdef APIC_SUPPORT
        if (ioapic_has_intr())
            do_ioapic();
#endif
        if (env->send_event) {
            int ret;
            ret = xc_evtchn_send(xc_handle, ioreq_port);
            if (ret == -1) {
                fprintf(logfile, "evtchn_send failed on port: %d\n", ioreq_port);
            }
        }
    }
    destroy_vmx_domain();
    return 0;
}
static void
qemu_vmx_reset(void *unused)
{
    char cmd[255];
    extern int domid;

    /* Pause the domain first, to avoid repeated reboot requests. */
    xc_domain_pause(xc_handle, domid);

    sprintf(cmd, "xm shutdown -R %d", domid);
    system(cmd);
}
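
/*
 * Per-"CPU" initialisation for the device model: allocate the (mostly
 * unused) CPUX86State, register the reset handler, then open the event
 * channel device and bind it to the I/O request port.  Only one CPU
 * object may own the event-channel fd.
 */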
CPUState *
cpu_init()
{
    CPUX86State *env;

    cpu_exec_init();
    qemu_register_reset(qemu_vmx_reset, NULL);
    env = malloc(sizeof(CPUX86State));
    if (!env)
        return NULL;
    memset(env, 0, sizeof(CPUX86State));

    cpu_single_env = env;

    if (evtchn_fd != -1) /* the evtchn has already been opened by another cpu object */
        return NULL;

    /* Use non-blocking reads rather than polling; this may change in future. */
    evtchn_fd = open("/dev/xen/evtchn", O_RDWR | O_NONBLOCK);
    if (evtchn_fd == -1) {
        perror("open");
        return NULL;
    }

    fprintf(logfile, "listening to port: %d\n", ioreq_port);
    /* unmask the wanted port -- bind */
    if (ioctl(evtchn_fd, ('E'<<8)|2, ioreq_port) == -1) {
        perror("ioctl");
        return NULL;
    }

    return env;
}