debuggers.hg

view xen/arch/ia64/xen/fw_emul.c @ 17986:f2148e532c81

x86 hvm: Fix RTC handling.
1. Clean up initialisation/destruction.
2. Better handle per-domain time-offset changes.
Signed-off-by: Keir Fraser <keir.fraser@citrix.com>
author Keir Fraser <keir.fraser@citrix.com>
date Wed Jul 02 17:25:05 2008 +0100 (2008-07-02)
parents f04ce41dab84
children 3acca92b9597
line source
1 /*
2 * fw_emul.c:
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License along with
14 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
15 * Place - Suite 330, Boston, MA 02111-1307 USA.
16 *
17 */
18 #include <xen/config.h>
19 #include <asm/system.h>
20 #include <asm/pgalloc.h>
22 #include <linux/efi.h>
23 #include <asm/pal.h>
24 #include <asm/sal.h>
25 #include <asm/sn/sn_sal.h>
26 #include <asm/sn/hubdev.h>
27 #include <asm/xenmca.h>
29 #include <public/sched.h>
30 #include "hpsim_ssc.h"
31 #include <asm/vcpu.h>
32 #include <asm/vmx_vcpu.h>
33 #include <asm/dom_fw.h>
34 #include <asm/uaccess.h>
35 #include <xen/console.h>
36 #include <xen/hypercall.h>
37 #include <xen/softirq.h>
38 #include <xen/time.h>
39 #include <asm/debugger.h>
40 #include <asm/vmx_phy_mode.h>
/* Serializes all emulated EFI runtime time services (get/set time,
 * wakeup time); the underlying firmware calls are not reentrant. */
static DEFINE_SPINLOCK(efi_time_services_lock);
/* Machine-check parameters registered by dom0 via SAL_MC_SET_PARAMS,
 * indexed by parameter type (one slot per type up to CPE_INT). */
struct sal_mc_params {
	u64 param_type;   /* which SAL_MC_PARAM_* this slot holds */
	u64 i_or_m;       /* interrupt- or memory-based notification */
	u64 i_or_m_val;   /* vector number or address, per i_or_m */
	u64 timeout;      /* rendezvous timeout */
	u64 rz_always;    /* rendezvous-always flag */
} sal_mc_params[SAL_MC_PARAM_CPE_INT + 1];
/* OS entry points registered by dom0 via SAL_SET_VECTORS, indexed by
 * vector type (MCA / INIT / boot rendezvous). */
struct sal_vectors {
	u64 vector_type;
	u64 handler_addr1; /* primary handler entry point */
	u64 gp1;           /* gp value for the primary handler */
	u64 handler_len1;
	u64 handler_addr2; /* secondary handler (reserved for some types) */
	u64 gp2;
	u64 handler_len2;
} sal_vectors[SAL_VECTOR_OS_BOOT_RENDEZ + 1];
/* Argument/result bundle passed to get_state_info_on() /
 * clear_state_info_on(), possibly on a remote CPU via
 * smp_call_function_single(). */
struct smp_call_args_t {
	u64 type;              /* SAL_INFO_TYPE_* of the record */
	u64 ret;               /* bytes returned by the SAL call */
	u64 target;            /* guest address to copy the record to */
	struct domain *domain; /* domain on whose behalf we copy */
	int corrected;         /* force severity to "corrected"? */
	int status;            /* SAL status code to hand back */
	void *data;
};
/* Scratch buffer for SAL error records, guarded by sal_record_lock;
 * defined elsewhere in the ia64 code. */
extern sal_log_record_header_t *sal_record;
DEFINE_SPINLOCK(sal_record_lock);

/* Queue of pending SAL error records per info type; see xenmca. */
extern spinlock_t sal_queue_lock;

#define IA64_SAL_NO_INFORMATION_AVAILABLE -5

#if defined(IA64_SAL_DEBUG_INFO)
static const char * const rec_name[] = { "MCA", "INIT", "CMC", "CPE" };

# define IA64_SAL_DEBUG(fmt...)	printk("sal_emulator: " fmt)
#else
# define IA64_SAL_DEBUG(fmt...)
#endif
87 void get_state_info_on(void *data) {
88 struct smp_call_args_t *arg = data;
89 int flags;
91 spin_lock_irqsave(&sal_record_lock, flags);
92 memset(sal_record, 0, ia64_sal_get_state_info_size(arg->type));
93 arg->ret = ia64_sal_get_state_info(arg->type, (u64 *)sal_record);
94 IA64_SAL_DEBUG("SAL_GET_STATE_INFO(%s) on CPU#%d returns %ld.\n",
95 rec_name[arg->type], smp_processor_id(), arg->ret);
96 if (arg->corrected) {
97 sal_record->severity = sal_log_severity_corrected;
98 IA64_SAL_DEBUG("%s: IA64_SAL_CLEAR_STATE_INFO(SAL_INFO_TYPE_MCA)"
99 " force\n", __FUNCTION__);
100 }
101 if (arg->ret > 0) {
102 /*
103 * Save current->domain and set to local(caller) domain for
104 * xencomm_paddr_to_maddr() which calculates maddr from
105 * paddr using mpa value of current->domain.
106 */
107 struct domain *save;
108 save = current->domain;
109 current->domain = arg->domain;
110 if (xencomm_copy_to_guest((void*)arg->target,
111 sal_record, arg->ret, 0)) {
112 printk("SAL_GET_STATE_INFO can't copy to user!!!!\n");
113 arg->status = IA64_SAL_NO_INFORMATION_AVAILABLE;
114 arg->ret = 0;
115 }
116 /* Restore current->domain to saved value. */
117 current->domain = save;
118 }
119 spin_unlock_irqrestore(&sal_record_lock, flags);
120 }
122 void clear_state_info_on(void *data) {
123 struct smp_call_args_t *arg = data;
125 arg->ret = ia64_sal_clear_state_info(arg->type);
126 IA64_SAL_DEBUG("SAL_CLEAR_STATE_INFO(%s) on CPU#%d returns %ld.\n",
127 rec_name[arg->type], smp_processor_id(), arg->ret);
129 }
131 struct sal_ret_values
132 sal_emulator (long index, unsigned long in1, unsigned long in2,
133 unsigned long in3, unsigned long in4, unsigned long in5,
134 unsigned long in6, unsigned long in7)
135 {
136 struct ia64_sal_retval ret_stuff;
137 unsigned long r9 = 0;
138 unsigned long r10 = 0;
139 long r11 = 0;
140 long status;
142 debugger_event(XEN_IA64_DEBUG_ON_SAL);
144 status = 0;
145 switch (index) {
146 case SAL_FREQ_BASE:
147 if (likely(!running_on_sim))
148 status = ia64_sal_freq_base(in1,&r9,&r10);
149 else switch (in1) {
150 case SAL_FREQ_BASE_PLATFORM:
151 r9 = 200000000;
152 break;
154 case SAL_FREQ_BASE_INTERVAL_TIMER:
155 r9 = 700000000;
156 break;
158 case SAL_FREQ_BASE_REALTIME_CLOCK:
159 r9 = 1;
160 break;
162 default:
163 status = -1;
164 break;
165 }
166 break;
167 case SAL_PCI_CONFIG_READ:
168 if (current->domain == dom0) {
169 u64 value;
170 // note that args 2&3 are swapped!!
171 status = ia64_sal_pci_config_read(in1,in3,in2,&value);
172 r9 = value;
173 }
174 else
175 printk("NON-PRIV DOMAIN CALLED SAL_PCI_CONFIG_READ\n");
176 break;
177 case SAL_PCI_CONFIG_WRITE:
178 if (current->domain == dom0) {
179 if (((in1 & ~0xffffffffUL) && (in4 == 0)) ||
180 (in4 > 1) ||
181 (in2 > 8) || (in2 & (in2-1)))
182 printk("*** SAL_PCI_CONF_WRITE?!?(adr=0x%lx,typ=0x%lx,sz=0x%lx,val=0x%lx)\n",
183 in1,in4,in2,in3);
184 // note that args are in a different order!!
185 status = ia64_sal_pci_config_write(in1,in4,in2,in3);
186 }
187 else
188 printk("NON-PRIV DOMAIN CALLED SAL_PCI_CONFIG_WRITE\n");
189 break;
190 case SAL_SET_VECTORS:
191 if (in1 == SAL_VECTOR_OS_BOOT_RENDEZ) {
192 if (in4 != 0 || in5 != 0 || in6 != 0 || in7 != 0) {
193 /* Sanity check: cs_length1 must be 0,
194 second vector is reserved. */
195 status = -2;
196 }
197 else {
198 struct domain *d = current->domain;
199 d->arch.sal_data->boot_rdv_ip = in2;
200 d->arch.sal_data->boot_rdv_r1 = in3;
201 }
202 }
203 else if (current->domain == dom0) {
204 if (in1 >
205 sizeof(sal_vectors)/sizeof(sal_vectors[0])-1) {
206 gdprintk(XENLOG_DEBUG,
207 "SAL_SET_VECTORS invalid in1 %ld\n",
208 in1);
209 status = -2;
210 break;
211 }
212 sal_vectors[in1].vector_type = in1;
213 sal_vectors[in1].handler_addr1 = in2;
214 sal_vectors[in1].gp1 = in3;
215 sal_vectors[in1].handler_len1 = in4;
216 sal_vectors[in1].handler_addr2 = in5;
217 sal_vectors[in1].gp2 = in6;
218 sal_vectors[in1].handler_len2 = in7;
219 } else {
220 gdprintk(XENLOG_DEBUG, "NON-PRIV DOMAIN CALLED "
221 "SAL_SET_VECTORS %ld\n", in1);
222 /*
223 * status = -2;
224 * Temporal work around untill gfw support:
225 * windows 2003 sp2/sp1 dislike -2 to crash.
226 */
227 status = 0;
228 }
229 break;
230 case SAL_GET_STATE_INFO:
231 if (current->domain == dom0) {
232 sal_queue_entry_t *e;
233 unsigned long flags;
234 struct smp_call_args_t arg;
236 spin_lock_irqsave(&sal_queue_lock, flags);
237 if (!sal_queue || list_empty(&sal_queue[in1])) {
238 sal_log_record_header_t header;
239 XEN_GUEST_HANDLE(void) handle =
240 *(XEN_GUEST_HANDLE(void)*)&in3;
242 IA64_SAL_DEBUG("SAL_GET_STATE_INFO(%s) "
243 "no sal_queue entry found.\n",
244 rec_name[in1]);
245 memset(&header, 0, sizeof(header));
247 if (copy_to_guest(handle, &header, 1)) {
248 printk("sal_emulator: "
249 "SAL_GET_STATE_INFO can't copy "
250 "empty header to user: 0x%lx\n",
251 in3);
252 }
253 status = IA64_SAL_NO_INFORMATION_AVAILABLE;
254 r9 = 0;
255 spin_unlock_irqrestore(&sal_queue_lock, flags);
256 break;
257 }
258 e = list_entry(sal_queue[in1].next,
259 sal_queue_entry_t, list);
261 list_del(&e->list);
262 spin_unlock_irqrestore(&sal_queue_lock, flags);
264 IA64_SAL_DEBUG("SAL_GET_STATE_INFO(%s <= %s) "
265 "on CPU#%d.\n",
266 rec_name[e->sal_info_type],
267 rec_name[in1], e->cpuid);
269 arg.type = e->sal_info_type;
270 arg.target = in3;
271 arg.corrected = !!((in1 != e->sal_info_type) &&
272 (e->sal_info_type == SAL_INFO_TYPE_MCA));
273 arg.domain = current->domain;
274 arg.status = 0;
276 if (e->cpuid == smp_processor_id()) {
277 IA64_SAL_DEBUG("SAL_GET_STATE_INFO: local\n");
278 get_state_info_on(&arg);
279 } else {
280 int ret;
281 IA64_SAL_DEBUG("SAL_GET_STATE_INFO: remote\n");
282 ret = smp_call_function_single(e->cpuid,
283 get_state_info_on,
284 &arg, 0, 1);
285 if (ret < 0) {
286 printk("SAL_GET_STATE_INFO "
287 "smp_call_function_single error:"
288 " %d\n", ret);
289 arg.ret = 0;
290 arg.status =
291 IA64_SAL_NO_INFORMATION_AVAILABLE;
292 }
293 }
294 r9 = arg.ret;
295 status = arg.status;
296 if (r9 == 0) {
297 xfree(e);
298 } else {
299 /* Re-add the entry to sal_queue */
300 spin_lock_irqsave(&sal_queue_lock, flags);
301 list_add(&e->list, &sal_queue[in1]);
302 spin_unlock_irqrestore(&sal_queue_lock, flags);
303 }
304 } else {
305 status = IA64_SAL_NO_INFORMATION_AVAILABLE;
306 r9 = 0;
307 }
308 break;
309 case SAL_GET_STATE_INFO_SIZE:
310 r9 = ia64_sal_get_state_info_size(in1);
311 break;
312 case SAL_CLEAR_STATE_INFO:
313 if (current->domain == dom0) {
314 sal_queue_entry_t *e;
315 unsigned long flags;
316 struct smp_call_args_t arg;
318 spin_lock_irqsave(&sal_queue_lock, flags);
319 if (list_empty(&sal_queue[in1])) {
320 IA64_SAL_DEBUG("SAL_CLEAR_STATE_INFO(%s) "
321 "no sal_queue entry found.\n",
322 rec_name[in1]);
323 status = IA64_SAL_NO_INFORMATION_AVAILABLE;
324 r9 = 0;
325 spin_unlock_irqrestore(&sal_queue_lock, flags);
326 break;
327 }
328 e = list_entry(sal_queue[in1].next,
329 sal_queue_entry_t, list);
331 list_del(&e->list);
332 spin_unlock_irqrestore(&sal_queue_lock, flags);
334 IA64_SAL_DEBUG("SAL_CLEAR_STATE_INFO(%s <= %s) "
335 "on CPU#%d.\n",
336 rec_name[e->sal_info_type],
337 rec_name[in1], e->cpuid);
339 arg.type = e->sal_info_type;
340 arg.status = 0;
342 if (e->cpuid == smp_processor_id()) {
343 IA64_SAL_DEBUG("SAL_CLEAR_STATE_INFO: local\n");
344 clear_state_info_on(&arg);
345 } else {
346 int ret;
347 IA64_SAL_DEBUG("SAL_CLEAR_STATE_INFO: remote\n");
348 ret = smp_call_function_single(e->cpuid,
349 clear_state_info_on, &arg, 0, 1);
350 if (ret < 0) {
351 printk("sal_emulator: "
352 "SAL_CLEAR_STATE_INFO "
353 "smp_call_function_single error:"
354 " %d\n", ret);
355 arg.ret = 0;
356 arg.status =
357 IA64_SAL_NO_INFORMATION_AVAILABLE;
358 }
359 }
360 r9 = arg.ret;
361 status = arg.status;
362 xfree(e);
363 }
364 break;
365 case SAL_MC_RENDEZ:
366 printk("*** CALLED SAL_MC_RENDEZ. IGNORED...\n");
367 break;
368 case SAL_MC_SET_PARAMS:
369 if (current->domain == dom0) {
370 if (in1 >
371 sizeof(sal_mc_params) / sizeof(sal_mc_params[0])) {
372 gdprintk(XENLOG_DEBUG,
373 "SAL_MC_SET_PARAMS invalid in1 %ld\n",
374 in1);
375 status = -2;
376 break;
377 }
378 sal_mc_params[in1].param_type = in1;
379 sal_mc_params[in1].i_or_m = in2;
380 sal_mc_params[in1].i_or_m_val = in3;
381 sal_mc_params[in1].timeout = in4;
382 sal_mc_params[in1].rz_always = in5;
383 } else {
384 gdprintk(XENLOG_DEBUG,
385 "*** CALLED SAL_MC_SET_PARAMS. IGNORED...\n");
386 /*
387 * status = -1;
388 * Temporal work around untill gfw support:
389 * windows 2003 sp2/sp1 dislike -1(not implemented)
390 * to crash.
391 */
392 status = 0;
393 }
394 break;
395 case SAL_CACHE_FLUSH:
396 if (1) {
397 /* Flush using SAL.
398 This method is faster but has a side effect on
399 other vcpu running on this cpu. */
400 status = ia64_sal_cache_flush (in1);
401 }
402 else {
403 /* Flush with fc all the domain.
404 This method is slower but has no side effects. */
405 domain_cache_flush (current->domain, in1 == 4 ? 1 : 0);
406 status = 0;
407 }
408 break;
409 case SAL_CACHE_INIT:
410 printk("*** CALLED SAL_CACHE_INIT. IGNORED...\n");
411 break;
412 case SAL_UPDATE_PAL:
413 printk("*** CALLED SAL_UPDATE_PAL. IGNORED...\n");
414 break;
415 case SAL_PHYSICAL_ID_INFO:
416 status = -1;
417 break;
418 case SAL_XEN_SAL_RETURN:
419 if (!test_and_set_bit(_VPF_down, &current->pause_flags))
420 vcpu_sleep_nosync(current);
421 break;
422 case SN_SAL_GET_MASTER_NASID:
423 status = -1;
424 if (current->domain == dom0) {
425 /* printk("*** Emulating SN_SAL_GET_MASTER_NASID ***\n"); */
426 SAL_CALL_NOLOCK(ret_stuff, SN_SAL_GET_MASTER_NASID,
427 0, 0, 0, 0, 0, 0, 0);
428 status = ret_stuff.status;
429 r9 = ret_stuff.v0;
430 r10 = ret_stuff.v1;
431 r11 = ret_stuff.v2;
432 }
433 break;
434 case SN_SAL_GET_KLCONFIG_ADDR:
435 status = -1;
436 if (current->domain == dom0) {
437 /* printk("*** Emulating SN_SAL_GET_KLCONFIG_ADDR ***\n"); */
438 SAL_CALL_NOLOCK(ret_stuff, SN_SAL_GET_KLCONFIG_ADDR,
439 in1, 0, 0, 0, 0, 0, 0);
440 status = ret_stuff.status;
441 r9 = ret_stuff.v0;
442 r10 = ret_stuff.v1;
443 r11 = ret_stuff.v2;
444 }
445 break;
446 case SN_SAL_GET_SAPIC_INFO:
447 status = -1;
448 if (current->domain == dom0) {
449 /* printk("*** Emulating SN_SAL_GET_SAPIC_INFO ***\n"); */
450 SAL_CALL_NOLOCK(ret_stuff, SN_SAL_GET_SAPIC_INFO,
451 in1, 0, 0, 0, 0, 0, 0);
452 status = ret_stuff.status;
453 r9 = ret_stuff.v0;
454 r10 = ret_stuff.v1;
455 r11 = ret_stuff.v2;
456 }
457 break;
458 case SN_SAL_GET_SN_INFO:
459 status = -1;
460 if (current->domain == dom0) {
461 /* printk("*** Emulating SN_SAL_GET_SN_INFO ***\n"); */
462 SAL_CALL_NOLOCK(ret_stuff, SN_SAL_GET_SN_INFO,
463 in1, 0, 0, 0, 0, 0, 0);
464 status = ret_stuff.status;
465 r9 = ret_stuff.v0;
466 r10 = ret_stuff.v1;
467 r11 = ret_stuff.v2;
468 }
469 break;
470 case SN_SAL_IOIF_GET_HUBDEV_INFO:
471 status = -1;
472 if (current->domain == dom0) {
473 /* printk("*** Emulating SN_SAL_IOIF_GET_HUBDEV_INFO ***\n"); */
474 SAL_CALL_NOLOCK(ret_stuff, SN_SAL_IOIF_GET_HUBDEV_INFO,
475 in1, in2, 0, 0, 0, 0, 0);
476 status = ret_stuff.status;
477 r9 = ret_stuff.v0;
478 r10 = ret_stuff.v1;
479 r11 = ret_stuff.v2;
480 }
481 break;
482 case SN_SAL_IOIF_INIT:
483 status = -1;
484 if (current->domain == dom0) {
485 /* printk("*** Emulating SN_SAL_IOIF_INIT ***\n"); */
486 SAL_CALL_NOLOCK(ret_stuff, SN_SAL_IOIF_INIT,
487 0, 0, 0, 0, 0, 0, 0);
488 status = ret_stuff.status;
489 r9 = ret_stuff.v0;
490 r10 = ret_stuff.v1;
491 r11 = ret_stuff.v2;
492 }
493 break;
494 case SN_SAL_GET_PROM_FEATURE_SET:
495 status = -1;
496 if (current->domain == dom0) {
497 /* printk("*** Emulating SN_SAL_GET_PROM_FEATURE_SET ***\n"); */
498 SAL_CALL_NOLOCK(ret_stuff, SN_SAL_GET_PROM_FEATURE_SET,
499 in1, 0, 0, 0, 0, 0, 0);
500 status = ret_stuff.status;
501 r9 = ret_stuff.v0;
502 r10 = ret_stuff.v1;
503 r11 = ret_stuff.v2;
504 }
505 break;
506 case SN_SAL_SET_OS_FEATURE_SET:
507 status = -1;
508 if (current->domain == dom0) {
509 /* printk("*** Emulating SN_SAL_SET_OS_FEATURE_SET ***\n"); */
510 SAL_CALL_NOLOCK(ret_stuff, SN_SAL_SET_OS_FEATURE_SET,
511 in1, 0, 0, 0, 0, 0, 0);
512 status = ret_stuff.status;
513 r9 = ret_stuff.v0;
514 r10 = ret_stuff.v1;
515 r11 = ret_stuff.v2;
516 }
517 break;
518 case SN_SAL_SET_ERROR_HANDLING_FEATURES:
519 status = -1;
520 if (current->domain == dom0) {
521 /* printk("*** Emulating SN_SAL_SET_ERROR_HANDLING_FEATURES ***\n"); */
522 SAL_CALL_NOLOCK(ret_stuff,
523 SN_SAL_SET_ERROR_HANDLING_FEATURES,
524 in1, 0, 0, 0, 0, 0, 0);
525 status = ret_stuff.status;
526 r9 = ret_stuff.v0;
527 r10 = ret_stuff.v1;
528 r11 = ret_stuff.v2;
529 }
530 break;
531 #if 0
532 /*
533 * Somehow ACPI breaks if allowing this one
534 */
535 case SN_SAL_SET_CPU_NUMBER:
536 status = -1;
537 if (current->domain == dom0) {
538 printk("*** Emulating SN_SAL_SET_CPU_NUMBER ***\n");
539 SAL_CALL_NOLOCK(ret_stuff, SN_SAL_SET_CPU_NUMBER,
540 in1, 0, 0, 0, 0, 0, 0);
541 status = ret_stuff.status;
542 r9 = ret_stuff.v0;
543 r10 = ret_stuff.v1;
544 r11 = ret_stuff.v2;
545 }
546 break;
547 #endif
548 case SN_SAL_LOG_CE:
549 status = -1;
550 if (current->domain == dom0) {
551 static int log_ce = 0;
552 if (!log_ce) {
553 printk("*** Emulating SN_SAL_LOG_CE *** "
554 " this will only be printed once\n");
555 log_ce = 1;
556 }
557 SAL_CALL_NOLOCK(ret_stuff, SN_SAL_LOG_CE,
558 0, 0, 0, 0, 0, 0, 0);
559 status = ret_stuff.status;
560 r9 = ret_stuff.v0;
561 r10 = ret_stuff.v1;
562 r11 = ret_stuff.v2;
563 }
564 break;
565 case SN_SAL_IOIF_GET_DEVICE_DMAFLUSH_LIST:
566 status = -1;
567 if (current->domain == dom0) {
568 struct sn_flush_device_common flush;
569 int flush_size;
571 flush_size = sizeof(struct sn_flush_device_common);
572 memset(&flush, 0, flush_size);
573 SAL_CALL_NOLOCK(ret_stuff,
574 SN_SAL_IOIF_GET_DEVICE_DMAFLUSH_LIST,
575 in1, in2, in3, &flush, 0, 0, 0);
576 #if 0
577 printk("*** Emulating "
578 "SN_SAL_IOIF_GET_DEVICE_DMAFLUSH_LIST ***\n");
579 #endif
580 if (ret_stuff.status == SALRET_OK) {
581 XEN_GUEST_HANDLE(void) handle =
582 *(XEN_GUEST_HANDLE(void)*)&in4;
583 if (copy_to_guest(handle, &flush, 1)) {
584 printk("SN_SAL_IOIF_GET_DEVICE_"
585 "DMAFLUSH_LIST can't copy "
586 "to user!\n");
587 ret_stuff.status = SALRET_ERROR;
588 }
589 }
591 status = ret_stuff.status;
592 r9 = ret_stuff.v0;
593 r10 = ret_stuff.v1;
594 r11 = ret_stuff.v2;
595 }
596 break;
597 default:
598 printk("*** CALLED SAL_ WITH UNKNOWN INDEX (%lx). "
599 "IGNORED...\n", index);
600 status = -1;
601 break;
602 }
603 return ((struct sal_ret_values) {status, r9, r10, r11});
604 }
/*
 * Copy up to one page from Xen to a guest virtual address "to".
 * Returns 0 on success, non-zero on failure.  For VMX (fully
 * virtualized) domains the guest VA is first translated to a guest
 * physical address through the vTLB/VHPT and handed to xencomm; for PV
 * domains a direct copy_to_user() is used after checking that the
 * destination does not overlap the VMM's own address range.
 */
static int
safe_copy_to_guest(unsigned long to, void *from, long size)
{
	BUG_ON((unsigned)size > PAGE_SIZE);

	if (VMX_DOMAIN(current)) {
		if (is_virtual_mode(current)) {
			thash_data_t *data;
			unsigned long gpa, poff;

			/* The caller must provide a DTR or DTC mapping */
			data = vtlb_lookup(current, to, DSIDE_TLB);
			if (data) {
				/* vTLB entries hold guest physical frames
				 * directly. */
				gpa = data->page_flags & _PAGE_PPN_MASK;
			} else {
				/* Fall back to the VHPT, which holds machine
				 * frames: convert back to a guest physical
				 * address. */
				data = vhpt_lookup(to);
				if (!data)
					return -1;
				gpa = __mpa_to_gpa(
					data->page_flags & _PAGE_PPN_MASK);
				gpa &= _PAGE_PPN_MASK;
			}
			/* Reject copies that would cross the mapping's
			 * page boundary. */
			poff = POFFSET(to, data->ps);
			if (poff + size > PSIZE(data->ps))
				return -1;
			to = PAGEALIGN(gpa, data->ps) | poff;
		}
		/* Mark the address as an inline (non-descriptor) xencomm
		 * destination. */
		to |= XENCOMM_INLINE_FLAG;
		if (xencomm_copy_to_guest((void *)to, from, size, 0) != 0)
			return -1;
		return 0;
	} else {
		/* check for vulnerability */
		if (IS_VMM_ADDRESS(to) || IS_VMM_ADDRESS(to + size - 1))
			panic_domain(NULL, "copy to bad address:0x%lx\n", to);
		return copy_to_user((void __user *)to, from, size);
	}
}
/* CPUs whose caches are known coherent after a PAL_CACHE_FLUSH of type
 * COHERENT; the flushing CPU is cleared from this mask. */
cpumask_t cpu_cache_coherent_map;

/* Arguments for remote_pal_cache_flush(), shared across CPUs during an
 * smp_call_function() broadcast. */
struct cache_flush_args {
	u64 cache_type;  /* PAL_CACHE_TYPE_* to flush */
	u64 operation;   /* PAL flush operation flags */
	u64 progress;    /* initial progress indicator (normally 0) */
	long status;     /* first non-zero PAL status seen, if any */
};
654 static void
655 remote_pal_cache_flush(void *v)
656 {
657 struct cache_flush_args *args = v;
658 long status;
659 u64 progress = args->progress;
661 status = ia64_pal_cache_flush(args->cache_type, args->operation,
662 &progress, NULL);
663 if (status != 0)
664 args->status = status;
665 }
667 static void
668 remote_pal_prefetch_visibility(void *v)
669 {
670 s64 trans_type = (s64)v;
671 ia64_pal_prefetch_visibility(trans_type);
672 }
/* smp_call_function() callback: drain pending memory transactions on
 * this CPU via PAL_MC_DRAIN. */
static void
remote_pal_mc_drain(void *v)
{
	ia64_pal_mc_drain();
}
/*
 * Emulate a guest PAL procedure call.  Services are either forwarded
 * to the real PAL, answered with Xen-specific virtualized values, or
 * left PAL_STATUS_UNIMPLEMENTED.  Returns the four PAL result
 * registers (status, r9-r11).
 */
struct ia64_pal_retval
xen_pal_emulator(unsigned long index, u64 in1, u64 in2, u64 in3)
{
	unsigned long r9  = 0;
	unsigned long r10 = 0;
	unsigned long r11 = 0;
	long status = PAL_STATUS_UNIMPLEMENTED;
	unsigned long flags;
	int processor;

	if (unlikely(running_on_sim))
		return pal_emulator_static(index);

	debugger_event(XEN_IA64_DEBUG_ON_PAL);

	// pal code must be mapped by a TR when pal is called, however
	// calls are rare enough that we will map it lazily rather than
	// at every context switch
	//efi_map_pal_code();
	switch (index) {
	    case PAL_MEM_ATTRIB:
		status = ia64_pal_mem_attrib(&r9);
		break;
	    case PAL_FREQ_BASE:
		status = ia64_pal_freq_base(&r9);
		/* Some platforms only expose the base frequency through
		 * SAL; fall back to it. */
		if (status == PAL_STATUS_UNIMPLEMENTED) {
			status = ia64_sal_freq_base(0, &r9, &r10);
			r10 = 0;
		}
		break;
	    case PAL_PROC_GET_FEATURES:
		status = ia64_pal_proc_get_features(&r9,&r10,&r11);
		break;
	    case PAL_BUS_GET_FEATURES:
		status = ia64_pal_bus_get_features(
			(pal_bus_features_u_t *) &r9,
			(pal_bus_features_u_t *) &r10,
			(pal_bus_features_u_t *) &r11);
		break;
	    case PAL_FREQ_RATIOS:
		status = ia64_pal_freq_ratios(
			(struct pal_freq_ratio *) &r9,
			(struct pal_freq_ratio *) &r10,
			(struct pal_freq_ratio *) &r11);
		break;
	    case PAL_PTCE_INFO:
		/*
		 * return hard-coded xen-specific values because ptc.e
		 * is emulated on xen to always flush everything
		 * these values result in only one ptc.e instruction
		 */
		status = PAL_STATUS_SUCCESS;
		r10 = (1L << 32) | 1L;
		break;
	    case PAL_VERSION:
		status = ia64_pal_version(
			(pal_version_u_t *) &r9,
			(pal_version_u_t *) &r10);
		break;
	    case PAL_VM_PAGE_SIZE:
		status = ia64_pal_vm_page_size(&r9,&r10);
		break;
	    case PAL_DEBUG_INFO:
		status = ia64_pal_debug_info(&r9,&r10);
		break;
	    case PAL_CACHE_SUMMARY:
		status = ia64_pal_cache_summary(&r9,&r10);
		break;
	    case PAL_VM_SUMMARY:
		if (VMX_DOMAIN(current)) {
			/* Pass through real values, but clamp the TR counts
			 * and address/RID widths to what the VMX layer
			 * virtualizes. */
			pal_vm_info_1_u_t v1;
			pal_vm_info_2_u_t v2;
			status = ia64_pal_vm_summary((pal_vm_info_1_u_t *)&v1,
			                             (pal_vm_info_2_u_t *)&v2);
			v1.pal_vm_info_1_s.max_itr_entry = NITRS - 1;
			v1.pal_vm_info_1_s.max_dtr_entry = NDTRS - 1;
			v2.pal_vm_info_2_s.impl_va_msb -= 1;
			v2.pal_vm_info_2_s.rid_size =
				current->domain->arch.rid_bits;
			r9 = v1.pvi1_val;
			r10 = v2.pvi2_val;
		} else {
			/* Use xen-specific values.
			   hash_tag_id is somewhat random! */
			static const pal_vm_info_1_u_t v1 =
				{.pal_vm_info_1_s =
				 { .vw = 1,
				   .phys_add_size = 44,
				   .key_size = 16,
				   .max_pkr = XEN_IA64_NPKRS,
				   .hash_tag_id = 0x30,
				   .max_dtr_entry = NDTRS - 1,
				   .max_itr_entry = NITRS - 1,
				   .max_unique_tcs = 3,
				   .num_tc_levels = 2
				 }};
			pal_vm_info_2_u_t v2;
			v2.pvi2_val = 0;
			v2.pal_vm_info_2_s.rid_size =
				current->domain->arch.rid_bits;
			v2.pal_vm_info_2_s.impl_va_msb = 50;
			r9 = v1.pvi1_val;
			r10 = v2.pvi2_val;
			status = PAL_STATUS_SUCCESS;
		}
		break;
	    case PAL_VM_INFO:
		if (VMX_DOMAIN(current)) {
			status = ia64_pal_vm_info(in1, in2, 
			                          (pal_tc_info_u_t *)&r9, &r10);
			break;
		}
		/* PV: present a synthetic two-level TC hierarchy. */
		if (in1 == 0 && in2 == 2) {
			/* Level 1: VHPT  */
			const pal_tc_info_u_t v =
				{ .pal_tc_info_s = {.num_sets = 128,
				                    .associativity = 1,
				                    .num_entries = 128,
				                    .pf = 1,
				                    .unified = 1,
				                    .reduce_tr = 0,
				                    .reserved = 0}};
			r9 = v.pti_val;
			/* Only support PAGE_SIZE tc.  */
			r10 = PAGE_SIZE;
			status = PAL_STATUS_SUCCESS;
		}
	        else if (in1 == 1 && (in2 == 1 || in2 == 2)) {
			/* Level 2: itlb/dtlb, 1 entry.  */
			const pal_tc_info_u_t v =
				{ .pal_tc_info_s = {.num_sets = 1,
				                    .associativity = 1,
				                    .num_entries = 1,
				                    .pf = 1,
				                    .unified = 0,
				                    .reduce_tr = 0,
				                    .reserved = 0}};
			r9 = v.pti_val;
			/* Only support PAGE_SIZE tc.  */
			r10 = PAGE_SIZE;
			status = PAL_STATUS_SUCCESS;
		} else
			status = PAL_STATUS_EINVAL;
		break;
	    case PAL_RSE_INFO:
		status = ia64_pal_rse_info(&r9, (pal_hints_u_t *)&r10);
		break;
	    case PAL_REGISTER_INFO:
		status = ia64_pal_register_info(in1, &r9, &r10);
		break;
	    case PAL_CACHE_FLUSH:
		if (in3 != 0) /* Initially progress_indicator must be 0 */
			panic_domain(NULL, "PAL_CACHE_FLUSH ERROR, "
				     "progress_indicator=%lx", in3);

		/* Always call Host Pal in int=0  */
		in2 &= ~PAL_CACHE_FLUSH_CHK_INTRS;

		if (in1 != PAL_CACHE_TYPE_COHERENT) {
			/* Non-coherent flush must run on every CPU. */
			struct cache_flush_args args = {
				.cache_type = in1,
				.operation = in2,
				.progress = 0,
				.status = 0
			};
			smp_call_function(remote_pal_cache_flush,
			                  (void *)&args, 1, 1);
			if (args.status != 0)
				panic_domain(NULL, "PAL_CACHE_FLUSH ERROR, "
					     "remote status %lx", args.status);
		}

		/*
		 * Call Host PAL cache flush
		 * Clear psr.ic when call PAL_CACHE_FLUSH
		 */
		r10 = in3;
		local_irq_save(flags);
		processor = current->processor;
		status = ia64_pal_cache_flush(in1, in2, &r10, &r9);
		local_irq_restore(flags);

		if (status != 0)
			panic_domain(NULL, "PAL_CACHE_FLUSH ERROR, "
			             "status %lx", status);

		if (in1 == PAL_CACHE_TYPE_COHERENT) {
			/* Remember that all other CPUs still need a flush
			 * before they can assume coherency. */
			cpus_setall(current->arch.cache_coherent_map);
			cpu_clear(processor, current->arch.cache_coherent_map);
			cpus_setall(cpu_cache_coherent_map);
			cpu_clear(processor, cpu_cache_coherent_map);
		}
		break;
	    case PAL_PERF_MON_INFO:
		{
			unsigned long pm_buffer[16];
			status = ia64_pal_perf_mon_info(
					pm_buffer,
					(pal_perf_mon_info_u_t *) &r9);
			if (status != 0) {
				printk("PAL_PERF_MON_INFO fails ret=%ld\n", status);
				break;
			}
			if (safe_copy_to_guest(
				in1, pm_buffer, sizeof(pm_buffer))) {
				status = PAL_STATUS_EINVAL;
				goto fail_to_copy;
			}
		}
		break;
	    case PAL_CACHE_INFO:
		{
			pal_cache_config_info_t ci;
			status = ia64_pal_cache_config_info(in1,in2,&ci);
			if (status != 0)
				break;
			r9 = ci.pcci_info_1.pcci1_data;
			r10 = ci.pcci_info_2.pcci2_data;
		}
		break;
	    case PAL_VM_TR_READ:	/* FIXME: vcpu_get_tr?? */
		printk("%s: PAL_VM_TR_READ unimplmented, ignored\n", __func__);
		break;
	    case PAL_HALT_INFO:
	        {
			/* 1000 cycles to enter/leave low power state,
			   consumes 10 mW, implemented and cache/TLB coherent.  */
			unsigned long res = 1000UL | (1000UL << 16) | (10UL << 32)
				| (1UL << 61) | (1UL << 60);
			if (safe_copy_to_guest (in1, &res, sizeof (res))) {
				status = PAL_STATUS_EINVAL;
				goto fail_to_copy;
			}
			status = PAL_STATUS_SUCCESS;
	        }
		break;
	    case PAL_HALT:
		/* Permanently take the vcpu offline. */
		set_bit(_VPF_down, &current->pause_flags);
		vcpu_sleep_nosync(current);
		status = PAL_STATUS_SUCCESS;
		break;
	    case PAL_HALT_LIGHT:
		if (VMX_DOMAIN(current)) {
			/* Called by VTI.  */
			if (!is_unmasked_irq(current)) {
				do_sched_op_compat(SCHEDOP_block, 0);
				do_softirq();
			}
			status = PAL_STATUS_SUCCESS;
		}
		break;
	    case PAL_PLATFORM_ADDR:
		if (VMX_DOMAIN(current))
			status = PAL_STATUS_SUCCESS;
		break;
	    case PAL_FIXED_ADDR:
		status = PAL_STATUS_SUCCESS;
		r9 = current->vcpu_id;
		break;
	    case PAL_PREFETCH_VISIBILITY:
		status = ia64_pal_prefetch_visibility(in1);
		if (status == 0) {
			/* must be performed on all remote processors 
			   in the coherence domain. */
			smp_call_function(remote_pal_prefetch_visibility,
			                  (void *)in1, 1, 1);
			status = 1; /* no more necessary on remote processor */
		}
		break;
	    case PAL_MC_DRAIN:
		status = ia64_pal_mc_drain();
		/* FIXME: All vcpus likely call PAL_MC_DRAIN.
		   That causes the congestion. */
		smp_call_function(remote_pal_mc_drain, NULL, 1, 1);
		break;
	    case PAL_BRAND_INFO:
		if (in1 == 0) {
			char brand_info[128];
			status = ia64_pal_get_brand_info(brand_info);
			if (status != PAL_STATUS_SUCCESS)
				break;
			if (safe_copy_to_guest(in2, brand_info,
			                       sizeof(brand_info))) {
				status = PAL_STATUS_EINVAL;
				goto fail_to_copy;
			}
		} else {
			status = PAL_STATUS_EINVAL;
		}
		break;
	    case PAL_LOGICAL_TO_PHYSICAL:
	    case PAL_GET_PSTATE:
	    case PAL_CACHE_SHARED_INFO:
		/* Optional, no need to complain about being unimplemented */
		break;
	    default:
		printk("%s: Unimplemented PAL Call %lu\n", __func__, index);
		break;
	}
	return ((struct ia64_pal_retval) {status, r9, r10, r11});

fail_to_copy:
	gdprintk(XENLOG_WARNING,
		 "PAL(%ld) fail to copy!!! args 0x%lx 0x%lx 0x%lx\n",
		 index, in1, in2, in3);
	return ((struct ia64_pal_retval) {status, r9, r10, r11});
}
988 // given a current domain (virtual or metaphysical) address, return the virtual address
989 static unsigned long
990 efi_translate_domain_addr(unsigned long domain_addr, IA64FAULT *fault,
991 struct page_info** page)
992 {
993 struct vcpu *v = current;
994 unsigned long mpaddr = domain_addr;
995 unsigned long virt;
996 *fault = IA64_NO_FAULT;
998 again:
999 if (v->domain->arch.sal_data->efi_virt_mode) {
1000 *fault = vcpu_tpa(v, domain_addr, &mpaddr);
1001 if (*fault != IA64_NO_FAULT) return 0;
1004 virt = (unsigned long)domain_mpa_to_imva(v->domain, mpaddr);
1005 *page = virt_to_page(virt);
1006 if (get_page(*page, current->domain) == 0) {
1007 if (page_get_owner(*page) != current->domain) {
1008 // which code is appropriate?
1009 *fault = IA64_FAULT;
1010 return 0;
1012 goto again;
1015 return virt;
1018 static efi_status_t
1019 efi_emulate_get_time(
1020 unsigned long tv_addr, unsigned long tc_addr,
1021 IA64FAULT *fault)
1023 unsigned long tv, tc = 0;
1024 struct page_info *tv_page = NULL;
1025 struct page_info *tc_page = NULL;
1026 efi_status_t status = 0;
1027 efi_time_t *tvp;
1028 struct tm timeptr;
1029 unsigned long xtimesec;
1031 tv = efi_translate_domain_addr(tv_addr, fault, &tv_page);
1032 if (*fault != IA64_NO_FAULT)
1033 goto errout;
1034 if (tc_addr) {
1035 tc = efi_translate_domain_addr(tc_addr, fault, &tc_page);
1036 if (*fault != IA64_NO_FAULT)
1037 goto errout;
1040 spin_lock(&efi_time_services_lock);
1041 status = (*efi.get_time)((efi_time_t *) tv, (efi_time_cap_t *) tc);
1042 tvp = (efi_time_t *)tv;
1043 xtimesec = mktime(tvp->year, tvp->month, tvp->day, tvp->hour,
1044 tvp->minute, tvp->second);
1045 xtimesec += current->domain->time_offset_seconds;
1046 timeptr = gmtime(xtimesec);
1047 tvp->second = timeptr.tm_sec;
1048 tvp->minute = timeptr.tm_min;
1049 tvp->hour = timeptr.tm_hour;
1050 tvp->day = timeptr.tm_mday;
1051 tvp->month = timeptr.tm_mon + 1;
1052 tvp->year = timeptr.tm_year + 1900;
1053 spin_unlock(&efi_time_services_lock);
1055 errout:
1056 if (tc_page != NULL)
1057 put_page(tc_page);
1058 if (tv_page != NULL)
1059 put_page(tv_page);
1061 return status;
1064 void domain_set_time_offset(struct domain *d, int32_t time_offset_seconds)
1066 d->time_offset_seconds = time_offset_seconds;
1069 static efi_status_t
1070 efi_emulate_set_time(
1071 unsigned long tv_addr, IA64FAULT *fault)
1073 unsigned long tv;
1074 struct page_info *tv_page = NULL;
1075 efi_status_t status = 0;
1077 if (current->domain != dom0)
1078 return EFI_UNSUPPORTED;
1080 tv = efi_translate_domain_addr(tv_addr, fault, &tv_page);
1081 if (*fault != IA64_NO_FAULT)
1082 goto errout;
1084 spin_lock(&efi_time_services_lock);
1085 status = (*efi.set_time)((efi_time_t *)tv);
1086 spin_unlock(&efi_time_services_lock);
1088 errout:
1089 if (tv_page != NULL)
1090 put_page(tv_page);
1092 return status;
1095 static efi_status_t
1096 efi_emulate_get_wakeup_time(
1097 unsigned long e_addr, unsigned long p_addr,
1098 unsigned long tv_addr, IA64FAULT *fault)
1100 unsigned long enabled, pending, tv;
1101 struct page_info *e_page = NULL, *p_page = NULL,
1102 *tv_page = NULL;
1103 efi_status_t status = 0;
1105 if (current->domain != dom0)
1106 return EFI_UNSUPPORTED;
1108 if (!e_addr || !p_addr || !tv_addr)
1109 return EFI_INVALID_PARAMETER;
1111 enabled = efi_translate_domain_addr(e_addr, fault, &e_page);
1112 if (*fault != IA64_NO_FAULT)
1113 goto errout;
1114 pending = efi_translate_domain_addr(p_addr, fault, &p_page);
1115 if (*fault != IA64_NO_FAULT)
1116 goto errout;
1117 tv = efi_translate_domain_addr(tv_addr, fault, &tv_page);
1118 if (*fault != IA64_NO_FAULT)
1119 goto errout;
1121 spin_lock(&efi_time_services_lock);
1122 status = (*efi.get_wakeup_time)((efi_bool_t *)enabled,
1123 (efi_bool_t *)pending,
1124 (efi_time_t *)tv);
1125 spin_unlock(&efi_time_services_lock);
1127 errout:
1128 if (e_page != NULL)
1129 put_page(e_page);
1130 if (p_page != NULL)
1131 put_page(p_page);
1132 if (tv_page != NULL)
1133 put_page(tv_page);
1135 return status;
1138 static efi_status_t
1139 efi_emulate_set_wakeup_time(
1140 unsigned long enabled, unsigned long tv_addr,
1141 IA64FAULT *fault)
1143 unsigned long tv = 0;
1144 struct page_info *tv_page = NULL;
1145 efi_status_t status = 0;
1147 if (current->domain != dom0)
1148 return EFI_UNSUPPORTED;
1150 if (tv_addr) {
1151 tv = efi_translate_domain_addr(tv_addr, fault, &tv_page);
1152 if (*fault != IA64_NO_FAULT)
1153 goto errout;
1156 spin_lock(&efi_time_services_lock);
1157 status = (*efi.set_wakeup_time)((efi_bool_t)enabled,
1158 (efi_time_t *)tv);
1159 spin_unlock(&efi_time_services_lock);
1161 errout:
1162 if (tv_page != NULL)
1163 put_page(tv_page);
1165 return status;
/*
 * FW_HYPERCALL_EFI_GET_VARIABLE handler: translate dom0's guest
 * (metaphysical) pointer arguments and forward the request to the real
 * EFI GetVariable() runtime service.
 *
 * name_addr/vendor_addr/data_size_addr/data_addr: required guest
 *   addresses of the variable name, vendor GUID, data-size in/out
 *   parameter and data buffer.
 * attr_addr: optional guest address for the attributes output; when 0,
 *   NULL is passed to the firmware.
 * fault: out parameter, set by efi_translate_domain_addr() on a failed
 *   translation; the 0 status returned in that case is meaningless.
 *
 * NOTE(review): unlike the time services above, this call is made
 * without taking efi_time_services_lock (or any other lock) — confirm
 * the firmware variable services tolerate concurrent invocation.
 */
static efi_status_t
efi_emulate_get_variable(
	unsigned long name_addr, unsigned long vendor_addr,
	unsigned long attr_addr, unsigned long data_size_addr,
	unsigned long data_addr, IA64FAULT *fault)
{
	unsigned long name, vendor, attr = 0, data_size, data;
	struct page_info *name_page = NULL, *vendor_page = NULL,
	                 *attr_page = NULL, *data_size_page = NULL,
	                 *data_page = NULL;
	efi_status_t status = 0;

	/* Only the hardware domain may touch the physical variable store. */
	if (current->domain != dom0)
		return EFI_UNSUPPORTED;

	/* Each translation takes a page reference, dropped at errout. */
	name = efi_translate_domain_addr(name_addr, fault, &name_page);
	if (*fault != IA64_NO_FAULT)
		goto errout;
	vendor = efi_translate_domain_addr(vendor_addr, fault, &vendor_page);
	if (*fault != IA64_NO_FAULT)
		goto errout;
	data_size = efi_translate_domain_addr(data_size_addr, fault,
	                                      &data_size_page);
	if (*fault != IA64_NO_FAULT)
		goto errout;
	data = efi_translate_domain_addr(data_addr, fault, &data_page);
	if (*fault != IA64_NO_FAULT)
		goto errout;
	/* Attributes pointer is optional; attr stays 0 (NULL) if absent. */
	if (attr_addr) {
		attr = efi_translate_domain_addr(attr_addr, fault, &attr_page);
		if (*fault != IA64_NO_FAULT)
			goto errout;
	}

	status = (*efi.get_variable)((efi_char16_t *)name,
	                             (efi_guid_t *)vendor,
	                             (u32 *)attr,
	                             (unsigned long *)data_size,
	                             (void *)data);

errout:
	if (name_page != NULL)
		put_page(name_page);
	if (vendor_page != NULL)
		put_page(vendor_page);
	if (attr_page != NULL)
		put_page(attr_page);
	if (data_size_page != NULL)
		put_page(data_size_page);
	if (data_page != NULL)
		put_page(data_page);

	return status;
}
1223 static efi_status_t
1224 efi_emulate_get_next_variable(
1225 unsigned long name_size_addr, unsigned long name_addr,
1226 unsigned long vendor_addr, IA64FAULT *fault)
1228 unsigned long name_size, name, vendor;
1229 struct page_info *name_size_page = NULL, *name_page = NULL,
1230 *vendor_page = NULL;
1231 efi_status_t status = 0;
1233 if (current->domain != dom0)
1234 return EFI_UNSUPPORTED;
1236 name_size = efi_translate_domain_addr(name_size_addr, fault,
1237 &name_size_page);
1238 if (*fault != IA64_NO_FAULT)
1239 goto errout;
1240 name = efi_translate_domain_addr(name_addr, fault, &name_page);
1241 if (*fault != IA64_NO_FAULT)
1242 goto errout;
1243 vendor = efi_translate_domain_addr(vendor_addr, fault, &vendor_page);
1244 if (*fault != IA64_NO_FAULT)
1245 goto errout;
1247 status = (*efi.get_next_variable)((unsigned long *)name_size,
1248 (efi_char16_t *)name,
1249 (efi_guid_t *)vendor);
1251 errout:
1252 if (name_size_page != NULL)
1253 put_page(name_size_page);
1254 if (name_page != NULL)
1255 put_page(name_page);
1256 if (vendor_page != NULL)
1257 put_page(vendor_page);
1259 return status;
/*
 * FW_HYPERCALL_EFI_SET_VARIABLE handler: translate dom0's guest
 * (metaphysical) pointer arguments and forward the request to the real
 * EFI SetVariable() runtime service.
 *
 * name_addr/vendor_addr/data_addr: guest addresses of the variable
 *   name, vendor GUID and data buffer.
 * attr/data_size: passed through to the firmware by value, untranslated.
 * fault: out parameter, set by efi_translate_domain_addr() on a failed
 *   translation; the 0 status returned in that case is meaningless.
 */
static efi_status_t
efi_emulate_set_variable(
	unsigned long name_addr, unsigned long vendor_addr,
	unsigned long attr, unsigned long data_size,
	unsigned long data_addr, IA64FAULT *fault)
{
	unsigned long name, vendor, data;
	struct page_info *name_page = NULL, *vendor_page = NULL,
	                 *data_page = NULL;
	efi_status_t status = 0;

	/* Only the hardware domain may touch the physical variable store. */
	if (current->domain != dom0)
		return EFI_UNSUPPORTED;

	/* Each translation takes a page reference, dropped at errout. */
	name = efi_translate_domain_addr(name_addr, fault, &name_page);
	if (*fault != IA64_NO_FAULT)
		goto errout;
	vendor = efi_translate_domain_addr(vendor_addr, fault, &vendor_page);
	if (*fault != IA64_NO_FAULT)
		goto errout;
	data = efi_translate_domain_addr(data_addr, fault, &data_page);
	if (*fault != IA64_NO_FAULT)
		goto errout;

	status = (*efi.set_variable)((efi_char16_t *)name,
	                             (efi_guid_t *)vendor,
	                             attr,
	                             data_size,
	                             (void *)data);

errout:
	if (name_page != NULL)
		put_page(name_page);
	if (vendor_page != NULL)
		put_page(vendor_page);
	if (data_page != NULL)
		put_page(data_page);

	return status;
}
/*
 * FW_HYPERCALL_EFI_SET_VIRTUAL_ADDRESS_MAP handler.
 *
 * Rather than calling the physical SetVirtualAddressMap() (which can only
 * ever be invoked once system-wide), this re-patches the per-domain
 * firmware hypercall stubs so that they carry the *virtual* address the
 * domain assigned to the EFI_PAL_CODE region, then records that the
 * domain now runs EFI in virtual mode.
 *
 * memory_map_size/descriptor_size/descriptor_version: as in the EFI call;
 *   version and size must match what this Xen was built against.
 * virtual_map: guest pointer to the descriptor array, read with
 *   copy_from_user() one entry at a time.
 *
 * Returns EFI_INVALID_PARAMETER on descriptor mismatch, EFI_UNSUPPORTED
 * if already in virtual mode or on a copy fault, EFI_SUCCESS otherwise.
 */
static efi_status_t
efi_emulate_set_virtual_address_map(
	unsigned long memory_map_size, unsigned long descriptor_size,
	u32 descriptor_version, efi_memory_desc_t *virtual_map)
{
	void *efi_map_start, *efi_map_end, *p;
	efi_memory_desc_t entry, *md = &entry;
	u64 efi_desc_size;

	unsigned long *vfn;
	struct domain *d = current->domain;
	efi_runtime_services_t *efi_runtime = d->arch.efi_runtime;
	fpswa_interface_t *fpswa_inf = d->arch.fpswa_inf;

	if (descriptor_version != EFI_MEMDESC_VERSION) {
		printk ("efi_emulate_set_virtual_address_map: memory "
		        "descriptor version unmatched (%d vs %d)\n",
		        (int)descriptor_version, EFI_MEMDESC_VERSION);
		return EFI_INVALID_PARAMETER;
	}

	if (descriptor_size != sizeof(efi_memory_desc_t)) {
		printk ("efi_emulate_set_virtual_address_map: memory descriptor size unmatched\n");
		return EFI_INVALID_PARAMETER;
	}

	/* SetVirtualAddressMap() may only be applied once per domain. */
	if (d->arch.sal_data->efi_virt_mode)
		return EFI_UNSUPPORTED;

	efi_map_start = virtual_map;
	efi_map_end = efi_map_start + memory_map_size;
	efi_desc_size = sizeof(efi_memory_desc_t);

	for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
		if (copy_from_user(&entry, p, sizeof(efi_memory_desc_t))) {
			printk ("efi_emulate_set_virtual_address_map: copy_from_user() fault. addr=0x%p\n", p);
			return EFI_UNSUPPORTED;
		}

		/* skip over non-PAL_CODE memory descriptors; EFI_RUNTIME is included in PAL_CODE. */
		if (md->type != EFI_PAL_CODE)
			continue;

/*
 * Rewrite one two-word function descriptor (entry point, gp) inside the
 * domain's firmware image: the entry point becomes the hypercall slot's
 * offset within the domain's chosen virtual PAL_CODE mapping.
 */
#define EFI_HYPERCALL_PATCH_TO_VIRT(tgt,call) \
	do { \
		vfn = (unsigned long *) domain_mpa_to_imva(d, tgt); \
		*vfn++ = FW_HYPERCALL_##call##_INDEX * 16UL + md->virt_addr; \
		*vfn++ = 0; \
	} while (0)

		EFI_HYPERCALL_PATCH_TO_VIRT(efi_runtime->get_time,EFI_GET_TIME);
		EFI_HYPERCALL_PATCH_TO_VIRT(efi_runtime->set_time,EFI_SET_TIME);
		EFI_HYPERCALL_PATCH_TO_VIRT(efi_runtime->get_wakeup_time,EFI_GET_WAKEUP_TIME);
		EFI_HYPERCALL_PATCH_TO_VIRT(efi_runtime->set_wakeup_time,EFI_SET_WAKEUP_TIME);
		EFI_HYPERCALL_PATCH_TO_VIRT(efi_runtime->set_virtual_address_map,EFI_SET_VIRTUAL_ADDRESS_MAP);
		EFI_HYPERCALL_PATCH_TO_VIRT(efi_runtime->get_variable,EFI_GET_VARIABLE);
		EFI_HYPERCALL_PATCH_TO_VIRT(efi_runtime->get_next_variable,EFI_GET_NEXT_VARIABLE);
		EFI_HYPERCALL_PATCH_TO_VIRT(efi_runtime->set_variable,EFI_SET_VARIABLE);
		EFI_HYPERCALL_PATCH_TO_VIRT(efi_runtime->get_next_high_mono_count,EFI_GET_NEXT_HIGH_MONO_COUNT);
		EFI_HYPERCALL_PATCH_TO_VIRT(efi_runtime->reset_system,EFI_RESET_SYSTEM);

		/* The FPSWA descriptor is patched by hand the same way. */
		vfn = (unsigned long *) domain_mpa_to_imva(d, (unsigned long) fpswa_inf->fpswa);
		*vfn++ = FW_HYPERCALL_FPSWA_PATCH_INDEX * 16UL + md->virt_addr;
		*vfn = 0;
		fpswa_inf->fpswa = (void *) (FW_HYPERCALL_FPSWA_ENTRY_INDEX * 16UL + md->virt_addr);
		break;	/* only one EFI_PAL_CODE region is patched */
	}

	/* The virtual address map has been applied. */
	d->arch.sal_data->efi_virt_mode = 1;

	return EFI_SUCCESS;
}
/*
 * Top-level dispatcher for emulated EFI runtime-service hypercalls.
 *
 * regs->r2 selects the service (FW_HYPERCALL_EFI_* index); the service
 * arguments are fetched from the stacked registers r32... via
 * vcpu_get_gr().  *fault is initialized to IA64_NO_FAULT and may be set
 * by the individual handlers when a guest address fails to translate.
 *
 * Returns the EFI status of the emulated service (EFI_UNSUPPORTED for
 * unknown or unimplemented services).
 */
efi_status_t
efi_emulator (struct pt_regs *regs, IA64FAULT *fault)
{
	struct vcpu *v = current;
	efi_status_t status;

	debugger_event(XEN_IA64_DEBUG_ON_EFI);

	*fault = IA64_NO_FAULT;

	switch (regs->r2) {
	    case FW_HYPERCALL_EFI_RESET_SYSTEM:
	        {
		    u8 reason;
		    unsigned long val = vcpu_get_gr(v,32);
		    /* Map the EFI reset type onto a Xen shutdown reason;
		     * anything other than SHUTDOWN is treated as a reboot. */
		    switch (val)
		    {
		    case EFI_RESET_SHUTDOWN:
			reason = SHUTDOWN_poweroff;
			break;
		    case EFI_RESET_COLD:
		    case EFI_RESET_WARM:
		    default:
			reason = SHUTDOWN_reboot;
			break;
		    }
		    domain_shutdown (current->domain, reason);
		}
		/* ResetSystem() does not return; if we get here the
		 * shutdown is pending and the call reports UNSUPPORTED. */
		status = EFI_UNSUPPORTED;
		break;
	    case FW_HYPERCALL_EFI_GET_TIME:
		status = efi_emulate_get_time (
			vcpu_get_gr(v,32),
			vcpu_get_gr(v,33),
			fault);
		break;
	    case FW_HYPERCALL_EFI_SET_TIME:
		status = efi_emulate_set_time (
			vcpu_get_gr(v,32),
			fault);
		break;
	    case FW_HYPERCALL_EFI_GET_WAKEUP_TIME:
		status = efi_emulate_get_wakeup_time (
			vcpu_get_gr(v,32),
			vcpu_get_gr(v,33),
			vcpu_get_gr(v,34),
			fault);
		break;
	    case FW_HYPERCALL_EFI_SET_WAKEUP_TIME:
		status = efi_emulate_set_wakeup_time (
			vcpu_get_gr(v,32),
			vcpu_get_gr(v,33),
			fault);
		break;
	    case FW_HYPERCALL_EFI_GET_VARIABLE:
		status = efi_emulate_get_variable (
			vcpu_get_gr(v,32),
			vcpu_get_gr(v,33),
			vcpu_get_gr(v,34),
			vcpu_get_gr(v,35),
			vcpu_get_gr(v,36),
			fault);
		break;
	    case FW_HYPERCALL_EFI_GET_NEXT_VARIABLE:
		status = efi_emulate_get_next_variable (
			vcpu_get_gr(v,32),
			vcpu_get_gr(v,33),
			vcpu_get_gr(v,34),
			fault);
		break;
	    case FW_HYPERCALL_EFI_SET_VARIABLE:
		status = efi_emulate_set_variable (
			vcpu_get_gr(v,32),
			vcpu_get_gr(v,33),
			vcpu_get_gr(v,34),
			vcpu_get_gr(v,35),
			vcpu_get_gr(v,36),
			fault);
		break;
	    case FW_HYPERCALL_EFI_SET_VIRTUAL_ADDRESS_MAP:
		status = efi_emulate_set_virtual_address_map (
			vcpu_get_gr(v,32),
			vcpu_get_gr(v,33),
			(u32) vcpu_get_gr(v,34),
			(efi_memory_desc_t *) vcpu_get_gr(v,35));
		break;
	    case FW_HYPERCALL_EFI_GET_NEXT_HIGH_MONO_COUNT:
		// FIXME: need fixes in efi.h from 2.6.9
		status = EFI_UNSUPPORTED;
		break;
	    default:
		printk("unknown ia64 fw hypercall %lx\n", regs->r2);
		status = EFI_UNSUPPORTED;
	}

	return status;
}
1475 void
1476 do_ssc(unsigned long ssc, struct pt_regs *regs)
1478 unsigned long arg0, arg1, arg2, arg3, retval;
1479 char buf[2];
1480 /**/ static int last_fd, last_count; // FIXME FIXME FIXME
1481 /**/ // BROKEN FOR MULTIPLE DOMAINS & SMP
1482 /**/ struct ssc_disk_stat { int fd; unsigned count;} *stat, last_stat;
1484 arg0 = vcpu_get_gr(current,32);
1485 switch(ssc) {
1486 case SSC_PUTCHAR:
1487 buf[0] = arg0;
1488 buf[1] = '\0';
1489 printk(buf);
1490 break;
1491 case SSC_GETCHAR:
1492 retval = ia64_ssc(0,0,0,0,ssc);
1493 vcpu_set_gr(current,8,retval,0);
1494 break;
1495 case SSC_WAIT_COMPLETION:
1496 if (arg0) { // metaphysical address
1498 arg0 = translate_domain_mpaddr(arg0, NULL);
1499 /**/ stat = (struct ssc_disk_stat *)__va(arg0);
1500 ///**/ if (stat->fd == last_fd) stat->count = last_count;
1501 /**/ stat->count = last_count;
1502 //if (last_count >= PAGE_SIZE) printk("ssc_wait: stat->fd=%d,last_fd=%d,last_count=%d\n",stat->fd,last_fd,last_count);
1503 ///**/ retval = ia64_ssc(arg0,0,0,0,ssc);
1504 /**/ retval = 0;
1506 else retval = -1L;
1507 vcpu_set_gr(current,8,retval,0);
1508 break;
1509 case SSC_OPEN:
1510 arg1 = vcpu_get_gr(current,33); // access rights
1511 if (!running_on_sim) {
1512 printk("SSC_OPEN, not implemented on hardware. (ignoring...)\n");
1513 arg0 = 0;
1515 if (arg0) { // metaphysical address
1516 arg0 = translate_domain_mpaddr(arg0, NULL);
1517 retval = ia64_ssc(arg0,arg1,0,0,ssc);
1519 else retval = -1L;
1520 vcpu_set_gr(current,8,retval,0);
1521 break;
1522 case SSC_WRITE:
1523 case SSC_READ:
1524 //if (ssc == SSC_WRITE) printk("DOING AN SSC_WRITE\n");
1525 arg1 = vcpu_get_gr(current,33);
1526 arg2 = vcpu_get_gr(current,34);
1527 arg3 = vcpu_get_gr(current,35);
1528 if (arg2) { // metaphysical address of descriptor
1529 struct ssc_disk_req *req;
1530 unsigned long mpaddr;
1531 long len;
1533 arg2 = translate_domain_mpaddr(arg2, NULL);
1534 req = (struct ssc_disk_req *) __va(arg2);
1535 req->len &= 0xffffffffL; // avoid strange bug
1536 len = req->len;
1537 /**/ last_fd = arg1;
1538 /**/ last_count = len;
1539 mpaddr = req->addr;
1540 //if (last_count >= PAGE_SIZE) printk("do_ssc: read fd=%d, addr=%p, len=%lx ",last_fd,mpaddr,len);
1541 retval = 0;
1542 if ((mpaddr & PAGE_MASK) != ((mpaddr+len-1) & PAGE_MASK)) {
1543 // do partial page first
1544 req->addr = translate_domain_mpaddr(mpaddr, NULL);
1545 req->len = PAGE_SIZE - (req->addr & ~PAGE_MASK);
1546 len -= req->len; mpaddr += req->len;
1547 retval = ia64_ssc(arg0,arg1,arg2,arg3,ssc);
1548 arg3 += req->len; // file offset
1549 /**/ last_stat.fd = last_fd;
1550 /**/ (void)ia64_ssc(__pa(&last_stat),0,0,0,SSC_WAIT_COMPLETION);
1551 //if (last_count >= PAGE_SIZE) printk("ssc(%p,%lx)[part]=%x ",req->addr,req->len,retval);
1553 if (retval >= 0) while (len > 0) {
1554 req->addr = translate_domain_mpaddr(mpaddr, NULL);
1555 req->len = (len > PAGE_SIZE) ? PAGE_SIZE : len;
1556 len -= PAGE_SIZE; mpaddr += PAGE_SIZE;
1557 retval = ia64_ssc(arg0,arg1,arg2,arg3,ssc);
1558 arg3 += req->len; // file offset
1559 // TEMP REMOVED AGAIN arg3 += req->len; // file offset
1560 /**/ last_stat.fd = last_fd;
1561 /**/ (void)ia64_ssc(__pa(&last_stat),0,0,0,SSC_WAIT_COMPLETION);
1562 //if (last_count >= PAGE_SIZE) printk("ssc(%p,%lx)=%x ",req->addr,req->len,retval);
1564 // set it back to the original value
1565 req->len = last_count;
1567 else retval = -1L;
1568 vcpu_set_gr(current,8,retval,0);
1569 //if (last_count >= PAGE_SIZE) printk("retval=%x\n",retval);
1570 break;
1571 case SSC_CONNECT_INTERRUPT:
1572 arg1 = vcpu_get_gr(current,33);
1573 arg2 = vcpu_get_gr(current,34);
1574 arg3 = vcpu_get_gr(current,35);
1575 if (!running_on_sim) {
1576 printk("SSC_CONNECT_INTERRUPT, not implemented on hardware. (ignoring...)\n");
1577 break;
1579 (void)ia64_ssc(arg0,arg1,arg2,arg3,ssc);
1580 break;
1581 case SSC_NETDEV_PROBE:
1582 vcpu_set_gr(current,8,-1L,0);
1583 break;
1584 default:
1585 panic_domain(regs,
1586 "%s: bad ssc code %lx, iip=0x%lx, b0=0x%lx\n",
1587 __func__, ssc, regs->cr_iip, regs->b0);
1588 break;
1590 vcpu_increment_iip(current);