xen/arch/ia64/xen/dom0_ops.c @ 16691:7b7b123625d0

[IA64] Fix missing put_domain in XEN_DOMCTL_set_opt_feature

Signed-off-by: Alex Williamson <alex.williamson@hp.com>
author   Alex Williamson <alex.williamson@hp.com>
date     Thu Dec 20 09:40:33 2007 -0700
parents  8d5487ca222f
children 6c0aec4604f7
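
This changeset plugs a domain reference leak: the XEN_DOMCTL_set_opt_feature
handler took a reference via get_domain_by_id() but returned without the
matching put_domain(). A minimal sketch of the corrected pattern follows
(hypothetical handler name, for illustration only; not part of the changeset):

    /* Hypothetical handler illustrating the get/put pairing. */
    long example_set_opt_feature(xen_domctl_t *op)
    {
        long ret;
        struct domain *d = get_domain_by_id(op->domain);

        if (d == NULL)
            return -EINVAL;    /* lookup failed: no reference to drop */

        ret = domain_opt_feature(d, &op->u.set_opt_feature.optf);

        put_domain(d);         /* balance get_domain_by_id() on every path */
        return ret;
    }
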
/******************************************************************************
 * Arch-specific dom0_ops.c
 *
 * Process command requests from domain-0 guest OS.
 *
 * Copyright (c) 2002, K A Fraser
 */

#include <xen/config.h>
#include <xen/types.h>
#include <xen/lib.h>
#include <xen/mm.h>
#include <public/domctl.h>
#include <public/sysctl.h>
#include <xen/sched.h>
#include <xen/event.h>
#include <asm/pdb.h>
#include <xen/trace.h>
#include <xen/console.h>
#include <xen/guest_access.h>
#include <asm/vmx.h>
#include <asm/dom_fw.h>
#include <xen/iocap.h>
#include <xen/errno.h>
#include <xen/nodemask.h>
#include <asm/dom_fw_utils.h>
#include <asm/hvm/support.h>
#include <xsm/xsm.h>
#include <public/hvm/save.h>

/* Extract the raw pointer from a guest handle. */
#define get_xen_guest_handle(val, hnd)  do { val = (hnd).p; } while (0)

extern unsigned long total_pages;
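
/*
 * arch_do_domctl: IA64-specific handling of domctl hypercalls.  Only the
 * privileged (control) domain may invoke these; unrecognized commands
 * return -ENOSYS.
 */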
long arch_do_domctl(xen_domctl_t *op, XEN_GUEST_HANDLE(xen_domctl_t) u_domctl)
{
    long ret = 0;

    if ( !IS_PRIV(current->domain) )
        return -EPERM;

    switch ( op->cmd )
    {
    case XEN_DOMCTL_getmemlist:
    {
        unsigned long i;
        struct domain *d = get_domain_by_id(op->domain);
        unsigned long start_page = op->u.getmemlist.start_pfn;
        unsigned long nr_pages = op->u.getmemlist.max_pfns;
        uint64_t mfn;

        if ( d == NULL ) {
            ret = -EINVAL;
            break;
        }
        for (i = 0; i < nr_pages; i++) {
            pte_t *pte;

            pte = (pte_t *)lookup_noalloc_domain_pte(d,
                                          (start_page + i) << PAGE_SHIFT);
            if (pte && pte_present(*pte))
                mfn = start_page + i;
            else
                mfn = INVALID_MFN;

            if ( copy_to_guest_offset(op->u.getmemlist.buffer, i, &mfn, 1) ) {
                ret = -EFAULT;
                break;
            }
        }

        op->u.getmemlist.num_pfns = i;
        if (copy_to_guest(u_domctl, op, 1))
            ret = -EFAULT;

        put_domain(d);
    }
    break;

    case XEN_DOMCTL_arch_setup:
    {
        xen_domctl_arch_setup_t *ds = &op->u.arch_setup;
        struct domain *d = get_domain_by_id(op->domain);

        if ( d == NULL ) {
            ret = -EINVAL;
            break;
        }

        if (ds->flags & XEN_DOMAINSETUP_query) {
            /* Set flags. */
            if (d->arch.is_vti)
                ds->flags |= XEN_DOMAINSETUP_hvm_guest;
            /* Set params. */
            ds->bp = 0;  /* unknown. */
            ds->maxmem = d->arch.convmem_end;
            ds->xsi_va = d->arch.shared_info_va;
            ds->hypercall_imm = d->arch.breakimm;
#ifdef CONFIG_XEN_IA64_PERVCPU_VHPT
            ds->vhpt_size_log2 = d->arch.vhpt_size_log2;
#endif
            /* Copy back. */
            if ( copy_to_guest(u_domctl, op, 1) )
                ret = -EFAULT;
        }
        else {
            if (ds->flags & XEN_DOMAINSETUP_hvm_guest) {
                if (!vmx_enabled) {
                    printk("No VMX hardware feature for vmx domain.\n");
                    ret = -EINVAL;
                } else {
                    d->arch.is_vti = 1;
                    xen_ia64_set_convmem_end(d, ds->maxmem);
                    ret = vmx_setup_platform(d);
                }
            }
            else {
                if (ds->hypercall_imm) {
                    /* dom_fw_setup() reads d->arch.breakimm */
                    struct vcpu *v;
                    d->arch.breakimm = ds->hypercall_imm;
                    for_each_vcpu (d, v)
                        v->arch.breakimm = d->arch.breakimm;
                }
#ifdef CONFIG_XEN_IA64_PERVCPU_VHPT
                if (ds->vhpt_size_log2 == -1) {
                    d->arch.has_pervcpu_vhpt = 0;
                    ds->vhpt_size_log2 = -1;
                    printk(XENLOG_INFO "XEN_DOMCTL_arch_setup: "
                           "domain %d VHPT is global.\n", d->domain_id);
                } else {
                    d->arch.has_pervcpu_vhpt = 1;
                    d->arch.vhpt_size_log2 = ds->vhpt_size_log2;
                    printk(XENLOG_INFO "XEN_DOMCTL_arch_setup: "
                           "domain %d VHPT is per vcpu. size=2**%d\n",
                           d->domain_id, ds->vhpt_size_log2);
                }
#endif
                if (ds->xsi_va)
                    d->arch.shared_info_va = ds->xsi_va;
                ret = dom_fw_setup(d, ds->bp, ds->maxmem);
            }
            if (ret == 0) {
                /*
                 * XXX IA64_SHARED_INFO_PADDR
                 * Assign these pages into the guest pseudo-physical
                 * address space so that dom0 can map them by gmfn.
                 * This is necessary for domain build, save, restore and
                 * dump-core.
                 */
                unsigned long i;
                for (i = 0; i < XSI_SIZE; i += PAGE_SIZE)
                    assign_domain_page(d, IA64_SHARED_INFO_PADDR + i,
                                       virt_to_maddr(d->shared_info + i));
            }
        }

        put_domain(d);
    }
    break;

    case XEN_DOMCTL_shadow_op:
    {
        struct domain *d;
        ret = -ESRCH;
        d = get_domain_by_id(op->domain);
        if ( d != NULL )
        {
            ret = shadow_mode_control(d, &op->u.shadow_op);
            put_domain(d);
            if ( copy_to_guest(u_domctl, op, 1) )
                ret = -EFAULT;
        }
    }
    break;

    case XEN_DOMCTL_ioport_permission:
    {
        struct domain *d;
        unsigned int fp = op->u.ioport_permission.first_port;
        unsigned int np = op->u.ioport_permission.nr_ports;
        unsigned int lp = fp + np - 1;

        ret = -ESRCH;
        d = get_domain_by_id(op->domain);
        if (unlikely(d == NULL))
            break;

        if (np == 0)
            ret = 0;
        else {
            if (op->u.ioport_permission.allow_access)
                ret = ioports_permit_access(d, fp, lp);
            else
                ret = ioports_deny_access(d, fp, lp);
        }

        put_domain(d);
    }
    break;

    case XEN_DOMCTL_sendtrigger:
    {
        struct domain *d;
        struct vcpu *v;

        ret = -ESRCH;
        d = get_domain_by_id(op->domain);
        if ( d == NULL )
            break;

        ret = -EINVAL;
        if ( op->u.sendtrigger.vcpu >= MAX_VIRT_CPUS )
            goto sendtrigger_out;

        ret = -ESRCH;
        if ( (v = d->vcpu[op->u.sendtrigger.vcpu]) == NULL )
            goto sendtrigger_out;

        ret = 0;
        switch (op->u.sendtrigger.trigger)
        {
        case XEN_DOMCTL_SENDTRIGGER_INIT:
        {
            if (VMX_DOMAIN(v))
                vmx_pend_pal_init(d);
            else
                ret = -ENOSYS;
        }
        break;

        default:
            ret = -ENOSYS;
        }

    sendtrigger_out:
        put_domain(d);
    }
    break;

    case XEN_DOMCTL_sethvmcontext:
    {
        struct hvm_domain_context c;
        struct domain *d;

        c.cur = 0;
        c.size = op->u.hvmcontext.size;
        c.data = NULL;

        ret = -ESRCH;
        d = rcu_lock_domain_by_id(op->domain);
        if (d == NULL)
            break;

#ifdef CONFIG_X86
        ret = xsm_hvmcontext(d, op->cmd);
        if (ret)
            goto sethvmcontext_out;
#endif /* CONFIG_X86 */

        ret = -EINVAL;
        if (!is_hvm_domain(d))
            goto sethvmcontext_out;

        ret = -ENOMEM;
        c.data = xmalloc_bytes(c.size);
        if (c.data == NULL)
            goto sethvmcontext_out;

        ret = -EFAULT;
        if (copy_from_guest(c.data, op->u.hvmcontext.buffer, c.size) != 0)
            goto sethvmcontext_out;

        domain_pause(d);
        ret = hvm_load(d, &c);
        domain_unpause(d);

    sethvmcontext_out:
        if (c.data != NULL)
            xfree(c.data);

        rcu_unlock_domain(d);
    }
    break;

    case XEN_DOMCTL_gethvmcontext:
    {
        struct hvm_domain_context c;
        struct domain *d;

        ret = -ESRCH;
        d = rcu_lock_domain_by_id(op->domain);
        if (d == NULL)
            break;

#ifdef CONFIG_X86
        ret = xsm_hvmcontext(d, op->cmd);
        if (ret)
            goto gethvmcontext_out;
#endif /* CONFIG_X86 */

        ret = -EINVAL;
        if (!is_hvm_domain(d))
            goto gethvmcontext_out;

        c.cur = 0;
        c.size = hvm_save_size(d);
        c.data = NULL;

        if (guest_handle_is_null(op->u.hvmcontext.buffer)) {
            /* Client is querying for the correct buffer size */
            op->u.hvmcontext.size = c.size;
            ret = 0;
            goto gethvmcontext_out;
        }

        /* Check that the client has a big enough buffer */
        ret = -ENOSPC;
        if (op->u.hvmcontext.size < c.size)
            goto gethvmcontext_out;

        /* Allocate our own marshalling buffer */
        ret = -ENOMEM;
        c.data = xmalloc_bytes(c.size);
        if (c.data == NULL)
            goto gethvmcontext_out;

        domain_pause(d);
        ret = hvm_save(d, &c);
        domain_unpause(d);

        op->u.hvmcontext.size = c.cur;
        if (copy_to_guest(op->u.hvmcontext.buffer, c.data, c.size) != 0)
            ret = -EFAULT;

    gethvmcontext_out:
        if (copy_to_guest(u_domctl, op, 1))
            ret = -EFAULT;

        if (c.data != NULL)
            xfree(c.data);

        rcu_unlock_domain(d);
    }
    break;

    case XEN_DOMCTL_set_opt_feature:
    {
        struct xen_ia64_opt_feature *optf = &op->u.set_opt_feature.optf;
        struct domain *d = get_domain_by_id(op->domain);

        if (d == NULL) {
            ret = -EINVAL;
            break;
        }

        ret = domain_opt_feature(d, optf);
        put_domain(d);
    }
    break;

    default:
        printk("arch_do_domctl: unrecognized domctl: %d!!!\n", op->cmd);
        ret = -ENOSYS;
    }

    return ret;
}
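
/*
 * arch_do_sysctl: IA64-specific handling of sysctl hypercalls.  Only
 * XEN_SYSCTL_physinfo is implemented here; it reports CPU topology,
 * memory counts and the CPU-to-node map.
 */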
long arch_do_sysctl(xen_sysctl_t *op, XEN_GUEST_HANDLE(xen_sysctl_t) u_sysctl)
{
    long ret = 0;

    switch ( op->cmd )
    {
    case XEN_SYSCTL_physinfo:
    {
        int i;
        uint32_t max_array_ent;

        xen_sysctl_physinfo_t *pi = &op->u.physinfo;

        pi->threads_per_core = cpus_weight(cpu_sibling_map[0]);
        pi->cores_per_socket =
            cpus_weight(cpu_core_map[0]) / pi->threads_per_core;
        pi->nr_cpus = (u32)num_online_cpus();
        pi->nr_nodes = num_online_nodes();
        pi->total_pages = total_pages;
        pi->free_pages = avail_domheap_pages();
        pi->scrub_pages = avail_scrub_pages();
        pi->cpu_khz = local_cpu_data->proc_freq / 1000;
        memset(pi->hw_cap, 0, sizeof(pi->hw_cap));

        max_array_ent = pi->max_cpu_id;
        pi->max_cpu_id = last_cpu(cpu_online_map);
        max_array_ent = min_t(uint32_t, max_array_ent, pi->max_cpu_id);

        ret = 0;

        if (!guest_handle_is_null(pi->cpu_to_node)) {
            for (i = 0; i <= max_array_ent; i++) {
                uint32_t node = cpu_online(i) ? cpu_to_node(i) : ~0u;
                if (copy_to_guest_offset(pi->cpu_to_node, i, &node, 1)) {
                    ret = -EFAULT;
                    break;
                }
            }
        }

        if ( copy_to_guest(u_sysctl, op, 1) )
            ret = -EFAULT;
    }
    break;

    default:
        printk("arch_do_sysctl: unrecognized sysctl: %d!!!\n", op->cmd);
        ret = -ENOSYS;
    }

    return ret;
}
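
/*
 * dom0vp_ioremap: give the domain an uncached, writable mapping of the
 * MMIO range [mpaddr, mpaddr + size), provided it has been granted
 * access to that I/O memory.
 */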
static unsigned long
dom0vp_ioremap(struct domain *d, unsigned long mpaddr, unsigned long size)
{
    unsigned long end;

    /* Linux may use a 0 size!  Warn, then map a single page. */
    if (size == 0) {
        printk(XENLOG_WARNING
               "ioremap(): Trying to map %lx, size 0\n", mpaddr);
        size = PAGE_SIZE;
    }

    end = PAGE_ALIGN(mpaddr + size);

    if (!iomem_access_permitted(d, mpaddr >> PAGE_SHIFT,
                                (end >> PAGE_SHIFT) - 1))
        return -EPERM;

    return assign_domain_mmio_page(d, mpaddr, mpaddr, size,
                                   ASSIGN_writable | ASSIGN_nocache);
}
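
/* Return the revision of the FPSWA (floating-point software assist)
 * firmware interface, or -ENOSYS if no FPSWA interface is present. */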
static unsigned long
dom0vp_fpswa_revision(XEN_GUEST_HANDLE(uint) revision)
{
    if (fpswa_interface == NULL)
        return -ENOSYS;
    if (copy_to_guest(revision, &fpswa_interface->revision, 1))
        return -EFAULT;
    return 0;
}
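
/*
 * dom0vp_add_io_space: register a new PCI I/O port space and grant the
 * domain access to the 64K ports that map onto it.
 */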
static unsigned long
dom0vp_add_io_space(struct domain *d, unsigned long phys_base,
                    unsigned long sparse, unsigned long space_number)
{
    unsigned int fp, lp;

    /*
     * Registering a new io_space, roughly based on Linux
     * arch/ia64/pci/pci.c:new_space().
     */

    /* Skip legacy I/O port space; we already know about it. */
    if (phys_base == 0)
        return 0;

    /*
     * Dom0 Linux initializes io spaces sequentially; if that changes,
     * we'll need to add thread protection and the ability to handle
     * a sparsely populated io_space array.
     */
    if (space_number >= MAX_IO_SPACES || space_number != num_io_spaces)
        return -EINVAL;

    io_space[space_number].mmio_base = phys_base;
    io_space[space_number].sparse = sparse;

    num_io_spaces++;

    fp = space_number << IO_SPACE_BITS;
    lp = fp | 0xffff;

    return ioports_permit_access(d, fp, lp);
}
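
/*
 * do_dom0vp_op: dispatcher for the IA64 dom0vp hypercall, covering
 * pseudo-physical <-> machine address translation, physmap updates,
 * p2m exposure, perfmon and FPSWA services.
 */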
unsigned long
do_dom0vp_op(unsigned long cmd,
             unsigned long arg0, unsigned long arg1, unsigned long arg2,
             unsigned long arg3)
{
    unsigned long ret = 0;
    struct domain *d = current->domain;

    switch (cmd) {
    case IA64_DOM0VP_ioremap:
        ret = dom0vp_ioremap(d, arg0, arg1);
        break;
    case IA64_DOM0VP_phystomach:
        ret = ____lookup_domain_mpa(d, arg0 << PAGE_SHIFT);
        if (ret == INVALID_MFN) {
            dprintk(XENLOG_INFO, "%s: INVALID_MFN ret: 0x%lx\n",
                    __func__, ret);
        } else {
            ret = (ret & _PFN_MASK) >> PAGE_SHIFT; /* XXX pte_pfn() */
        }
        perfc_incr(dom0vp_phystomach);
        break;
    case IA64_DOM0VP_machtophys:
        if (!mfn_valid(arg0)) {
            ret = INVALID_M2P_ENTRY;
            break;
        }
        ret = get_gpfn_from_mfn(arg0);
        perfc_incr(dom0vp_machtophys);
        break;
    case IA64_DOM0VP_zap_physmap:
        ret = dom0vp_zap_physmap(d, arg0, (unsigned int)arg1);
        break;
    case IA64_DOM0VP_add_physmap:
        ret = dom0vp_add_physmap(d, arg0, arg1, (unsigned int)arg2,
                                 (domid_t)arg3);
        break;
    case IA64_DOM0VP_add_physmap_with_gmfn:
        ret = dom0vp_add_physmap_with_gmfn(d, arg0, arg1, (unsigned int)arg2,
                                           (domid_t)arg3);
        break;
    case IA64_DOM0VP_expose_p2m:
        ret = dom0vp_expose_p2m(d, arg0, arg1, arg2, arg3);
        break;
    case IA64_DOM0VP_perfmon: {
        XEN_GUEST_HANDLE(void) hnd;
        set_xen_guest_handle(hnd, (void*)arg1);
        ret = do_perfmon_op(arg0, hnd, arg2);
        break;
    }
    case IA64_DOM0VP_fpswa_revision: {
        XEN_GUEST_HANDLE(uint) hnd;
        set_xen_guest_handle(hnd, (uint*)arg0);
        ret = dom0vp_fpswa_revision(hnd);
        break;
    }
    case IA64_DOM0VP_add_io_space:
        ret = dom0vp_add_io_space(d, arg0, arg1, arg2);
        break;
    case IA64_DOM0VP_expose_foreign_p2m: {
        XEN_GUEST_HANDLE(char) hnd;
        set_xen_guest_handle(hnd, (char*)arg2);
        ret = dom0vp_expose_foreign_p2m(d, arg0, (domid_t)arg1, hnd, arg3);
        break;
    }
    case IA64_DOM0VP_unexpose_foreign_p2m:
        ret = dom0vp_unexpose_foreign_p2m(d, arg0, arg1);
        break;
    default:
        ret = -1;
        printk("unknown dom0_vp_op 0x%lx\n", cmd);
        break;
    }

    return ret;
}

/*
 * Local variables:
 * mode: C
 * c-set-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */