debuggers.hg

xen/arch/ia64/vmx/vmx_hypercall.c @ 0:7d21f7218375

description  Exact replica of unstable on 051908 + README-this
author       Mukesh Rathor
date         Mon May 19 15:34:57 2008 -0700
children     5c0bf00e371d

/* -*- Mode:C; c-basic-offset:4; tab-width:4; indent-tabs-mode:nil -*- */
/*
 * vmx_hypercall.c: handling hypercall from domain
 * Copyright (c) 2005, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Xuefei Xu (Anthony Xu) (Anthony.xu@intel.com)
 */
#include <xen/config.h>
#include <xen/errno.h>
#include <asm/vmx_vcpu.h>
#include <xen/guest_access.h>
#include <public/event_channel.h>
#include <asm/vmmu.h>
#include <asm/tlb.h>
#include <asm/regionreg.h>
#include <asm/page.h>
#include <xen/mm.h>
#include <xen/multicall.h>
#include <xen/hypercall.h>
#include <public/version.h>
#include <asm/dom_fw.h>
#include <xen/domain.h>
#include <asm/vmx.h>
#include <asm/viosapic.h>

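/*
 * HVMOP_set_isa_irq_level: copy the request from guest memory, check that
 * the ISA IRQ is in range (0-15), look up and permission-check the target
 * domain, then set the line's level on the domain's virtual IOSAPIC.
 */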
static int hvmop_set_isa_irq_level(
    XEN_GUEST_HANDLE(xen_hvm_set_isa_irq_level_t) uop)
{
    struct xen_hvm_set_isa_irq_level op;
    struct domain *d;
    int rc;

    if ( copy_from_guest(&op, uop, 1) )
        return -EFAULT;

    if ( op.isa_irq > 15 )
        return -EINVAL;

    if ( op.domid == DOMID_SELF )
        op.domid = current->domain->domain_id;

    d = rcu_lock_domain_by_id(op.domid);
    if ( d == NULL )
        return -ESRCH;

    rc = -EPERM;
    if ( !IS_PRIV_FOR(current->domain, d) && d != current->domain )
        goto out;

    rc = -EINVAL;
    if ( !is_hvm_domain(d) )
        goto out;

    rc = 0;
    viosapic_set_irq(d, op.isa_irq, op.level);

 out:
    rcu_unlock_domain(d);
    return rc;
}

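/*
 * HVMOP_set_pci_intx_level: as above, but for a PCI INTx line. Only
 * segment 0, bus 0, devices 0-31 and INTA-INTD are accepted; the level is
 * forwarded to the virtual IOSAPIC via viosapic_set_pci_irq().
 */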
static int hvmop_set_pci_intx_level(
    XEN_GUEST_HANDLE(xen_hvm_set_pci_intx_level_t) uop)
{
    struct xen_hvm_set_pci_intx_level op;
    struct domain *d;
    int rc;

    if ( copy_from_guest(&op, uop, 1) )
        return -EFAULT;

    if ( (op.domain > 0) || (op.bus > 0) || (op.device > 31) || (op.intx > 3) )
        return -EINVAL;

    if ( op.domid == DOMID_SELF )
        op.domid = current->domain->domain_id;

    d = rcu_lock_domain_by_id(op.domid);
    if ( d == NULL )
        return -ESRCH;

    rc = -EPERM;
    if ( !IS_PRIV_FOR(current->domain, d) && d != current->domain )
        goto out;

    rc = -EINVAL;
    if ( !is_hvm_domain(d) )
        goto out;

    rc = 0;
    viosapic_set_pci_irq(d, op.device, op.intx, op.level);

 out:
    rcu_unlock_domain(d);
    return rc;
}

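/*
 * Top-level handler for the HVMOP_* hypercalls issued by VMX domains:
 * get/set of HVM parameters (including wiring up the ioreq and buffered
 * ioreq pages shared with the device model) plus the interrupt-level
 * operations above.
 */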
long
do_hvm_op(unsigned long op, XEN_GUEST_HANDLE(void) arg)
{
    long rc = 0;

    switch (op) {
    case HVMOP_set_param:
    case HVMOP_get_param:
    {
        struct xen_hvm_param a;
        struct domain *d;

        if (copy_from_guest(&a, arg, 1))
            return -EFAULT;

        if (a.index >= HVM_NR_PARAMS)
            return -EINVAL;

        if (a.domid == DOMID_SELF) {
            d = rcu_lock_current_domain();
        }
        else {
            d = rcu_lock_domain_by_id(a.domid);
            if (d == NULL)
                return -ESRCH;
            if (!IS_PRIV_FOR(current->domain, d)) {
                rcu_unlock_domain(d);
                return -EPERM;
            }
        }

        if (op == HVMOP_set_param) {
            struct vmx_ioreq_page *iorp;
            struct vcpu *v;

            switch (a.index) {
            case HVM_PARAM_IOREQ_PFN:
                iorp = &d->arch.hvm_domain.ioreq;
                rc = vmx_set_ioreq_page(d, iorp, a.value);
                spin_lock(&iorp->lock);
                if (rc == 0 && iorp->va != NULL)
                    /* Initialise evtchn port info if VCPUs already created. */
                    for_each_vcpu(d, v)
                        get_vio(v)->vp_eport = v->arch.arch_vmx.xen_port;
                spin_unlock(&iorp->lock);
                break;
            case HVM_PARAM_BUFIOREQ_PFN:
                iorp = &d->arch.hvm_domain.buf_ioreq;
                rc = vmx_set_ioreq_page(d, iorp, a.value);
                break;
            case HVM_PARAM_BUFPIOREQ_PFN:
                iorp = &d->arch.hvm_domain.buf_pioreq;
                rc = vmx_set_ioreq_page(d, iorp, a.value);
                break;
            case HVM_PARAM_DM_DOMAIN:
                if (a.value == DOMID_SELF)
                    a.value = current->domain->domain_id;
                rc = a.value ? -EINVAL : 0; /* no stub domain support */
                break;
            default:
                /* nothing */
                break;
            }
            if (rc == 0)
                d->arch.hvm_domain.params[a.index] = a.value;
        }
        else {
            a.value = d->arch.hvm_domain.params[a.index];
            rc = copy_to_guest(arg, &a, 1) ? -EFAULT : 0;
        }

        rcu_unlock_domain(d);
        break;
    }

    case HVMOP_set_pci_intx_level:
        rc = hvmop_set_pci_intx_level(
            guest_handle_cast(arg, xen_hvm_set_pci_intx_level_t));
        break;

    case HVMOP_set_isa_irq_level:
        rc = hvmop_set_isa_irq_level(
            guest_handle_cast(arg, xen_hvm_set_isa_irq_level_t));
        break;

    case HVMOP_set_pci_link_route:
        rc = 0;
        break;

    case HVMOP_track_dirty_vram:
        rc = -ENOSYS;
        break;

    default:
        gdprintk(XENLOG_INFO, "Bad HVM op %ld.\n", op);
        rc = -ENOSYS;
    }
    return rc;
}
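
For context, the dispatcher above is reached from guest context via the hvm_op hypercall. The fragment below is an illustrative sketch, not part of this file, of how an HVM guest kernel could read one of its own parameters through HVMOP_get_param; it assumes Linux-style Xen interface headers and a HYPERVISOR_hvm_op() wrapper supplied by the guest's hypercall glue.

/*
 * Sketch only: fetch the xenstore ring PFN via HVMOP_get_param, which the
 * do_hvm_op() dispatcher above services.  HYPERVISOR_hvm_op() is assumed to
 * come from the guest's hypercall glue; header paths are the Linux-guest
 * spellings of Xen's public headers.
 */
#include <xen/interface/hvm/hvm_op.h>   /* HVMOP_get_param, struct xen_hvm_param */
#include <xen/interface/hvm/params.h>   /* HVM_PARAM_STORE_PFN */

static int get_xenstore_pfn(uint64_t *pfn)
{
    struct xen_hvm_param a = {
        .domid = DOMID_SELF,            /* resolved to the caller's domain above */
        .index = HVM_PARAM_STORE_PFN,   /* must be < HVM_NR_PARAMS */
    };
    int rc;

    rc = HYPERVISOR_hvm_op(HVMOP_get_param, &a);
    if (rc == 0)
        *pfn = a.value;                 /* written back by copy_to_guest() above */
    return rc;
}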