/root/src/xen/xen/arch/x86/physdev.c

#include <xen/init.h>
#include <xen/lib.h>
#include <xen/types.h>
#include <xen/sched.h>
#include <xen/irq.h>
#include <xen/event.h>
#include <xen/guest_access.h>
#include <xen/iocap.h>
#include <xen/serial.h>
#include <asm/current.h>
#include <asm/io_apic.h>
#include <asm/msi.h>
#include <asm/hvm/irq.h>
#include <asm/hypercall.h>
#include <public/xen.h>
#include <public/physdev.h>
#include <xsm/xsm.h>
#include <asm/p2m.h>

int physdev_map_pirq(domid_t, int type, int *index, int *pirq_p,
                     struct msi_info *);
int physdev_unmap_pirq(domid_t, int pirq);

#include "x86_64/mmconfig.h"

#ifndef COMPAT
typedef long ret_t;

static int physdev_hvm_map_pirq(
    struct domain *d, int type, int *index, int *pirq)
{
    int ret = 0;

    ASSERT(!is_hardware_domain(d));

    spin_lock(&d->event_lock);
    switch ( type )
    {
    case MAP_PIRQ_TYPE_GSI: {
        const struct hvm_irq_dpci *hvm_irq_dpci;
        unsigned int machine_gsi = 0;

        if ( *index < 0 || *index >= NR_HVM_DOMU_IRQS )
        {
            ret = -EINVAL;
            break;
        }

        /* Find the machine GSI corresponding to the emulated GSI. */
        hvm_irq_dpci = domain_get_irq_dpci(d);
        if ( hvm_irq_dpci )
        {
            const struct hvm_girq_dpci_mapping *girq;

            BUILD_BUG_ON(ARRAY_SIZE(hvm_irq_dpci->girq) < NR_HVM_DOMU_IRQS);
            list_for_each_entry ( girq,
                                  &hvm_irq_dpci->girq[*index],
                                  list )
                machine_gsi = girq->machine_gsi;
        }
        /* If we found one, we are dealing with a passthrough device. */
        if ( machine_gsi )
        {
            *index = domain_pirq_to_irq(d, machine_gsi);
            *pirq = machine_gsi;
            ret = (*pirq > 0) ? 0 : *pirq;
        }
        /* Otherwise we are dealing with an emulated device. */
        else
        {
            if ( *pirq < 0 )
                *pirq = get_free_pirq(d, type);
            ret = map_domain_emuirq_pirq(d, *pirq, *index);
        }
        break;
    }

    default:
        ret = -EINVAL;
        dprintk(XENLOG_G_WARNING, "map type %d not supported yet\n", type);
        break;
    }

    spin_unlock(&d->event_lock);
    return ret;
}
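
/*
 * Illustrative sketch (not part of this file): the handler above is reached
 * when an HVM guest using a vector-based event-channel callback maps one of
 * its own GSIs. Assuming the public struct physdev_map_pirq from
 * public/physdev.h and a guest-side HYPERVISOR_physdev_op() wrapper, the
 * request might look like:
 *
 *     struct physdev_map_pirq map = {
 *         .domid = DOMID_SELF,
 *         .type  = MAP_PIRQ_TYPE_GSI,
 *         .index = 9,    // hypothetical emulated GSI
 *         .pirq  = -1,   // negative: let Xen pick a free pirq
 *     };
 *     int rc = HYPERVISOR_physdev_op(PHYSDEVOP_map_pirq, &map);
 *     // On success, map.pirq holds the pirq bound to the GSI.
 */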

int physdev_map_pirq(domid_t domid, int type, int *index, int *pirq_p,
                     struct msi_info *msi)
{
    struct domain *d = current->domain;
    int ret;

    if ( domid == DOMID_SELF && is_hvm_domain(d) && has_pirq(d) )
    {
        /*
         * Only makes sense for vector-based callback, else HVM-IRQ logic
         * calls back into itself and deadlocks on hvm_domain.irq_lock.
         */
        if ( !is_hvm_pv_evtchn_domain(d) )
            return -EINVAL;

        return physdev_hvm_map_pirq(d, type, index, pirq_p);
    }

    d = rcu_lock_domain_by_any_id(domid);
    if ( d == NULL )
        return -ESRCH;

    ret = xsm_map_domain_pirq(XSM_DM_PRIV, d);
    if ( ret )
        goto free_domain;

    /* Verify or get irq. */
    switch ( type )
    {
    case MAP_PIRQ_TYPE_GSI:
        ret = allocate_and_map_gsi_pirq(d, *index, pirq_p);
        break;

    case MAP_PIRQ_TYPE_MSI:
        if ( !msi->table_base )
            msi->entry_nr = 1;
        /* fallthrough */
    case MAP_PIRQ_TYPE_MULTI_MSI:
        ret = allocate_and_map_msi_pirq(d, *index, pirq_p, type, msi);
        break;

    default:
        dprintk(XENLOG_G_ERR, "dom%d: wrong map_pirq type %x\n",
                d->domain_id, type);
        ret = -EINVAL;
        break;
    }

 free_domain:
    rcu_unlock_domain(d);
    return ret;
}
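
/*
 * Illustrative sketch (not part of this file): an MSI mapping request that
 * lands in physdev_map_pirq() above. Field values are hypothetical; the
 * layout is the public struct physdev_map_pirq from public/physdev.h:
 *
 *     struct physdev_map_pirq map = {
 *         .domid = DOMID_SELF,
 *         .type  = MAP_PIRQ_TYPE_MSI,
 *         .index = -1,               // let Xen allocate the IRQ
 *         .pirq  = -1,               // let Xen allocate the pirq
 *         .bus   = 0,                // hypothetical device 0000:00:04.0
 *         .devfn = PCI_DEVFN(4, 0),
 *     };
 *     int rc = HYPERVISOR_physdev_op(PHYSDEVOP_map_pirq, &map);
 */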

int physdev_unmap_pirq(domid_t domid, int pirq)
{
    struct domain *d;
    int ret = 0;

    d = rcu_lock_domain_by_any_id(domid);
    if ( d == NULL )
        return -ESRCH;

    if ( domid != DOMID_SELF || !is_hvm_domain(d) || !has_pirq(d) )
        ret = xsm_unmap_domain_pirq(XSM_DM_PRIV, d);
    if ( ret )
        goto free_domain;

    if ( is_hvm_domain(d) && has_pirq(d) )
    {
        spin_lock(&d->event_lock);
        if ( domain_pirq_to_emuirq(d, pirq) != IRQ_UNBOUND )
            ret = unmap_domain_pirq_emuirq(d, pirq);
        spin_unlock(&d->event_lock);
        if ( domid == DOMID_SELF || ret )
            goto free_domain;
    }

    pcidevs_lock();
    spin_lock(&d->event_lock);
    ret = unmap_domain_pirq(d, pirq);
    spin_unlock(&d->event_lock);
    pcidevs_unlock();

 free_domain:
    rcu_unlock_domain(d);
    return ret;
}
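
/*
 * Illustrative sketch (not part of this file): the inverse operation, using
 * the public struct physdev_unmap_pirq and the pirq returned by the map
 * example above:
 *
 *     struct physdev_unmap_pirq unmap = {
 *         .domid = DOMID_SELF,
 *         .pirq  = map.pirq,
 *     };
 *     int rc = HYPERVISOR_physdev_op(PHYSDEVOP_unmap_pirq, &unmap);
 */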
#endif /* COMPAT */

ret_t do_physdev_op(int cmd, XEN_GUEST_HANDLE_PARAM(void) arg)
{
    int irq;
    ret_t ret;
    struct domain *currd = current->domain;

    switch ( cmd )
    {
    case PHYSDEVOP_eoi: {
        struct physdev_eoi eoi;
        struct pirq *pirq;

        ret = -EFAULT;
        if ( copy_from_guest(&eoi, arg, 1) != 0 )
            break;
        ret = -EINVAL;
        if ( eoi.irq >= currd->nr_pirqs )
            break;
        spin_lock(&currd->event_lock);
        pirq = pirq_info(currd, eoi.irq);
        if ( !pirq )
        {
            spin_unlock(&currd->event_lock);
            break;
        }
        if ( currd->arch.auto_unmask )
            evtchn_unmask(pirq->evtchn);
        if ( is_pv_domain(currd) || domain_pirq_to_irq(currd, eoi.irq) > 0 )
            pirq_guest_eoi(pirq);
        if ( is_hvm_domain(currd) &&
             domain_pirq_to_emuirq(currd, eoi.irq) > 0 )
        {
            struct hvm_irq *hvm_irq = hvm_domain_irq(currd);
            int gsi = domain_pirq_to_emuirq(currd, eoi.irq);

            /*
             * If this is a level-triggered IRQ and its assert count is
             * still non-zero, send another notification.
             */
            if ( gsi >= NR_ISAIRQS /* ISA IRQs are edge triggered */
                 && hvm_irq->gsi_assert_count[gsi] )
                send_guest_pirq(currd, pirq);
        }
        spin_unlock(&currd->event_lock);
        ret = 0;
        break;
    }

    case PHYSDEVOP_pirq_eoi_gmfn_v2:
    case PHYSDEVOP_pirq_eoi_gmfn_v1: {
        struct physdev_pirq_eoi_gmfn info;
        struct page_info *page;

        ret = -EFAULT;
        if ( copy_from_guest(&info, arg, 1) != 0 )
            break;

        ret = -EINVAL;
        page = get_page_from_gfn(current->domain, info.gmfn, NULL, P2M_ALLOC);
        if ( !page )
            break;
        if ( !get_page_type(page, PGT_writable_page) )
        {
            put_page(page);
            break;
        }

        if ( cmpxchg(&currd->arch.pirq_eoi_map_mfn,
                     0, page_to_mfn(page)) != 0 )
        {
            put_page_and_type(page);
            ret = -EBUSY;
            break;
        }

        currd->arch.pirq_eoi_map = __map_domain_page_global(page);
        if ( currd->arch.pirq_eoi_map == NULL )
        {
            currd->arch.pirq_eoi_map_mfn = 0;
            put_page_and_type(page);
            ret = -ENOSPC;
            break;
        }
        if ( cmd == PHYSDEVOP_pirq_eoi_gmfn_v1 )
            currd->arch.auto_unmask = 1;

        ret = 0;
        break;
    }
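
    /*
     * Illustrative sketch (not part of this file): a guest registering its
     * EOI bitmap page. pirq_eoi_map_page is a hypothetical page-aligned
     * guest allocation and gfn_of() a hypothetical address-to-frame helper;
     * the struct is the public physdev_pirq_eoi_gmfn:
     *
     *     struct physdev_pirq_eoi_gmfn info = {
     *         .gmfn = gfn_of(pirq_eoi_map_page),
     *     };
     *     // _v2 leaves event-channel unmasking to the guest; the legacy
     *     // _v1 variant additionally sets auto_unmask (see above).
     *     int rc = HYPERVISOR_physdev_op(PHYSDEVOP_pirq_eoi_gmfn_v2, &info);
     */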

    /* Legacy since 0x00030202. */
    case PHYSDEVOP_IRQ_UNMASK_NOTIFY: {
        ret = pirq_guest_unmask(currd);
        break;
    }

    case PHYSDEVOP_irq_status_query: {
        struct physdev_irq_status_query irq_status_query;

        ret = -EFAULT;
        if ( copy_from_guest(&irq_status_query, arg, 1) != 0 )
            break;
        irq = irq_status_query.irq;
        ret = -EINVAL;
        if ( (irq < 0) || (irq >= currd->nr_pirqs) )
            break;
        irq_status_query.flags = 0;
        if ( is_hvm_domain(currd) &&
             domain_pirq_to_irq(currd, irq) <= 0 &&
             domain_pirq_to_emuirq(currd, irq) == IRQ_UNBOUND )
        {
            ret = -EINVAL;
            break;
        }

        /*
         * Even edge-triggered or message-based IRQs can need masking from
         * time to time. If the guest is not dynamically checking for this
         * via the new pirq_eoi_map mechanism, it must conservatively always
         * execute the EOI hypercall. In practice, this only really makes a
         * difference for maskable MSI sources, and if those are supported
         * then dom0 is probably modern anyway.
         */
        irq_status_query.flags |= XENIRQSTAT_needs_eoi;
        if ( pirq_shared(currd, irq) )
            irq_status_query.flags |= XENIRQSTAT_shared;
        ret = __copy_to_guest(arg, &irq_status_query, 1) ? -EFAULT : 0;
        break;
    }
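
    /*
     * Illustrative sketch (not part of this file): how a guest might decide
     * whether a pirq needs an explicit EOI hypercall, using the public
     * struct physdev_irq_status_query and its flags:
     *
     *     struct physdev_irq_status_query q = { .irq = pirq };
     *     if ( HYPERVISOR_physdev_op(PHYSDEVOP_irq_status_query, &q) == 0 &&
     *          (q.flags & XENIRQSTAT_needs_eoi) )
     *         ;  // issue PHYSDEVOP_eoi once the interrupt is handled
     */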

    case PHYSDEVOP_map_pirq: {
        physdev_map_pirq_t map;
        struct msi_info msi;

        ret = -EFAULT;
        if ( copy_from_guest(&map, arg, 1) != 0 )
            break;

        switch ( map.type )
        {
        case MAP_PIRQ_TYPE_MSI_SEG:
            map.type = MAP_PIRQ_TYPE_MSI;
            msi.seg = map.bus >> 16;
            break;

        case MAP_PIRQ_TYPE_MULTI_MSI:
            if ( map.table_base )
                return -EINVAL;
            msi.seg = map.bus >> 16;
            break;

        default:
            msi.seg = 0;
            break;
        }
        msi.bus = map.bus;
        msi.devfn = map.devfn;
        msi.entry_nr = map.entry_nr;
        msi.table_base = map.table_base;
        ret = physdev_map_pirq(map.domid, map.type, &map.index, &map.pirq,
                               &msi);

        if ( map.type == MAP_PIRQ_TYPE_MULTI_MSI )
            map.entry_nr = msi.entry_nr;
        if ( __copy_to_guest(arg, &map, 1) )
            ret = -EFAULT;
        break;
    }

    case PHYSDEVOP_unmap_pirq: {
        struct physdev_unmap_pirq unmap;

        ret = -EFAULT;
        if ( copy_from_guest(&unmap, arg, 1) != 0 )
            break;

        ret = physdev_unmap_pirq(unmap.domid, unmap.pirq);
        break;
    }

    case PHYSDEVOP_apic_read: {
        struct physdev_apic apic;

        ret = -EFAULT;
        if ( copy_from_guest(&apic, arg, 1) != 0 )
            break;
        ret = xsm_apic(XSM_PRIV, currd, cmd);
        if ( ret )
            break;
        ret = ioapic_guest_read(apic.apic_physbase, apic.reg, &apic.value);
        if ( __copy_to_guest(arg, &apic, 1) )
            ret = -EFAULT;
        break;
    }

    case PHYSDEVOP_apic_write: {
        struct physdev_apic apic;

        ret = -EFAULT;
        if ( copy_from_guest(&apic, arg, 1) != 0 )
            break;
        ret = xsm_apic(XSM_PRIV, currd, cmd);
        if ( ret )
            break;
        ret = ioapic_guest_write(apic.apic_physbase, apic.reg, apic.value);
        break;
    }
    case PHYSDEVOP_alloc_irq_vector: {
        struct physdev_irq irq_op;

        ret = -EFAULT;
        if ( copy_from_guest(&irq_op, arg, 1) != 0 )
            break;

        /*
         * Use the APIC check since this dummy hypercall should still only
         * be called by the domain with access to program the IO-APIC.
         */
        ret = xsm_apic(XSM_PRIV, currd, cmd);
        if ( ret )
            break;

        /*
         * The vector is only used by the hypervisor, and dom0 shouldn't
         * touch it, so return irq_op.irq as the vector, keep this
         * hypercall a no-op, and defer the actual vector allocation
         * until dom0 tries to program an IO-APIC entry.
         */
        irq_op.vector = irq_op.irq;
        ret = 0;

        if ( __copy_to_guest(arg, &irq_op, 1) )
            ret = -EFAULT;
        break;
    }

    case PHYSDEVOP_set_iopl: {
        struct vcpu *curr = current;
        struct physdev_set_iopl set_iopl;

        ret = -EFAULT;
        if ( copy_from_guest(&set_iopl, arg, 1) != 0 )
            break;
        ret = -EINVAL;
        if ( set_iopl.iopl > 3 )
            break;
        ret = 0;
        curr->arch.pv_vcpu.iopl = MASK_INSR(set_iopl.iopl, X86_EFLAGS_IOPL);
        break;
    }

    case PHYSDEVOP_set_iobitmap: {
        struct vcpu *curr = current;
        struct physdev_set_iobitmap set_iobitmap;

        ret = -EFAULT;
        if ( copy_from_guest(&set_iobitmap, arg, 1) != 0 )
            break;
        ret = -EINVAL;
        if ( !guest_handle_okay(set_iobitmap.bitmap, IOBMP_BYTES) ||
             (set_iobitmap.nr_ports > 65536) )
            break;
        ret = 0;
#ifndef COMPAT
        curr->arch.pv_vcpu.iobmp = set_iobitmap.bitmap;
#else
        guest_from_compat_handle(curr->arch.pv_vcpu.iobmp,
                                 set_iobitmap.bitmap);
#endif
        curr->arch.pv_vcpu.iobmp_limit = set_iobitmap.nr_ports;
        break;
    }
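
    /*
     * Illustrative sketch (not part of this file): a PV guest adjusting its
     * virtual IOPL via the public struct physdev_set_iopl. The value is
     * hypothetical:
     *
     *     struct physdev_set_iopl iopl = { .iopl = 1 };
     *     int rc = HYPERVISOR_physdev_op(PHYSDEVOP_set_iopl, &iopl);
     */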

    case PHYSDEVOP_manage_pci_add: {
        struct physdev_manage_pci manage_pci;

        ret = -EFAULT;
        if ( copy_from_guest(&manage_pci, arg, 1) != 0 )
            break;

        ret = pci_add_device(0, manage_pci.bus, manage_pci.devfn,
                             NULL, NUMA_NO_NODE);
        break;
    }

    case PHYSDEVOP_manage_pci_remove: {
        struct physdev_manage_pci manage_pci;

        ret = -EFAULT;
        if ( copy_from_guest(&manage_pci, arg, 1) != 0 )
            break;

        ret = pci_remove_device(0, manage_pci.bus, manage_pci.devfn);
        break;
    }

    case PHYSDEVOP_manage_pci_add_ext: {
        struct physdev_manage_pci_ext manage_pci_ext;
        struct pci_dev_info pdev_info;

        ret = -EFAULT;
        if ( copy_from_guest(&manage_pci_ext, arg, 1) != 0 )
            break;

        ret = -EINVAL;
        if ( (manage_pci_ext.is_extfn > 1) || (manage_pci_ext.is_virtfn > 1) )
            break;

        pdev_info.is_extfn = manage_pci_ext.is_extfn;
        pdev_info.is_virtfn = manage_pci_ext.is_virtfn;
        pdev_info.physfn.bus = manage_pci_ext.physfn.bus;
        pdev_info.physfn.devfn = manage_pci_ext.physfn.devfn;
        ret = pci_add_device(0, manage_pci_ext.bus,
                             manage_pci_ext.devfn,
                             &pdev_info, NUMA_NO_NODE);
        break;
    }

    case PHYSDEVOP_pci_device_add: {
        struct physdev_pci_device_add add;
        struct pci_dev_info pdev_info;
        nodeid_t node;

        ret = -EFAULT;
        if ( copy_from_guest(&add, arg, 1) != 0 )
            break;

        pdev_info.is_extfn = !!(add.flags & XEN_PCI_DEV_EXTFN);
        if ( add.flags & XEN_PCI_DEV_VIRTFN )
        {
            pdev_info.is_virtfn = 1;
            pdev_info.physfn.bus = add.physfn.bus;
            pdev_info.physfn.devfn = add.physfn.devfn;
        }
        else
            pdev_info.is_virtfn = 0;

        if ( add.flags & XEN_PCI_DEV_PXM )
        {
            uint32_t pxm;
            size_t optarr_off = offsetof(struct physdev_pci_device_add, optarr) /
                                sizeof(add.optarr[0]);

            if ( copy_from_guest_offset(&pxm, arg, optarr_off, 1) )
                break;

            node = pxm_to_node(pxm);
        }
        else
            node = NUMA_NO_NODE;

        ret = pci_add_device(add.seg, add.bus, add.devfn, &pdev_info, node);
        break;
    }
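
    /*
     * Illustrative sketch (not part of this file): dom0 reporting a newly
     * discovered PCI device together with its NUMA proximity domain. The
     * wrapper struct mimics how the flexible optarr[] member is populated;
     * all values are hypothetical:
     *
     *     struct {
     *         struct physdev_pci_device_add add;
     *         uint32_t pxm;                     // lands in add.optarr[]
     *     } buf = {
     *         .add = {
     *             .seg = 0, .bus = 0, .devfn = PCI_DEVFN(4, 0),
     *             .flags = XEN_PCI_DEV_PXM,
     *         },
     *         .pxm = 1,
     *     };
     *     int rc = HYPERVISOR_physdev_op(PHYSDEVOP_pci_device_add, &buf.add);
     */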

    case PHYSDEVOP_pci_device_remove: {
        struct physdev_pci_device dev;

        ret = -EFAULT;
        if ( copy_from_guest(&dev, arg, 1) != 0 )
            break;

        ret = pci_remove_device(dev.seg, dev.bus, dev.devfn);
        break;
    }

    case PHYSDEVOP_prepare_msix:
    case PHYSDEVOP_release_msix: {
        struct physdev_pci_device dev;

        if ( copy_from_guest(&dev, arg, 1) )
            ret = -EFAULT;
        else
            ret = xsm_resource_setup_pci(XSM_PRIV,
                                         (dev.seg << 16) | (dev.bus << 8) |
                                         dev.devfn) ?:
                  pci_prepare_msix(dev.seg, dev.bus, dev.devfn,
                                   cmd != PHYSDEVOP_prepare_msix);
        break;
    }

    case PHYSDEVOP_pci_mmcfg_reserved: {
        struct physdev_pci_mmcfg_reserved info;

        ret = xsm_resource_setup_misc(XSM_PRIV);
        if ( ret )
            break;

        ret = -EFAULT;
        if ( copy_from_guest(&info, arg, 1) )
            break;

        ret = pci_mmcfg_reserved(info.address, info.segment,
                                 info.start_bus, info.end_bus, info.flags);
        if ( !ret && has_vpci(currd) )
        {
            /*
             * For HVM (PVH) domains try to add the newly found MMCFG to the
             * domain.
             */
            ret = register_vpci_mmcfg_handler(currd, info.address,
                                              info.start_bus, info.end_bus,
                                              info.segment);
        }

        break;
    }

    case PHYSDEVOP_restore_msi: {
        struct physdev_restore_msi restore_msi;
        struct pci_dev *pdev;

        ret = -EFAULT;
        if ( copy_from_guest(&restore_msi, arg, 1) != 0 )
            break;

        pcidevs_lock();
        pdev = pci_get_pdev(0, restore_msi.bus, restore_msi.devfn);
        ret = pdev ? pci_restore_msi_state(pdev) : -ENODEV;
        pcidevs_unlock();
        break;
    }

    case PHYSDEVOP_restore_msi_ext: {
        struct physdev_pci_device dev;
        struct pci_dev *pdev;

        ret = -EFAULT;
        if ( copy_from_guest(&dev, arg, 1) != 0 )
            break;

        pcidevs_lock();
        pdev = pci_get_pdev(dev.seg, dev.bus, dev.devfn);
        ret = pdev ? pci_restore_msi_state(pdev) : -ENODEV;
        pcidevs_unlock();
        break;
    }

    case PHYSDEVOP_setup_gsi: {
        struct physdev_setup_gsi setup_gsi;

        ret = -EFAULT;
        if ( copy_from_guest(&setup_gsi, arg, 1) != 0 )
            break;

        ret = -EINVAL;
        if ( setup_gsi.gsi < 0 || setup_gsi.gsi >= nr_irqs_gsi )
            break;

        ret = xsm_resource_setup_gsi(XSM_PRIV, setup_gsi.gsi);
        if ( ret )
            break;

        ret = mp_register_gsi(setup_gsi.gsi, setup_gsi.triggering,
                              setup_gsi.polarity);
        break;
    }

    case PHYSDEVOP_get_free_pirq: {
        struct physdev_get_free_pirq out;

        ret = -EFAULT;
        if ( copy_from_guest(&out, arg, 1) != 0 )
            break;

        spin_lock(&currd->event_lock);

        ret = get_free_pirq(currd, out.type);
        if ( ret >= 0 )
        {
            struct pirq *info = pirq_get_info(currd, ret);

            if ( info )
                info->arch.irq = PIRQ_ALLOCATED;
            else
                ret = -ENOMEM;
        }

        spin_unlock(&currd->event_lock);

        if ( ret >= 0 )
        {
            out.pirq = ret;
            ret = __copy_to_guest(arg, &out, 1) ? -EFAULT : 0;
        }

        break;
    }

    case PHYSDEVOP_dbgp_op: {
        struct physdev_dbgp_op op;

        if ( !is_hardware_domain(currd) )
            ret = -EPERM;
        else if ( copy_from_guest(&op, arg, 1) )
            ret = -EFAULT;
        else
            ret = dbgp_op(&op);
        break;
    }

    default:
        ret = -ENOSYS;
        break;
    }

    return ret;
}

/*
 * Local variables:
 * mode: C
 * c-file-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */