/root/src/xen/xen/arch/x86/hvm/intercept.c
Line | Count | Source |
1 | | /* |
2 | | * intercept.c: Handle performance critical I/O packets in hypervisor space |
3 | | * |
4 | | * Copyright (c) 2004, Intel Corporation. |
5 | | * Copyright (c) 2008, Citrix Systems, Inc. |
6 | | * |
7 | | * This program is free software; you can redistribute it and/or modify it |
8 | | * under the terms and conditions of the GNU General Public License, |
9 | | * version 2, as published by the Free Software Foundation. |
10 | | * |
11 | | * This program is distributed in the hope it will be useful, but WITHOUT |
12 | | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or |
13 | | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for |
14 | | * more details. |
15 | | * |
16 | | * You should have received a copy of the GNU General Public License along with |
17 | | * this program; If not, see <http://www.gnu.org/licenses/>. |
18 | | */ |
19 | | |
20 | | #include <xen/types.h> |
21 | | #include <xen/sched.h> |
22 | | #include <asm/regs.h> |
23 | | #include <asm/hvm/hvm.h> |
24 | | #include <asm/hvm/support.h> |
25 | | #include <asm/hvm/domain.h> |
26 | | #include <xen/lib.h> |
27 | | #include <xen/sched.h> |
28 | | #include <asm/current.h> |
29 | | #include <io_ports.h> |
30 | | #include <xen/event.h> |
31 | | #include <xen/iommu.h> |
32 | | |
33 | | static bool_t hvm_mmio_accept(const struct hvm_io_handler *handler, |
34 | | const ioreq_t *p) |
35 | 1.92M | { |
36 | 1.92M | paddr_t first = hvm_mmio_first_byte(p), last; |
37 | 1.92M | |
38 | 1.92M | BUG_ON(handler->type != IOREQ_TYPE_COPY); |
39 | 1.92M | |
40 | 1.92M | if ( !handler->mmio.ops->check(current, first) ) |
41 | 1.80M | return 0; |
42 | 1.92M | |
43 | 1.92M | /* Make sure the handler will accept the whole access. */ |
44 | 118k | last = hvm_mmio_last_byte(p); |
45 | 118k | if ( last != first && |
46 | 56.4k | !handler->mmio.ops->check(current, last) ) |
47 | 0 | domain_crash(current->domain); |
48 | 118k | |
49 | 118k | return 1; |
50 | 1.92M | } |
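
/*
 * Illustrative sketch, not from this file: hvm_mmio_accept() above defers
 * the actual range decision to the handler's mmio.ops->check callback.
 * A minimal such callback might claim a single guest-physical page; the
 * names example_mmio_check and EXAMPLE_MMIO_BASE are hypothetical, and the
 * real callbacks live with each emulated device.
 */
static int example_mmio_check(struct vcpu *v, unsigned long addr)
{
    /* Accept any byte that falls inside the device's one-page window. */
    return (addr & PAGE_MASK) == EXAMPLE_MMIO_BASE;
}
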
51 | | |
52 | | static int hvm_mmio_read(const struct hvm_io_handler *handler, |
53 | | uint64_t addr, uint32_t size, uint64_t *data) |
54 | 56.0k | { |
55 | 56.0k | BUG_ON(handler->type != IOREQ_TYPE_COPY); |
56 | 56.0k | |
57 | 56.0k | return handler->mmio.ops->read(current, addr, size, data); |
58 | 56.0k | } |
59 | | |
60 | | static int hvm_mmio_write(const struct hvm_io_handler *handler, |
61 | | uint64_t addr, uint32_t size, uint64_t data) |
62 | 4.11k | { |
63 | 4.11k | BUG_ON(handler->type != IOREQ_TYPE_COPY); |
64 | 4.11k | |
65 | 4.11k | return handler->mmio.ops->write(current, addr, size, data); |
66 | 4.11k | } |
67 | | |
68 | | static const struct hvm_io_ops mmio_ops = { |
69 | | .accept = hvm_mmio_accept, |
70 | | .read = hvm_mmio_read, |
71 | | .write = hvm_mmio_write |
72 | | }; |
73 | | |
74 | | static bool_t hvm_portio_accept(const struct hvm_io_handler *handler, |
75 | | const ioreq_t *p) |
76 | 40.0k | { |
77 | 40.0k | unsigned int start = handler->portio.port; |
78 | 40.0k | unsigned int end = start + handler->portio.size; |
79 | 40.0k | |
80 | 40.0k | BUG_ON(handler->type != IOREQ_TYPE_PIO); |
81 | 40.0k | |
82 | 20.0k | return (p->addr >= start) && ((p->addr + p->size) <= end); |
83 | 40.0k | } |
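
/*
 * Worked example for the range check above, assuming a handler registered
 * with port = 0x70 and size = 2, i.e. start = 0x70 and end = 0x72: a
 * one-byte access at 0x71 is accepted (0x71 >= 0x70 and 0x71 + 1 <= 0x72),
 * whereas a two-byte access at 0x71 is rejected (0x71 + 2 > 0x72) because
 * it would run past the registered range.
 */
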
84 | | |
85 | | static int hvm_portio_read(const struct hvm_io_handler *handler, |
86 | | uint64_t addr, uint32_t size, uint64_t *data) |
87 | 0 | { |
88 | 0 | uint32_t val = ~0u; |
89 | 0 | int rc; |
90 | 0 | |
91 | 0 | BUG_ON(handler->type != IOREQ_TYPE_PIO); |
92 | 0 | |
93 | 0 | rc = handler->portio.action(IOREQ_READ, addr, size, &val); |
94 | 0 | *data = val; |
95 | 0 | |
96 | 0 | return rc; |
97 | 0 | } |
98 | | |
99 | | static int hvm_portio_write(const struct hvm_io_handler *handler, |
100 | | uint64_t addr, uint32_t size, uint64_t data) |
101 | 0 | { |
102 | 0 | uint32_t val = data; |
103 | 0 | |
104 | 0 | BUG_ON(handler->type != IOREQ_TYPE_PIO); |
105 | 0 | |
106 | 0 | return handler->portio.action(IOREQ_WRITE, addr, size, &val); |
107 | 0 | } |
108 | | |
109 | | static const struct hvm_io_ops portio_ops = { |
110 | | .accept = hvm_portio_accept, |
111 | | .read = hvm_portio_read, |
112 | | .write = hvm_portio_write |
113 | | }; |
114 | | |
115 | | int hvm_process_io_intercept(const struct hvm_io_handler *handler, |
116 | | ioreq_t *p) |
117 | 80.2k | { |
118 | 80.2k | const struct hvm_io_ops *ops = handler->ops; |
119 | 80.2k | int rc = X86EMUL_OKAY, i, step = p->df ? -p->size : p->size; |
120 | 80.2k | uint64_t data; |
121 | 80.2k | uint64_t addr; |
122 | 80.2k | |
123 | 80.2k | if ( p->dir == IOREQ_READ ) |
124 | 76.0k | { |
125 | 152k | for ( i = 0; i < p->count; i++ ) |
126 | 76.0k | { |
127 | 76.0k | addr = (p->type == IOREQ_TYPE_COPY) ? |
128 | 56.0k | p->addr + step * i : |
129 | 20.0k | p->addr; |
130 | 76.0k | data = 0; |
131 | 76.0k | rc = ops->read(handler, addr, p->size, &data); |
132 | 76.0k | if ( rc != X86EMUL_OKAY ) |
133 | 0 | break; |
134 | 76.0k | |
135 | 76.0k | if ( p->data_is_ptr ) |
136 | 0 | { |
137 | 0 | switch ( hvm_copy_to_guest_phys(p->data + step * i, |
138 | 0 | &data, p->size, current) ) |
139 | 0 | { |
140 | 0 | case HVMTRANS_okay: |
141 | 0 | break; |
142 | 0 | case HVMTRANS_bad_gfn_to_mfn: |
143 | 0 | /* Drop the write as real hardware would. */ |
144 | 0 | continue; |
145 | 0 | case HVMTRANS_bad_linear_to_gfn: |
146 | 0 | case HVMTRANS_gfn_paged_out: |
147 | 0 | case HVMTRANS_gfn_shared: |
148 | 0 | ASSERT_UNREACHABLE(); |
149 | 0 | /* fall through */ |
150 | 0 | default: |
151 | 0 | domain_crash(current->domain); |
152 | 0 | return X86EMUL_UNHANDLEABLE; |
153 | 0 | } |
154 | 0 | } |
155 | 76.0k | else |
156 | 76.0k | p->data = data; |
157 | 76.0k | } |
158 | 76.0k | } |
159 | 80.2k | else /* p->dir == IOREQ_WRITE */ |
160 | 4.17k | { |
161 | 8.35k | for ( i = 0; i < p->count; i++ ) |
162 | 4.17k | { |
163 | 4.17k | if ( p->data_is_ptr ) |
164 | 0 | { |
165 | 0 | data = 0; |
166 | 0 | switch ( hvm_copy_from_guest_phys(&data, p->data + step * i, |
167 | 0 | p->size) ) |
168 | 0 | { |
169 | 0 | case HVMTRANS_okay: |
170 | 0 | break; |
171 | 0 | case HVMTRANS_bad_gfn_to_mfn: |
172 | 0 | data = ~0; |
173 | 0 | break; |
174 | 0 | case HVMTRANS_bad_linear_to_gfn: |
175 | 0 | case HVMTRANS_gfn_paged_out: |
176 | 0 | case HVMTRANS_gfn_shared: |
177 | 0 | ASSERT_UNREACHABLE(); |
178 | 0 | /* fall through */ |
179 | 0 | default: |
180 | 0 | domain_crash(current->domain); |
181 | 0 | return X86EMUL_UNHANDLEABLE; |
182 | 0 | } |
183 | 0 | } |
184 | 4.17k | else |
185 | 4.17k | data = p->data; |
186 | 4.17k | |
187 | 4.17k | addr = (p->type == IOREQ_TYPE_COPY) ? |
188 | 4.11k | p->addr + step * i : |
189 | 58 | p->addr; |
190 | 4.17k | rc = ops->write(handler, addr, p->size, data); |
191 | 4.17k | if ( rc != X86EMUL_OKAY ) |
192 | 0 | break; |
193 | 4.17k | } |
194 | 4.17k | } |
195 | 80.2k | |
196 | 80.2k | if ( i ) |
197 | 80.2k | { |
198 | 80.2k | p->count = i; |
199 | 80.2k | rc = X86EMUL_OKAY; |
200 | 80.2k | } |
201 | 0 | else if ( rc == X86EMUL_UNHANDLEABLE ) |
202 | 0 | { |
203 | 0 | /* |
204 | 0 | * Don't forward entire batches to the device model: This would |
205 | 0 | * prevent the internal handlers from seeing subsequent iterations of |
206 | 0 | * the request. |
207 | 0 | */ |
208 | 0 | p->count = 1; |
209 | 0 | } |
210 | 80.2k | |
211 | 80.2k | return rc; |
212 | 80.2k | } |
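
/*
 * Stepping sketch for the loops above (the concrete values are
 * hypothetical): for a rep-prefixed access with p->count == 3,
 * p->size == 2 and p->df == 0, step is +2, so a data_is_ptr request
 * touches the guest buffer at p->data + 0, + 2 and + 4; with the
 * direction flag set (p->df == 1), step is -2 and the buffer is walked
 * backwards. Only MMIO (IOREQ_TYPE_COPY) advances the device address as
 * p->addr + step * i; port I/O hits the same p->addr on every iteration.
 */
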
213 | | |
214 | | static const struct hvm_io_handler *hvm_find_io_handler(const ioreq_t *p) |
215 | 505k | { |
216 | 505k | struct domain *curr_d = current->domain; |
217 | 505k | unsigned int i; |
218 | 505k | |
219 | 505k | BUG_ON((p->type != IOREQ_TYPE_PIO) && |
220 | 505k | (p->type != IOREQ_TYPE_COPY)); |
221 | 505k | |
222 | 4.40M | for ( i = 0; i < curr_d->arch.hvm_domain.io_handler_count; i++ ) |
223 | 4.01M | { |
224 | 4.01M | const struct hvm_io_handler *handler = |
225 | 4.01M | &curr_d->arch.hvm_domain.io_handler[i]; |
226 | 4.01M | const struct hvm_io_ops *ops = handler->ops; |
227 | 4.01M | |
228 | 4.01M | if ( handler->type != p->type ) |
229 | 2.01M | continue; |
230 | 4.01M | |
231 | 2.00M | if ( ops->accept(handler, p) ) |
232 | 120k | return handler; |
233 | 2.00M | } |
234 | 505k | |
235 | 384k | return NULL; |
236 | 505k | } |
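
/*
 * Dispatch note for the scan above: handlers are probed in registration
 * order and the first one whose accept() callback claims the request wins,
 * so overlapping registrations resolve in favour of the earlier one. The
 * scan is a plain linear walk bounded by io_handler_count, which
 * hvm_next_io_handler() below caps at NR_IO_HANDLERS entries.
 */
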
237 | | |
238 | | int hvm_io_intercept(ioreq_t *p) |
239 | 80.2k | { |
240 | 80.2k | const struct hvm_io_handler *handler; |
241 | 80.2k | const struct hvm_io_ops *ops; |
242 | 80.2k | int rc; |
243 | 80.2k | |
244 | 80.2k | handler = hvm_find_io_handler(p); |
245 | 80.2k | |
246 | 80.2k | if ( handler == NULL ) |
247 | 20.0k | return X86EMUL_UNHANDLEABLE; |
248 | 80.2k | |
249 | 60.1k | rc = hvm_process_io_intercept(handler, p); |
250 | 60.1k | |
251 | 60.1k | ops = handler->ops; |
252 | 60.1k | if ( ops->complete != NULL ) |
253 | 0 | ops->complete(handler); |
254 | 60.1k | |
255 | 60.1k | return rc; |
256 | 80.2k | } |
257 | | |
258 | | struct hvm_io_handler *hvm_next_io_handler(struct domain *d) |
259 | 8 | { |
260 | 8 | unsigned int i = d->arch.hvm_domain.io_handler_count++; |
261 | 8 | |
262 | 8 | ASSERT(d->arch.hvm_domain.io_handler); |
263 | 8 | |
264 | 8 | if ( i == NR_IO_HANDLERS ) |
265 | 0 | { |
266 | 0 | domain_crash(d); |
267 | 0 | return NULL; |
268 | 0 | } |
269 | 8 | |
270 | 8 | return &d->arch.hvm_domain.io_handler[i]; |
271 | 8 | } |
272 | | |
273 | | void register_mmio_handler(struct domain *d, |
274 | | const struct hvm_mmio_ops *ops) |
275 | 4 | { |
276 | 4 | struct hvm_io_handler *handler = hvm_next_io_handler(d); |
277 | 4 | |
278 | 4 | if ( handler == NULL ) |
279 | 0 | return; |
280 | 4 | |
281 | 4 | handler->type = IOREQ_TYPE_COPY; |
282 | 4 | handler->ops = &mmio_ops; |
283 | 4 | handler->mmio.ops = ops; |
284 | 4 | } |
285 | | |
286 | | void register_portio_handler(struct domain *d, unsigned int port, |
287 | | unsigned int size, portio_action_t action) |
288 | 2 | { |
289 | 2 | struct hvm_io_handler *handler = hvm_next_io_handler(d); |
290 | 2 | |
291 | 2 | if ( handler == NULL ) |
292 | 0 | return; |
293 | 2 | |
294 | 2 | handler->type = IOREQ_TYPE_PIO; |
295 | 2 | handler->ops = &portio_ops; |
296 | 2 | handler->portio.port = port; |
297 | 2 | handler->portio.size = size; |
298 | 2 | handler->portio.action = action; |
299 | 2 | } |
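
/*
 * Registration sketch for the function above (hypothetical device:
 * demo_port_io, DEMO_PORT and demo_device_init are not from this file):
 * a single portio_action_t callback handles both directions behind a
 * two-byte port range.
 */
static int demo_port_io(int dir, unsigned int port, unsigned int bytes,
                        uint32_t *val)
{
    if ( dir == IOREQ_READ )
        *val = 0;          /* reads-as-zero in this sketch */
    /* IOREQ_WRITE: the value in *val is simply discarded */
    return X86EMUL_OKAY;
}

static void demo_device_init(struct domain *d)
{
    register_portio_handler(d, DEMO_PORT, 2, demo_port_io);
}
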
300 | | |
301 | | void relocate_portio_handler(struct domain *d, unsigned int old_port, |
302 | | unsigned int new_port, unsigned int size) |
303 | 0 | { |
304 | 0 | unsigned int i; |
305 | 0 | |
306 | 0 | for ( i = 0; i < d->arch.hvm_domain.io_handler_count; i++ ) |
307 | 0 | { |
308 | 0 | struct hvm_io_handler *handler = |
309 | 0 | &d->arch.hvm_domain.io_handler[i]; |
310 | 0 | |
311 | 0 | if ( handler->type != IOREQ_TYPE_PIO ) |
312 | 0 | continue; |
313 | 0 | |
314 | 0 | if ( (handler->portio.port == old_port) && |
315 | 0 | (handler->portio.size == size) ) |
316 | 0 | { |
317 | 0 | handler->portio.port = new_port; |
318 | 0 | break; |
319 | 0 | } |
320 | 0 | } |
321 | 0 | } |
322 | | |
323 | | bool_t hvm_mmio_internal(paddr_t gpa) |
324 | 425k | { |
325 | 425k | const struct hvm_io_handler *handler; |
326 | 425k | const struct hvm_io_ops *ops; |
327 | 425k | ioreq_t p = { |
328 | 425k | .type = IOREQ_TYPE_COPY, |
329 | 425k | .addr = gpa, |
330 | 425k | .count = 1, |
331 | 425k | .size = 1, |
332 | 425k | }; |
333 | 425k | |
334 | 425k | handler = hvm_find_io_handler(&p); |
335 | 425k | |
336 | 425k | if ( handler == NULL ) |
337 | 366k | return 0; |
338 | 425k | |
339 | 58.5k | ops = handler->ops; |
340 | 58.5k | if ( ops->complete != NULL ) |
341 | 0 | ops->complete(handler); |
342 | 58.5k | |
343 | 58.5k | return 1; |
344 | 425k | } |
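
/*
 * Usage sketch for hvm_mmio_internal() above (its callers live outside this
 * file): a fault handler can probe whether a guest-physical address belongs
 * to an internal handler, using the one-byte dummy ioreq built above, e.g.
 *
 *     if ( hvm_mmio_internal(gpa) )
 *         return handle_in_hypervisor(gpa);  // hypothetical caller
 *
 * before deciding whether to forward the access to an external emulator.
 */
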
345 | | |
346 | | /* |
347 | | * Local variables: |
348 | | * mode: C |
349 | | * c-file-style: "BSD" |
350 | | * c-basic-offset: 4 |
351 | | * tab-width: 4 |
352 | | * indent-tabs-mode: nil |
353 | | * End: |
354 | | */ |