Coverage Report

Created: 2017-10-25 09:10

/root/src/xen/xen/include/public/hvm/dm_op.h
Line
Count
Source (jump to first uncovered line)
1
/*
2
 * Copyright (c) 2016, Citrix Systems Inc
3
 *
4
 * Permission is hereby granted, free of charge, to any person obtaining a copy
5
 * of this software and associated documentation files (the "Software"), to
6
 * deal in the Software without restriction, including without limitation the
7
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
8
 * sell copies of the Software, and to permit persons to whom the Software is
9
 * furnished to do so, subject to the following conditions:
10
 *
11
 * The above copyright notice and this permission notice shall be included in
12
 * all copies or substantial portions of the Software.
13
 *
14
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
17
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
20
 * DEALINGS IN THE SOFTWARE.
21
 *
22
 */
23
24
#ifndef __XEN_PUBLIC_HVM_DM_OP_H__
25
#define __XEN_PUBLIC_HVM_DM_OP_H__
26
27
#include "../xen.h"
28
29
#if defined(__XEN__) || defined(__XEN_TOOLS__)
30
31
#include "../event_channel.h"
32
33
#ifndef uint64_aligned_t
34
#define uint64_aligned_t uint64_t
35
#endif
36
37
/*
38
 * IOREQ Servers
39
 *
40
 * The interface between an I/O emulator and Xen is called an IOREQ Server.
41
 * A domain supports a single 'legacy' IOREQ Server which is instantiated if
42
 * parameter...
43
 *
44
 * HVM_PARAM_IOREQ_PFN is read (to get the gfn containing the synchronous
45
 * ioreq structures), or...
46
 * HVM_PARAM_BUFIOREQ_PFN is read (to get the gfn containing the buffered
47
 * ioreq ring), or...
48
 * HVM_PARAM_BUFIOREQ_EVTCHN is read (to get the event channel that Xen uses
49
 * to request buffered I/O emulation).
50
 *
51
 * The following hypercalls facilitate the creation of IOREQ Servers for
52
 * 'secondary' emulators which are invoked to implement port I/O, memory, or
53
 * PCI config space ranges which they explicitly register.
54
 */
55
56
typedef uint16_t ioservid_t;
57
58
/*
59
 * XEN_DMOP_create_ioreq_server: Instantiate a new IOREQ Server for a
60
 *                               secondary emulator.
61
 *
62
 * The <id> handed back is unique for the target domain. The value of
63
 * <handle_bufioreq> should be one of HVM_IOREQSRV_BUFIOREQ_* defined in
64
 * hvm_op.h. If the value is HVM_IOREQSRV_BUFIOREQ_OFF then the buffered
65
 * ioreq ring will not be allocated and hence all emulation requests to
66
 * this server will be synchronous.
67
 */
68
0
#define XEN_DMOP_create_ioreq_server 1
69
70
struct xen_dm_op_create_ioreq_server {
71
    /* IN - should server handle buffered ioreqs */
72
    uint8_t handle_bufioreq;
73
    uint8_t pad[3];
74
    /* OUT - server id */
75
    ioservid_t id;
76
};
77
78
/*
79
 * XEN_DMOP_get_ioreq_server_info: Get all the information necessary to
80
 *                                 access IOREQ Server <id>.
81
 *
82
 * The emulator needs to map the synchronous ioreq structures and buffered
83
 * ioreq ring (if it exists) that Xen uses to request emulation. These are
84
 * hosted in the target domain's gmfns <ioreq_gfn> and <bufioreq_gfn>
85
 * respectively. In addition, if the IOREQ Server is handling buffered
86
 * emulation requests, the emulator needs to bind to event channel
87
 * <bufioreq_port> to listen for them. (The event channels used for
88
 * synchronous emulation requests are specified in the per-CPU ioreq
89
 * structures in <ioreq_gfn>).
90
 * If the IOREQ Server is not handling buffered emulation requests then the
91
 * values handed back in <bufioreq_gfn> and <bufioreq_port> will both be 0.
92
 */
93
0
#define XEN_DMOP_get_ioreq_server_info 2
94
95
struct xen_dm_op_get_ioreq_server_info {
96
    /* IN - server id */
97
    ioservid_t id;
98
    uint16_t pad;
99
    /* OUT - buffered ioreq port */
100
    evtchn_port_t bufioreq_port;
101
    /* OUT - sync ioreq gfn */
102
    uint64_aligned_t ioreq_gfn;
103
    /* OUT - buffered ioreq gfn */
104
    uint64_aligned_t bufioreq_gfn;
105
};
106
107
/*
108
 * XEN_DMOP_map_io_range_to_ioreq_server: Register an I/O range for
109
 *                                        emulation by the client of
110
 *                                        IOREQ Server <id>.
111
 * XEN_DMOP_unmap_io_range_from_ioreq_server: Deregister an I/O range
112
 *                                            previously registered for
113
 *                                            emulation by the client of
114
 *                                            IOREQ Server <id>.
115
 *
116
 * There are three types of I/O that can be emulated: port I/O, memory
117
 * accesses and PCI config space accesses. The <type> field denotes which
118
 * type of range the <start> and <end> (inclusive) fields are specifying.
119
 * PCI config space ranges are specified by segment/bus/device/function
120
 * values which should be encoded using the DMOP_PCI_SBDF helper macro
121
 * below.
122
 *
123
 * NOTE: unless an emulation request falls entirely within a range mapped
124
 * by a secondary emulator, it will not be passed to that emulator.
125
 */
126
0
#define XEN_DMOP_map_io_range_to_ioreq_server 3
127
0
#define XEN_DMOP_unmap_io_range_from_ioreq_server 4
128
129
struct xen_dm_op_ioreq_server_range {
130
    /* IN - server id */
131
    ioservid_t id;
132
    uint16_t pad;
133
    /* IN - type of range */
134
    uint32_t type;
135
0
# define XEN_DMOP_IO_RANGE_PORT   0 /* I/O port range */
136
0
# define XEN_DMOP_IO_RANGE_MEMORY 1 /* MMIO range */
137
0
# define XEN_DMOP_IO_RANGE_PCI    2 /* PCI segment/bus/dev/func range */
138
    /* IN - inclusive start and end of range */
139
    uint64_aligned_t start, end;
140
};
141
142
#define XEN_DMOP_PCI_SBDF(s,b,d,f) \
143
  ((((s) & 0xffff) << 16) |  \
144
   (((b) & 0xff) << 8) |     \
145
   (((d) & 0x1f) << 3) |     \
146
   ((f) & 0x07))
147
148
/*
149
 * XEN_DMOP_set_ioreq_server_state: Enable or disable the IOREQ Server <id>
150
 *
151
 * The IOREQ Server will not be passed any emulation requests until it is
152
 * in the enabled state.
153
 * Note that the contents of the ioreq_gfn and bufioreq_gfn (see
154
 * XEN_DMOP_get_ioreq_server_info) are not meaningful until the IOREQ Server
155
 * is in the enabled state.
156
 */
157
0
#define XEN_DMOP_set_ioreq_server_state 5
158
159
struct xen_dm_op_set_ioreq_server_state {
160
    /* IN - server id */
161
    ioservid_t id;
162
    /* IN - enabled? */
163
    uint8_t enabled;
164
    uint8_t pad;
165
};
166
167
/*
168
 * XEN_DMOP_destroy_ioreq_server: Destroy the IOREQ Server <id>.
169
 *
170
 * Any registered I/O ranges will be automatically deregistered.
171
 */
172
0
#define XEN_DMOP_destroy_ioreq_server 6
173
174
struct xen_dm_op_destroy_ioreq_server {
175
    /* IN - server id */
176
    ioservid_t id;
177
    uint16_t pad;
178
};
179
180
/*
181
 * XEN_DMOP_track_dirty_vram: Track modifications to the specified pfn
182
 *                            range.
183
 *
184
 * NOTE: The bitmap passed back to the caller is passed in a
185
 *       secondary buffer.
186
 */
187
0
#define XEN_DMOP_track_dirty_vram 7
188
189
struct xen_dm_op_track_dirty_vram {
190
    /* IN - number of pages to be tracked */
191
    uint32_t nr;
192
    uint32_t pad;
193
    /* IN - first pfn to track */
194
    uint64_aligned_t first_pfn;
195
};
196
197
/*
198
 * XEN_DMOP_set_pci_intx_level: Set the logical level of one of a domain's
199
 *                              PCI INTx pins.
200
 */
201
0
#define XEN_DMOP_set_pci_intx_level 8
202
203
struct xen_dm_op_set_pci_intx_level {
204
    /* IN - PCI INTx identification (domain:bus:device:intx) */
205
    uint16_t domain;
206
    uint8_t bus, device, intx;
207
    /* IN - Level: 0 -> deasserted, 1 -> asserted */
208
    uint8_t  level;
209
};
210
211
/*
212
 * XEN_DMOP_set_isa_irq_level: Set the logical level of one of a domain's
213
 *                             ISA IRQ lines.
214
 */
215
0
#define XEN_DMOP_set_isa_irq_level 9
216
217
struct xen_dm_op_set_isa_irq_level {
218
    /* IN - ISA IRQ (0-15) */
219
    uint8_t  isa_irq;
220
    /* IN - Level: 0 -> deasserted, 1 -> asserted */
221
    uint8_t  level;
222
};
223
224
/*
225
 * XEN_DMOP_set_pci_link_route: Map a PCI INTx line to an IRQ line.
226
 */
227
0
#define XEN_DMOP_set_pci_link_route 10
228
229
struct xen_dm_op_set_pci_link_route {
230
    /* PCI INTx line (0-3) */
231
    uint8_t  link;
232
    /* ISA IRQ (1-15) or 0 -> disable link */
233
    uint8_t  isa_irq;
234
};
235
236
/*
237
 * XEN_DMOP_modified_memory: Notify that a set of pages were modified by
238
 *                           an emulator.
239
 *
240
 * DMOP buf 1 contains an array of xen_dm_op_modified_memory_extent with
241
 * @nr_extents entries.
242
 *
243
 * On error, @nr_extents will contain the index+1 of the extent that
244
 * had the error.  It is not defined if or which pages may have been
245
 * marked as dirty, in this event.
246
 */
247
0
#define XEN_DMOP_modified_memory 11
248
249
struct xen_dm_op_modified_memory {
250
    /*
251
     * IN - Number of extents to be processed
252
     * OUT - returns n+1 for failing extent
253
     */
254
    uint32_t nr_extents;
255
    /* IN/OUT - Must be set to 0 */
256
    uint32_t opaque;
257
};
258
259
struct xen_dm_op_modified_memory_extent {
260
    /* IN - number of contiguous pages modified */
261
    uint32_t nr;
262
    uint32_t pad;
263
    /* IN - first pfn modified */
264
    uint64_aligned_t first_pfn;
265
};
266
267
/*
268
 * XEN_DMOP_set_mem_type: Notify that a region of memory is to be treated
269
 *                        in a specific way. (See definition of
270
 *                        hvmmem_type_t).
271
 *
272
 * NOTE: In the event of a continuation (return code -ERESTART), the
273
 *       @first_pfn is set to the value of the pfn of the remaining
274
 *       region and @nr reduced to the size of the remaining region.
275
 */
276
0
#define XEN_DMOP_set_mem_type 12
277
278
struct xen_dm_op_set_mem_type {
279
    /* IN - number of contiguous pages */
280
    uint32_t nr;
281
    /* IN - new hvmmem_type_t of region */
282
    uint16_t mem_type;
283
    uint16_t pad;
284
    /* IN - first pfn in region */
285
    uint64_aligned_t first_pfn;
286
};
287
288
/*
289
 * XEN_DMOP_inject_event: Inject an event into a VCPU, which will
290
 *                        get taken up when it is next scheduled.
291
 *
292
 * Note that the caller should know enough of the state of the CPU before
293
 * injecting, to know what the effect of injecting the event will be.
294
 */
295
0
#define XEN_DMOP_inject_event 13
296
297
struct xen_dm_op_inject_event {
298
    /* IN - index of vCPU */
299
    uint32_t vcpuid;
300
    /* IN - interrupt vector */
301
    uint8_t vector;
302
    /* IN - event type (DMOP_EVENT_* ) */
303
    uint8_t type;
304
/* NB. This enumeration precisely matches hvm.h:X86_EVENTTYPE_* */
305
# define XEN_DMOP_EVENT_ext_int    0 /* external interrupt */
306
# define XEN_DMOP_EVENT_nmi        2 /* nmi */
307
# define XEN_DMOP_EVENT_hw_exc     3 /* hardware exception */
308
# define XEN_DMOP_EVENT_sw_int     4 /* software interrupt (CD nn) */
309
# define XEN_DMOP_EVENT_pri_sw_exc 5 /* ICEBP (F1) */
310
# define XEN_DMOP_EVENT_sw_exc     6 /* INT3 (CC), INTO (CE) */
311
    /* IN - instruction length */
312
    uint8_t insn_len;
313
    uint8_t pad0;
314
    /* IN - error code (or ~0 to skip) */
315
    uint32_t error_code;
316
    uint32_t pad1;
317
    /* IN - CR2 for page faults */
318
    uint64_aligned_t cr2;
319
};
320
321
/*
322
 * XEN_DMOP_inject_msi: Inject an MSI for an emulated device.
323
 */
324
0
#define XEN_DMOP_inject_msi 14
325
326
struct xen_dm_op_inject_msi {
327
    /* IN - MSI data (lower 32 bits) */
328
    uint32_t data;
329
    uint32_t pad;
330
    /* IN - MSI address (0xfeexxxxx) */
331
    uint64_aligned_t addr;
332
};
333
334
/*
335
 * XEN_DMOP_map_mem_type_to_ioreq_server : map or unmap the IOREQ Server <id>
336
 *                                      to specific memory type <type>
337
 *                                      for specific accesses <flags>
338
 *
339
 * For now, flags only accept the value of XEN_DMOP_IOREQ_MEM_ACCESS_WRITE,
340
 * which means only write operations are to be forwarded to an ioreq server.
341
 * Support for the emulation of read operations can be added when an ioreq
342
 * server has such requirement in future.
343
 */
344
0
#define XEN_DMOP_map_mem_type_to_ioreq_server 15
345
346
struct xen_dm_op_map_mem_type_to_ioreq_server {
347
    ioservid_t id;      /* IN - ioreq server id */
348
    uint16_t type;      /* IN - memory type */
349
    uint32_t flags;     /* IN - types of accesses to be forwarded to the
350
                           ioreq server. flags with 0 means to unmap the
351
                           ioreq server */
352
353
#define XEN_DMOP_IOREQ_MEM_ACCESS_READ (1u << 0)
354
0
#define XEN_DMOP_IOREQ_MEM_ACCESS_WRITE (1u << 1)
355
356
    uint64_t opaque;    /* IN/OUT - only used for hypercall continuation,
357
                           has to be set to zero by the caller */
358
};
359
360
/*
361
 * XEN_DMOP_remote_shutdown : Declare a shutdown for another domain
362
 *                            Identical to SCHEDOP_remote_shutdown
363
 */
364
0
#define XEN_DMOP_remote_shutdown 16
365
366
struct xen_dm_op_remote_shutdown {
367
    uint32_t reason;       /* SHUTDOWN_* => enum sched_shutdown_reason */
368
                           /* (Other reason values are not blocked) */
369
};
370
371
struct xen_dm_op {
372
    uint32_t op;
373
    uint32_t pad;
374
    union {
375
        struct xen_dm_op_create_ioreq_server create_ioreq_server;
376
        struct xen_dm_op_get_ioreq_server_info get_ioreq_server_info;
377
        struct xen_dm_op_ioreq_server_range map_io_range_to_ioreq_server;
378
        struct xen_dm_op_ioreq_server_range unmap_io_range_from_ioreq_server;
379
        struct xen_dm_op_set_ioreq_server_state set_ioreq_server_state;
380
        struct xen_dm_op_destroy_ioreq_server destroy_ioreq_server;
381
        struct xen_dm_op_track_dirty_vram track_dirty_vram;
382
        struct xen_dm_op_set_pci_intx_level set_pci_intx_level;
383
        struct xen_dm_op_set_isa_irq_level set_isa_irq_level;
384
        struct xen_dm_op_set_pci_link_route set_pci_link_route;
385
        struct xen_dm_op_modified_memory modified_memory;
386
        struct xen_dm_op_set_mem_type set_mem_type;
387
        struct xen_dm_op_inject_event inject_event;
388
        struct xen_dm_op_inject_msi inject_msi;
389
        struct xen_dm_op_map_mem_type_to_ioreq_server
390
                map_mem_type_to_ioreq_server;
391
        struct xen_dm_op_remote_shutdown remote_shutdown;
392
    } u;
393
};
394
395
#endif /* __XEN__ || __XEN_TOOLS__ */
396
397
struct xen_dm_op_buf {
398
    XEN_GUEST_HANDLE(void) h;
399
    xen_ulong_t size;
400
};
401
typedef struct xen_dm_op_buf xen_dm_op_buf_t;
402
DEFINE_XEN_GUEST_HANDLE(xen_dm_op_buf_t);
403
404
/* ` enum neg_errnoval
405
 * ` HYPERVISOR_dm_op(domid_t domid,
406
 * `                  unsigned int nr_bufs,
407
 * `                  xen_dm_op_buf_t bufs[])
408
 * `
409
 *
410
 * @domid is the domain the hypercall operates on.
411
 * @nr_bufs is the number of buffers in the @bufs array.
412
 * @bufs points to an array of buffers where @bufs[0] contains a struct
413
 * xen_dm_op, describing the specific device model operation and its
414
 * parameters.
415
 * @bufs[1..] may be referenced in the parameters for the purposes of
416
 * passing extra information to or from the domain.
417
 */
418
419
#endif /* __XEN_PUBLIC_HVM_DM_OP_H__ */
420
421
/*
422
 * Local variables:
423
 * mode: C
424
 * c-file-style: "BSD"
425
 * c-basic-offset: 4
426
 * tab-width: 4
427
 * indent-tabs-mode: nil
428
 * End:
429
 */