/root/src/xen/xen/include/public/memory.h
Line | Count | Source (jump to first uncovered line) |
1 | | /****************************************************************************** |
2 | | * memory.h |
3 | | * |
4 | | * Memory reservation and information. |
5 | | * |
6 | | * Permission is hereby granted, free of charge, to any person obtaining a copy |
7 | | * of this software and associated documentation files (the "Software"), to |
8 | | * deal in the Software without restriction, including without limitation the |
9 | | * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or |
10 | | * sell copies of the Software, and to permit persons to whom the Software is |
11 | | * furnished to do so, subject to the following conditions: |
12 | | * |
13 | | * The above copyright notice and this permission notice shall be included in |
14 | | * all copies or substantial portions of the Software. |
15 | | * |
16 | | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
17 | | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
18 | | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE |
19 | | * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER |
20 | | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING |
21 | | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER |
22 | | * DEALINGS IN THE SOFTWARE. |
23 | | * |
24 | | * Copyright (c) 2005, Keir Fraser <keir@xensource.com> |
25 | | */ |
26 | | |
27 | | #ifndef __XEN_PUBLIC_MEMORY_H__ |
28 | | #define __XEN_PUBLIC_MEMORY_H__ |
29 | | |
30 | | #include "xen.h" |
31 | | #include "physdev.h" |
32 | | |
33 | | /*
34 | | * Increase or decrease the specified domain's memory reservation. Returns the
35 | | * number of extents successfully allocated or freed.
36 | | * arg == addr of struct xen_memory_reservation.
37 | | */
38 | 0 | #define XENMEM_increase_reservation 0
39 | 4 | #define XENMEM_decrease_reservation 1
40 | 0 | #define XENMEM_populate_physmap 6
41 | |
42 | | #if __XEN_INTERFACE_VERSION__ >= 0x00030209
43 | | /*
44 | | * Maximum # bits addressable by the user of the allocated region (e.g., I/O
45 | | * devices often have a 32-bit limitation even in 64-bit systems). If zero
46 | | * then the user has no addressing restriction. This field is not used by
47 | | * XENMEM_decrease_reservation.
48 | | */
49 | | #define XENMEMF_address_bits(x) (x)
50 | 0 | #define XENMEMF_get_address_bits(x) ((x) & 0xffu)
51 | | /* NUMA node to allocate from. The node is stored in bits 8-15 biased by
 | | * +1 (see XENMEMF_get_node), so a mem_flags value with these bits zero
 | | * means "no node specified". */
52 | | #define XENMEMF_node(x) (((x) + 1) << 8)
53 | 0 | #define XENMEMF_get_node(x) ((((x) >> 8) - 1) & 0xffu)
54 | | /* Flag to populate physmap with populate-on-demand entries */
55 | 0 | #define XENMEMF_populate_on_demand (1<<16)
56 | | /* Flag to request allocation only from the node specified */
57 | 0 | #define XENMEMF_exact_node_request (1<<17)
58 | | #define XENMEMF_exact_node(n) (XENMEMF_node(n) | XENMEMF_exact_node_request)
59 | | /* Flag to indicate the node specified is virtual node */
60 | 0 | #define XENMEMF_vnode (1<<18)
61 | | #endif
62 | | |
63 | | struct xen_memory_reservation {
64 | |
65 | | /*
66 | | * XENMEM_increase_reservation:
67 | | * OUT: MFN (*not* GMFN) bases of extents that were allocated
68 | | * XENMEM_decrease_reservation:
69 | | * IN: GMFN bases of extents to free
70 | | * XENMEM_populate_physmap:
71 | | * IN: GPFN bases of extents to populate with memory
72 | | * OUT: GMFN bases of extents that were allocated
73 | | * (NB. This command also updates the mach_to_phys translation table)
74 | | * XENMEM_claim_pages:
75 | | * IN: must be zero
76 | | */
77 | | XEN_GUEST_HANDLE(xen_pfn_t) extent_start;
78 | |
79 | | /* Number of extents, and size/alignment of each (2^extent_order pages). */
80 | | xen_ulong_t nr_extents;
81 | | unsigned int extent_order;
82 | |
83 | | #if __XEN_INTERFACE_VERSION__ >= 0x00030209
84 | | /* XENMEMF_* flags (address-bits restriction, NUMA node, PoD, etc.). */
85 | | unsigned int mem_flags;
86 | | #else
 | | /* Pre-3.2.9 interface: raw address-width restriction only. */
87 | | unsigned int address_bits;
88 | | #endif
89 | |
90 | | /*
91 | | * Domain whose reservation is being changed.
92 | | * Unprivileged domains can specify only DOMID_SELF.
93 | | */
94 | | domid_t domid;
95 | | };
96 | | typedef struct xen_memory_reservation xen_memory_reservation_t;
97 | | DEFINE_XEN_GUEST_HANDLE(xen_memory_reservation_t);
98 | | |
99 | | /*
100 | | * An atomic exchange of memory pages. If return code is zero then
101 | | * @out.extent_list provides GMFNs of the newly-allocated memory.
102 | | * Returns zero on complete success, otherwise a negative error code.
103 | | * On complete success then always @nr_exchanged == @in.nr_extents.
104 | | * On partial success @nr_exchanged indicates how much work was done.
105 | | *
106 | | * Note that only PV guests can use this operation.
107 | | */
108 | 0 | #define XENMEM_exchange 11
109 | | struct xen_memory_exchange {
110 | | /*
111 | | * [IN] Details of memory extents to be exchanged (GMFN bases).
112 | | * Note that @in.address_bits is ignored and unused.
113 | | */
114 | | struct xen_memory_reservation in;
115 | |
116 | | /*
117 | | * [IN/OUT] Details of new memory extents.
118 | | * We require that:
119 | | * 1. @in.domid == @out.domid
120 | | * 2. @in.nr_extents << @in.extent_order ==
121 | | * @out.nr_extents << @out.extent_order
122 | | * 3. @in.extent_start and @out.extent_start lists must not overlap
123 | | * 4. @out.extent_start lists GPFN bases to be populated
124 | | * 5. @out.extent_start is overwritten with allocated GMFN bases
125 | | */
126 | | struct xen_memory_reservation out;
127 | |
128 | | /*
129 | | * [OUT] Number of input extents that were successfully exchanged:
130 | | * 1. The first @nr_exchanged input extents were successfully
131 | | * deallocated.
132 | | * 2. The corresponding first entries in the output extent list correctly
133 | | * indicate the GMFNs that were successfully exchanged.
134 | | * 3. All other input and output extents are untouched.
135 | | * 4. If not all input extents are exchanged then the return code of this
136 | | * command will be non-zero.
137 | | * 5. THIS FIELD MUST BE INITIALISED TO ZERO BY THE CALLER!
138 | | */
139 | | xen_ulong_t nr_exchanged;
140 | | };
141 | | typedef struct xen_memory_exchange xen_memory_exchange_t;
142 | | DEFINE_XEN_GUEST_HANDLE(xen_memory_exchange_t);
143 | | |
144 | | /*
145 | | * Returns the maximum machine frame number of mapped RAM in this system.
146 | | * This command always succeeds (it never returns an error code).
147 | | * arg == NULL.
148 | | */
149 | 0 | #define XENMEM_maximum_ram_page 2
150 | |
151 | | /*
152 | | * Returns the current or maximum memory reservation, in pages, of the
153 | | * specified domain (may be DOMID_SELF). Returns -ve errcode on failure.
154 | | * arg == addr of domid_t.
155 | | */
156 | 0 | #define XENMEM_current_reservation 3
157 | 0 | #define XENMEM_maximum_reservation 4
158 | |
159 | | /*
160 | | * Returns the maximum GPFN in use by the guest, or -ve errcode on failure.
 | | * (Subop numbers are assigned historically and are not contiguous.)
161 | | */
162 | 0 | #define XENMEM_maximum_gpfn 14
163 | | |
164 | | /*
165 | | * Returns a list of MFN bases of 2MB extents comprising the machine_to_phys
166 | | * mapping table. Architectures which do not have a m2p table do not implement
167 | | * this command.
168 | | * arg == addr of xen_machphys_mfn_list_t.
169 | | */
170 | 0 | #define XENMEM_machphys_mfn_list 5
171 | | struct xen_machphys_mfn_list {
172 | | /*
173 | | * IN: Size of the 'extent_start' array. Fewer entries will be filled if
174 | | * the machphys table is smaller than max_extents * 2MB.
175 | | */
176 | | unsigned int max_extents;
177 | |
178 | | /*
179 | | * IN: Pointer to buffer to fill with list of extent starts. If there are
180 | | * any large discontiguities in the machine address space, 2MB gaps in
181 | | * the machphys table will be represented by an MFN base of zero.
182 | | */
183 | | XEN_GUEST_HANDLE(xen_pfn_t) extent_start;
184 | |
185 | | /*
186 | | * OUT: Number of extents written to the above array. This will be smaller
187 | | * than 'max_extents' if the machphys table is smaller than
 | | * max_extents * 2MB.
188 | | */
189 | | unsigned int nr_extents;
190 | | };
191 | | typedef struct xen_machphys_mfn_list xen_machphys_mfn_list_t;
192 | | DEFINE_XEN_GUEST_HANDLE(xen_machphys_mfn_list_t);
193 | | |
194 | | /*
195 | | * For a compat caller, this is identical to XENMEM_machphys_mfn_list.
196 | | *
197 | | * For a non compat caller, this functions similarly to
198 | | * XENMEM_machphys_mfn_list, but returns the mfns making up the compatibility
199 | | * m2p table.
200 | | */
201 | 0 | #define XENMEM_machphys_compat_mfn_list 25
202 | |
203 | | /*
204 | | * Returns the location in virtual address space of the machine_to_phys
205 | | * mapping table. Architectures which do not have a m2p table, or which do not
206 | | * map it by default into guest address space, do not implement this command.
207 | | * arg == addr of xen_machphys_mapping_t.
208 | | */
209 | 0 | #define XENMEM_machphys_mapping 12
210 | | struct xen_machphys_mapping {
 | | /* All fields are OUT: filled in by XENMEM_machphys_mapping. */
211 | | xen_ulong_t v_start, v_end; /* Start and end virtual addresses. */
212 | | xen_ulong_t max_mfn; /* Maximum MFN that can be looked up. */
213 | | };
214 | | typedef struct xen_machphys_mapping xen_machphys_mapping_t;
215 | | DEFINE_XEN_GUEST_HANDLE(xen_machphys_mapping_t);
216 | | |
217 | | /* Source mapping space. */
218 | | /* ` enum phys_map_space { */
219 | 2 | #define XENMAPSPACE_shared_info 0 /* shared info page */
220 | 1 | #define XENMAPSPACE_grant_table 1 /* grant table page */
221 | 6 | #define XENMAPSPACE_gmfn 2 /* GMFN (contrast gmfn_foreign below) */
222 | 12 | #define XENMAPSPACE_gmfn_range 3 /* GMFN range, XENMEM_add_to_physmap only. */
223 | 6 | #define XENMAPSPACE_gmfn_foreign 4 /* GMFN from another dom,
224 | | * XENMEM_add_to_physmap_batch only. */
225 | 3 | #define XENMAPSPACE_dev_mmio 5 /* device mmio region
226 | | ARM only; the region is mapped in
227 | | Stage-2 using the Normal Memory
228 | | Inner/Outer Write-Back Cacheable
229 | | memory attribute. */
230 | | /* ` } */
231 | | |
232 | | /*
233 | | * Sets the GPFN at which a particular page appears in the specified guest's
234 | | * pseudophysical address space.
235 | | * arg == addr of xen_add_to_physmap_t.
236 | | */
237 | 3 | #define XENMEM_add_to_physmap 7
238 | | struct xen_add_to_physmap {
239 | | /* Which domain to change the mapping for. */
240 | | domid_t domid;
241 | |
242 | | /* Number of pages to go through for gmfn_range */
243 | | uint16_t size;
244 | |
245 | | unsigned int space; /* => enum phys_map_space */
246 | |
 | | /* High bit of 'idx': with XENMAPSPACE_grant_table, presumably selects
 | | * grant status frames rather than the table itself — NOTE(review):
 | | * confirm against grant_table.h. */
247 | 0 | #define XENMAPIDX_grant_table_status 0x80000000
248 | |
249 | | /* Index into space being mapped. */
250 | | xen_ulong_t idx;
251 | |
252 | | /* GPFN in domid where the source mapping page should appear. */
253 | | xen_pfn_t gpfn;
254 | | };
255 | | typedef struct xen_add_to_physmap xen_add_to_physmap_t;
256 | | DEFINE_XEN_GUEST_HANDLE(xen_add_to_physmap_t);
257 | | |
258 | | /* A batched version of add_to_physmap. */
259 | 0 | #define XENMEM_add_to_physmap_batch 23
260 | | struct xen_add_to_physmap_batch {
261 | | /* IN */
262 | | /* Which domain to change the mapping for. */
263 | | domid_t domid;
264 | | uint16_t space; /* => enum phys_map_space */
265 | |
266 | | /* Number of pages to go through */
267 | | uint16_t size;
268 | |
269 | | #if __XEN_INTERFACE_VERSION__ < 0x00040700
270 | | domid_t foreign_domid; /* IFF gmfn_foreign. Should be 0 for other spaces. */
271 | | #else
272 | | union xen_add_to_physmap_batch_extra {
273 | | domid_t foreign_domid; /* gmfn_foreign */
274 | | uint16_t res0; /* All the other spaces. Should be 0 */
275 | | } u;
276 | | #endif
277 | |
278 | | /* Indexes into space being mapped; 'size' entries. */
279 | | XEN_GUEST_HANDLE(xen_ulong_t) idxs;
280 | |
281 | | /* GPFN in domid where the source mapping page should appear; 'size' entries. */
282 | | XEN_GUEST_HANDLE(xen_pfn_t) gpfns;
283 | |
284 | | /* OUT */
285 | |
286 | | /* Per index error code; 'size' entries. */
287 | | XEN_GUEST_HANDLE(int) errs;
288 | | };
289 | | typedef struct xen_add_to_physmap_batch xen_add_to_physmap_batch_t;
290 | | DEFINE_XEN_GUEST_HANDLE(xen_add_to_physmap_batch_t);
291 | | |
292 | | #if __XEN_INTERFACE_VERSION__ < 0x00040400
 | | /* Pre-4.4 compatibility names for the batch interface above. */
293 | | #define XENMEM_add_to_physmap_range XENMEM_add_to_physmap_batch
294 | | #define xen_add_to_physmap_range xen_add_to_physmap_batch
295 | | typedef struct xen_add_to_physmap_batch xen_add_to_physmap_range_t;
296 | | DEFINE_XEN_GUEST_HANDLE(xen_add_to_physmap_range_t);
297 | | #endif
298 | | |
299 | | /*
300 | | * Unmaps the page appearing at a particular GPFN from the specified guest's
301 | | * pseudophysical address space.
302 | | * arg == addr of xen_remove_from_physmap_t.
303 | | */
304 | 0 | #define XENMEM_remove_from_physmap 15
305 | | struct xen_remove_from_physmap {
306 | | /* IN: Which domain to change the mapping for. */
307 | | domid_t domid;
308 | |
309 | | /* IN: GPFN of the current mapping of the page. */
310 | | xen_pfn_t gpfn;
311 | | };
312 | | typedef struct xen_remove_from_physmap xen_remove_from_physmap_t;
313 | | DEFINE_XEN_GUEST_HANDLE(xen_remove_from_physmap_t);
314 | | |
315 | | /*** REMOVED ***/ |
316 | | /*#define XENMEM_translate_gpfn_list 8*/ |
317 | | |
318 | | /*
319 | | * Returns the pseudo-physical memory map as it was when the domain
320 | | * was started (specified by XENMEM_set_memory_map).
321 | | * arg == addr of xen_memory_map_t.
322 | | */
323 | 1 | #define XENMEM_memory_map 9
324 | | struct xen_memory_map {
325 | | /*
326 | | * [IN/OUT] On call the number of entries which can be stored in buffer.
327 | | * On return the number of entries which have been stored in
328 | | * buffer.
329 | | */
330 | | unsigned int nr_entries;
331 | |
332 | | /*
333 | | * Entries in the buffer are in the same format as returned by the
334 | | * BIOS INT 0x15 EAX=0xE820 call.
335 | | */
336 | | XEN_GUEST_HANDLE(void) buffer;
337 | | };
338 | | typedef struct xen_memory_map xen_memory_map_t;
339 | | DEFINE_XEN_GUEST_HANDLE(xen_memory_map_t);
340 | | |
341 | | /*
342 | | * Returns the real physical memory map. Passes the same structure as
343 | | * XENMEM_memory_map.
344 | | * Specifying buffer as NULL will return the number of entries required
345 | | * to store the complete memory map.
346 | | * arg == addr of xen_memory_map_t.
347 | | */
348 | 0 | #define XENMEM_machine_memory_map 10
349 | |
350 | | /*
351 | | * Set the pseudo-physical memory map of a domain, as returned by
352 | | * XENMEM_memory_map.
353 | | * arg == addr of xen_foreign_memory_map_t.
354 | | */
355 | 0 | #define XENMEM_set_memory_map 13
356 | | struct xen_foreign_memory_map {
357 | | domid_t domid; /* IN: domain whose map is being set. */
358 | | struct xen_memory_map map; /* IN: map to install, in XENMEM_memory_map format. */
359 | | };
360 | | typedef struct xen_foreign_memory_map xen_foreign_memory_map_t;
361 | | DEFINE_XEN_GUEST_HANDLE(xen_foreign_memory_map_t);
362 | | |
363 | 0 | #define XENMEM_set_pod_target 16
364 | 0 | #define XENMEM_get_pod_target 17
 | | /* Set/query a domain's populate-on-demand (PoD) target; see
 | | * XENMEMF_populate_on_demand above. */
365 | | struct xen_pod_target {
366 | | /* IN */
367 | | uint64_t target_pages;
368 | | /* OUT */
 | | /* NOTE(review): presumed current totals for the domain — confirm exact
 | | * semantics of each counter against the PoD implementation. */
369 | | uint64_t tot_pages;
370 | | uint64_t pod_cache_pages;
371 | | uint64_t pod_entries;
372 | | /* IN */
373 | | domid_t domid;
374 | | };
375 | | typedef struct xen_pod_target xen_pod_target_t;
376 | | |
377 | | #if defined(__XEN__) || defined(__XEN_TOOLS__)
378 | |
 | | /* Fallback when no earlier header supplied an alignment-annotated type. */
379 | | #ifndef uint64_aligned_t
380 | | #define uint64_aligned_t uint64_t
381 | | #endif
382 | | |
383 | | /*
384 | | * Get the number of MFNs saved through memory sharing.
385 | | * The call never fails.
386 | | */
387 | 0 | #define XENMEM_get_sharing_freed_pages 18
388 | 0 | #define XENMEM_get_sharing_shared_pages 19
389 | |
390 | 0 | #define XENMEM_paging_op 20
391 | 0 | #define XENMEM_paging_op_nominate 0
392 | 0 | #define XENMEM_paging_op_evict 1
393 | 0 | #define XENMEM_paging_op_prep 2
394 | |
395 | | struct xen_mem_paging_op {
396 | | uint8_t op; /* XENMEM_paging_op_* */
397 | | domid_t domain; /* IN: target domain. */
398 | |
399 | | /* PAGING_PREP IN: buffer to immediately fill page in */
400 | | uint64_aligned_t buffer;
401 | | /* Other OPs */
402 | | uint64_aligned_t gfn; /* IN: gfn of page being operated on */
403 | | };
404 | | typedef struct xen_mem_paging_op xen_mem_paging_op_t;
405 | | DEFINE_XEN_GUEST_HANDLE(xen_mem_paging_op_t);
406 | | |
407 | 0 | #define XENMEM_access_op 21
408 | 0 | #define XENMEM_access_op_set_access 0
409 | 0 | #define XENMEM_access_op_get_access 1
410 | | /*
411 | | * XENMEM_access_op_enable_emulate and XENMEM_access_op_disable_emulate are
412 | | * currently unused, but since they have been in use please do not reuse them.
413 | | *
414 | | * #define XENMEM_access_op_enable_emulate 2
415 | | * #define XENMEM_access_op_disable_emulate 3
416 | | */
417 | 0 | #define XENMEM_access_op_set_access_multi 4
418 | |
 | | /* Page access permissions: n = none, r = read, w = write, x = execute. */
419 | | typedef enum {
420 | | XENMEM_access_n, /* No access. */
421 | | XENMEM_access_r, /* Read only. */
422 | | XENMEM_access_w, /* Write only. */
423 | | XENMEM_access_rw, /* Read/write. */
424 | | XENMEM_access_x, /* Execute only. */
425 | | XENMEM_access_rx, /* Read/execute. */
426 | | XENMEM_access_wx, /* Write/execute. */
427 | | XENMEM_access_rwx, /* Full access. */
428 | | /*
429 | | * Page starts off as r-x, but automatically
430 | | * change to r-w on a write
431 | | */
432 | | XENMEM_access_rx2rw,
433 | | /*
434 | | * Log access: starts off as n, automatically
435 | | * goes to rwx, generating an event without
436 | | * pausing the vcpu
437 | | */
438 | | XENMEM_access_n2rwx,
439 | | /* Take the domain default */
440 | | XENMEM_access_default
441 | | } xenmem_access_t;
442 | | |
443 | | struct xen_mem_access_op {
444 | | /* XENMEM_access_op_* */
445 | | uint8_t op;
446 | | /* xenmem_access_t */
447 | | uint8_t access;
448 | | domid_t domid; /* IN: target domain. */
449 | | /*
450 | | * Number of pages for set op (or size of pfn_list for
451 | | * XENMEM_access_op_set_access_multi)
452 | | * Ignored on setting default access and other ops
453 | | */
454 | | uint32_t nr;
455 | | /*
456 | | * First pfn for set op
457 | | * pfn for get op
458 | | * ~0ull is used to set and get the default access for pages
459 | | */
460 | | uint64_aligned_t pfn;
461 | | /*
462 | | * List of pfns to set access for
463 | | * Used only with XENMEM_access_op_set_access_multi
464 | | */
465 | | XEN_GUEST_HANDLE(const_uint64) pfn_list;
466 | | /*
467 | | * Corresponding list of access settings for pfn_list
468 | | * Used only with XENMEM_access_op_set_access_multi
469 | | */
470 | | XEN_GUEST_HANDLE(const_uint8) access_list;
471 | | };
472 | | typedef struct xen_mem_access_op xen_mem_access_op_t;
473 | | DEFINE_XEN_GUEST_HANDLE(xen_mem_access_op_t);
474 | | |
475 | 0 | #define XENMEM_sharing_op 22
476 | 0 | #define XENMEM_sharing_op_nominate_gfn 0
477 | 0 | #define XENMEM_sharing_op_nominate_gref 1
478 | 0 | #define XENMEM_sharing_op_share 2
479 | 0 | #define XENMEM_sharing_op_debug_gfn 3
480 | | #define XENMEM_sharing_op_debug_mfn 4
481 | 0 | #define XENMEM_sharing_op_debug_gref 5
482 | 0 | #define XENMEM_sharing_op_add_physmap 6
483 | 0 | #define XENMEM_sharing_op_audit 7
484 | 0 | #define XENMEM_sharing_op_range_share 8
485 | |
 | | /* Error values: invalid source (S) / client (C) sharing handle. */
486 | 0 | #define XENMEM_SHARING_OP_S_HANDLE_INVALID (-10)
487 | 0 | #define XENMEM_SHARING_OP_C_HANDLE_INVALID (-9)
488 | |
489 | | /* The following allows sharing of grant refs. This is useful
490 | | * for sharing utilities sitting as "filters" in IO backends
491 | | * (e.g. memshr + blktap(2)). The IO backend is only exposed
492 | | * to grant references, and this allows sharing of the grefs */
493 | 0 | #define XENMEM_SHARING_OP_FIELD_IS_GREF_FLAG (xen_mk_ullong(1) << 62)
494 | |
495 | | #define XENMEM_SHARING_OP_FIELD_MAKE_GREF(field, val) \
496 | | (field) = (XENMEM_SHARING_OP_FIELD_IS_GREF_FLAG | val)
497 | | #define XENMEM_SHARING_OP_FIELD_IS_GREF(field) \
498 | 0 | ((field) & XENMEM_SHARING_OP_FIELD_IS_GREF_FLAG)
499 | | #define XENMEM_SHARING_OP_FIELD_GET_GREF(field) \
500 | 0 | ((field) & (~XENMEM_SHARING_OP_FIELD_IS_GREF_FLAG))
501 | |
 | | /* Argument for XENMEM_sharing_op; 'op' selects which union member is used. */
502 | | struct xen_mem_sharing_op {
503 | | uint8_t op; /* XENMEM_sharing_op_* */
504 | | domid_t domain; /* IN: target domain. */
505 | |
506 | | union {
507 | | struct mem_sharing_op_nominate { /* OP_NOMINATE_xxx */
508 | | union {
509 | | uint64_aligned_t gfn; /* IN: gfn to nominate */
510 | | uint32_t grant_ref; /* IN: grant ref to nominate */
511 | | } u;
512 | | uint64_aligned_t handle; /* OUT: the handle */
513 | | } nominate;
514 | | struct mem_sharing_op_share { /* OP_SHARE/ADD_PHYSMAP */
515 | | uint64_aligned_t source_gfn; /* IN: the gfn of the source page */
516 | | uint64_aligned_t source_handle; /* IN: handle to the source page */
517 | | uint64_aligned_t client_gfn; /* IN: the client gfn */
518 | | uint64_aligned_t client_handle; /* IN: handle to the client page */
519 | | domid_t client_domain; /* IN: the client domain id */
520 | | } share;
521 | | struct mem_sharing_op_range { /* OP_RANGE_SHARE */
522 | | uint64_aligned_t first_gfn; /* IN: the first gfn */
523 | | uint64_aligned_t last_gfn; /* IN: the last gfn */
524 | | uint64_aligned_t opaque; /* Must be set to 0 */
525 | | domid_t client_domain; /* IN: the client domain id */
526 | | uint16_t _pad[3]; /* Must be set to 0 */
527 | | } range;
528 | | struct mem_sharing_op_debug { /* OP_DEBUG_xxx */
529 | | union {
530 | | uint64_aligned_t gfn; /* IN: gfn to debug */
531 | | uint64_aligned_t mfn; /* IN: mfn to debug */
532 | | uint32_t gref; /* IN: gref to debug */
533 | | } u;
534 | | } debug;
535 | | } u;
536 | | };
537 | | typedef struct xen_mem_sharing_op xen_mem_sharing_op_t;
538 | | DEFINE_XEN_GUEST_HANDLE(xen_mem_sharing_op_t);
539 | | |
540 | | /*
541 | | * Attempt to stake a claim for a domain on a quantity of pages
542 | | * of system RAM, but _not_ assign specific pageframes. Only
543 | | * arithmetic is performed so the hypercall is very fast and need
544 | | * not be preemptible, thus sidestepping time-of-check-time-of-use
545 | | * races for memory allocation. Returns 0 if the hypervisor page
546 | | * allocator has atomically and successfully claimed the requested
547 | | * number of pages, else non-zero.
548 | | *
549 | | * Any domain may have only one active claim. When sufficient memory
550 | | * has been allocated to resolve the claim, the claim silently expires.
551 | | * Claiming zero pages effectively resets any outstanding claim and
552 | | * is always successful.
553 | | *
554 | | * Note that a valid claim may be staked even after memory has been
555 | | * allocated for a domain. In this case, the claim is not incremental,
556 | | * i.e. if the domain's tot_pages is 3, and a claim is staked for 10,
557 | | * only 7 additional pages are claimed.
558 | | *
559 | | * Caller must be privileged or the hypercall fails.
 | | *
 | | * arg == addr of struct xen_memory_reservation (whose extent_start must
 | | * be zero for this subop; see above).
560 | | */
561 | 0 | #define XENMEM_claim_pages 24
562 | | |
563 | | /* |
564 | | * XENMEM_claim_pages flags - there are no flags at this time.
565 | | * The zero value is appropriate. |
566 | | */ |
567 | | |
568 | | /*
569 | | * With some legacy devices, certain guest-physical addresses cannot safely
570 | | * be used for other purposes, e.g. to map guest RAM. This hypercall
571 | | * enumerates those regions so the toolstack can avoid using them.
572 | | */
573 | 0 | #define XENMEM_reserved_device_memory_map 27
574 | | struct xen_reserved_device_memory {
575 | | xen_pfn_t start_pfn; /* First frame of the reserved region. */
576 | | xen_ulong_t nr_pages; /* Size of the region in pages. */
577 | | };
578 | | typedef struct xen_reserved_device_memory xen_reserved_device_memory_t;
579 | | DEFINE_XEN_GUEST_HANDLE(xen_reserved_device_memory_t);
580 | | |
581 | | struct xen_reserved_device_memory_map {
582 | 0 | #define XENMEM_RDM_ALL 1 /* Request all regions (ignore dev union). */
583 | | /* IN */
584 | | uint32_t flags;
585 | | /*
586 | | * IN/OUT
587 | | *
588 | | * Gets set to the required number of entries when too low,
589 | | * signaled by error code -ERANGE.
590 | | */
591 | | unsigned int nr_entries;
592 | | /* OUT */
593 | | XEN_GUEST_HANDLE(xen_reserved_device_memory_t) buffer;
594 | | /* IN: device to query; ignored when XENMEM_RDM_ALL is set in flags. */
595 | | union {
596 | | struct physdev_pci_device pci;
597 | | } dev;
598 | | };
599 | | typedef struct xen_reserved_device_memory_map xen_reserved_device_memory_map_t;
600 | | DEFINE_XEN_GUEST_HANDLE(xen_reserved_device_memory_map_t);
601 | | |
602 | | #endif /* defined(__XEN__) || defined(__XEN_TOOLS__) */ |
603 | | |
604 | | /*
605 | | * XENMEM_get_vnumainfo used by guest to get
606 | | * vNUMA topology from hypervisor.
607 | | */
608 | 0 | #define XENMEM_get_vnumainfo 26
609 | |
610 | | /* vNUMA node memory ranges */
611 | | struct xen_vmemrange {
612 | | uint64_t start, end; /* Address range covered by this vmemrange. */
613 | | unsigned int flags; /* NOTE(review): no flag values defined here. */
614 | | unsigned int nid; /* vNUMA node this range belongs to. */
615 | | };
616 | | typedef struct xen_vmemrange xen_vmemrange_t;
617 | | DEFINE_XEN_GUEST_HANDLE(xen_vmemrange_t);
618 | | |
619 | | /* |
620 | | * vNUMA topology specifies vNUMA node number, distance table, |
621 | | * memory ranges and vcpu mapping provided for guests. |
622 | | * XENMEM_get_vnumainfo hypercall expects to see from guest |
623 | | * nr_vnodes, nr_vmemranges and nr_vcpus to indicate available memory. |
624 | | * After filling guests structures, nr_vnodes, nr_vmemranges and nr_vcpus |
625 | | * copied back to guest. Domain returns expected values of nr_vnodes, |
626 | | * nr_vmemranges and nr_vcpus to guest if the values were incorrect.
627 | | */ |
628 | | struct xen_vnuma_topology_info {
629 | | /* IN */
630 | | domid_t domid;
631 | | uint16_t pad;
632 | | /* IN/OUT */
633 | | unsigned int nr_vnodes;
634 | | unsigned int nr_vcpus;
635 | | unsigned int nr_vmemranges;
636 | | /* OUT */
 | | /* Node-to-node distance table. */
637 | | union {
638 | | XEN_GUEST_HANDLE(uint) h;
639 | | uint64_t pad;
640 | | } vdistance;
 | | /* Per-vcpu vnode mapping. */
641 | | union {
642 | | XEN_GUEST_HANDLE(uint) h;
643 | | uint64_t pad;
644 | | } vcpu_to_vnode;
 | | /* Node memory ranges (see struct xen_vmemrange above). */
645 | | union {
646 | | XEN_GUEST_HANDLE(xen_vmemrange_t) h;
647 | | uint64_t pad;
648 | | } vmemrange;
649 | | };
650 | | typedef struct xen_vnuma_topology_info xen_vnuma_topology_info_t;
651 | | DEFINE_XEN_GUEST_HANDLE(xen_vnuma_topology_info_t);
652 | | |
653 | | /* Next available subop number is 28 */ |
654 | | |
655 | | #endif /* __XEN_PUBLIC_MEMORY_H__ */ |
656 | | |
657 | | /* |
658 | | * Local variables: |
659 | | * mode: C |
660 | | * c-file-style: "BSD" |
661 | | * c-basic-offset: 4 |
662 | | * tab-width: 4 |
663 | | * indent-tabs-mode: nil |
664 | | * End: |
665 | | */ |