debuggers.hg

view tools/libxc/xc_core_ia64.c @ 19826:2f9e1348aa98

x86_64: allow more vCPU-s per guest

Since the shared info layout is fixed, guests are required to use
VCPUOP_register_vcpu_info prior to booting any vCPU beyond the
traditional limit of 32.

MAX_VIRT_CPUS, being an implementation detail of the hypervisor, is no
longer exposed in the public headers.
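
For illustration, the guest-side registration amounts to something like
the sketch below. This is a minimal sketch against the public vcpu.h
interface; per_vcpu_info, NR_CPUS, virt_to_mfn() and HYPERVISOR_vcpu_op()
stand in for whatever the guest kernel actually provides and are not part
of this change:

    #include <xen/vcpu.h>  /* struct vcpu_register_vcpu_info, VCPUOP_* */

    static vcpu_info_t per_vcpu_info[NR_CPUS]; /* per-vCPU slots in guest RAM */

    /* Register vCPU <cpu>'s vcpu_info before bringing it online; mandatory
     * for cpu >= 32, whose slots no longer fit in the fixed shared info. */
    static int register_vcpu_info(unsigned int cpu)
    {
        struct vcpu_register_vcpu_info info;
        vcpu_info_t *vi = &per_vcpu_info[cpu];

        info.mfn = virt_to_mfn(vi);                   /* frame backing vi */
        info.offset = (unsigned long)vi & ~PAGE_MASK; /* offset within it */
        return HYPERVISOR_vcpu_op(VCPUOP_register_vcpu_info, cpu, &info);
    }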

The tools changes are clearly incomplete (and done only so things would
build again), and the current state of the tools (using scalar variables
all over the place to represent vCPU bitmaps) very likely doesn't permit
booting DomU-s with more than the traditional number of vCPU-s. Testing
of the extended functionality was done with Dom0 (96 vCPU-s, as well as
128 vCPU-s out of which the kernel elected - by way of a simple kernel
side patch - to use only some, resulting in a sparse bitmap).
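
To make the bitmap limitation concrete: a scalar vcpumap is capped at the
width of its type, so 96 or 128 vCPUs simply cannot be represented. Below
is a minimal sketch of the array form the tools would need;
xc_vcpumap_test() is a hypothetical helper for this sketch, not existing
libxc API:

    #include <stdint.h>

    uint64_t vcpumap;  /* today: one scalar, at most 64 vCPUs representable */

    /* needed: a real bitmap, sized from the actual vCPU count */
    static inline int xc_vcpumap_test(const uint64_t *map, unsigned int vcpu)
    {
        return (map[vcpu / 64] >> (vcpu % 64)) & 1;  /* 1 = vCPU present */
    }

A kernel that brings up only some of its 128 vCPUs leaves holes in such a
map - the sparse-bitmap case mentioned above.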

ia64 changes are there only to make things build, and were build-tested
only (the tools part only as far as the build would go without
encountering unrelated problems in the blktap code).

Signed-off-by: Jan Beulich <jbeulich@novell.com>
author Keir Fraser <keir.fraser@citrix.com>
date Thu Jun 18 10:14:16 2009 +0100 (2009-06-18)
parents 0ab57e6e440a
children 3ffdb094c2c0 779c0ef9682c
line source
/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright (c) 2007 Isaku Yamahata <yamahata at valinux co jp>
 *                    VA Linux Systems Japan K.K.
 *
 */

#include "xg_private.h"
#include "xc_core.h"
#include "xc_efi.h"
#include "xc_dom.h"
#include <inttypes.h>

int
xc_core_arch_gpfn_may_present(struct xc_core_arch_context *arch_ctxt,
                              unsigned long pfn)
{
    if (arch_ctxt->p2m_table.p2m == NULL)
        return 1; /* default to trying to map the page */

    return xc_ia64_p2m_present(&arch_ctxt->p2m_table, pfn);
}

static int
xc_memory_map_cmp(const void *lhs__, const void *rhs__)
{
    const struct xc_core_memory_map *lhs =
        (const struct xc_core_memory_map *)lhs__;
    const struct xc_core_memory_map *rhs =
        (const struct xc_core_memory_map *)rhs__;

    if (lhs->addr < rhs->addr)
        return -1;
    if (lhs->addr > rhs->addr)
        return 1;

    /* memory map overlap isn't allowed. complain */
    DPRINTF("duplicated addresses are detected "
            "(0x%" PRIx64 ", 0x%" PRIx64 "), "
            "(0x%" PRIx64 ", 0x%" PRIx64 ")\n",
            lhs->addr, lhs->size, rhs->addr, rhs->size);
    return 0;
}

int
xc_core_arch_auto_translated_physmap(const xc_dominfo_t *info)
{
    /*
     * on ia64, both paravirtualized and HVM domains run in
     * auto_translated_physmap mode
     */
    return 1;
}

/* see setup_guest() @ xc_linux_build.c */
static int
memory_map_get_old_domu(int xc_handle, xc_dominfo_t *info,
                        shared_info_any_t *live_shinfo,
                        xc_core_memory_map_t **mapp, unsigned int *nr_entries)
{
    xc_core_memory_map_t *map = NULL;

    map = malloc(sizeof(*map));
    if ( map == NULL )
    {
        PERROR("Could not allocate memory");
        goto out;
    }

    map->addr = 0;
    map->size = info->max_memkb * 1024;

    *mapp = map;
    *nr_entries = 1;
    return 0;

 out:
    if ( map != NULL )
        free(map);
    return -1;
}

/* see setup_guest() @ xc_ia64_hvm_build.c */
static int
memory_map_get_old_hvm(int xc_handle, xc_dominfo_t *info,
                       shared_info_any_t *live_shinfo,
                       xc_core_memory_map_t **mapp, unsigned int *nr_entries)
{
    const xc_core_memory_map_t gfw_map[] = {
        {IO_PAGE_START, IO_PAGE_SIZE},
        {STORE_PAGE_START, STORE_PAGE_SIZE},
        {BUFFER_IO_PAGE_START, BUFFER_IO_PAGE_SIZE},
        {BUFFER_PIO_PAGE_START, BUFFER_PIO_PAGE_SIZE},
        {GFW_START, GFW_SIZE},
    };
    const unsigned int nr_gfw_map = sizeof(gfw_map)/sizeof(gfw_map[0]);
    xc_core_memory_map_t *map = NULL;
    unsigned int i;

#define VGA_IO_END (VGA_IO_START + VGA_IO_SIZE)
    /* [0, VGA_IO_START) [VGA_IO_END, 3GB), [4GB, ...) + gfw_map */
    map = malloc((3 + nr_gfw_map) * sizeof(*map));
    if ( map == NULL )
    {
        PERROR("Could not allocate memory");
        goto out;
    }

    for ( i = 0; i < nr_gfw_map; i++ )
        map[i] = gfw_map[i];
    map[i].addr = 0;
    map[i].size = info->max_memkb * 1024;
    i++;
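    /*
     * map[i - 1] now covers all of guest RAM starting at 0.  Carve the
     * VGA I/O hole [VGA_IO_START, VGA_IO_END) out of it; if the remainder
     * still reaches past MMIO_START, split once more and relocate the
     * overlapping tail to above MMIO_START + 1GB.
     */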
    if ( map[i - 1].size < VGA_IO_END )
    {
        map[i - 1].size = VGA_IO_START;
    }
    else
    {
        map[i].addr = VGA_IO_END;
        map[i].size = map[i - 1].size - VGA_IO_END;
        map[i - 1].size = VGA_IO_START;
        i++;
        if ( map[i - 1].addr + map[i - 1].size > MMIO_START )
        {
            map[i].addr = MMIO_START + 1 * MEM_G;
            map[i].size = map[i - 1].addr + map[i - 1].size - MMIO_START;
            map[i - 1].size = MMIO_START - map[i - 1].addr;
            i++;
        }
    }
    *mapp = map;
    *nr_entries = i;
    qsort(map, *nr_entries, sizeof(map[0]), &xc_memory_map_cmp);
    return 0;

 out:
    if ( map != NULL )
        free(map);
    return -1;
}

static int
memory_map_get_old(int xc_handle, xc_dominfo_t *info,
                   shared_info_any_t *live_shinfo,
                   xc_core_memory_map_t **mapp, unsigned int *nr_entries)
{
    if ( info->hvm )
        return memory_map_get_old_hvm(xc_handle, info, live_shinfo,
                                      mapp, nr_entries);
    if ( live_shinfo == NULL )
        return -1;
    return memory_map_get_old_domu(xc_handle, info, live_shinfo,
                                   mapp, nr_entries);
}

int
xc_core_arch_memory_map_get(int xc_handle,
                            struct xc_core_arch_context *arch_ctxt,
                            xc_dominfo_t *info,
                            shared_info_any_t *live_shinfo,
                            xc_core_memory_map_t **mapp,
                            unsigned int *nr_entries)
{
    int ret = -1;

    xen_ia64_memmap_info_t *memmap_info = NULL;
    xc_core_memory_map_t *map;
    char *start;
    char *end;
    char *p;
    efi_memory_desc_t *md;

    if ( live_shinfo == NULL )
    {
        ERROR("can't access shared info");
        goto old;
    }

    /* copy before use, in case someone is updating it concurrently */
    if (xc_ia64_copy_memmap(xc_handle, info->domid, &live_shinfo->s,
                            &memmap_info, NULL)) {
        goto old;
    }

    *nr_entries = memmap_info->efi_memmap_size / memmap_info->efi_memdesc_size;
    map = malloc(*nr_entries * sizeof(*md));
    if ( map == NULL )
    {
        PERROR("Could not allocate memory for memmap.");
        free(memmap_info);
        return -1;
    }
    *mapp = map;

    *nr_entries = 0;
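    /* Re-count while filtering: only non-empty, write-back conventional
     * RAM descriptors from the EFI memmap become entries in the dump map. */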
    start = (char*)&memmap_info->memdesc;
    end = start + memmap_info->efi_memmap_size;
    for ( p = start; p < end; p += memmap_info->efi_memdesc_size )
    {
        md = (efi_memory_desc_t*)p;
        if ( md->type != EFI_CONVENTIONAL_MEMORY ||
             md->attribute != EFI_MEMORY_WB ||
             md->num_pages == 0 )
            continue;

        map[*nr_entries].addr = md->phys_addr;
        map[*nr_entries].size = md->num_pages << EFI_PAGE_SHIFT;
        (*nr_entries)++;
    }
    ret = 0;

    xc_ia64_p2m_map(&arch_ctxt->p2m_table, xc_handle, info->domid,
                    memmap_info, 0);
    if ( memmap_info != NULL )
        free(memmap_info);
    qsort(map, *nr_entries, sizeof(map[0]), &xc_memory_map_cmp);
    return ret;

 old:
    DPRINTF("Falling back to the old method.\n");
    return memory_map_get_old(xc_handle, info, live_shinfo, mapp, nr_entries);
}

int
xc_core_arch_map_p2m(int xc_handle, unsigned int guest_width, xc_dominfo_t *info,
                     shared_info_any_t *live_shinfo, xen_pfn_t **live_p2m,
                     unsigned long *pfnp)
{
    /*
     * on ia64, both paravirtualized and HVM domains run in
     * auto_translated_physmap mode
     */
    errno = ENOSYS;
    return -1;
}

void
xc_core_arch_context_init(struct xc_core_arch_context* arch_ctxt)
{
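    /* privregs are mapped via xc_map_foreign_range(), which works in
     * whole pages, so reserve at least one page per vCPU */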
    arch_ctxt->mapped_regs_size =
        (XMAPPEDREGS_SIZE < PAGE_SIZE) ? PAGE_SIZE : XMAPPEDREGS_SIZE;
    arch_ctxt->nr_vcpus = 0;
    arch_ctxt->mapped_regs = NULL;

    xc_ia64_p2m_init(&arch_ctxt->p2m_table);
}

void
xc_core_arch_context_free(struct xc_core_arch_context* arch_ctxt)
{
    int i;
    for ( i = 0; i < arch_ctxt->nr_vcpus; i++ )
        if ( arch_ctxt->mapped_regs[i] != NULL )
            munmap(arch_ctxt->mapped_regs[i], arch_ctxt->mapped_regs_size);
    free(arch_ctxt->mapped_regs);
    xc_ia64_p2m_unmap(&arch_ctxt->p2m_table);
}

int
xc_core_arch_context_get(struct xc_core_arch_context* arch_ctxt,
                         vcpu_guest_context_any_t* ctxt_any,
                         int xc_handle, uint32_t domid)
{
    vcpu_guest_context_t *ctxt = &ctxt_any->c;
    mapped_regs_t* mapped_regs;

    if ( ctxt->privregs_pfn == VGC_PRIVREGS_HVM )
        return 0; /* VTi domain case */

    if ( ctxt->privregs_pfn == INVALID_P2M_ENTRY )
    {
        PERROR("Could not get mmapped privregs gmfn");
        errno = ENOENT;
        return -1;
    }
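    /*
     * Grow the per-vCPU pointer array geometrically: whenever nr_vcpus
     * is a power of two (or 0), double the allocation, keeping realloc()
     * traffic logarithmic in the now much larger possible vCPU count.
     */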
    if ( !(arch_ctxt->nr_vcpus & (arch_ctxt->nr_vcpus - 1)) ) {
        unsigned int nr = arch_ctxt->nr_vcpus ? arch_ctxt->nr_vcpus << 1 : 1;
        mapped_regs_t** new = realloc(arch_ctxt->mapped_regs,
                                      nr * sizeof(*new));

        if ( !new )
        {
            PERROR("Could not alloc mapped regs pointer array");
            return -1;
        }
        memset(new + arch_ctxt->nr_vcpus, 0,
               (nr - arch_ctxt->nr_vcpus) * sizeof(*new));
        arch_ctxt->mapped_regs = new;
    }

    mapped_regs = xc_map_foreign_range(xc_handle, domid,
                                       arch_ctxt->mapped_regs_size,
                                       PROT_READ, ctxt->privregs_pfn);
    if ( mapped_regs == NULL )
    {
        PERROR("Could not map mapped privregs");
        return -1;
    }
    arch_ctxt->mapped_regs[arch_ctxt->nr_vcpus] = mapped_regs;
    arch_ctxt->nr_vcpus++;
    return 0;
}

int
xc_core_arch_context_get_shdr(struct xc_core_arch_context *arch_ctxt,
                              struct xc_core_section_headers *sheaders,
                              struct xc_core_strtab *strtab,
                              uint64_t *filesz, uint64_t offset)
{
    int sts = -1;
    Elf64_Shdr *shdr;

    if ( arch_ctxt->nr_vcpus == 0 )
    {
        /* VTi domain case */
        *filesz = 0;
        return 0;
    }

    /* mmapped priv regs */
    shdr = xc_core_shdr_get(sheaders);
    if ( shdr == NULL )
    {
        PERROR("Could not get section header for .xen_ia64_mapped_regs");
        return sts;
    }
    *filesz = arch_ctxt->mapped_regs_size * arch_ctxt->nr_vcpus;
    sts = xc_core_shdr_set(shdr, strtab, XEN_DUMPCORE_SEC_IA64_MAPPED_REGS,
                           SHT_PROGBITS, offset, *filesz,
                           __alignof__(*arch_ctxt->mapped_regs[0]),
                           arch_ctxt->mapped_regs_size);
    return sts;
}

int
xc_core_arch_context_dump(struct xc_core_arch_context* arch_ctxt,
                          void* args, dumpcore_rtn_t dump_rtn)
{
    int sts = 0;
    int i;

    /* ia64 mapped_regs: .xen_ia64_mapped_regs */
    for ( i = 0; i < arch_ctxt->nr_vcpus; i++ )
    {
        sts = dump_rtn(args, (char*)arch_ctxt->mapped_regs[i],
                       arch_ctxt->mapped_regs_size);
        if ( sts != 0 )
            break;
    }
    return sts;
}

/*
 * Local variables:
 * mode: C
 * c-set-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */