debuggers.hg: tools/libxc/xc_core_ia64.c @ 16715:c5deb251b9dc

Update version to 3.2.0-rc4
author:   Keir Fraser <keir.fraser@citrix.com>
date:     Sat Dec 29 17:57:37 2007 +0000
parents:  ecbda3783c85
children: 26ecd1f9e128
/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright (c) 2007 Isaku Yamahata <yamahata at valinux co jp>
 *                    VA Linux Systems Japan K.K.
 *
 */
#include "xg_private.h"
#include "xc_core.h"
#include "xc_efi.h"
#include "xc_dom.h"
#include <inttypes.h>
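
/*
 * ia64-specific hooks for libxc's core-dump support (xc_core): build the
 * guest physical memory map, map per-vcpu privileged-register pages, and
 * emit the ia64-only sections of the dump file.
 */

/*
 * Without a p2m table we cannot tell which gpfns are populated, so
 * default to attempting the mapping; otherwise consult the table.
 */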
int
xc_core_arch_gpfn_may_present(struct xc_core_arch_context *arch_ctxt,
                              unsigned long pfn)
{
    if ( arch_ctxt->p2m_table.p2m == NULL )
        return 1; /* default to trying to map the page */

    return xc_ia64_p2m_present(&arch_ctxt->p2m_table, pfn);
}
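
/*
 * qsort() comparator ordering memory-map entries by start address.
 * Entries must not overlap; an equal start address indicates a bug, so
 * the duplicate is logged and the entries are treated as equal.
 */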
static int
xc_memory_map_cmp(const void *lhs__, const void *rhs__)
{
    const struct xc_core_memory_map *lhs =
        (const struct xc_core_memory_map *)lhs__;
    const struct xc_core_memory_map *rhs =
        (const struct xc_core_memory_map *)rhs__;

    if ( lhs->addr < rhs->addr )
        return -1;
    if ( lhs->addr > rhs->addr )
        return 1;

    /* overlapping memory map entries aren't allowed; complain */
    DPRINTF("duplicate addresses detected "
            "(0x%" PRIx64 ", 0x%" PRIx64 "), "
            "(0x%" PRIx64 ", 0x%" PRIx64 ")\n",
            lhs->addr, lhs->size, rhs->addr, rhs->size);
    return 0;
}
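
/*
 * Every ia64 domain is auto-translated, so the generic dump code always
 * works directly with guest pfns rather than machine frames.
 */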
int
xc_core_arch_auto_translated_physmap(const xc_dominfo_t *info)
{
    /*
     * On ia64, both paravirtualized and HVM domains run in
     * auto_translated_physmap mode.
     */
    return 1;
}
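
/*
 * Compatibility fallback for PV domains that do not publish a memmap:
 * approximate the map with a single RAM entry [0, max_memkb * 1024).
 */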
/* see setup_guest() @ xc_linux_build.c */
static int
memory_map_get_old_domu(int xc_handle, xc_dominfo_t *info,
                        shared_info_t *live_shinfo,
                        xc_core_memory_map_t **mapp, unsigned int *nr_entries)
{
    xc_core_memory_map_t *map = NULL;

    map = malloc(sizeof(*map));
    if ( map == NULL )
    {
        PERROR("Could not allocate memory");
        goto out;
    }

    map->addr = 0;
    map->size = info->max_memkb * 1024;

    *mapp = map;
    *nr_entries = 1;
    return 0;

out:
    if ( map != NULL )
        free(map);
    return -1;
}
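
/*
 * Compatibility fallback for HVM domains: reconstruct the layout that
 * setup_guest() establishes, i.e. the fixed guest-firmware pages plus
 * RAM split around the VGA hole and the MMIO hole.  A sketch of the
 * entries built below, assuming RAM is large enough to be split by
 * both holes (RAM overlapping the MMIO hole apparently having been
 * relocated above 4GB at build time):
 *
 *   gfw_map[]                   fixed firmware/IO pages
 *   [0, VGA_IO_START)           low RAM
 *   [VGA_IO_END, MMIO_START)    RAM between the holes
 *   [MMIO_START + 1G, ...)      RAM relocated above the MMIO hole
 */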
/* see setup_guest() @ xc_ia64_hvm_build.c */
static int
memory_map_get_old_hvm(int xc_handle, xc_dominfo_t *info,
                       shared_info_t *live_shinfo,
                       xc_core_memory_map_t **mapp, unsigned int *nr_entries)
{
    const xc_core_memory_map_t gfw_map[] = {
        {IO_PAGE_START, IO_PAGE_SIZE},
        {STORE_PAGE_START, STORE_PAGE_SIZE},
        {BUFFER_IO_PAGE_START, BUFFER_IO_PAGE_SIZE},
        {BUFFER_PIO_PAGE_START, BUFFER_PIO_PAGE_SIZE},
        {GFW_START, GFW_SIZE},
    };
    const unsigned int nr_gfw_map = sizeof(gfw_map)/sizeof(gfw_map[0]);
    xc_core_memory_map_t *map = NULL;
    unsigned int i;

#define VGA_IO_END (VGA_IO_START + VGA_IO_SIZE)
    /* [0, VGA_IO_START), [VGA_IO_END, 3GB), [4GB, ...) + gfw_map */
    map = malloc((3 + nr_gfw_map) * sizeof(*map));
    if ( map == NULL )
    {
        PERROR("Could not allocate memory");
        goto out;
    }

    for ( i = 0; i < nr_gfw_map; i++ )
        map[i] = gfw_map[i];
    map[i].addr = 0;
    map[i].size = info->max_memkb * 1024;
    i++;
    if ( map[i - 1].size < VGA_IO_END )
    {
        map[i - 1].size = VGA_IO_START;
    }
    else
    {
        map[i].addr = VGA_IO_END;
        map[i].size = map[i - 1].size - VGA_IO_END;
        map[i - 1].size = VGA_IO_START;
        i++;
        if ( map[i - 1].addr + map[i - 1].size > MMIO_START )
        {
            map[i].addr = MMIO_START + 1 * MEM_G;
            map[i].size = map[i - 1].addr + map[i - 1].size - MMIO_START;
            map[i - 1].size = MMIO_START - map[i - 1].addr;
            i++;
        }
    }

    *mapp = map;
    *nr_entries = i;
    qsort(map, *nr_entries, sizeof(map[0]), &xc_memory_map_cmp);
    return 0;

out:
    if ( map != NULL )
        free(map);
    return -1;
}
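
/*
 * Dispatch to the appropriate compatibility fallback.  A PV domain
 * additionally requires the live shared_info; without it there is
 * nothing we can do.
 */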
static int
memory_map_get_old(int xc_handle, xc_dominfo_t *info,
                   shared_info_t *live_shinfo,
                   xc_core_memory_map_t **mapp, unsigned int *nr_entries)
{
    if ( info->hvm )
        return memory_map_get_old_hvm(xc_handle, info, live_shinfo,
                                      mapp, nr_entries);
    if ( live_shinfo == NULL )
        return -1;
    return memory_map_get_old_domu(xc_handle, info, live_shinfo,
                                   mapp, nr_entries);
}
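
/*
 * Preferred path: read the EFI-style memory map that the guest publishes
 * via shared_info (memmap_info_pfn/memmap_info_num_pages), snapshot it,
 * validate the header, and keep only WB conventional-memory descriptors.
 * On any failure, fall back to the old reconstruction above.
 *
 * The published layout is assumed to be the following (a sketch inferred
 * from how the fields are used below; the authoritative definition lives
 * in Xen's public ia64 headers):
 *
 *   struct xen_ia64_memmap_info {
 *       uint64_t      efi_memmap_size;    // size of the memdesc array
 *       uint64_t      efi_memdesc_size;   // size of one descriptor
 *       uint32_t      efi_memdesc_version;
 *       unsigned char memdesc[];          // efi_memory_desc_t entries
 *   };
 */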
int
xc_core_arch_memory_map_get(int xc_handle,
                            struct xc_core_arch_context *arch_ctxt,
                            xc_dominfo_t *info, shared_info_t *live_shinfo,
                            xc_core_memory_map_t **mapp,
                            unsigned int *nr_entries)
{
    int ret = -1;
    unsigned int memmap_info_num_pages;
    unsigned long memmap_info_pfn;

    xen_ia64_memmap_info_t *memmap_info_live;
    xen_ia64_memmap_info_t *memmap_info = NULL;
    unsigned long map_size;
    xc_core_memory_map_t *map;
    char *start;
    char *end;
    char *p;
    efi_memory_desc_t *md;

    if ( live_shinfo == NULL )
    {
        ERROR("can't access shared info");
        goto old;
    }

    /* copy before use in case someone is updating them */
    memmap_info_num_pages = live_shinfo->arch.memmap_info_num_pages;
    memmap_info_pfn = live_shinfo->arch.memmap_info_pfn;
    if ( memmap_info_num_pages == 0 || memmap_info_pfn == 0 )
    {
        ERROR("memmap_info_num_pages 0x%x memmap_info_pfn 0x%lx",
              memmap_info_num_pages, memmap_info_pfn);
        goto old;
    }

    map_size = PAGE_SIZE * memmap_info_num_pages;
    memmap_info_live = xc_map_foreign_range(xc_handle, info->domid,
                                            map_size, PROT_READ,
                                            memmap_info_pfn);
    if ( memmap_info_live == NULL )
    {
        PERROR("Could not map memmap info.");
        return -1;
    }
    memmap_info = malloc(map_size);
    if ( memmap_info == NULL )
    {
        munmap(memmap_info_live, map_size);
        return -1;
    }
    memcpy(memmap_info, memmap_info_live, map_size); /* copy before use */
    munmap(memmap_info_live, map_size);

    if ( memmap_info->efi_memdesc_size != sizeof(*md) ||
         (memmap_info->efi_memmap_size / memmap_info->efi_memdesc_size) == 0 ||
         memmap_info->efi_memmap_size > map_size - sizeof(*memmap_info) ||
         memmap_info->efi_memdesc_version != EFI_MEMORY_DESCRIPTOR_VERSION )
    {
        PERROR("unknown memmap header; defaulting to compat mode.");
        free(memmap_info);
        goto old;
    }

    *nr_entries = memmap_info->efi_memmap_size / memmap_info->efi_memdesc_size;
    map = malloc(*nr_entries * sizeof(*map));
    if ( map == NULL )
    {
        PERROR("Could not allocate memory for memmap.");
        free(memmap_info);
        return -1;
    }
    *mapp = map;

    *nr_entries = 0;
    start = (char*)&memmap_info->memdesc;
    end = start + memmap_info->efi_memmap_size;
    for ( p = start; p < end; p += memmap_info->efi_memdesc_size )
    {
        md = (efi_memory_desc_t*)p;
        if ( md->type != EFI_CONVENTIONAL_MEMORY ||
             md->attribute != EFI_MEMORY_WB ||
             md->num_pages == 0 )
            continue;

        map[*nr_entries].addr = md->phys_addr;
        map[*nr_entries].size = md->num_pages << EFI_PAGE_SHIFT;
        (*nr_entries)++;
    }
    ret = 0;

    xc_ia64_p2m_map(&arch_ctxt->p2m_table, xc_handle, info->domid,
                    memmap_info, 0);
    free(memmap_info);
    qsort(map, *nr_entries, sizeof(map[0]), &xc_memory_map_cmp);
    return ret;

old:
    DPRINTF("Falling back to the old method.\n");
    return memory_map_get_old(xc_handle, info, live_shinfo, mapp, nr_entries);
}
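
/*
 * No p2m table is exposed to the tools on ia64, so the generic code must
 * not rely on this hook; report ENOSYS.
 */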
int
xc_core_arch_map_p2m(int xc_handle, xc_dominfo_t *info,
                     shared_info_t *live_shinfo, xen_pfn_t **live_p2m,
                     unsigned long *pfnp)
{
    /*
     * On ia64, both paravirtualized and HVM domains run in
     * auto_translated_physmap mode, so there is no p2m table to map.
     */
    errno = ENOSYS;
    return -1;
}
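
/*
 * Initialize the per-dump ia64 context: the privreg area of each vcpu
 * occupies at least one page, and no vcpus have been mapped yet.
 */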
void
xc_core_arch_context_init(struct xc_core_arch_context* arch_ctxt)
{
    int i;

    arch_ctxt->mapped_regs_size =
        (XMAPPEDREGS_SIZE < PAGE_SIZE) ? PAGE_SIZE : XMAPPEDREGS_SIZE;
    arch_ctxt->nr_vcpus = 0;
    for ( i = 0; i < MAX_VIRT_CPUS; i++ )
        arch_ctxt->mapped_regs[i] = NULL;

    xc_ia64_p2m_init(&arch_ctxt->p2m_table);
}
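
/*
 * Undo xc_core_arch_context_init()/_get(): unmap every mapped privreg
 * area and release the p2m table.
 */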
void
xc_core_arch_context_free(struct xc_core_arch_context* arch_ctxt)
{
    int i;
    for ( i = 0; i < arch_ctxt->nr_vcpus; i++ )
        if ( arch_ctxt->mapped_regs[i] != NULL )
            munmap(arch_ctxt->mapped_regs[i], arch_ctxt->mapped_regs_size);
    xc_ia64_p2m_unmap(&arch_ctxt->p2m_table);
}
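
/*
 * Map one vcpu's privileged-register page into the dumper.  VTi (HVM)
 * vcpus publish no privregs pfn and are silently skipped; a PV vcpu
 * without a valid pfn is an error.
 */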
int
xc_core_arch_context_get(struct xc_core_arch_context* arch_ctxt,
                         vcpu_guest_context_t* ctxt,
                         int xc_handle, uint32_t domid)
{
    mapped_regs_t* mapped_regs;

    if ( ctxt->privregs_pfn == VGC_PRIVREGS_HVM )
        return 0;       /* VTi domain case */

    if ( ctxt->privregs_pfn == INVALID_P2M_ENTRY )
    {
        PERROR("Could not get gmfn of mapped privregs");
        errno = ENOENT;
        return -1;
    }
    mapped_regs = xc_map_foreign_range(xc_handle, domid,
                                       arch_ctxt->mapped_regs_size,
                                       PROT_READ, ctxt->privregs_pfn);
    if ( mapped_regs == NULL )
    {
        PERROR("Could not map privregs");
        return -1;
    }
    arch_ctxt->mapped_regs[arch_ctxt->nr_vcpus] = mapped_regs;
    arch_ctxt->nr_vcpus++;
    return 0;
}
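
/*
 * Emit the section header for .xen_ia64_mapped_regs: one fixed-size
 * privreg record per mapped vcpu, or an empty section for VTi domains.
 */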
int
xc_core_arch_context_get_shdr(struct xc_core_arch_context *arch_ctxt,
                              struct xc_core_section_headers *sheaders,
                              struct xc_core_strtab *strtab,
                              uint64_t *filesz, uint64_t offset)
{
    int sts = -1;
    Elf64_Shdr *shdr;

    if ( arch_ctxt->nr_vcpus == 0 )
    {
        /* VTi domain case */
        *filesz = 0;
        return 0;
    }

    /* mmapped priv regs */
    shdr = xc_core_shdr_get(sheaders);
    if ( shdr == NULL )
    {
        PERROR("Could not get section header for .xen_ia64_mapped_regs");
        return sts;
    }
    *filesz = arch_ctxt->mapped_regs_size * arch_ctxt->nr_vcpus;
    sts = xc_core_shdr_set(shdr, strtab, XEN_DUMPCORE_SEC_IA64_MAPPED_REGS,
                           SHT_PROGBITS, offset, *filesz,
                           __alignof__(*arch_ctxt->mapped_regs[0]),
                           arch_ctxt->mapped_regs_size);
    return sts;
}
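
/*
 * Write the body of .xen_ia64_mapped_regs through the caller-supplied
 * dump routine, one privreg record per mapped vcpu, stopping at the
 * first failure.
 */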
int
xc_core_arch_context_dump(struct xc_core_arch_context* arch_ctxt,
                          void* args, dumpcore_rtn_t dump_rtn)
{
    int sts = 0;
    int i;

    /* ia64 mapped_regs: .xen_ia64_mapped_regs */
    for ( i = 0; i < arch_ctxt->nr_vcpus; i++ )
    {
        sts = dump_rtn(args, (char*)arch_ctxt->mapped_regs[i],
                       arch_ctxt->mapped_regs_size);
        if ( sts != 0 )
            break;
    }
    return sts;
}
/*
 * Local variables:
 * mode: C
 * c-set-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */