debuggers.hg

view tools/ioemu/hw/xen_machine_fv.c @ 17010:90844659c458

Revert 16947:32b898768217. Breaks HVM qcow-backed discs.
Signed-off-by: Keir Fraser <keir.fraser@citrix.com>
author Keir Fraser <keir.fraser@citrix.com>
date Mon Feb 04 13:42:53 2008 +0000 (2008-02-04)
parents 32b898768217
children a905c582a406
/*
 * QEMU Xen FV Machine
 *
 * Copyright (c) 2003-2007 Fabrice Bellard
 * Copyright (c) 2007 Red Hat
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include "vl.h"
#include <xen/hvm/params.h>
#include <sys/mman.h>
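
/*
 * Map cache: gives qemu-dm access to guest physical memory through
 * fixed-size buckets that are mapped from the hypervisor on demand,
 * instead of mapping all of guest RAM up front. MAX_MCACHE_SIZE caps the
 * cacheable guest-physical range (1GB on 32-bit hosts, where virtual
 * address space is scarce; 64GB on x86_64).
 */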
#if defined(MAPCACHE)

#if defined(__i386__)
#define MAX_MCACHE_SIZE     0x40000000 /* 1GB max for x86 */
#define MCACHE_BUCKET_SHIFT 16
#elif defined(__x86_64__)
#define MAX_MCACHE_SIZE     0x1000000000 /* 64GB max for x86_64 */
#define MCACHE_BUCKET_SHIFT 20
#endif

#define MCACHE_BUCKET_SIZE (1UL << MCACHE_BUCKET_SHIFT)

#define BITS_PER_LONG (sizeof(long)*8)
#define BITS_TO_LONGS(bits) \
    (((bits)+BITS_PER_LONG-1)/BITS_PER_LONG)
#define DECLARE_BITMAP(name,bits) \
    unsigned long name[BITS_TO_LONGS(bits)]
#define test_bit(bit,map) \
    (!!((map)[(bit)/BITS_PER_LONG] & (1UL << ((bit)%BITS_PER_LONG))))
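
/*
 * One cache bucket: the guest-physical bucket index currently mapped at
 * vaddr_base, plus a per-page bitmap marking which pages of the bucket
 * were successfully mapped (frames the hypervisor refuses to map are
 * flagged invalid when the bucket is filled in).
 */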
struct map_cache {
    unsigned long paddr_index;
    uint8_t      *vaddr_base;
    DECLARE_BITMAP(valid_mapping, MCACHE_BUCKET_SIZE>>XC_PAGE_SHIFT);
};

static struct map_cache *mapcache_entry;
static unsigned long nr_buckets;

/* For most cases (>99.9%), the page address is the same. */
static unsigned long last_address_index = ~0UL;
static uint8_t      *last_address_vaddr;
static int qemu_map_cache_init(void)
{
    unsigned long size;

    nr_buckets = (((MAX_MCACHE_SIZE >> XC_PAGE_SHIFT) +
                   (1UL << (MCACHE_BUCKET_SHIFT - XC_PAGE_SHIFT)) - 1) >>
                  (MCACHE_BUCKET_SHIFT - XC_PAGE_SHIFT));

    /*
     * Use mmap() directly: lets us allocate a big hash table with no up-front
     * cost in storage space. The OS will allocate memory only for the buckets
     * that we actually use. All others will contain all zeroes.
     */
    size = nr_buckets * sizeof(struct map_cache);
    size = (size + XC_PAGE_SIZE - 1) & ~(XC_PAGE_SIZE - 1);
    fprintf(logfile, "qemu_map_cache_init nr_buckets = %lx size %lu\n",
            nr_buckets, size);
    mapcache_entry = mmap(NULL, size, PROT_READ|PROT_WRITE,
                          MAP_SHARED|MAP_ANON, -1, 0);
    if (mapcache_entry == MAP_FAILED) {
        errno = ENOMEM;
        return -1;
    }

    return 0;
}
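
/*
 * Map the bucket-sized run of guest frames starting at address_index into
 * this cache entry, unmapping whatever it held before. xc_map_foreign_batch()
 * flags frames it could not map by setting the top nibble of the returned
 * PFN to 0xf; that is folded into valid_mapping below, one bit per page.
 */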
static void qemu_remap_bucket(struct map_cache *entry,
                              unsigned long address_index)
{
    uint8_t *vaddr_base;
    unsigned long pfns[MCACHE_BUCKET_SIZE >> XC_PAGE_SHIFT];
    unsigned int i, j;

    if (entry->vaddr_base != NULL) {
        errno = munmap(entry->vaddr_base, MCACHE_BUCKET_SIZE);
        if (errno) {
            fprintf(logfile, "unmap fails %d\n", errno);
            exit(-1);
        }
    }

    for (i = 0; i < MCACHE_BUCKET_SIZE >> XC_PAGE_SHIFT; i++)
        pfns[i] = (address_index << (MCACHE_BUCKET_SHIFT-XC_PAGE_SHIFT)) + i;

    vaddr_base = xc_map_foreign_batch(xc_handle, domid, PROT_READ|PROT_WRITE,
                                      pfns, MCACHE_BUCKET_SIZE >> XC_PAGE_SHIFT);
    if (vaddr_base == NULL) {
        fprintf(logfile, "xc_map_foreign_batch error %d\n", errno);
        exit(-1);
    }

    entry->vaddr_base  = vaddr_base;
    entry->paddr_index = address_index;

    for (i = 0; i < MCACHE_BUCKET_SIZE >> XC_PAGE_SHIFT; i += BITS_PER_LONG) {
        unsigned long word = 0;
        j = ((i + BITS_PER_LONG) > (MCACHE_BUCKET_SIZE >> XC_PAGE_SHIFT)) ?
            (MCACHE_BUCKET_SIZE >> XC_PAGE_SHIFT) % BITS_PER_LONG : BITS_PER_LONG;
        while (j > 0)
            word = (word << 1) | (((pfns[i + --j] >> 28) & 0xf) != 0xf);
        entry->valid_mapping[i / BITS_PER_LONG] = word;
    }
}
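
/*
 * Translate a guest physical address to a usable virtual address, remapping
 * the bucket on a miss; returns NULL when the target page has no backing
 * memory. The single-entry cache in last_address_* short-circuits the common
 * case of repeated hits on one bucket. Hypothetical caller sketch:
 *
 *     uint8_t *p = qemu_map_cache(gpa);
 *     if (p != NULL)
 *         memcpy(buf, p, len);   // len must stay within one bucket
 */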
uint8_t *qemu_map_cache(target_phys_addr_t phys_addr)
{
    struct map_cache *entry;
    unsigned long address_index  = phys_addr >> MCACHE_BUCKET_SHIFT;
    unsigned long address_offset = phys_addr & (MCACHE_BUCKET_SIZE-1);

    if (address_index == last_address_index)
        return last_address_vaddr + address_offset;

    entry = &mapcache_entry[address_index % nr_buckets];

    if (entry->vaddr_base == NULL || entry->paddr_index != address_index ||
        !test_bit(address_offset>>XC_PAGE_SHIFT, entry->valid_mapping))
        qemu_remap_bucket(entry, address_index);

    if (!test_bit(address_offset>>XC_PAGE_SHIFT, entry->valid_mapping))
        return NULL;

    last_address_index = address_index;
    last_address_vaddr = entry->vaddr_base;

    return last_address_vaddr + address_offset;
}
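
/*
 * Tear down every mapping in the cache and reset the fast-path state, so
 * later lookups re-map through the hypervisor; needed whenever the guest's
 * physical memory layout may have changed underneath qemu-dm.
 */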
void qemu_invalidate_map_cache(void)
{
    unsigned long i;

    mapcache_lock();

    for (i = 0; i < nr_buckets; i++) {
        struct map_cache *entry = &mapcache_entry[i];

        if (entry->vaddr_base == NULL)
            continue;

        errno = munmap(entry->vaddr_base, MCACHE_BUCKET_SIZE);
        if (errno) {
            fprintf(logfile, "unmap fails %d\n", errno);
            exit(-1);
        }

        entry->paddr_index = 0;
        entry->vaddr_base  = NULL;
    }

    last_address_index = ~0UL;
    last_address_vaddr = NULL;

    mapcache_unlock();
}

#endif /* defined(MAPCACHE) */
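
/*
 * Machine init for a Xen fully-virtualized (HVM) guest: set up the map cache
 * on x86, map the shared and buffered ioreq pages used to exchange device
 * emulation requests with the hypervisor, do the IA64-specific RAM mapping,
 * then defer to the regular PC machine init.
 */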
static void xen_init_fv(uint64_t ram_size, int vga_ram_size, char *boot_device,
                        DisplayState *ds, const char **fd_filename,
                        int snapshot,
                        const char *kernel_filename,
                        const char *kernel_cmdline,
                        const char *initrd_filename,
                        const char *direct_pci)
{
    unsigned long ioreq_pfn;
    extern void *shared_page;
    extern void *buffered_io_page;
#ifdef __ia64__
    unsigned long nr_pages;
    xen_pfn_t *page_array;
    extern void *buffered_pio_page;
    int i;
#endif

#if defined(__i386__) || defined(__x86_64__)

    if (qemu_map_cache_init()) {
        fprintf(logfile, "qemu_map_cache_init returned: error %d\n", errno);
        exit(-1);
    }
#endif

    xc_get_hvm_param(xc_handle, domid, HVM_PARAM_IOREQ_PFN, &ioreq_pfn);
    fprintf(logfile, "shared page at pfn %lx\n", ioreq_pfn);
    shared_page = xc_map_foreign_range(xc_handle, domid, XC_PAGE_SIZE,
                                       PROT_READ|PROT_WRITE, ioreq_pfn);
    if (shared_page == NULL) {
        fprintf(logfile, "map shared IO page returned error %d\n", errno);
        exit(-1);
    }

    xc_get_hvm_param(xc_handle, domid, HVM_PARAM_BUFIOREQ_PFN, &ioreq_pfn);
    fprintf(logfile, "buffered io page at pfn %lx\n", ioreq_pfn);
    buffered_io_page = xc_map_foreign_range(xc_handle, domid, XC_PAGE_SIZE,
                                            PROT_READ|PROT_WRITE, ioreq_pfn);
    if (buffered_io_page == NULL) {
        fprintf(logfile, "map buffered IO page returned error %d\n", errno);
        exit(-1);
    }

#if defined(__ia64__)
    xc_get_hvm_param(xc_handle, domid, HVM_PARAM_BUFPIOREQ_PFN, &ioreq_pfn);
    fprintf(logfile, "buffered pio page at pfn %lx\n", ioreq_pfn);
    buffered_pio_page = xc_map_foreign_range(xc_handle, domid, XC_PAGE_SIZE,
                                             PROT_READ|PROT_WRITE, ioreq_pfn);
    if (buffered_pio_page == NULL) {
        fprintf(logfile, "map buffered PIO page returned error %d\n", errno);
        exit(-1);
    }

    nr_pages = ram_size / XC_PAGE_SIZE;

    page_array = (xen_pfn_t *)malloc(nr_pages * sizeof(xen_pfn_t));
    if (page_array == NULL) {
        fprintf(logfile, "malloc returned error %d\n", errno);
        exit(-1);
    }

    for (i = 0; i < nr_pages; i++)
        page_array[i] = i;

    /* VTI will not use memory between 3G~4G, so we just pass a legal pfn
       to make QEMU map continuous virtual memory space */
    if (ram_size > MMIO_START) {
        for (i = 0; i < (MEM_G >> XC_PAGE_SHIFT); i++)
            page_array[(MMIO_START >> XC_PAGE_SHIFT) + i] =
                (STORE_PAGE_START >> XC_PAGE_SHIFT);
    }

    phys_ram_base = xc_map_foreign_batch(xc_handle, domid,
                                         PROT_READ|PROT_WRITE,
                                         page_array, nr_pages);
    if (phys_ram_base == 0) {
        fprintf(logfile, "xc_map_foreign_batch returned error %d\n", errno);
        exit(-1);
    }
    free(page_array);
#endif

    timeoffset_get();

    pc_machine.init(ram_size, vga_ram_size, boot_device, ds, fd_filename,
                    snapshot, kernel_filename, kernel_cmdline, initrd_filename,
                    direct_pci);
}
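
/* Machine record for the "xenfv" machine type: name, description, init hook. */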
QEMUMachine xenfv_machine = {
    "xenfv",
    "Xen Fully-virtualized PC",
    xen_init_fv,
};
/*
 * Local variables:
 *  indent-tabs-mode: nil
 *  c-indent-level: 4
 *  c-basic-offset: 4
 *  tab-width: 4
 * End:
 */