#if defined(MAPCACHE)
-#if defined(__i386__)
-#define MAX_MCACHE_SIZE 0x40000000 /* 1GB max for x86 */
-#define MCACHE_BUCKET_SHIFT 16
-#elif defined(__x86_64__)
-#define MAX_MCACHE_SIZE 0x1000000000 /* 64GB max for x86_64 */
-#define MCACHE_BUCKET_SHIFT 20
-#endif
-
-#define MCACHE_BUCKET_SIZE (1UL << MCACHE_BUCKET_SHIFT)
-
#define BITS_PER_LONG (sizeof(long)*8)
#define BITS_TO_LONGS(bits) \
(((bits)+BITS_PER_LONG-1)/BITS_PER_LONG)
unsigned long paddr_index;
uint8_t *vaddr_base;
DECLARE_BITMAP(valid_mapping, MCACHE_BUCKET_SIZE>>XC_PAGE_SHIFT);
+ uint8_t lock;
+ struct map_cache *next;
+};
+
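+/* Reverse-lookup record for a locked mapping: remembers the pointer handed
+ * back to the caller and the bucket index it came from, so that
+ * qemu_invalidate_entry() can find and unlock the right entry later. */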
+struct map_cache_rev {
+ uint8_t *vaddr_req;
+ unsigned long paddr_index;
+ TAILQ_ENTRY(map_cache_rev) next;
};
static struct map_cache *mapcache_entry;
static unsigned long nr_buckets;
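+/* All currently locked mappings, kept so they can be looked up by pointer. */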
+TAILQ_HEAD(map_cache_head, map_cache_rev) locked_entries =
+    TAILQ_HEAD_INITIALIZER(locked_entries);
/* For most cases (>99.9%), the page address is the same. */
static unsigned long last_address_index = ~0UL;
}
}
-uint8_t *qemu_map_cache(target_phys_addr_t phys_addr)
+uint8_t *qemu_map_cache(target_phys_addr_t phys_addr, uint8_t lock)
{
- struct map_cache *entry;
+ struct map_cache *entry, *pentry = NULL;
unsigned long address_index = phys_addr >> MCACHE_BUCKET_SHIFT;
unsigned long address_offset = phys_addr & (MCACHE_BUCKET_SIZE-1);
- if (address_index == last_address_index)
+ if (address_index == last_address_index && !lock)
return last_address_vaddr + address_offset;
entry = &mapcache_entry[address_index % nr_buckets];
- if (entry->vaddr_base == NULL || entry->paddr_index != address_index ||
- !test_bit(address_offset>>XC_PAGE_SHIFT, entry->valid_mapping))
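+    /* Walk the bucket's overflow chain, skipping entries that are locked
+     * for a different guest frame and therefore must not be remapped. */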
+    while (entry && entry->lock && entry->paddr_index != address_index &&
+           entry->vaddr_base) {
+ pentry = entry;
+ entry = entry->next;
+ }
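+    /* Every entry in the chain is locked elsewhere: append a fresh one. */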
+ if (!entry) {
+ entry = qemu_mallocz(sizeof(struct map_cache));
+ pentry->next = entry;
qemu_remap_bucket(entry, address_index);
+ } else if (!entry->lock) {
+        if (!entry->vaddr_base || entry->paddr_index != address_index ||
+            !test_bit(address_offset>>XC_PAGE_SHIFT, entry->valid_mapping))
+            qemu_remap_bucket(entry, address_index);
+ qemu_remap_bucket(entry, address_index);
+ }
if (!test_bit(address_offset>>XC_PAGE_SHIFT, entry->valid_mapping)) {
        last_address_index = ~0UL;
        return NULL;
    }

last_address_index = address_index;
last_address_vaddr = entry->vaddr_base;
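+    /* Pin the mapping and record it for a later qemu_invalidate_entry(). */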
+ if (lock) {
+ struct map_cache_rev *reventry = qemu_mallocz(sizeof(struct map_cache_rev));
+ entry->lock++;
+ reventry->vaddr_req = last_address_vaddr + address_offset;
+ reventry->paddr_index = last_address_index;
+ TAILQ_INSERT_TAIL(&locked_entries, reventry, next);
+ }
return last_address_vaddr + address_offset;
}
+void qemu_invalidate_entry(uint8_t *buffer)
+{
+ struct map_cache *entry = NULL, *next;
+ struct map_cache_rev *reventry;
+ unsigned long paddr_index;
+ int found = 0;
+
+ if (last_address_vaddr == buffer) {
+ last_address_index = ~0UL;
+ last_address_vaddr = NULL;
+ }
+
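+    /* Look the pointer up in the reverse map to learn its bucket index. */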
+ TAILQ_FOREACH(reventry, &locked_entries, next) {
+ if (reventry->vaddr_req == buffer) {
+ paddr_index = reventry->paddr_index;
+ found = 1;
+ break;
+ }
+ }
+ if (!found) {
+ fprintf(stderr, "qemu_invalidate_entry: could not find %p\n", buffer);
+ TAILQ_FOREACH(reventry, &locked_entries, next) {
+ fprintf(stderr, " %lx -> %p is present\n", reventry->paddr_index, reventry->vaddr_req);
+ }
+ return;
+ }
+ TAILQ_REMOVE(&locked_entries, reventry, next);
+ qemu_free(reventry);
+
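+    /* Head bucket entries are only unlocked; chained entries are unmapped
+     * and freed once their lock count drops to zero. */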
+    next = &mapcache_entry[paddr_index % nr_buckets];
+ if (next->paddr_index == paddr_index) {
+ next->lock--;
+ return;
+ }
+
+ while (next != NULL && next->paddr_index != paddr_index) {
+ entry = next;
+ next = next->next;
+ }
+    if (!next) {
+        fprintf(logfile, "Trying to unmap address %p that is not in the mapcache!\n", buffer);
+        return;
+    }
+    next->lock--;
+    if (next->lock > 0)
+        return;
+
+    entry->next = next->next;
+    if (munmap(next->vaddr_base, MCACHE_BUCKET_SIZE) != 0) {
+        fprintf(logfile, "unmap fails %d\n", errno);
+        exit(-1);
+    }
+ qemu_free(next);
+}
+
void qemu_invalidate_map_cache(void)
{
unsigned long i;
+ struct map_cache_rev *reventry;
+
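+    /* Complete outstanding AIO first; in-flight requests may still hold
+     * locked mappings. */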
+ qemu_aio_flush();
+
+ TAILQ_FOREACH(reventry, &locked_entries, next) {
+        fprintf(stderr, "There should be no locked mappings at this time, "
+                "but %lx -> %p is present\n",
+                reventry->paddr_index, reventry->vaddr_req);
+ }
mapcache_lock();
#if defined(__i386__) || defined(__x86_64__)
-#define phys_ram_addr(x) (qemu_map_cache(x))
+#define phys_ram_addr(x) (qemu_map_cache(x, 0))
#elif defined(__ia64__)
#define phys_ram_addr(x) (((x) < ram_size) ? (phys_ram_base + (x)) : NULL)
#endif
void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size) { }
void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size) { }
-#ifdef DIRECT_MMAP
-void *cpu_physical_memory_map(target_phys_addr_t addr,
- target_phys_addr_t *plen,
- int is_write) {
- xen_pfn_t first, last, count, i;
- target_phys_addr_t offset;
- void *vaddr;
-
- if (!*plen)
- return NULL;
-
- first = addr >> XC_PAGE_SHIFT;
- last = (addr + *plen - 1) >> XC_PAGE_SHIFT;
- count = last - first + 1;
- offset = addr & (XC_PAGE_SIZE-1);
-
- xen_pfn_t pfns[count];
-
-fprintf(stderr,"cpu_physical_memory_map tpa=%lx *plen=%lx"
- " first=%lx last=%lx count=%lx offset=%lx ",
- (unsigned long)addr,
- (unsigned long)*plen,
- (unsigned long)first,
- (unsigned long)last,
- (unsigned long)count,
- (unsigned long)offset);
-
- for (i = 0; i < count; i++)
- pfns[i] = first + i;
-
- vaddr = xc_map_foreign_batch(xc_handle, domid,
- is_write ? PROT_WRITE : PROT_READ,
- pfns, count);
-fprintf(stderr," => vaddr=%p\n", vaddr);
-
- if (!vaddr)
- perror("cpu_physical_memory_map: map failed");
-
- return vaddr;
-}
-
-void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
- int is_write, target_phys_addr_t access_len) {
- uintptr_t start, end;
- int ret;
-
- if (!len) return;
-
- start = (uintptr_t)buffer & ~((uintptr_t)XC_PAGE_SIZE - 1);
- end = ((uintptr_t)(buffer + len - 1) | ((uintptr_t)XC_PAGE_SIZE - 1)) + 1;
-
-fprintf(stderr,"cpu_physical_memory_unmap buffer=%p len=%lx"
- " start=%lx end=%lx XC_PAGE_SIZE-1=%lx\n",
- buffer,
- (unsigned long)len,
- (unsigned long)start,
- (unsigned long)end,
- (unsigned long)((uintptr_t)XC_PAGE_SIZE - 1)
- );
-
- ret = munmap((void*)start, end - start);
- if (ret)
- perror("cpu_physical_memory_unmap: munmap failed");
-}
-
-void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque)) {
- return 0;
-}
-void cpu_unregister_map_client(void *cookie) {
-}
-#endif /*DIRECT_MMAP*/
-
/* stub out various functions for Xen DM */
void dump_exec_info(FILE *f,
int (*cpu_fprintf)(FILE *f, const char *fmt, ...)) {
target_phys_addr_t *plen,
int is_write)
{
- target_phys_addr_t len = *plen;
- target_phys_addr_t done = 0;
- int l;
- uint8_t *ret = NULL;
- uint8_t *ptr;
- target_phys_addr_t page;
- PhysPageDesc *p;
- unsigned long addr1;
-
- while (len > 0) {
- page = addr & TARGET_PAGE_MASK;
- l = (page + TARGET_PAGE_SIZE) - addr;
- if (l > len)
- l = len;
-
- if (done || bounce.buffer) {
- break;
- }
- bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
- bounce.addr = addr;
- bounce.len = l;
- if (!is_write) {
- cpu_physical_memory_rw(addr, bounce.buffer, l, 0);
- }
- ptr = bounce.buffer;
-
- if (!done) {
- ret = ptr;
- } else if (ret + done != ptr) {
- break;
- }
-
- len -= l;
- addr += l;
- done += l;
- }
- *plen = done;
- return ret;
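+    /* Map at most up to the end of the current mapcache bucket and return a
+     * locked mapping; cpu_physical_memory_unmap() releases it. */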
+ unsigned long l = MCACHE_BUCKET_SIZE - (addr & (MCACHE_BUCKET_SIZE-1));
+ if ((*plen) > l)
+ *plen = l;
+ return qemu_map_cache(addr, 1);
}
/* Unmaps a memory region previously mapped by cpu_physical_memory_map().
void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
int is_write, target_phys_addr_t access_len)
{
- assert(buffer == bounce.buffer);
- if (is_write) {
- cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
- }
- qemu_free(bounce.buffer);
- bounce.buffer = NULL;
- cpu_notify_map_clients();
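+    /* Release the locked mapcache entry backing this mapping. */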
+ qemu_invalidate_entry(buffer);
}