debuggers.hg: annotate xen/arch/x86/x86_32/domain_page.c @ 3674:fb875591fd72

bitkeeper revision 1.1159.223.63 (42028527-fv-d9BM0_LRp8UKGP19gQ)

Fix NMI deferral.
Signed-off-by: keir.fraser@cl.cam.ac.uk

author    kaf24@scramble.cl.cam.ac.uk
date      Thu Feb 03 20:10:15 2005 +0000 (2005-02-03)
parents   b7f0cff13881
children  0a4b76b6b5a0
/******************************************************************************
 * domain_page.c
 *
 * Allow temporary mapping of domain pages. Based on ideas from the
 * Linux PKMAP code -- the copyrights and credits are retained below.
 */

/*
 * (C) 1999 Andrea Arcangeli, SuSE GmbH, andrea@suse.de
 *          Gerhard Wichert, Siemens AG, Gerhard.Wichert@pdb.siemens.de
 * Copyright (C) 1999 Ingo Molnar <mingo@redhat.com>
 */

#include <xen/config.h>
#include <xen/sched.h>
#include <xen/mm.h>
#include <xen/perfc.h>
#include <asm/domain_page.h>
#include <asm/flushtlb.h>
#include <asm/hardirq.h>

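/*
 * The mapcache is an aligned PAGE_SIZE set of PTEs used round-robin.
 * 'epoch' counts global wraps of the cache; each CPU records in
 * shadow_epoch[] the last epoch it flushed its TLB for, so a wrap on one
 * CPU lazily forces a flush on the others the next time they map.
 */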
unsigned long *mapcache;
static unsigned int map_idx, epoch, shadow_epoch[NR_CPUS];
static spinlock_t map_lock = SPIN_LOCK_UNLOCKED;

/* Use a spare PTE bit to mark entries ready for recycling. */
#define READY_FOR_TLB_FLUSH (1<<10)

static void flush_all_ready_maps(void)
{
    unsigned long *cache = mapcache;

    /* A bit skanky -- depends on having an aligned PAGE_SIZE set of PTEs. */
    do {
        if ( (*cache & READY_FOR_TLB_FLUSH) )
            *cache = 0;
    }
    while ( ((unsigned long)(++cache) & ~PAGE_MASK) != 0 );
}


void *map_domain_mem(unsigned long pa)
{
    unsigned long va;
    unsigned int idx, cpu = smp_processor_id();
    unsigned long *cache = mapcache;

    ASSERT(!in_irq());
    perfc_incrc(map_domain_mem_count);

    spin_lock(&map_lock);

    /* Has some other CPU caused a wrap? We must flush if so. */
    if ( epoch != shadow_epoch[cpu] )
    {
        perfc_incrc(domain_page_tlb_flush);
        local_flush_tlb();
        shadow_epoch[cpu] = epoch;
    }

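    /*
     * Search round-robin for a free PTE slot. When the index wraps to
     * zero, recycle every entry marked READY_FOR_TLB_FLUSH and flush the
     * local TLB under a fresh epoch.
     */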
    do {
        idx = map_idx = (map_idx + 1) & (MAPCACHE_ENTRIES - 1);
        if ( unlikely(idx == 0) )
        {
            flush_all_ready_maps();
            perfc_incrc(domain_page_tlb_flush);
            local_flush_tlb();
            shadow_epoch[cpu] = ++epoch;
        }
    }
    while ( cache[idx] != 0 );

    cache[idx] = (pa & PAGE_MASK) | __PAGE_HYPERVISOR;

    spin_unlock(&map_lock);

    va = MAPCACHE_VIRT_START + (idx << PAGE_SHIFT) + (pa & ~PAGE_MASK);
    return (void *)va;
}

void unmap_domain_mem(void *va)
{
    unsigned int idx;
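
    /*
     * Unmapping is deferred: the PTE is only marked for recycling here;
     * it is cleared, and the TLB flushed, in bulk at the next cache wrap.
     */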
    idx = ((unsigned long)va - MAPCACHE_VIRT_START) >> PAGE_SHIFT;
    mapcache[idx] |= READY_FOR_TLB_FLUSH;
}
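
A minimal usage sketch, not part of the file above: a hypothetical
copy_domain_page() helper showing the intended map/use/unmap pattern.
The helper name and the page-aligned-address and memcpy() availability
assumptions are illustrative; map_domain_mem()/unmap_domain_mem() are as
defined above.

/* Hypothetical example -- not part of domain_page.c. Assumes dst_pa and
 * src_pa are page-aligned physical addresses and memcpy() is available. */
static void copy_domain_page(unsigned long dst_pa, unsigned long src_pa)
{
    void *src = map_domain_mem(src_pa);  /* temporary VA in the mapcache */
    void *dst = map_domain_mem(dst_pa);

    memcpy(dst, src, PAGE_SIZE);

    /* Mark both PTEs READY_FOR_TLB_FLUSH; recycled at the next wrap. */
    unmap_domain_mem(dst);
    unmap_domain_mem(src);
}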