debuggers.hg

view xen/arch/x86/x86_32/domain_page.c @ 3674:fb875591fd72

bitkeeper revision 1.1159.223.63 (42028527-fv-d9BM0_LRp8UKGP19gQ)

Fix NMI deferral.
Signed-off-by: keir.fraser@cl.cam.ac.uk
author kaf24@scramble.cl.cam.ac.uk
date Thu Feb 03 20:10:15 2005 +0000 (2005-02-03)
parents b7f0cff13881
children 0a4b76b6b5a0
line source
/******************************************************************************
 * domain_page.c
 *
 * Allow temporary mapping of domain pages. Based on ideas from the
 * Linux PKMAP code -- the copyrights and credits are retained below.
 */

/*
 * (C) 1999 Andrea Arcangeli, SuSE GmbH, andrea@suse.de
 *          Gerhard Wichert, Siemens AG, Gerhard.Wichert@pdb.siemens.de
 * Copyright (C) 1999 Ingo Molnar <mingo@redhat.com>
 */

#include <xen/config.h>
#include <xen/sched.h>
#include <xen/mm.h>
#include <xen/perfc.h>
#include <asm/domain_page.h>
#include <asm/flushtlb.h>
#include <asm/hardirq.h>

/* The page of PTEs that backs the mapcache virtual region. */
unsigned long *mapcache;

/* Allocation cursor, plus global/per-CPU epochs for deferred TLB flushes. */
static unsigned int map_idx, epoch, shadow_epoch[NR_CPUS];
static spinlock_t map_lock = SPIN_LOCK_UNLOCKED;

/* Use a spare PTE bit to mark entries ready for recycling. */
#define READY_FOR_TLB_FLUSH (1<<10)

static void flush_all_ready_maps(void)
{
    unsigned long *cache = mapcache;

    /* A bit skanky -- depends on having an aligned PAGE_SIZE set of PTEs. */
    do {
        if ( (*cache & READY_FOR_TLB_FLUSH) )
            *cache = 0;
    }
    while ( ((unsigned long)(++cache) & ~PAGE_MASK) != 0 );
}

void *map_domain_mem(unsigned long pa)
{
    unsigned long va;
    unsigned int idx, cpu = smp_processor_id();
    unsigned long *cache = mapcache;

    ASSERT(!in_irq());
    perfc_incrc(map_domain_mem_count);

    spin_lock(&map_lock);

    /* Has some other CPU caused a wrap? We must flush if so. */
    if ( epoch != shadow_epoch[cpu] )
    {
        perfc_incrc(domain_page_tlb_flush);
        local_flush_tlb();
        shadow_epoch[cpu] = epoch;
    }

    do {
        idx = map_idx = (map_idx + 1) & (MAPCACHE_ENTRIES - 1);
        if ( unlikely(idx == 0) )
        {
            flush_all_ready_maps();
            perfc_incrc(domain_page_tlb_flush);
            local_flush_tlb();
            shadow_epoch[cpu] = ++epoch;
        }
    }
    while ( cache[idx] != 0 );

    cache[idx] = (pa & PAGE_MASK) | __PAGE_HYPERVISOR;

    spin_unlock(&map_lock);

    va = MAPCACHE_VIRT_START + (idx << PAGE_SHIFT) + (pa & ~PAGE_MASK);
    return (void *)va;
}

void unmap_domain_mem(void *va)
{
    unsigned int idx;
    idx = ((unsigned long)va - MAPCACHE_VIRT_START) >> PAGE_SHIFT;
    mapcache[idx] |= READY_FOR_TLB_FLUSH;
}
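
For context, a minimal sketch of how a caller uses this interface: map a frame by its physical address, access it through the returned temporary virtual address, then unmap it so the PTE can be recycled at the next mapcache wrap. The helper name copy_page_from_domain and its parameters are hypothetical, for illustration only; memcpy and PAGE_SIZE are assumed to come from the hypervisor's existing headers.

/* Hypothetical caller (not part of this file): copy one page of domain
 * memory, given its physical address, into a hypervisor buffer. Note that
 * unmap_domain_mem() only marks the PTE READY_FOR_TLB_FLUSH; the actual
 * TLB flush is deferred until map_idx next wraps to zero. */
static void copy_page_from_domain(void *buf, unsigned long pa)
{
    void *va = map_domain_mem(pa);  /* page offset of pa is preserved */
    memcpy(buf, va, PAGE_SIZE);
    unmap_domain_mem(va);           /* mark the slot for recycling */
}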