debuggers.hg

xen/arch/x86/x86_32/domain_page.c @ 22906:700ac6445812

Now add KDB to the non-kdb tree
author Mukesh Rathor
date Thu Feb 03 15:42:41 2011 -0800 (2011-02-03)
parents 5852612cd4c4
children
line source
/******************************************************************************
 * domain_page.c
 *
 * Allow temporary mapping of domain pages.
 *
 * Copyright (c) 2003-2006, Keir Fraser <keir@xensource.com>
 */

#include <xen/config.h>
#include <xen/sched.h>
#include <xen/mm.h>
#include <xen/perfc.h>
#include <xen/domain_page.h>
#include <asm/current.h>
#include <asm/flushtlb.h>
#include <asm/hardirq.h>
#include <asm/hvm/support.h>
#include <asm/fixmap.h>

static inline struct vcpu *mapcache_current_vcpu(void)
{
    struct vcpu *v;

    /* In the common case we use the mapcache of the running VCPU. */
    v = current;

    /*
     * If guest_table is NULL, and we are running a paravirtualised guest,
     * then it means we are running on the idle domain's page table and must
     * therefore use its mapcache.
     */
    if ( unlikely(!pagetable_get_pfn(v->arch.guest_table)) && !is_hvm_vcpu(v) )
    {
        /* If we really are idling, perform lazy context switch now. */
        if ( (v = idle_vcpu[smp_processor_id()]) == current )
            sync_local_execstate();
        /* We must now be running on the idle page table. */
        ASSERT(read_cr3() == __pa(idle_pg_table));
    }

    return v;
}

void *map_domain_page(unsigned long mfn)
{
    unsigned long va, flags;
    unsigned int idx, i;
    struct vcpu *v;
    struct mapcache_domain *dcache;
    struct mapcache_vcpu *vcache;
    struct vcpu_maphash_entry *hashent;

    perfc_incr(map_domain_page_count);

    v = mapcache_current_vcpu();
    /* Prevent the vcpu pointer being used before it is initialised. */
    ASSERT((unsigned long)v != 0xfffff000);

    dcache = &v->domain->arch.mapcache;
    vcache = &v->arch.mapcache;

    local_irq_save(flags);

    hashent = &vcache->hash[MAPHASH_HASHFN(mfn)];
    if ( hashent->mfn == mfn )
    {
        idx = hashent->idx;
        hashent->refcnt++;
        ASSERT(idx < MAPCACHE_ENTRIES);
        ASSERT(hashent->refcnt != 0);
        ASSERT(l1e_get_pfn(dcache->l1tab[idx]) == mfn);
        goto out;
    }

    spin_lock(&dcache->lock);

    /* Has some other CPU caused a wrap? We must flush if so. */
    if ( unlikely(dcache->epoch != vcache->shadow_epoch) )
    {
        vcache->shadow_epoch = dcache->epoch;
        if ( NEED_FLUSH(this_cpu(tlbflush_time), dcache->tlbflush_timestamp) )
        {
            perfc_incr(domain_page_tlb_flush);
            flush_tlb_local();
        }
    }

    idx = find_next_zero_bit(dcache->inuse, MAPCACHE_ENTRIES, dcache->cursor);
    if ( unlikely(idx >= MAPCACHE_ENTRIES) )
    {
        /* /First/, clean the garbage map and update the inuse list. */
        for ( i = 0; i < ARRAY_SIZE(dcache->garbage); i++ )
        {
            unsigned long x = xchg(&dcache->garbage[i], 0);
            dcache->inuse[i] &= ~x;
        }

        /* /Second/, flush TLBs. */
        perfc_incr(domain_page_tlb_flush);
        flush_tlb_local();
        vcache->shadow_epoch = ++dcache->epoch;
        dcache->tlbflush_timestamp = tlbflush_current_time();

        idx = find_first_zero_bit(dcache->inuse, MAPCACHE_ENTRIES);
        BUG_ON(idx >= MAPCACHE_ENTRIES);
    }

    set_bit(idx, dcache->inuse);
    dcache->cursor = idx + 1;

    spin_unlock(&dcache->lock);

    l1e_write(&dcache->l1tab[idx], l1e_from_pfn(mfn, __PAGE_HYPERVISOR));

 out:
    local_irq_restore(flags);
    va = MAPCACHE_VIRT_START + (idx << PAGE_SHIFT);
    return (void *)va;
}

void unmap_domain_page(const void *va)
{
    unsigned int idx;
    struct vcpu *v;
    struct mapcache_domain *dcache;
    unsigned long mfn, flags;
    struct vcpu_maphash_entry *hashent;

    ASSERT((void *)MAPCACHE_VIRT_START <= va);
    ASSERT(va < (void *)MAPCACHE_VIRT_END);

    v = mapcache_current_vcpu();

    dcache = &v->domain->arch.mapcache;

    idx = ((unsigned long)va - MAPCACHE_VIRT_START) >> PAGE_SHIFT;
    mfn = l1e_get_pfn(dcache->l1tab[idx]);
    hashent = &v->arch.mapcache.hash[MAPHASH_HASHFN(mfn)];

    local_irq_save(flags);

    if ( hashent->idx == idx )
    {
        ASSERT(hashent->mfn == mfn);
        ASSERT(hashent->refcnt != 0);
        hashent->refcnt--;
    }
    else if ( hashent->refcnt == 0 )
    {
        if ( hashent->idx != MAPHASHENT_NOTINUSE )
        {
            /* /First/, zap the PTE. */
            ASSERT(l1e_get_pfn(dcache->l1tab[hashent->idx]) == hashent->mfn);
            l1e_write(&dcache->l1tab[hashent->idx], l1e_empty());
            /* /Second/, mark as garbage. */
            set_bit(hashent->idx, dcache->garbage);
        }

        /* Add newly-freed mapping to the maphash. */
        hashent->mfn = mfn;
        hashent->idx = idx;
    }
    else
    {
        /* /First/, zap the PTE. */
        l1e_write(&dcache->l1tab[idx], l1e_empty());
        /* /Second/, mark as garbage. */
        set_bit(idx, dcache->garbage);
    }

    local_irq_restore(flags);
}
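
/*
 * Editor's note: hypothetical usage sketch, not part of this file.  It shows
 * the intended calling pattern for the per-vcpu mapcache above: map a domain
 * page by MFN, work on it through the returned hypervisor virtual address in
 * the MAPCACHE_VIRT_START region, then unmap promptly so the slot can be
 * recycled.  The helper name example_zero_domain_page() is made up.
 */
#if 0
static void example_zero_domain_page(unsigned long mfn)
{
    void *p = map_domain_page(mfn);   /* short-lived, per-vcpu mapping */

    memset(p, 0, PAGE_SIZE);          /* touch the page via the mapping */

    unmap_domain_page(p);             /* release (or park in the maphash) */
}
#endif
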
void mapcache_domain_init(struct domain *d)
{
    d->arch.mapcache.l1tab = d->arch.mm_perdomain_pt +
        (GDT_LDT_MBYTES << (20 - PAGE_SHIFT));
    spin_lock_init(&d->arch.mapcache.lock);
}

void mapcache_vcpu_init(struct vcpu *v)
{
    unsigned int i;
    struct vcpu_maphash_entry *hashent;

    /* Mark all maphash entries as not in use. */
    for ( i = 0; i < MAPHASH_ENTRIES; i++ )
    {
        hashent = &v->arch.mapcache.hash[i];
        hashent->mfn = ~0UL; /* never valid to map */
        hashent->idx = MAPHASHENT_NOTINUSE;
    }
}
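
/*
 * Editor's note: a sketch, not part of this file, of how the two init hooks
 * above are expected to be used.  mapcache_domain_init() runs once per
 * domain before any of its vcpus can call map_domain_page();
 * mapcache_vcpu_init() runs once per vcpu so that every maphash entry starts
 * out as MAPHASHENT_NOTINUSE.  The wrapper names below are made up; the real
 * call sites live in the arch-specific domain/vcpu initialisation paths.
 */
#if 0
static void example_domain_create(struct domain *d)
{
    mapcache_domain_init(d);   /* set up the mapcache l1tab and lock */
}

static void example_vcpu_create(struct vcpu *v)
{
    mapcache_vcpu_init(v);     /* invalidate the per-vcpu maphash */
}
#endif
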
#define GLOBALMAP_BITS (IOREMAP_MBYTES << (20 - PAGE_SHIFT))
static unsigned long inuse[BITS_TO_LONGS(GLOBALMAP_BITS)];
static unsigned long garbage[BITS_TO_LONGS(GLOBALMAP_BITS)];
static unsigned int inuse_cursor;
static DEFINE_SPINLOCK(globalmap_lock);

void *map_domain_page_global(unsigned long mfn)
{
    l2_pgentry_t *pl2e;
    l1_pgentry_t *pl1e;
    unsigned int idx, i;
    unsigned long va;

    ASSERT(!in_irq() && local_irq_is_enabled());

    /* At least half the ioremap space should be available to us. */
    BUILD_BUG_ON(IOREMAP_VIRT_START + (IOREMAP_MBYTES << 19) >= FIXADDR_START);

    spin_lock(&globalmap_lock);

    idx = find_next_zero_bit(inuse, GLOBALMAP_BITS, inuse_cursor);
    va = IOREMAP_VIRT_START + (idx << PAGE_SHIFT);
    if ( unlikely(va >= FIXADDR_START) )
    {
        /* /First/, clean the garbage map and update the inuse list. */
        for ( i = 0; i < ARRAY_SIZE(garbage); i++ )
        {
            unsigned long x = xchg(&garbage[i], 0);
            inuse[i] &= ~x;
        }

        /* /Second/, flush all TLBs to get rid of stale garbage mappings. */
        flush_tlb_all();

        idx = find_first_zero_bit(inuse, GLOBALMAP_BITS);
        va = IOREMAP_VIRT_START + (idx << PAGE_SHIFT);
        if ( unlikely(va >= FIXADDR_START) )
        {
            spin_unlock(&globalmap_lock);
            return NULL;
        }
    }

    set_bit(idx, inuse);
    inuse_cursor = idx + 1;

    spin_unlock(&globalmap_lock);

    pl2e = virt_to_xen_l2e(va);
    pl1e = l2e_to_l1e(*pl2e) + l1_table_offset(va);
    l1e_write(pl1e, l1e_from_pfn(mfn, __PAGE_HYPERVISOR));

    return (void *)va;
}

void unmap_domain_page_global(const void *va)
{
    unsigned long __va = (unsigned long)va;
    l2_pgentry_t *pl2e;
    l1_pgentry_t *pl1e;
    unsigned int idx;

    ASSERT(__va >= IOREMAP_VIRT_START);

    /* /First/, we zap the PTE. */
    pl2e = virt_to_xen_l2e(__va);
    pl1e = l2e_to_l1e(*pl2e) + l1_table_offset(__va);
    l1e_write(pl1e, l1e_empty());

    /* /Second/, we add to the garbage map. */
    idx = (__va - IOREMAP_VIRT_START) >> PAGE_SHIFT;
    set_bit(idx, garbage);
}
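
/*
 * Editor's note: hypothetical usage sketch, not part of this file.  Unlike
 * map_domain_page(), the _global variant returns a mapping in the ioremap
 * region that remains valid on every CPU and across context switches, so it
 * suits long-lived mappings; it can also fail with NULL when the global slot
 * space is exhausted.  The helper names below are made up for illustration.
 */
#if 0
static void *example_attach_shared_page(unsigned long mfn)
{
    void *p = map_domain_page_global(mfn);  /* persistent, cross-CPU mapping */

    if ( p == NULL )
        return NULL;                        /* no free global slot */

    /* ... stash 'p' somewhere long-lived and use it over time ... */
    return p;
}

static void example_detach_shared_page(void *p)
{
    unmap_domain_page_global(p);            /* zap the PTE, defer slot reuse */
}
#endif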