debuggers.hg
changeset 3033:b3bd8fd6c418
bitkeeper revision 1.1159.1.422 (419a4ae88BHDjqoOOAAy4W4ksz30cg)
Make mmu_update queue per cpu.
XXX Reduce queue size to 1.
author    cl349@freefall.cl.cam.ac.uk
date      Tue Nov 16 18:46:00 2004 +0000 (2004-11-16)
parents   1f505eb80787
children  bfe070fe1dee
files     linux-2.6.9-xen-sparse/arch/xen/i386/mm/hypervisor.c
          linux-2.6.9-xen-sparse/include/asm-xen/hypervisor.h
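The change adopts the Linux 2.6 per-CPU variable API: DEFINE_PER_CPU(type, name) instantiates one copy of a variable per processor, and per_cpu(name, cpu) selects a given CPU's copy. Below is a minimal sketch of the idiom; the names my_counter and bump_counter are illustrative, not part of the patch, which instead relies on holding update_lock with interrupts disabled to keep smp_processor_id() stable.

    #include <linux/percpu.h>
    #include <linux/smp.h>

    /* One private counter per CPU instead of one shared counter. */
    DEFINE_PER_CPU(unsigned int, my_counter);

    static void bump_counter(void)
    {
        /* smp_processor_id() is only meaningful while the caller
         * cannot be migrated to another CPU. */
        int cpu = smp_processor_id();
        per_cpu(my_counter, cpu)++;
    }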
line diff
--- a/linux-2.6.9-xen-sparse/arch/xen/i386/mm/hypervisor.c	Tue Nov 16 18:41:25 2004 +0000
+++ b/linux-2.6.9-xen-sparse/arch/xen/i386/mm/hypervisor.c	Tue Nov 16 18:46:00 2004 +0000
@@ -35,6 +35,7 @@
 #include <asm/pgtable.h>
 #include <asm-xen/hypervisor.h>
 #include <asm-xen/multicall.h>
+#include <linux/percpu.h>
 
 /*
  * This suffices to protect us if we ever move to SMP domains.
@@ -49,12 +50,15 @@ static spinlock_t update_lock = SPIN_LOC
 #define QUEUE_SIZE 2048
 #define pte_offset_kernel pte_offset
 #else
+#ifdef CONFIG_SMP
+#define QUEUE_SIZE 1
+#else
 #define QUEUE_SIZE 128
 #endif
+#endif
 
-static mmu_update_t update_queue[QUEUE_SIZE];
-unsigned int mmu_update_queue_idx = 0;
-#define idx mmu_update_queue_idx
+DEFINE_PER_CPU(mmu_update_t, update_queue[QUEUE_SIZE]);
+DEFINE_PER_CPU(unsigned int, mmu_update_queue_idx);
 
 #if MMU_UPDATE_DEBUG > 0
 page_update_debug_t update_debug_queue[QUEUE_SIZE] = {{0}};
@@ -64,6 +68,8 @@ page_update_debug_t update_debug_queue[Q
 #if MMU_UPDATE_DEBUG > 3
 static void DEBUG_allow_pt_reads(void)
 {
+    int cpu = smp_processor_id();
+    int idx = per_cpu(mmu_update_queue_idx, cpu);
     pte_t *pte;
     mmu_update_t update;
     int i;
@@ -79,6 +85,8 @@ static void DEBUG_allow_pt_reads(void)
 }
 static void DEBUG_disallow_pt_read(unsigned long va)
 {
+    int cpu = smp_processor_id();
+    int idx = per_cpu(mmu_update_queue_idx, cpu);
     pte_t *pte;
     pmd_t *pmd;
     pgd_t *pgd;
@@ -118,9 +126,12 @@ static void DEBUG_disallow_pt_read(unsig
  */
 void MULTICALL_flush_page_update_queue(void)
 {
+    int cpu = smp_processor_id();
+    int idx;
     unsigned long flags;
     unsigned int _idx;
     spin_lock_irqsave(&update_lock, flags);
+    idx = per_cpu(mmu_update_queue_idx, cpu);
     if ( (_idx = idx) != 0 )
     {
 #if MMU_UPDATE_DEBUG > 1
@@ -130,10 +141,10 @@ void MULTICALL_flush_page_update_queue(v
 #if MMU_UPDATE_DEBUG > 3
         DEBUG_allow_pt_reads();
 #endif
-        idx = 0;
+        per_cpu(mmu_update_queue_idx, cpu) = 0;
         wmb(); /* Make sure index is cleared first to avoid double updates. */
         queue_multicall3(__HYPERVISOR_mmu_update,
-                         (unsigned long)update_queue,
+                         (unsigned long)&per_cpu(update_queue[0], cpu),
                          (unsigned long)_idx,
                          (unsigned long)NULL);
     }
@@ -142,17 +153,18 @@ void MULTICALL_flush_page_update_queue(v
 
 static inline void __flush_page_update_queue(void)
 {
-    unsigned int _idx = idx;
+    int cpu = smp_processor_id();
+    unsigned int _idx = per_cpu(mmu_update_queue_idx, cpu);
 #if MMU_UPDATE_DEBUG > 1
-    if (idx > 1)
+    if (_idx > 1)
     printk("Flushing %d entries from pt update queue\n", idx);
 #endif
 #if MMU_UPDATE_DEBUG > 3
     DEBUG_allow_pt_reads();
 #endif
-    idx = 0;
+    per_cpu(mmu_update_queue_idx, cpu) = 0;
    wmb(); /* Make sure index is cleared first to avoid double updates. */
-    if ( unlikely(HYPERVISOR_mmu_update(update_queue, _idx, NULL) < 0) )
+    if ( unlikely(HYPERVISOR_mmu_update(&per_cpu(update_queue[0], cpu), _idx, NULL) < 0) )
     {
         printk(KERN_ALERT "Failed to execute MMU updates.\n");
         BUG();
@@ -161,139 +173,175 @@ static inline void __flush_page_update_q
 
 void _flush_page_update_queue(void)
 {
+    int cpu = smp_processor_id();
     unsigned long flags;
     spin_lock_irqsave(&update_lock, flags);
-    if ( idx != 0 ) __flush_page_update_queue();
+    if ( per_cpu(mmu_update_queue_idx, cpu) != 0 ) __flush_page_update_queue();
     spin_unlock_irqrestore(&update_lock, flags);
 }
 
 static inline void increment_index(void)
 {
-    idx++;
-    if ( unlikely(idx == QUEUE_SIZE) ) __flush_page_update_queue();
+    int cpu = smp_processor_id();
+    per_cpu(mmu_update_queue_idx, cpu)++;
+    if ( unlikely(per_cpu(mmu_update_queue_idx, cpu) == QUEUE_SIZE) ) __flush_page_update_queue();
 }
 
 static inline void increment_index_and_flush(void)
 {
-    idx++;
+    int cpu = smp_processor_id();
+    per_cpu(mmu_update_queue_idx, cpu)++;
     __flush_page_update_queue();
 }
 
 void queue_l1_entry_update(pte_t *ptr, unsigned long val)
 {
+    int cpu = smp_processor_id();
+    int idx;
     unsigned long flags;
     spin_lock_irqsave(&update_lock, flags);
 #if MMU_UPDATE_DEBUG > 3
     DEBUG_disallow_pt_read((unsigned long)ptr);
 #endif
-    update_queue[idx].ptr = virt_to_machine(ptr);
-    update_queue[idx].val = val;
+    idx = per_cpu(mmu_update_queue_idx, cpu);
+    per_cpu(update_queue[idx], cpu).ptr = virt_to_machine(ptr);
+    per_cpu(update_queue[idx], cpu).val = val;
     increment_index();
     spin_unlock_irqrestore(&update_lock, flags);
 }
 
 void queue_l2_entry_update(pmd_t *ptr, unsigned long val)
 {
+    int cpu = smp_processor_id();
+    int idx;
     unsigned long flags;
     spin_lock_irqsave(&update_lock, flags);
-    update_queue[idx].ptr = virt_to_machine(ptr);
-    update_queue[idx].val = val;
+    idx = per_cpu(mmu_update_queue_idx, cpu);
+    per_cpu(update_queue[idx], cpu).ptr = virt_to_machine(ptr);
+    per_cpu(update_queue[idx], cpu).val = val;
     increment_index();
     spin_unlock_irqrestore(&update_lock, flags);
 }
 
 void queue_pt_switch(unsigned long ptr)
 {
+    int cpu = smp_processor_id();
+    int idx;
     unsigned long flags;
     spin_lock_irqsave(&update_lock, flags);
-    update_queue[idx].ptr = phys_to_machine(ptr);
-    update_queue[idx].ptr |= MMU_EXTENDED_COMMAND;
-    update_queue[idx].val = MMUEXT_NEW_BASEPTR;
+    idx = per_cpu(mmu_update_queue_idx, cpu);
+    per_cpu(update_queue[idx], cpu).ptr = phys_to_machine(ptr);
+    per_cpu(update_queue[idx], cpu).ptr |= MMU_EXTENDED_COMMAND;
+    per_cpu(update_queue[idx], cpu).val = MMUEXT_NEW_BASEPTR;
     increment_index();
     spin_unlock_irqrestore(&update_lock, flags);
 }
 
 void queue_tlb_flush(void)
 {
+    int cpu = smp_processor_id();
+    int idx;
     unsigned long flags;
     spin_lock_irqsave(&update_lock, flags);
-    update_queue[idx].ptr = MMU_EXTENDED_COMMAND;
-    update_queue[idx].val = MMUEXT_TLB_FLUSH;
+    idx = per_cpu(mmu_update_queue_idx, cpu);
+    per_cpu(update_queue[idx], cpu).ptr = MMU_EXTENDED_COMMAND;
+    per_cpu(update_queue[idx], cpu).val = MMUEXT_TLB_FLUSH;
     increment_index();
     spin_unlock_irqrestore(&update_lock, flags);
 }
 
 void queue_invlpg(unsigned long ptr)
 {
+    int cpu = smp_processor_id();
+    int idx;
     unsigned long flags;
     spin_lock_irqsave(&update_lock, flags);
-    update_queue[idx].ptr = MMU_EXTENDED_COMMAND;
-    update_queue[idx].ptr |= ptr & PAGE_MASK;
-    update_queue[idx].val = MMUEXT_INVLPG;
+    idx = per_cpu(mmu_update_queue_idx, cpu);
+    per_cpu(update_queue[idx], cpu).ptr = MMU_EXTENDED_COMMAND;
+    per_cpu(update_queue[idx], cpu).ptr |= ptr & PAGE_MASK;
+    per_cpu(update_queue[idx], cpu).val = MMUEXT_INVLPG;
     increment_index();
     spin_unlock_irqrestore(&update_lock, flags);
 }
 
 void queue_pgd_pin(unsigned long ptr)
 {
+    int cpu = smp_processor_id();
+    int idx;
     unsigned long flags;
     spin_lock_irqsave(&update_lock, flags);
-    update_queue[idx].ptr = phys_to_machine(ptr);
-    update_queue[idx].ptr |= MMU_EXTENDED_COMMAND;
-    update_queue[idx].val = MMUEXT_PIN_L2_TABLE;
+    idx = per_cpu(mmu_update_queue_idx, cpu);
+    per_cpu(update_queue[idx], cpu).ptr = phys_to_machine(ptr);
+    per_cpu(update_queue[idx], cpu).ptr |= MMU_EXTENDED_COMMAND;
+    per_cpu(update_queue[idx], cpu).val = MMUEXT_PIN_L2_TABLE;
     increment_index();
     spin_unlock_irqrestore(&update_lock, flags);
 }
 
 void queue_pgd_unpin(unsigned long ptr)
 {
+    int cpu = smp_processor_id();
+    int idx;
     unsigned long flags;
     spin_lock_irqsave(&update_lock, flags);
-    update_queue[idx].ptr = phys_to_machine(ptr);
-    update_queue[idx].ptr |= MMU_EXTENDED_COMMAND;
-    update_queue[idx].val = MMUEXT_UNPIN_TABLE;
+    idx = per_cpu(mmu_update_queue_idx, cpu);
+    per_cpu(update_queue[idx], cpu).ptr = phys_to_machine(ptr);
+    per_cpu(update_queue[idx], cpu).ptr |= MMU_EXTENDED_COMMAND;
+    per_cpu(update_queue[idx], cpu).val = MMUEXT_UNPIN_TABLE;
     increment_index();
     spin_unlock_irqrestore(&update_lock, flags);
 }
 
 void queue_pte_pin(unsigned long ptr)
 {
+    int cpu = smp_processor_id();
+    int idx;
     unsigned long flags;
     spin_lock_irqsave(&update_lock, flags);
-    update_queue[idx].ptr = phys_to_machine(ptr);
-    update_queue[idx].ptr |= MMU_EXTENDED_COMMAND;
-    update_queue[idx].val = MMUEXT_PIN_L1_TABLE;
+    idx = per_cpu(mmu_update_queue_idx, cpu);
+    per_cpu(update_queue[idx], cpu).ptr = phys_to_machine(ptr);
+    per_cpu(update_queue[idx], cpu).ptr |= MMU_EXTENDED_COMMAND;
+    per_cpu(update_queue[idx], cpu).val = MMUEXT_PIN_L1_TABLE;
     increment_index();
     spin_unlock_irqrestore(&update_lock, flags);
 }
 
 void queue_pte_unpin(unsigned long ptr)
 {
+    int cpu = smp_processor_id();
+    int idx;
     unsigned long flags;
     spin_lock_irqsave(&update_lock, flags);
-    update_queue[idx].ptr = phys_to_machine(ptr);
-    update_queue[idx].ptr |= MMU_EXTENDED_COMMAND;
-    update_queue[idx].val = MMUEXT_UNPIN_TABLE;
+    idx = per_cpu(mmu_update_queue_idx, cpu);
+    per_cpu(update_queue[idx], cpu).ptr = phys_to_machine(ptr);
+    per_cpu(update_queue[idx], cpu).ptr |= MMU_EXTENDED_COMMAND;
+    per_cpu(update_queue[idx], cpu).val = MMUEXT_UNPIN_TABLE;
     increment_index();
     spin_unlock_irqrestore(&update_lock, flags);
 }
 
 void queue_set_ldt(unsigned long ptr, unsigned long len)
 {
+    int cpu = smp_processor_id();
+    int idx;
     unsigned long flags;
     spin_lock_irqsave(&update_lock, flags);
-    update_queue[idx].ptr = MMU_EXTENDED_COMMAND | ptr;
-    update_queue[idx].val = MMUEXT_SET_LDT | (len << MMUEXT_CMD_SHIFT);
+    idx = per_cpu(mmu_update_queue_idx, cpu);
+    per_cpu(update_queue[idx], cpu).ptr = MMU_EXTENDED_COMMAND | ptr;
+    per_cpu(update_queue[idx], cpu).val = MMUEXT_SET_LDT | (len << MMUEXT_CMD_SHIFT);
     increment_index();
     spin_unlock_irqrestore(&update_lock, flags);
 }
 
 void queue_machphys_update(unsigned long mfn, unsigned long pfn)
 {
+    int cpu = smp_processor_id();
+    int idx;
     unsigned long flags;
     spin_lock_irqsave(&update_lock, flags);
-    update_queue[idx].ptr = (mfn << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE;
-    update_queue[idx].val = pfn;
+    idx = per_cpu(mmu_update_queue_idx, cpu);
+    per_cpu(update_queue[idx], cpu).ptr = (mfn << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE;
+    per_cpu(update_queue[idx], cpu).val = pfn;
     increment_index();
     spin_unlock_irqrestore(&update_lock, flags);
 }
@@ -301,119 +349,152 @@ void queue_machphys_update(unsigned long
 /* queue and flush versions of the above */
 void xen_l1_entry_update(pte_t *ptr, unsigned long val)
 {
+    int cpu = smp_processor_id();
+    int idx;
     unsigned long flags;
     spin_lock_irqsave(&update_lock, flags);
 #if MMU_UPDATE_DEBUG > 3
     DEBUG_disallow_pt_read((unsigned long)ptr);
 #endif
-    update_queue[idx].ptr = virt_to_machine(ptr);
-    update_queue[idx].val = val;
+    idx = per_cpu(mmu_update_queue_idx, cpu);
+    per_cpu(update_queue[idx], cpu).ptr = virt_to_machine(ptr);
+    per_cpu(update_queue[idx], cpu).val = val;
     increment_index_and_flush();
     spin_unlock_irqrestore(&update_lock, flags);
 }
 
 void xen_l2_entry_update(pmd_t *ptr, unsigned long val)
 {
+    int cpu = smp_processor_id();
+    int idx;
     unsigned long flags;
     spin_lock_irqsave(&update_lock, flags);
-    update_queue[idx].ptr = virt_to_machine(ptr);
-    update_queue[idx].val = val;
+    idx = per_cpu(mmu_update_queue_idx, cpu);
+    per_cpu(update_queue[idx], cpu).ptr = virt_to_machine(ptr);
+    per_cpu(update_queue[idx], cpu).val = val;
     increment_index_and_flush();
     spin_unlock_irqrestore(&update_lock, flags);
 }
 
 void xen_pt_switch(unsigned long ptr)
 {
+    int cpu = smp_processor_id();
+    int idx;
     unsigned long flags;
     spin_lock_irqsave(&update_lock, flags);
-    update_queue[idx].ptr = phys_to_machine(ptr);
-    update_queue[idx].ptr |= MMU_EXTENDED_COMMAND;
-    update_queue[idx].val = MMUEXT_NEW_BASEPTR;
+    idx = per_cpu(mmu_update_queue_idx, cpu);
+    per_cpu(update_queue[idx], cpu).ptr = phys_to_machine(ptr);
+    per_cpu(update_queue[idx], cpu).ptr |= MMU_EXTENDED_COMMAND;
+    per_cpu(update_queue[idx], cpu).val = MMUEXT_NEW_BASEPTR;
     increment_index_and_flush();
     spin_unlock_irqrestore(&update_lock, flags);
 }
 
 void xen_tlb_flush(void)
 {
+    int cpu = smp_processor_id();
+    int idx;
     unsigned long flags;
     spin_lock_irqsave(&update_lock, flags);
-    update_queue[idx].ptr = MMU_EXTENDED_COMMAND;
-    update_queue[idx].val = MMUEXT_TLB_FLUSH;
+    idx = per_cpu(mmu_update_queue_idx, cpu);
+    per_cpu(update_queue[idx], cpu).ptr = MMU_EXTENDED_COMMAND;
+    per_cpu(update_queue[idx], cpu).val = MMUEXT_TLB_FLUSH;
     increment_index_and_flush();
     spin_unlock_irqrestore(&update_lock, flags);
 }
 
 void xen_invlpg(unsigned long ptr)
 {
+    int cpu = smp_processor_id();
+    int idx;
     unsigned long flags;
     spin_lock_irqsave(&update_lock, flags);
-    update_queue[idx].ptr = MMU_EXTENDED_COMMAND;
-    update_queue[idx].ptr |= ptr & PAGE_MASK;
-    update_queue[idx].val = MMUEXT_INVLPG;
+    idx = per_cpu(mmu_update_queue_idx, cpu);
+    per_cpu(update_queue[idx], cpu).ptr = MMU_EXTENDED_COMMAND;
+    per_cpu(update_queue[idx], cpu).ptr |= ptr & PAGE_MASK;
+    per_cpu(update_queue[idx], cpu).val = MMUEXT_INVLPG;
     increment_index_and_flush();
     spin_unlock_irqrestore(&update_lock, flags);
 }
 
 void xen_pgd_pin(unsigned long ptr)
 {
+    int cpu = smp_processor_id();
+    int idx;
     unsigned long flags;
     spin_lock_irqsave(&update_lock, flags);
-    update_queue[idx].ptr = phys_to_machine(ptr);
-    update_queue[idx].ptr |= MMU_EXTENDED_COMMAND;
-    update_queue[idx].val = MMUEXT_PIN_L2_TABLE;
+    idx = per_cpu(mmu_update_queue_idx, cpu);
+    per_cpu(update_queue[idx], cpu).ptr = phys_to_machine(ptr);
+    per_cpu(update_queue[idx], cpu).ptr |= MMU_EXTENDED_COMMAND;
+    per_cpu(update_queue[idx], cpu).val = MMUEXT_PIN_L2_TABLE;
     increment_index_and_flush();
     spin_unlock_irqrestore(&update_lock, flags);
 }
 
 void xen_pgd_unpin(unsigned long ptr)
 {
+    int cpu = smp_processor_id();
+    int idx;
     unsigned long flags;
     spin_lock_irqsave(&update_lock, flags);
-    update_queue[idx].ptr = phys_to_machine(ptr);
-    update_queue[idx].ptr |= MMU_EXTENDED_COMMAND;
-    update_queue[idx].val = MMUEXT_UNPIN_TABLE;
+    idx = per_cpu(mmu_update_queue_idx, cpu);
+    per_cpu(update_queue[idx], cpu).ptr = phys_to_machine(ptr);
+    per_cpu(update_queue[idx], cpu).ptr |= MMU_EXTENDED_COMMAND;
+    per_cpu(update_queue[idx], cpu).val = MMUEXT_UNPIN_TABLE;
     increment_index_and_flush();
     spin_unlock_irqrestore(&update_lock, flags);
 }
 
 void xen_pte_pin(unsigned long ptr)
 {
+    int cpu = smp_processor_id();
+    int idx;
     unsigned long flags;
     spin_lock_irqsave(&update_lock, flags);
-    update_queue[idx].ptr = phys_to_machine(ptr);
-    update_queue[idx].ptr |= MMU_EXTENDED_COMMAND;
-    update_queue[idx].val = MMUEXT_PIN_L1_TABLE;
+    idx = per_cpu(mmu_update_queue_idx, cpu);
+    per_cpu(update_queue[idx], cpu).ptr = phys_to_machine(ptr);
+    per_cpu(update_queue[idx], cpu).ptr |= MMU_EXTENDED_COMMAND;
+    per_cpu(update_queue[idx], cpu).val = MMUEXT_PIN_L1_TABLE;
     increment_index_and_flush();
     spin_unlock_irqrestore(&update_lock, flags);
 }
 
 void xen_pte_unpin(unsigned long ptr)
 {
+    int cpu = smp_processor_id();
+    int idx;
     unsigned long flags;
     spin_lock_irqsave(&update_lock, flags);
-    update_queue[idx].ptr = phys_to_machine(ptr);
-    update_queue[idx].ptr |= MMU_EXTENDED_COMMAND;
-    update_queue[idx].val = MMUEXT_UNPIN_TABLE;
+    idx = per_cpu(mmu_update_queue_idx, cpu);
+    per_cpu(update_queue[idx], cpu).ptr = phys_to_machine(ptr);
+    per_cpu(update_queue[idx], cpu).ptr |= MMU_EXTENDED_COMMAND;
+    per_cpu(update_queue[idx], cpu).val = MMUEXT_UNPIN_TABLE;
     increment_index_and_flush();
     spin_unlock_irqrestore(&update_lock, flags);
 }
 
 void xen_set_ldt(unsigned long ptr, unsigned long len)
 {
+    int cpu = smp_processor_id();
+    int idx;
     unsigned long flags;
     spin_lock_irqsave(&update_lock, flags);
-    update_queue[idx].ptr = MMU_EXTENDED_COMMAND | ptr;
-    update_queue[idx].val = MMUEXT_SET_LDT | (len << MMUEXT_CMD_SHIFT);
+    idx = per_cpu(mmu_update_queue_idx, cpu);
+    per_cpu(update_queue[idx], cpu).ptr = MMU_EXTENDED_COMMAND | ptr;
+    per_cpu(update_queue[idx], cpu).val = MMUEXT_SET_LDT | (len << MMUEXT_CMD_SHIFT);
     increment_index_and_flush();
     spin_unlock_irqrestore(&update_lock, flags);
 }
 
 void xen_machphys_update(unsigned long mfn, unsigned long pfn)
 {
+    int cpu = smp_processor_id();
+    int idx;
     unsigned long flags;
     spin_lock_irqsave(&update_lock, flags);
-    update_queue[idx].ptr = (mfn << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE;
-    update_queue[idx].val = pfn;
+    idx = per_cpu(mmu_update_queue_idx, cpu);
+    per_cpu(update_queue[idx], cpu).ptr = (mfn << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE;
+    per_cpu(update_queue[idx], cpu).val = pfn;
     increment_index_and_flush();
     spin_unlock_irqrestore(&update_lock, flags);
 }
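The calling convention is unchanged by this patch: callers batch page-table updates through the queue_* functions and force them out with flush_page_update_queue(); increment_index() also flushes automatically once a CPU's queue fills. A hypothetical caller, sketched against the interfaces above (the names example_update_two_ptes, ptep0/ptep1 and val0/val1 are placeholders, not from the patch):

    /* Hypothetical batching caller. */
    void example_update_two_ptes(pte_t *ptep0, unsigned long val0,
                                 pte_t *ptep1, unsigned long val1)
    {
        queue_l1_entry_update(ptep0, val0); /* queued, not yet applied */
        queue_l1_entry_update(ptep1, val1);
        flush_page_update_queue();          /* apply anything still queued */
    }

Note that with the SMP QUEUE_SIZE of 1 introduced here (per the XXX in the description), every queue_* call fills the queue and flushes immediately, so batching only takes effect on UP builds, where QUEUE_SIZE stays at 128.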
--- a/linux-2.6.9-xen-sparse/include/asm-xen/hypervisor.h	Tue Nov 16 18:41:25 2004 +0000
+++ b/linux-2.6.9-xen-sparse/include/asm-xen/hypervisor.h	Tue Nov 16 18:46:00 2004 +0000
@@ -74,8 +74,6 @@ void lgdt_finish(void);
  * be MACHINE addresses.
  */
 
-extern unsigned int mmu_update_queue_idx;
-
 void queue_l1_entry_update(pte_t *ptr, unsigned long val);
 void queue_l2_entry_update(pmd_t *ptr, unsigned long val);
 void queue_pt_switch(unsigned long ptr);
@@ -185,12 +183,11 @@ extern page_update_debug_t update_debug_
 #endif
 
 void _flush_page_update_queue(void);
-static inline int flush_page_update_queue(void)
-{
-    unsigned int idx = mmu_update_queue_idx;
-    if ( idx != 0 ) _flush_page_update_queue();
-    return idx;
-}
+#define flush_page_update_queue() do {                       \
+    DECLARE_PER_CPU(unsigned int, mmu_update_queue_idx);     \
+    if (per_cpu(mmu_update_queue_idx, smp_processor_id()))   \
+        _flush_page_update_queue();                          \
+} while (0)
 #define xen_flush_page_update_queue() (_flush_page_update_queue())
 #define XEN_flush_page_update_queue() (_flush_page_update_queue())
 void MULTICALL_flush_page_update_queue(void);
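Two side effects of the header change are worth noting. First, flush_page_update_queue() no longer returns the pre-flush index, so any caller that tested the old inline's return value would need adjusting. Second, the macro declares the per-CPU variable locally with DECLARE_PER_CPU, so the header does not have to export it globally; at a call site the macro expands roughly as follows (an illustrative expansion, not compiler output):

    do {
        /* Declaration only; the storage is defined once in hypervisor.c
         * by DEFINE_PER_CPU(unsigned int, mmu_update_queue_idx). */
        DECLARE_PER_CPU(unsigned int, mmu_update_queue_idx);
        if (per_cpu(mmu_update_queue_idx, smp_processor_id()))
            _flush_page_update_queue();
    } while (0);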