xen-vtx-unstable
changeset 4870:efc62ecb53c6
bitkeeper revision 1.1389.15.22 (42834743b1YPCf2OAXmLFLLg0agiUA)
Merge labyrinth.cl.cam.ac.uk:/auto/groups/xeno-xenod/BK/xeno.bk
into labyrinth.cl.cam.ac.uk:/auto/homes/maw48/xen-3.0-resources.bk
author   | maw48@labyrinth.cl.cam.ac.uk
date     | Thu May 12 12:08:35 2005 +0000 (2005-05-12)
parents  | 9cc14753366a 88c4b08c5b36
children | 6abbf2bf89c0
files    | BitKeeper/etc/logging_ok linux-2.6.11-xen-sparse/drivers/xen/blkback/blkback.c linux-2.6.11-xen-sparse/drivers/xen/blkfront/blkfront.c tools/libxc/xc.h tools/libxc/xc_private.c tools/libxc/xc_ptrace_core.c xen/arch/x86/acpi/boot.c xen/arch/x86/shadow.c xen/arch/x86/traps.c xen/include/asm-x86/domain.h xen/include/asm-x86/shadow.h
line diff
--- a/BitKeeper/etc/logging_ok	Thu May 12 00:59:33 2005 +0000
+++ b/BitKeeper/etc/logging_ok	Thu May 12 12:08:35 2005 +0000
@@ -80,6 +80,7 @@ rn@wyvis.camb.intel-research.net
 rn@wyvis.research.intel-research.net
 rneugeba@wyvis.research
 rneugeba@wyvis.research.intel-research.net
+rusty@rustcorp.com.au
 ryanh@us.ibm.com
 sd386@font.cl.cam.ac.uk
 shand@spidean.research.intel-research.net
--- a/linux-2.6.11-xen-sparse/drivers/xen/blkback/blkback.c	Thu May 12 00:59:33 2005 +0000
+++ b/linux-2.6.11-xen-sparse/drivers/xen/blkback/blkback.c	Thu May 12 12:08:35 2005 +0000
@@ -486,12 +486,11 @@ static void dispatch_rw_block_io(blkif_t
         preq.nr_sects += seg[i].nsec;
 
         aop[i].u.map_grant_ref.host_virt_addr = MMAP_VADDR(pending_idx, i);
-
         aop[i].u.map_grant_ref.dom = blkif->domid;
         aop[i].u.map_grant_ref.ref = blkif_gref_from_fas(fas);
-        aop[i].u.map_grant_ref.flags = ( GNTMAP_host_map |
-                                         ( ( operation == READ ) ?
-                                           0 : GNTMAP_readonly ) );
+        aop[i].u.map_grant_ref.flags = GNTMAP_host_map;
+        if ( operation == WRITE )
+            aop[i].u.map_grant_ref.flags |= GNTMAP_readonly;
     }
 
     if ( unlikely(HYPERVISOR_grant_table_op(
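The reworked hunk makes the mapping intent explicit: the backend always asks for a host mapping, and for a disk WRITE it only needs to read the guest's buffer, so the grant mapping can be read-only. Below is a minimal standalone sketch of that flag logic; the helper name and the #define values are illustrative stand-ins, not the real Xen/Linux definitions.

/* Stand-in values for illustration only; the real flags come from Xen's
 * grant-table interface and READ/WRITE from the Linux block layer. */
#define GNTMAP_host_map  (1u << 0)
#define GNTMAP_readonly  (1u << 1)
#define READ   0
#define WRITE  1

/* Mirrors the simplified logic in the hunk above: always request a host
 * mapping, and make it read-only for a disk WRITE, where the backend only
 * reads the guest's buffer. */
static unsigned int blkback_map_flags(int operation)
{
    unsigned int flags = GNTMAP_host_map;
    if ( operation == WRITE )
        flags |= GNTMAP_readonly;
    return flags;
}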
--- a/linux-2.6.11-xen-sparse/drivers/xen/blkfront/blkfront.c	Thu May 12 00:59:33 2005 +0000
+++ b/linux-2.6.11-xen-sparse/drivers/xen/blkfront/blkfront.c	Thu May 12 12:08:35 2005 +0000
@@ -824,7 +824,7 @@ static int blkif_queue_request(unsigned
             buffer_ma >> PAGE_SHIFT,
             ( operation == BLKIF_OP_WRITE ? 1 : 0 ) );
 
-        blk_shadow[id].frame[req->nr_segments] =
+        blk_shadow[req->id].frame[req->nr_segments] =
             buffer_ma >> PAGE_SHIFT;
 
         req->frame_and_sects[req->nr_segments] =
--- a/tools/libxc/xc.h	Thu May 12 00:59:33 2005 +0000
+++ b/tools/libxc/xc.h	Thu May 12 12:08:35 2005 +0000
@@ -421,7 +421,7 @@ int xc_msr_write(int xc_handle, int cpu_
 /**
  * Memory maps a range within one domain to a local address range.  Mappings
  * should be unmapped with munmap and should follow the same rules as mmap
- * regarding page alignment.
+ * regarding page alignment.  Returns NULL on failure.
  *
  * In Linux, the ring queue for the control channel is accessible by mapping
  * the shared_info_frame (from xc_domain_getinfo()) + 2048. The structure
--- a/tools/libxc/xc_private.c	Thu May 12 00:59:33 2005 +0000
+++ b/tools/libxc/xc_private.c	Thu May 12 12:08:35 2005 +0000
@@ -13,18 +13,18 @@ void *xc_map_foreign_batch(int xc_handle
     privcmd_mmapbatch_t ioctlx;
     void *addr;
     addr = mmap(NULL, num*PAGE_SIZE, prot, MAP_SHARED, xc_handle, 0);
-    if ( addr != NULL )
+    if ( addr == MAP_FAILED )
+        return NULL;
+
+    ioctlx.num=num;
+    ioctlx.dom=dom;
+    ioctlx.addr=(unsigned long)addr;
+    ioctlx.arr=arr;
+    if ( ioctl( xc_handle, IOCTL_PRIVCMD_MMAPBATCH, &ioctlx ) < 0 )
     {
-        ioctlx.num=num;
-        ioctlx.dom=dom;
-        ioctlx.addr=(unsigned long)addr;
-        ioctlx.arr=arr;
-        if ( ioctl( xc_handle, IOCTL_PRIVCMD_MMAPBATCH, &ioctlx ) < 0 )
-        {
-            perror("XXXXXXXX");
-            munmap(addr, num*PAGE_SIZE);
-            return 0;
-        }
+        perror("XXXXXXXX");
+        munmap(addr, num*PAGE_SIZE);
+        return NULL;
     }
     return addr;
 
@@ -40,19 +40,19 @@ void *xc_map_foreign_range(int xc_handle
     privcmd_mmap_entry_t entry;
     void *addr;
     addr = mmap(NULL, size, prot, MAP_SHARED, xc_handle, 0);
-    if ( addr != NULL )
+    if ( addr == MAP_FAILED )
+        return NULL;
+
+    ioctlx.num=1;
+    ioctlx.dom=dom;
+    ioctlx.entry=&entry;
+    entry.va=(unsigned long) addr;
+    entry.mfn=mfn;
+    entry.npages=(size+PAGE_SIZE-1)>>PAGE_SHIFT;
+    if ( ioctl( xc_handle, IOCTL_PRIVCMD_MMAP, &ioctlx ) < 0 )
     {
-        ioctlx.num=1;
-        ioctlx.dom=dom;
-        ioctlx.entry=&entry;
-        entry.va=(unsigned long) addr;
-        entry.mfn=mfn;
-        entry.npages=(size+PAGE_SIZE-1)>>PAGE_SHIFT;
-        if ( ioctl( xc_handle, IOCTL_PRIVCMD_MMAP, &ioctlx ) < 0 )
-        {
-            munmap(addr, size);
-            return 0;
-        }
+        munmap(addr, size);
+        return NULL;
    }
    return addr;
 }
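The key detail behind both fixes is that mmap() reports failure by returning MAP_FAILED ((void *)-1), never NULL, so the old "if ( addr != NULL )" test treated a failed mapping as success. A minimal standalone sketch of the corrected pattern (the function name is hypothetical; the libxc wrappers themselves return NULL to their callers):

#include <stdio.h>
#include <sys/mman.h>

/* Illustrative only: check mmap() against MAP_FAILED, not NULL. */
void *map_example(int fd, size_t len)
{
    void *addr = mmap(NULL, len, PROT_READ, MAP_SHARED, fd, 0);
    if (addr == MAP_FAILED) {
        perror("mmap");
        return NULL;     /* callers of the libxc wrappers expect NULL on error */
    }
    return addr;
}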
--- a/tools/libxc/xc_ptrace_core.c	Thu May 12 00:59:33 2005 +0000
+++ b/tools/libxc/xc_ptrace_core.c	Thu May 12 12:08:35 2005 +0000
@@ -107,6 +107,7 @@ map_domain_va(unsigned long domfd, int c
 {
     unsigned long pde, page;
     unsigned long va = (unsigned long)guest_va;
+    void *v;
 
     static unsigned long cr3_phys[MAX_VIRT_CPUS];
     static unsigned long *cr3_virt[MAX_VIRT_CPUS];
@@ -120,13 +121,15 @@ map_domain_va(unsigned long domfd, int c
         cr3_phys[cpu] = cr3[cpu];
         if (cr3_virt[cpu])
             munmap(cr3_virt[cpu], PAGE_SIZE);
-        if ((cr3_virt[cpu] = mmap(NULL, PAGE_SIZE, PROT_READ,
-                        MAP_PRIVATE, domfd, map_mtop_offset(cr3_phys[cpu]))) ==
-            (unsigned long*)0xffffffff)
+        v = mmap(
+            NULL, PAGE_SIZE, PROT_READ, MAP_PRIVATE, domfd,
+            map_mtop_offset(cr3_phys[cpu]));
+        if (v == MAP_FAILED)
         {
             perror("mmap failed");
             goto error_out;
         }
+        cr3_virt[cpu] = v;
     }
     if ((pde = cr3_virt[cpu][vtopdi(va)]) == 0) /* logical address */
         goto error_out;
@@ -137,9 +140,12 @@ map_domain_va(unsigned long domfd, int c
         pde_phys[cpu] = pde;
         if (pde_virt[cpu])
             munmap(pde_virt[cpu], PAGE_SIZE);
-        if ((pde_virt[cpu] = mmap(NULL, PAGE_SIZE, PROT_READ,
-                        MAP_PRIVATE, domfd, map_mtop_offset(pde_phys[cpu]))) == NULL)
+        v = mmap(
+            NULL, PAGE_SIZE, PROT_READ, MAP_PRIVATE, domfd,
+            map_mtop_offset(pde_phys[cpu]));
+        if (v == MAP_FAILED)
             goto error_out;
+        pde_virt[cpu] = v;
     }
     if ((page = pde_virt[cpu][vtopti(va)]) == 0) /* logical address */
         goto error_out;
@@ -150,12 +156,15 @@ map_domain_va(unsigned long domfd, int c
         page_phys[cpu] = page;
         if (page_virt[cpu])
             munmap(page_virt[cpu], PAGE_SIZE);
-        if ((page_virt[cpu] = mmap(NULL, PAGE_SIZE, PROT_READ,
-                        MAP_PRIVATE, domfd, map_mtop_offset(page_phys[cpu]))) == NULL) {
+        v = mmap(
+            NULL, PAGE_SIZE, PROT_READ, MAP_PRIVATE, domfd,
+            map_mtop_offset(page_phys[cpu]));
+        if (v == MAP_FAILED) {
             printf("cr3 %lx pde %lx page %lx pti %lx\n", cr3[cpu], pde, page, vtopti(va));
             page_phys[cpu] = 0;
             goto error_out;
         }
+        page_virt[cpu] = v;
     }
     return (void *)(((unsigned long)page_virt[cpu]) | (va & BSD_PAGE_MASK));
 
--- a/xen/arch/x86/acpi/boot.c	Thu May 12 00:59:33 2005 +0000
+++ b/xen/arch/x86/acpi/boot.c	Thu May 12 12:08:35 2005 +0000
@@ -36,26 +36,13 @@
 #include <asm/io.h>
 #include <asm/irq.h>
 #include <asm/mpspec.h>
+#include <mach_apic.h>
+#include <mach_mpparse.h>
 
 int sbf_port;
 #define end_pfn_map max_page
 #define CONFIG_ACPI_PCI
 
-#ifdef CONFIG_X86_64
-
-static inline void acpi_madt_oem_check(char *oem_id, char *oem_table_id) { }
-extern void __init clustered_apic_check(void);
-static inline int ioapic_setup_disabled(void) { return 0; }
-
-#else /* X86 */
-
-#ifdef CONFIG_X86_LOCAL_APIC
-#include <mach_apic.h>
-#include <mach_mpparse.h>
-#endif /* CONFIG_X86_LOCAL_APIC */
-
-#endif /* X86 */
-
 #define BAD_MADT_ENTRY(entry, end) ( \
     (!entry) || (unsigned long)entry + sizeof(*entry) > end || \
     ((acpi_table_entry_header *)entry)->length != sizeof(*entry))
--- a/xen/arch/x86/shadow.c	Thu May 12 00:59:33 2005 +0000
+++ b/xen/arch/x86/shadow.c	Thu May 12 12:08:35 2005 +0000
@@ -1217,7 +1217,7 @@ static int shadow_mode_table_op(
     int               i, rc = 0;
     struct exec_domain *ed;
 
-    ASSERT(spin_is_locked(&d->arch.shadow_lock));
+    ASSERT(shadow_lock_is_acquired(d));
 
     SH_VLOG("shadow mode table op %lx %lx count %d",
             pagetable_val(d->exec_domain[0]->arch.guest_table),  /* XXX SMP */
@@ -1813,7 +1813,7 @@ shadow_mark_mfn_out_of_sync(struct exec_
     struct pfn_info *page = &frame_table[mfn];
     struct out_of_sync_entry *entry = shadow_alloc_oos_entry(d);
 
-    ASSERT(spin_is_locked(&d->arch.shadow_lock));
+    ASSERT(shadow_lock_is_acquired(d));
     ASSERT(pfn_valid(mfn));
 
 #ifndef NDEBUG
@@ -1943,7 +1943,7 @@ int __shadow_out_of_sync(struct exec_dom
     l2_pgentry_t l2e;
     unsigned long l1pfn, l1mfn;
 
-    ASSERT(spin_is_locked(&d->arch.shadow_lock));
+    ASSERT(shadow_lock_is_acquired(d));
     ASSERT(VALID_M2P(l2pfn));
 
     perfc_incrc(shadow_out_of_sync_calls);
@@ -2127,7 +2127,7 @@ int shadow_remove_all_write_access(
     u32 found = 0, fixups, write_refs;
     unsigned long prediction, predicted_gpfn, predicted_smfn;
 
-    ASSERT(spin_is_locked(&d->arch.shadow_lock));
+    ASSERT(shadow_lock_is_acquired(d));
     ASSERT(VALID_MFN(readonly_gmfn));
 
     perfc_incrc(remove_write_access);
@@ -2245,7 +2245,7 @@ u32 shadow_remove_all_access(struct doma
     if ( unlikely(!shadow_mode_enabled(d)) )
         return 0;
 
-    ASSERT(spin_is_locked(&d->arch.shadow_lock));
+    ASSERT(shadow_lock_is_acquired(d));
     perfc_incrc(remove_all_access);
 
     for (i = 0; i < shadow_ht_buckets; i++)
@@ -2287,7 +2287,7 @@ static int resync_all(struct domain *d,
     int unshadow;
     int changed;
 
-    ASSERT(spin_is_locked(&d->arch.shadow_lock));
+    ASSERT(shadow_lock_is_acquired(d));
 
     for ( entry = d->arch.out_of_sync; entry; entry = entry->next)
     {
@@ -2485,7 +2485,7 @@ void __shadow_sync_all(struct domain *d)
 
     perfc_incrc(shadow_sync_all);
 
-    ASSERT(spin_is_locked(&d->arch.shadow_lock));
+    ASSERT(shadow_lock_is_acquired(d));
 
     // First, remove all write permissions to the page tables
     //
--- a/xen/arch/x86/traps.c	Thu May 12 00:59:33 2005 +0000
+++ b/xen/arch/x86/traps.c	Thu May 12 12:08:35 2005 +0000
@@ -433,10 +433,19 @@ static inline int admin_io_okay(
 #define outl_user(_v, _p, _d, _r) \
     (admin_io_okay(_p, 4, _d, _r) ? outl(_v, _p) : ((void)0))
 
+/* Propagate a fault back to the guest kernel. */
+#define USER_READ_FAULT  4 /* user mode, read fault */
+#define USER_WRITE_FAULT 6 /* user mode, write fault */
+#define PAGE_FAULT(_faultaddr, _errcode)        \
+({  propagate_page_fault(_faultaddr, _errcode); \
+    return EXCRET_fault_fixed;                  \
+})
+
+/* Instruction fetch with error handling. */
 #define insn_fetch(_type, _size, _ptr)          \
 ({  unsigned long _x;                           \
     if ( get_user(_x, (_type *)eip) )           \
-        goto read_fault;                        \
+        PAGE_FAULT(eip, USER_READ_FAULT);       \
     eip += _size; (_type)_x; })
 
 static int emulate_privileged_op(struct cpu_user_regs *regs)
@@ -502,17 +511,17 @@ static int emulate_privileged_op(struct
         case 1:
             data = (u8)inb_user((u16)regs->edx, ed, regs);
             if ( put_user((u8)data, (u8 *)regs->edi) )
-                goto write_fault;
+                PAGE_FAULT(regs->edi, USER_WRITE_FAULT);
             break;
         case 2:
             data = (u16)inw_user((u16)regs->edx, ed, regs);
             if ( put_user((u16)data, (u16 *)regs->edi) )
-                goto write_fault;
+                PAGE_FAULT(regs->edi, USER_WRITE_FAULT);
            break;
        case 4:
            data = (u32)inl_user((u16)regs->edx, ed, regs);
            if ( put_user((u32)data, (u32 *)regs->edi) )
-                goto write_fault;
+                PAGE_FAULT(regs->edi, USER_WRITE_FAULT);
            break;
        }
        regs->edi += (regs->eflags & EF_DF) ? -op_bytes : op_bytes;
@@ -527,17 +536,17 @@ static int emulate_privileged_op(struct
        {
        case 1:
            if ( get_user(data, (u8 *)regs->esi) )
-                goto read_fault;
+                PAGE_FAULT(regs->esi, USER_READ_FAULT);
            outb_user((u8)data, (u16)regs->edx, ed, regs);
            break;
        case 2:
            if ( get_user(data, (u16 *)regs->esi) )
-                goto read_fault;
+                PAGE_FAULT(regs->esi, USER_READ_FAULT);
            outw_user((u16)data, (u16)regs->edx, ed, regs);
            break;
        case 4:
            if ( get_user(data, (u32 *)regs->esi) )
-                goto read_fault;
+                PAGE_FAULT(regs->esi, USER_READ_FAULT);
            outl_user((u32)data, (u16)regs->edx, ed, regs);
            break;
        }
@@ -736,14 +745,6 @@ static int emulate_privileged_op(struct
 
  fail:
     return 0;
-
- read_fault:
-    propagate_page_fault(eip, 4); /* user mode, read fault */
-    return EXCRET_fault_fixed;
-
- write_fault:
-    propagate_page_fault(eip, 6); /* user mode, write fault */
-    return EXCRET_fault_fixed;
 }
 
 asmlinkage int do_general_protection(struct cpu_user_regs *regs)
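PAGE_FAULT() relies on a GCC statement expression whose body executes a return from the enclosing function, which is what lets each fault site bail out directly instead of jumping to the old read_fault/write_fault labels. A small self-contained sketch of the same pattern, with purely illustrative names (GCC-specific, as in the hypervisor source):

/* Illustrative stand-in for error bookkeeping. */
static void record_error(int code) { (void)code; }

/* Like PAGE_FAULT above: the statement expression ends by returning
 * from whatever function the macro is used in. */
#define FAIL_WITH(_code)        \
({  record_error(_code);        \
    return -1;                  \
})

static int parse_byte(const unsigned char *p, int *out)
{
    if (p == NULL)
        FAIL_WITH(1);   /* expands to record_error(1); return -1; */
    *out = *p;
    return 0;
}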
--- a/xen/include/asm-x86/domain.h	Thu May 12 00:59:33 2005 +0000
+++ b/xen/include/asm-x86/domain.h	Thu May 12 12:08:35 2005 +0000
@@ -30,7 +30,7 @@ struct arch_domain
 
     /* Shadow mode status and controls. */
     unsigned int shadow_mode;  /* flags to control shadow table operation */
-    spinlock_t   shadow_lock;
+    unsigned int shadow_nest;  /* Recursive depth of shadow_lock() nesting */
     /* Shadow mode has tainted page reference counts? */
     unsigned int shadow_tainted_refcnts;
--- a/xen/include/asm-x86/shadow.h	Thu May 12 00:59:33 2005 +0000
+++ b/xen/include/asm-x86/shadow.h	Thu May 12 12:08:35 2005 +0000
@@ -60,9 +60,45 @@
 #define __linear_hl2_table ((l1_pgentry_t *)(LINEAR_PT_VIRT_START + \
      (PERDOMAIN_VIRT_START >> (L2_PAGETABLE_SHIFT - L1_PAGETABLE_SHIFT))))
 
-#define shadow_lock_init(_d) spin_lock_init(&(_d)->arch.shadow_lock)
-#define shadow_lock(_d)      do { ASSERT(!spin_is_locked(&(_d)->arch.shadow_lock)); spin_lock(&(_d)->arch.shadow_lock); } while (0)
-#define shadow_unlock(_d)    spin_unlock(&(_d)->arch.shadow_lock)
+/*
+ * For now we use the per-domain BIGLOCK rather than a shadow-specific lock.
+ * We usually have the BIGLOCK already acquired anyway, so this is unlikely
+ * to cause much unnecessary extra serialisation. Also it's a recursive
+ * lock, and there are some code paths containing nested shadow_lock().
+ * The #if0'ed code below is therefore broken until such nesting is removed.
+ */
+#if 0
+#define shadow_lock_init(_d)                    \
+    spin_lock_init(&(_d)->arch.shadow_lock)
+#define shadow_lock_is_acquired(_d)             \
+    spin_is_locked(&(_d)->arch.shadow_lock)
+#define shadow_lock(_d)                         \
+do {                                            \
+    ASSERT(!shadow_lock_is_acquired(_d));       \
+    spin_lock(&(_d)->arch.shadow_lock);         \
+} while (0)
+#define shadow_unlock(_d)                       \
+do {                                            \
+    ASSERT(!shadow_lock_is_acquired(_d));       \
+    spin_unlock(&(_d)->arch.shadow_lock);       \
+} while (0)
+#else
+#define shadow_lock_init(_d)                    \
+    ((_d)->arch.shadow_nest = 0)
+#define shadow_lock_is_acquired(_d)             \
+    (spin_is_locked(&(_d)->big_lock) && ((_d)->arch.shadow_nest != 0))
+#define shadow_lock(_d)                         \
+do {                                            \
+    LOCK_BIGLOCK(_d);                           \
+    (_d)->arch.shadow_nest++;                   \
+} while (0)
+#define shadow_unlock(_d)                       \
+do {                                            \
+    ASSERT(shadow_lock_is_acquired(_d));        \
+    (_d)->arch.shadow_nest--;                   \
+    UNLOCK_BIGLOCK(_d);                         \
+} while (0)
+#endif
 
 #define SHADOW_ENCODE_MIN_MAX(_min, _max) ((((L1_PAGETABLE_ENTRIES - 1) - (_max)) << 16) | (_min))
 #define SHADOW_MIN(_encoded) ((_encoded) & ((1u<<16) - 1))
@@ -403,7 +439,7 @@ static inline int __mark_dirty(struct do
     unsigned long pfn;
     int           rc = 0;
 
-    ASSERT(spin_is_locked(&d->arch.shadow_lock));
+    ASSERT(shadow_lock_is_acquired(d));
     ASSERT(d->arch.shadow_dirty_bitmap != NULL);
 
     if ( !VALID_MFN(mfn) )
@@ -1137,7 +1173,7 @@ static inline unsigned long __shadow_sta
                              ? __gpfn_to_mfn(d, gpfn)
                              : INVALID_MFN);
 
-    ASSERT(spin_is_locked(&d->arch.shadow_lock));
+    ASSERT(shadow_lock_is_acquired(d));
     ASSERT(gpfn == (gpfn & PGT_mfn_mask));
     ASSERT(stype && !(stype & ~PGT_type_mask));
 
@@ -1186,7 +1222,7 @@ shadow_max_pgtable_type(struct domain *d
     struct shadow_status *x;
     u32 pttype = PGT_none, type;
 
-    ASSERT(spin_is_locked(&d->arch.shadow_lock));
+    ASSERT(shadow_lock_is_acquired(d));
     ASSERT(gpfn == (gpfn & PGT_mfn_mask));
 
     perfc_incrc(shadow_max_type);
@@ -1280,7 +1316,7 @@ static inline void delete_shadow_status(
     struct shadow_status *p, *x, *n, *head;
     unsigned long key = gpfn | stype;
 
-    ASSERT(spin_is_locked(&d->arch.shadow_lock));
+    ASSERT(shadow_lock_is_acquired(d));
     ASSERT(!(gpfn & ~PGT_mfn_mask));
     ASSERT(stype && !(stype & ~PGT_type_mask));
 
@@ -1362,7 +1398,7 @@ static inline void set_shadow_status(
 
     SH_VVLOG("set gpfn=%lx gmfn=%lx smfn=%lx t=%lx", gpfn, gmfn, smfn, stype);
 
-    ASSERT(spin_is_locked(&d->arch.shadow_lock));
+    ASSERT(shadow_lock_is_acquired(d));
 
     ASSERT(shadow_mode_translate(d) || gpfn);
     ASSERT(!(gpfn & ~PGT_mfn_mask));
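The net effect is a poor man's recursive lock: the per-domain BIGLOCK supplies the actual mutual exclusion and re-entrancy, while shadow_nest records how many shadow_lock() calls are outstanding so that shadow_lock_is_acquired() can still be asserted throughout shadow.c. A self-contained sketch of the pattern follows; the struct, the stand-in "big lock" helpers, and their names are illustrative only, not the real Xen BIGLOCK/domain API.

#include <assert.h>

struct dom {
    int          big_depth;    /* stand-in recursive lock: current recursion depth */
    unsigned int shadow_nest;  /* outstanding shadow_lock() calls */
};

static void lock_big(struct dom *d)   { d->big_depth++; }   /* re-entrant by design */
static void unlock_big(struct dom *d) { assert(d->big_depth > 0); d->big_depth--; }

/* "Is the shadow lock held?" becomes: big lock held AND nesting count non-zero. */
static int shadow_lock_is_acquired(struct dom *d)
{
    return d->big_depth > 0 && d->shadow_nest != 0;
}

static void shadow_lock(struct dom *d)
{
    lock_big(d);          /* safe even if the caller already holds it */
    d->shadow_nest++;
}

static void shadow_unlock(struct dom *d)
{
    assert(shadow_lock_is_acquired(d));
    d->shadow_nest--;
    unlock_big(d);
}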