debuggers.hg
changeset 3349:c754bd0be650
bitkeeper revision 1.1159.1.496 (41c85faeMBUejFtICiJueb_Xdh8yJA)
Priv-op emulation in Xen, for RDMSR/WRMSR/WBINVD. Cleaned up Linux
a bit as a result.
| author | kaf24@scramble.cl.cam.ac.uk |
|---|---|
| date | Tue Dec 21 17:38:54 2004 +0000 (2004-12-21) |
| parents | 0d35b10c2fec |
| children | c55cd4b21325 |
| files | .rootkeys linux-2.4.28-xen-sparse/include/asm-xen/msr.h linux-2.6.9-xen-sparse/include/asm-xen/asm-i386/msr.h patches/linux-2.6.9/drm.patch patches/linux-2.6.9/nettel.patch xen/arch/x86/traps.c xen/arch/x86/x86_32/mm.c xen/arch/x86/x86_64/mm.c xen/common/schedule.c xen/common/trace.c xen/include/asm-x86/mm.h xen/include/asm-x86/shadow.h |
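The practical effect of this changeset: a privileged guest kernel no longer needs the Xen-specific msr.h overrides (which funnelled rdmsr/wrmsr through a DOM0_MSR dom0_op hypercall), nor the drm/nettel patches that rewrote raw `wbinvd` asm. From ring 1 the guest simply executes the real RDMSR/WRMSR/WBINVD instructions, takes a #GP(0) fault into Xen, and the new emulate_privileged_op() handler in xen/arch/x86/traps.c (see the diff below) performs the operation on its behalf. As a rough illustration only — not part of this changeset — the stock Linux i386 accessor forms the guest now falls back to look like this:

```c
/*
 * Illustrative sketch: roughly the stock Linux i386 forms that the deleted
 * asm-xen msr.h overrides used to replace.  From ring 1 these raw
 * instructions fault with #GP(0) and are emulated by Xen for privileged
 * domains (see emulate_privileged_op() in xen/arch/x86/traps.c below).
 */
#define rdmsr(msr, val1, val2) \
    __asm__ __volatile__("rdmsr" : "=a" (val1), "=d" (val2) : "c" (msr))

#define wrmsr(msr, val1, val2) \
    __asm__ __volatile__("wrmsr" : : "c" (msr), "a" (val1), "d" (val2))

#define wbinvd() \
    __asm__ __volatile__("wbinvd" : : : "memory")
```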
line diff
--- a/.rootkeys    Tue Dec 21 14:22:26 2004 +0000
+++ b/.rootkeys    Tue Dec 21 17:38:54 2004 +0000
@@ -98,7 +98,6 @@ 3e5a4e673p7PEOyHFm3nHkYX6HQYBg linux-2.4
 40d70c240tW7TWArl1VUgIFH2nVO1A linux-2.4.28-xen-sparse/include/asm-xen/keyboard.h
 3e5a4e678ddsQOpbSiRdy1GRcDc9WA linux-2.4.28-xen-sparse/include/asm-xen/mmu_context.h
 40d06e5b2YWInUX1Xv9amVANwd_2Xg linux-2.4.28-xen-sparse/include/asm-xen/module.h
-3f8707e7ZmZ6TxyX0ZUEfvhA2Pb_xQ linux-2.4.28-xen-sparse/include/asm-xen/msr.h
 3e5a4e67mnQfh-R8KcQCaVo2Oho6yg linux-2.4.28-xen-sparse/include/asm-xen/page.h
 409ba2e7ZfV5hqTvIzxLtpClnxtIzg linux-2.4.28-xen-sparse/include/asm-xen/pci.h
 3e5a4e67uTYU5oEnIDjxuaez8njjqg linux-2.4.28-xen-sparse/include/asm-xen/pgalloc.h
@@ -232,7 +231,6 @@ 40f5623aDMCsWOFO0jktZ4e8sjwvEg linux-2.6
 40f5623arsFXkGdPvIqvFi3yFXGR0Q linux-2.6.9-xen-sparse/include/asm-xen/asm-i386/mach-xen/setup_arch_pre.h
 41811f07Iri9hrvs97t-baxmhOwWDQ linux-2.6.9-xen-sparse/include/asm-xen/asm-i386/mach-xen/smpboot_hooks.h
 4120f807GCO0uqsLqdZj9csxR1Wthw linux-2.6.9-xen-sparse/include/asm-xen/asm-i386/mmu_context.h
-40f5623aFTyFTR-vdiA-KaGxk5JOKQ linux-2.6.9-xen-sparse/include/asm-xen/asm-i386/msr.h
 40f5623adgjZq9nAgCt0IXdWl7udSA linux-2.6.9-xen-sparse/include/asm-xen/asm-i386/page.h
 40f5623a54NuG-7qHihGYmw4wWQnMA linux-2.6.9-xen-sparse/include/asm-xen/asm-i386/param.h
 41137cc1kkvg0cg7uxddcEfjL7L67w linux-2.6.9-xen-sparse/include/asm-xen/asm-i386/pci.h
@@ -306,8 +304,6 @@ 413cb3b5eKxnzoodEqaWn2wrPnHWnA netbsd-2.
 413cb3b5F56TvQWAmO5TsuzhtzLFPQ netbsd-2.0-xen-sparse/sys/arch/xen/xen/xenkbc.c
 413cb3b53nyOv1OIeDSsCXhBFDXvJA netbsd-2.0-xen-sparse/sys/nfs/files.nfs
 413aa1d0oNP8HXLvfPuMe6cSroUfSA patches/linux-2.6.9/agpgart.patch
-413aa1d0ewvSv-ohnNnQQNGsbPTTNA patches/linux-2.6.9/drm.patch
-418abc69J3F638vPO9MYoDGeYilxoQ patches/linux-2.6.9/nettel.patch
 40e1b09db5mN69Ijj0X_Eol-S7dXiw tools/Make.defs
 3f776bd1Hy9rn69ntXBhPReUFw9IEA tools/Makefile
 4124b307nRyK3dhn1hAsvrY76NuV3g tools/check/Makefile
--- a/linux-2.4.28-xen-sparse/include/asm-xen/msr.h    Tue Dec 21 14:22:26 2004 +0000
+++ /dev/null    Thu Jan 01 00:00:00 1970 +0000
@@ -1,138 +0,0 @@
-#ifndef __ASM_MSR_H
-#define __ASM_MSR_H
-
-/*
- * Access to machine-specific registers (available on 586 and better only)
- * Note: the rd* operations modify the parameters directly (without using
- * pointer indirection), this allows gcc to optimize better
- */
-
-#define rdmsr(msr,val1,val2) \
-{ \
-    dom0_op_t op; \
-    op.cmd = DOM0_MSR; \
-    op.u.msr.write = 0; \
-    op.u.msr.msr = msr; \
-    op.u.msr.cpu_mask = (1 << current->processor); \
-    HYPERVISOR_dom0_op(&op); \
-    val1 = op.u.msr.out1; \
-    val2 = op.u.msr.out2; \
-}
-
-#define wrmsr(msr,val1,val2) \
-{ \
-    dom0_op_t op; \
-    op.cmd = DOM0_MSR; \
-    op.u.msr.write = 1; \
-    op.u.msr.cpu_mask = (1 << current->processor); \
-    op.u.msr.msr = msr; \
-    op.u.msr.in1 = val1; \
-    op.u.msr.in2 = val2; \
-    HYPERVISOR_dom0_op(&op); \
-}
-
-#define rdtsc(low,high) \
-    __asm__ __volatile__("rdtsc" : "=a" (low), "=d" (high))
-
-#define rdtscl(low) \
-    __asm__ __volatile__("rdtsc" : "=a" (low) : : "edx")
-
-#define rdtscll(val) \
-    __asm__ __volatile__("rdtsc" : "=A" (val))
-
-#define write_tsc(val1,val2) wrmsr(0x10, val1, val2)
-
-#define rdpmc(counter,low,high) \
-    __asm__ __volatile__("rdpmc" \
-        : "=a" (low), "=d" (high) \
-        : "c" (counter))
-
-/* symbolic names for some interesting MSRs */
-/* Intel defined MSRs. */
-#define MSR_IA32_P5_MC_ADDR 0
-#define MSR_IA32_P5_MC_TYPE 1
-#define MSR_IA32_PLATFORM_ID 0x17
-#define MSR_IA32_EBL_CR_POWERON 0x2a
-
-#define MSR_IA32_APICBASE 0x1b
-#define MSR_IA32_APICBASE_BSP (1<<8)
-#define MSR_IA32_APICBASE_ENABLE (1<<11)
-#define MSR_IA32_APICBASE_BASE (0xfffff<<12)
-
-#define MSR_IA32_UCODE_WRITE 0x79
-#define MSR_IA32_UCODE_REV 0x8b
-
-#define MSR_IA32_BBL_CR_CTL 0x119
-
-#define MSR_IA32_MCG_CAP 0x179
-#define MSR_IA32_MCG_STATUS 0x17a
-#define MSR_IA32_MCG_CTL 0x17b
-
-#define MSR_IA32_THERM_CONTROL 0x19a
-#define MSR_IA32_THERM_INTERRUPT 0x19b
-#define MSR_IA32_THERM_STATUS 0x19c
-#define MSR_IA32_MISC_ENABLE 0x1a0
-
-#define MSR_IA32_DEBUGCTLMSR 0x1d9
-#define MSR_IA32_LASTBRANCHFROMIP 0x1db
-#define MSR_IA32_LASTBRANCHTOIP 0x1dc
-#define MSR_IA32_LASTINTFROMIP 0x1dd
-#define MSR_IA32_LASTINTTOIP 0x1de
-
-#define MSR_IA32_MC0_CTL 0x400
-#define MSR_IA32_MC0_STATUS 0x401
-#define MSR_IA32_MC0_ADDR 0x402
-#define MSR_IA32_MC0_MISC 0x403
-
-#define MSR_P6_PERFCTR0 0xc1
-#define MSR_P6_PERFCTR1 0xc2
-#define MSR_P6_EVNTSEL0 0x186
-#define MSR_P6_EVNTSEL1 0x187
-
-#define MSR_IA32_PERF_STATUS 0x198
-#define MSR_IA32_PERF_CTL 0x199
-
-/* AMD Defined MSRs */
-#define MSR_K6_EFER 0xC0000080
-#define MSR_K6_STAR 0xC0000081
-#define MSR_K6_WHCR 0xC0000082
-#define MSR_K6_UWCCR 0xC0000085
-#define MSR_K6_EPMR 0xC0000086
-#define MSR_K6_PSOR 0xC0000087
-#define MSR_K6_PFIR 0xC0000088
-
-#define MSR_K7_EVNTSEL0 0xC0010000
-#define MSR_K7_PERFCTR0 0xC0010004
-#define MSR_K7_HWCR 0xC0010015
-#define MSR_K7_CLK_CTL 0xC001001b
-#define MSR_K7_FID_VID_CTL 0xC0010041
-#define MSR_K7_VID_STATUS 0xC0010042
-
-/* Centaur-Hauls/IDT defined MSRs. */
-#define MSR_IDT_FCR1 0x107
-#define MSR_IDT_FCR2 0x108
-#define MSR_IDT_FCR3 0x109
-#define MSR_IDT_FCR4 0x10a
-
-#define MSR_IDT_MCR0 0x110
-#define MSR_IDT_MCR1 0x111
-#define MSR_IDT_MCR2 0x112
-#define MSR_IDT_MCR3 0x113
-#define MSR_IDT_MCR4 0x114
-#define MSR_IDT_MCR5 0x115
-#define MSR_IDT_MCR6 0x116
-#define MSR_IDT_MCR7 0x117
-#define MSR_IDT_MCR_CTRL 0x120
-
-/* VIA Cyrix defined MSRs*/
-#define MSR_VIA_FCR 0x1107
-#define MSR_VIA_LONGHAUL 0x110a
-#define MSR_VIA_BCR2 0x1147
-
-/* Transmeta defined MSRs */
-#define MSR_TMTA_LONGRUN_CTRL 0x80868010
-#define MSR_TMTA_LONGRUN_FLAGS 0x80868011
-#define MSR_TMTA_LRTI_READOUT 0x80868018
-#define MSR_TMTA_LRTI_VOLT_MHZ 0x8086801a
-
-#endif /* __ASM_MSR_H */
--- a/linux-2.6.9-xen-sparse/include/asm-xen/asm-i386/msr.h    Tue Dec 21 14:22:26 2004 +0000
+++ /dev/null    Thu Jan 01 00:00:00 1970 +0000
@@ -1,273 +0,0 @@
-#ifndef __ASM_MSR_H
-#define __ASM_MSR_H
-
-#include <asm-xen/hypervisor.h>
-
-/*
- * Access to machine-specific registers (available on 586 and better only)
- * Note: the rd* operations modify the parameters directly (without using
- * pointer indirection), this allows gcc to optimize better
- */
-
-extern int get_smp_processor_id(void);
-
-#define rdmsr(_msr,_val1,_val2) do { \
-    dom0_op_t op; \
-    op.cmd = DOM0_MSR; \
-    op.u.msr.write = 0; \
-    op.u.msr.msr = (_msr); \
-    op.u.msr.cpu_mask = (1 << get_smp_processor_id()); \
-    HYPERVISOR_dom0_op(&op); \
-    (_val1) = op.u.msr.out1; \
-    (_val2) = op.u.msr.out2; \
-} while(0)
-
-#define wrmsr(_msr,_val1,_val2) do { \
-    dom0_op_t op; \
-    op.cmd = DOM0_MSR; \
-    op.u.msr.write = 1; \
-    op.u.msr.cpu_mask = (1 << get_smp_processor_id()); \
-    op.u.msr.msr = (_msr); \
-    op.u.msr.in1 = (_val1); \
-    op.u.msr.in2 = (_val2); \
-    HYPERVISOR_dom0_op(&op); \
-} while(0)
-
-#define rdmsrl(msr,val) do { \
-    unsigned long l__,h__; \
-    rdmsr (msr, l__, h__); \
-    val = l__; \
-    val |= ((u64)h__<<32); \
-} while(0)
-
-static inline void wrmsrl (unsigned long msr, unsigned long long val)
-{
-    unsigned long lo, hi;
-    lo = (unsigned long) val;
-    hi = val >> 32;
-    wrmsr (msr, lo, hi);
-}
-
-#define rdtsc(low,high) \
-    __asm__ __volatile__("rdtsc" : "=a" (low), "=d" (high))
-
-#define rdtscl(low) \
-    __asm__ __volatile__("rdtsc" : "=a" (low) : : "edx")
-
-#define rdtscll(val) \
-    __asm__ __volatile__("rdtsc" : "=A" (val))
-
-#define write_tsc(val1,val2) wrmsr(0x10, val1, val2)
-
-#define rdpmc(counter,low,high) \
-    __asm__ __volatile__("rdpmc" \
-        : "=a" (low), "=d" (high) \
-        : "c" (counter))
-
-/* symbolic names for some interesting MSRs */
-/* Intel defined MSRs. */
-#define MSR_IA32_P5_MC_ADDR 0
-#define MSR_IA32_P5_MC_TYPE 1
-#define MSR_IA32_PLATFORM_ID 0x17
-#define MSR_IA32_EBL_CR_POWERON 0x2a
-
-#define MSR_IA32_APICBASE 0x1b
-#define MSR_IA32_APICBASE_BSP (1<<8)
-#define MSR_IA32_APICBASE_ENABLE (1<<11)
-#define MSR_IA32_APICBASE_BASE (0xfffff<<12)
-
-#define MSR_IA32_UCODE_WRITE 0x79
-#define MSR_IA32_UCODE_REV 0x8b
-
-#define MSR_P6_PERFCTR0 0xc1
-#define MSR_P6_PERFCTR1 0xc2
-
-#define MSR_IA32_BBL_CR_CTL 0x119
-
-#define MSR_IA32_SYSENTER_CS 0x174
-#define MSR_IA32_SYSENTER_ESP 0x175
-#define MSR_IA32_SYSENTER_EIP 0x176
-
-#define MSR_IA32_MCG_CAP 0x179
-#define MSR_IA32_MCG_STATUS 0x17a
-#define MSR_IA32_MCG_CTL 0x17b
-
-/* P4/Xeon+ specific */
-#define MSR_IA32_MCG_EAX 0x180
-#define MSR_IA32_MCG_EBX 0x181
-#define MSR_IA32_MCG_ECX 0x182
-#define MSR_IA32_MCG_EDX 0x183
-#define MSR_IA32_MCG_ESI 0x184
-#define MSR_IA32_MCG_EDI 0x185
-#define MSR_IA32_MCG_EBP 0x186
-#define MSR_IA32_MCG_ESP 0x187
-#define MSR_IA32_MCG_EFLAGS 0x188
-#define MSR_IA32_MCG_EIP 0x189
-#define MSR_IA32_MCG_RESERVED 0x18A
-
-#define MSR_P6_EVNTSEL0 0x186
-#define MSR_P6_EVNTSEL1 0x187
-
-#define MSR_IA32_PERF_STATUS 0x198
-#define MSR_IA32_PERF_CTL 0x199
-
-#define MSR_IA32_THERM_CONTROL 0x19a
-#define MSR_IA32_THERM_INTERRUPT 0x19b
-#define MSR_IA32_THERM_STATUS 0x19c
-#define MSR_IA32_MISC_ENABLE 0x1a0
-
-#define MSR_IA32_DEBUGCTLMSR 0x1d9
-#define MSR_IA32_LASTBRANCHFROMIP 0x1db
-#define MSR_IA32_LASTBRANCHTOIP 0x1dc
-#define MSR_IA32_LASTINTFROMIP 0x1dd
-#define MSR_IA32_LASTINTTOIP 0x1de
-
-#define MSR_IA32_MC0_CTL 0x400
-#define MSR_IA32_MC0_STATUS 0x401
-#define MSR_IA32_MC0_ADDR 0x402
-#define MSR_IA32_MC0_MISC 0x403
-
-/* Pentium IV performance counter MSRs */
-#define MSR_P4_BPU_PERFCTR0 0x300
-#define MSR_P4_BPU_PERFCTR1 0x301
-#define MSR_P4_BPU_PERFCTR2 0x302
-#define MSR_P4_BPU_PERFCTR3 0x303
-#define MSR_P4_MS_PERFCTR0 0x304
-#define MSR_P4_MS_PERFCTR1 0x305
-#define MSR_P4_MS_PERFCTR2 0x306
-#define MSR_P4_MS_PERFCTR3 0x307
-#define MSR_P4_FLAME_PERFCTR0 0x308
-#define MSR_P4_FLAME_PERFCTR1 0x309
-#define MSR_P4_FLAME_PERFCTR2 0x30a
-#define MSR_P4_FLAME_PERFCTR3 0x30b
-#define MSR_P4_IQ_PERFCTR0 0x30c
-#define MSR_P4_IQ_PERFCTR1 0x30d
-#define MSR_P4_IQ_PERFCTR2 0x30e
-#define MSR_P4_IQ_PERFCTR3 0x30f
-#define MSR_P4_IQ_PERFCTR4 0x310
-#define MSR_P4_IQ_PERFCTR5 0x311
-#define MSR_P4_BPU_CCCR0 0x360
-#define MSR_P4_BPU_CCCR1 0x361
-#define MSR_P4_BPU_CCCR2 0x362
-#define MSR_P4_BPU_CCCR3 0x363
-#define MSR_P4_MS_CCCR0 0x364
-#define MSR_P4_MS_CCCR1 0x365
-#define MSR_P4_MS_CCCR2 0x366
-#define MSR_P4_MS_CCCR3 0x367
-#define MSR_P4_FLAME_CCCR0 0x368
-#define MSR_P4_FLAME_CCCR1 0x369
-#define MSR_P4_FLAME_CCCR2 0x36a
-#define MSR_P4_FLAME_CCCR3 0x36b
-#define MSR_P4_IQ_CCCR0 0x36c
-#define MSR_P4_IQ_CCCR1 0x36d
-#define MSR_P4_IQ_CCCR2 0x36e
-#define MSR_P4_IQ_CCCR3 0x36f
-#define MSR_P4_IQ_CCCR4 0x370
-#define MSR_P4_IQ_CCCR5 0x371
-#define MSR_P4_ALF_ESCR0 0x3ca
-#define MSR_P4_ALF_ESCR1 0x3cb
-#define MSR_P4_BPU_ESCR0 0x3b2
-#define MSR_P4_BPU_ESCR1 0x3b3
-#define MSR_P4_BSU_ESCR0 0x3a0
-#define MSR_P4_BSU_ESCR1 0x3a1
-#define MSR_P4_CRU_ESCR0 0x3b8
-#define MSR_P4_CRU_ESCR1 0x3b9
-#define MSR_P4_CRU_ESCR2 0x3cc
-#define MSR_P4_CRU_ESCR3 0x3cd
-#define MSR_P4_CRU_ESCR4 0x3e0
-#define MSR_P4_CRU_ESCR5 0x3e1
-#define MSR_P4_DAC_ESCR0 0x3a8
-#define MSR_P4_DAC_ESCR1 0x3a9
-#define MSR_P4_FIRM_ESCR0 0x3a4
-#define MSR_P4_FIRM_ESCR1 0x3a5
-#define MSR_P4_FLAME_ESCR0 0x3a6
-#define MSR_P4_FLAME_ESCR1 0x3a7
-#define MSR_P4_FSB_ESCR0 0x3a2
-#define MSR_P4_FSB_ESCR1 0x3a3
-#define MSR_P4_IQ_ESCR0 0x3ba
-#define MSR_P4_IQ_ESCR1 0x3bb
-#define MSR_P4_IS_ESCR0 0x3b4
-#define MSR_P4_IS_ESCR1 0x3b5
-#define MSR_P4_ITLB_ESCR0 0x3b6
-#define MSR_P4_ITLB_ESCR1 0x3b7
-#define MSR_P4_IX_ESCR0 0x3c8
-#define MSR_P4_IX_ESCR1 0x3c9
-#define MSR_P4_MOB_ESCR0 0x3aa
-#define MSR_P4_MOB_ESCR1 0x3ab
-#define MSR_P4_MS_ESCR0 0x3c0
-#define MSR_P4_MS_ESCR1 0x3c1
-#define MSR_P4_PMH_ESCR0 0x3ac
-#define MSR_P4_PMH_ESCR1 0x3ad
-#define MSR_P4_RAT_ESCR0 0x3bc
-#define MSR_P4_RAT_ESCR1 0x3bd
-#define MSR_P4_SAAT_ESCR0 0x3ae
-#define MSR_P4_SAAT_ESCR1 0x3af
-#define MSR_P4_SSU_ESCR0 0x3be
-#define MSR_P4_SSU_ESCR1 0x3bf /* guess: not defined in manual */
-#define MSR_P4_TBPU_ESCR0 0x3c2
-#define MSR_P4_TBPU_ESCR1 0x3c3
-#define MSR_P4_TC_ESCR0 0x3c4
-#define MSR_P4_TC_ESCR1 0x3c5
-#define MSR_P4_U2L_ESCR0 0x3b0
-#define MSR_P4_U2L_ESCR1 0x3b1
-
-/* AMD Defined MSRs */
-#define MSR_K6_EFER 0xC0000080
-#define MSR_K6_STAR 0xC0000081
-#define MSR_K6_WHCR 0xC0000082
-#define MSR_K6_UWCCR 0xC0000085
-#define MSR_K6_EPMR 0xC0000086
-#define MSR_K6_PSOR 0xC0000087
-#define MSR_K6_PFIR 0xC0000088
-
-#define MSR_K7_EVNTSEL0 0xC0010000
-#define MSR_K7_EVNTSEL1 0xC0010001
-#define MSR_K7_EVNTSEL2 0xC0010002
-#define MSR_K7_EVNTSEL3 0xC0010003
-#define MSR_K7_PERFCTR0 0xC0010004
-#define MSR_K7_PERFCTR1 0xC0010005
-#define MSR_K7_PERFCTR2 0xC0010006
-#define MSR_K7_PERFCTR3 0xC0010007
-#define MSR_K7_HWCR 0xC0010015
-#define MSR_K7_CLK_CTL 0xC001001b
-#define MSR_K7_FID_VID_CTL 0xC0010041
-#define MSR_K7_FID_VID_STATUS 0xC0010042
-
-/* extended feature register */
-#define MSR_EFER 0xc0000080
-
-/* EFER bits: */
-
-/* Execute Disable enable */
-#define _EFER_NX 11
-#define EFER_NX (1<<_EFER_NX)
-
-/* Centaur-Hauls/IDT defined MSRs. */
-#define MSR_IDT_FCR1 0x107
-#define MSR_IDT_FCR2 0x108
-#define MSR_IDT_FCR3 0x109
-#define MSR_IDT_FCR4 0x10a
-
-#define MSR_IDT_MCR0 0x110
-#define MSR_IDT_MCR1 0x111
-#define MSR_IDT_MCR2 0x112
-#define MSR_IDT_MCR3 0x113
-#define MSR_IDT_MCR4 0x114
-#define MSR_IDT_MCR5 0x115
-#define MSR_IDT_MCR6 0x116
-#define MSR_IDT_MCR7 0x117
-#define MSR_IDT_MCR_CTRL 0x120
-
-/* VIA Cyrix defined MSRs*/
-#define MSR_VIA_FCR 0x1107
-#define MSR_VIA_LONGHAUL 0x110a
-#define MSR_VIA_RNG 0x110b
-#define MSR_VIA_BCR2 0x1147
-
-/* Transmeta defined MSRs */
-#define MSR_TMTA_LONGRUN_CTRL 0x80868010
-#define MSR_TMTA_LONGRUN_FLAGS 0x80868011
-#define MSR_TMTA_LRTI_READOUT 0x80868018
-#define MSR_TMTA_LRTI_VOLT_MHZ 0x8086801a
-
-#endif /* __ASM_MSR_H */
--- a/patches/linux-2.6.9/drm.patch    Tue Dec 21 14:22:26 2004 +0000
+++ /dev/null    Thu Jan 01 00:00:00 1970 +0000
@@ -1,12 +0,0 @@
-diff -ur linux-2.6.9/drivers/char/drm/ati_pcigart.h linux-2.6.9-new/drivers/char/drm/ati_pcigart.h
---- linux-2.6.9/drivers/char/drm/ati_pcigart.h 2004-10-18 22:55:07.000000000 +0100
-+++ linux-2.6.9-new/drivers/char/drm/ati_pcigart.h 2004-11-28 19:42:41.000000000 +0000
-@@ -158,7 +158,7 @@
- 	ret = 1;
- 
- #if defined(__i386__) || defined(__x86_64__)
--	asm volatile ( "wbinvd" ::: "memory" );
-+	wbinvd();
- #else
- 	mb();
- #endif
--- a/patches/linux-2.6.9/nettel.patch    Tue Dec 21 14:22:26 2004 +0000
+++ /dev/null    Thu Jan 01 00:00:00 1970 +0000
@@ -1,30 +0,0 @@
-diff -ur linux-2.6.9/drivers/mtd/maps/nettel.c linux-2.6.9-new/drivers/mtd/maps/nettel.c
---- linux-2.6.9/drivers/mtd/maps/nettel.c 2004-10-18 22:53:44.000000000 +0100
-+++ linux-2.6.9-new/drivers/mtd/maps/nettel.c 2004-11-28 19:45:35.000000000 +0000
-@@ -270,7 +270,7 @@
- 		maxsize = AMD_WINDOW_MAXSIZE;
- 
- 	*amdpar = SC520_PAR(SC520_PAR_BOOTCS, amdaddr, maxsize);
--	__asm__ ("wbinvd");
-+	wbinvd();
- 
- 	nettel_amd_map.phys = amdaddr;
- 	nettel_amd_map.virt = (unsigned long)
-@@ -382,7 +382,7 @@
- 		 */
- 		intel1addr = intel0addr + intel0size;
- 		*intel1par = SC520_PAR(intel1cs, intel1addr, maxsize);
--		__asm__ ("wbinvd");
-+		wbinvd();
- 
- 		maxsize += intel0size;
- 
-@@ -408,7 +408,7 @@
- 	intel1size = intel_mtd->size - intel0size;
- 	if (intel1size > 0) {
- 		*intel1par = SC520_PAR(intel1cs, intel1addr, intel1size);
--		__asm__ ("wbinvd");
-+		wbinvd();
- 	} else {
- 		*intel1par = 0;
- 	}
--- a/xen/arch/x86/traps.c    Tue Dec 21 14:22:26 2004 +0000
+++ b/xen/arch/x86/traps.c    Tue Dec 21 17:38:54 2004 +0000
@@ -51,6 +51,7 @@
 #include <asm/uaccess.h>
 #include <asm/i387.h>
 #include <asm/debugger.h>
+#include <asm/msr.h>
 
 #if defined(__i386__)
 
@@ -481,6 +482,49 @@ asmlinkage int do_page_fault(struct xen_
     return 0;
 }
 
+static int emulate_privileged_op(struct xen_regs *regs)
+{
+    u16 opcode;
+
+    if ( get_user(opcode, (u16 *)regs->eip) || ((opcode & 0xff) != 0x0f) )
+        return 0;
+
+    switch ( opcode >> 8 )
+    {
+    case 0x09: /* WBINVD */
+        if ( !IS_CAPABLE_PHYSDEV(current->domain) )
+        {
+            DPRINTK("Non-physdev domain attempted WBINVD.\n");
+            return 0;
+        }
+        wbinvd();
+        regs->eip += 2;
+        return 1;
+
+    case 0x30: /* WRMSR */
+        if ( !IS_PRIV(current->domain) )
+        {
+            DPRINTK("Non-priv domain attempted WRMSR.\n");
+            return 0;
+        }
+        wrmsr(regs->ecx, regs->eax, regs->edx);
+        regs->eip += 2;
+        return 1;
+
+    case 0x32: /* RDMSR */
+        if ( !IS_PRIV(current->domain) )
+        {
+            DPRINTK("Non-priv domain attempted RDMSR.\n");
+            return 0;
+        }
+        rdmsr(regs->ecx, regs->eax, regs->edx);
+        regs->eip += 2;
+        return 1;
+    }
+
+    return 0;
+}
+
 asmlinkage int do_general_protection(struct xen_regs *regs)
 {
     struct exec_domain *ed = current;
@@ -529,6 +573,12 @@ asmlinkage int do_general_protection(str
         }
     }
 
+    /* Emulate some simple privileged instructions when exec'ed in ring 1. */
+    if ( (regs->error_code == 0) &&
+         RING_1(regs) &&
+         emulate_privileged_op(regs) )
+        return 0;
+
 #if defined(__i386__)
     if ( VM_ASSIST(d, VMASST_TYPE_4gb_segments) &&
          (regs->error_code == 0) &&
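A note on the decode in emulate_privileged_op() above: get_user() fetches a 16-bit little-endian word at the faulting EIP, so the low byte must be the 0x0F two-byte-opcode escape and the high byte selects the instruction; on success EIP is advanced by 2 to skip the emulated instruction. The opcode bytes involved, as an illustrative summary (not code from the changeset):

```c
/* Two-byte opcodes handled by emulate_privileged_op(); illustrative only. */
static const unsigned char wbinvd_insn[2] = { 0x0f, 0x09 }; /* WBINVD                     */
static const unsigned char wrmsr_insn[2]  = { 0x0f, 0x30 }; /* WRMSR: EDX:EAX -> MSR[ECX] */
static const unsigned char rdmsr_insn[2]  = { 0x0f, 0x32 }; /* RDMSR: MSR[ECX] -> EDX:EAX */
/* On little-endian x86 the u16 read by get_user() therefore satisfies
 * (opcode & 0xff) == 0x0f, and (opcode >> 8) is the second opcode byte
 * that the switch statement dispatches on. */
```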
--- a/xen/arch/x86/x86_32/mm.c    Tue Dec 21 14:22:26 2004 +0000
+++ b/xen/arch/x86/x86_32/mm.c    Tue Dec 21 17:38:54 2004 +0000
@@ -466,14 +466,4 @@ void memguard_unguard_range(void *p, uns
     __memguard_change_range(p, l, 0);
 }
 
-int memguard_is_guarded(void *p)
-{
-    l1_pgentry_t *l1;
-    l2_pgentry_t *l2;
-    unsigned long _p = (unsigned long)p;
-    l2 = &idle_pg_table[l2_table_offset(_p)];
-    l1 = l2_pgentry_to_l1(*l2) + l1_table_offset(_p);
-    return !(l1_pgentry_val(*l1) & _PAGE_PRESENT);
-}
-
 #endif
--- a/xen/arch/x86/x86_64/mm.c    Tue Dec 21 14:22:26 2004 +0000
+++ b/xen/arch/x86/x86_64/mm.c    Tue Dec 21 17:38:54 2004 +0000
@@ -363,6 +363,14 @@ long do_update_descriptor(
 
 #ifdef MEMORY_GUARD
 
+#if 1
+
+void *memguard_init(void *heap_start) { return heap_start; }
+void memguard_guard_range(void *p, unsigned long l) {}
+void memguard_unguard_range(void *p, unsigned long l) {}
+
+#else
+
 void *memguard_init(void *heap_start)
 {
     l1_pgentry_t *l1;
@@ -425,14 +433,6 @@ void memguard_unguard_range(void *p, uns
     __memguard_change_range(p, l, 0);
 }
 
-int memguard_is_guarded(void *p)
-{
-    l1_pgentry_t *l1;
-    l2_pgentry_t *l2;
-    unsigned long _p = (unsigned long)p;
-    l2 = &idle_pg_table[l2_table_offset(_p)];
-    l1 = l2_pgentry_to_l1(*l2) + l1_table_offset(_p);
-    return !(l1_pgentry_val(*l1) & _PAGE_PRESENT);
-}
+#endif
 
 #endif
--- a/xen/common/schedule.c    Tue Dec 21 14:22:26 2004 +0000
+++ b/xen/common/schedule.c    Tue Dec 21 17:38:54 2004 +0000
@@ -224,24 +224,24 @@ void domain_sleep(struct exec_domain *d)
     }
 }
 
-void domain_wake(struct exec_domain *d)
+void domain_wake(struct exec_domain *ed)
 {
     unsigned long flags;
 
-    spin_lock_irqsave(&schedule_data[d->processor].schedule_lock, flags);
+    spin_lock_irqsave(&schedule_data[ed->processor].schedule_lock, flags);
 
-    if ( likely(domain_runnable(d)) )
+    if ( likely(domain_runnable(ed)) )
     {
-        TRACE_2D(TRC_SCHED_WAKE, d->id, d);
-        SCHED_OP(wake, d);
+        TRACE_2D(TRC_SCHED_WAKE, ed->domain->id, ed);
+        SCHED_OP(wake, ed);
 #ifdef WAKE_HISTO
-        d->wokenup = NOW();
+        ed->wokenup = NOW();
 #endif
     }
 
-    clear_bit(EDF_MIGRATED, &d->ed_flags);
+    clear_bit(EDF_MIGRATED, &ed->ed_flags);
 
-    spin_unlock_irqrestore(&schedule_data[d->processor].schedule_lock, flags);
+    spin_unlock_irqrestore(&schedule_data[ed->processor].schedule_lock, flags);
 }
 
 /* Block the currently-executing domain until a pertinent event occurs. */
@@ -250,7 +250,7 @@ long do_block(void)
     ASSERT(current->domain->id != IDLE_DOMAIN_ID);
     current->vcpu_info->evtchn_upcall_mask = 0;
     set_bit(EDF_BLOCKED, &current->ed_flags);
-    TRACE_2D(TRC_SCHED_BLOCK, current->id, current);
+    TRACE_2D(TRC_SCHED_BLOCK, current->domain->id, current);
     __enter_scheduler();
     return 0;
 }
@@ -258,7 +258,7 @@ long do_block(void)
 /* Voluntarily yield the processor for this allocation. */
 static long do_yield(void)
 {
-    TRACE_2D(TRC_SCHED_YIELD, current->id, current);
+    TRACE_2D(TRC_SCHED_YIELD, current->domain->id, current);
     __enter_scheduler();
     return 0;
 }
@@ -447,7 +447,7 @@ void __enter_scheduler(void)
     }
 #endif
 
-    TRACE_2D(TRC_SCHED_SWITCH, next->id, next);
+    TRACE_2D(TRC_SCHED_SWITCH, next->domain->id, next);
 
     switch_to(prev, next);
 
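For context on the renames above: the scheduler now operates on exec_domain (per-VCPU) structures rather than on domains directly, so the trace points reach the domain id via ed->domain->id. A rough sketch of the relationship implied by this diff (hypothetical field layout, not the actual Xen headers):

```c
/* Hypothetical sketch of the relationship implied by the schedule.c diff;
 * the field names come from the diff, the layout is not the real header. */
struct domain;                        /* per-domain state, carries the 'id' */
struct exec_domain {
    struct domain     *domain;        /* owning domain: ed->domain->id      */
    int                processor;     /* CPU this exec_domain runs on       */
    unsigned long      ed_flags;      /* EDF_BLOCKED, EDF_MIGRATED, ...     */
    unsigned long long wokenup;       /* timestamp from NOW(), WAKE_HISTO   */
};
```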
--- a/xen/common/trace.c    Tue Dec 21 14:22:26 2004 +0000
+++ b/xen/common/trace.c    Tue Dec 21 17:38:54 2004 +0000
@@ -28,6 +28,8 @@
 #include <asm/atomic.h>
 #include <public/dom0_ops.h>
 
+extern unsigned int opt_tbuf_size;
+
 /* Pointers to the meta-data objects for all system trace buffers */
 struct t_buf *t_bufs[NR_CPUS];
 
@@ -43,7 +45,6 @@ int tb_init_done = 0;
  */
 void init_trace_bufs(void)
 {
-    extern int opt_tbuf_size;
     int i, order;
     unsigned long nr_pages;
     char *rawbuf;
@@ -102,10 +103,8 @@ void init_trace_bufs(void)
  */
 int get_tb_info(dom0_gettbufs_t *st)
 {
-    if(tb_init_done)
+    if ( tb_init_done )
     {
-        extern unsigned int opt_tbuf_size;
-
         st->mach_addr = __pa(t_bufs[0]);
         st->size      = opt_tbuf_size * PAGE_SIZE;
 
--- a/xen/include/asm-x86/mm.h    Tue Dec 21 14:22:26 2004 +0000
+++ b/xen/include/asm-x86/mm.h    Tue Dec 21 17:38:54 2004 +0000
@@ -237,12 +237,10 @@ extern unsigned long *phys_to_machine_ma
 void *memguard_init(void *heap_start);
 void memguard_guard_range(void *p, unsigned long l);
 void memguard_unguard_range(void *p, unsigned long l);
-int memguard_is_guarded(void *p);
 #else
 #define memguard_init(_s)              (_s)
 #define memguard_guard_range(_p,_l)    ((void)0)
 #define memguard_unguard_range(_p,_l)  ((void)0)
-#define memguard_is_guarded(_p)        (0)
 #endif
 
 
--- a/xen/include/asm-x86/shadow.h    Tue Dec 21 14:22:26 2004 +0000
+++ b/xen/include/asm-x86/shadow.h    Tue Dec 21 17:38:54 2004 +0000
@@ -186,15 +186,12 @@ static inline int __mark_dirty( struct m
 #ifndef NDEBUG
     else if ( mfn < max_page )
     {
-        unsigned long *esp;
         SH_LOG("mark_dirty OOR! mfn=%x pfn=%lx max=%x (mm %p)",
                mfn, pfn, m->shadow_dirty_bitmap_size, m );
         SH_LOG("dom=%p caf=%08x taf=%08x\n",
                frame_table[mfn].u.inuse.domain,
                frame_table[mfn].count_info,
               frame_table[mfn].u.inuse.type_info );
-        __asm__ __volatile__ ("movl %%esp,%0" : "=r" (esp) : );
-        show_trace(esp);
     }
 #endif
 