
changeset 19962:7406764457a0

x86: move init_tss into per-CPU space

Signed-off-by: Jan Beulich <jbeulich@novell.com>
author Keir Fraser <keir.fraser@citrix.com>
date Mon Jul 13 11:31:08 2009 +0100 (2009-07-13)
parents 895695d91ec1
children ed76e4bbea83
files xen/arch/x86/acpi/suspend.c xen/arch/x86/cpu/common.c xen/arch/x86/domain.c xen/arch/x86/hvm/vmx/vmcs.c xen/arch/x86/setup.c xen/arch/x86/traps.c xen/arch/x86/x86_32/mm.c xen/arch/x86/x86_32/supervisor_mode_kernel.S xen/arch/x86/x86_32/traps.c xen/arch/x86/x86_64/traps.c xen/include/asm-x86/processor.h
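The patch replaces the statically sized init_tss[NR_CPUS] array with a per-CPU variable: the TSS is now declared with DEFINE_PER_CPU()/DECLARE_PER_CPU() and reached through this_cpu() on the executing CPU or per_cpu() for an explicit CPU number. The standalone toy program below only mirrors that accessor-level change for readers unfamiliar with the pattern; the macro bodies, the stub smp_processor_id(), and the struct layout are illustrative assumptions, not Xen's real per-CPU implementation (which places such variables in a dedicated per-CPU data area rather than an array).

    #include <stdio.h>

    /* Toy model of the access-pattern change; not Xen's real percpu code. */
    #define NR_CPUS 4

    struct tss_struct { unsigned long esp0, esp1; unsigned short ss1; };

    static int smp_processor_id(void) { return 1; }  /* stub: pretend we run on CPU 1 */

    /* Old pattern (removed by the patch): a global NR_CPUS-sized array. */
    static struct tss_struct init_tss_old[NR_CPUS];

    /* New pattern (added by the patch): per-CPU copies reached via accessor
     * macros.  The backing store here is still an array for simplicity; real
     * per-CPU data lives at a per-CPU offset in a dedicated section. */
    static struct tss_struct percpu_init_tss[NR_CPUS];
    #define per_cpu(var, cpu)  (percpu_##var[cpu])
    #define this_cpu(var)      per_cpu(var, smp_processor_id())

    int main(void)
    {
        init_tss_old[smp_processor_id()].esp1 = 0x1000;  /* old: explicit index */
        this_cpu(init_tss).esp1 = 0x1000;                /* new: current CPU    */
        per_cpu(init_tss, 2).esp1 = 0x2000;              /* new: specific CPU   */

        printf("cpu1 esp1=%#lx, cpu2 esp1=%#lx\n",
               this_cpu(init_tss).esp1, per_cpu(init_tss, 2).esp1);
        return 0;
    }

The diff below applies exactly this substitution at every use site, plus the matching definition/declaration changes in setup.c and processor.h and an adjustment to the one assembly user of init_tss.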
line diff
     1.1 --- a/xen/arch/x86/acpi/suspend.c	Mon Jul 13 11:19:31 2009 +0100
     1.2 +++ b/xen/arch/x86/acpi/suspend.c	Mon Jul 13 11:31:08 2009 +0100
     1.3 @@ -57,7 +57,7 @@ void restore_rest_processor_state(void)
     1.4      }
     1.5  #else /* !defined(CONFIG_X86_64) */
     1.6      if ( supervisor_mode_kernel && cpu_has_sep )
     1.7 -        wrmsr(MSR_IA32_SYSENTER_ESP, &init_tss[smp_processor_id()].esp1, 0);
     1.8 +        wrmsr(MSR_IA32_SYSENTER_ESP, &this_cpu(init_tss).esp1, 0);
     1.9  #endif
    1.10  
    1.11      /* Maybe load the debug registers. */
     2.1 --- a/xen/arch/x86/cpu/common.c	Mon Jul 13 11:19:31 2009 +0100
     2.2 +++ b/xen/arch/x86/cpu/common.c	Mon Jul 13 11:31:08 2009 +0100
     2.3 @@ -576,7 +576,7 @@ void __init early_cpu_init(void)
     2.4  void __cpuinit cpu_init(void)
     2.5  {
     2.6  	int cpu = smp_processor_id();
     2.7 -	struct tss_struct *t = &init_tss[cpu];
     2.8 +	struct tss_struct *t = &this_cpu(init_tss);
     2.9  	struct desc_ptr gdt_desc = {
    2.10  		.base = (unsigned long)(this_cpu(gdt_table) - FIRST_RESERVED_GDT_ENTRY),
    2.11  		.limit = LAST_RESERVED_GDT_BYTE
     3.1 --- a/xen/arch/x86/domain.c	Mon Jul 13 11:19:31 2009 +0100
     3.2 +++ b/xen/arch/x86/domain.c	Mon Jul 13 11:31:08 2009 +0100
     3.3 @@ -1223,7 +1223,7 @@ static void save_segments(struct vcpu *v
     3.4  
     3.5  static inline void switch_kernel_stack(struct vcpu *v)
     3.6  {
     3.7 -    struct tss_struct *tss = &init_tss[smp_processor_id()];
     3.8 +    struct tss_struct *tss = &this_cpu(init_tss);
     3.9      tss->esp1 = v->arch.guest_context.kernel_sp;
    3.10      tss->ss1  = v->arch.guest_context.kernel_ss;
    3.11  }
     4.1 --- a/xen/arch/x86/hvm/vmx/vmcs.c	Mon Jul 13 11:19:31 2009 +0100
     4.2 +++ b/xen/arch/x86/hvm/vmx/vmcs.c	Mon Jul 13 11:31:08 2009 +0100
     4.3 @@ -502,7 +502,7 @@ static void vmx_set_host_env(struct vcpu
     4.4      __vmwrite(HOST_IDTR_BASE, (unsigned long)idt_tables[cpu]);
     4.5  
     4.6      __vmwrite(HOST_TR_SELECTOR, TSS_ENTRY << 3);
     4.7 -    __vmwrite(HOST_TR_BASE, (unsigned long)&init_tss[cpu]);
     4.8 +    __vmwrite(HOST_TR_BASE, (unsigned long)&per_cpu(init_tss, cpu));
     4.9  
    4.10      __vmwrite(HOST_SYSENTER_ESP, get_stack_bottom());
    4.11  
     5.1 --- a/xen/arch/x86/setup.c	Mon Jul 13 11:19:31 2009 +0100
     5.2 +++ b/xen/arch/x86/setup.c	Mon Jul 13 11:31:08 2009 +0100
     5.3 @@ -117,7 +117,7 @@ DEFINE_PER_CPU(struct desc_struct *, com
     5.4      = boot_cpu_compat_gdt_table;
     5.5  #endif
     5.6  
     5.7 -struct tss_struct init_tss[NR_CPUS];
     5.8 +DEFINE_PER_CPU(struct tss_struct, init_tss);
     5.9  
    5.10  char __attribute__ ((__section__(".bss.stack_aligned"))) cpu0_stack[STACK_SIZE];
    5.11  
     6.1 --- a/xen/arch/x86/traps.c	Mon Jul 13 11:19:31 2009 +0100
     6.2 +++ b/xen/arch/x86/traps.c	Mon Jul 13 11:31:08 2009 +0100
     6.3 @@ -326,7 +326,7 @@ void show_stack_overflow(unsigned int cp
     6.4  
     6.5      printk("Valid stack range: %p-%p, sp=%p, tss.esp0=%p\n",
     6.6             (void *)esp_top, (void *)esp_bottom, (void *)esp,
     6.7 -           (void *)init_tss[cpu].esp0);
     6.8 +           (void *)per_cpu(init_tss, cpu).esp0);
     6.9  
    6.10      /* Trigger overflow trace if %esp is within 512 bytes of the guard page. */
    6.11      if ( ((unsigned long)(esp - esp_top) > 512) &&
    6.12 @@ -3066,7 +3066,7 @@ void set_intr_gate(unsigned int n, void 
    6.13  
    6.14  void load_TR(void)
    6.15  {
    6.16 -    struct tss_struct *tss = &init_tss[smp_processor_id()];
    6.17 +    struct tss_struct *tss = &this_cpu(init_tss);
    6.18      struct desc_ptr old_gdt, tss_gdt = {
    6.19          .base = (long)(this_cpu(gdt_table) - FIRST_RESERVED_GDT_ENTRY),
    6.20          .limit = LAST_RESERVED_GDT_BYTE
     7.1 --- a/xen/arch/x86/x86_32/mm.c	Mon Jul 13 11:19:31 2009 +0100
     7.2 +++ b/xen/arch/x86/x86_32/mm.c	Mon Jul 13 11:31:08 2009 +0100
     7.3 @@ -227,8 +227,7 @@ long subarch_memory_op(int op, XEN_GUEST
     7.4  
     7.5  long do_stack_switch(unsigned long ss, unsigned long esp)
     7.6  {
     7.7 -    int nr = smp_processor_id();
     7.8 -    struct tss_struct *t = &init_tss[nr];
     7.9 +    struct tss_struct *t = &this_cpu(init_tss);
    7.10  
    7.11      fixup_guest_stack_selector(current->domain, ss);
    7.12  
     8.1 --- a/xen/arch/x86/x86_32/supervisor_mode_kernel.S	Mon Jul 13 11:19:31 2009 +0100
     8.2 +++ b/xen/arch/x86/x86_32/supervisor_mode_kernel.S	Mon Jul 13 11:31:08 2009 +0100
     8.3 @@ -102,8 +102,8 @@ ENTRY(fixup_ring0_guest_stack)
     8.4  
     8.5          movl  $PER_CPU_GDT_ENTRY*8,%ecx
     8.6          lsll  %ecx,%ecx
     8.7 -        shll  $7,%ecx                                   # Each TSS entry is 0x80 bytes
     8.8 -        addl  $init_tss,%ecx
     8.9 +        shll  $PERCPU_SHIFT,%ecx
    8.10 +        addl  $per_cpu__init_tss,%ecx
    8.11  
    8.12          # Load Xen stack from TSS.
    8.13          movw  TSS_ss0(%ecx),%ax
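The assembly hunk above has to locate the current CPU's TSS without a usable stack: lsll on the PER_CPU_GDT_ENTRY descriptor yields the CPU number (the same trick the do_double_fault hunk below uses), which was previously scaled by sizeof(struct tss_struct) == 0x80 and added to the init_tss array base; it is now scaled by the per-CPU stride (1 << PERCPU_SHIFT) and added to per_cpu__init_tss, the symbol DEFINE_PER_CPU emits for the variable. The sketch below just replays that address arithmetic with made-up numbers; the PERCPU_SHIFT value and both base addresses are assumptions for illustration only.

    #include <stdio.h>

    /* Replays the address computation in the hunk above with invented values. */
    #define PERCPU_SHIFT 13                  /* assumed per-CPU stride: 1 << 13 bytes */
    #define TSS_SIZE     0x80                /* per the removed "0x80 bytes" comment  */

    int main(void)
    {
        unsigned long cpu = 2;                           /* CPU id, as read with lsll      */
        unsigned long init_tss_array    = 0xff100000UL;  /* pretend old array base         */
        unsigned long per_cpu__init_tss = 0xff200000UL;  /* pretend per-CPU symbol address */

        /* Old: shll $7, then addl $init_tss  ->  init_tss + cpu * 0x80 */
        unsigned long old_tss = init_tss_array + cpu * TSS_SIZE;

        /* New: shll $PERCPU_SHIFT, then addl $per_cpu__init_tss
         *      ->  per_cpu__init_tss + cpu * (1 << PERCPU_SHIFT) */
        unsigned long new_tss = per_cpu__init_tss + (cpu << PERCPU_SHIFT);

        printf("old &init_tss[%lu] = %#lx, new &per_cpu(init_tss, %lu) = %#lx\n",
               cpu, old_tss, cpu, new_tss);
        return 0;
    }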
     9.1 --- a/xen/arch/x86/x86_32/traps.c	Mon Jul 13 11:19:31 2009 +0100
     9.2 +++ b/xen/arch/x86/x86_32/traps.c	Mon Jul 13 11:31:08 2009 +0100
     9.3 @@ -204,7 +204,7 @@ asmlinkage void do_double_fault(void)
     9.4      asm ( "lsll %1, %0" : "=r" (cpu) : "rm" (PER_CPU_GDT_ENTRY << 3) );
     9.5  
     9.6      /* Find information saved during fault and dump it to the console. */
     9.7 -    tss = &init_tss[cpu];
     9.8 +    tss = &per_cpu(init_tss, cpu);
     9.9      printk("*** DOUBLE FAULT ***\n");
    9.10      print_xen_info();
    9.11      printk("CPU:    %d\nEIP:    %04x:[<%08x>]",
    10.1 --- a/xen/arch/x86/x86_64/traps.c	Mon Jul 13 11:19:31 2009 +0100
    10.2 +++ b/xen/arch/x86/x86_64/traps.c	Mon Jul 13 11:31:08 2009 +0100
    10.3 @@ -433,13 +433,13 @@ void __devinit subarch_percpu_traps_init
    10.4      BUILD_BUG_ON((IST_MAX + 2) * PAGE_SIZE + PRIMARY_STACK_SIZE > STACK_SIZE);
    10.5  
    10.6      /* Machine Check handler has its own per-CPU 4kB stack. */
    10.7 -    init_tss[cpu].ist[IST_MCE] = (unsigned long)&stack[IST_MCE * PAGE_SIZE];
    10.8 +    this_cpu(init_tss).ist[IST_MCE] = (unsigned long)&stack[IST_MCE * PAGE_SIZE];
    10.9  
   10.10      /* Double-fault handler has its own per-CPU 4kB stack. */
   10.11 -    init_tss[cpu].ist[IST_DF] = (unsigned long)&stack[IST_DF * PAGE_SIZE];
   10.12 +    this_cpu(init_tss).ist[IST_DF] = (unsigned long)&stack[IST_DF * PAGE_SIZE];
   10.13  
   10.14      /* NMI handler has its own per-CPU 4kB stack. */
   10.15 -    init_tss[cpu].ist[IST_NMI] = (unsigned long)&stack[IST_NMI * PAGE_SIZE];
   10.16 +    this_cpu(init_tss).ist[IST_NMI] = (unsigned long)&stack[IST_NMI * PAGE_SIZE];
   10.17  
   10.18      /* Trampoline for SYSCALL entry from long mode. */
   10.19      stack = &stack[IST_MAX * PAGE_SIZE]; /* Skip the IST stacks. */
    11.1 --- a/xen/include/asm-x86/processor.h	Mon Jul 13 11:19:31 2009 +0100
    11.2 +++ b/xen/include/asm-x86/processor.h	Mon Jul 13 11:31:08 2009 +0100
    11.3 @@ -456,7 +456,7 @@ struct tss_struct {
    11.4  extern idt_entry_t idt_table[];
    11.5  extern idt_entry_t *idt_tables[];
    11.6  
    11.7 -extern struct tss_struct init_tss[NR_CPUS];
    11.8 +DECLARE_PER_CPU(struct tss_struct, init_tss);
    11.9  
   11.10  extern void init_int80_direct_trap(struct vcpu *v);
   11.11