void restore_rest_processor_state(void)
{
-    int cpu = smp_processor_id();
-    struct tss_struct *t = &init_tss[cpu];
    struct vcpu *v = current;

-    /* Rewriting the TSS desc is necessary to clear the Busy flag. */
-    set_tss_desc(cpu, t);
-    load_TR(cpu);
+    load_TR();

#if defined(CONFIG_X86_64)
    /* Recover syscall MSRs */
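The first hunk is the S3 resume path. It previously had to rewrite the TSS descriptor before reloading TR because LTR raises #GP on a descriptor whose busy (B) bit is still set from the pre-suspend TR load; the CPU sets B as a side effect of every successful LTR. The new load_TR(), introduced in the next hunk as a replacement for set_tss_desc(), folds the descriptor rewrite and the TR reload into one routine, so the resume path shrinks to a single call. A minimal sketch of the hardware rule, using illustrative constants and a hypothetical helper rather than anything in this patch:

#include <stdint.h>

#define TSS_TYPE_AVAIL 0x9  /* "available TSS": LTR accepts this */
#define TSS_TYPE_BUSY  0xB  /* "busy TSS": LTR faults with #GP */

/* The 4-bit type field of a TSS descriptor sits in bits 40-43 of the
 * 8-byte entry; the B (busy) bit is bit 41. */
static inline void clear_tss_busy(uint64_t *gdt_entry)
{
    *gdt_entry &= ~(1ULL << 41);  /* clear B so the next LTR succeeds */
}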
    __set_intr_gate(n, 0, addr);
}
-void set_tss_desc(unsigned int n, void *addr)
+void load_TR(void)
{
+    struct tss_struct *tss = &init_tss[smp_processor_id()];
+    struct desc_ptr old_gdt, tss_gdt = {
+        .base = (long)(this_cpu(gdt_table) - FIRST_RESERVED_GDT_ENTRY),
+        .limit = LAST_RESERVED_GDT_BYTE
+    };
+
    _set_tssldt_desc(
-        per_cpu(gdt_table, n) + TSS_ENTRY - FIRST_RESERVED_GDT_ENTRY,
-        (unsigned long)addr,
+        this_cpu(gdt_table) + TSS_ENTRY - FIRST_RESERVED_GDT_ENTRY,
+        (unsigned long)tss,
        offsetof(struct tss_struct, __cacheline_filler) - 1,
        9);
#ifdef CONFIG_COMPAT
    _set_tssldt_desc(
-        per_cpu(compat_gdt_table, n) + TSS_ENTRY - FIRST_RESERVED_GDT_ENTRY,
-        (unsigned long)addr,
+        this_cpu(compat_gdt_table) + TSS_ENTRY - FIRST_RESERVED_GDT_ENTRY,
+        (unsigned long)tss,
        offsetof(struct tss_struct, __cacheline_filler) - 1,
-        9);
+        11);
#endif
+
+    /* Switch to non-compat GDT (which has B bit clear) to execute LTR. */
+    asm volatile (
+        "sgdt %0; lgdt %2; ltr %%ax; lgdt %0"
+        : "=m" (old_gdt) : "a" (TSS_ENTRY << 3), "m" (tss_gdt) : "memory" );
}
void __devinit percpu_traps_init(void)
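Two details of the new load_TR() are worth spelling out. The full GDT's TSS descriptor is written as type 9 (available) because LTR is about to execute against it and will flip it to type 11 (busy); the compat GDT's copy is written as type 11 up front, so it already agrees with TR's busy state whenever that GDT is live, and LTR is never executed against it. The sgdt/lgdt bracket makes LTR run on the freshly written non-compat GDT even if the CPU is currently on the compat one, then restores whatever GDT was loaded before (old_gdt must be an output operand, since sgdt writes it). For reference, a simplified sketch of how a helper in the spirit of _set_tssldt_desc packs a 16-byte x86-64 system descriptor; the field layout is the architectural one, the names are illustrative, and the real macro must also order its writes so the entry is never seen half-written:

#include <stdint.h>

struct sys_desc { uint32_t a, b; };  /* one 8-byte half of the entry */

static void pack_tssldt_desc(struct sys_desc d[2], unsigned long base,
                             unsigned int limit, unsigned int type)
{
    d[0].a = ((uint32_t)base << 16) | (limit & 0xffff);  /* base[15:0], limit[15:0] */
    d[0].b = ((uint32_t)base & 0xff000000)            /* base[31:24]            */
             | (limit & 0xf0000)                      /* limit[19:16]           */
             | ((type & 0xf) << 8)                    /* type: 9 avail, 11 busy */
             | 0x8000                                 /* P (present), DPL 0     */
             | (((uint32_t)base & 0x00ff0000) >> 16); /* base[23:16]            */
    d[1].a = (uint32_t)((uint64_t)base >> 32);        /* base[63:32]            */
    d[1].b = 0;                                       /* reserved, must be zero */
}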
#ifndef __ASSEMBLY__
-#define load_TR(n) __asm__ __volatile__ ("ltr %%ax" : : "a" (TSS_ENTRY<<3) )
-
#if defined(__x86_64__)
#define GUEST_KERNEL_RPL(d) (is_pv_32bit_domain(d) ? 1 : 3)
#elif defined(__i386__)
#endif
extern void set_intr_gate(unsigned int irq, void * addr);
-extern void set_tss_desc(unsigned int n, void *addr);
+extern void load_TR(void);
#endif /* !__ASSEMBLY__ */
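Finally, the header hunk drops the old load_TR() macro, whose n argument was already unused, in favour of the new out-of-line function. The TSS_ENTRY << 3 operand it fed to ltr is the standard x86 selector encoding: descriptor index in bits 3-15, table indicator (GDT vs LDT) in bit 2, RPL in bits 0-1. A hypothetical helper, not part of this patch, makes the encoding explicit:

#include <stdint.h>

/* selector = (index << 3) | (TI << 2) | RPL; TI = 0 selects the GDT. */
static inline uint16_t gdt_selector(unsigned int index, unsigned int rpl)
{
    return (uint16_t)((index << 3) | (rpl & 3));
}

/* load_TR() therefore loads gdt_selector(TSS_ENTRY, 0) == TSS_ENTRY << 3. */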