debuggers.hg: diff xen/arch/x86/x86_64/mm.c @ 3632:fec8b1778268

bitkeeper revision 1.1159.212.60 (41febc4bKKSkh9u-Zes9v2CmBuLZxA)

More bootstrap fixes for x86/64. Next thing to do is sort out the IDT and
get traps.c working; then we can get rid of a bunch of dummy labels from
end of boot/x86_64.S. We're also going to need some kind of entry.S before
we can safely enable interrupts. Also bear in mind that not all of physical
RAM may be mapped (only first 1GB) and no m2p table is yet allocated or
mapped. Plenty to be done!
author kaf24@viper.(none)
date Mon Jan 31 23:16:27 2005 +0000 (2005-01-31)
parents c754bd0be650
children d55d523078f7
line diff
     1.1 --- a/xen/arch/x86/x86_64/mm.c	Sat Jan 29 22:42:20 2005 +0000
     1.2 +++ b/xen/arch/x86/x86_64/mm.c	Mon Jan 31 23:16:27 2005 +0000
     1.3 @@ -27,39 +27,114 @@
     1.4  #include <asm/fixmap.h>
     1.5  #include <asm/domain_page.h>
     1.6  
     1.7 -static inline void set_pte_phys(unsigned long vaddr,
     1.8 -                                l1_pgentry_t entry)
     1.9 -{
    1.10 -    l4_pgentry_t *l4ent;
    1.11 -    l3_pgentry_t *l3ent;
    1.12 -    l2_pgentry_t *l2ent;
    1.13 -    l1_pgentry_t *l1ent;
    1.14 +unsigned long m2p_start_mfn; /* XXX Kill this (in 32-bit code also). */
    1.15  
    1.16 -    l4ent = &idle_pg_table[l4_table_offset(vaddr)];
    1.17 -    l3ent = l4_pgentry_to_l3(*l4ent) + l3_table_offset(vaddr);
    1.18 -    l2ent = l3_pgentry_to_l2(*l3ent) + l2_table_offset(vaddr);
    1.19 -    l1ent = l2_pgentry_to_l1(*l2ent) + l1_table_offset(vaddr);
    1.20 -    *l1ent = entry;
    1.21 -
    1.22 -    /* It's enough to flush this one mapping. */
    1.23 -    __flush_tlb_one(vaddr);
    1.24 +void *safe_page_alloc(void)
    1.25 +{
    1.26 +    extern int early_boot;
    1.27 +    if ( early_boot )
    1.28 +        return __va(alloc_boot_pages(PAGE_SIZE, PAGE_SIZE));
    1.29 +    return (void *)alloc_xenheap_page();
    1.30  }
    1.31  
    1.32 -
    1.33 -void __set_fixmap(enum fixed_addresses idx, 
    1.34 -                  l1_pgentry_t entry)
    1.35 +/* Map physical byte range (@p, @p+@s) at virt address @v in pagetable @pt. */
    1.36 +int map_pages(
    1.37 +    pagetable_t *pt,
    1.38 +    unsigned long v,
    1.39 +    unsigned long p,
    1.40 +    unsigned long s,
    1.41 +    unsigned long flags)
    1.42  {
    1.43 -    unsigned long address = fix_to_virt(idx);
    1.44 +    l4_pgentry_t *pl4e;
    1.45 +    l3_pgentry_t *pl3e;
    1.46 +    l2_pgentry_t *pl2e;
    1.47 +    l1_pgentry_t *pl1e;
    1.48 +    void         *newpg;
    1.49 +
    1.50 +    while ( s != 0 )
    1.51 +    {
    1.52 +        pl4e = &pt[l4_table_offset(v)];
    1.53 +        if ( !(l4_pgentry_val(*pl4e) & _PAGE_PRESENT) )
    1.54 +        {
    1.55 +            newpg = safe_page_alloc();
    1.56 +            clear_page(newpg);
    1.57 +            *pl4e = mk_l4_pgentry(__pa(newpg) | __PAGE_HYPERVISOR);
    1.58 +        }
    1.59 +
    1.60 +        pl3e = l4_pgentry_to_l3(*pl4e) + l3_table_offset(v);
    1.61 +        if ( !(l3_pgentry_val(*pl3e) & _PAGE_PRESENT) )
    1.62 +        {
    1.63 +            newpg = safe_page_alloc();
    1.64 +            clear_page(newpg);
    1.65 +            *pl3e = mk_l3_pgentry(__pa(newpg) | __PAGE_HYPERVISOR);
    1.66 +        }
    1.67 +
    1.68 +        pl2e = l3_pgentry_to_l2(*pl3e) + l2_table_offset(v);
    1.69 +
    1.70 +        if ( ((s|v|p) & ((1<<L2_PAGETABLE_SHIFT)-1)) == 0 )
    1.71 +        {
    1.72 +            /* Super-page mapping. */
    1.73 +            if ( (l2_pgentry_val(*pl2e) & _PAGE_PRESENT) )
    1.74 +                __flush_tlb_pge();
    1.75 +            *pl2e = mk_l2_pgentry(p|flags|_PAGE_PSE);
    1.76  
    1.77 -    if ( likely(idx < __end_of_fixed_addresses) )
    1.78 -        set_pte_phys(address, entry);
    1.79 -    else
    1.80 -        printk("Invalid __set_fixmap\n");
    1.81 +            v += 1 << L2_PAGETABLE_SHIFT;
    1.82 +            p += 1 << L2_PAGETABLE_SHIFT;
    1.83 +            s -= 1 << L2_PAGETABLE_SHIFT;
    1.84 +        }
    1.85 +        else
    1.86 +        {
    1.87 +            /* Normal page mapping. */
    1.88 +            if ( !(l2_pgentry_val(*pl2e) & _PAGE_PRESENT) )
    1.89 +            {
    1.90 +                newpg = safe_page_alloc();
    1.91 +                clear_page(newpg);
    1.92 +                *pl2e = mk_l2_pgentry(__pa(newpg) | __PAGE_HYPERVISOR);
    1.93 +            }
    1.94 +            pl1e = l2_pgentry_to_l1(*pl2e) + l1_table_offset(v);
    1.95 +            if ( (l1_pgentry_val(*pl1e) & _PAGE_PRESENT) )
    1.96 +                __flush_tlb_one(v);
    1.97 +            *pl1e = mk_l1_pgentry(p|flags);
    1.98 +
    1.99 +            v += 1 << L1_PAGETABLE_SHIFT;
   1.100 +            p += 1 << L1_PAGETABLE_SHIFT;
   1.101 +            s -= 1 << L1_PAGETABLE_SHIFT;
   1.102 +        }
   1.103 +    }
   1.104 +
   1.105 +    return 0;
   1.106 +}
   1.107 +
   1.108 +void __set_fixmap(
   1.109 +    enum fixed_addresses idx, unsigned long p, unsigned long flags)
   1.110 +{
   1.111 +    if ( unlikely(idx >= __end_of_fixed_addresses) )
   1.112 +        BUG();
   1.113 +    map_pages(idle_pg_table, fix_to_virt(idx), p, PAGE_SIZE, flags);
   1.114  }
   1.115  
   1.116  
   1.117  void __init paging_init(void)
   1.118  {
   1.119 +    void *newpt;
   1.120 +
   1.121 +    /* Allocate and map the machine-to-phys table. */
   1.122 +    /* XXX TODO XXX */
   1.123 +
   1.124 +    /* Create page table for ioremap(). */
   1.125 +    newpt = (void *)alloc_xenheap_page();
   1.126 +    clear_page(newpt);
   1.127 +    idle_pg_table[IOREMAP_VIRT_START >> L4_PAGETABLE_SHIFT] = 
   1.128 +        mk_l4_pgentry(__pa(newpt) | __PAGE_HYPERVISOR);
   1.129 +
   1.130 +    /* Create read-only mapping of MPT for guest-OS use. */
   1.131 +    newpt = (void *)alloc_xenheap_page();
   1.132 +    clear_page(newpt);
   1.133 +    idle_pg_table[RO_MPT_VIRT_START >> L4_PAGETABLE_SHIFT] = 
   1.134 +        mk_l4_pgentry((__pa(newpt) | __PAGE_HYPERVISOR | _PAGE_USER) &
   1.135 +                      ~_PAGE_RW);
   1.136 +    /* XXX TODO: Copy appropriate L3 entries from RDWR_MPT_VIRT_START XXX */
   1.137 +
   1.138      /* Set up linear page table mapping. */
   1.139      idle_pg_table[LINEAR_PT_VIRT_START >> L4_PAGETABLE_SHIFT] =
   1.140          mk_l4_pgentry(__pa(idle_pg_table) | __PAGE_HYPERVISOR);
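
For orientation, here is a minimal sketch (not part of this changeset) of how the map_pages()/__set_fixmap() interface added above might be driven once the machine-to-phys table is actually allocated, which the hunk above still marks as an XXX TODO. The mpt_phys/mpt_size parameters, the map_m2p_sketch name and the FIX_EXAMPLE index are illustrative assumptions; only idle_pg_table, RDWR_MPT_VIRT_START and __PAGE_HYPERVISOR appear in this revision.

    /* Hypothetical caller, not part of this changeset: map an already-
     * allocated machine-to-phys table for Xen's read/write use, then
     * point a fixmap slot at it via the new __set_fixmap(). */
    static void __init map_m2p_sketch(unsigned long mpt_phys, unsigned long mpt_size)
    {
        /* map_pages() walks idle_pg_table, allocating intermediate tables
         * through safe_page_alloc() and using superpages whenever v, p and
         * the remaining size are all aligned to L2_PAGETABLE_SHIFT. */
        map_pages(idle_pg_table, RDWR_MPT_VIRT_START, mpt_phys, mpt_size,
                  __PAGE_HYPERVISOR);

        /* Fixmap entries now go through the same path, one page at a time. */
        __set_fixmap(FIX_EXAMPLE, mpt_phys, __PAGE_HYPERVISOR);
    }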