debuggers.hg
changeset 10680:4db818a7dc3f
[MINIOS] Mapping page frames on demand added to the memory management.
Signed-off-by: Steven Smith <sos22@cam.ac.uk>
Signed-off-by: Grzegorz Milos <gm281@cam.ac.uk>
author | kfraser@localhost.localdomain |
---|---|
date | Wed Jul 05 14:29:13 2006 +0100 (2006-07-05) |
parents | 462d6e4cb29a |
children | bbea54da02b5 |
files | extras/mini-os/include/mm.h extras/mini-os/mm.c |
line diff
1.1 --- a/extras/mini-os/include/mm.h Wed Jul 05 14:27:27 2006 +0100 1.2 +++ b/extras/mini-os/include/mm.h Wed Jul 05 14:29:13 2006 +0100 1.3 @@ -196,9 +196,11 @@ static __inline__ paddr_t machine_to_phy 1.4 #define to_virt(x) ((void *)((unsigned long)(x)+VIRT_START)) 1.5 1.6 #define virt_to_pfn(_virt) (PFN_DOWN(to_phys(_virt))) 1.7 +#define virt_to_mfn(_virt) (pfn_to_mfn(virt_to_pfn(_virt))) 1.8 #define mach_to_virt(_mach) (to_virt(machine_to_phys(_mach))) 1.9 +#define virt_to_mach(_virt) (phys_to_machine(to_phys(_virt))) 1.10 #define mfn_to_virt(_mfn) (to_virt(mfn_to_pfn(_mfn) << PAGE_SHIFT)) 1.11 -#define pfn_to_virt(_pfn) (to_virt(_pfn << PAGE_SHIFT)) 1.12 +#define pfn_to_virt(_pfn) (to_virt((_pfn) << PAGE_SHIFT)) 1.13 1.14 /* Pagetable walking. */ 1.15 #define pte_to_mfn(_pte) (((_pte) & (PADDR_MASK&PAGE_MASK)) >> L1_PAGETABLE_SHIFT) 1.16 @@ -206,7 +208,7 @@ static __inline__ paddr_t machine_to_phy 1.17 1.18 void init_mm(void); 1.19 unsigned long alloc_pages(int order); 1.20 -#define alloc_page() alloc_pages(0); 1.21 +#define alloc_page() alloc_pages(0) 1.22 void free_pages(void *pointer, int order); 1.23 1.24 static __inline__ int get_order(unsigned long size) 1.25 @@ -219,4 +221,6 @@ static __inline__ int get_order(unsigned 1.26 } 1.27 1.28 1.29 +void *map_frames(unsigned long *f, unsigned long n); 1.30 + 1.31 #endif /* _MM_H_ */
2.1 --- a/extras/mini-os/mm.c Wed Jul 05 14:27:27 2006 +0100 2.2 +++ b/extras/mini-os/mm.c Wed Jul 05 14:29:13 2006 +0100 2.3 @@ -343,7 +343,7 @@ void free_pages(void *pointer, int order 2.4 break; 2.5 2.6 /* Merge with successor */ 2.7 - freed_ct = (chunk_tail_t *)((char *)to_merge_ch + mask); 2.8 + freed_ct = (chunk_tail_t *)((char *)to_merge_ch + mask) - 1; 2.9 } 2.10 2.11 /* We are commited to merging, unlink the chunk */ 2.12 @@ -612,6 +612,107 @@ void mem_test(unsigned long *start_add, 2.13 2.14 } 2.15 2.16 +static pgentry_t *demand_map_pgt; 2.17 +static void *demand_map_area_start; 2.18 + 2.19 +static void init_demand_mapping_area(unsigned long max_pfn) 2.20 +{ 2.21 + unsigned long mfn; 2.22 + pgentry_t *tab; 2.23 + unsigned long start_addr; 2.24 + unsigned long pt_pfn; 2.25 + unsigned offset; 2.26 + 2.27 + /* Round up to four megs. + 1024 rather than + 1023 since we want 2.28 + to be sure we don't end up in the same place we started. */ 2.29 + max_pfn = (max_pfn + L1_PAGETABLE_ENTRIES) & ~(L1_PAGETABLE_ENTRIES - 1); 2.30 + if (max_pfn == 0 || 2.31 + (unsigned long)pfn_to_virt(max_pfn + L1_PAGETABLE_ENTRIES) >= 2.32 + HYPERVISOR_VIRT_START) { 2.33 + printk("Too much memory; no room for demand map hole.\n"); 2.34 + do_exit(); 2.35 + } 2.36 + 2.37 + demand_map_area_start = pfn_to_virt(max_pfn); 2.38 + printk("Demand map pfns start at %lx (%p).\n", max_pfn, 2.39 + demand_map_area_start); 2.40 + start_addr = (unsigned long)demand_map_area_start; 2.41 + 2.42 + tab = (pgentry_t *)start_info.pt_base; 2.43 + mfn = virt_to_mfn(start_info.pt_base); 2.44 + pt_pfn = virt_to_pfn(alloc_page()); 2.45 + 2.46 +#if defined(__x86_64__) 2.47 + offset = l4_table_offset(start_addr); 2.48 + if (!(tab[offset] & _PAGE_PRESENT)) { 2.49 + new_pt_frame(&pt_pfn, mfn, offset, L3_FRAME); 2.50 + pt_pfn = virt_to_pfn(alloc_page()); 2.51 + } 2.52 + ASSERT(tab[offset] & _PAGE_PRESENT); 2.53 + mfn = pte_to_mfn(tab[offset]); 2.54 + tab = to_virt(mfn_to_pfn(mfn) << PAGE_SHIFT); 2.55 +#endif 2.56 
+#if defined(__x86_64__) || defined(CONFIG_X86_PAE) 2.57 + offset = l3_table_offset(start_addr); 2.58 + if (!(tab[offset] & _PAGE_PRESENT)) { 2.59 + new_pt_frame(&pt_pfn, mfn, offset, L2_FRAME); 2.60 + pt_pfn = virt_to_pfn(alloc_page()); 2.61 + } 2.62 + ASSERT(tab[offset] & _PAGE_PRESENT); 2.63 + mfn = pte_to_mfn(tab[offset]); 2.64 + tab = to_virt(mfn_to_pfn(mfn) << PAGE_SHIFT); 2.65 +#endif 2.66 + offset = l2_table_offset(start_addr); 2.67 + if (tab[offset] & _PAGE_PRESENT) { 2.68 + printk("Demand map area already has a page table covering it?\n"); 2.69 + BUG(); 2.70 + } 2.71 + demand_map_pgt = pfn_to_virt(pt_pfn); 2.72 + new_pt_frame(&pt_pfn, mfn, offset, L1_FRAME); 2.73 + ASSERT(tab[offset] & _PAGE_PRESENT); 2.74 +} 2.75 + 2.76 +void *map_frames(unsigned long *f, unsigned long n) 2.77 +{ 2.78 + unsigned long x; 2.79 + unsigned long y = 0; 2.80 + mmu_update_t mmu_updates[16]; 2.81 + int rc; 2.82 + 2.83 + if (n > 16) { 2.84 + printk("Tried to map too many (%ld) frames at once.\n", n); 2.85 + return NULL; 2.86 + } 2.87 + 2.88 + /* Find a run of n contiguous frames */ 2.89 + for (x = 0; x <= 1024 - n; x += y + 1) { 2.90 + for (y = 0; y < n; y++) 2.91 + if (demand_map_pgt[y] & _PAGE_PRESENT) 2.92 + break; 2.93 + if (y == n) 2.94 + break; 2.95 + } 2.96 + if (y != n) { 2.97 + printk("Failed to map %ld frames!\n", n); 2.98 + return NULL; 2.99 + } 2.100 + 2.101 + /* Found it at x. Map it in. 
*/ 2.102 + for (y = 0; y < n; y++) { 2.103 + mmu_updates[y].ptr = virt_to_mach(&demand_map_pgt[x + y]); 2.104 + mmu_updates[y].val = (f[y] << PAGE_SHIFT) | L1_PROT; 2.105 + } 2.106 + 2.107 + rc = HYPERVISOR_mmu_update(mmu_updates, n, NULL, DOMID_SELF); 2.108 + if (rc < 0) { 2.109 + printk("Map %ld failed: %d.\n", n, rc); 2.110 + return NULL; 2.111 + } else { 2.112 + return (void *)(unsigned long)((unsigned long)demand_map_area_start + 2.113 + x * PAGE_SIZE); 2.114 + } 2.115 +} 2.116 + 2.117 void init_mm(void) 2.118 { 2.119 2.120 @@ -643,4 +744,24 @@ void init_mm(void) 2.121 (u_long)to_virt(PFN_PHYS(max_pfn)), PFN_PHYS(max_pfn)); 2.122 init_page_allocator(PFN_PHYS(start_pfn), PFN_PHYS(max_pfn)); 2.123 printk("MM: done\n"); 2.124 + 2.125 + init_demand_mapping_area(max_pfn); 2.126 + printk("Initialised demand area.\n"); 2.127 } 2.128 + 2.129 +void sanity_check(void) 2.130 +{ 2.131 + int x; 2.132 + chunk_head_t *head; 2.133 + 2.134 + for (x = 0; x < FREELIST_SIZE; x++) { 2.135 + for (head = free_head[x]; !FREELIST_EMPTY(head); head = head->next) { 2.136 + ASSERT(!allocated_in_map(virt_to_pfn(head))); 2.137 + if (head->next) 2.138 + ASSERT(head->next->pprev == &head->next); 2.139 + } 2.140 + if (free_head[x]) { 2.141 + ASSERT(free_head[x]->pprev == &free_head[x]); 2.142 + } 2.143 + } 2.144 +}