debuggers.hg
changeset 4564:c1b75b4f338c
bitkeeper revision 1.1159.258.93 (425ed73d07ZuW2sSaCdiIwlCRXD8IQ)
Improved handling of non-RAM pages. Deal with RAM holes.
Signed-off-by: Keir Fraser <keir@xensource.com>
author   | kaf24@firebug.cl.cam.ac.uk
---------|---------------------------
date     | Thu Apr 14 20:49:01 2005 +0000 (2005-04-14)
parents  | ecf23e1373cc
children | 5517afc58cf9 36b47a20cf62
files    | xen/arch/x86/memory.c xen/common/grant_table.c xen/include/asm-x86/page.h
line diff
1.1 --- a/xen/arch/x86/memory.c Wed Apr 13 22:21:19 2005 +0000 1.2 +++ b/xen/arch/x86/memory.c Thu Apr 14 20:49:01 2005 +0000 1.3 @@ -101,6 +101,7 @@ 1.4 #include <asm/uaccess.h> 1.5 #include <asm/domain_page.h> 1.6 #include <asm/ldt.h> 1.7 +#include <asm/e820.h> 1.8 1.9 #ifdef VERBOSE 1.10 #define MEM_LOG(_f, _a...) \ 1.11 @@ -168,7 +169,8 @@ void __init init_frametable(void) 1.12 1.13 void arch_init_memory(void) 1.14 { 1.15 - unsigned long i; 1.16 + unsigned long i, j, pfn, nr_pfns; 1.17 + struct pfn_info *page; 1.18 1.19 /* 1.20 * We are rather picky about the layout of 'struct pfn_info'. The 1.21 @@ -203,8 +205,8 @@ void arch_init_memory(void) 1.22 1.23 /* 1.24 * Initialise our DOMID_IO domain. 1.25 - * This domain owns no pages but is considered a special case when 1.26 - * mapping I/O pages, as the mappings occur at the priv of the caller. 1.27 + * This domain owns I/O pages that are within the range of the pfn_info 1.28 + * array. Mappings occur at the priv of the caller. 1.29 */ 1.30 dom_io = alloc_domain_struct(); 1.31 atomic_set(&dom_io->refcnt, 1); 1.32 @@ -213,11 +215,40 @@ void arch_init_memory(void) 1.33 /* M2P table is mappable read-only by privileged domains. */ 1.34 for ( i = 0; i < 1024; i++ ) 1.35 { 1.36 - frame_table[m2p_start_mfn+i].count_info = PGC_allocated | 1; 1.37 - /* gdt to make sure it's only mapped read-only by non-privileged 1.38 - domains. */ 1.39 - frame_table[m2p_start_mfn+i].u.inuse.type_info = PGT_gdt_page | 1; 1.40 - frame_table[m2p_start_mfn+i].u.inuse.domain = dom_xen; 1.41 + /* Ensure it's mapped read-only by guests (use GDT type). */ 1.42 + page = &frame_table[m2p_start_mfn+i]; 1.43 + page->count_info = PGC_allocated | 1; 1.44 + page->u.inuse.type_info = PGT_gdt_page | PGT_validated | 1; 1.45 + page->u.inuse.domain = dom_xen; 1.46 + } 1.47 + 1.48 + /* First 1MB of RAM is historically marked as I/O. 
*/ 1.49 + for ( i = 0; i < 0x100; i++ ) 1.50 + { 1.51 + page = &frame_table[i]; 1.52 + page->count_info = PGC_allocated | 1; 1.53 + page->u.inuse.type_info = PGT_writable_page | PGT_validated | 1; 1.54 + page->u.inuse.domain = dom_io; 1.55 + } 1.56 + 1.57 + /* Any non-RAM areas in the e820 map are considered to be for I/O. */ 1.58 + for ( i = 0; i < e820.nr_map; i++ ) 1.59 + { 1.60 + if ( e820.map[i].type == E820_RAM ) 1.61 + continue; 1.62 + pfn = e820.map[i].addr >> PAGE_SHIFT; 1.63 + nr_pfns = (e820.map[i].size + 1.64 + (e820.map[i].addr & ~PAGE_MASK) + 1.65 + ~PAGE_MASK) >> PAGE_SHIFT; 1.66 + for ( j = 0; j < nr_pfns; j++ ) 1.67 + { 1.68 + if ( !pfn_valid(pfn+j) ) 1.69 + continue; 1.70 + page = &frame_table[pfn+j]; 1.71 + page->count_info = PGC_allocated | 1; 1.72 + page->u.inuse.type_info = PGT_writable_page | PGT_validated | 1; 1.73 + page->u.inuse.domain = dom_io; 1.74 + } 1.75 } 1.76 } 1.77 1.78 @@ -298,13 +329,7 @@ static int get_page_from_pagenr(unsigned 1.79 { 1.80 struct pfn_info *page = &frame_table[page_nr]; 1.81 1.82 - if ( unlikely(!pfn_is_ram(page_nr)) ) 1.83 - { 1.84 - MEM_LOG("Pfn %08lx is not RAM", page_nr); 1.85 - return 0; 1.86 - } 1.87 - 1.88 - if ( unlikely(!get_page(page, d)) ) 1.89 + if ( unlikely(!pfn_valid(page_nr)) || unlikely(!get_page(page, d)) ) 1.90 { 1.91 MEM_LOG("Could not get page ref for pfn %08lx", page_nr); 1.92 return 0; 1.93 @@ -410,20 +435,25 @@ get_page_from_l1e( 1.94 return 0; 1.95 } 1.96 1.97 - if ( unlikely(!pfn_is_ram(pfn)) ) 1.98 + if ( unlikely(!pfn_valid(pfn)) || 1.99 + unlikely(page->u.inuse.domain == dom_io) ) 1.100 { 1.101 - /* Revert to caller privileges if FD == DOMID_IO. */ 1.102 + /* DOMID_IO reverts to caller for privilege checks. 
*/ 1.103 if ( d == dom_io ) 1.104 d = current; 1.105 1.106 - if ( IS_PRIV(d) ) 1.107 + if ( (!IS_PRIV(d)) && 1.108 + (!IS_CAPABLE_PHYSDEV(d) || !domain_iomem_in_pfn(d, pfn)) ) 1.109 + { 1.110 + MEM_LOG("Non-privileged attempt to map I/O space %08lx", pfn); 1.111 + return 0; 1.112 + } 1.113 + 1.114 + /* No reference counting for out-of-range I/O pages. */ 1.115 + if ( !pfn_valid(pfn) ) 1.116 return 1; 1.117 1.118 - if ( IS_CAPABLE_PHYSDEV(d) ) 1.119 - return domain_iomem_in_pfn(d, pfn); 1.120 - 1.121 - MEM_LOG("Non-privileged attempt to map I/O space %08lx", pfn); 1.122 - return 0; 1.123 + d = dom_io; 1.124 } 1.125 1.126 return ((l1v & _PAGE_RW) ? 1.127 @@ -468,7 +498,7 @@ static void put_page_from_l1e(l1_pgentry 1.128 struct pfn_info *page = &frame_table[pfn]; 1.129 struct domain *e; 1.130 1.131 - if ( !(l1v & _PAGE_PRESENT) || !pfn_is_ram(pfn) ) 1.132 + if ( !(l1v & _PAGE_PRESENT) || !pfn_valid(pfn) ) 1.133 return; 1.134 1.135 e = page->u.inuse.domain; 1.136 @@ -1114,7 +1144,7 @@ static int do_extended_command(unsigned 1.137 gntref = (grant_ref_t)((val & 0xFF00) | ((ptr >> 2) & 0x00FF)); 1.138 1.139 if ( unlikely(IS_XEN_HEAP_FRAME(page)) || 1.140 - unlikely(!pfn_is_ram(pfn)) || 1.141 + unlikely(!pfn_valid(pfn)) || 1.142 unlikely((e = find_domain_by_id(domid)) == NULL) ) 1.143 { 1.144 MEM_LOG("Bad frame (%08lx) or bad domid (%d).\n", pfn, domid);
2.1 --- a/xen/common/grant_table.c Wed Apr 13 22:21:19 2005 +0000 2.2 +++ b/xen/common/grant_table.c Thu Apr 14 20:49:01 2005 +0000 2.3 @@ -169,7 +169,7 @@ static void 2.4 2.5 /* rmb(); */ /* not on x86 */ 2.6 frame = sha->frame; 2.7 - if ( unlikely(!pfn_is_ram(frame)) || 2.8 + if ( unlikely(!pfn_valid(frame)) || 2.9 unlikely(!((flags & GNTMAP_readonly) ? 2.10 get_page(&frame_table[frame], rd) : 2.11 get_page_and_type(&frame_table[frame], rd,
3.1 --- a/xen/include/asm-x86/page.h Wed Apr 13 22:21:19 2005 +0000 3.2 +++ b/xen/include/asm-x86/page.h Thu Apr 14 20:49:01 2005 +0000 3.3 @@ -105,14 +105,7 @@ typedef struct { unsigned long pt_lo; } 3.4 #define pfn_to_page(_pfn) (frame_table + (_pfn)) 3.5 #define phys_to_page(kaddr) (frame_table + ((kaddr) >> PAGE_SHIFT)) 3.6 #define virt_to_page(kaddr) (frame_table + (__pa(kaddr) >> PAGE_SHIFT)) 3.7 -#define VALID_PAGE(page) ((page - frame_table) < max_mapnr) 3.8 - 3.9 -/* 3.10 - * NB. We don't currently track I/O holes in the physical RAM space. 3.11 - * For now we guess that I/O devices will be mapped in the first 1MB 3.12 - * (e.g., VGA buffers) or beyond the end of physical RAM. 3.13 - */ 3.14 -#define pfn_is_ram(_pfn) (((_pfn) > 0x100) && ((_pfn) < max_page)) 3.15 +#define pfn_valid(_pfn) ((_pfn) < max_page) 3.16 3.17 /* High table entries are reserved by the hypervisor. */ 3.18 #define DOMAIN_ENTRIES_PER_L2_PAGETABLE \