debuggers.hg
changeset 4570:b4ebb22003b1
bitkeeper revision 1.1299 (425eef16s6fX6jXC3hHMyjTl5h4U4Q)
Forward port of I/O-page fix in 2.0 series.
Signed-off-by: Keir Fraser <keir@xensource.com>
author   | kaf24@firebug.cl.cam.ac.uk
date     | Thu Apr 14 22:30:46 2005 +0000 (2005-04-14)
parents  | 99e5c8f17f91
children | c62f4ac13428
files    | xen/arch/x86/mm.c xen/arch/x86/shadow.c xen/common/grant_table.c xen/include/asm-x86/page.h xen/include/asm-x86/shadow.h
line diff
--- a/xen/arch/x86/mm.c	Thu Apr 14 21:23:30 2005 +0000
+++ b/xen/arch/x86/mm.c	Thu Apr 14 22:30:46 2005 +0000
@@ -171,6 +171,9 @@ void arch_init_memory(void)
 {
     extern void subarch_init_memory(struct domain *);
 
+    unsigned long i, j, pfn, nr_pfns;
+    struct pfn_info *page;
+
     memset(percpu_info, 0, sizeof(percpu_info));
 
     /*
@@ -184,13 +187,42 @@ void arch_init_memory(void)
 
     /*
      * Initialise our DOMID_IO domain.
-     * This domain owns no pages but is considered a special case when
-     * mapping I/O pages, as the mappings occur at the priv of the caller.
+     * This domain owns I/O pages that are within the range of the pfn_info
+     * array. Mappings occur at the priv of the caller.
      */
     dom_io = alloc_domain_struct();
     atomic_set(&dom_io->refcnt, 1);
     dom_io->id = DOMID_IO;
 
+    /* First 1MB of RAM is historically marked as I/O. */
+    for ( i = 0; i < 0x100; i++ )
+    {
+        page = &frame_table[i];
+        page->count_info = PGC_allocated | 1;
+        page->u.inuse.type_info = PGT_writable_page | PGT_validated | 1;
+        page_set_owner(page, dom_io);
+    }
+
+    /* Any non-RAM areas in the e820 map are considered to be for I/O. */
+    for ( i = 0; i < e820.nr_map; i++ )
+    {
+        if ( e820.map[i].type == E820_RAM )
+            continue;
+        pfn = e820.map[i].addr >> PAGE_SHIFT;
+        nr_pfns = (e820.map[i].size +
+                   (e820.map[i].addr & ~PAGE_MASK) +
+                   ~PAGE_MASK) >> PAGE_SHIFT;
+        for ( j = 0; j < nr_pfns; j++ )
+        {
+            if ( !pfn_valid(pfn+j) )
+                continue;
+            page = &frame_table[pfn+j];
+            page->count_info = PGC_allocated | 1;
+            page->u.inuse.type_info = PGT_writable_page | PGT_validated | 1;
+            page_set_owner(page, dom_io);
+        }
+    }
+
     subarch_init_memory(dom_xen);
 }
 
@@ -306,13 +338,7 @@ static int get_page_from_pagenr(unsigned
 {
     struct pfn_info *page = &frame_table[page_nr];
 
-    if ( unlikely(!pfn_is_ram(page_nr)) )
-    {
-        MEM_LOG("Pfn %p is not RAM", page_nr);
-        return 0;
-    }
-
-    if ( unlikely(!get_page(page, d)) )
+    if ( unlikely(!pfn_valid(page_nr)) || unlikely(!get_page(page, d)) )
     {
         MEM_LOG("Could not get page ref for pfn %p", page_nr);
         return 0;
@@ -419,20 +445,25 @@ get_page_from_l1e(
         return 0;
     }
 
-    if ( unlikely(!pfn_is_ram(mfn)) )
+    if ( unlikely(!pfn_valid(mfn)) ||
+         unlikely(page_get_owner(page) == dom_io) )
     {
-        /* Revert to caller privileges if FD == DOMID_IO. */
+        /* DOMID_IO reverts to caller for privilege checks. */
         if ( d == dom_io )
             d = current->domain;
 
-        if ( IS_PRIV(d) )
+        if ( (!IS_PRIV(d)) &&
+             (!IS_CAPABLE_PHYSDEV(d) || !domain_iomem_in_pfn(d, mfn)) )
+        {
+            MEM_LOG("Non-privileged attempt to map I/O space %08lx", mfn);
+            return 0;
+        }
+
+        /* No reference counting for out-of-range I/O pages. */
+        if ( !pfn_valid(mfn) )
             return 1;
 
-        if ( IS_CAPABLE_PHYSDEV(d) )
-            return domain_iomem_in_pfn(d, mfn);
-
-        MEM_LOG("Non-privileged attempt to map I/O space %p", mfn);
-        return 0;
+        d = dom_io;
     }
 
     return ((l1v & _PAGE_RW) ?
@@ -529,7 +560,7 @@ void put_page_from_l1e(l1_pgentry_t l1e,
     struct pfn_info *page = &frame_table[pfn];
     struct domain *e;
 
-    if ( !(l1v & _PAGE_PRESENT) || !pfn_is_ram(pfn) )
+    if ( !(l1v & _PAGE_PRESENT) || !pfn_valid(pfn) )
         return;
 
     e = page_get_owner(page);
@@ -2851,7 +2882,7 @@ void ptwr_destroy(struct domain *d)
     gntref = (grant_ref_t)((val & 0xFF00) | ((ptr >> 2) & 0x00FF));
 
     if ( unlikely(IS_XEN_HEAP_FRAME(page)) ||
-         unlikely(!pfn_is_ram(pfn)) ||
+         unlikely(!pfn_valid(pfn)) ||
          unlikely((e = find_domain_by_id(domid)) == NULL) )
     {
         MEM_LOG("Bad frame (%p) or bad domid (%d).\n", pfn, domid);
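The nr_pfns arithmetic in the new e820 loop is worth unpacking: the base pfn rounds the region start down to a frame boundary, while the length gains the base's offset within its first frame plus ~PAGE_MASK (i.e. PAGE_SIZE-1) before the shift, rounding up so every partially-touched frame is counted. A standalone sketch of just that arithmetic; this is not Xen code, and PAGE_SHIFT plus the sample addr/size values are invented for illustration:

/*
 * Sketch of the pfn-range rounding used in the e820 loop above.
 * ~PAGE_MASK equals PAGE_SIZE-1, so the expression rounds the byte range
 * [addr, addr+size) outward to whole-frame boundaries.
 */
#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PAGE_MASK  (~(PAGE_SIZE - 1))

int main(void)
{
    unsigned long addr = 0xfec00800UL; /* hypothetical non-RAM e820 base */
    unsigned long size = 0x1900UL;     /* hypothetical region length     */

    unsigned long pfn     = addr >> PAGE_SHIFT;          /* round base down */
    unsigned long nr_pfns = (size +
                             (addr & ~PAGE_MASK) +       /* offset in first frame */
                             ~PAGE_MASK) >> PAGE_SHIFT;  /* round length up */

    /* The region spans 0xfec00800..0xfec020ff inclusive, so it touches
     * three frames: 0xfec00, 0xfec01 and 0xfec02. */
    printf("first pfn = %#lx, nr_pfns = %lu\n", pfn, nr_pfns);
    return 0;
}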
--- a/xen/arch/x86/shadow.c	Thu Apr 14 21:23:30 2005 +0000
+++ b/xen/arch/x86/shadow.c	Thu Apr 14 22:30:46 2005 +0000
@@ -1727,7 +1727,7 @@ shadow_mark_mfn_out_of_sync(struct exec_
     struct out_of_sync_entry *entry = shadow_alloc_oos_entry(d);
 
     ASSERT(spin_is_locked(&d->arch.shadow_lock));
-    ASSERT(pfn_is_ram(mfn));
+    ASSERT(pfn_valid(mfn));
     ASSERT((page->u.inuse.type_info & PGT_type_mask) == PGT_writable_page);
 
     FSH_LOG("%s(gpfn=%p, mfn=%p) c=%p t=%p", __func__,
--- a/xen/common/grant_table.c	Thu Apr 14 21:23:30 2005 +0000
+++ b/xen/common/grant_table.c	Thu Apr 14 22:30:46 2005 +0000
@@ -161,7 +161,7 @@ static int
 
     frame = __gpfn_to_mfn_foreign(granting_d, sha->frame);
 
-    if ( unlikely(!pfn_is_ram(frame)) ||
+    if ( unlikely(!pfn_valid(frame)) ||
          unlikely(!((dev_hst_ro_flags & GNTMAP_readonly) ?
                     get_page(&frame_table[frame], granting_d) :
                     get_page_and_type(&frame_table[frame], granting_d,
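The conditional reference in this hunk is the interesting part: a read-only grant mapping needs only a general reference (get_page), while a writable mapping also takes a type reference (get_page_and_type with PGT_writable_page), pinning the frame as writable data so it cannot concurrently serve as, say, a page table. A toy sketch of that two-level pattern, with invented struct and function names standing in for Xen's frame_table machinery:

#include <stdbool.h>
#include <stdio.h>

/* Toy model; none of these names are Xen's. */
enum page_type { T_NONE, T_WRITABLE, T_PAGETABLE };

struct page {
    unsigned int refcount;    /* general references, any use           */
    unsigned int type_count;  /* holders that pin the page to its type */
    enum page_type type;
};

/* General reference: sufficient for read-only mappings. */
static bool get_ref(struct page *p)
{
    p->refcount++;
    return true;
}

/* Reference plus type pin: refuses pages already pinned to another type,
 * e.g. a frame currently in use as a page table. */
static bool get_ref_and_type(struct page *p, enum page_type want)
{
    if (p->type_count > 0 && p->type != want)
        return false;
    p->type = want;
    p->type_count++;
    p->refcount++;
    return true;
}

/* Mirrors the (dev_hst_ro_flags & GNTMAP_readonly) ? get_page :
 * get_page_and_type choice in the hunk above. */
static bool map_grant(struct page *p, bool readonly)
{
    return readonly ? get_ref(p) : get_ref_and_type(p, T_WRITABLE);
}

int main(void)
{
    struct page pt = { .refcount = 1, .type_count = 1, .type = T_PAGETABLE };
    printf("read-only map of a page table: %d\n", map_grant(&pt, true));  /* 1: ok      */
    printf("writable map of a page table:  %d\n", map_grant(&pt, false)); /* 0: refused */
    return 0;
}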
--- a/xen/include/asm-x86/page.h	Thu Apr 14 21:23:30 2005 +0000
+++ b/xen/include/asm-x86/page.h	Thu Apr 14 22:30:46 2005 +0000
@@ -35,14 +35,7 @@ typedef struct { unsigned long pt_lo; }
 #define pfn_to_page(_pfn)   (frame_table + (_pfn))
 #define phys_to_page(kaddr) (frame_table + ((kaddr) >> PAGE_SHIFT))
 #define virt_to_page(kaddr) (frame_table + (__pa(kaddr) >> PAGE_SHIFT))
-#define VALID_PAGE(page)    ((page - frame_table) < max_mapnr)
-
-/*
- * NB. We don't currently track I/O holes in the physical RAM space.
- * For now we guess that I/O devices will be mapped in the first 1MB
- * (e.g., VGA buffers) or beyond the end of physical RAM.
- */
-#define pfn_is_ram(_pfn)    (((_pfn) > 0x100) && ((_pfn) < max_page))
+#define pfn_valid(_pfn)     ((_pfn) < max_page)
 
 /* High table entries are reserved by the hypervisor. */
 #define DOMAIN_ENTRIES_PER_L2_PAGETABLE \
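This hunk is the heart of the change: the old pfn_is_ram() heuristic treated the first 1MB and everything past max_page as I/O, while the new pfn_valid() only asks whether a frame_table entry exists; whether a valid pfn is an I/O page is now decided by the dom_io ownership set up in arch_init_memory(). A small illustrative comparison, assuming a made-up max_page of 0x40000 (1GB of 4KB frames):

/* Comparison of the removed and added predicates; not from the patch. */
#include <stdio.h>

#define max_page 0x40000UL

#define pfn_is_ram(_pfn) (((_pfn) > 0x100) && ((_pfn) < max_page)) /* old */
#define pfn_valid(_pfn)  ((_pfn) < max_page)                       /* new */

int main(void)
{
    /* VGA hole in the first 1MB, ordinary RAM, beyond the end of RAM. */
    unsigned long pfns[] = { 0x0a0, 0x200, 0x50000 };

    for ( int i = 0; i < 3; i++ )
        printf("pfn %#07lx: pfn_is_ram=%d pfn_valid=%d\n",
               pfns[i], (int)pfn_is_ram(pfns[i]), (int)pfn_valid(pfns[i]));

    /* Output: 0x000a0 -> 0/1, 0x00200 -> 1/1, 0x50000 -> 0/0.
     * First-1MB frames are now valid; arch_init_memory() marks them as
     * owned by dom_io, so ownership rather than a pfn heuristic decides
     * what counts as an I/O page. */
    return 0;
}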
--- a/xen/include/asm-x86/shadow.h	Thu Apr 14 21:23:30 2005 +0000
+++ b/xen/include/asm-x86/shadow.h	Thu Apr 14 22:30:46 2005 +0000
@@ -85,7 +85,7 @@ static inline int page_is_page_table(str
 
 static inline int mfn_is_page_table(unsigned long mfn)
 {
-    if ( !pfn_is_ram(mfn) )
+    if ( !pfn_valid(mfn) )
         return 0;
 
     return frame_table[mfn].count_info & PGC_page_table;
@@ -98,7 +98,7 @@ static inline int page_out_of_sync(struc
 
 static inline int mfn_out_of_sync(unsigned long mfn)
 {
-    if ( !pfn_is_ram(mfn) )
+    if ( !pfn_valid(mfn) )
         return 0;
 
     return frame_table[mfn].count_info & PGC_out_of_sync;
@@ -280,7 +280,7 @@ shadow_get_page_from_l1e(l1_pgentry_t l1
     if ( unlikely(!res) && IS_PRIV(d) && !shadow_mode_translate(d) &&
          !(l1_pgentry_val(nl1e) & L1_DISALLOW_MASK) &&
          (mfn = l1_pgentry_to_pfn(nl1e)) &&
-         pfn_is_ram(mfn) &&
+         pfn_valid(mfn) &&
          (owner = page_get_owner(pfn_to_page(l1_pgentry_to_pfn(nl1e)))) &&
          (d != owner) )
     {
@@ -426,7 +426,7 @@ get_shadow_ref(unsigned long smfn)
 {
     u32 x, nx;
 
-    ASSERT(pfn_is_ram(smfn));
+    ASSERT(pfn_valid(smfn));
 
     x = frame_table[smfn].count_info;
     nx = x + 1;
@@ -455,7 +455,7 @@ put_shadow_ref(unsigned long smfn)
 {
     u32 x, nx;
 
-    ASSERT(pfn_is_ram(smfn));
+    ASSERT(pfn_valid(smfn));
 
     x = frame_table[smfn].count_info;
     nx = x - 1;
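The get_shadow_ref()/put_shadow_ref() hunks show why the assert had to change: shadow refcounts live directly in frame_table[].count_info, so the guard only needs the index to stay within the frame table, which is exactly what pfn_valid() checks; the stricter pfn_is_ram() test would wrongly reject frames in the first 1MB. A rough standalone sketch of that count_info discipline; the names are stand-ins and the overflow/underflow handling is invented, not Xen's actual error paths:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_PAGE 0x40000UL              /* stand-in for Xen's max_page */
#define pfn_valid(pfn) ((pfn) < MAX_PAGE)

static uint32_t count_info[MAX_PAGE];   /* stand-in for frame_table[].count_info */

static void get_shadow_ref(unsigned long smfn)
{
    /* pfn_valid() is what makes the array indexing below safe. */
    assert(pfn_valid(smfn));
    uint32_t x = count_info[smfn], nx = x + 1;
    assert(nx != 0);                    /* invented overflow check */
    count_info[smfn] = nx;
}

static void put_shadow_ref(unsigned long smfn)
{
    assert(pfn_valid(smfn));
    uint32_t x = count_info[smfn], nx = x - 1;
    assert(x != 0);                     /* invented underflow check */
    count_info[smfn] = nx;
}

int main(void)
{
    get_shadow_ref(0x42);
    put_shadow_ref(0x42);
    printf("count_info[0x42] = %u\n", count_info[0x42]);
    return 0;
}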