debuggers.hg
changeset 608:6b2bf4c01047
bitkeeper revision 1.314 (3f0bfa1daRYfG19cIu-EaJkDAAJMag)
Use Keir's version of dom0_memory.c
author   | rac61@labyrinth.cl.cam.ac.uk
date     | Wed Jul 09 11:18:53 2003 +0000 (2003-07-09)
parents  | a4f6a72f64e0 7dd26f39dacb
children | ddb65ffc61be d1c49d8a0744
files    | .rootkeys BitKeeper/etc/logging_ok xen/arch/i386/time.c xenolinux-2.4.21-sparse/arch/xeno/drivers/dom0/dom0_memory.c xenolinux-2.4.21-sparse/arch/xeno/mm/Makefile xenolinux-2.4.21-sparse/arch/xeno/mm/ioremap.c xenolinux-2.4.21-sparse/include/asm-xeno/io.h xenolinux-2.4.21-sparse/mkbuildtree
line diff
--- a/.rootkeys	Wed Jul 09 11:12:32 2003 +0000
+++ b/.rootkeys	Wed Jul 09 11:18:53 2003 +0000
@@ -528,6 +528,7 @@ 3e5a4e66l8Q5Tv-6B3lQIRmaVbFPzg xenolinux
 3e5a4e66TyNNUEXkr5RxqvQhXK1MQA xenolinux-2.4.21-sparse/arch/xeno/mm/get_unmapped_area.c
 3e5a4e668SE9rixq4ahho9rNhLUUFQ xenolinux-2.4.21-sparse/arch/xeno/mm/hypervisor.c
 3e5a4e661gLzzff25pJooKIIWe7IWg xenolinux-2.4.21-sparse/arch/xeno/mm/init.c
+3f0bed43UUdQichXAiVNrjV-y2Kzcg xenolinux-2.4.21-sparse/arch/xeno/mm/ioremap.c
 3e5a4e66qRlSTcjafidMB6ulECADvg xenolinux-2.4.21-sparse/arch/xeno/vmlinux.lds
 3ea53c6em6uzVHSiGqrbbAVofyRY_g xenolinux-2.4.21-sparse/drivers/block/genhd.c
 3e5a4e66mrtlmV75L1tjKDg8RaM5gA xenolinux-2.4.21-sparse/drivers/block/ll_rw_blk.c
@@ -544,7 +545,6 @@ 3e5a4e66SYp_UpAVcF8Lc1wa3Qtgzw xenolinux
 3e5a4e67w_DWgjIJ17Tlossu1LGujQ xenolinux-2.4.21-sparse/include/asm-xeno/highmem.h
 3e5a4e67YtcyDLQsShhCfQwPSELfvA xenolinux-2.4.21-sparse/include/asm-xeno/hw_irq.h
 3e5a4e677VBavzM1UZIEcH1B-RlXMA xenolinux-2.4.21-sparse/include/asm-xeno/hypervisor.h
-3e5a4e67Ulv-Ll8Zp4j2GwMwQ8aAXQ xenolinux-2.4.21-sparse/include/asm-xeno/io.h
 3e5a4e673p7PEOyHFm3nHkYX6HQYBg xenolinux-2.4.21-sparse/include/asm-xeno/irq.h
 3ead095db_LRUXnxaqs0dA1DWhPoQQ xenolinux-2.4.21-sparse/include/asm-xeno/keyboard.h
 3e5a4e67zoNch27qYhEBpr2k6SABOg xenolinux-2.4.21-sparse/include/asm-xeno/mmu.h
--- a/BitKeeper/etc/logging_ok	Wed Jul 09 11:12:32 2003 +0000
+++ b/BitKeeper/etc/logging_ok	Wed Jul 09 11:18:53 2003 +0000
@@ -19,6 +19,7 @@ rac61@labyrinth.cl.cam.ac.uk
 rgr22@boulderdash.cl.cam.ac.uk
 rn@wyvis.camb.intel-research.net
 rn@wyvis.research.intel-research.net
+rneugeba@wyvis.research.intel-research.net
 smh22@boulderdash.cl.cam.ac.uk
 smh22@labyrinth.cl.cam.ac.uk
 smh22@uridium.cl.cam.ac.uk
--- a/xen/arch/i386/time.c	Wed Jul 09 11:12:32 2003 +0000
+++ b/xen/arch/i386/time.c	Wed Jul 09 11:18:53 2003 +0000
@@ -185,11 +185,41 @@ mktime (unsigned int year, unsigned int
       )*60 + sec; /* finally seconds */
 }
 
-static unsigned long get_cmos_time(void)
+static unsigned long __get_cmos_time(void)
 {
     unsigned int year, mon, day, hour, min, sec;
+    /* Linux waits here for the Update-In-Progress (UIP) flag to go
+     * from 1 to 0. This can take up to a second, which is not acceptable
+     * for use in Xen, and the wait is therefore removed at the cost
+     * of reduced accuracy. */
+    sec = CMOS_READ(RTC_SECONDS);
+    min = CMOS_READ(RTC_MINUTES);
+    hour = CMOS_READ(RTC_HOURS);
+    day = CMOS_READ(RTC_DAY_OF_MONTH);
+    mon = CMOS_READ(RTC_MONTH);
+    year = CMOS_READ(RTC_YEAR);
+
+    if (!(CMOS_READ(RTC_CONTROL) & RTC_DM_BINARY) || RTC_ALWAYS_BCD)
+    {
+        BCD_TO_BIN(sec);
+        BCD_TO_BIN(min);
+        BCD_TO_BIN(hour);
+        BCD_TO_BIN(day);
+        BCD_TO_BIN(mon);
+        BCD_TO_BIN(year);
+    }
+
+    if ((year += 1900) < 1970)
+        year += 100;
+
+    return mktime(year, mon, day, hour, min, sec);
+}
+
+/* the more accurate version waits for a change */
+static unsigned long get_cmos_time(void)
+{
+    unsigned long res;
     int i;
-
     spin_lock(&rtc_lock);
     /* The Linux interpretation of the CMOS clock register contents:
      * When the Update-In-Progress (UIP) flag goes from 1 to 0, the
@@ -203,29 +233,10 @@ static unsigned long get_cmos_time(void)
     for (i = 0 ; i < 1000000 ; i++) /* must try at least 2.228 ms */
         if (!(CMOS_READ(RTC_FREQ_SELECT) & RTC_UIP))
             break;
-    do { /* Isn't this overkill ? UIP above should guarantee consistency */
-        sec = CMOS_READ(RTC_SECONDS);
-        min = CMOS_READ(RTC_MINUTES);
-        hour = CMOS_READ(RTC_HOURS);
-        day = CMOS_READ(RTC_DAY_OF_MONTH);
-        mon = CMOS_READ(RTC_MONTH);
-        year = CMOS_READ(RTC_YEAR);
-    } while (sec != CMOS_READ(RTC_SECONDS));
-    if (!(CMOS_READ(RTC_CONTROL) & RTC_DM_BINARY) || RTC_ALWAYS_BCD)
-    {
-        BCD_TO_BIN(sec);
-        BCD_TO_BIN(min);
-        BCD_TO_BIN(hour);
-        BCD_TO_BIN(day);
-        BCD_TO_BIN(mon);
-        BCD_TO_BIN(year);
-    }
+    res = __get_cmos_time();
     spin_unlock(&rtc_lock);
-    if ((year += 1900) < 1970)
-        year += 100;
-    printk(".... CMOS Clock: %02d/%02d/%04d %02d:%02d:%02d\n",
-           day, mon, year, hour, min, sec);
-    return mktime(year, mon, day, hour, min, sec);
+    return res;
+
 }
 
 /***************************************************************************
@@ -368,6 +379,64 @@ static void update_time(unsigned long fo
     add_ac_timer(&update_timer);
 }
 
+
+/*
+ * VERY crude way to keep system time from drifting.
+ * Update the scaling factor using the RTC.
+ * This is done periodically on its own timer.
+ * We maintain an array of cpu frequencies:
+ * - index 0 -> go slower
+ * - index 1 -> frequency as determined during calibration
+ * - index 2 -> go faster
+ */
+#define UPDATE_PERIOD SECONDS(50)
+static struct ac_timer scale_timer;
+static unsigned long init_cmos_time;
+static u64 cpu_freqs[3];
+static void update_scale(unsigned long foo)
+{
+    unsigned long flags;
+    unsigned long cmos_time;
+    s_time_t now;
+    s32 st, ct, dt;
+    u64 scale;
+    int freq_index;
+
+    spin_lock(&rtc_lock);
+    cmos_time = __get_cmos_time();
+    spin_unlock(&rtc_lock);
+
+    spin_lock_irqsave(&stime_lock, flags);
+    now = __get_s_time();
+
+    ct = (cmos_time - init_cmos_time);
+    st = (s32)(now/SECONDS(1));
+    dt = ct - st;
+
+    /* work out adjustment to scaling factor. allow +/- 1s drift */
+    if (dt < -1) freq_index = 0;      /* go slower */
+    else if (dt > 1) freq_index = 2;  /* go faster */
+    else freq_index = 1;              /* correct speed */
+
+    if (dt <= -10 || dt >= 10)
+        printk("Large time drift (cmos time - system time = %ds)\n", dt);
+
+    /* set new frequency */
+    cpu_freq = cpu_freqs[freq_index];
+
+    /* adjust scaling factor */
+    scale = 1000000000LL << 32;
+    scale /= cpu_freq;
+    st_scale_f = scale & 0xffffffff;
+    st_scale_i = scale >> 32;
+
+    spin_unlock_irqrestore(&stime_lock, flags);
+    scale_timer.expires = now + UPDATE_PERIOD;
+    add_ac_timer(&scale_timer);
+    TRC(printk(" %ds[%d] ", dt, freq_index));
+}
+
+
 /***************************************************************************
  * Init Xeno Time
  * This has to be done after all CPUs have been booted
@@ -376,7 +445,9 @@ int __init init_xeno_time()
 {
     int cpu = smp_processor_id();
     u32 cpu_cycle;   /* time of one cpu cycle in pico-seconds */
-    u64 scale;       /* scale factor */
+    u64 scale;       /* scale factor */
+    s64 freq_off;
+
 
     spin_lock_init(&stime_lock);
     spin_lock_init(&wctime_lock);
@@ -385,15 +456,26 @@ int __init init_xeno_time()
 
     /* System Time */
     cpu_cycle = (u32) (1000000000LL/cpu_khz); /* in pico seconds */
+
     scale = 1000000000LL << 32;
     scale /= cpu_freq;
     st_scale_f = scale & 0xffffffff;
    st_scale_i = scale >> 32;
 
+
+    /* calculate adjusted frequencies */
+    freq_off = cpu_freq/1000; /* .1% */
+    cpu_freqs[0] = cpu_freq + freq_off;
+    cpu_freqs[1] = cpu_freq;
+    cpu_freqs[2] = cpu_freq - freq_off;
+
     /* Wall Clock time */
     wall_clock_time.tv_sec = get_cmos_time();
     wall_clock_time.tv_usec = 0;
 
+    /* init cmos_time for synchronising */
+    init_cmos_time = wall_clock_time.tv_sec - 3;
+
     /* set starting times */
     stime_now = (s_time_t)0;
     rdtscl(stime_pcc);
@@ -404,12 +486,19 @@ int __init init_xeno_time()
     update_timer.data = 1;
     update_timer.function = &update_time;
     update_time(0);
+
+    init_ac_timer(&scale_timer, 0);
+    scale_timer.data = 4;
+    scale_timer.function = &update_scale;
+    update_scale(0);
 
-    printk(".... System Time: %lldns\n", NOW());
-    printk(".....cpu_cycle:   %u ps\n", cpu_cycle);
-    printk(".... st_scale_f:  %X\n", st_scale_f);
-    printk(".... st_scale_i:  %X\n", st_scale_i);
-    printk(".... stime_pcc:   %u\n", stime_pcc);
+    printk(".... System Time: %lldns\n", NOW());
+    printk(".....cpu_freq:    %08X%08X\n", (u32)(cpu_freq>>32), (u32)cpu_freq);
+    printk(".....cpu_cycle:   %u ps\n", cpu_cycle);
+    printk(".....scale:       %08X%08X\n", (u32)(scale>>32), (u32)scale);
+    printk(".... st_scale_f:  %X\n", st_scale_f);
+    printk(".... st_scale_i:  %X\n", st_scale_i);
+    printk(".... stime_pcc:   %u\n", stime_pcc);
 
     printk(".... Wall Clock:  %lds %ldus\n", wall_clock_time.tv_sec,
            wall_clock_time.tv_usec);
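
The hunks above hinge on a 32.32 fixed-point scale factor: `scale = (10^9 << 32) / cpu_freq` is split into an integer part `st_scale_i` and a fractional part `st_scale_f`, and `update_scale()` nudges `cpu_freq` by ±0.1% to re-derive it. A minimal standalone sketch of that arithmetic follows; the 2.4 GHz frequency is an assumed example, and the way the two halves are recombined is an assumption (the diff does not show `__get_s_time()` itself):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    uint64_t cpu_freq_demo = 2400000000ULL;          /* assumed 2.4 GHz TSC */
    uint64_t scale = (1000000000ULL << 32) / cpu_freq_demo;
    uint32_t st_scale_i = (uint32_t)(scale >> 32);   /* integer ns per cycle */
    uint32_t st_scale_f = (uint32_t)scale;           /* fractional ns, /2^32 */

    /* presumed combination: ns = delta*int_part + (delta*frac_part) >> 32 */
    uint64_t delta = 1000000;                        /* 1M cycles elapsed */
    uint64_t ns = delta * st_scale_i + ((delta * st_scale_f) >> 32);
    printf("%llu cycles ~= %llu ns\n",
           (unsigned long long)delta, (unsigned long long)ns);  /* ~416666 */
    return 0;
}

Because the ±0.1% adjusted frequencies feed back into this same computation, system time converges on the RTC within the ±1 s window the drift check allows.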
--- a/xenolinux-2.4.21-sparse/arch/xeno/drivers/dom0/dom0_memory.c	Wed Jul 09 11:12:32 2003 +0000
+++ b/xenolinux-2.4.21-sparse/arch/xeno/drivers/dom0/dom0_memory.c	Wed Jul 09 11:18:53 2003 +0000
@@ -16,67 +16,23 @@
 
 #include "dom0_ops.h"
 
-extern struct list_head * find_direct(struct list_head *, unsigned long);
-
-/*
- * bd240: functions below perform direct mapping to the real physical pages
- * needed for mapping various hypervisor specific structures needed in dom0
- * userspace by various management applications such as domain builder etc.
- */
-
-#define direct_set_pte(pteptr, pteval) queue_l1_entry_update(__pa(pteptr)|PGREQ_UNCHECKED_UPDATE, (pteval).pte_low)
-
-#define direct_pte_clear(pteptr) queue_l1_entry_update(__pa(pteptr)|PGREQ_UNCHECKED_UPDATE, 0)
-
-#define __direct_pte(x) ((pte_t) { (x) } )
-#define __direct_mk_pte(page_nr,pgprot) __direct_pte(((page_nr) << PAGE_SHIFT) | pgprot_val(pgprot))
-#define direct_mk_pte_phys(physpage, pgprot) __direct_mk_pte((physpage) >> PAGE_SHIFT, pgprot)
-
-/* Note: this is only safe if the mm semaphore is held when called. */
-
-static int direct_remap_page(unsigned long from, unsigned long phys_addr, pgprot_t prot)
-{
-    struct mm_struct *mm = current->mm;
-    pgd_t * dir;
-    pmd_t *pmd;
-    pte_t *pte;
-
-    pte_t oldpage;
+#define MAP_CONT    0
+#define MAP_DISCONT 1
 
-    dir = pgd_offset(mm, from);
-    flush_cache_range(mm, from, from + PAGE_SIZE);
-
-    spin_lock(&mm->page_table_lock);
-    pmd = pmd_alloc(mm, dir, from);
-    if (!pmd)
-        return -ENOMEM;
-    pte = pte_alloc(mm, pmd, from);
-    if (!pte) {
-        /* XXX free pmd? */
-        return -ENOMEM;
-    }
-
-    /* Sanity check */
-    oldpage = ptep_get_and_clear(pte);
-    if (!pte_none(oldpage)) {
-        printk("Page already in use!\n");
-        BUG();
-    }
-    direct_set_pte(pte, direct_mk_pte_phys(phys_addr, prot));
-
-    spin_unlock(&mm->page_table_lock);
-
-    flush_tlb_range(mm, from, from + PAGE_SIZE);
-
-    return 0;
-}
+extern struct list_head * find_direct(struct list_head *, unsigned long);
+extern int direct_remap_area_pages(struct mm_struct *, unsigned long,
+                                   unsigned long, unsigned long, pgprot_t);
+extern void direct_zap_page_range(struct mm_struct *, unsigned long,
+                                  unsigned long);
 
 /*
  * used for remapping discontiguous bits of domain's memory, pages to map are
  * found from frame table beginning at the given first_pg index
  */
-static int direct_remap_disc_page_range(unsigned long from,
-                     unsigned long first_pg, int tot_pages, pgprot_t prot)
+int direct_remap_disc_page_range(unsigned long from,
+                                 unsigned long first_pg,
+                                 int tot_pages,
+                                 pgprot_t prot)
 {
     dom0_op_t dom0_op;
     unsigned long *pfns = (unsigned long *)get_free_page(GFP_KERNEL);
@@ -97,8 +53,9 @@ static int direct_remap_disc_page_range(
 
     for ( i = 0; i < pages; i++ )
     {
-        if(direct_remap_page(start, pfns[i] << PAGE_SHIFT,
-                             prot))
+        if(direct_remap_area_pages(current->mm,
+                                   start, pfns[i] << PAGE_SHIFT,
+                                   PAGE_SIZE, prot))
             goto out;
         start += PAGE_SIZE;
         tot_pages--;
@@ -110,30 +67,26 @@ static int direct_remap_disc_page_range(
     return tot_pages;
 }
 
-/* below functions replace standard sys_mmap and sys_munmap which are
- * absolutely useless for direct memory mapping. direct_zap* functions
- * are minor amendments to the original versions in mm/memory.c. the
- * changes are to enable unmapping of real physical addresses.
- */
 
 unsigned long direct_mmap(unsigned long phys_addr, unsigned long size,
-                          pgprot_t prot, int tot_pages)
+                          pgprot_t prot, int flag, int tot_pages)
 {
     direct_mmap_node_t * dmmap;
     struct list_head * entry;
     unsigned long addr;
     int ret = 0;
 
-    if(!(size & ~PAGE_MASK))
-        return -EINVAL;
-
-    if(!capable(CAP_SYS_ADMIN))
-        return -EPERM;
+    if(!capable(CAP_SYS_ADMIN)){
+        ret = -EPERM;
+        goto out;
+    }
 
     /* get unmapped area invokes xen specific arch_get_unmapped_area */
    addr = get_unmapped_area(NULL, 0, size, 0, 0);
-    if(addr & ~PAGE_MASK)
-        return -ENOMEM;
+    if(addr & ~PAGE_MASK){
+        ret = -ENOMEM;
+        goto out;
+    }
 
     /* add node on the list of directly mapped areas, make sure the
      * list remains sorted.
@@ -143,72 +96,40 @@ unsigned long direct_mmap(unsigned long
     dmmap->vm_end = addr + size;
     entry = find_direct(&current->mm->context.direct_list, addr);
     if(entry != &current->mm->context.direct_list){
-        list_add_tail(&dmmap->list, entry);
+        list_add_tail(&dmmap->list, entry);
     } else {
-        list_add_tail(&dmmap->list, &current->mm->context.direct_list);
+        list_add_tail(&dmmap->list, &current->mm->context.direct_list);
     }
 
-    /* Acquire mm sem? XXX */
     /* and perform the mapping */
-    ret = direct_remap_disc_page_range(addr, phys_addr >> PAGE_SHIFT,
-                                       tot_pages, prot);
-    /* Drop mm sem? XXX */
+    if(flag == MAP_DISCONT){
+        ret = direct_remap_disc_page_range(addr, phys_addr >> PAGE_SHIFT,
+                                           tot_pages, prot);
+    } else {
+        ret = direct_remap_area_pages(current->mm,
+                                      addr, phys_addr, size, prot);
+    }
 
     if(ret == 0)
-        return addr;
-    else
-        return ret;
-}
-
-/*
- * remove a user page
- *
- * There used to be a function here which could remove a whole range
- * of pages, but it was only ever called with that range equal to a
- * single page, so I simplified it a bit -- sos22.
- */
-static void direct_zap_page(struct mm_struct *mm, unsigned long address)
-{
-    mmu_gather_t *tlb;
-    pgd_t * dir;
-    pmd_t * pmd;
-    pte_t * pte;
+        ret = addr;
 
-    dir = pgd_offset(mm, address);
-
-    /*
-     * This is a long-lived spinlock. That's fine.
-     * There's no contention, because the page table
-     * lock only protects against kswapd anyway, and
-     * even if kswapd happened to be looking at this
-     * process we _want_ it to get stuck.
-     */
-    spin_lock(&mm->page_table_lock);
-    flush_cache_range(mm, address, address + PAGE_SIZE);
-
-    tlb = tlb_gather_mmu(mm);
-    pmd = pmd_offset(dir, address);
-    pte = pte_offset(pmd, address);
-    direct_pte_clear(pte);
-    tlb_finish_mmu(tlb, address, address + PAGE_SIZE);
-
-    /* decrementing rss removed */
-    spin_unlock(&mm->page_table_lock);
+ out:
+    return ret;
}
 
 
 int direct_unmap(struct mm_struct *mm, unsigned long addr, unsigned long size)
 {
+    int count = 0, tot_pages = (size+PAGE_SIZE-1) >> PAGE_SHIFT;
     direct_mmap_node_t * node;
     struct list_head * curr;
     struct list_head * direct_list = &mm->context.direct_list;
-    unsigned long end;
 
     curr = direct_list->next;
     while ( curr != direct_list )
     {
         node = list_entry(curr, direct_mmap_node_t, list);
-        if ( node->vm_start == addr && node->vm_end == addr + size)
+        if ( node->vm_start == addr )
             break;
         curr = curr->next;
     }
@@ -219,17 +140,11 @@ int direct_unmap(struct mm_struct *mm, u
     list_del(&node->list);
     kfree(node);
 
-    if (size & ~PAGE_MASK) {
-        printk("Managed to map something which isn't a multiple of a page size...\n");
-        BUG();
-        return -EINVAL;
-    }
-
-    end = addr + size;
-    while ( addr < end )
+    while ( count < tot_pages )
     {
-        direct_zap_page(mm, addr);
+        direct_zap_page_range(mm, addr, PAGE_SIZE);
         addr += PAGE_SIZE;
+        count++;
     }
 
     return 0;
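
Note how the rewritten direct_unmap() no longer BUGs on a non-page-multiple size: it derives its loop bound with a round-up division and zaps whole pages, covering any final partial page. A minimal standalone sketch of that rounding idiom, assuming i386's 4 KB pages (values chosen for illustration):

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

int main(void)
{
    unsigned long sizes[] = { PAGE_SIZE, PAGE_SIZE + 1, 3 * PAGE_SIZE - 1 };
    int i;

    for (i = 0; i < 3; i++) {
        /* same expression as direct_unmap(): round size up to whole pages */
        unsigned long tot_pages = (sizes[i] + PAGE_SIZE - 1) >> PAGE_SHIFT;
        printf("size=%lu -> tot_pages=%lu\n", sizes[i], tot_pages);
    }
    return 0;    /* prints 1, 2, 3 pages respectively */
}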
--- a/xenolinux-2.4.21-sparse/arch/xeno/mm/Makefile	Wed Jul 09 11:12:32 2003 +0000
+++ b/xenolinux-2.4.21-sparse/arch/xeno/mm/Makefile	Wed Jul 09 11:18:53 2003 +0000
@@ -9,7 +9,7 @@
 
 O_TARGET := mm.o
 
-obj-y := init.o fault.o extable.o pageattr.o hypervisor.o get_unmapped_area.o
+obj-y := init.o fault.o extable.o pageattr.o hypervisor.o get_unmapped_area.o ioremap.o
 
 export-objs := pageattr.o
 
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/xenolinux-2.4.21-sparse/arch/xeno/mm/ioremap.c	Wed Jul 09 11:18:53 2003 +0000
@@ -0,0 +1,303 @@
+/*
+ * arch/xeno/mm/ioremap.c
+ *
+ * Re-map IO memory to kernel address space so that we can access it.
+ *
+ * (C) Copyright 1995 1996 Linus Torvalds
+ *
+ * Modifications for Xenolinux (c) 2003 Keir Fraser
+ */
+
+#include <linux/slab.h>
+#include <linux/mm.h>
+#include <linux/mman.h>
+#include <linux/vmalloc.h>
+#include <asm/io.h>
+#include <asm/pgalloc.h>
+#include <asm/uaccess.h>
+#include <asm/tlb.h>
+#include <asm/mmu.h>
+
+#define direct_set_pte(pteptr, pteval) \
+    queue_l1_entry_update(__pa(pteptr)|PGREQ_UNCHECKED_UPDATE, (pteval).pte_low)
+#define direct_pte_clear(pteptr) \
+    queue_l1_entry_update(__pa(pteptr)|PGREQ_UNCHECKED_UPDATE, 0)
+#define __direct_pte(x) ((pte_t) { (x) } )
+#define __direct_mk_pte(page_nr,pgprot) \
+    __direct_pte(((page_nr) << PAGE_SHIFT) | pgprot_val(pgprot))
+#define direct_mk_pte_phys(physpage, pgprot) \
+    __direct_mk_pte((physpage) >> PAGE_SHIFT, pgprot)
+
+
+
+/******************* Mapping a page range directly ************************/
+
+static inline void direct_remap_area_pte(pte_t *pte,
+                                         unsigned long address,
+                                         unsigned long size,
+                                         unsigned long machine_addr,
+                                         pgprot_t prot)
+{
+    unsigned long end;
+
+    address &= ~PMD_MASK;
+    end = address + size;
+    if (end > PMD_SIZE)
+        end = PMD_SIZE;
+    if (address >= end)
+        BUG();
+    do {
+        if (!pte_none(*pte)) {
+            printk("direct_remap_area_pte: page already exists\n");
+            BUG();
+        }
+        direct_set_pte(pte, direct_mk_pte_phys(machine_addr, prot));
+        address += PAGE_SIZE;
+        machine_addr += PAGE_SIZE;
+        pte++;
+    } while (address && (address < end));
+}
+
+static inline int direct_remap_area_pmd(struct mm_struct *mm,
+                                        pmd_t *pmd,
+                                        unsigned long address,
+                                        unsigned long size,
+                                        unsigned long machine_addr,
+                                        pgprot_t prot)
+{
+    unsigned long end;
+
+    address &= ~PGDIR_MASK;
+    end = address + size;
+    if (end > PGDIR_SIZE)
+        end = PGDIR_SIZE;
+    machine_addr -= address;
+    if (address >= end)
+        BUG();
+    do {
+        pte_t * pte = pte_alloc(mm, pmd, address);
+        if (!pte)
+            return -ENOMEM;
+        direct_remap_area_pte(pte, address, end - address,
+                              address + machine_addr, prot);
+        address = (address + PMD_SIZE) & PMD_MASK;
+        pmd++;
+    } while (address && (address < end));
+    return 0;
+}
+
+int direct_remap_area_pages(struct mm_struct *mm,
+                            unsigned long address,
+                            unsigned long machine_addr,
+                            unsigned long size,
+                            pgprot_t prot)
+{
+    int error = 0;
+    pgd_t * dir;
+    unsigned long end = address + size;
+
+    machine_addr -= address;
+    dir = pgd_offset(mm, address);
+    flush_cache_all();
+    if (address >= end)
+        BUG();
+    spin_lock(&mm->page_table_lock);
+    do {
+        pmd_t *pmd = pmd_alloc(mm, dir, address);
+        error = -ENOMEM;
+        if (!pmd)
+            break;
+        error = direct_remap_area_pmd(mm, pmd, address, end - address,
+                                      machine_addr + address, prot);
+        if (error)
+            break;
+        address = (address + PGDIR_SIZE) & PGDIR_MASK;
+        dir++;
+    } while (address && (address < end));
+    spin_unlock(&mm->page_table_lock);
+    flush_tlb_all();
+    return error;
+}
+
+
+
+/************************ Zapping a page range directly *******************/
+
+static inline int direct_zap_pte_range(mmu_gather_t *tlb,
+                                       pmd_t * pmd,
+                                       unsigned long address,
+                                       unsigned long size)
+{
+    unsigned long offset;
+    pte_t * ptep;
+    int freed = 0;
+
+    if (pmd_none(*pmd))
+        return 0;
+    if (pmd_bad(*pmd)) {
+        pmd_ERROR(*pmd);
+        pmd_clear(pmd);
+        return 0;
+    }
+    ptep = pte_offset(pmd, address);
+    offset = address & ~PMD_MASK;
+    if (offset + size > PMD_SIZE)
+        size = PMD_SIZE - offset;
+    size &= PAGE_MASK;
+    for (offset=0; offset < size; ptep++, offset += PAGE_SIZE) {
+        pte_t pte = *ptep;
+        if (pte_none(pte))
+            continue;
+        freed++;
+        direct_pte_clear(ptep);
+    }
+
+    return freed;
+}
+
+static inline int direct_zap_pmd_range(mmu_gather_t *tlb,
+                                       pgd_t * dir,
+                                       unsigned long address,
+                                       unsigned long size)
+{
+    pmd_t * pmd;
+    unsigned long end;
+    int freed;
+
+    if (pgd_none(*dir))
+        return 0;
+    if (pgd_bad(*dir)) {
+        pgd_ERROR(*dir);
+        pgd_clear(dir);
+        return 0;
+    }
+    pmd = pmd_offset(dir, address);
+    end = address + size;
+    if (end > ((address + PGDIR_SIZE) & PGDIR_MASK))
+        end = ((address + PGDIR_SIZE) & PGDIR_MASK);
+    freed = 0;
+    do {
+        freed += direct_zap_pte_range(tlb, pmd, address, end - address);
+        address = (address + PMD_SIZE) & PMD_MASK;
+        pmd++;
+    } while (address < end);
+    return freed;
+}
+
+void direct_zap_page_range(struct mm_struct *mm,
+                           unsigned long address,
+                           unsigned long size)
+{
+    mmu_gather_t *tlb;
+    pgd_t * dir;
+    unsigned long start = address, end = address + size;
+    int freed = 0;
+
+    dir = pgd_offset(mm, address);
+
+    if (address >= end)
+        BUG();
+    spin_lock(&mm->page_table_lock);
+    flush_cache_range(mm, address, end);
+    tlb = tlb_gather_mmu(mm);
+
+    do {
+        freed += direct_zap_pmd_range(tlb, dir, address, end - address);
+        address = (address + PGDIR_SIZE) & PGDIR_MASK;
+        dir++;
+    } while (address && (address < end));
+
+    /* this will flush any remaining tlb entries */
+    tlb_finish_mmu(tlb, start, end);
+
+    /* decrementing rss removed */
+    spin_unlock(&mm->page_table_lock);
+}
+
+
+
+/****************** Generic public functions ****************************/
+
+/*
+ * Remap an arbitrary machine address space into the kernel virtual
+ * address space. Needed when a privileged instance of Xenolinux wants
+ * to access space outside its world directly.
+ *
+ * NOTE! We need to allow non-page-aligned mappings too: we will obviously
+ * have to convert them into an offset in a page-aligned mapping, but the
+ * caller shouldn't need to know that small detail.
+ */
+void * __ioremap(unsigned long machine_addr,
+                 unsigned long size,
+                 unsigned long flags)
+{
+    void * addr;
+    struct vm_struct * area;
+    unsigned long offset, last_addr;
+    pgprot_t prot;
+
+    /* Only privileged Xenolinux can make unchecked pagetable updates. */
+    if ( !(start_info.flags & SIF_PRIVILEGED) )
+        return NULL;
+
+    /* Don't allow wraparound or zero size */
+    last_addr = machine_addr + size - 1;
+    if (!size || last_addr < machine_addr)
+        return NULL;
+
+    /* Mappings have to be page-aligned */
+    offset = machine_addr & ~PAGE_MASK;
+    machine_addr &= PAGE_MASK;
+    size = PAGE_ALIGN(last_addr) - machine_addr;
+
+    /* Ok, go for it */
+    area = get_vm_area(size, VM_IOREMAP);
+    if (!area)
+        return NULL;
+    addr = area->addr;
+    prot = __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED |
+                    flags);
+    if (direct_remap_area_pages(&init_mm, VMALLOC_VMADDR(addr),
+                                machine_addr, size, prot)) {
+        vfree(addr);
+        return NULL;
+    }
+    return (void *) (offset + (char *)addr);
+}
+
+/*
+ * 'vfree' is basically inlined here. This is because we use a different
+ * function to zap the associated page range.
+ */
+void iounmap(void *addr)
+{
+    struct vm_struct **p, *tmp;
+
+    addr = (void *)((unsigned long)addr & PAGE_MASK);
+
+    if (addr == NULL)
+        return;
+
+    write_lock(&vmlist_lock);
+
+    for (p = &vmlist ; (tmp = *p) ; p = &tmp->next) {
+        if (tmp->addr == addr) {
+            *p = tmp->next;
+            direct_zap_page_range(&init_mm,
+                                  VMALLOC_VMADDR(tmp->addr),
+                                  tmp->size);
+            write_unlock(&vmlist_lock);
+            kfree(tmp);
+            return;
+        }
+    }
+
+    write_unlock(&vmlist_lock);
+    printk(KERN_ERR "Trying to iounmap() nonexistent vm area (%p)\n", addr);
+}
+
+
+#if 0 /* We don't support these functions. They shouldn't be required. */
+void __init *bt_ioremap(unsigned long machine_addr, unsigned long size) {}
+void __init bt_iounmap(void *addr, unsigned long size) {}
+#endif
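
As the NOTE in __ioremap() says, callers may pass non-page-aligned machine addresses; the function maps an aligned, rounded-up range and returns the mapped base plus the in-page offset. A standalone sketch of just that alignment arithmetic, with a hypothetical MMIO address and the usual i386 page constants assumed:

#include <stdio.h>

#define PAGE_SHIFT    12
#define PAGE_SIZE     (1UL << PAGE_SHIFT)
#define PAGE_MASK     (~(PAGE_SIZE - 1))
#define PAGE_ALIGN(a) (((a) + PAGE_SIZE - 1) & PAGE_MASK)

int main(void)
{
    unsigned long machine_addr = 0xfebc0a10UL;  /* hypothetical device addr */
    unsigned long size = 0x120;                 /* hypothetical region size */

    unsigned long last_addr = machine_addr + size - 1;
    unsigned long offset = machine_addr & ~PAGE_MASK;  /* 0xa10 into page */
    machine_addr &= PAGE_MASK;                         /* aligned base */
    size = PAGE_ALIGN(last_addr) - machine_addr;       /* whole pages */

    /* map [machine_addr, machine_addr+size), hand back base + offset */
    printf("map %#lx..+%#lx, return base+%#lx\n", machine_addr, size, offset);
    return 0;
}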
--- a/xenolinux-2.4.21-sparse/include/asm-xeno/io.h	Wed Jul 09 11:12:32 2003 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,376 +0,0 @@
-#ifndef _ASM_IO_H
-#define _ASM_IO_H
-
-#include <linux/config.h>
-#include <asm/hypervisor.h>
-/*
- * This file contains the definitions for the x86 IO instructions
- * inb/inw/inl/outb/outw/outl and the "string versions" of the same
- * (insb/insw/insl/outsb/outsw/outsl). You can also use "pausing"
- * versions of the single-IO instructions (inb_p/inw_p/..).
- *
- * This file is not meant to be obfuscating: it's just complicated
- * to (a) handle it all in a way that makes gcc able to optimize it
- * as well as possible and (b) trying to avoid writing the same thing
- * over and over again with slight variations and possibly making a
- * mistake somewhere.
- */
-
-/*
- * Thanks to James van Artsdalen for a better timing-fix than
- * the two short jumps: using outb's to a nonexistent port seems
- * to guarantee better timings even on fast machines.
- *
- * On the other hand, I'd like to be sure of a non-existent port:
- * I feel a bit unsafe about using 0x80 (should be safe, though)
- *
- *     Linus
- */
-
- /*
-  * Bit simplified and optimized by Jan Hubicka
-  * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999.
-  *
-  * isa_memset_io, isa_memcpy_fromio, isa_memcpy_toio added,
-  * isa_read[wl] and isa_write[wl] fixed
-  * - Arnaldo Carvalho de Melo <acme@conectiva.com.br>
-  */
-
-#define IO_SPACE_LIMIT 0xffff
-
-#define XQUAD_PORTIO_BASE 0xfe400000
-#define XQUAD_PORTIO_QUAD 0x40000  /* 256k per quad. */
-#define XQUAD_PORTIO_LEN  0x80000  /* Only remapping first 2 quads */
-
-#ifdef __KERNEL__
-
-#include <linux/vmalloc.h>
-
-/*
- * Temporary debugging check to catch old code using
- * unmapped ISA addresses. Will be removed in 2.4.
- */
-#if CONFIG_DEBUG_IOVIRT
-  extern void *__io_virt_debug(unsigned long x, const char *file, int line);
-  extern unsigned long __io_phys_debug(unsigned long x, const char *file, int line);
-  #define __io_virt(x) __io_virt_debug((unsigned long)(x), __FILE__, __LINE__)
-//#define __io_phys(x) __io_phys_debug((unsigned long)(x), __FILE__, __LINE__)
-#else
-  #define __io_virt(x) ((void *)(x))
-//#define __io_phys(x) __pa(x)
-#endif
-
-/**
- * virt_to_phys - map virtual addresses to physical
- * @address: address to remap
- *
- * The returned physical address is the physical (CPU) mapping for
- * the memory address given. It is only valid to use this function on
- * addresses directly mapped or allocated via kmalloc.
- *
- * This function does not give bus mappings for DMA transfers. In
- * almost all conceivable cases a device driver should not be using
- * this function
- */
-
-static inline unsigned long virt_to_phys(volatile void * address)
-{
-    return __pa(address);
-}
-
-/**
- * phys_to_virt - map physical address to virtual
- * @address: address to remap
- *
- * The returned virtual address is a current CPU mapping for
- * the memory address given. It is only valid to use this function on
- * addresses that have a kernel mapping
- *
- * This function does not handle bus mappings for DMA transfers. In
- * almost all conceivable cases a device driver should not be using
- * this function
- */
-
-static inline void * phys_to_virt(unsigned long address)
-{
-    return __va(address);
-}
-
-/*
- * Change "struct page" to physical address.
- */
-#ifdef CONFIG_HIGHMEM64G
-#define page_to_phys(page) ((u64)(page - mem_map) << PAGE_SHIFT)
-#else
-#define page_to_phys(page) ((page - mem_map) << PAGE_SHIFT)
-#endif
-
-/*
- * IO bus memory addresses are also 1:1 with the physical address
- */
-#define virt_to_bus virt_to_phys
-#define bus_to_virt phys_to_virt
-#define page_to_bus page_to_phys
-
-/*
- * readX/writeX() are used to access memory mapped devices. On some
- * architectures the memory mapped IO stuff needs to be accessed
- * differently. On the x86 architecture, we just read/write the
- * memory location directly.
- */
-
-#define readb(addr) (*(volatile unsigned char *) __io_virt(addr))
-#define readw(addr) (*(volatile unsigned short *) __io_virt(addr))
-#define readl(addr) (*(volatile unsigned int *) __io_virt(addr))
-#define __raw_readb readb
-#define __raw_readw readw
-#define __raw_readl readl
-
-#define writeb(b,addr) (*(volatile unsigned char *) __io_virt(addr) = (b))
-#define writew(b,addr) (*(volatile unsigned short *) __io_virt(addr) = (b))
-#define writel(b,addr) (*(volatile unsigned int *) __io_virt(addr) = (b))
-#define __raw_writeb writeb
-#define __raw_writew writew
-#define __raw_writel writel
-
-#define memset_io(a,b,c)     __memset(__io_virt(a),(b),(c))
-#define memcpy_fromio(a,b,c) __memcpy((a),__io_virt(b),(c))
-#define memcpy_toio(a,b,c)   __memcpy(__io_virt(a),(b),(c))
-
-/*
- * ISA space is 'always mapped' on a typical x86 system, no need to
- * explicitly ioremap() it. The fact that the ISA IO space is mapped
- * to PAGE_OFFSET is pure coincidence - it does not mean ISA values
- * are physical addresses. The following constant pointer can be
- * used as the IO-area pointer (it can be iounmapped as well, so the
- * analogy with PCI is quite large):
- */
-#define __ISA_IO_base ((char *)(PAGE_OFFSET))
-
-#define isa_readb(a) readb(__ISA_IO_base + (a))
-#define isa_readw(a) readw(__ISA_IO_base + (a))
-#define isa_readl(a) readl(__ISA_IO_base + (a))
-#define isa_writeb(b,a) writeb(b,__ISA_IO_base + (a))
-#define isa_writew(w,a) writew(w,__ISA_IO_base + (a))
-#define isa_writel(l,a) writel(l,__ISA_IO_base + (a))
-#define isa_memset_io(a,b,c)     memset_io(__ISA_IO_base + (a),(b),(c))
-#define isa_memcpy_fromio(a,b,c) memcpy_fromio((a),__ISA_IO_base + (b),(c))
-#define isa_memcpy_toio(a,b,c)   memcpy_toio(__ISA_IO_base + (a),(b),(c))
-
-
-/*
- * Again, i386 does not require mem IO specific function.
- */
-
-#define eth_io_copy_and_sum(a,b,c,d)     eth_copy_and_sum((a),__io_virt(b),(c),(d))
-#define isa_eth_io_copy_and_sum(a,b,c,d) eth_copy_and_sum((a),__io_virt(__ISA_IO_base + (b)),(c),(d))
-
-/**
- * check_signature - find BIOS signatures
- * @io_addr: mmio address to check
- * @signature: signature block
- * @length: length of signature
- *
- * Perform a signature comparison with the mmio address io_addr. This
- * address should have been obtained by ioremap.
- * Returns 1 on a match.
- */
-
-static inline int check_signature(unsigned long io_addr,
-                                  const unsigned char *signature, int length)
-{
-    int retval = 0;
-    do {
-        if (readb(io_addr) != *signature)
-            goto out;
-        io_addr++;
-        signature++;
-        length--;
-    } while (length);
-    retval = 1;
-out:
-    return retval;
-}
-
-/**
- * isa_check_signature - find BIOS signatures
- * @io_addr: mmio address to check
- * @signature: signature block
- * @length: length of signature
- *
- * Perform a signature comparison with the ISA mmio address io_addr.
- * Returns 1 on a match.
- *
- * This function is deprecated. New drivers should use ioremap and
- * check_signature.
- */
-
-
-static inline int isa_check_signature(unsigned long io_addr,
-                                      const unsigned char *signature, int length)
-{
-    int retval = 0;
-    do {
-        if (isa_readb(io_addr) != *signature)
-            goto out;
-        io_addr++;
-        signature++;
-        length--;
-    } while (length);
-    retval = 1;
-out:
-    return retval;
-}
-
-/*
- * Cache management
- *
- * This needed for two cases
- * 1. Out of order aware processors
- * 2. Accidentally out of order processors (PPro errata #51)
- */
-
-#if defined(CONFIG_X86_OOSTORE) || defined(CONFIG_X86_PPRO_FENCE)
-
-static inline void flush_write_buffers(void)
-{
-    __asm__ __volatile__ ("lock; addl $0,0(%%esp)": : :"memory");
-}
-
-#define dma_cache_inv(_start,_size)       flush_write_buffers()
-#define dma_cache_wback(_start,_size)     flush_write_buffers()
-#define dma_cache_wback_inv(_start,_size) flush_write_buffers()
-
-#else
-
-/* Nothing to do */
-
-#define dma_cache_inv(_start,_size)       do { } while (0)
-#define dma_cache_wback(_start,_size)     do { } while (0)
-#define dma_cache_wback_inv(_start,_size) do { } while (0)
-#define flush_write_buffers()
-
-#endif
-
-#endif /* __KERNEL__ */
-
-#ifdef SLOW_IO_BY_JUMPING
-#define __SLOW_DOWN_IO "\njmp 1f\n1:\tjmp 1f\n1:"
-#else
-#define __SLOW_DOWN_IO "\noutb %%al,$0x80"
-#endif
-
-#ifdef REALLY_SLOW_IO
-#define __FULL_SLOW_DOWN_IO __SLOW_DOWN_IO __SLOW_DOWN_IO __SLOW_DOWN_IO __SLOW_DOWN_IO
-#else
-#define __FULL_SLOW_DOWN_IO __SLOW_DOWN_IO
-#endif
-
-#ifdef CONFIG_MULTIQUAD
-extern void *xquad_portio;    /* Where the IO area was mapped */
-#endif /* CONFIG_MULTIQUAD */
-
-/*
- * Talk about misusing macros..
- */
-#define __OUT1(s,x) \
-static inline void out##s(unsigned x value, unsigned short port) {
-
-#define __OUT2(s,s1,s2) \
-__asm__ __volatile__ ("out" #s " %" s1 "0,%" s2 "1"
-
-#if defined (CONFIG_MULTIQUAD) && !defined(STANDALONE)
-#define __OUTQ(s,ss,x)    /* Do the equivalent of the portio op on quads */ \
-static inline void out##ss(unsigned x value, unsigned short port) { \
-    if (xquad_portio) \
-        write##s(value, (unsigned long) xquad_portio + port); \
-    else              /* We're still in early boot, running on quad 0 */ \
-        out##ss##_local(value, port); \
-} \
-static inline void out##ss##_quad(unsigned x value, unsigned short port, int quad) { \
-    if (xquad_portio) \
-        write##s(value, (unsigned long) xquad_portio + (XQUAD_PORTIO_QUAD*quad)\
-            + port); \
-}
-
-#define __INQ(s,ss)       /* Do the equivalent of the portio op on quads */ \
-static inline RETURN_TYPE in##ss(unsigned short port) { \
-    if (xquad_portio) \
-        return read##s((unsigned long) xquad_portio + port); \
-    else              /* We're still in early boot, running on quad 0 */ \
-        return in##ss##_local(port); \
-} \
-static inline RETURN_TYPE in##ss##_quad(unsigned short port, int quad) { \
-    if (xquad_portio) \
-        return read##s((unsigned long) xquad_portio + (XQUAD_PORTIO_QUAD*quad)\
-            + port); \
-    else\
-        return 0;\
-}
-#endif /* CONFIG_MULTIQUAD && !STANDALONE */
-
-#if !defined(CONFIG_MULTIQUAD) || defined(STANDALONE)
-#define __OUT(s,s1,x) \
-__OUT1(s,x) __OUT2(s,s1,"w") : : "a" (value), "Nd" (port)); } \
-__OUT1(s##_p,x) __OUT2(s,s1,"w") __FULL_SLOW_DOWN_IO : : "a" (value), "Nd" (port));}
-#else
-/* Make the default portio routines operate on quad 0 */
-#define __OUT(s,s1,x) \
-__OUT1(s##_local,x) __OUT2(s,s1,"w") : : "a" (value), "Nd" (port)); } \
-__OUT1(s##_p_local,x) __OUT2(s,s1,"w") __FULL_SLOW_DOWN_IO : : "a" (value), "Nd" (port));} \
-__OUTQ(s,s,x) \
-__OUTQ(s,s##_p,x)
-#endif /* !CONFIG_MULTIQUAD || STANDALONE */
-
-#define __IN1(s) \
-static inline RETURN_TYPE in##s(unsigned short port) { RETURN_TYPE _v;
-
-#define __IN2(s,s1,s2) \
-__asm__ __volatile__ ("in" #s " %" s2 "1,%" s1 "0"
-
-#if !defined(CONFIG_MULTIQUAD) || defined(STANDALONE)
-#define __IN(s,s1,i...) \
-__IN1(s) __IN2(s,s1,"w") : "=a" (_v) : "Nd" (port) ,##i ); return _v; } \
-__IN1(s##_p) __IN2(s,s1,"w") __FULL_SLOW_DOWN_IO : "=a" (_v) : "Nd" (port) ,##i ); return _v; }
-#else
-/* Make the default portio routines operate on quad 0 */
-#define __IN(s,s1,i...) \
-__IN1(s##_local) __IN2(s,s1,"w") : "=a" (_v) : "Nd" (port) ,##i ); return _v; } \
-__IN1(s##_p_local) __IN2(s,s1,"w") __FULL_SLOW_DOWN_IO : "=a" (_v) : "Nd" (port) ,##i ); return _v; } \
-__INQ(s,s) \
-__INQ(s,s##_p)
-#endif /* !CONFIG_MULTIQUAD || STANDALONE */
-
-#define __INS(s) \
-static inline void ins##s(unsigned short port, void * addr, unsigned long count) \
-{ __asm__ __volatile__ ("rep ; ins" #s \
-: "=D" (addr), "=c" (count) : "d" (port),"0" (addr),"1" (count)); }
-
-#define __OUTS(s) \
-static inline void outs##s(unsigned short port, const void * addr, unsigned long count) \
-{ __asm__ __volatile__ ("rep ; outs" #s \
-: "=S" (addr), "=c" (count) : "d" (port),"0" (addr),"1" (count)); }
-
-#define RETURN_TYPE unsigned char
-__IN(b,"")
-#undef RETURN_TYPE
-#define RETURN_TYPE unsigned short
-__IN(w,"")
-#undef RETURN_TYPE
-#define RETURN_TYPE unsigned int
-__IN(l,"")
-#undef RETURN_TYPE
-
-__OUT(b,"b",char)
-__OUT(w,"w",short)
-__OUT(l,,int)
-
-__INS(b)
-__INS(w)
-__INS(l)
-
-__OUTS(b)
-__OUTS(w)
-__OUTS(l)
-
-#endif
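
The deleted __IN/__OUT machinery is dense, so as a reading aid, this is approximately what `__IN(b,"")` generates in the plain configuration (non-MULTIQUAD, REALLY_SLOW_IO and SLOW_IO_BY_JUMPING undefined). This is a hand expansion for illustration, not code from the tree; it is GCC/x86-specific inline assembly:

/* __IN1(b) __IN2(b,"","w") ... expands to the plain port read: */
static inline unsigned char inb(unsigned short port)
{
    unsigned char _v;
    __asm__ __volatile__ ("inb %w1,%0" : "=a" (_v) : "Nd" (port));
    return _v;
}

/* The _p variant appends __FULL_SLOW_DOWN_IO, i.e. a dummy write to
 * port 0x80 after the read, as a crude delay for slow devices: */
static inline unsigned char inb_p(unsigned short port)
{
    unsigned char _v;
    __asm__ __volatile__ ("inb %w1,%0" "\noutb %%al,$0x80"
                          : "=a" (_v) : "Nd" (port));
    return _v;
}

With the file deleted here, asm-xeno builds pick up the stock asm-i386/io.h via the symlink added in mkbuildtree below.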
--- a/xenolinux-2.4.21-sparse/mkbuildtree	Wed Jul 09 11:12:32 2003 +0000
+++ b/xenolinux-2.4.21-sparse/mkbuildtree	Wed Jul 09 11:18:53 2003 +0000
@@ -111,6 +111,7 @@ ln -sf ../asm-i386/hdreg.h
 ln -sf ../asm-i386/i387.h
 ln -sf ../asm-i386/ide.h
 ln -sf ../asm-i386/init.h
+ln -sf ../asm-i386/io.h
 ln -sf ../asm-i386/io_apic.h
 ln -sf ../asm-i386/ioctl.h
 ln -sf ../asm-i386/ioctls.h