xen-vtx-unstable
changeset 6532:112d44270733
Merge.
9.1 --- a/extras/mini-os/include/hypervisor.h Wed Aug 24 16:16:52 2005 -0700 9.2 +++ b/extras/mini-os/include/hypervisor.h Thu Aug 25 11:18:47 2005 -0700 9.3 @@ -80,17 +80,43 @@ static __inline__ int HYPERVISOR_set_tra 9.4 9.5 static __inline__ int HYPERVISOR_mmu_update(mmu_update_t *req, 9.6 int count, 9.7 - int *success_count) 9.8 + int *success_count, 9.9 + domid_t domid) 9.10 { 9.11 int ret; 9.12 + unsigned long ign1, ign2, ign3, ign4; 9.13 + 9.14 __asm__ __volatile__ ( 9.15 TRAP_INSTR 9.16 - : "=a" (ret) : "0" (__HYPERVISOR_mmu_update), 9.17 - _a1 (req), _a2 (count), _a3 (success_count) : "memory" ); 9.18 + : "=a" (ret), "=b" (ign1), "=c" (ign2), "=d" (ign3), "=S" (ign4) 9.19 + : "0" (__HYPERVISOR_mmu_update), "1" (req), "2" (count), 9.20 + "3" (success_count), "4" (domid) 9.21 + : "memory" ); 9.22 9.23 return ret; 9.24 } 9.25 9.26 + 9.27 +static __inline__ int HYPERVISOR_mmuext_op(struct mmuext_op *op, 9.28 + int count, 9.29 + int *success_count, 9.30 + domid_t domid) 9.31 +{ 9.32 + int ret; 9.33 + unsigned long ign1, ign2, ign3, ign4; 9.34 + 9.35 + __asm__ __volatile__ ( 9.36 + TRAP_INSTR 9.37 + : "=a" (ret), "=b" (ign1), "=c" (ign2), "=d" (ign3), "=S" (ign4) 9.38 + : "0" (__HYPERVISOR_mmuext_op), "1" (op), "2" (count), 9.39 + "3" (success_count), "4" (domid) 9.40 + : "memory" ); 9.41 + 9.42 + return ret; 9.43 +} 9.44 + 9.45 + 9.46 + 9.47 static __inline__ int HYPERVISOR_set_gdt(unsigned long *frame_list, int entries) 9.48 { 9.49 int ret;
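Both wrappers above now pass a domain id in the fourth argument register, matching the four-argument mmu_update/mmuext_op hypercall interface used later in this changeset. A minimal caller sketch (not part of the changeset), assuming the usual public-header layouts (mmu_update_t with ptr/val, struct mmuext_op with cmd/mfn) and DOMID_SELF, in the same style as mini-os' build_pagetable() below:

static int update_and_pin(unsigned long mach_pte, unsigned long new_val,
                          unsigned long l1_mfn)
{
    mmu_update_t req;
    struct mmuext_op pin;

    req.ptr = mach_pte;          /* machine address of the PTE slot */
    req.val = new_val;           /* mfn << PAGE_SHIFT | protection bits */
    if (HYPERVISOR_mmu_update(&req, 1, NULL, DOMID_SELF) < 0)
        return -1;

    pin.cmd = MMUEXT_PIN_L1_TABLE;
    pin.mfn = l1_mfn;            /* frame to be used as an L1 page table */
    return HYPERVISOR_mmuext_op(&pin, 1, NULL, DOMID_SELF);
}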
10.1 --- a/extras/mini-os/include/mm.h Wed Aug 24 16:16:52 2005 -0700 10.2 +++ b/extras/mini-os/include/mm.h Thu Aug 25 11:18:47 2005 -0700 10.3 @@ -43,13 +43,27 @@ 10.4 #define PADDR_MASK ((1UL << PADDR_BITS)-1) 10.5 #define VADDR_MASK ((1UL << VADDR_BITS)-1) 10.6 10.7 -#define pte_to_mfn(_pte) (((_pte) & (PADDR_MASK&PAGE_MASK)) >> PAGE_SHIFT) 10.8 +#define pte_to_mfn(_pte) (((_pte) & (PADDR_MASK&PAGE_MASK)) >> L1_PAGETABLE_SHIFT) 10.9 + 10.10 +#endif 10.11 + 10.12 + 10.13 + 10.14 +#ifdef __i386__ 10.15 + 10.16 +#define L1_PAGETABLE_SHIFT 12 10.17 +#define L2_PAGETABLE_SHIFT 22 10.18 + 10.19 +#define L1_PAGETABLE_ENTRIES 1024 10.20 +#define L2_PAGETABLE_ENTRIES 1024 10.21 +#endif 10.22 10.23 /* Given a virtual address, get an entry offset into a page table. */ 10.24 #define l1_table_offset(_a) \ 10.25 (((_a) >> L1_PAGETABLE_SHIFT) & (L1_PAGETABLE_ENTRIES - 1)) 10.26 #define l2_table_offset(_a) \ 10.27 (((_a) >> L2_PAGETABLE_SHIFT) & (L2_PAGETABLE_ENTRIES - 1)) 10.28 +#ifdef __x86_64__ 10.29 #define l3_table_offset(_a) \ 10.30 (((_a) >> L3_PAGETABLE_SHIFT) & (L3_PAGETABLE_ENTRIES - 1)) 10.31 #define l4_table_offset(_a) \ 10.32 @@ -67,13 +81,16 @@ 10.33 #define _PAGE_PSE 0x080UL 10.34 #define _PAGE_GLOBAL 0x100UL 10.35 10.36 -#define PAGE_SHIFT 12 10.37 -#define PAGE_SIZE (1UL << PAGE_SHIFT) 10.38 +#define L1_PROT (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED) 10.39 +#define L2_PROT (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_USER) 10.40 + 10.41 +#define PAGE_SIZE (1UL << L1_PAGETABLE_SHIFT) 10.42 +#define PAGE_SHIFT L1_PAGETABLE_SHIFT 10.43 #define PAGE_MASK (~(PAGE_SIZE-1)) 10.44 10.45 -#define PFN_UP(x) (((x) + PAGE_SIZE-1) >> PAGE_SHIFT) 10.46 -#define PFN_DOWN(x) ((x) >> PAGE_SHIFT) 10.47 -#define PFN_PHYS(x) ((x) << PAGE_SHIFT) 10.48 +#define PFN_UP(x) (((x) + PAGE_SIZE-1) >> L1_PAGETABLE_SHIFT) 10.49 +#define PFN_DOWN(x) ((x) >> L1_PAGETABLE_SHIFT) 10.50 +#define PFN_PHYS(x) ((x) << L1_PAGETABLE_SHIFT) 10.51 10.52 /* to align the pointer to the (next) page boundary */ 10.53 #define PAGE_ALIGN(addr) (((addr)+PAGE_SIZE-1)&PAGE_MASK) 10.54 @@ -83,14 +100,14 @@ extern unsigned long *phys_to_machine_ma 10.55 #define mfn_to_pfn(_mfn) (machine_to_phys_mapping[(_mfn)]) 10.56 static __inline__ unsigned long phys_to_machine(unsigned long phys) 10.57 { 10.58 - unsigned long machine = pfn_to_mfn(phys >> PAGE_SHIFT); 10.59 - machine = (machine << PAGE_SHIFT) | (phys & ~PAGE_MASK); 10.60 + unsigned long machine = pfn_to_mfn(phys >> L1_PAGETABLE_SHIFT); 10.61 + machine = (machine << L1_PAGETABLE_SHIFT) | (phys & ~PAGE_MASK); 10.62 return machine; 10.63 } 10.64 static __inline__ unsigned long machine_to_phys(unsigned long machine) 10.65 { 10.66 - unsigned long phys = mfn_to_pfn(machine >> PAGE_SHIFT); 10.67 - phys = (phys << PAGE_SHIFT) | (machine & ~PAGE_MASK); 10.68 + unsigned long phys = mfn_to_pfn(machine >> L1_PAGETABLE_SHIFT); 10.69 + phys = (phys << L1_PAGETABLE_SHIFT) | (machine & ~PAGE_MASK); 10.70 return phys; 10.71 } 10.72 10.73 @@ -105,7 +122,10 @@ static __inline__ unsigned long machine_ 10.74 #define __va to_virt 10.75 #define __pa to_phys 10.76 10.77 +#define virt_to_pfn(_virt) (PFN_DOWN(to_phys(_virt))) 10.78 + 10.79 void init_mm(void); 10.80 unsigned long alloc_pages(int order); 10.81 +int is_mfn_mapped(unsigned long mfn); 10.82 10.83 #endif /* _MM_H_ */
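Taken together, the macros above give the usual pseudo-physical to machine translation path. A small illustration (not part of the changeset), assuming phys_to_machine_mapping has already been pointed at start_info.mfn_list, as init_mm() in mm.c now does:

static unsigned long virt_to_mach(void *virt)
{
    unsigned long pfn    = virt_to_pfn(virt);      /* PFN_DOWN(to_phys(virt)) */
    unsigned long mfn    = pfn_to_mfn(pfn);        /* phys_to_machine_mapping lookup */
    unsigned long offset = (unsigned long)virt & ~PAGE_MASK;

    /* Equivalent to phys_to_machine(to_phys(virt)). */
    return (mfn << L1_PAGETABLE_SHIFT) | offset;
}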
12.1 --- a/extras/mini-os/kernel.c Wed Aug 24 16:16:52 2005 -0700 12.2 +++ b/extras/mini-os/kernel.c Thu Aug 25 11:18:47 2005 -0700 12.3 @@ -133,7 +133,7 @@ void start_kernel(start_info_t *si) 12.4 for ( ; ; ) 12.5 { 12.6 // HYPERVISOR_yield(); 12.7 - block(1); 12.8 + block(100); 12.9 i++; 12.10 } 12.11 }
13.1 --- a/extras/mini-os/mm.c Wed Aug 24 16:16:52 2005 -0700 13.2 +++ b/extras/mini-os/mm.c Thu Aug 25 11:18:47 2005 -0700 13.3 @@ -5,9 +5,9 @@ 13.4 * 13.5 * File: mm.c 13.6 * Author: Rolf Neugebauer (neugebar@dcs.gla.ac.uk) 13.7 - * Changes: 13.8 + * Changes: Grzegorz Milos 13.9 * 13.10 - * Date: Aug 2003 13.11 + * Date: Aug 2003, chages Aug 2005 13.12 * 13.13 * Environment: Xen Minimal OS 13.14 * Description: memory management related functions 13.15 @@ -41,86 +41,18 @@ 13.16 #include <types.h> 13.17 #include <lib.h> 13.18 13.19 + 13.20 +#ifdef MM_DEBUG 13.21 +#define DEBUG(_f, _a...) \ 13.22 + printk("MINI_OS(file=mm.c, line=%d) " _f "\n", __LINE__, ## _a) 13.23 +#else 13.24 +#define DEBUG(_f, _a...) ((void)0) 13.25 +#endif 13.26 + 13.27 unsigned long *phys_to_machine_mapping; 13.28 extern char *stack; 13.29 extern char _text, _etext, _edata, _end; 13.30 13.31 -static void init_page_allocator(unsigned long min, unsigned long max); 13.32 - 13.33 -void init_mm(void) 13.34 -{ 13.35 - 13.36 - unsigned long start_pfn, max_pfn, max_free_pfn; 13.37 - 13.38 - unsigned long *pgd = (unsigned long *)start_info.pt_base; 13.39 - 13.40 - printk("MM: Init\n"); 13.41 - 13.42 - printk(" _text: %p\n", &_text); 13.43 - printk(" _etext: %p\n", &_etext); 13.44 - printk(" _edata: %p\n", &_edata); 13.45 - printk(" stack start: %p\n", &stack); 13.46 - printk(" _end: %p\n", &_end); 13.47 - 13.48 - /* set up minimal memory infos */ 13.49 - start_pfn = PFN_UP(to_phys(&_end)); 13.50 - max_pfn = start_info.nr_pages; 13.51 - 13.52 - printk(" start_pfn: %lx\n", start_pfn); 13.53 - printk(" max_pfn: %lx\n", max_pfn); 13.54 - 13.55 - /* 13.56 - * we know where free tables start (start_pfn) and how many we 13.57 - * have (max_pfn). 13.58 - * 13.59 - * Currently the hypervisor stores page tables it providesin the 13.60 - * high region of the this memory range. 13.61 - * 13.62 - * next we work out how far down this goes (max_free_pfn) 13.63 - * 13.64 - * XXX this assumes the hypervisor provided page tables to be in 13.65 - * the upper region of our initial memory. I don't know if this 13.66 - * is always true. 13.67 - */ 13.68 - 13.69 - max_free_pfn = PFN_DOWN(to_phys(pgd)); 13.70 -#ifdef __i386__ 13.71 - { 13.72 - unsigned long *pgd = (unsigned long *)start_info.pt_base; 13.73 - unsigned long pte; 13.74 - int i; 13.75 - printk(" pgd(pa(pgd)): %lx(%lx)", (u_long)pgd, to_phys(pgd)); 13.76 - 13.77 - for ( i = 0; i < (HYPERVISOR_VIRT_START>>22); i++ ) 13.78 - { 13.79 - unsigned long pgde = *pgd++; 13.80 - if ( !(pgde & 1) ) continue; 13.81 - pte = machine_to_phys(pgde & PAGE_MASK); 13.82 - printk(" PT(%x): %lx(%lx)", i, (u_long)to_virt(pte), pte); 13.83 - if (PFN_DOWN(pte) <= max_free_pfn) 13.84 - max_free_pfn = PFN_DOWN(pte); 13.85 - } 13.86 - } 13.87 - max_free_pfn--; 13.88 - printk(" max_free_pfn: %lx\n", max_free_pfn); 13.89 - 13.90 - /* 13.91 - * now we can initialise the page allocator 13.92 - */ 13.93 - printk("MM: Initialise page allocator for %lx(%lx)-%lx(%lx)\n", 13.94 - (u_long)to_virt(PFN_PHYS(start_pfn)), PFN_PHYS(start_pfn), 13.95 - (u_long)to_virt(PFN_PHYS(max_free_pfn)), PFN_PHYS(max_free_pfn)); 13.96 - init_page_allocator(PFN_PHYS(start_pfn), PFN_PHYS(max_free_pfn)); 13.97 -#endif 13.98 - 13.99 - 13.100 - /* Now initialise the physical->machine mapping table. 
*/ 13.101 - 13.102 - 13.103 - printk("MM: done\n"); 13.104 - 13.105 - 13.106 -} 13.107 13.108 /********************* 13.109 * ALLOCATION BITMAP 13.110 @@ -214,6 +146,59 @@ static chunk_head_t free_tail[FREELIST_ 13.111 #define round_pgdown(_p) ((_p)&PAGE_MASK) 13.112 #define round_pgup(_p) (((_p)+(PAGE_SIZE-1))&PAGE_MASK) 13.113 13.114 +#ifdef MM_DEBUG 13.115 +/* 13.116 + * Prints allocation[0/1] for @nr_pages, starting at @start 13.117 + * address (virtual). 13.118 + */ 13.119 +static void print_allocation(void *start, int nr_pages) 13.120 +{ 13.121 + unsigned long pfn_start = virt_to_pfn(start); 13.122 + int count; 13.123 + for(count = 0; count < nr_pages; count++) 13.124 + if(allocated_in_map(pfn_start + count)) printk("1"); 13.125 + else printk("0"); 13.126 + 13.127 + printk("\n"); 13.128 +} 13.129 + 13.130 +/* 13.131 + * Prints chunks (marking them with letters) for @nr_pages starting 13.132 + * at @start (virtual). 13.133 + */ 13.134 +static void print_chunks(void *start, int nr_pages) 13.135 +{ 13.136 + char chunks[1001], current='A'; 13.137 + int order, count; 13.138 + chunk_head_t *head; 13.139 + unsigned long pfn_start = virt_to_pfn(start); 13.140 + 13.141 + memset(chunks, (int)'_', 1000); 13.142 + if(nr_pages > 1000) 13.143 + { 13.144 + DEBUG("Can only print 1000 pages. Increase buffer size."); 13.145 + } 13.146 + 13.147 + for(order=0; order < FREELIST_SIZE; order++) 13.148 + { 13.149 + head = free_head[order]; 13.150 + while(!FREELIST_EMPTY(head)) 13.151 + { 13.152 + for(count = 0; count < 1<< head->level; count++) 13.153 + { 13.154 + if(count + virt_to_pfn(head) - pfn_start < 1000) 13.155 + chunks[count + virt_to_pfn(head) - pfn_start] = current; 13.156 + } 13.157 + head = head->next; 13.158 + current++; 13.159 + } 13.160 + } 13.161 + chunks[nr_pages] = '\0'; 13.162 + printk("%s\n", chunks); 13.163 +} 13.164 +#endif 13.165 + 13.166 + 13.167 13.168 /* 13.169 * Initialise allocator, placing addresses [@min,@max] in free pool. 
13.170 @@ -328,3 +313,198 @@ unsigned long alloc_pages(int order) 13.171 return 0; 13.172 } 13.173 13.174 +void free_pages(void *pointer, int order) 13.175 +{ 13.176 + chunk_head_t *freed_ch, *to_merge_ch; 13.177 + chunk_tail_t *freed_ct; 13.178 + unsigned long mask; 13.179 + 13.180 + /* First free the chunk */ 13.181 + map_free(virt_to_pfn(pointer), 1 << order); 13.182 + 13.183 + /* Create free chunk */ 13.184 + freed_ch = (chunk_head_t *)pointer; 13.185 + freed_ct = (chunk_tail_t *)((char *)pointer + (1<<(order + PAGE_SHIFT)))-1; 13.186 + 13.187 + /* Now, possibly we can conseal chunks together */ 13.188 + while(order < FREELIST_SIZE) 13.189 + { 13.190 + mask = 1 << (order + PAGE_SHIFT); 13.191 + if((unsigned long)freed_ch & mask) 13.192 + { 13.193 + to_merge_ch = (chunk_head_t *)((char *)freed_ch - mask); 13.194 + if(allocated_in_map(virt_to_pfn(to_merge_ch)) || 13.195 + to_merge_ch->level != order) 13.196 + break; 13.197 + 13.198 + /* Merge with predecessor */ 13.199 + freed_ch = to_merge_ch; 13.200 + } 13.201 + else 13.202 + { 13.203 + to_merge_ch = (chunk_head_t *)((char *)freed_ch + mask); 13.204 + if(allocated_in_map(virt_to_pfn(to_merge_ch)) || 13.205 + to_merge_ch->level != order) 13.206 + break; 13.207 + 13.208 + /* Merge with successor */ 13.209 + freed_ct = (chunk_tail_t *)((char *)to_merge_ch + mask); 13.210 + } 13.211 + 13.212 + /* We are commited to merging, unlink the chunk */ 13.213 + *(to_merge_ch->pprev) = to_merge_ch->next; 13.214 + to_merge_ch->next->pprev = to_merge_ch->pprev; 13.215 + 13.216 + order++; 13.217 + } 13.218 + 13.219 + /* Link the new chunk */ 13.220 + freed_ch->level = order; 13.221 + freed_ch->next = free_head[order]; 13.222 + freed_ch->pprev = &free_head[order]; 13.223 + freed_ct->level = order; 13.224 + 13.225 + freed_ch->next->pprev = &freed_ch->next; 13.226 + free_head[order] = freed_ch; 13.227 + 13.228 +} 13.229 +void build_pagetable(unsigned long *start_pfn, unsigned long *max_pfn) 13.230 +{ 13.231 + unsigned long pfn_to_map, pt_frame; 13.232 + unsigned long mach_ptd, max_mach_ptd; 13.233 + int count; 13.234 + unsigned long mach_pte, virt_pte; 13.235 + unsigned long *ptd = (unsigned long *)start_info.pt_base; 13.236 + mmu_update_t mmu_updates[L1_PAGETABLE_ENTRIES + 1]; 13.237 + struct mmuext_op pin_request; 13.238 + 13.239 + /* Firstly work out what is the first pfn that is not yet in page tables 13.240 + NB. 
Assuming that builder fills whole pt_frames (which it does at the 13.241 + moment) 13.242 + */ 13.243 + pfn_to_map = (start_info.nr_pt_frames - 1) * L1_PAGETABLE_ENTRIES; 13.244 + DEBUG("start_pfn=%ld, first pfn_to_map %ld, max_pfn=%ld", 13.245 + *start_pfn, pfn_to_map, *max_pfn); 13.246 + 13.247 + /* Machine address of page table directory */ 13.248 + mach_ptd = phys_to_machine(to_phys(start_info.pt_base)); 13.249 + mach_ptd += sizeof(void *) * 13.250 + l2_table_offset((unsigned long)to_virt(PFN_PHYS(pfn_to_map))); 13.251 + 13.252 + max_mach_ptd = sizeof(void *) * 13.253 + l2_table_offset((unsigned long)to_virt(PFN_PHYS(*max_pfn))); 13.254 + 13.255 + /* Check that we are not trying to access Xen region */ 13.256 + if(max_mach_ptd > sizeof(void *) * l2_table_offset(HYPERVISOR_VIRT_START)) 13.257 + { 13.258 + printk("WARNING: mini-os will not use all the memory supplied\n"); 13.259 + max_mach_ptd = sizeof(void *) * l2_table_offset(HYPERVISOR_VIRT_START); 13.260 + *max_pfn = virt_to_pfn(HYPERVISOR_VIRT_START - PAGE_SIZE); 13.261 + } 13.262 + max_mach_ptd += phys_to_machine(to_phys(start_info.pt_base)); 13.263 + DEBUG("Max_mach_ptd 0x%lx", max_mach_ptd); 13.264 + 13.265 + pt_frame = *start_pfn; 13.266 + /* Should not happen - no empty, mapped pages */ 13.267 + if(pt_frame >= pfn_to_map) 13.268 + { 13.269 + printk("ERROR: Not even a single empty, mapped page\n"); 13.270 + *(int*)0=0; 13.271 + } 13.272 + 13.273 + while(mach_ptd < max_mach_ptd) 13.274 + { 13.275 + /* Correct protection needs to be set for the new page table frame */ 13.276 + virt_pte = (unsigned long)to_virt(PFN_PHYS(pt_frame)); 13.277 + mach_pte = ptd[l2_table_offset(virt_pte)] & ~(PAGE_SIZE-1); 13.278 + mach_pte += sizeof(void *) * l1_table_offset(virt_pte); 13.279 + DEBUG("New page table page: pfn=0x%lx, mfn=0x%lx, virt_pte=0x%lx, " 13.280 + "mach_pte=0x%lx", pt_frame, pfn_to_mfn(pt_frame), 13.281 + virt_pte, mach_pte); 13.282 + 13.283 + /* Update the entry */ 13.284 + mmu_updates[0].ptr = mach_pte; 13.285 + mmu_updates[0].val = pfn_to_mfn(pt_frame) << PAGE_SHIFT | 13.286 + (L1_PROT & ~_PAGE_RW); 13.287 + if(HYPERVISOR_mmu_update(mmu_updates, 1, NULL, DOMID_SELF) < 0) 13.288 + { 13.289 + printk("PTE for new page table page could not be updated\n"); 13.290 + *(int*)0=0; 13.291 + } 13.292 + 13.293 + /* Pin the page to provide correct protection */ 13.294 + pin_request.cmd = MMUEXT_PIN_L1_TABLE; 13.295 + pin_request.mfn = pfn_to_mfn(pt_frame); 13.296 + if(HYPERVISOR_mmuext_op(&pin_request, 1, NULL, DOMID_SELF) < 0) 13.297 + { 13.298 + printk("ERROR: pinning failed\n"); 13.299 + *(int*)0=0; 13.300 + } 13.301 + 13.302 + /* Now fill the new page table page with entries. 13.303 + Update the page directory as well. 
*/ 13.304 + count = 0; 13.305 + mmu_updates[count].ptr = mach_ptd; 13.306 + mmu_updates[count].val = pfn_to_mfn(pt_frame) << PAGE_SHIFT | 13.307 + L2_PROT; 13.308 + count++; 13.309 + mach_ptd += sizeof(void *); 13.310 + mach_pte = phys_to_machine(PFN_PHYS(pt_frame++)); 13.311 + 13.312 + for(;count <= L1_PAGETABLE_ENTRIES && pfn_to_map <= *max_pfn; count++) 13.313 + { 13.314 + mmu_updates[count].ptr = mach_pte; 13.315 + mmu_updates[count].val = 13.316 + pfn_to_mfn(pfn_to_map++) << PAGE_SHIFT | L1_PROT; 13.317 + if(count == 1) DEBUG("mach_pte 0x%lx", mach_pte); 13.318 + mach_pte += sizeof(void *); 13.319 + } 13.320 + if(HYPERVISOR_mmu_update(mmu_updates, count, NULL, DOMID_SELF) < 0) 13.321 + { 13.322 + printk("ERROR: mmu_update failed\n"); 13.323 + *(int*)0=0; 13.324 + } 13.325 + (*start_pfn)++; 13.326 + } 13.327 + 13.328 + *start_pfn = pt_frame; 13.329 +} 13.330 + 13.331 +void init_mm(void) 13.332 +{ 13.333 + 13.334 + unsigned long start_pfn, max_pfn; 13.335 + 13.336 + printk("MM: Init\n"); 13.337 + 13.338 + printk(" _text: %p\n", &_text); 13.339 + printk(" _etext: %p\n", &_etext); 13.340 + printk(" _edata: %p\n", &_edata); 13.341 + printk(" stack start: %p\n", &stack); 13.342 + printk(" _end: %p\n", &_end); 13.343 + 13.344 + /* set up minimal memory infos */ 13.345 + phys_to_machine_mapping = (unsigned long *)start_info.mfn_list; 13.346 + 13.347 + /* First page follows page table pages and 3 more pages (store page etc) */ 13.348 + start_pfn = PFN_UP(__pa(start_info.pt_base)) + start_info.nr_pt_frames + 3; 13.349 + max_pfn = start_info.nr_pages; 13.350 + 13.351 + printk(" start_pfn: %lx\n", start_pfn); 13.352 + printk(" max_pfn: %lx\n", max_pfn); 13.353 + 13.354 + 13.355 + build_pagetable(&start_pfn, &max_pfn); 13.356 + 13.357 +#ifdef __i386__ 13.358 + /* 13.359 + * now we can initialise the page allocator 13.360 + */ 13.361 + printk("MM: Initialise page allocator for %lx(%lx)-%lx(%lx)\n", 13.362 + (u_long)to_virt(PFN_PHYS(start_pfn)), PFN_PHYS(start_pfn), 13.363 + (u_long)to_virt(PFN_PHYS(max_pfn)), PFN_PHYS(max_pfn)); 13.364 + init_page_allocator(PFN_PHYS(start_pfn), PFN_PHYS(max_pfn)); 13.365 +#endif 13.366 + 13.367 + printk("MM: done\n"); 13.368 +}
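The new free_pages() is the missing half of the binary-buddy allocator: it clears the allocation bitmap for the chunk, then repeatedly merges it with its buddy while the buddy is free and of the same order, before relinking the result on the appropriate free list. A minimal round-trip sketch (names as in mm.c; not part of the changeset):

void allocator_demo(void)
{
    /* alloc_pages() returns a virtual address, 0 on failure. */
    unsigned long addr = alloc_pages(1);      /* order 1: two contiguous pages */
    if (addr == 0)
        return;

    memset((void *)addr, 0, 2 * PAGE_SIZE);

    /* The pages are coalesced with any free buddies of the same order. */
    free_pages((void *)addr, 1);
}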
51.1 --- a/linux-2.6-xen-sparse/arch/xen/i386/kernel/cpu/common.c Wed Aug 24 16:16:52 2005 -0700 51.2 +++ b/linux-2.6-xen-sparse/arch/xen/i386/kernel/cpu/common.c Thu Aug 25 11:18:47 2005 -0700 51.3 @@ -19,11 +19,13 @@ 51.4 51.5 #include "cpu.h" 51.6 51.7 +#ifndef CONFIG_XEN 51.8 DEFINE_PER_CPU(struct desc_struct, cpu_gdt_table[GDT_ENTRIES]); 51.9 EXPORT_PER_CPU_SYMBOL(cpu_gdt_table); 51.10 51.11 DEFINE_PER_CPU(unsigned char, cpu_16bit_stack[CPU_16BIT_STACK_SIZE]); 51.12 EXPORT_PER_CPU_SYMBOL(cpu_16bit_stack); 51.13 +#endif 51.14 51.15 static int cachesize_override __initdata = -1; 51.16 static int disable_x86_fxsr __initdata = 0;
64.1 --- a/linux-2.6-xen-sparse/arch/xen/i386/kernel/smpboot.c Wed Aug 24 16:16:52 2005 -0700 64.2 +++ b/linux-2.6-xen-sparse/arch/xen/i386/kernel/smpboot.c Thu Aug 25 11:18:47 2005 -0700 64.3 @@ -131,15 +131,7 @@ static void map_cpu_to_logical_apicid(vo 64.4 */ 64.5 void __init smp_alloc_memory(void) 64.6 { 64.7 -#if 1 64.8 - int cpu; 64.9 - 64.10 - for (cpu = 1; cpu < NR_CPUS; cpu++) { 64.11 - cpu_gdt_descr[cpu].address = (unsigned long) 64.12 - alloc_bootmem_low_pages(PAGE_SIZE); 64.13 - /* XXX free unused pages later */ 64.14 - } 64.15 -#else 64.16 +#if 0 64.17 trampoline_base = (void *) alloc_bootmem_low_pages(PAGE_SIZE); 64.18 /* 64.19 * Has to be in very low memory so we can execute 64.20 @@ -861,8 +853,8 @@ static int __init do_boot_cpu(int apicid 64.21 atomic_set(&init_deasserted, 0); 64.22 64.23 #if 1 64.24 - if (cpu_gdt_descr[0].size > PAGE_SIZE) 64.25 - BUG(); 64.26 + cpu_gdt_descr[cpu].address = __get_free_page(GFP_KERNEL); 64.27 + BUG_ON(cpu_gdt_descr[0].size > PAGE_SIZE); 64.28 cpu_gdt_descr[cpu].size = cpu_gdt_descr[0].size; 64.29 printk("GDT: copying %d bytes from %lx to %lx\n", 64.30 cpu_gdt_descr[0].size, cpu_gdt_descr[0].address,
67.1 --- a/linux-2.6-xen-sparse/arch/xen/i386/kernel/traps.c Wed Aug 24 16:16:52 2005 -0700 67.2 +++ b/linux-2.6-xen-sparse/arch/xen/i386/kernel/traps.c Thu Aug 25 11:18:47 2005 -0700 67.3 @@ -871,6 +871,7 @@ fastcall void do_simd_coprocessor_error( 67.4 } 67.5 } 67.6 67.7 +#ifndef CONFIG_XEN 67.8 fastcall void setup_x86_bogus_stack(unsigned char * stk) 67.9 { 67.10 unsigned long *switch16_ptr, *switch32_ptr; 67.11 @@ -915,6 +916,7 @@ fastcall unsigned char * fixup_x86_bogus 67.12 memcpy(stack32, stack16, len); 67.13 return stack32; 67.14 } 67.15 +#endif 67.16 67.17 /* 67.18 * 'math_state_restore()' saves the current math information in the
77.1 --- a/linux-2.6-xen-sparse/arch/xen/kernel/gnttab.c Wed Aug 24 16:16:52 2005 -0700 77.2 +++ b/linux-2.6-xen-sparse/arch/xen/kernel/gnttab.c Thu Aug 25 11:18:47 2005 -0700 77.3 @@ -34,9 +34,11 @@ 77.4 77.5 77.6 EXPORT_SYMBOL(gnttab_grant_foreign_access); 77.7 +EXPORT_SYMBOL(gnttab_end_foreign_access_ref); 77.8 EXPORT_SYMBOL(gnttab_end_foreign_access); 77.9 EXPORT_SYMBOL(gnttab_query_foreign_access); 77.10 EXPORT_SYMBOL(gnttab_grant_foreign_transfer); 77.11 +EXPORT_SYMBOL(gnttab_end_foreign_transfer_ref); 77.12 EXPORT_SYMBOL(gnttab_end_foreign_transfer); 77.13 EXPORT_SYMBOL(gnttab_alloc_grant_references); 77.14 EXPORT_SYMBOL(gnttab_free_grant_references); 77.15 @@ -160,7 +162,7 @@ gnttab_query_foreign_access(grant_ref_t 77.16 } 77.17 77.18 void 77.19 -gnttab_end_foreign_access(grant_ref_t ref, int readonly) 77.20 +gnttab_end_foreign_access_ref(grant_ref_t ref, int readonly) 77.21 { 77.22 u16 flags, nflags; 77.23 77.24 @@ -170,7 +172,12 @@ gnttab_end_foreign_access(grant_ref_t re 77.25 printk(KERN_ALERT "WARNING: g.e. still in use!\n"); 77.26 } 77.27 while ( (nflags = synch_cmpxchg(&shared[ref].flags, flags, 0)) != flags ); 77.28 +} 77.29 77.30 +void 77.31 +gnttab_end_foreign_access(grant_ref_t ref, int readonly) 77.32 +{ 77.33 + gnttab_end_foreign_access_ref(ref, readonly); 77.34 put_free_entry(ref); 77.35 } 77.36 77.37 @@ -201,20 +208,13 @@ gnttab_grant_foreign_transfer_ref(grant_ 77.38 } 77.39 77.40 unsigned long 77.41 -gnttab_end_foreign_transfer(grant_ref_t ref) 77.42 +gnttab_end_foreign_transfer_ref(grant_ref_t ref) 77.43 { 77.44 unsigned long frame = 0; 77.45 u16 flags; 77.46 77.47 flags = shared[ref].flags; 77.48 -#ifdef CONFIG_XEN_NETDEV_GRANT_RX 77.49 - /* 77.50 - * But can't flags == (GTF_accept_transfer | GTF_transfer_completed) 77.51 - * if gnttab_donate executes without interruption??? 77.52 - */ 77.53 -#else 77.54 - ASSERT(flags == (GTF_accept_transfer | GTF_transfer_committed)); 77.55 -#endif 77.56 + 77.57 /* 77.58 * If a transfer is committed then wait for the frame address to appear. 77.59 * Otherwise invalidate the grant entry against future use. 77.60 @@ -224,8 +224,14 @@ gnttab_end_foreign_transfer(grant_ref_t 77.61 while ( unlikely((frame = shared[ref].frame) == 0) ) 77.62 cpu_relax(); 77.63 77.64 + return frame; 77.65 +} 77.66 + 77.67 +unsigned long 77.68 +gnttab_end_foreign_transfer(grant_ref_t ref) 77.69 +{ 77.70 + unsigned long frame = gnttab_end_foreign_transfer_ref(ref); 77.71 put_free_entry(ref); 77.72 - 77.73 return frame; 77.74 } 77.75
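Splitting the teardown paths into *_ref variants lets a driver revoke a mapping, or collect a transferred frame, while keeping the grant reference allocated for reuse; only the original functions also hand the entry back via put_free_entry(). A hedged usage sketch (ref assumed to have been set up earlier with gnttab_grant_foreign_transfer()):

static unsigned long take_frame_keep_ref(grant_ref_t ref)
{
    /* Wait for the transferred frame as before, but do not recycle 'ref'. */
    unsigned long frame = gnttab_end_foreign_transfer_ref(ref);

    /* 'ref' stays allocated and can be re-armed, e.g. with
     * gnttab_grant_foreign_transfer_ref(), for the next transfer. */
    return frame;
}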
78.1 --- a/linux-2.6-xen-sparse/arch/xen/kernel/reboot.c Wed Aug 24 16:16:52 2005 -0700 78.2 +++ b/linux-2.6-xen-sparse/arch/xen/kernel/reboot.c Thu Aug 25 11:18:47 2005 -0700 78.3 @@ -129,14 +129,6 @@ static int __do_suspend(void *ignore) 78.4 /* Hmmm... a cleaner interface to suspend/resume blkdevs would be nice. */ 78.5 /* XXX SMH: yes it would :-( */ 78.6 78.7 -#ifdef CONFIG_XEN_NETDEV_FRONTEND 78.8 - extern void netif_suspend(void); 78.9 - extern void netif_resume(void); 78.10 -#else 78.11 -#define netif_suspend() do{}while(0) 78.12 -#define netif_resume() do{}while(0) 78.13 -#endif 78.14 - 78.15 #ifdef CONFIG_XEN_USB_FRONTEND 78.16 extern void usbif_resume(); 78.17 #else 78.18 @@ -218,8 +210,6 @@ static int __do_suspend(void *ignore) 78.19 kmem_cache_shrink(pgd_cache); 78.20 #endif 78.21 78.22 - netif_suspend(); 78.23 - 78.24 time_suspend(); 78.25 78.26 #ifdef CONFIG_SMP 78.27 @@ -277,8 +267,6 @@ static int __do_suspend(void *ignore) 78.28 78.29 time_resume(); 78.30 78.31 - netif_resume(); 78.32 - 78.33 usbif_resume(); 78.34 78.35 for_each_cpu_mask(i, prev_present_cpus) {
87.1 --- a/linux-2.6-xen-sparse/arch/xen/x86_64/kernel/early_printk.c Wed Aug 24 16:16:52 2005 -0700 87.2 +++ b/linux-2.6-xen-sparse/arch/xen/x86_64/kernel/early_printk.c Thu Aug 25 11:18:47 2005 -0700 87.3 @@ -6,6 +6,8 @@ 87.4 #include <asm/io.h> 87.5 #include <asm/processor.h> 87.6 87.7 +#ifndef CONFIG_XEN 87.8 + 87.9 /* Simple VGA output */ 87.10 87.11 #ifdef __i386__ 87.12 @@ -59,7 +61,6 @@ static struct console early_vga_console 87.13 .index = -1, 87.14 }; 87.15 87.16 -#ifndef CONFIG_XEN 87.17 /* Serial functions loosely based on a similar package from Klaus P. Gerlicher */ 87.18 87.19 static int early_serial_base = 0x3f8; /* ttyS0 */ 87.20 @@ -148,7 +149,8 @@ static __init void early_serial_init(cha 87.21 outb((divisor >> 8) & 0xff, early_serial_base + DLH); 87.22 outb(c & ~DLAB, early_serial_base + LCR); 87.23 } 87.24 -#else 87.25 + 87.26 +#else /* CONFIG_XEN */ 87.27 87.28 static void 87.29 early_serial_write(struct console *con, const char *s, unsigned count) 87.30 @@ -167,6 +169,13 @@ early_serial_write(struct console *con, 87.31 static __init void early_serial_init(char *s) 87.32 { 87.33 } 87.34 + 87.35 +/* 87.36 + * No early VGA console on Xen, as we do not have convenient ISA-space 87.37 + * mappings. Someone should fix this for domain 0. For now, use fake serial. 87.38 + */ 87.39 +#define early_vga_console early_serial_console 87.40 + 87.41 #endif 87.42 87.43 static struct console early_serial_console = {
91.1 --- a/linux-2.6-xen-sparse/arch/xen/x86_64/kernel/head.S Wed Aug 24 16:16:52 2005 -0700 91.2 +++ b/linux-2.6-xen-sparse/arch/xen/x86_64/kernel/head.S Thu Aug 25 11:18:47 2005 -0700 91.3 @@ -206,11 +206,13 @@ ENTRY(cpu_gdt_table) 91.4 .quad 0,0,0 /* three TLS descriptors */ 91.5 .quad 0 /* unused now? __KERNEL16_CS - 16bit PM for S3 wakeup. */ 91.6 91.7 -gdt_end: 91.8 +gdt_end: 91.9 +#if 0 91.10 /* asm/segment.h:GDT_ENTRIES must match this */ 91.11 /* This should be a multiple of the cache line size */ 91.12 /* GDTs of other CPUs: */ 91.13 .fill (GDT_SIZE * NR_CPUS) - (gdt_end - cpu_gdt_table) 91.14 +#endif 91.15 91.16 .org 0x8000 91.17 ENTRY(empty_zero_page)
97.1 --- a/linux-2.6-xen-sparse/arch/xen/x86_64/kernel/setup.c Wed Aug 24 16:16:52 2005 -0700 97.2 +++ b/linux-2.6-xen-sparse/arch/xen/x86_64/kernel/setup.c Thu Aug 25 11:18:47 2005 -0700 97.3 @@ -536,48 +536,7 @@ static inline void copy_edd(void) 97.4 } 97.5 #endif 97.6 97.7 -#ifdef CONFIG_XEN 97.8 -#define reserve_ebda_region() void(0) 97.9 - 97.10 -static void __init print_memory_map(char *who) 97.11 -{ 97.12 - int i; 97.13 - 97.14 - for (i = 0; i < e820.nr_map; i++) { 97.15 - early_printk(" %s: %016Lx - %016Lx ", who, 97.16 - e820.map[i].addr, 97.17 - e820.map[i].addr + e820.map[i].size); 97.18 - switch (e820.map[i].type) { 97.19 - case E820_RAM: early_printk("(usable)\n"); 97.20 - break; 97.21 - case E820_RESERVED: 97.22 - early_printk("(reserved)\n"); 97.23 - break; 97.24 - case E820_ACPI: 97.25 - early_printk("(ACPI data)\n"); 97.26 - break; 97.27 - case E820_NVS: 97.28 - early_printk("(ACPI NVS)\n"); 97.29 - break; 97.30 - default: early_printk("type %u\n", e820.map[i].type); 97.31 - break; 97.32 - } 97.33 - } 97.34 -} 97.35 - 97.36 -void __init smp_alloc_memory(void) 97.37 -{ 97.38 - int cpu; 97.39 - 97.40 - for (cpu = 1; cpu < NR_CPUS; cpu++) { 97.41 - cpu_gdt_descr[cpu].address = (unsigned long) 97.42 - alloc_bootmem_low_pages(PAGE_SIZE); 97.43 - /* XXX free unused pages later */ 97.44 - } 97.45 -} 97.46 - 97.47 - 97.48 -#else 97.49 +#ifndef CONFIG_XEN 97.50 #define EBDA_ADDR_POINTER 0x40E 97.51 static void __init reserve_ebda_region(void) 97.52 { 97.53 @@ -628,7 +587,6 @@ void __init setup_arch(char **cmdline_p) 97.54 VMASST_TYPE_writable_pagetables); 97.55 97.56 ARCH_SETUP 97.57 - print_memory_map(machine_specific_memory_setup()); 97.58 #else 97.59 ROOT_DEV = old_decode_dev(ORIG_ROOT_DEV); 97.60 drive_info = DRIVE_INFO; 97.61 @@ -744,9 +702,6 @@ void __init setup_arch(char **cmdline_p) 97.62 } 97.63 } 97.64 #endif 97.65 -#ifdef CONFIG_SMP 97.66 - smp_alloc_memory(); 97.67 -#endif 97.68 #else /* CONFIG_XEN */ 97.69 #ifdef CONFIG_BLK_DEV_INITRD 97.70 if (LOADER_TYPE && INITRD_START) {
98.1 --- a/linux-2.6-xen-sparse/arch/xen/x86_64/kernel/setup64.c Wed Aug 24 16:16:52 2005 -0700 98.2 +++ b/linux-2.6-xen-sparse/arch/xen/x86_64/kernel/setup64.c Thu Aug 25 11:18:47 2005 -0700 98.3 @@ -286,10 +286,10 @@ void __init cpu_init (void) 98.4 98.5 memcpy(me->thread.tls_array, cpu_gdt_table[cpu], GDT_ENTRY_TLS_ENTRIES * 8); 98.6 #else 98.7 - memcpy(me->thread.tls_array, &get_cpu_gdt_table(cpu)[GDT_ENTRY_TLS_MIN], 98.8 + memcpy(me->thread.tls_array, &get_cpu_gdt_table(cpu)[GDT_ENTRY_TLS_MIN], 98.9 GDT_ENTRY_TLS_ENTRIES * 8); 98.10 98.11 - cpu_gdt_init(&cpu_gdt_descr[cpu]); 98.12 + cpu_gdt_init(&cpu_gdt_descr[cpu]); 98.13 #endif 98.14 98.15 /*
100.1 --- a/linux-2.6-xen-sparse/arch/xen/x86_64/kernel/smpboot.c Wed Aug 24 16:16:52 2005 -0700 100.2 +++ b/linux-2.6-xen-sparse/arch/xen/x86_64/kernel/smpboot.c Thu Aug 25 11:18:47 2005 -0700 100.3 @@ -739,8 +739,8 @@ static int __cpuinit do_boot_cpu(int cpu 100.4 atomic_set(&init_deasserted, 0); 100.5 100.6 #ifdef CONFIG_XEN 100.7 - if (cpu_gdt_descr[0].size > PAGE_SIZE) 100.8 - BUG(); 100.9 + cpu_gdt_descr[cpu].address = __get_free_page(GFP_KERNEL); 100.10 + BUG_ON(cpu_gdt_descr[0].size > PAGE_SIZE); 100.11 cpu_gdt_descr[cpu].size = cpu_gdt_descr[0].size; 100.12 memcpy((void *)cpu_gdt_descr[cpu].address, 100.13 (void *)cpu_gdt_descr[0].address, cpu_gdt_descr[0].size); 100.14 @@ -798,6 +798,8 @@ static int __cpuinit do_boot_cpu(int cpu 100.15 ctxt.ctrlreg[3] = virt_to_mfn(init_level4_pgt) << PAGE_SHIFT; 100.16 100.17 boot_error = HYPERVISOR_boot_vcpu(cpu, &ctxt); 100.18 + if (boot_error) 100.19 + printk("boot error: %ld\n", boot_error); 100.20 100.21 if (!boot_error) { 100.22 /*
106.1 --- a/linux-2.6-xen-sparse/arch/xen/x86_64/mm/init.c Wed Aug 24 16:16:52 2005 -0700 106.2 +++ b/linux-2.6-xen-sparse/arch/xen/x86_64/mm/init.c Thu Aug 25 11:18:47 2005 -0700 106.3 @@ -536,70 +536,38 @@ static void __init find_early_table_spac 106.4 round_up(ptes * 8, PAGE_SIZE); 106.5 } 106.6 106.7 -static void xen_copy_pt(void) 106.8 -{ 106.9 - unsigned long va = __START_KERNEL_map; 106.10 - unsigned long addr, *pte_page; 106.11 - int i; 106.12 - pud_t *pud; pmd_t *pmd; pte_t *pte; 106.13 - unsigned long *page = (unsigned long *) init_level4_pgt; 106.14 - 106.15 - addr = (unsigned long) page[pgd_index(va)]; 106.16 - addr_to_page(addr, page); 106.17 - 106.18 - pud = (pud_t *) &page[pud_index(va)]; 106.19 - addr = page[pud_index(va)]; 106.20 - addr_to_page(addr, page); 106.21 - 106.22 - level3_kernel_pgt[pud_index(va)] = 106.23 - __pud(__pa_symbol(level2_kernel_pgt) | _KERNPG_TABLE | _PAGE_USER); 106.24 - 106.25 - for (;;) { 106.26 - pmd = (pmd_t *) &page[pmd_index(va)]; 106.27 - if (pmd_present(*pmd)) { 106.28 - level2_kernel_pgt[pmd_index(va)] = *pmd; 106.29 - /* 106.30 - * if pmd is valid, check pte. 106.31 - */ 106.32 - addr = page[pmd_index(va)]; 106.33 - addr_to_page(addr, pte_page); 106.34 - 106.35 - for (i = 0; i < PTRS_PER_PTE; i++) { 106.36 - pte = (pte_t *) &pte_page[pte_index(va)]; 106.37 - if (pte_present(*pte)) 106.38 - va += PAGE_SIZE; 106.39 - else 106.40 - break; 106.41 - } 106.42 - 106.43 - } else 106.44 - break; 106.45 - } 106.46 - 106.47 - init_level4_pgt[pgd_index(__START_KERNEL_map)] = 106.48 - mk_kernel_pgd(__pa_symbol(level3_kernel_pgt)); 106.49 -} 106.50 - 106.51 void __init xen_init_pt(void) 106.52 { 106.53 + unsigned long addr, *page; 106.54 int i; 106.55 106.56 for (i = 0; i < NR_CPUS; i++) 106.57 per_cpu(cur_pgd, i) = init_mm.pgd; 106.58 106.59 - memcpy((void *)init_level4_pgt, 106.60 - (void *)xen_start_info.pt_base, PAGE_SIZE); 106.61 - 106.62 + memset((void *)init_level4_pgt, 0, PAGE_SIZE); 106.63 memset((void *)level3_kernel_pgt, 0, PAGE_SIZE); 106.64 memset((void *)level2_kernel_pgt, 0, PAGE_SIZE); 106.65 106.66 - xen_copy_pt(); 106.67 + /* Find the initial pte page that was built for us. */ 106.68 + page = (unsigned long *)xen_start_info.pt_base; 106.69 + addr = page[pgd_index(__START_KERNEL_map)]; 106.70 + addr_to_page(addr, page); 106.71 + addr = page[pud_index(__START_KERNEL_map)]; 106.72 + addr_to_page(addr, page); 106.73 + 106.74 + /* Construct mapping of initial pte page in our own directories. 
*/ 106.75 + init_level4_pgt[pgd_index(__START_KERNEL_map)] = 106.76 + mk_kernel_pgd(__pa_symbol(level3_kernel_pgt)); 106.77 + level3_kernel_pgt[pud_index(__START_KERNEL_map)] = 106.78 + __pud(__pa_symbol(level2_kernel_pgt) | 106.79 + _KERNPG_TABLE | _PAGE_USER); 106.80 + memcpy((void *)level2_kernel_pgt, page, PAGE_SIZE); 106.81 106.82 make_page_readonly(init_level4_pgt); 106.83 + make_page_readonly(init_level4_user_pgt); 106.84 make_page_readonly(level3_kernel_pgt); 106.85 + make_page_readonly(level3_user_pgt); 106.86 make_page_readonly(level2_kernel_pgt); 106.87 - make_page_readonly(init_level4_user_pgt); 106.88 - make_page_readonly(level3_user_pgt); /* for vsyscall stuff */ 106.89 106.90 xen_pgd_pin(__pa_symbol(init_level4_pgt)); 106.91 xen_pgd_pin(__pa_symbol(init_level4_user_pgt)); 106.92 @@ -609,7 +577,6 @@ void __init xen_init_pt(void) 106.93 106.94 set_pgd((pgd_t *)(init_level4_user_pgt + 511), 106.95 mk_kernel_pgd(__pa_symbol(level3_user_pgt))); 106.96 - 106.97 } 106.98 106.99 /* 106.100 @@ -617,69 +584,58 @@ void __init xen_init_pt(void) 106.101 * mapping done by Xen is minimal (e.g. 8MB) and we need to extend the 106.102 * mapping for early initialization. 106.103 */ 106.104 - 106.105 -#define MIN_INIT_SIZE 0x800000 106.106 static unsigned long current_size, extended_size; 106.107 106.108 void __init extend_init_mapping(void) 106.109 { 106.110 unsigned long va = __START_KERNEL_map; 106.111 - unsigned long addr, *pte_page; 106.112 - 106.113 - unsigned long phys; 106.114 + unsigned long phys, addr, *pte_page; 106.115 pmd_t *pmd; 106.116 pte_t *pte, new_pte; 106.117 unsigned long *page = (unsigned long *) init_level4_pgt; 106.118 int i; 106.119 106.120 - addr = (unsigned long) page[pgd_index(va)]; 106.121 + addr = page[pgd_index(va)]; 106.122 addr_to_page(addr, page); 106.123 - 106.124 addr = page[pud_index(va)]; 106.125 addr_to_page(addr, page); 106.126 106.127 for (;;) { 106.128 - pmd = (pmd_t *) &page[pmd_index(va)]; 106.129 - if (pmd_present(*pmd)) { 106.130 - /* 106.131 - * if pmd is valid, check pte. 
106.132 - */ 106.133 - addr = page[pmd_index(va)]; 106.134 - addr_to_page(addr, pte_page); 106.135 - 106.136 - for (i = 0; i < PTRS_PER_PTE; i++) { 106.137 - pte = (pte_t *) &pte_page[pte_index(va)]; 106.138 - 106.139 - if (pte_present(*pte)) { 106.140 - va += PAGE_SIZE; 106.141 - current_size += PAGE_SIZE; 106.142 - } else 106.143 - break; 106.144 - } 106.145 - 106.146 - } else 106.147 - break; 106.148 + pmd = (pmd_t *)&page[pmd_index(va)]; 106.149 + if (!pmd_present(*pmd)) 106.150 + break; 106.151 + addr = page[pmd_index(va)]; 106.152 + addr_to_page(addr, pte_page); 106.153 + for (i = 0; i < PTRS_PER_PTE; i++) { 106.154 + pte = (pte_t *) &pte_page[pte_index(va)]; 106.155 + if (!pte_present(*pte)) 106.156 + break; 106.157 + va += PAGE_SIZE; 106.158 + current_size += PAGE_SIZE; 106.159 + } 106.160 } 106.161 106.162 - for (; va < __START_KERNEL_map + current_size + tables_space; ) { 106.163 + while (va < __START_KERNEL_map + current_size + tables_space) { 106.164 pmd = (pmd_t *) &page[pmd_index(va)]; 106.165 - 106.166 - if (pmd_none(*pmd)) { 106.167 - pte_page = (unsigned long *) alloc_static_page(&phys); 106.168 - make_page_readonly(pte_page); 106.169 - xen_pte_pin(phys); 106.170 - set_pmd(pmd, __pmd(phys | _KERNPG_TABLE | _PAGE_USER)); 106.171 + if (!pmd_none(*pmd)) 106.172 + continue; 106.173 + pte_page = (unsigned long *) alloc_static_page(&phys); 106.174 + make_page_readonly(pte_page); 106.175 + xen_pte_pin(phys); 106.176 + set_pmd(pmd, __pmd(phys | _KERNPG_TABLE | _PAGE_USER)); 106.177 + for (i = 0; i < PTRS_PER_PTE; i++, va += PAGE_SIZE) { 106.178 + new_pte = pfn_pte( 106.179 + (va - __START_KERNEL_map) >> PAGE_SHIFT, 106.180 + __pgprot(_KERNPG_TABLE | _PAGE_USER)); 106.181 + pte = (pte_t *)&pte_page[pte_index(va)]; 106.182 + xen_l1_entry_update(pte, new_pte); 106.183 + extended_size += PAGE_SIZE; 106.184 + } 106.185 + } 106.186 106.187 - for (i = 0; i < PTRS_PER_PTE; i++, va += PAGE_SIZE) { 106.188 - new_pte = pfn_pte((va - __START_KERNEL_map) >> PAGE_SHIFT, 106.189 - __pgprot(_KERNPG_TABLE | _PAGE_USER)); 106.190 - 106.191 - pte = (pte_t *) &pte_page[pte_index(va)]; 106.192 - xen_l1_entry_update(pte, new_pte); 106.193 - extended_size += PAGE_SIZE; 106.194 - } 106.195 - } 106.196 - } 106.197 + /* Kill mapping of low 1MB. */ 106.198 + for (va = __START_KERNEL_map; va < (unsigned long)&_text; va += PAGE_SIZE) 106.199 + HYPERVISOR_update_va_mapping(va, __pte_ma(0), 0); 106.200 } 106.201 106.202 106.203 @@ -720,10 +676,6 @@ void __init init_memory_mapping(unsigned 106.204 106.205 start_pfn = ((current_size + extended_size) >> PAGE_SHIFT); 106.206 106.207 - /* 106.208 - * TBD: Need to calculate at runtime 106.209 - */ 106.210 - 106.211 __flush_tlb_all(); 106.212 init_mapping_done = 1; 106.213 }
115.1 --- a/linux-2.6-xen-sparse/drivers/xen/blkback/xenbus.c Wed Aug 24 16:16:52 2005 -0700 115.2 +++ b/linux-2.6-xen-sparse/drivers/xen/blkback/xenbus.c Thu Aug 25 11:18:47 2005 -0700 115.3 @@ -57,26 +57,26 @@ static int blkback_remove(struct xenbus_ 115.4 /* Front end tells us frame. */ 115.5 static void frontend_changed(struct xenbus_watch *watch, const char *node) 115.6 { 115.7 - unsigned long sharedmfn; 115.8 + unsigned long ring_ref; 115.9 unsigned int evtchn; 115.10 int err; 115.11 struct backend_info *be 115.12 = container_of(watch, struct backend_info, watch); 115.13 115.14 /* If other end is gone, delete ourself. */ 115.15 - if (!xenbus_exists(be->frontpath, "")) { 115.16 + if (node && !xenbus_exists(be->frontpath, "")) { 115.17 xenbus_rm(be->dev->nodename, ""); 115.18 device_unregister(&be->dev->dev); 115.19 return; 115.20 } 115.21 - if (be->blkif->status == CONNECTED) 115.22 + if (be->blkif == NULL || be->blkif->status == CONNECTED) 115.23 return; 115.24 115.25 - err = xenbus_gather(be->frontpath, "grant-id", "%lu", &sharedmfn, 115.26 + err = xenbus_gather(be->frontpath, "ring-ref", "%lu", &ring_ref, 115.27 "event-channel", "%u", &evtchn, NULL); 115.28 if (err) { 115.29 xenbus_dev_error(be->dev, err, 115.30 - "reading %s/grant-id and event-channel", 115.31 + "reading %s/ring-ref and event-channel", 115.32 be->frontpath); 115.33 return; 115.34 } 115.35 @@ -113,11 +113,10 @@ static void frontend_changed(struct xenb 115.36 } 115.37 115.38 /* Map the shared frame, irq etc. */ 115.39 - err = blkif_map(be->blkif, sharedmfn, evtchn); 115.40 + err = blkif_map(be->blkif, ring_ref, evtchn); 115.41 if (err) { 115.42 - xenbus_dev_error(be->dev, err, 115.43 - "mapping shared-frame %lu port %u", 115.44 - sharedmfn, evtchn); 115.45 + xenbus_dev_error(be->dev, err, "mapping ring-ref %lu port %u", 115.46 + ring_ref, evtchn); 115.47 goto abort; 115.48 } 115.49 115.50 @@ -139,62 +138,22 @@ static void backend_changed(struct xenbu 115.51 { 115.52 int err; 115.53 char *p; 115.54 - char *frontend; 115.55 long int handle, pdev; 115.56 struct backend_info *be 115.57 = container_of(watch, struct backend_info, backend_watch); 115.58 struct xenbus_device *dev = be->dev; 115.59 115.60 - frontend = NULL; 115.61 - err = xenbus_gather(dev->nodename, 115.62 - "frontend-id", "%li", &be->frontend_id, 115.63 - "frontend", NULL, &frontend, 115.64 - NULL); 115.65 - if (XENBUS_EXIST_ERR(err) || 115.66 - strlen(frontend) == 0 || !xenbus_exists(frontend, "")) { 115.67 - /* If we can't get a frontend path and a frontend-id, 115.68 - * then our bus-id is no longer valid and we need to 115.69 - * destroy the backend device. 
115.70 - */ 115.71 - goto device_fail; 115.72 - } 115.73 - if (err < 0) { 115.74 - xenbus_dev_error(dev, err, 115.75 - "reading %s/frontend or frontend-id", 115.76 - dev->nodename); 115.77 - goto device_fail; 115.78 - } 115.79 - 115.80 - if (!be->frontpath || strcmp(frontend, be->frontpath)) { 115.81 - if (be->watch.node) 115.82 - unregister_xenbus_watch(&be->watch); 115.83 - if (be->frontpath) 115.84 - kfree(be->frontpath); 115.85 - be->frontpath = frontend; 115.86 - frontend = NULL; 115.87 - be->watch.node = be->frontpath; 115.88 - be->watch.callback = frontend_changed; 115.89 - err = register_xenbus_watch(&be->watch); 115.90 - if (err) { 115.91 - be->watch.node = NULL; 115.92 - xenbus_dev_error(dev, err, 115.93 - "adding frontend watch on %s", 115.94 - be->frontpath); 115.95 - goto device_fail; 115.96 - } 115.97 - } 115.98 - 115.99 err = xenbus_scanf(dev->nodename, "physical-device", "%li", &pdev); 115.100 if (XENBUS_EXIST_ERR(err)) 115.101 - goto out; 115.102 + return; 115.103 if (err < 0) { 115.104 xenbus_dev_error(dev, err, "reading physical-device"); 115.105 - goto device_fail; 115.106 + return; 115.107 } 115.108 if (be->pdev && be->pdev != pdev) { 115.109 printk(KERN_WARNING 115.110 "changing physical-device not supported\n"); 115.111 - goto device_fail; 115.112 + return; 115.113 } 115.114 be->pdev = pdev; 115.115 115.116 @@ -215,32 +174,25 @@ static void backend_changed(struct xenbu 115.117 err = PTR_ERR(be->blkif); 115.118 be->blkif = NULL; 115.119 xenbus_dev_error(dev, err, "creating block interface"); 115.120 - goto device_fail; 115.121 + return; 115.122 } 115.123 115.124 err = vbd_create(be->blkif, handle, be->pdev, be->readonly); 115.125 if (err) { 115.126 xenbus_dev_error(dev, err, "creating vbd structure"); 115.127 - goto device_fail; 115.128 + return; 115.129 } 115.130 115.131 - frontend_changed(&be->watch, be->frontpath); 115.132 + /* Pass in NULL node to skip exist test. */ 115.133 + frontend_changed(&be->watch, NULL); 115.134 } 115.135 - 115.136 - out: 115.137 - if (frontend) 115.138 - kfree(frontend); 115.139 - return; 115.140 - 115.141 - device_fail: 115.142 - device_unregister(&be->dev->dev); 115.143 - goto out; 115.144 } 115.145 115.146 static int blkback_probe(struct xenbus_device *dev, 115.147 const struct xenbus_device_id *id) 115.148 { 115.149 struct backend_info *be; 115.150 + char *frontend; 115.151 int err; 115.152 115.153 be = kmalloc(sizeof(*be), GFP_KERNEL); 115.154 @@ -248,24 +200,63 @@ static int blkback_probe(struct xenbus_d 115.155 xenbus_dev_error(dev, -ENOMEM, "allocating backend structure"); 115.156 return -ENOMEM; 115.157 } 115.158 + memset(be, 0, sizeof(*be)); 115.159 115.160 - memset(be, 0, sizeof(*be)); 115.161 + frontend = NULL; 115.162 + err = xenbus_gather(dev->nodename, 115.163 + "frontend-id", "%li", &be->frontend_id, 115.164 + "frontend", NULL, &frontend, 115.165 + NULL); 115.166 + if (XENBUS_EXIST_ERR(err)) 115.167 + goto free_be; 115.168 + if (err < 0) { 115.169 + xenbus_dev_error(dev, err, 115.170 + "reading %s/frontend or frontend-id", 115.171 + dev->nodename); 115.172 + goto free_be; 115.173 + } 115.174 + if (strlen(frontend) == 0 || !xenbus_exists(frontend, "")) { 115.175 + /* If we can't get a frontend path and a frontend-id, 115.176 + * then our bus-id is no longer valid and we need to 115.177 + * destroy the backend device. 
115.178 + */ 115.179 + err = -ENOENT; 115.180 + goto free_be; 115.181 + } 115.182 115.183 be->dev = dev; 115.184 be->backend_watch.node = dev->nodename; 115.185 be->backend_watch.callback = backend_changed; 115.186 err = register_xenbus_watch(&be->backend_watch); 115.187 if (err) { 115.188 + be->backend_watch.node = NULL; 115.189 xenbus_dev_error(dev, err, "adding backend watch on %s", 115.190 dev->nodename); 115.191 goto free_be; 115.192 } 115.193 115.194 + be->frontpath = frontend; 115.195 + be->watch.node = be->frontpath; 115.196 + be->watch.callback = frontend_changed; 115.197 + err = register_xenbus_watch(&be->watch); 115.198 + if (err) { 115.199 + be->watch.node = NULL; 115.200 + xenbus_dev_error(dev, err, 115.201 + "adding frontend watch on %s", 115.202 + be->frontpath); 115.203 + goto free_be; 115.204 + } 115.205 + 115.206 dev->data = be; 115.207 115.208 backend_changed(&be->backend_watch, dev->nodename); 115.209 - return err; 115.210 + return 0; 115.211 + 115.212 free_be: 115.213 + if (be->backend_watch.node) 115.214 + unregister_xenbus_watch(&be->backend_watch); 115.215 + if (frontend) 115.216 + kfree(frontend); 115.217 kfree(be); 115.218 return err; 115.219 }
116.1 --- a/linux-2.6-xen-sparse/drivers/xen/blkfront/blkfront.c Wed Aug 24 16:16:52 2005 -0700 116.2 +++ b/linux-2.6-xen-sparse/drivers/xen/blkfront/blkfront.c Thu Aug 25 11:18:47 2005 -0700 116.3 @@ -1084,7 +1084,8 @@ static void watch_for_status(struct xenb 116.4 "sector-size", "%lu", §or_size, 116.5 NULL); 116.6 if (err) { 116.7 - xenbus_dev_error(info->xbdev, err, "reading backend fields"); 116.8 + xenbus_dev_error(info->xbdev, err, 116.9 + "reading backend fields at %s", watch->node); 116.10 return; 116.11 } 116.12 116.13 @@ -1123,12 +1124,12 @@ static int setup_blkring(struct xenbus_d 116.14 xenbus_dev_error(dev, err, "granting access to ring page"); 116.15 return err; 116.16 } 116.17 - info->grant_id = err; 116.18 + info->ring_ref = err; 116.19 116.20 op.u.alloc_unbound.dom = info->backend_id; 116.21 err = HYPERVISOR_event_channel_op(&op); 116.22 if (err) { 116.23 - gnttab_end_foreign_access(info->grant_id, 0); 116.24 + gnttab_end_foreign_access(info->ring_ref, 0); 116.25 free_page((unsigned long)info->ring.sring); 116.26 info->ring.sring = 0; 116.27 xenbus_dev_error(dev, err, "allocating event channel"); 116.28 @@ -1176,9 +1177,9 @@ static int talk_to_backend(struct xenbus 116.29 goto destroy_blkring; 116.30 } 116.31 116.32 - err = xenbus_printf(dev->nodename, "grant-id","%u", info->grant_id); 116.33 + err = xenbus_printf(dev->nodename, "ring-ref","%u", info->ring_ref); 116.34 if (err) { 116.35 - message = "writing grant-id"; 116.36 + message = "writing ring-ref"; 116.37 goto abort_transaction; 116.38 } 116.39 err = xenbus_printf(dev->nodename,
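The grant-id to ring-ref rename is purely a naming change: the value published in the store is still the grant reference covering the shared ring page. Roughly how that value is produced in setup_blkring() (a sketch, not a verbatim excerpt; field names as in struct blkfront_info):

static int grant_ring_page(struct blkfront_info *info)
{
	int ref = gnttab_grant_foreign_access(info->backend_id,
					      virt_to_mfn(info->ring.sring), 0);
	if (ref < 0)
		return ref;          /* no free grant entries */
	info->ring_ref = ref;        /* later written out as "ring-ref" */
	return 0;
}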
117.1 --- a/linux-2.6-xen-sparse/drivers/xen/blkfront/block.h Wed Aug 24 16:16:52 2005 -0700 117.2 +++ b/linux-2.6-xen-sparse/drivers/xen/blkfront/block.h Thu Aug 25 11:18:47 2005 -0700 117.3 @@ -112,7 +112,7 @@ struct blkfront_info 117.4 int connected; 117.5 char *backend; 117.6 int backend_id; 117.7 - int grant_id; 117.8 + int ring_ref; 117.9 blkif_front_ring_t ring; 117.10 unsigned int evtchn; 117.11 struct xlbd_major_info *mi;
124.1 --- a/linux-2.6-xen-sparse/drivers/xen/console/console.c Wed Aug 24 16:16:52 2005 -0700 124.2 +++ b/linux-2.6-xen-sparse/drivers/xen/console/console.c Thu Aug 25 11:18:47 2005 -0700 124.3 @@ -240,7 +240,11 @@ console_initcall(xen_console_init); 124.4 #endif 124.5 124.6 /*** Useful function for console debugging -- goes straight to Xen. ***/ 124.7 +#ifdef CONFIG_XEN_PRIVILEGED_GUEST 124.8 asmlinkage int xprintk(const char *fmt, ...) 124.9 +#else 124.10 +asmlinkage int xprintk(const char *fmt, ...) 124.11 +#endif 124.12 { 124.13 va_list args; 124.14 int printk_len;
125.1 --- a/linux-2.6-xen-sparse/drivers/xen/netback/Makefile Wed Aug 24 16:16:52 2005 -0700 125.2 +++ b/linux-2.6-xen-sparse/drivers/xen/netback/Makefile Thu Aug 25 11:18:47 2005 -0700 125.3 @@ -1,2 +1,2 @@ 125.4 125.5 -obj-y := netback.o control.o interface.o loopback.o 125.6 +obj-y := netback.o xenbus.o interface.o loopback.o
126.1 --- a/linux-2.6-xen-sparse/drivers/xen/netback/common.h Wed Aug 24 16:16:52 2005 -0700 126.2 +++ b/linux-2.6-xen-sparse/drivers/xen/netback/common.h Thu Aug 25 11:18:47 2005 -0700 126.3 @@ -59,6 +59,7 @@ typedef struct netif_st { 126.4 grant_ref_t rx_shmem_ref; 126.5 #endif 126.6 unsigned int evtchn; 126.7 + unsigned int remote_evtchn; 126.8 126.9 /* The shared rings and indexes. */ 126.10 netif_tx_interface_t *tx; 126.11 @@ -82,36 +83,30 @@ typedef struct netif_st { 126.12 /* Miscellaneous private stuff. */ 126.13 enum { DISCONNECTED, DISCONNECTING, CONNECTED } status; 126.14 int active; 126.15 - /* 126.16 - * DISCONNECT response is deferred until pending requests are ack'ed. 126.17 - * We therefore need to store the id from the original request. 126.18 - */ 126.19 - u8 disconnect_rspid; 126.20 - struct netif_st *hash_next; 126.21 struct list_head list; /* scheduling list */ 126.22 atomic_t refcnt; 126.23 struct net_device *dev; 126.24 struct net_device_stats stats; 126.25 126.26 - struct work_struct work; 126.27 + struct work_struct free_work; 126.28 } netif_t; 126.29 126.30 -void netif_create(netif_be_create_t *create); 126.31 -void netif_destroy(netif_be_destroy_t *destroy); 126.32 -void netif_creditlimit(netif_be_creditlimit_t *creditlimit); 126.33 -void netif_connect(netif_be_connect_t *connect); 126.34 -int netif_disconnect(netif_be_disconnect_t *disconnect, u8 rsp_id); 126.35 -void netif_disconnect_complete(netif_t *netif); 126.36 -netif_t *netif_find_by_handle(domid_t domid, unsigned int handle); 126.37 +void netif_creditlimit(netif_t *netif); 126.38 +int netif_disconnect(netif_t *netif); 126.39 + 126.40 +netif_t *alloc_netif(domid_t domid, unsigned int handle, u8 be_mac[ETH_ALEN]); 126.41 +void free_netif_callback(netif_t *netif); 126.42 +int netif_map(netif_t *netif, unsigned long tx_ring_ref, 126.43 + unsigned long rx_ring_ref, unsigned int evtchn); 126.44 + 126.45 #define netif_get(_b) (atomic_inc(&(_b)->refcnt)) 126.46 #define netif_put(_b) \ 126.47 do { \ 126.48 if ( atomic_dec_and_test(&(_b)->refcnt) ) \ 126.49 - netif_disconnect_complete(_b); \ 126.50 + free_netif_callback(_b); \ 126.51 } while (0) 126.52 126.53 -void netif_interface_init(void); 126.54 -void netif_ctrlif_init(void); 126.55 +void netif_xenbus_init(void); 126.56 126.57 void netif_schedule_work(netif_t *netif); 126.58 void netif_deschedule_work(netif_t *netif);
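The header now exposes a two-step bring-up (allocate the vif, then map the rings and bind the event channel) in place of the old control-message handlers. A rough sketch of how the new xenbus backend (netback/xenbus.c, not shown in this changeset listing) is expected to drive it; the argument values are illustrative:

static int netif_bring_up(domid_t frontend_id, unsigned int handle,
			  u8 be_mac[ETH_ALEN],
			  unsigned long tx_ring_ref, unsigned long rx_ring_ref,
			  unsigned int evtchn)
{
	netif_t *netif = alloc_netif(frontend_id, handle, be_mac);
	if (netif == NULL)
		return -ENOMEM;

	/* On failure the caller drops its reference; free_netif_callback()
	 * runs once the refcount reaches zero. */
	return netif_map(netif, tx_ring_ref, rx_ring_ref, evtchn);
}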
127.1 --- a/linux-2.6-xen-sparse/drivers/xen/netback/control.c Wed Aug 24 16:16:52 2005 -0700 127.2 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 127.3 @@ -1,58 +0,0 @@ 127.4 -/****************************************************************************** 127.5 - * arch/xen/drivers/netif/backend/control.c 127.6 - * 127.7 - * Routines for interfacing with the control plane. 127.8 - * 127.9 - * Copyright (c) 2004, Keir Fraser 127.10 - */ 127.11 - 127.12 -#include "common.h" 127.13 - 127.14 -static void netif_ctrlif_rx(ctrl_msg_t *msg, unsigned long id) 127.15 -{ 127.16 - DPRINTK("Received netif backend message, subtype=%d\n", msg->subtype); 127.17 - 127.18 - switch ( msg->subtype ) 127.19 - { 127.20 - case CMSG_NETIF_BE_CREATE: 127.21 - netif_create((netif_be_create_t *)&msg->msg[0]); 127.22 - break; 127.23 - case CMSG_NETIF_BE_DESTROY: 127.24 - netif_destroy((netif_be_destroy_t *)&msg->msg[0]); 127.25 - break; 127.26 - case CMSG_NETIF_BE_CREDITLIMIT: 127.27 - netif_creditlimit((netif_be_creditlimit_t *)&msg->msg[0]); 127.28 - break; 127.29 - case CMSG_NETIF_BE_CONNECT: 127.30 - netif_connect((netif_be_connect_t *)&msg->msg[0]); 127.31 - break; 127.32 - case CMSG_NETIF_BE_DISCONNECT: 127.33 - if ( !netif_disconnect((netif_be_disconnect_t *)&msg->msg[0],msg->id) ) 127.34 - return; /* Sending the response is deferred until later. */ 127.35 - break; 127.36 - default: 127.37 - DPRINTK("Parse error while reading message subtype %d, len %d\n", 127.38 - msg->subtype, msg->length); 127.39 - msg->length = 0; 127.40 - break; 127.41 - } 127.42 - 127.43 - ctrl_if_send_response(msg); 127.44 -} 127.45 - 127.46 -void netif_ctrlif_init(void) 127.47 -{ 127.48 - ctrl_msg_t cmsg; 127.49 - netif_be_driver_status_t st; 127.50 - 127.51 - (void)ctrl_if_register_receiver(CMSG_NETIF_BE, netif_ctrlif_rx, 127.52 - CALLBACK_IN_BLOCKING_CONTEXT); 127.53 - 127.54 - /* Send a driver-UP notification to the domain controller. */ 127.55 - cmsg.type = CMSG_NETIF_BE; 127.56 - cmsg.subtype = CMSG_NETIF_BE_DRIVER_STATUS; 127.57 - cmsg.length = sizeof(netif_be_driver_status_t); 127.58 - st.status = NETIF_DRIVER_STATUS_UP; 127.59 - memcpy(cmsg.msg, &st, sizeof(st)); 127.60 - ctrl_if_send_message_block(&cmsg, NULL, 0, TASK_UNINTERRUPTIBLE); 127.61 -}
128.1 --- a/linux-2.6-xen-sparse/drivers/xen/netback/interface.c Wed Aug 24 16:16:52 2005 -0700 128.2 +++ b/linux-2.6-xen-sparse/drivers/xen/netback/interface.c Thu Aug 25 11:18:47 2005 -0700 128.3 @@ -9,24 +9,6 @@ 128.4 #include "common.h" 128.5 #include <linux/rtnetlink.h> 128.6 128.7 -#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) 128.8 -#define VMALLOC_VMADDR(x) ((unsigned long)(x)) 128.9 -#endif 128.10 - 128.11 -#define NETIF_HASHSZ 1024 128.12 -#define NETIF_HASH(_d,_h) (((int)(_d)^(int)(_h))&(NETIF_HASHSZ-1)) 128.13 - 128.14 -static netif_t *netif_hash[NETIF_HASHSZ]; 128.15 - 128.16 -netif_t *netif_find_by_handle(domid_t domid, unsigned int handle) 128.17 -{ 128.18 - netif_t *netif = netif_hash[NETIF_HASH(domid, handle)]; 128.19 - while ( (netif != NULL) && 128.20 - ((netif->domid != domid) || (netif->handle != handle)) ) 128.21 - netif = netif->hash_next; 128.22 - return netif; 128.23 -} 128.24 - 128.25 static void __netif_up(netif_t *netif) 128.26 { 128.27 struct net_device *dev = netif->dev; 128.28 @@ -51,7 +33,7 @@ static void __netif_down(netif_t *netif) 128.29 static int net_open(struct net_device *dev) 128.30 { 128.31 netif_t *netif = netdev_priv(dev); 128.32 - if ( netif->status == CONNECTED ) 128.33 + if (netif->status == CONNECTED) 128.34 __netif_up(netif); 128.35 netif_start_queue(dev); 128.36 return 0; 128.37 @@ -61,92 +43,23 @@ static int net_close(struct net_device * 128.38 { 128.39 netif_t *netif = netdev_priv(dev); 128.40 netif_stop_queue(dev); 128.41 - if ( netif->status == CONNECTED ) 128.42 + if (netif->status == CONNECTED) 128.43 __netif_down(netif); 128.44 return 0; 128.45 } 128.46 128.47 -static void __netif_disconnect_complete(void *arg) 128.48 +netif_t *alloc_netif(domid_t domid, unsigned int handle, u8 be_mac[ETH_ALEN]) 128.49 { 128.50 - netif_t *netif = (netif_t *)arg; 128.51 - ctrl_msg_t cmsg; 128.52 - netif_be_disconnect_t disc; 128.53 -#if defined(CONFIG_XEN_NETDEV_GRANT_RX) || defined(CONFIG_XEN_NETDEV_GRANT_TX) 128.54 - struct gnttab_unmap_grant_ref op; 128.55 -#endif 128.56 - 128.57 - /* 128.58 - * These can't be done in netif_disconnect() because at that point there 128.59 - * may be outstanding requests in the network stack whose asynchronous 128.60 - * responses must still be notified to the remote driver. 128.61 - */ 128.62 - 128.63 -#ifdef CONFIG_XEN_NETDEV_GRANT_TX 128.64 - op.host_addr = netif->tx_shmem_vaddr; 128.65 - op.handle = netif->tx_shmem_handle; 128.66 - op.dev_bus_addr = 0; 128.67 - BUG_ON(HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1)); 128.68 -#endif 128.69 - 128.70 -#ifdef CONFIG_XEN_NETDEV_GRANT_RX 128.71 - op.host_addr = netif->rx_shmem_vaddr; 128.72 - op.handle = netif->rx_shmem_handle; 128.73 - op.dev_bus_addr = 0; 128.74 - BUG_ON(HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1)); 128.75 -#endif 128.76 - 128.77 - 128.78 - vfree(netif->tx); /* Frees netif->rx as well. */ 128.79 - 128.80 - /* Construct the deferred response message. */ 128.81 - cmsg.type = CMSG_NETIF_BE; 128.82 - cmsg.subtype = CMSG_NETIF_BE_DISCONNECT; 128.83 - cmsg.id = netif->disconnect_rspid; 128.84 - cmsg.length = sizeof(netif_be_disconnect_t); 128.85 - disc.domid = netif->domid; 128.86 - disc.netif_handle = netif->handle; 128.87 - disc.status = NETIF_BE_STATUS_OKAY; 128.88 - memcpy(cmsg.msg, &disc, sizeof(disc)); 128.89 - 128.90 - /* 128.91 - * Make sure message is constructed /before/ status change, because 128.92 - * after the status change the 'netif' structure could be deallocated at 128.93 - * any time. 
Also make sure we send the response /after/ status change, 128.94 - * as otherwise a subsequent CONNECT request could spuriously fail if 128.95 - * another CPU doesn't see the status change yet. 128.96 - */ 128.97 - mb(); 128.98 - if ( netif->status != DISCONNECTING ) 128.99 - BUG(); 128.100 - netif->status = DISCONNECTED; 128.101 - mb(); 128.102 - 128.103 - /* Send the successful response. */ 128.104 - ctrl_if_send_response(&cmsg); 128.105 -} 128.106 - 128.107 -void netif_disconnect_complete(netif_t *netif) 128.108 -{ 128.109 - INIT_WORK(&netif->work, __netif_disconnect_complete, (void *)netif); 128.110 - schedule_work(&netif->work); 128.111 -} 128.112 - 128.113 -void netif_create(netif_be_create_t *create) 128.114 -{ 128.115 - int err = 0; 128.116 - domid_t domid = create->domid; 128.117 - unsigned int handle = create->netif_handle; 128.118 + int err = 0, i; 128.119 struct net_device *dev; 128.120 - netif_t **pnetif, *netif; 128.121 - char name[IFNAMSIZ] = {}; 128.122 + netif_t *netif; 128.123 + char name[IFNAMSIZ] = {}; 128.124 128.125 snprintf(name, IFNAMSIZ - 1, "vif%u.%u", domid, handle); 128.126 dev = alloc_netdev(sizeof(netif_t), name, ether_setup); 128.127 - if ( dev == NULL ) 128.128 - { 128.129 + if (dev == NULL) { 128.130 DPRINTK("Could not create netif: out of memory\n"); 128.131 - create->status = NETIF_BE_STATUS_OUT_OF_MEMORY; 128.132 - return; 128.133 + return NULL; 128.134 } 128.135 128.136 netif = netdev_priv(dev); 128.137 @@ -161,19 +74,6 @@ void netif_create(netif_be_create_t *cre 128.138 netif->credit_usec = 0UL; 128.139 init_timer(&netif->credit_timeout); 128.140 128.141 - pnetif = &netif_hash[NETIF_HASH(domid, handle)]; 128.142 - while ( *pnetif != NULL ) 128.143 - { 128.144 - if ( ((*pnetif)->domid == domid) && ((*pnetif)->handle == handle) ) 128.145 - { 128.146 - DPRINTK("Could not create netif: already exists\n"); 128.147 - create->status = NETIF_BE_STATUS_INTERFACE_EXISTS; 128.148 - free_netdev(dev); 128.149 - return; 128.150 - } 128.151 - pnetif = &(*pnetif)->hash_next; 128.152 - } 128.153 - 128.154 dev->hard_start_xmit = netif_be_start_xmit; 128.155 dev->get_stats = netif_be_get_stats; 128.156 dev->open = net_open; 128.157 @@ -183,10 +83,10 @@ void netif_create(netif_be_create_t *cre 128.158 /* Disable queuing. */ 128.159 dev->tx_queue_len = 0; 128.160 128.161 - if ( (create->be_mac[0] == 0) && (create->be_mac[1] == 0) && 128.162 - (create->be_mac[2] == 0) && (create->be_mac[3] == 0) && 128.163 - (create->be_mac[4] == 0) && (create->be_mac[5] == 0) ) 128.164 - { 128.165 + for (i = 0; i < ETH_ALEN; i++) 128.166 + if (be_mac[i] != 0) 128.167 + break; 128.168 + if (i == ETH_ALEN) { 128.169 /* 128.170 * Initialise a dummy MAC address. 
We choose the numerically largest 128.171 * non-broadcast address to prevent the address getting stolen by an 128.172 @@ -194,87 +94,200 @@ void netif_create(netif_be_create_t *cre 128.173 */ 128.174 memset(dev->dev_addr, 0xFF, ETH_ALEN); 128.175 dev->dev_addr[0] &= ~0x01; 128.176 - } 128.177 - else 128.178 - { 128.179 - memcpy(dev->dev_addr, create->be_mac, ETH_ALEN); 128.180 - } 128.181 - 128.182 - memcpy(netif->fe_dev_addr, create->mac, ETH_ALEN); 128.183 + } else 128.184 + memcpy(dev->dev_addr, be_mac, ETH_ALEN); 128.185 128.186 rtnl_lock(); 128.187 err = register_netdevice(dev); 128.188 rtnl_unlock(); 128.189 - 128.190 - if ( err != 0 ) 128.191 - { 128.192 + if (err) { 128.193 DPRINTK("Could not register new net device %s: err=%d\n", 128.194 dev->name, err); 128.195 - create->status = NETIF_BE_STATUS_OUT_OF_MEMORY; 128.196 free_netdev(dev); 128.197 - return; 128.198 + return NULL; 128.199 } 128.200 128.201 - netif->hash_next = *pnetif; 128.202 - *pnetif = netif; 128.203 + DPRINTK("Successfully created netif\n"); 128.204 + return netif; 128.205 +} 128.206 + 128.207 +static int map_frontend_page(netif_t *netif, unsigned long localaddr, 128.208 + unsigned long tx_ring_ref, unsigned long rx_ring_ref) 128.209 +{ 128.210 +#if !defined(CONFIG_XEN_NETDEV_GRANT_TX)||!defined(CONFIG_XEN_NETDEV_GRANT_RX) 128.211 + pgprot_t prot = __pgprot(_KERNPG_TABLE); 128.212 + int err; 128.213 +#endif 128.214 +#if defined(CONFIG_XEN_NETDEV_GRANT_TX) 128.215 + { 128.216 + struct gnttab_map_grant_ref op; 128.217 + 128.218 + /* Map: Use the Grant table reference */ 128.219 + op.host_addr = localaddr; 128.220 + op.flags = GNTMAP_host_map; 128.221 + op.ref = tx_ring_ref; 128.222 + op.dom = netif->domid; 128.223 + 128.224 + BUG_ON( HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1) ); 128.225 + if (op.handle < 0) { 128.226 + DPRINTK(" Grant table operation failure !\n"); 128.227 + return op.handle; 128.228 + } 128.229 128.230 - DPRINTK("Successfully created netif\n"); 128.231 - create->status = NETIF_BE_STATUS_OKAY; 128.232 + netif->tx_shmem_ref = tx_ring_ref; 128.233 + netif->tx_shmem_handle = op.handle; 128.234 + netif->tx_shmem_vaddr = localaddr; 128.235 + } 128.236 +#else 128.237 + err = direct_remap_area_pages(&init_mm, localaddr, 128.238 + tx_ring_ref<<PAGE_SHIFT, PAGE_SIZE, 128.239 + prot, netif->domid); 128.240 + if (err) 128.241 + return err; 128.242 +#endif 128.243 + 128.244 +#if defined(CONFIG_XEN_NETDEV_GRANT_RX) 128.245 + { 128.246 + struct gnttab_map_grant_ref op; 128.247 + 128.248 + /* Map: Use the Grant table reference */ 128.249 + op.host_addr = localaddr + PAGE_SIZE; 128.250 + op.flags = GNTMAP_host_map; 128.251 + op.ref = rx_ring_ref; 128.252 + op.dom = netif->domid; 128.253 + 128.254 + BUG_ON( HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1) ); 128.255 + if (op.handle < 0) { 128.256 + DPRINTK(" Grant table operation failure !\n"); 128.257 + return op.handle; 128.258 + } 128.259 + 128.260 + netif->rx_shmem_ref = rx_ring_ref; 128.261 + netif->rx_shmem_handle = op.handle; 128.262 + netif->rx_shmem_vaddr = localaddr + PAGE_SIZE; 128.263 + } 128.264 +#else 128.265 + err = direct_remap_area_pages(&init_mm, localaddr + PAGE_SIZE, 128.266 + rx_ring_ref<<PAGE_SHIFT, PAGE_SIZE, 128.267 + prot, netif->domid); 128.268 + if (err) 128.269 + return err; 128.270 +#endif 128.271 + 128.272 + return 0; 128.273 } 128.274 128.275 -void netif_destroy(netif_be_destroy_t *destroy) 128.276 +static void unmap_frontend_page(netif_t *netif) 128.277 { 128.278 - domid_t domid = destroy->domid; 128.279 - 
unsigned int handle = destroy->netif_handle; 128.280 - netif_t **pnetif, *netif; 128.281 +#if defined(CONFIG_XEN_NETDEV_GRANT_RX) || defined(CONFIG_XEN_NETDEV_GRANT_TX) 128.282 + struct gnttab_unmap_grant_ref op; 128.283 +#endif 128.284 + 128.285 +#ifdef CONFIG_XEN_NETDEV_GRANT_TX 128.286 + op.host_addr = netif->tx_shmem_vaddr; 128.287 + op.handle = netif->tx_shmem_handle; 128.288 + op.dev_bus_addr = 0; 128.289 + BUG_ON(HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1)); 128.290 +#endif 128.291 + 128.292 +#ifdef CONFIG_XEN_NETDEV_GRANT_RX 128.293 + op.host_addr = netif->rx_shmem_vaddr; 128.294 + op.handle = netif->rx_shmem_handle; 128.295 + op.dev_bus_addr = 0; 128.296 + BUG_ON(HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1)); 128.297 +#endif 128.298 +} 128.299 128.300 - pnetif = &netif_hash[NETIF_HASH(domid, handle)]; 128.301 - while ( (netif = *pnetif) != NULL ) 128.302 - { 128.303 - if ( (netif->domid == domid) && (netif->handle == handle) ) 128.304 - { 128.305 - if ( netif->status != DISCONNECTED ) 128.306 - goto still_connected; 128.307 - goto destroy; 128.308 - } 128.309 - pnetif = &netif->hash_next; 128.310 +int netif_map(netif_t *netif, unsigned long tx_ring_ref, 128.311 + unsigned long rx_ring_ref, unsigned int evtchn) 128.312 +{ 128.313 + struct vm_struct *vma; 128.314 + evtchn_op_t op = { .cmd = EVTCHNOP_bind_interdomain }; 128.315 + int err; 128.316 + 128.317 + vma = get_vm_area(2*PAGE_SIZE, VM_IOREMAP); 128.318 + if (vma == NULL) 128.319 + return -ENOMEM; 128.320 + 128.321 + err = map_frontend_page(netif, (unsigned long)vma->addr, tx_ring_ref, 128.322 + rx_ring_ref); 128.323 + if (err) { 128.324 + vfree(vma->addr); 128.325 + return err; 128.326 + } 128.327 + 128.328 + op.u.bind_interdomain.dom1 = DOMID_SELF; 128.329 + op.u.bind_interdomain.dom2 = netif->domid; 128.330 + op.u.bind_interdomain.port1 = 0; 128.331 + op.u.bind_interdomain.port2 = evtchn; 128.332 + err = HYPERVISOR_event_channel_op(&op); 128.333 + if (err) { 128.334 + unmap_frontend_page(netif); 128.335 + vfree(vma->addr); 128.336 + return err; 128.337 } 128.338 128.339 - destroy->status = NETIF_BE_STATUS_INTERFACE_NOT_FOUND; 128.340 - return; 128.341 + netif->evtchn = op.u.bind_interdomain.port1; 128.342 + netif->remote_evtchn = evtchn; 128.343 + 128.344 + netif->tx = (netif_tx_interface_t *)vma->addr; 128.345 + netif->rx = (netif_rx_interface_t *)((char *)vma->addr + PAGE_SIZE); 128.346 + netif->tx->resp_prod = netif->rx->resp_prod = 0; 128.347 + netif_get(netif); 128.348 + wmb(); /* Other CPUs see new state before interface is started. 
*/ 128.349 128.350 - still_connected: 128.351 - destroy->status = NETIF_BE_STATUS_INTERFACE_CONNECTED; 128.352 - return; 128.353 + rtnl_lock(); 128.354 + netif->status = CONNECTED; 128.355 + wmb(); 128.356 + if (netif_running(netif->dev)) 128.357 + __netif_up(netif); 128.358 + rtnl_unlock(); 128.359 128.360 - destroy: 128.361 - *pnetif = netif->hash_next; 128.362 - unregister_netdev(netif->dev); 128.363 - free_netdev(netif->dev); 128.364 - destroy->status = NETIF_BE_STATUS_OKAY; 128.365 + return 0; 128.366 } 128.367 128.368 -void netif_creditlimit(netif_be_creditlimit_t *creditlimit) 128.369 +static void free_netif(void *arg) 128.370 { 128.371 - domid_t domid = creditlimit->domid; 128.372 - unsigned int handle = creditlimit->netif_handle; 128.373 - netif_t *netif; 128.374 + evtchn_op_t op = { .cmd = EVTCHNOP_close }; 128.375 + netif_t *netif = (netif_t *)arg; 128.376 + 128.377 + /* 128.378 + * These can't be done in netif_disconnect() because at that point there 128.379 + * may be outstanding requests in the network stack whose asynchronous 128.380 + * responses must still be notified to the remote driver. 128.381 + */ 128.382 128.383 - netif = netif_find_by_handle(domid, handle); 128.384 - if ( unlikely(netif == NULL) ) 128.385 - { 128.386 - DPRINTK("netif_creditlimit attempted for non-existent netif" 128.387 - " (%u,%u)\n", creditlimit->domid, creditlimit->netif_handle); 128.388 - creditlimit->status = NETIF_BE_STATUS_INTERFACE_NOT_FOUND; 128.389 - return; 128.390 + op.u.close.port = netif->evtchn; 128.391 + op.u.close.dom = DOMID_SELF; 128.392 + HYPERVISOR_event_channel_op(&op); 128.393 + op.u.close.port = netif->remote_evtchn; 128.394 + op.u.close.dom = netif->domid; 128.395 + HYPERVISOR_event_channel_op(&op); 128.396 + 128.397 + unregister_netdev(netif->dev); 128.398 + 128.399 + if (netif->tx) { 128.400 + unmap_frontend_page(netif); 128.401 + vfree(netif->tx); /* Frees netif->rx as well. */ 128.402 } 128.403 128.404 + free_netdev(netif->dev); 128.405 +} 128.406 + 128.407 +void free_netif_callback(netif_t *netif) 128.408 +{ 128.409 + INIT_WORK(&netif->free_work, free_netif, (void *)netif); 128.410 + schedule_work(&netif->free_work); 128.411 +} 128.412 + 128.413 +void netif_creditlimit(netif_t *netif) 128.414 +{ 128.415 +#if 0 128.416 /* Set the credit limit (reset remaining credit to new limit). */ 128.417 netif->credit_bytes = netif->remaining_credit = creditlimit->credit_bytes; 128.418 netif->credit_usec = creditlimit->period_usec; 128.419 128.420 - if ( netif->status == CONNECTED ) 128.421 - { 128.422 + if (netif->status == CONNECTED) { 128.423 /* 128.424 * Schedule work so that any packets waiting under previous credit 128.425 * limit are dealt with (acts like a replenishment point). 
128.426 @@ -282,184 +295,22 @@ void netif_creditlimit(netif_be_creditli 128.427 netif->credit_timeout.expires = jiffies; 128.428 netif_schedule_work(netif); 128.429 } 128.430 - 128.431 - creditlimit->status = NETIF_BE_STATUS_OKAY; 128.432 +#endif 128.433 } 128.434 128.435 -void netif_connect(netif_be_connect_t *connect) 128.436 +int netif_disconnect(netif_t *netif) 128.437 { 128.438 - domid_t domid = connect->domid; 128.439 - unsigned int handle = connect->netif_handle; 128.440 - unsigned int evtchn = connect->evtchn; 128.441 - unsigned long tx_shmem_frame = connect->tx_shmem_frame; 128.442 - unsigned long rx_shmem_frame = connect->rx_shmem_frame; 128.443 - struct vm_struct *vma; 128.444 -#if !defined(CONFIG_XEN_NETDEV_GRANT_TX)||!defined(CONFIG_XEN_NETDEV_GRANT_RX) 128.445 - pgprot_t prot = __pgprot(_KERNPG_TABLE); 128.446 - int error; 128.447 -#endif 128.448 - netif_t *netif; 128.449 - 128.450 - netif = netif_find_by_handle(domid, handle); 128.451 - if ( unlikely(netif == NULL) ) { 128.452 - DPRINTK("netif_connect attempted for non-existent netif (%u,%u)\n", 128.453 - connect->domid, connect->netif_handle); 128.454 - connect->status = NETIF_BE_STATUS_INTERFACE_NOT_FOUND; 128.455 - return; 128.456 - } 128.457 - 128.458 - if ( netif->status != DISCONNECTED ) { 128.459 - connect->status = NETIF_BE_STATUS_INTERFACE_CONNECTED; 128.460 - return; 128.461 - } 128.462 - 128.463 - if ( (vma = get_vm_area(2*PAGE_SIZE, VM_IOREMAP)) == NULL ) { 128.464 - connect->status = NETIF_BE_STATUS_OUT_OF_MEMORY; 128.465 - return; 128.466 - } 128.467 - 128.468 - 128.469 -#if defined(CONFIG_XEN_NETDEV_GRANT_TX) 128.470 - { 128.471 - struct gnttab_map_grant_ref op; 128.472 - int tx_ref = connect->tx_shmem_ref; 128.473 - 128.474 - /* Map: Use the Grant table reference */ 128.475 - op.host_addr = VMALLOC_VMADDR(vma->addr); 128.476 - op.flags = GNTMAP_host_map; 128.477 - op.ref = tx_ref; 128.478 - op.dom = domid; 128.479 - 128.480 - if ((HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1) < 0) || 128.481 - (op.handle < 0)) { 128.482 - DPRINTK(" Grant table operation failure !\n"); 128.483 - connect->status = NETIF_BE_STATUS_MAPPING_ERROR; 128.484 - vfree(vma->addr); 128.485 - return; 128.486 - } 128.487 - 128.488 - netif->tx_shmem_ref = tx_ref; 128.489 - netif->tx_shmem_handle = op.handle; 128.490 - netif->tx_shmem_vaddr = VMALLOC_VMADDR(vma->addr); 128.491 - } 128.492 - 128.493 - 128.494 -#else 128.495 - error = direct_remap_area_pages(&init_mm, 128.496 - VMALLOC_VMADDR(vma->addr), 128.497 - tx_shmem_frame<<PAGE_SHIFT, PAGE_SIZE, 128.498 - prot, domid); 128.499 - if ( error != 0 ) 128.500 - { 128.501 - if ( error == -ENOMEM ) 128.502 - connect->status = NETIF_BE_STATUS_OUT_OF_MEMORY; 128.503 - else if ( error == -EFAULT ) 128.504 - connect->status = NETIF_BE_STATUS_MAPPING_ERROR; 128.505 - else 128.506 - connect->status = NETIF_BE_STATUS_ERROR; 128.507 - vfree(vma->addr); 128.508 - return; 128.509 - } 128.510 -#endif 128.511 - 128.512 128.513 -#if defined(CONFIG_XEN_NETDEV_GRANT_RX) 128.514 - { 128.515 - struct gnttab_map_grant_ref op; 128.516 - int rx_ref = connect->rx_shmem_ref; 128.517 - 128.518 - 128.519 - /* Map: Use the Grant table reference */ 128.520 - op.host_addr = VMALLOC_VMADDR(vma->addr) + PAGE_SIZE; 128.521 - op.flags = GNTMAP_host_map; 128.522 - op.ref = rx_ref; 128.523 - op.dom = domid; 128.524 - 128.525 - if ((HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1) < 0) || 128.526 - (op.handle < 0)) { 128.527 - DPRINTK(" Grant table operation failure !\n"); 128.528 - connect->status = 
NETIF_BE_STATUS_MAPPING_ERROR; 128.529 - vfree(vma->addr); 128.530 - return; 128.531 - } 128.532 - 128.533 - netif->rx_shmem_ref = rx_ref; 128.534 - netif->rx_shmem_handle = handle; 128.535 - netif->rx_shmem_vaddr = VMALLOC_VMADDR(vma->addr) + PAGE_SIZE; 128.536 - } 128.537 -#else 128.538 - error = direct_remap_area_pages(&init_mm, 128.539 - VMALLOC_VMADDR(vma->addr) + PAGE_SIZE, 128.540 - rx_shmem_frame<<PAGE_SHIFT, PAGE_SIZE, 128.541 - prot, domid); 128.542 - if ( error != 0 ) 128.543 - { 128.544 - if ( error == -ENOMEM ) 128.545 - connect->status = NETIF_BE_STATUS_OUT_OF_MEMORY; 128.546 - else if ( error == -EFAULT ) 128.547 - connect->status = NETIF_BE_STATUS_MAPPING_ERROR; 128.548 - else 128.549 - connect->status = NETIF_BE_STATUS_ERROR; 128.550 - vfree(vma->addr); 128.551 - return; 128.552 - } 128.553 - 128.554 -#endif 128.555 - 128.556 - netif->evtchn = evtchn; 128.557 - netif->tx_shmem_frame = tx_shmem_frame; 128.558 - netif->rx_shmem_frame = rx_shmem_frame; 128.559 - netif->tx = 128.560 - (netif_tx_interface_t *)vma->addr; 128.561 - netif->rx = 128.562 - (netif_rx_interface_t *)((char *)vma->addr + PAGE_SIZE); 128.563 - netif->tx->resp_prod = netif->rx->resp_prod = 0; 128.564 - netif_get(netif); 128.565 - wmb(); /* Other CPUs see new state before interface is started. */ 128.566 - 128.567 - rtnl_lock(); 128.568 - netif->status = CONNECTED; 128.569 - wmb(); 128.570 - if ( netif_running(netif->dev) ) 128.571 - __netif_up(netif); 128.572 - rtnl_unlock(); 128.573 - 128.574 - connect->status = NETIF_BE_STATUS_OKAY; 128.575 -} 128.576 - 128.577 -int netif_disconnect(netif_be_disconnect_t *disconnect, u8 rsp_id) 128.578 -{ 128.579 - domid_t domid = disconnect->domid; 128.580 - unsigned int handle = disconnect->netif_handle; 128.581 - netif_t *netif; 128.582 - 128.583 - netif = netif_find_by_handle(domid, handle); 128.584 - if ( unlikely(netif == NULL) ) 128.585 - { 128.586 - DPRINTK("netif_disconnect attempted for non-existent netif" 128.587 - " (%u,%u)\n", disconnect->domid, disconnect->netif_handle); 128.588 - disconnect->status = NETIF_BE_STATUS_INTERFACE_NOT_FOUND; 128.589 - return 1; /* Caller will send response error message. */ 128.590 - } 128.591 - 128.592 - if ( netif->status == CONNECTED ) 128.593 - { 128.594 + if (netif->status == CONNECTED) { 128.595 rtnl_lock(); 128.596 netif->status = DISCONNECTING; 128.597 - netif->disconnect_rspid = rsp_id; 128.598 wmb(); 128.599 - if ( netif_running(netif->dev) ) 128.600 + if (netif_running(netif->dev)) 128.601 __netif_down(netif); 128.602 rtnl_unlock(); 128.603 netif_put(netif); 128.604 return 0; /* Caller should not send response message. */ 128.605 } 128.606 128.607 - disconnect->status = NETIF_BE_STATUS_OKAY; 128.608 return 1; 128.609 } 128.610 - 128.611 -void netif_interface_init(void) 128.612 -{ 128.613 - memset(netif_hash, 0, sizeof(netif_hash)); 128.614 -}
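[Note on the interface.c changes above: the ctrl_if message handlers (netif_create, netif_connect, netif_destroy, netif_disconnect) and the domid/handle hash are gone; interfaces are now created with alloc_netif() and wired up with netif_map() from the xenbus code added later in this changeset. The sketch below is not part of the changeset; it condenses the new connection path for a single ring page using only calls that appear in the hunks above, with the error unwinding (unmap/vfree) trimmed and a hypothetical helper name.]

/* Illustrative sketch only: the essence of map_frontend_page() + netif_map()
 * for one shared ring page. */
static int example_connect_ring(netif_t *netif, unsigned long ring_ref,
                                unsigned int remote_evtchn)
{
    struct vm_struct *vma;
    struct gnttab_map_grant_ref map;
    evtchn_op_t op = { .cmd = EVTCHNOP_bind_interdomain };
    int err;

    /* Reserve backend virtual address space for the shared page. */
    vma = get_vm_area(PAGE_SIZE, VM_IOREMAP);
    if (vma == NULL)
        return -ENOMEM;

    /* Map the frontend's grant reference at that address. */
    map.host_addr = (unsigned long)vma->addr;
    map.flags     = GNTMAP_host_map;
    map.ref       = ring_ref;
    map.dom       = netif->domid;
    BUG_ON(HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &map, 1));
    if (map.handle < 0)
        return map.handle;            /* grant mapping refused */

    /* Bind a local event-channel port to the frontend's advertised port. */
    op.u.bind_interdomain.dom1  = DOMID_SELF;
    op.u.bind_interdomain.dom2  = netif->domid;
    op.u.bind_interdomain.port1 = 0;
    op.u.bind_interdomain.port2 = remote_evtchn;
    err = HYPERVISOR_event_channel_op(&op);
    if (err)
        return err;

    netif->evtchn        = op.u.bind_interdomain.port1;
    netif->remote_evtchn = remote_evtchn;
    return 0;
}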
129.1 --- a/linux-2.6-xen-sparse/drivers/xen/netback/netback.c Wed Aug 24 16:16:52 2005 -0700 129.2 +++ b/linux-2.6-xen-sparse/drivers/xen/netback/netback.c Thu Aug 25 11:18:47 2005 -0700 129.3 @@ -13,10 +13,6 @@ 129.4 #include "common.h" 129.5 #include <asm-xen/balloon.h> 129.6 129.7 -#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0) 129.8 -#include <linux/delay.h> 129.9 -#endif 129.10 - 129.11 #if defined(CONFIG_XEN_NETDEV_GRANT_TX) || defined(CONFIG_XEN_NETDEV_GRANT_RX) 129.12 #include <asm-xen/xen-public/grant_table.h> 129.13 #include <asm-xen/gnttab.h> 129.14 @@ -153,11 +149,7 @@ static inline void maybe_schedule_tx_act 129.15 static inline int is_xen_skb(struct sk_buff *skb) 129.16 { 129.17 extern kmem_cache_t *skbuff_cachep; 129.18 -#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) 129.19 kmem_cache_t *cp = (kmem_cache_t *)virt_to_page(skb->head)->lru.next; 129.20 -#else 129.21 - kmem_cache_t *cp = (kmem_cache_t *)virt_to_page(skb->head)->list.next; 129.22 -#endif 129.23 return (cp == skbuff_cachep); 129.24 } 129.25 129.26 @@ -642,11 +634,7 @@ static void net_tx_action(unsigned long 129.27 netif->credit_timeout.expires = next_credit; 129.28 netif->credit_timeout.data = (unsigned long)netif; 129.29 netif->credit_timeout.function = tx_credit_callback; 129.30 -#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) 129.31 add_timer_on(&netif->credit_timeout, smp_processor_id()); 129.32 -#else 129.33 - add_timer(&netif->credit_timeout); 129.34 -#endif 129.35 break; 129.36 } 129.37 } 129.38 @@ -966,8 +954,6 @@ static int __init netback_init(void) 129.39 net_timer.data = 0; 129.40 net_timer.function = net_alarm; 129.41 129.42 - netif_interface_init(); 129.43 - 129.44 page = balloon_alloc_empty_page_range(MAX_PENDING_REQS); 129.45 BUG_ON(page == NULL); 129.46 mmap_vstart = (unsigned long)pfn_to_kaddr(page_to_pfn(page)); 129.47 @@ -987,7 +973,7 @@ static int __init netback_init(void) 129.48 spin_lock_init(&net_schedule_list_lock); 129.49 INIT_LIST_HEAD(&net_schedule_list); 129.50 129.51 - netif_ctrlif_init(); 129.52 + netif_xenbus_init(); 129.53 129.54 (void)request_irq(bind_virq_to_irq(VIRQ_DEBUG), 129.55 netif_be_dbg, SA_SHIRQ,
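[Note on the netback.c changes above: the 2.4-kernel fallbacks are dropped, the interface-hash initialisation disappears along with the hash itself, and netback_init() now registers with xenbus (netif_xenbus_init) instead of the control interface. The transmit credit throttle is kept; the helper below is not part of the changeset and uses a hypothetical name, it merely restates how net_tx_action() arms the per-interface credit timer on the current CPU.]

static void example_arm_credit_timer(netif_t *netif, unsigned long next_credit)
{
    /* Refill this interface's transmit credit once next_credit jiffies arrive. */
    netif->credit_timeout.expires  = next_credit;
    netif->credit_timeout.data     = (unsigned long)netif;
    netif->credit_timeout.function = tx_credit_callback;
    /* Pin the timer to the current CPU; the 2.4 add_timer() fallback is gone. */
    add_timer_on(&netif->credit_timeout, smp_processor_id());
}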
130.1 --- /dev/null Thu Jan 01 00:00:00 1970 +0000 130.2 +++ b/linux-2.6-xen-sparse/drivers/xen/netback/xenbus.c Thu Aug 25 11:18:47 2005 -0700 130.3 @@ -0,0 +1,257 @@ 130.4 +/* Xenbus code for netif backend 130.5 + Copyright (C) 2005 Rusty Russell <rusty@rustcorp.com.au> 130.6 + 130.7 + This program is free software; you can redistribute it and/or modify 130.8 + it under the terms of the GNU General Public License as published by 130.9 + the Free Software Foundation; either version 2 of the License, or 130.10 + (at your option) any later version. 130.11 + 130.12 + This program is distributed in the hope that it will be useful, 130.13 + but WITHOUT ANY WARRANTY; without even the implied warranty of 130.14 + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 130.15 + GNU General Public License for more details. 130.16 + 130.17 + You should have received a copy of the GNU General Public License 130.18 + along with this program; if not, write to the Free Software 130.19 + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 130.20 +*/ 130.21 +#include <stdarg.h> 130.22 +#include <linux/module.h> 130.23 +#include <asm-xen/xenbus.h> 130.24 +#include "common.h" 130.25 + 130.26 +struct backend_info 130.27 +{ 130.28 + struct xenbus_device *dev; 130.29 + 130.30 + /* our communications channel */ 130.31 + netif_t *netif; 130.32 + 130.33 + long int frontend_id; 130.34 +#if 0 130.35 + long int pdev; 130.36 + long int readonly; 130.37 +#endif 130.38 + 130.39 + /* watch back end for changes */ 130.40 + struct xenbus_watch backend_watch; 130.41 + 130.42 + /* watch front end for changes */ 130.43 + struct xenbus_watch watch; 130.44 + char *frontpath; 130.45 +}; 130.46 + 130.47 +static int netback_remove(struct xenbus_device *dev) 130.48 +{ 130.49 + struct backend_info *be = dev->data; 130.50 + 130.51 + if (be->watch.node) 130.52 + unregister_xenbus_watch(&be->watch); 130.53 + unregister_xenbus_watch(&be->backend_watch); 130.54 + if (be->netif) 130.55 + netif_disconnect(be->netif); 130.56 + if (be->frontpath) 130.57 + kfree(be->frontpath); 130.58 + kfree(be); 130.59 + return 0; 130.60 +} 130.61 + 130.62 +/* Front end tells us frame. */ 130.63 +static void frontend_changed(struct xenbus_watch *watch, const char *node) 130.64 +{ 130.65 + unsigned long tx_ring_ref, rx_ring_ref; 130.66 + unsigned int evtchn; 130.67 + int err; 130.68 + struct backend_info *be 130.69 + = container_of(watch, struct backend_info, watch); 130.70 + char *mac, *e, *s; 130.71 + int i; 130.72 + 130.73 + /* If other end is gone, delete ourself. 
*/ 130.74 + if (node && !xenbus_exists(be->frontpath, "")) { 130.75 + xenbus_rm(be->dev->nodename, ""); 130.76 + device_unregister(&be->dev->dev); 130.77 + return; 130.78 + } 130.79 + if (be->netif == NULL || be->netif->status == CONNECTED) 130.80 + return; 130.81 + 130.82 + mac = xenbus_read(be->frontpath, "mac", NULL); 130.83 + if (IS_ERR(mac)) { 130.84 + err = PTR_ERR(mac); 130.85 + xenbus_dev_error(be->dev, err, "reading %s/mac", 130.86 + be->dev->nodename); 130.87 + return; 130.88 + } 130.89 + s = mac; 130.90 + for (i = 0; i < ETH_ALEN; i++) { 130.91 + be->netif->fe_dev_addr[i] = simple_strtoul(s, &e, 16); 130.92 + if (s == e || (e[0] != ':' && e[0] != 0)) { 130.93 + kfree(mac); 130.94 + err = -ENOENT; 130.95 + xenbus_dev_error(be->dev, err, "parsing %s/mac", 130.96 + be->dev->nodename); 130.97 + return; 130.98 + } 130.99 + s = &e[1]; 130.100 + } 130.101 + kfree(mac); 130.102 + 130.103 + err = xenbus_gather(be->frontpath, "tx-ring-ref", "%lu", &tx_ring_ref, 130.104 + "rx-ring-ref", "%lu", &rx_ring_ref, 130.105 + "event-channel", "%u", &evtchn, NULL); 130.106 + if (err) { 130.107 + xenbus_dev_error(be->dev, err, 130.108 + "reading %s/ring-ref and event-channel", 130.109 + be->frontpath); 130.110 + return; 130.111 + } 130.112 + 130.113 + /* Map the shared frame, irq etc. */ 130.114 + err = netif_map(be->netif, tx_ring_ref, rx_ring_ref, evtchn); 130.115 + if (err) { 130.116 + xenbus_dev_error(be->dev, err, 130.117 + "mapping shared-frames %lu/%lu port %u", 130.118 + tx_ring_ref, rx_ring_ref, evtchn); 130.119 + return; 130.120 + } 130.121 + 130.122 + xenbus_dev_ok(be->dev); 130.123 + 130.124 + return; 130.125 +} 130.126 + 130.127 +/* 130.128 + Setup supplies physical device. 130.129 + We provide event channel and device details to front end. 130.130 + Frontend supplies shared frame and event channel. 130.131 + */ 130.132 +static void backend_changed(struct xenbus_watch *watch, const char *node) 130.133 +{ 130.134 + int err; 130.135 + long int handle; 130.136 + struct backend_info *be 130.137 + = container_of(watch, struct backend_info, backend_watch); 130.138 + struct xenbus_device *dev = be->dev; 130.139 + u8 be_mac[ETH_ALEN] = { 0, 0, 0, 0, 0, 0 }; 130.140 + 130.141 + err = xenbus_scanf(dev->nodename, "handle", "%li", &handle); 130.142 + if (XENBUS_EXIST_ERR(err)) 130.143 + return; 130.144 + if (err < 0) { 130.145 + xenbus_dev_error(dev, err, "reading handle"); 130.146 + return; 130.147 + } 130.148 + 130.149 + if (be->netif == NULL) { 130.150 + be->netif = alloc_netif(be->frontend_id, handle, be_mac); 130.151 + if (IS_ERR(be->netif)) { 130.152 + err = PTR_ERR(be->netif); 130.153 + be->netif = NULL; 130.154 + xenbus_dev_error(dev, err, "creating interface"); 130.155 + return; 130.156 + } 130.157 + 130.158 +#if 0 130.159 + err = vbd_create(be->netif, handle, be->pdev, be->readonly); 130.160 + if (err) { 130.161 + xenbus_dev_error(dev, err, "creating vbd structure"); 130.162 + return; 130.163 + } 130.164 +#endif 130.165 + 130.166 + /* Pass in NULL node to skip exist test. 
*/ 130.167 + frontend_changed(&be->watch, NULL); 130.168 + } 130.169 +} 130.170 + 130.171 +static int netback_probe(struct xenbus_device *dev, 130.172 + const struct xenbus_device_id *id) 130.173 +{ 130.174 + struct backend_info *be; 130.175 + char *frontend; 130.176 + int err; 130.177 + 130.178 + be = kmalloc(sizeof(*be), GFP_KERNEL); 130.179 + if (!be) { 130.180 + xenbus_dev_error(dev, -ENOMEM, "allocating backend structure"); 130.181 + return -ENOMEM; 130.182 + } 130.183 + memset(be, 0, sizeof(*be)); 130.184 + 130.185 + frontend = NULL; 130.186 + err = xenbus_gather(dev->nodename, 130.187 + "frontend-id", "%li", &be->frontend_id, 130.188 + "frontend", NULL, &frontend, 130.189 + NULL); 130.190 + if (XENBUS_EXIST_ERR(err)) 130.191 + goto free_be; 130.192 + if (err < 0) { 130.193 + xenbus_dev_error(dev, err, 130.194 + "reading %s/frontend or frontend-id", 130.195 + dev->nodename); 130.196 + goto free_be; 130.197 + } 130.198 + if (strlen(frontend) == 0 || !xenbus_exists(frontend, "")) { 130.199 + /* If we can't get a frontend path and a frontend-id, 130.200 + * then our bus-id is no longer valid and we need to 130.201 + * destroy the backend device. 130.202 + */ 130.203 + err = -ENOENT; 130.204 + goto free_be; 130.205 + } 130.206 + 130.207 + be->dev = dev; 130.208 + be->backend_watch.node = dev->nodename; 130.209 + be->backend_watch.callback = backend_changed; 130.210 + err = register_xenbus_watch(&be->backend_watch); 130.211 + if (err) { 130.212 + be->backend_watch.node = NULL; 130.213 + xenbus_dev_error(dev, err, "adding backend watch on %s", 130.214 + dev->nodename); 130.215 + goto free_be; 130.216 + } 130.217 + 130.218 + be->frontpath = frontend; 130.219 + be->watch.node = be->frontpath; 130.220 + be->watch.callback = frontend_changed; 130.221 + err = register_xenbus_watch(&be->watch); 130.222 + if (err) { 130.223 + be->watch.node = NULL; 130.224 + xenbus_dev_error(dev, err, 130.225 + "adding frontend watch on %s", 130.226 + be->frontpath); 130.227 + goto free_be; 130.228 + } 130.229 + 130.230 + dev->data = be; 130.231 + 130.232 + backend_changed(&be->backend_watch, dev->nodename); 130.233 + return 0; 130.234 + 130.235 + free_be: 130.236 + if (be->backend_watch.node) 130.237 + unregister_xenbus_watch(&be->backend_watch); 130.238 + if (frontend) 130.239 + kfree(frontend); 130.240 + kfree(be); 130.241 + return err; 130.242 +} 130.243 + 130.244 +static struct xenbus_device_id netback_ids[] = { 130.245 + { "vif" }, 130.246 + { "" } 130.247 +}; 130.248 + 130.249 +static struct xenbus_driver netback = { 130.250 + .name = "vif", 130.251 + .owner = THIS_MODULE, 130.252 + .ids = netback_ids, 130.253 + .probe = netback_probe, 130.254 + .remove = netback_remove, 130.255 +}; 130.256 + 130.257 +void netif_xenbus_init(void) 130.258 +{ 130.259 + xenbus_register_backend(&netback); 130.260 +}
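[Note on the new xenbus.c above: backend_changed() reads the "handle" key and allocates the netif; frontend_changed() then reads the frontend's mac, tx-ring-ref, rx-ring-ref and event-channel keys and calls netif_map(). The frontend's MAC string is parsed octet by octet with simple_strtoul(); the helper below is not part of the changeset (hypothetical name) and only restates that loop in isolation.]

/* Convert a xenstore "mac" value of the form "xx:xx:xx:xx:xx:xx" into bytes.
 * Returns 0 on success, -ENOENT on a malformed string. */
static int example_parse_mac(char *mac, u8 addr[ETH_ALEN])
{
    char *s = mac, *e;
    int i;

    for (i = 0; i < ETH_ALEN; i++) {
        addr[i] = simple_strtoul(s, &e, 16);    /* one hex octet */
        if (s == e || (e[0] != ':' && e[0] != 0))
            return -ENOENT;                     /* malformed address */
        s = &e[1];                              /* step past the ':' */
    }
    return 0;
}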
131.1 --- a/linux-2.6-xen-sparse/drivers/xen/netfront/netfront.c Wed Aug 24 16:16:52 2005 -0700 131.2 +++ b/linux-2.6-xen-sparse/drivers/xen/netfront/netfront.c Thu Aug 25 11:18:47 2005 -0700 131.3 @@ -48,7 +48,7 @@ 131.4 #include <asm/io.h> 131.5 #include <asm/uaccess.h> 131.6 #include <asm-xen/evtchn.h> 131.7 -#include <asm-xen/ctrl_if.h> 131.8 +#include <asm-xen/xenbus.h> 131.9 #include <asm-xen/xen-public/io/netif.h> 131.10 #include <asm-xen/balloon.h> 131.11 #include <asm/page.h> 131.12 @@ -112,10 +112,14 @@ static grant_ref_t grant_rx_ref[NETIF_RX 131.13 #endif 131.14 131.15 #if defined(CONFIG_XEN_NETDEV_GRANT_TX) || defined(CONFIG_XEN_NETDEV_GRANT_RX) 131.16 -static domid_t rdomid = 0; 131.17 #define GRANT_INVALID_REF (0xFFFF) 131.18 #endif 131.19 131.20 +#define NETIF_STATE_DISCONNECTED 0 131.21 +#define NETIF_STATE_CONNECTED 1 131.22 + 131.23 +static unsigned int netif_state = NETIF_STATE_DISCONNECTED; 131.24 + 131.25 static void network_tx_buf_gc(struct net_device *dev); 131.26 static void network_alloc_rx_buffers(struct net_device *dev); 131.27 131.28 @@ -133,12 +137,11 @@ static void xennet_proc_delif(struct net 131.29 #define xennet_proc_delif(d) ((void)0) 131.30 #endif 131.31 131.32 -static struct list_head dev_list; 131.33 - 131.34 +#define netfront_info net_private 131.35 struct net_private 131.36 { 131.37 struct list_head list; 131.38 - struct net_device *dev; 131.39 + struct net_device *netdev; 131.40 131.41 struct net_device_stats stats; 131.42 NETIF_RING_IDX rx_resp_cons, tx_resp_cons; 131.43 @@ -176,6 +179,14 @@ struct net_private 131.44 */ 131.45 struct sk_buff *tx_skbs[NETIF_TX_RING_SIZE+1]; 131.46 struct sk_buff *rx_skbs[NETIF_RX_RING_SIZE+1]; 131.47 + 131.48 + struct xenbus_device *xbdev; 131.49 + char *backend; 131.50 + int backend_id; 131.51 + struct xenbus_watch watch; 131.52 + int tx_ring_ref; 131.53 + int rx_ring_ref; 131.54 + u8 mac[ETH_ALEN]; 131.55 }; 131.56 131.57 /* Access macros for acquiring freeing slots in {tx,rx}_skbs[]. */ 131.58 @@ -187,20 +198,14 @@ struct net_private 131.59 (_list)[0] = (_list)[_id]; \ 131.60 (unsigned short)_id; }) 131.61 131.62 -static char *status_name[] = { 131.63 - [NETIF_INTERFACE_STATUS_CLOSED] = "closed", 131.64 - [NETIF_INTERFACE_STATUS_DISCONNECTED] = "disconnected", 131.65 - [NETIF_INTERFACE_STATUS_CONNECTED] = "connected", 131.66 - [NETIF_INTERFACE_STATUS_CHANGED] = "changed", 131.67 -}; 131.68 - 131.69 +#ifdef DEBUG 131.70 static char *be_state_name[] = { 131.71 [BEST_CLOSED] = "closed", 131.72 [BEST_DISCONNECTED] = "disconnected", 131.73 [BEST_CONNECTED] = "connected", 131.74 }; 131.75 +#endif 131.76 131.77 -#define DEBUG 131.78 #ifdef DEBUG 131.79 #define DPRINTK(fmt, args...) \ 131.80 printk(KERN_ALERT "xen_net (%s:%d) " fmt, __FUNCTION__, __LINE__, ##args) 131.81 @@ -212,89 +217,6 @@ static char *be_state_name[] = { 131.82 #define WPRINTK(fmt, args...) \ 131.83 printk(KERN_WARNING "xen_net: " fmt, ##args) 131.84 131.85 -static struct net_device *find_dev_by_handle(unsigned int handle) 131.86 -{ 131.87 - struct list_head *ent; 131.88 - struct net_private *np; 131.89 - list_for_each (ent, &dev_list) { 131.90 - np = list_entry(ent, struct net_private, list); 131.91 - if (np->handle == handle) 131.92 - return np->dev; 131.93 - } 131.94 - return NULL; 131.95 -} 131.96 - 131.97 -/** Network interface info. */ 131.98 -struct netif_ctrl { 131.99 - /** Number of interfaces. */ 131.100 - int interface_n; 131.101 - /** Number of connected interfaces. */ 131.102 - int connected_n; 131.103 - /** Error code. 
*/ 131.104 - int err; 131.105 - int up; 131.106 -}; 131.107 - 131.108 -static struct netif_ctrl netctrl; 131.109 - 131.110 -static void netctrl_init(void) 131.111 -{ 131.112 - memset(&netctrl, 0, sizeof(netctrl)); 131.113 - netctrl.up = NETIF_DRIVER_STATUS_DOWN; 131.114 -} 131.115 - 131.116 -/** Get or set a network interface error. 131.117 - */ 131.118 -static int netctrl_err(int err) 131.119 -{ 131.120 - if ((err < 0) && !netctrl.err) 131.121 - netctrl.err = err; 131.122 - return netctrl.err; 131.123 -} 131.124 - 131.125 -/** Test if all network interfaces are connected. 131.126 - * 131.127 - * @return 1 if all connected, 0 if not, negative error code otherwise 131.128 - */ 131.129 -static int netctrl_connected(void) 131.130 -{ 131.131 - int ok; 131.132 - 131.133 - if (netctrl.err) 131.134 - ok = netctrl.err; 131.135 - else if (netctrl.up == NETIF_DRIVER_STATUS_UP) 131.136 - ok = (netctrl.connected_n == netctrl.interface_n); 131.137 - else 131.138 - ok = 0; 131.139 - 131.140 - return ok; 131.141 -} 131.142 - 131.143 -/** Count the connected network interfaces. 131.144 - * 131.145 - * @return connected count 131.146 - */ 131.147 -static int netctrl_connected_count(void) 131.148 -{ 131.149 - 131.150 - struct list_head *ent; 131.151 - struct net_private *np; 131.152 - unsigned int connected; 131.153 - 131.154 - connected = 0; 131.155 - 131.156 - list_for_each(ent, &dev_list) { 131.157 - np = list_entry(ent, struct net_private, list); 131.158 - if (np->backend_state == BEST_CONNECTED) 131.159 - connected++; 131.160 - } 131.161 - 131.162 - netctrl.connected_n = connected; 131.163 - DPRINTK("> connected_n=%d interface_n=%d\n", 131.164 - netctrl.connected_n, netctrl.interface_n); 131.165 - return connected; 131.166 -} 131.167 - 131.168 /** Send a packet on a net device to encourage switches to learn the 131.169 * MAC. We send a fake ARP request. 
131.170 * 131.171 @@ -364,7 +286,7 @@ static void network_tx_buf_gc(struct net 131.172 "still in use by backend domain.\n"); 131.173 goto out; 131.174 } 131.175 - gnttab_end_foreign_access(grant_tx_ref[id], GNTMAP_readonly); 131.176 + gnttab_end_foreign_access_ref(grant_tx_ref[id], GNTMAP_readonly); 131.177 gnttab_release_grant_reference(&gref_tx_head, grant_tx_ref[id]); 131.178 grant_tx_ref[id] = GRANT_INVALID_REF; 131.179 #endif 131.180 @@ -448,7 +370,7 @@ static void network_alloc_rx_buffers(str 131.181 BUG(); 131.182 } 131.183 grant_rx_ref[id] = ref; 131.184 - gnttab_grant_foreign_transfer_ref(ref, rdomid, 131.185 + gnttab_grant_foreign_transfer_ref(ref, np->backend_id, 131.186 virt_to_mfn(skb->head)); 131.187 np->rx->ring[MASK_NETIF_RX_IDX(req_prod + i)].req.gref = ref; 131.188 #endif 131.189 @@ -544,7 +466,7 @@ static int network_start_xmit(struct sk_ 131.190 BUG(); 131.191 } 131.192 mfn = virt_to_mfn(skb->data); 131.193 - gnttab_grant_foreign_access_ref(ref, rdomid, mfn, GNTMAP_readonly); 131.194 + gnttab_grant_foreign_access_ref(ref, np->backend_id, mfn, GNTMAP_readonly); 131.195 tx->addr = ref << PAGE_SHIFT; 131.196 grant_tx_ref[id] = ref; 131.197 #else 131.198 @@ -650,7 +572,7 @@ static int netif_poll(struct net_device 131.199 #ifdef CONFIG_XEN_NETDEV_GRANT_RX 131.200 ref = grant_rx_ref[rx->id]; 131.201 grant_rx_ref[rx->id] = GRANT_INVALID_REF; 131.202 - mfn = gnttab_end_foreign_transfer(ref); 131.203 + mfn = gnttab_end_foreign_transfer_ref(ref); 131.204 gnttab_release_grant_reference(&gref_rx_head, ref); 131.205 #endif 131.206 131.207 @@ -809,7 +731,7 @@ static int network_close(struct net_devi 131.208 { 131.209 struct net_private *np = netdev_priv(dev); 131.210 np->user_state = UST_CLOSED; 131.211 - netif_stop_queue(np->dev); 131.212 + netif_stop_queue(np->netdev); 131.213 return 0; 131.214 } 131.215 131.216 @@ -821,8 +743,7 @@ static struct net_device_stats *network_ 131.217 } 131.218 131.219 131.220 -static void network_connect(struct net_device *dev, 131.221 - netif_fe_interface_status_t *status) 131.222 +static void network_connect(struct net_device *dev) 131.223 { 131.224 struct net_private *np; 131.225 int i, requeue_idx; 131.226 @@ -890,7 +811,7 @@ static void network_connect(struct net_d 131.227 */ 131.228 np->backend_state = BEST_CONNECTED; 131.229 wmb(); 131.230 - notify_via_evtchn(status->evtchn); 131.231 + notify_via_evtchn(np->evtchn); 131.232 network_tx_buf_gc(dev); 131.233 131.234 if (np->user_state == UST_OPEN) 131.235 @@ -900,148 +821,21 @@ static void network_connect(struct net_d 131.236 spin_unlock_irq(&np->tx_lock); 131.237 } 131.238 131.239 -static void vif_show(struct net_private *np) 131.240 +static void show_device(struct net_private *np) 131.241 { 131.242 #ifdef DEBUG 131.243 - if (np) { 131.244 - IPRINTK("<vif handle=%u %s(%s) evtchn=%u tx=%p rx=%p>\n", 131.245 - np->handle, 131.246 - be_state_name[np->backend_state], 131.247 - np->user_state ? "open" : "closed", 131.248 - np->evtchn, 131.249 - np->tx, 131.250 - np->rx); 131.251 - } else { 131.252 - IPRINTK("<vif NULL>\n"); 131.253 - } 131.254 -#endif 131.255 -} 131.256 - 131.257 -/* Send a connect message to xend to tell it to bring up the interface. 
*/ 131.258 -static void send_interface_connect(struct net_private *np) 131.259 -{ 131.260 - int err; 131.261 - ctrl_msg_t cmsg = { 131.262 - .type = CMSG_NETIF_FE, 131.263 - .subtype = CMSG_NETIF_FE_INTERFACE_CONNECT, 131.264 - .length = sizeof(netif_fe_interface_connect_t), 131.265 - }; 131.266 - netif_fe_interface_connect_t *msg = (void*)cmsg.msg; 131.267 - 131.268 - msg->handle = np->handle; 131.269 - msg->tx_shmem_frame = virt_to_mfn(np->tx); 131.270 -#ifdef CONFIG_XEN_NETDEV_GRANT_TX 131.271 - err = gnttab_grant_foreign_access(rdomid, msg->tx_shmem_frame, 0); 131.272 - if (err < 0) { 131.273 - printk(KERN_ALERT "#### netfront can't grant access to tx_shmem\n"); 131.274 - BUG(); 131.275 - } 131.276 - msg->tx_shmem_ref = err; 131.277 -#endif 131.278 - 131.279 - msg->rx_shmem_frame = virt_to_mfn(np->rx); 131.280 -#ifdef CONFIG_XEN_NETDEV_GRANT_RX 131.281 - err = gnttab_grant_foreign_access(rdomid, msg->rx_shmem_frame, 0); 131.282 - if (err < 0) { 131.283 - printk(KERN_ALERT "#### netfront can't grant access to rx_shmem\n"); 131.284 - BUG(); 131.285 - } 131.286 - msg->rx_shmem_ref = err; 131.287 + if (np) { 131.288 + IPRINTK("<vif handle=%u %s(%s) evtchn=%u tx=%p rx=%p>\n", 131.289 + np->handle, 131.290 + be_state_name[np->backend_state], 131.291 + np->user_state ? "open" : "closed", 131.292 + np->evtchn, 131.293 + np->tx, 131.294 + np->rx); 131.295 + } else { 131.296 + IPRINTK("<vif NULL>\n"); 131.297 + } 131.298 #endif 131.299 - 131.300 - ctrl_if_send_message_block(&cmsg, NULL, 0, TASK_UNINTERRUPTIBLE); 131.301 -} 131.302 - 131.303 -/* Send a driver status notification to the domain controller. */ 131.304 -static int send_driver_status(int ok) 131.305 -{ 131.306 - int err = 0; 131.307 - ctrl_msg_t cmsg = { 131.308 - .type = CMSG_NETIF_FE, 131.309 - .subtype = CMSG_NETIF_FE_DRIVER_STATUS, 131.310 - .length = sizeof(netif_fe_driver_status_t), 131.311 - }; 131.312 - netif_fe_driver_status_t *msg = (void*)cmsg.msg; 131.313 - 131.314 - msg->status = (ok ? NETIF_DRIVER_STATUS_UP : NETIF_DRIVER_STATUS_DOWN); 131.315 - err = ctrl_if_send_message_block(&cmsg, NULL, 0, TASK_UNINTERRUPTIBLE); 131.316 - return err; 131.317 -} 131.318 - 131.319 -/* Stop network device and free tx/rx queues and irq. 131.320 - */ 131.321 -static void vif_release(struct net_private *np) 131.322 -{ 131.323 - /* Stop old i/f to prevent errors whilst we rebuild the state. */ 131.324 - spin_lock_irq(&np->tx_lock); 131.325 - spin_lock(&np->rx_lock); 131.326 - netif_stop_queue(np->dev); 131.327 - /* np->backend_state = BEST_DISCONNECTED; */ 131.328 - spin_unlock(&np->rx_lock); 131.329 - spin_unlock_irq(&np->tx_lock); 131.330 - 131.331 - /* Free resources. */ 131.332 - if ( np->tx != NULL ) 131.333 - { 131.334 - unbind_evtchn_from_irqhandler(np->evtchn, np->dev); 131.335 - free_page((unsigned long)np->tx); 131.336 - free_page((unsigned long)np->rx); 131.337 - np->evtchn = 0; 131.338 - np->tx = NULL; 131.339 - np->rx = NULL; 131.340 - } 131.341 -} 131.342 - 131.343 -/* Release vif resources and close it down completely. 131.344 - */ 131.345 -static void vif_close(struct net_private *np) 131.346 -{ 131.347 - WPRINTK("Unexpected netif-CLOSED message in state %s\n", 131.348 - be_state_name[np->backend_state]); 131.349 - vif_release(np); 131.350 - np->backend_state = BEST_CLOSED; 131.351 - /* todo: take dev down and free. */ 131.352 - vif_show(np); 131.353 -} 131.354 - 131.355 -/* Move the vif into disconnected state. 131.356 - * Allocates tx/rx pages. 131.357 - * Sends connect message to xend. 
131.358 - */ 131.359 -static void vif_disconnect(struct net_private *np) 131.360 -{ 131.361 - if(np->tx) free_page((unsigned long)np->tx); 131.362 - if(np->rx) free_page((unsigned long)np->rx); 131.363 - // Before this np->tx and np->rx had better be null. 131.364 - np->tx = (netif_tx_interface_t *)__get_free_page(GFP_KERNEL); 131.365 - np->rx = (netif_rx_interface_t *)__get_free_page(GFP_KERNEL); 131.366 - memset(np->tx, 0, PAGE_SIZE); 131.367 - memset(np->rx, 0, PAGE_SIZE); 131.368 - np->backend_state = BEST_DISCONNECTED; 131.369 - send_interface_connect(np); 131.370 - vif_show(np); 131.371 -} 131.372 - 131.373 -/* Begin interface recovery. 131.374 - * 131.375 - * NB. Whilst we're recovering, we turn the carrier state off. We 131.376 - * take measures to ensure that this device isn't used for 131.377 - * anything. We also stop the queue for this device. Various 131.378 - * different approaches (e.g. continuing to buffer packets) have 131.379 - * been tested but don't appear to improve the overall impact on 131.380 - * TCP connections. 131.381 - * 131.382 - * TODO: (MAW) Change the Xend<->Guest protocol so that a recovery 131.383 - * is initiated by a special "RESET" message - disconnect could 131.384 - * just mean we're not allowed to use this interface any more. 131.385 - */ 131.386 -static void vif_reset(struct net_private *np) 131.387 -{ 131.388 - IPRINTK("Attempting to reconnect network interface: handle=%u\n", 131.389 - np->handle); 131.390 - vif_release(np); 131.391 - vif_disconnect(np); 131.392 - vif_show(np); 131.393 } 131.394 131.395 /* Move the vif into connected state. 131.396 @@ -1049,26 +843,22 @@ static void vif_reset(struct net_private 131.397 * Binds the irq to the event channel. 131.398 */ 131.399 static void 131.400 -vif_connect(struct net_private *np, netif_fe_interface_status_t *status) 131.401 +connect_device(struct net_private *np, unsigned int evtchn) 131.402 { 131.403 - struct net_device *dev = np->dev; 131.404 - memcpy(dev->dev_addr, status->mac, ETH_ALEN); 131.405 - network_connect(dev, status); 131.406 - np->evtchn = status->evtchn; 131.407 -#if defined(CONFIG_XEN_NETDEV_GRANT_TX) || defined(CONFIG_XEN_NETDEV_GRANT_RX) 131.408 - rdomid = status->domid; 131.409 -#endif 131.410 - (void)bind_evtchn_to_irqhandler( 131.411 - np->evtchn, netif_int, SA_SAMPLE_RANDOM, dev->name, dev); 131.412 - netctrl_connected_count(); 131.413 - (void)send_fake_arp(dev); 131.414 - vif_show(np); 131.415 + struct net_device *dev = np->netdev; 131.416 + memcpy(dev->dev_addr, np->mac, ETH_ALEN); 131.417 + np->evtchn = evtchn; 131.418 + network_connect(dev); 131.419 + (void)bind_evtchn_to_irqhandler( 131.420 + np->evtchn, netif_int, SA_SAMPLE_RANDOM, dev->name, dev); 131.421 + (void)send_fake_arp(dev); 131.422 + show_device(np); 131.423 } 131.424 131.425 static struct ethtool_ops network_ethtool_ops = 131.426 { 131.427 - .get_tx_csum = ethtool_op_get_tx_csum, 131.428 - .set_tx_csum = ethtool_op_set_tx_csum, 131.429 + .get_tx_csum = ethtool_op_get_tx_csum, 131.430 + .set_tx_csum = ethtool_op_set_tx_csum, 131.431 }; 131.432 131.433 /** Create a network device. 
131.434 @@ -1076,22 +866,24 @@ static struct ethtool_ops network_ethtoo 131.435 * @param val return parameter for created device 131.436 * @return 0 on success, error code otherwise 131.437 */ 131.438 -static int create_netdev(int handle, struct net_device **val) 131.439 +static int create_netdev(int handle, struct xenbus_device *dev, 131.440 + struct net_device **val) 131.441 { 131.442 int i, err = 0; 131.443 - struct net_device *dev = NULL; 131.444 + struct net_device *netdev = NULL; 131.445 struct net_private *np = NULL; 131.446 131.447 - if ((dev = alloc_etherdev(sizeof(struct net_private))) == NULL) { 131.448 + if ((netdev = alloc_etherdev(sizeof(struct net_private))) == NULL) { 131.449 printk(KERN_WARNING "%s> alloc_etherdev failed.\n", __FUNCTION__); 131.450 err = -ENOMEM; 131.451 goto exit; 131.452 } 131.453 131.454 - np = netdev_priv(dev); 131.455 + np = netdev_priv(netdev); 131.456 np->backend_state = BEST_CLOSED; 131.457 np->user_state = UST_CLOSED; 131.458 np->handle = handle; 131.459 + np->xbdev = dev; 131.460 131.461 spin_lock_init(&np->tx_lock); 131.462 spin_lock_init(&np->rx_lock); 131.463 @@ -1115,268 +907,53 @@ static int create_netdev(int handle, str 131.464 #endif 131.465 } 131.466 131.467 - dev->open = network_open; 131.468 - dev->hard_start_xmit = network_start_xmit; 131.469 - dev->stop = network_close; 131.470 - dev->get_stats = network_get_stats; 131.471 - dev->poll = netif_poll; 131.472 - dev->weight = 64; 131.473 - dev->features = NETIF_F_IP_CSUM; 131.474 + netdev->open = network_open; 131.475 + netdev->hard_start_xmit = network_start_xmit; 131.476 + netdev->stop = network_close; 131.477 + netdev->get_stats = network_get_stats; 131.478 + netdev->poll = netif_poll; 131.479 + netdev->weight = 64; 131.480 + netdev->features = NETIF_F_IP_CSUM; 131.481 131.482 - SET_ETHTOOL_OPS(dev, &network_ethtool_ops); 131.483 + SET_ETHTOOL_OPS(netdev, &network_ethtool_ops); 131.484 131.485 - if ((err = register_netdev(dev)) != 0) { 131.486 + if ((err = register_netdev(netdev)) != 0) { 131.487 printk(KERN_WARNING "%s> register_netdev err=%d\n", __FUNCTION__, err); 131.488 goto exit; 131.489 } 131.490 131.491 - if ((err = xennet_proc_addif(dev)) != 0) { 131.492 - unregister_netdev(dev); 131.493 + if ((err = xennet_proc_addif(netdev)) != 0) { 131.494 + unregister_netdev(netdev); 131.495 goto exit; 131.496 } 131.497 131.498 - np->dev = dev; 131.499 - list_add(&np->list, &dev_list); 131.500 + np->netdev = netdev; 131.501 131.502 exit: 131.503 - if ((err != 0) && (dev != NULL)) 131.504 - kfree(dev); 131.505 + if ((err != 0) && (netdev != NULL)) 131.506 + kfree(netdev); 131.507 else if (val != NULL) 131.508 - *val = dev; 131.509 - return err; 131.510 -} 131.511 - 131.512 -/* Get the target interface for a status message. 131.513 - * Creates the interface when it makes sense. 131.514 - * The returned interface may be null when there is no error. 
131.515 - * 131.516 - * @param status status message 131.517 - * @param np return parameter for interface state 131.518 - * @return 0 on success, error code otherwise 131.519 - */ 131.520 -static int 131.521 -target_vif(netif_fe_interface_status_t *status, struct net_private **np) 131.522 -{ 131.523 - int err = 0; 131.524 - struct net_device *dev; 131.525 - 131.526 - DPRINTK("> handle=%d\n", status->handle); 131.527 - if (status->handle < 0) { 131.528 - err = -EINVAL; 131.529 - goto exit; 131.530 - } 131.531 - 131.532 - if ((dev = find_dev_by_handle(status->handle)) != NULL) 131.533 - goto exit; 131.534 - 131.535 - if (status->status == NETIF_INTERFACE_STATUS_CLOSED) 131.536 - goto exit; 131.537 - if (status->status == NETIF_INTERFACE_STATUS_CHANGED) 131.538 - goto exit; 131.539 - 131.540 - /* It's a new interface in a good state - create it. */ 131.541 - DPRINTK("> create device...\n"); 131.542 - if ((err = create_netdev(status->handle, &dev)) != 0) 131.543 - goto exit; 131.544 - 131.545 - netctrl.interface_n++; 131.546 - 131.547 - exit: 131.548 - if (np != NULL) 131.549 - *np = ((dev && !err) ? netdev_priv(dev) : NULL); 131.550 - DPRINTK("< err=%d\n", err); 131.551 + *val = netdev; 131.552 return err; 131.553 } 131.554 131.555 -/* Handle an interface status message. */ 131.556 -static void netif_interface_status(netif_fe_interface_status_t *status) 131.557 +static int destroy_netdev(struct net_device *netdev) 131.558 { 131.559 - int err = 0; 131.560 - struct net_private *np = NULL; 131.561 - 131.562 - DPRINTK("> status=%s handle=%d\n", 131.563 - status_name[status->status], status->handle); 131.564 - 131.565 - if ((err = target_vif(status, &np)) != 0) { 131.566 - WPRINTK("Invalid netif: handle=%u\n", status->handle); 131.567 - return; 131.568 - } 131.569 - 131.570 - if (np == NULL) { 131.571 - DPRINTK("> no vif\n"); 131.572 - return; 131.573 - } 131.574 - 131.575 - switch (status->status) { 131.576 - case NETIF_INTERFACE_STATUS_CLOSED: 131.577 - switch (np->backend_state) { 131.578 - case BEST_CLOSED: 131.579 - case BEST_DISCONNECTED: 131.580 - case BEST_CONNECTED: 131.581 - vif_close(np); 131.582 - break; 131.583 - } 131.584 - break; 131.585 - 131.586 - case NETIF_INTERFACE_STATUS_DISCONNECTED: 131.587 - switch (np->backend_state) { 131.588 - case BEST_CLOSED: 131.589 - vif_disconnect(np); 131.590 - break; 131.591 - case BEST_DISCONNECTED: 131.592 - case BEST_CONNECTED: 131.593 - vif_reset(np); 131.594 - break; 131.595 - } 131.596 - break; 131.597 + struct net_private *np = NULL; 131.598 131.599 - case NETIF_INTERFACE_STATUS_CONNECTED: 131.600 - switch (np->backend_state) { 131.601 - case BEST_CLOSED: 131.602 - WPRINTK("Unexpected netif status %s in state %s\n", 131.603 - status_name[status->status], 131.604 - be_state_name[np->backend_state]); 131.605 - vif_disconnect(np); 131.606 - vif_connect(np, status); 131.607 - break; 131.608 - case BEST_DISCONNECTED: 131.609 - vif_connect(np, status); 131.610 - break; 131.611 - } 131.612 - break; 131.613 - 131.614 - case NETIF_INTERFACE_STATUS_CHANGED: 131.615 - /* 131.616 - * The domain controller is notifying us that a device has been 131.617 - * added or removed. 131.618 - */ 131.619 - break; 131.620 - 131.621 - default: 131.622 - WPRINTK("Invalid netif status code %d\n", status->status); 131.623 - break; 131.624 - } 131.625 - 131.626 - vif_show(np); 131.627 -} 131.628 - 131.629 -/* 131.630 - * Initialize the network control interface. 
131.631 - */ 131.632 -static void netif_driver_status(netif_fe_driver_status_t *status) 131.633 -{ 131.634 - netctrl.up = status->status; 131.635 - netctrl_connected_count(); 131.636 -} 131.637 - 131.638 -/* Receive handler for control messages. */ 131.639 -static void netif_ctrlif_rx(ctrl_msg_t *msg, unsigned long id) 131.640 -{ 131.641 - 131.642 - switch (msg->subtype) { 131.643 - case CMSG_NETIF_FE_INTERFACE_STATUS: 131.644 - netif_interface_status((netif_fe_interface_status_t *) &msg->msg[0]); 131.645 - break; 131.646 - 131.647 - case CMSG_NETIF_FE_DRIVER_STATUS: 131.648 - netif_driver_status((netif_fe_driver_status_t *) &msg->msg[0]); 131.649 - break; 131.650 +#ifdef CONFIG_PROC_FS 131.651 + xennet_proc_delif(netdev); 131.652 +#endif 131.653 131.654 - default: 131.655 - msg->length = 0; 131.656 - break; 131.657 - } 131.658 - 131.659 - ctrl_if_send_response(msg); 131.660 -} 131.661 - 131.662 - 131.663 -#if 1 131.664 -/* Wait for all interfaces to be connected. 131.665 - * 131.666 - * This works OK, but we'd like to use the probing mode (see below). 131.667 - */ 131.668 -static int probe_interfaces(void) 131.669 -{ 131.670 - int err = 0, conn = 0; 131.671 - int wait_i, wait_n = 100; 131.672 + unregister_netdev(netdev); 131.673 131.674 - DPRINTK(">\n"); 131.675 + np = netdev_priv(netdev); 131.676 + list_del(&np->list); 131.677 131.678 - for (wait_i = 0; wait_i < wait_n; wait_i++) { 131.679 - DPRINTK("> wait_i=%d\n", wait_i); 131.680 - conn = netctrl_connected(); 131.681 - if(conn) break; 131.682 - DPRINTK("> schedule_timeout...\n"); 131.683 - set_current_state(TASK_INTERRUPTIBLE); 131.684 - schedule_timeout(10); 131.685 - } 131.686 + kfree(netdev); 131.687 131.688 - DPRINTK("> wait finished...\n"); 131.689 - if (conn <= 0) { 131.690 - err = netctrl_err(-ENETDOWN); 131.691 - WPRINTK("Failed to connect all virtual interfaces: err=%d\n", err); 131.692 - } 131.693 - 131.694 - DPRINTK("< err=%d\n", err); 131.695 - 131.696 - return err; 131.697 + return 0; 131.698 } 131.699 -#else 131.700 -/* Probe for interfaces until no more are found. 131.701 - * 131.702 - * This is the mode we'd like to use, but at the moment it panics the kernel. 131.703 -*/ 131.704 -static int probe_interfaces(void) 131.705 -{ 131.706 - int err = 0; 131.707 - int wait_i, wait_n = 100; 131.708 - ctrl_msg_t cmsg = { 131.709 - .type = CMSG_NETIF_FE, 131.710 - .subtype = CMSG_NETIF_FE_INTERFACE_STATUS, 131.711 - .length = sizeof(netif_fe_interface_status_t), 131.712 - }; 131.713 - netif_fe_interface_status_t msg = {}; 131.714 - ctrl_msg_t rmsg = {}; 131.715 - netif_fe_interface_status_t *reply = (void*)rmsg.msg; 131.716 - int state = TASK_UNINTERRUPTIBLE; 131.717 - u32 query = -1; 131.718 - 131.719 - DPRINTK(">\n"); 131.720 - 131.721 - netctrl.interface_n = 0; 131.722 - for (wait_i = 0; wait_i < wait_n; wait_i++) { 131.723 - DPRINTK("> wait_i=%d query=%d\n", wait_i, query); 131.724 - msg.handle = query; 131.725 - memcpy(cmsg.msg, &msg, sizeof(msg)); 131.726 - DPRINTK("> set_current_state...\n"); 131.727 - set_current_state(state); 131.728 - DPRINTK("> rmsg=%p msg=%p, reply=%p\n", &rmsg, rmsg.msg, reply); 131.729 - DPRINTK("> sending...\n"); 131.730 - err = ctrl_if_send_message_and_get_response(&cmsg, &rmsg, state); 131.731 - DPRINTK("> err=%d\n", err); 131.732 - if(err) goto exit; 131.733 - DPRINTK("> rmsg=%p msg=%p, reply=%p\n", &rmsg, rmsg.msg, reply); 131.734 - if((int)reply->handle < 0) { 131.735 - // No more interfaces. 
131.736 - break; 131.737 - } 131.738 - query = -reply->handle - 2; 131.739 - DPRINTK(">netif_interface_status ...\n"); 131.740 - netif_interface_status(reply); 131.741 - } 131.742 - 131.743 - exit: 131.744 - if (err) { 131.745 - err = netctrl_err(-ENETDOWN); 131.746 - WPRINTK("Connecting virtual network interfaces failed: err=%d\n", err); 131.747 - } 131.748 - 131.749 - DPRINTK("< err=%d\n", err); 131.750 - return err; 131.751 -} 131.752 - 131.753 -#endif 131.754 131.755 /* 131.756 * We use this notifier to send out a fake ARP reply to reset switches and 131.757 @@ -1387,19 +964,11 @@ inetdev_notify(struct notifier_block *th 131.758 { 131.759 struct in_ifaddr *ifa = (struct in_ifaddr *)ptr; 131.760 struct net_device *dev = ifa->ifa_dev->dev; 131.761 - struct list_head *ent; 131.762 - struct net_private *np; 131.763 131.764 - if (event != NETDEV_UP) 131.765 - goto out; 131.766 - 131.767 - list_for_each (ent, &dev_list) { 131.768 - np = list_entry(ent, struct net_private, list); 131.769 - if (np->dev == dev) 131.770 - (void)send_fake_arp(dev); 131.771 - } 131.772 + /* UP event and is it one of our devices? */ 131.773 + if (event == NETDEV_UP && dev->open == network_open) 131.774 + (void)send_fake_arp(dev); 131.775 131.776 - out: 131.777 return NOTIFY_DONE; 131.778 } 131.779 131.780 @@ -1409,12 +978,367 @@ static struct notifier_block notifier_in 131.781 .priority = 0 131.782 }; 131.783 131.784 +static struct xenbus_device_id netfront_ids[] = { 131.785 + { "vif" }, 131.786 + { "" } 131.787 +}; 131.788 + 131.789 +static void watch_for_status(struct xenbus_watch *watch, const char *node) 131.790 +{ 131.791 +} 131.792 + 131.793 +static int setup_device(struct xenbus_device *dev, struct netfront_info *info) 131.794 +{ 131.795 + evtchn_op_t op = { .cmd = EVTCHNOP_alloc_unbound }; 131.796 + int err; 131.797 + 131.798 +#ifdef CONFIG_XEN_NETDEV_GRANT_TX 131.799 + info->tx_ring_ref = GRANT_INVALID_REF; 131.800 +#endif 131.801 +#ifdef CONFIG_XEN_NETDEV_GRANT_RX 131.802 + info->rx_ring_ref = GRANT_INVALID_REF; 131.803 +#endif 131.804 + 131.805 + info->tx = (netif_tx_interface_t *)__get_free_page(GFP_KERNEL); 131.806 + if (info->tx == 0) { 131.807 + err = -ENOMEM; 131.808 + xenbus_dev_error(dev, err, "allocating tx ring page"); 131.809 + goto out; 131.810 + } 131.811 + info->rx = (netif_rx_interface_t *)__get_free_page(GFP_KERNEL); 131.812 + if (info->rx == 0) { 131.813 + err = -ENOMEM; 131.814 + xenbus_dev_error(dev, err, "allocating rx ring page"); 131.815 + goto out; 131.816 + } 131.817 + memset(info->tx, 0, PAGE_SIZE); 131.818 + memset(info->rx, 0, PAGE_SIZE); 131.819 + info->backend_state = BEST_DISCONNECTED; 131.820 + 131.821 +#ifdef CONFIG_XEN_NETDEV_GRANT_TX 131.822 + err = gnttab_grant_foreign_access(info->backend_id, 131.823 + virt_to_mfn(info->tx), 0); 131.824 + if (err < 0) { 131.825 + xenbus_dev_error(dev, err, "granting access to tx ring page"); 131.826 + goto out; 131.827 + } 131.828 + info->tx_ring_ref = err; 131.829 +#else 131.830 + info->tx_ring_ref = virt_to_mfn(info->tx); 131.831 +#endif 131.832 + 131.833 +#ifdef CONFIG_XEN_NETDEV_GRANT_RX 131.834 + err = gnttab_grant_foreign_access(info->backend_id, 131.835 + virt_to_mfn(info->rx), 0); 131.836 + if (err < 0) { 131.837 + xenbus_dev_error(dev, err, "granting access to rx ring page"); 131.838 + goto out; 131.839 + } 131.840 + info->rx_ring_ref = err; 131.841 +#else 131.842 + info->rx_ring_ref = virt_to_mfn(info->rx); 131.843 +#endif 131.844 + 131.845 + op.u.alloc_unbound.dom = info->backend_id; 131.846 + err = 
HYPERVISOR_event_channel_op(&op); 131.847 + if (err) { 131.848 + xenbus_dev_error(dev, err, "allocating event channel"); 131.849 + goto out; 131.850 + } 131.851 + connect_device(info, op.u.alloc_unbound.port); 131.852 + return 0; 131.853 + 131.854 + out: 131.855 + if (info->tx) 131.856 + free_page((unsigned long)info->tx); 131.857 + info->tx = 0; 131.858 + if (info->rx) 131.859 + free_page((unsigned long)info->rx); 131.860 + info->rx = 0; 131.861 +#ifdef CONFIG_XEN_NETDEV_GRANT_TX 131.862 + if (info->tx_ring_ref != GRANT_INVALID_REF) 131.863 + gnttab_end_foreign_access(info->tx_ring_ref, 0); 131.864 + info->tx_ring_ref = GRANT_INVALID_REF; 131.865 +#endif 131.866 +#ifdef CONFIG_XEN_NETDEV_GRANT_RX 131.867 + if (info->rx_ring_ref != GRANT_INVALID_REF) 131.868 + gnttab_end_foreign_access(info->rx_ring_ref, 0); 131.869 + info->rx_ring_ref = GRANT_INVALID_REF; 131.870 +#endif 131.871 + return err; 131.872 +} 131.873 + 131.874 +static void netif_free(struct netfront_info *info) 131.875 +{ 131.876 + if (info->tx) 131.877 + free_page((unsigned long)info->tx); 131.878 + info->tx = 0; 131.879 + if (info->rx) 131.880 + free_page((unsigned long)info->rx); 131.881 + info->rx = 0; 131.882 +#ifdef CONFIG_XEN_NETDEV_GRANT_TX 131.883 + if (info->tx_ring_ref != GRANT_INVALID_REF) 131.884 + gnttab_end_foreign_access(info->tx_ring_ref, 0); 131.885 + info->tx_ring_ref = GRANT_INVALID_REF; 131.886 +#endif 131.887 +#ifdef CONFIG_XEN_NETDEV_GRANT_RX 131.888 + if (info->rx_ring_ref != GRANT_INVALID_REF) 131.889 + gnttab_end_foreign_access(info->rx_ring_ref, 0); 131.890 + info->rx_ring_ref = GRANT_INVALID_REF; 131.891 +#endif 131.892 + unbind_evtchn_from_irqhandler(info->evtchn, info->netdev); 131.893 + info->evtchn = 0; 131.894 +} 131.895 + 131.896 +/* Stop network device and free tx/rx queues and irq. 131.897 + */ 131.898 +static void shutdown_device(struct net_private *np) 131.899 +{ 131.900 + /* Stop old i/f to prevent errors whilst we rebuild the state. */ 131.901 + spin_lock_irq(&np->tx_lock); 131.902 + spin_lock(&np->rx_lock); 131.903 + netif_stop_queue(np->netdev); 131.904 + /* np->backend_state = BEST_DISCONNECTED; */ 131.905 + spin_unlock(&np->rx_lock); 131.906 + spin_unlock_irq(&np->tx_lock); 131.907 + 131.908 + /* Free resources. */ 131.909 + netif_free(np); 131.910 +} 131.911 + 131.912 +/* Common code used when first setting up, and when resuming. 
*/ 131.913 +static int talk_to_backend(struct xenbus_device *dev, 131.914 + struct netfront_info *info) 131.915 +{ 131.916 + char *backend, *mac, *e, *s; 131.917 + const char *message; 131.918 + int err, i; 131.919 + 131.920 + backend = NULL; 131.921 + err = xenbus_gather(dev->nodename, 131.922 + "backend-id", "%i", &info->backend_id, 131.923 + "backend", NULL, &backend, 131.924 + NULL); 131.925 + if (XENBUS_EXIST_ERR(err)) 131.926 + goto out; 131.927 + if (backend && strlen(backend) == 0) { 131.928 + err = -ENOENT; 131.929 + goto out; 131.930 + } 131.931 + if (err < 0) { 131.932 + xenbus_dev_error(dev, err, "reading %s/backend or backend-id", 131.933 + dev->nodename); 131.934 + goto out; 131.935 + } 131.936 + 131.937 + mac = xenbus_read(dev->nodename, "mac", NULL); 131.938 + if (IS_ERR(mac)) { 131.939 + err = PTR_ERR(mac); 131.940 + xenbus_dev_error(dev, err, "reading %s/mac", 131.941 + dev->nodename); 131.942 + goto out; 131.943 + } 131.944 + s = mac; 131.945 + for (i = 0; i < ETH_ALEN; i++) { 131.946 + info->mac[i] = simple_strtoul(s, &e, 16); 131.947 + if (s == e || (e[0] != ':' && e[0] != 0)) { 131.948 + kfree(mac); 131.949 + err = -ENOENT; 131.950 + xenbus_dev_error(dev, err, "parsing %s/mac", 131.951 + dev->nodename); 131.952 + goto out; 131.953 + } 131.954 + s = &e[1]; 131.955 + } 131.956 + kfree(mac); 131.957 + 131.958 + /* Create shared ring, alloc event channel. */ 131.959 + err = setup_device(dev, info); 131.960 + if (err) { 131.961 + xenbus_dev_error(dev, err, "setting up ring"); 131.962 + goto out; 131.963 + } 131.964 + 131.965 + err = xenbus_transaction_start(dev->nodename); 131.966 + if (err) { 131.967 + xenbus_dev_error(dev, err, "starting transaction"); 131.968 + goto destroy_ring; 131.969 + } 131.970 + 131.971 + err = xenbus_printf(dev->nodename, "tx-ring-ref","%u", 131.972 + info->tx_ring_ref); 131.973 + if (err) { 131.974 + message = "writing tx ring-ref"; 131.975 + goto abort_transaction; 131.976 + } 131.977 + err = xenbus_printf(dev->nodename, "rx-ring-ref","%u", 131.978 + info->rx_ring_ref); 131.979 + if (err) { 131.980 + message = "writing rx ring-ref"; 131.981 + goto abort_transaction; 131.982 + } 131.983 + err = xenbus_printf(dev->nodename, 131.984 + "event-channel", "%u", info->evtchn); 131.985 + if (err) { 131.986 + message = "writing event-channel"; 131.987 + goto abort_transaction; 131.988 + } 131.989 + 131.990 + info->backend = backend; 131.991 + backend = NULL; 131.992 + 131.993 + info->watch.node = info->backend; 131.994 + info->watch.callback = watch_for_status; 131.995 + err = register_xenbus_watch(&info->watch); 131.996 + if (err) { 131.997 + message = "registering watch on backend"; 131.998 + goto abort_transaction; 131.999 + } 131.1000 + 131.1001 + err = xenbus_transaction_end(0); 131.1002 + if (err) { 131.1003 + xenbus_dev_error(dev, err, "completing transaction"); 131.1004 + goto destroy_ring; 131.1005 + } 131.1006 + 131.1007 + netif_state = NETIF_STATE_CONNECTED; 131.1008 + 131.1009 + out: 131.1010 + if (backend) 131.1011 + kfree(backend); 131.1012 + return err; 131.1013 + 131.1014 + abort_transaction: 131.1015 + xenbus_transaction_end(1); 131.1016 + /* Have to do this *outside* transaction. */ 131.1017 + xenbus_dev_error(dev, err, "%s", message); 131.1018 + destroy_ring: 131.1019 + shutdown_device(info); 131.1020 + goto out; 131.1021 +} 131.1022 + 131.1023 +/* Setup supplies the backend dir, virtual device. 131.1024 + 131.1025 + We place an event channel and shared frame entries. 131.1026 + We watch backend to wait if it's ok. 
*/ 131.1027 +static int netfront_probe(struct xenbus_device *dev, 131.1028 + const struct xenbus_device_id *id) 131.1029 +{ 131.1030 + int err; 131.1031 + struct net_device *netdev; 131.1032 + struct netfront_info *info; 131.1033 + unsigned int handle; 131.1034 + 131.1035 + err = xenbus_scanf(dev->nodename, "handle", "%u", &handle); 131.1036 + if (XENBUS_EXIST_ERR(err)) 131.1037 + return err; 131.1038 + if (err < 0) { 131.1039 + xenbus_dev_error(dev, err, "reading handle"); 131.1040 + return err; 131.1041 + } 131.1042 + 131.1043 + err = create_netdev(handle, dev, &netdev); 131.1044 + if (err) { 131.1045 + xenbus_dev_error(dev, err, "creating netdev"); 131.1046 + return err; 131.1047 + } 131.1048 + 131.1049 + info = netdev_priv(netdev); 131.1050 + err = talk_to_backend(dev, info); 131.1051 + if (err) { 131.1052 + destroy_netdev(netdev); 131.1053 + return err; 131.1054 + } 131.1055 + 131.1056 + /* Call once in case entries already there. */ 131.1057 + watch_for_status(&info->watch, info->watch.node); 131.1058 + 131.1059 + return 0; 131.1060 +} 131.1061 + 131.1062 +static int netfront_remove(struct xenbus_device *dev) 131.1063 +{ 131.1064 + struct netfront_info *info = dev->data; 131.1065 + 131.1066 + if (info->backend) 131.1067 + unregister_xenbus_watch(&info->watch); 131.1068 + 131.1069 + netif_free(info); 131.1070 + 131.1071 + kfree(info->backend); 131.1072 + kfree(info); 131.1073 + 131.1074 + return 0; 131.1075 +} 131.1076 + 131.1077 +static int netfront_suspend(struct xenbus_device *dev) 131.1078 +{ 131.1079 + struct net_private *np = dev->data; 131.1080 + /* Avoid having tx/rx stuff happen until we're ready. */ 131.1081 + unbind_evtchn_from_irqhandler(np->evtchn, np->netdev); 131.1082 + return 0; 131.1083 +} 131.1084 + 131.1085 +static int netfront_resume(struct xenbus_device *dev) 131.1086 +{ 131.1087 + struct net_private *np = dev->data; 131.1088 + /* 131.1089 + * Connect regardless of whether IFF_UP flag set. 131.1090 + * Stop bad things from happening until we're back up. 131.1091 + */ 131.1092 + np->backend_state = BEST_DISCONNECTED; 131.1093 + memset(np->tx, 0, PAGE_SIZE); 131.1094 + memset(np->rx, 0, PAGE_SIZE); 131.1095 + 131.1096 + // send_interface_connect(np); 131.1097 + return 0; 131.1098 +} 131.1099 + 131.1100 +static struct xenbus_driver netfront = { 131.1101 + .name = "vif", 131.1102 + .owner = THIS_MODULE, 131.1103 + .ids = netfront_ids, 131.1104 + .probe = netfront_probe, 131.1105 + .remove = netfront_remove, 131.1106 + .resume = netfront_resume, 131.1107 + .suspend = netfront_suspend, 131.1108 +}; 131.1109 + 131.1110 +static void __init init_net_xenbus(void) 131.1111 +{ 131.1112 + xenbus_register_device(&netfront); 131.1113 +} 131.1114 + 131.1115 +static int wait_for_netif(void) 131.1116 +{ 131.1117 + int err = 0; 131.1118 + int i; 131.1119 + 131.1120 + /* 131.1121 + * We should figure out how many and which devices we need to 131.1122 + * proceed and only wait for those. For now, continue once the 131.1123 + * first device is around. 
131.1124 + */ 131.1125 + for ( i=0; netif_state != NETIF_STATE_CONNECTED && (i < 10*HZ); i++ ) 131.1126 + { 131.1127 + set_current_state(TASK_INTERRUPTIBLE); 131.1128 + schedule_timeout(1); 131.1129 + } 131.1130 + 131.1131 + if (netif_state != NETIF_STATE_CONNECTED) { 131.1132 + WPRINTK("Timeout connecting to device!\n"); 131.1133 + err = -ENOSYS; 131.1134 + } 131.1135 + return err; 131.1136 +} 131.1137 + 131.1138 static int __init netif_init(void) 131.1139 { 131.1140 int err = 0; 131.1141 131.1142 if (xen_start_info.flags & SIF_INITDOMAIN) 131.1143 return 0; 131.1144 + 131.1145 #ifdef CONFIG_XEN_NETDEV_GRANT_TX 131.1146 /* A grant for every ring slot */ 131.1147 if (gnttab_alloc_grant_references(NETIF_TX_RING_SIZE, 131.1148 @@ -1438,17 +1362,13 @@ static int __init netif_init(void) 131.1149 return err; 131.1150 131.1151 IPRINTK("Initialising virtual ethernet driver.\n"); 131.1152 - INIT_LIST_HEAD(&dev_list); 131.1153 + 131.1154 (void)register_inetaddr_notifier(&notifier_inetdev); 131.1155 - netctrl_init(); 131.1156 - (void)ctrl_if_register_receiver(CMSG_NETIF_FE, netif_ctrlif_rx, 131.1157 - CALLBACK_IN_BLOCKING_CONTEXT); 131.1158 - send_driver_status(1); 131.1159 - err = probe_interfaces(); 131.1160 - if (err) 131.1161 - ctrl_if_unregister_receiver(CMSG_NETIF_FE, netif_ctrlif_rx); 131.1162 + 131.1163 + init_net_xenbus(); 131.1164 131.1165 - DPRINTK("< err=%d\n", err); 131.1166 + wait_for_netif(); 131.1167 + 131.1168 return err; 131.1169 } 131.1170 131.1171 @@ -1462,47 +1382,6 @@ static void netif_exit(void) 131.1172 #endif 131.1173 } 131.1174 131.1175 -static void vif_suspend(struct net_private *np) 131.1176 -{ 131.1177 - /* Avoid having tx/rx stuff happen until we're ready. */ 131.1178 - unbind_evtchn_from_irqhandler(np->evtchn, np->dev); 131.1179 -} 131.1180 - 131.1181 -static void vif_resume(struct net_private *np) 131.1182 -{ 131.1183 - /* 131.1184 - * Connect regardless of whether IFF_UP flag set. 131.1185 - * Stop bad things from happening until we're back up. 131.1186 - */ 131.1187 - np->backend_state = BEST_DISCONNECTED; 131.1188 - memset(np->tx, 0, PAGE_SIZE); 131.1189 - memset(np->rx, 0, PAGE_SIZE); 131.1190 - 131.1191 - send_interface_connect(np); 131.1192 -} 131.1193 - 131.1194 -void netif_suspend(void) 131.1195 -{ 131.1196 - struct list_head *ent; 131.1197 - struct net_private *np; 131.1198 - 131.1199 - list_for_each (ent, &dev_list) { 131.1200 - np = list_entry(ent, struct net_private, list); 131.1201 - vif_suspend(np); 131.1202 - } 131.1203 -} 131.1204 - 131.1205 -void netif_resume(void) 131.1206 -{ 131.1207 - struct list_head *ent; 131.1208 - struct net_private *np; 131.1209 - 131.1210 - list_for_each (ent, &dev_list) { 131.1211 - np = list_entry(ent, struct net_private, list); 131.1212 - vif_resume(np); 131.1213 - } 131.1214 -} 131.1215 - 131.1216 #ifdef CONFIG_PROC_FS 131.1217 131.1218 #define TARGET_MIN 0UL
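Editor's note on the hunk above: talk_to_backend() is the whole frontend side of the new xenbus handshake. It creates the shared rings and event channel via setup_device(), publishes tx-ring-ref, rx-ring-ref and event-channel under the device node inside a xenbus transaction, and registers a watch on the backend directory. Below is a minimal sketch of the matching reads, using xenbus_scanf() exactly as netfront_probe() uses it; the helper name and the frontend_path parameter are illustrative, not part of this changeset.

    /* Illustrative only: read back the keys that talk_to_backend() writes.
     * frontend_path stands in for the peer's xenstore node name. */
    static int read_ring_info(const char *frontend_path,
                              unsigned int *tx_ring_ref,
                              unsigned int *rx_ring_ref,
                              unsigned int *evtchn)
    {
        int err;

        err = xenbus_scanf(frontend_path, "tx-ring-ref", "%u", tx_ring_ref);
        if (err < 0)
            return err;
        err = xenbus_scanf(frontend_path, "rx-ring-ref", "%u", rx_ring_ref);
        if (err < 0)
            return err;
        err = xenbus_scanf(frontend_path, "event-channel", "%u", evtchn);
        return (err < 0) ? err : 0;
    }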
153.1 --- a/linux-2.6-xen-sparse/include/asm-xen/asm-i386/page.h Wed Aug 24 16:16:52 2005 -0700 153.2 +++ b/linux-2.6-xen-sparse/include/asm-xen/asm-i386/page.h Thu Aug 25 11:18:47 2005 -0700 153.3 @@ -65,8 +65,26 @@ 153.4 extern unsigned int *phys_to_machine_mapping; 153.5 #define pfn_to_mfn(pfn) \ 153.6 ((unsigned long)phys_to_machine_mapping[(unsigned int)(pfn)] & 0x7FFFFFFFUL) 153.7 -#define mfn_to_pfn(mfn) \ 153.8 -((unsigned long)machine_to_phys_mapping[(unsigned int)(mfn)]) 153.9 +static inline unsigned long mfn_to_pfn(unsigned long mfn) 153.10 +{ 153.11 + unsigned int pfn; 153.12 + 153.13 + /* 153.14 + * The array access can fail (e.g., device space beyond end of RAM). 153.15 + * In such cases it doesn't matter what we return (we return garbage), 153.16 + * but we must handle the fault without crashing! 153.17 + */ 153.18 + asm ( 153.19 + "1: movl %1,%0\n" 153.20 + "2:\n" 153.21 + ".section __ex_table,\"a\"\n" 153.22 + " .align 4\n" 153.23 + " .long 1b,2b\n" 153.24 + ".previous" 153.25 + : "=r" (pfn) : "m" (machine_to_phys_mapping[mfn]) ); 153.26 + 153.27 + return (unsigned long)pfn; 153.28 +} 153.29 153.30 /* Definitions for machine and pseudophysical addresses. */ 153.31 #ifdef CONFIG_X86_PAE
166.1 --- a/linux-2.6-xen-sparse/include/asm-xen/asm-x86_64/page.h Wed Aug 24 16:16:52 2005 -0700 166.2 +++ b/linux-2.6-xen-sparse/include/asm-xen/asm-x86_64/page.h Thu Aug 25 11:18:47 2005 -0700 166.3 @@ -67,8 +67,26 @@ void copy_page(void *, void *); 166.4 extern u32 *phys_to_machine_mapping; 166.5 #define pfn_to_mfn(pfn) \ 166.6 ((unsigned long)phys_to_machine_mapping[(unsigned int)(pfn)] & 0x7FFFFFFFUL) 166.7 -#define mfn_to_pfn(mfn) \ 166.8 -((unsigned long)machine_to_phys_mapping[(unsigned int)(mfn)]) 166.9 +static inline unsigned long mfn_to_pfn(unsigned long mfn) 166.10 +{ 166.11 + unsigned int pfn; 166.12 + 166.13 + /* 166.14 + * The array access can fail (e.g., device space beyond end of RAM). 166.15 + * In such cases it doesn't matter what we return (we return garbage), 166.16 + * but we must handle the fault without crashing! 166.17 + */ 166.18 + asm ( 166.19 + "1: movl %1,%k0\n" 166.20 + "2:\n" 166.21 + ".section __ex_table,\"a\"\n" 166.22 + " .align 8\n" 166.23 + " .quad 1b,2b\n" 166.24 + ".previous" 166.25 + : "=r" (pfn) : "m" (machine_to_phys_mapping[mfn]) ); 166.26 + 166.27 + return (unsigned long)pfn; 166.28 +} 166.29 166.30 /* Definitions for machine and pseudophysical addresses. */ 166.31 typedef unsigned long paddr_t;
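Editor's note on the two page.h hunks above: both rely on the kernel's __ex_table mechanism. The load from machine_to_phys_mapping[] at label 1: gets an exception-table entry pointing at label 2:, so a fault on an unmapped M2P slot simply falls through and returns garbage instead of oopsing, as the in-code comment says. For reference, here is a sketch of the same idiom in its more general form, with an explicit .fixup stub that substitutes a sentinel value; the helper name, label layout and 0xffffffff sentinel are assumptions for illustration (i386 syntax), not code from this changeset.

    /* Read *p, or return a sentinel if the access faults. */
    static inline unsigned long read_or_default(const unsigned long *p)
    {
        unsigned long val;
        asm (
            "1: movl %1,%0\n"              /* may fault                  */
            "2:\n"
            ".section .fixup,\"ax\"\n"
            "3: movl $0xffffffff,%0\n"     /* fixup: substitute sentinel */
            "   jmp 2b\n"
            ".previous\n"
            ".section __ex_table,\"a\"\n"
            "   .align 4\n"
            "   .long 1b,3b\n"             /* fault at 1b resumes at 3b  */
            ".previous"
            : "=r" (val) : "m" (*p) );
        return val;
    }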
173.1 --- a/linux-2.6-xen-sparse/include/asm-xen/gnttab.h Wed Aug 24 16:16:52 2005 -0700 173.2 +++ b/linux-2.6-xen-sparse/include/asm-xen/gnttab.h Thu Aug 25 11:18:47 2005 -0700 173.3 @@ -30,10 +30,12 @@ struct gnttab_free_callback { 173.4 int gnttab_grant_foreign_access(domid_t domid, unsigned long frame, 173.5 int readonly); 173.6 173.7 +void gnttab_end_foreign_access_ref(grant_ref_t ref, int readonly); 173.8 void gnttab_end_foreign_access(grant_ref_t ref, int readonly); 173.9 173.10 int gnttab_grant_foreign_transfer(domid_t domid, unsigned long pfn); 173.11 173.12 +unsigned long gnttab_end_foreign_transfer_ref(grant_ref_t ref); 173.13 unsigned long gnttab_end_foreign_transfer(grant_ref_t ref); 173.14 173.15 int gnttab_query_foreign_access(grant_ref_t ref);
184.1 --- a/patches/linux-2.6.12/workaround_double_br_del_if.patch Wed Aug 24 16:16:52 2005 -0700 184.2 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 184.3 @@ -1,11 +0,0 @@ 184.4 ---- linux-2.6.12/net/bridge/br_if.c 2005-06-17 14:48:29.000000000 -0500 184.5 -+++ linux-2.6.12-xen0-smp/net/bridge/br_if.c 2005-08-18 15:17:27.302615846 -0500 184.6 -@@ -382,7 +382,7 @@ 184.7 - { 184.8 - struct net_bridge_port *p = dev->br_port; 184.9 - 184.10 -- if (!p || p->br != br) 184.11 -+ if (!p || p->br != br || p->state == BR_STATE_DISABLED) 184.12 - return -EINVAL; 184.13 - 184.14 - br_sysfs_removeif(p);
188.1 --- a/tools/blktap/blktaplib.h Wed Aug 24 16:16:52 2005 -0700 188.2 +++ b/tools/blktap/blktaplib.h Thu Aug 25 11:18:47 2005 -0700 188.3 @@ -7,7 +7,7 @@ 188.4 #ifndef __BLKTAPLIB_H__ 188.5 #define __BLKTAPLIB_H__ 188.6 188.7 -#include <xc.h> 188.8 +#include <xenctrl.h> 188.9 #include <sys/user.h> 188.10 #include <xen/xen.h> 188.11 #include <xen/io/blkif.h>
189.1 --- a/tools/blktap/parallax/block-async.h Wed Aug 24 16:16:52 2005 -0700 189.2 +++ b/tools/blktap/parallax/block-async.h Thu Aug 25 11:18:47 2005 -0700 189.3 @@ -7,7 +7,7 @@ 189.4 #define _BLOCKASYNC_H_ 189.5 189.6 #include <assert.h> 189.7 -#include <xc.h> 189.8 +#include <xenctrl.h> 189.9 #include "vdi.h" 189.10 189.11 struct io_ret
190.1 --- a/tools/blktap/parallax/blockstore.h Wed Aug 24 16:16:52 2005 -0700 190.2 +++ b/tools/blktap/parallax/blockstore.h Thu Aug 25 11:18:47 2005 -0700 190.3 @@ -10,7 +10,7 @@ 190.4 #define __BLOCKSTORE_H__ 190.5 190.6 #include <netinet/in.h> 190.7 -#include <xc.h> 190.8 +#include <xenctrl.h> 190.9 190.10 #define BLOCK_SIZE 4096 190.11 #define BLOCK_SHIFT 12
191.1 --- a/tools/console/Makefile Wed Aug 24 16:16:52 2005 -0700 191.2 +++ b/tools/console/Makefile Thu Aug 25 11:18:47 2005 -0700 191.3 @@ -26,11 +26,11 @@ clean: 191.4 191.5 xenconsoled: $(patsubst %.c,%.o,$(wildcard daemon/*.c)) 191.6 $(CC) $(CFLAGS) $^ -o $@ -L$(XEN_LIBXC) -L$(XEN_XENSTORE) \ 191.7 - -lxc -lxenstore 191.8 + -lxenctrl -lxenstore 191.9 191.10 xenconsole: $(patsubst %.c,%.o,$(wildcard client/*.c)) 191.11 $(CC) $(CFLAGS) $^ -o $@ -L$(XEN_LIBXC) -L$(XEN_XENSTORE) \ 191.12 - -lxc -lxenstore 191.13 + -lxenctrl -lxenstore 191.14 191.15 install: $(BIN) 191.16 $(INSTALL_DIR) -p $(DESTDIR)/$(DAEMON_INSTALL_DIR)
192.1 --- a/tools/console/client/main.c Wed Aug 24 16:16:52 2005 -0700 192.2 +++ b/tools/console/client/main.c Thu Aug 25 11:18:47 2005 -0700 192.3 @@ -36,7 +36,7 @@ 192.4 #include <errno.h> 192.5 #include <pty.h> 192.6 192.7 -#include "xc.h" 192.8 +#include "xenctrl.h" 192.9 #include "xs.h" 192.10 192.11 #define ESCAPE_CHARACTER 0x1d
193.1 --- a/tools/console/daemon/io.c Wed Aug 24 16:16:52 2005 -0700 193.2 +++ b/tools/console/daemon/io.c Thu Aug 25 11:18:47 2005 -0700 193.3 @@ -23,7 +23,7 @@ 193.4 #include "utils.h" 193.5 #include "io.h" 193.6 193.7 -#include "xc.h" 193.8 +#include "xenctrl.h" 193.9 #include "xs.h" 193.10 #include "xen/io/domain_controller.h" 193.11 #include "xcs_proto.h"
194.1 --- a/tools/console/daemon/main.c Wed Aug 24 16:16:52 2005 -0700 194.2 +++ b/tools/console/daemon/main.c Thu Aug 25 11:18:47 2005 -0700 194.3 @@ -25,7 +25,7 @@ 194.4 #include <unistd.h> 194.5 #include <sys/types.h> 194.6 194.7 -#include "xc.h" 194.8 +#include "xenctrl.h" 194.9 #include "xen/io/domain_controller.h" 194.10 #include "xcs_proto.h" 194.11
195.1 --- a/tools/console/daemon/utils.c Wed Aug 24 16:16:52 2005 -0700 195.2 +++ b/tools/console/daemon/utils.c Thu Aug 25 11:18:47 2005 -0700 195.3 @@ -33,7 +33,7 @@ 195.4 #include <sys/un.h> 195.5 #include <string.h> 195.6 195.7 -#include "xc.h" 195.8 +#include "xenctrl.h" 195.9 #include "xen/io/domain_controller.h" 195.10 #include "xcs_proto.h" 195.11
196.1 --- a/tools/debugger/gdb/gdb-6.2.1-xen-sparse/gdb/gdbserver/linux-xen-low.c Wed Aug 24 16:16:52 2005 -0700 196.2 +++ b/tools/debugger/gdb/gdb-6.2.1-xen-sparse/gdb/gdbserver/linux-xen-low.c Thu Aug 25 11:18:47 2005 -0700 196.3 @@ -35,7 +35,7 @@ 196.4 #include <stdlib.h> 196.5 #include <unistd.h> 196.6 #include <errno.h> 196.7 -#include <xc.h> 196.8 +#include <xenctrl.h> 196.9 #define TRACE_ENTER /* printf("enter %s\n", __FUNCTION__) */ 196.10 long (*myptrace)(enum __ptrace_request, pid_t, long, long); 196.11 int (*myxcwait)(int domain, int *status, int options) ;
198.1 --- a/tools/debugger/libxendebug/Makefile Wed Aug 24 16:16:52 2005 -0700 198.2 +++ b/tools/debugger/libxendebug/Makefile Thu Aug 25 11:18:47 2005 -0700 198.3 @@ -20,7 +20,7 @@ CFLAGS += $(INCLUDES) -I. -I$(XEN_ROOT 198.4 CFLAGS += -Wp,-MD,.$(@F).d 198.5 DEPS = .*.d 198.6 198.7 -LDFLAGS += -L$(XEN_ROOT)/tools/libxc -lxc 198.8 +LDFLAGS += -L$(XEN_ROOT)/tools/libxc -lxenctrl 198.9 198.10 LIB_OBJS := $(patsubst %.c,%.o,$(SRCS)) 198.11 PIC_OBJS := $(patsubst %.c,%.opic,$(SRCS))
199.1 --- a/tools/debugger/libxendebug/xendebug.c Wed Aug 24 16:16:52 2005 -0700 199.2 +++ b/tools/debugger/libxendebug/xendebug.c Thu Aug 25 11:18:47 2005 -0700 199.3 @@ -12,7 +12,7 @@ 199.4 #include <string.h> 199.5 #include <errno.h> 199.6 #include <sys/mman.h> 199.7 -#include <xc.h> 199.8 +#include <xenctrl.h> 199.9 #include "list.h" 199.10 199.11 #if defined(__i386__)
200.1 --- a/tools/debugger/libxendebug/xendebug.h Wed Aug 24 16:16:52 2005 -0700 200.2 +++ b/tools/debugger/libxendebug/xendebug.h Thu Aug 25 11:18:47 2005 -0700 200.3 @@ -9,7 +9,7 @@ 200.4 #ifndef _XENDEBUG_H_DEFINED 200.5 #define _XENDEBUG_H_DEFINED 200.6 200.7 -#include <xc.h> 200.8 +#include <xenctrl.h> 200.9 200.10 int xendebug_attach(int xc_handle, 200.11 u32 domid,
222.1 --- a/tools/debugger/pdb/pdb_caml_domain.c Wed Aug 24 16:16:52 2005 -0700 222.2 +++ b/tools/debugger/pdb/pdb_caml_domain.c Thu Aug 25 11:18:47 2005 -0700 222.3 @@ -6,7 +6,7 @@ 222.4 * PDB's OCaml interface library for debugging domains 222.5 */ 222.6 222.7 -#include <xc.h> 222.8 +#include <xenctrl.h> 222.9 #include <xendebug.h> 222.10 #include <errno.h> 222.11 #include <stdio.h>
223.1 --- a/tools/debugger/pdb/pdb_caml_evtchn.c Wed Aug 24 16:16:52 2005 -0700 223.2 +++ b/tools/debugger/pdb/pdb_caml_evtchn.c Thu Aug 25 11:18:47 2005 -0700 223.3 @@ -6,7 +6,7 @@ 223.4 * PDB's OCaml interface library for event channels 223.5 */ 223.6 223.7 -#include <xc.h> 223.8 +#include <xenctrl.h> 223.9 #include <stdio.h> 223.10 #include <stdlib.h> 223.11 #include <string.h>
224.1 --- a/tools/debugger/pdb/pdb_caml_process.c Wed Aug 24 16:16:52 2005 -0700 224.2 +++ b/tools/debugger/pdb/pdb_caml_process.c Thu Aug 25 11:18:47 2005 -0700 224.3 @@ -15,7 +15,7 @@ 224.4 #include <caml/memory.h> 224.5 #include <caml/mlvalues.h> 224.6 224.7 -#include <xc.h> 224.8 +#include <xenctrl.h> 224.9 #include <xen/xen.h> 224.10 #include <xen/io/domain_controller.h> 224.11 #include <xen/linux/privcmd.h>
225.1 --- a/tools/debugger/pdb/pdb_caml_xc.c Wed Aug 24 16:16:52 2005 -0700 225.2 +++ b/tools/debugger/pdb/pdb_caml_xc.c Thu Aug 25 11:18:47 2005 -0700 225.3 @@ -6,7 +6,7 @@ 225.4 * PDB's OCaml interface library for debugging domains 225.5 */ 225.6 225.7 -#include <xc.h> 225.8 +#include <xenctrl.h> 225.9 #include <xendebug.h> 225.10 #include <errno.h> 225.11 #include <stdio.h>
226.1 --- a/tools/debugger/pdb/pdb_caml_xcs.c Wed Aug 24 16:16:52 2005 -0700 226.2 +++ b/tools/debugger/pdb/pdb_caml_xcs.c Thu Aug 25 11:18:47 2005 -0700 226.3 @@ -17,7 +17,7 @@ 226.4 #include <sys/types.h> 226.5 #include <sys/socket.h> 226.6 #include <errno.h> 226.7 -#include <xc.h> 226.8 +#include <xenctrl.h> 226.9 226.10 #include <xen/xen.h> 226.11 #include <xen/io/domain_controller.h>
228.1 --- a/tools/debugger/pdb/pdb_xen.c Wed Aug 24 16:16:52 2005 -0700 228.2 +++ b/tools/debugger/pdb/pdb_xen.c Thu Aug 25 11:18:47 2005 -0700 228.3 @@ -7,7 +7,7 @@ 228.4 * PDB interface library for accessing Xen 228.5 */ 228.6 228.7 -#include <xc.h> 228.8 +#include <xenctrl.h> 228.9 #include <stdio.h> 228.10 #include <stdlib.h> 228.11 #include <errno.h>
231.1 --- a/tools/examples/Makefile Wed Aug 24 16:16:52 2005 -0700 231.2 +++ b/tools/examples/Makefile Thu Aug 25 11:18:47 2005 -0700 231.3 @@ -24,10 +24,14 @@ XEN_SCRIPTS += block-enbd 231.4 XEN_BOOT_DIR = /usr/lib/xen/boot 231.5 XEN_BOOT = mem-map.sxp 231.6 231.7 +XEN_HOTPLUG_DIR = /etc/hotplug.d/xen-backend 231.8 +XEN_HOTPLUG_SCRIPTS = backend.hotplug 231.9 + 231.10 all: 231.11 build: 231.12 231.13 -install: all install-initd install-configs install-scripts install-boot 231.14 +install: all install-initd install-configs install-scripts install-boot \ 231.15 + install-hotplug 231.16 231.17 install-initd: 231.18 [ -d $(DESTDIR)/etc/init.d ] || $(INSTALL_DIR) $(DESTDIR)/etc/init.d 231.19 @@ -60,4 +64,12 @@ install-boot: 231.20 $(INSTALL_PROG) $$i $(DESTDIR)$(XEN_BOOT_DIR); \ 231.21 done 231.22 231.23 +install-hotplug: 231.24 + [ -d $(DESTDIR)$(XEN_HOTPLUG_DIR) ] || \ 231.25 + $(INSTALL_DIR) $(DESTDIR)$(XEN_HOTPLUG_DIR) 231.26 + for i in $(XEN_HOTPLUG_SCRIPTS); \ 231.27 + do [ -a $(DESTDIR)$(XEN_HOTPLUG_DIR)/$$i ] || \ 231.28 + $(INSTALL_PROG) $$i $(DESTDIR)$(XEN_HOTPLUG_DIR); \ 231.29 + done 231.30 + 231.31 clean:
232.1 --- a/tools/examples/README Wed Aug 24 16:16:52 2005 -0700 232.2 +++ b/tools/examples/README Thu Aug 25 11:18:47 2005 -0700 232.3 @@ -9,9 +9,20 @@ If you write a useful script and would l 232.4 send it (preferably with a little summary to go in this file) to 232.5 <xen-devel@lists.sourceforge.net> so we can add it to this directory. 232.6 232.7 +block-enbd - binds/unbinds network block devices 232.8 +block-file - binds/unbinds file to loopback device 232.9 +mem-map.sxp - memory map xend configuration file. 232.10 network - default network setup script called by xend at startup. 232.11 +network-route - default xen network start/stop script. 232.12 +network-nat - default xen network start/stop script when using NAT. 232.13 vif-bridge - default virtual network interface setup script. 232.14 +vif-route - default xen virtual network start/stop script 232.15 +vif-nat - configures vif in routed-nat mode. 232.16 xend-config.sxp - default xend configuration file. 232.17 xmexample1 - example configuration script for 'xm create'. 232.18 xmexample2 - a more complex configuration script for 'xm create'. 232.19 +xmexample3 - an advanced configuration script for 'xm create' 232.20 + that utilizes the vmid. 232.21 +xmexample.vmx - a configuration script for creating a vmx domain with 232.22 + 'xm create'. 232.23
233.1 --- /dev/null Thu Jan 01 00:00:00 1970 +0000 233.2 +++ b/tools/examples/backend.hotplug Thu Aug 25 11:18:47 2005 -0700 233.3 @@ -0,0 +1,21 @@ 233.4 +#! /bin/sh 233.5 + 233.6 +#DEVPATH=/devices/xen-backend/vif-1-0 233.7 +#ACTION=add 233.8 + 233.9 +PATH=/etc/xen/scripts:$PATH 233.10 + 233.11 +DEV=$(basename "$DEVPATH") 233.12 +case "$ACTION" in 233.13 + add) 233.14 + case "$DEV" in 233.15 + vif-*) 233.16 + vif=$(echo "$DEV" | sed 's/-\([0-9]*\)-\([0-9]*\)/\1.\2/') 233.17 + vif-bridge up domain=unknown vif="$vif" mac=fe:ff:ff:ff:ff:ff bridge=xen-br0 >/dev/null 2>&1 233.18 + ;; 233.19 + esac 233.20 + ;; 233.21 + remove) 233.22 + ;; 233.23 +esac 233.24 +
235.1 --- a/tools/examples/vif-bridge Wed Aug 24 16:16:52 2005 -0700 235.2 +++ b/tools/examples/vif-bridge Thu Aug 25 11:18:47 2005 -0700 235.3 @@ -74,8 +74,10 @@ if [ "${bridge}" == "null" ] ; then 235.4 exit 235.5 fi 235.6 235.7 -# Add/remove vif to/from bridge. 235.8 -brctl ${brcmd} ${bridge} ${vif} 235.9 +# Add vif to bridge. vifs are auto-removed from bridge. 235.10 +if [ "${brcmd}" == "addif" ] ; then 235.11 + brctl ${brcmd} ${bridge} ${vif} 235.12 +fi 235.13 ifconfig ${vif} $OP 235.14 235.15 if [ ${ip} ] ; then
238.1 --- a/tools/firmware/acpi/acpi2_0.h Wed Aug 24 16:16:52 2005 -0700 238.2 +++ b/tools/firmware/acpi/acpi2_0.h Thu Aug 25 11:18:47 2005 -0700 238.3 @@ -18,7 +18,7 @@ 238.4 #ifndef _ACPI_2_0_H_ 238.5 #define _ACPI_2_0_H_ 238.6 238.7 -#include "xc.h" // for u8, u16, u32, u64 definition 238.8 +#include "xenctrl.h" // for u8, u16, u32, u64 definition 238.9 238.10 #pragma pack (1) 238.11
243.1 --- a/tools/ioemu/hw/i8254.c Wed Aug 24 16:16:52 2005 -0700 243.2 +++ b/tools/ioemu/hw/i8254.c Thu Aug 25 11:18:47 2005 -0700 243.3 @@ -22,7 +22,7 @@ 243.4 * THE SOFTWARE. 243.5 */ 243.6 #include "vl.h" 243.7 -#include "xc.h" 243.8 +#include "xenctrl.h" 243.9 #include <io/ioreq.h> 243.10 243.11 //#define DEBUG_PIT
244.1 --- a/tools/ioemu/hw/i8259.c Wed Aug 24 16:16:52 2005 -0700 244.2 +++ b/tools/ioemu/hw/i8259.c Thu Aug 25 11:18:47 2005 -0700 244.3 @@ -22,7 +22,7 @@ 244.4 * THE SOFTWARE. 244.5 */ 244.6 #include "vl.h" 244.7 -#include "xc.h" 244.8 +#include "xenctrl.h" 244.9 #include <io/ioreq.h> 244.10 244.11 /* debug PIC */
246.1 --- a/tools/ioemu/hw/ioapic.h Wed Aug 24 16:16:52 2005 -0700 246.2 +++ b/tools/ioemu/hw/ioapic.h Thu Aug 25 11:18:47 2005 -0700 246.3 @@ -26,7 +26,7 @@ 246.4 #ifndef __IOAPIC_H 246.5 #define __IOAPIC_H 246.6 246.7 -#include "xc.h" 246.8 +#include "xenctrl.h" 246.9 #include <io/ioreq.h> 246.10 #include <io/vmx_vlapic.h> 246.11
251.1 --- a/tools/ioemu/target-i386-dm/Makefile Wed Aug 24 16:16:52 2005 -0700 251.2 +++ b/tools/ioemu/target-i386-dm/Makefile Thu Aug 25 11:18:47 2005 -0700 251.3 @@ -188,7 +188,7 @@ endif 251.4 ######################################################### 251.5 251.6 DEFINES+=-D_GNU_SOURCE -D_FILE_OFFSET_BITS=64 -D_LARGEFILE_SOURCE -DAPIC_SUPPORT 251.7 -LIBS+=-lm -L../../libxc -lxc 251.8 +LIBS+=-lm -L../../libxc -lxenctrl 251.9 ifndef CONFIG_USER_ONLY 251.10 LIBS+=-lz 251.11 endif
252.1 --- a/tools/ioemu/target-i386-dm/helper2.c Wed Aug 24 16:16:52 2005 -0700 252.2 +++ b/tools/ioemu/target-i386-dm/helper2.c Thu Aug 25 11:18:47 2005 -0700 252.3 @@ -47,7 +47,7 @@ 252.4 #include <fcntl.h> 252.5 #include <sys/ioctl.h> 252.6 252.7 -#include "xc.h" 252.8 +#include "xenctrl.h" 252.9 #include <io/ioreq.h> 252.10 252.11 #include "cpu.h"
254.1 --- a/tools/ioemu/vl.c Wed Aug 24 16:16:52 2005 -0700 254.2 +++ b/tools/ioemu/vl.c Thu Aug 25 11:18:47 2005 -0700 254.3 @@ -72,7 +72,7 @@ 254.4 #endif 254.5 #endif /* CONFIG_SDL */ 254.6 254.7 -#include "xc.h" 254.8 +#include "xenctrl.h" 254.9 #include "exec-all.h" 254.10 254.11 //#define DO_TB_FLUSH
257.1 --- a/tools/libxc/Makefile Wed Aug 24 16:16:52 2005 -0700 257.2 +++ b/tools/libxc/Makefile Thu Aug 25 11:18:47 2005 -0700 257.3 @@ -12,28 +12,32 @@ CC = gcc 257.4 XEN_ROOT = ../.. 257.5 include $(XEN_ROOT)/tools/Rules.mk 257.6 257.7 -SRCS := 257.8 -SRCS += xc_sedf.c 257.9 -SRCS += xc_bvtsched.c 257.10 -SRCS += xc_core.c 257.11 -SRCS += xc_domain.c 257.12 -SRCS += xc_evtchn.c 257.13 -SRCS += xc_gnttab.c 257.14 -SRCS += xc_load_bin.c 257.15 -SRCS += xc_load_elf.c 257.16 -SRCS += xc_linux_build.c 257.17 -SRCS += xc_misc.c 257.18 -SRCS += xc_physdev.c 257.19 -SRCS += xc_private.c 257.20 +SRCS := 257.21 +BUILD_SRCS := 257.22 +SRCS += xc_bvtsched.c 257.23 +SRCS += xc_core.c 257.24 +SRCS += xc_domain.c 257.25 +SRCS += xc_evtchn.c 257.26 +SRCS += xc_gnttab.c 257.27 +SRCS += xc_misc.c 257.28 +SRCS += xc_physdev.c 257.29 +SRCS += xc_private.c 257.30 +SRCS += xc_sedf.c 257.31 + 257.32 ifeq ($(XEN_TARGET_ARCH),ia64) 257.33 -SRCS += xc_ia64_stubs.c 257.34 +BUILD_SRCS += xc_ia64_stubs.c 257.35 else 257.36 -SRCS += xc_load_aout9.c 257.37 -SRCS += xc_linux_restore.c 257.38 -SRCS += xc_linux_save.c 257.39 -SRCS += xc_vmx_build.c 257.40 -SRCS += xc_ptrace.c 257.41 -SRCS += xc_ptrace_core.c 257.42 +SRCS += xc_ptrace.c 257.43 +SRCS += xc_ptrace_core.c 257.44 + 257.45 +BUILD_SRCS := xc_load_aout9.c 257.46 +BUILD_SRCS += xc_load_bin.c 257.47 +BUILD_SRCS += xc_load_elf.c 257.48 +BUILD_SRCS += xc_linux_build.c 257.49 +BUILD_SRCS += xc_linux_restore.c 257.50 +BUILD_SRCS += xc_linux_save.c 257.51 +BUILD_SRCS += xc_vmx_build.c 257.52 +BUILD_SRCS += xg_private.c 257.53 endif 257.54 257.55 CFLAGS += -Wall 257.56 @@ -43,13 +47,20 @@ CFLAGS += -fno-strict-aliasing 257.57 CFLAGS += $(INCLUDES) -I. 257.58 # Get gcc to generate the dependencies for us. 257.59 CFLAGS += -Wp,-MD,.$(@F).d 257.60 +LDFLAGS += -L. 
257.61 DEPS = .*.d 257.62 257.63 LIB_OBJS := $(patsubst %.c,%.o,$(SRCS)) 257.64 PIC_OBJS := $(patsubst %.c,%.opic,$(SRCS)) 257.65 257.66 -LIB := libxc.a libxc-pic.a 257.67 -LIB += libxc.so libxc.so.$(MAJOR) libxc.so.$(MAJOR).$(MINOR) 257.68 +LIB_BUILD_OBJS := $(patsubst %.c,%.o,$(BUILD_SRCS)) 257.69 +PIC_BUILD_OBJS := $(patsubst %.c,%.opic,$(BUILD_SRCS)) 257.70 + 257.71 +LIB := libxenctrl.a 257.72 +LIB += libxenctrl.so libxenctrl.so.$(MAJOR) libxenctrl.so.$(MAJOR).$(MINOR) 257.73 + 257.74 +LIB += libxenguest.a 257.75 +LIB += libxenguest.so libxenguest.so.$(MAJOR) libxenguest.so.$(MAJOR).$(MINOR) 257.76 257.77 all: build 257.78 build: check-for-zlib mk-symlinks 257.79 @@ -77,11 +88,16 @@ mk-symlinks: 257.80 install: build 257.81 [ -d $(DESTDIR)/usr/$(LIBDIR) ] || $(INSTALL_DIR) $(DESTDIR)/usr/$(LIBDIR) 257.82 [ -d $(DESTDIR)/usr/include ] || $(INSTALL_DIR) $(DESTDIR)/usr/include 257.83 - $(INSTALL_PROG) libxc.so.$(MAJOR).$(MINOR) $(DESTDIR)/usr/$(LIBDIR) 257.84 - $(INSTALL_DATA) libxc.a $(DESTDIR)/usr/$(LIBDIR) 257.85 - ln -sf libxc.so.$(MAJOR).$(MINOR) $(DESTDIR)/usr/$(LIBDIR)/libxc.so.$(MAJOR) 257.86 - ln -sf libxc.so.$(MAJOR) $(DESTDIR)/usr/$(LIBDIR)/libxc.so 257.87 - $(INSTALL_DATA) xc.h $(DESTDIR)/usr/include 257.88 + $(INSTALL_PROG) libxenctrl.so.$(MAJOR).$(MINOR) $(DESTDIR)/usr/$(LIBDIR) 257.89 + $(INSTALL_DATA) libxenctrl.a $(DESTDIR)/usr/$(LIBDIR) 257.90 + ln -sf libxenctrl.so.$(MAJOR).$(MINOR) $(DESTDIR)/usr/$(LIBDIR)/libxenctrl.so.$(MAJOR) 257.91 + ln -sf libxenctrl.so.$(MAJOR) $(DESTDIR)/usr/$(LIBDIR)/libxenctrl.so 257.92 + $(INSTALL_DATA) xenctrl.h $(DESTDIR)/usr/include 257.93 + 257.94 + $(INSTALL_PROG) libxenguest.so.$(MAJOR).$(MINOR) $(DESTDIR)/usr/$(LIBDIR) 257.95 + $(INSTALL_DATA) libxenguest.a $(DESTDIR)/usr/$(LIBDIR) 257.96 + ln -sf libxenguest.so.$(MAJOR).$(MINOR) $(DESTDIR)/usr/$(LIBDIR)/libxenguest.so.$(MAJOR) 257.97 + ln -sf libxenguest.so.$(MAJOR) $(DESTDIR)/usr/$(LIBDIR)/libxenguest.so 257.98 257.99 .PHONY: TAGS clean rpm install all 257.100 257.101 @@ -100,18 +116,30 @@ rpm: build 257.102 mv staging/i386/*.rpm . 257.103 rm -rf staging 257.104 257.105 -libxc.a: $(LIB_OBJS) 257.106 - $(AR) rc $@ $^ 257.107 +# libxenctrl 257.108 257.109 -libxc-pic.a: $(PIC_OBJS) 257.110 +libxenctrl.a: $(LIB_OBJS) 257.111 $(AR) rc $@ $^ 257.112 257.113 -libxc.so: libxc.so.$(MAJOR) 257.114 +libxenctrl.so: libxenctrl.so.$(MAJOR) 257.115 ln -sf $< $@ 257.116 -libxc.so.$(MAJOR): libxc.so.$(MAJOR).$(MINOR) 257.117 +libxenctrl.so.$(MAJOR): libxenctrl.so.$(MAJOR).$(MINOR) 257.118 ln -sf $< $@ 257.119 257.120 -libxc.so.$(MAJOR).$(MINOR): $(PIC_OBJS) 257.121 - $(CC) $(CFLAGS) $(LDFLAGS) -Wl,-soname -Wl,libxc.so.$(MAJOR) -shared -o $@ $^ -lz 257.122 +libxenctrl.so.$(MAJOR).$(MINOR): $(PIC_OBJS) 257.123 + $(CC) $(CFLAGS) $(LDFLAGS) -Wl,-soname -Wl,libxenctrl.so.$(MAJOR) -shared -o $@ $^ 257.124 + 257.125 +# libxenguest 257.126 + 257.127 +libxenguest.a: $(LIB_BUILD_OBJS) 257.128 + $(AR) rc $@ $^ 257.129 + 257.130 +libxenguest.so: libxenguest.so.$(MAJOR) 257.131 + ln -sf $< $@ 257.132 +libxenguest.so.$(MAJOR): libxenguest.so.$(MAJOR).$(MINOR) 257.133 + ln -sf $< $@ 257.134 + 257.135 +libxenguest.so.$(MAJOR).$(MINOR): $(PIC_BUILD_OBJS) 257.136 + $(CC) $(CFLAGS) $(LDFLAGS) -Wl,-soname -Wl,libxenguest.so.$(MAJOR) -shared -o $@ $^ -lz -lxenctrl 257.137 257.138 -include $(DEPS)
259.1 --- a/tools/libxc/xc.h Wed Aug 24 16:16:52 2005 -0700 259.2 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 259.3 @@ -1,558 +0,0 @@ 259.4 -/****************************************************************************** 259.5 - * xc.h 259.6 - * 259.7 - * A library for low-level access to the Xen control interfaces. 259.8 - * 259.9 - * Copyright (c) 2003-2004, K A Fraser. 259.10 - */ 259.11 - 259.12 -#ifndef __XC_H__ 259.13 -#define __XC_H__ 259.14 - 259.15 -#include <stdint.h> 259.16 - 259.17 -typedef uint8_t u8; 259.18 -typedef uint16_t u16; 259.19 -typedef uint32_t u32; 259.20 -typedef uint64_t u64; 259.21 -typedef int8_t s8; 259.22 -typedef int16_t s16; 259.23 -typedef int32_t s32; 259.24 -typedef int64_t s64; 259.25 - 259.26 -#include <sys/ptrace.h> 259.27 -#include <xen/xen.h> 259.28 -#include <xen/dom0_ops.h> 259.29 -#include <xen/event_channel.h> 259.30 -#include <xen/sched_ctl.h> 259.31 -#include <xen/acm.h> 259.32 - 259.33 -#ifdef __ia64__ 259.34 -#define XC_PAGE_SHIFT 14 259.35 -#else 259.36 -#define XC_PAGE_SHIFT 12 259.37 -#endif 259.38 -#define XC_PAGE_SIZE (1UL << XC_PAGE_SHIFT) 259.39 -#define XC_PAGE_MASK (~(XC_PAGE_SIZE-1)) 259.40 - 259.41 -/* 259.42 - * DEFINITIONS FOR CPU BARRIERS 259.43 - */ 259.44 - 259.45 -#if defined(__i386__) 259.46 -#define mb() __asm__ __volatile__ ( "lock; addl $0,0(%%esp)" : : : "memory" ) 259.47 -#define rmb() __asm__ __volatile__ ( "lock; addl $0,0(%%esp)" : : : "memory" ) 259.48 -#define wmb() __asm__ __volatile__ ( "" : : : "memory") 259.49 -#elif defined(__x86_64__) 259.50 -#define mb() __asm__ __volatile__ ( "mfence" : : : "memory") 259.51 -#define rmb() __asm__ __volatile__ ( "lfence" : : : "memory") 259.52 -#define wmb() __asm__ __volatile__ ( "" : : : "memory") 259.53 -#elif defined(__ia64__) 259.54 -/* FIXME */ 259.55 -#define mb() 259.56 -#define rmb() 259.57 -#define wmb() 259.58 -#else 259.59 -#error "Define barriers" 259.60 -#endif 259.61 - 259.62 -/* 259.63 - * INITIALIZATION FUNCTIONS 259.64 - */ 259.65 - 259.66 -/** 259.67 - * This function opens a handle to the hypervisor interface. This function can 259.68 - * be called multiple times within a single process. Multiple processes can 259.69 - * have an open hypervisor interface at the same time. 259.70 - * 259.71 - * Each call to this function should have a corresponding call to 259.72 - * xc_interface_close(). 259.73 - * 259.74 - * This function can fail if the caller does not have superuser permission or 259.75 - * if a Xen-enabled kernel is not currently running. 259.76 - * 259.77 - * @return a handle to the hypervisor interface or -1 on failure 259.78 - */ 259.79 -int xc_interface_open(void); 259.80 - 259.81 -/** 259.82 - * This function closes an open hypervisor interface. 259.83 - * 259.84 - * This function can fail if the handle does not represent an open interface or 259.85 - * if there were problems closing the interface. 259.86 - * 259.87 - * @parm xc_handle a handle to an open hypervisor interface 259.88 - * @return 0 on success, -1 otherwise. 
259.89 - */ 259.90 -int xc_interface_close(int xc_handle); 259.91 - 259.92 -/* 259.93 - * DOMAIN DEBUGGING FUNCTIONS 259.94 - */ 259.95 - 259.96 -typedef struct xc_core_header { 259.97 - unsigned int xch_magic; 259.98 - unsigned int xch_nr_vcpus; 259.99 - unsigned int xch_nr_pages; 259.100 - unsigned int xch_ctxt_offset; 259.101 - unsigned int xch_index_offset; 259.102 - unsigned int xch_pages_offset; 259.103 -} xc_core_header_t; 259.104 - 259.105 - 259.106 -long xc_ptrace(enum __ptrace_request request, 259.107 - u32 domid, 259.108 - long addr, 259.109 - long data); 259.110 - 259.111 -long xc_ptrace_core(enum __ptrace_request request, 259.112 - u32 domid, 259.113 - long addr, 259.114 - long data); 259.115 - 259.116 -int xc_waitdomain(int domain, 259.117 - int *status, 259.118 - int options); 259.119 - 259.120 -int xc_waitdomain_core(int domain, 259.121 - int *status, 259.122 - int options); 259.123 - 259.124 -/* 259.125 - * DOMAIN MANAGEMENT FUNCTIONS 259.126 - */ 259.127 - 259.128 -typedef struct { 259.129 - u32 domid; 259.130 - u32 ssidref; 259.131 - unsigned int dying:1, crashed:1, shutdown:1, 259.132 - paused:1, blocked:1, running:1; 259.133 - unsigned int shutdown_reason; /* only meaningful if shutdown==1 */ 259.134 - unsigned long nr_pages; 259.135 - unsigned long shared_info_frame; 259.136 - u64 cpu_time; 259.137 - unsigned long max_memkb; 259.138 - unsigned int vcpus; 259.139 - s32 vcpu_to_cpu[MAX_VIRT_CPUS]; 259.140 - cpumap_t cpumap[MAX_VIRT_CPUS]; 259.141 -} xc_dominfo_t; 259.142 - 259.143 -typedef dom0_getdomaininfo_t xc_domaininfo_t; 259.144 -int xc_domain_create(int xc_handle, 259.145 - u32 ssidref, 259.146 - u32 *pdomid); 259.147 - 259.148 - 259.149 -int xc_domain_dumpcore(int xc_handle, 259.150 - u32 domid, 259.151 - const char *corename); 259.152 - 259.153 - 259.154 -/** 259.155 - * This function pauses a domain. A paused domain still exists in memory 259.156 - * however it does not receive any timeslices from the hypervisor. 259.157 - * 259.158 - * @parm xc_handle a handle to an open hypervisor interface 259.159 - * @parm domid the domain id to pause 259.160 - * @return 0 on success, -1 on failure. 259.161 - */ 259.162 -int xc_domain_pause(int xc_handle, 259.163 - u32 domid); 259.164 -/** 259.165 - * This function unpauses a domain. The domain should have been previously 259.166 - * paused. 259.167 - * 259.168 - * @parm xc_handle a handle to an open hypervisor interface 259.169 - * @parm domid the domain id to unpause 259.170 - * return 0 on success, -1 on failure 259.171 - */ 259.172 -int xc_domain_unpause(int xc_handle, 259.173 - u32 domid); 259.174 - 259.175 -/** 259.176 - * This function will destroy a domain. Destroying a domain removes the domain 259.177 - * completely from memory. This function should be called after sending the 259.178 - * domain a SHUTDOWN control message to free up the domain resources. 259.179 - * 259.180 - * @parm xc_handle a handle to an open hypervisor interface 259.181 - * @parm domid the domain id to destroy 259.182 - * @return 0 on success, -1 on failure 259.183 - */ 259.184 -int xc_domain_destroy(int xc_handle, 259.185 - u32 domid); 259.186 -int xc_domain_pincpu(int xc_handle, 259.187 - u32 domid, 259.188 - int vcpu, 259.189 - cpumap_t *cpumap); 259.190 -/** 259.191 - * This function will return information about one or more domains. It is 259.192 - * designed to iterate over the list of domains. If a single domain is 259.193 - * requested, this function will return the next domain in the list - if 259.194 - * one exists. 
It is, therefore, important in this case to make sure the 259.195 - * domain requested was the one returned. 259.196 - * 259.197 - * @parm xc_handle a handle to an open hypervisor interface 259.198 - * @parm first_domid the first domain to enumerate information from. Domains 259.199 - * are currently enumerate in order of creation. 259.200 - * @parm max_doms the number of elements in info 259.201 - * @parm info an array of max_doms size that will contain the information for 259.202 - * the enumerated domains. 259.203 - * @return the number of domains enumerated or -1 on error 259.204 - */ 259.205 -int xc_domain_getinfo(int xc_handle, 259.206 - u32 first_domid, 259.207 - unsigned int max_doms, 259.208 - xc_dominfo_t *info); 259.209 - 259.210 -/** 259.211 - * This function will return information about one or more domains, using a 259.212 - * single hypercall. The domain information will be stored into the supplied 259.213 - * array of xc_domaininfo_t structures. 259.214 - * 259.215 - * @parm xc_handle a handle to an open hypervisor interface 259.216 - * @parm first_domain the first domain to enumerate information from. 259.217 - * Domains are currently enumerate in order of creation. 259.218 - * @parm max_domains the number of elements in info 259.219 - * @parm info an array of max_doms size that will contain the information for 259.220 - * the enumerated domains. 259.221 - * @return the number of domains enumerated or -1 on error 259.222 - */ 259.223 -int xc_domain_getinfolist(int xc_handle, 259.224 - u32 first_domain, 259.225 - unsigned int max_domains, 259.226 - xc_domaininfo_t *info); 259.227 - 259.228 -/** 259.229 - * This function returns information about one domain. This information is 259.230 - * more detailed than the information from xc_domain_getinfo(). 259.231 - * 259.232 - * @parm xc_handle a handle to an open hypervisor interface 259.233 - * @parm domid the domain to get information from 259.234 - * @parm info a pointer to an xc_domaininfo_t to store the domain information 259.235 - * @parm ctxt a pointer to a structure to store the execution context of the 259.236 - * domain 259.237 - * @return 0 on success, -1 on failure 259.238 - */ 259.239 -int xc_domain_get_vcpu_context(int xc_handle, 259.240 - u32 domid, 259.241 - u32 vcpu, 259.242 - vcpu_guest_context_t *ctxt); 259.243 - 259.244 -int xc_domain_setcpuweight(int xc_handle, 259.245 - u32 domid, 259.246 - float weight); 259.247 -long long xc_domain_get_cpu_usage(int xc_handle, 259.248 - domid_t domid, 259.249 - int vcpu); 259.250 - 259.251 - 259.252 -typedef dom0_shadow_control_stats_t xc_shadow_control_stats_t; 259.253 -int xc_shadow_control(int xc_handle, 259.254 - u32 domid, 259.255 - unsigned int sop, 259.256 - unsigned long *dirty_bitmap, 259.257 - unsigned long pages, 259.258 - xc_shadow_control_stats_t *stats); 259.259 - 259.260 - 259.261 -#define XCFLAGS_VERBOSE 1 259.262 -#define XCFLAGS_LIVE 2 259.263 -#define XCFLAGS_DEBUG 4 259.264 -#define XCFLAGS_CONFIGURE 8 259.265 - 259.266 -struct XcIOContext; 259.267 - 259.268 -/** 259.269 - * This function will save a domain running Linux. 259.270 - * 259.271 - * @parm xc_handle a handle to an open hypervisor interface 259.272 - * @parm fd the file descriptor to save a domain to 259.273 - * @parm dom the id of the domain 259.274 - * @return 0 on success, -1 on failure 259.275 - */ 259.276 -int xc_linux_save(int xc_handle, int fd, u32 dom); 259.277 - 259.278 -/** 259.279 - * This function will restore a saved domain running Linux. 
259.280 - * 259.281 - * @parm xc_handle a handle to an open hypervisor interface 259.282 - * @parm fd the file descriptor to restore a domain from 259.283 - * @parm dom the id of the domain 259.284 - * @parm nr_pfns the number of pages 259.285 - * @parm store_evtchn the store event channel for this domain to use 259.286 - * @parm store_mfn returned with the mfn of the store page 259.287 - * @return 0 on success, -1 on failure 259.288 - */ 259.289 -int xc_linux_restore(int xc_handle, int io_fd, u32 dom, unsigned long nr_pfns, 259.290 - unsigned int store_evtchn, unsigned long *store_mfn); 259.291 - 259.292 -int xc_linux_build(int xc_handle, 259.293 - u32 domid, 259.294 - const char *image_name, 259.295 - const char *ramdisk_name, 259.296 - const char *cmdline, 259.297 - unsigned int control_evtchn, 259.298 - unsigned long flags, 259.299 - unsigned int vcpus, 259.300 - unsigned int store_evtchn, 259.301 - unsigned long *store_mfn); 259.302 - 259.303 -struct mem_map; 259.304 -int xc_vmx_build(int xc_handle, 259.305 - u32 domid, 259.306 - int memsize, 259.307 - const char *image_name, 259.308 - struct mem_map *memmap, 259.309 - const char *ramdisk_name, 259.310 - const char *cmdline, 259.311 - unsigned int control_evtchn, 259.312 - unsigned long flags, 259.313 - unsigned int vcpus, 259.314 - unsigned int store_evtchn, 259.315 - unsigned long *store_mfn); 259.316 - 259.317 -int xc_bvtsched_global_set(int xc_handle, 259.318 - unsigned long ctx_allow); 259.319 - 259.320 -int xc_bvtsched_domain_set(int xc_handle, 259.321 - u32 domid, 259.322 - u32 mcuadv, 259.323 - int warpback, 259.324 - s32 warpvalue, 259.325 - long long warpl, 259.326 - long long warpu); 259.327 - 259.328 -int xc_bvtsched_global_get(int xc_handle, 259.329 - unsigned long *ctx_allow); 259.330 - 259.331 -int xc_bvtsched_domain_get(int xc_handle, 259.332 - u32 domid, 259.333 - u32 *mcuadv, 259.334 - int *warpback, 259.335 - s32 *warpvalue, 259.336 - long long *warpl, 259.337 - long long *warpu); 259.338 - 259.339 -int xc_sedf_domain_set(int xc_handle, 259.340 - u32 domid, 259.341 - u64 period, u64 slice, u64 latency, u16 extratime, u16 weight); 259.342 - 259.343 -int xc_sedf_domain_get(int xc_handle, 259.344 - u32 domid, 259.345 - u64* period, u64 *slice, u64 *latency, u16 *extratime, u16* weight); 259.346 - 259.347 -typedef evtchn_status_t xc_evtchn_status_t; 259.348 - 259.349 -/* 259.350 - * EVENT CHANNEL FUNCTIONS 259.351 - */ 259.352 - 259.353 -/** 259.354 - * This function allocates an unbound port. Ports are named endpoints used for 259.355 - * interdomain communication. This function is most useful in opening a 259.356 - * well-known port within a domain to receive events on. 259.357 - * 259.358 - * @parm xc_handle a handle to an open hypervisor interface 259.359 - * @parm dom the ID of the domain. This maybe DOMID_SELF 259.360 - * @parm port a pointer to a port. This is an in/out parameter. If *port is 259.361 - * 0, then a new port will be assigned, if port is > 0 then that 259.362 - * port is allocated if the port is unallocated. 259.363 - * @return 0 on success, -1 on failure 259.364 - */ 259.365 -int xc_evtchn_alloc_unbound(int xc_handle, 259.366 - u32 dom, 259.367 - int *port); 259.368 - 259.369 -/** 259.370 - * This function creates a pair of ports between two domains. A port can only 259.371 - * be bound once within a domain. 259.372 - * 259.373 - * @parm xc_handle a handle to an open hypervisor interface 259.374 - * @parm dom1 one of the two domains to connect. Can be DOMID_SELF. 
259.375 - * @parm dom2 the other domain to connect. Can be DOMID_SELF. 259.376 - * @parm port1 an in/out parameter. If > 0, then try to connect *port. If 259.377 - * 0, then allocate a new port and store the port in *port. 259.378 - * @parm port2 the port connected on port2. This parameter behaves the same 259.379 - * way as port1. 259.380 - * @return 0 on success, -1 on error. 259.381 - */ 259.382 -int xc_evtchn_bind_interdomain(int xc_handle, 259.383 - u32 dom1, 259.384 - u32 dom2, 259.385 - int *port1, 259.386 - int *port2); 259.387 -int xc_evtchn_bind_virq(int xc_handle, 259.388 - int virq, 259.389 - int *port); 259.390 - 259.391 -/** 259.392 - * This function will close a single port on an event channel. 259.393 - * 259.394 - * @parm xc_handle a handle to an open hypervisor interface 259.395 - * @parm dom the domain that the port exists on. May be DOMID_SELF. 259.396 - * @parm port the port to close 259.397 - * @return 0 on success, -1 on error 259.398 - */ 259.399 -int xc_evtchn_close(int xc_handle, 259.400 - u32 dom, /* may be DOMID_SELF */ 259.401 - int port); 259.402 - 259.403 -/** 259.404 - * This function generates a notify event on a bound port. 259.405 - * 259.406 - * Notifies can be read within Linux by opening /dev/xen/evtchn and reading 259.407 - * a 16 bit value. The result will be the port the event occurred on. When 259.408 - * events occur, the port is masked until the 16 bit port value is written back 259.409 - * to the file. When /dev/xen/evtchn is opened, it has to be bound via an 259.410 - * ioctl to each port to listen on. The ioctl for binding is _IO('E', 2). The 259.411 - * parameter is the port to listen on. 259.412 - * 259.413 - * @parm xc_handle a handle to an open hypervisor interface 259.414 - * @parm local_port the port to generate the notify on 259.415 - * @return 0 on success, -1 on error 259.416 - */ 259.417 -int xc_evtchn_send(int xc_handle, 259.418 - int local_port); 259.419 -int xc_evtchn_status(int xc_handle, 259.420 - u32 dom, /* may be DOMID_SELF */ 259.421 - int port, 259.422 - xc_evtchn_status_t *status); 259.423 - 259.424 -int xc_physdev_pci_access_modify(int xc_handle, 259.425 - u32 domid, 259.426 - int bus, 259.427 - int dev, 259.428 - int func, 259.429 - int enable); 259.430 - 259.431 -int xc_readconsolering(int xc_handle, 259.432 - char **pbuffer, 259.433 - unsigned int *pnr_chars, 259.434 - int clear); 259.435 - 259.436 -typedef dom0_physinfo_t xc_physinfo_t; 259.437 -int xc_physinfo(int xc_handle, 259.438 - xc_physinfo_t *info); 259.439 - 259.440 -int xc_sched_id(int xc_handle, 259.441 - int *sched_id); 259.442 - 259.443 -int xc_domain_setmaxmem(int xc_handle, 259.444 - u32 domid, 259.445 - unsigned int max_memkb); 259.446 - 259.447 -int xc_domain_memory_increase_reservation(int xc_handle, 259.448 - u32 domid, 259.449 - unsigned int mem_kb); 259.450 - 259.451 -typedef dom0_perfc_desc_t xc_perfc_desc_t; 259.452 -/* IMPORTANT: The caller is responsible for mlock()'ing the @desc array. */ 259.453 -int xc_perfc_control(int xc_handle, 259.454 - u32 op, 259.455 - xc_perfc_desc_t *desc); 259.456 - 259.457 -/* read/write msr */ 259.458 -long long xc_msr_read(int xc_handle, int cpu_mask, int msr); 259.459 -int xc_msr_write(int xc_handle, int cpu_mask, int msr, unsigned int low, 259.460 - unsigned int high); 259.461 - 259.462 -/** 259.463 - * Memory maps a range within one domain to a local address range. Mappings 259.464 - * should be unmapped with munmap and should follow the same rules as mmap 259.465 - * regarding page alignment. 
Returns NULL on failure. 259.466 - * 259.467 - * In Linux, the ring queue for the control channel is accessible by mapping 259.468 - * the shared_info_frame (from xc_domain_getinfo()) + 2048. The structure 259.469 - * stored there is of type control_if_t. 259.470 - * 259.471 - * @parm xc_handle a handle on an open hypervisor interface 259.472 - * @parm dom the domain to map memory from 259.473 - * @parm size the amount of memory to map (in multiples of page size) 259.474 - * @parm prot same flag as in mmap(). 259.475 - * @parm mfn the frame address to map. 259.476 - */ 259.477 -void *xc_map_foreign_range(int xc_handle, u32 dom, 259.478 - int size, int prot, 259.479 - unsigned long mfn ); 259.480 - 259.481 -void *xc_map_foreign_batch(int xc_handle, u32 dom, int prot, 259.482 - unsigned long *arr, int num ); 259.483 - 259.484 -int xc_get_pfn_list(int xc_handle, u32 domid, unsigned long *pfn_buf, 259.485 - unsigned long max_pfns); 259.486 - 259.487 -int xc_ia64_get_pfn_list(int xc_handle, u32 domid, unsigned long *pfn_buf, 259.488 - unsigned int start_page, unsigned int nr_pages); 259.489 - 259.490 -/*\ 259.491 - * GRANT TABLE FUNCTIONS 259.492 -\*/ 259.493 - 259.494 -/** 259.495 - * This function opens a handle to the more restricted grant table hypervisor 259.496 - * interface. This may be used where the standard interface is not 259.497 - * available because the domain is not privileged. 259.498 - * This function can be called multiple times within a single process. 259.499 - * Multiple processes can have an open hypervisor interface at the same time. 259.500 - * 259.501 - * Each call to this function should have a corresponding call to 259.502 - * xc_grant_interface_close(). 259.503 - * 259.504 - * This function can fail if a Xen-enabled kernel is not currently running. 259.505 - * 259.506 - * @return a handle to the hypervisor grant table interface or -1 on failure 259.507 - */ 259.508 -int xc_grant_interface_open(void); 259.509 - 259.510 -/** 259.511 - * This function closes an open grant table hypervisor interface. 259.512 - * 259.513 - * This function can fail if the handle does not represent an open interface or 259.514 - * if there were problems closing the interface. 259.515 - * 259.516 - * @parm xc_handle a handle to an open grant table hypervisor interface 259.517 - * @return 0 on success, -1 otherwise. 259.518 - */ 259.519 -int xc_grant_interface_close(int xc_handle); 259.520 - 259.521 -int xc_gnttab_map_grant_ref(int xc_handle, 259.522 - u64 host_virt_addr, 259.523 - u32 dom, 259.524 - u16 ref, 259.525 - u16 flags, 259.526 - s16 *handle, 259.527 - u64 *dev_bus_addr); 259.528 - 259.529 -int xc_gnttab_unmap_grant_ref(int xc_handle, 259.530 - u64 host_virt_addr, 259.531 - u64 dev_bus_addr, 259.532 - u16 handle, 259.533 - s16 *status); 259.534 - 259.535 -int xc_gnttab_setup_table(int xc_handle, 259.536 - u32 dom, 259.537 - u16 nr_frames, 259.538 - s16 *status, 259.539 - unsigned long **frame_list); 259.540 - 259.541 -/* Grant debug builds only: */ 259.542 -int xc_gnttab_dump_table(int xc_handle, 259.543 - u32 dom, 259.544 - s16 *status); 259.545 - 259.546 -/* Get current total pages allocated to a domain. */ 259.547 -long xc_get_tot_pages(int xc_handle, u32 domid); 259.548 - 259.549 -/* Execute a privileged dom0 operation. */ 259.550 -int xc_dom0_op(int xc_handle, dom0_op_t *op); 259.551 - 259.552 -/* Initializes the store (for dom0) 259.553 - remote_port should be the remote end of a bound interdomain channel between 259.554 - the store and dom0. 
259.555 - 259.556 - This function returns a shared frame that should be passed to 259.557 - xs_introduce_domain 259.558 - */ 259.559 -long xc_init_store(int xc_handle, int remote_port); 259.560 - 259.561 -#endif /* __XC_H__ */
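Editor's note: with xc.h removed, every consumer touched by this changeset switches to #include <xenctrl.h> and links against -lxenctrl (with the domain build/save/restore entry points split out into libxenguest), as the Makefile hunks show. A minimal sketch of a caller after the rename, assuming the declarations documented in the removed header carry over unchanged into xenctrl.h:

    /* Assumed build line: cc -o domlist domlist.c -lxenctrl */
    #include <stdio.h>
    #include <xenctrl.h>

    int main(void)
    {
        xc_dominfo_t info;
        int xc = xc_interface_open();          /* returns -1 on failure */

        if (xc < 0)
            return 1;
        /* Enumerate a single domain, starting from domain 0. */
        if (xc_domain_getinfo(xc, 0, 1, &info) == 1)
            printf("dom %u: %lu pages\n", info.domid, info.nr_pages);
        xc_interface_close(xc);
        return 0;
    }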
260.1 --- a/tools/libxc/xc_core.c Wed Aug 24 16:16:52 2005 -0700 260.2 +++ b/tools/libxc/xc_core.c Thu Aug 25 11:18:47 2005 -0700 260.3 @@ -1,4 +1,4 @@ 260.4 -#include "xc_private.h" 260.5 +#include "xg_private.h" 260.6 #define ELFSIZE 32 260.7 #include "xc_elf.h" 260.8 #include <stdlib.h>
261.1 --- a/tools/libxc/xc_domain.c Wed Aug 24 16:16:52 2005 -0700 261.2 +++ b/tools/libxc/xc_domain.c Thu Aug 25 11:18:47 2005 -0700 261.3 @@ -266,7 +266,7 @@ int xc_domain_memory_increase_reservatio 261.4 int err; 261.5 unsigned int npages = mem_kb / (PAGE_SIZE/1024); 261.6 261.7 - err = do_dom_mem_op(xc_handle, MEMOP_increase_reservation, NULL, 261.8 + err = xc_dom_mem_op(xc_handle, MEMOP_increase_reservation, NULL, 261.9 npages, 0, domid); 261.10 if (err == npages) 261.11 return 0;
263.1 --- a/tools/libxc/xc_linux_build.c Wed Aug 24 16:16:52 2005 -0700 263.2 +++ b/tools/libxc/xc_linux_build.c Thu Aug 25 11:18:47 2005 -0700 263.3 @@ -2,7 +2,8 @@ 263.4 * xc_linux_build.c 263.5 */ 263.6 263.7 -#include "xc_private.h" 263.8 +#include "xg_private.h" 263.9 +#include <xenctrl.h> 263.10 263.11 #if defined(__i386__) 263.12 #define ELFSIZE 32 263.13 @@ -340,7 +341,7 @@ static int setup_guest(int xc_handle, 263.14 unsigned long count, i; 263.15 start_info_t *start_info; 263.16 shared_info_t *shared_info; 263.17 - mmu_t *mmu = NULL; 263.18 + xc_mmu_t *mmu = NULL; 263.19 int rc; 263.20 263.21 unsigned long nr_pt_pages; 263.22 @@ -490,7 +491,7 @@ static int setup_guest(int xc_handle, 263.23 } 263.24 } 263.25 263.26 - if ( (mmu = init_mmu_updates(xc_handle, dom)) == NULL ) 263.27 + if ( (mmu = xc_init_mmu_updates(xc_handle, dom)) == NULL ) 263.28 goto error_out; 263.29 263.30 /* setup page tables */ 263.31 @@ -520,9 +521,9 @@ static int setup_guest(int xc_handle, 263.32 page_array[physmap_pfn++]); 263.33 for ( count = 0; count < nr_pages; count++ ) 263.34 { 263.35 - if ( add_mmu_update(xc_handle, mmu, 263.36 - (page_array[count] << PAGE_SHIFT) | 263.37 - MMU_MACHPHYS_UPDATE, count) ) 263.38 + if ( xc_add_mmu_update(xc_handle, mmu, 263.39 + (page_array[count] << PAGE_SHIFT) | 263.40 + MMU_MACHPHYS_UPDATE, count) ) 263.41 { 263.42 munmap(physmap, PAGE_SIZE); 263.43 goto error_out; 263.44 @@ -602,7 +603,7 @@ static int setup_guest(int xc_handle, 263.45 munmap(shared_info, PAGE_SIZE); 263.46 263.47 /* Send the page update requests down to the hypervisor. */ 263.48 - if ( finish_mmu_updates(xc_handle, mmu) ) 263.49 + if ( xc_finish_mmu_updates(xc_handle, mmu) ) 263.50 goto error_out; 263.51 263.52 free(mmu); 263.53 @@ -676,7 +677,7 @@ int xc_linux_build(int xc_handle, 263.54 263.55 op.cmd = DOM0_GETDOMAININFO; 263.56 op.u.getdomaininfo.domain = (domid_t)domid; 263.57 - if ( (do_dom0_op(xc_handle, &op) < 0) || 263.58 + if ( (xc_dom0_op(xc_handle, &op) < 0) || 263.59 ((u16)op.u.getdomaininfo.domain != domid) ) 263.60 { 263.61 PERROR("Could not get info on domain"); 263.62 @@ -793,7 +794,7 @@ int xc_linux_build(int xc_handle, 263.63 launch_op.u.setdomaininfo.ctxt = ctxt; 263.64 263.65 launch_op.cmd = DOM0_SETDOMAININFO; 263.66 - rc = do_dom0_op(xc_handle, &launch_op); 263.67 + rc = xc_dom0_op(xc_handle, &launch_op); 263.68 263.69 return rc; 263.70
264.1 --- a/tools/libxc/xc_linux_restore.c Wed Aug 24 16:16:52 2005 -0700 264.2 +++ b/tools/libxc/xc_linux_restore.c Thu Aug 25 11:18:47 2005 -0700 264.3 @@ -6,7 +6,12 @@ 264.4 * Copyright (c) 2003, K A Fraser. 264.5 */ 264.6 264.7 -#include "xc_private.h" 264.8 +#include <stdlib.h> 264.9 +#include <unistd.h> 264.10 + 264.11 +#include "xg_private.h" 264.12 +#include <xenctrl.h> 264.13 + 264.14 #include <xen/linux/suspend.h> 264.15 264.16 #define MAX_BATCH_SIZE 1024 264.17 @@ -89,7 +94,7 @@ int xc_linux_restore(int xc_handle, int 264.18 264.19 char *region_base; 264.20 264.21 - mmu_t *mmu = NULL; 264.22 + xc_mmu_t *mmu = NULL; 264.23 264.24 /* used by debug verify code */ 264.25 unsigned long buf[PAGE_SIZE/sizeof(unsigned long)]; 264.26 @@ -132,7 +137,7 @@ int xc_linux_restore(int xc_handle, int 264.27 /* Get the domain's shared-info frame. */ 264.28 op.cmd = DOM0_GETDOMAININFO; 264.29 op.u.getdomaininfo.domain = (domid_t)dom; 264.30 - if (do_dom0_op(xc_handle, &op) < 0) { 264.31 + if (xc_dom0_op(xc_handle, &op) < 0) { 264.32 ERR("Could not get information on new domain"); 264.33 goto out; 264.34 } 264.35 @@ -158,7 +163,7 @@ int xc_linux_restore(int xc_handle, int 264.36 goto out; 264.37 } 264.38 264.39 - mmu = init_mmu_updates(xc_handle, dom); 264.40 + mmu = xc_init_mmu_updates(xc_handle, dom); 264.41 if (mmu == NULL) { 264.42 ERR("Could not initialise for MMU updates"); 264.43 goto out; 264.44 @@ -355,8 +360,9 @@ int xc_linux_restore(int xc_handle, int 264.45 } 264.46 } 264.47 264.48 - if ( add_mmu_update(xc_handle, mmu, 264.49 - (mfn<<PAGE_SHIFT) | MMU_MACHPHYS_UPDATE, pfn) ) 264.50 + if ( xc_add_mmu_update(xc_handle, mmu, 264.51 + (mfn<<PAGE_SHIFT) | MMU_MACHPHYS_UPDATE, 264.52 + pfn) ) 264.53 { 264.54 printf("machpys mfn=%ld pfn=%ld\n",mfn,pfn); 264.55 goto out; 264.56 @@ -370,7 +376,7 @@ int xc_linux_restore(int xc_handle, int 264.57 264.58 DPRINTF("Received all pages\n"); 264.59 264.60 - if ( finish_mmu_updates(xc_handle, mmu) ) 264.61 + if ( xc_finish_mmu_updates(xc_handle, mmu) ) 264.62 goto out; 264.63 264.64 /* 264.65 @@ -388,14 +394,14 @@ int xc_linux_restore(int xc_handle, int 264.66 pin[nr_pins].mfn = pfn_to_mfn_table[i]; 264.67 if ( ++nr_pins == MAX_PIN_BATCH ) 264.68 { 264.69 - if ( do_mmuext_op(xc_handle, pin, nr_pins, dom) < 0 ) 264.70 + if ( xc_mmuext_op(xc_handle, pin, nr_pins, dom) < 0 ) 264.71 goto out; 264.72 nr_pins = 0; 264.73 } 264.74 } 264.75 264.76 if ( (nr_pins != 0) && 264.77 - (do_mmuext_op(xc_handle, pin, nr_pins, dom) < 0) ) 264.78 + (xc_mmuext_op(xc_handle, pin, nr_pins, dom) < 0) ) 264.79 goto out; 264.80 264.81 DPRINTF("\b\b\b\b100%%\n"); 264.82 @@ -435,7 +441,7 @@ int xc_linux_restore(int xc_handle, int 264.83 264.84 if ( count > 0 ) 264.85 { 264.86 - if ( (rc = do_dom_mem_op( xc_handle, 264.87 + if ( (rc = xc_dom_mem_op( xc_handle, 264.88 MEMOP_decrease_reservation, 264.89 pfntab, count, 0, dom )) <0 ) 264.90 { 264.91 @@ -586,7 +592,7 @@ int xc_linux_restore(int xc_handle, int 264.92 op.u.setdomaininfo.domain = (domid_t)dom; 264.93 op.u.setdomaininfo.vcpu = 0; 264.94 op.u.setdomaininfo.ctxt = &ctxt; 264.95 - rc = do_dom0_op(xc_handle, &op); 264.96 + rc = xc_dom0_op(xc_handle, &op); 264.97 264.98 if ( rc != 0 ) 264.99 { 264.100 @@ -597,7 +603,7 @@ int xc_linux_restore(int xc_handle, int 264.101 DPRINTF("Domain ready to be unpaused\n"); 264.102 op.cmd = DOM0_UNPAUSEDOMAIN; 264.103 op.u.unpausedomain.domain = (domid_t)dom; 264.104 - rc = do_dom0_op(xc_handle, &op); 264.105 + rc = xc_dom0_op(xc_handle, &op); 264.106 if (rc == 0) { 264.107 /* Success: print 
the domain id. */ 264.108 DPRINTF("DOM=%u\n", dom);
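Editor's note on the build/restore hunks above: they all move to the renamed, exported batching helpers. xc_init_mmu_updates() allocates the request queue, xc_add_mmu_update() appends MMU_MACHPHYS_UPDATE entries, and xc_finish_mmu_updates() flushes the queued requests to Xen. A condensed sketch of that pattern, modelled on the setup_guest() loop in xc_linux_build.c; the helper name and its arguments are illustrative:

    /* Populate the machine->pseudophys table for a new domain, batched. */
    static int write_m2p(int xc_handle, domid_t dom,
                         unsigned long *page_array, unsigned long nr_pages)
    {
        unsigned long count;
        xc_mmu_t *mmu = xc_init_mmu_updates(xc_handle, dom);

        if (mmu == NULL)
            return -1;
        for (count = 0; count < nr_pages; count++)
            if (xc_add_mmu_update(xc_handle, mmu,
                                  (page_array[count] << PAGE_SHIFT) |
                                  MMU_MACHPHYS_UPDATE, count))
                goto error_out;
        if (xc_finish_mmu_updates(xc_handle, mmu))
            goto error_out;
        free(mmu);
        return 0;
     error_out:
        free(mmu);
        return -1;
    }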
265.1 --- a/tools/libxc/xc_linux_save.c Wed Aug 24 16:16:52 2005 -0700 265.2 +++ b/tools/libxc/xc_linux_save.c Thu Aug 25 11:18:47 2005 -0700 265.3 @@ -7,11 +7,15 @@ 265.4 */ 265.5 265.6 #include <inttypes.h> 265.7 +#include <time.h> 265.8 +#include <stdlib.h> 265.9 +#include <unistd.h> 265.10 #include <sys/time.h> 265.11 -#include "xc_private.h" 265.12 + 265.13 +#include "xg_private.h" 265.14 + 265.15 #include <xen/linux/suspend.h> 265.16 #include <xen/io/domain_controller.h> 265.17 -#include <time.h> 265.18 265.19 #define BATCH_SIZE 1024 /* 1024 pages (4MB) at a time */ 265.20 265.21 @@ -772,7 +776,7 @@ int xc_linux_save(int xc_handle, int io_ 265.22 goto out; 265.23 } 265.24 265.25 - if ( get_pfn_type_batch(xc_handle, dom, batch, pfn_type) ){ 265.26 + if ( xc_get_pfn_type_batch(xc_handle, dom, batch, pfn_type) ){ 265.27 ERR("get_pfn_type_batch failed"); 265.28 goto out; 265.29 }
266.1 --- a/tools/libxc/xc_load_aout9.c Wed Aug 24 16:16:52 2005 -0700 266.2 +++ b/tools/libxc/xc_load_aout9.c Thu Aug 25 11:18:47 2005 -0700 266.3 @@ -1,5 +1,5 @@ 266.4 266.5 -#include "xc_private.h" 266.6 +#include "xg_private.h" 266.7 #include "xc_aout9.h" 266.8 266.9 #if defined(__i386__)
267.1 --- a/tools/libxc/xc_load_bin.c Wed Aug 24 16:16:52 2005 -0700 267.2 +++ b/tools/libxc/xc_load_bin.c Thu Aug 25 11:18:47 2005 -0700 267.3 @@ -66,7 +66,7 @@ 267.4 * Free Software Foundation, Inc. 267.5 */ 267.6 267.7 -#include "xc_private.h" 267.8 +#include "xg_private.h" 267.9 #include <stdlib.h> 267.10 267.11 #define L1_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED)
268.1 --- a/tools/libxc/xc_load_elf.c Wed Aug 24 16:16:52 2005 -0700 268.2 +++ b/tools/libxc/xc_load_elf.c Thu Aug 25 11:18:47 2005 -0700 268.3 @@ -2,7 +2,7 @@ 268.4 * xc_elf_load.c 268.5 */ 268.6 268.7 -#include "xc_private.h" 268.8 +#include "xg_private.h" 268.9 268.10 #if defined(__i386__) 268.11 #define ELFSIZE 32
269.1 --- a/tools/libxc/xc_private.c Wed Aug 24 16:16:52 2005 -0700 269.2 +++ b/tools/libxc/xc_private.c Thu Aug 25 11:18:47 2005 -0700 269.3 @@ -64,8 +64,8 @@ void *xc_map_foreign_range(int xc_handle 269.4 /*******************/ 269.5 269.6 /* NB: arr must be mlock'ed */ 269.7 -int get_pfn_type_batch(int xc_handle, 269.8 - u32 dom, int num, unsigned long *arr) 269.9 +int xc_get_pfn_type_batch(int xc_handle, 269.10 + u32 dom, int num, unsigned long *arr) 269.11 { 269.12 dom0_op_t op; 269.13 op.cmd = DOM0_GETPAGEFRAMEINFO2; 269.14 @@ -92,25 +92,40 @@ unsigned int get_pfn_type(int xc_handle, 269.15 return op.u.getpageframeinfo.type; 269.16 } 269.17 269.18 - 269.19 - 269.20 -/*******************/ 269.21 +int xc_mmuext_op( 269.22 + int xc_handle, 269.23 + struct mmuext_op *op, 269.24 + unsigned int nr_ops, 269.25 + domid_t dom) 269.26 +{ 269.27 + privcmd_hypercall_t hypercall; 269.28 + long ret = -EINVAL; 269.29 269.30 -int pin_table( 269.31 - int xc_handle, unsigned int type, unsigned long mfn, domid_t dom) 269.32 -{ 269.33 - struct mmuext_op op; 269.34 + hypercall.op = __HYPERVISOR_mmuext_op; 269.35 + hypercall.arg[0] = (unsigned long)op; 269.36 + hypercall.arg[1] = (unsigned long)nr_ops; 269.37 + hypercall.arg[2] = (unsigned long)0; 269.38 + hypercall.arg[3] = (unsigned long)dom; 269.39 269.40 - op.cmd = type; 269.41 - op.mfn = mfn; 269.42 + if ( mlock(op, nr_ops*sizeof(*op)) != 0 ) 269.43 + { 269.44 + PERROR("Could not lock memory for Xen hypercall"); 269.45 + goto out1; 269.46 + } 269.47 269.48 - if ( do_mmuext_op(xc_handle, &op, 1, dom) < 0 ) 269.49 - return 1; 269.50 + if ( (ret = do_xen_hypercall(xc_handle, &hypercall)) < 0 ) 269.51 + { 269.52 + fprintf(stderr, "Dom_mem operation failed (rc=%ld errno=%d)-- need to" 269.53 + " rebuild the user-space tool set?\n",ret,errno); 269.54 + } 269.55 269.56 - return 0; 269.57 -} 269.58 + safe_munlock(op, nr_ops*sizeof(*op)); 269.59 269.60 -static int flush_mmu_updates(int xc_handle, mmu_t *mmu) 269.61 + out1: 269.62 + return ret; 269.63 +} 269.64 + 269.65 +static int flush_mmu_updates(int xc_handle, xc_mmu_t *mmu) 269.66 { 269.67 int err = 0; 269.68 privcmd_hypercall_t hypercall; 269.69 @@ -145,9 +160,9 @@ static int flush_mmu_updates(int xc_hand 269.70 return err; 269.71 } 269.72 269.73 -mmu_t *init_mmu_updates(int xc_handle, domid_t dom) 269.74 +xc_mmu_t *xc_init_mmu_updates(int xc_handle, domid_t dom) 269.75 { 269.76 - mmu_t *mmu = malloc(sizeof(mmu_t)); 269.77 + xc_mmu_t *mmu = malloc(sizeof(xc_mmu_t)); 269.78 if ( mmu == NULL ) 269.79 return mmu; 269.80 mmu->idx = 0; 269.81 @@ -155,8 +170,8 @@ mmu_t *init_mmu_updates(int xc_handle, d 269.82 return mmu; 269.83 } 269.84 269.85 -int add_mmu_update(int xc_handle, mmu_t *mmu, 269.86 - unsigned long ptr, unsigned long val) 269.87 +int xc_add_mmu_update(int xc_handle, xc_mmu_t *mmu, 269.88 + unsigned long ptr, unsigned long val) 269.89 { 269.90 mmu->updates[mmu->idx].ptr = ptr; 269.91 mmu->updates[mmu->idx].val = val; 269.92 @@ -167,11 +182,48 @@ int add_mmu_update(int xc_handle, mmu_t 269.93 return 0; 269.94 } 269.95 269.96 -int finish_mmu_updates(int xc_handle, mmu_t *mmu) 269.97 +int xc_finish_mmu_updates(int xc_handle, xc_mmu_t *mmu) 269.98 { 269.99 return flush_mmu_updates(xc_handle, mmu); 269.100 } 269.101 269.102 +int xc_dom_mem_op(int xc_handle, 269.103 + unsigned int memop, 269.104 + unsigned int *extent_list, 269.105 + unsigned int nr_extents, 269.106 + unsigned int extent_order, 269.107 + domid_t domid) 269.108 +{ 269.109 + privcmd_hypercall_t hypercall; 269.110 + long ret = -EINVAL; 
269.111 + 269.112 + hypercall.op = __HYPERVISOR_dom_mem_op; 269.113 + hypercall.arg[0] = (unsigned long)memop; 269.114 + hypercall.arg[1] = (unsigned long)extent_list; 269.115 + hypercall.arg[2] = (unsigned long)nr_extents; 269.116 + hypercall.arg[3] = (unsigned long)extent_order; 269.117 + hypercall.arg[4] = (unsigned long)domid; 269.118 + 269.119 + if ( (extent_list != NULL) && 269.120 + (mlock(extent_list, nr_extents*sizeof(unsigned long)) != 0) ) 269.121 + { 269.122 + PERROR("Could not lock memory for Xen hypercall"); 269.123 + goto out1; 269.124 + } 269.125 + 269.126 + if ( (ret = do_xen_hypercall(xc_handle, &hypercall)) < 0 ) 269.127 + { 269.128 + fprintf(stderr, "Dom_mem operation failed (rc=%ld errno=%d)-- need to" 269.129 + " rebuild the user-space tool set?\n",ret,errno); 269.130 + } 269.131 + 269.132 + if ( extent_list != NULL ) 269.133 + safe_munlock(extent_list, nr_extents*sizeof(unsigned long)); 269.134 + 269.135 + out1: 269.136 + return ret; 269.137 +} 269.138 + 269.139 269.140 long long xc_domain_get_cpu_usage( int xc_handle, domid_t domid, int vcpu ) 269.141 { 269.142 @@ -190,19 +242,6 @@ long long xc_domain_get_cpu_usage( int x 269.143 } 269.144 269.145 269.146 -/* This is shared between save and restore, and may generally be useful. */ 269.147 -unsigned long csum_page (void * page) 269.148 -{ 269.149 - int i; 269.150 - unsigned long *p = page; 269.151 - unsigned long long sum=0; 269.152 - 269.153 - for ( i = 0; i < (PAGE_SIZE/sizeof(unsigned long)); i++ ) 269.154 - sum += p[i]; 269.155 - 269.156 - return sum ^ (sum>>32); 269.157 -} 269.158 - 269.159 unsigned long xc_get_m2p_start_mfn ( int xc_handle ) 269.160 { 269.161 unsigned long mfn; 269.162 @@ -332,53 +371,6 @@ unsigned long xc_get_filesz(int fd) 269.163 return sz; 269.164 } 269.165 269.166 -char *xc_read_kernel_image(const char *filename, unsigned long *size) 269.167 -{ 269.168 - int kernel_fd = -1; 269.169 - gzFile kernel_gfd = NULL; 269.170 - char *image = NULL; 269.171 - unsigned int bytes; 269.172 - 269.173 - if ( (kernel_fd = open(filename, O_RDONLY)) < 0 ) 269.174 - { 269.175 - PERROR("Could not open kernel image"); 269.176 - goto out; 269.177 - } 269.178 - 269.179 - if ( (*size = xc_get_filesz(kernel_fd)) == 0 ) 269.180 - { 269.181 - PERROR("Could not read kernel image"); 269.182 - goto out; 269.183 - } 269.184 - 269.185 - if ( (kernel_gfd = gzdopen(kernel_fd, "rb")) == NULL ) 269.186 - { 269.187 - PERROR("Could not allocate decompression state for state file"); 269.188 - goto out; 269.189 - } 269.190 - 269.191 - if ( (image = malloc(*size)) == NULL ) 269.192 - { 269.193 - PERROR("Could not allocate memory for kernel image"); 269.194 - goto out; 269.195 - } 269.196 - 269.197 - if ( (bytes = gzread(kernel_gfd, image, *size)) != *size ) 269.198 - { 269.199 - PERROR("Error reading kernel image, could not" 269.200 - " read the whole image (%d != %ld).", bytes, *size); 269.201 - free(image); 269.202 - image = NULL; 269.203 - } 269.204 - 269.205 - out: 269.206 - if ( kernel_gfd != NULL ) 269.207 - gzclose(kernel_gfd); 269.208 - else if ( kernel_fd >= 0 ) 269.209 - close(kernel_fd); 269.210 - return image; 269.211 -} 269.212 - 269.213 void xc_map_memcpy(unsigned long dst, char *src, unsigned long size, 269.214 int xch, u32 dom, unsigned long *parray, 269.215 unsigned long vstart)
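A hedged caller-side sketch (not part of the changeset) of the newly exported xc_dom_mem_op(), releasing a batch of order-0 pages from a domain's reservation as the restore path does; MEMOP_decrease_reservation is assumed to be the command defined in <xen/xen.h>, and pfns, count and dom are placeholders:

/*
 * Hedged sketch: xc_dom_mem_op() mlocks the extent list, issues the
 * dom_mem_op hypercall and unlocks again; a negative return means failure.
 */
#include <stdio.h>
#include <xenctrl.h>

static int release_pages(int xc_handle, domid_t dom,
                         unsigned int *pfns, unsigned int count)
{
    int rc = xc_dom_mem_op(xc_handle, MEMOP_decrease_reservation,
                           pfns, count, 0, dom);
    if (rc < 0) {
        fprintf(stderr, "decrease_reservation failed (rc=%d)\n", rc);
        return -1;
    }
    return 0;
}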
270.1 --- a/tools/libxc/xc_private.h Wed Aug 24 16:16:52 2005 -0700 270.2 +++ b/tools/libxc/xc_private.h Thu Aug 25 11:18:47 2005 -0700 270.3 @@ -1,124 +1,26 @@ 270.4 270.5 -#ifndef __XC_PRIVATE_H__ 270.6 -#define __XC_PRIVATE_H__ 270.7 +#ifndef XC_PRIVATE_H 270.8 +#define XC_PRIVATE_H 270.9 270.10 #include <unistd.h> 270.11 #include <stdio.h> 270.12 #include <errno.h> 270.13 #include <fcntl.h> 270.14 +#include <string.h> 270.15 #include <sys/mman.h> 270.16 #include <sys/types.h> 270.17 #include <sys/stat.h> 270.18 #include <stdlib.h> 270.19 #include <sys/ioctl.h> 270.20 -#include <errno.h> 270.21 -#include <string.h> 270.22 270.23 -#include "xc.h" 270.24 +#include "xenctrl.h" 270.25 270.26 #include <xen/linux/privcmd.h> 270.27 270.28 -#define _PAGE_PRESENT 0x001 270.29 -#define _PAGE_RW 0x002 270.30 -#define _PAGE_USER 0x004 270.31 -#define _PAGE_PWT 0x008 270.32 -#define _PAGE_PCD 0x010 270.33 -#define _PAGE_ACCESSED 0x020 270.34 -#define _PAGE_DIRTY 0x040 270.35 -#define _PAGE_PAT 0x080 270.36 -#define _PAGE_PSE 0x080 270.37 -#define _PAGE_GLOBAL 0x100 270.38 - 270.39 -#if defined(__i386__) 270.40 -#define L1_PAGETABLE_SHIFT 12 270.41 -#define L2_PAGETABLE_SHIFT 22 270.42 -#define L1_PAGETABLE_SHIFT_PAE 12 270.43 -#define L2_PAGETABLE_SHIFT_PAE 21 270.44 -#define L3_PAGETABLE_SHIFT_PAE 30 270.45 -#elif defined(__x86_64__) 270.46 -#define L1_PAGETABLE_SHIFT 12 270.47 -#define L2_PAGETABLE_SHIFT 21 270.48 -#define L3_PAGETABLE_SHIFT 30 270.49 -#define L4_PAGETABLE_SHIFT 39 270.50 -#endif 270.51 - 270.52 -#if defined(__i386__) 270.53 -#define ENTRIES_PER_L1_PAGETABLE 1024 270.54 -#define ENTRIES_PER_L2_PAGETABLE 1024 270.55 -#define L1_PAGETABLE_ENTRIES_PAE 512 270.56 -#define L2_PAGETABLE_ENTRIES_PAE 512 270.57 -#define L3_PAGETABLE_ENTRIES_PAE 4 270.58 -#elif defined(__x86_64__) 270.59 -#define L1_PAGETABLE_ENTRIES 512 270.60 -#define L2_PAGETABLE_ENTRIES 512 270.61 -#define L3_PAGETABLE_ENTRIES 512 270.62 -#define L4_PAGETABLE_ENTRIES 512 270.63 -#endif 270.64 - 270.65 #define PAGE_SHIFT XC_PAGE_SHIFT 270.66 #define PAGE_SIZE (1UL << PAGE_SHIFT) 270.67 #define PAGE_MASK (~(PAGE_SIZE-1)) 270.68 270.69 -typedef u32 l1_pgentry_32_t; 270.70 -typedef u32 l2_pgentry_32_t; 270.71 -typedef u64 l1_pgentry_64_t; 270.72 -typedef u64 l2_pgentry_64_t; 270.73 -typedef u64 l3_pgentry_64_t; 270.74 -typedef unsigned long l1_pgentry_t; 270.75 -typedef unsigned long l2_pgentry_t; 270.76 -#if defined(__x86_64__) 270.77 -typedef unsigned long l3_pgentry_t; 270.78 -typedef unsigned long l4_pgentry_t; 270.79 -#endif 270.80 - 270.81 -#if defined(__i386__) 270.82 -#define l1_table_offset(_a) \ 270.83 - (((_a) >> L1_PAGETABLE_SHIFT) & (ENTRIES_PER_L1_PAGETABLE - 1)) 270.84 -#define l2_table_offset(_a) \ 270.85 - ((_a) >> L2_PAGETABLE_SHIFT) 270.86 -#define l1_table_offset_pae(_a) \ 270.87 - (((_a) >> L1_PAGETABLE_SHIFT_PAE) & (L1_PAGETABLE_ENTRIES_PAE - 1)) 270.88 -#define l2_table_offset_pae(_a) \ 270.89 - (((_a) >> L2_PAGETABLE_SHIFT_PAE) & (L2_PAGETABLE_ENTRIES_PAE - 1)) 270.90 -#define l3_table_offset_pae(_a) \ 270.91 - (((_a) >> L3_PAGETABLE_SHIFT_PAE) & (L3_PAGETABLE_ENTRIES_PAE - 1)) 270.92 -#elif defined(__x86_64__) 270.93 -#define l1_table_offset(_a) \ 270.94 - (((_a) >> L1_PAGETABLE_SHIFT) & (L1_PAGETABLE_ENTRIES - 1)) 270.95 -#define l2_table_offset(_a) \ 270.96 - (((_a) >> L2_PAGETABLE_SHIFT) & (L2_PAGETABLE_ENTRIES - 1)) 270.97 -#define l3_table_offset(_a) \ 270.98 - (((_a) >> L3_PAGETABLE_SHIFT) & (L3_PAGETABLE_ENTRIES - 1)) 270.99 -#define l4_table_offset(_a) \ 270.100 - (((_a) >> 
L4_PAGETABLE_SHIFT) & (L4_PAGETABLE_ENTRIES - 1)) 270.101 -#endif 270.102 - 270.103 -struct domain_setup_info 270.104 -{ 270.105 - unsigned long v_start; 270.106 - unsigned long v_end; 270.107 - unsigned long v_kernstart; 270.108 - unsigned long v_kernend; 270.109 - unsigned long v_kernentry; 270.110 - 270.111 - unsigned int load_symtab; 270.112 - unsigned int pae_kernel; 270.113 - unsigned long symtab_addr; 270.114 - unsigned long symtab_len; 270.115 -}; 270.116 - 270.117 -typedef int (*parseimagefunc)(char *image, unsigned long image_size, 270.118 - struct domain_setup_info *dsi); 270.119 -typedef int (*loadimagefunc)(char *image, unsigned long image_size, int xch, 270.120 - u32 dom, unsigned long *parray, 270.121 - struct domain_setup_info *dsi); 270.122 - 270.123 -struct load_funcs 270.124 -{ 270.125 - parseimagefunc parseimage; 270.126 - loadimagefunc loadimage; 270.127 -}; 270.128 - 270.129 #define ERROR(_m, _a...) \ 270.130 do { \ 270.131 int __saved_errno = errno; \ 270.132 @@ -186,97 +88,6 @@ static inline int do_dom0_op(int xc_hand 270.133 return ret; 270.134 } 270.135 270.136 -static inline int do_dom_mem_op(int xc_handle, 270.137 - unsigned int memop, 270.138 - unsigned int *extent_list, 270.139 - unsigned int nr_extents, 270.140 - unsigned int extent_order, 270.141 - domid_t domid) 270.142 -{ 270.143 - privcmd_hypercall_t hypercall; 270.144 - long ret = -EINVAL; 270.145 - 270.146 - hypercall.op = __HYPERVISOR_dom_mem_op; 270.147 - hypercall.arg[0] = (unsigned long)memop; 270.148 - hypercall.arg[1] = (unsigned long)extent_list; 270.149 - hypercall.arg[2] = (unsigned long)nr_extents; 270.150 - hypercall.arg[3] = (unsigned long)extent_order; 270.151 - hypercall.arg[4] = (unsigned long)domid; 270.152 - 270.153 - if ( (extent_list != NULL) && 270.154 - (mlock(extent_list, nr_extents*sizeof(unsigned long)) != 0) ) 270.155 - { 270.156 - PERROR("Could not lock memory for Xen hypercall"); 270.157 - goto out1; 270.158 - } 270.159 - 270.160 - if ( (ret = do_xen_hypercall(xc_handle, &hypercall)) < 0 ) 270.161 - { 270.162 - fprintf(stderr, "Dom_mem operation failed (rc=%ld errno=%d)-- need to" 270.163 - " rebuild the user-space tool set?\n",ret,errno); 270.164 - } 270.165 - 270.166 - if ( extent_list != NULL ) 270.167 - safe_munlock(extent_list, nr_extents*sizeof(unsigned long)); 270.168 - 270.169 - out1: 270.170 - return ret; 270.171 -} 270.172 - 270.173 -static inline int do_mmuext_op( 270.174 - int xc_handle, 270.175 - struct mmuext_op *op, 270.176 - unsigned int nr_ops, 270.177 - domid_t dom) 270.178 -{ 270.179 - privcmd_hypercall_t hypercall; 270.180 - long ret = -EINVAL; 270.181 - 270.182 - hypercall.op = __HYPERVISOR_mmuext_op; 270.183 - hypercall.arg[0] = (unsigned long)op; 270.184 - hypercall.arg[1] = (unsigned long)nr_ops; 270.185 - hypercall.arg[2] = (unsigned long)0; 270.186 - hypercall.arg[3] = (unsigned long)dom; 270.187 - 270.188 - if ( mlock(op, nr_ops*sizeof(*op)) != 0 ) 270.189 - { 270.190 - PERROR("Could not lock memory for Xen hypercall"); 270.191 - goto out1; 270.192 - } 270.193 - 270.194 - if ( (ret = do_xen_hypercall(xc_handle, &hypercall)) < 0 ) 270.195 - { 270.196 - fprintf(stderr, "Dom_mem operation failed (rc=%ld errno=%d)-- need to" 270.197 - " rebuild the user-space tool set?\n",ret,errno); 270.198 - } 270.199 - 270.200 - safe_munlock(op, nr_ops*sizeof(*op)); 270.201 - 270.202 - out1: 270.203 - return ret; 270.204 -} 270.205 - 270.206 - 270.207 -/* 270.208 - * PFN mapping. 
270.209 - */ 270.210 -int get_pfn_type_batch(int xc_handle, u32 dom, int num, unsigned long *arr); 270.211 -unsigned long csum_page (void * page); 270.212 - 270.213 -/* 270.214 - * MMU updates. 270.215 - */ 270.216 -#define MAX_MMU_UPDATES 1024 270.217 -typedef struct { 270.218 - mmu_update_t updates[MAX_MMU_UPDATES]; 270.219 - int idx; 270.220 - domid_t subject; 270.221 -} mmu_t; 270.222 -mmu_t *init_mmu_updates(int xc_handle, domid_t dom); 270.223 -int add_mmu_update(int xc_handle, mmu_t *mmu, 270.224 - unsigned long ptr, unsigned long val); 270.225 -int finish_mmu_updates(int xc_handle, mmu_t *mmu); 270.226 - 270.227 270.228 /* 270.229 * ioctl-based mfn mapping interface 270.230 @@ -296,38 +107,4 @@ typedef struct privcmd_mmap { 270.231 } privcmd_mmap_t; 270.232 */ 270.233 270.234 -#define mfn_mapper_queue_size 128 270.235 - 270.236 -typedef struct mfn_mapper { 270.237 - int xc_handle; 270.238 - int size; 270.239 - int prot; 270.240 - int error; 270.241 - int max_queue_size; 270.242 - void * addr; 270.243 - privcmd_mmap_t ioctl; 270.244 - 270.245 -} mfn_mapper_t; 270.246 - 270.247 -unsigned long xc_get_m2p_start_mfn (int xc_handle); 270.248 - 270.249 -int xc_copy_to_domain_page(int xc_handle, u32 domid, 270.250 - unsigned long dst_pfn, void *src_page); 270.251 - 270.252 -unsigned long xc_get_filesz(int fd); 270.253 - 270.254 -char *xc_read_kernel_image(const char *filename, unsigned long *size); 270.255 - 270.256 -void xc_map_memcpy(unsigned long dst, char *src, unsigned long size, 270.257 - int xch, u32 dom, unsigned long *parray, 270.258 - unsigned long vstart); 270.259 - 270.260 -int pin_table(int xc_handle, unsigned int type, unsigned long mfn, 270.261 - domid_t dom); 270.262 - 270.263 -/* image loading */ 270.264 -int probe_elf(char *image, unsigned long image_size, struct load_funcs *funcs); 270.265 -int probe_bin(char *image, unsigned long image_size, struct load_funcs *funcs); 270.266 -int probe_aout9(char *image, unsigned long image_size, struct load_funcs *funcs); 270.267 - 270.268 #endif /* __XC_PRIVATE_H__ */
272.1 --- a/tools/libxc/xc_vmx_build.c Wed Aug 24 16:16:52 2005 -0700 272.2 +++ b/tools/libxc/xc_vmx_build.c Thu Aug 25 11:18:47 2005 -0700 272.3 @@ -3,7 +3,7 @@ 272.4 */ 272.5 272.6 #include <stddef.h> 272.7 -#include "xc_private.h" 272.8 +#include "xg_private.h" 272.9 #define ELFSIZE 32 272.10 #include "xc_elf.h" 272.11 #include <stdlib.h> 272.12 @@ -243,7 +243,7 @@ static int setup_guest(int xc_handle, 272.13 shared_info_t *shared_info; 272.14 struct linux_boot_params * boot_paramsp; 272.15 __u16 * boot_gdtp; 272.16 - mmu_t *mmu = NULL; 272.17 + xc_mmu_t *mmu = NULL; 272.18 int rc; 272.19 272.20 unsigned long nr_pt_pages; 272.21 @@ -358,7 +358,7 @@ static int setup_guest(int xc_handle, 272.22 } 272.23 } 272.24 272.25 - if ( (mmu = init_mmu_updates(xc_handle, dom)) == NULL ) 272.26 + if ( (mmu = xc_init_mmu_updates(xc_handle, dom)) == NULL ) 272.27 goto error_out; 272.28 272.29 #ifdef __i386__ 272.30 @@ -459,9 +459,9 @@ static int setup_guest(int xc_handle, 272.31 /* Write the machine->phys table entries. */ 272.32 for ( count = 0; count < nr_pages; count++ ) 272.33 { 272.34 - if ( add_mmu_update(xc_handle, mmu, 272.35 - (page_array[count] << PAGE_SHIFT) | 272.36 - MMU_MACHPHYS_UPDATE, count) ) 272.37 + if ( xc_add_mmu_update(xc_handle, mmu, 272.38 + (page_array[count] << PAGE_SHIFT) | 272.39 + MMU_MACHPHYS_UPDATE, count) ) 272.40 goto error_out; 272.41 } 272.42 272.43 @@ -587,7 +587,7 @@ static int setup_guest(int xc_handle, 272.44 #endif 272.45 272.46 /* Send the page update requests down to the hypervisor. */ 272.47 - if ( finish_mmu_updates(xc_handle, mmu) ) 272.48 + if ( xc_finish_mmu_updates(xc_handle, mmu) ) 272.49 goto error_out; 272.50 272.51 free(mmu); 272.52 @@ -708,7 +708,7 @@ int xc_vmx_build(int xc_handle, 272.53 272.54 op.cmd = DOM0_GETDOMAININFO; 272.55 op.u.getdomaininfo.domain = (domid_t)domid; 272.56 - if ( (do_dom0_op(xc_handle, &op) < 0) || 272.57 + if ( (xc_dom0_op(xc_handle, &op) < 0) || 272.58 ((u16)op.u.getdomaininfo.domain != domid) ) 272.59 { 272.60 PERROR("Could not get info on domain"); 272.61 @@ -789,7 +789,7 @@ int xc_vmx_build(int xc_handle, 272.62 launch_op.u.setdomaininfo.ctxt = ctxt; 272.63 272.64 launch_op.cmd = DOM0_SETDOMAININFO; 272.65 - rc = do_dom0_op(xc_handle, &launch_op); 272.66 + rc = xc_dom0_op(xc_handle, &launch_op); 272.67 272.68 return rc; 272.69
273.1 --- /dev/null Thu Jan 01 00:00:00 1970 +0000 273.2 +++ b/tools/libxc/xenctrl.h Thu Aug 25 11:18:47 2005 -0700 273.3 @@ -0,0 +1,526 @@ 273.4 +/****************************************************************************** 273.5 + * xenctrl.h 273.6 + * 273.7 + * A library for low-level access to the Xen control interfaces. 273.8 + * 273.9 + * Copyright (c) 2003-2004, K A Fraser. 273.10 + */ 273.11 + 273.12 +#ifndef XENCTRL_H 273.13 +#define XENCTRL_H 273.14 + 273.15 +#include <stdint.h> 273.16 + 273.17 +typedef uint8_t u8; 273.18 +typedef uint16_t u16; 273.19 +typedef uint32_t u32; 273.20 +typedef uint64_t u64; 273.21 +typedef int8_t s8; 273.22 +typedef int16_t s16; 273.23 +typedef int32_t s32; 273.24 +typedef int64_t s64; 273.25 + 273.26 +#include <sys/ptrace.h> 273.27 +#include <xen/xen.h> 273.28 +#include <xen/dom0_ops.h> 273.29 +#include <xen/event_channel.h> 273.30 +#include <xen/sched_ctl.h> 273.31 +#include <xen/acm.h> 273.32 + 273.33 +#ifdef __ia64__ 273.34 +#define XC_PAGE_SHIFT 14 273.35 +#else 273.36 +#define XC_PAGE_SHIFT 12 273.37 +#endif 273.38 +#define XC_PAGE_SIZE (1UL << XC_PAGE_SHIFT) 273.39 +#define XC_PAGE_MASK (~(XC_PAGE_SIZE-1)) 273.40 + 273.41 +/* 273.42 + * DEFINITIONS FOR CPU BARRIERS 273.43 + */ 273.44 + 273.45 +#if defined(__i386__) 273.46 +#define mb() __asm__ __volatile__ ( "lock; addl $0,0(%%esp)" : : : "memory" ) 273.47 +#define rmb() __asm__ __volatile__ ( "lock; addl $0,0(%%esp)" : : : "memory" ) 273.48 +#define wmb() __asm__ __volatile__ ( "" : : : "memory") 273.49 +#elif defined(__x86_64__) 273.50 +#define mb() __asm__ __volatile__ ( "mfence" : : : "memory") 273.51 +#define rmb() __asm__ __volatile__ ( "lfence" : : : "memory") 273.52 +#define wmb() __asm__ __volatile__ ( "" : : : "memory") 273.53 +#elif defined(__ia64__) 273.54 +/* FIXME */ 273.55 +#define mb() 273.56 +#define rmb() 273.57 +#define wmb() 273.58 +#else 273.59 +#error "Define barriers" 273.60 +#endif 273.61 + 273.62 +/* 273.63 + * INITIALIZATION FUNCTIONS 273.64 + */ 273.65 + 273.66 +/** 273.67 + * This function opens a handle to the hypervisor interface. This function can 273.68 + * be called multiple times within a single process. Multiple processes can 273.69 + * have an open hypervisor interface at the same time. 273.70 + * 273.71 + * Each call to this function should have a corresponding call to 273.72 + * xc_interface_close(). 273.73 + * 273.74 + * This function can fail if the caller does not have superuser permission or 273.75 + * if a Xen-enabled kernel is not currently running. 273.76 + * 273.77 + * @return a handle to the hypervisor interface or -1 on failure 273.78 + */ 273.79 +int xc_interface_open(void); 273.80 + 273.81 +/** 273.82 + * This function closes an open hypervisor interface. 273.83 + * 273.84 + * This function can fail if the handle does not represent an open interface or 273.85 + * if there were problems closing the interface. 273.86 + * 273.87 + * @parm xc_handle a handle to an open hypervisor interface 273.88 + * @return 0 on success, -1 otherwise. 
273.89 + */ 273.90 +int xc_interface_close(int xc_handle); 273.91 + 273.92 +/* 273.93 + * DOMAIN DEBUGGING FUNCTIONS 273.94 + */ 273.95 + 273.96 +typedef struct xc_core_header { 273.97 + unsigned int xch_magic; 273.98 + unsigned int xch_nr_vcpus; 273.99 + unsigned int xch_nr_pages; 273.100 + unsigned int xch_ctxt_offset; 273.101 + unsigned int xch_index_offset; 273.102 + unsigned int xch_pages_offset; 273.103 +} xc_core_header_t; 273.104 + 273.105 + 273.106 +long xc_ptrace(enum __ptrace_request request, 273.107 + u32 domid, 273.108 + long addr, 273.109 + long data); 273.110 + 273.111 +long xc_ptrace_core(enum __ptrace_request request, 273.112 + u32 domid, 273.113 + long addr, 273.114 + long data); 273.115 + 273.116 +int xc_waitdomain(int domain, 273.117 + int *status, 273.118 + int options); 273.119 + 273.120 +int xc_waitdomain_core(int domain, 273.121 + int *status, 273.122 + int options); 273.123 + 273.124 +/* 273.125 + * DOMAIN MANAGEMENT FUNCTIONS 273.126 + */ 273.127 + 273.128 +typedef struct { 273.129 + u32 domid; 273.130 + u32 ssidref; 273.131 + unsigned int dying:1, crashed:1, shutdown:1, 273.132 + paused:1, blocked:1, running:1; 273.133 + unsigned int shutdown_reason; /* only meaningful if shutdown==1 */ 273.134 + unsigned long nr_pages; 273.135 + unsigned long shared_info_frame; 273.136 + u64 cpu_time; 273.137 + unsigned long max_memkb; 273.138 + unsigned int vcpus; 273.139 + s32 vcpu_to_cpu[MAX_VIRT_CPUS]; 273.140 + cpumap_t cpumap[MAX_VIRT_CPUS]; 273.141 +} xc_dominfo_t; 273.142 + 273.143 +typedef dom0_getdomaininfo_t xc_domaininfo_t; 273.144 +int xc_domain_create(int xc_handle, 273.145 + u32 ssidref, 273.146 + u32 *pdomid); 273.147 + 273.148 + 273.149 +int xc_domain_dumpcore(int xc_handle, 273.150 + u32 domid, 273.151 + const char *corename); 273.152 + 273.153 + 273.154 +/** 273.155 + * This function pauses a domain. A paused domain still exists in memory 273.156 + * however it does not receive any timeslices from the hypervisor. 273.157 + * 273.158 + * @parm xc_handle a handle to an open hypervisor interface 273.159 + * @parm domid the domain id to pause 273.160 + * @return 0 on success, -1 on failure. 273.161 + */ 273.162 +int xc_domain_pause(int xc_handle, 273.163 + u32 domid); 273.164 +/** 273.165 + * This function unpauses a domain. The domain should have been previously 273.166 + * paused. 273.167 + * 273.168 + * @parm xc_handle a handle to an open hypervisor interface 273.169 + * @parm domid the domain id to unpause 273.170 + * return 0 on success, -1 on failure 273.171 + */ 273.172 +int xc_domain_unpause(int xc_handle, 273.173 + u32 domid); 273.174 + 273.175 +/** 273.176 + * This function will destroy a domain. Destroying a domain removes the domain 273.177 + * completely from memory. This function should be called after sending the 273.178 + * domain a SHUTDOWN control message to free up the domain resources. 273.179 + * 273.180 + * @parm xc_handle a handle to an open hypervisor interface 273.181 + * @parm domid the domain id to destroy 273.182 + * @return 0 on success, -1 on failure 273.183 + */ 273.184 +int xc_domain_destroy(int xc_handle, 273.185 + u32 domid); 273.186 +int xc_domain_pincpu(int xc_handle, 273.187 + u32 domid, 273.188 + int vcpu, 273.189 + cpumap_t *cpumap); 273.190 +/** 273.191 + * This function will return information about one or more domains. It is 273.192 + * designed to iterate over the list of domains. If a single domain is 273.193 + * requested, this function will return the next domain in the list - if 273.194 + * one exists. 
It is, therefore, important in this case to make sure the 273.195 + * domain requested was the one returned. 273.196 + * 273.197 + * @parm xc_handle a handle to an open hypervisor interface 273.198 + * @parm first_domid the first domain to enumerate information from. Domains 273.199 + * are currently enumerate in order of creation. 273.200 + * @parm max_doms the number of elements in info 273.201 + * @parm info an array of max_doms size that will contain the information for 273.202 + * the enumerated domains. 273.203 + * @return the number of domains enumerated or -1 on error 273.204 + */ 273.205 +int xc_domain_getinfo(int xc_handle, 273.206 + u32 first_domid, 273.207 + unsigned int max_doms, 273.208 + xc_dominfo_t *info); 273.209 + 273.210 +/** 273.211 + * This function will return information about one or more domains, using a 273.212 + * single hypercall. The domain information will be stored into the supplied 273.213 + * array of xc_domaininfo_t structures. 273.214 + * 273.215 + * @parm xc_handle a handle to an open hypervisor interface 273.216 + * @parm first_domain the first domain to enumerate information from. 273.217 + * Domains are currently enumerate in order of creation. 273.218 + * @parm max_domains the number of elements in info 273.219 + * @parm info an array of max_doms size that will contain the information for 273.220 + * the enumerated domains. 273.221 + * @return the number of domains enumerated or -1 on error 273.222 + */ 273.223 +int xc_domain_getinfolist(int xc_handle, 273.224 + u32 first_domain, 273.225 + unsigned int max_domains, 273.226 + xc_domaininfo_t *info); 273.227 + 273.228 +/** 273.229 + * This function returns information about one domain. This information is 273.230 + * more detailed than the information from xc_domain_getinfo(). 
273.231 + * 273.232 + * @parm xc_handle a handle to an open hypervisor interface 273.233 + * @parm domid the domain to get information from 273.234 + * @parm info a pointer to an xc_domaininfo_t to store the domain information 273.235 + * @parm ctxt a pointer to a structure to store the execution context of the 273.236 + * domain 273.237 + * @return 0 on success, -1 on failure 273.238 + */ 273.239 +int xc_domain_get_vcpu_context(int xc_handle, 273.240 + u32 domid, 273.241 + u32 vcpu, 273.242 + vcpu_guest_context_t *ctxt); 273.243 + 273.244 +int xc_domain_setcpuweight(int xc_handle, 273.245 + u32 domid, 273.246 + float weight); 273.247 +long long xc_domain_get_cpu_usage(int xc_handle, 273.248 + domid_t domid, 273.249 + int vcpu); 273.250 + 273.251 + 273.252 +typedef dom0_shadow_control_stats_t xc_shadow_control_stats_t; 273.253 +int xc_shadow_control(int xc_handle, 273.254 + u32 domid, 273.255 + unsigned int sop, 273.256 + unsigned long *dirty_bitmap, 273.257 + unsigned long pages, 273.258 + xc_shadow_control_stats_t *stats); 273.259 + 273.260 +int xc_bvtsched_global_set(int xc_handle, 273.261 + unsigned long ctx_allow); 273.262 + 273.263 +int xc_bvtsched_domain_set(int xc_handle, 273.264 + u32 domid, 273.265 + u32 mcuadv, 273.266 + int warpback, 273.267 + s32 warpvalue, 273.268 + long long warpl, 273.269 + long long warpu); 273.270 + 273.271 +int xc_bvtsched_global_get(int xc_handle, 273.272 + unsigned long *ctx_allow); 273.273 + 273.274 +int xc_bvtsched_domain_get(int xc_handle, 273.275 + u32 domid, 273.276 + u32 *mcuadv, 273.277 + int *warpback, 273.278 + s32 *warpvalue, 273.279 + long long *warpl, 273.280 + long long *warpu); 273.281 + 273.282 +int xc_sedf_domain_set(int xc_handle, 273.283 + u32 domid, 273.284 + u64 period, u64 slice, u64 latency, u16 extratime, u16 weight); 273.285 + 273.286 +int xc_sedf_domain_get(int xc_handle, 273.287 + u32 domid, 273.288 + u64* period, u64 *slice, u64 *latency, u16 *extratime, u16* weight); 273.289 + 273.290 +typedef evtchn_status_t xc_evtchn_status_t; 273.291 + 273.292 +/* 273.293 + * EVENT CHANNEL FUNCTIONS 273.294 + */ 273.295 + 273.296 +/** 273.297 + * This function allocates an unbound port. Ports are named endpoints used for 273.298 + * interdomain communication. This function is most useful in opening a 273.299 + * well-known port within a domain to receive events on. 273.300 + * 273.301 + * @parm xc_handle a handle to an open hypervisor interface 273.302 + * @parm dom the ID of the domain. This maybe DOMID_SELF 273.303 + * @parm port a pointer to a port. This is an in/out parameter. If *port is 273.304 + * 0, then a new port will be assigned, if port is > 0 then that 273.305 + * port is allocated if the port is unallocated. 273.306 + * @return 0 on success, -1 on failure 273.307 + */ 273.308 +int xc_evtchn_alloc_unbound(int xc_handle, 273.309 + u32 dom, 273.310 + int *port); 273.311 + 273.312 +/** 273.313 + * This function creates a pair of ports between two domains. A port can only 273.314 + * be bound once within a domain. 273.315 + * 273.316 + * @parm xc_handle a handle to an open hypervisor interface 273.317 + * @parm dom1 one of the two domains to connect. Can be DOMID_SELF. 273.318 + * @parm dom2 the other domain to connect. Can be DOMID_SELF. 273.319 + * @parm port1 an in/out parameter. If > 0, then try to connect *port. If 273.320 + * 0, then allocate a new port and store the port in *port. 273.321 + * @parm port2 the port connected on port2. This parameter behaves the same 273.322 + * way as port1. 
273.323 + * @return 0 on success, -1 on error. 273.324 + */ 273.325 +int xc_evtchn_bind_interdomain(int xc_handle, 273.326 + u32 dom1, 273.327 + u32 dom2, 273.328 + int *port1, 273.329 + int *port2); 273.330 +int xc_evtchn_bind_virq(int xc_handle, 273.331 + int virq, 273.332 + int *port); 273.333 + 273.334 +/** 273.335 + * This function will close a single port on an event channel. 273.336 + * 273.337 + * @parm xc_handle a handle to an open hypervisor interface 273.338 + * @parm dom the domain that the port exists on. May be DOMID_SELF. 273.339 + * @parm port the port to close 273.340 + * @return 0 on success, -1 on error 273.341 + */ 273.342 +int xc_evtchn_close(int xc_handle, 273.343 + u32 dom, /* may be DOMID_SELF */ 273.344 + int port); 273.345 + 273.346 +/** 273.347 + * This function generates a notify event on a bound port. 273.348 + * 273.349 + * Notifies can be read within Linux by opening /dev/xen/evtchn and reading 273.350 + * a 16 bit value. The result will be the port the event occurred on. When 273.351 + * events occur, the port is masked until the 16 bit port value is written back 273.352 + * to the file. When /dev/xen/evtchn is opened, it has to be bound via an 273.353 + * ioctl to each port to listen on. The ioctl for binding is _IO('E', 2). The 273.354 + * parameter is the port to listen on. 273.355 + * 273.356 + * @parm xc_handle a handle to an open hypervisor interface 273.357 + * @parm local_port the port to generate the notify on 273.358 + * @return 0 on success, -1 on error 273.359 + */ 273.360 +int xc_evtchn_send(int xc_handle, 273.361 + int local_port); 273.362 +int xc_evtchn_status(int xc_handle, 273.363 + u32 dom, /* may be DOMID_SELF */ 273.364 + int port, 273.365 + xc_evtchn_status_t *status); 273.366 + 273.367 +int xc_physdev_pci_access_modify(int xc_handle, 273.368 + u32 domid, 273.369 + int bus, 273.370 + int dev, 273.371 + int func, 273.372 + int enable); 273.373 + 273.374 +int xc_readconsolering(int xc_handle, 273.375 + char **pbuffer, 273.376 + unsigned int *pnr_chars, 273.377 + int clear); 273.378 + 273.379 +typedef dom0_physinfo_t xc_physinfo_t; 273.380 +int xc_physinfo(int xc_handle, 273.381 + xc_physinfo_t *info); 273.382 + 273.383 +int xc_sched_id(int xc_handle, 273.384 + int *sched_id); 273.385 + 273.386 +int xc_domain_setmaxmem(int xc_handle, 273.387 + u32 domid, 273.388 + unsigned int max_memkb); 273.389 + 273.390 +int xc_domain_memory_increase_reservation(int xc_handle, 273.391 + u32 domid, 273.392 + unsigned int mem_kb); 273.393 + 273.394 +typedef dom0_perfc_desc_t xc_perfc_desc_t; 273.395 +/* IMPORTANT: The caller is responsible for mlock()'ing the @desc array. */ 273.396 +int xc_perfc_control(int xc_handle, 273.397 + u32 op, 273.398 + xc_perfc_desc_t *desc); 273.399 + 273.400 +/* read/write msr */ 273.401 +long long xc_msr_read(int xc_handle, int cpu_mask, int msr); 273.402 +int xc_msr_write(int xc_handle, int cpu_mask, int msr, unsigned int low, 273.403 + unsigned int high); 273.404 + 273.405 +/** 273.406 + * Memory maps a range within one domain to a local address range. Mappings 273.407 + * should be unmapped with munmap and should follow the same rules as mmap 273.408 + * regarding page alignment. Returns NULL on failure. 273.409 + * 273.410 + * In Linux, the ring queue for the control channel is accessible by mapping 273.411 + * the shared_info_frame (from xc_domain_getinfo()) + 2048. The structure 273.412 + * stored there is of type control_if_t. 
273.413 + * 273.414 + * @parm xc_handle a handle on an open hypervisor interface 273.415 + * @parm dom the domain to map memory from 273.416 + * @parm size the amount of memory to map (in multiples of page size) 273.417 + * @parm prot same flag as in mmap(). 273.418 + * @parm mfn the frame address to map. 273.419 + */ 273.420 +void *xc_map_foreign_range(int xc_handle, u32 dom, 273.421 + int size, int prot, 273.422 + unsigned long mfn ); 273.423 + 273.424 +void *xc_map_foreign_batch(int xc_handle, u32 dom, int prot, 273.425 + unsigned long *arr, int num ); 273.426 + 273.427 +int xc_get_pfn_list(int xc_handle, u32 domid, unsigned long *pfn_buf, 273.428 + unsigned long max_pfns); 273.429 + 273.430 +int xc_ia64_get_pfn_list(int xc_handle, u32 domid, unsigned long *pfn_buf, 273.431 + unsigned int start_page, unsigned int nr_pages); 273.432 + 273.433 +int xc_mmuext_op(int xc_handle, struct mmuext_op *op, unsigned int nr_ops, 273.434 + domid_t dom); 273.435 + 273.436 +int xc_dom_mem_op(int xc_handle, unsigned int memop, unsigned int *extent_list, 273.437 + unsigned int nr_extents, unsigned int extent_order, 273.438 + domid_t domid); 273.439 + 273.440 +int xc_get_pfn_type_batch(int xc_handle, u32 dom, int num, unsigned long *arr); 273.441 + 273.442 + 273.443 +/*\ 273.444 + * GRANT TABLE FUNCTIONS 273.445 +\*/ 273.446 + 273.447 +/** 273.448 + * This function opens a handle to the more restricted grant table hypervisor 273.449 + * interface. This may be used where the standard interface is not 273.450 + * available because the domain is not privileged. 273.451 + * This function can be called multiple times within a single process. 273.452 + * Multiple processes can have an open hypervisor interface at the same time. 273.453 + * 273.454 + * Each call to this function should have a corresponding call to 273.455 + * xc_grant_interface_close(). 273.456 + * 273.457 + * This function can fail if a Xen-enabled kernel is not currently running. 273.458 + * 273.459 + * @return a handle to the hypervisor grant table interface or -1 on failure 273.460 + */ 273.461 +int xc_grant_interface_open(void); 273.462 + 273.463 +/** 273.464 + * This function closes an open grant table hypervisor interface. 273.465 + * 273.466 + * This function can fail if the handle does not represent an open interface or 273.467 + * if there were problems closing the interface. 273.468 + * 273.469 + * @parm xc_handle a handle to an open grant table hypervisor interface 273.470 + * @return 0 on success, -1 otherwise. 273.471 + */ 273.472 +int xc_grant_interface_close(int xc_handle); 273.473 + 273.474 +int xc_gnttab_map_grant_ref(int xc_handle, 273.475 + u64 host_virt_addr, 273.476 + u32 dom, 273.477 + u16 ref, 273.478 + u16 flags, 273.479 + s16 *handle, 273.480 + u64 *dev_bus_addr); 273.481 + 273.482 +int xc_gnttab_unmap_grant_ref(int xc_handle, 273.483 + u64 host_virt_addr, 273.484 + u64 dev_bus_addr, 273.485 + u16 handle, 273.486 + s16 *status); 273.487 + 273.488 +int xc_gnttab_setup_table(int xc_handle, 273.489 + u32 dom, 273.490 + u16 nr_frames, 273.491 + s16 *status, 273.492 + unsigned long **frame_list); 273.493 + 273.494 +/* Grant debug builds only: */ 273.495 +int xc_gnttab_dump_table(int xc_handle, 273.496 + u32 dom, 273.497 + s16 *status); 273.498 + 273.499 +/* Get current total pages allocated to a domain. */ 273.500 +long xc_get_tot_pages(int xc_handle, u32 domid); 273.501 + 273.502 +/* Execute a privileged dom0 operation. 
*/ 273.503 +int xc_dom0_op(int xc_handle, dom0_op_t *op); 273.504 + 273.505 +/* Initializes the store (for dom0) 273.506 + remote_port should be the remote end of a bound interdomain channel between 273.507 + the store and dom0. 273.508 + 273.509 + This function returns a shared frame that should be passed to 273.510 + xs_introduce_domain 273.511 + */ 273.512 +long xc_init_store(int xc_handle, int remote_port); 273.513 + 273.514 +/* 273.515 + * MMU updates. 273.516 + */ 273.517 +#define MAX_MMU_UPDATES 1024 273.518 +struct xc_mmu { 273.519 + mmu_update_t updates[MAX_MMU_UPDATES]; 273.520 + int idx; 273.521 + domid_t subject; 273.522 +}; 273.523 +typedef struct xc_mmu xc_mmu_t; 273.524 +xc_mmu_t *xc_init_mmu_updates(int xc_handle, domid_t dom); 273.525 +int xc_add_mmu_update(int xc_handle, xc_mmu_t *mmu, 273.526 + unsigned long ptr, unsigned long val); 273.527 +int xc_finish_mmu_updates(int xc_handle, xc_mmu_t *mmu); 273.528 + 273.529 +#endif
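As a usage note, a minimal sketch (not part of the changeset) of a client of the interface declared above: it opens the hypervisor handle, enumerates a few domains with xc_domain_getinfo(), and closes the handle again. It assumes a Xen-enabled kernel and sufficient privilege, as the header's comments require, and prints only a simplified state string:

/*
 * Hedged sketch: list the first few domains via the xenctrl.h interface.
 * Only calls declared above are used; domain 0 is assumed to exist.
 */
#include <stdio.h>
#include <xenctrl.h>

int main(void)
{
    xc_dominfo_t info[16];
    int i, nr;
    int xc = xc_interface_open();   /* fails without root or a Xen kernel */

    if (xc < 0) {
        perror("xc_interface_open");
        return 1;
    }

    nr = xc_domain_getinfo(xc, 0, 16, info);
    for (i = 0; i < nr; i++)
        printf("dom%u: %lu pages, %s\n", info[i].domid, info[i].nr_pages,
               info[i].running ? "running" : "not running");

    xc_interface_close(xc);
    return nr < 0 ? 1 : 0;
}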
274.1 --- /dev/null Thu Jan 01 00:00:00 1970 +0000 274.2 +++ b/tools/libxc/xenguest.h Thu Aug 25 11:18:47 2005 -0700 274.3 @@ -0,0 +1,66 @@ 274.4 +/****************************************************************************** 274.5 + * xenguest.h 274.6 + * 274.7 + * A library for guest domain management in Xen. 274.8 + * 274.9 + * Copyright (c) 2003-2004, K A Fraser. 274.10 + */ 274.11 + 274.12 +#ifndef XENBUILD_H 274.13 +#define XENBUILD_H 274.14 + 274.15 +#define XCFLAGS_VERBOSE 1 274.16 +#define XCFLAGS_LIVE 2 274.17 +#define XCFLAGS_DEBUG 4 274.18 +#define XCFLAGS_CONFIGURE 8 274.19 + 274.20 +/** 274.21 + * This function will save a domain running Linux. 274.22 + * 274.23 + * @parm xc_handle a handle to an open hypervisor interface 274.24 + * @parm fd the file descriptor to save a domain to 274.25 + * @parm dom the id of the domain 274.26 + * @return 0 on success, -1 on failure 274.27 + */ 274.28 +int xc_linux_save(int xc_handle, int fd, uint32_t dom); 274.29 + 274.30 +/** 274.31 + * This function will restore a saved domain running Linux. 274.32 + * 274.33 + * @parm xc_handle a handle to an open hypervisor interface 274.34 + * @parm fd the file descriptor to restore a domain from 274.35 + * @parm dom the id of the domain 274.36 + * @parm nr_pfns the number of pages 274.37 + * @parm store_evtchn the store event channel for this domain to use 274.38 + * @parm store_mfn returned with the mfn of the store page 274.39 + * @return 0 on success, -1 on failure 274.40 + */ 274.41 +int xc_linux_restore(int xc_handle, int io_fd, uint32_t dom, unsigned long nr_pfns, 274.42 + unsigned int store_evtchn, unsigned long *store_mfn); 274.43 + 274.44 +int xc_linux_build(int xc_handle, 274.45 + uint32_t domid, 274.46 + const char *image_name, 274.47 + const char *ramdisk_name, 274.48 + const char *cmdline, 274.49 + unsigned int control_evtchn, 274.50 + unsigned long flags, 274.51 + unsigned int vcpus, 274.52 + unsigned int store_evtchn, 274.53 + unsigned long *store_mfn); 274.54 + 274.55 +struct mem_map; 274.56 +int xc_vmx_build(int xc_handle, 274.57 + uint32_t domid, 274.58 + int memsize, 274.59 + const char *image_name, 274.60 + struct mem_map *memmap, 274.61 + const char *ramdisk_name, 274.62 + const char *cmdline, 274.63 + unsigned int control_evtchn, 274.64 + unsigned long flags, 274.65 + unsigned int vcpus, 274.66 + unsigned int store_evtchn, 274.67 + unsigned long *store_mfn); 274.68 + 274.69 +#endif
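A hedged sketch of a caller of the guest-image API declared above, along the lines of tools/xcutils/xc_save.c now that it links against -lxenguest -lxenctrl; the usage string, file handling and exit codes are illustrative only:

/*
 * Hedged sketch: save a running Linux domain to a file with xc_linux_save().
 * <stdint.h> is included before <xenguest.h> for the uint32_t prototypes.
 */
#include <err.h>
#include <fcntl.h>
#include <stdint.h>
#include <stdlib.h>
#include <sys/stat.h>
#include <xenctrl.h>
#include <xenguest.h>

int main(int argc, char **argv)
{
    int xc, fd;
    uint32_t dom;

    if (argc != 3)
        errx(1, "usage: %s <domid> <file>", argv[0]);

    dom = strtoul(argv[1], NULL, 0);
    fd  = open(argv[2], O_WRONLY | O_CREAT | O_TRUNC, 0644);
    if (fd < 0)
        err(1, "open %s", argv[2]);

    xc = xc_interface_open();
    if (xc < 0)
        err(1, "xc_interface_open");

    if (xc_linux_save(xc, fd, dom) != 0)
        errx(1, "xc_linux_save failed");

    xc_interface_close(xc);
    return 0;
}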
275.1 --- /dev/null Thu Jan 01 00:00:00 1970 +0000 275.2 +++ b/tools/libxc/xg_private.c Thu Aug 25 11:18:47 2005 -0700 275.3 @@ -0,0 +1,86 @@ 275.4 +/****************************************************************************** 275.5 + * xg_private.c 275.6 + * 275.7 + * Helper functions for the rest of the library. 275.8 + */ 275.9 + 275.10 +#include <stdlib.h> 275.11 +#include <zlib.h> 275.12 + 275.13 +#include "xg_private.h" 275.14 + 275.15 +char *xc_read_kernel_image(const char *filename, unsigned long *size) 275.16 +{ 275.17 + int kernel_fd = -1; 275.18 + gzFile kernel_gfd = NULL; 275.19 + char *image = NULL; 275.20 + unsigned int bytes; 275.21 + 275.22 + if ( (kernel_fd = open(filename, O_RDONLY)) < 0 ) 275.23 + { 275.24 + PERROR("Could not open kernel image"); 275.25 + goto out; 275.26 + } 275.27 + 275.28 + if ( (*size = xc_get_filesz(kernel_fd)) == 0 ) 275.29 + { 275.30 + PERROR("Could not read kernel image"); 275.31 + goto out; 275.32 + } 275.33 + 275.34 + if ( (kernel_gfd = gzdopen(kernel_fd, "rb")) == NULL ) 275.35 + { 275.36 + PERROR("Could not allocate decompression state for state file"); 275.37 + goto out; 275.38 + } 275.39 + 275.40 + if ( (image = malloc(*size)) == NULL ) 275.41 + { 275.42 + PERROR("Could not allocate memory for kernel image"); 275.43 + goto out; 275.44 + } 275.45 + 275.46 + if ( (bytes = gzread(kernel_gfd, image, *size)) != *size ) 275.47 + { 275.48 + PERROR("Error reading kernel image, could not" 275.49 + " read the whole image (%d != %ld).", bytes, *size); 275.50 + free(image); 275.51 + image = NULL; 275.52 + } 275.53 + 275.54 + out: 275.55 + if ( kernel_gfd != NULL ) 275.56 + gzclose(kernel_gfd); 275.57 + else if ( kernel_fd >= 0 ) 275.58 + close(kernel_fd); 275.59 + return image; 275.60 +} 275.61 + 275.62 +/*******************/ 275.63 + 275.64 +int pin_table( 275.65 + int xc_handle, unsigned int type, unsigned long mfn, domid_t dom) 275.66 +{ 275.67 + struct mmuext_op op; 275.68 + 275.69 + op.cmd = type; 275.70 + op.mfn = mfn; 275.71 + 275.72 + if ( xc_mmuext_op(xc_handle, &op, 1, dom) < 0 ) 275.73 + return 1; 275.74 + 275.75 + return 0; 275.76 +} 275.77 + 275.78 +/* This is shared between save and restore, and may generally be useful. */ 275.79 +unsigned long csum_page (void * page) 275.80 +{ 275.81 + int i; 275.82 + unsigned long *p = page; 275.83 + unsigned long long sum=0; 275.84 + 275.85 + for ( i = 0; i < (PAGE_SIZE/sizeof(unsigned long)); i++ ) 275.86 + sum += p[i]; 275.87 + 275.88 + return sum ^ (sum>>32); 275.89 +}
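A minimal sketch (not part of the changeset) of how code inside libxc might use the relocated pin_table() helper; MMUEXT_PIN_L2_TABLE is assumed to be the mmuext_op command from <xen/xen.h>, and l2_mfn and dom are placeholders a domain builder would already hold:

/*
 * Hedged sketch: pin a freshly written guest L2 page table. pin_table()
 * wraps a single-entry xc_mmuext_op() batch and returns non-zero on failure.
 */
#include "xg_private.h"

static int pin_guest_l2(int xc_handle, unsigned long l2_mfn, domid_t dom)
{
    return pin_table(xc_handle, MMUEXT_PIN_L2_TABLE, l2_mfn, dom) ? -1 : 0;
}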
276.1 --- /dev/null Thu Jan 01 00:00:00 1970 +0000 276.2 +++ b/tools/libxc/xg_private.h Thu Aug 25 11:18:47 2005 -0700 276.3 @@ -0,0 +1,170 @@ 276.4 +#ifndef XG_PRIVATE_H 276.5 +#define XG_PRIVATE_H 276.6 + 276.7 +#include <errno.h> 276.8 +#include <fcntl.h> 276.9 +#include <stdio.h> 276.10 +#include <string.h> 276.11 +#include <sys/mman.h> 276.12 +#include <sys/types.h> 276.13 +#include <sys/stat.h> 276.14 + 276.15 +#include "xenctrl.h" 276.16 + 276.17 +#include <xen/linux/privcmd.h> 276.18 + 276.19 +char *xc_read_kernel_image(const char *filename, unsigned long *size); 276.20 +unsigned long csum_page (void * page); 276.21 + 276.22 +#define _PAGE_PRESENT 0x001 276.23 +#define _PAGE_RW 0x002 276.24 +#define _PAGE_USER 0x004 276.25 +#define _PAGE_PWT 0x008 276.26 +#define _PAGE_PCD 0x010 276.27 +#define _PAGE_ACCESSED 0x020 276.28 +#define _PAGE_DIRTY 0x040 276.29 +#define _PAGE_PAT 0x080 276.30 +#define _PAGE_PSE 0x080 276.31 +#define _PAGE_GLOBAL 0x100 276.32 + 276.33 +#if defined(__i386__) 276.34 +#define L1_PAGETABLE_SHIFT 12 276.35 +#define L2_PAGETABLE_SHIFT 22 276.36 +#define L1_PAGETABLE_SHIFT_PAE 12 276.37 +#define L2_PAGETABLE_SHIFT_PAE 21 276.38 +#define L3_PAGETABLE_SHIFT_PAE 30 276.39 +#elif defined(__x86_64__) 276.40 +#define L1_PAGETABLE_SHIFT 12 276.41 +#define L2_PAGETABLE_SHIFT 21 276.42 +#define L3_PAGETABLE_SHIFT 30 276.43 +#define L4_PAGETABLE_SHIFT 39 276.44 +#endif 276.45 + 276.46 +#if defined(__i386__) 276.47 +#define ENTRIES_PER_L1_PAGETABLE 1024 276.48 +#define ENTRIES_PER_L2_PAGETABLE 1024 276.49 +#define L1_PAGETABLE_ENTRIES_PAE 512 276.50 +#define L2_PAGETABLE_ENTRIES_PAE 512 276.51 +#define L3_PAGETABLE_ENTRIES_PAE 4 276.52 +#elif defined(__x86_64__) 276.53 +#define L1_PAGETABLE_ENTRIES 512 276.54 +#define L2_PAGETABLE_ENTRIES 512 276.55 +#define L3_PAGETABLE_ENTRIES 512 276.56 +#define L4_PAGETABLE_ENTRIES 512 276.57 +#endif 276.58 + 276.59 +#define PAGE_SHIFT XC_PAGE_SHIFT 276.60 +#define PAGE_SIZE (1UL << PAGE_SHIFT) 276.61 +#define PAGE_MASK (~(PAGE_SIZE-1)) 276.62 + 276.63 +typedef u32 l1_pgentry_32_t; 276.64 +typedef u32 l2_pgentry_32_t; 276.65 +typedef u64 l1_pgentry_64_t; 276.66 +typedef u64 l2_pgentry_64_t; 276.67 +typedef u64 l3_pgentry_64_t; 276.68 +typedef unsigned long l1_pgentry_t; 276.69 +typedef unsigned long l2_pgentry_t; 276.70 +#if defined(__x86_64__) 276.71 +typedef unsigned long l3_pgentry_t; 276.72 +typedef unsigned long l4_pgentry_t; 276.73 +#endif 276.74 + 276.75 +#if defined(__i386__) 276.76 +#define l1_table_offset(_a) \ 276.77 + (((_a) >> L1_PAGETABLE_SHIFT) & (ENTRIES_PER_L1_PAGETABLE - 1)) 276.78 +#define l2_table_offset(_a) \ 276.79 + ((_a) >> L2_PAGETABLE_SHIFT) 276.80 +#define l1_table_offset_pae(_a) \ 276.81 + (((_a) >> L1_PAGETABLE_SHIFT_PAE) & (L1_PAGETABLE_ENTRIES_PAE - 1)) 276.82 +#define l2_table_offset_pae(_a) \ 276.83 + (((_a) >> L2_PAGETABLE_SHIFT_PAE) & (L2_PAGETABLE_ENTRIES_PAE - 1)) 276.84 +#define l3_table_offset_pae(_a) \ 276.85 + (((_a) >> L3_PAGETABLE_SHIFT_PAE) & (L3_PAGETABLE_ENTRIES_PAE - 1)) 276.86 +#elif defined(__x86_64__) 276.87 +#define l1_table_offset(_a) \ 276.88 + (((_a) >> L1_PAGETABLE_SHIFT) & (L1_PAGETABLE_ENTRIES - 1)) 276.89 +#define l2_table_offset(_a) \ 276.90 + (((_a) >> L2_PAGETABLE_SHIFT) & (L2_PAGETABLE_ENTRIES - 1)) 276.91 +#define l3_table_offset(_a) \ 276.92 + (((_a) >> L3_PAGETABLE_SHIFT) & (L3_PAGETABLE_ENTRIES - 1)) 276.93 +#define l4_table_offset(_a) \ 276.94 + (((_a) >> L4_PAGETABLE_SHIFT) & (L4_PAGETABLE_ENTRIES - 1)) 276.95 +#endif 276.96 + 276.97 +#define ERROR(_m, _a...) 
\ 276.98 +do { \ 276.99 + int __saved_errno = errno; \ 276.100 + fprintf(stderr, "ERROR: " _m "\n" , ## _a ); \ 276.101 + errno = __saved_errno; \ 276.102 +} while (0) 276.103 + 276.104 + 276.105 +#define PERROR(_m, _a...) \ 276.106 +do { \ 276.107 + int __saved_errno = errno; \ 276.108 + fprintf(stderr, "ERROR: " _m " (%d = %s)\n" , ## _a , \ 276.109 + __saved_errno, strerror(__saved_errno)); \ 276.110 + errno = __saved_errno; \ 276.111 +} while (0) 276.112 + 276.113 + 276.114 +struct domain_setup_info 276.115 +{ 276.116 + unsigned long v_start; 276.117 + unsigned long v_end; 276.118 + unsigned long v_kernstart; 276.119 + unsigned long v_kernend; 276.120 + unsigned long v_kernentry; 276.121 + 276.122 + unsigned int load_symtab; 276.123 + unsigned int pae_kernel; 276.124 + unsigned long symtab_addr; 276.125 + unsigned long symtab_len; 276.126 +}; 276.127 + 276.128 +typedef int (*parseimagefunc)(char *image, unsigned long image_size, 276.129 + struct domain_setup_info *dsi); 276.130 +typedef int (*loadimagefunc)(char *image, unsigned long image_size, int xch, 276.131 + u32 dom, unsigned long *parray, 276.132 + struct domain_setup_info *dsi); 276.133 + 276.134 +struct load_funcs 276.135 +{ 276.136 + parseimagefunc parseimage; 276.137 + loadimagefunc loadimage; 276.138 +}; 276.139 + 276.140 +#define mfn_mapper_queue_size 128 276.141 + 276.142 +typedef struct mfn_mapper { 276.143 + int xc_handle; 276.144 + int size; 276.145 + int prot; 276.146 + int error; 276.147 + int max_queue_size; 276.148 + void * addr; 276.149 + privcmd_mmap_t ioctl; 276.150 + 276.151 +} mfn_mapper_t; 276.152 + 276.153 +unsigned long xc_get_m2p_start_mfn (int xc_handle); 276.154 + 276.155 +int xc_copy_to_domain_page(int xc_handle, u32 domid, 276.156 + unsigned long dst_pfn, void *src_page); 276.157 + 276.158 +unsigned long xc_get_filesz(int fd); 276.159 + 276.160 +void xc_map_memcpy(unsigned long dst, char *src, unsigned long size, 276.161 + int xch, u32 dom, unsigned long *parray, 276.162 + unsigned long vstart); 276.163 + 276.164 +int pin_table(int xc_handle, unsigned int type, unsigned long mfn, 276.165 + domid_t dom); 276.166 + 276.167 +/* image loading */ 276.168 +int probe_elf(char *image, unsigned long image_size, struct load_funcs *funcs); 276.169 +int probe_bin(char *image, unsigned long image_size, struct load_funcs *funcs); 276.170 +int probe_aout9(char *image, unsigned long image_size, struct load_funcs *funcs); 276.171 + 276.172 +#endif 276.173 +
277.1 --- a/tools/misc/Makefile Wed Aug 24 16:16:52 2005 -0700 277.2 +++ b/tools/misc/Makefile Thu Aug 25 11:18:47 2005 -0700 277.3 @@ -50,4 +50,4 @@ clean: 277.4 $(CC) -c $(CFLAGS) -o $@ $< 277.5 277.6 $(TARGETS): %: %.o Makefile 277.7 - $(CC) $(CFLAGS) -o $@ $< -L$(XEN_LIBXC) -lxc 277.8 + $(CC) $(CFLAGS) -o $@ $< -L$(XEN_LIBXC) -lxenctrl
278.1 --- a/tools/misc/cpuperf/Makefile Wed Aug 24 16:16:52 2005 -0700 278.2 +++ b/tools/misc/cpuperf/Makefile Thu Aug 25 11:18:47 2005 -0700 278.3 @@ -37,7 +37,7 @@ clean: 278.4 $(CC) $(CFLAGS) -o $@ $< 278.5 278.6 cpuperf-xen: cpuperf.c $(HDRS) Makefile 278.7 - $(CC) $(CFLAGS) -I $(XEN_LIBXC) -L$(XEN_LIBXC) -lxc -DXENO -o $@ $< 278.8 + $(CC) $(CFLAGS) -I $(XEN_LIBXC) -L$(XEN_LIBXC) -lxenctrl -DXENO -o $@ $< 278.9 278.10 cpuperf-perfcntr: cpuperf.c $(HDRS) Makefile 278.11 $(CC) $(CFLAGS) -DPERFCNTR -o $@ $<
279.1 --- a/tools/misc/cpuperf/cpuperf_xeno.h Wed Aug 24 16:16:52 2005 -0700 279.2 +++ b/tools/misc/cpuperf/cpuperf_xeno.h Thu Aug 25 11:18:47 2005 -0700 279.3 @@ -9,7 +9,7 @@ 279.4 * 279.5 */ 279.6 279.7 -#include <xc.h> 279.8 +#include <xenctrl.h> 279.9 279.10 static int xc_handle; 279.11
280.1 --- a/tools/misc/xc_shadow.c Wed Aug 24 16:16:52 2005 -0700 280.2 +++ b/tools/misc/xc_shadow.c Thu Aug 25 11:18:47 2005 -0700 280.3 @@ -11,7 +11,7 @@ 280.4 */ 280.5 280.6 280.7 -#include <xc.h> 280.8 +#include <xenctrl.h> 280.9 #include <stdio.h> 280.10 #include <stdlib.h> 280.11 #include <sys/mman.h>
282.1 --- a/tools/misc/xenperf.c Wed Aug 24 16:16:52 2005 -0700 282.2 +++ b/tools/misc/xenperf.c Thu Aug 25 11:18:47 2005 -0700 282.3 @@ -11,7 +11,7 @@ 282.4 */ 282.5 282.6 282.7 -#include <xc.h> 282.8 +#include <xenctrl.h> 282.9 #include <stdio.h> 282.10 #include <stdlib.h> 282.11 #include <sys/mman.h>
283.1 --- a/tools/python/setup.py Wed Aug 24 16:16:52 2005 -0700 283.2 +++ b/tools/python/setup.py Thu Aug 25 11:18:47 2005 -0700 283.3 @@ -17,7 +17,7 @@ library_dirs = [ XEN_ROOT + "/tools/libx 283.4 XEN_ROOT + "/tools/xenstore", 283.5 ] 283.6 283.7 -libraries = [ "xc", "xenstore-pic" ] 283.8 +libraries = [ "xenctrl", "xenguest", "xenstore" ] 283.9 283.10 xc = Extension("xc", 283.11 extra_compile_args = extra_compile_args, 283.12 @@ -41,7 +41,7 @@ xs = Extension("xs", 283.13 sources = [ "xen/lowlevel/xs/xs.c" ]) 283.14 283.15 setup(name = 'xen', 283.16 - version = '2.0', 283.17 + version = '3.0', 283.18 description = 'Xen', 283.19 packages = ['xen', 283.20 'xen.lowlevel',
284.1 --- a/tools/python/xen/lowlevel/xc/xc.c Wed Aug 24 16:16:52 2005 -0700 284.2 +++ b/tools/python/xen/lowlevel/xc/xc.c Thu Aug 25 11:18:47 2005 -0700 284.3 @@ -5,7 +5,8 @@ 284.4 */ 284.5 284.6 #include <Python.h> 284.7 -#include <xc.h> 284.8 +#include <xenctrl.h> 284.9 +#include <xenguest.h> 284.10 #include <zlib.h> 284.11 #include <fcntl.h> 284.12 #include <netinet/in.h>
286.1 --- a/tools/python/xen/lowlevel/xu/xu.c Wed Aug 24 16:16:52 2005 -0700 286.2 +++ b/tools/python/xen/lowlevel/xu/xu.c Thu Aug 25 11:18:47 2005 -0700 286.3 @@ -21,7 +21,7 @@ 286.4 #include <unistd.h> 286.5 #include <errno.h> 286.6 #include <signal.h> 286.7 -#include <xc.h> 286.8 +#include <xenctrl.h> 286.9 286.10 #include <xen/xen.h> 286.11 #include <xen/io/domain_controller.h>
317.1 --- a/tools/python/xen/xend/XendDomainInfo.py Wed Aug 24 16:16:52 2005 -0700 317.2 +++ b/tools/python/xen/xend/XendDomainInfo.py Thu Aug 25 11:18:47 2005 -0700 317.3 @@ -265,6 +265,8 @@ class XendDomainInfo: 317.4 self.info = None 317.5 self.blkif_backend = False 317.6 self.netif_backend = False 317.7 + self.netif_idx = 0 317.8 + 317.9 #todo: state: running, suspended 317.10 self.state = STATE_VM_OK 317.11 self.state_updated = threading.Condition() 317.12 @@ -400,8 +402,7 @@ class XendDomainInfo: 317.13 db['virtual-device'] = "%i" % devnum 317.14 #db['backend'] = sxp.child_value(devconfig, 'backend', '0') 317.15 db['backend'] = backdb.getPath() 317.16 - db['backend-id'] = "%i" % int(sxp.child_value(devconfig, 317.17 - 'backend', '0')) 317.18 + db['backend-id'] = "%i" % backdom.id 317.19 317.20 backdb['frontend'] = db.getPath() 317.21 (type, params) = string.split(sxp.child_value(devconfig, 'uname'), ':', 1) 317.22 @@ -417,6 +418,37 @@ class XendDomainInfo: 317.23 db.saveDB(save=True) 317.24 317.25 return 317.26 + 317.27 + if type == 'vif': 317.28 + backdom = domain_exists(sxp.child_value(devconfig, 'backend', '0')) 317.29 + 317.30 + log.error(devconfig) 317.31 + 317.32 + devnum = self.netif_idx 317.33 + self.netif_idx += 1 317.34 + 317.35 + # create backend db 317.36 + backdb = backdom.db.addChild("/backend/%s/%s/%d" % 317.37 + (type, self.uuid, devnum)) 317.38 + 317.39 + # create frontend db 317.40 + db = self.db.addChild("/device/%s/%d" % (type, devnum)) 317.41 + 317.42 + backdb['frontend'] = db.getPath() 317.43 + backdb['frontend-id'] = "%i" % self.id 317.44 + backdb['handle'] = "%i" % devnum 317.45 + backdb.saveDB(save=True) 317.46 + 317.47 + db['backend'] = backdb.getPath() 317.48 + db['backend-id'] = "%i" % backdom.id 317.49 + db['handle'] = "%i" % devnum 317.50 + log.error(sxp.child_value(devconfig, 'mac')) 317.51 + db['mac'] = sxp.child_value(devconfig, 'mac') 317.52 + 317.53 + db.saveDB(save=True) 317.54 + 317.55 + return 317.56 + 317.57 ctrl = self.findDeviceController(type) 317.58 return ctrl.createDevice(devconfig, recreate=self.recreate, 317.59 change=change) 317.60 @@ -718,6 +750,11 @@ class XendDomainInfo: 317.61 devdb['node'].getData()) 317.62 typedb[dev].delete() 317.63 typedb.saveDB(save=True) 317.64 + if type == 'vif': 317.65 + typedb = ddb.addChild(type) 317.66 + for dev in typedb.keys(): 317.67 + typedb[dev].delete() 317.68 + typedb.saveDB(save=True) 317.69 317.70 def show(self): 317.71 """Print virtual machine info.
354.1 --- a/tools/python/xen/xm/main.py Wed Aug 24 16:16:52 2005 -0700 354.2 +++ b/tools/python/xen/xm/main.py Thu Aug 25 11:18:47 2005 -0700 354.3 @@ -665,8 +665,10 @@ def main(argv=sys.argv): 354.4 err("Most commands need root access. Please try again as root") 354.5 sys.exit(1) 354.6 except XendError, ex: 354.7 + if args[0] == "bogus": 354.8 + args.remove("bogus") 354.9 if len(args) > 0: 354.10 - handle_xend_error(argv[1], args[1], ex) 354.11 + handle_xend_error(argv[1], args[0], ex) 354.12 else: 354.13 print "Unexpected error:", sys.exc_info()[0] 354.14 print
395.1 --- a/tools/xcs/Makefile Wed Aug 24 16:16:52 2005 -0700 395.2 +++ b/tools/xcs/Makefile Thu Aug 25 11:18:47 2005 -0700 395.3 @@ -34,10 +34,10 @@ clean: 395.4 395.5 xcsdump: xcsdump.c dump.c 395.6 $(CC) $(CFLAGS) -o xcsdump xcsdump.c -L$(XEN_LIBXC) \ 395.7 - ctrl_interface.c evtchn.c dump.c -lxc 395.8 + ctrl_interface.c evtchn.c dump.c -lxenctrl 395.9 395.10 $(BIN): $(OBJS) 395.11 - $(CC) $(CFLAGS) $^ -o $@ -L$(XEN_LIBXC) -lxc 395.12 + $(CC) $(CFLAGS) $^ -o $@ -L$(XEN_LIBXC) -lxenctrl 395.13 395.14 $(OBJS): $(HDRS) 395.15
396.1 --- a/tools/xcs/dump.h Wed Aug 24 16:16:52 2005 -0700 396.2 +++ b/tools/xcs/dump.h Thu Aug 25 11:18:47 2005 -0700 396.3 @@ -20,7 +20,7 @@ 396.4 #define XENCTLD_ERROR_H 396.5 396.6 #include <stdint.h> 396.7 -#include <xc.h> 396.8 +#include <xenctrl.h> 396.9 #include <xen/io/domain_controller.h> 396.10 396.11 void dump_msg(const control_msg_t *msg, uint64_t flags);
397.1 --- a/tools/xcs/xcs.h Wed Aug 24 16:16:52 2005 -0700 397.2 +++ b/tools/xcs/xcs.h Thu Aug 25 11:18:47 2005 -0700 397.3 @@ -11,7 +11,7 @@ 397.4 #define __XCS_H__ 397.5 397.6 #include <pthread.h> 397.7 -#include <xc.h> 397.8 +#include <xenctrl.h> 397.9 #include <xen/xen.h> 397.10 #include <xen/io/domain_controller.h> 397.11 #include <xen/linux/privcmd.h>
398.1 --- a/tools/xcs/xcsdump.c Wed Aug 24 16:16:52 2005 -0700
398.2 +++ b/tools/xcs/xcsdump.c Thu Aug 25 11:18:47 2005 -0700
398.3 @@ -16,7 +16,7 @@
398.4 #include <sys/socket.h>
398.5 #include <sys/un.h>
398.6 #include <ctype.h>
398.7 -#include <xc.h>
398.8 +#include <xenctrl.h>
398.9 #include <xen/xen.h>
398.10 #include <xen/io/domain_controller.h>
398.11 #include <getopt.h>
399.1 --- a/tools/xcutils/Makefile Wed Aug 24 16:16:52 2005 -0700
399.2 +++ b/tools/xcutils/Makefile Thu Aug 25 11:18:47 2005 -0700
399.3 @@ -30,7 +30,7 @@ PROG_DEP = .*.d
399.4 
399.5 PROGRAMS = xc_restore xc_save
399.6 
399.7 -LDLIBS = -L$(XEN_LIBXC) -lxc
399.8 +LDLIBS = -L$(XEN_LIBXC) -lxenguest -lxenctrl
399.9 
399.10 .PHONY: all
399.11 all: build
400.1 --- a/tools/xcutils/xc_restore.c Wed Aug 24 16:16:52 2005 -0700
400.2 +++ b/tools/xcutils/xc_restore.c Thu Aug 25 11:18:47 2005 -0700
400.3 @@ -7,11 +7,12 @@
400.4  *
400.5  */
400.6 
400.7 +#include <err.h>
400.8 #include <stdlib.h>
400.9 +#include <stdint.h>
400.10 #include <stdio.h>
400.11 -#include <err.h>
400.12 
400.13 -#include <xc.h>
400.14 +#include <xenguest.h>
400.15 
400.16 int
400.17 main(int argc, char **argv)
401.1 --- a/tools/xcutils/xc_save.c Wed Aug 24 16:16:52 2005 -0700
401.2 +++ b/tools/xcutils/xc_save.c Thu Aug 25 11:18:47 2005 -0700
401.3 @@ -7,11 +7,12 @@
401.4  *
401.5  */
401.6 
401.7 +#include <err.h>
401.8 #include <stdlib.h>
401.9 +#include <stdint.h>
401.10 #include <stdio.h>
401.11 -#include <err.h>
401.12 
401.13 -#include <xc.h>
401.14 +#include <xenguest.h>
401.15 
401.16 int
401.17 main(int argc, char **argv)
416.1 --- a/tools/xenstore/Makefile Wed Aug 24 16:16:52 2005 -0700
416.2 +++ b/tools/xenstore/Makefile Thu Aug 25 11:18:47 2005 -0700
416.3 @@ -32,7 +32,7 @@ xen:
416.4 	ln -sf $(XEN_ROOT)/xen/include/public $@
416.5 
416.6 xenstored: xenstored_core.o xenstored_watch.o xenstored_domain.o xenstored_transaction.o xs_lib.o talloc.o utils.o
416.7 -	$(LINK.o) $^ $(LOADLIBES) $(LDLIBS) -lxc -o $@
416.8 +	$(LINK.o) $^ $(LOADLIBES) $(LDLIBS) -lxenctrl -o $@
416.9 
416.10 xenstored_test: xenstored_core_test.o xenstored_watch_test.o xenstored_domain_test.o xenstored_transaction_test.o xs_lib.o talloc_test.o fake_libxc.o utils.o
416.11 	$(LINK.o) $^ $(LOADLIBES) $(LDLIBS) -o $@
416.12 @@ -109,7 +109,7 @@ stresstest: xs_stress xenstored_test
416.13 	export $(TESTENV); PID=`./xenstored_test --output-pid --trace-file=/tmp/trace`; ./xs_stress 5000; ret=$$?; kill $$PID; exit $$ret
416.14 
416.15 xs_dom0_test: xs_dom0_test.o utils.o
416.16 -	$(LINK.o) $^ $(LOADLIBES) $(LDLIBS) -lxc -o $@
416.17 +	$(LINK.o) $^ $(LOADLIBES) $(LDLIBS) -lxenctrl -o $@
416.18 
416.19 TAGS:
416.20 	etags `find . -name '*.[ch]'`
435.1 --- a/tools/xenstore/xs_dom0_test.c Wed Aug 24 16:16:52 2005 -0700
435.2 +++ b/tools/xenstore/xs_dom0_test.c Thu Aug 25 11:18:47 2005 -0700
435.3 @@ -3,7 +3,7 @@
435.4 #include <sys/ioctl.h>
435.5 #include "xs.h"
435.6 #include "utils.h"
435.7 -#include <xc.h>
435.8 +#include <xenctrl.h>
435.9 #include <xen/linux/privcmd.h>
435.10 #include <stdio.h>
435.11 #include <unistd.h>
437.1 --- a/tools/xenstore/xs_lib.h Wed Aug 24 16:16:52 2005 -0700
437.2 +++ b/tools/xenstore/xs_lib.h Thu Aug 25 11:18:47 2005 -0700
437.3 @@ -22,7 +22,7 @@
437.4 
437.5 #include <stdbool.h>
437.6 #include <limits.h>
437.7 -#include <xc.h>
437.8 +#include <xenctrl.h>
437.9 
437.10 /* Bitmask of permissions. */
437.11 enum xs_perm_type {
440.1 --- a/tools/xentrace/Makefile Wed Aug 24 16:16:52 2005 -0700
440.2 +++ b/tools/xentrace/Makefile Thu Aug 25 11:18:47 2005 -0700
440.3 @@ -36,4 +36,4 @@ clean:
440.4 	$(RM) *.a *.so *.o *.rpm $(BIN)
440.5 
440.6 %: %.c $(HDRS) Makefile
440.7 -	$(CC) $(CFLAGS) -o $@ $< -L$(XEN_LIBXC) -lxc
440.8 +	$(CC) $(CFLAGS) -o $@ $< -L$(XEN_LIBXC) -lxenctrl
441.1 --- a/tools/xentrace/xenctx.c Wed Aug 24 16:16:52 2005 -0700
441.2 +++ b/tools/xentrace/xenctx.c Thu Aug 25 11:18:47 2005 -0700
441.3 @@ -21,7 +21,7 @@
441.4 #include <argp.h>
441.5 #include <signal.h>
441.6 
441.7 -#include "xc.h"
441.8 +#include "xenctrl.h"
441.9 
441.10 #ifdef __i386__
441.11 void print_ctx(vcpu_guest_context_t *ctx1)
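The tools hunks above (xcs, xcutils, xenstore, xentrace) all make the same switch: the old monolithic libxc interface (<xc.h>, linked with -lxc) is replaced by the split control library (<xenctrl.h>, -lxenctrl), while the domain build/save/restore helpers used by xc_save and xc_restore move to <xenguest.h>/-lxenguest. A minimal sketch of what a converted control tool looks like, assuming the Xen-3.0-era handle API (int xc_interface_open(void) / xc_interface_close(int)); exact signatures may differ in this tree and the program itself is illustrative, not part of the changeset:

/* demo.c - sketch only: open and close the privileged control interface.
 * Previously this would have been #include <xc.h> and linked with -lxc;
 * after this changeset it includes <xenctrl.h> and links with -lxenctrl. */
#include <stdio.h>
#include <xenctrl.h>

int main(void)
{
    int xc = xc_interface_open();      /* assumed era API: returns fd, -1 on error */
    if (xc < 0) {
        perror("xc_interface_open");
        return 1;
    }
    /* ... control-plane calls (pause, getinfo, trace buffers, ...) go here ... */
    xc_interface_close(xc);
    return 0;
}

Such a tool would be built along the lines of the Makefile changes above, e.g. $(CC) $(CFLAGS) -o demo demo.c -L$(XEN_LIBXC) -lxenctrl, adding -lxenguest only when it also calls the builder or save/restore routines.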
506.1 --- a/xen/arch/x86/x86_32/mm.c Wed Aug 24 16:16:52 2005 -0700
506.2 +++ b/xen/arch/x86/x86_32/mm.c Thu Aug 25 11:18:47 2005 -0700
506.3 @@ -93,15 +93,10 @@ void __init paging_init(void)
506.4 
506.5     /*
506.6      * Allocate and map the machine-to-phys table and create read-only mapping
506.7 -    * of MPT for guest-OS use. Without PAE we'll end up with one 4MB page,
506.8 -    * with PAE we'll allocate 2MB pages depending on the amount of memory
506.9 -    * installed, but at least 4MB to cover 4GB address space. This is needed
506.10 -    * to make PCI I/O memory address lookups work in guests.
506.11 +    * of MPT for guest-OS use.
506.12      */
506.13     mpt_size  = (max_page * 4) + (1UL << L2_PAGETABLE_SHIFT) - 1UL;
506.14     mpt_size &= ~((1UL << L2_PAGETABLE_SHIFT) - 1UL);
506.15 -    if ( mpt_size < (4 << 20) )
506.16 -        mpt_size = 4 << 20;
506.17     for ( i = 0; i < (mpt_size >> L2_PAGETABLE_SHIFT); i++ )
506.18     {
506.19         if ( (pg = alloc_domheap_pages(NULL, PAGETABLE_ORDER, 0)) == NULL )
509.1 --- a/xen/arch/x86/x86_64/mm.c Wed Aug 24 16:16:52 2005 -0700
509.2 +++ b/xen/arch/x86/x86_64/mm.c Thu Aug 25 11:18:47 2005 -0700
509.3 @@ -74,7 +74,7 @@ l2_pgentry_t *virt_to_xen_l2e(unsigned l
509.4 
509.5 void __init paging_init(void)
509.6 {
509.7 -    unsigned long i;
509.8 +    unsigned long i, mpt_size;
509.9     l3_pgentry_t *l3_ro_mpt;
509.10     l2_pgentry_t *l2_ro_mpt;
509.11     struct pfn_info *pg;
509.12 @@ -98,16 +98,17 @@ void __init paging_init(void)
509.13      * Allocate and map the machine-to-phys table.
509.14      * This also ensures L3 is present for fixmaps.
509.15      */
509.16 -    for ( i = 0; i < max_page; i += ((1UL << L2_PAGETABLE_SHIFT) / 8) )
509.17 +    mpt_size = (max_page * 4) + (1UL << L2_PAGETABLE_SHIFT) - 1UL;
509.18 +    mpt_size &= ~((1UL << L2_PAGETABLE_SHIFT) - 1UL);
509.19 +    for ( i = 0; i < (mpt_size >> L2_PAGETABLE_SHIFT); i++ )
509.20     {
509.21 -        pg = alloc_domheap_pages(NULL, PAGETABLE_ORDER, 0);
509.22 -        if ( pg == NULL )
509.23 +        if ( (pg = alloc_domheap_pages(NULL, PAGETABLE_ORDER, 0)) == NULL )
509.24             panic("Not enough memory for m2p table\n");
509.25         map_pages_to_xen(
509.26 -            RDWR_MPT_VIRT_START + i*8, page_to_pfn(pg),
509.27 +            RDWR_MPT_VIRT_START + (i << L2_PAGETABLE_SHIFT), page_to_pfn(pg),
509.28             1UL << PAGETABLE_ORDER,
509.29             PAGE_HYPERVISOR);
509.30 -        memset((void *)(RDWR_MPT_VIRT_START + i*8), 0x55,
509.31 +        memset((void *)(RDWR_MPT_VIRT_START + (i << L2_PAGETABLE_SHIFT)), 0x55,
509.32             1UL << L2_PAGETABLE_SHIFT);
509.33         *l2_ro_mpt++ = l2e_from_page(
509.34             pg, _PAGE_GLOBAL|_PAGE_PSE|_PAGE_USER|_PAGE_PRESENT);
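Both paging_init() hunks above size the machine-to-phys table the same way: four bytes per machine frame (max_page entries), rounded up to a whole number of L2 superpage mappings, with the x86_32 side also dropping its old 4MB floor. The following standalone sketch, not from the changeset, just replays that arithmetic; the 2MB superpage shift and the 1GB example are assumptions for illustration (non-PAE x86_32 uses a 4MB L2 superpage instead).

/* mpt_size.c - sketch of the m2p sizing arithmetic used in the hunks above. */
#include <stdio.h>

#define L2_PAGETABLE_SHIFT 21          /* assumption: 2MB superpages (PAE/x86_64) */

int main(void)
{
    unsigned long max_page = 262144;   /* hypothetical: 1GB of RAM in 4kB frames */
    unsigned long mpt_size;

    /* 4 bytes per frame, rounded up to a whole L2 superpage. */
    mpt_size  = (max_page * 4) + (1UL << L2_PAGETABLE_SHIFT) - 1UL;
    mpt_size &= ~((1UL << L2_PAGETABLE_SHIFT) - 1UL);

    /* 262144 * 4 = 1MB of entries, rounded up to one 2MB superpage. */
    printf("m2p table: %lu bytes, %lu superpage mapping(s)\n",
           mpt_size, mpt_size >> L2_PAGETABLE_SHIFT);
    return 0;
}

Rounding to the superpage size matters because the allocation loop maps the table with one PSE (superpage) L2 entry per iteration, so mpt_size must be an exact multiple of the L2 mapping granularity.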