debuggers.hg
changeset 3568:47059455441d
bitkeeper revision 1.1159.230.2 (41f65234Pi4Crimteaw690fX-H8jyg)
Use list_for_each_entry() in preference to list_for_each().
signed-off-by: keir.fraser@cl.cam.ac.uk
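Every hunk below applies the same mechanical transformation: replace a `list_for_each()` over a bare `struct list_head` cursor, plus an explicit `list_entry()` cast in the loop body, with a single `list_for_each_entry()` whose cursor is already the containing type. A minimal, self-contained sketch of the before/after idiom (the `struct item` type and its fields are illustrative, not from the Xen tree; the macros mirror `xen/include/xen/list.h`, with the `prefetch()` hints omitted):

```c
/* Before/after sketch of the idiom this changeset applies everywhere. */
#include <stddef.h>
#include <stdio.h>

struct list_head {
    struct list_head *next, *prev;
};

#define LIST_HEAD_INIT(name) { &(name), &(name) }

static void list_add_tail(struct list_head *new, struct list_head *head)
{
    new->prev = head->prev;
    new->next = head;
    head->prev->next = new;
    head->prev = new;
}

/* Map a member pointer back to its containing structure. */
#define list_entry(ptr, type, member) \
    ((type *)((char *)(ptr) - offsetof(type, member)))

/* Old style: the cursor is a bare list_head; every body needs list_entry(). */
#define list_for_each(pos, head) \
    for ((pos) = (head)->next; (pos) != (head); (pos) = (pos)->next)

/* New style: the cursor is the containing type itself (typeof is a GCC
 * extension, as in the real header). */
#define list_for_each_entry(pos, head, member)                          \
    for ((pos) = list_entry((head)->next, typeof(*(pos)), member);      \
         &(pos)->member != (head);                                      \
         (pos) = list_entry((pos)->member.next, typeof(*(pos)), member))

struct item {
    int value;
    struct list_head list;
};

int main(void)
{
    struct list_head q = LIST_HEAD_INIT(q);
    struct item a = { .value = 1 }, b = { .value = 2 };
    struct list_head *ent;
    struct item *it;

    list_add_tail(&a.list, &q);
    list_add_tail(&b.list, &q);

    /* Before: an extra cursor variable and a cast on every iteration. */
    list_for_each ( ent, &q )
    {
        it = list_entry(ent, struct item, list);
        printf("old-style: %d\n", it->value);
    }

    /* After: the loop variable is the entry itself. */
    list_for_each_entry ( it, &q, list )
        printf("new-style: %d\n", it->value);

    return 0;
}
```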
| author | kaf24@scramble.cl.cam.ac.uk |
|---|---|
| date | Tue Jan 25 14:05:40 2005 +0000 (2005-01-25) |
| parents | f1210445f1f9 |
| children | dee91b44a753 |
| files | xen/arch/x86/domain.c xen/arch/x86/mpparse.c xen/arch/x86/pci-pc.c xen/arch/x86/pdb-stub.c xen/common/physdev.c xen/common/sched_atropos.c xen/common/sched_bvt.c xen/common/sched_rrobin.c xen/common/slab.c xen/drivers/pci/pci.c xen/include/xen/list.h xen/include/xen/pci.h |
line diff
```diff
--- a/xen/arch/x86/domain.c	Tue Jan 25 14:05:02 2005 +0000
+++ b/xen/arch/x86/domain.c	Tue Jan 25 14:05:40 2005 +0000
@@ -199,13 +199,11 @@ void machine_halt(void)
 void dump_pageframe_info(struct domain *d)
 {
     struct pfn_info *page;
-    struct list_head *ent;
 
     if ( d->tot_pages < 10 )
     {
-        list_for_each ( ent, &d->page_list )
+        list_for_each_entry ( page, &d->page_list, list )
         {
-            page = list_entry(ent, struct pfn_info, list);
             printk("Page %08x: caf=%08x, taf=%08x\n",
                    page_to_phys(page), page->count_info,
                    page->u.inuse.type_info);
```
```diff
--- a/xen/arch/x86/mpparse.c	Tue Jan 25 14:05:02 2005 +0000
+++ b/xen/arch/x86/mpparse.c	Tue Jan 25 14:05:40 2005 +0000
@@ -1232,7 +1232,6 @@ void __init mp_config_acpi_legacy_irqs (
 
 void __init mp_parse_prt (void)
 {
-	struct list_head	*node = NULL;
 	struct acpi_prt_entry	*entry = NULL;
 	int			ioapic = -1;
 	int			ioapic_pin = 0;
@@ -1245,9 +1244,7 @@ void __init mp_parse_prt (void)
 	 * Parsing through the PCI Interrupt Routing Table (PRT) and program
 	 * routing for all entries.
 	 */
-	list_for_each(node, &acpi_prt.entries) {
-		entry = list_entry(node, struct acpi_prt_entry, node);
-
+	list_for_each_entry(entry, &acpi_prt.entries, node) {
 		/* Need to get irq for dynamic entry */
 		if (entry->link.handle) {
 			irq = acpi_pci_link_get_irq(entry->link.handle, entry->link.index, &edge_level, &active_high_low);
```
```diff
--- a/xen/arch/x86/pci-pc.c	Tue Jan 25 14:05:02 2005 +0000
+++ b/xen/arch/x86/pci-pc.c	Tue Jan 25 14:05:40 2005 +0000
@@ -1372,11 +1372,9 @@ void __devinit pcibios_fixup_bus(struct 
 
 struct pci_bus * __devinit pcibios_scan_root(int busnum)
 {
-	struct list_head *list;
 	struct pci_bus *bus;
 
-	list_for_each(list, &pci_root_buses) {
-		bus = pci_bus_b(list);
+	pci_for_each_bus(bus) {
 		if (bus->number == busnum) {
 			/* Already scanned */
 			return bus;
```
```diff
--- a/xen/arch/x86/pdb-stub.c	Tue Jan 25 14:05:02 2005 +0000
+++ b/xen/arch/x86/pdb-stub.c	Tue Jan 25 14:05:40 2005 +0000
@@ -778,12 +778,10 @@ void pdb_bkpt_add (unsigned long cr3, un
 struct pdb_breakpoint* pdb_bkpt_search (unsigned long cr3,
                                         unsigned long address)
 {
-    struct list_head *list_entry;
     struct pdb_breakpoint *bkpt;
 
-    list_for_each(list_entry, &breakpoints.list)
+    list_for_each_entry ( bkpt, &breakpoints.list, list )
     {
-        bkpt = list_entry(list_entry, struct pdb_breakpoint, list);
         if ( bkpt->cr3 == cr3 && bkpt->address == address )
             return bkpt;
     }
@@ -797,12 +795,10 @@ struct pdb_breakpoint* pdb_bkpt_search (
  */
 int pdb_bkpt_remove (unsigned long cr3, unsigned long address)
 {
-    struct list_head *list_entry;
     struct pdb_breakpoint *bkpt;
 
-    list_for_each(list_entry, &breakpoints.list)
+    list_for_each_entry ( bkpt, &breakpoints.list, list )
     {
-        bkpt = list_entry(list_entry, struct pdb_breakpoint, list);
         if ( bkpt->cr3 == cr3 && bkpt->address == address )
         {
             list_del(&bkpt->list);
```
```diff
--- a/xen/common/physdev.c	Tue Jan 25 14:05:02 2005 +0000
+++ b/xen/common/physdev.c	Tue Jan 25 14:05:40 2005 +0000
@@ -73,11 +73,9 @@ typedef struct _phys_dev_st {
 static phys_dev_t *find_pdev(struct domain *p, struct pci_dev *dev)
 {
     phys_dev_t *t, *res = NULL;
-    struct list_head *tmp;
 
-    list_for_each(tmp, &p->pcidev_list)
+    list_for_each_entry ( t, &p->pcidev_list, node )
     {
-        t = list_entry(tmp, phys_dev_t, node);
         if ( dev == t->dev )
         {
             res = t;
@@ -149,9 +147,9 @@ int physdev_pci_access_modify(
 
     /* Make the domain privileged. */
     set_bit(DF_PHYSDEV, &p->flags);
-    /* FIXME: MAW for now make the domain REALLY privileged so that it 
-     * can run a backend driver (hw access should work OK otherwise) */ 
-    set_bit(DF_PRIVILEGED, &p->flags); 
+    /* FIXME: MAW for now make the domain REALLY privileged so that it
+     * can run a backend driver (hw access should work OK otherwise) */
+    set_bit(DF_PRIVILEGED, &p->flags);
 
     /* Grant write access to the specified device. */
     if ( (pdev = pci_find_slot(bus, PCI_DEVFN(dev, func))) == NULL )
@@ -214,17 +212,16 @@ int physdev_pci_access_modify(
 int domain_iomem_in_pfn(struct domain *p, unsigned long pfn)
 {
     int ret = 0;
-    struct list_head *l;
+    phys_dev_t *phys_dev;
 
    VERBOSE_INFO("Checking if physdev-capable domain %u needs access to "
                 "pfn %08lx\n", p->id, pfn);
 
     spin_lock(&p->pcidev_lock);
 
-    list_for_each(l, &p->pcidev_list)
+    list_for_each_entry ( phys_dev, &p->pcidev_list, node )
     {
         int i;
-        phys_dev_t *phys_dev = list_entry(l, phys_dev_t, node);
         struct pci_dev *pci_dev = phys_dev->dev;
 
         for ( i = 0; (i < DEVICE_COUNT_RESOURCE) && (ret == 0); i++ )
@@ -619,15 +616,11 @@ static long pci_cfgreg_write(int bus, in
 static long pci_probe_root_buses(u32 *busmask)
 {
     phys_dev_t *pdev;
-    struct list_head *tmp;
 
     memset(busmask, 0, 256/8);
 
-    list_for_each ( tmp, &current->pcidev_list )
-    {
-        pdev = list_entry(tmp, phys_dev_t, node);
+    list_for_each_entry ( pdev, &current->pcidev_list, node )
         set_bit(pdev->dev->bus->number, busmask);
-    }
 
     return 0;
 }
```
```diff
--- a/xen/common/sched_atropos.c	Tue Jan 25 14:05:02 2005 +0000
+++ b/xen/common/sched_atropos.c	Tue Jan 25 14:05:40 2005 +0000
@@ -1,6 +1,6 @@
 /*
- * atropos.c 
- * --------- 
+ * atropos.c
+ * ---------
  *
  * Copyright (c) 1994 University of Cambridge Computer Laboratory.
  * This is part of Nemesis; consult your contract for terms and conditions.
@@ -98,8 +98,9 @@ static inline int __task_on_runqueue(str
 static int q_len(struct list_head *q)
 {
     int i = 0;
-    struct list_head *tmp;
-    list_for_each(tmp, q) i++;
+    struct at_dom_info *tmp;
+    list_for_each_entry ( tmp, q, waitq )
+        i++;
     return i;
 }
 
@@ -129,60 +130,39 @@ static inline struct domain *waitq_el(st
  */
 static void requeue(struct domain *sdom)
 {
-    struct at_dom_info *inf = DOM_INFO(sdom);
-    struct list_head *prev;
-    struct list_head *next;
+    struct at_dom_info *i, *inf = DOM_INFO(sdom);
 
-
-    if(!domain_runnable(sdom)) return;
+    if ( !domain_runnable(sdom) )
+        return;
 
-    if(inf->state == ATROPOS_TASK_WAIT ||
-       inf->state == ATROPOS_TASK_UNBLOCKED)
+    if ( (inf->state == ATROPOS_TASK_WAIT) ||
+         (inf->state == ATROPOS_TASK_UNBLOCKED) )
     {
-        prev = WAITQ(sdom->processor);
-
-        list_for_each(next, WAITQ(sdom->processor))
+        list_for_each_entry ( i, WAITQ(sdom->processor), waitq )
         {
-            struct at_dom_info *i =
-                list_entry(next, struct at_dom_info, waitq);
             if ( i->deadline > inf->deadline )
             {
-                __list_add(&inf->waitq, prev, next);
+                __list_add(&inf->waitq, i->waitq.prev, &i->waitq);
                 break;
             }
-
-            prev = next;
         }
 
-        /* put the domain on the end of the list if it hasn't been put
-         * elsewhere */
-        if ( next == WAITQ(sdom->processor) )
+        if ( &i->waitq == WAITQ(sdom->processor) )
             list_add_tail(&inf->waitq, WAITQ(sdom->processor));
     }
     else if ( domain_runnable(sdom) )
     {
-        /* insert into ordered run queue */
-
-        prev = RUNQ(sdom->processor);
-
-        list_for_each(next, RUNQ(sdom->processor))
+        list_for_each_entry ( i, RUNQ(sdom->processor), run_list )
         {
-            struct at_dom_info *p = list_entry(next, struct at_dom_info,
-                                               run_list);
-
-            if( p->deadline > inf->deadline || is_idle_task(p->owner) )
+            if ( (i->deadline > inf->deadline) || is_idle_task(i->owner) )
             {
-                __list_add(&inf->run_list, prev, next);
+                __list_add(&inf->run_list, i->run_list.prev, &i->run_list);
                 break;
             }
-
-            prev = next;
         }
 
-        if ( next == RUNQ(sdom->processor) )
+        if ( &i->waitq == RUNQ(sdom->processor) )
             list_add_tail(&inf->run_list, RUNQ(sdom->processor));
-
-
     }
     /* silently ignore tasks in other states like BLOCKED, DYING, STOPPED, etc
      * - they shouldn't be on any queue */
@@ -194,7 +174,7 @@ static int at_alloc_task(struct domain *
     ASSERT(p != NULL);
 
     p->sched_priv = xmem_cache_alloc(dom_info_cache);
-    if( p->sched_priv == NULL )
+    if ( p->sched_priv == NULL )
         return -1;
 
     return 0;
@@ -294,26 +274,26 @@ static void unblock(struct domain *sdom)
    {
        /* Long blocking case */
 
-	/* The sdom has passed its deadline since it was blocked.
-	   Give it its new deadline based on the latency value. */
-	inf->prevddln = time;
+        /* The sdom has passed its deadline since it was blocked.
+           Give it its new deadline based on the latency value. */
+        inf->prevddln = time;
 
        /* Scale the scheduling parameters as requested by the latency hint. */
-	inf->deadline = time + inf->latency;
+        inf->deadline = time + inf->latency;
        inf->slice = inf->nat_slice / ( inf->nat_period / inf->latency );
        inf->period = inf->latency;
-	inf->remain = inf->slice;
+        inf->remain = inf->slice;
    }
    else
    {
        /* Short blocking case */
 
-	/* We leave REMAIN intact, but put this domain on the WAIT
-	   queue marked as recently unblocked. It will be given
-	   priority over other domains on the wait queue until while
-	   REMAIN>0 in a generous attempt to help it make up for its
-	   own foolishness. */
-	if(inf->remain > 0)
+        /* We leave REMAIN intact, but put this domain on the WAIT
+           queue marked as recently unblocked. It will be given
+           priority over other domains on the wait queue until while
+           REMAIN>0 in a generous attempt to help it make up for its
+           own foolishness. */
+        if(inf->remain > 0)
            inf->state = ATROPOS_TASK_UNBLOCKED;
        else
            inf->state = ATROPOS_TASK_WAIT;
@@ -349,10 +329,10 @@ static void block(struct domain* sdom)
  */
 task_slice_t ksched_scheduler(s_time_t time)
 {
-    struct domain	*cur_sdom = current;  /* Current sdom            */
-    s_time_t     newtime;
-    s_time_t        ranfor;	       /* How long the domain ran      */
-    struct domain	*sdom;	       /* tmp. scheduling domain        */
+    struct domain *cur_sdom = current;  /* Current sdom            */
+    s_time_t newtime;
+    s_time_t ranfor;                    /* How long the domain ran */
+    struct domain *sdom;                /* tmp. scheduling domain  */
     int cpu = cur_sdom->processor;      /* current CPU             */
     struct at_dom_info *cur_info;
     static unsigned long waitq_rrobin = 0;
@@ -367,7 +347,7 @@ task_slice_t ksched_scheduler(s_time_t t
     /* If we were spinning in the idle loop, there is no current
      * domain to deschedule. */
     if (is_idle_task(cur_sdom))
-	goto deschedule_done;
+        goto deschedule_done;
 
     /*****************************
      *
@@ -375,7 +355,7 @@ task_slice_t ksched_scheduler(s_time_t t
      *
      ****************************/
 
-    /* Record the time the domain was preempted and for how long it 
+    /* Record the time the domain was preempted and for how long it
        ran.  Work out if the domain is going to be blocked to save
        some pointless queue shuffling */
     cur_sdom->lastdeschd = time;
@@ -388,26 +368,26 @@ task_slice_t ksched_scheduler(s_time_t t
          (cur_info->state == ATROPOS_TASK_UNBLOCKED) )
     {
 
-	/* In this block, we are doing accounting for an sdom which has 
-	   been running in contracted time.  Note that this could now happen
-	   even if the domain is on the wait queue (i.e. if it blocked) */
+        /* In this block, we are doing accounting for an sdom which has
+           been running in contracted time.  Note that this could now happen
+           even if the domain is on the wait queue (i.e. if it blocked) */
 
-	/* Deduct guaranteed time from the domain */
-	cur_info->remain -= ranfor;
+        /* Deduct guaranteed time from the domain */
+        cur_info->remain -= ranfor;
 
-	/* If guaranteed time has run out... */
-	if ( cur_info->remain <= 0 )
+        /* If guaranteed time has run out... */
+        if ( cur_info->remain <= 0 )
         {
-	    /* Move domain to correct position in WAIT queue */
+            /* Move domain to correct position in WAIT queue */
             /* XXX sdom_unblocked doesn't need this since it is
                already in the correct place. */
-	    cur_info->state = ATROPOS_TASK_WAIT;
-	}
+            cur_info->state = ATROPOS_TASK_WAIT;
+        }
     }
 
     requeue(cur_sdom);
 
-deschedule_done:
+ deschedule_done:
     /*****************************
      *
      * We have now successfully descheduled the current sdom.
@@ -424,10 +404,10 @@ deschedule_done:
      ****************************/
 
     while(!list_empty(WAITQ(cpu)) &&
-	  DOM_INFO(sdom = waitq_el(WAITQ(cpu)->next))->deadline <= time )
+          DOM_INFO(sdom = waitq_el(WAITQ(cpu)->next))->deadline <= time )
     {
 
-	struct at_dom_info *inf = DOM_INFO(sdom);
+        struct at_dom_info *inf = DOM_INFO(sdom);
         dequeue(sdom);
 
         if ( inf->period != inf->nat_period )
@@ -444,22 +424,22 @@ deschedule_done:
             }
         }
 
-	/* Domain begins a new period and receives a slice of CPU 
-	 * If this domain has been blocking then throw away the
-	 * rest of it's remain - it can't be trusted */
-	if (inf->remain > 0)
-	    inf->remain = inf->slice;
+        /* Domain begins a new period and receives a slice of CPU 
+         * If this domain has been blocking then throw away the
+         * rest of it's remain - it can't be trusted */
+        if (inf->remain > 0)
+            inf->remain = inf->slice;
         else
-	    inf->remain += inf->slice;
+            inf->remain += inf->slice;
 
-	inf->prevddln = inf->deadline;
-	inf->deadline += inf->period;
+        inf->prevddln = inf->deadline;
+        inf->deadline += inf->period;
 
         if ( inf->remain <= 0 )
             inf->state = ATROPOS_TASK_WAIT;
 
-	/* Place on the appropriate queue */
-	requeue(sdom);
+        /* Place on the appropriate queue */
+        requeue(sdom);
     }
 
     /*****************************
@@ -484,30 +464,27 @@ deschedule_done:
      * queue */
     if (cur_sdom->id == IDLE_DOMAIN_ID && !list_empty(WAITQ(cpu)))
     {
-        struct list_head *item;
+        struct at_dom_info *inf;
+
+        /* Try running a domain on the WAIT queue - this part of the
+           scheduler isn't particularly efficient but then again, we
+           don't have any guaranteed domains to worry about. */
 
-	/* Try running a domain on the WAIT queue - this part of the
-	   scheduler isn't particularly efficient but then again, we
-	   don't have any guaranteed domains to worry about. */
-
-	/* See if there are any unblocked domains on the WAIT
-	   queue who we can give preferential treatment to. */
+        /* See if there are any unblocked domains on the WAIT
+           queue who we can give preferential treatment to. */
 
-        list_for_each(item, WAITQ(cpu))
+        list_for_each_entry ( inf, WAITQ(cpu), waitq )
         {
-	    struct at_dom_info *inf =
-		list_entry(item, struct at_dom_info, waitq);
-
             sdom = inf->owner;
 
-	    if (inf->state == ATROPOS_TASK_UNBLOCKED)
+            if (inf->state == ATROPOS_TASK_UNBLOCKED)
             {
-		cur_sdom = sdom;
-		cur_info = inf;
-		newtime = time + inf->remain;
-		goto found;
-	    }
-	}
+                cur_sdom = sdom;
+                cur_info = inf;
+                newtime = time + inf->remain;
+                goto found;
+            }
+        }
 
         /* init values needed to approximate round-robin for slack time */
         i = 0;
@@ -515,14 +492,11 @@ deschedule_done:
             waitq_rrobin = 0;
 
 
-	/* Last chance: pick a domain on the wait queue with the XTRA
-	   flag set.  The NEXT_OPTM field is used to cheaply achieve
-	   an approximation of round-robin order */
-        list_for_each(item, WAITQ(cpu))
+        /* Last chance: pick a domain on the wait queue with the XTRA
+           flag set.  The NEXT_OPTM field is used to cheaply achieve
+           an approximation of round-robin order */
+        list_for_each_entry ( inf, WAITQ(cpu), waitq )
         {
-	    struct at_dom_info *inf =
-		list_entry(item, struct at_dom_info, waitq);
-
             sdom = inf->owner;
 
             if (inf->xtratime && i >= waitq_rrobin)
@@ -538,7 +512,7 @@ deschedule_done:
         }
     }
 
-found:
+ found:
     /**********************
      *
      * We now have to work out the time when we next need to
@@ -554,7 +528,7 @@ deschedule_done:
     /* exhausted its time, cut short the time allocation */
     if (!list_empty(WAITQ(cpu)))
     {
-        newtime = MIN(newtime, 
+        newtime = MIN(newtime,
                       DOM_INFO(waitq_el(WAITQ(cpu)->next))->deadline);
     }
 
@@ -603,44 +577,44 @@ static void at_dump_runq_el(struct domai
 /* dump relevant per-cpu state for a run queue dump */
 static void at_dump_cpu_state(int cpu)
 {
-    struct list_head *list, *queue;
+    struct list_head *queue;
     int loop = 0;
     struct at_dom_info *d_inf;
     struct domain *d;
 
     queue = RUNQ(cpu);
     printk("\nRUNQUEUE rq %lx   n: %lx, p: %lx\n", (unsigned long)queue,
-	   (unsigned long) queue->next, (unsigned long) queue->prev);
+           (unsigned long) queue->next, (unsigned long) queue->prev);
 
-    list_for_each ( list, queue )
+    list_for_each_entry ( d_inf, queue, run_list )
     {
-        d_inf = list_entry(list, struct at_dom_info, run_list);
         d = d_inf->owner;
         printk("%3d: %d has=%c ", loop++, d->id,
-	       test_bit(DF_RUNNING, &d->flags) ? 'T':'F');
+               test_bit(DF_RUNNING, &d->flags) ? 'T':'F');
         at_dump_runq_el(d);
         printk("c=0x%X%08X\n", (u32)(d->cpu_time>>32), (u32)d->cpu_time);
         printk("         l: %lx n: %lx  p: %lx\n",
-               (unsigned long)list, (unsigned long)list->next,
-               (unsigned long)list->prev);
+               (unsigned long)&d_inf->run_list,
+               (unsigned long)d_inf->run_list.next,
+               (unsigned long)d_inf->run_list.prev);
     }
 
 
     queue = WAITQ(cpu);
     printk("\nWAITQUEUE rq %lx   n: %lx, p: %lx\n", (unsigned long)queue,
-	   (unsigned long) queue->next, (unsigned long) queue->prev);
+           (unsigned long) queue->next, (unsigned long) queue->prev);
 
-    list_for_each ( list, queue )
+    list_for_each_entry ( d_inf, queue, waitq )
    {
-        d_inf = list_entry(list, struct at_dom_info, waitq);
        d = d_inf->owner;
        printk("%3d: %d has=%c ", loop++, d->id,
-	       test_bit(DF_RUNNING, &d->flags) ? 'T':'F');
+               test_bit(DF_RUNNING, &d->flags) ? 'T':'F');
        at_dump_runq_el(d);
        printk("c=0x%X%08X\n", (u32)(d->cpu_time>>32), (u32)d->cpu_time);
        printk("         l: %lx n: %lx  p: %lx\n",
-               (unsigned long)list, (unsigned long)list->next,
-               (unsigned long)list->prev);
+               (unsigned long)&d_inf->waitq,
+               (unsigned long)d_inf->waitq.next,
+               (unsigned long)d_inf->waitq.prev);
    }
 
 }
```
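One subtlety the `requeue()` changes rely on: when `list_for_each_entry()` runs to completion without a `break`, the cursor's embedded `list_head` has wrapped around to the queue head itself, so a post-loop test like `&i->waitq == WAITQ(...)` means "no insertion point was found". A hedged sketch of sorted insertion in that style, reusing the `struct item` and list macros from the first sketch plus the low-level `__list_add()` splice from `list.h` (names illustrative):

```c
/* Insert between two known neighbours, as the real list.h does. */
static void __list_add(struct list_head *new,
                       struct list_head *prev,
                       struct list_head *next)
{
    next->prev = new;
    new->next = next;
    new->prev = prev;
    prev->next = new;
}

/* Sorted insert in the post-changeset requeue() style. */
static void insert_sorted(struct item *inf, struct list_head *head)
{
    struct item *i;

    list_for_each_entry ( i, head, list )
    {
        if ( i->value > inf->value )
        {
            /* Splice in front of the first larger entry. */
            __list_add(&inf->list, i->list.prev, &i->list);
            break;
        }
    }

    /* If the loop ran to completion, the cursor's embedded list_head
     * now aliases the head itself - nothing was larger, so the new
     * entry belongs at the tail. */
    if ( &i->list == head )
        list_add_tail(&inf->list, head);
}
```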
```diff
--- a/xen/common/sched_bvt.c	Tue Jan 25 14:05:02 2005 +0000
+++ b/xen/common/sched_bvt.c	Tue Jan 25 14:05:40 2005 +0000
@@ -348,7 +348,6 @@ int bvt_adjdom(
 static task_slice_t bvt_do_schedule(s_time_t now)
 {
     struct domain *prev = current, *next = NULL, *next_prime, *p;
-    struct list_head   *tmp;
     int                 cpu = prev->processor;
     s32                 r_time;     /* time for new dom to run */
     u32                 next_evt, next_prime_evt, min_avt;
@@ -392,10 +391,8 @@ static task_slice_t bvt_do_schedule(s_ti
     next_prime_evt = ~0U;
     min_avt        = ~0U;
 
-    list_for_each ( tmp, RUNQUEUE(cpu) )
+    list_for_each_entry ( p_inf, RUNQUEUE(cpu), run_list )
     {
-        p_inf = list_entry(tmp, struct bvt_dom_info, run_list);
-
         if ( p_inf->evt < next_evt )
         {
             next_prime_inf = next_inf;
@@ -505,7 +502,7 @@ static void bvt_dump_settings(void)
 
 static void bvt_dump_cpu_state(int i)
 {
-    struct list_head *list, *queue;
+    struct list_head *queue;
     int loop = 0;
     struct bvt_dom_info *d_inf;
     struct domain *d;
@@ -516,17 +513,15 @@ static void bvt_dump_cpu_state(int i)
     printk("QUEUE rq %lx   n: %lx, p: %lx\n", (unsigned long)queue,
            (unsigned long) queue->next, (unsigned long) queue->prev);
 
-    list_for_each ( list, queue )
+    list_for_each_entry ( d_inf, queue, run_list )
     {
-        d_inf = list_entry(list, struct bvt_dom_info, run_list);
         d = d_inf->domain;
         printk("%3d: %u has=%c ", loop++, d->id,
                test_bit(DF_RUNNING, &d->flags) ? 'T':'F');
         bvt_dump_runq_el(d);
         printk("c=0x%X%08X\n", (u32)(d->cpu_time>>32), (u32)d->cpu_time);
-        printk("         l: %lx n: %lx  p: %lx\n",
-               (unsigned long)list, (unsigned long)list->next,
-               (unsigned long)list->prev);
+        printk("         l: %p n: %p  p: %p\n",
+               &d_inf->run_list, d_inf->run_list.next, d_inf->run_list.prev);
     }
 }
```
```diff
--- a/xen/common/sched_rrobin.c	Tue Jan 25 14:05:02 2005 +0000
+++ b/xen/common/sched_rrobin.c	Tue Jan 25 14:05:40 2005 +0000
@@ -187,7 +187,7 @@ static void rr_dump_domain(struct domain
 
 static void rr_dump_cpu_state(int i)
 {
-    struct list_head *list, *queue;
+    struct list_head *queue;
     int loop = 0;
     struct rrobin_dom_info *d_inf;
 
@@ -199,10 +199,9 @@ static void rr_dump_cpu_state(int i)
     d_inf = list_entry(queue, struct rrobin_dom_info, run_list);
     rr_dump_domain(d_inf->domain);
 
-    list_for_each ( list, queue )
+    list_for_each_entry ( d_inf, queue, run_list )
     {
         printk("%3d: ", loop++);
-        d_inf = list_entry(list, struct rrobin_dom_info, run_list);
         rr_dump_domain(d_inf->domain);
     }
 }
```
```diff
--- a/xen/common/slab.c	Tue Jan 25 14:05:02 2005 +0000
+++ b/xen/common/slab.c	Tue Jan 25 14:05:40 2005 +0000
@@ -774,11 +774,9 @@ xmem_cache_create (const char *name, siz
     /* Need the semaphore to access the chain. */
     down(&cache_chain_sem);
     {
-        struct list_head *p;
+        xmem_cache_t *pc;
 
-        list_for_each(p, &cache_chain) {
-            xmem_cache_t *pc = list_entry(p, xmem_cache_t, next);
-
+        list_for_each_entry(pc, &cache_chain, next) {
             /* The name field is constant - no lock needed. */
             if (!strcmp(pc->name, name))
                 BUG();
@@ -802,14 +800,14 @@ xmem_cache_create (const char *name, siz
  */
 static int is_chained_xmem_cache(xmem_cache_t * cachep)
 {
-    struct list_head *p;
+    xmem_cache_t *pc;
     int ret = 0;
     unsigned long spin_flags;
 
     /* Find the cache in the chain of caches. */
     down(&cache_chain_sem);
-    list_for_each(p, &cache_chain) {
-        if (p == &cachep->next) {
+    list_for_each_entry(pc, &cache_chain, next) {
+        if (pc == &cachep) {
             ret = 1;
             break;
         }
@@ -1765,7 +1763,6 @@ void dump_slabinfo()
     p = &cache_cache.next;
     do {
         xmem_cache_t	*cachep;
-        struct list_head	*q;
         slab_t		*slabp;
         unsigned long	active_objs;
         unsigned long	num_objs;
@@ -1776,22 +1773,19 @@ void dump_slabinfo()
         spin_lock_irq(&cachep->spinlock);
         active_objs = 0;
         num_slabs = 0;
-        list_for_each(q,&cachep->slabs_full) {
-            slabp = list_entry(q, slab_t, list);
+        list_for_each_entry(slabp, &cachep->slabs_full, list) {
             if (slabp->inuse != cachep->num)
                 BUG();
             active_objs += cachep->num;
             active_slabs++;
         }
-        list_for_each(q,&cachep->slabs_partial) {
-            slabp = list_entry(q, slab_t, list);
+        list_for_each_entry(slabp, &cachep->slabs_partial, list) {
             if (slabp->inuse == cachep->num || !slabp->inuse)
                 BUG();
             active_objs += slabp->inuse;
             active_slabs++;
         }
-        list_for_each(q,&cachep->slabs_free) {
-            slabp = list_entry(q, slab_t, list);
+        list_for_each_entry(slabp, &cachep->slabs_free, list) {
             if (slabp->inuse)
                 BUG();
             num_slabs++;
```
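With an entry-typed cursor, a membership test compares the entry pointers themselves rather than a `list_head` against `&cachep->next` as the old code did. A sketch of that pattern, reusing the list macros from the first sketch (the `xmem_cache_t` shape here, a struct with an embedded `struct list_head next` and a `name` field, is an assumption standing in for the real Xen type):

```c
/* Membership test in the post-changeset style: walk the chain with a
 * typed cursor and compare cache pointers directly. */
struct xmem_cache {
    const char *name;
    struct list_head next;   /* links this cache into the global chain */
};
typedef struct xmem_cache xmem_cache_t;

static int is_chained(xmem_cache_t *cachep, struct list_head *chain)
{
    xmem_cache_t *pc;

    list_for_each_entry ( pc, chain, next )
        if ( pc == cachep )  /* compare the entries, not their link fields */
            return 1;

    return 0;
}
```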
```diff
--- a/xen/drivers/pci/pci.c	Tue Jan 25 14:05:02 2005 +0000
+++ b/xen/drivers/pci/pci.c	Tue Jan 25 14:05:40 2005 +0000
@@ -1565,15 +1565,15 @@ static int pci_pm_resume_device(struct p
 
 static int pci_pm_save_state_bus(struct pci_bus *bus, u32 state)
 {
-	struct list_head *list;
+	struct pci_bus *i;
 	int error = 0;
 
-	list_for_each(list, &bus->children) {
-		error = pci_pm_save_state_bus(pci_bus_b(list),state);
+	list_for_each_entry(i, &bus->children, node) {
+		error = pci_pm_save_state_bus(i, state);
 		if (error) return error;
 	}
-	list_for_each(list, &bus->devices) {
-		error = pci_pm_save_state_device(pci_dev_b(list),state);
+	list_for_each_entry(i, &bus->devices, node) {
+		error = pci_pm_save_state_device(i, state);
 		if (error) return error;
 	}
 	return 0;
@@ -1581,40 +1581,38 @@ static int pci_pm_save_state_bus(struct 
 
 static int pci_pm_suspend_bus(struct pci_bus *bus, u32 state)
 {
-	struct list_head *list;
+	struct pci_bus *i;
 
 	/* Walk the bus children list */
-	list_for_each(list, &bus->children)
-		pci_pm_suspend_bus(pci_bus_b(list),state);
+	list_for_each_entry(i, &bus->children, node)
+		pci_pm_suspend_bus(i, state);
 
 	/* Walk the device children list */
-	list_for_each(list, &bus->devices)
-		pci_pm_suspend_device(pci_dev_b(list),state);
+	list_for_each_entry(i, &bus->devices, node)
+		pci_pm_suspend_device(i, state);
 	return 0;
 }
 
 static int pci_pm_resume_bus(struct pci_bus *bus)
 {
-	struct list_head *list;
+	struct pci_bus *i;
 
 	/* Walk the device children list */
-	list_for_each(list, &bus->devices)
-		pci_pm_resume_device(pci_dev_b(list));
+	list_for_each_entry(i, &bus->devices, node)
+		pci_pm_resume_device(i);
 
 	/* And then walk the bus children */
-	list_for_each(list, &bus->children)
-		pci_pm_resume_bus(pci_bus_b(list));
+	list_for_each_entry(i, &bus->children, node)
+		pci_pm_resume_bus(i);
 	return 0;
 }
 
 static int pci_pm_save_state(u32 state)
 {
-	struct list_head *list;
 	struct pci_bus *bus;
 	int error = 0;
 
-	list_for_each(list, &pci_root_buses) {
-		bus = pci_bus_b(list);
+	list_for_each_entry(bus, &pci_root_buses, node) {
 		error = pci_pm_save_state_bus(bus,state);
 		if (!error)
 			error = pci_pm_save_state_device(bus->self,state);
@@ -1624,11 +1622,9 @@ static int pci_pm_save_state(u32 state)
 
 static int pci_pm_suspend(u32 state)
 {
-	struct list_head *list;
 	struct pci_bus *bus;
 
-	list_for_each(list, &pci_root_buses) {
-		bus = pci_bus_b(list);
+	list_for_each_entry(bus, &pci_root_buses, node) {
 		pci_pm_suspend_bus(bus,state);
 		pci_pm_suspend_device(bus->self,state);
 	}
@@ -1637,11 +1633,9 @@ static int pci_pm_suspend(u32 state)
 
 int pci_pm_resume(void)
 {
-	struct list_head *list;
 	struct pci_bus *bus;
 
-	list_for_each(list, &pci_root_buses) {
-		bus = pci_bus_b(list);
+	list_for_each_entry(bus, &pci_root_buses, node) {
 		pci_pm_resume_device(bus->self);
 		pci_pm_resume_bus(bus);
 	}
```
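Worth noting for this file: an entry cursor is typed, so a routine that walks two lists holding different element types needs one cursor per type - the children list links `struct pci_bus` nodes while the devices list links `struct pci_dev` nodes. A sketch of the recursive walk with separate typed cursors (the `bus_list` member name follows the 2.4-era PCI layout this tree derives from and is an assumption, not quoted from the changeset):

```c
/* Recursive walk over the PCI bus tree in the new style, with a
 * correctly typed cursor for each list being traversed. */
static int walk_bus(struct pci_bus *bus)
{
    struct pci_bus *child;
    struct pci_dev *dev;
    int error;

    /* Recurse into subordinate buses first. */
    list_for_each_entry(child, &bus->children, node) {
        if ((error = walk_bus(child)) != 0)
            return error;
    }

    /* Then visit the devices directly on this bus. */
    list_for_each_entry(dev, &bus->devices, bus_list) {
        /* per-device work goes here */
    }
    return 0;
}
```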
```diff
--- a/xen/include/xen/list.h	Tue Jan 25 14:05:02 2005 +0000
+++ b/xen/include/xen/list.h	Tue Jan 25 14:05:40 2005 +0000
@@ -161,8 +161,6 @@ static __inline__ void list_splice(struc
 	for (pos = (head)->next, n = pos->next; pos != (head); \
 		pos = n, n = pos->next)
 
-#endif
-
 /**
  * list_for_each_entry - iterate over list of given type
  * @pos:	the type * to use as a loop counter.
@@ -175,3 +173,6 @@ static __inline__ void list_splice(struc
 	     &pos->member != (head); 					\
 	     pos = list_entry(pos->member.next, typeof(*pos), member),	\
 	     prefetch(pos->member.next))
+
+#endif /* _LINUX_LIST_H */
+
```
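A caveat with the macro this header gains: it reads `pos->member.next` at the top of every step, so deleting the current entry and continuing to iterate is not safe. The converted callers in this changeset get away with `list_del()` only because they return immediately afterwards (as `pdb_bkpt_remove()` does). The usual remedy is a one-step-lookahead variant; a sketch of it, matching the style of the macros above (this `_safe` form is hypothetical here - the header in this changeset defines only the plain iterator):

```c
/* Safe-removal variant: keep a lookahead cursor n so the current entry
 * pos can be unlinked without breaking the traversal. */
#define list_for_each_entry_safe(pos, n, head, member)                   \
    for ((pos) = list_entry((head)->next, typeof(*(pos)), member),       \
         (n) = list_entry((pos)->member.next, typeof(*(pos)), member);   \
         &(pos)->member != (head);                                       \
         (pos) = (n),                                                    \
         (n) = list_entry((n)->member.next, typeof(*(n)), member))
```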
```diff
--- a/xen/include/xen/pci.h	Tue Jan 25 14:05:02 2005 +0000
+++ b/xen/include/xen/pci.h	Tue Jan 25 14:05:40 2005 +0000
@@ -358,7 +358,7 @@ enum pci_mmap_state {
 for(dev = pci_dev_g(pci_devices.prev); dev != pci_dev_g(&pci_devices); dev = pci_dev_g(dev->global_list.prev))
 
 #define pci_for_each_bus(bus) \
-for(bus = pci_bus_b(pci_root_buses.next); bus != pci_bus_b(&pci_root_buses); bus = pci_bus_b(bus->node.next))
+	list_for_each_entry(bus, &pci_root_buses, node)
 
 /*
  * The pci_dev structure is used to describe both PCI and ISAPnP devices.
```
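With `pci_for_each_bus()` now a thin wrapper over `list_for_each_entry()`, existing callers need no changes. A usage sketch mirroring the `pcibios_scan_root()` hunk earlier in this changeset (the `find_bus()` wrapper name is illustrative; `pci_root_buses` and the `node` member are as declared in this header):

```c
/* Look up an already-scanned root bus by number, via the rewritten macro. */
struct pci_bus *find_bus(int busnum)
{
    struct pci_bus *bus;

    pci_for_each_bus(bus) {          /* expands to list_for_each_entry() */
        if (bus->number == busnum)
            return bus;              /* already scanned */
    }
    return NULL;
}
```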