
[Xen-devel] [PATCH] list.h minor cleanup



Hi all,

        When I added (the typesafe) list_for_each_entry() to Linux's list.h, I
considered just calling it list_for_each() and replacing all the
callers, but the size of the task defeated me.  Xen, however, is
smaller.
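
The conversion is mechanical.  The old open-coded pattern (a sketch, using a
hypothetical struct foo whose embedded struct list_head member is called
"list", walking a hypothetical list head some_list) looks like:

    struct foo *f;
    struct list_head *ent;

    /* walk the raw list_head pointers, converting each back by hand */
    list_for_each ( ent, &some_list )
    {
        f = list_entry(ent, struct foo, list);
        /* ... use f ... */
    }

With the typesafe macro the list_entry() call is folded into the iterator, so
the caller names the member once and works with the containing type directly:

    struct foo *f;

    list_for_each ( f, &some_list, list )
    {
        /* ... use f ... */
    }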

You may think this patch sucks, but I had to try.  Boot tested.

Rusty.
PS.  I haven't seen a discussion on coding style: some of these are
definitely not Linux-style...
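
PPS.  list_for_each_safe() gets the same treatment, so removal during
iteration would look like this (again just a sketch, reusing the hypothetical
struct foo from above; the "dead" field is made up):

    struct foo *f, *tmp;

    /* tmp remembers the next entry, so f can be unlinked safely */
    list_for_each_safe ( f, tmp, &some_list, list )
    {
        if ( f->dead )
            list_del(&f->list);
    }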

diff -urpN --exclude TAGS -X /home/rusty/devel/kernel/kernel-patches/current-dontdiff --minimal xen-unstable-base/xen/arch/x86/domain.c xen-unstable/xen/arch/x86/domain.c
--- xen-unstable-base/xen/arch/x86/domain.c     2005-01-23 15:46:09.000000000 +1100
+++ xen-unstable/xen/arch/x86/domain.c  2005-01-24 11:52:40.000000000 +1100
@@ -206,13 +206,11 @@ void machine_halt(void)
 void dump_pageframe_info(struct domain *d)
 {
     struct pfn_info *page;
-    struct list_head *ent;
 
     if ( d->tot_pages < 10 )
     {
-        list_for_each ( ent, &d->page_list )
+        list_for_each ( page, &d->page_list, list )
         {
-            page = list_entry(ent, struct pfn_info, list);
             printk("Page %08x: caf=%08x, taf=%08x\n",
                    page_to_phys(page), page->count_info,
                    page->u.inuse.type_info);
diff -urpN --exclude TAGS -X /home/rusty/devel/kernel/kernel-patches/current-dontdiff --minimal xen-unstable-base/xen/arch/x86/mpparse.c xen-unstable/xen/arch/x86/mpparse.c
--- xen-unstable-base/xen/arch/x86/mpparse.c    2005-01-23 15:46:14.000000000 +1100
+++ xen-unstable/xen/arch/x86/mpparse.c 2005-01-24 11:53:12.000000000 +1100
@@ -1232,7 +1232,6 @@ void __init mp_config_acpi_legacy_irqs (
 
 void __init mp_parse_prt (void)
 {
-       struct list_head        *node = NULL;
        struct acpi_prt_entry   *entry = NULL;
        int                     ioapic = -1;
        int                     ioapic_pin = 0;
@@ -1245,9 +1244,7 @@ void __init mp_parse_prt (void)
         * Parsing through the PCI Interrupt Routing Table (PRT) and program
         * routing for all entries.
         */
-       list_for_each(node, &acpi_prt.entries) {
-               entry = list_entry(node, struct acpi_prt_entry, node);
-
+       list_for_each(entry, &acpi_prt.entries, node) {
                /* Need to get irq for dynamic entry */
                if (entry->link.handle) {
                        irq = acpi_pci_link_get_irq(entry->link.handle, entry->link.index, &edge_level, &active_high_low);
diff -urpN --exclude TAGS -X /home/rusty/devel/kernel/kernel-patches/current-dontdiff --minimal xen-unstable-base/xen/arch/x86/pci-pc.c xen-unstable/xen/arch/x86/pci-pc.c
--- xen-unstable-base/xen/arch/x86/pci-pc.c     2005-01-23 15:46:15.000000000 +1100
+++ xen-unstable/xen/arch/x86/pci-pc.c  2005-01-24 12:59:26.000000000 +1100
@@ -1372,11 +1372,9 @@ void __devinit  pcibios_fixup_bus(struct
 
 struct pci_bus * __devinit pcibios_scan_root(int busnum)
 {
-       struct list_head *list;
        struct pci_bus *bus;
 
-       list_for_each(list, &pci_root_buses) {
-               bus = pci_bus_b(list);
+       pci_for_each_bus(bus) {
                if (bus->number == busnum) {
                        /* Already scanned */
                        return bus;
diff -urpN --exclude TAGS -X /home/rusty/devel/kernel/kernel-patches/current-dontdiff --minimal xen-unstable-base/xen/arch/x86/pdb-stub.c xen-unstable/xen/arch/x86/pdb-stub.c
--- xen-unstable-base/xen/arch/x86/pdb-stub.c   2005-01-23 15:46:19.000000000 +1100
+++ xen-unstable/xen/arch/x86/pdb-stub.c        2005-01-24 11:56:28.000000000 +1100
@@ -778,12 +778,10 @@ void pdb_bkpt_add (unsigned long cr3, un
 struct pdb_breakpoint* pdb_bkpt_search (unsigned long cr3, 
                                        unsigned long address)
 {
-    struct list_head *list_entry;
     struct pdb_breakpoint *bkpt;
 
-    list_for_each(list_entry, &breakpoints.list)
+    list_for_each(bkpt, &breakpoints.list, list)
     {
-        bkpt = list_entry(list_entry, struct pdb_breakpoint, list);
        if ( bkpt->cr3 == cr3 && bkpt->address == address )
             return bkpt;
     }
@@ -798,11 +796,9 @@ struct pdb_breakpoint* pdb_bkpt_search (
 int pdb_bkpt_remove (unsigned long cr3, unsigned long address)
 {
-    struct list_head *list_entry;
     struct pdb_breakpoint *bkpt;
 
-    list_for_each(list_entry, &breakpoints.list)
+    list_for_each(bkpt, &breakpoints.list, list)
     {
-        bkpt = list_entry(list_entry, struct pdb_breakpoint, list);
        if ( bkpt->cr3 == cr3 && bkpt->address == address )
        {
             list_del(&bkpt->list);
diff -urpN --exclude TAGS -X /home/rusty/devel/kernel/kernel-patches/current-dontdiff --minimal xen-unstable-base/xen/common/physdev.c xen-unstable/xen/common/physdev.c
--- xen-unstable-base/xen/common/physdev.c      2005-01-23 15:46:04.000000000 +1100
+++ xen-unstable/xen/common/physdev.c   2005-01-24 12:19:18.000000000 +1100
@@ -73,11 +73,9 @@ typedef struct _phys_dev_st {
 static phys_dev_t *find_pdev(struct domain *p, struct pci_dev *dev)
 {
     phys_dev_t *t, *res = NULL;
-    struct list_head *tmp;
 
-    list_for_each(tmp, &p->pcidev_list)
+    list_for_each(t, &p->pcidev_list, node)
     {
-        t = list_entry(tmp,  phys_dev_t, node);
         if ( dev == t->dev )
         {
             res = t;
@@ -230,17 +228,16 @@ int physdev_pci_access_modify(
 int domain_iomem_in_pfn(struct domain *p, unsigned long pfn)
 {
     int ret = 0;
-    struct list_head *l;
+    phys_dev_t *phys_dev;
 
     VERBOSE_INFO("Checking if physdev-capable domain %u needs access to "
                  "pfn %08lx\n", p->id, pfn);
     
     spin_lock(&p->pcidev_lock);
 
-    list_for_each(l, &p->pcidev_list)
+    list_for_each(phys_dev, &p->pcidev_list, node)
     {
         int i;
-        phys_dev_t *phys_dev = list_entry(l, phys_dev_t, node);
         struct pci_dev *pci_dev = phys_dev->dev;
 
         for ( i = 0; (i < DEVICE_COUNT_RESOURCE) && (ret == 0); i++ )
@@ -635,13 +632,11 @@ static long pci_cfgreg_write(int bus, in
 static long pci_probe_root_buses(u32 *busmask)
 {
     phys_dev_t *pdev;
-    struct list_head *tmp;
 
     memset(busmask, 0, 256/8);
 
-    list_for_each ( tmp, &current->domain->pcidev_list )
+    list_for_each ( pdev, &current->domain->pcidev_list, node )
     {
-        pdev = list_entry(tmp, phys_dev_t, node);
         set_bit(pdev->dev->bus->number, busmask);
     }
 
diff -urpN --exclude TAGS -X /home/rusty/devel/kernel/kernel-patches/current-dontdiff --minimal xen-unstable-base/xen/common/sched_atropos.c xen-unstable/xen/common/sched_atropos.c
--- xen-unstable-base/xen/common/sched_atropos.c        2005-01-23 15:46:05.000000000 +1100
+++ xen-unstable/xen/common/sched_atropos.c     2005-01-24 12:11:57.000000000 +1100
@@ -98,8 +98,8 @@ static inline int __task_on_runqueue(str
 static int q_len(struct list_head *q) 
 {
     int i = 0;
-    struct list_head *tmp;
-    list_for_each(tmp, q) i++;
+    struct at_dom_info *tmp;
+    list_for_each(tmp, q, waitq) i++;
     return i;
 }
 
@@ -129,9 +129,8 @@ static inline struct domain *waitq_el(st
  */
 static void requeue(struct domain *sdom)
 {
-    struct at_dom_info *inf = DOM_INFO(sdom);
+    struct at_dom_info *i, *inf = DOM_INFO(sdom);
     struct list_head *prev;
-    struct list_head *next;
 
 
     if(!domain_runnable(sdom)) return;
@@ -141,22 +140,20 @@ static void requeue(struct domain *sdom)
     {
         prev = WAITQ(sdom->processor);
 
-        list_for_each(next, WAITQ(sdom->processor))
+        list_for_each(i, WAITQ(sdom->processor), waitq)
         {
-            struct at_dom_info *i = 
-                list_entry(next, struct at_dom_info, waitq);
             if ( i->deadline > inf->deadline )
             {
-                __list_add(&inf->waitq, prev, next);
+                __list_add(&inf->waitq, prev, &i->waitq);
                 break;
             }
 
-            prev = next;
+            prev = &i->waitq;
         }
 
         /* put the domain on the end of the list if it hasn't been put
          * elsewhere */
-        if ( next == WAITQ(sdom->processor) )
+        if ( &i->waitq == WAITQ(sdom->processor) )
             list_add_tail(&inf->waitq, WAITQ(sdom->processor));
     }
     else if ( domain_runnable(sdom) )
@@ -165,21 +162,18 @@ static void requeue(struct domain *sdom)
         
         prev = RUNQ(sdom->processor);
 
-        list_for_each(next, RUNQ(sdom->processor))
+        list_for_each(i, RUNQ(sdom->processor), run_list)
         {
-            struct at_dom_info *p = list_entry(next, struct at_dom_info,
-                                               run_list);
-
-            if( p->deadline > inf->deadline || is_idle_task(p->owner) )
+            if( i->deadline > inf->deadline || is_idle_task(i->owner) )
             {
-                __list_add(&inf->run_list, prev, next);
+                __list_add(&inf->run_list, prev, &i->run_list);
                 break;
             }
 
-            prev = next;
+            prev = &i->run_list;
         }
 
-        if ( next == RUNQ(sdom->processor) )
+        if ( &i->run_list == RUNQ(sdom->processor) )
             list_add_tail(&inf->run_list, RUNQ(sdom->processor));
         
     
@@ -484,7 +478,7 @@ deschedule_done:
      * queue */
     if (cur_sdom->id == IDLE_DOMAIN_ID && !list_empty(WAITQ(cpu)))
     {
-        struct list_head *item;
+        struct at_dom_info *inf;
 
            /* Try running a domain on the WAIT queue - this part of the
                scheduler isn't particularly efficient but then again, we
@@ -493,11 +487,8 @@ deschedule_done:
            /* See if there are any unblocked domains on the WAIT
                queue who we can give preferential treatment to. */
         
-        list_for_each(item, WAITQ(cpu))
+        list_for_each(inf, WAITQ(cpu), waitq)
         {
-            struct at_dom_info *inf =
-                list_entry(item, struct at_dom_info, waitq);
-
             sdom = inf->owner;
             
                if (inf->state == ATROPOS_TASK_UNBLOCKED) 
@@ -518,11 +509,8 @@ deschedule_done:
            /* Last chance: pick a domain on the wait queue with the XTRA
                flag set.  The NEXT_OPTM field is used to cheaply achieve
                an approximation of round-robin order */
-        list_for_each(item, WAITQ(cpu))
+        list_for_each(inf, WAITQ(cpu), waitq)
         {
-            struct at_dom_info *inf =
-                list_entry(item, struct at_dom_info, waitq);
-            
             sdom = inf->owner;
             
             if (inf->xtratime && i >= waitq_rrobin) 
@@ -603,7 +591,7 @@ static void at_dump_runq_el(struct domai
 /* dump relevant per-cpu state for a run queue dump */
 static void at_dump_cpu_state(int cpu)
 {
-    struct list_head *list, *queue;
+    struct list_head *queue;
     int loop = 0;
     struct at_dom_info *d_inf;
     struct domain *d;
@@ -612,17 +600,17 @@ static void at_dump_cpu_state(int cpu)
     printk("\nRUNQUEUE rq %lx   n: %lx, p: %lx\n",  (unsigned long)queue,
     (unsigned long) queue->next, (unsigned long) queue->prev);
 
-    list_for_each ( list, queue )
+    list_for_each ( d_inf, queue, run_list )
     {
-        d_inf = list_entry(list, struct at_dom_info, run_list);
         d = d_inf->owner;
         printk("%3d: %d has=%c ", loop++, d->id, 
                                     test_bit(DF_RUNNING, &d->flags) ? 'T':'F');
         at_dump_runq_el(d);
         printk("c=0x%X%08X\n", (u32)(d->cpu_time>>32), (u32)d->cpu_time);
         printk("         l: %lx n: %lx  p: %lx\n",
-                        (unsigned long)list, (unsigned long)list->next,
-                        (unsigned long)list->prev);
+                        (unsigned long)&d_inf->run_list,
+                        (unsigned long)d_inf->run_list.next,
+                        (unsigned long)d_inf->run_list.prev);
     }
 
 
@@ -630,17 +618,17 @@ static void at_dump_cpu_state(int cpu)
     printk("\nWAITQUEUE rq %lx   n: %lx, p: %lx\n",  (unsigned long)queue,
     (unsigned long) queue->next, (unsigned long) queue->prev);
 
-    list_for_each ( list, queue )
+    list_for_each ( d_inf, queue, waitq )
     {
-        d_inf = list_entry(list, struct at_dom_info, waitq);
         d = d_inf->owner;
         printk("%3d: %d has=%c ", loop++, d->id, 
                                     test_bit(DF_RUNNING, &d->flags) ? 'T':'F');
         at_dump_runq_el(d);
         printk("c=0x%X%08X\n", (u32)(d->cpu_time>>32), (u32)d->cpu_time);
         printk("         l: %lx n: %lx  p: %lx\n",
-                        (unsigned long)list, (unsigned long)list->next,
-                        (unsigned long)list->prev);
+                        (unsigned long)&d_inf->waitq,
+                        (unsigned long)d_inf->waitq.next,
+                        (unsigned long)d_inf->waitq.prev);
     }
        
 }
diff -urpN --exclude TAGS -X /home/rusty/devel/kernel/kernel-patches/current-dontdiff --minimal xen-unstable-base/xen/common/sched_bvt.c xen-unstable/xen/common/sched_bvt.c
--- xen-unstable-base/xen/common/sched_bvt.c    2005-01-23 15:46:15.000000000 +1100
+++ xen-unstable/xen/common/sched_bvt.c 2005-01-24 12:53:59.000000000 +1100
@@ -370,7 +370,6 @@ static task_slice_t bvt_do_schedule(s_ti
 {
     struct domain *d;
     struct exec_domain      *prev = current, *next = NULL, *next_prime, *ed; 
-    struct list_head   *tmp;
     int                 cpu = prev->processor;
     s32                 r_time;     /* time for new dom to run */
     u32                 next_evt, next_prime_evt, min_avt;
@@ -415,10 +414,8 @@ static task_slice_t bvt_do_schedule(s_ti
     next_prime_evt = ~0U;
     min_avt        = ~0U;
 
-    list_for_each ( tmp, RUNQUEUE(cpu) )
+    list_for_each ( p_einf, RUNQUEUE(cpu), run_list )
     {
-        p_einf = list_entry(tmp, struct bvt_edom_info, run_list);
-
         if ( p_einf->evt < next_evt )
         {
             next_prime_einf  = next_einf;
@@ -530,7 +527,7 @@ static void bvt_dump_settings(void)
 
 static void bvt_dump_cpu_state(int i)
 {
-    struct list_head *list, *queue;
+    struct list_head *queue;
     int loop = 0;
     struct bvt_edom_info *d_inf;
     struct exec_domain *d;
@@ -541,17 +538,17 @@ static void bvt_dump_cpu_state(int i)
     printk("QUEUE rq %lx   n: %lx, p: %lx\n",  (unsigned long)queue,
            (unsigned long) queue->next, (unsigned long) queue->prev);
 
-    list_for_each ( list, queue )
+    list_for_each ( d_inf, queue, run_list )
     {
-        d_inf = list_entry(list, struct bvt_edom_info, run_list);
         d = d_inf->exec_domain;
         printk("%3d: %u has=%c ", loop++, d->domain->id,
                test_bit(EDF_RUNNING, &d->ed_flags) ? 'T':'F');
         bvt_dump_runq_el(d);
         printk("c=0x%X%08X\n", (u32)(d->cpu_time>>32), (u32)d->cpu_time);
         printk("         l: %lx n: %lx  p: %lx\n",
-               (unsigned long)list, (unsigned long)list->next,
-               (unsigned long)list->prev);
+               (unsigned long)&d_inf->run_list,
+               (unsigned long)d_inf->run_list.next,
+               (unsigned long)d_inf->run_list.prev);
     }
 }
 
diff -urpN --exclude TAGS -X /home/rusty/devel/kernel/kernel-patches/current-dontdiff --minimal xen-unstable-base/xen/common/sched_rrobin.c xen-unstable/xen/common/sched_rrobin.c
--- xen-unstable-base/xen/common/sched_rrobin.c 2005-01-23 15:46:01.000000000 +1100
+++ xen-unstable/xen/common/sched_rrobin.c      2005-01-24 11:57:22.000000000 +1100
@@ -187,7 +187,7 @@ static void rr_dump_domain(struct domain
 
 static void rr_dump_cpu_state(int i)
 {
-    struct list_head *list, *queue;
+    struct list_head *queue;
     int loop = 0;
     struct rrobin_dom_info *d_inf;
 
@@ -199,10 +199,9 @@ static void rr_dump_cpu_state(int i)
     d_inf = list_entry(queue, struct rrobin_dom_info, run_list);
     rr_dump_domain(d_inf->domain);
  
-    list_for_each ( list, queue )
+    list_for_each ( d_inf, queue, run_list )
     {
         printk("%3d: ",loop++);
-        d_inf = list_entry(list, struct rrobin_dom_info, run_list);
         rr_dump_domain(d_inf->domain);
     }
 }
diff -urpN --exclude TAGS -X /home/rusty/devel/kernel/kernel-patches/current-dontdiff --minimal xen-unstable-base/xen/common/slab.c xen-unstable/xen/common/slab.c
--- xen-unstable-base/xen/common/slab.c 2005-01-23 15:46:05.000000000 +1100
+++ xen-unstable/xen/common/slab.c      2005-01-24 12:13:58.000000000 +1100
@@ -774,11 +774,9 @@ xmem_cache_create (const char *name, siz
     /* Need the semaphore to access the chain. */
     down(&cache_chain_sem);
     {
-        struct list_head *p;
-
-        list_for_each(p, &cache_chain) {
-            xmem_cache_t *pc = list_entry(p, xmem_cache_t, next);
+        xmem_cache_t *pc;
 
+        list_for_each(pc, &cache_chain, next) {
             /* The name field is constant - no lock needed. */
             if (!strcmp(pc->name, name))
                 BUG();
@@ -802,14 +800,14 @@ xmem_cache_create (const char *name, siz
  */
 static int is_chained_xmem_cache(xmem_cache_t * cachep)
 {
-    struct list_head *p;
+    xmem_cache_t *pc;
     int ret = 0;
     unsigned long spin_flags;
 
     /* Find the cache in the chain of caches. */
     down(&cache_chain_sem);
-    list_for_each(p, &cache_chain) {
-        if (p == &cachep->next) {
+    list_for_each(pc, &cache_chain, next) {
+        if (pc == cachep) {
             ret = 1;
             break;
         }
@@ -1765,7 +1763,6 @@ void dump_slabinfo()
     p = &cache_cache.next;
     do {
         xmem_cache_t   *cachep;
-        struct list_head *q;
         slab_t         *slabp;
         unsigned long  active_objs;
         unsigned long  num_objs;
@@ -1776,22 +1773,19 @@ void dump_slabinfo()
         spin_lock_irq(&cachep->spinlock);
         active_objs = 0;
         num_slabs = 0;
-        list_for_each(q,&cachep->slabs_full) {
-            slabp = list_entry(q, slab_t, list);
+        list_for_each(slabp, &cachep->slabs_full, list) {
             if (slabp->inuse != cachep->num)
                 BUG();
             active_objs += cachep->num;
             active_slabs++;
         }
-        list_for_each(q,&cachep->slabs_partial) {
-            slabp = list_entry(q, slab_t, list);
+        list_for_each(slabp, &cachep->slabs_partial, list) {
             if (slabp->inuse == cachep->num || !slabp->inuse)
                 BUG();
             active_objs += slabp->inuse;
             active_slabs++;
         }
-        list_for_each(q,&cachep->slabs_free) {
-            slabp = list_entry(q, slab_t, list);
+        list_for_each(slabp, &cachep->slabs_free, list) {
             if (slabp->inuse)
                 BUG();
             num_slabs++;
diff -urpN --exclude TAGS -X /home/rusty/devel/kernel/kernel-patches/current-dontdiff --minimal xen-unstable-base/xen/drivers/pci/pci.c xen-unstable/xen/drivers/pci/pci.c
--- xen-unstable-base/xen/drivers/pci/pci.c     2005-01-23 15:46:12.000000000 +1100
+++ xen-unstable/xen/drivers/pci/pci.c  2005-01-24 12:18:24.000000000 +1100
@@ -1565,15 +1565,16 @@ static int pci_pm_resume_device(struct p
 
 static int pci_pm_save_state_bus(struct pci_bus *bus, u32 state)
 {
-       struct list_head *list;
+       struct pci_bus *child;
+       struct pci_dev *dev;
        int error = 0;
 
-       list_for_each(list, &bus->children) {
-               error = pci_pm_save_state_bus(pci_bus_b(list),state);
+       list_for_each(child, &bus->children, node) {
+               error = pci_pm_save_state_bus(child, state);
                if (error) return error;
        }
-       list_for_each(list, &bus->devices) {
-               error = pci_pm_save_state_device(pci_dev_b(list),state);
+       list_for_each(dev, &bus->devices, bus_list) {
+               error = pci_pm_save_state_device(dev, state);
                if (error) return error;
        }
        return 0;
@@ -1581,40 +1582,40 @@ static int pci_pm_save_state_bus(struct 
 
 static int pci_pm_suspend_bus(struct pci_bus *bus, u32 state)
 {
-       struct list_head *list;
+       struct pci_bus *child;
+       struct pci_dev *dev;
 
        /* Walk the bus children list */
-       list_for_each(list, &bus->children) 
-               pci_pm_suspend_bus(pci_bus_b(list),state);
+       list_for_each(child, &bus->children, node)
+               pci_pm_suspend_bus(child, state);
 
        /* Walk the device children list */
-       list_for_each(list, &bus->devices)
-               pci_pm_suspend_device(pci_dev_b(list),state);
+       list_for_each(dev, &bus->devices, bus_list)
+               pci_pm_suspend_device(dev, state);
        return 0;
 }
 
 static int pci_pm_resume_bus(struct pci_bus *bus)
 {
-       struct list_head *list;
+       struct pci_bus *child;
+       struct pci_dev *dev;
 
        /* Walk the device children list */
-       list_for_each(list, &bus->devices)
-               pci_pm_resume_device(pci_dev_b(list));
+       list_for_each(dev, &bus->devices, bus_list)
+               pci_pm_resume_device(dev);
 
        /* And then walk the bus children */
-       list_for_each(list, &bus->children)
-               pci_pm_resume_bus(pci_bus_b(list));
+       list_for_each(child, &bus->children, node)
+               pci_pm_resume_bus(child);
        return 0;
 }
 
 static int pci_pm_save_state(u32 state)
 {
-       struct list_head *list;
        struct pci_bus *bus;
        int error = 0;
 
-       list_for_each(list, &pci_root_buses) {
-               bus = pci_bus_b(list);
+       list_for_each(bus, &pci_root_buses, node) {
                error = pci_pm_save_state_bus(bus,state);
                if (!error)
                        error = pci_pm_save_state_device(bus->self,state);
@@ -1624,11 +1625,9 @@ static int pci_pm_save_state(u32 state)
 
 static int pci_pm_suspend(u32 state)
 {
-       struct list_head *list;
        struct pci_bus *bus;
 
-       list_for_each(list, &pci_root_buses) {
-               bus = pci_bus_b(list);
+       list_for_each(bus, &pci_root_buses, node) {
                pci_pm_suspend_bus(bus,state);
                pci_pm_suspend_device(bus->self,state);
        }
@@ -1637,11 +1636,9 @@ static int pci_pm_suspend(u32 state)
 
 int pci_pm_resume(void)
 {
-       struct list_head *list;
        struct pci_bus *bus;
 
-       list_for_each(list, &pci_root_buses) {
-               bus = pci_bus_b(list);
+       list_for_each(bus, &pci_root_buses, node) {
                pci_pm_resume_device(bus->self);
                pci_pm_resume_bus(bus);
        }
diff -urpN --exclude TAGS -X /home/rusty/devel/kernel/kernel-patches/current-dontdiff --minimal xen-unstable-base/xen/include/xen/list.h xen-unstable/xen/include/xen/list.h
--- xen-unstable-base/xen/include/xen/list.h    2005-01-23 15:46:13.000000000 +1100
+++ xen-unstable/xen/include/xen/list.h 2005-01-24 11:52:26.000000000 +1100
@@ -144,34 +144,26 @@ static __inline__ void list_splice(struc
        ((type *)((char *)(ptr)-(unsigned long)(&((type *)0)->member)))
 
 /**
- * list_for_each       -       iterate over a list
- * @pos:       the &struct list_head to use as a loop counter.
- * @head:      the head for your list.
- */
-#define list_for_each(pos, head) \
-       for (pos = (head)->next; pos != (head); pos = pos->next)
-               
-/**
- * list_for_each_safe  -       iterate over a list safe against removal of list entry
- * @pos:       the &struct list_head to use as a loop counter.
- * @n:         another &struct list_head to use as temporary storage
+ * list_for_each       -       iterate over list of given type
+ * @pos:       the type * to use as a loop counter.
  * @head:      the head for your list.
+ * @member:    the name of the list_struct within the struct.
  */
-#define list_for_each_safe(pos, n, head) \
-       for (pos = (head)->next, n = pos->next; pos != (head); \
-               pos = n, n = pos->next)
-
-#endif
+#define list_for_each(pos, head, member)                               \
+       for (pos = list_entry((head)->next, typeof(*pos), member);      \
+            &pos->member != (head);                                    \
+            pos = list_entry(pos->member.next, typeof(*pos), member))
 
 /**
- * list_for_each_entry -       iterate over list of given type
+ * list_for_each_safe - iterate over list of given type safe against removal of list entry
  * @pos:       the type * to use as a loop counter.
+ * @n:         another type * to use as temporary storage
  * @head:      the head for your list.
  * @member:    the name of the list_struct within the struct.
  */
-#define list_for_each_entry(pos, head, member)                         \
+#define list_for_each_safe(pos, n, head, member)                       \
        for (pos = list_entry((head)->next, typeof(*pos), member),      \
-                    prefetch(pos->member.next);                        \
+               n = list_entry(pos->member.next, typeof(*pos), member); \
             &pos->member != (head);                                    \
-            pos = list_entry(pos->member.next, typeof(*pos), member),  \
-                    prefetch(pos->member.next))
+            pos = n, n = list_entry(n->member.next, typeof(*n), member))
+#endif
diff -urpN --exclude TAGS -X /home/rusty/devel/kernel/kernel-patches/current-dontdiff --minimal xen-unstable-base/xen/include/xen/pci.h xen-unstable/xen/include/xen/pci.h
--- xen-unstable-base/xen/include/xen/pci.h     2005-01-23 15:46:15.000000000 +1100
+++ xen-unstable/xen/include/xen/pci.h  2005-01-24 11:55:37.000000000 +1100
@@ -358,7 +358,7 @@ enum pci_mmap_state {
        for(dev = pci_dev_g(pci_devices.prev); dev != pci_dev_g(&pci_devices); dev = pci_dev_g(dev->global_list.prev))
 
 #define pci_for_each_bus(bus) \
-for(bus = pci_bus_b(pci_root_buses.next); bus != pci_bus_b(&pci_root_buses); bus = pci_bus_b(bus->node.next))
+       list_for_each(bus, &pci_root_buses, node)
 
 /*
  * The pci_dev structure is used to describe both PCI and ISAPnP devices.

-- 
A bad analogy is like a leaky screwdriver -- Richard Braakman


