[Xen-devel] [PATCH 2/4] use xzalloc in common code

To: "xen-devel@xxxxxxxxxxxxxxxxxxx" <xen-devel@xxxxxxxxxxxxxxxxxxx>
Subject: [Xen-devel] [PATCH 2/4] use xzalloc in common code
From: "Jan Beulich" <JBeulich@xxxxxxxx>
Date: Tue, 04 Oct 2011 12:48:19 +0100
Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
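
For context, xzalloc() and xzalloc_array() are the zero-initializing counterparts of xmalloc() and xmalloc_array(), so each conversion below simply drops the now-redundant memset(). A minimal sketch of the pattern being replaced ("struct foo" is a made-up example type, not taken from this patch):

/* Pattern removed by this patch: allocate, then clear by hand. */
static struct foo *alloc_foo_old(void)
{
    struct foo *f = xmalloc(struct foo);

    if ( f == NULL )
        return NULL;
    memset(f, 0, sizeof(*f));
    return f;
}

/* Equivalent using the zeroing allocator: memory comes back zero-filled. */
static struct foo *alloc_foo_new(void)
{
    return xzalloc(struct foo);
}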

--- a/xen/common/cpupool.c
+++ b/xen/common/cpupool.c
@@ -39,7 +39,7 @@ DEFINE_PER_CPU(struct cpupool *, cpupool
 
 static struct cpupool *alloc_cpupool_struct(void)
 {
-    return xmalloc(struct cpupool);
+    return xzalloc(struct cpupool);
 }
 
 static void free_cpupool_struct(struct cpupool *c)
@@ -118,7 +118,6 @@ static struct cpupool *cpupool_create(
     *perr = -ENOMEM;
     if ( (c = alloc_cpupool_struct()) == NULL )
         return NULL;
-    memset(c, 0, sizeof(*c));
 
     /* One reference for caller, one reference for cpupool_destroy(). */
     atomic_set(&c->refcnt, 2);
--- a/xen/common/domctl.c
+++ b/xen/common/domctl.c
@@ -154,11 +154,9 @@ static unsigned int default_vcpu0_locati
 
     /* Do an initial CPU placement. Pick the least-populated CPU. */
     nr_cpus = last_cpu(cpu_online_map) + 1;
-    cnt = xmalloc_array(unsigned int, nr_cpus);
+    cnt = xzalloc_array(unsigned int, nr_cpus);
     if ( cnt )
     {
-        memset(cnt, 0, nr_cpus * sizeof(*cnt));
-
         rcu_read_lock(&domlist_read_lock);
         for_each_domain ( d )
             for_each_vcpu ( d, v )
@@ -510,9 +508,8 @@ long do_domctl(XEN_GUEST_HANDLE(xen_domc
             BUG_ON(d->vcpu != NULL);
             BUG_ON(d->max_vcpus != 0);
 
-            if ( (vcpus = xmalloc_array(struct vcpu *, max)) == NULL )
+            if ( (vcpus = xzalloc_array(struct vcpu *, max)) == NULL )
                 goto maxvcpu_out;
-            memset(vcpus, 0, max * sizeof(*vcpus));
 
             /* Install vcpu array /then/ update max_vcpus. */
             d->vcpu = vcpus;
--- a/xen/common/event_channel.c
+++ b/xen/common/event_channel.c
@@ -100,10 +100,9 @@ static int get_free_port(struct domain *
     if ( port == MAX_EVTCHNS(d) )
         return -ENOSPC;
 
-    chn = xmalloc_array(struct evtchn, EVTCHNS_PER_BUCKET);
+    chn = xzalloc_array(struct evtchn, EVTCHNS_PER_BUCKET);
     if ( unlikely(chn == NULL) )
         return -ENOMEM;
-    memset(chn, 0, EVTCHNS_PER_BUCKET * sizeof(*chn));
     bucket_from_port(d, port) = chn;
 
     for ( i = 0; i < EVTCHNS_PER_BUCKET; i++ )
--- a/xen/common/grant_table.c
+++ b/xen/common/grant_table.c
@@ -2400,19 +2400,17 @@ grant_table_create(
     struct grant_table *t;
     int                 i;
 
-    if ( (t = xmalloc(struct grant_table)) == NULL )
+    if ( (t = xzalloc(struct grant_table)) == NULL )
         goto no_mem_0;
 
     /* Simple stuff. */
-    memset(t, 0, sizeof(*t));
     spin_lock_init(&t->lock);
     t->nr_grant_frames = INITIAL_NR_GRANT_FRAMES;
 
     /* Active grant table. */
-    if ( (t->active = xmalloc_array(struct active_grant_entry *,
+    if ( (t->active = xzalloc_array(struct active_grant_entry *,
                                     max_nr_active_grant_frames())) == NULL )
         goto no_mem_1;
-    memset(t->active, 0, max_nr_active_grant_frames() * sizeof(t->active[0]));
     for ( i = 0;
           i < num_act_frames_from_sha_frames(INITIAL_NR_GRANT_FRAMES); i++ )
     {
@@ -2422,10 +2420,9 @@ grant_table_create(
     }
 
     /* Tracking of mapped foreign frames table */
-    if ( (t->maptrack = xmalloc_array(struct grant_mapping *,
+    if ( (t->maptrack = xzalloc_array(struct grant_mapping *,
                                       max_nr_maptrack_frames())) == NULL )
         goto no_mem_2;
-    memset(t->maptrack, 0, max_nr_maptrack_frames() * sizeof(t->maptrack[0]));
     if ( (t->maptrack[0] = alloc_xenheap_page()) == NULL )
         goto no_mem_3;
     clear_page(t->maptrack[0]);
@@ -2434,9 +2431,8 @@ grant_table_create(
         t->maptrack[0][i].ref = i+1;
 
     /* Shared grant table. */
-    if ( (t->shared_raw = xmalloc_array(void *, max_nr_grant_frames)) == NULL )
+    if ( (t->shared_raw = xzalloc_array(void *, max_nr_grant_frames)) == NULL )
         goto no_mem_3;
-    memset(t->shared_raw, 0, max_nr_grant_frames * sizeof(t->shared_raw[0]));
     for ( i = 0; i < INITIAL_NR_GRANT_FRAMES; i++ )
     {
         if ( (t->shared_raw[i] = alloc_xenheap_page()) == NULL )
@@ -2448,12 +2444,10 @@ grant_table_create(
         gnttab_create_shared_page(d, t, i);
 
     /* Status pages for grant table - for version 2 */
-    t->status = xmalloc_array(grant_status_t *,
+    t->status = xzalloc_array(grant_status_t *,
                               grant_to_status_frames(max_nr_grant_frames));
     if ( t->status == NULL )
         goto no_mem_4;
-    memset(t->status, 0,
-           grant_to_status_frames(max_nr_grant_frames) * sizeof(t->status[0]));
     t->nr_status_frames = 0;
 
     /* Okay, install the structure. */
--- a/xen/common/sched_arinc653.c
+++ b/xen/common/sched_arinc653.c
@@ -341,11 +341,10 @@ a653sched_init(struct scheduler *ops)
 {
     a653sched_priv_t *prv;
 
-    prv = xmalloc(a653sched_priv_t);
+    prv = xzalloc(a653sched_priv_t);
     if ( prv == NULL )
         return -ENOMEM;
 
-    memset(prv, 0, sizeof(*prv));
     ops->sched_data = prv;
 
     prv->schedule[0].dom_handle[0] = '\0';
--- a/xen/common/sched_credit.c
+++ b/xen/common/sched_credit.c
@@ -351,10 +351,9 @@ csched_alloc_pdata(const struct schedule
     unsigned long flags;
 
     /* Allocate per-PCPU info */
-    spc = xmalloc(struct csched_pcpu);
+    spc = xzalloc(struct csched_pcpu);
     if ( spc == NULL )
         return NULL;
-    memset(spc, 0, sizeof(*spc));
 
     spin_lock_irqsave(&prv->lock, flags);
 
@@ -649,10 +648,9 @@ csched_alloc_vdata(const struct schedule
     struct csched_vcpu *svc;
 
     /* Allocate per-VCPU info */
-    svc = xmalloc(struct csched_vcpu);
+    svc = xzalloc(struct csched_vcpu);
     if ( svc == NULL )
         return NULL;
-    memset(svc, 0, sizeof(*svc));
 
     INIT_LIST_HEAD(&svc->runq_elem);
     INIT_LIST_HEAD(&svc->active_vcpu_elem);
@@ -837,10 +835,9 @@ csched_alloc_domdata(const struct schedu
 {
     struct csched_dom *sdom;
 
-    sdom = xmalloc(struct csched_dom);
+    sdom = xzalloc(struct csched_dom);
     if ( sdom == NULL )
         return NULL;
-    memset(sdom, 0, sizeof(*sdom));
 
     /* Initialize credit and weight */
     INIT_LIST_HEAD(&sdom->active_vcpu);
@@ -1513,11 +1510,10 @@ csched_init(struct scheduler *ops)
 {
     struct csched_private *prv;
 
-    prv = xmalloc(struct csched_private);
+    prv = xzalloc(struct csched_private);
     if ( prv == NULL )
         return -ENOMEM;
 
-    memset(prv, 0, sizeof(*prv));
     ops->sched_data = prv;
     spin_lock_init(&prv->lock);
     INIT_LIST_HEAD(&prv->active_sdom);
--- a/xen/common/sched_credit2.c
+++ b/xen/common/sched_credit2.c
@@ -732,10 +732,9 @@ csched_alloc_vdata(const struct schedule
     struct csched_vcpu *svc;
 
     /* Allocate per-VCPU info */
-    svc = xmalloc(struct csched_vcpu);
+    svc = xzalloc(struct csched_vcpu);
     if ( svc == NULL )
         return NULL;
-    memset(svc, 0, sizeof(*svc));
 
     INIT_LIST_HEAD(&svc->rqd_elem);
     INIT_LIST_HEAD(&svc->sdom_elem);
@@ -1437,10 +1436,9 @@ csched_alloc_domdata(const struct schedu
     struct csched_dom *sdom;
     int flags;
 
-    sdom = xmalloc(struct csched_dom);
+    sdom = xzalloc(struct csched_dom);
     if ( sdom == NULL )
         return NULL;
-    memset(sdom, 0, sizeof(*sdom));
 
     /* Initialize credit and weight */
     INIT_LIST_HEAD(&sdom->vcpu);
@@ -2065,10 +2063,9 @@ csched_init(struct scheduler *ops)
      * set up basic structures, and a callback when the CPU info is
      * available. */
 
-    prv = xmalloc(struct csched_private);
+    prv = xzalloc(struct csched_private);
     if ( prv == NULL )
         return -ENOMEM;
-    memset(prv, 0, sizeof(*prv));
     ops->sched_data = prv;
     spin_lock_init(&prv->lock);
     INIT_LIST_HEAD(&prv->sdom);
--- a/xen/common/sched_sedf.c
+++ b/xen/common/sched_sedf.c
@@ -348,11 +348,10 @@ static void *sedf_alloc_vdata(const stru
 {
     struct sedf_vcpu_info *inf;
 
-    inf = xmalloc(struct sedf_vcpu_info);
+    inf = xzalloc(struct sedf_vcpu_info);
     if ( inf == NULL )
         return NULL;
 
-    memset(inf, 0, sizeof(struct sedf_vcpu_info));
     inf->vcpu = v;
 
     /* Every VCPU gets an equal share of extratime by default. */
@@ -387,9 +386,8 @@ sedf_alloc_pdata(const struct scheduler 
 {
     struct sedf_cpu_info *spc;
 
-    spc = xmalloc(struct sedf_cpu_info);
+    spc = xzalloc(struct sedf_cpu_info);
     BUG_ON(spc == NULL);
-    memset(spc, 0, sizeof(*spc));
     INIT_LIST_HEAD(&spc->waitq);
     INIT_LIST_HEAD(&spc->runnableq);
     INIT_LIST_HEAD(&spc->extraq[EXTRA_PEN_Q]);
@@ -415,15 +413,7 @@ static void sedf_free_vdata(const struct
 static void *
 sedf_alloc_domdata(const struct scheduler *ops, struct domain *d)
 {
-    void *mem;
-
-    mem = xmalloc(struct sedf_dom_info);
-    if ( mem == NULL )
-        return NULL;
-
-    memset(mem, 0, sizeof(struct sedf_dom_info));
-
-    return mem;
+    return xzalloc(struct sedf_dom_info);
 }
 
 static int sedf_init_domain(const struct scheduler *ops, struct domain *d)
@@ -1333,8 +1323,8 @@ static int sedf_adjust_weights(struct cp
     struct vcpu *p;
     struct domain      *d;
     unsigned int        cpu, nr_cpus = last_cpu(cpu_online_map) + 1;
-    int                *sumw = xmalloc_array(int, nr_cpus);
-    s_time_t           *sumt = xmalloc_array(s_time_t, nr_cpus);
+    int                *sumw = xzalloc_array(int, nr_cpus);
+    s_time_t           *sumt = xzalloc_array(s_time_t, nr_cpus);
 
     if ( !sumw || !sumt )
     {
@@ -1342,8 +1332,6 @@ static int sedf_adjust_weights(struct cp
         xfree(sumw);
         return -ENOMEM;
     }
-    memset(sumw, 0, nr_cpus * sizeof(*sumw));
-    memset(sumt, 0, nr_cpus * sizeof(*sumt));
 
     /* Sum across all weights. */
     rcu_read_lock(&domlist_read_lock);
--- a/xen/common/schedule.c
+++ b/xen/common/schedule.c
@@ -233,14 +233,13 @@ int sched_move_domain(struct domain *d, 
     if ( domdata == NULL )
         return -ENOMEM;
 
-    vcpu_priv = xmalloc_array(void *, d->max_vcpus);
+    vcpu_priv = xzalloc_array(void *, d->max_vcpus);
     if ( vcpu_priv == NULL )
     {
         SCHED_OP(c->sched, free_domdata, domdata);
         return -ENOMEM;
     }
 
-    memset(vcpu_priv, 0, d->max_vcpus * sizeof(void *));
     for_each_vcpu ( d, v )
     {
         vcpu_priv[v->vcpu_id] = SCHED_OP(c->sched, alloc_vdata, v, domdata);
--- a/xen/common/wait.c
+++ b/xen/common/wait.c
@@ -41,11 +41,10 @@ int init_waitqueue_vcpu(struct vcpu *v)
 {
     struct waitqueue_vcpu *wqv;
 
-    wqv = xmalloc(struct waitqueue_vcpu);
+    wqv = xzalloc(struct waitqueue_vcpu);
     if ( wqv == NULL )
         return -ENOMEM;
 
-    memset(wqv, 0, sizeof(*wqv));
     INIT_LIST_HEAD(&wqv->list);
     wqv->vcpu = v;
 
--- a/xen/common/xenoprof.c
+++ b/xen/common/xenoprof.c
@@ -193,17 +193,14 @@ static int alloc_xenoprof_struct(
     unsigned max_max_samples;
     int i;
 
-    d->xenoprof = xmalloc(struct xenoprof);
-
+    d->xenoprof = xzalloc(struct xenoprof);
     if ( d->xenoprof == NULL )
     {
         printk("alloc_xenoprof_struct(): memory allocation failed\n");
         return -ENOMEM;
     }
 
-    memset(d->xenoprof, 0, sizeof(*d->xenoprof));
-
-    d->xenoprof->vcpu = xmalloc_array(struct xenoprof_vcpu, d->max_vcpus);
+    d->xenoprof->vcpu = xzalloc_array(struct xenoprof_vcpu, d->max_vcpus);
     if ( d->xenoprof->vcpu == NULL )
     {
         xfree(d->xenoprof);
@@ -212,8 +209,6 @@ static int alloc_xenoprof_struct(
         return -ENOMEM;
     }
 
-    memset(d->xenoprof->vcpu, 0, d->max_vcpus * sizeof(*d->xenoprof->vcpu));
-
     nvcpu = 0;
     for_each_vcpu ( d, v )
         nvcpu++;


