WARNING - OLD ARCHIVES

This is an archived copy of the Xen.org mailing list, which we have preserved to ensure that existing links to archives are not broken. The live archive, which contains the latest emails, can be found at http://lists.xen.org/

xen-changelog

[Xen-changelog] [xen-unstable] use xzalloc in x86 code

To: xen-changelog@xxxxxxxxxxxxxxxxxxx
Subject: [Xen-changelog] [xen-unstable] use xzalloc in x86 code
From: Xen patchbot-unstable <patchbot@xxxxxxx>
Date: Wed, 05 Oct 2011 01:00:17 +0100
Delivery-date: Tue, 04 Oct 2011 17:03:59 -0700
Envelope-to: www-data@xxxxxxxxxxxxxxxxxxx
List-help: <mailto:xen-changelog-request@lists.xensource.com?subject=help>
List-id: BK change log <xen-changelog.lists.xensource.com>
List-post: <mailto:xen-changelog@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=unsubscribe>
Reply-to: xen-devel@xxxxxxxxxxxxxxxxxxx
Sender: xen-changelog-bounces@xxxxxxxxxxxxxxxxxxx
# HG changeset patch
# User Jan Beulich <jbeulich@xxxxxxxx>
# Date 1317730710 -7200
# Node ID 0b66e6450ffe6823d8b323ef4248b38fe7372d54
# Parent  4dc9c522abf516c3f6d4e9f7ef1222eb7fdedb06
use xzalloc in x86 code

This includes the removal of a redundant memset() from microcode_amd.c.

Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
Acked-by: Keir Fraser <keir@xxxxxxx>
---
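
For context, a minimal sketch of the conversion applied throughout this patch, using the acpi_cpufreq_data hunk below as the example. xzalloc(), xzalloc_array(), xzalloc_bytes() and _xzalloc() return memory that is already zero-filled, so the explicit memset() after each allocation can simply be dropped:

    /* Before: allocate, check for failure, then clear the structure by hand. */
    data = xmalloc(struct acpi_cpufreq_data);
    if (!data)
        return -ENOMEM;
    memset(data, 0, sizeof(struct acpi_cpufreq_data));

    /* After: xzalloc() hands back zeroed memory, so the memset() goes away. */
    data = xzalloc(struct acpi_cpufreq_data);
    if (!data)
        return -ENOMEM;

    /* The aligned allocator follows the same pattern: _xzalloc(size, align)
     * replaces _xmalloc(size, align) + memset(), as in the i387.c and
     * xstate.c hunks below. */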


diff -r 4dc9c522abf5 -r 0b66e6450ffe xen/arch/x86/acpi/cpu_idle.c
--- a/xen/arch/x86/acpi/cpu_idle.c      Tue Oct 04 14:17:28 2011 +0200
+++ b/xen/arch/x86/acpi/cpu_idle.c      Tue Oct 04 14:18:30 2011 +0200
@@ -602,10 +602,9 @@
     if ( !acpi_power )
     {
         int i;
-        acpi_power = xmalloc(struct acpi_processor_power);
+        acpi_power = xzalloc(struct acpi_processor_power);
         if ( !acpi_power )
             return -ENOMEM;
-        memset(acpi_power, 0, sizeof(*acpi_power));
 
         for ( i = 0; i < ACPI_PROCESSOR_MAX_POWER; i++ )
             acpi_power->states[i].idx = i;
diff -r 4dc9c522abf5 -r 0b66e6450ffe xen/arch/x86/acpi/cpufreq/cpufreq.c
--- a/xen/arch/x86/acpi/cpufreq/cpufreq.c       Tue Oct 04 14:17:28 2011 +0200
+++ b/xen/arch/x86/acpi/cpufreq/cpufreq.c       Tue Oct 04 14:18:30 2011 +0200
@@ -552,10 +552,9 @@
     struct cpuinfo_x86 *c = &cpu_data[policy->cpu];
     struct processor_performance *perf;
 
-    data = xmalloc(struct acpi_cpufreq_data);
+    data = xzalloc(struct acpi_cpufreq_data);
     if (!data)
         return -ENOMEM;
-    memset(data, 0, sizeof(struct acpi_cpufreq_data));
 
     cpufreq_drv_data[cpu] = data;
 
diff -r 4dc9c522abf5 -r 0b66e6450ffe xen/arch/x86/acpi/cpufreq/powernow.c
--- a/xen/arch/x86/acpi/cpufreq/powernow.c      Tue Oct 04 14:17:28 2011 +0200
+++ b/xen/arch/x86/acpi/cpufreq/powernow.c      Tue Oct 04 14:18:30 2011 +0200
@@ -189,10 +189,9 @@
     uint64_t msr_content;
     struct cpuinfo_x86 *c = &cpu_data[policy->cpu];
 
-    data = xmalloc(struct acpi_cpufreq_data);
+    data = xzalloc(struct acpi_cpufreq_data);
     if (!data)
         return -ENOMEM;
-    memset(data, 0, sizeof(struct acpi_cpufreq_data));
 
     cpufreq_drv_data[cpu] = data;
 
diff -r 4dc9c522abf5 -r 0b66e6450ffe xen/arch/x86/cpu/mcheck/mce.c
--- a/xen/arch/x86/cpu/mcheck/mce.c     Tue Oct 04 14:17:28 2011 +0200
+++ b/xen/arch/x86/cpu/mcheck/mce.c     Tue Oct 04 14:18:30 2011 +0200
@@ -113,7 +113,7 @@
     if (!mb)
         return NULL;
 
-    mb->bank_map = xmalloc_array(unsigned long,
+    mb->bank_map = xzalloc_array(unsigned long,
                                  BITS_TO_LONGS(nr_mce_banks));
     if (!mb->bank_map)
     {
@@ -122,7 +122,6 @@
     }
 
     mb->num = nr_mce_banks;
-    memset(mb->bank_map, 0, sizeof(long) * BITS_TO_LONGS(nr_mce_banks));
 
     return mb;
 }
diff -r 4dc9c522abf5 -r 0b66e6450ffe xen/arch/x86/cpu/mcheck/vmce.c
--- a/xen/arch/x86/cpu/mcheck/vmce.c    Tue Oct 04 14:17:28 2011 +0200
+++ b/xen/arch/x86/cpu/mcheck/vmce.c    Tue Oct 04 14:18:30 2011 +0200
@@ -362,14 +362,13 @@
 {
     struct bank_entry *entry;
 
-    entry = xmalloc(struct bank_entry);
+    entry = xzalloc(struct bank_entry);
     if ( entry == NULL )
     {
         printk(KERN_ERR "MCE: malloc bank_entry failed\n");
         return NULL;
     }
 
-    memset(entry, 0x0, sizeof(entry));
     INIT_LIST_HEAD(&entry->list);
     return entry;
 }
diff -r 4dc9c522abf5 -r 0b66e6450ffe xen/arch/x86/domain.c
--- a/xen/arch/x86/domain.c     Tue Oct 04 14:17:28 2011 +0200
+++ b/xen/arch/x86/domain.c     Tue Oct 04 14:18:30 2011 +0200
@@ -563,11 +563,10 @@
 
     if ( !is_idle_domain(d) )
     {
-        d->arch.cpuids = xmalloc_array(cpuid_input_t, MAX_CPUID_INPUT);
+        d->arch.cpuids = xzalloc_array(cpuid_input_t, MAX_CPUID_INPUT);
         rc = -ENOMEM;
         if ( d->arch.cpuids == NULL )
             goto fail;
-        memset(d->arch.cpuids, 0, MAX_CPUID_INPUT * sizeof(*d->arch.cpuids));
         for ( i = 0; i < MAX_CPUID_INPUT; i++ )
         {
             d->arch.cpuids[i].input[0] = XEN_CPUID_INPUT_UNUSED;
diff -r 4dc9c522abf5 -r 0b66e6450ffe xen/arch/x86/domain_build.c
--- a/xen/arch/x86/domain_build.c       Tue Oct 04 14:17:28 2011 +0200
+++ b/xen/arch/x86/domain_build.c       Tue Oct 04 14:18:30 2011 +0200
@@ -92,10 +92,9 @@
     if ( opt_dom0_max_vcpus > MAX_VIRT_CPUS )
         opt_dom0_max_vcpus = MAX_VIRT_CPUS;
 
-    dom0->vcpu = xmalloc_array(struct vcpu *, opt_dom0_max_vcpus);
+    dom0->vcpu = xzalloc_array(struct vcpu *, opt_dom0_max_vcpus);
     if ( !dom0->vcpu )
         return NULL;
-    memset(dom0->vcpu, 0, opt_dom0_max_vcpus * sizeof(*dom0->vcpu));
     dom0->max_vcpus = opt_dom0_max_vcpus;
 
     return alloc_vcpu(dom0, 0, 0);
diff -r 4dc9c522abf5 -r 0b66e6450ffe xen/arch/x86/hpet.c
--- a/xen/arch/x86/hpet.c       Tue Oct 04 14:17:28 2011 +0200
+++ b/xen/arch/x86/hpet.c       Tue Oct 04 14:18:30 2011 +0200
@@ -365,10 +365,9 @@
     num_chs = ((id & HPET_ID_NUMBER) >> HPET_ID_NUMBER_SHIFT);
     num_chs++; /* Value read out starts from 0 */
 
-    hpet_events = xmalloc_array(struct hpet_event_channel, num_chs);
+    hpet_events = xzalloc_array(struct hpet_event_channel, num_chs);
     if ( !hpet_events )
         return;
-    memset(hpet_events, 0, num_chs * sizeof(*hpet_events));
 
     for ( i = 0; i < num_chs; i++ )
     {
@@ -504,10 +503,9 @@
             return;
 
         if ( !hpet_events )
-            hpet_events = xmalloc(struct hpet_event_channel);
+            hpet_events = xzalloc(struct hpet_event_channel);
         if ( !hpet_events )
             return;
-        memset(hpet_events, 0, sizeof(*hpet_events));
         hpet_events->irq = -1;
 
         /* Start HPET legacy interrupts */
diff -r 4dc9c522abf5 -r 0b66e6450ffe xen/arch/x86/hvm/hvm.c
--- a/xen/arch/x86/hvm/hvm.c    Tue Oct 04 14:17:28 2011 +0200
+++ b/xen/arch/x86/hvm/hvm.c    Tue Oct 04 14:18:30 2011 +0200
@@ -444,17 +444,13 @@
     INIT_LIST_HEAD(&d->arch.hvm_domain.msixtbl_list);
     spin_lock_init(&d->arch.hvm_domain.msixtbl_list_lock);
 
-    d->arch.hvm_domain.pbuf = xmalloc_array(char, HVM_PBUF_SIZE);
-    d->arch.hvm_domain.params = xmalloc_array(uint64_t, HVM_NR_PARAMS);
+    d->arch.hvm_domain.pbuf = xzalloc_array(char, HVM_PBUF_SIZE);
+    d->arch.hvm_domain.params = xzalloc_array(uint64_t, HVM_NR_PARAMS);
     d->arch.hvm_domain.io_handler = xmalloc(struct hvm_io_handler);
     rc = -ENOMEM;
     if ( !d->arch.hvm_domain.pbuf || !d->arch.hvm_domain.params ||
          !d->arch.hvm_domain.io_handler )
         goto fail0;
-    memset(d->arch.hvm_domain.pbuf, 0,
-           HVM_PBUF_SIZE * sizeof(*d->arch.hvm_domain.pbuf));
-    memset(d->arch.hvm_domain.params, 0,
-           HVM_NR_PARAMS * sizeof(*d->arch.hvm_domain.params));
     d->arch.hvm_domain.io_handler->num_slot = 0;
 
     hvm_init_guest_time(d);
diff -r 4dc9c522abf5 -r 0b66e6450ffe xen/arch/x86/hvm/mtrr.c
--- a/xen/arch/x86/hvm/mtrr.c   Tue Oct 04 14:17:28 2011 +0200
+++ b/xen/arch/x86/hvm/mtrr.c   Tue Oct 04 14:18:30 2011 +0200
@@ -202,10 +202,9 @@
 
     memset(m, 0, sizeof(*m));
 
-    m->var_ranges = xmalloc_array(struct mtrr_var_range, MTRR_VCNT);
+    m->var_ranges = xzalloc_array(struct mtrr_var_range, MTRR_VCNT);
     if ( m->var_ranges == NULL )
         return -ENOMEM;
-    memset(m->var_ranges, 0, MTRR_VCNT * sizeof(struct mtrr_var_range));
 
     m->mtrr_cap = (1u << 10) | (1u << 8) | MTRR_VCNT;
 
@@ -608,12 +607,10 @@
          !is_hvm_domain(d) )
         return -EINVAL;
 
-    range = xmalloc(struct hvm_mem_pinned_cacheattr_range);
+    range = xzalloc(struct hvm_mem_pinned_cacheattr_range);
     if ( range == NULL )
         return -ENOMEM;
 
-    memset(range, 0, sizeof(*range));
-
     range->start = gfn_start;
     range->end = gfn_end;
     range->type = type;
diff -r 4dc9c522abf5 -r 0b66e6450ffe xen/arch/x86/hvm/svm/vpmu.c
--- a/xen/arch/x86/hvm/svm/vpmu.c       Tue Oct 04 14:17:28 2011 +0200
+++ b/xen/arch/x86/hvm/svm/vpmu.c       Tue Oct 04 14:18:30 2011 +0200
@@ -323,8 +323,7 @@
         }
     }
 
-    ctxt = xmalloc_bytes(sizeof(struct amd_vpmu_context));
-
+    ctxt = xzalloc_bytes(sizeof(struct amd_vpmu_context));
     if ( !ctxt )
     {
         gdprintk(XENLOG_WARNING, "Insufficient memory for PMU, "
@@ -333,7 +332,6 @@
         return;
     }
 
-    memset(ctxt, 0, sizeof(struct amd_vpmu_context));
     vpmu->context = (void *)ctxt;
     vpmu->flags |= VPMU_CONTEXT_ALLOCATED;
 }
diff -r 4dc9c522abf5 -r 0b66e6450ffe xen/arch/x86/hvm/vmx/vpmu_core2.c
--- a/xen/arch/x86/hvm/vmx/vpmu_core2.c Tue Oct 04 14:17:28 2011 +0200
+++ b/xen/arch/x86/hvm/vmx/vpmu_core2.c Tue Oct 04 14:18:30 2011 +0200
@@ -328,19 +328,15 @@
     vmx_write_guest_msr(MSR_CORE_PERF_GLOBAL_CTRL,
                  core2_calc_intial_glb_ctrl_msr());
 
-    pmu_enable = xmalloc_bytes(sizeof(struct core2_pmu_enable) +
-                 (core2_get_pmc_count()-1)*sizeof(char));
+    pmu_enable = xzalloc_bytes(sizeof(struct core2_pmu_enable) +
+                               core2_get_pmc_count() - 1);
     if ( !pmu_enable )
         goto out1;
-    memset(pmu_enable, 0, sizeof(struct core2_pmu_enable) +
-                 (core2_get_pmc_count()-1)*sizeof(char));
 
-    core2_vpmu_cxt = xmalloc_bytes(sizeof(struct core2_vpmu_context) +
+    core2_vpmu_cxt = xzalloc_bytes(sizeof(struct core2_vpmu_context) +
                     (core2_get_pmc_count()-1)*sizeof(struct arch_msr_pair));
     if ( !core2_vpmu_cxt )
         goto out2;
-    memset(core2_vpmu_cxt, 0, sizeof(struct core2_vpmu_context) +
-                    (core2_get_pmc_count()-1)*sizeof(struct arch_msr_pair));
     core2_vpmu_cxt->pmu_enable = pmu_enable;
     vpmu->context = (void *)core2_vpmu_cxt;
 
diff -r 4dc9c522abf5 -r 0b66e6450ffe xen/arch/x86/i387.c
--- a/xen/arch/x86/i387.c       Tue Oct 04 14:17:28 2011 +0200
+++ b/xen/arch/x86/i387.c       Tue Oct 04 14:18:30 2011 +0200
@@ -245,13 +245,12 @@
         v->arch.fpu_ctxt = &v->arch.xsave_area->fpu_sse;
     else
     {
-        v->arch.fpu_ctxt = _xmalloc(sizeof(v->arch.xsave_area->fpu_sse), 16);
+        v->arch.fpu_ctxt = _xzalloc(sizeof(v->arch.xsave_area->fpu_sse), 16);
         if ( !v->arch.fpu_ctxt )
         {
             rc = -ENOMEM;
             goto done;
         }
-        memset(v->arch.fpu_ctxt, 0, sizeof(v->arch.xsave_area->fpu_sse));
     }
 
 done:
diff -r 4dc9c522abf5 -r 0b66e6450ffe xen/arch/x86/io_apic.c
--- a/xen/arch/x86/io_apic.c    Tue Oct 04 14:17:28 2011 +0200
+++ b/xen/arch/x86/io_apic.c    Tue Oct 04 14:18:30 2011 +0200
@@ -1365,8 +1365,7 @@
     int i, apic;
 
     /* Initialise dynamic irq_2_pin free list. */
-    irq_2_pin = xmalloc_array(struct irq_pin_list, PIN_MAP_SIZE);
-    memset(irq_2_pin, 0, PIN_MAP_SIZE * sizeof(*irq_2_pin));
+    irq_2_pin = xzalloc_array(struct irq_pin_list, PIN_MAP_SIZE);
         
     for (i = 0; i < PIN_MAP_SIZE; i++)
         irq_2_pin[i].pin = -1;
diff -r 4dc9c522abf5 -r 0b66e6450ffe xen/arch/x86/irq.c
--- a/xen/arch/x86/irq.c        Tue Oct 04 14:17:28 2011 +0200
+++ b/xen/arch/x86/irq.c        Tue Oct 04 14:18:30 2011 +0200
@@ -328,17 +328,13 @@
     for (vector = 0; vector < NR_VECTORS; ++vector)
         this_cpu(vector_irq)[vector] = -1;
 
-    irq_desc = xmalloc_array(struct irq_desc, nr_irqs);
-    irq_cfg = xmalloc_array(struct irq_cfg, nr_irqs);
-    irq_vector = xmalloc_array(u8, nr_irqs_gsi);
+    irq_desc = xzalloc_array(struct irq_desc, nr_irqs);
+    irq_cfg = xzalloc_array(struct irq_cfg, nr_irqs);
+    irq_vector = xzalloc_array(u8, nr_irqs_gsi);
     
     if ( !irq_desc || !irq_cfg ||! irq_vector )
         return -ENOMEM;
 
-    memset(irq_desc, 0,  nr_irqs * sizeof(*irq_desc));
-    memset(irq_cfg, 0,  nr_irqs * sizeof(*irq_cfg));
-    memset(irq_vector, 0, nr_irqs_gsi * sizeof(*irq_vector));
-    
     for (irq = 0; irq < nr_irqs; irq++) {
         desc = irq_to_desc(irq);
         cfg = irq_cfg(irq);
@@ -1112,11 +1108,10 @@
 {
     size_t sz = is_hvm_domain(d) ? sizeof(struct pirq) :
                                    offsetof(struct pirq, arch.hvm);
-    struct pirq *pirq = xmalloc_bytes(sz);
+    struct pirq *pirq = xzalloc_bytes(sz);
 
     if ( pirq )
     {
-        memset(pirq, 0, sz);
         if ( is_hvm_domain(d) )
         {
             pirq->arch.hvm.emuirq = IRQ_UNBOUND;
diff -r 4dc9c522abf5 -r 0b66e6450ffe xen/arch/x86/microcode_amd.c
--- a/xen/arch/x86/microcode_amd.c      Tue Oct 04 14:17:28 2011 +0200
+++ b/xen/arch/x86/microcode_amd.c      Tue Oct 04 14:18:30 2011 +0200
@@ -231,7 +231,6 @@
         return -ENOMEM;
     }
 
-    memset(equiv_cpu_table, 0, size);
     memcpy(equiv_cpu_table, (const void *)&buf_pos[3], size);
 
     *offset = size + 12;       /* add header length */
diff -r 4dc9c522abf5 -r 0b66e6450ffe xen/arch/x86/mm/shadow/common.c
--- a/xen/arch/x86/mm/shadow/common.c   Tue Oct 04 14:17:28 2011 +0200
+++ b/xen/arch/x86/mm/shadow/common.c   Tue Oct 04 14:18:30 2011 +0200
@@ -1922,10 +1922,8 @@
     ASSERT(paging_locked_by_me(d));
     ASSERT(!d->arch.paging.shadow.hash_table);
 
-    table = xmalloc_array(struct page_info *, SHADOW_HASH_BUCKETS);
+    table = xzalloc_array(struct page_info *, SHADOW_HASH_BUCKETS);
     if ( !table ) return 1;
-    memset(table, 0, 
-           SHADOW_HASH_BUCKETS * sizeof (struct page_info *));
     d->arch.paging.shadow.hash_table = table;
     return 0;
 }
@@ -2816,7 +2814,7 @@
     /* Make sure this vcpu has a virtual TLB array allocated */
     if ( unlikely(!v->arch.paging.vtlb) )
     {
-        v->arch.paging.vtlb = xmalloc_array(struct shadow_vtlb, VTLB_ENTRIES);
+        v->arch.paging.vtlb = xzalloc_array(struct shadow_vtlb, VTLB_ENTRIES);
         if ( unlikely(!v->arch.paging.vtlb) )
         {
             SHADOW_ERROR("Could not allocate vTLB space for dom %u vcpu %u\n",
@@ -2824,8 +2822,6 @@
             domain_crash(v->domain);
             return;
         }
-        memset(v->arch.paging.vtlb, 0, 
-               VTLB_ENTRIES * sizeof (struct shadow_vtlb));
         spin_lock_init(&v->arch.paging.vtlb_lock);
     }
 #endif /* (SHADOW_OPTIMIZATIONS & SHOPT_VIRTUAL_TLB) */
@@ -3656,9 +3652,8 @@
             goto out_dirty_vram;
         memset(dirty_vram->sl1ma, ~0, sizeof(paddr_t) * nr);
 
-        if ( (dirty_vram->dirty_bitmap = xmalloc_array(uint8_t, dirty_size)) == NULL )
+        if ( (dirty_vram->dirty_bitmap = xzalloc_array(uint8_t, dirty_size)) == NULL )
             goto out_sl1ma;
-        memset(dirty_vram->dirty_bitmap, 0, dirty_size);
 
         dirty_vram->last_dirty = NOW();
 
diff -r 4dc9c522abf5 -r 0b66e6450ffe xen/arch/x86/oprofile/op_model_ppro.c
--- a/xen/arch/x86/oprofile/op_model_ppro.c     Tue Oct 04 14:17:28 2011 +0200
+++ b/xen/arch/x86/oprofile/op_model_ppro.c     Tue Oct 04 14:18:30 2011 +0200
@@ -226,10 +226,9 @@
        struct vpmu_struct *vpmu = vcpu_vpmu(v);
        struct arch_msr_pair *msr_content;
 
-       msr_content = xmalloc_bytes( sizeof(struct arch_msr_pair) * num_counters );
+       msr_content = xzalloc_array(struct arch_msr_pair, num_counters);
        if ( !msr_content )
                goto out;
-       memset(msr_content, 0, sizeof(struct arch_msr_pair) * num_counters);
        vpmu->context = (void *)msr_content;
        vpmu->flags = 0;
        vpmu->flags |= PASSIVE_DOMAIN_ALLOCATED;
diff -r 4dc9c522abf5 -r 0b66e6450ffe xen/arch/x86/x86_64/mmconfig-shared.c
--- a/xen/arch/x86/x86_64/mmconfig-shared.c     Tue Oct 04 14:17:28 2011 +0200
+++ b/xen/arch/x86/x86_64/mmconfig-shared.c     Tue Oct 04 14:18:30 2011 +0200
@@ -57,10 +57,9 @@
         pci_mmcfg_config_num = 0;
     else {
         pci_mmcfg_config_num = 1;
-        pci_mmcfg_config = xmalloc(struct acpi_mcfg_allocation);
+        pci_mmcfg_config = xzalloc(struct acpi_mcfg_allocation);
         if (!pci_mmcfg_config)
             return NULL;
-        memset(pci_mmcfg_config, 0, sizeof(pci_mmcfg_config[0]));
         pci_mmcfg_config[0].address = win << 16;
         pci_mmcfg_config[0].pci_segment = 0;
         pci_mmcfg_config[0].start_bus_number = 0;
@@ -111,10 +110,9 @@
         pci_mmcfg_config_num = 0;
 
     if (pci_mmcfg_config_num) {
-        pci_mmcfg_config = xmalloc(struct acpi_mcfg_allocation);
+        pci_mmcfg_config = xzalloc(struct acpi_mcfg_allocation);
         if (!pci_mmcfg_config)
             return NULL;
-        memset(pci_mmcfg_config, 0, sizeof(pci_mmcfg_config[0]));
         pci_mmcfg_config[0].address = pciexbar & mask;
         pci_mmcfg_config[0].pci_segment = 0;
         pci_mmcfg_config[0].start_bus_number = 0;
diff -r 4dc9c522abf5 -r 0b66e6450ffe xen/arch/x86/x86_64/mmconfig_64.c
--- a/xen/arch/x86/x86_64/mmconfig_64.c Tue Oct 04 14:17:28 2011 +0200
+++ b/xen/arch/x86/x86_64/mmconfig_64.c Tue Oct 04 14:18:30 2011 +0200
@@ -171,13 +171,12 @@
     if (pci_mmcfg_virt)
         return 0;
 
-    pci_mmcfg_virt = xmalloc_array(struct mmcfg_virt, pci_mmcfg_config_num);
+    pci_mmcfg_virt = xzalloc_array(struct mmcfg_virt, pci_mmcfg_config_num);
     if (pci_mmcfg_virt == NULL) {
        printk(KERN_ERR "PCI: Can not allocate memory for mmconfig structures\n");
         pci_mmcfg_config_num = 0;
         return 0;
     }
-    memset(pci_mmcfg_virt, 0, sizeof(*pci_mmcfg_virt) * pci_mmcfg_config_num);
 
     for (i = 0; i < pci_mmcfg_config_num; ++i) {
         pci_mmcfg_virt[i].cfg = &pci_mmcfg_config[i];
diff -r 4dc9c522abf5 -r 0b66e6450ffe xen/arch/x86/xstate.c
--- a/xen/arch/x86/xstate.c     Tue Oct 04 14:17:28 2011 +0200
+++ b/xen/arch/x86/xstate.c     Tue Oct 04 14:18:30 2011 +0200
@@ -105,11 +105,10 @@
     BUG_ON(xsave_cntxt_size < XSTATE_AREA_MIN_SIZE);
 
     /* XSAVE/XRSTOR requires the save area be 64-byte-boundary aligned. */
-    save_area = _xmalloc(xsave_cntxt_size, 64);
+    save_area = _xzalloc(xsave_cntxt_size, 64);
     if ( save_area == NULL )
         return -ENOMEM;
 
-    memset(save_area, 0, xsave_cntxt_size);
     ((u32 *)save_area)[6] = 0x1f80;  /* MXCSR */
     *(uint64_t *)(save_area + 512) = XSTATE_FP_SSE;  /* XSETBV */
 

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog
