
[Xen-devel] [PATCH 3/6] x86/mce: adapt mcaction.c to Xen hypervisor coding style

Signed-off-by: Haozhong Zhang <haozhong.zhang@xxxxxxxxx>
---
 xen/arch/x86/cpu/mcheck/mcaction.c | 74 +++++++++++++++++++++-----------------
 1 file changed, 41 insertions(+), 33 deletions(-)
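
For reference, the conversions follow the hypervisor coding style
(CODING_STYLE in the Xen tree): a space on the inside of the
parentheses of if/while conditions, and the opening brace of a
compound statement on its own line. A minimal before/after sketch,
taken from the first hunk below:

    /* Before (Linux style): */
    if (!rec) {
        mi->flags |= MCINFO_FLAGS_UNCOMPLETE;
        return NULL;
    }

    /* After (Xen hypervisor style): */
    if ( !rec )
    {
        mi->flags |= MCINFO_FLAGS_UNCOMPLETE;
        return NULL;
    }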

diff --git a/xen/arch/x86/cpu/mcheck/mcaction.c b/xen/arch/x86/cpu/mcheck/mcaction.c
index f959bed2cb..e42267414e 100644
--- a/xen/arch/x86/cpu/mcheck/mcaction.c
+++ b/xen/arch/x86/cpu/mcheck/mcaction.c
@@ -6,15 +6,16 @@
 
 static struct mcinfo_recovery *
 mci_action_add_pageoffline(int bank, struct mc_info *mi,
-                       uint64_t mfn, uint32_t status)
+                           uint64_t mfn, uint32_t status)
 {
     struct mcinfo_recovery *rec;
 
-    if (!mi)
+    if ( !mi )
         return NULL;
 
     rec = x86_mcinfo_reserve(mi, sizeof(*rec), MC_TYPE_RECOVERY);
-    if (!rec) {
+    if ( !rec )
+    {
         mi->flags |= MCINFO_FLAGS_UNCOMPLETE;
         return NULL;
     }
@@ -46,14 +47,15 @@ mc_memerr_dhandler(struct mca_binfo *binfo,
     int vmce_vcpuid;
     unsigned int mc_vcpuid;
 
-    if (!mc_check_addr(bank->mc_status, bank->mc_misc, MC_ADDR_PHYSICAL)) {
+    if ( !mc_check_addr(bank->mc_status, bank->mc_misc, MC_ADDR_PHYSICAL) )
+    {
         dprintk(XENLOG_WARNING,
-            "No physical address provided for memory error\n");
+                "No physical address provided for memory error\n");
         return;
     }
 
     mfn = bank->mc_addr >> PAGE_SHIFT;
-    if (offline_page(mfn, 1, &status))
+    if ( offline_page(mfn, 1, &status) )
     {
         dprintk(XENLOG_WARNING,
                 "Failed to offline page %lx for MCE error\n", mfn);
@@ -63,21 +65,26 @@ mc_memerr_dhandler(struct mca_binfo *binfo,
     mci_action_add_pageoffline(binfo->bank, binfo->mi, mfn, status);
 
     /* This is free page */
-    if (status & PG_OFFLINE_OFFLINED)
+    if ( status & PG_OFFLINE_OFFLINED )
         *result = MCER_RECOVERED;
-    else if (status & PG_OFFLINE_AGAIN)
+    else if ( status & PG_OFFLINE_AGAIN )
         *result = MCER_CONTINUE;
-    else if (status & PG_OFFLINE_PENDING) {
+    else if ( status & PG_OFFLINE_PENDING )
+    {
         /* This page has owner */
-        if (status & PG_OFFLINE_OWNED) {
+        if ( status & PG_OFFLINE_OWNED )
+        {
             bank->mc_domid = status >> PG_OFFLINE_OWNER_SHIFT;
             mce_printk(MCE_QUIET, "MCE: This error page is ownded"
-              " by DOM %d\n", bank->mc_domid);
-            /* XXX: Cannot handle shared pages yet
+                       " by DOM %d\n", bank->mc_domid);
+            /*
+             * XXX: Cannot handle shared pages yet
              * (this should identify all domains and gfn mapping to
-             *  the mfn in question) */
+             *  the mfn in question)
+             */
             BUG_ON( bank->mc_domid == DOMID_COW );
-            if ( bank->mc_domid != DOMID_XEN ) {
+            if ( bank->mc_domid != DOMID_XEN )
+            {
                 d = get_domain_by_id(bank->mc_domid);
                 ASSERT(d);
                 gfn = get_gpfn_from_mfn((bank->mc_addr) >> PAGE_SHIFT);
@@ -85,45 +92,46 @@ mc_memerr_dhandler(struct mca_binfo *binfo,
                 if ( unmmap_broken_page(d, _mfn(mfn), gfn) )
                 {
                     printk("Unmap broken memory %lx for DOM%d failed\n",
-                            mfn, d->domain_id);
+                           mfn, d->domain_id);
                     goto vmce_failed;
                 }
 
                 mc_vcpuid = global->mc_vcpuid;
-                if (mc_vcpuid == XEN_MC_VCPUID_INVALID ||
-                    /*
-                     * Because MC# may happen asynchronously with the actual
-                     * operation that triggers the error, the domain ID as
-                     * well as the vCPU ID collected in 'global' at MC# are
-                     * not always precise. In that case, fallback to broadcast.
-                     */
-                    global->mc_domid != bank->mc_domid ||
-                    (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
-                     (!(global->mc_gstatus & MCG_STATUS_LMCE) ||
-                      !(d->vcpu[mc_vcpuid]->arch.vmce.mcg_ext_ctl &
-                        MCG_EXT_CTL_LMCE_EN))))
+                if ( mc_vcpuid == XEN_MC_VCPUID_INVALID ||
+                     /*
+                      * Because MC# may happen asynchronously with the actual
+                      * operation that triggers the error, the domain ID as
+                      * well as the vCPU ID collected in 'global' at MC# are
+                      * not always precise. In that case, fallback to broadcast.
+                      */
+                     global->mc_domid != bank->mc_domid ||
+                     (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
+                      (!(global->mc_gstatus & MCG_STATUS_LMCE) ||
+                       !(d->vcpu[mc_vcpuid]->arch.vmce.mcg_ext_ctl &
+                         MCG_EXT_CTL_LMCE_EN))) )
                     vmce_vcpuid = VMCE_INJECT_BROADCAST;
                 else
                     vmce_vcpuid = mc_vcpuid;
 
                 bank->mc_addr = gfn << PAGE_SHIFT |
-                  (bank->mc_addr & (PAGE_SIZE -1 ));
-                if (fill_vmsr_data(bank, d, global->mc_gstatus, vmce_vcpuid))
+                                (bank->mc_addr & (PAGE_SIZE - 1));
+                if ( fill_vmsr_data(bank, d, global->mc_gstatus, vmce_vcpuid) )
                 {
                     mce_printk(MCE_QUIET, "Fill vMCE# data for DOM%d "
-                      "failed\n", bank->mc_domid);
+                               "failed\n", bank->mc_domid);
                     goto vmce_failed;
                 }
 
-                /* We will inject vMCE to DOMU*/
+                /* We will inject vMCE to DOMU */
                 if ( inject_vmce(d, vmce_vcpuid) < 0 )
                 {
                     mce_printk(MCE_QUIET, "inject vMCE to DOM%d"
-                      " failed\n", d->domain_id);
+                               " failed\n", d->domain_id);
                     goto vmce_failed;
                 }
 
-                /* Impacted domain go on with domain's recovery job
+                /*
+                 * Impacted domain go on with domain's recovery job
                  * if the domain has its own MCA handler.
                  * For xen, it has contained the error and finished
                  * its own recovery job.
-- 
2.11.0

