
[PATCH v2 2/6] x86/APIC: drop clustered_apic_check() hook


  • To: "xen-devel@xxxxxxxxxxxxxxxxxxxx" <xen-devel@xxxxxxxxxxxxxxxxxxxx>
  • From: Jan Beulich <jbeulich@xxxxxxxx>
  • Date: Fri, 5 Nov 2021 13:34:12 +0100
  • Cc: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>, Wei Liu <wl@xxxxxxx>, Roger Pau Monné <roger.pau@xxxxxxxxxx>
  • Delivery-date: Fri, 05 Nov 2021 12:34:31 +0000
  • List-id: Xen developer discussion <xen-devel.lists.xenproject.org>

The hook functions have been empty forever in the x2APIC case, and for
a long time have done nothing beyond issuing a printk() in the xAPIC
one. Since that printk() is (a) generally useful (i.e. also in the
x2APIC case) and (b) better issued only once the final APIC driver to
use has been determined, move (and generalize) it into
connect_bsp_APIC().

Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
---
v2: New.
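
For reference, the classification the generalized printk() performs can
be illustrated standalone (a minimal sketch; apic_mode_name() and the
stand-in stub functions below are hypothetical, whereas in the patch
itself INT_DEST_MODE and init_apic_ldr resolve, via mach_apic.h, to
fields of the active struct genapic):

    /* Minimal, self-contained sketch of the mode naming logic. */
    #include <stdio.h>

    static void init_apic_ldr_flat(void) { }            /* stand-in stub */
    static void init_apic_ldr_x2apic_cluster(void) { }  /* stand-in stub */

    /* Hypothetical helper mirroring the ternary chain in the new printk(). */
    static const char *apic_mode_name(int int_dest_mode, void (*ldr)(void))
    {
        if (!int_dest_mode)             /* physical delivery */
            return "Physical";
        if (ldr == init_apic_ldr_flat)  /* logical flat delivery */
            return "Flat";
        return "Clustered";             /* any other logical mode */
    }

    int main(void)
    {
        printf("Enabling APIC mode:  %s.  Using %d I/O APICs\n",
               apic_mode_name(1, init_apic_ldr_x2apic_cluster), 2);
        return 0;
    }

This also makes clear why the x2APIC physical driver ends up reported
as "Physical" and the x2APIC cluster driver as "Clustered" without any
per-driver hook remaining.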

--- a/xen/arch/x86/acpi/boot.c
+++ b/xen/arch/x86/acpi/boot.c
@@ -674,9 +674,7 @@ static void __init acpi_process_madt(voi
                        error = acpi_parse_madt_ioapic_entries();
                        if (!error) {
                                acpi_ioapic = true;
-
                                smp_found_config = true;
-                               clustered_apic_check();
                        }
                }
                if (error == -EINVAL) {
--- a/xen/arch/x86/apic.c
+++ b/xen/arch/x86/apic.c
@@ -243,6 +243,12 @@ void __init connect_bsp_APIC(void)
         outb(0x70, 0x22);
         outb(0x01, 0x23);
     }
+
+    printk("Enabling APIC mode:  %s.  Using %d I/O APICs\n",
+           !INT_DEST_MODE ? "Physical"
+                          : init_apic_ldr == init_apic_ldr_flat ? "Flat"
+                                                                : "Clustered",
+           nr_ioapics);
     enable_apic_mode();
 }
 
--- a/xen/arch/x86/genapic/delivery.c
+++ b/xen/arch/x86/genapic/delivery.c
@@ -19,11 +19,6 @@ void init_apic_ldr_flat(void)
        apic_write(APIC_LDR, val);
 }
 
-void __init clustered_apic_check_flat(void)
-{
-       printk("Enabling APIC mode:  Flat.  Using %d I/O APICs\n", nr_ioapics);
-}
-
 const cpumask_t *vector_allocation_cpumask_flat(int cpu)
 {
        return &cpu_online_map;
@@ -43,11 +38,6 @@ void init_apic_ldr_phys(void)
        /* We only deliver in phys mode - no setup needed. */
 }
 
-void __init clustered_apic_check_phys(void)
-{
-       printk("Enabling APIC mode:  Phys.  Using %d I/O APICs\n", nr_ioapics);
-}
-
 const cpumask_t *vector_allocation_cpumask_phys(int cpu)
 {
        return cpumask_of(cpu);
--- a/xen/arch/x86/genapic/x2apic.c
+++ b/xen/arch/x86/genapic/x2apic.c
@@ -75,10 +75,6 @@ static void init_apic_ldr_x2apic_cluster
     cpumask_set_cpu(this_cpu, per_cpu(cluster_cpus, this_cpu));
 }
 
-static void __init clustered_apic_check_x2apic(void)
-{
-}
-
 static const cpumask_t *vector_allocation_cpumask_x2apic_cluster(int cpu)
 {
     return per_cpu(cluster_cpus, cpu);
@@ -175,7 +171,6 @@ static const struct genapic __initconstr
     .int_delivery_mode = dest_Fixed,
     .int_dest_mode = 0 /* physical delivery */,
     .init_apic_ldr = init_apic_ldr_phys,
-    .clustered_apic_check = clustered_apic_check_x2apic,
     .vector_allocation_cpumask = vector_allocation_cpumask_phys,
     .cpu_mask_to_apicid = cpu_mask_to_apicid_phys,
     .send_IPI_mask = send_IPI_mask_x2apic_phys,
@@ -187,7 +182,6 @@ static const struct genapic __initconstr
     .int_delivery_mode = dest_LowestPrio,
     .int_dest_mode = 1 /* logical delivery */,
     .init_apic_ldr = init_apic_ldr_x2apic_cluster,
-    .clustered_apic_check = clustered_apic_check_x2apic,
     .vector_allocation_cpumask = vector_allocation_cpumask_x2apic_cluster,
     .cpu_mask_to_apicid = cpu_mask_to_apicid_x2apic_cluster,
     .send_IPI_mask = send_IPI_mask_x2apic_cluster,
--- a/xen/arch/x86/mpparse.c
+++ b/xen/arch/x86/mpparse.c
@@ -410,7 +410,6 @@ static int __init smp_read_mpc(struct mp
                        }
                }
        }
-       clustered_apic_check();
        if (!num_processors)
                printk(KERN_ERR "SMP mptable: no processors registered!\n");
        return num_processors;
--- a/xen/include/asm-x86/genapic.h
+++ b/xen/include/asm-x86/genapic.h
@@ -32,7 +32,6 @@ struct genapic {
        int int_delivery_mode;
        int int_dest_mode;
        void (*init_apic_ldr)(void);
-       void (*clustered_apic_check)(void);
        const cpumask_t *(*vector_allocation_cpumask)(int cpu);
        unsigned int (*cpu_mask_to_apicid)(const cpumask_t *cpumask);
        void (*send_IPI_mask)(const cpumask_t *mask, int vector);
@@ -54,7 +53,6 @@ extern const struct genapic apic_bigsmp;
 void send_IPI_self_legacy(uint8_t vector);
 
 void init_apic_ldr_flat(void);
-void clustered_apic_check_flat(void);
 unsigned int cpu_mask_to_apicid_flat(const cpumask_t *cpumask);
 void send_IPI_mask_flat(const cpumask_t *mask, int vector);
 const cpumask_t *vector_allocation_cpumask_flat(int cpu);
@@ -62,14 +60,12 @@ const cpumask_t *vector_allocation_cpuma
        .int_delivery_mode = dest_LowestPrio, \
        .int_dest_mode = 1 /* logical delivery */, \
        .init_apic_ldr = init_apic_ldr_flat, \
-       .clustered_apic_check = clustered_apic_check_flat, \
        .vector_allocation_cpumask = vector_allocation_cpumask_flat, \
        .cpu_mask_to_apicid = cpu_mask_to_apicid_flat, \
        .send_IPI_mask = send_IPI_mask_flat, \
        .send_IPI_self = send_IPI_self_legacy
 
 void init_apic_ldr_phys(void);
-void clustered_apic_check_phys(void);
 unsigned int cpu_mask_to_apicid_phys(const cpumask_t *cpumask);
 void send_IPI_mask_phys(const cpumask_t *mask, int vector);
 const cpumask_t *vector_allocation_cpumask_phys(int cpu);
@@ -77,7 +73,6 @@ const cpumask_t *vector_allocation_cpuma
        .int_delivery_mode = dest_Fixed, \
        .int_dest_mode = 0 /* physical delivery */, \
        .init_apic_ldr = init_apic_ldr_phys, \
-       .clustered_apic_check = clustered_apic_check_phys, \
        .vector_allocation_cpumask = vector_allocation_cpumask_phys, \
        .cpu_mask_to_apicid = cpu_mask_to_apicid_phys, \
        .send_IPI_mask = send_IPI_mask_phys, \
--- a/xen/include/asm-x86/mach-generic/mach_apic.h
+++ b/xen/include/asm-x86/mach-generic/mach_apic.h
@@ -14,7 +14,6 @@
 #define INT_DEST_MODE (genapic.int_dest_mode)
 #define TARGET_CPUS ((const typeof(cpu_online_map) *)&cpu_online_map)
 #define init_apic_ldr (genapic.init_apic_ldr)
-#define clustered_apic_check (genapic.clustered_apic_check)
 #define cpu_mask_to_apicid(mask) ({ \
        /* \
         * There are a number of places where the address of a local variable \