[PATCH v3 for-4.21 2/9] x86/HPET: use single, global, low-priority vector for broadcast IRQ
Using dynamically allocated / maintained vectors has several downsides:
- possible nesting of IRQs due to the effects of IRQ migration,
- reduction of vectors available for devices,
- IRQs not moving as intended if there's a shortage of vectors,
- higher runtime overhead.
As the vector also doesn't need any particular priority (first and foremost
it really shouldn't be of higher or the same priority as the timer IRQ,
which raises TIMER_SOFTIRQ anyway), simply use the lowest one above the
legacy range. The vector needs reserving early, until it is known whether
it actually is used. If it isn't, it's made available for general use.
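For illustration, with the legacy range occupying the first 16 dynamic
vectors this ends up being vector 0x30 (hence the v3 note below). A sketch
of the arithmetic, assuming FIRST_DYNAMIC_VECTOR is 0x20 as in the current
x86 vector layout:

    /* Sketch only; mirrors irq-vectors.h plus the definition this patch adds. */
    #define FIRST_LEGACY_VECTOR   FIRST_DYNAMIC_VECTOR          /* 0x20 */
    #define LAST_LEGACY_VECTOR    (FIRST_LEGACY_VECTOR + 0xf)   /* 0x2f */
    #define HPET_BROADCAST_VECTOR (LAST_LEGACY_VECTOR + 1)      /* 0x30 */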
With a fixed vector, less updating is now necessary in
set_channel_irq_affinity(); in particular channels no longer need transient
masking, as the necessary update is now atomic. To fully leverage
this, however, we want to stop using hpet_msi_set_affinity() there. With
the transient masking dropped, we're no longer at risk of missing events.
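To make the atomicity aspect concrete, here is a condensed sketch of what
re-targeting a channel now amounts to on the non-remapped path (reduced from
the set_channel_irq_affinity() code further down; only the destination ID in
the MSI address changes, which is a single 32-bit register write):

    /* Condensed from set_channel_irq_affinity(); non-intremap path only. */
    msg.dest32 = cpu_physical_id(ch->cpu);
    msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK;
    msg.address_lo |= MSI_ADDR_DEST_ID(msg.dest32);
    if ( msg.dest32 != ch->msi.msg.dest32 )
        hpet_write32(msg.address_lo, HPET_Tn_ROUTE(ch->idx) + 4);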
Fixes: 996576b965cc ("xen: allow up to 16383 cpus")
Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
Release-Acked-by: Oleksii Kurochko <oleksii.kurochko@xxxxxxxxx>
---
This is an alternative proposal to
https://lists.xen.org/archives/html/xen-devel/2014-03/msg00399.html.
Should we keep hpet_msi_set_affinity() at all? We'd better not have the
generic IRQ subsystem play with our IRQs' affinities, and fixup_irqs()
isn't relevant here. (If we do drop it, that likely would want to be a
separate patch, though.)
The hpet_enable_channel() call could in principle be made (effectively)
conditional, at the price of introducing a check in there. However, much
like eliminating the masking didn't help with the many excess (early)
IRQs I'm observing on Intel hardware, making the call conditional doesn't
help either.
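For reference, such a conditional variant might look like the sketch below
(hypothetical; the patch as posted keeps the unconditional write in
hpet_enable_channel()):

    /* Hypothetical variant: skip the MMIO write when already enabled. */
    static void hpet_enable_channel(struct hpet_event_channel *ch)
    {
        uint32_t cfg = hpet_read32(HPET_Tn_CFG(ch->idx));

        if ( !(cfg & HPET_TN_ENABLE) )
            hpet_write32(cfg | HPET_TN_ENABLE, HPET_Tn_CFG(ch->idx));
        ch->msi.msi_attrib.host_masked = 0;
    }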
The Fixes: tag indicates where the problem got significantly worse; in
principle it was already there before (crashing at perhaps 6 or 7 levels
of nested IRQs).
---
v3: Switch to using vector 0x30, to unbreak AMD, including an adjustment
to AMD IOMMU intremap logic. Adjust condition around assertions in
set_channel_irq_affinity().
v2: Re-work set_channel_irq_affinity() intensively. Re-base over the
dropping of another patch. Drop setup_vector_irq() change.
--- a/xen/arch/x86/hpet.c
+++ b/xen/arch/x86/hpet.c
@@ -9,17 +9,19 @@
#include <xen/timer.h>
#include <xen/smp.h>
#include <xen/softirq.h>
+#include <xen/cpuidle.h>
#include <xen/irq.h>
#include <xen/numa.h>
#include <xen/param.h>
#include <xen/sched.h>
#include <asm/apic.h>
-#include <asm/fixmap.h>
#include <asm/div64.h>
+#include <asm/fixmap.h>
+#include <asm/genapic.h>
#include <asm/hpet.h>
+#include <asm/irq-vectors.h>
#include <asm/msi.h>
-#include <xen/cpuidle.h>
#define MAX_DELTA_NS MILLISECS(10*1000)
#define MIN_DELTA_NS MICROSECS(20)
@@ -251,10 +253,9 @@ static void cf_check hpet_interrupt_hand
ch->event_handler(ch);
}
-static void cf_check hpet_msi_unmask(struct irq_desc *desc)
+static void hpet_enable_channel(struct hpet_event_channel *ch)
{
u32 cfg;
- struct hpet_event_channel *ch = desc->action->dev_id;
cfg = hpet_read32(HPET_Tn_CFG(ch->idx));
cfg |= HPET_TN_ENABLE;
@@ -262,6 +263,11 @@ static void cf_check hpet_msi_unmask(str
ch->msi.msi_attrib.host_masked = 0;
}
+static void cf_check hpet_msi_unmask(struct irq_desc *desc)
+{
+ hpet_enable_channel(desc->action->dev_id);
+}
+
static void cf_check hpet_msi_mask(struct irq_desc *desc)
{
u32 cfg;
@@ -303,15 +309,13 @@ static void cf_check hpet_msi_set_affini
struct hpet_event_channel *ch = desc->action->dev_id;
struct msi_msg msg = ch->msi.msg;
- msg.dest32 = set_desc_affinity(desc, mask);
- if ( msg.dest32 == BAD_APICID )
- return;
+ /* This really is only for dump_irqs(). */
+ cpumask_copy(desc->arch.cpu_mask, mask);
- msg.data &= ~MSI_DATA_VECTOR_MASK;
- msg.data |= MSI_DATA_VECTOR(desc->arch.vector);
+ msg.dest32 = cpu_mask_to_apicid(mask);
msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK;
msg.address_lo |= MSI_ADDR_DEST_ID(msg.dest32);
- if ( msg.data != ch->msi.msg.data || msg.dest32 != ch->msi.msg.dest32 )
+ if ( msg.dest32 != ch->msi.msg.dest32 )
hpet_msi_write(ch, &msg);
}
@@ -324,7 +328,7 @@ static hw_irq_controller hpet_msi_type =
.shutdown = hpet_msi_shutdown,
.enable = hpet_msi_unmask,
.disable = hpet_msi_mask,
- .ack = ack_nonmaskable_msi_irq,
+ .ack = irq_actor_none,
.end = end_nonmaskable_irq,
.set_affinity = hpet_msi_set_affinity,
};
@@ -343,6 +347,12 @@ static int __init hpet_setup_msi_irq(str
u32 cfg = hpet_read32(HPET_Tn_CFG(ch->idx));
irq_desc_t *desc = irq_to_desc(ch->msi.irq);
+ clear_irq_vector(ch->msi.irq);
+ ret = bind_irq_vector(ch->msi.irq, HPET_BROADCAST_VECTOR, &cpu_online_map);
+ if ( ret )
+ return ret;
+ cpumask_setall(desc->affinity);
+
if ( iommu_intremap != iommu_intremap_off )
{
ch->msi.hpet_id = hpet_blockid;
@@ -472,19 +482,50 @@ static struct hpet_event_channel *hpet_g
static void set_channel_irq_affinity(struct hpet_event_channel *ch)
{
struct irq_desc *desc = irq_to_desc(ch->msi.irq);
+ struct msi_msg msg = ch->msi.msg;
ASSERT(!local_irq_is_enabled());
spin_lock(&desc->lock);
- hpet_msi_mask(desc);
- hpet_msi_set_affinity(desc, cpumask_of(ch->cpu));
- hpet_msi_unmask(desc);
+
+ per_cpu(vector_irq, ch->cpu)[HPET_BROADCAST_VECTOR] = ch->msi.irq;
+
+ /*
+ * Open-coding a reduced form of hpet_msi_set_affinity() here. With the
+ * actual update below (either of the IRTE or of [just] message address;
+ * with interrupt remapping message address/data don't change) now being
+ * atomic, we can avoid masking the IRQ around the update. As a result
+ * we're no longer at risk of missing IRQs (provided hpet_broadcast_enter()
+ * keeps setting the new deadline only afterwards).
+ */
+ cpumask_copy(desc->arch.cpu_mask, cpumask_of(ch->cpu));
+
spin_unlock(&desc->lock);
- spin_unlock(&ch->lock);
+ msg.dest32 = cpu_physical_id(ch->cpu);
+ msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK;
+ msg.address_lo |= MSI_ADDR_DEST_ID(msg.dest32);
+ if ( msg.dest32 != ch->msi.msg.dest32 )
+ {
+ ch->msi.msg = msg;
- /* We may have missed an interrupt due to the temporary masking. */
- if ( ch->event_handler && ch->next_event < NOW() )
- ch->event_handler(ch);
+ if ( iommu_intremap != iommu_intremap_off )
+ {
+ int rc = iommu_update_ire_from_msi(&ch->msi, &msg);
+
+ ASSERT(rc <= 0);
+ if ( rc >= 0 )
+ {
+ ASSERT(msg.data == hpet_read32(HPET_Tn_ROUTE(ch->idx)));
+ ASSERT(msg.address_lo ==
+ hpet_read32(HPET_Tn_ROUTE(ch->idx) + 4));
+ }
+ }
+ else
+ hpet_write32(msg.address_lo, HPET_Tn_ROUTE(ch->idx) + 4);
+ }
+
+ hpet_enable_channel(ch);
+ spin_unlock(&ch->lock);
}
static void hpet_attach_channel(unsigned int cpu,
@@ -622,6 +663,12 @@ void __init hpet_broadcast_init(void)
hpet_events->flags = HPET_EVT_LEGACY;
}
+void __init hpet_broadcast_late_init(void)
+{
+ if ( !num_hpets_used )
+ free_lopriority_vector(HPET_BROADCAST_VECTOR);
+}
+
void hpet_broadcast_resume(void)
{
u32 cfg;
--- a/xen/arch/x86/include/asm/hpet.h
+++ b/xen/arch/x86/include/asm/hpet.h
@@ -90,6 +90,7 @@ void hpet_disable_legacy_replacement_mod
* rather than using the LAPIC timer. Used for Cx state entry.
*/
void hpet_broadcast_init(void);
+void hpet_broadcast_late_init(void);
void hpet_broadcast_resume(void);
void cf_check hpet_broadcast_enter(void);
void cf_check hpet_broadcast_exit(void);
--- a/xen/arch/x86/include/asm/irq.h
+++ b/xen/arch/x86/include/asm/irq.h
@@ -116,6 +116,7 @@ void cf_check call_function_interrupt(vo
void cf_check irq_move_cleanup_interrupt(void);
uint8_t alloc_hipriority_vector(void);
+void free_lopriority_vector(uint8_t vector);
void set_direct_apic_vector(uint8_t vector, void (*handler)(void));
void alloc_direct_apic_vector(uint8_t *vector, void (*handler)(void));
--- a/xen/arch/x86/include/asm/irq-vectors.h
+++ b/xen/arch/x86/include/asm/irq-vectors.h
@@ -22,6 +22,9 @@
#define FIRST_LEGACY_VECTOR FIRST_DYNAMIC_VECTOR
#define LAST_LEGACY_VECTOR (FIRST_LEGACY_VECTOR + 0xf)
+/* HPET broadcast is statically allocated and wants to be low priority. */
+#define HPET_BROADCAST_VECTOR (LAST_LEGACY_VECTOR + 1)
+
#ifdef CONFIG_PV32
#define HYPERCALL_VECTOR 0x82
#endif
--- a/xen/arch/x86/irq.c
+++ b/xen/arch/x86/irq.c
@@ -468,6 +468,12 @@ int __init init_irq_data(void)
vector++ )
__set_bit(vector, used_vectors);
+ /*
+ * Prevent the HPET broadcast vector from being used, until it is known
+ * whether it's actually needed.
+ */
+ __set_bit(HPET_BROADCAST_VECTOR, used_vectors);
+
return 0;
}
@@ -991,6 +997,13 @@ void alloc_direct_apic_vector(uint8_t *v
spin_unlock(&lock);
}
+/* This could free any vectors, but is needed only for low-prio ones. */
+void __init free_lopriority_vector(uint8_t vector)
+{
+ ASSERT(vector < FIRST_HIPRIORITY_VECTOR);
+ clear_bit(vector, used_vectors);
+}
+
static void cf_check irq_ratelimit_timer_fn(void *data)
{
struct irq_desc *desc, *tmp;
--- a/xen/arch/x86/time.c
+++ b/xen/arch/x86/time.c
@@ -2675,6 +2675,8 @@ static int __init cf_check disable_pit_i
"Force enable with 'cpuidle'.\n");
}
+ hpet_broadcast_late_init();
+
return 0;
}
__initcall(disable_pit_irq);
--- a/xen/drivers/passthrough/amd/iommu_intr.c
+++ b/xen/drivers/passthrough/amd/iommu_intr.c
@@ -551,6 +551,13 @@ int cf_check amd_iommu_msi_msg_update_ir
for ( i = 1; i < nr; ++i )
msi_desc[i].remap_index = msi_desc->remap_index + i;
msg->data = data;
+ /*
+ * While the low address bits don't matter, "canonicalize" the address
+ * by zapping the bits that were transferred to the IRTE. This way
+ * callers can check for there actually needing to be an update to
+ * wherever the address is put.
+ */
+ msg->address_lo &= ~(MSI_ADDR_DESTMODE_MASK | MSI_ADDR_DEST_ID_MASK);
}
return rc;