git.dujemihanovic.xyz Git - linux.git/commitdiff
genirq/msi: Activate Multi-MSI early when MSI_FLAG_ACTIVATE_EARLY is set
author: Marc Zyngier <maz@kernel.org>
Sat, 23 Jan 2021 12:27:59 +0000 (12:27 +0000)
committer: Thomas Gleixner <tglx@linutronix.de>
Sat, 30 Jan 2021 00:22:31 +0000 (01:22 +0100)
When MSI_FLAG_ACTIVATE_EARLY is set (which is the case for PCI),
__msi_domain_alloc_irqs() performs the activation of the interrupt (which
in the case of PCI results in the endpoint being programmed) as soon as the
interrupt is allocated.

But it appears that this is only done for the first vector, introducing an
inconsistent behaviour for PCI Multi-MSI.

Fix it by iterating over the number of vectors allocated to each MSI
descriptor. This is easily achieved by introducing a new
"for_each_msi_vector" iterator, together with a tiny bit of refactoring.

Fixes: f3b0946d629c ("genirq/msi: Make sure PCI MSIs are activated early")
Reported-by: Shameer Kolothum <shameerali.kolothum.thodi@huawei.com>
Signed-off-by: Marc Zyngier <maz@kernel.org>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Tested-by: Shameer Kolothum <shameerali.kolothum.thodi@huawei.com>
Cc: stable@vger.kernel.org
Link: https://lore.kernel.org/r/20210123122759.1781359-1-maz@kernel.org
include/linux/msi.h
kernel/irq/msi.c

index 360a0a7e73415fee1cacc582ba1da3ae9fec3e51..aef35fd1cf116f5d53ee922a8f81da575212ac38 100644 (file)
@@ -178,6 +178,12 @@ struct msi_desc {
        list_for_each_entry((desc), dev_to_msi_list((dev)), list)
 #define for_each_msi_entry_safe(desc, tmp, dev)        \
        list_for_each_entry_safe((desc), (tmp), dev_to_msi_list((dev)), list)
+#define for_each_msi_vector(desc, __irq, dev)                          \
+       for_each_msi_entry((desc), (dev))                               \
+               if ((desc)->irq)                                        \
+                       for (__irq = (desc)->irq;                       \
+                            __irq < ((desc)->irq + (desc)->nvec_used); \
+                            __irq++)
 
 #ifdef CONFIG_IRQ_MSI_IOMMU
 static inline const void *msi_desc_get_iommu_cookie(struct msi_desc *desc)
index dc0e2d7fbdfd927c98aade002389549f89477a0c..b338d622f26e330e0e889c1453fec4b4849eab79 100644 (file)
@@ -436,22 +436,22 @@ int __msi_domain_alloc_irqs(struct irq_domain *domain, struct device *dev,
 
        can_reserve = msi_check_reservation_mode(domain, info, dev);
 
-       for_each_msi_entry(desc, dev) {
-               virq = desc->irq;
-               if (desc->nvec_used == 1)
-                       dev_dbg(dev, "irq %d for MSI\n", virq);
-               else
+       /*
+        * This flag is set by the PCI layer as we need to activate
+        * the MSI entries before the PCI layer enables MSI in the
+        * card. Otherwise the card latches a random msi message.
+        */
+       if (!(info->flags & MSI_FLAG_ACTIVATE_EARLY))
+               goto skip_activate;
+
+       for_each_msi_vector(desc, i, dev) {
+               if (desc->irq == i) {
+                       virq = desc->irq;
                        dev_dbg(dev, "irq [%d-%d] for MSI\n",
                                virq, virq + desc->nvec_used - 1);
-               /*
-                * This flag is set by the PCI layer as we need to activate
-                * the MSI entries before the PCI layer enables MSI in the
-                * card. Otherwise the card latches a random msi message.
-                */
-               if (!(info->flags & MSI_FLAG_ACTIVATE_EARLY))
-                       continue;
+               }
 
-               irq_data = irq_domain_get_irq_data(domain, desc->irq);
+               irq_data = irq_domain_get_irq_data(domain, i);
                if (!can_reserve) {
                        irqd_clr_can_reserve(irq_data);
                        if (domain->flags & IRQ_DOMAIN_MSI_NOMASK_QUIRK)
@@ -462,28 +462,24 @@ int __msi_domain_alloc_irqs(struct irq_domain *domain, struct device *dev,
                        goto cleanup;
        }
 
+skip_activate:
        /*
         * If these interrupts use reservation mode, clear the activated bit
         * so request_irq() will assign the final vector.
         */
        if (can_reserve) {
-               for_each_msi_entry(desc, dev) {
-                       irq_data = irq_domain_get_irq_data(domain, desc->irq);
+               for_each_msi_vector(desc, i, dev) {
+                       irq_data = irq_domain_get_irq_data(domain, i);
                        irqd_clr_activated(irq_data);
                }
        }
        return 0;
 
 cleanup:
-       for_each_msi_entry(desc, dev) {
-               struct irq_data *irqd;
-
-               if (desc->irq == virq)
-                       break;
-
-               irqd = irq_domain_get_irq_data(domain, desc->irq);
-               if (irqd_is_activated(irqd))
-                       irq_domain_deactivate_irq(irqd);
+       for_each_msi_vector(desc, i, dev) {
+               irq_data = irq_domain_get_irq_data(domain, i);
+               if (irqd_is_activated(irq_data))
+                       irq_domain_deactivate_irq(irq_data);
        }
        msi_domain_free_irqs(domain, dev);
        return ret;