[dpdk-dev] [PATCH 4/5] igb_uio: fix IRQ mode handling

Stephen Hemminger stephen at networkplumber.org
Wed May 14 18:34:05 CEST 2014


The igb_uio driver does not handle interrupt mode correctly.
It does not start with the most desired mode and then fall
back to legacy modes as needed.

It also has a custom PCI config lock that is broken on any recent
kernel. Instead, use the pci_intx functions, which provide the
needed access.

VMWare PCI emulation of interrupts is particularly broken.
E1000 on VMWare doesn't work at all because INTX support
in ESX doesn't really work. The kernel pci_intx routines check
this and detect the failure. To handle this case, fall back
to an even older legacy mode that uses irq enable/disable.

Signed-off-by: Stephen Hemminger <shemming at brocade.com>

--- a/lib/librte_eal/linuxapp/igb_uio/igb_uio.c	2014-05-14 09:32:21.542850352 -0700
+++ b/lib/librte_eal/linuxapp/igb_uio/igb_uio.c	2014-05-14 09:32:21.542850352 -0700
@@ -36,27 +36,14 @@
 #include <xen/xen.h>
 #endif
 
-/**
- * MSI-X related macros, copy from linux/pci_regs.h in kernel 2.6.39,
- * but none of them in kernel 2.6.35.
- */
-#ifndef PCI_MSIX_ENTRY_SIZE
-#define PCI_MSIX_ENTRY_SIZE             16
-#define PCI_MSIX_ENTRY_LOWER_ADDR       0
-#define PCI_MSIX_ENTRY_UPPER_ADDR       4
-#define PCI_MSIX_ENTRY_DATA             8
-#define PCI_MSIX_ENTRY_VECTOR_CTRL      12
-#define PCI_MSIX_ENTRY_CTRL_MASKBIT     1
-#endif
-
 #define IGBUIO_NUM_MSI_VECTORS 1
 
 /* interrupt mode */
 enum igbuio_intr_mode {
 	IGBUIO_LEGACY_INTR_MODE = 0,
+	IGBUIO_INTX_MODE,
 	IGBUIO_MSI_INTR_MODE,
-	IGBUIO_MSIX_INTR_MODE,
-	IGBUIO_INTR_MODE_MAX
+	IGBUIO_MSIX_INTR_MODE
 };
 
 /**
@@ -65,13 +52,13 @@ enum igbuio_intr_mode {
 struct rte_uio_pci_dev {
 	struct uio_info info;
 	struct pci_dev *pdev;
-	spinlock_t lock; /* spinlock for accessing PCI config space or msix data in multi tasks/isr */
+	spinlock_t lock;
 	enum igbuio_intr_mode mode;
-	struct msix_entry \
-		msix_entries[IGBUIO_NUM_MSI_VECTORS]; /* pointer to the msix vectors to be allocated later */
+	unsigned long flags;
+	struct msix_entry msix_entries[IGBUIO_NUM_MSI_VECTORS];
 };
 
-static char *intr_mode = NULL;
+static char *intr_mode;
 static enum igbuio_intr_mode igbuio_intr_mode_preferred = IGBUIO_MSIX_INTR_MODE;
 
 /* PCI device id table */
@@ -161,81 +148,45 @@ static const struct attribute_group dev_
 	.attrs = dev_attrs,
 };
 
-static inline int
-pci_lock(struct pci_dev * pdev)
-{
-	/* Some function names changes between 3.2.0 and 3.3.0... */
-#if LINUX_VERSION_CODE < KERNEL_VERSION(3,3,0)
-	pci_block_user_cfg_access(pdev);
-	return 1;
-#else
-	return pci_cfg_access_trylock(pdev);
-#endif
-}
 
-static inline void
-pci_unlock(struct pci_dev * pdev)
-{
-	/* Some function names changes between 3.2.0 and 3.3.0... */
 #if LINUX_VERSION_CODE < KERNEL_VERSION(3,3,0)
-	pci_unblock_user_cfg_access(pdev);
-#else
-	pci_cfg_access_unlock(pdev);
-#endif
-}
-
-/**
- * It masks the msix on/off of generating MSI-X messages.
+/* Check if INTX works to control IRQs.
+ * Toggles the INTX_DISABLE flag and reads it back.
  */
-static int
-igbuio_msix_mask_irq(struct msi_desc *desc, int32_t state)
+static bool pci_intx_mask_supported(struct pci_dev *dev)
 {
-	uint32_t mask_bits = desc->masked;
-	unsigned offset = desc->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE +
-						PCI_MSIX_ENTRY_VECTOR_CTRL;
-
-	if (state != 0)
-		mask_bits &= ~PCI_MSIX_ENTRY_CTRL_MASKBIT;
-	else
-		mask_bits |= PCI_MSIX_ENTRY_CTRL_MASKBIT;
-
-	if (mask_bits != desc->masked) {
-		writel(mask_bits, desc->mask_base + offset);
-		readl(desc->mask_base);
-		desc->masked = mask_bits;
-	}
+	bool mask_supported = false;
+	uint16_t orig, new;
 
-	return 0;
+	pci_block_user_cfg_access(dev);
+	pci_read_config_word(dev, PCI_COMMAND, &orig);
+	pci_write_config_word(dev, PCI_COMMAND,
+			      orig ^ PCI_COMMAND_INTX_DISABLE);
+	pci_read_config_word(dev, PCI_COMMAND, &new);
+
+	if ((new ^ orig) & ~PCI_COMMAND_INTX_DISABLE) {
+		dev_err(&dev->dev, "Command register changed from "
+			"0x%x to 0x%x: driver or hardware bug?\n", orig, new);
+	} else if ((new ^ orig) & PCI_COMMAND_INTX_DISABLE) {
+		mask_supported = true;
+		pci_write_config_word(dev, PCI_COMMAND, orig);
+	}
+	pci_unblock_user_cfg_access(dev);
+
+	return mask_supported;
 }
 
-/**
- * This function sets/clears the masks for generating LSC interrupts.
- *
- * @param info
- *   The pointer to struct uio_info.
- * @param on
- *   The on/off flag of masking LSC.
- * @return
- *   -On success, zero value.
- *   -On failure, a negative value.
- */
-static int
-igbuio_set_interrupt_mask(struct rte_uio_pci_dev *udev, int32_t state)
+static bool pci_check_and_mask_intx(struct pci_dev *pdev)
 {
-	struct pci_dev *pdev = udev->pdev;
+	bool pending;
+	uint32_t status;
 
-	if (udev->mode == IGBUIO_MSIX_INTR_MODE) {
-		struct msi_desc *desc;
+	pci_block_user_cfg_access(pdev);
+	pci_read_config_dword(pdev, PCI_COMMAND, &status);
 
-		list_for_each_entry(desc, &pdev->msi_list, list) {
-			igbuio_msix_mask_irq(desc, state);
-		}
-	}
-	else if (udev->mode == IGBUIO_LEGACY_INTR_MODE) {
-		uint32_t status;
+	/* if the INTX status bit is set, this interrupt is ours */
+	pending = (((status >> 16) & PCI_STATUS_INTERRUPT) != 0);
+	if (pending) {
 		uint16_t old, new;
 
-		pci_read_config_dword(pdev, PCI_COMMAND, &status);
 		old = status;
-		if (state != 0)
-			new = old & (~PCI_COMMAND_INTX_DISABLE);
+		/* mask INTX until userspace re-enables it */
+		new = old | PCI_COMMAND_INTX_DISABLE;
@@ -245,9 +196,11 @@ igbuio_set_interrupt_mask(struct rte_uio
 		if (old != new)
 			pci_write_config_word(pdev, PCI_COMMAND, new);
 	}
+	pci_unblock_user_cfg_access(pdev);
 
-	return 0;
+	return pending;
 }
+#endif
 
 /**
  * This is the irqcontrol callback to be registered to uio_info.
@@ -267,17 +220,15 @@ igbuio_pci_irqcontrol(struct uio_info *i
 {
 	unsigned long flags;
 	struct rte_uio_pci_dev *udev = igbuio_get_uio_pci_dev(info);
-	struct pci_dev *pdev = udev->pdev;
 
 	spin_lock_irqsave(&udev->lock, flags);
-	if (!pci_lock(pdev)) {
-		spin_unlock_irqrestore(&udev->lock, flags);
-		return -1;
+	if (irq_state) {
+		if (test_and_clear_bit(0, &udev->flags))
+			enable_irq(info->irq);
+	} else {
+		if (!test_and_set_bit(0, &udev->flags))
+			disable_irq(info->irq);
 	}
-
-	igbuio_set_interrupt_mask(udev, irq_state);
-
-	pci_unlock(pdev);
 	spin_unlock_irqrestore(&udev->lock, flags);
 
 	return 0;
@@ -290,37 +241,21 @@ igbuio_pci_irqcontrol(struct uio_info *i
 static irqreturn_t
 igbuio_pci_irqhandler(int irq, struct uio_info *info)
 {
-	irqreturn_t ret = IRQ_NONE;
-	unsigned long flags;
 	struct rte_uio_pci_dev *udev = igbuio_get_uio_pci_dev(info);
-	struct pci_dev *pdev = udev->pdev;
-	uint32_t cmd_status_dword;
-	uint16_t status;
 
-	spin_lock_irqsave(&udev->lock, flags);
-	/* block userspace PCI config reads/writes */
-	if (!pci_lock(pdev))
-		goto spin_unlock;
-
-	/* for legacy mode, interrupt maybe shared */
-	if (udev->mode == IGBUIO_LEGACY_INTR_MODE) {
-		pci_read_config_dword(pdev, PCI_COMMAND, &cmd_status_dword);
-		status = cmd_status_dword >> 16;
-		/* interrupt is not ours, goes to out */
-		if (!(status & PCI_STATUS_INTERRUPT))
-			goto done;
-	}
-
-	igbuio_set_interrupt_mask(udev, 0);
-	ret = IRQ_HANDLED;
-done:
-	/* unblock userspace PCI config reads/writes */
-	pci_unlock(pdev);
-spin_unlock:
-	spin_unlock_irqrestore(&udev->lock, flags);
-	pr_info("irq 0x%x %s\n", irq, (ret == IRQ_HANDLED) ? "handled" : "not handled");
+	/* INTX and legacy modes must mask the interrupt in hardware */
+	if (udev->mode == IGBUIO_INTX_MODE) {
+		if (!pci_check_and_mask_intx(udev->pdev))
+			return IRQ_NONE;
+	} else if (udev->mode == IGBUIO_LEGACY_INTR_MODE) {
+		spin_lock(&udev->lock);
+		if (!test_and_set_bit(0, &udev->flags))
+			disable_irq_nosync(irq);
+		spin_unlock(&udev->lock);
+	}
 
-	return ret;
+	/* Message-signalled interrupts are never shared and are auto-masked */
+	return IRQ_HANDLED;
 }
 
 #ifdef CONFIG_XEN_DOM0
@@ -471,6 +406,7 @@ static int
 igbuio_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
 {
 	struct rte_uio_pci_dev *udev;
+	int err;
 
 	udev = kzalloc(sizeof(struct rte_uio_pci_dev), GFP_KERNEL);
 	if (!udev)
@@ -522,38 +458,50 @@ igbuio_pci_probe(struct pci_dev *dev, co
 #endif
 	udev->info.priv = udev;
 	udev->pdev = dev;
-	udev->mode = 0; /* set the default value for interrupt mode */
+	udev->info.irq = dev->irq;
+	udev->info.irq_flags = 0;
 	spin_lock_init(&udev->lock);
 
-	/* check if it need to try msix first */
-	if (igbuio_intr_mode_preferred == IGBUIO_MSIX_INTR_MODE) {
+	switch (igbuio_intr_mode_preferred) {
+	case IGBUIO_MSIX_INTR_MODE: {
 		int vector;
 
-		for (vector = 0; vector < IGBUIO_NUM_MSI_VECTORS; vector ++)
+		for (vector = 0; vector < IGBUIO_NUM_MSI_VECTORS; vector++)
 			udev->msix_entries[vector].entry = vector;
 
-		if (pci_enable_msix(udev->pdev, udev->msix_entries, IGBUIO_NUM_MSI_VECTORS) == 0) {
+		err = pci_enable_msix(udev->pdev, udev->msix_entries,
+				      IGBUIO_NUM_MSI_VECTORS);
+		if (err == 0) {
 			udev->mode = IGBUIO_MSIX_INTR_MODE;
+			udev->info.irq = udev->msix_entries[0].vector;
+			break;
 		}
-		else {
-			pci_disable_msix(udev->pdev);
-			pr_info("fail to enable pci msix, or not enough msix entries\n");
-		}
+		dev_dbg(&dev->dev,
+				"Failed to enable MSI-X interrupts\n");
+		/* fall through */
 	}
-	switch (udev->mode) {
-	case IGBUIO_MSIX_INTR_MODE:
-		udev->info.irq_flags = 0;
-		udev->info.irq = udev->msix_entries[0].vector;
-		break;
 	case IGBUIO_MSI_INTR_MODE:
-		break;
+		err = pci_enable_msi(dev);
+		if (err == 0) {
+			udev->info.irq = dev->irq;
+			udev->mode = IGBUIO_MSI_INTR_MODE;
+			break;
+		}
+		dev_dbg(&dev->dev,
+			"Failed to enable MSI interrupts\n");
+		/* fall through */
+	case IGBUIO_INTX_MODE:
+		if (pci_intx_mask_supported(dev)) {
+			udev->info.irq_flags = IRQF_SHARED;
+			udev->mode = IGBUIO_INTX_MODE;
+			break;
+		}
+		dev_info(&dev->dev, "PCI INTX mask not supported\n");
+		/* fall through */
 	case IGBUIO_LEGACY_INTR_MODE:
-		udev->info.irq_flags = IRQF_SHARED;
-		udev->info.irq = dev->irq;
-		break;
-	default:
-		break;
+		udev->mode = IGBUIO_LEGACY_INTR_MODE;
 	}

 	pci_set_drvdata(dev, udev);
 	igbuio_pci_irqcontrol(&udev->info, 0);



More information about the dev mailing list