[ofa-general] Re: [PATCHv3 for-2.6.21] IB/mthca: fix race in QP destroy
Michael S. Tsirkin
mst at mellanox.co.il
Wed Mar 7 10:10:41 PST 2007
> Quoting Roland Dreier <rdreier at cisco.com>:
> Subject: Re: [PATCHv3 for-2.6.21] IB/mthca: fix race in QP destroy
>
> What do you think of something like this, plus merging the async event
> and command interface EQs?
Looks good, except that spin_lock/spin_unlock need a lock pointer.
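To be clear about the spin_lock point: the kernel API is
spin_lock(spinlock_t *)/spin_unlock(spinlock_t *), so the event path has to
pass a pointer to the lock it is taking.  A minimal sketch of what I mean
(the qp_table.lock / refcount names here are only illustrative, not taken
from your patch):

	#include <linux/spinlock.h>
	/* struct mthca_dev / struct mthca_qp as in mthca_dev.h */

	/* Sketch: bump the QP refcount under the table lock before
	 * dispatching the event, so destroy can't race with us. */
	static void qp_event_sketch(struct mthca_dev *dev, struct mthca_qp *qp)
	{
		spin_lock(&dev->qp_table.lock);		/* pointer to the lock */
		++qp->refcount;				/* illustrative refcount */
		spin_unlock(&dev->qp_table.lock);
	}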
Here's a (compile-tested only) patch that merges the async and command EQs.
commit a0bc6fd18d8a1918a251576537350a369fb902ed
Author: Michael S. Tsirkin <mst at mellanox.co.il>
Date: Wed Mar 7 17:47:30 2007 +0200
Merge CMD and ASYNC EQs
Signed-off-by: Michael S. Tsirkin <mst at mellanox.co.il>
diff --git a/drivers/infiniband/hw/mthca/mthca_dev.h b/drivers/infiniband/hw/mthca/mthca_dev.h
index b7e42ef..a87903f 100644
--- a/drivers/infiniband/hw/mthca/mthca_dev.h
+++ b/drivers/infiniband/hw/mthca/mthca_dev.h
@@ -93,9 +93,8 @@ enum {
};
enum {
- MTHCA_EQ_CMD,
- MTHCA_EQ_ASYNC,
MTHCA_EQ_COMP,
+ MTHCA_EQ_ASYNC,
MTHCA_NUM_EQ
};
diff --git a/drivers/infiniband/hw/mthca/mthca_eq.c b/drivers/infiniband/hw/mthca/mthca_eq.c
index 8ec9fa1..f7a41b8 100644
--- a/drivers/infiniband/hw/mthca/mthca_eq.c
+++ b/drivers/infiniband/hw/mthca/mthca_eq.c
@@ -110,11 +110,11 @@ enum {
(1ULL << MTHCA_EVENT_TYPE_WQ_ACCESS_ERROR) | \
(1ULL << MTHCA_EVENT_TYPE_LOCAL_CATAS_ERROR) | \
(1ULL << MTHCA_EVENT_TYPE_PORT_CHANGE) | \
- (1ULL << MTHCA_EVENT_TYPE_ECC_DETECT))
+ (1ULL << MTHCA_EVENT_TYPE_ECC_DETECT) | \
+ (1ULL << MTHCA_EVENT_TYPE_CMD))
#define MTHCA_SRQ_EVENT_MASK ((1ULL << MTHCA_EVENT_TYPE_SRQ_CATAS_ERROR) | \
(1ULL << MTHCA_EVENT_TYPE_SRQ_QP_LAST_WQE) | \
(1ULL << MTHCA_EVENT_TYPE_SRQ_LIMIT))
-#define MTHCA_CMD_EVENT_MASK (1ULL << MTHCA_EVENT_TYPE_CMD)
#define MTHCA_EQ_DB_INC_CI (1 << 24)
#define MTHCA_EQ_DB_REQ_NOT (2 << 24)
@@ -863,23 +863,17 @@ int mthca_init_eq_table(struct mthca_dev *dev)
if (err)
goto err_out_unmap;
- err = mthca_create_eq(dev, MTHCA_NUM_ASYNC_EQE + MTHCA_NUM_SPARE_EQE,
+ err = mthca_create_eq(dev, MTHCA_NUM_CMD_EQE + MTHCA_NUM_ASYNC_EQE +
+ MTHCA_NUM_SPARE_EQE,
(dev->mthca_flags & MTHCA_FLAG_MSI_X) ? 129 : intr,
&dev->eq_table.eq[MTHCA_EQ_ASYNC]);
if (err)
goto err_out_comp;
- err = mthca_create_eq(dev, MTHCA_NUM_CMD_EQE + MTHCA_NUM_SPARE_EQE,
- (dev->mthca_flags & MTHCA_FLAG_MSI_X) ? 130 : intr,
- &dev->eq_table.eq[MTHCA_EQ_CMD]);
- if (err)
- goto err_out_async;
-
if (dev->mthca_flags & MTHCA_FLAG_MSI_X) {
static const char *eq_name[] = {
[MTHCA_EQ_COMP] = DRV_NAME " (comp)",
[MTHCA_EQ_ASYNC] = DRV_NAME " (async)",
- [MTHCA_EQ_CMD] = DRV_NAME " (cmd)"
};
for (i = 0; i < MTHCA_NUM_EQ; ++i) {
@@ -889,7 +883,7 @@ int mthca_init_eq_table(struct mthca_dev *dev)
mthca_tavor_msi_x_interrupt,
0, eq_name[i], dev->eq_table.eq + i);
if (err)
- goto err_out_cmd;
+ goto err_out_async;
dev->eq_table.eq[i].have_irq = 1;
}
} else {
@@ -899,7 +893,7 @@ int mthca_init_eq_table(struct mthca_dev *dev)
mthca_tavor_interrupt,
IRQF_SHARED, DRV_NAME, dev);
if (err)
- goto err_out_cmd;
+ goto err_out_async;
dev->eq_table.have_irq = 1;
}
@@ -912,15 +906,6 @@ int mthca_init_eq_table(struct mthca_dev *dev)
mthca_warn(dev, "MAP_EQ for async EQ %d returned status 0x%02x\n",
dev->eq_table.eq[MTHCA_EQ_ASYNC].eqn, status);
- err = mthca_MAP_EQ(dev, MTHCA_CMD_EVENT_MASK,
- 0, dev->eq_table.eq[MTHCA_EQ_CMD].eqn, &status);
- if (err)
- mthca_warn(dev, "MAP_EQ for cmd EQ %d failed (%d)\n",
- dev->eq_table.eq[MTHCA_EQ_CMD].eqn, err);
- if (status)
- mthca_warn(dev, "MAP_EQ for cmd EQ %d returned status 0x%02x\n",
- dev->eq_table.eq[MTHCA_EQ_CMD].eqn, status);
-
for (i = 0; i < MTHCA_NUM_EQ; ++i)
if (mthca_is_memfree(dev))
arbel_eq_req_not(dev, dev->eq_table.eq[i].eqn_mask);
@@ -929,11 +914,8 @@ int mthca_init_eq_table(struct mthca_dev *dev)
return 0;
-err_out_cmd:
- mthca_free_irqs(dev);
- mthca_free_eq(dev, &dev->eq_table.eq[MTHCA_EQ_CMD]);
-
err_out_async:
+ mthca_free_irqs(dev);
mthca_free_eq(dev, &dev->eq_table.eq[MTHCA_EQ_ASYNC]);
err_out_comp:
@@ -956,8 +938,6 @@ void mthca_cleanup_eq_table(struct mthca_dev *dev)
mthca_MAP_EQ(dev, async_mask(dev),
1, dev->eq_table.eq[MTHCA_EQ_ASYNC].eqn, &status);
- mthca_MAP_EQ(dev, MTHCA_CMD_EVENT_MASK,
- 1, dev->eq_table.eq[MTHCA_EQ_CMD].eqn, &status);
for (i = 0; i < MTHCA_NUM_EQ; ++i)
mthca_free_eq(dev, &dev->eq_table.eq[i]);
diff --git a/drivers/infiniband/hw/mthca/mthca_main.c b/drivers/infiniband/hw/mthca/mthca_main.c
index 0d9b7d0..5bfef62 100644
--- a/drivers/infiniband/hw/mthca/mthca_main.c
+++ b/drivers/infiniband/hw/mthca/mthca_main.c
@@ -835,7 +835,7 @@ static int mthca_setup_hca(struct mthca_dev *dev)
if (err || status) {
mthca_err(dev, "NOP command failed to generate interrupt (IRQ %d), aborting.\n",
dev->mthca_flags & MTHCA_FLAG_MSI_X ?
- dev->eq_table.eq[MTHCA_EQ_CMD].msi_x_vector :
+ dev->eq_table.eq[MTHCA_EQ_ASYNC].msi_x_vector :
dev->pdev->irq);
if (dev->mthca_flags & (MTHCA_FLAG_MSI | MTHCA_FLAG_MSI_X))
mthca_err(dev, "Try again with MSI/MSI-X disabled.\n");
@@ -976,12 +976,11 @@ static void mthca_release_regions(struct pci_dev *pdev,
static int mthca_enable_msi_x(struct mthca_dev *mdev)
{
- struct msix_entry entries[3];
+ struct msix_entry entries[2];
int err;
entries[0].entry = 0;
entries[1].entry = 1;
- entries[2].entry = 2;
err = pci_enable_msix(mdev->pdev, entries, ARRAY_SIZE(entries));
if (err) {
@@ -993,7 +992,6 @@ static int mthca_enable_msi_x(struct mthca_dev *mdev)
mdev->eq_table.eq[MTHCA_EQ_COMP ].msi_x_vector = entries[0].vector;
mdev->eq_table.eq[MTHCA_EQ_ASYNC].msi_x_vector = entries[1].vector;
- mdev->eq_table.eq[MTHCA_EQ_CMD ].msi_x_vector = entries[2].vector;
return 0;
}
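Note that nothing needs to change on the dispatch side: mthca_eq_int()
already demuxes EQEs by type, so once MTHCA_EVENT_TYPE_CMD is mapped to the
async EQ, command completions are picked up by the same handler.  Roughly
(quoting the existing switch from memory, not part of this patch):

	case MTHCA_EVENT_TYPE_CMD:
		mthca_cmd_event(dev,
				be16_to_cpu(eqe->event.cmd.token),
				eqe->event.cmd.status,
				be64_to_cpu(eqe->event.cmd.out_param));
		break;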
--
MST