[ofa-general] NOSRQ misc patch [PATCH V1]
Pradeep Satyanarayana
pradeeps at linux.vnet.ibm.com
Sat Jul 21 15:46:15 PDT 2007
This patch is to be applied on top of the IPOIB CM (NOSRQ) [PATCH V8].
It fixes the issues that Roland and Michael pointed out, along with a few related cleanups.
Signed-off-by: Pradeep Satyanarayana <pradeeps at linux.vnet.ibm.com>
---
--- a/linux-2.6.22/drivers/infiniband/ulp/ipoib/ipoib.h 2007-07-21 17:50:47.000000000 -0400
+++ b/linux-2.6.22/drivers/infiniband/ulp/ipoib/ipoib.h 2007-07-21 18:20:29.000000000 -0400
@@ -101,7 +101,6 @@ enum {
#define IPOIB_CM_OP_RECV (1ul << 30)
#define NOSRQ_INDEX_TABLE_SIZE 128
-#define NOSRQ_INDEX_MASK (NOSRQ_INDEX_TABLE_SIZE -1)
#else
#define IPOIB_CM_OP_RECV (0)
#endif
@@ -447,6 +446,7 @@ void ipoib_drain_cq(struct net_device *d
/* We don't support UC connections at the moment */
#define IPOIB_CM_SUPPORTED(ha) (ha[0] & (IPOIB_FLAGS_RC))
+extern int max_rc_qp;
static inline int ipoib_cm_admin_enabled(struct net_device *dev)
{
struct ipoib_dev_priv *priv = netdev_priv(dev);
--- a/linux-2.6.22/drivers/infiniband/ulp/ipoib/ipoib_cm.c 2007-07-21 17:50:47.000000000 -0400
+++ b/linux-2.6.22/drivers/infiniband/ulp/ipoib/ipoib_cm.c 2007-07-21 18:08:15.000000000 -0400
@@ -49,17 +49,18 @@ MODULE_PARM_DESC(cm_data_debug_level,
#include "ipoib.h"
-static int max_rc_qp = NOSRQ_INDEX_TABLE_SIZE;
+int max_rc_qp = NOSRQ_INDEX_TABLE_SIZE;
static int max_recv_buf = 1024; /* Default is 1024 MB */
module_param_named(nosrq_max_rc_qp, max_rc_qp, int, 0644);
-MODULE_PARM_DESC(nosrq_max_rc_qp, "Max number of NOSRQ RC QPs supported");
+MODULE_PARM_DESC(nosrq_max_rc_qp, "Max number of NOSRQ RC QPs supported; must be a power of 2");
module_param_named(max_receive_buffer, max_recv_buf, int, 0644);
MODULE_PARM_DESC(max_receive_buffer, "Max Receive Buffer Size in MB");
static atomic_t current_rc_qp = ATOMIC_INIT(0); /* Active number of RC QPs for NOSRQ */
+#define NOSRQ_INDEX_MASK (max_rc_qp - 1)
#define IPOIB_CM_IETF_ID 0x1000000000000000ULL
#define IPOIB_CM_RX_UPDATE_TIME (256 * HZ)
@@ -1024,6 +1025,7 @@ void dev_stop_nosrq(struct ipoib_dev_pri
spin_unlock_irq(&priv->lock);
cancel_delayed_work(&priv->cm.stale_task);
+ kfree(priv->cm.rx_index_table);
}
void ipoib_cm_dev_stop(struct net_device *dev)
@@ -1168,9 +1170,9 @@ static struct ib_qp *ipoib_cm_create_tx_
attr.recv_cq = priv->cq;
attr.srq = priv->cm.srq;
attr.cap.max_send_wr = ipoib_sendq_size;
- attr.cap.max_recv_wr = 1;
+ attr.cap.max_recv_wr = 0;
attr.cap.max_send_sge = 1;
- attr.cap.max_recv_sge = 1;
+ attr.cap.max_recv_sge = 0;
attr.sq_sig_type = IB_SIGNAL_ALL_WR;
attr.qp_type = IB_QPT_RC;
attr.send_cq = cq;
@@ -1710,11 +1712,11 @@ int ipoib_cm_dev_init(struct net_device
* passive_ids. For quick and easy access we maintain a table
* of pointers to struct ipoib_cm_rx called the rx_index_table
*/
- priv->cm.rx_index_table = kzalloc(NOSRQ_INDEX_TABLE_SIZE *
- sizeof *priv->cm.rx_index_table,
- GFP_KERNEL);
+ priv->cm.rx_index_table = kcalloc(max_rc_qp,
+ sizeof *priv->cm.rx_index_table,
+ GFP_KERNEL);
if (!priv->cm.rx_index_table) {
- printk(KERN_WARNING "Failed to allocate NOSRQ_INDEX_TABLE\n");
+ printk(KERN_WARNING "Failed to allocate rx_index_table\n");
return -ENOMEM;
}
}
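
To illustrate why nosrq_max_rc_qp must be a power of 2 (hence the updated
MODULE_PARM_DESC): NOSRQ_INDEX_MASK is now computed from max_rc_qp at run
time and is used to reduce an encoded work request id to a slot in the
kcalloc'ed rx_index_table, which only works when the table size is a power
of 2. Below is a minimal userspace sketch of that property; the names and
the wr_id encoding are illustrative assumptions, not the driver code itself:

/*
 * Userspace sketch only: with max_rc_qp a power of 2,
 * (x & (max_rc_qp - 1)) == (x % max_rc_qp), so the mask cheaply
 * recovers a valid table slot.
 */
#include <stdio.h>
#include <stdlib.h>

static int max_rc_qp = 128;                    /* module parameter in the patch */
#define NOSRQ_INDEX_MASK (max_rc_qp - 1)

int main(void)
{
	void **rx_index_table;
	unsigned long long wr_id = 0x4000000000000005ULL; /* hypothetical encoded id */
	unsigned int index;

	/* Reject values that are not a power of 2. */
	if (max_rc_qp <= 0 || (max_rc_qp & (max_rc_qp - 1))) {
		fprintf(stderr, "nosrq_max_rc_qp must be a power of 2\n");
		return 1;
	}

	/* kcalloc(max_rc_qp, sizeof *table, GFP_KERNEL) in the patch; calloc here. */
	rx_index_table = calloc(max_rc_qp, sizeof *rx_index_table);
	if (!rx_index_table)
		return 1;

	index = wr_id & NOSRQ_INDEX_MASK;      /* always < max_rc_qp */
	printf("wr_id 0x%llx -> table slot %u of %d\n", wr_id, index, max_rc_qp);

	free(rx_index_table);
	return 0;
}
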
--- a/linux-2.6.22/drivers/infiniband/ulp/ipoib/ipoib_verbs.c 2007-07-21 17:50:47.000000000 -0400
+++ b/linux-2.6.22/drivers/infiniband/ulp/ipoib/ipoib_verbs.c 2007-07-21 18:09:26.000000000 -0400
@@ -180,11 +180,11 @@ int ipoib_transport_dev_init(struct net_
/* We increase the size of the CQ in the NOSRQ case to prevent CQ
* overflow. Every new REQ creates a new RX QP and each QP has an
* RX ring associated with it. Therefore we could have
- * NOSRQ_INDEX_TABLE_SIZE*ipoib_recvq_size + ipoib_sendq_size CQEs
+ * max_rc_qp*ipoib_recvq_size + ipoib_sendq_size CQEs
* in a CQ.
*/
if (!priv->cm.srq)
- size += (NOSRQ_INDEX_TABLE_SIZE - 1) * ipoib_recvq_size;
+ size += (max_rc_qp - 1) * ipoib_recvq_size;
#endif
priv->cq = ib_create_cq(priv->ca, ipoib_ib_completion, NULL, dev, size, 0);
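
To sanity-check the CQ sizing comment above: the existing CQ size already
covers one RX ring plus the TX ring, so only (max_rc_qp - 1) additional RX
rings need to be added, giving max_rc_qp*ipoib_recvq_size + ipoib_sendq_size
CQEs in total. A small userspace sketch with assumed example ring sizes (the
real values are module parameters):

#include <stdio.h>

int main(void)
{
	int ipoib_sendq_size = 64;     /* assumed example value */
	int ipoib_recvq_size = 128;    /* assumed example value */
	int max_rc_qp = 128;           /* nosrq_max_rc_qp default */
	int have_srq = 0;              /* NOSRQ case */
	int size;

	/* Base CQ size already covers one RX ring plus the TX ring. */
	size = ipoib_sendq_size + ipoib_recvq_size;
	if (!have_srq)
		/* Each additional RC QP brings its own RX ring. */
		size += (max_rc_qp - 1) * ipoib_recvq_size;

	/* = max_rc_qp*ipoib_recvq_size + ipoib_sendq_size = 128*128 + 64 = 16448 */
	printf("CQ size = %d CQEs\n", size);
	return 0;
}
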