[ofa-general] [RFC][PATCH] IPoIB: release QP resources in error_list when maximum connections reached
Shirley Ma
mashirle at us.ibm.com
Mon Jul 28 13:50:17 PDT 2008
Reposting with the correct patch format.
Signed-off-by: Shirley Ma <xma at us.ibm.com>
--------
drivers/infiniband/ulp/ipoib/ipoib_cm.c | 40 +++++++++++++++++++++++++++++++
1 files changed, 40 insertions(+), 0 deletions(-)
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
index 90b8668..d3a8540 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
@@ -341,6 +341,24 @@ static void ipoib_cm_init_rx_wr(struct net_device *dev,
wr->num_sge = priv->cm.num_frags;
}
+static void ipoib_cm_free_rx_reap_list(struct net_device *dev);
+
+/*
+ * Garbage-collect stale nonsrq connections: move entries that have sat on
+ * rx_error_list for more than IPOIB_CM_RX_TIMEOUT + 10 seconds over to
+ * rx_reap_list so their QP resources can be released.
+ *
+ * Called with priv->lock held (see ipoib_cm_nonsrq_init_rx).
+ */
+static void ipoib_cm_nonsrq_gc_connections(struct ipoib_dev_priv *priv)
+{
+	struct ipoib_cm_rx *p;
+
+	while (!list_empty(&priv->cm.rx_error_list)) {
+		/* List is sorted by LRU, start from tail,
+		 * stop when we see a recently used entry */
+		p = list_entry(priv->cm.rx_error_list.prev, typeof(*p), list);
+		/* recycle QPs which have been in error state more than 10 secs */
+		if (time_before_eq(jiffies, p->jiffies + IPOIB_CM_RX_TIMEOUT
+				   + 10 * HZ))
+			break;
+		list_move(&p->list, &priv->cm.rx_reap_list);
+	}
+}
+
static int ipoib_cm_nonsrq_init_rx(struct net_device *dev, struct ib_cm_id *cm_id,
struct ipoib_cm_rx *rx)
{
@@ -352,6 +370,22 @@ static int ipoib_cm_nonsrq_init_rx(struct net_device *dev, struct ib_cm_id *cm_i
int ret;
int i;
+
+ spin_lock_irq(&priv->lock);
+ if (priv->cm.nonsrq_conn_qp >= ipoib_max_conn_qp) {
+ if (!list_empty(&priv->cm.rx_error_list))
+ ipoib_cm_nonsrq_gc_connections(priv);
+ if (list_empty(&priv->cm.rx_reap_list)) {
+ spin_unlock_irq(&priv->lock);
+ printk(KERN_WARNING "Max nonsrq QP exceeded. Will REJ\n");
+ ib_send_cm_rej(cm_id, IB_CM_REJ_NO_QP, NULL, 0, NULL, 0);
+ return -EINVAL;
+ } else {
+ spin_unlock_irq(&priv->lock);
+ ipoib_cm_free_rx_reap_list(dev);
+ }
+ }
+
rx->rx_ring = kcalloc(ipoib_recvq_size, sizeof *rx->rx_ring, GFP_KERNEL);
if (!rx->rx_ring)
return -ENOMEM;
@@ -438,6 +472,7 @@ static int ipoib_cm_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *even
struct ipoib_cm_rx *p;
unsigned psn;
int ret;
+ unsigned long flags;
ipoib_dbg(priv, "REQ arrived\n");
p = kzalloc(sizeof *p, GFP_KERNEL);
@@ -499,6 +534,7 @@ static int ipoib_cm_rx_handler(struct ib_cm_id *cm_id,
{
struct ipoib_cm_rx *p;
struct ipoib_dev_priv *priv;
+ unsigned long flags;
p = cm_id->context;
priv = netdev_priv(p->dev);
@@ -507,6 +543,10 @@ static int ipoib_cm_rx_handler(struct ib_cm_id *cm_id,
return ipoib_cm_req_handler(cm_id, event);
case IB_CM_DREQ_RECEIVED:
ib_send_cm_drep(cm_id, NULL, 0);
+ spin_lock_irq(&priv->lock);
+ list_move(&p->list, &priv->cm.rx_error_list);
+ p->state = IPOIB_CM_RX_ERROR;
+ spin_unlock_irq(&priv->lock);
if (ib_modify_qp(p->qp, &ipoib_cm_err_attr, IB_QP_STATE))
ipoib_warn(priv, "unable to move qp to error state\n");
return 0;
More information about the general
mailing list