[ofa-general] IPOIB CM (NOSRQ) patch for review

Pradeep Satyanarayana pradeep at us.ibm.com
Fri Mar 9 12:01:23 PST 2007


Here is a first version of the IPOIB_CM_NOSRQ patch for review. It will
benefit adapters that do not (yet) support shared receive queues. This
patch works in conjunction with the IPOIB CM patches submitted by Michael
Tsirkin, which have now been integrated into Roland's 2.6.21-rc1 git tree,
so this patch can be applied on top of that tree.

Instead of a single SRQ hanging off ipoib_cm_dev_priv, this patch introduces
a per-connection rx_ring hanging off each ipoib_cm_rx. There are some changes
in the initialization and cleanup paths since SRQs are not used.

This has been tested on the IBM HCA with the ehca driver. Please note that
another small patch (not included here) is needed for the ehca driver in
order for this to work on IBM HCAs.

Signed-off-by: Pradeep Satyanarayana <pradeep at us.ibm.com>
-----------------------------------------------------------------------------------------------

--- linux-2.6.21-rc1-mst/drivers/infiniband/ulp/ipoib/Makefile  2007-03-08 
17:09:48.000000000 -0800
+++ linux-2.6.21-rc1/drivers/infiniband/ulp/ipoib/Makefile      2007-03-09 
08:51:41.000000000 -0800
@@ -1,3 +1,4 @@
+EXTRA_CFLAGS += -DIPOIB_CM_NOSRQ
 obj-$(CONFIG_INFINIBAND_IPOIB)                 += ib_ipoib.o
 
 ib_ipoib-y                                     := ipoib_main.o \
--- linux-2.6.21-rc1-mst/drivers/infiniband/ulp/ipoib/ipoib.h   2007-03-08 
17:09:48.000000000 -0800
+++ linux-2.6.21-rc1/drivers/infiniband/ulp/ipoib/ipoib.h       2007-03-08 
17:35:07.000000000 -0800
@@ -98,7 +98,11 @@ enum {
 
 #define        IPOIB_OP_RECV   (1ul << 31)
 #ifdef CONFIG_INFINIBAND_IPOIB_CM
+#ifdef IPOIB_CM_NOSRQ
+#define        IPOIB_CM_OP_NOSRQ (1ul << 30)
+#else
 #define        IPOIB_CM_OP_SRQ (1ul << 30)
+#endif
 #else
 #define        IPOIB_CM_OP_SRQ (0)
 #endif
@@ -136,6 +140,9 @@ struct ipoib_cm_data {
 struct ipoib_cm_rx {
        struct ib_cm_id     *id;
        struct ib_qp        *qp;
+#ifdef IPOIB_CM_NOSRQ
+        struct ipoib_cm_rx_buf *rx_ring;
+#endif
        struct list_head     list;
        struct net_device   *dev;
        unsigned long        jiffies;
@@ -163,8 +170,10 @@ struct ipoib_cm_rx_buf {
 };
 
 struct ipoib_cm_dev_priv {
+#ifndef IPOIB_CM_NOSRQ
        struct ib_srq          *srq;
        struct ipoib_cm_rx_buf *srq_ring;
+#endif
        struct ib_cm_id        *id;
        struct list_head        passive_ids;
        struct work_struct      start_task;
--- linux-2.6.21-rc1-mst/drivers/infiniband/ulp/ipoib/ipoib_cm.c 
2007-03-08 17:09:48.000000000 -0800
+++ linux-2.6.21-rc1/drivers/infiniband/ulp/ipoib/ipoib_cm.c    2007-03-09 
08:39:00.000000000 -0800
@@ -76,12 +76,47 @@ static void ipoib_cm_dma_unmap_rx(struct
                ib_dma_unmap_single(priv->ca, mapping[i + 1], PAGE_SIZE, 
DMA_FROM_DEVICE);
 }
 
+#ifdef IPOIB_CM_NOSRQ
+static int ipoib_cm_post_receive(struct net_device *dev, u64 id)
+#else
 static int ipoib_cm_post_receive(struct net_device *dev, int id)
+#endif
 {
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        struct ib_recv_wr *bad_wr;
        int i, ret;
+#ifdef IPOIB_CM_NOSRQ
+        unsigned long flags;
+        struct ipoib_cm_rx *rx_ptr;
+        u32 qp_num = id  & 0xffffffff;
+        u64 wr_id = id >> 32;
+       int found = 0;
+
+        spin_lock_irqsave(&priv->lock, flags);
+        list_for_each_entry(rx_ptr, &priv->cm.passive_ids, list) 
+                if (qp_num == rx_ptr->qp->qp_num) {
+                       found = 1;
+                        break;
+               }
+        spin_unlock_irqrestore(&priv->lock, flags);
+       if (!found)
+               printk(KERN_WARNING "qp not on passive_ids list!!\n");
 
+       priv->cm.rx_wr.wr_id = wr_id << 32 | qp_num | IPOIB_CM_OP_NOSRQ; 
+
+       for (i = 0; i < IPOIB_CM_RX_SG; ++i)
+               priv->cm.rx_sge[i].addr = 
rx_ptr->rx_ring[wr_id].mapping[i];
+
+       ret = ib_post_recv(rx_ptr->qp, &priv->cm.rx_wr, &bad_wr);
+       if (unlikely(ret)) {
+               ipoib_warn(priv, "post recv failed for buf %d (%d)\n", 
+                          wr_id, ret);
+               ipoib_cm_dma_unmap_rx(priv, IPOIB_CM_RX_SG - 1, 
+                                     rx_ptr->rx_ring[wr_id].mapping);
+               dev_kfree_skb_any(rx_ptr->rx_ring[wr_id].skb);
+               rx_ptr->rx_ring[wr_id].skb = NULL;
+       }
+#else
        priv->cm.rx_wr.wr_id = id | IPOIB_CM_OP_SRQ;
 
        for (i = 0; i < IPOIB_CM_RX_SG; ++i)
@@ -96,15 +131,30 @@ static int ipoib_cm_post_receive(struct 
                priv->cm.srq_ring[id].skb = NULL;
        }
 
+#endif
        return ret;
 }
 
+#ifdef IPOIB_CM_NOSRQ
+static struct sk_buff *ipoib_cm_alloc_rx_skb(struct net_device *dev, u64 
id, 
+                                            int frags,
+                                             u64 mapping[IPOIB_CM_RX_SG])
+#else
 static struct sk_buff *ipoib_cm_alloc_rx_skb(struct net_device *dev, int 
id, int frags,
                                             u64 mapping[IPOIB_CM_RX_SG])
+#endif
 {
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        struct sk_buff *skb;
        int i;
+#ifdef IPOIB_CM_NOSRQ
+        unsigned long flags;
+        struct ipoib_cm_rx *rx_ptr;
+        u32 qp_num = id & 0xffffffff;
+        u32 wr_id = id >> 32;
+      int found = 0;
+#endif
+
 
        skb = dev_alloc_skb(IPOIB_CM_HEAD_SIZE + 12);
        if (unlikely(!skb))
@@ -136,7 +186,25 @@ static struct sk_buff *ipoib_cm_alloc_rx
                        goto partial_error;
        }
 
+#ifdef IPOIB_CM_NOSRQ
+
+        spin_lock_irqsave(&priv->lock, flags);
+        list_for_each_entry(rx_ptr, &priv->cm.passive_ids, list)
+                if(qp_num == rx_ptr->qp->qp_num) {
+                      found = 1;
+                        break;
+              }
+        spin_unlock_irqrestore(&priv->lock, flags);
+
+      if (!found)
+              printk(KERN_WARNING "qp not on passive_ids list!!\n");
+
+        /* Use the rx_ptr to get the requisite entry */
+        rx_ptr->rx_ring[wr_id].skb      = skb;
+
+#else
        priv->cm.srq_ring[id].skb = skb;
+#endif
        return skb;
 
 partial_error:
@@ -157,9 +225,16 @@ static struct ib_qp *ipoib_cm_create_rx_
        struct ib_qp_init_attr attr = {
                .send_cq = priv->cq, /* does not matter, we never send 
anything */
                .recv_cq = priv->cq,
+#ifdef IPOIB_CM_NOSRQ
+                .srq = NULL,
+#else
                .srq = priv->cm.srq,
+#endif
                .cap.max_send_wr = 1, /* FIXME: 0 Seems not to work */
+               .cap.max_recv_wr = ipoib_recvq_size + 1,
                .cap.max_send_sge = 1, /* FIXME: 0 Seems not to work */
+               /* .cap.max_recv_sge = 1, */ /* Is this correct? */
+               .cap.max_recv_sge = IPOIB_CM_RX_SG, /* Is this correct? */
                .sq_sig_type = IB_SIGNAL_ALL_WR,
                .qp_type = IB_QPT_RC,
                .qp_context = p,
@@ -217,7 +292,11 @@ static int ipoib_cm_send_rep(struct net_
        rep.flow_control = 0;
        rep.rnr_retry_count = req->rnr_retry_count;
        rep.target_ack_delay = 20; /* FIXME */
+#ifdef IPOIB_CM_NOSRQ
+        rep.srq = 0;
+#else
        rep.srq = 1;
+#endif
        rep.qp_num = qp->qp_num;
        rep.starting_psn = psn;
        return ib_send_cm_rep(cm_id, &rep);
@@ -231,6 +310,12 @@ static int ipoib_cm_req_handler(struct i
        unsigned long flags;
        unsigned psn;
        int ret;
+       struct ib_qp_attr qp_attr;
+       int qp_attr_mask;
+#ifdef IPOIB_CM_NOSRQ
+        u32 qp_num;
+        u64 i;
+#endif
 
        ipoib_dbg(priv, "REQ arrived\n");
        p = kzalloc(sizeof *p, GFP_KERNEL);
@@ -244,10 +329,46 @@ static int ipoib_cm_req_handler(struct i
                goto err_qp;
        }
 
+#ifdef IPOIB_CM_NOSRQ
+        qp_num = p->qp->qp_num;
+
+        /* Allocate space for the rx_ring here */
+        p->rx_ring = kzalloc(ipoib_recvq_size * sizeof *p->rx_ring,
+                                GFP_KERNEL);
+
+        cm_id->context = p;
+        p->jiffies = jiffies;
+        spin_lock_irqsave(&priv->lock, flags);
+        list_add(&p->list, &priv->cm.passive_ids);
+        spin_unlock_irqrestore(&priv->lock, flags);
+
+       psn = random32() & 0xffffff;
+        ret = ipoib_cm_modify_rx_qp(dev, cm_id, p->qp, psn);
+        if (ret)
+                goto err_modify;
+
+        for (i = 0; i < ipoib_recvq_size; ++i) {
+                if (!ipoib_cm_alloc_rx_skb(dev, i << 32 | qp_num, 
+                                         IPOIB_CM_RX_SG - 1,
+                                          p->rx_ring[i].mapping)) {
+                        ipoib_warn(priv, "failed to allocate receive 
buffer %d\n", i);
+                        ipoib_cm_dev_cleanup(dev);
+                        return -ENOMEM;
+                }
+
+                if (ipoib_cm_post_receive(dev, i << 32 | qp_num)) {
+                        ipoib_warn(priv, "ipoib_ib_post_receive failed 
for buf %d\n", i);
+                        ipoib_cm_dev_cleanup(dev);
+                        return -EIO;
+                }
+        }
+
+#else
        psn = random32() & 0xffffff;
        ret = ipoib_cm_modify_rx_qp(dev, cm_id, p->qp, psn);
        if (ret)
                goto err_modify;
+#endif
 
        ret = ipoib_cm_send_rep(dev, cm_id, p->qp, &event->param.req_rcvd, 
psn);
        if (ret) {
@@ -255,11 +376,28 @@ static int ipoib_cm_req_handler(struct i
                goto err_rep;
        }
 
+       /* This is missing in Michael's code -Do we need this */
+        qp_attr.qp_state = IB_QPS_RTS;
+
+        ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);
+        if (ret) {
+                ipoib_warn(priv, "failed to init QP attr for RTS: %d\n", 
ret);
+                return ret;
+        }
+        ret = ib_modify_qp(p->qp, &qp_attr, qp_attr_mask);
+        if (ret) {
+                ipoib_warn(priv, "failed to modify QP to RTS: %d\n", 
ret);
+                return ret;
+        }
+       /*** missing end ***/
+
+#ifndef IPOIB_CM_NOSRQ
        cm_id->context = p;
        p->jiffies = jiffies;
        spin_lock_irqsave(&priv->lock, flags);
        list_add(&p->list, &priv->cm.passive_ids);
        spin_unlock_irqrestore(&priv->lock, flags);
+#endif
        queue_delayed_work(ipoib_workqueue,
                           &priv->cm.stale_task, IPOIB_CM_RX_DELAY);
        return 0;
@@ -344,7 +482,14 @@ static void skb_put_frags(struct sk_buff
 void ipoib_cm_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
 {
        struct ipoib_dev_priv *priv = netdev_priv(dev);
+#ifdef IPOIB_CM_NOSRQ
+        struct ipoib_cm_rx *rx_ptr;
+        u32 qp_num = (wc->wr_id  & ~IPOIB_CM_OP_NOSRQ) & 0xffffffff;
+        u64 wr_id = wc->wr_id >> 32;
+        int found = 0;
+#else
        unsigned int wr_id = wc->wr_id & ~IPOIB_CM_OP_SRQ;
+#endif
        struct sk_buff *skb, *newskb;
        struct ipoib_cm_rx *p;
        unsigned long flags;
@@ -360,7 +505,23 @@ void ipoib_cm_handle_rx_wc(struct net_de
                return;
        }
 
+#ifdef IPOIB_CM_NOSRQ
+        spin_lock_irqsave(&priv->lock, flags);
+        list_for_each_entry(rx_ptr, &priv->cm.passive_ids, list)
+                if(qp_num == rx_ptr->qp->qp_num) {
+                       found = 1;
+                        break;
+               }
+        spin_unlock_irqrestore(&priv->lock, flags);
+ 
+       if (!found)
+               printk(KERN_WARNING "qp not on passive_ids list!!\n");
+
+        /* Use the rx_ptr to get the requisite entry */
+        skb     = rx_ptr->rx_ring[wr_id].skb;
+#else
        skb  = priv->cm.srq_ring[wr_id].skb;
+#endif
 
        if (unlikely(wc->status != IB_WC_SUCCESS)) {
                ipoib_dbg(priv, "cm recv error "
@@ -371,7 +532,12 @@ void ipoib_cm_handle_rx_wc(struct net_de
        }
 
        if (!likely(wr_id & IPOIB_CM_RX_UPDATE_MASK)) {
+#ifdef IPOIB_CM_NOSRQ
+                /* Temporary hack till ehca fixes wc->qp = NULL */
+                p = rx_ptr;
+#else
                p = wc->qp->qp_context;
+#endif
                if (time_after_eq(jiffies, p->jiffies + 
IPOIB_CM_RX_UPDATE_TIME)) {
                        spin_lock_irqsave(&priv->lock, flags);
                        p->jiffies = jiffies;
@@ -388,7 +554,12 @@ void ipoib_cm_handle_rx_wc(struct net_de
        frags = PAGE_ALIGN(wc->byte_len - min(wc->byte_len,
 (unsigned)IPOIB_CM_HEAD_SIZE)) / PAGE_SIZE;
 
+#ifdef IPOIB_CM_NOSRQ
+        newskb = ipoib_cm_alloc_rx_skb(dev, wr_id << 32 | qp_num, frags,
+                     mapping);
+#else
        newskb = ipoib_cm_alloc_rx_skb(dev, wr_id, frags, mapping);
+#endif
        if (unlikely(!newskb)) {
                /*
                 * If we can't allocate a new RX buffer, dump
@@ -399,8 +570,13 @@ void ipoib_cm_handle_rx_wc(struct net_de
                goto repost;
        }
 
+#ifdef IPOIB_CM_NOSRQ
+        ipoib_cm_dma_unmap_rx(priv, frags, 
rx_ptr->rx_ring[wr_id].mapping);
+        memcpy(rx_ptr->rx_ring[wr_id].mapping, mapping, (frags + 1) * 
sizeof *mapping);
+#else
        ipoib_cm_dma_unmap_rx(priv, frags, 
priv->cm.srq_ring[wr_id].mapping);
        memcpy(priv->cm.srq_ring[wr_id].mapping, mapping, (frags + 1) * 
sizeof *mapping);
+#endif
 
        ipoib_dbg_data(priv, "received %d bytes, SLID 0x%04x\n",
                       wc->byte_len, wc->slid);
@@ -421,7 +597,11 @@ void ipoib_cm_handle_rx_wc(struct net_de
        netif_rx_ni(skb);
 
 repost:
+#ifdef IPOIB_CM_NOSRQ
+        if (unlikely(ipoib_cm_post_receive(dev, wr_id << 32 | qp_num)))
+#else
        if (unlikely(ipoib_cm_post_receive(dev, wr_id)))
+#endif
                ipoib_warn(priv, "ipoib_cm_post_receive failed "
                           "for buf %d\n", wr_id);
 }
@@ -613,6 +793,9 @@ void ipoib_cm_dev_stop(struct net_device
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        struct ipoib_cm_rx *p;
        unsigned long flags;
+#ifdef IPOIB_CM_NOSRQ
+       int i;
+#endif
 
        if (!IPOIB_CM_SUPPORTED(dev->dev_addr))
                return;
@@ -621,6 +804,16 @@ void ipoib_cm_dev_stop(struct net_device
        spin_lock_irqsave(&priv->lock, flags);
        while (!list_empty(&priv->cm.passive_ids)) {
                p = list_entry(priv->cm.passive_ids.next, typeof(*p), 
list);
+#ifdef IPOIB_CM_NOSRQ
+                for(i = 0; i < ipoib_recvq_size; ++i)
+                        if(p->rx_ring[i].skb) {
+                               ipoib_cm_dma_unmap_rx(priv, IPOIB_CM_RX_SG 
- 1,
+ p->rx_ring[i].mapping);
+                                dev_kfree_skb_any(p->rx_ring[i].skb);
+                                p->rx_ring[i].skb = NULL;
+                        }
+               kfree(p->rx_ring); 
+#endif
                list_del_init(&p->list);
                spin_unlock_irqrestore(&priv->lock, flags);
                ib_destroy_cm_id(p->id);
@@ -707,7 +900,11 @@ static struct ib_qp *ipoib_cm_create_tx_
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        struct ib_qp_init_attr attr = {};
        attr.recv_cq = priv->cq;
+#ifdef IPOIB_CM_NOSRQ
+        attr.srq = NULL;
+#else
        attr.srq = priv->cm.srq;
+#endif
        attr.cap.max_send_wr = ipoib_sendq_size;
        attr.cap.max_send_sge = 1;
        attr.sq_sig_type = IB_SIGNAL_ALL_WR;
@@ -749,7 +946,11 @@ static int ipoib_cm_send_req(struct net_
        req.retry_count               = 0; /* RFC draft warns against 
retries */
        req.rnr_retry_count           = 0; /* RFC draft warns against 
retries */
        req.max_cm_retries            = 15;
+#ifdef IPOIB_CM_NOSRQ
+        req.srq                       = 0;
+#else
        req.srq                       = 1;
+#endif
        return ib_send_cm_req(id, &req);
 }
 
@@ -1089,6 +1290,9 @@ static void ipoib_cm_stale_task(struct w
                                                   cm.stale_task.work);
        struct ipoib_cm_rx *p;
        unsigned long flags;
+#ifdef IPOIB_CM_NOSRQ
+       int i;
+#endif
 
        spin_lock_irqsave(&priv->lock, flags);
        while (!list_empty(&priv->cm.passive_ids)) {
@@ -1097,6 +1301,17 @@ static void ipoib_cm_stale_task(struct w
                p = list_entry(priv->cm.passive_ids.prev, typeof(*p), 
list);
                if (time_after_eq(jiffies, p->jiffies + 
IPOIB_CM_RX_TIMEOUT))
                        break;
+#ifdef IPOIB_CM_NOSRQ
+                for(i = 0; i < ipoib_recvq_size; ++i)
+                        if(p->rx_ring[i].skb) {
+                               ipoib_cm_dma_unmap_rx(priv, IPOIB_CM_RX_SG 
- 1,
+ p->rx_ring[i].mapping);
+                                dev_kfree_skb_any(p->rx_ring[i].skb);
+                                p->rx_ring[i].skb = NULL;
+                        }
+               /* Free the rx_ring */
+               kfree(p->rx_ring);
+#endif
                list_del_init(&p->list);
                spin_unlock_irqrestore(&priv->lock, flags);
                ib_destroy_cm_id(p->id);
@@ -1154,12 +1369,14 @@ int ipoib_cm_add_mode_attr(struct net_de
 int ipoib_cm_dev_init(struct net_device *dev)
 {
        struct ipoib_dev_priv *priv = netdev_priv(dev);
+#ifndef IPOIB_CM_NOSRQ
        struct ib_srq_init_attr srq_init_attr = {
                .attr = {
                        .max_wr  = ipoib_recvq_size,
                        .max_sge = IPOIB_CM_RX_SG
                }
        };
+#endif
        int ret, i;
 
        INIT_LIST_HEAD(&priv->cm.passive_ids);
@@ -1172,6 +1389,7 @@ int ipoib_cm_dev_init(struct net_device 
 
        skb_queue_head_init(&priv->cm.skb_queue);
 
+#ifndef IPOIB_CM_NOSRQ
        priv->cm.srq = ib_create_srq(priv->pd, &srq_init_attr);
        if (IS_ERR(priv->cm.srq)) {
                ret = PTR_ERR(priv->cm.srq);
@@ -1187,6 +1405,7 @@ int ipoib_cm_dev_init(struct net_device 
                ipoib_cm_dev_cleanup(dev);
                return -ENOMEM;
        }
+#endif
 
        for (i = 0; i < IPOIB_CM_RX_SG; ++i)
                priv->cm.rx_sge[i].lkey = priv->mr->lkey;
@@ -1198,6 +1417,10 @@ int ipoib_cm_dev_init(struct net_device 
        priv->cm.rx_wr.sg_list = priv->cm.rx_sge;
        priv->cm.rx_wr.num_sge = IPOIB_CM_RX_SG;
 
+#ifndef IPOIB_CM_NOSRQ
+        /* In the case of IPOIB_CM_NOSRQ we do the rest of the init in
+           ipoib_cm_req_handler() */
+
        for (i = 0; i < ipoib_recvq_size; ++i) {
                if (!ipoib_cm_alloc_rx_skb(dev, i, IPOIB_CM_RX_SG - 1,
                                           priv->cm.srq_ring[i].mapping)) 
{
@@ -1211,6 +1434,7 @@ int ipoib_cm_dev_init(struct net_device 
                        return -EIO;
                }
        }
+#endif
 
        priv->dev->dev_addr[0] = IPOIB_FLAGS_RC;
        return 0;
@@ -1221,10 +1445,21 @@ void ipoib_cm_dev_cleanup(struct net_dev
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        int i, ret;
 
+       ipoib_dbg(priv, "Cleanup ipoib connected mode.\n");
+
+#ifdef IPOIB_CM_NOSRQ
+        /* We need to destroy all the qps associated with the ipoib_cm_rx
+          linked list hanging off the ipoib_cm_dev_priv. We also need to
+          kfree the associated skb and also the ipoib_cm_rx structures
+          themselves */
+        /* We actually do this in ipoib_cm_dev_stop(). Since srq is
+          common to all qps it is done here for SRQ. For us the
+          right place is to do it in ipoib_cm_dev_stop() */
+
+#else
        if (!priv->cm.srq)
                return;
 
-       ipoib_dbg(priv, "Cleanup ipoib connected mode.\n");
 
        ret = ib_destroy_srq(priv->cm.srq);
        if (ret)
@@ -1242,4 +1477,5 @@ void ipoib_cm_dev_cleanup(struct net_dev
                }
        kfree(priv->cm.srq_ring);
        priv->cm.srq_ring = NULL;
+#endif
 }
--- linux-2.6.21-rc1-mst/drivers/infiniband/ulp/ipoib/ipoib_ib.c 
2007-03-08 17:09:48.000000000 -0800
+++ linux-2.6.21-rc1/drivers/infiniband/ulp/ipoib/ipoib_ib.c    2007-03-08 
17:35:07.000000000 -0800
@@ -282,12 +282,21 @@ static void ipoib_ib_handle_tx_wc(struct
 
 static void ipoib_ib_handle_wc(struct net_device *dev, struct ib_wc *wc)
 {
+#ifdef IPOIB_CM_NOSRQ
+       if (wc->wr_id & IPOIB_CM_OP_NOSRQ)
+               ipoib_cm_handle_rx_wc(dev, wc);
+       else if (wc->wr_id & IPOIB_OP_RECV)
+               ipoib_ib_handle_rx_wc(dev, wc);
+       else
+               ipoib_ib_handle_tx_wc(dev, wc);
+#else
        if (wc->wr_id & IPOIB_CM_OP_SRQ)
                ipoib_cm_handle_rx_wc(dev, wc);
        else if (wc->wr_id & IPOIB_OP_RECV)
                ipoib_ib_handle_rx_wc(dev, wc);
        else
                ipoib_ib_handle_tx_wc(dev, wc);
+#endif
 }
 
 void ipoib_ib_completion(struct ib_cq *cq, void *dev_ptr)





Pradeep
pradeep at us.ibm.com
-------------- next part --------------
An HTML attachment was scrubbed...
URL: <http://lists.openfabrics.org/pipermail/general/attachments/20070309/e486383d/attachment.html>
-------------- next part --------------
A non-text attachment was scrubbed...
Name: ipoib_cm.nosrq.2621.patch
Type: application/octet-stream
Size: 17385 bytes
Desc: not available
URL: <http://lists.openfabrics.org/pipermail/general/attachments/20070309/e486383d/attachment.obj>


More information about the general mailing list