[ewg] [PATCH ofed_1_2] - chelsio bug fixes

Steve Wise swise at opengridcomputing.com
Wed Apr 25 09:24:29 PDT 2007


Vlad,

These changes are a set of bug fixes for the Chelsio drivers, as well
as support for their latest firmware.  This is required for OFED-1.2.

Please pull from:

git://git.openfabrics.org/~swise/ofed_1_2 ofed_1_2 


Thanks,

Steve.

---------
Shortlog:
---------

Divy Le Ray:
      Reuse the incoming skb when a clientless abort req is received.
      Remove assumption that PHY interrupts use GPIOs 3 and 5.

Steve Wise:
      Don't use physical addresses as mmap offsets.
      Support for new abort logic.
      Update required firmware revision.

------
Diffs:
------

commit a7e291a27cbd9488f5eb390e38a52ada2758b094
Author: Steve Wise <swise at opengridcomputing.com>
Date:   Tue Apr 24 12:57:51 2007 -0500

    Update required firmware revision.
    
    Signed-off-by: Steve Wise <swise at opengridcomputing.com>

diff --git a/drivers/net/cxgb3/version.h b/drivers/net/cxgb3/version.h
index bd7c4f7..17b9801 100644
--- a/drivers/net/cxgb3/version.h
+++ b/drivers/net/cxgb3/version.h
@@ -38,7 +38,7 @@ #define DRV_NAME "cxgb3"
 #define DRV_VERSION "1.0-ofed"
 
 /* Firmware version */
-#define FW_VERSION_MAJOR 3
-#define FW_VERSION_MINOR 3
+#define FW_VERSION_MAJOR 4
+#define FW_VERSION_MINOR 0
 #define FW_VERSION_MICRO 0
 #endif				/* __CHELSIO_VERSION_H */

commit afa256e0aa01f03c1f56960c2af8124352c7c72b
Author: Steve Wise <swise at opengridcomputing.com>
Date:   Tue Apr 24 10:31:24 2007 -0500

    Support for new abort logic.
    
    The HW now posts 2 ABORT_RPL and/or PEER_ABORT_REQ messages.  We need
    to handle them by silently dropping the first but marking that we're
    ready for the final message.  This plugs some races between the uP and
    the HW in the close path.
    
    Signed-off-by: Steve Wise <swise at opengridcomputing.com>
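
For clarity, both abort handlers below implement the same two-message
gate.  A simplified sketch of that gate (handle_hw_abort_msg and
do_teardown are hypothetical stand-ins; the real handlers do the
close_complete_upcall/state_set/release work in place of do_teardown):

	static int handle_hw_abort_msg(struct iwch_ep *ep)
	{
		/*
		 * First of the two messages: only note that it
		 * arrived and tell the core to free the buffer.
		 */
		if (!(ep->flags & ABORT_REQ_IN_PROGRESS)) {
			ep->flags |= ABORT_REQ_IN_PROGRESS;
			return CPL_RET_BUF_DONE;
		}

		/* Second message: now it is safe to tear the ep down. */
		return do_teardown(ep);
	}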

diff --git a/drivers/infiniband/hw/cxgb3/iwch_cm.c b/drivers/infiniband/hw/cxgb3/iwch_cm.c
index 36ab39e..0d81e2f 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_cm.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_cm.c
@@ -1108,6 +1108,15 @@ static int abort_rpl(struct t3cdev *tdev
 
 	PDBG("%s ep %p\n", __FUNCTION__, ep);
 
+	/*
+ 	 * We get 2 abort replies from the HW.  The first one must
+	 * be ignored except for scribbling that we need one more.
+	 */
+	if (!(ep->flags & ABORT_REQ_IN_PROGRESS)) {
+		ep->flags |= ABORT_REQ_IN_PROGRESS;
+		return CPL_RET_BUF_DONE;
+	}
+
 	close_complete_upcall(ep);
 	state_set(&ep->com, DEAD);
 	release_ep_resources(ep);
@@ -1475,6 +1484,15 @@ static int peer_abort(struct t3cdev *tde
 	int ret;
 	int state;
 
+	/*
+ 	 * We get 2 peer aborts from the HW.  The first one must
+	 * be ignored except for scribbling that we need one more.
+	 */
+	if (!(ep->flags & PEER_ABORT_IN_PROGRESS)) {
+		ep->flags |= PEER_ABORT_IN_PROGRESS;
+		return CPL_RET_BUF_DONE;
+	}
+
 	if (is_neg_adv_abort(req->status)) {
 		PDBG("%s neg_adv_abort ep %p tid %d\n", __FUNCTION__, ep,
 		     ep->hwtid);
diff --git a/drivers/infiniband/hw/cxgb3/iwch_cm.h b/drivers/infiniband/hw/cxgb3/iwch_cm.h
index 855f1ef..1d4a1a5 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_cm.h
+++ b/drivers/infiniband/hw/cxgb3/iwch_cm.h
@@ -143,6 +143,11 @@ enum iwch_ep_state {
 	DEAD,
 };
 
+enum iwch_ep_flags {
+	PEER_ABORT_IN_PROGRESS	= (1 << 0),
+	ABORT_REQ_IN_PROGRESS	= (1 << 1),
+};
+
 struct iwch_ep_common {
 	struct iw_cm_id *cm_id;
 	struct iwch_qp *qp;
@@ -181,6 +186,7 @@ struct iwch_ep {
 	u16 plen;
 	u32 ird;
 	u32 ord;
+	u32 flags;
 };
 
 static inline struct iwch_ep *to_ep(struct iw_cm_id *cm_id)

commit c7d9f4cc0d6a4695f3c96f7ae5a8310b2e8fa804
Author: Steve Wise <swise at opengridcomputing.com>
Date:   Tue Apr 24 10:31:19 2007 -0500

    Don't use physical addresses as mmap offsets.
    
    Currently iw_cxgb3 uses the physical address as the key/offset to return
    to the user process for mapping kernel memory into userspace.  The user
    process then calls mmap() using this key as the offset.  Because the
    physical address is 64 bits, this introduces a problem with 32-bit
    userspace, which might not be able to pass an arbitrary 64-bit address
    back into the kernel (since mmap2() is limited to a 32-bit number of
    pages for the offset, which limits it to 44-bit addresses).
    
    Change the mmap logic to use a u32 counter as the offset for mapping.
    
    Signed-off-by: Steve Wise <swise at opengridcomputing.com>
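
The 44-bit figure follows from the mmap2() ABI: the offset argument is
a 32-bit count of pages, so with 4 KB pages the largest byte offset it
can express is 2^32 * 2^12 = 2^44.  The fix hands userspace a small
monotonically increasing key instead and keeps the key-to-physical-
address translation in the kernel.  A minimal sketch of the allocation
step, which the patch open-codes under mmap_lock in iwch_create_cq()
and iwch_create_qp() (alloc_mmap_key itself is hypothetical):

	static u32 alloc_mmap_key(struct iwch_ucontext *ucontext)
	{
		u32 key;

		/* One PAGE_SIZE-aligned key per mappable object. */
		spin_lock(&ucontext->mmap_lock);
		key = ucontext->key;
		ucontext->key += PAGE_SIZE;
		spin_unlock(&ucontext->mmap_lock);
		return key;
	}

The key is stored next to the real physical address in an
iwch_mm_entry, and iwch_mmap() later looks the entry up by key and
remaps using mm->addr.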

diff --git a/drivers/infiniband/hw/cxgb3/iwch_provider.c b/drivers/infiniband/hw/cxgb3/iwch_provider.c
index fe57d11..b0f7218 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_provider.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_provider.c
@@ -114,7 +114,7 @@ static struct ib_ucontext *iwch_alloc_uc
 	struct iwch_dev *rhp = to_iwch_dev(ibdev);
 
 	PDBG("%s ibdev %p\n", __FUNCTION__, ibdev);
-	context = kmalloc(sizeof(*context), GFP_KERNEL);
+	context = kzalloc(sizeof(*context), GFP_KERNEL);
 	if (!context)
 		return ERR_PTR(-ENOMEM);
 	cxio_init_ucontext(&rhp->rdev, &context->uctx);
@@ -140,13 +140,14 @@ static int iwch_destroy_cq(struct ib_cq 
 }
 
 static struct ib_cq *iwch_create_cq(struct ib_device *ibdev, int entries,
-			     struct ib_ucontext *context,
+			     struct ib_ucontext *ib_context,
 			     struct ib_udata *udata)
 {
 	struct iwch_dev *rhp;
 	struct iwch_cq *chp;
 	struct iwch_create_cq_resp uresp;
 	struct iwch_create_cq_req ureq;
+	struct iwch_ucontext *ucontext = NULL;
 
 	PDBG("%s ib_dev %p entries %d\n", __FUNCTION__, ibdev, entries);
 	rhp = to_iwch_dev(ibdev);
@@ -154,12 +155,15 @@ static struct ib_cq *iwch_create_cq(stru
 	if (!chp)
 		return ERR_PTR(-ENOMEM);
 
-	if (context && !t3a_device(rhp)) {
-		if (ib_copy_from_udata(&ureq, udata, sizeof (ureq))) {
-			kfree(chp);
-			return ERR_PTR(-EFAULT);
+ 	if (ib_context) {
+ 		ucontext = to_iwch_ucontext(ib_context);
+ 		if (!t3a_device(rhp)) {
+ 			if (ib_copy_from_udata(&ureq, udata, sizeof (ureq))) {
+ 				kfree(chp);
+ 				return ERR_PTR(-EFAULT);
+ 			}
+ 			chp->user_rptr_addr = (u32 __user *)(unsigned long)ureq.user_rptr_addr;
 		}
-		chp->user_rptr_addr = (u32 *)(unsigned long)ureq.user_rptr_addr;
 	}
 
 	if (t3a_device(rhp)) {
@@ -189,7 +193,7 @@ static struct ib_cq *iwch_create_cq(stru
 	init_waitqueue_head(&chp->wait);
 	insert_handle(rhp, &rhp->cqidr, chp, chp->cq.cqid);
 
-	if (context) {
+	if (ucontext) {
 		struct iwch_mm_entry *mm;
 
 		mm = kmalloc(sizeof *mm, GFP_KERNEL);
@@ -199,16 +203,20 @@ static struct ib_cq *iwch_create_cq(stru
 		}
 		uresp.cqid = chp->cq.cqid;
 		uresp.size_log2 = chp->cq.size_log2;
-		uresp.physaddr = virt_to_phys(chp->cq.queue);
+		spin_lock(&ucontext->mmap_lock);
+		uresp.key = ucontext->key;
+		ucontext->key += PAGE_SIZE;
+		spin_unlock(&ucontext->mmap_lock);
 		if (ib_copy_to_udata(udata, &uresp, sizeof (uresp))) {
 			kfree(mm);
 			iwch_destroy_cq(&chp->ibcq);
 			return ERR_PTR(-EFAULT);
 		}
-		mm->addr = uresp.physaddr;
+		mm->key = uresp.key;
+		mm->addr = virt_to_phys(chp->cq.queue);
 		mm->len = PAGE_ALIGN((1UL << uresp.size_log2) *
 					     sizeof (struct t3_cqe));
-		insert_mmap(to_iwch_ucontext(context), mm);
+		insert_mmap(ucontext, mm);
 	}
 	PDBG("created cqid 0x%0x chp %p size 0x%0x, dma_addr 0x%0llx\n",
 	     chp->cq.cqid, chp, (1 << chp->cq.size_log2),
@@ -315,14 +323,15 @@ static int iwch_arm_cq(struct ib_cq *ibc
 static int iwch_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
 {
 	int len = vma->vm_end - vma->vm_start;
-	u64 pgaddr = vma->vm_pgoff << PAGE_SHIFT;
+	u32 key = vma->vm_pgoff << PAGE_SHIFT;
 	struct cxio_rdev *rdev_p;
 	int ret = 0;
 	struct iwch_mm_entry *mm;
 	struct iwch_ucontext *ucontext;
+	u64 addr;
 
-	PDBG("%s off 0x%lx addr 0x%llx len %d\n", __FUNCTION__, vma->vm_pgoff,
-	     pgaddr, len);
+	PDBG("%s pgoff 0x%lx key 0x%x len %d\n", __FUNCTION__, vma->vm_pgoff,
+	     key, len);
 
 	if (vma->vm_start & (PAGE_SIZE-1)) {
                 return -EINVAL;
@@ -331,13 +340,14 @@ static int iwch_mmap(struct ib_ucontext 
 	rdev_p = &(to_iwch_dev(context->device)->rdev);
 	ucontext = to_iwch_ucontext(context);
 
-	mm = remove_mmap(ucontext, pgaddr, len);
+	mm = remove_mmap(ucontext, key, len);
 	if (!mm)
 		return -EINVAL;
+	addr = mm->addr;
 	kfree(mm);
 
-	if ((pgaddr >= rdev_p->rnic_info.udbell_physbase) &&
-	    (pgaddr < (rdev_p->rnic_info.udbell_physbase +
+	if ((addr >= rdev_p->rnic_info.udbell_physbase) &&
+	    (addr < (rdev_p->rnic_info.udbell_physbase +
 		       rdev_p->rnic_info.udbell_len))) {
 
 		/*
@@ -350,15 +360,17 @@ static int iwch_mmap(struct ib_ucontext 
 		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 		vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND;
 		vma->vm_flags &= ~VM_MAYREAD;
-		ret = io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
-				       len, vma->vm_page_prot);
+		ret = io_remap_pfn_range(vma, vma->vm_start, 
+					 addr >> PAGE_SHIFT,
+				         len, vma->vm_page_prot);
 	} else {
 
 		/*
 		 * Map WQ or CQ contig dma memory...
 		 */
-		ret = remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
-				       len, vma->vm_page_prot);
+		ret = remap_pfn_range(vma, vma->vm_start, 
+				      addr >> PAGE_SHIFT,
+				      len, vma->vm_page_prot);
 	}
 	
 	return ret;
@@ -838,18 +850,24 @@ static struct ib_qp *iwch_create_qp(stru
 		uresp.size_log2 = qhp->wq.size_log2;
 		uresp.sq_size_log2 = qhp->wq.sq_size_log2;
 		uresp.rq_size_log2 = qhp->wq.rq_size_log2;
-		uresp.physaddr = virt_to_phys(qhp->wq.queue);
-		uresp.doorbell = qhp->wq.udb;
+		spin_lock(&ucontext->mmap_lock);
+		uresp.key = ucontext->key;
+		ucontext->key += PAGE_SIZE;
+		uresp.db_key = ucontext->key;
+		ucontext->key += PAGE_SIZE;
+		spin_unlock(&ucontext->mmap_lock);
 		if (ib_copy_to_udata(udata, &uresp, sizeof (uresp))) {
 			kfree(mm1);
 			kfree(mm2);
 			iwch_destroy_qp(&qhp->ibqp);
 			return ERR_PTR(-EFAULT);
 		}
-		mm1->addr = uresp.physaddr;
+		mm1->key = uresp.key;
+		mm1->addr = virt_to_phys(qhp->wq.queue);
 		mm1->len = PAGE_ALIGN(wqsize * sizeof (union t3_wr));
 		insert_mmap(ucontext, mm1);
-		mm2->addr = uresp.doorbell & PAGE_MASK;
+		mm2->key = uresp.db_key;
+		mm2->addr = qhp->wq.udb & PAGE_MASK;
 		mm2->len = PAGE_SIZE;
 		insert_mmap(ucontext, mm2);
 	}
diff --git a/drivers/infiniband/hw/cxgb3/iwch_provider.h b/drivers/infiniband/hw/cxgb3/iwch_provider.h
index 998b323..ae57478 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_provider.h
+++ b/drivers/infiniband/hw/cxgb3/iwch_provider.h
@@ -183,6 +183,7 @@ struct ib_qp *iwch_get_qp(struct ib_devi
 struct iwch_ucontext {
 	struct ib_ucontext ibucontext;
 	struct cxio_ucontext uctx;
+	u32 key;
 	spinlock_t mmap_lock;
 	struct list_head mmaps;
 };
@@ -195,11 +196,12 @@ static inline struct iwch_ucontext *to_i
 struct iwch_mm_entry {
 	struct list_head entry;
 	u64 addr;
+	u32 key;
 	unsigned len;
 };
 
 static inline struct iwch_mm_entry *remove_mmap(struct iwch_ucontext *ucontext,
-						u64 addr, unsigned len)
+						u32 key, unsigned len)
 {
 	struct list_head *pos, *nxt;
 	struct iwch_mm_entry *mm;
@@ -208,11 +210,11 @@ static inline struct iwch_mm_entry *remo
 	list_for_each_safe(pos, nxt, &ucontext->mmaps) {
 		
 		mm = list_entry(pos, struct iwch_mm_entry, entry);
-		if (mm->addr == addr && mm->len == len) {
+		if (mm->key == key && mm->len == len) {
 			list_del_init(&mm->entry);
 			spin_unlock_irq(&ucontext->mmap_lock);
-			PDBG("%s addr 0x%llx len %d\n", __FUNCTION__, mm->addr,
-			     mm->len);
+			PDBG("%s addr 0x%llx key 0x%x len %d\n", 
+			     __FUNCTION__, mm->addr, mm->key, mm->len);
 			return mm;
 		}
 	}
@@ -224,7 +226,8 @@ static inline void insert_mmap(struct iw
 			       struct iwch_mm_entry *mm)
 {
 	spin_lock_irq(&ucontext->mmap_lock);
-	PDBG("%s addr 0x%llx len %d\n", __FUNCTION__, mm->addr, mm->len);
+	PDBG("%s addr 0x%llx key 0x%x len %d\n", 
+	     __FUNCTION__, mm->addr, mm->key, mm->len);
 	list_add_tail(&mm->entry, &ucontext->mmaps);
 	spin_unlock_irq(&ucontext->mmap_lock);
 }
diff --git a/drivers/infiniband/hw/cxgb3/iwch_user.h b/drivers/infiniband/hw/cxgb3/iwch_user.h
index bf0a2f6..4d7526d 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_user.h
+++ b/drivers/infiniband/hw/cxgb3/iwch_user.h
@@ -46,14 +46,14 @@ struct iwch_create_cq_req {
 };
 
 struct iwch_create_cq_resp {
-	__u64 physaddr;		
+	__u64 key;		
 	__u32 cqid;
 	__u32 size_log2;
 };
 
 struct iwch_create_qp_resp {
-	__u64 physaddr;
-	__u64 doorbell;	
+	__u64 key;
+	__u64 db_key;	
 	__u32 qpid;
 	__u32 size_log2;
 	__u32 sq_size_log2;

commit d0d41ed85d44dfafbe66f59ae0ad802409a115e7
Author: Divy Le Ray <divy at chelsio.com>
Date:   Tue Apr 24 10:31:15 2007 -0500

    Remove assumption that PHY interrupts use GPIOs 3 and 5.
    Handle PHY interrupts connected to any GPIO pin.
    
    Signed-off-by: Divy Le Ray <divy at chelsio.com>
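
The rewritten loop peels one interrupt bit per port off the adapter's
gpio_intr mask: gpi & (gpi - 1) clears the lowest set bit, so
gpi - (gpi & (gpi - 1)) isolates it (the same value as gpi & -gpi).
A toy illustration using the previously hard-wired GPIOs 3 and 5:

	u32 gpi = 0x28;			/* bits 3 and 5 set */
	u32 mask;

	mask = gpi - (gpi & (gpi - 1));	/* 0x08: GPIO 3 */
	gpi -= mask;			/* 0x20 remains */
	mask = gpi - (gpi & (gpi - 1));	/* 0x20: GPIO 5 */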

diff --git a/drivers/net/cxgb3/t3_hw.c b/drivers/net/cxgb3/t3_hw.c
index d83f075..fb485d0 100644
--- a/drivers/net/cxgb3/t3_hw.c
+++ b/drivers/net/cxgb3/t3_hw.c
@@ -1523,19 +1523,25 @@ static int mac_intr_handler(struct adapt
  */
 int t3_phy_intr_handler(struct adapter *adapter)
 {
-	static const int intr_gpio_bits[] = { 8, 0x20 };
-
+	u32 mask, gpi = adapter_info(adapter)->gpio_intr;
 	u32 i, cause = t3_read_reg(adapter, A_T3DBG_INT_CAUSE);
 
 	for_each_port(adapter, i) {
-		if (cause & intr_gpio_bits[i]) {
-			struct cphy *phy = &adap2pinfo(adapter, i)->phy;
-			int phy_cause = phy->ops->intr_handler(phy);
+		struct port_info *p = adap2pinfo(adapter, i);
+
+		mask = gpi - (gpi & (gpi - 1));
+		gpi -= mask;
+
+		if (!(p->port_type->caps & SUPPORTED_IRQ))
+			continue;
+
+		if (cause & mask) {
+			int phy_cause = p->phy.ops->intr_handler(&p->phy);
 
 			if (phy_cause & cphy_cause_link_change)
 				t3_link_changed(adapter, i);
 			if (phy_cause & cphy_cause_fifo_error)
-				phy->fifo_errors++;
+				p->phy.fifo_errors++;
 		}
 	}
 

commit 918f98dc61e30a55c45086d2602bbe6187a3782c
Author: Divy Le Ray <divy at chelsio.com>
Date:   Tue Apr 24 10:31:11 2007 -0500

    Reuse the incoming skb when a clientless abort req is received.
    
    The release of an RDMA connection's HW resources might be deferred
    in low-memory situations.
    Ensure that no further activity is passed up to the RDMA driver
    for these connections.
    
    Signed-off-by: Divy Le Ray <divy at chelsio.com>
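
The new helper cxgb3_get_cpl_reply_skb() is what makes the reply
possible without an allocation: when the request skb is not cloned it
is trimmed to reply size and skb_get() takes an extra reference, so
the buffer survives the CPL_RET_BUF_DONE free of the original request
and can be sent back as the CPL_ABORT_RPL; only a cloned skb falls
back to alloc_skb().  A condensed restatement of that decision:

	if (!skb_cloned(skb)) {
		__skb_trim(skb, len);	/* shrink request to reply size */
		skb_get(skb);		/* extra ref: reply reuses buffer */
	} else {
		skb = alloc_skb(len, GFP_ATOMIC);
		if (skb)
			__skb_put(skb, len);
	}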

diff --git a/drivers/net/cxgb3/cxgb3_defs.h b/drivers/net/cxgb3/cxgb3_defs.h
old mode 100755
new mode 100644
index e14862b..483a594
--- a/drivers/net/cxgb3/cxgb3_defs.h
+++ b/drivers/net/cxgb3/cxgb3_defs.h
@@ -67,7 +67,10 @@ static inline union listen_entry *stid2e
 static inline struct t3c_tid_entry *lookup_tid(const struct tid_info *t,
 					       unsigned int tid)
 {
-	return tid < t->ntids ? &(t->tid_tab[tid]) : NULL;
+	struct t3c_tid_entry *t3c_tid = tid < t->ntids ?
+	    &(t->tid_tab[tid]) : NULL;
+
+	return (t3c_tid && t3c_tid->client) ? t3c_tid : NULL;
 }
 
 /*
diff --git a/drivers/net/cxgb3/cxgb3_offload.c b/drivers/net/cxgb3/cxgb3_offload.c
index 3353171..9db428d 100644
--- a/drivers/net/cxgb3/cxgb3_offload.c
+++ b/drivers/net/cxgb3/cxgb3_offload.c
@@ -506,6 +506,7 @@ void cxgb3_queue_tid_release(struct t3cd
 
 	spin_lock_bh(&td->tid_release_lock);
 	p->ctx = (void *)td->tid_release_list;
+	p->client = NULL;
 	td->tid_release_list = p;
 	if (!p->ctx)
 		schedule_work(&td->tid_release_task);
@@ -621,7 +622,8 @@ static int do_act_open_rpl(struct t3cdev
 	struct t3c_tid_entry *t3c_tid;
 
 	t3c_tid = lookup_atid(&(T3C_DATA(dev))->tid_maps, atid);
-	if (t3c_tid->ctx && t3c_tid->client && t3c_tid->client->handlers &&
+	if (t3c_tid && t3c_tid->ctx && t3c_tid->client &&
+	    t3c_tid->client->handlers &&
 	    t3c_tid->client->handlers[CPL_ACT_OPEN_RPL]) {
 		return t3c_tid->client->handlers[CPL_ACT_OPEN_RPL] (dev, skb,
 								    t3c_tid->
@@ -640,7 +642,7 @@ static int do_stid_rpl(struct t3cdev *de
 	struct t3c_tid_entry *t3c_tid;
 
 	t3c_tid = lookup_stid(&(T3C_DATA(dev))->tid_maps, stid);
-	if (t3c_tid->ctx && t3c_tid->client->handlers &&
+	if (t3c_tid && t3c_tid->ctx && t3c_tid->client->handlers &&
 	    t3c_tid->client->handlers[p->opcode]) {
 		return t3c_tid->client->handlers[p->opcode] (dev, skb,
 							     t3c_tid->ctx);
@@ -658,7 +660,7 @@ static int do_hwtid_rpl(struct t3cdev *d
 	struct t3c_tid_entry *t3c_tid;
 
 	t3c_tid = lookup_tid(&(T3C_DATA(dev))->tid_maps, hwtid);
-	if (t3c_tid->ctx && t3c_tid->client->handlers &&
+	if (t3c_tid && t3c_tid->ctx && t3c_tid->client->handlers &&
 	    t3c_tid->client->handlers[p->opcode]) {
 		return t3c_tid->client->handlers[p->opcode]
 		    (dev, skb, t3c_tid->ctx);
@@ -687,6 +689,28 @@ static int do_cr(struct t3cdev *dev, str
 	}
 }
 
+/*
+ * Returns an sk_buff for a reply CPL message of size len.  If the input
+ * sk_buff has no other users it is trimmed and reused, otherwise a new buffer
+ * is allocated.  The input skb must be of size at least len.  Note that this
+ * operation does not destroy the original skb data even if it decides to reuse
+ * the buffer.
+ */
+static struct sk_buff *cxgb3_get_cpl_reply_skb(struct sk_buff *skb, size_t len,
+					       int gfp)
+{
+	if (likely(!skb_cloned(skb))) {
+		BUG_ON(skb->len < len);
+		__skb_trim(skb, len);
+		skb_get(skb);
+	} else {
+		skb = alloc_skb(len, gfp);
+		if (skb)
+			__skb_put(skb, len);
+	}
+	return skb;
+}
+
 static int do_abort_req_rss(struct t3cdev *dev, struct sk_buff *skb)
 {
 	union opcode_tid *p = cplhdr(skb);
@@ -694,30 +718,39 @@ static int do_abort_req_rss(struct t3cde
 	struct t3c_tid_entry *t3c_tid;
 
 	t3c_tid = lookup_tid(&(T3C_DATA(dev))->tid_maps, hwtid);
-	if (t3c_tid->ctx && t3c_tid->client->handlers &&
+	if (t3c_tid && t3c_tid->ctx && t3c_tid->client->handlers &&
 	    t3c_tid->client->handlers[p->opcode]) {
 		return t3c_tid->client->handlers[p->opcode]
 		    (dev, skb, t3c_tid->ctx);
 	} else {
 		struct cpl_abort_req_rss *req = cplhdr(skb);
 		struct cpl_abort_rpl *rpl;
+		struct sk_buff *reply_skb;
+		unsigned int tid = GET_TID(req);
+		u8 cmd = req->status;
+
+		if (req->status == CPL_ERR_RTX_NEG_ADVICE ||
+		    req->status == CPL_ERR_PERSIST_NEG_ADVICE)
+			goto out;
 
-		struct sk_buff *skb =
-		    alloc_skb(sizeof(struct cpl_abort_rpl), GFP_ATOMIC);
-		if (!skb) {
+		reply_skb = cxgb3_get_cpl_reply_skb(skb,
+						    sizeof(struct
+							   cpl_abort_rpl),
+						    GFP_ATOMIC);
+
+		if (!reply_skb) {
 			printk("do_abort_req_rss: couldn't get skb!\n");
 			goto out;
 		}
-		skb->priority = CPL_PRIORITY_DATA;
-		__skb_put(skb, sizeof(struct cpl_abort_rpl));
-		rpl = cplhdr(skb);
+		reply_skb->priority = CPL_PRIORITY_DATA;
+		__skb_put(reply_skb, sizeof(struct cpl_abort_rpl));
+		rpl = cplhdr(reply_skb);
 		rpl->wr.wr_hi =
 		    htonl(V_WR_OP(FW_WROPCODE_OFLD_HOST_ABORT_CON_RPL));
-		rpl->wr.wr_lo = htonl(V_WR_TID(GET_TID(req)));
-		OPCODE_TID(rpl) =
-		    htonl(MK_OPCODE_TID(CPL_ABORT_RPL, GET_TID(req)));
-		rpl->cmd = req->status;
-		cxgb3_ofld_send(dev, skb);
+		rpl->wr.wr_lo = htonl(V_WR_TID(tid));
+		OPCODE_TID(rpl) = htonl(MK_OPCODE_TID(CPL_ABORT_RPL, tid));
+		rpl->cmd = cmd;
+		cxgb3_ofld_send(dev, reply_skb);
 out:
 		return CPL_RET_BUF_DONE;
 	}
@@ -730,7 +763,7 @@ static int do_act_establish(struct t3cde
 	struct t3c_tid_entry *t3c_tid;
 
 	t3c_tid = lookup_atid(&(T3C_DATA(dev))->tid_maps, atid);
-	if (t3c_tid->ctx && t3c_tid->client->handlers &&
+	if (t3c_tid && t3c_tid->ctx && t3c_tid->client->handlers &&
 	    t3c_tid->client->handlers[CPL_ACT_ESTABLISH]) {
 		return t3c_tid->client->handlers[CPL_ACT_ESTABLISH]
 		    (dev, skb, t3c_tid->ctx);
@@ -760,7 +793,7 @@ static int do_term(struct t3cdev *dev, s
 	struct t3c_tid_entry *t3c_tid;
 
 	t3c_tid = lookup_tid(&(T3C_DATA(dev))->tid_maps, hwtid);
-	if (t3c_tid->ctx && t3c_tid->client->handlers &&
+	if (t3c_tid && t3c_tid->ctx && t3c_tid->client->handlers &&
 	    t3c_tid->client->handlers[opcode]) {
 		return t3c_tid->client->handlers[opcode] (dev, skb,
 							  t3c_tid->ctx);
@@ -959,7 +992,7 @@ void cxgb_redirect(struct dst_entry *old
 	for (tid = 0; tid < ti->ntids; tid++) {
 		te = lookup_tid(ti, tid);
 		BUG_ON(!te);
-		if (te->ctx && te->client && te->client->redirect) {
+		if (te && te->ctx && te->client && te->client->redirect) {
 			update_tcb = te->client->redirect(te->ctx, old, new, e);
 			if (update_tcb) {
 				l2t_hold(L2DATA(tdev), e);




