[openib-general] [PATCH 09/18] [RFC] Provider iWARP Connection Management

Steve Wise swise at opengridcomputing.com
Mon Mar 6 10:07:25 PST 2006


ISSUES:

- The CM should pass down the dst entry, or at least the next-hop
IP address and MAC address.  Currently this code looks that info up
itself based on the source and destination IP addresses (see the
route-lookup sketch below).

- Port management isn't correct.  It should be moved into the core
IWCM or CMA.  It's not trivial to coordinate port allocation and
reservation with the native stack's TCP ports (see the port-bitmap
sketch below).

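For reference, here is the lookup chain the provider performs today,
condensed from iwch_connect() in the patch below (every call shown
appears in the patch; error unwinding is elided):

	/* resolve an output route from the connection 4-tuple */
	rt = find_route(h->rdev.t3cdev_p,
			cm_id->local_addr.sin_addr.s_addr,
			cm_id->remote_addr.sin_addr.s_addr,
			cm_id->local_addr.sin_port,
			cm_id->remote_addr.sin_port, IPTOS_MINCOST);
	if (!rt)
		return -EHOSTUNREACH;
	ep->dst = &rt->u.dst;

	/* map the route's next-hop neighbour to an L2T entry */
	ep->l2t = t3_l2t_get(ep->com.tdev, ep->dst->neighbour,
			     ep->dst->neighbour->dev->if_port);
	if (!ep->l2t)
		return -ENOMEM;

If the IWCM handed down the dst entry, the find_route() step would
disappear and only the L2T mapping would remain in the provider.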

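The patch tracks reserved ports with a module-local bitmap (see
get_port()/free_port() below).  As a rough userspace model of those
semantics -- locking and network byte order omitted, all names
hypothetical -- the scheme amounts to:

	#include <stdint.h>

	#define PORT_START 32768
	#define PORT_END   65535

	static uint8_t portmap[65536 / 8];	/* one bit per TCP port */

	static int test_and_set(unsigned int p)
	{
		uint8_t m = 1 << (p & 7);
		int was = portmap[p >> 3] & m;

		portmap[p >> 3] |= m;
		return was != 0;
	}

	/* *port == 0 means "any ephemeral port"; else reserve *port. */
	static int model_get_port(uint16_t *port)
	{
		unsigned int p = *port;

		if (p == 0) {
			for (p = PORT_START; p <= PORT_END; p++) {
				if (!test_and_set(p)) {
					*port = (uint16_t)p;
					return 0;
				}
			}
			return 1;	/* ephemeral range exhausted */
		}
		return test_and_set(p);	/* 1 if already taken */
	}

	static void model_free_port(uint16_t port)
	{
		portmap[port >> 3] &= ~(uint8_t)(1 << (port & 7));
	}

Because this bitmap is private to the driver, a port the host stack is
already using can still be handed out -- which is exactly the conflict
noted above.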
--- old/src/linux-kernel/infiniband/hw/cxgb3/iwch_cm.c	1969-12-31 18:00:00.000000000 -0600
+++ new/src/linux-kernel/infiniband/hw/cxgb3/iwch_cm.c	2006-03-06 09:26:21.000000000 -0600
@@ -0,0 +1,1801 @@
+/*
+ * Copyright (c) 2006 Chelsio, Inc. All rights reserved.
+ * Copyright (c) 2006 Open Grid Computing, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include <asm/atomic.h>
+#include <linux/module.h>
+#include <linux/list.h>
+#include <linux/workqueue.h>
+#include <linux/skbuff.h>
+#include <linux/timer.h>
+#include <linux/notifier.h>
+#include <net/neighbour.h>
+#include <net/netevent.h>
+
+#include <t3c.h>
+
+#include "iwch.h"
+#include "iwch_provider.h"
+#include "iwch_cm.h"
+
+#ifdef IWCH_DEBUG
+char *states[] = {
+	"idle",
+	"listen",
+	"mpa_wait_req",
+	"mpa_req_sent",
+	"mpa_req_rcvd",
+	"mpa_rep_sent",
+	"fpdu_mode",
+	"reject",
+	"dead",
+	"free",
+	0,
+};
+#endif
+
+static int mpa_timeout_secs = 10;
+module_param(mpa_timeout_secs, int, 0444);
+MODULE_PARM_DESC(mpa_timeout_secs, "MPA negotiation timeout "
+				   "in seconds (default=10)");
+
+static int mpa_rev = 0;
+module_param(mpa_rev, int, 0444);
+MODULE_PARM_DESC(mpa_rev, "MPA Revision, 0 supports amso1100, "
+		 "1 is spec compliant. (default=0)");
+
+static int markers_enabled = 0;
+module_param(markers_enabled, int, 0444);
+MODULE_PARM_DESC(markers_enabled, "Enable MARKERS (default(0)=disabled)");
+
+static u16 port_start = 32768;
+module_param(port_start, ushort, 0444);
+MODULE_PARM_DESC(port_start, 
+		 "Starting port for ephemeral ports. (default=32768)");
+
+static u16 port_end = 65535;
+module_param(port_end, ushort, 0444);
+MODULE_PARM_DESC(port_end, 
+		 "Ending port for ephemeral ports. (default=65535)");
+
+static void process_work(void *ctx);
+static DECLARE_WORK(work, process_work, NULL);
+static struct sk_buff_head rxq;
+static t3c_cpl_handler_t work_handlers[NUM_CPL_CMDS];
+
+static struct sk_buff *get_skb(struct sk_buff *skb, int len, int gfp);
+static void mpa_timeout(unsigned long arg);
+static void free_ep(struct iwch_ep_common *epc);
+
+static void start_mpa_timer(struct iwch_ep *ep)
+{
+	PDBG("%s enter (%s line %u)\n", __FUNCTION__, __FILE__, __LINE__);
+	atomic_inc(&ep->com.refcnt);
+	ep->timer.expires = jiffies + mpa_timeout_secs * HZ;
+	ep->timer.data = (unsigned long)ep;
+	ep->timer.function = mpa_timeout;
+	add_timer(&ep->timer);
+}
+
+static void stop_mpa_timer(struct iwch_ep *ep)
+{
+	PDBG("%s enter (%s line %u)\n", __FUNCTION__, __FILE__, __LINE__);
+	del_timer_sync(&ep->timer);
+	free_ep(&ep->com);
+}
+
+/*
+ * Port bitmap to track which ports are in use.  This should be
+ * global to all openib rnic devices...
+ */
+static DECLARE_BITMAP(portbits, 65536);
+static DEFINE_SPINLOCK(portlock);
+
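+/*
+ * Reserve a TCP port (passed in network byte order).  A zero *portp
+ * requests any free ephemeral port; nonzero reserves that specific
+ * port.  Returns 0 on success, nonzero if the port is unavailable.
+ */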
+static int get_port(u16 *portp)
+{
+	u32 port = (u32)ntohs(*portp);
+	int ret = 0;
+	PDBG("%s enter (%s line %u)\n", __FUNCTION__, __FILE__, __LINE__);
+	spin_lock(&portlock);
+	if (port == 0) {
+		port = find_next_zero_bit(portbits, 65536, port_start);
+		if (port > port_end)
+			ret = 1;
+		else
+			set_bit(port, portbits);
+	} else
+		if (test_and_set_bit(port, portbits))
+			ret = 1;
+	spin_unlock(&portlock);
+	if (!ret) {
+		*portp = htons(port);
+		PDBG("%s alloc port %d\n", __FUNCTION__, port);
+	}
+	return ret;
+}
+
+static void free_port(u16 port)
+{
+	PDBG("%s enter (%s line %u)\n", __FUNCTION__, __FILE__, __LINE__);
+	spin_lock(&portlock);
+	PDBG("%s free port %d\n", __FUNCTION__, ntohs(port));
+	clear_bit((u32)ntohs(port), portbits);
+	spin_unlock(&portlock);
+}
+
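+/*
+ * Quiesce RX on this connection's hardware TID so no streaming data
+ * arrives while the MPA reply is sent and the QP is moved to RTS;
+ * iwch_resume_tid() undoes this.
+ */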
+int iwch_quiesce_tid(struct iwch_ep *ep)
+{
+	struct cpl_set_tcb_field *req;
+	struct sk_buff *skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);
+
+	if (!skb) {
+		return -ENOMEM;
+	}
+	req = (struct cpl_set_tcb_field *) skb_put(skb, sizeof(*req));
+	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
+	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, ep->hwtid));
+	req->reply = 0;
+	req->cpu_idx = 0;
+	req->word = htons(W_TCB_RX_QUIESCE);
+	req->mask = cpu_to_be64(1ULL << S_TCB_RX_QUIESCE);
+	req->val = cpu_to_be64(1 << S_TCB_RX_QUIESCE);
+
+	skb->priority = CPL_PRIORITY_CONTROL;
+	ep->com.tdev->send(ep->com.tdev, skb);
+	return 0;
+}
+
+int iwch_resume_tid(struct iwch_ep *ep)
+{
+	struct cpl_set_tcb_field *req;
+	struct sk_buff *skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);
+
+	if (!skb) {
+		return -ENOMEM;
+	}
+	req = (struct cpl_set_tcb_field *) skb_put(skb, sizeof(*req));
+	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
+	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, ep->hwtid));
+	req->reply = 0;
+	req->cpu_idx = 0;
+	req->word = htons(W_TCB_RX_QUIESCE);
+	req->mask = cpu_to_be64(1ULL << S_TCB_RX_QUIESCE);
+	req->val = 0;
+
+	skb->priority = CPL_PRIORITY_CONTROL;
+	ep->com.tdev->send(ep->com.tdev, skb);
+	return 0;
+}
+
+static void set_emss(struct iwch_ep *ep, u16 opt)
+{
+	PDBG("%s enter (%s line %u)\n", __FUNCTION__, __FILE__, __LINE__);
+	ep->emss = T3C_DATA(ep->com.tdev)->mtus[G_TCPOPT_MSS(opt)] - 40;
+	if (G_TCPOPT_TSTAMP(opt)) {
+		ep->emss -= 12;
+	}
+	PDBG("emss=%d\n", ep->emss);
+}
+
+static int state_comp_exch(struct iwch_ep_common *epc,
+			   enum iwch_ep_state comp,
+			   enum iwch_ep_state exch)
+{
+	unsigned long flags;
+	int ret;
+
+	spin_lock_irqsave(&epc->lock, flags);
+	if ((ret = (epc->state == comp)))
+		epc->state = exch;
+	spin_unlock_irqrestore(&epc->lock, flags);
+	return ret;
+}
+
+static enum iwch_ep_state state_read(struct iwch_ep_common *epc)
+{
+	unsigned long flags;
+	enum iwch_ep_state state;
+
+	spin_lock_irqsave(&epc->lock, flags);
+	state = epc->state;
+	spin_unlock_irqrestore(&epc->lock, flags);
+	return state;
+}
+
+static void state_set(struct iwch_ep_common *epc, enum iwch_ep_state new)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&epc->lock, flags);
+	PDBG(" %s - %s -> %s\n", __FUNCTION__, states[epc->state], 
+		states[new]);
+	epc->state = new;
+	spin_unlock_irqrestore(&epc->lock, flags);
+	return;
+}
+
+static void *alloc_ep(int size, int gfp)
+{
+	struct iwch_ep_common *epc;
+
+	PDBG("%s enter (%s line %u)\n", __FUNCTION__, __FILE__, __LINE__);
+	epc = kmalloc(size, gfp);
+	if (epc) {
+		memset(epc, 0, size);
+		atomic_set(&epc->refcnt, 1);
+		spin_lock_init(&epc->lock);
+		init_waitqueue_head(&epc->waitq);
+	}
+	PDBG("alloc ep %p\n", epc);
+	return (void *) epc;
+}
+
+static void free_ep(struct iwch_ep_common *epc)
+{
+	PDBG("%s enter (%s line %u)\n", __FUNCTION__, __FILE__, __LINE__);
+	if (atomic_read(&epc->refcnt) == 1) {
+		goto out;
+	}
+	if (!atomic_dec_and_test(&epc->refcnt)) {
+		return;
+	}
+out:
+	PDBG("free ep %p\n", epc);
+	free_port(epc->local_addr.sin_port);
+	kfree(epc);
+}
+
+static void process_work(void *ctx)
+{
+	struct sk_buff *skb = NULL;
+	void *ep;
+	struct t3cdev *tdev;
+	union opcode_tid *p;
+	int ret;
+
+	while ((skb = skb_dequeue(&rxq))) {
+		p = cplhdr(skb);
+		ep = *((void **) (skb->cb));
+		tdev = *((struct t3cdev **) (skb->cb + sizeof(void *)));
+		ret = work_handlers[p->opcode] (tdev, skb, ep);
+		if (ret & CPL_RET_BUF_DONE)
+			kfree_skb(skb);
+
+		/* 
+		 * ep was referenced in sched(), and is freed here.
+		 */
+		free_ep(ep);
+	}
+}
+
+static int status2errno(int status)
+{
+	switch (status) {
+	case CPL_ERR_NONE:
+		return 0;
+	case CPL_ERR_CONN_RESET:
+		return -ECONNREFUSED;
+	case CPL_ERR_ARP_MISS:
+		return -EHOSTUNREACH;
+	case CPL_ERR_CONN_TIMEDOUT:
+		return -ETIMEDOUT;
+	case CPL_ERR_TCAM_FULL:
+		return -ENOMEM;
+	case CPL_ERR_CONN_EXIST:
+		return -EADDRINUSE;
+	default:
+		return -EIO;
+	}
+}
+
+/*
+ * Try and reuse skbs already allocated...
+ */
+static struct sk_buff *get_skb(struct sk_buff *skb, int len, int gfp)
+{
+	if (skb) {
+		BUG_ON(skb_cloned(skb));
+		skb_trim(skb, 0);
+		skb_get(skb);
+	} else {
+		skb = alloc_skb(len, gfp);
+	}
+	return skb;
+}
+
+static struct rtable *find_route(struct t3cdev *dev,
+				 u32 local_ip, u32 peer_ip, u16 local_port,
+				 u16 peer_port, u8 tos)
+{
+	struct rtable *rt;
+	struct flowi fl = {
+		.oif = 0,
+		.nl_u = {
+			 .ip4_u = {
+				   .daddr = peer_ip,
+				   .saddr = local_ip,
+				   .tos = tos}
+			 },
+		.proto = IPPROTO_TCP,
+		.uli_u = {
+			  .ports = {
+				    .sport = local_port,
+				    .dport = peer_port}
+			  }
+	};
+
+	PDBG("%s enter (%s line %u)\n", __FUNCTION__, __FILE__, __LINE__);
+	if (ip_route_output_flow(&rt, &fl, NULL, 0)) {
+		return NULL;
+	}
+	return rt;
+}
+
+static unsigned int find_best_mtu(const struct t3c_data *d, unsigned short mtu)
+{
+	int i = 0;
+
+	PDBG("%s enter (%s line %u)\n", __FUNCTION__, __FILE__, __LINE__);
+	while (i < d->nmtus - 1 && d->mtus[i + 1] <= mtu)
+		++i;
+	return i;
+}
+
+/*
+ * XXX need to upcall the connection setup failure somehow!
+ */
+static void arp_failure_discard(struct t3cdev *dev, struct sk_buff *skb)
+{
+	PDBG("%s enter (%s line %u)\n", __FUNCTION__, __FILE__, __LINE__);
+	kfree_skb(skb);
+}
+
+/*
+ * Handle an ARP failure for an active open.   
+ */
+static void act_open_req_arp_failure(struct t3cdev *dev, struct sk_buff *skb)
+{
+	PDBG("%s enter (%s line %u)\n", __FUNCTION__, __FILE__, __LINE__);
+}
+
+/*
+ * Handle an ARP failure for a CPL_ABORT_REQ.  Change it into a no RST variant
+ * and send it along.
+ */
+static void abort_arp_failure(struct t3cdev *dev, struct sk_buff *skb)
+{
+	struct cpl_abort_req *req = cplhdr(skb);
+
+	PDBG("%s enter (%s line %u)\n", __FUNCTION__, __FILE__, __LINE__);
+	req->cmd = CPL_ABORT_NO_RST;
+	t3c_send(dev, skb);
+}
+
+static int send_halfclose(struct iwch_ep *ep)
+{
+	struct cpl_close_con_req *req;
+	struct sk_buff *skb;
+
+	PDBG("%s enter (%s line %u)\n", __FUNCTION__, __FILE__, __LINE__);
+	skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);
+	if (!skb) {
+		printk(KERN_ERR MOD "%s - failed to alloc skb\n", __FUNCTION__);
+		return -ENOMEM;
+	}
+	skb->priority = CPL_PRIORITY_DATA;
+	set_arp_failure_handler(skb, arp_failure_discard);
+	req = (struct cpl_close_con_req *) skb_put(skb, sizeof(*req));
+	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_TOE_CLOSE_CON));
+	req->wr.wr_lo = htonl(V_WR_TID(ep->hwtid));
+	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_CLOSE_CON_REQ, ep->hwtid));
+	l2t_send(ep->com.tdev, skb, ep->l2t);
+	return 0;
+}
+
+static int send_abort(struct iwch_ep *ep, struct sk_buff *skb)
+{
+	struct cpl_abort_req *req;
+
+	PDBG("%s enter (%s line %u)\n", __FUNCTION__, __FILE__, __LINE__);
+	skb = get_skb(skb, sizeof(*req), GFP_KERNEL);
+	if (!skb) {
+		printk(KERN_ERR MOD "%s - failed to alloc skb.\n",
+		       __FUNCTION__);
+		return -ENOMEM;
+	}
+	skb->priority = CPL_PRIORITY_DATA;
+	set_arp_failure_handler(skb, abort_arp_failure);
+	req = (struct cpl_abort_req *) skb_put(skb, sizeof(*req));
+	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_TOE_HOST_ABORT_CON_REQ));
+	req->wr.wr_lo = htonl(V_WR_TID(ep->hwtid));
+	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_ABORT_REQ, ep->hwtid));
+	req->cmd = CPL_ABORT_SEND_RST;
+	l2t_send(ep->com.tdev, skb, ep->l2t);
+	return 0;
+}
+
+static int send_connect(struct iwch_ep *ep)
+{
+	struct cpl_act_open_req *req;
+	struct sk_buff *skb;
+	u32 opt0h, opt0l, opt2;
+	unsigned int mtu_idx;
+
+	PDBG("%s enter (%s line %u)\n", __FUNCTION__, __FILE__, __LINE__);
+
+	skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);
+	if (!skb) {
+		printk(KERN_ERR MOD "%s - failed to alloc skb.\n",
+		       __FUNCTION__);
+		return -ENOMEM;
+	}
+	mtu_idx = find_best_mtu(T3C_DATA(ep->com.tdev), dst_mtu(ep->dst));
+	opt0h = V_NAGLE(1) |
+	    V_KEEP_ALIVE(1) |
+	    F_TCAM_BYPASS |
+	    V_WND_SCALE(1) |
+	    V_MSS_IDX(mtu_idx) |
+	    V_L2T_IDX(ep->l2t->idx) | V_TX_CHANNEL(ep->l2t->smt_idx);
+	opt0l = V_TOS((ep->tos >> 2) & M_TOS) | V_RCV_BUFSIZ(65536 >> 10);
+	opt2 = V_FLAVORS_VALID(0) | V_CONG_CONTROL_FLAVOR(0);
+	skb->priority = CPL_PRIORITY_SETUP;
+	set_arp_failure_handler(skb, act_open_req_arp_failure);
+
+	req = (struct cpl_act_open_req *) skb_put(skb, sizeof(*req));
+	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
+	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_ACT_OPEN_REQ, ep->atid));
+	req->local_port = ep->com.local_addr.sin_port;
+	req->peer_port = ep->com.remote_addr.sin_port;
+	req->local_ip = ep->com.local_addr.sin_addr.s_addr;
+	req->peer_ip = ep->com.remote_addr.sin_addr.s_addr;
+	req->opt0h = htonl(opt0h);
+	req->opt0l = htonl(opt0l);
+	req->iff_vlantag = 0;	/* XXX */
+	req->opt2 = htonl(opt2);
+	l2t_send(ep->com.tdev, skb, ep->l2t);
+	return 0;
+}
+
+static void send_mpa_req(struct iwch_ep *ep, struct sk_buff *skb)
+{
+	int mpalen;
+	struct tx_data_wr *req;
+	struct mpa_message *mpa;
+	int len;
+
+	PDBG("%s (%s line %u)\n", __FUNCTION__, __FILE__, __LINE__);
+
+	BUG_ON(skb_cloned(skb));
+
+	mpalen = sizeof(*mpa) + ep->plen;
+	skb_trim(skb, mpalen);
+	skb_reserve(skb, sizeof(*req));
+	skb->priority = CPL_PRIORITY_DATA;
+	mpa = (struct mpa_message *) skb->data;
+	memset(mpa, 0, sizeof(*mpa));
+	memcpy(mpa->key, MPA_KEY_REQ, sizeof(mpa->key));
+	mpa->flags = MPA_CRC | (markers_enabled ? MPA_MARKERS : 0);
+	mpa->private_data_size = htons(ep->plen);
+	mpa->revision = mpa_rev;
+	if (ep->plen) {
+		memcpy(mpa->private_data, ep->mpa_pkt + sizeof(*mpa), ep->plen);
+	}
+
+	/* 
+	 * Reference the mpa skb.  This ensures the data area
+	 * will remain in memory until the hw acks the tx.  
+	 * Function tx_ack() will deref it.
+	 */
+	skb_get(skb);
+	set_arp_failure_handler(skb, arp_failure_discard);
+	skb->h.raw = skb->data;
+	len = skb->len;
+	req = (struct tx_data_wr *) skb_push(skb, sizeof(*req));
+	req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_TOE_TX_DATA));
+	req->wr_lo = htonl(V_WR_TID(ep->hwtid));
+	req->len = htonl(len);
+	req->param = htonl(V_TX_PORT(ep->l2t->smt_idx));
+	req->flags = htonl(F_TX_INIT);
+	req->sndseq = htonl(ep->snd_seq);
+	BUG_ON(ep->mpa_skb);
+	ep->mpa_skb = skb;
+	l2t_send(ep->com.tdev, skb, ep->l2t);
+	start_mpa_timer(ep);
+	state_set(&ep->com, MPA_REQ_SENT);
+	return;
+}
+
+static int send_mpa_reject(struct iwch_ep *ep, const void *pdata, u8 plen)
+{
+	int mpalen;
+	struct tx_data_wr *req;
+	struct mpa_message *mpa;
+	struct sk_buff *skb;
+
+	PDBG("%s (%s line %u)\n", __FUNCTION__, __FILE__, __LINE__);
+
+	mpalen = sizeof(*mpa) + plen;
+
+	skb = get_skb(NULL, mpalen + sizeof(*req), GFP_KERNEL);
+	if (!skb) {
+		printk(KERN_ERR MOD "%s - cannot alloc skb!\n", __FUNCTION__);
+		return -ENOMEM;
+	}
+	skb_reserve(skb, sizeof(*req));
+	mpa = (struct mpa_message *) skb_put(skb, mpalen);
+	memset(mpa, 0, sizeof(*mpa));
+	memcpy(mpa->key, MPA_KEY_REP, sizeof(mpa->key));
+	mpa->flags = MPA_REJECT;
+	mpa->revision = mpa_rev;
+	mpa->private_data_size = htons(plen);
+	if (plen) {
+		memcpy(mpa->private_data, pdata, plen);
+	}
+
+	/* 
+	 * Reference the mpa skb again.  This ensures the data area
+	 * will remain in memory until the hw acks the tx.  
+	 * Function tx_ack() will deref it.
+	 */
+	skb_get(skb);
+	skb->priority = CPL_PRIORITY_DATA;
+	set_arp_failure_handler(skb, arp_failure_discard);
+	skb->h.raw = skb->data;
+	req = (struct tx_data_wr *) skb_push(skb, sizeof(*req));
+	req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_TOE_TX_DATA));
+	req->wr_lo = htonl(V_WR_TID(ep->hwtid));
+	req->len = htonl(mpalen);
+	req->param = htonl(V_TX_PORT(ep->l2t->smt_idx));
+	req->flags = htonl(F_TX_INIT);
+	req->sndseq = htonl(ep->snd_seq);
+	BUG_ON(ep->mpa_skb);
+	ep->mpa_skb = skb;
+	l2t_send(ep->com.tdev, skb, ep->l2t);
+	state_set(&ep->com, REJECT);
+	return 0;
+}
+
+static int send_mpa_reply(struct iwch_ep *ep, const void *pdata, u8 plen)
+{
+	int mpalen;
+	struct tx_data_wr *req;
+	struct mpa_message *mpa;
+	int len;
+	struct sk_buff *skb;
+
+	PDBG("%s (%s line %u)\n", __FUNCTION__, __FILE__, __LINE__);
+
+	mpalen = sizeof(*mpa) + plen;
+
+	skb = get_skb(NULL, mpalen + sizeof(*req), GFP_KERNEL);
+	if (!skb) {
+		printk(KERN_ERR MOD "%s - cannot alloc skb!\n", __FUNCTION__);
+		return -ENOMEM;
+	}
+	skb->priority = CPL_PRIORITY_DATA;
+	skb_reserve(skb, sizeof(*req));
+	mpa = (struct mpa_message *) skb_put(skb, mpalen);
+	memset(mpa, 0, sizeof(*mpa));
+	memcpy(mpa->key, MPA_KEY_REP, sizeof(mpa->key));
+	mpa->flags = MPA_CRC | (markers_enabled ? MPA_MARKERS : 0);
+	mpa->revision = mpa_rev;
+	mpa->private_data_size = htons(plen);
+	if (plen) {
+		memcpy(mpa->private_data, pdata, plen);
+	}
+
+	/* 
+	 * Reference the mpa skb.  This ensures the data area
+	 * will remain in memory until the hw acks the tx.  
+	 * Function tx_ack() will deref it.
+	 */
+	skb_get(skb);
+	set_arp_failure_handler(skb, arp_failure_discard);
+	skb->h.raw = skb->data;
+	len = skb->len;
+	req = (struct tx_data_wr *) skb_push(skb, sizeof(*req));
+	req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_TOE_TX_DATA));
+	req->wr_lo = htonl(V_WR_TID(ep->hwtid));
+	req->len = htonl(len);
+	req->param = htonl(V_TX_PORT(ep->l2t->smt_idx));
+	req->flags = htonl(F_TX_INIT);
+	req->sndseq = htonl(ep->snd_seq);
+	ep->mpa_skb = skb;
+	state_set(&ep->com, MPA_REP_SENT);
+	l2t_send(ep->com.tdev, skb, ep->l2t);
+	return 0;
+}
+
+static int act_establish(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
+{
+	struct iwch_ep *ep = ctx;
+	struct cpl_act_establish *req = cplhdr(skb);
+	unsigned int tid = GET_TID(req);
+
+	PDBG("%s (%s line %u)\n", __FUNCTION__, __FILE__, __LINE__);
+
+	dst_confirm(ep->dst);
+
+	/* setup the hwtid for this connection */
+	ep->hwtid = tid;
+	t3c_insert_tid(ep->com.tdev, &t3c_client, ep, tid);
+
+	ep->snd_seq = ntohl(req->snd_isn);
+
+	set_emss(ep, ntohs(req->tcp_opt));
+
+	/* dealloc the atid */
+	t3c_free_atid(ep->com.tdev, ep->atid);
+
+	/* start MPA negotiation */
+	send_mpa_req(ep, skb);
+
+	return 0;
+}
+
+static void abort_connection(struct iwch_ep *ep, struct sk_buff *skb)
+{
+	PDBG("%s:%s:%u\n", __FILE__, __FUNCTION__, __LINE__);
+	state_set(&ep->com, DEAD);
+	send_abort(ep, skb);
+}
+
+static void peer_close_upcall(struct iwch_ep *ep)
+{
+	struct iw_cm_event event;
+
+	PDBG("%s enter (%s line %u)\n", __FUNCTION__, __FILE__, __LINE__);
+	memset(&event, 0, sizeof(event));
+	event.event = IW_CM_EVENT_LLP_DISCONNECT;
+	ep->com.cm_id->event_handler(ep->com.cm_id, &event);
+}
+
+static void peer_abort_upcall(struct iwch_ep *ep)
+{
+	struct iw_cm_event event;
+
+	PDBG("%s enter (%s line %u)\n", __FUNCTION__, __FILE__, __LINE__);
+	memset(&event, 0, sizeof(event));
+	event.event = IW_CM_EVENT_LLP_RESET;
+	ep->com.cm_id->event_handler(ep->com.cm_id, &event);
+}
+
+static void connect_reply_upcall(struct iwch_ep *ep, int status)
+{
+	struct iw_cm_event event;
+
+	PDBG("%s enter (%s line %u)\n", __FUNCTION__, __FILE__, __LINE__);
+	memset(&event, 0, sizeof(event));
+	event.event = IW_CM_EVENT_CONNECT_REPLY;
+	event.status = status;
+	event.local_addr = ep->com.local_addr;
+	event.remote_addr = ep->com.remote_addr;
+	event.private_data_len = ep->plen;
+	event.private_data = ep->mpa_pkt + sizeof(struct mpa_message);
+	ep->com.cm_id->event_handler(ep->com.cm_id, &event);
+}
+
+static void connect_request_upcall(struct iwch_ep *ep)
+{
+	struct iw_cm_event event;
+
+	PDBG("%s enter (%s line %u)\n", __FUNCTION__, __FILE__, __LINE__);
+	memset(&event, 0, sizeof(event));
+	event.event = IW_CM_EVENT_CONNECT_REQUEST;
+	event.local_addr = ep->com.local_addr;
+	event.remote_addr = ep->com.remote_addr;
+	event.private_data_len = ep->plen;
+	event.private_data = ep->mpa_pkt + sizeof(struct mpa_message);
+	event.provider_id = (u64) (unsigned long) ep;
+	ep->com.cm_id->event_handler(ep->com.cm_id, &event);
+}
+
+static int update_rx_credits(struct iwch_ep *ep, u32 credits)
+{
+	struct cpl_rx_data_ack *req;
+	struct sk_buff *skb;
+
+	PDBG("%s enter (%s line %u)\n", __FUNCTION__, __FILE__, __LINE__);
+	skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);
+	if (!skb) {
+		printk(KERN_ERR MOD "update_rx_credits - cannot alloc skb!\n");
+		return 0;
+	}
+
+	req = (struct cpl_rx_data_ack *) skb_put(skb, sizeof(*req));
+	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
+	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_RX_DATA_ACK, ep->hwtid));
+	req->credit_dack = htonl(V_RX_CREDITS(credits) | V_RX_FORCE_ACK(1));
+	skb->priority = CPL_PRIORITY_ACK;
+	ep->com.tdev->send(ep->com.tdev, skb);
+	return credits;
+}
+
+static void process_mpa_reply(struct iwch_ep *ep, struct sk_buff *skb)
+{
+	struct mpa_message *mpa;
+	u16 plen;
+	struct iwch_qp_attributes attrs;
+	enum iwch_qp_attr_mask mask;
+	int err;
+
+	PDBG("%s enter (%s line %u)\n", __FUNCTION__, __FILE__, __LINE__);
+
+	/* 
+	 * If we get more than the supported amount of private data
+	 * then we must fail this connection.
+	 */
+	if (ep->mpa_pkt_len + skb->len > sizeof(ep->mpa_pkt)) {
+		err = -EINVAL;
+		goto err;
+	}
+
+	/*
+	 * copy the new data into our accumulation buffer.
+	 */
+	memcpy(&(ep->mpa_pkt[ep->mpa_pkt_len]), skb->data, skb->len);
+	ep->mpa_pkt_len += skb->len;
+
+	/* 
+	 * if we don't even have the mpa message, then bail. 
+	 */
+	if (ep->mpa_pkt_len < sizeof(*mpa)) {
+		return;
+	}
+	mpa = (struct mpa_message *) ep->mpa_pkt;
+
+	/* Validate MPA header. */
+	if (mpa->revision != mpa_rev) {
+		err = -EPROTO;
+		goto err;
+	}
+	if (memcmp(mpa->key, MPA_KEY_REP, sizeof(mpa->key))) {
+		err = -EPROTO;
+		goto err;
+	}
+
+	plen = ntohs(mpa->private_data_size);
+
+	/* 
+	 * Fail if there's too much private data.
+	 */
+	if (plen > MPA_MAX_PRIVATE_DATA) {
+		err = -EPROTO;
+		goto err;
+	}
+	ep->plen = (u8) plen;
+
+	/*
+	 * If we don't have all the pdata yet, then bail.
+	 * We'll continue processing when more data arrives.
+	 */
+	if (ep->mpa_pkt_len < (sizeof(*mpa) + plen)) {
+		return;
+	}
+
+	if (mpa->flags & MPA_REJECT) {
+		err = -ECONNREFUSED;
+		goto err;
+	}
+
+	/*
+	 * If we get here we have accumulated the entire MPA
+	 * reply message, including any private data, and the
+	 * MPA header is valid.
+	 */
+
+	ep->mpa_attr.crc_enabled = 1;
+	ep->mpa_attr.recv_marker_enabled = markers_enabled;
+	ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS;
+	ep->mpa_attr.version = mpa_rev;
+
+	attrs.mpa_attr = ep->mpa_attr;
+	attrs.llp_stream_handle = ep;
+	attrs.next_state = IWCH_QP_STATE_RTS;
+
+	mask = IWCH_QP_ATTR_NEXT_STATE |
+	    IWCH_QP_ATTR_LLP_STREAM_HANDLE | IWCH_QP_ATTR_MPA_ATTR;
+
+	/* bind QP and TID with INIT_WR */
+	err = iwch_modify_qp(to_iwch_dev(ep->com.cm_id->device),
+			     to_iwch_qp(ep->com.cm_id->qp), mask,
+			     &attrs, 1);
+	if (!err) {
+		state_set(&ep->com, FPDU_MODE);
+		goto out;
+	}
+err:
+	abort_connection(ep, skb);
+out:
+	stop_mpa_timer(ep);
+	connect_reply_upcall(ep, err);
+	return;
+}
+
+static void process_mpa_request(struct iwch_ep *ep, struct sk_buff *skb)
+{
+	struct mpa_message *mpa;
+	u16 plen;
+
+	PDBG("%s enter (%s line %u)\n", __FUNCTION__, __FILE__, __LINE__);
+
+	/* 
+	 * If we get more than the supported amount of private data
+	 * then we must fail this connection.
+	 */
+	if (ep->mpa_pkt_len + skb->len > sizeof(ep->mpa_pkt)) {
+		abort_connection(ep, skb);
+		return;
+	}
+
+	PDBG("%s enter (%s line %u)\n", __FUNCTION__, __FILE__, __LINE__);
+
+	/*
+	 * Copy the new data into our accumulation buffer.
+	 */
+	memcpy(&(ep->mpa_pkt[ep->mpa_pkt_len]), skb->data, skb->len);
+	ep->mpa_pkt_len += skb->len;
+
+	/* 
+	 * If we don't even have the mpa message, then bail. 
+	 * We'll continue processing when more data arrives.
+	 */
+	if (ep->mpa_pkt_len < sizeof(*mpa)) {
+		return;
+	}
+	PDBG("%s enter (%s line %u)\n", __FUNCTION__, __FILE__, __LINE__);
+	mpa = (struct mpa_message *) ep->mpa_pkt;
+
+	/* 
+	 * Validate MPA Header.
+	 */
+	if (mpa->revision != mpa_rev) {
+		abort_connection(ep, skb);
+		return;
+	}
+
+	if (memcmp(mpa->key, MPA_KEY_REQ, sizeof(mpa->key))) {
+		abort_connection(ep, skb);
+		return;
+	}
+
+	plen = ntohs(mpa->private_data_size);
+
+	/* 
+	 * Fail if there's too much private data.
+	 */
+	if (plen > MPA_MAX_PRIVATE_DATA) {
+		abort_connection(ep, skb);
+		return;
+	}
+	ep->plen = (u8) plen;
+
+	/*
+	 * If we don't have all the pdata yet, then bail.
+	 */
+	if (ep->mpa_pkt_len < (sizeof(*mpa) + plen)) {
+		return;
+	}
+
+	/*
+	 * If we get here we have accumulated the entire MPA
+	 * request message, including any private data.
+	 */
+	ep->mpa_attr.crc_enabled = 1;
+	ep->mpa_attr.recv_marker_enabled = markers_enabled;
+	ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS;
+	ep->mpa_attr.version = mpa_rev;
+	state_set(&ep->com, MPA_REQ_RCVD);
+
+	/* drive upcall */
+	connect_request_upcall(ep);
+	return;
+}
+
+static int rx_data(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
+{
+	struct iwch_ep *ep = ctx;
+	struct cpl_rx_data *hdr = cplhdr(skb);
+	unsigned int dlen = ntohs(hdr->len);
+	int err;
+
+	PDBG("%s (%s line %u)\n", __FUNCTION__, __FILE__, __LINE__);
+
+	skb_pull(skb, sizeof(*hdr));
+	skb_trim(skb, dlen);
+
+	switch (state_read(&ep->com)) {
+	case MPA_REQ_SENT:
+		process_mpa_reply(ep, skb);
+		break;
+	case MPA_REQ_WAIT:
+		process_mpa_request(ep, skb);
+		break;
+	case REJECT:
+		break;
+	default:
+		printk(KERN_ERR MOD "%s - unexpected streaming data.\n",
+		       __FUNCTION__);
+
+		/* generate some kind of upcall if needed */
+		err = send_abort(ep, skb);
+		break;
+	}
+
+	/* update RX credits */
+	update_rx_credits(ep, dlen);
+
+	return CPL_RET_BUF_DONE;
+}
+
+/*
+ * Upcall from the adapter indicating data has been transmitted.
+ * For us it's just the single MPA request or reply.  We can now free
+ * the skb holding the mpa message.
+ */
+static int tx_ack(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
+{
+	struct iwch_ep *ep = ctx;
+	struct cpl_wr_ack *hdr = cplhdr(skb);
+	unsigned int credits = ntohs(hdr->credits);
+
+	PDBG("%s (%s line %u) credits %d\n", __FUNCTION__, __FILE__,
+		__LINE__, credits);
+	BUG_ON(credits != 1);
+	BUG_ON(ep->mpa_skb == NULL);
+	kfree_skb(ep->mpa_skb);
+	ep->mpa_skb = NULL;
+	dst_confirm(ep->dst);
+	if (state_read(&ep->com) == MPA_REP_SENT) {
+		ep->com.rpl_done = 1;
+		PDBG("waking up ep %p\n", ep);
+		wake_up(&ep->com.waitq);
+	}
+	return CPL_RET_BUF_DONE;
+}
+
+static int abort_rpl(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
+{
+	struct iwch_ep *ep = ctx;
+	PDBG("%s (%s line %u)\n", __FUNCTION__, __FILE__, __LINE__);
+	t3c_remove_tid(ep->com.tdev, ctx, ep->hwtid);
+	dst_release(ep->dst);
+	l2t_release(L2DATA(ep->com.tdev), ep->l2t);
+	free_ep(&ep->com);
+	return CPL_RET_BUF_DONE;
+}
+
+static int act_open_rpl(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
+{
+	struct iwch_ep *ep = ctx;
+	struct cpl_act_open_rpl *rpl = cplhdr(skb);
+
+	PDBG("%s (%s line %u)\n", __FUNCTION__, __FILE__, __LINE__);
+	printk(KERN_ERR MOD "%s - active connect failure status %d\n",
+	       __FUNCTION__, rpl->status);
+	dst_release(ep->dst);
+	l2t_release(L2DATA(ep->com.tdev), ep->l2t);
+	t3c_free_atid(ep->com.tdev, ep->atid);
+	connect_reply_upcall(ep, status2errno(rpl->status));
+	free_ep(&ep->com);
+	return CPL_RET_BUF_DONE;
+}
+
+static int listen_start(struct iwch_listen_ep *ep)
+{
+	struct sk_buff *skb;
+	struct cpl_pass_open_req *req;
+
+	PDBG("%s enter (%s line %u)\n", __FUNCTION__, __FILE__, __LINE__);
+	skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);
+	if (!skb) {
+		printk(KERN_ERR MOD "%s - failed to alloc skb!\n",
+		       __FUNCTION__);
+		return -ENOMEM;
+	}
+
+	req = (struct cpl_pass_open_req *) skb_put(skb, sizeof(*req));
+	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
+	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ, ep->stid));
+	req->local_port = ep->com.local_addr.sin_port;
+	req->local_ip = ep->com.local_addr.sin_addr.s_addr;
+	req->peer_port = 0;
+	req->peer_ip = 0;
+	req->peer_netmask = 0;
+	req->opt0h = htonl(F_DELACK | F_TCAM_BYPASS);
+	req->opt0l = htonl(V_RCV_BUFSIZ(16));
+	req->opt1 = htonl(V_CONN_POLICY(CPL_CONN_POLICY_ASK));
+
+	skb->priority = 1;
+	ep->com.tdev->send(ep->com.tdev, skb);
+	return 0;
+}
+
+static int pass_open_rpl(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
+{
+	struct iwch_listen_ep *ep = ctx;
+	struct cpl_pass_open_rpl *rpl = cplhdr(skb);
+
+	PDBG("%s (%s line %u)\n", __FUNCTION__, __FILE__, __LINE__);
+	ep->com.rpl_err = status2errno(rpl->status);
+	ep->com.rpl_done = 1;
+	wake_up(&ep->com.waitq);
+
+	return CPL_RET_BUF_DONE;
+}
+
+static int listen_stop(struct iwch_listen_ep *ep)
+{
+	struct sk_buff *skb;
+	struct cpl_close_listserv_req *req;
+
+	PDBG("%s enter (%s line %u)\n", __FUNCTION__, __FILE__, __LINE__);
+	skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);
+	if (!skb) {
+		printk(KERN_ERR MOD "%s - failed to alloc skb\n", __FUNCTION__);
+		return -ENOMEM;
+	}
+	req = (struct cpl_close_listserv_req *) skb_put(skb, sizeof(*req));
+	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
+	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_CLOSE_LISTSRV_REQ, ep->stid));
+	skb->priority = 1;
+	ep->com.tdev->send(ep->com.tdev, skb);
+	return 0;
+}
+
+static int close_listsrv_rpl(struct t3cdev *tdev, struct sk_buff *skb,
+			     void *ctx)
+{
+	struct iwch_listen_ep *ep = ctx;
+	struct cpl_close_listserv_rpl *rpl = cplhdr(skb);
+
+	PDBG("%s (%s line %u)\n", __FUNCTION__, __FILE__, __LINE__);
+	ep->com.rpl_err = status2errno(rpl->status);
+	ep->com.rpl_done = 1;
+	wake_up(&ep->com.waitq);
+	return CPL_RET_BUF_DONE;
+}
+
+static void accept_cr(struct iwch_ep *ep, u32 peer_ip, struct sk_buff *skb)
+{
+	struct cpl_pass_accept_rpl *rpl;
+	unsigned int mtu_idx;
+	u32 opt0h, opt0l, opt2;
+
+	PDBG("%s enter (%s line %u)\n", __FUNCTION__, __FILE__, __LINE__);
+	BUG_ON(skb_cloned(skb));
+	skb_trim(skb, sizeof(*rpl));
+	skb_get(skb);
+	mtu_idx = find_best_mtu(T3C_DATA(ep->com.tdev), dst_mtu(ep->dst));
+	opt0h = V_NAGLE(1) |
+	    V_KEEP_ALIVE(1) |
+	    F_TCAM_BYPASS |
+	    V_WND_SCALE(1) |
+	    V_MSS_IDX(mtu_idx) |
+	    V_L2T_IDX(ep->l2t->idx) | V_TX_CHANNEL(ep->l2t->smt_idx);
+	opt0l = V_TOS((ep->tos >> 2) & M_TOS) | V_RCV_BUFSIZ(65536 >> 10);
+	opt2 = V_FLAVORS_VALID(0) | V_CONG_CONTROL_FLAVOR(0);
+
+	rpl = cplhdr(skb);
+	rpl->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
+	OPCODE_TID(rpl) = htonl(MK_OPCODE_TID(CPL_PASS_ACCEPT_RPL, ep->hwtid));
+	rpl->peer_ip = peer_ip;
+	rpl->opt0h = htonl(opt0h);
+	rpl->opt0l_status = htonl(opt0l | CPL_PASS_OPEN_ACCEPT);
+	rpl->opt2 = htonl(opt2);
+	rpl->rsvd = rpl->opt2;	/* workaround for HW bug */
+	skb->priority = CPL_PRIORITY_SETUP;
+	l2t_send(ep->com.tdev, skb, ep->l2t);
+
+	return;
+}
+
+static void reject_cr(struct t3cdev *tdev, u32 hwtid, u32 peer_ip,
+		      struct sk_buff *skb)
+{
+	struct cpl_pass_accept_rpl *rpl;
+
+	PDBG("%s enter (%s line %u)\n", __FUNCTION__, __FILE__, __LINE__);
+	BUG_ON(skb_cloned(skb));
+	skb_trim(skb, sizeof(*rpl));
+	skb_get(skb);
+	rpl = cplhdr(skb);
+	skb->priority = CPL_PRIORITY_SETUP;
+	rpl->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
+	OPCODE_TID(rpl) = htonl(MK_OPCODE_TID(CPL_PASS_ACCEPT_RPL, hwtid));
+	rpl->peer_ip = peer_ip;
+	rpl->opt0h = htonl(F_TCAM_BYPASS);
+	rpl->opt0l_status = htonl(CPL_PASS_OPEN_REJECT);
+	rpl->opt2 = 0;
+	rpl->rsvd = rpl->opt2;
+	tdev->send(tdev, skb);
+}
+
+static int pass_accept_req(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
+{
+	struct iwch_ep *child_ep, *parent_ep = ctx;
+	struct cpl_pass_accept_req *req = cplhdr(skb);
+	unsigned int hwtid = GET_TID(req);
+	struct dst_entry *dst;
+	struct l2t_entry *l2t;
+	struct rtable *rt;
+	struct iff_mac tim;
+
+	PDBG("%s (%s line %u) - hwtid %u\n",
+		__FUNCTION__, __FILE__, __LINE__, hwtid);
+
+	if (state_read(&parent_ep->com) != LISTEN) {
+		PDBG("%s - listening ep not in LISTEN\n", __FUNCTION__);
+		goto reject;
+	}
+
+	/*
+	 * Find the netdev for this connection request.
+	 */
+	tim.mac_addr = req->dst_mac;
+	if (tdev->ctl(tdev, GET_IFF_FROM_MAC, &tim) < 0 || !tim.dev) {
+		goto reject;
+	}
+
+	PDBG("%s (%s line %u) - hwtid %u\n",
+		__FUNCTION__, __FILE__, __LINE__, hwtid);
+#if 0
+	if (ip_route_input(skb, req->peer_ip, req->local_ip,
+			   G_PASS_OPEN_TOS(ntohl(req->tos_tid)), tim.dev)) {
+
+		printk(KERN_ERR MOD "%s - failed to find input route\n",
+		       __FUNCTION__);
+		goto reject;
+	}
+	PDBG("%s (%s line %u) - hwtid %u\n",
+		__FUNCTION__, __FILE__, __LINE__, hwtid);
+	BUG_TRAP(!skb->dst);
+	dst_release(skb->dst);
+	skb->dst = NULL;
+#endif
+
+	/* Find output route */
+	rt = find_route(tdev,
+			req->local_ip,
+			req->peer_ip,
+			req->local_port,
+			req->peer_port, G_PASS_OPEN_TOS(ntohl(req->tos_tid)));
+	if (!rt) {
+		printk(KERN_ERR MOD "%s - failed to find dst entry!\n",
+		       __FUNCTION__);
+		goto reject;
+	}
+	PDBG("%s (%s line %u) - hwtid %u\n",
+		__FUNCTION__, __FILE__, __LINE__, hwtid);
+	dst = &rt->u.dst;
+	l2t = t3_l2t_get(tdev, dst->neighbour, dst->neighbour->dev->if_port);
+	if (!l2t) {
+		printk(KERN_ERR MOD "%s - failed to allocate l2t entry!\n",
+		       __FUNCTION__);
+		dst_release(dst);
+		goto reject;
+	}
+	PDBG("%s (%s line %u) - hwtid %u\n",
+		__FUNCTION__, __FILE__, __LINE__, hwtid);
+
+	child_ep = alloc_ep(sizeof(*child_ep), GFP_KERNEL);
+	if (!child_ep) {
+		printk(KERN_ERR MOD "%s - failed to allocate ep entry!\n",
+		       __FUNCTION__);
+		l2t_release(L2DATA(tdev), l2t);
+		dst_release(dst);
+		goto reject;
+	}
+	PDBG("%s (%s line %u) - hwtid %u\n",
+		__FUNCTION__, __FILE__, __LINE__, hwtid);
+	child_ep->com.tdev = tdev;
+	child_ep->com.cm_id = parent_ep->com.cm_id;
+	child_ep->com.local_addr.sin_family = PF_INET;
+	child_ep->com.local_addr.sin_port = req->local_port;
+	child_ep->com.local_addr.sin_addr.s_addr = req->local_ip;
+	child_ep->com.remote_addr.sin_family = PF_INET;
+	child_ep->com.remote_addr.sin_port = req->peer_port;
+	child_ep->com.remote_addr.sin_addr.s_addr = req->peer_ip;
+	child_ep->tos = G_PASS_OPEN_TOS(ntohl(req->tos_tid));
+	child_ep->l2t = l2t;
+	child_ep->dst = dst;
+	child_ep->hwtid = hwtid;
+	t3c_insert_tid(tdev, &t3c_client, child_ep, hwtid);
+	PDBG("%s (%s line %u) - hwtid %u\n",
+		__FUNCTION__, __FILE__, __LINE__, hwtid);
+	accept_cr(child_ep, req->peer_ip, skb);
+	goto out;
+reject:
+	reject_cr(tdev, hwtid, req->peer_ip, skb);
+out:
+	return CPL_RET_BUF_DONE;
+}
+
+static int pass_establish(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
+{
+	struct iwch_ep *ep = ctx;
+	struct cpl_pass_establish *req = cplhdr(skb);
+
+	PDBG("%s (%s line %u)\n", __FUNCTION__, __FILE__, __LINE__);
+	ep->snd_seq = ntohl(req->snd_isn);
+
+	set_emss(ep, ntohs(req->tcp_opt));
+
+	dst_confirm(ep->dst);
+	state_set(&ep->com, MPA_REQ_WAIT);
+
+	return CPL_RET_BUF_DONE;
+}
+
+static int peer_close(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
+{
+	struct iwch_ep *ep = ctx;
+	int err;
+
+	PDBG("%s (%s line %u)\n", __FUNCTION__, __FILE__, __LINE__);
+	dst_confirm(ep->dst);
+	state_set(&ep->com, DEAD);
+	peer_close_upcall(ep);
+	err = send_halfclose(ep);
+	return CPL_RET_BUF_DONE;
+}
+
+static int peer_abort(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
+{
+	struct iwch_ep *ep = ctx;
+	struct cpl_abort_rpl *rpl;
+	struct sk_buff *rpl_skb;
+	struct iwch_qp_attributes attrs;
+	enum iwch_qp_attr_mask mask;
+	int ret;
+
+	PDBG("%s (%s line %u)\n", __FUNCTION__, __FILE__, __LINE__);
+	switch (state_read(&ep->com)) {
+	case MPA_REQ_WAIT:
+		break;
+	case MPA_REQ_SENT:
+		connect_reply_upcall(ep, -ECONNREFUSED);
+		break;
+	case MPA_REQ_RCVD:
+		break;
+	case FPDU_MODE:
+		peer_abort_upcall(ep);
+		mask = IWCH_QP_ATTR_NEXT_STATE;
+		attrs.next_state = IWCH_QP_STATE_ERROR;
+		ret = iwch_modify_qp(to_iwch_dev(ep->com.cm_id->device),
+				     to_iwch_qp(ep->com.cm_id->qp), mask,
+				     &attrs, 1);
+		if (ret) {
+			printk(KERN_ERR MOD "%s - cannot modify qp !\n",
+			       __FUNCTION__);
+			dst_release(ep->dst);
+			l2t_release(L2DATA(ep->com.tdev), ep->l2t);
+			state_set(&ep->com, FREE);
+			free_ep(&ep->com);
+			return CPL_RET_BUF_DONE;
+		}
+		break;
+	case DEAD:
+		break;
+	default:
+		break;
+	}
+	dst_confirm(ep->dst);
+	
+	rpl_skb = get_skb(skb, sizeof(*rpl), GFP_KERNEL);
+	if (!rpl_skb) {
+		printk(KERN_ERR MOD "%s - cannot allocate skb!\n",
+		       __FUNCTION__);
+		dst_release(ep->dst);
+		l2t_release(L2DATA(ep->com.tdev), ep->l2t);
+		state_set(&ep->com, FREE);
+		free_ep(&ep->com);
+		return CPL_RET_BUF_DONE;
+	}
+	rpl_skb->priority = CPL_PRIORITY_DATA;
+	rpl = (struct cpl_abort_rpl *) skb_put(rpl_skb, sizeof(*rpl));
+	rpl->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_TOE_HOST_ABORT_CON_RPL));
+	rpl->wr.wr_lo = htonl(V_WR_TID(ep->hwtid));
+	OPCODE_TID(rpl) = htonl(MK_OPCODE_TID(CPL_ABORT_RPL, ep->hwtid));
+	rpl->cmd = CPL_ABORT_NO_RST;
+	ep->com.tdev->send(ep->com.tdev, rpl_skb);
+	t3c_remove_tid(ep->com.tdev, ctx, ep->hwtid);
+	dst_release(ep->dst);
+	l2t_release(L2DATA(ep->com.tdev), ep->l2t);
+	state_set(&ep->com, FREE);
+	free_ep(&ep->com);
+	return CPL_RET_BUF_DONE;
+}
+
+static int close_con_rpl(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
+{
+	struct iwch_ep *ep = ctx;
+	struct iwch_qp_attributes attrs;
+	struct iwch_qp *qp;
+
+	PDBG("%s (%s line %u)\n", __FUNCTION__, __FILE__, __LINE__);
+	BUG_ON(!ep);
+	BUG_ON(!ep->com.cm_id);
+	BUG_ON(!ep->com.cm_id->qp);
+	qp = to_iwch_qp(ep->com.cm_id->qp);
+	
+	if (qp->attr.state == IWCH_QP_STATE_CLOSING)
+		attrs.next_state = IWCH_QP_STATE_IDLE;
+	else
+		attrs.next_state = IWCH_QP_STATE_ERROR;
+	iwch_modify_qp(to_iwch_dev(ep->com.cm_id->device),
+				     to_iwch_qp(ep->com.cm_id->qp), 
+				     IWCH_QP_ATTR_NEXT_STATE,
+				     &attrs, 1);
+	t3c_remove_tid(ep->com.tdev, ctx, ep->hwtid);
+	dst_release(ep->dst);
+	l2t_release(L2DATA(ep->com.tdev), ep->l2t);
+	state_set(&ep->com, FREE);
+	free_ep(&ep->com);
+	return CPL_RET_BUF_DONE;
+}
+
+/*
+ * T3 does 3 things when a TERM is received:
+ * 1) send up a CPL_RDMA_TERMINATE message with the TERM packet
+ * 2) generate an async event on the QP with the TERMINATE opcode
+ * 3) post a TERMINATE opcode CQE into the associated CQ.
+ *
+ * For (1), we save the message in the qp for later consumption by the
+ *          consumer and begin an orderly shutdown.
+ * For (2), we move the QP into TERMINATE and post a QP event.
+ * For (3), we toss the CQE in cxio_poll_cq().
+ * 
+ * terminate() handles case (1)...
+ */
+static int terminate(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
+{
+	struct iwch_ep *ep = ctx;
+	struct cpl_rdma_terminate *req = cplhdr(skb);
+	struct iwch_qp *qp = to_iwch_qp(ep->com.cm_id->qp);
+
+	PDBG("%s (%s line %u)\n", __FUNCTION__, __FILE__, __LINE__);
+	skb_pull(skb, sizeof(*req));
+	memcpy(qp->attr.terminate_buffer, skb->data, skb->len);
+	qp->attr.terminate_msg_len = skb->len;
+	qp->attr.is_terminate_local = 0;
+	send_halfclose(ep);
+	return CPL_RET_BUF_DONE;
+}
+
+static void mpa_timeout(unsigned long arg)
+{
+	struct iwch_ep *ep = (struct iwch_ep *)arg;
+
+	PDBG("%s enter (%s line %u)\n", __FUNCTION__, __FILE__, __LINE__);
+	if (state_comp_exch(&ep->com, MPA_REQ_SENT, DEAD)) {
+		struct sk_buff *skb;
+
+		connect_reply_upcall(ep, -ETIMEDOUT);
+		skb = alloc_skb(sizeof(struct cpl_abort_req), GFP_ATOMIC);
+		if (skb) {
+			abort_connection(ep, skb);
+		}
+	}
+}
+
+int iwch_reject_cr(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
+{
+	int err;
+	struct iwch_ep *ep = to_ep(cm_id);
+	PDBG("%s enter (%s line %u)\n", __FUNCTION__, __FILE__, __LINE__);
+
+	BUG_ON(state_read(&ep->com) != MPA_REQ_RCVD);
+	if (mpa_rev == 0) {
+		err = send_abort(ep, NULL);
+	} else {
+		err = send_mpa_reject(ep, pdata, pdata_len);
+		err = send_halfclose(ep);
+	}
+	return 0;
+}
+
+int iwch_accept_cr(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
+{
+	int err;
+	struct iwch_ep *ep = to_ep(cm_id);
+	struct iwch_qp_attributes attrs;
+	enum iwch_qp_attr_mask mask;
+
+	PDBG("%s enter (%s line %u)\n", __FUNCTION__, __FILE__, __LINE__);
+
+	BUG_ON(state_read(&ep->com) != MPA_REQ_RCVD);
+
+	ep->com.cm_id = cm_id;
+
+	if (cm_id->qp_num && !cm_id->qp)
+		cm_id->qp = (struct ib_qp *)
+		   to_iwch_dev(cm_id->device)->qpid2hlp[cm_id->qp_num];
+
+	/* bind QP to EP and move to RTS */
+	attrs.mpa_attr = ep->mpa_attr;
+	attrs.llp_stream_handle = ep;
+	attrs.next_state = IWCH_QP_STATE_RTS;
+
+	mask = IWCH_QP_ATTR_NEXT_STATE |
+	    IWCH_QP_ATTR_LLP_STREAM_HANDLE | IWCH_QP_ATTR_MPA_ATTR;
+
+	err = iwch_quiesce_tid(ep);
+	if (err) {
+		abort_connection(ep, NULL);
+		return err;
+	}
+	ep->com.rpl_done = 0;
+	ep->com.rpl_err = 0;
+	atomic_inc(&ep->com.refcnt);
+	err = send_mpa_reply(ep, pdata, pdata_len);
+	if (err) {
+		abort_connection(ep, NULL);
+		return err;
+	}
+
+	/* wait until the MPA is transmitted. */
+	PDBG("sleeping on ep %p\n", ep);
+	wait_event(ep->com.waitq, ep->com.rpl_done);
+	PDBG("awakened on ep %p\n", ep);
+	
+	if (state_read(&ep->com) != MPA_REP_SENT) {
+		free_ep(&ep->com);
+		return -ECONNRESET;
+	}
+	free_ep(&ep->com);
+
+	/* bind QP and TID with INIT_WR */
+	err = iwch_modify_qp(to_iwch_dev(ep->com.cm_id->device),
+			     to_iwch_qp(ep->com.cm_id->qp), mask,
+			     &attrs, 1);
+	if (err) {
+		abort_connection(ep, NULL);
+		return err;
+	}
+	err = iwch_resume_tid(ep);
+	if (err) {
+		/* XXX probably wrong */
+		abort_connection(ep, NULL);
+		return err;
+	}
+
+	state_set(&ep->com, FPDU_MODE);
+	return 0;
+}
+
+int iwch_connect(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
+{
+	int err = 0;
+	struct iwch_dev *h = to_iwch_dev(cm_id->device);
+	struct iwch_ep *ep;
+	struct rtable *rt;
+
+	PDBG("%s:%s:%u\n", __FILE__, __FUNCTION__, __LINE__);
+
+	if (cm_id->qp_num && !cm_id->qp)
+		cm_id->qp = (struct ib_qp *) h->qpid2hlp[cm_id->qp_num];
+	ep = alloc_ep(sizeof(*ep), GFP_KERNEL);
+	if (!ep) {
+		printk(KERN_ERR MOD "%s - cannot alloc ep.\n", __FUNCTION__);
+		err = -ENOMEM;
+		goto out;
+	}
+	init_timer(&ep->timer);
+	ep->plen = pdata_len;
+	if (pdata_len) {
+		memcpy(ep->mpa_pkt + sizeof(struct mpa_message), pdata,
+		       pdata_len);
+	}
+	ep->com.tdev = h->rdev.t3cdev_p;
+	ep->com.cm_id = cm_id;
+
+	/*
+	 * XXX.
+	 */
+	if (get_port(&cm_id->local_addr.sin_port)) {
+		err = -EADDRINUSE;
+		goto fail1;
+	}
+
+	/* 
+	 * Allocate an active TID to initiate a TCP connection. 
+	 */
+	ep->atid = t3c_alloc_atid(h->rdev.t3cdev_p, &t3c_client, ep);
+	if (ep->atid == -1) {
+		printk(KERN_ERR MOD "%s - cannot alloc atid.\n", __FUNCTION__);
+		err = -ENOMEM;
+		goto fail2;
+	}
+
+	/* find a route */
+	/* XXX Shouldn't need this.  IWCM should pass down dst entry ptr */
+	rt = find_route(h->rdev.t3cdev_p,
+			cm_id->local_addr.sin_addr.s_addr,
+			cm_id->remote_addr.sin_addr.s_addr,
+			cm_id->local_addr.sin_port,
+			cm_id->remote_addr.sin_port, IPTOS_MINCOST);
+	if (!rt) {
+		printk(KERN_ERR MOD "%s - cannot find route.\n", __FUNCTION__);
+		err = -EHOSTUNREACH;
+		goto fail3;
+	}
+	ep->dst = &rt->u.dst;
+
+	/* get a l2t entry */
+	ep->l2t = t3_l2t_get(ep->com.tdev,
+			     ep->dst->neighbour,
+			     ep->dst->neighbour->dev->if_port);
+	if (!ep->l2t) {
+		printk(KERN_ERR MOD "%s - cannot alloc l2e.\n", __FUNCTION__);
+		err = -ENOMEM;
+		goto fail4;
+	}
+
+	ep->tos = IPTOS_LOWDELAY;	/* XXX */
+	ep->com.local_addr = cm_id->local_addr;
+	ep->com.remote_addr = cm_id->remote_addr;
+
+	/* send connect request to rnic */
+	err = send_connect(ep);
+	if (!err) {
+		cm_id->provider_id = (u64) (unsigned long) ep;
+		goto out;
+	}
+
+	l2t_release(L2DATA(h->rdev.t3cdev_p), ep->l2t);
+fail4:
+	dst_release(ep->dst);
+fail3:
+	t3c_free_atid(ep->com.tdev, ep->atid);
+fail2:
+	free_port(cm_id->local_addr.sin_port);
+fail1:
+	free_ep(&ep->com);
+out:
+	return err;
+}
+
+int iwch_create_listen(struct iw_cm_id *cm_id, int backlog)
+{
+	int err = 0;
+	struct iwch_dev *h = to_iwch_dev(cm_id->device);
+	struct iwch_listen_ep *ep;
+
+	PDBG("%s (%s line %u)\n", __FUNCTION__, __FILE__, __LINE__);
+
+	might_sleep();
+
+	if (get_port(&cm_id->local_addr.sin_port)) {
+		err = -EADDRINUSE;
+		goto out;
+	}
+	ep = alloc_ep(sizeof(*ep), GFP_KERNEL);
+	if (!ep) {
+		printk(KERN_ERR MOD "%s - cannot alloc ep.\n", __FUNCTION__);
+		err = -ENOMEM;
+		goto fail1;
+	}
+	ep->com.tdev = h->rdev.t3cdev_p;
+	ep->com.cm_id = cm_id;
+	ep->backlog = backlog;
+	ep->com.local_addr = cm_id->local_addr;
+
+	/* 
+	 * Allocate a server TID.
+	 */
+	ep->stid = t3c_alloc_stid(h->rdev.t3cdev_p, &t3c_client, ep);
+	if (ep->stid == -1) {
+		printk(KERN_ERR MOD "%s - cannot alloc stid.\n", __FUNCTION__);
+		err = -ENOMEM;
+		goto fail2;
+	}
+
+	state_set(&ep->com, LISTEN);
+	err = listen_start(ep);
+	if (err) {
+		goto fail3;
+	}
+
+	/* wait for pass_open_rpl */
+	wait_event(ep->com.waitq, ep->com.rpl_done);
+	err = ep->com.rpl_err;
+	if (!err) {
+		cm_id->provider_id = (u64) (unsigned long) ep;
+		goto out;
+	}
+fail3:
+	t3c_free_stid(ep->com.tdev, ep->stid);
+fail2:
+	free_ep(&ep->com);
+fail1:
+	free_port(cm_id->local_addr.sin_port);
+out:
+	return err;
+}
+
+int iwch_destroy_listen(struct iw_cm_id *cm_id)
+{
+	int err;
+	struct iwch_listen_ep *ep = to_listen_ep(cm_id);
+
+	PDBG("%s (%s line %u)\n", __FUNCTION__, __FILE__, __LINE__);
+
+	might_sleep();
+	state_set(&ep->com, DEAD);
+	ep->com.rpl_done = 0;
+	ep->com.rpl_err = 0;
+	err = listen_stop(ep);
+	wait_event(ep->com.waitq, ep->com.rpl_done);
+	t3c_free_stid(ep->com.tdev, ep->stid);
+	err = ep->com.rpl_err;
+	state_set(&ep->com, FREE);
+	free_ep(&ep->com);
+	return err;
+}
+
+int iwch_ep_disconnect(struct iwch_ep *ep, int abrupt)
+{
+	int ret;
+
+	PDBG("%s enter (%s line %u)\n", __FUNCTION__, __FILE__, __LINE__);
+	BUG_ON(state_read(&ep->com) != FPDU_MODE);
+	state_set(&ep->com, DEAD);
+	if (abrupt) {
+		ret = send_abort(ep, NULL);
+	} else {
+		ret = send_halfclose(ep);
+	}
+	ep->com.cm_id->provider_id = 0;
+	return ret;
+}
+
+int iwch_disconnect(struct iw_cm_id *cm_id, int abrupt)
+{
+	struct iwch_ep *ep = to_ep(cm_id);
+	int ret;
+	struct iwch_qp_attributes attrs;
+	enum iwch_qp_attr_mask mask;
+
+	/*
+	 * If the provider_id field is 0, then the ep 
+	 * has already been cleaned up via qp_destroy.
+	 */
+	PDBG("%s enter (%s line %u)\n", __FUNCTION__, __FILE__, __LINE__);
+	if (!ep) {
+		PDBG("ep null...\n");
+		return 0;
+	}
+	
+	if (!to_iwch_qp(cm_id->qp)) {
+		PDBG("qp null...\n");
+		return 0;
+	}
+
+	mask = IWCH_QP_ATTR_NEXT_STATE;
+	if (abrupt) {
+		attrs.next_state = IWCH_QP_STATE_ERROR;
+		ret = iwch_modify_qp(to_iwch_dev(cm_id->device),
+				     to_iwch_qp(cm_id->qp), mask,
+				     &attrs, 1);
+	} else {
+		attrs.next_state = IWCH_QP_STATE_CLOSING;
+		ret = iwch_modify_qp(to_iwch_dev(cm_id->device),
+				     to_iwch_qp(cm_id->qp), mask,
+				     &attrs, 1);
+	}
+	iwch_ep_disconnect(ep, abrupt);
+	return ret;
+}
+
+/*
+ * All the CM events are handled on a work queue so they are processed
+ * in a safe (process) context.
+ */
+static int sched(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
+{
+	struct iwch_ep_common *epc = ctx;
+
+	atomic_inc(&epc->refcnt);
+	BUG_ON(state_read(epc) == FREE);
+
+#if 0
+	if (state_read(epc) == FREE) {
+		PDBG("%s - dropping cpl message due to FREE ep",
+			__FUNCTION__);
+		free_ep(epc);
+		kfree(skb);
+		return 0;
+	}
+#endif
+
+	/*
+	 * Save ctx and tdev in the skb->cb area.
+	 */
+	*((void **) skb->cb) = ctx;
+	*((struct t3cdev **) (skb->cb + sizeof(void *))) = tdev;
+
+	/* 
+	 * Queue the skb and schedule the worker thread.
+	 */
+	skb_queue_tail(&rxq, skb);
+	schedule_work(&work);
+	return 0;
+}
+
+void __init iwch_cm_init(void)
+{
+	skb_queue_head_init(&rxq);
+
+	/*
+	 * All upcalls from the T3 Core go to sched() to 
+	 * schedule the processing on a work queue.
+	 */
+	t3c_handlers[CPL_ACT_ESTABLISH] = sched;
+	t3c_handlers[CPL_ACT_OPEN_RPL] = sched;
+	t3c_handlers[CPL_RX_DATA] = sched;
+	t3c_handlers[CPL_TX_DMA_ACK] = sched;
+	t3c_handlers[CPL_ABORT_RPL_RSS] = sched;
+	t3c_handlers[CPL_ABORT_RPL] = sched;
+	t3c_handlers[CPL_PASS_OPEN_RPL] = sched;
+	t3c_handlers[CPL_CLOSE_LISTSRV_RPL] = sched;
+	t3c_handlers[CPL_PASS_ACCEPT_REQ] = sched;
+	t3c_handlers[CPL_PASS_ESTABLISH] = sched;
+	t3c_handlers[CPL_PEER_CLOSE] = sched;
+	t3c_handlers[CPL_CLOSE_CON_RPL] = sched;
+	t3c_handlers[CPL_ABORT_REQ_RSS] = sched;
+	t3c_handlers[CPL_RDMA_TERMINATE] = sched;
+
+	/*
+	 * These are the real handlers that are called from a 
+	 * work queue.
+	 */
+	work_handlers[CPL_ACT_ESTABLISH] = act_establish;
+	work_handlers[CPL_ACT_OPEN_RPL] = act_open_rpl;
+	work_handlers[CPL_RX_DATA] = rx_data;
+	work_handlers[CPL_TX_DMA_ACK] = tx_ack;
+	work_handlers[CPL_ABORT_RPL_RSS] = abort_rpl;
+	work_handlers[CPL_ABORT_RPL] = abort_rpl;
+	work_handlers[CPL_PASS_OPEN_RPL] = pass_open_rpl;
+	work_handlers[CPL_CLOSE_LISTSRV_RPL] = close_listsrv_rpl;
+	work_handlers[CPL_PASS_ACCEPT_REQ] = pass_accept_req;
+	work_handlers[CPL_PASS_ESTABLISH] = pass_establish;
+	work_handlers[CPL_PEER_CLOSE] = peer_close;
+	work_handlers[CPL_ABORT_REQ_RSS] = peer_abort;
+	work_handlers[CPL_CLOSE_CON_RPL] = close_con_rpl;
+	work_handlers[CPL_RDMA_TERMINATE] = terminate;
+}
--- old/src/linux-kernel/infiniband/hw/cxgb3/iwch_cm.h	1969-12-31 18:00:00.000000000 -0600
+++ new/src/linux-kernel/infiniband/hw/cxgb3/iwch_cm.h	2006-03-06 09:26:21.000000000 -0600
@@ -0,0 +1,145 @@
+/*
+ * Copyright (c) 2006 Chelsio, Inc. All rights reserved.
+ * Copyright (c) 2006 Open Grid Computing, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef _IWCH_CM_H_
+#define _IWCH_CM_H_
+
+#include <linux/inet.h>
+#include <linux/wait.h>
+#include <linux/spinlock.h>
+#include <rdma/ib_verbs.h>
+#include "iwch_provider.h"
+#include <rdma/iw_cm.h>
+
+#include <t3c.h>
+
+#define MPA_KEY_REQ "MPA ID Req Frame"
+#define MPA_KEY_REP "MPA ID Rep Frame"
+
+#define MPA_MAX_PRIVATE_DATA 	256
+#define MPA_REV 		0	/* XXX - amso1100 uses rev 0 ! */
+#define MPA_REJECT 		0x20
+#define MPA_CRC			0x40
+#define MPA_MARKERS		0x80
+#define MPA_FLAGS_MASK		0xE0
+
+struct mpa_message {
+	u8 key[16];
+	u8 flags;
+	u8 revision;
+	u16 private_data_size;
+	u8 private_data[0];
+};
+
+struct terminate_message {
+	u8 layer_etype;
+	u8 ecode;
+	u16 hdrct_rsvd;
+	u8 len_hdrs[0];
+};
+
+#define TERM_MAX_LENGTH (sizeof(struct terminate_message) + 2 + 14 + 28)
+
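+/*
+ * Endpoint connection states.  Active side: IDLE -> MPA_REQ_SENT ->
+ * FPDU_MODE.  Passive side: LISTEN -> MPA_REQ_WAIT -> MPA_REQ_RCVD ->
+ * MPA_REP_SENT -> FPDU_MODE.  REJECT holds a rejected passive
+ * connection until teardown; DEAD and FREE end either path.
+ */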
+enum iwch_ep_state {
+	IDLE = 0,
+	LISTEN = 1,
+	MPA_REQ_WAIT = 2,
+	MPA_REQ_SENT = 3,
+	MPA_REQ_RCVD = 4,
+	MPA_REP_SENT = 5,
+	FPDU_MODE = 6,
+	REJECT = 7,
+	DEAD = 8,
+	FREE = 9
+};
+
+struct iwch_ep_common {
+	struct iw_cm_id *cm_id;
+	struct t3cdev *tdev;
+	enum iwch_ep_state state;
+	atomic_t refcnt;
+	spinlock_t lock;
+	struct sockaddr_in local_addr;
+	struct sockaddr_in remote_addr;
+	wait_queue_head_t waitq;
+	int rpl_done;
+	int rpl_err;
+};
+
+struct iwch_listen_ep {
+	struct iwch_ep_common com;
+	unsigned int stid;
+	int backlog;
+};
+
+struct iwch_ep {
+	struct iwch_ep_common com;
+	struct timer_list timer;
+	unsigned int atid;
+	u32 hwtid;
+	u32 snd_seq;
+	struct l2t_entry *l2t;
+	struct dst_entry *dst;
+	struct sk_buff *mpa_skb;
+	struct iwch_mpa_attributes mpa_attr;
+	unsigned int mpa_pkt_len;
+	u8 mpa_pkt[sizeof(struct mpa_message) + MPA_MAX_PRIVATE_DATA];
+	u8 plen;
+	u8 tos;
+	u16 emss;
+};
+
+static inline struct iwch_ep *to_ep(struct iw_cm_id *cm_id)
+{
+	return (struct iwch_ep *) (unsigned long) cm_id->provider_id;
+}
+
+static inline struct iwch_listen_ep *to_listen_ep(struct iw_cm_id *cm_id)
+{
+	return (struct iwch_listen_ep *) (unsigned long) cm_id->provider_id;
+}
+
+/* CM prototypes */
+
+int iwch_connect(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len);
+int iwch_create_listen(struct iw_cm_id *cm_id, int backlog);
+int iwch_destroy_listen(struct iw_cm_id *cm_id);
+int iwch_reject_cr(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len);
+int iwch_accept_cr(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len);
+int iwch_disconnect(struct iw_cm_id *cm_id, int abrupt);
+int iwch_ep_disconnect(struct iwch_ep *ep, int abrupt);
+int iwch_quiesce_tid(struct iwch_ep *ep);
+int iwch_resume_tid(struct iwch_ep *ep);
+
+void __init iwch_cm_init(void);
+
+#endif				/* _IWCH_CM_H_ */
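
As a quick illustration of how the structures above go on the wire,
here is a minimal sketch of building an MPA Request frame the way
send_mpa_req() in iwch_cm.c does, using the struct mpa_message layout
and the MPA_* defines from this header.  The helper is hypothetical
(userspace-style; the caller supplies a large-enough buffer):

	#include <stdint.h>
	#include <string.h>
	#include <arpa/inet.h>

	/* assumes struct mpa_message and the MPA_* defines above */
	static size_t build_mpa_req(void *buf, const void *pdata,
				    uint8_t plen, int markers)
	{
		struct mpa_message *mpa = buf;

		memset(mpa, 0, sizeof(*mpa));
		memcpy(mpa->key, MPA_KEY_REQ, sizeof(mpa->key));
		mpa->flags = MPA_CRC | (markers ? MPA_MARKERS : 0);
		mpa->revision = MPA_REV;
		mpa->private_data_size = htons(plen);
		if (plen)
			memcpy(mpa->private_data, pdata, plen);
		return sizeof(*mpa) + plen;
	}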