[openib-general] [PATCH] iser: introduce struct iser_desc

Or Gerlitz ogerlitz at voltaire.com
Wed Jan 25 06:03:34 PST 2006


committed in r5181

Introduced struct iser_desc, which comes in four types: rx and tx control/command/dataout.
Removed the login/headers/dto/regd kmem_caches, and struct dtask together with its mempool.
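
As a quick orientation, here is a minimal sketch of the reshaped Data-Out send
path, paraphrasing iser_send_data_out() from the patch below; the wrapper name
example_send_dataout() is hypothetical, and RDMA payload registration as well
as error handling are elided:

#include "iscsi_iser.h"

/* sketch only: the Data-Out send path on top of the new descriptor; the
 * RDMA payload registration and error handling are left out             */
static int example_send_dataout(struct iscsi_iser_conn *p_iser_conn,
				struct iscsi_iser_cmd_task *p_ctask,
				struct iscsi_data *hdr)
{
	/* one slab for every descriptor flavour, replacing the old
	 * dto/header/login/regd_buf caches                          */
	struct iser_desc *tx_desc = kmem_cache_alloc(ig.desc_cache,
						     GFP_KERNEL | __GFP_NOFAIL);
	struct iser_dto *p_send_dto = &tx_desc->dto;

	tx_desc->type = ISCSI_TX_DATAOUT;
	memcpy(&tx_desc->iscsi_header, hdr, sizeof(struct iscsi_hdr));

	/* build the regd header (iser_header + iscsi_header live inside
	 * the descriptor itself) and add it to the descriptor's dto     */
	p_send_dto->p_task = p_ctask;
	iser_dto_send_create(p_iser_conn, tx_desc);
	iser_reg_single(p_iser_conn->ib_conn->p_adaptor,
			p_send_dto->regd[0], DMA_TO_DEVICE);

	/* the wr_id posted to the QP is the descriptor itself, so the
	 * completion handlers dispatch on tx_desc->type                */
	return iser_start_send(tx_desc);
}

The rx and Data-Out descriptors are allocated from ig.desc_cache, while the
command and control descriptors are embedded in the cmd/mgmt task structures.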

Signed-off-by: Or Gerlitz <ogerlitz at voltaire.com>

Index: ulp/iser/iser_conn.c
===================================================================
--- ulp/iser/iser_conn.c	(revision 5180)
+++ ulp/iser/iser_conn.c	(working copy)
@@ -189,51 +189,25 @@ int iser_conn_bind(struct iscsi_iser_con
 	p_iser_conn->p_iscsi_conn = iscsi_conn;
 	iscsi_conn->ib_conn       = p_iser_conn;
 
-	/* MERGE_ADDED_CHANGE moved here from ic_establish, before LOGIN sent */
-	iser_dbg("postrecv_cache =  ig.login_cache\n");
-	iscsi_conn->postrecv_cache = ig.login_cache;
-	iscsi_conn->postrecv_bsize = ISER_LOGIN_PHASE_PDU_DATA_LEN;
-	sprintf(iscsi_conn->name,"%d.%d.%d.%d",
-		NIPQUAD(iscsi_conn->ib_conn->dst_addr));
+	sprintf(iscsi_conn->name,"%d.%d.%d.%d:%d",
+		NIPQUAD(iscsi_conn->ib_conn->dst_addr),
+		iscsi_conn->ib_conn->dst_port);
 
 	return 0;
 }
 
 /**
- * iser_conn_enable_rdma - iSER API. Implements
- * Allocate_Connection_Resources and Enable_Datamover primitives.
- *
+ *  iser_conn_set_full_featured_mode - (iSER API)
  */
 int iser_conn_set_full_featured_mode(struct iscsi_iser_conn *p_iser_conn)
 {
-	int i,err =  0;
+	int i, err =  0;
 	/* no need to keep it in a var, we are after login so if this should
 	 * be negotiated, by now the result should be available here */
 	int initial_post_recv_bufs_num = ISER_INITIAL_POST_RECV + 2;
 
-	p_iser_conn->postrecv_cache = NULL;
-
 	iser_dbg("Initially post: %d\n", initial_post_recv_bufs_num);
 
-	sprintf(p_iser_conn->postrecv_cn,"prcv_%d.%d.%d.%d:%d",
-		NIPQUAD(p_iser_conn->ib_conn->dst_addr),p_iser_conn->ib_conn->dst_port);
-
-	/* Allocate recv buffers for the full-featured phase */
-
-	/* FIXME should be a param eg p_iser_conn->initiator_max_recv_dsl; */
-	p_iser_conn->postrecv_bsize = defaultInitiatorRecvDataSegmentLength;
-
-	p_iser_conn->postrecv_cache =
-		kmem_cache_create(p_iser_conn->postrecv_cn,
-				  p_iser_conn->postrecv_bsize,
-				  0,SLAB_HWCACHE_ALIGN, NULL, NULL);
-	if (p_iser_conn->postrecv_cache == NULL) {
-		iser_err("Failed to allocate post recv cache\n");
-		err =  -ENOMEM;
-		goto ffeatured_mode_failure;
-	}
-
-
 	/* Check that there is no posted recv or send buffers left - */
 	/* they must be consumed during the login phase */
 	if (atomic_read(&p_iser_conn->post_recv_buf_count) != 0)
@@ -246,7 +220,7 @@ int iser_conn_set_full_featured_mode(str
 		if (iser_post_receive_control(p_iser_conn) != 0) {
 			iser_err("Failed to post recv bufs at:%d conn:0x%p\n",
 				 i, p_iser_conn);
-			err =  -ENOMEM;
+			err = -ENOMEM;
 			goto ffeatured_mode_failure;
 		}
 	}
@@ -256,10 +230,6 @@ int iser_conn_set_full_featured_mode(str
 	return 0;
 
 ffeatured_mode_failure:
-	if(p_iser_conn->postrecv_cache) {
-		kmem_cache_destroy(p_iser_conn->postrecv_cache);
-		p_iser_conn->postrecv_cache = NULL;
-	}
 	return err;
 }
 
@@ -372,9 +342,6 @@ void iser_conn_release(struct iser_conn 
 
 		p_iscsi_conn = p_iser_conn->p_iscsi_conn;
 		if(p_iscsi_conn != NULL && p_iscsi_conn->ff_mode_enabled) {
-			if(kmem_cache_destroy(p_iscsi_conn->postrecv_cache) != 0)
-				iser_err("postrecv cache %s not empty, leak!\n",
-					 p_iscsi_conn->postrecv_cn);
 			p_iscsi_conn->ff_mode_enabled = 0;
 		}
 		/* release socket with conn descriptor */
@@ -440,70 +407,74 @@ int iser_complete_conn_termination(struc
  */
 int iser_post_receive_control(struct iscsi_iser_conn *p_iser_conn)
 {
-	struct iser_adaptor *p_iser_adaptor = p_iser_conn->ib_conn->p_adaptor;
-	struct iser_dto *p_recv_dto;
-	struct iser_regd_buf *p_regd_buf;
-	int err = 0;
-	int i;
+	struct iser_desc     *rx_desc;
+	struct iser_regd_buf *p_regd_hdr;
+	struct iser_regd_buf *p_regd_data;
+	struct iser_dto      *p_recv_dto = NULL;
+	struct iser_adaptor  *p_iser_adaptor = p_iser_conn->ib_conn->p_adaptor;
+	int rx_data_size, err = 0;
 
-	/* Create & init send DTO descriptor */
-	iser_dbg( "Alloc post-recv DTO descriptor\n");
-	p_recv_dto = kmem_cache_alloc(ig.dto_cache,
+	rx_desc = kmem_cache_alloc(ig.desc_cache,
 				      GFP_KERNEL | __GFP_NOFAIL);
-	if (p_recv_dto == NULL) {
-		iser_err("Failed to alloc DTO desc for post recv buffer\n");
-		err = -ENOMEM;
-		goto post_receive_control_exit;
-	}
-	iser_dto_init(p_recv_dto);
-	p_recv_dto->p_conn = p_iser_conn;
-	p_recv_dto->type = ISER_DTO_RCV;
-
-	iser_dbg("Allocate iSER header buffer\n");
-	p_regd_buf = iser_regd_mem_alloc(p_iser_adaptor,
-					 ig.header_cache,
-					 ISER_TOTAL_HEADERS_LEN);
-	if (p_regd_buf == NULL) {
-		iser_err("Failed to alloc regd buf (post-recv-buf hdr)\n");
+	if(rx_desc == NULL) {
+		iser_err("Failed to alloc desc for post recv\n");
 		err = -ENOMEM;
 		goto post_receive_control_exit;
 	}
+	rx_desc->type = ISCSI_RX;
+
+	/* for the login sequence we must support rx of up to 8K        *
+	 * FIXME need better predicate to test whether we are logged in */
+	if(!p_iser_conn->ff_mode_enabled)
+		rx_data_size = DEFAULT_MAX_RECV_DATA_SEGMENT_LENGTH;
+	else /* FIXME till user space sets conn->max_recv_dlength correctly */
+		rx_data_size = 1024;
 
-	/* DMA_MAP: safe to dma_map now - map and invalidate the cache */
-	iser_reg_single(p_iser_adaptor,p_regd_buf, DMA_FROM_DEVICE);
+	/* FIXME need to ensure this is HW cache start/end aligned      */
+	rx_desc->data = kmalloc(rx_data_size, GFP_KERNEL | __GFP_NOFAIL);
 
-	i = iser_dto_add_regd_buff(p_recv_dto, p_regd_buf,
-				   USE_NO_OFFSET,
-				   USE_ENTIRE_SIZE);
-	iser_dbg("Added header buffer 0x%p to DTO as entry: %d\n",
-		 p_regd_buf, i);
-
-	/* Create an iSER data buffer */
-	p_regd_buf = iser_regd_mem_alloc(p_iser_adaptor,
-					 p_iser_conn->postrecv_cache,
-					 p_iser_conn->postrecv_bsize);
-	if (p_regd_buf == NULL) {
-		iser_err("Failed to alloc regd buf (post-recv-buf data)\n");
+	if(rx_desc->data == NULL) {
+		iser_err("Failed to alloc data buf for post recv\n");
 		err = -ENOMEM;
 		goto post_receive_control_exit;
+
 	}
-	iser_dbg("Allocated iSER data buffer from postrecv_cache 0x%p\n",
-		 p_regd_buf->virt_addr);
 
-	/* DMA_MAP: safe to dma_map now - map and invalidate the cache */
-	iser_reg_single(p_iser_adaptor,p_regd_buf, DMA_FROM_DEVICE);
+	p_recv_dto = &rx_desc->dto;
+	p_recv_dto->p_conn          = p_iser_conn;
+	p_recv_dto->regd_vector_len = 0;
+
+	p_regd_hdr = &rx_desc->hdr_regd_buf;
+	memset(p_regd_hdr, 0, sizeof(struct iser_regd_buf));
+	p_regd_hdr->p_adaptor  = p_iser_adaptor;
+	p_regd_hdr->virt_addr  = rx_desc; /* == &rx_desc->iser_header */
+	p_regd_hdr->data_size  = ISER_TOTAL_HEADERS_LEN;
+
+	iser_reg_single(p_iser_adaptor, p_regd_hdr, DMA_FROM_DEVICE);
+
+	iser_dto_add_regd_buff(p_recv_dto, p_regd_hdr, USE_NO_OFFSET,
+			       USE_ENTIRE_SIZE);
+
+	p_regd_data = &rx_desc->data_regd_buf;
+	memset(p_regd_data, 0, sizeof(struct iser_regd_buf));
+	p_regd_data->p_adaptor  = p_iser_adaptor;
+	p_regd_data->virt_addr  = rx_desc->data;
+	p_regd_data->data_size  = rx_data_size;
+
+	iser_reg_single(p_iser_adaptor, p_regd_data, DMA_FROM_DEVICE);
 
-	i = iser_dto_add_regd_buff(p_recv_dto, p_regd_buf,
-				   USE_NO_OFFSET, USE_ENTIRE_SIZE);
-	iser_dbg("Added data buffer 0x%p to DTO as entry: %d\n",
-		 p_regd_buf, i);
+	iser_dto_add_regd_buff(p_recv_dto, p_regd_data,
+			       USE_NO_OFFSET, USE_ENTIRE_SIZE);
 
 	atomic_inc(&p_iser_conn->post_recv_buf_count);
-	err = iser_post_recv(p_recv_dto);
+	err = iser_post_recv(rx_desc);
 
 post_receive_control_exit:
-	if (err && p_recv_dto != NULL) {
+	if(err && rx_desc) {
 		iser_dto_free(p_recv_dto);
+		if(rx_desc->data != NULL)
+			kfree(rx_desc->data);
+		kmem_cache_free(ig.desc_cache, rx_desc);
 		atomic_dec(&p_iser_conn->post_recv_buf_count);
 	}
 	return err;
Index: ulp/iser/iser_mod.c
===================================================================
--- ulp/iser/iser_mod.c	(revision 5180)
+++ ulp/iser/iser_mod.c	(working copy)
@@ -69,21 +69,6 @@ struct iser_global ig;
 
 static void iser_global_release(void);
 
-static kmem_cache_t *iser_mem_cache_create(const char *cache_name,
-					   unsigned int obj_size)
-{
-	kmem_cache_t *p_cache;
-
-	p_cache = kmem_cache_create(cache_name, obj_size,
-				     0, SLAB_HWCACHE_ALIGN,
-				     NULL, NULL);
-	if (p_cache == NULL) {
-		iser_err("Failed to alloc cache: %s\n", cache_name);
-		iser_global_release();
-	}
-	return p_cache;
-}
-
 /**
  * init_module - module initialization function
  */
@@ -95,24 +80,11 @@ int init_module(void)
 
 	memset(&ig, 0, sizeof(struct iser_global));
 
-	ig.header_cache = iser_mem_cache_create("iser_headers",
-						ISER_TOTAL_HEADERS_LEN);
-	if (ig.header_cache == NULL)
-		return -ENOMEM;
-
-	ig.regd_buf_cache = iser_mem_cache_create("iser_regbuf",
-						  sizeof(struct iser_regd_buf));
-	if (ig.regd_buf_cache == NULL)
-		return -ENOMEM;
-
-	ig.login_cache = iser_mem_cache_create("iser_login",
-					       ISER_LOGIN_PHASE_PDU_DATA_LEN);
-	if (ig.login_cache == NULL)
-		return -ENOMEM;
-
-	ig.dto_cache = iser_mem_cache_create("iser_dto",
-					     sizeof(struct iser_dto));
-	if (ig.dto_cache == NULL)
+	ig.desc_cache = kmem_cache_create("iser_descriptors",
+					  sizeof (struct iser_desc),
+					  0, SLAB_HWCACHE_ALIGN,
+					  NULL, NULL);
+	if (ig.desc_cache == NULL)
 		return -ENOMEM;
 
 	/* adaptor init is called only after the first addr resolution */
@@ -135,6 +107,7 @@ int init_module(void)
  */
 static void iser_global_release(void)
 {
+	int err;
 	struct iser_adaptor *p_adaptor;
 
 	iscsi_iser_exit();
@@ -148,22 +121,13 @@ static void iser_global_release(void)
 		ig.num_adaptors--;
 	}
 
-	if (ig.dto_cache != NULL) {
-		kmem_cache_destroy(ig.dto_cache);
-		ig.dto_cache = NULL;
-	}
-	if (ig.login_cache != NULL) {
-		kmem_cache_destroy(ig.login_cache);
-		ig.login_cache = NULL;
-	}
-	if (ig.regd_buf_cache != NULL) {
-		kmem_cache_destroy(ig.regd_buf_cache);
-		ig.regd_buf_cache = NULL;
-	}
-	if (ig.header_cache != NULL) {
-		kmem_cache_destroy(ig.header_cache);
-		ig.header_cache = NULL;
+	if (ig.desc_cache != NULL) {
+		err = kmem_cache_destroy(ig.desc_cache);
+		if(err)
+			iser_err("kmem_cache_destroy returned %d\n", err);
+		ig.desc_cache = NULL;
 	}
+
 	iser_unreg_sockets();
 }
 
Index: ulp/iser/iscsi_iser.h
===================================================================
--- ulp/iser/iscsi_iser.h	(revision 5180)
+++ ulp/iser/iscsi_iser.h	(working copy)
@@ -173,6 +173,29 @@ struct rdma_cm_id;
 struct ib_qp;
 struct iscsi_iser_cmd_task;
 
+
+struct iser_mem_reg {
+	u32 lkey;
+	u32 rkey;
+	u64 va;
+	u64 len;
+	void *mem_h;
+};
+
+struct iser_regd_buf {
+	struct iser_mem_reg reg; /* memory registration info */
+	kmem_cache_t *data_cache; /* data allocated from here, when set */
+	void *virt_addr;
+
+	struct iser_adaptor *p_adaptor;	   /* p_adaptor->device for dma_unmap */
+	dma_addr_t dma_addr;		   /* if non zero, addr for dma_unmap */
+	enum dma_data_direction direction; /* direction for dma_unmap	      */
+	unsigned int data_size;
+
+	/* Reference count, memory freed when decremented to 0 */
+	atomic_t ref_count;
+};
+
 #define MAX_REGD_BUF_VECTOR_LEN	2
 
 enum iser_dto_type {
@@ -186,7 +209,6 @@ enum iser_dto_type {
 struct iser_dto {
 	struct iscsi_iser_cmd_task *p_task;
 	struct iscsi_iser_conn     *p_conn;
-	enum iser_dto_type type;
 	int notify_enable;
 
 	/* vector of registered buffers */
@@ -198,8 +220,24 @@ struct iser_dto {
 	unsigned int used_sz[MAX_REGD_BUF_VECTOR_LEN];
 };
 
-enum iser_op_param_default {
-	defaultInitiatorRecvDataSegmentLength = 128,
+enum iser_desc_type {
+	ISCSI_RX,
+	ISCSI_TX_CONTROL ,
+	ISCSI_TX_CONTROL,
+	ISCSI_TX_DATAOUT
+};
+
+struct iser_desc {
+	struct iser_hdr  iser_header;
+	struct iscsi_hdr iscsi_header;
+
+	struct iser_regd_buf  hdr_regd_buf;
+
+	void                 *data;          /* used by RX & TX_CONTROL types */
+	struct iser_regd_buf  data_regd_buf; /* used by RX & TX_CONTROL types */
+
+	enum   iser_desc_type type;
+	struct iser_dto       dto;
 };
 
 struct iser_conn
@@ -232,10 +270,6 @@ struct iscsi_iser_conn
 
 	struct list_head adaptor_list;	/* entry in the adaptor's conn list */
 
-	kmem_cache_t *postrecv_cache;
-	unsigned int postrecv_bsize;
-	char         postrecv_cn[32];
-
 	atomic_t     post_recv_buf_count;
 	atomic_t     post_send_buf_count;
 
@@ -280,14 +314,16 @@ struct iscsi_iser_queue {
 };
 
 struct iscsi_iser_mgmt_task {
-	struct iscsi_hdr	hdr;
+	struct iser_desc        desc;
+	struct iscsi_hdr	*hdr;           /* points to desc.iscsi_header */
 	uint32_t	itt;			/* this ITT */
-	char		*data;			/* mgmt payload */
+	char		*data;			/* mgmt payload, points to desc->data */
 	int			data_count;	/* counts data to be sent */
 };
 
 struct iscsi_iser_cmd_task {
-	struct iscsi_cmd	hdr;			/* iSCSI PDU header */
+	struct iser_desc        desc;
+	struct iscsi_cmd	*hdr;			/* iSCSI PDU header, points to desc.iscsi_header */
         int			itt;			/* this ITT */
 	struct iscsi_iser_conn  *conn;
 	spinlock_t 		task_lock;
@@ -311,26 +347,15 @@ struct iscsi_iser_cmd_task {
 	int			data_offset;
 	struct iscsi_iser_mgmt_task	*mtask;			/* tmf mtask in progr */
 
-	struct list_head	dataqueue;		/* Data-Out dataqueue */
-	mempool_t		*datapool;
-
-	struct iscsi_iser_data_task   *dtask;		/* data task in progress*/
-
 	unsigned int post_send_count; /* posted send buffers pending completion */
 
 	int dir[ISER_DIRS_NUM];	/* set if direction used */
-	struct iser_regd_buf *rdma_regd[ISER_DIRS_NUM];	/* regd rdma buffer */
+	struct iser_regd_buf rdma_regd[ISER_DIRS_NUM];	/* regd rdma buffer */
 	unsigned long data_len[ISER_DIRS_NUM];	/* total data length */
 	struct iser_data_buf data[ISER_DIRS_NUM]; /* orig. data descriptor */
 	struct iser_data_buf data_copy[ISER_DIRS_NUM]; /* contig. copy */
 };
 
-struct iscsi_iser_data_task {
-	struct iscsi_data	hdr;			/* PDU */
-	struct list_head	item;			/* data queue item */
-};
-#define ISCSI_DTASK_DEFAULT_MAX	ISCSI_ISER_SG_TABLESIZE * PAGE_SIZE / 512
-
 struct iscsi_iser_session
 {
 	/* iSCSI session-wide sequencing */
@@ -372,9 +397,6 @@ struct iscsi_iser_session
 	int			erl;
 };
 
-/* Various size limits */
-#define ISER_LOGIN_PHASE_PDU_DATA_LEN	    (8*1024)	/* 8K */
-
 struct iser_page_vec {
 	u64 *pages;
 	int length;
@@ -382,28 +404,6 @@ struct iser_page_vec {
 	int data_size;
 };
 
-struct iser_mem_reg {
-	u32 lkey;
-	u32 rkey;
-	u64 va;
-	u64 len;
-	void *mem_h;
-};
-
-struct iser_regd_buf {
-	struct iser_mem_reg reg; /* memory registration info */
-	kmem_cache_t *data_cache; /* data allocated from here, when set */
-	void *virt_addr;
-
-	struct iser_adaptor *p_adaptor;	   /* p_adaptor->device for dma_unmap */
-	dma_addr_t dma_addr;		   /* if non zero, addr for dma_unmap */
-	enum dma_data_direction direction; /* direction for dma_unmap	      */
-	unsigned int data_size;
-
-	/* Reference count, memory freed when decremented to 0 */
-	atomic_t ref_count;
-};
-
 struct iser_adaptor {
 	struct list_head       ig_list; /* entry in ig adaptors list */
 
@@ -426,11 +426,7 @@ struct iser_global {
 	struct semaphore  adaptor_list_sem;  /*                   */
 	struct list_head  adaptor_list;	     /* all iSER adaptors */
 
-	kmem_cache_t *dto_cache;	/* slab for iser_dto */
-	kmem_cache_t *regd_buf_cache;	/* slab iser_regd_buf */
-
-	kmem_cache_t *login_cache;
-	kmem_cache_t *header_cache;
+	kmem_cache_t *desc_cache;
 };				/* iser_global */
 
 extern struct iser_global ig;
@@ -517,8 +513,6 @@ void iser_adaptor_add_conn(struct iser_a
 #define USE_SIZE(size)	    (size)
 #define USE_ENTIRE_SIZE	    0
 
-void iser_dto_init(struct iser_dto *p_dto);
-
 int iser_dto_add_regd_buff(struct iser_dto *p_dto,
 			   struct iser_regd_buf *p_regd_buf,
 			   unsigned long use_offset,
@@ -526,30 +520,23 @@ int iser_dto_add_regd_buff(struct iser_d
 
 void iser_dto_free(struct iser_dto *p_dto);
 
-int iser_dto_completion_error(struct iser_dto *p_dto);
-
-void iser_dto_get_rx_pdu_data(struct iser_dto *p_dto,
-			      unsigned long dto_xfer_len,
-			      struct iscsi_hdr **p_rx_hdr,
-			      char **rx_data, int *rx_data_size);
-
-struct iser_dto *iser_dto_send_create(struct iscsi_iser_conn *p_iser_conn,
-				      struct iscsi_hdr *p_hdr,
-				      unsigned char **p_header);
+int iser_dto_completion_error(struct iser_desc *p_desc);
 
+void iser_dto_send_create(struct iscsi_iser_conn *p_iser_conn,
+			  struct iser_desc       *tx_desc);
 
 
 /* iser_initiator.h */
 
-void iser_rcv_dto_completion(struct iser_dto *p_dto,
+void iser_rcv_completion(struct iser_desc *p_desc,
 			     unsigned long dto_xfer_len);
 
-void iser_snd_dto_completion(struct iser_dto *p_dto);
+void iser_snd_completion(struct iser_desc *p_desc);
 
 /* iser_memory.h */
 
 /* regd_buf */
-struct iser_regd_buf *iser_regd_buf_alloc(void);
+//struct iser_regd_buf *iser_regd_buf_alloc(void);
 
 struct iser_regd_buf *iser_regd_mem_alloc(struct iser_adaptor *p_iser_adaptor,
 					  kmem_cache_t *cache,
@@ -686,7 +673,7 @@ int iser_reg_phys_mem(struct iser_conn *
 
 void iser_unreg_mem(struct iser_mem_reg *mem_reg);
 
-int iser_post_recv(struct iser_dto *p_dto);
-int iser_start_send(struct iser_dto *p_dto);
+int iser_post_recv(struct iser_desc *p_rx_desc);
+int iser_start_send(struct iser_desc *p_tx_desc);
 
 #endif
Index: ulp/iser/iser_verbs.c
===================================================================
--- ulp/iser/iser_verbs.c	(revision 5180)
+++ ulp/iser/iser_verbs.c	(working copy)
@@ -567,12 +567,13 @@ static void iser_dto_to_iov(struct iser_
  *
  * returns 0 on success, -1 on failure
  */
-int iser_post_recv(struct iser_dto *p_recv_dto)
+int iser_post_recv(struct iser_desc *p_rx_desc)
 {
 	int		  ib_ret, ret_val = 0;
 	struct ib_recv_wr recv_wr, *recv_wr_failed;
 	struct ib_sge	  iov[2];
 	struct iscsi_iser_conn  *p_iser_conn;
+	struct iser_dto         *p_recv_dto = &p_rx_desc->dto;
 
 	/* Retrieve conn */
 	p_iser_conn = p_recv_dto->p_conn;
@@ -584,7 +585,7 @@ int iser_post_recv(struct iser_dto *p_re
 	recv_wr.next	= NULL;
 	recv_wr.sg_list = iov;
 	recv_wr.num_sge = p_recv_dto->regd_vector_len;
-	recv_wr.wr_id	= (unsigned long)p_recv_dto;
+	recv_wr.wr_id	= (unsigned long)p_rx_desc;
 
 	ib_ret	= ib_post_recv (p_iser_conn->ib_conn->qp, &recv_wr, &recv_wr_failed);
 
@@ -601,12 +602,13 @@ int iser_post_recv(struct iser_dto *p_re
  *
  * returns 0 on success, -1 on failure
  */
-int iser_start_send(struct iser_dto *p_dto)
+int iser_start_send(struct iser_desc *p_tx_desc)
 {
 	int		  ib_ret, ret_val = 0;
 	struct ib_send_wr send_wr, *send_wr_failed;
 	struct ib_sge	  iov[MAX_REGD_BUF_VECTOR_LEN];
 	struct iscsi_iser_conn  *p_iser_conn;
+	struct iser_dto         *p_dto = &p_tx_desc->dto;
 
 	if (p_dto == NULL)
 		iser_bug("NULL p_dto\n");
@@ -618,7 +620,7 @@ int iser_start_send(struct iser_dto *p_d
 	iser_dto_to_iov(p_dto, iov, MAX_REGD_BUF_VECTOR_LEN);
 
 	send_wr.next	   = NULL;
-	send_wr.wr_id	   = (unsigned long)p_dto;
+	send_wr.wr_id	   = (unsigned long)p_tx_desc;
 	send_wr.sg_list	   = iov;
 	send_wr.num_sge	   = p_dto->regd_vector_len;
 	send_wr.opcode	   = IB_WR_SEND;
@@ -640,13 +642,13 @@ int iser_start_send(struct iser_dto *p_d
 }
 
 static void iser_handle_comp_error(enum ib_wc_status status,
-				   struct iser_dto   *p_dto)
+				   struct iser_desc  *p_desc)
 {
 	int		    ret_val;
-	struct iscsi_iser_conn  *p_iser_conn = p_dto->p_conn;
+	struct iscsi_iser_conn  *p_iser_conn = p_desc->dto.p_conn;
 
 	if(p_iser_conn == NULL)
-		iser_bug("NULL p_dto->p_conn \n");
+		iser_bug("NULL p_desc->p_conn \n");
 
 	/* Since the cma doesn't notify us on CONNECTION_EVENT_BROKEN *
 	* we need to initiate a disconn				      */
@@ -664,7 +666,7 @@ static void iser_handle_comp_error(enum 
 		iser_dbg("Conn. 0x%p is being terminated asynchronously\n", p_iser_conn);
 	}
 	/* Handle completion Error */
-	ret_val = iser_dto_completion_error(p_dto);
+	ret_val = iser_dto_completion_error(p_desc);
 	if (ret_val && ret_val != -EAGAIN)
 		iser_err("Failed to handle ERROR DTO completion\n");
 }
@@ -674,24 +676,24 @@ void iser_cq_tasklet_fn(unsigned long da
 	 struct iser_adaptor	*p_iser_adaptor = (struct iser_adaptor *)data;
 	 struct ib_cq	        *cq = p_iser_adaptor->cq;
 	 struct ib_wc	    	wc;
-	 struct iser_dto	*p_dto;
+	 struct iser_desc	*p_desc;
 	 unsigned long	    	xfer_len;
 
 	while (ib_poll_cq(cq, 1, &wc) == 1) {
-		p_dto	 = (struct iser_dto *) (unsigned long) wc.wr_id;
+		p_desc	 = (struct iser_desc *) (unsigned long) wc.wr_id;
 
-		if (p_dto == NULL || p_dto->type >= ISER_DTO_PASSIVE)
-			iser_bug("NULL p_dto %p or unexpected type\n", p_dto);
+		if (p_desc == NULL)
+			iser_bug("NULL p_desc\n");
 
 		if (wc.status == IB_WC_SUCCESS) {
-			if (p_dto->type == ISER_DTO_RCV) {
+			if (p_desc->type == ISCSI_RX) {
 				xfer_len = (unsigned long)wc.byte_len;
-				iser_rcv_dto_completion(p_dto, xfer_len);
-			} else /* p_dto->type == ISER_DTO_SEND */
-				iser_snd_dto_completion(p_dto);
+				iser_rcv_completion(p_desc, xfer_len);
+			} else /* type == ISCSI_TX_CONTROL/SCSI_CMD/DOUT */
+				iser_snd_completion(p_desc);
 		} else
 			/* #warning "we better do a context jump here" */
-			iser_handle_comp_error(wc.status, p_dto);
+			iser_handle_comp_error(wc.status, p_desc);
 	}
 /* #warning "it is assumed here that arming CQ only once its empty would not"
  *           "cause interrupts to be missed"  */
Index: ulp/iser/iser_task.c
===================================================================
--- ulp/iser/iser_task.c	(revision 5180)
+++ ulp/iser/iser_task.c	(working copy)
@@ -42,17 +42,20 @@ void iser_task_init_lowpart(struct iscsi
 	spin_lock_init(&p_iser_task->task_lock);
 	p_iser_task->status = ISER_TASK_STATUS_INIT;
 	p_iser_task->post_send_count = 0;
-	
+
 	p_iser_task->dir[ISER_DIR_IN] = 0;
 	p_iser_task->dir[ISER_DIR_OUT] = 0;
-	
+
 	p_iser_task->data_len[ISER_DIR_IN] = 0;
 	p_iser_task->data_len[ISER_DIR_OUT] = 0;
-	
-	p_iser_task->rdma_regd[ISER_DIR_IN] = NULL;
-	p_iser_task->rdma_regd[ISER_DIR_OUT] = NULL;
+
+	memset(&p_iser_task->rdma_regd[ISER_DIR_IN], 0,
+	       sizeof(struct iser_regd_buf));
+	memset(&p_iser_task->rdma_regd[ISER_DIR_OUT], 0,
+	       sizeof(struct iser_regd_buf));
 }
 
+
 /**
  * iser_task_post_send_count_inc - Increments counter of
  * post-send buffers pending send completion
@@ -112,13 +115,14 @@ void iser_task_finalize_lowpart(struct i
 
 	spin_lock_bh(&p_iser_task->task_lock);
 	if (p_iser_task->dir[ISER_DIR_IN]) {
-		deferred = iser_regd_buff_release(p_iser_task->rdma_regd[ISER_DIR_IN]);
+		deferred = iser_regd_buff_release
+			(&p_iser_task->rdma_regd[ISER_DIR_IN]);
 		if (deferred)
 			iser_bug("References remain for BUF-IN rdma reg\n");
 	}
-	if (p_iser_task->dir[ISER_DIR_OUT] &&
-	    p_iser_task->rdma_regd[ISER_DIR_OUT] != NULL) {
-		deferred = iser_regd_buff_release(p_iser_task->rdma_regd[ISER_DIR_OUT]);
+	if (p_iser_task->dir[ISER_DIR_OUT]) {
+		deferred = iser_regd_buff_release
+			(&p_iser_task->rdma_regd[ISER_DIR_OUT]);
 		if (deferred)
 			iser_bug("References remain for BUF-OUT rdma reg\n");
 	}
Index: ulp/iser/iser_initiator.c
===================================================================
--- ulp/iser/iser_initiator.c	(revision 5180)
+++ ulp/iser/iser_initiator.c	(working copy)
@@ -65,10 +65,8 @@ static int iser_reg_rdma_mem(struct iscs
 	else
 		priv_flags |= IB_ACCESS_REMOTE_READ;
 
-	p_iser_task->rdma_regd[cmd_dir] = NULL;
-	p_regd_buf = iser_regd_buf_alloc();
-	if (p_regd_buf == NULL)
-		return -ENOMEM;
+
+	p_regd_buf = &p_iser_task->rdma_regd[cmd_dir];
 
 	iser_dbg("p_mem %p p_mem->type %d\n", p_mem,p_mem->type);
 
@@ -95,23 +93,20 @@ static int iser_reg_rdma_mem(struct iscs
 	}
 
 	page_vec = iser_page_vec_alloc(p_mem,0,cnt_to_reg);
-	if (page_vec == NULL) {
-		iser_regd_buff_release(p_regd_buf);
+	if (page_vec == NULL)
 		return -ENOMEM;
-	}
+
 	page_vec_len = iser_page_vec_build(p_mem, page_vec, 0, cnt_to_reg);
 	err = iser_reg_phys_mem(p_iser_conn, page_vec, priv_flags,
 				&p_regd_buf->reg);
 	iser_page_vec_free(page_vec);
 	if (err) {
 		iser_err("Failed to register %d page entries\n", page_vec_len);
-		iser_regd_buff_release(p_regd_buf);
 		return -EINVAL;
 	}
 	/* take a reference on this regd buf such that it will not be released *
 	 * (eg in send dto completion) before we get the scsi response         */
 	iser_regd_buff_ref(p_regd_buf);
-	p_iser_task->rdma_regd[cmd_dir] = p_regd_buf;
 	return 0;
 }
 
@@ -121,15 +116,15 @@ static int iser_reg_rdma_mem(struct iscs
  */
 static int iser_prepare_read_cmd(struct iscsi_iser_cmd_task *p_iser_task,
 				 struct iser_data_buf *buf_in,
-				 unsigned int edtl,
-				 unsigned char *p_iser_header)
+				 unsigned int edtl)
+
 {
 	struct iser_regd_buf *p_regd_buf;
 	int err;
 	dma_addr_t dma_addr;
 	int dma_nents;
 	struct device *dma_device;
-	struct iser_hdr *hdr = (struct iser_hdr *)p_iser_header;
+	struct iser_hdr *hdr = &p_iser_task->desc.iser_header;
 
 	p_iser_task->dir[ISER_DIR_IN] = 1;
 	dma_device = p_iser_task->conn->ib_conn->p_adaptor->device->dma_device;
@@ -171,7 +166,7 @@ static int iser_prepare_read_cmd(struct 
 		iser_err("Failed to set up Data-IN RDMA\n");
 		return err;
 	}
-	p_regd_buf = p_iser_task->rdma_regd[ISER_DIR_IN];
+	p_regd_buf = &p_iser_task->rdma_regd[ISER_DIR_IN];
 
 	hdr->flags    |= ISER_RSV;
 	hdr->read_stag = cpu_to_be32(p_regd_buf->reg.rkey);
@@ -193,16 +188,15 @@ iser_prepare_write_cmd(struct iscsi_iser
 		       struct iser_data_buf *buf_out,
 		       unsigned int imm_sz,
 		       unsigned int unsol_sz,
-		       struct iser_dto *p_send_dto,
-		       unsigned int edtl,
-		       unsigned char *p_iser_header)
+		       unsigned int edtl)
 {
 	struct iser_regd_buf *p_regd_buf;
 	int err;
 	dma_addr_t dma_addr;
 	int dma_nents;
 	struct device *dma_device;
-	struct iser_hdr *hdr = (struct iser_hdr *)p_iser_header;
+	struct iser_dto *p_send_dto = &p_iser_task->desc.dto;
+	struct iser_hdr *hdr = &p_iser_task->desc.iser_header;
 
 	p_iser_task->dir[ISER_DIR_OUT] = 1;
 	dma_device = p_iser_task->conn->ib_conn->p_adaptor->device->dma_device;
@@ -247,7 +241,7 @@ iser_prepare_write_cmd(struct iscsi_iser
 		return err;
 	}
 
-	p_regd_buf = p_iser_task->rdma_regd[ISER_DIR_OUT];
+	p_regd_buf = &p_iser_task->rdma_regd[ISER_DIR_OUT];
 
 	if(unsol_sz < edtl) {
 		hdr->flags     |= ISER_WSV;
@@ -279,14 +273,11 @@ int iser_send_command(struct iscsi_iser_
 		      struct iscsi_iser_cmd_task *p_ctask)
 {
 	struct iser_dto *p_send_dto = NULL;
-	unsigned int itt;
-	unsigned long data_seg_len;
 	unsigned long edtl;
-	unsigned char *p_iser_header;
 	int err = 0;
 	struct iser_data_buf data_buf;
 
-	struct iscsi_cmd *hdr = &p_ctask->hdr;
+	struct iscsi_cmd *hdr =  p_ctask->hdr;
 	struct scsi_cmnd *sc  =  p_ctask->sc;
 
 	if (atomic_read(&p_iser_conn->ib_conn->state) != ISER_CONN_UP) {
@@ -294,22 +285,16 @@ int iser_send_command(struct iscsi_iser_
 		return -EPERM;
 	}
 
-	itt = ntohl(hdr->itt);
-	data_seg_len = ntoh24(hdr->dlength);
 	edtl = ntohl(hdr->data_length);
 
 	/* MERGE_CHANGE - temporal move it up */
 	iser_task_init_lowpart(p_ctask);
 
-	/* Allocate send DTO descriptor, headers buf and add it to the DTO */
-	p_send_dto = iser_dto_send_create(p_iser_conn, (struct iscsi_hdr *)hdr,
-					  &p_iser_header);
-	if (p_send_dto == NULL) {
-		iser_err("Failed to create send DTO, conn:0x%p\n", p_iser_conn);
-		err = -ENOMEM;
-		goto send_command_error;
-	}
+	/* build the tx desc regd header and add it to the tx desc dto */
+	p_ctask->desc.type = ISCSI_TX_SCSI_COMMAND;
+	p_send_dto = &p_ctask->desc.dto;
 	p_send_dto->p_task = p_ctask;
+	iser_dto_send_create(p_iser_conn, &p_ctask->desc);
 
 	if (sc->use_sg) { /* using a scatter list */
 		data_buf.p_buf = sc->request_buffer;
@@ -322,8 +307,7 @@ int iser_send_command(struct iscsi_iser_
 	}
 
 	if (hdr->flags & ISCSI_FLAG_CMD_READ) {
-		err = iser_prepare_read_cmd(p_ctask, &data_buf,
-					    edtl, p_iser_header);
+		err = iser_prepare_read_cmd(p_ctask, &data_buf, edtl);
 		if (err) goto send_command_error;
 	}
 	if (hdr->flags & ISCSI_FLAG_CMD_WRITE) {
@@ -331,7 +315,7 @@ int iser_send_command(struct iscsi_iser_
 					     p_ctask->imm_count,
 				             p_ctask->imm_count +
 					     p_ctask->unsol_count,
-					     p_send_dto, edtl, p_iser_header);
+					     edtl);
 		if (err) goto send_command_error;
 	}
 
@@ -348,7 +332,7 @@ int iser_send_command(struct iscsi_iser_
 	iser_task_set_status(p_ctask,ISER_TASK_STATUS_STARTED);
 	iser_task_post_send_count_inc(p_ctask);
 
-	err = iser_start_send(p_send_dto);
+	err = iser_start_send(&p_ctask->desc);
 	if (err) {
 		iser_task_post_send_count_dec_and_test(p_ctask);
 		goto send_command_error;
@@ -376,6 +360,7 @@ int iser_send_data_out(struct iscsi_iser
 		       struct iscsi_iser_cmd_task *p_ctask,
 		       struct iscsi_data *hdr)
 {
+	struct iser_desc *tx_desc = NULL;
 	struct iser_dto *p_send_dto = NULL;
 	unsigned long buf_offset;
 	unsigned long data_seg_len;
@@ -394,24 +379,28 @@ int iser_send_data_out(struct iscsi_iser
 	iser_dbg("%s itt %d dseg_len %d offset %d\n",
 		 __func__,(int)itt,(int)data_seg_len,(int)buf_offset);
 
-	/* Allocate send DTO descriptor, headers buf and add it to the DTO */
-	p_send_dto = iser_dto_send_create(p_iser_conn,
-					  (struct iscsi_hdr *)hdr, NULL);
-	if (p_send_dto == NULL) {
-		iser_err("Failed to create send DTO, conn:0x%p\n", p_iser_conn);
+	tx_desc = kmem_cache_alloc(ig.desc_cache, GFP_KERNEL | __GFP_NOFAIL);
+	if(tx_desc == NULL) {
+		iser_err("Failed to alloc desc for post dataout\n");
 		err = -ENOMEM;
 		goto send_data_out_error;
 	}
 
+	tx_desc->type = ISCSI_TX_DATAOUT;
+	memcpy(&tx_desc->iscsi_header, hdr, sizeof(struct iscsi_hdr));
+
+	/* build the tx desc regd header and add it to the tx desc dto */
+	p_send_dto = &tx_desc->dto;
+	p_send_dto->p_task = p_ctask;
+	iser_dto_send_create(p_iser_conn, tx_desc);
+
 	/* DMA_MAP: safe to dma_map now - map and flush the cache */
 	iser_reg_single(p_iser_conn->ib_conn->p_adaptor,
 			p_send_dto->regd[0], DMA_TO_DEVICE);
 
-	p_send_dto->p_task = p_ctask;
-
 	/* all data was registered for RDMA, we can use the lkey */
 	iser_dto_add_regd_buff(p_send_dto,
-			       p_ctask->rdma_regd[ISER_DIR_OUT],
+			       &p_ctask->rdma_regd[ISER_DIR_OUT],
 			       USE_OFFSET(buf_offset),
 			       USE_SIZE(data_seg_len));
 
@@ -428,7 +417,7 @@ int iser_send_data_out(struct iscsi_iser
 
 	iser_task_post_send_count_inc(p_ctask);
 
-	err = iser_start_send(p_send_dto);
+	err = iser_start_send(tx_desc);
 	if (err) {
 		iser_task_post_send_count_dec_and_test(p_ctask);
 		goto send_data_out_error;
@@ -439,6 +428,9 @@ int iser_send_data_out(struct iscsi_iser
 send_data_out_error:
 	if (p_send_dto != NULL)
 		iser_dto_free(p_send_dto);
+	if (tx_desc != NULL)
+		kmem_cache_free(ig.desc_cache, tx_desc);
+
 	if (p_iser_conn != NULL) {
 		/* drop the conn, open tasks are deleted during shutdown */
 		iser_err("send dout failed, drop conn:0x%p\n", p_iser_conn);
@@ -463,20 +455,19 @@ int iser_send_control(struct iscsi_iser_
 		return -EPERM;
 	}
 
-	/* Allocate send DTO descriptor, headers buf and add it to the DTO */
-	p_send_dto = iser_dto_send_create(p_iser_conn, &p_mtask->hdr, NULL);
-	if (p_send_dto == NULL) {
-		iser_err("Failed to create send DTO, conn: 0x%p\n",p_iser_conn);
-		err = -ENOMEM;
-		goto send_control_error;
-	}
+	/* build the tx desc regd header and add it to the tx desc dto */
+	p_mtask->desc.type = ISCSI_TX_CONTROL;
+	p_send_dto = &p_mtask->desc.dto;
+	p_send_dto->p_task = NULL;
+	iser_dto_send_create(p_iser_conn, &p_mtask->desc);
+
 	p_iser_adaptor = p_iser_conn->ib_conn->p_adaptor;
 
 	/* DMA_MAP: safe to dma_map now - map and flush the cache */
 	iser_reg_single(p_iser_adaptor, p_send_dto->regd[0], DMA_TO_DEVICE);
 
-	itt = ntohl(p_mtask->hdr.itt);
-	opcode = p_mtask->hdr.opcode & ISCSI_OPCODE_MASK;
+	itt = ntohl(p_mtask->hdr->itt);
+	opcode = p_mtask->hdr->opcode & ISCSI_OPCODE_MASK;
 
 	/* no need to copy when there's data b/c the mtask is not reallocated *
 	 * till the response related to this ITT is received	              */
@@ -488,14 +479,10 @@ int iser_send_control(struct iscsi_iser_
 	case ISCSI_OP_LOGIN:
 	case ISCSI_OP_TEXT:
 	case ISCSI_OP_LOGOUT:
-		data_seg_len = ntoh24(p_mtask->hdr.dlength);
+		data_seg_len = ntoh24(p_mtask->hdr->dlength);
 		if (data_seg_len > 0) {
-			p_regd_buf = iser_regd_buf_alloc();
-			if (p_regd_buf == NULL) {
-				iser_err("Failed to alloc regd buffer\n");
-				err = -ENOMEM;
-				goto send_control_error;
-			}
+			p_regd_buf = &p_mtask->desc.data_regd_buf;
+			memset(p_regd_buf, 0, sizeof(struct iser_regd_buf));
 			p_regd_buf->p_adaptor = p_iser_adaptor;
 			p_regd_buf->virt_addr = p_mtask->data;
 			p_regd_buf->data_size = p_mtask->data_count;
@@ -520,7 +507,7 @@ int iser_send_control(struct iscsi_iser_
 		goto send_control_error;
 	}
 
-	err = iser_start_send(p_send_dto);
+	err = iser_start_send(&p_mtask->desc);
 	if (err) goto send_control_error;
 	return 0;
 
@@ -538,21 +525,30 @@ send_control_error:
 /**
  * iser_rcv_dto_completion - recv DTO completion
  */
-void iser_rcv_dto_completion(struct iser_dto *p_dto,
-			     unsigned long dto_xfer_len)
+void iser_rcv_completion(struct iser_desc *p_rx_desc,
+			 unsigned long dto_xfer_len)
 {
 	struct iscsi_iser_session *p_session;
+	struct iser_dto        *p_dto = &p_rx_desc->dto;
 	struct iscsi_iser_conn *p_iser_conn = p_dto->p_conn;
 	struct iscsi_iser_cmd_task *p_iser_task = NULL;
 	struct iscsi_hdr *p_hdr;
-	char   *rx_data;
+	char   *rx_data = NULL;
 	int     rc, rx_data_size = 0;
 	unsigned int itt;
 	unsigned char opcode;
 	int no_more_task_sends = 0;
 
-	iser_dto_get_rx_pdu_data(p_dto, dto_xfer_len,
-				 &p_hdr, &rx_data, &rx_data_size);
+	p_hdr = &p_rx_desc->iscsi_header;
+
+	iser_dbg("op 0x%x itt 0x%x\n", p_hdr->opcode,p_hdr->itt);
+
+	if (dto_xfer_len > ISER_TOTAL_HEADERS_LEN) { /* we have data */
+		rx_data_size = dto_xfer_len - ISER_TOTAL_HEADERS_LEN;
+		rx_data      = p_dto->regd[1]->virt_addr;
+		rx_data     += p_dto->offset[1];
+	}
+
 	opcode = p_hdr->opcode & ISCSI_OPCODE_MASK;
 
 	/* FIXME - "task" handles for non cmds */
@@ -607,6 +603,8 @@ void iser_rcv_dto_completion(struct iser
 	}
 
 	iser_dto_free(p_dto);
+	kfree(p_rx_desc->data);
+	kmem_cache_free(ig.desc_cache, p_rx_desc);
 
 	/* decrementing conn->post_recv_buf_count only --after-- freeing the   *
 	 * task eliminates the need to worry on tasks which are completed in   *
@@ -615,22 +613,24 @@ void iser_rcv_dto_completion(struct iser
 	atomic_dec(&p_iser_conn->post_recv_buf_count);
 }
 
-void iser_snd_dto_completion(struct iser_dto *p_dto)
+void iser_snd_completion(struct iser_desc *p_tx_desc)
 {
+	struct iser_dto        *p_dto = &p_tx_desc->dto;
 	struct iscsi_iser_conn *p_iser_conn = p_dto->p_conn;
-	struct iscsi_iser_cmd_task *p_iser_task = NULL;
 
 	iser_dbg("Initiator, Data sent p_dto=0x%p\n", p_dto);
 
-	p_iser_task = p_dto->p_task;
-
 	iser_dto_free(p_dto);
+
+	if(p_tx_desc->type == ISCSI_TX_DATAOUT)
+		kmem_cache_free(ig.desc_cache, p_tx_desc);
+
 	atomic_dec(&p_iser_conn->post_send_buf_count);
 
 	/* if the last sent PDU of the task, task can be freed */
-	if (p_iser_task != NULL &&
-	    iser_task_post_send_count_dec_and_test(p_iser_task))
-		iser_task_finalize_lowpart(p_iser_task);
+	if (p_dto->p_task != NULL &&
+	    iser_task_post_send_count_dec_and_test(p_dto->p_task))
+		iser_task_finalize_lowpart(p_dto->p_task);
 }
 
 static void iser_dma_unmap_task_data(struct iscsi_iser_cmd_task *p_iser_task)
Index: ulp/iser/iser_dto.c
===================================================================
--- ulp/iser/iser_dto.c	(revision 5180)
+++ ulp/iser/iser_dto.c	(working copy)
@@ -39,15 +39,6 @@
 
 #include "iscsi_iser.h"
 
-void iser_dto_init(struct iser_dto *p_dto)
-{
-	p_dto->p_task = NULL;
-	p_dto->p_conn = NULL;
-	p_dto->type = ISER_DTO_PASSIVE;
-	p_dto->notify_enable = 0;
-	p_dto->regd_vector_len = 0;
-}
-
 /**
  * iser_dto_add_regd_buff - Increments the reference count for the registered
  *	buffer & adds it to the DTO object
@@ -94,14 +85,6 @@ void iser_dto_buffs_release(struct iser_
 void iser_dto_free(struct iser_dto *p_dto)
 {
 	iser_dto_buffs_release(p_dto);
-
-	if (p_dto->type == ISER_DTO_RCV || p_dto->type == ISER_DTO_SEND) {
-		iser_dbg("Release %s dto desc.: 0x%p\n",
-			 p_dto->type == ISER_DTO_RCV ? "RECV" : "SEND",
-			 p_dto);
-		kmem_cache_free(ig.dto_cache, p_dto);
-	} else
-		iser_bug("Unexpected type:%d, dto:0x%p\n",p_dto->type,p_dto);
 }
 
 /**
@@ -109,11 +92,11 @@ void iser_dto_free(struct iser_dto *p_dt
  *
  * returns 0 on success, -1 on failure
  */
-int iser_dto_completion_error(struct iser_dto *p_dto)
+int iser_dto_completion_error(struct iser_desc *p_desc)
 {
 	struct iscsi_iser_conn *p_iser_conn;
 	int err;
-	enum iser_dto_type dto_type = p_dto->type;
+	struct iser_dto   *p_dto    = &p_desc->dto;
 
 	p_iser_conn = p_dto->p_conn;
 	if (p_iser_conn == NULL)
@@ -121,12 +104,16 @@ int iser_dto_completion_error(struct ise
 
 	iser_dto_free(p_dto);
 
-	if (dto_type == ISER_DTO_RCV)
+	if(p_desc->type == ISCSI_RX) {
+		kfree(p_desc->data);
+		kmem_cache_free(ig.desc_cache, p_desc);
 		atomic_dec(&p_iser_conn->post_recv_buf_count);
-	else if (dto_type == ISER_DTO_SEND)
+	}
+	else { /* type is TX control/command/dataout */
+		if(p_desc->type == ISCSI_TX_DATAOUT)
+			kmem_cache_free(ig.desc_cache, p_desc);
 		atomic_dec(&p_iser_conn->post_send_buf_count);
-	else
-		iser_bug("Unknown DTO type:%d\n", p_dto->type);
+	}
 
 	err = iser_complete_conn_termination(p_iser_conn);
 
@@ -135,76 +122,30 @@ int iser_dto_completion_error(struct ise
 
 /* iser_dto_get_rx_pdu_data - gets received PDU descriptor & data from rx DTO */
 
-void iser_dto_get_rx_pdu_data(struct iser_dto *p_dto, unsigned long dto_xfer_len,
-			      struct iscsi_hdr **p_hdr,
-			      char **rx_data, int *rx_data_size)
-{
-	unsigned char *p_recv_buf;
-
-	if (dto_xfer_len < ISER_TOTAL_HEADERS_LEN)
-		iser_bug("Recvd data size:%ld less than iSER headers\n",
-			 dto_xfer_len);
-	if (p_dto->regd_vector_len != 2)
-		iser_bug("Recvd data IOV len:%d != 2\n",
-			 p_dto->regd_vector_len);
-	/* Get the header memory */
-	p_recv_buf = (unsigned char *)p_dto->regd[0]->virt_addr;
-	p_recv_buf += p_dto->offset[0];
-	/* Skip the iSER header to get the iSCSI PDU BHS */
-	*p_hdr = (struct iscsi_hdr *)(p_recv_buf + ISER_HDR_LEN);
-
-	if (dto_xfer_len > ISER_TOTAL_HEADERS_LEN) { /* we have data */
-		*rx_data  = p_dto->regd[1]->virt_addr;
-		*rx_data += p_dto->offset[1];
-		*rx_data_size = dto_xfer_len - ISER_TOTAL_HEADERS_LEN;
-	}
-}
-
 /**
  * Creates a new send DTO descriptor,
  * adds header regd buffer
  *
  */
-struct iser_dto *iser_dto_send_create(struct iscsi_iser_conn *p_iser_conn,
-				      struct iscsi_hdr *hdr,
-				      unsigned char **p_header)
+void iser_dto_send_create(struct iscsi_iser_conn *p_iser_conn,
+			  struct iser_desc       *tx_desc)
 {
-	struct iser_regd_buf *p_regd_hdr = NULL;
-	struct iser_dto *p_send_dto = NULL;
-	unsigned char *p_iser_header = NULL;
-
-	p_send_dto = kmem_cache_alloc(ig.dto_cache,GFP_KERNEL | __GFP_NOFAIL);
-	if (p_send_dto == NULL) {
-		iser_err("allocation of send DTO descriptor failed\n");
-		goto dto_send_create_exit;
-	}
-	/* setup send dto */
-	iser_dto_init(p_send_dto);
-	p_send_dto->p_conn = p_iser_conn;
-	p_send_dto->type = ISER_DTO_SEND;
-	p_send_dto->notify_enable = 1;
-
-	p_regd_hdr = iser_regd_mem_alloc(p_iser_conn->ib_conn->p_adaptor,
-					 ig.header_cache,
-					 ISER_TOTAL_HEADERS_LEN);
-	if (p_regd_hdr == NULL) {
-		iser_err("failed to allocate regd header\n");
-		kmem_cache_free(ig.dto_cache, p_send_dto);
-		p_send_dto = NULL;
-		goto dto_send_create_exit;
-	}
+	struct iser_regd_buf *p_regd_hdr = &tx_desc->hdr_regd_buf;
+	struct iser_dto      *p_send_dto = &tx_desc->dto;
+
+	memset(p_regd_hdr, 0, sizeof(struct iser_regd_buf));
+	p_regd_hdr->p_adaptor  = p_iser_conn->ib_conn->p_adaptor;
+	p_regd_hdr->virt_addr  = tx_desc; /* == &tx_desc->iser_header */
+	p_regd_hdr->data_size  = ISER_TOTAL_HEADERS_LEN;
+
+	p_send_dto->p_conn          = p_iser_conn;
+	p_send_dto->notify_enable   = 1;
+	p_send_dto->regd_vector_len = 0;
+
+	memset(&tx_desc->iser_header, 0, ISER_HDR_LEN);
+	tx_desc->iser_header.flags = ISER_VER;
 
-	/* setup iSER Header */
-	p_iser_header = (unsigned char *)p_regd_hdr->virt_addr;
-	memset(p_iser_header, 0, ISER_HDR_LEN);
-
-	((struct iser_hdr *)p_iser_header)->flags = ISER_VER;
-
-	memcpy(p_iser_header + ISER_HDR_LEN, hdr, ISER_PDU_BHS_LENGTH);
-	iser_dto_add_regd_buff(p_send_dto, p_regd_hdr, USE_NO_OFFSET,
-			       USE_SIZE(ISER_TOTAL_HEADERS_LEN));
- dto_send_create_exit:
-	if (p_header != NULL) *p_header = p_iser_header;
-	return p_send_dto;
+	iser_dto_add_regd_buff(p_send_dto, p_regd_hdr,
+			       USE_NO_OFFSET, USE_ENTIRE_SIZE);
 }
 
Index: ulp/iser/iser_memory.c
===================================================================
--- ulp/iser/iser_memory.c	(revision 5180)
+++ ulp/iser/iser_memory.c	(working copy)
@@ -51,54 +51,6 @@ iser_page_to_virt(struct page *page)
 }
 
 /**
- * iser_regd_buf_alloc - allocates a blank registered buffer descriptor
- *
- * returns the registered buffer descriptor
- */
-struct iser_regd_buf *iser_regd_buf_alloc(void)
-{
-	struct iser_regd_buf *p_regd_buf;
-
-	p_regd_buf = (struct iser_regd_buf *)kmem_cache_alloc(
-						ig.regd_buf_cache,
-						GFP_KERNEL | __GFP_NOFAIL);
-	if (p_regd_buf != NULL)
-		memset(p_regd_buf, 0, sizeof(struct iser_regd_buf));
-
-	return p_regd_buf;
-}
-
-/**
- * iser_regd_mem_alloc - allocates memory and creates a registered buffer
- *
- * returns the registered buffer
- */
-struct iser_regd_buf *iser_regd_mem_alloc(struct iser_adaptor *p_iser_adaptor,
-					  kmem_cache_t *cache,
-					  int data_size)
-{
-	struct iser_regd_buf *p_regd_buf;
-	void *data;
-
-	p_regd_buf = iser_regd_buf_alloc();
-	if (p_regd_buf != NULL) {
-		data = (void *) kmem_cache_alloc(cache,
-						 GFP_KERNEL | __GFP_NOFAIL);
-		if (data == NULL) {
-			kmem_cache_free(ig.regd_buf_cache, p_regd_buf);
-			return NULL;
-		}
-		p_regd_buf->data_cache = cache;
-		p_regd_buf->p_adaptor  = p_iser_adaptor;
-		p_regd_buf->virt_addr  = data;
-		p_regd_buf->data_size  = data_size;
-		/* not here as it is not safe (the data might be touched later */
-		/* iser_reg_single(p_iser_adaptor, p_regd_buf, data, data_size, dir); */
-	}
-	return p_regd_buf;
-}
-
-/**
  * iser_regd_buff_ref - Increments the reference count of a
  *	registered buffer
  *
@@ -160,14 +112,6 @@ int iser_regd_buff_release(struct iser_r
 				  p_regd_buf->direction);
 		/* else this regd buf is associated with task which we */
 		/* dma_unmap_single/sg later */
-
-		if (p_regd_buf->data_cache != NULL) {
-			iser_dbg("releasing regd_buf data=0x%p (count = 0)\n",
-				 p_regd_buf->virt_addr);
-			kmem_cache_free(p_regd_buf->data_cache,
-					p_regd_buf->virt_addr);
-		}
-		kmem_cache_free(ig.regd_buf_cache, p_regd_buf);
 		return 0;
 	} else {
 		iser_dbg("Release deferred, regd.buff: 0x%p\n", p_regd_buf);
@@ -197,8 +141,6 @@ void iser_reg_single(struct iser_adaptor
 	p_regd_buf->reg.va   = dma_addr;
 
 	p_regd_buf->dma_addr  = dma_addr;
-	/* p_regd_buf->virt_addr = virt_addr; */
-	/* p_regd_buf->data_size = data_size; */
 	p_regd_buf->direction = direction;
 }
 
Index: ulp/iser/iscsi_iser.c
===================================================================
--- ulp/iser/iscsi_iser.c	(revision 5180)
+++ ulp/iser/iscsi_iser.c	(working copy)
@@ -78,8 +78,6 @@
 static unsigned int iscsi_max_lun = 512;
 module_param_named(max_lun, iscsi_max_lun, uint, S_IRUGO);
 
-static kmem_cache_t            *task_mem_cache;
-
 /**
  * iscsi_iser_cmd_init - Initialize iSCSI SCSI_READ or SCSI_WRITE commands
  *
@@ -92,16 +90,17 @@ static void iscsi_iser_cmd_init(struct i
 
 	ctask->sc = sc;
 	ctask->conn = conn;
-	ctask->hdr.opcode = ISCSI_OP_SCSI_CMD;
-	ctask->hdr.flags = ISCSI_ATTR_SIMPLE;
-	ctask->hdr.lun[1] = sc->device->lun;
-	ctask->hdr.itt = ctask->itt | (conn->id << CID_SHIFT) |
+
+	ctask->hdr->opcode = ISCSI_OP_SCSI_CMD;
+	ctask->hdr->flags = ISCSI_ATTR_SIMPLE;
+	ctask->hdr->lun[1] = sc->device->lun;
+	ctask->hdr->itt = ctask->itt | (conn->id << CID_SHIFT) |
 			 (session->age << AGE_SHIFT);
-	ctask->hdr.data_length = cpu_to_be32(sc->request_bufflen);
-	ctask->hdr.cmdsn = cpu_to_be32(session->cmdsn); session->cmdsn++;
-	ctask->hdr.exp_statsn = cpu_to_be32(conn->exp_statsn);
-	memcpy(ctask->hdr.cdb, sc->cmnd, sc->cmd_len);
-	memset(&ctask->hdr.cdb[sc->cmd_len], 0,
+	ctask->hdr->data_length = cpu_to_be32(sc->request_bufflen);
+	ctask->hdr->cmdsn = cpu_to_be32(session->cmdsn); session->cmdsn++;
+	ctask->hdr->exp_statsn = cpu_to_be32(conn->exp_statsn);
+	memcpy(ctask->hdr->cdb, sc->cmnd, sc->cmd_len);
+	memset(&ctask->hdr->cdb[sc->cmd_len], 0,
 	       MAX_COMMAND_SIZE - sc->cmd_len);
 
 	ctask->mtask = NULL;
@@ -111,7 +110,7 @@ static void iscsi_iser_cmd_init(struct i
 	ctask->total_length = sc->request_bufflen;
 
 	if (sc->sc_data_direction == DMA_TO_DEVICE) {
-		ctask->hdr.flags |= ISCSI_FLAG_CMD_WRITE;
+		ctask->hdr->flags |= ISCSI_FLAG_CMD_WRITE;
 		BUG_ON(ctask->total_length == 0);
 
 		/* unsolicited bytes to be sent as imm. data - with cmd pdu */
@@ -127,16 +126,16 @@ static void iscsi_iser_cmd_init(struct i
 			else
 				ctask->imm_count = min(ctask->total_length,
 						       conn->max_xmit_dlength);
-			hton24(ctask->hdr.dlength, ctask->imm_count);
+			hton24(ctask->hdr->dlength, ctask->imm_count);
 		} else
-			zero_data(ctask->hdr.dlength);
+			zero_data(ctask->hdr->dlength);
 
 		if (!session->initial_r2t_en)
 			ctask->unsol_count = min(session->first_burst,
 				ctask->total_length) - ctask->imm_count;
 		if (!ctask->unsol_count)
 			/* No unsolicit Data-Out's */
-			ctask->hdr.flags |= ISCSI_FLAG_CMD_FINAL;
+			ctask->hdr->flags |= ISCSI_FLAG_CMD_FINAL;
 		/*else
 			ctask->xmstate |= XMSTATE_UNS_HDR | XMSTATE_UNS_INIT;*/
 
@@ -150,11 +149,12 @@ static void iscsi_iser_cmd_init(struct i
 			   ctask->itt, ctask->total_length, ctask->imm_count,
 			   ctask->unsol_count, ctask->rdma_data_count);
 	} else {
-		ctask->hdr.flags |= ISCSI_FLAG_CMD_FINAL;
+		ctask->hdr->flags |= ISCSI_FLAG_CMD_FINAL;
 		if (sc->sc_data_direction == DMA_FROM_DEVICE)
-			ctask->hdr.flags |= ISCSI_FLAG_CMD_READ;
+			ctask->hdr->flags |= ISCSI_FLAG_CMD_READ;
 		ctask->datasn = 0;
-		zero_data(ctask->hdr.dlength);
+		zero_data(ctask->hdr->dlength);
+		ctask->rdma_data_count = ctask->total_length;
 	}
 }
 
@@ -207,24 +207,17 @@ iscsi_iser_conn_failure(struct iscsi_ise
 }
 
 static void iscsi_iser_unsolicit_data_init(struct iscsi_iser_conn *conn,
-					   struct iscsi_iser_cmd_task *ctask)
+					   struct iscsi_iser_cmd_task *ctask,
+					   struct iscsi_data  *hdr)
 {
-	struct iscsi_data *hdr;
-	struct iscsi_iser_data_task *dtask;
-
-	dtask = mempool_alloc(ctask->datapool, GFP_ATOMIC);
-
-	BUG_ON(!dtask);
-	hdr = &dtask->hdr;
-
 	memset(hdr, 0, sizeof(struct iscsi_data));
 	hdr->ttt = cpu_to_be32(ISCSI_RESERVED_TAG);
 	hdr->datasn = cpu_to_be32(ctask->unsol_datasn);
 	ctask->unsol_datasn++;
 	hdr->opcode = ISCSI_OP_SCSI_DATA_OUT;
-	memcpy(hdr->lun, ctask->hdr.lun, sizeof(hdr->lun));
+	memcpy(hdr->lun, ctask->hdr->lun, sizeof(hdr->lun));
 
-	hdr->itt = ctask->hdr.itt;
+	hdr->itt = ctask->hdr->itt;
 	hdr->exp_statsn = cpu_to_be32(conn->exp_statsn);
 
 	hdr->offset = cpu_to_be32(ctask->total_length -
@@ -240,31 +233,25 @@ static void iscsi_iser_unsolicit_data_in
 		ctask->data_count = ctask->unsol_count;
 		hdr->flags = ISCSI_FLAG_CMD_FINAL;
 	}
-
-	list_add(&dtask->item, &ctask->dataqueue);
-
-	ctask->dtask = dtask;
 }
 
 static int iscsi_iser_ctask_xmit_unsol_data(struct iscsi_iser_conn *conn,
 					    struct iscsi_iser_cmd_task *ctask)
 {
-	struct iscsi_iser_data_task *dtask = NULL;
+	struct iscsi_data  hdr;
 	int error = 0;
 
 	debug_iser("%s: enter\n", __FUNCTION__);
 	/* Send data-out PDUs while there's still unsolicited data to send */
 	while (ctask->unsol_count > 0) {
-		iscsi_iser_unsolicit_data_init(conn, ctask);
-
-		dtask = ctask->dtask;
+		iscsi_iser_unsolicit_data_init(conn, ctask, &hdr);
 
 		debug_scsi("Sending data-out: itt 0x%x, data count %d\n",
-			   dtask->hdr.itt, ctask->data_count);
+			   hdr.itt, ctask->data_count);
 
 		/* the buffer description has been passed with the command */
 		/* Send the command */
-		error = iser_send_data_out(conn, ctask, &dtask->hdr);
+		error = iser_send_data_out(conn, ctask, &hdr);
 		if (error) {
 			printk(KERN_ERR "send_data_out failed\n");
 			goto iscsi_iser_ctask_xmit_unsol_data_exit;
@@ -365,7 +352,7 @@ static int iscsi_iser_data_xmit(struct i
 			if (iscsi_iser_mtask_xmit(conn, conn->mtask))
 				goto iscsi_iser_data_xmit_fail;
 
-			if (conn->mtask->hdr.itt ==
+			if (conn->mtask->hdr->itt ==
 			    cpu_to_be32(ISCSI_RESERVED_TAG)) {
 				spin_lock_bh(&session->lock);
 				__kfifo_put(session->mgmtpool.queue,
@@ -396,7 +383,7 @@ static int iscsi_iser_data_xmit(struct i
 			if (iscsi_iser_mtask_xmit(conn, conn->mtask))
 				goto iscsi_iser_data_xmit_fail;
 
-			if (conn->mtask->hdr.itt ==
+			if (conn->mtask->hdr->itt ==
 			    cpu_to_be32(ISCSI_RESERVED_TAG)) {
 				spin_lock_bh(&session->lock);
 				__kfifo_put(session->mgmtpool.queue,
@@ -566,7 +553,7 @@ static int iscsi_iser_conn_send_generic(
 
 	nop->exp_statsn = cpu_to_be32(conn->exp_statsn);
 
-	memcpy(&mtask->hdr, hdr, sizeof(struct iscsi_hdr));
+	memcpy(mtask->hdr, hdr, sizeof(struct iscsi_hdr));
 
 	spin_unlock_bh(&session->lock);
 
@@ -642,14 +629,6 @@ static inline void iscsi_iser_ctask_clea
 		spin_unlock(&session->lock);
 		return;
 	}
-	if (sc->sc_data_direction == DMA_TO_DEVICE) {
-		struct iscsi_iser_data_task *dtask, *n;
-		list_for_each_entry_safe(dtask, n, &ctask->dataqueue, item) {
-			list_del(&dtask->item);
-			mempool_free(dtask, ctask->datapool);
-		}
-	}
-
 	ctask->sc = NULL;
 	__kfifo_put(session->cmdpool.queue, (void*)&ctask, sizeof(void*));
 	spin_unlock(&session->lock);
@@ -720,9 +699,12 @@ static int iscsi_iser_eh_abort(struct sc
 		hdr->opcode = ISCSI_OP_SCSI_TMFUNC | ISCSI_OP_IMMEDIATE;
 		hdr->flags = ISCSI_TM_FUNC_ABORT_TASK;
 		hdr->flags |= ISCSI_FLAG_CMD_FINAL;
-		memcpy(hdr->lun, ctask->hdr.lun, sizeof(hdr->lun));
-		hdr->rtt = ctask->hdr.itt;
-		hdr->refcmdsn = ctask->hdr.cmdsn;
+		memcpy(hdr->lun, ctask->hdr->lun, sizeof(hdr->lun));
+		hdr->rtt = ctask->hdr->itt;
+		hdr->refcmdsn = ctask->hdr->cmdsn;
+
+		iser_err("op 0x%x aborting rtt 0x%x itt 0x%x dlength %d\n",
+		         hdr->opcode, hdr->rtt, hdr->itt, ntoh24(hdr->dlength));
 
 		debug_iser("%s: calling iscsi_iser_conn_send_generic (task mgmt)\n", __FUNCTION__);
 		rc = iscsi_iser_conn_send_generic(iscsi_handle(conn), (struct iscsi_hdr *)hdr,
@@ -953,39 +935,11 @@ static void iscsi_iser_pool_free(struct 
 	kfree(items);
 }
 
-static int iscsi_iser_dout_pool_alloc(struct iscsi_iser_session *session)
-{
-	int i;
-	int cmd_i;
-
-	for (cmd_i = 0; cmd_i < session->cmds_max; cmd_i++) {
-		struct iscsi_iser_cmd_task *ctask = session->cmds[cmd_i];
-
-		ctask->datapool = mempool_create(ISCSI_DTASK_DEFAULT_MAX,
-						 mempool_alloc_slab,
-						 mempool_free_slab,
-						 task_mem_cache);
-		if (ctask->datapool == NULL) {
-			goto dout_alloc_fail;
-		}
-
-		INIT_LIST_HEAD(&ctask->dataqueue);
-	}
-
-	return 0;
-
-dout_alloc_fail:
-	for (i = 0; i < cmd_i; i++) {
-		mempool_destroy(session->cmds[i]->datapool);
-	}
-	return -ENOMEM;
-}
-
 static iscsi_sessionh_t iscsi_iser_session_create(uint32_t initial_cmdsn,
 						  struct Scsi_Host *host)
 {
 	struct iscsi_iser_session *session = NULL;
-	int cmd_i;
+	int cmd_i, mgmt_i, j;
 
 	session = iscsi_hostdata(host->hostdata);
 	memset(session, 0, sizeof(struct iscsi_iser_session));
@@ -1007,9 +961,13 @@ static iscsi_sessionh_t iscsi_iser_sessi
 	}
 
 	/* pre-format cmds pool with ITT */
-	for (cmd_i = 0; cmd_i < session->cmds_max; cmd_i++)
+	for (cmd_i = 0; cmd_i < session->cmds_max; cmd_i++) {
 		session->cmds[cmd_i]->itt = cmd_i;
 
+		session->cmds[cmd_i]->hdr  = (struct iscsi_cmd *)
+			&session->cmds[cmd_i]->desc.iscsi_header;
+	}
+
 	spin_lock_init(&session->lock);
 	INIT_LIST_HEAD(&session->connections);
 
@@ -1022,30 +980,32 @@ static iscsi_sessionh_t iscsi_iser_sessi
 	}
 
 	/* pre-format immediate cmds pool with ITT */
-	for (cmd_i = 0; cmd_i < session->mgmtpool_max; cmd_i++) {
-		session->mgmt_cmds[cmd_i]->itt = ISCSI_MGMT_ITT_OFFSET + cmd_i;
-		session->mgmt_cmds[cmd_i]->data =
+	for (mgmt_i = 0; mgmt_i < session->mgmtpool_max; mgmt_i++) {
+		session->mgmt_cmds[mgmt_i]->itt = ISCSI_MGMT_ITT_OFFSET + mgmt_i;
+
+		session->mgmt_cmds[mgmt_i]->hdr  =
+			&session->mgmt_cmds[mgmt_i]->desc.iscsi_header;
+
+		/* FIXME need to ensure this is HW cache start/end aligned  */
+		session->mgmt_cmds[mgmt_i]->desc.data =
 			kmalloc(DEFAULT_MAX_RECV_DATA_SEGMENT_LENGTH,
 				GFP_KERNEL);
-		if (!session->mgmt_cmds[cmd_i]->data) {
-			int j;
-			for (j = 0; j < cmd_i; j++)
-				kfree(session->mgmt_cmds[j]->data);
+
+		if (!session->mgmt_cmds[mgmt_i]->desc.data) {
 			debug_iser("mgmt data allocation failed\n");
 			goto immdata_alloc_fail;
 		}
-	}
 
-	if (iscsi_iser_dout_pool_alloc(session))
-		goto dout_alloc_fail;
+		session->mgmt_cmds[mgmt_i]->data =
+			session->mgmt_cmds[mgmt_i]->desc.data;
+	}
 
 	return iscsi_handle(session);
 
-dout_alloc_fail:
-	for (cmd_i = 0; cmd_i < session->mgmtpool_max; cmd_i++)
-		kfree(session->mgmt_cmds[cmd_i]->data);
-	iscsi_iser_pool_free(&session->mgmtpool, (void**)session->mgmt_cmds);
 immdata_alloc_fail:
+	for (j = 0; j < mgmt_i; j++)
+		kfree(session->mgmt_cmds[j]->desc.data);
+	iscsi_iser_pool_free(&session->mgmtpool, (void**)session->mgmt_cmds);
 mgmtpool_alloc_fail:
 	iscsi_iser_pool_free(&session->cmdpool, (void**)session->cmds);
 cmdpool_alloc_fail:
@@ -1054,28 +1014,16 @@ cmdpool_alloc_fail:
 
 static void iscsi_iser_session_destroy(iscsi_sessionh_t sessionh)
 {
-	int cmd_i;
-	struct iscsi_iser_data_task *dtask, *n;
+	int mgmt_i;
 	struct iscsi_iser_session *session = iscsi_ptr(sessionh);
 
 	debug_iser("%s: enter\n", __FUNCTION__);
 
-	for (cmd_i = 0; cmd_i < session->cmds_max; cmd_i++) {
-		struct iscsi_iser_cmd_task *ctask = session->cmds[cmd_i];
-		list_for_each_entry_safe(dtask, n, &ctask->dataqueue, item) {
-			list_del(&dtask->item);
-			mempool_free(dtask, ctask->datapool);
-		}
-	}
-
-	for (cmd_i = 0; cmd_i < session->mgmtpool_max; cmd_i++)
-		kfree(session->mgmt_cmds[cmd_i]->data);
-
-	for (cmd_i = 0; cmd_i < session->cmds_max; cmd_i++) {
-		mempool_destroy(session->cmds[cmd_i]->datapool);
-	}
+	for (mgmt_i = 0; mgmt_i < session->mgmtpool_max; mgmt_i++)
+		kfree(session->mgmt_cmds[mgmt_i]->desc.data);
 
 	iscsi_iser_pool_free(&session->mgmtpool, (void**)session->mgmt_cmds);
+
 	iscsi_iser_pool_free(&session->cmdpool, (void**)session->cmds);
 
 	debug_iser("%s: exit\n", __FUNCTION__);
@@ -1583,31 +1531,6 @@ static struct iscsi_transport iscsi_iser
 	.send_pdu               = iscsi_iser_conn_send_pdu,
 };
 
-static int iscsi_iser_slabs_create(void)
-{
-	task_mem_cache = kmem_cache_create("iscsi_iser_task",
-					   sizeof(struct iscsi_iser_data_task),
-					   0,
-					   SLAB_HWCACHE_ALIGN | SLAB_NO_REAP,
-					   NULL, NULL);
-	if (task_mem_cache == NULL) {
-		printk(KERN_ERR "Failed to create iscsi_iser_task slab\n");
-		return 1;
-	}
-	return 0;
-}
-
-static void iscsi_iser_slabs_destroy(void)
-{
-	if (task_mem_cache != NULL) {
-		if (kmem_cache_destroy(task_mem_cache) != 0) {
-			printk(KERN_ERR "Failed to destroy task_mem_cache\n");
-			return;
-		}
-		task_mem_cache = NULL;
-	}
-}
-
 static inline int iscsi_iser_check_assign_cmdsn(
 					struct iscsi_iser_session *session,
 					struct iscsi_nopin *hdr)
@@ -1881,13 +1804,9 @@ int iscsi_iser_init(void)
 	}
 	iscsi_iser_transport.max_lun = iscsi_max_lun;
 
-	if (iscsi_iser_slabs_create())
-		return -ENOMEM;
-
 	error = iscsi_register_transport(&iscsi_iser_transport);
 	if (error) {
 		printk(KERN_ERR "iscsi_register_transport failed\n");
-		iscsi_iser_slabs_destroy();
 		return error;
 	}
 	return 0;
@@ -1896,6 +1815,5 @@ int iscsi_iser_init(void)
 void iscsi_iser_exit(void)
 {
 	iscsi_unregister_transport(&iscsi_iser_transport);
-	iscsi_iser_slabs_destroy();
 }
 



