[openib-general] [PATCH] iser: simplify handling of iscsi unsolicited data

Or Gerlitz ogerlitz at voltaire.com
Tue Jan 3 03:43:04 PST 2006


The patch below eliminates the special handling of memory used for iSCSI
unsolicited data writes; instead, all of the command data is registered
for RDMA. The descriptors holding the RDMA registration info are much
simpler: the rdma_read/write_dto and send/recv_buff_list fields were
removed from struct iscsi_iser_cmd_task and are replaced with an
rdma_regd array holding one registered buffer per data direction.

Signed-off-by: Alex Nezhinsky <alexn at voltaire.com>
Signed-off-by: Or Gerlitz <ogerlitz at voltaire.com>
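
For readers who want the idea without walking the whole diff, here is a
minimal, self-contained userspace sketch of the new scheme (the names
below are illustrative stand-ins, not the driver's actual structures or
API): the whole command buffer is registered once per data direction and
kept in rdma_regd[], and unsolicited Data-Out segments simply address
sub-ranges of that single registration by offset and size, so no
separate registration path is needed for unsolicited data.

#include <stdio.h>
#include <stdlib.h>

enum iser_dir { DIR_IN, DIR_OUT, DIRS_NUM };

struct regd_buf {                /* stand-in for struct iser_regd_buf       */
	unsigned long va;        /* start of the registered buffer          */
	unsigned long len;       /* registered length                       */
	int ref_count;           /* freed when it drops to zero             */
};

struct cmd_task {                /* stand-in for struct iscsi_iser_cmd_task */
	struct regd_buf *rdma_regd[DIRS_NUM];
};

/* register the whole command buffer once for the given direction */
static struct regd_buf *reg_rdma_mem(struct cmd_task *task, enum iser_dir dir,
				     unsigned long va, unsigned long len)
{
	struct regd_buf *rb = calloc(1, sizeof(*rb));

	if (!rb)
		return NULL;
	rb->va = va;
	rb->len = len;
	rb->ref_count = 1;	/* held until the task completes */
	task->rdma_regd[dir] = rb;
	return rb;
}

/* an unsolicited Data-Out segment is just a sub-range of the same buffer */
static void send_data_out(struct cmd_task *task, unsigned long offset,
			  unsigned long size)
{
	struct regd_buf *rb = task->rdma_regd[DIR_OUT];

	printf("Data-Out: va 0x%lx, %lu bytes (offset %lu of %lu)\n",
	       rb->va + offset, size, offset, rb->len);
}

int main(void)
{
	struct cmd_task task = { { NULL, NULL } };

	reg_rdma_mem(&task, DIR_OUT, 0x1000, 8192);
	send_data_out(&task, 0, 4096);		/* first unsolicited segment  */
	send_data_out(&task, 4096, 4096);	/* second unsolicited segment */
	return 0;
}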


Index: ulp/iser/iser_memory.h
===================================================================
--- ulp/iser/iser_memory.h	(revision 4622)
+++ ulp/iser/iser_memory.h	(working copy)
@@ -54,13 +54,6 @@ void iser_reg_single(struct iser_adaptor
 		     struct iser_regd_buf *p_regd_buf,
 		     enum dma_data_direction direction);
 
-void iser_reg_single_task(struct iser_adaptor *p_iser_adaptor,
-		     struct iser_regd_buf *p_regd_buf,
-		     void *virt_addr,
-		     dma_addr_t dma_addr,
-		     unsigned long data_size,
-		     enum dma_data_direction direction);
-
 /* scatterlist */
 int iser_sg_size(struct iser_data_buf *p_mem);
 
@@ -70,11 +63,6 @@ void iser_start_rdma_unaligned_sg(struct
 void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_cmd_task  *p_iser_task);
 
 /* iser_data_buf */
-unsigned int iser_data_buf_contig_len(struct iser_data_buf *p_data,
-				     int skip,
-				     dma_addr_t *chunk_dma_addr,
-				     int *chink_sz);
-
 unsigned int iser_data_buf_aligned_len(struct iser_data_buf *p_data,
 				      int skip);
 
Index: ulp/iser/iscsi_iser.h
===================================================================
--- ulp/iser/iscsi_iser.h	(revision 4622)
+++ ulp/iser/iscsi_iser.h	(working copy)
@@ -277,16 +277,8 @@ struct iscsi_iser_cmd_task {
 
 	unsigned int post_send_count; /* posted send buffers pending completion */
 
-	/* buffers, to release when the task is complete */
-	struct list_head send_buff_list;
-	struct list_head rcv_buff_list;
-	struct iser_dto  rdma_read_dto;
-	struct iser_dto  rdma_write_dto;
-
-	struct list_head conn_list;	/* Tasks list of the conn */
-	struct list_head hash_list;	/* Hash table bucket entry */
-
 	int dir[ISER_DIRS_NUM];	/* set if direction used */
+	struct iser_regd_buf *rdma_regd[ISER_DIRS_NUM];	/* regd rdma buffer */
 	unsigned long data_len[ISER_DIRS_NUM];	/* total data length */
 	struct iser_data_buf data[ISER_DIRS_NUM]; /* orig. data descriptor */
 	struct iser_data_buf data_copy[ISER_DIRS_NUM]; /* contig. copy */
Index: ulp/iser/iser.h
===================================================================
--- ulp/iser/iser.h	(revision 4622)
+++ ulp/iser/iser.h	(working copy)
@@ -63,16 +63,9 @@
 #define ISER_TOTAL_HEADERS_LEN			\
 	(ISER_HDR_LEN + ISER_PDU_BHS_LENGTH)
 
-/* Hash tables */
-#define HASH_TABLE_SIZE			    256
-
 /* Various size limits */
 #define ISER_LOGIN_PHASE_PDU_DATA_LEN	    (8*1024)	/* 8K */
 
-struct hash_table {
-	struct list_head bucket_head[HASH_TABLE_SIZE];
-	spinlock_t lock;
-};
 
 struct iser_page_vec {
 	u64 *pages;
@@ -99,9 +92,6 @@ struct iser_regd_buf {
 	enum dma_data_direction direction; /* direction for dma_unmap	      */
 	unsigned int data_size;
 
-
-	/* To be chained here, if freeing upon completion is signaled */
-	struct list_head free_upon_comp_list;
 	/* Reference count, memory freed when decremented to 0 */
 	atomic_t ref_count;
 };
@@ -149,8 +139,6 @@ struct iser_global {
 
 	kmem_cache_t *login_cache;
 	kmem_cache_t *header_cache;
-
-	struct hash_table task_hash;	/* hash table for tasks */
 };				/* iser_global */
 
 extern struct iser_global ig;
Index: ulp/iser/iser_dto.c
===================================================================
--- ulp/iser/iser_dto.c	(revision 4622)
+++ ulp/iser/iser_dto.c	(working copy)
@@ -79,152 +79,6 @@ int iser_dto_add_regd_buff(struct iser_d
 }
 
 /**
- * iser_dto_clone_regd_buffs - creates a dto (dst) which refers to a subrange
- *	of the memory referenced by another dto (src).
- */
-void iser_dto_clone_regd_buffs(struct iser_dto *p_dst,
-			       struct iser_dto *p_src,
-			       unsigned long offset,
-			       unsigned long size)
-{
-	unsigned long remaining_offset = offset;
-	unsigned long remaining_size = size;
-	unsigned long regd_buf_size;
-	unsigned long used_size;
-	int i;
-
-	for (i = 0; i < p_src->regd_vector_len; i++) {
-		regd_buf_size = p_src->used_sz[i] > 0 ?
-				p_src->used_sz[i] :
-				p_src->regd[i]->reg.len;
-
-		if (remaining_offset < regd_buf_size) {
-			used_size = min(remaining_size,
-					regd_buf_size - remaining_offset);
-			iser_dto_add_regd_buff(p_dst,
-					       p_src->regd[i],
-					       USE_OFFSET(p_src->
-							  offset[i] +
-							  remaining_offset),
-					       USE_SIZE(used_size));
-			remaining_size -= used_size;
-			if (remaining_size == 0)
-				break;
-			else
-				remaining_offset = 0;
-		} else
-			remaining_offset -= regd_buf_size;
-	}
-	if (remaining_size > 0)
-		iser_bug("size to clone:%ld exceeds by %ld the total size of "
-			 "src DTO:0x%p; dst DTO:0x%p, task:0x%p\n",
-			 size, remaining_size, p_src, p_dst, p_dst->p_task);
-}
-
-/**
- * iser_dto_add_local_single -
- */
-void iser_dto_add_local_single(struct iser_adaptor *p_iser_adaptor,
-			    struct iser_dto *p_dto,
-			    void *virt_addr,
-			    dma_addr_t dma_addr,
-			    unsigned long data_size,
-			    enum dma_data_direction direction)
-{
-	struct iser_regd_buf *p_regd_buf;
-
-	p_regd_buf = iser_regd_buf_alloc(p_iser_adaptor);
-
-	iser_reg_single_task(p_iser_adaptor, p_regd_buf,
-			     virt_addr, dma_addr, data_size, direction);
-
-	iser_dto_add_regd_buff(p_dto, p_regd_buf,
-			       USE_NO_OFFSET, USE_ENTIRE_SIZE);
-}
-
-/**
- * iser_dto_add_local_sg - adds a scatterlist to a dto intended for local
- *	operations only; tries to use registration keys from all-memory
- *	registration whenever possible.
- */
-int iser_dto_add_local_sg(struct iser_dto *p_dto,
-			  struct iser_data_buf *p_mem,
-			  enum dma_data_direction direction)
-{
-	struct iser_adaptor *p_iser_adaptor = p_dto->p_conn->ib_conn->p_adaptor;
-	struct iser_regd_buf *p_regd_buf;
-	int cur_buf = 0;
-	int err = 0;
-	int num_sg;
-
-	do {
-		p_regd_buf = iser_regd_buf_alloc(p_iser_adaptor);
-		if (p_regd_buf == NULL) {
-			iser_err("Failed to alloc regd_buf\n");
-			err = -ENOMEM;
-			goto dto_add_local_sg_exit;
-		}
-		/* if enough place in IOV for all sg entries, use all-memory
-		 * registration, otherwise register memory */
-		/* DMA_MAP: by now the sg must have been mapped, get the dma addr properly & pass it */
-		if (p_mem->dma_nents - cur_buf <
-		    MAX_REGD_BUF_VECTOR_LEN - p_dto->regd_vector_len) {
-			dma_addr_t chunk_dma_addr;
-			int chunk_sz;
-			void *chunk_vaddr;
-			num_sg = iser_data_buf_contig_len(p_mem,
-							  cur_buf, /* skip */
-							  &chunk_dma_addr,
-							  &chunk_sz);
-			/* DMA_MAP: vaddr not needed for this regd_buf */
-			chunk_vaddr = 0;
-			iser_reg_single_task(p_iser_adaptor, p_regd_buf,
-					     chunk_vaddr, chunk_dma_addr,
-					     chunk_sz, direction);
-		} else {
-			struct iser_page_vec *page_vec;
-			num_sg = iser_data_buf_aligned_len(p_mem,cur_buf);
-			page_vec = iser_page_vec_alloc(p_mem,cur_buf,num_sg);
-			if (page_vec == NULL) {
-				iser_err("Failed to alloc page_vec\n");
-				iser_regd_buff_release(p_regd_buf);
-				err = -ENOMEM;
-				goto dto_add_local_sg_exit;
-			}
-			iser_page_vec_build(p_mem,page_vec,cur_buf,num_sg);
-
-			err = iser_reg_phys_mem(p_iser_adaptor,
-						page_vec,
-						IB_ACCESS_LOCAL_WRITE  |
-						IB_ACCESS_REMOTE_WRITE |
-						IB_ACCESS_REMOTE_READ ,
-						&p_regd_buf->reg);
-			iser_page_vec_free(page_vec);
-			if (err) {
-				iser_err("Failed to register %d sg entries "
-					 "starting from %d\n",num_sg,cur_buf);
-				iser_regd_buff_release(p_regd_buf);
-				goto dto_add_local_sg_exit;
-			}
-
-			iser_dto_add_regd_buff(p_dto,
-					       p_regd_buf,
-					       USE_NO_OFFSET,
-					       USE_ENTIRE_SIZE);
-		}
-		iser_dto_add_regd_buff(p_dto, p_regd_buf,
-				       USE_NO_OFFSET, USE_ENTIRE_SIZE);
-		iser_dbg("Added regd.buf:0x%p to DTO:0x%p now %d regd.bufs\n",
-			 p_regd_buf, p_dto, p_dto->regd_vector_len);
-
-		cur_buf += num_sg;
-	} while (cur_buf < p_mem->size);
-
- dto_add_local_sg_exit:
-	return err;
-}
-
-/**
  *  iser_dto_buffs_release - free all registered buffers
  */
 void iser_dto_buffs_release(struct iser_dto *p_dto)
Index: ulp/iser/iser_dto.h
===================================================================
--- ulp/iser/iser_dto.h	(revision 4622)
+++ ulp/iser/iser_dto.h	(working copy)
@@ -47,28 +47,11 @@ int iser_dto_add_regd_buff(struct iser_d
 			   struct iser_regd_buf *p_regd_buf,
 			   unsigned long use_offset,
 			   unsigned long use_size);
-void
-iser_dto_clone_regd_buffs(struct iser_dto *p_dst_dto,
-			  struct iser_dto *p_src_dto,
-			  unsigned long offset,
-			  unsigned long size);
 
-void iser_dto_buffs_release(struct iser_dto *p_dto);
 void iser_dto_free(struct iser_dto *p_dto);
 
 int iser_dto_completion_error(struct iser_dto *p_dto);
 
-void iser_dto_add_local_single(struct iser_adaptor *p_iser_adaptor,
-			    struct iser_dto *p_dto,
-			    void *virt_addr,
-			    dma_addr_t dma_addr,
-			    unsigned long data_size,
-			    enum dma_data_direction direction);
-
-int iser_dto_add_local_sg(struct iser_dto *p_dto,
-			  struct iser_data_buf *p_mem,
-			  enum dma_data_direction direction);
-
 void iser_dto_get_rx_pdu_data(struct iser_dto *p_dto,
 			      unsigned long dto_xfer_len,
 			      struct iscsi_hdr **p_rx_hdr,
Index: ulp/iser/iser_initiator.c
===================================================================
--- ulp/iser/iser_initiator.c	(revision 4622)
+++ ulp/iser/iser_initiator.c	(working copy)
@@ -46,11 +46,6 @@
 #include "iser_verbs.h"
 #include "iser_memory.h"
 
-#define ISCSI_AHSL_MASK			0xFF000000
-#define ISCSI_DSL_MASK			0x00FFFFFF
-#define ISCSI_INVALID_ITT		0xFFFFFFFF
-
-
 static void iser_dma_unmap_task_data(struct iscsi_iser_cmd_task *p_iser_task);
 
 /**
@@ -60,43 +55,27 @@ static void iser_dma_unmap_task_data(str
  * returns 0 on success, -1 on failure
  */
 static int iser_reg_rdma_mem(struct iscsi_iser_cmd_task  *p_iser_task,
-			     enum   iser_data_dir   cmd_dir,
-			     struct iser_data_buf  *p_mem,
-			     struct iser_regd_buf **regd_buf)
+			     enum   iser_data_dir   cmd_dir)
 {
 	struct iser_adaptor *p_iser_adaptor = p_iser_task->conn->ib_conn->p_adaptor;
-	struct list_head *p_task_buff_list = NULL;
 	struct iser_page_vec *page_vec = NULL;
 	struct iser_regd_buf *p_regd_buf = NULL;
-	struct iser_dto *p_dto = NULL;
-	enum   ib_access_flags priv_flags = 0;
+	enum   ib_access_flags priv_flags = IB_ACCESS_LOCAL_WRITE;
+	struct iser_data_buf *p_mem = &p_iser_task->data[cmd_dir];
 	unsigned int page_vec_len = 0;
-	struct iser_data_buf  *mem_to_reg;
-	int cnt_to_reg;
+	int cnt_to_reg = 0;
 	int err = 0;
 
-	if (cmd_dir == ISER_DIR_IN) {
-		iser_dbg("cmd_dir == ISER_DIR_IN\n");
-		p_dto = &p_iser_task->rdma_write_dto;
-		p_task_buff_list = &p_iser_task->rcv_buff_list;
-		priv_flags = IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_WRITE;
-	} else if (cmd_dir == ISER_DIR_OUT) {
-		iser_dbg("cmd_dir == ISER_DIR_OUT\n");
-		p_dto = &p_iser_task->rdma_read_dto;
-		p_task_buff_list = &p_iser_task->send_buff_list;
-		priv_flags = IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_READ;
-	} else
-		iser_bug("Unexpected cmd dir:%d, task:0x%p\n",
-			 cmd_dir, p_iser_task);
-	*regd_buf = NULL;
+	if (cmd_dir == ISER_DIR_IN)
+		priv_flags |= IB_ACCESS_REMOTE_WRITE;
+	else
+		priv_flags |= IB_ACCESS_REMOTE_READ;
 
+	p_iser_task->rdma_regd[cmd_dir] = NULL;
 	p_regd_buf = iser_regd_buf_alloc(p_iser_adaptor);
 	if (p_regd_buf == NULL)
 		return -ENOMEM;
 
-	cnt_to_reg = 0;
-	mem_to_reg = p_mem;
-
 	iser_dbg("p_mem %p p_mem->type %d\n", p_mem,p_mem->type);
 
 	if (p_mem->type != ISER_BUF_TYPE_SINGLE) {
@@ -114,19 +93,19 @@ static int iser_reg_rdma_mem(struct iscs
 			/* unaligned scatterlist, anyway dma map the copy */
 			iser_start_rdma_unaligned_sg(p_iser_task, cmd_dir);
 			p_regd_buf->virt_addr = p_iser_task->data_copy[cmd_dir].p_buf;
-			mem_to_reg = &p_iser_task->data_copy[cmd_dir];
+			p_mem = &p_iser_task->data_copy[cmd_dir];
 		}
 	} else {
 		iser_dbg("converting single to page_vec\n");
 		p_regd_buf->virt_addr = p_mem->p_buf;
 	}
 
-	page_vec = iser_page_vec_alloc(mem_to_reg,0,cnt_to_reg);
+	page_vec = iser_page_vec_alloc(p_mem,0,cnt_to_reg);
 	if (page_vec == NULL) {
 		iser_regd_buff_release(p_regd_buf);
 		return -ENOMEM;
 	}
-	page_vec_len = iser_page_vec_build(mem_to_reg,page_vec, 0, cnt_to_reg);
+	page_vec_len = iser_page_vec_build(p_mem, page_vec, 0, cnt_to_reg);
 	err = iser_reg_phys_mem(p_iser_adaptor, page_vec, priv_flags,
 				&p_regd_buf->reg);
 	iser_page_vec_free(page_vec);
@@ -135,57 +114,10 @@ static int iser_reg_rdma_mem(struct iscs
 		iser_regd_buff_release(p_regd_buf);
 		return -EINVAL;
 	}
-	*regd_buf = p_regd_buf;
-
-	spin_lock_bh(&p_iser_task->task_lock);
-
-/*FIXME p_dto->p_task = p_iser_task; */
-/*FIXME	p_dto->p_conn = p_iser_task->p_conn; */
-	p_dto->regd_vector_len = 0;
-	iser_dto_add_regd_buff(p_dto, p_regd_buf,
-			       USE_NO_OFFSET, USE_ENTIRE_SIZE);
-	/* to be released when the task completes */
-	list_add(&p_regd_buf->free_upon_comp_list, p_task_buff_list);
-
-	spin_unlock_bh(&p_iser_task->task_lock);
-	return 0;
-}
-
-/**
- * Registers memory
- * intended for sending as unsolicited data
- *
- * returns 0 on success, -1 on failure
- */
-static int iser_reg_unsol(struct iscsi_iser_cmd_task *p_iser_task)
-{
-	struct iser_adaptor *p_iser_adaptor = p_iser_task->conn->ib_conn->p_adaptor;
-	struct iser_dto *p_dto = &p_iser_task->rdma_read_dto;
-	struct iser_data_buf *p_mem = &p_iser_task->data[ISER_DIR_OUT];
-	int err = 0;
-	int i;
-
-	if (p_mem->type == ISER_BUF_TYPE_SINGLE) {
-		/* DMA_MAP: should pass the task? single address has been mapped already!!! */
-		iser_dto_add_local_single(p_iser_adaptor, p_dto,
-					  p_mem->p_buf,
-					  p_mem->dma_addr, p_mem->size,
-					  DMA_TO_DEVICE);
-	}
-	else {
-		/* DMA_MAP: should pass copied and mapped sg instead? */
-		err = iser_dto_add_local_sg(p_dto, p_mem, DMA_TO_DEVICE);
-		if (err) {
-			iser_err("iser_dto_add_local_sg failed\n");
-			iser_dto_buffs_release(p_dto);
-			return err;
-		}
-	}
-
-	/* all registered buffers have been referenced,
-	   but this dto is not used in any IO */
-	for (i = 0; i < p_dto->regd_vector_len; i++)
-		iser_regd_buff_deref(p_dto->regd[i]);
+	/* take a reference on this regd buf such that it will not be released *
+	 * (eg in send dto completion) before we get the scsi response         */
+	iser_regd_buff_ref(p_regd_buf);
+	p_iser_task->rdma_regd[cmd_dir] = p_regd_buf;
 	return 0;
 }
 
@@ -239,12 +171,12 @@ static int iser_prepare_read_cmd(struct 
 	memcpy(&p_iser_task->data[ISER_DIR_IN], buf_in,
 	       sizeof(struct iser_data_buf));
 
-	err = iser_reg_rdma_mem(p_iser_task,ISER_DIR_IN,
-				&p_iser_task->data[ISER_DIR_IN],&p_regd_buf);
+	err = iser_reg_rdma_mem(p_iser_task,ISER_DIR_IN);
 	if (err) {
 		iser_err("Failed to set up Data-IN RDMA\n");
 		return err;
 	}
+	p_regd_buf = p_iser_task->rdma_regd[ISER_DIR_IN];
 	ISER_HDR_SET_BITS(p_iser_header, RSV, 1);
 	ISER_HDR_R_VADDR(p_iser_header) = cpu_to_be64(p_regd_buf->reg.va);
 	ISER_HDR_R_RKEY(p_iser_header) = htonl(p_regd_buf->reg.rkey);
@@ -311,17 +243,18 @@ iser_prepare_write_cmd(struct iscsi_iser
 	memcpy(&p_iser_task->data[ISER_DIR_OUT], buf_out,
 	       sizeof(struct iser_data_buf));
 
-	if (unsol_sz < edtl) {
-		err = iser_reg_rdma_mem(p_iser_task,ISER_DIR_OUT,
-					&p_iser_task->data[ISER_DIR_OUT],
-					&p_regd_buf);
-		if (err != 0) {
-			iser_err("Failed to register write cmd RDMA mem\n");
-			return err;
-		}
+	err = iser_reg_rdma_mem(p_iser_task,ISER_DIR_OUT);
+	if (err != 0) {
+		iser_err("Failed to register write cmd RDMA mem\n");
+		return err;
+	}
+
+	p_regd_buf = p_iser_task->rdma_regd[ISER_DIR_OUT];
+
+	if(unsol_sz < edtl) {
 		ISER_HDR_SET_BITS(p_iser_header, WSV, 1);
 		ISER_HDR_W_VADDR(p_iser_header) = cpu_to_be64(
-			p_regd_buf->reg.va + unsol_sz);
+				p_regd_buf->reg.va + unsol_sz);
 		ISER_HDR_W_RKEY(p_iser_header) = htonl(p_regd_buf->reg.rkey);
 
 		iser_dbg("Cmd itt:%d, WRITE tags, RKEY:0x%08X "
@@ -329,24 +262,17 @@ iser_prepare_write_cmd(struct iscsi_iser
 			 p_iser_task->itt, p_regd_buf->reg.rkey,
 			 (unsigned long)p_regd_buf->reg.va,
 			 unsol_sz);
-	} else {
-		err = iser_reg_unsol(p_iser_task); /* DMA_MAP: buf_out is already in task->data[DIR_OUT] */
-		if (err != 0){
-			iser_err("Failed to register write cmd RDMA mem\n");
-			return err;
-		}
 	}
 
-	/* If there is immediate data, add its register
-	   buffer reference to the send dto descriptor */
 	if (imm_sz > 0) {
 		iser_dbg("Cmd itt:%d, WRITE, adding imm.data sz: %d\n",
 			 p_iser_task->itt, imm_sz);
-
-		iser_dto_clone_regd_buffs(p_send_dto, /* dst */
-					  &p_iser_task->rdma_read_dto,
-					  0, imm_sz);
+		iser_dto_add_regd_buff(p_send_dto,
+				       p_regd_buf,
+				       USE_NO_OFFSET,
+				       USE_SIZE(imm_sz));
 	}
+
 	return 0;
 }
 
@@ -469,7 +395,8 @@ int iser_send_data_out(struct iscsi_iser
 	data_seg_len = ntoh24(hdr->dlength);
 	buf_offset   = ntohl(hdr->offset);
 
-	iser_dbg("%s itt %d dseg_len %d offset %d\n",__func__,(int)itt,(int)data_seg_len,(int)buf_offset);
+	iser_dbg("%s itt %d dseg_len %d offset %d\n",
+		 __func__,(int)itt,(int)data_seg_len,(int)buf_offset);
 
 	/* Allocate send DTO descriptor, headers buf and add it to the DTO */
 	p_send_dto = iser_dto_send_create(p_iser_conn,
@@ -486,10 +413,11 @@ int iser_send_data_out(struct iscsi_iser
 
 	p_send_dto->p_task = p_ctask;
 
-	/* Set-up the registered buffer entries for the data segment */
-	iser_dto_clone_regd_buffs(p_send_dto,	/* dst */
-				  &p_ctask->rdma_read_dto,
-				  buf_offset, data_seg_len);
+	/* all data was registered for RDMA, we can use the lkey */
+	iser_dto_add_regd_buff(p_send_dto,
+			       p_ctask->rdma_regd[ISER_DIR_OUT],
+			       USE_OFFSET(buf_offset),
+			       USE_SIZE(data_seg_len));
 
 	if (buf_offset + data_seg_len > p_ctask->data_len[ISER_DIR_OUT]) {
 		iser_err("Offset:%ld & DSL:%ld in Data-Out "
Index: ulp/iser/iser_task.c
===================================================================
--- ulp/iser/iser_task.c	(revision 4622)
+++ ulp/iser/iser_task.c	(working copy)
@@ -46,90 +46,16 @@ void iser_task_init_lowpart(struct iscsi
 {
 	spin_lock_init(&p_iser_task->task_lock);
 	p_iser_task->status = ISER_TASK_STATUS_INIT;
-
-	INIT_LIST_HEAD(&p_iser_task->send_buff_list);
-	INIT_LIST_HEAD(&p_iser_task->rcv_buff_list);
-
 	p_iser_task->post_send_count = 0;
-
+	
 	p_iser_task->dir[ISER_DIR_IN] = 0;
 	p_iser_task->dir[ISER_DIR_OUT] = 0;
-
+	
 	p_iser_task->data_len[ISER_DIR_IN] = 0;
 	p_iser_task->data_len[ISER_DIR_OUT] = 0;
-
-	iser_dto_init(&p_iser_task->rdma_read_dto);
-	p_iser_task->rdma_read_dto.p_conn = p_iser_task->conn;
-	p_iser_task->rdma_read_dto.p_task = p_iser_task;
-
-	iser_dto_init(&p_iser_task->rdma_write_dto);
-	p_iser_task->rdma_write_dto.p_conn = p_iser_task->conn;
-	p_iser_task->rdma_write_dto.p_task = p_iser_task;
-}
-
-/**
- * iser_task_release_send_buffers - Frees all sent buffers of a
- * task (upon completion)
- */
-void iser_task_release_send_buffers(struct iscsi_iser_cmd_task *p_iser_task)
-{
-	struct iser_regd_buf *p_regd_buf;
-	int tries = 0;
-
-	iser_dbg( "Releasing send buffs for iSER task: 0x%p\n",
-	       p_iser_task);
-
-	/* Free all sent buffers from the list */
-	spin_lock_bh(&p_iser_task->task_lock);
-	while (!list_empty(&p_iser_task->send_buff_list)) {
-		/* Get the next send buffer & remove it from the list */
-		p_regd_buf =
-		    list_entry(p_iser_task->send_buff_list.next,
-			       struct iser_regd_buf, free_upon_comp_list);
-		list_del(&p_regd_buf->free_upon_comp_list);
-		spin_unlock_bh(&p_iser_task->task_lock);
-
-		if (iser_regd_buff_release(p_regd_buf) != 0) {
-			iser_err("Failed to release send buffer after "
-				 "task complete, task: 0x%p, itt: %d -"
-				 " references remain\n",
-				 p_iser_task, p_iser_task->itt);
-
-			tries++; /* FIXME: calling schedule */
-			schedule();
-		}
-
-		spin_lock_bh(&p_iser_task->task_lock);
-	}
-	spin_unlock_bh(&p_iser_task->task_lock);
-	if (tries)
-		iser_err("Released send buff after %d tries\n", tries);
-}
-
-/**
- * iser_task_release_recv_buffers - Frees all receive buffers of
- * a task (upon completion)
- */
-void iser_task_release_recv_buffers(struct iscsi_iser_cmd_task *p_iser_task)
-{
-	struct iser_regd_buf *p_regd_buf;
-
-	spin_lock_bh(&p_iser_task->task_lock);
-	while (!list_empty(&p_iser_task->rcv_buff_list)) {
-		p_regd_buf = list_entry(p_iser_task->rcv_buff_list.next,
-					struct iser_regd_buf,
-					free_upon_comp_list);
-		list_del(&p_regd_buf->free_upon_comp_list);
-		spin_unlock_bh(&p_iser_task->task_lock);
-
-		if (iser_regd_buff_release(p_regd_buf) != 0)
-			iser_bug("task:0x%p complete, failed to release "
-				 "recv buf:0x%p, itt:%d - refs remain\n",
-				 p_iser_task, p_regd_buf, p_iser_task->itt);
-
-		spin_lock_bh(&p_iser_task->task_lock);
-	}
-	spin_unlock_bh(&p_iser_task->task_lock);
+	
+	p_iser_task->rdma_regd[ISER_DIR_IN] = NULL;
+	p_iser_task->rdma_regd[ISER_DIR_OUT] = NULL;
 }
 
 /**
@@ -184,9 +110,22 @@ iser_task_set_status(struct iscsi_iser_c
  */
 void iser_task_finalize_lowpart(struct iscsi_iser_cmd_task *p_iser_task)
 {
+	int deferred;
+
 	if (p_iser_task == NULL)
 		iser_bug("NULL task descriptor\n");
 
-	iser_task_release_send_buffers(p_iser_task);
-	iser_task_release_recv_buffers(p_iser_task);
+	spin_lock_bh(&p_iser_task->task_lock);
+	if (p_iser_task->dir[ISER_DIR_IN]) {
+		deferred = iser_regd_buff_release(p_iser_task->rdma_regd[ISER_DIR_IN]);
+		if (deferred)
+			iser_bug("References remain for BUF-IN rdma reg\n");
+	}
+	if (p_iser_task->dir[ISER_DIR_OUT] &&
+	    p_iser_task->rdma_regd[ISER_DIR_OUT] != NULL) {
+		deferred = iser_regd_buff_release(p_iser_task->rdma_regd[ISER_DIR_OUT]);
+		if (deferred)
+			iser_bug("References remain for BUF-OUT rdma reg\n");
+	}
+	spin_unlock_bh(&p_iser_task->task_lock);
 }
Index: ulp/iser/iser_conn.h
===================================================================
--- ulp/iser/iser_conn.h	(revision 4622)
+++ ulp/iser/iser_conn.h	(working copy)
@@ -40,9 +40,6 @@
 /* adaptor-related */
 int iser_adaptor_init(struct iser_adaptor *p_iser_adaptor);
 int iser_adaptor_release(struct iser_adaptor *p_iser_adaptor);
-struct iser_conn *iser_adaptor_find_conn(
-	struct iser_adaptor *p_iser_adaptor, void *ep_handle);
-
 
 /* internal connection handling */
 void iser_conn_init(struct iser_conn *p_iser_conn);
Index: ulp/iser/iser_task.h
===================================================================
--- ulp/iser/iser_task.h	(revision 4622)
+++ ulp/iser/iser_task.h	(working copy)
@@ -37,13 +37,12 @@
 
 #include "iser.h"
 
-void iser_task_hash_init(struct hash_table *hash_table);
-struct iscsi_iser_cmd_task *iser_task_find(struct iscsi_iser_conn *p_iser_conn, u32 itt);
 void iser_task_init_lowpart(struct iscsi_iser_cmd_task *p_iser_task);
+void iser_task_finalize_lowpart(struct iscsi_iser_cmd_task *iser_task);
+
 void iser_task_post_send_count_inc(struct iscsi_iser_cmd_task *p_iser_task);
 int iser_task_post_send_count_dec_and_test(struct iscsi_iser_cmd_task *p_iser_task);
 void iser_task_set_status(struct iscsi_iser_cmd_task *p_iser_task,
 			  enum iser_task_status status);
-void iser_task_finalize_lowpart(struct iscsi_iser_cmd_task *iser_task);
 
 #endif				/* __ISER_TASK_H__ */
Index: ulp/iser/iser_memory.c
===================================================================
--- ulp/iser/iser_memory.c	(revision 4622)
+++ ulp/iser/iser_memory.c	(working copy)
@@ -206,24 +206,6 @@ void iser_reg_single(struct iser_adaptor
 	p_regd_buf->direction = direction;
 }
 
-void iser_reg_single_task(struct iser_adaptor *p_iser_adaptor,
-		     struct iser_regd_buf *p_regd_buf,
-		     void *virt_addr,
-		     dma_addr_t dma_addr,
-		     unsigned long data_size,
-		     enum dma_data_direction direction)
-{
-	p_regd_buf->reg.lkey = p_iser_adaptor->mr->lkey;
-	p_regd_buf->reg.rkey = 0; /* indicate there's no need to unreg */
-	p_regd_buf->reg.len  = data_size;
-	p_regd_buf->reg.va   = dma_addr;
-
-	p_regd_buf->dma_addr  = 0;
-	p_regd_buf->virt_addr = virt_addr;
-	p_regd_buf->data_size = data_size;
-	p_regd_buf->direction = direction;
-}
-
 /**
  * iser_sg_size - returns the total data length in sg list
  */
@@ -523,42 +505,6 @@ unsigned int iser_data_buf_aligned_len(s
 	return ret_len;
 }
 
-/*
- * determine the maximal contiguous sub-list of a scatter-gather list
- */
-unsigned int iser_data_buf_contig_len(struct iser_data_buf *p_data, int skip,
-				      dma_addr_t *chunk_dma_addr, int *chunk_size)
-{
-	unsigned int ret_len = 0;
-
-	if (p_data->type == ISER_BUF_TYPE_SINGLE)
-		iser_bug("p_data must be sg\n");
-	else {
-		struct scatterlist *p_sg = p_data->p_buf;
-		int cnt, i;
-
-		*chunk_dma_addr = sg_dma_address(&p_sg[skip]);
-		*chunk_size	= 0;
-
-		for (cnt = 0, i = skip; i < p_data->dma_nents; i++, cnt++){
-			if ((cnt > 0) && sg_dma_address(&p_sg[i]) !=
-			    (sg_dma_address(&p_sg[i-1]) + sg_dma_len(&p_sg[i-1]))) {
-				ret_len = cnt;
-				break;
-			}
-			*chunk_size += sg_dma_len(&p_sg[i]);
-		}
-		if (i == p_data->dma_nents)
-			ret_len = cnt;
-
-		iser_dbg("Found %d contiguous entries out of %d in sg:0x%p, "
-			 "start dma addr:%ld size:%d\n",
-			 ret_len, p_data->dma_nents-skip, p_data,
-			 (long)*chunk_dma_addr, *chunk_size);
-	}
-	return ret_len;
-}
-
 /**
  * iser_data_buf_memcpy - Copies arbitrary data buffer to a
  * contiguous memory region

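A note on buffer lifetime, visible in iser_reg_rdma_mem() and
iser_task_finalize_lowpart() above: the registered buffer is reference
counted, and an extra reference is taken at registration time so that a
send DTO completion cannot free it before the SCSI response arrives; the
task-finalize path then drops the remaining reference. A tiny sketch of
that lifetime, again with made-up names rather than the driver code:

#include <stdio.h>
#include <stdlib.h>

struct regd_buf {
	int ref_count;
};

static void regd_buf_ref(struct regd_buf *rb)
{
	rb->ref_count++;
}

/* returns non-zero if references remain (the release is deferred) */
static int regd_buf_release(struct regd_buf *rb)
{
	if (--rb->ref_count > 0)
		return 1;
	free(rb);
	return 0;
}

int main(void)
{
	struct regd_buf *rb = calloc(1, sizeof(*rb));

	rb->ref_count = 1;	/* reference held by the task (rdma_regd[]) */
	regd_buf_ref(rb);	/* extra reference for the send DTO         */

	/* send DTO completes: drops its reference, buffer must survive */
	if (regd_buf_release(rb))
		printf("send completion: buffer kept, task still owns it\n");

	/* SCSI response received, task finalizes: last reference dropped */
	if (!regd_buf_release(rb))
		printf("task finalize: buffer freed\n");
	return 0;
}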