[openib-general] [PATCH 4/7] IB/iser - Use the new verbs DMA mapping functions

Ralph Campbell ralph.campbell at qlogic.com
Thu Nov 2 14:33:49 PST 2006


IB/iser - Use the new verbs DMA mapping functions

This patch converts iSER to use the new DMA mapping functions that the
verbs layer provides for kernel consumers (ib_dma_map_single(),
ib_dma_map_sg(), ib_sg_dma_address(), and friends), instead of calling
the generic dma_* API on ib_device->dma_device directly.

From: Ralph Campbell <ralph.campbell at qlogic.com>
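
For reference, the conversion pattern looks roughly like the sketch
below. This fragment is illustrative only and is not part of the patch;
the helper names example_map_buffer()/example_unmap_buffer() are made up
here. The point is that consumers now pass the struct ib_device itself
to the ib_dma_* wrappers rather than reaching through
ib_device->dma_device, which lets low-level drivers override the mapping
operations.

#include <linux/errno.h>
#include <linux/dma-mapping.h>
#include <rdma/ib_verbs.h>

/* Map a kernel virtual buffer through the new ib_dma_* wrappers. */
static int example_map_buffer(struct ib_device *ibdev, void *virt_addr,
			      size_t size, enum dma_data_direction dir,
			      u64 *dma_addr)
{
	/* New style: pass the ib_device, not ib_device->dma_device. */
	*dma_addr = ib_dma_map_single(ibdev, virt_addr, size, dir);
	if (ib_dma_mapping_error(ibdev, *dma_addr))
		return -ENOMEM;
	return 0;
}

/* Undo the mapping created above. */
static void example_unmap_buffer(struct ib_device *ibdev, u64 dma_addr,
				 size_t size, enum dma_data_direction dir)
{
	ib_dma_unmap_single(ibdev, dma_addr, size, dir);
}

The scatter-gather path is analogous: ib_dma_map_sg()/ib_dma_unmap_sg()
replace dma_map_sg()/dma_unmap_sg(), and the per-element accessors
ib_sg_dma_address()/ib_sg_dma_len() replace
sg_dma_address()/sg_dma_len(), as the hunks below show.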

diff -r f37bd0e41fec drivers/infiniband/ulp/iser/iser_memory.c
--- a/drivers/infiniband/ulp/iser/iser_memory.c	Thu Oct 26 21:44:41 2006 +0700
+++ b/drivers/infiniband/ulp/iser/iser_memory.c	Thu Oct 26 13:16:33 2006 -0800
@@ -51,7 +51,7 @@
  */
 int iser_regd_buff_release(struct iser_regd_buf *regd_buf)
 {
-	struct device *dma_device;
+	struct ib_device *dev;
 
 	if ((atomic_read(&regd_buf->ref_count) == 0) ||
 	    atomic_dec_and_test(&regd_buf->ref_count)) {
@@ -60,8 +60,8 @@ int iser_regd_buff_release(struct iser_r
 			iser_unreg_mem(&regd_buf->reg);
 
 		if (regd_buf->dma_addr) {
-			dma_device = regd_buf->device->ib_device->dma_device;
-			dma_unmap_single(dma_device,
+			dev = regd_buf->device->ib_device;
+			ib_dma_unmap_single(dev,
 					 regd_buf->dma_addr,
 					 regd_buf->data_size,
 					 regd_buf->direction);
@@ -85,10 +85,10 @@ void iser_reg_single(struct iser_device 
 {
 	dma_addr_t dma_addr;
 
-	dma_addr  = dma_map_single(device->ib_device->dma_device,
-				   regd_buf->virt_addr,
-				   regd_buf->data_size, direction);
-	BUG_ON(dma_mapping_error(dma_addr));
+	dma_addr = ib_dma_map_single(device->ib_device,
+				     regd_buf->virt_addr,
+				     regd_buf->data_size, direction);
+	BUG_ON(ib_dma_mapping_error(device->ib_device, dma_addr));
 
 	regd_buf->reg.lkey = device->mr->lkey;
 	regd_buf->reg.len  = regd_buf->data_size;
@@ -106,7 +106,7 @@ int iser_start_rdma_unaligned_sg(struct 
 				 enum iser_data_dir cmd_dir)
 {
 	int dma_nents;
-	struct device *dma_device;
+	struct ib_device *dev;
 	char *mem = NULL;
 	struct iser_data_buf *data = &iser_ctask->data[cmd_dir];
 	unsigned long  cmd_data_len = data->data_len;
@@ -146,17 +146,12 @@ int iser_start_rdma_unaligned_sg(struct 
 
 	iser_ctask->data_copy[cmd_dir].copy_buf  = mem;
 
-	dma_device = iser_ctask->iser_conn->ib_conn->device->ib_device->dma_device;
-
-	if (cmd_dir == ISER_DIR_OUT)
-		dma_nents = dma_map_sg(dma_device,
-				       &iser_ctask->data_copy[cmd_dir].sg_single,
-				       1, DMA_TO_DEVICE);
-	else
-		dma_nents = dma_map_sg(dma_device,
-				       &iser_ctask->data_copy[cmd_dir].sg_single,
-				       1, DMA_FROM_DEVICE);
-
+	dev = iser_ctask->iser_conn->ib_conn->device->ib_device;
+	dma_nents = ib_dma_map_sg(dev,
+				  &iser_ctask->data_copy[cmd_dir].sg_single,
+				  1,
+				  (cmd_dir == ISER_DIR_OUT) ?
+				  DMA_TO_DEVICE : DMA_FROM_DEVICE);
 	BUG_ON(dma_nents == 0);
 
 	iser_ctask->data_copy[cmd_dir].dma_nents = dma_nents;
@@ -169,19 +164,16 @@ void iser_finalize_rdma_unaligned_sg(str
 void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_cmd_task *iser_ctask,
 				     enum iser_data_dir         cmd_dir)
 {
-	struct device *dma_device;
+	struct ib_device *dev;
 	struct iser_data_buf *mem_copy;
 	unsigned long  cmd_data_len;
 
-	dma_device = iser_ctask->iser_conn->ib_conn->device->ib_device->dma_device;
-	mem_copy   = &iser_ctask->data_copy[cmd_dir];
-
-	if (cmd_dir == ISER_DIR_OUT)
-		dma_unmap_sg(dma_device, &mem_copy->sg_single, 1,
-			     DMA_TO_DEVICE);
-	else
-		dma_unmap_sg(dma_device, &mem_copy->sg_single, 1,
-			     DMA_FROM_DEVICE);
+	dev = iser_ctask->iser_conn->ib_conn->device->ib_device;
+	mem_copy = &iser_ctask->data_copy[cmd_dir];
+
+	ib_dma_unmap_sg(dev, &mem_copy->sg_single, 1,
+			(cmd_dir == ISER_DIR_OUT) ?
+			DMA_TO_DEVICE : DMA_FROM_DEVICE);
 
 	if (cmd_dir == ISER_DIR_IN) {
 		char *mem;
@@ -230,7 +222,8 @@ void iser_finalize_rdma_unaligned_sg(str
  * consecutive elements. Also, it handles one entry SG.
  */
 static int iser_sg_to_page_vec(struct iser_data_buf *data,
-			       struct iser_page_vec *page_vec)
+			       struct iser_page_vec *page_vec,
+			       struct ib_device *ibdev)
 {
 	struct scatterlist *sg = (struct scatterlist *)data->buf;
 	dma_addr_t first_addr, last_addr, page;
@@ -243,10 +236,12 @@ static int iser_sg_to_page_vec(struct is
 	page_vec->offset = (u64) sg[0].offset & ~MASK_4K;
 
 	for (i = 0; i < data->dma_nents; i++) {
-		total_sz += sg_dma_len(&sg[i]);
-
-		first_addr = sg_dma_address(&sg[i]);
-		last_addr  = first_addr + sg_dma_len(&sg[i]);
+		unsigned int dma_len = ib_sg_dma_len(ibdev, &sg[i]);
+
+		total_sz += dma_len;
+
+		first_addr = ib_sg_dma_address(ibdev, &sg[i]);
+		last_addr  = first_addr + dma_len;
 
 		start_aligned = !(first_addr & ~MASK_4K);
 		end_aligned   = !(last_addr  & ~MASK_4K);
@@ -254,8 +249,9 @@ static int iser_sg_to_page_vec(struct is
 		/* continue to collect page fragments till aligned or SG ends */
 		while (!end_aligned && (i + 1 < data->dma_nents)) {
 			i++;
-			total_sz += sg_dma_len(&sg[i]);
-			last_addr = sg_dma_address(&sg[i]) + sg_dma_len(&sg[i]);
+			dma_len = ib_sg_dma_len(ibdev, &sg[i]);
+			total_sz += dma_len;
+			last_addr = ib_sg_dma_address(ibdev, &sg[i]) + dma_len;
 			end_aligned = !(last_addr  & ~MASK_4K);
 		}
 
@@ -287,7 +283,8 @@ static int iser_sg_to_page_vec(struct is
  * the number of entries which are aligned correctly. Supports the case where
  * consecutive SG elements are actually fragments of the same physcial page.
  */
-static unsigned int iser_data_buf_aligned_len(struct iser_data_buf *data)
+static unsigned int iser_data_buf_aligned_len(struct iser_data_buf *data,
+					      struct ib_device *ibdev)
 {
 	struct scatterlist *sg;
 	dma_addr_t end_addr, next_addr;
@@ -302,12 +299,12 @@ static unsigned int iser_data_buf_aligne
 		   (unsigned long)page_to_phys(sg[i].page),
 		   (unsigned long)sg[i].offset,
 		   (unsigned long)sg[i].length); */
-		end_addr = sg_dma_address(&sg[i]) +
-			   sg_dma_len(&sg[i]);
+		end_addr = ib_sg_dma_address(ibdev, &sg[i]) +
+			   ib_sg_dma_len(ibdev, &sg[i]);
 		/* iser_dbg("Checking sg iobuf end address "
 		       "0x%08lX\n", end_addr); */
 		if (i + 1 < data->dma_nents) {
-			next_addr = sg_dma_address(&sg[i+1]);
+			next_addr = ib_sg_dma_address(ibdev, &sg[i+1]);
 			/* are i, i+1 fragments of the same page? */
 			if (end_addr == next_addr)
 				continue;
@@ -324,7 +321,8 @@ static unsigned int iser_data_buf_aligne
 	return ret_len;
 }
 
-static void iser_data_buf_dump(struct iser_data_buf *data)
+static void iser_data_buf_dump(struct iser_data_buf *data,
+			       struct ib_device *ibdev)
 {
 	struct scatterlist *sg = (struct scatterlist *)data->buf;
 	int i;
@@ -332,9 +330,9 @@ static void iser_data_buf_dump(struct is
 	for (i = 0; i < data->dma_nents; i++)
 		iser_err("sg[%d] dma_addr:0x%lX page:0x%p "
 			 "off:0x%x sz:0x%x dma_len:0x%x\n",
-			 i, (unsigned long)sg_dma_address(&sg[i]),
+			 i, (unsigned long)ib_sg_dma_address(ibdev, &sg[i]),
 			 sg[i].page, sg[i].offset,
-			 sg[i].length,sg_dma_len(&sg[i]));
+			 sg[i].length, ib_sg_dma_len(ibdev, &sg[i]));
 }
 
 static void iser_dump_page_vec(struct iser_page_vec *page_vec)
@@ -348,7 +346,8 @@ static void iser_dump_page_vec(struct is
 }
 
 static void iser_page_vec_build(struct iser_data_buf *data,
-				struct iser_page_vec *page_vec)
+				struct iser_page_vec *page_vec,
+				struct ib_device *ibdev)
 {
 	int page_vec_len = 0;
 
@@ -356,14 +355,14 @@ static void iser_page_vec_build(struct i
 	page_vec->offset = 0;
 
 	iser_dbg("Translating sg sz: %d\n", data->dma_nents);
-	page_vec_len = iser_sg_to_page_vec(data,page_vec);
+	page_vec_len = iser_sg_to_page_vec(data, page_vec, ibdev);
 	iser_dbg("sg len %d page_vec_len %d\n", data->dma_nents,page_vec_len);
 
 	page_vec->length = page_vec_len;
 
 	if (page_vec_len * SIZE_4K < page_vec->data_size) {
 		iser_err("page_vec too short to hold this SG\n");
-		iser_data_buf_dump(data);
+		iser_data_buf_dump(data, ibdev);
 		iser_dump_page_vec(page_vec);
 		BUG();
 	}
@@ -374,13 +373,12 @@ int iser_dma_map_task_data(struct iscsi_
 			    enum   iser_data_dir       iser_dir,
 			    enum   dma_data_direction  dma_dir)
 {
-	struct device *dma_device;
+	struct ib_device *dev;
 
 	iser_ctask->dir[iser_dir] = 1;
-	dma_device =
-		iser_ctask->iser_conn->ib_conn->device->ib_device->dma_device;
-
-	data->dma_nents = dma_map_sg(dma_device, data->buf, data->size, dma_dir);
+	dev = iser_ctask->iser_conn->ib_conn->device->ib_device;
+
+	data->dma_nents = ib_dma_map_sg(dev, data->buf, data->size, dma_dir);
 	if (data->dma_nents == 0) {
 		iser_err("dma_map_sg failed!!!\n");
 		return -EINVAL;
@@ -390,20 +388,19 @@ int iser_dma_map_task_data(struct iscsi_
 
 void iser_dma_unmap_task_data(struct iscsi_iser_cmd_task *iser_ctask)
 {
-	struct device  *dma_device;
+	struct ib_device *dev;
 	struct iser_data_buf *data;
 
-	dma_device =
-		iser_ctask->iser_conn->ib_conn->device->ib_device->dma_device;
+	dev = iser_ctask->iser_conn->ib_conn->device->ib_device;
 
 	if (iser_ctask->dir[ISER_DIR_IN]) {
 		data = &iser_ctask->data[ISER_DIR_IN];
-		dma_unmap_sg(dma_device, data->buf, data->size, DMA_FROM_DEVICE);
+		ib_dma_unmap_sg(dev, data->buf, data->size, DMA_FROM_DEVICE);
 	}
 
 	if (iser_ctask->dir[ISER_DIR_OUT]) {
 		data = &iser_ctask->data[ISER_DIR_OUT];
-		dma_unmap_sg(dma_device, data->buf, data->size, DMA_TO_DEVICE);
+		ib_dma_unmap_sg(dev, data->buf, data->size, DMA_TO_DEVICE);
 	}
 }
 
@@ -418,6 +415,7 @@ int iser_reg_rdma_mem(struct iscsi_iser_
 {
 	struct iser_conn     *ib_conn = iser_ctask->iser_conn->ib_conn;
 	struct iser_device   *device = ib_conn->device;
+	struct ib_device     *ibdev = device->ib_device;
 	struct iser_data_buf *mem = &iser_ctask->data[cmd_dir];
 	struct iser_regd_buf *regd_buf;
 	int aligned_len;
@@ -427,11 +425,11 @@ int iser_reg_rdma_mem(struct iscsi_iser_
 
 	regd_buf = &iser_ctask->rdma_regd[cmd_dir];
 
-	aligned_len = iser_data_buf_aligned_len(mem);
+	aligned_len = iser_data_buf_aligned_len(mem, ibdev);
 	if (aligned_len != mem->dma_nents) {
 		iser_err("rdma alignment violation %d/%d aligned\n",
 			 aligned_len, mem->size);
-		iser_data_buf_dump(mem);
+		iser_data_buf_dump(mem, ibdev);
 
 		/* unmap the command data before accessing it */
 		iser_dma_unmap_task_data(iser_ctask);
@@ -449,8 +447,8 @@ int iser_reg_rdma_mem(struct iscsi_iser_
 
 		regd_buf->reg.lkey = device->mr->lkey;
 		regd_buf->reg.rkey = device->mr->rkey;
-		regd_buf->reg.len  = sg_dma_len(&sg[0]);
-		regd_buf->reg.va   = sg_dma_address(&sg[0]);
+		regd_buf->reg.len  = ib_sg_dma_len(ibdev, &sg[0]);
+		regd_buf->reg.va   = ib_sg_dma_address(ibdev, &sg[0]);
 		regd_buf->reg.is_fmr = 0;
 
 		iser_dbg("PHYSICAL Mem.register: lkey: 0x%08X rkey: 0x%08X  "
@@ -460,10 +458,10 @@ int iser_reg_rdma_mem(struct iscsi_iser_
 			 (unsigned long)regd_buf->reg.va,
 			 (unsigned long)regd_buf->reg.len);
 	} else { /* use FMR for multiple dma entries */
-		iser_page_vec_build(mem, ib_conn->page_vec);
+		iser_page_vec_build(mem, ib_conn->page_vec, ibdev);
 		err = iser_reg_page_vec(ib_conn, ib_conn->page_vec, &regd_buf->reg);
 		if (err) {
-			iser_data_buf_dump(mem);
+			iser_data_buf_dump(mem, ibdev);
 			iser_err("mem->dma_nents = %d (dlength = 0x%x)\n", mem->dma_nents,
 				 ntoh24(iser_ctask->desc.iscsi_header.dlength));
 			iser_err("page_vec: data_size = 0x%x, length = %d, offset = 0x%x\n",
