[openib-general] [PATCH 5/6] [RFC] iser handling of memory for RDMA

Or Gerlitz ogerlitz at voltaire.com
Wed Feb 22 06:35:59 PST 2006


+ the code has the ability to handle SG lists which are not aligned for
  RDMA, i.e. lists for which no single VA and RKEY pair can be produced
  by any of the IB verbs memory registration APIs

+ from our experience such lists are very rare; over time, less than 0.1%
  of the data sent down by the SCSI ML is represented by such SGs

+ the unaligned SG flow needs to be fixed such that DMA mapping takes place
  after the CPU last touches the data, and unmapping before the CPU first
  touches it

+ one planned change here is to always convert SGs to a page vector of 4K
  elements no matter what the system PAGE_SIZE is. This is expected towards
  2.6.17, with the merging of the change in the ib_fmr_pool API; a rough
  sketch of the idea appears below
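
To illustrate the last point, here is a rough sketch (not part of the
patch) of such a conversion. The helper name and the SHIFT_4K/SIZE_4K
macros are mine (MASK_4K matches the definition used in the patch), it
assumes the pages[] array is large enough, and it ignores the
same-page-fragments case the real code handles:

#define SHIFT_4K	12
#define SIZE_4K		(1UL << SHIFT_4K)
#define MASK_4K		((1UL << SHIFT_4K) - 1)

/* emit one entry per 4K chunk spanned by each dma mapped SG element,
 * independently of the system PAGE_SIZE */
static int sg_to_4k_page_vec(struct scatterlist *sg, int dma_nents,
			     u64 *pages)
{
	dma_addr_t start, end, chunk;
	int i, n = 0;

	for (i = 0; i < dma_nents; i++) {
		start = sg_dma_address(&sg[i]);
		start -= start & MASK_4K;	/* round down to a 4K boundary */
		end   = sg_dma_address(&sg[i]) + sg_dma_len(&sg[i]);
		for (chunk = start; chunk < end; chunk += SIZE_4K)
			pages[n++] = chunk;
	}
	return n;
}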


--- /ulp/iser-x/iser_memory.c	2006-02-22 15:06:53.000000000 +0200
+++ /ulp/iser/iser_memory.c	2006-02-22 13:48:55.000000000 +0200
@@ -1 +1,491 @@
+/*
+ * Copyright (c) 2004, 2005, 2006 Voltaire, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *	- Redistributions of source code must retain the above
+ *	  copyright notice, this list of conditions and the following
+ *	  disclaimer.
+ *
+ *	- Redistributions in binary form must reproduce the above
+ *	  copyright notice, this list of conditions and the following
+ *	  disclaimer in the documentation and/or other materials
+ *	  provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id: iser_memory.c 5459 2006-02-22 11:00:48Z ogerlitz $
+ */
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/mm.h>
+#include <asm/io.h>
+#include <asm/scatterlist.h>
 
+#include "iscsi_iser.h"
+
+#define ISER_KMALLOC_THRESHOLD 0x20000 /* 128K - kmalloc limit */
+
+/**
+ * iser_regd_buff_release - decrements the reference count for the
+ * registered buffer and releases it once the count drops to zero
+ *
+ * returns 0 if released, 1 if deferred
+ */
+int iser_regd_buff_release(struct iser_regd_buf *p_regd_buf)
+{
+	struct device *dma_device;
+
+	if ((atomic_read(&p_regd_buf->ref_count) == 0) ||
+	    atomic_dec_and_test(&p_regd_buf->ref_count)) {
+		/* if we used the dma mr, unreg is just NOP */
+		if (p_regd_buf->reg.rkey != 0)
+			iser_unreg_mem(&p_regd_buf->reg);
+
+		if (p_regd_buf->dma_addr) {
+			dma_device = p_regd_buf->p_adaptor->device->dma_device;
+			dma_unmap_single(dma_device,
+					 p_regd_buf->dma_addr,
+					 p_regd_buf->data_size,
+					 p_regd_buf->direction);
+		}
+		/* else this regd buf is associated with a task for */
+		/* which we do dma_unmap_single/sg later */
+		return 0;
+	} else {
+		iser_dbg("Release deferred, regd.buff: 0x%p\n", p_regd_buf);
+		return 1;
+	}
+}
+
+/**
+ * iser_reg_single - dma maps a single buffer and fills the registered
+ *		     buffer descriptor with the registration information
+ */
+void iser_reg_single(struct iser_adaptor *p_iser_adaptor,
+		     struct iser_regd_buf *p_regd_buf,
+		     enum dma_data_direction direction)
+{
+	dma_addr_t dma_addr;
+
+	dma_addr  = dma_map_single(p_iser_adaptor->device->dma_device,
+				   p_regd_buf->virt_addr,
+				   p_regd_buf->data_size, direction);
+	if (dma_mapping_error(dma_addr))
+		iser_bug("dma_map_single failed at %p\n", p_regd_buf->virt_addr);
+
+	p_regd_buf->reg.lkey = p_iser_adaptor->mr->lkey;
+	p_regd_buf->reg.rkey = 0; /* indicate there's no need to unreg */
+	p_regd_buf->reg.len  = p_regd_buf->data_size;
+	p_regd_buf->reg.va   = dma_addr;
+
+	p_regd_buf->dma_addr  = dma_addr;
+	p_regd_buf->direction = direction;
+}
+
+
+/**
+ * iser_sg_size - returns the total data length in an sg list
+ */
+int iser_sg_size(struct iser_data_buf *p_data)
+{
+	struct scatterlist *p_sg = (struct scatterlist *)p_data->p_buf;
+	int i, total_len=0;
+
+	for (i = 0; i < p_data->dma_nents; i++)
+		total_len += sg_dma_len(&p_sg[i]);
+	return total_len;
+}
+
+/**
+ * iser_start_rdma_unaligned_sg - allocates a linear buffer to stand in
+ * for an unaligned SG list, copies the data into it on the WRITE path
+ * and dma maps it for the RDMA operation
+ */
+void iser_start_rdma_unaligned_sg(struct iscsi_iser_cmd_task  *p_iser_task,
+				  enum iser_data_dir cmd_dir)
+{
+	dma_addr_t dma_addr;
+	struct device *dma_device;
+	char *mem = NULL;
+	struct iser_data_buf *p_mem = &p_iser_task->data[cmd_dir];
+	unsigned long  cmd_data_len = p_iser_task->data_len[cmd_dir];
+
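+	/* allocations above the kmalloc limit fall back to the page allocator */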
+	if (cmd_data_len > ISER_KMALLOC_THRESHOLD)
+		mem = (void *)__get_free_pages(GFP_KERNEL | __GFP_NOFAIL,
+			long_log2(roundup_pow_of_two(cmd_data_len)) - PAGE_SHIFT);
+	else
+		mem = kmalloc(cmd_data_len, GFP_KERNEL | __GFP_NOFAIL);
+
+	if (mem == NULL) {
+		iser_bug("Failed to allocate mem size %d %d for copying sglist\n",
+			 p_mem->size, (int)cmd_data_len);
+	}
+
+	if (cmd_dir == ISER_DIR_OUT) {
+		/* copy the unaligned sg into the buffer which is used for RDMA */
+		struct scatterlist *p_sg = (struct scatterlist *)p_mem->p_buf;
+		int i;
+		char *p;
+
+		for (p = mem, i = 0; i < p_mem->size; i++) {
+			memcpy(p,
+			       page_address(p_sg[i].page) + p_sg[i].offset,
+			       p_sg[i].length);
+			p += p_sg[i].length;
+		}
+	}
+
+	p_iser_task->data_copy[cmd_dir].p_buf = mem;
+	p_iser_task->data_copy[cmd_dir].size = cmd_data_len;
+	p_iser_task->data_copy[cmd_dir].type = ISER_BUF_TYPE_SINGLE;
+
+	dma_device = p_iser_task->conn->ib_conn->p_adaptor->device->dma_device;
+
+	if (cmd_dir == ISER_DIR_OUT)
+		dma_addr = dma_map_single(dma_device, mem, cmd_data_len,
+					  DMA_TO_DEVICE);
+	else
+		dma_addr = dma_map_single(dma_device, mem, cmd_data_len,
+					  DMA_FROM_DEVICE);
+
+	if (dma_mapping_error(dma_addr))
+		iser_bug("dma_map_single failed at %p\n", mem);
+
+	p_iser_task->data_copy[cmd_dir].dma_addr = dma_addr;
+}
+
+/**
+ * iser_finalize_rdma_unaligned_sg - unmaps the copy buffer, copies the
+ * data back to the unaligned SG list on the READ path and frees the buffer
+ */
+void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_cmd_task *p_iser_task)
+{
+	struct device *dma_device;
+	struct iser_data_buf *p_mem_copy;
+	unsigned int size;
+	dma_addr_t dma_addr;
+
+	dma_device = p_iser_task->conn->ib_conn->p_adaptor->device->dma_device;
+
+	if (p_iser_task->dir[ISER_DIR_IN]) {
+		char *mem;
+		struct scatterlist *p_sg;
+		unsigned char *p;
+		unsigned int sg_size;
+		int i;
+
+		p_mem_copy = &p_iser_task->data_copy[ISER_DIR_IN];
+		size	 = p_mem_copy->size;
+		dma_addr = p_mem_copy->dma_addr;
+
+		dma_unmap_single(dma_device, dma_addr, size, DMA_FROM_DEVICE);
+		/* copy back read RDMA to unaligned sg */
+		mem	= p_mem_copy->p_buf;
+		p_sg	= (struct scatterlist *)p_iser_task->data[ISER_DIR_IN].p_buf;
+		sg_size = p_iser_task->data[ISER_DIR_IN].size;
+
+		for (p = mem, i = 0; i < sg_size; i++){
+			memcpy(page_address(p_sg[i].page) + p_sg[i].offset,
+			       p,
+			       p_sg[i].length);
+			p += p_sg[i].length;
+		}
+
+		if (size > ISER_KMALLOC_THRESHOLD)
+			free_pages((unsigned long)p_mem_copy->p_buf,
+				long_log2(roundup_pow_of_two((int)size)) - PAGE_SHIFT);
+		else
+			kfree(p_mem_copy->p_buf);
+		p_mem_copy->p_buf = NULL;
+	}
+
+	if (p_iser_task->dir[ISER_DIR_OUT]) {
+		p_mem_copy = &p_iser_task->data_copy[ISER_DIR_OUT];
+		size	 = p_mem_copy->size;
+		dma_addr = p_mem_copy->dma_addr;
+		dma_unmap_single(dma_device, dma_addr, size, DMA_TO_DEVICE);
+		if (size > ISER_KMALLOC_THRESHOLD)
+			free_pages((unsigned long)p_mem_copy->p_buf,
+				long_log2(roundup_pow_of_two((int)size)) - PAGE_SHIFT);
+		else
+			kfree(p_mem_copy->p_buf);
+		p_mem_copy->p_buf = NULL;
+	}
+}
+
+/**
+ * iser_sg_to_page_vec - Translates scatterlist entries to physical addresses
+ * and returns the length of the resulting physical address array (may be
+ * less than the original due to possible compaction).
+ *
+ * we build a "page vec" under the assumption that the SG meets the RDMA
+ * alignment requirements. Other than the first and last SG elements, all
+ * the "internal" elements can be compacted into a list whose elements are
+ * dma addresses of physical pages. The code also supports the weird case
+ * where a few fragments of the same page are present in the SG as
+ * consecutive elements, and it handles a single-entry SG as well.
+ */
+static int iser_sg_to_page_vec(struct iser_data_buf *p_data,
+			       struct iser_page_vec *page_vec)
+{
+	struct scatterlist *p_sg = (struct scatterlist *)p_data->p_buf;
+	dma_addr_t first_addr, last_addr, page;
+	int start_aligned, end_aligned;
+	unsigned int cur_page = 0;
+	unsigned long total_sz = 0;
+	int i;
+
+	/* compute the offset of first element */
+	/* FIXME page_vec->offset type should be dma_addr_t */
+	page_vec->offset = (u64) p_sg[0].offset;
+
+	for (i = 0; i < p_data->dma_nents; i++) {
+		total_sz += sg_dma_len(&p_sg[i]);
+
+		first_addr = sg_dma_address(&p_sg[i]);
+		last_addr  = first_addr + sg_dma_len(&p_sg[i]);
+
+		start_aligned = !(first_addr & ~PAGE_MASK);
+		end_aligned   = !(last_addr  & ~PAGE_MASK);
+
+		/* continue to collect page fragments till aligned or SG ends */
+		while (!end_aligned && (i + 1 < p_data->dma_nents)) {
+			i++;
+			total_sz += sg_dma_len(&p_sg[i]);
+			last_addr = sg_dma_address(&p_sg[i]) + sg_dma_len(&p_sg[i]);
+			end_aligned = !(last_addr  & ~PAGE_MASK);
+		}
+
+		first_addr = first_addr & PAGE_MASK;
+
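+		/* emit one page address per PAGE_SIZE step over the collected chunk */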
+		for (page = first_addr; page < last_addr; page += PAGE_SIZE)
+			page_vec->pages[cur_page++] = page;
+
+	}
+	page_vec->data_size = total_sz;
+	iser_dbg("page_vec->data_size:%d cur_page %d\n", page_vec->data_size, cur_page);
+	return cur_page;
+}
+
+/**
+ * iser_single_to_page_vec - translates a single, virtually contiguous
+ * buffer into a page vector
+ */
+static int iser_single_to_page_vec(struct iser_data_buf *p_data,
+				   struct iser_page_vec *page_vec)
+{
+	u64 fpage, lpage, page;
+	int i;
+
+	iser_dbg("Translating data:0x%p, single virt:0x%p, data sz: %d\n",
+		 p_data, p_data->p_buf, p_data->size);
+
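+	/* first page boundary, and the page boundary past the last byte */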
+	fpage = (u64) p_data->dma_addr & PAGE_MASK;
+	lpage = (u64) (p_data->dma_addr + p_data->size - 1 + PAGE_SIZE)
+		       & PAGE_MASK;
+
+	page_vec->offset = (u64) (p_data->dma_addr - (long)fpage);
+
+	for (i = 0, page = fpage; page < lpage; page += PAGE_SIZE, i++) {
+		page_vec->pages[i] = page;
+		iser_dbg(
+			"SINGLE VIRT ADDED page[%d]=0x%lX at page_vec %p\n",
+			i, (long)page, page_vec);
+	}
+
+	page_vec->data_size = p_data->size;
+	iser_dbg("page_vec->data_size=%d\n", page_vec->data_size);
+
+	return i;
+}
+
+
+#define MASK_4K			((1UL << 12) - 1) /* 0xFFF */
+#define IS_4K_ALIGNED(addr)	((((unsigned long)addr) & MASK_4K) == 0)
+
+/**
+ * iser_data_buf_aligned_len - Tries to determine the maximal sub-list of a
+ * scatter-gather list of memory buffers which is correctly aligned for RDMA,
+ * and returns the number of entries which are aligned correctly. Supports
+ * the case where consecutive SG elements are actually fragments of the same
+ * physical page.
+ */
+static unsigned int iser_data_buf_aligned_len(struct iser_data_buf *p_data)
+{
+	struct scatterlist *p_sg;
+	dma_addr_t end_addr, next_addr;
+	int i, cnt;
+	unsigned int ret_len = 0;
+
+	p_sg = (struct scatterlist *)p_data->p_buf;
+
+	for (cnt = 0, i = 0; i < p_data->dma_nents; i++, cnt++) {
+		/* iser_dbg("Checking sg iobuf [%d]: phys=0x%08lX "
+		   "offset: %ld sz: %ld\n", i,
+		   (unsigned long)page_to_phys(p_sg[i].page),
+		   (unsigned long)p_sg[i].offset,
+		   (unsigned long)p_sg[i].length); */
+		end_addr = sg_dma_address(&p_sg[i]) +
+			   sg_dma_len(&p_sg[i]);
+		/* iser_dbg("Checking sg iobuf end address "
+		       "0x%08lX\n", end_addr); */
+		if (i + 1 < p_data->dma_nents) {
+			next_addr = sg_dma_address(&p_sg[i+1]);
+			/* are i, i+1 fragments of the same page? */
+			if (end_addr == next_addr)
+				continue;
+			else if (!IS_4K_ALIGNED(end_addr)) {
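+				/* end not on a 4K boundary - the RDMA-able prefix stops here */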
+				ret_len = cnt + 1;
+				break;
+			}
+		}
+	}
+	if (i == p_data->dma_nents)
+		ret_len = cnt;	/* loop ended */
+	iser_dbg("Found %d aligned entries out of %d in sg:0x%p\n",
+		 ret_len, p_data->dma_nents, p_data);
+	return ret_len;
+}
+
+static void iser_data_buf_dump(struct iser_data_buf *p_data)
+{
+	if (p_data->type == ISER_BUF_TYPE_SINGLE)
+		iser_err("single addr:0x%p sz:%d\n",
+		       p_data->p_buf, p_data->size);
+	else {
+		struct scatterlist *p_sg =
+			(struct scatterlist *)p_data->p_buf;
+		int i;
+
+		for (i = 0; i < p_data->size; i++)
+			iser_err("sg[%d] dma_addr:0x%lX page:0x%p "
+				 "off:%d sz:%d dma_len:%d\n",
+				 i, (unsigned long)sg_dma_address(&p_sg[i]),
+				 p_sg[i].page, p_sg[i].offset,
+				 p_sg[i].length, sg_dma_len(&p_sg[i]));
+	}
+}
+
+/**
+ * iser_page_vec_alloc - allocate page_vec covering a given data buffer
+ */
+static struct iser_page_vec *iser_page_vec_alloc(struct iser_data_buf *p_data,
+						 int total_size)
+{
+	struct iser_page_vec *page_vec;
+	int npages;
+
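+	/* +2 leaves room for partial first and last pages */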
+	npages = total_size / PAGE_SIZE + 2;
+
+	page_vec = kmalloc(sizeof(struct iser_page_vec) +
+			    (sizeof(u64) * npages),
+			    GFP_KERNEL | __GFP_NOFAIL);
+	if (page_vec != NULL) {
+		page_vec->pages = (u64 *) (page_vec + 1);
+		page_vec->data_size = total_size;
+		page_vec->length = 0;
+		page_vec->offset = 0;
+		iser_dbg("Allocated page_vec:%p, %d pages for size:%d\n",
+			 page_vec, npages, total_size);
+	} else
+		iser_err("Failed to alloc %d pages for size:%d\n",
+			 npages, total_size);
+	return page_vec;
+}
+
+
+static void iser_dump_page_vec(struct iser_page_vec *page_vec)
+{
+	int i;
+
+	iser_err("page vec length %d data size %d\n",
+		 page_vec->length, page_vec->data_size);
+	for (i = 0; i < page_vec->length; i++)
+		iser_err("%d %lx\n", i, (unsigned long)page_vec->pages[i]);
+}
+
+static void iser_page_vec_build(struct iser_data_buf *p_data,
+				struct iser_page_vec *page_vec)
+{
+	int page_vec_len = 0;
+
+	if (p_data->type == ISER_BUF_TYPE_SINGLE) {
+		iser_dbg("Translating single sz: %d\n", p_data->size);
+		page_vec_len = iser_single_to_page_vec(p_data, page_vec);
+	} else {
+		iser_dbg("Translating sg sz: %d\n", p_data->dma_nents);
+		page_vec_len = iser_sg_to_page_vec(p_data, page_vec);
+		iser_dbg("sg len %d page_vec_len %d\n",
+			 p_data->dma_nents, page_vec_len);
+	}
+	page_vec->length = page_vec_len;
+
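+	/* sanity check: counted in 4K pages, the vector must cover all the data */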
+	if (page_vec_len * 4096 < page_vec->data_size) {
+		if (p_data->type == ISER_BUF_TYPE_SCATTERLIST) {
+			iser_err("dumping sg\n");
+			iser_data_buf_dump(p_data);
+		}
+		iser_dump_page_vec(page_vec);
+		iser_bug("page_vec too short to hold this SG\n");
+	}
+}
+
+/**
+ * iser_reg_rdma_mem - Registers memory intended for RDMA,
+ * obtaining rkey and va
+ *
+ * returns 0 on success, errno code on failure
+ */
+int iser_reg_rdma_mem(struct iscsi_iser_cmd_task *p_iser_task,
+		      enum   iser_data_dir        cmd_dir)
+{
+	struct iser_conn     *p_iser_conn = p_iser_task->conn->ib_conn;
+	struct iser_data_buf *p_mem = &p_iser_task->data[cmd_dir];
+	struct iser_page_vec *page_vec;
+	struct iser_regd_buf *p_regd_buf;
+	int aligned_len;
+	int err;
+
+	p_regd_buf = &p_iser_task->rdma_regd[cmd_dir];
+
+	iser_dbg("p_mem %p p_mem->type %d\n", p_mem, p_mem->type);
+
+	if (p_mem->type != ISER_BUF_TYPE_SINGLE) {
+		aligned_len = iser_data_buf_aligned_len(p_mem);
+		if (aligned_len != p_mem->size) {
+			iser_err("rdma alignment violation %d/%d aligned\n",
+				 aligned_len, p_mem->size);
+			iser_data_buf_dump(p_mem);
+			/* allocate copy buf, if we are writing, copy the */
+			/* unaligned scatterlist, dma map the copy        */
+			iser_start_rdma_unaligned_sg(p_iser_task, cmd_dir);
+			p_mem = &p_iser_task->data_copy[cmd_dir];
+		}
+	}
+
+	page_vec = iser_page_vec_alloc(p_mem, p_iser_task->data_len[cmd_dir]);
+	if (!page_vec)
+		return -ENOMEM;
+
+	iser_page_vec_build(p_mem, page_vec);
+	err = iser_reg_page_vec(p_iser_conn, page_vec, &p_regd_buf->reg);
+	kfree(page_vec);
+	if (err)
+		return err;
+
+	/* take a reference on this regd buf such that it will not be released *
+	 * (eg in send dto completion) before we get the scsi response         */
+	atomic_inc(&p_regd_buf->ref_count);
+	return 0;
+}



