[openib-general] [PATCH] enable the fmr pool user to set the page size

Or Gerlitz ogerlitz at voltaire.com
Tue Jan 17 02:05:08 PST 2006


Roland,

This patch allows the consumer to set the page size of the "pages" mapped
by the pool FMRs, a feature that already exists in the ib_verbs API.

On the cosmetic side it renames the ib_fmr_attr.page_size field to
page_shift. Note that I did not go as far as renaming mpt_entry->page_size,
so it is up to you whether to keep the page_size convention there.

A patch converting the FMR consumers to the new API is included below.
If this API change is accepted we will enhance the iSER code, e.g. to
map FMRs at 4K "page" resolution.
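
For example, with this change a consumer that wants 4K mapping granularity
regardless of the system page size could fill the pool parameters along
these lines (a sketch only; the numeric values and the my_pd handle are
placeholders, not taken from any existing consumer):

	struct ib_fmr_pool *pool;
	struct ib_fmr_pool_param params = {
		.max_pages_per_fmr = 64,	/* placeholder value */
		.page_shift        = 12,	/* map in 4K "pages" */
		.access            = IB_ACCESS_LOCAL_WRITE,
		.pool_size         = 256,	/* placeholder value */
		.dirty_watermark   = 32,
		.cache             = 1,
	};

	/* my_pd: a previously allocated struct ib_pd *, placeholder here */
	pool = ib_create_fmr_pool(my_pd, &params);
	if (IS_ERR(pool))
		return PTR_ERR(pool);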

Or.


Signed-off-by: Or Gerlitz <ogerlitz at voltaire.com>



Index: include/rdma/ib_verbs.h
===================================================================
--- include/rdma/ib_verbs.h	(revision 4911)
+++ include/rdma/ib_verbs.h	(working copy)
@@ -650,7 +650,7 @@ struct ib_mw_bind {
 struct ib_fmr_attr {
 	int	max_pages;
 	int	max_maps;
-	u8	page_size;
+	u8	page_shift;
 };
 
 struct ib_ucontext {
Index: include/rdma/ib_fmr_pool.h
===================================================================
--- include/rdma/ib_fmr_pool.h	(revision 4911)
+++ include/rdma/ib_fmr_pool.h	(working copy)
@@ -43,6 +43,7 @@ struct ib_fmr_pool;
 /**
  * struct ib_fmr_pool_param - Parameters for creating FMR pool
  * @max_pages_per_fmr:Maximum number of pages per map request.
+ * @page_shift:Log2 of the size of the "pages" mapped by this FMR.
  * @access:Access flags for FMRs in pool.
  * @pool_size:Number of FMRs to allocate for pool.
  * @dirty_watermark:Flush is triggered when @dirty_watermark dirty
@@ -55,6 +56,7 @@ struct ib_fmr_pool;
  */
 struct ib_fmr_pool_param {
 	int                     max_pages_per_fmr;
+	int                     page_shift;
 	enum ib_access_flags    access;
 	int                     pool_size;
 	int                     dirty_watermark;
Index: core/fmr_pool.c
===================================================================
--- core/fmr_pool.c	(revision 4911)
+++ core/fmr_pool.c	(working copy)
@@ -280,7 +280,7 @@ struct ib_fmr_pool *ib_create_fmr_pool(s
 		struct ib_fmr_attr attr = {
 			.max_pages = params->max_pages_per_fmr,
 			.max_maps  = IB_FMR_MAX_REMAPS,
-			.page_size = PAGE_SHIFT
+			.page_shift = params->page_shift
 		};
 
 		for (i = 0; i < params->pool_size; ++i) {
Index: hw/mthca/mthca_mr.c
===================================================================
--- hw/mthca/mthca_mr.c	(revision 4911)
+++ hw/mthca/mthca_mr.c	(working copy)
@@ -497,7 +497,7 @@ int mthca_fmr_alloc(struct mthca_dev *de
 
 	might_sleep();
 
-	if (mr->attr.page_size < 12 || mr->attr.page_size >= 32)
+	if (mr->attr.page_shift < 12 || mr->attr.page_shift >= 32)
 		return -EINVAL;
 
 	/* For Arbel, all MTTs must fit in the same page. */
@@ -549,7 +549,7 @@ int mthca_fmr_alloc(struct mthca_dev *de
 				       MTHCA_MPT_FLAG_REGION      |
 				       access);
 
-	mpt_entry->page_size = cpu_to_be32(mr->attr.page_size - 12);
+	mpt_entry->page_size = cpu_to_be32(mr->attr.page_shift - 12);
 	mpt_entry->key       = cpu_to_be32(key);
 	mpt_entry->pd        = cpu_to_be32(pd);
 	memset(&mpt_entry->start, 0,

Index: ulp/sdp/sdp_conn.c
===================================================================
--- ulp/sdp/sdp_conn.c	(revision 4911)
+++ ulp/sdp/sdp_conn.c	(working copy)
@@ -1759,6 +1759,7 @@ static void sdp_device_init_one(struct i
 	/*
 	 * FMR allocation
 	 */
+	fmr_param_s.page_shift = PAGE_SHIFT;
 	fmr_param_s.pool_size = SDP_FMR_POOL_SIZE;
 	fmr_param_s.dirty_watermark = SDP_FMR_DIRTY_SIZE;
 	fmr_param_s.cache = 1;
Index: ulp/iser/iser_verbs.c
===================================================================
--- ulp/iser/iser_verbs.c	(revision 5033)
+++ ulp/iser/iser_verbs.c	(working copy)
@@ -150,6 +150,7 @@ int iser_create_ib_conn_res(struct iser_
 
 	p_iser_adaptor = p_iser_conn->p_adaptor;
 
+	params.page_shift        = PAGE_SHIFT;
 	params.max_pages_per_fmr = ISCSI_ISER_SG_TABLESIZE;
 	params.pool_size	 = ISCSI_ISER_XMIT_CMDS_MAX;
 	params.dirty_watermark	 = 32;
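
For reference on the mthca hunks above: the MPT entry stores log2 of the
page size relative to the 4K minimum, which is why mthca_fmr_alloc()
rejects a page_shift outside [12, 32) and programs page_shift - 12 into
the hardware. A hypothetical helper (not part of the driver) just to
spell out the encoding:

	/* illustration only: shift 12 (4K) encodes as 0, 13 (8K) as 1, ... */
	static inline u32 mthca_mpt_page_size_field(u8 page_shift)
	{
		return page_shift - 12;
	}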



