[openib-general] sdp: kill sdp buff pool
Michael S. Tsirkin
mst at mellanox.co.il
Thu Sep 8 04:41:56 PDT 2005
SDP seems to have a re-implementation of kmem_cache in sdp_buff.
Killing it actually results in a small bandwidth increase
for both bcopy and zcopy versions, so I plan to put this in.
Note that sdp_buff_pool_chain_link is now the same as sdp_buff_pool_put,
and sdp_buff_pool_chain_put is now a nop, but I decided to
keep them in for now, in case there's another use for them.
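
For reference, the whole change boils down to allocating descriptors and data
areas straight from slab caches instead of the hand-rolled free list. A minimal
sketch of that pattern follows (not the patch itself: the sketch_* names are
made up, the sdpc_buff fields are taken from the patch, and the 2.6.13-era
kmem_cache_create() signature with ctor/dtor arguments is assumed):

#include <linux/slab.h>
#include <linux/mm.h>		/* PAGE_SIZE */
/* struct sdpc_buff comes from the driver's sdp_main.h */

static kmem_cache_t *buff_cache;	/* struct sdpc_buff descriptors */
static kmem_cache_t *data_cache;	/* PAGE_SIZE data areas */

static int sketch_pool_init(void)
{
	buff_cache = kmem_cache_create("sdp_buff_desc",
				       sizeof(struct sdpc_buff),
				       0, SLAB_HWCACHE_ALIGN, NULL, NULL);
	if (!buff_cache)
		return -ENOMEM;

	data_cache = kmem_cache_create("sdp_buff_pool",
				       PAGE_SIZE, 0, 0, NULL, NULL);
	if (!data_cache) {
		kmem_cache_destroy(buff_cache);
		return -ENOMEM;
	}
	return 0;
}

static struct sdpc_buff *sketch_buff_get(void)
{
	struct sdpc_buff *buff = kmem_cache_alloc(buff_cache, GFP_ATOMIC);

	if (!buff)
		return NULL;
	buff->head = kmem_cache_alloc(data_cache, GFP_ATOMIC);
	if (!buff->head) {
		kmem_cache_free(buff_cache, buff);
		return NULL;
	}
	buff->data = buff->head;
	buff->tail = buff->head;
	buff->end = buff->head + PAGE_SIZE;
	return buff;
}

static void sketch_buff_put(struct sdpc_buff *buff)
{
	kmem_cache_free(data_cache, buff->head);
	kmem_cache_free(buff_cache, buff);
}

The slab allocator already does per-CPU caching and lazy shrinking, so the
min/max/increment/free-mark tuning and the /proc dump of pool statistics go
away along with the custom pool.
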
sdp_buff.c | 388 ++++++------------------------------------------------------
sdp_dev.h | 7 -
sdp_inet.c | 17 --
sdp_proc.c | 6
sdp_proc.h | 11 -
sdp_proto.h | 10 -
6 files changed, 50 insertions(+), 389 deletions(-)
---
Replace the sdp buff pool with kmem_cache.
Signed-off-by: Michael S. Tsirkin <mst at mellanox.co.il>
Index: linux-2.6.13/drivers/infiniband/ulp/sdp/sdp_buff.c
===================================================================
--- linux-2.6.13.orig/drivers/infiniband/ulp/sdp/sdp_buff.c 2005-09-08 15:06:20.000000000 +0300
+++ linux-2.6.13/drivers/infiniband/ulp/sdp/sdp_buff.c 2005-09-08 16:03:16.000000000 +0300
@@ -35,7 +35,7 @@
#include "sdp_main.h"
-static struct sdpc_buff_root *main_pool = NULL;
+static struct sdpc_buff_root main_pool;
/*
* data buffers managment API
*/
@@ -305,194 +305,78 @@ void sdp_buff_q_clear_unmap(struct sdpc_
/*
* sdp_buff_pool_release - release allocated buffers from the main pool
*/
-static void sdp_buff_pool_release(struct sdpc_buff_root *m_pool, int count)
+static void sdp_buff_pool_release(struct sdpc_buff *buff)
{
- struct sdpc_buff *buff;
-
- /*
- * Release count buffers.
- */
- while (count--) {
- buff = sdp_buff_q_get(&m_pool->pool);
- if (!buff)
- break;
- /*
- * decrement global buffer count, free buffer page, and free
- * buffer descriptor.
- */
- m_pool->buff_cur--;
- free_page((unsigned long)buff->head);
- kmem_cache_free(m_pool->buff_cache, buff);
- }
-}
-
-/*
- * sdp_buff_pool_release_check - check for buffer release from main pool
- */
-static inline void sdp_buff_pool_release_check(struct sdpc_buff_root *m_pool)
-{
- /*
- * If there are more then minimum buffers outstanding, free half of
- * the available buffers.
- */
- if (m_pool->buff_cur > m_pool->buff_min &&
- m_pool->pool.size > m_pool->free_mark) {
- int count;
- /*
- * Always leave at least minimum buffers, otherwise remove
- * either half of the pool, which is more then the mark
- */
- count = min(m_pool->buff_cur - m_pool->buff_min,
- m_pool->free_mark/2);
-
- sdp_buff_pool_release(m_pool, count);
- }
+ kmem_cache_free(main_pool.pool_cache, buff->head);
+ kmem_cache_free(main_pool.buff_cache, buff);
}
/*
* sdp_buff_pool_alloc - allocate more buffers for the main pool
*/
-static int sdp_buff_pool_alloc(struct sdpc_buff_root *m_pool)
+static struct sdpc_buff *sdp_buff_pool_alloc(void)
{
struct sdpc_buff *buff;
- int total;
- /*
- * Calculate the total number of buffers.
- */
- total = max(m_pool->buff_min, m_pool->buff_cur + m_pool->alloc_inc);
- total = min(total, m_pool->buff_max);
-
- while (total > m_pool->buff_cur) {
- /*
- * allocate a buffer descriptor, buffer, and then add it to
- * the pool.
- */
- buff = kmem_cache_alloc(m_pool->buff_cache, GFP_ATOMIC);
- if (!buff) {
- sdp_warn("Failed to allocate buffer. <%d:%d>",
- total, m_pool->buff_cur);
- break;
- }
-
- buff->head = (void *)__get_free_page(GFP_ATOMIC);
- if (!buff->head) {
- sdp_warn("Failed to allocate buffer page. <%d:%d>",
- total, m_pool->buff_cur);
-
- kmem_cache_free(m_pool->buff_cache, buff);
- break;
- }
-
- buff->end = buff->head + PAGE_SIZE;
- buff->data = buff->head;
- buff->tail = buff->head;
- buff->sge.lkey = 0;
- buff->sge.addr = 0;
- buff->sge.length = 0;
- buff->pool = NULL;
- buff->type = SDP_DESC_TYPE_BUFF;
- buff->release = sdp_buff_pool_put;
-
- sdp_buff_q_put(&m_pool->pool, buff);
-
- m_pool->buff_cur++;
+ buff = kmem_cache_alloc(main_pool.buff_cache, GFP_ATOMIC);
+ if (!buff) {
+ sdp_warn("Failed to allocate buffer.");
+ return NULL;
}
- if (!main_pool->pool.head) {
- sdp_warn("Failed to allocate any buffers. <%d:%d:%d>",
- total, m_pool->buff_cur, m_pool->alloc_inc);
-
- return -ENOMEM;
+ buff->head = kmem_cache_alloc(main_pool.pool_cache, GFP_ATOMIC);
+ if (!buff->head) {
+ sdp_warn("Failed to allocate buffer page");
+ kmem_cache_free(main_pool.buff_cache, buff);
+ return NULL;
}
- return 0;
+ buff->end = buff->head + PAGE_SIZE;
+ buff->data = buff->head;
+ buff->tail = buff->head;
+ buff->sge.lkey = 0;
+ buff->sge.addr = 0;
+ buff->sge.length = 0;
+ buff->pool = NULL;
+ buff->type = SDP_DESC_TYPE_BUFF;
+ buff->release = sdp_buff_pool_put;
+ return buff;
}
/*
* sdp_buff_pool_init - Initialize the main buffer pool of memory
*/
-int sdp_buff_pool_init(int buff_min, int buff_max, int alloc_inc, int free_mark)
+int sdp_buff_pool_init(void)
{
int result;
- if (main_pool) {
- sdp_warn("Main pool already initialized!");
- return -EEXIST;
- }
-
- if (buff_min <= 0 ||
- alloc_inc <= 0 ||
- free_mark <= 0 ||
- buff_max < buff_min) {
-
- sdp_warn("Pool allocation count error. <%d:%d:%d:%d>",
- buff_min, buff_max, alloc_inc, free_mark);
- return -ERANGE;
- }
- /*
- * allocate the main pool structures
- */
- main_pool = kmalloc(sizeof(struct sdpc_buff_root), GFP_KERNEL);
- if (!main_pool) {
- sdp_warn("Main pool initialization failed.");
- result = -ENOMEM;
- goto done;
- }
-
- memset(main_pool, 0, sizeof(struct sdpc_buff_root));
-
- main_pool->buff_size = PAGE_SIZE;
- main_pool->buff_min = buff_min;
- main_pool->buff_max = buff_max;
- main_pool->alloc_inc = alloc_inc;
- main_pool->free_mark = free_mark;
-
- spin_lock_init(&main_pool->lock);
- sdp_buff_q_init(&main_pool->pool);
-
- main_pool->pool_cache = kmem_cache_create("sdp_buff_pool",
- sizeof(struct sdpc_buff_q),
- 0, SLAB_HWCACHE_ALIGN,
+ main_pool.pool_cache = kmem_cache_create("sdp_buff_pool",
+ PAGE_SIZE,
+ 0, 0,
NULL, NULL);
- if (!main_pool->pool_cache) {
+ if (!main_pool.pool_cache) {
sdp_warn("Failed to allocate pool cache.");
result = -ENOMEM;
goto error_pool;
}
- main_pool->buff_cache = kmem_cache_create("sdp_buff_desc",
+ main_pool.buff_cache = kmem_cache_create("sdp_buff_desc",
sizeof(struct sdpc_buff),
0, SLAB_HWCACHE_ALIGN,
NULL, NULL);
- if (!main_pool->buff_cache) {
+ if (!main_pool.buff_cache) {
sdp_warn("Failed to allocate buffer cache.");
result = -ENOMEM;
goto error_buff;
}
- /*
- * allocate the minimum number of buffers.
- */
- result = sdp_buff_pool_alloc(main_pool);
- if (result < 0) {
- sdp_warn("Error <%d> allocating buffers. <%d>",
- result, buff_min);
- goto error_alloc;
- }
- /*
- * done
- */
sdp_dbg_init("Main pool initialized with min:max <%d:%d> buffers.",
buff_min, buff_max);
- return 0; /* success */
-error_alloc:
- kmem_cache_destroy(main_pool->buff_cache);
+ return 0;
+
+ kmem_cache_destroy(main_pool.buff_cache);
error_buff:
- kmem_cache_destroy(main_pool->pool_cache);
+ kmem_cache_destroy(main_pool.pool_cache);
error_pool:
- kfree(main_pool);
-done:
- main_pool = NULL;
return result;
}
@@ -501,33 +385,8 @@ done:
*/
void sdp_buff_pool_destroy(void)
{
- if (!main_pool) {
- sdp_warn("Main pool dosn't exist.");
- return;
- }
- /*
- * Free all the buffers.
- */
- sdp_buff_pool_release(main_pool, main_pool->buff_cur);
- /*
- * Sanity check that the current number of buffers was released.
- */
- if (main_pool->buff_cur)
- sdp_warn("Leaking buffers during cleanup. <%d>",
- main_pool->buff_cur);
- /*
- * free pool cache
- */
- kmem_cache_destroy(main_pool->pool_cache);
- kmem_cache_destroy(main_pool->buff_cache);
- /*
- * free main
- */
- kfree(main_pool);
- main_pool = NULL;
- /*
- * done
- */
+ kmem_cache_destroy(main_pool.pool_cache);
+ kmem_cache_destroy(main_pool.buff_cache);
sdp_dbg_init("Main pool destroyed.");
}
@@ -537,37 +396,10 @@ void sdp_buff_pool_destroy(void)
struct sdpc_buff *sdp_buff_pool_get(void)
{
struct sdpc_buff *buff;
- int result;
- unsigned long flags;
-
- /*
- * get buffer
- */
- spin_lock_irqsave(&main_pool->lock, flags);
- if (!main_pool->pool.head) {
- result = sdp_buff_pool_alloc(main_pool);
- if (result < 0) {
- sdp_warn("Error <%d> allocating buffers.", result);
- spin_unlock_irqrestore(&main_pool->lock, flags);
- return NULL;
- }
- }
-
- buff = main_pool->pool.head;
-
- if (buff->next == buff)
- main_pool->pool.head = NULL;
- else {
- buff->next->prev = buff->prev;
- buff->prev->next = buff->next;
-
- main_pool->pool.head = buff->next;
- }
-
- main_pool->pool.size--;
-
- spin_unlock_irqrestore(&main_pool->lock, flags);
+ buff = sdp_buff_pool_alloc();
+ if (!buff)
+ return NULL;
buff->next = NULL;
buff->prev = NULL;
@@ -590,39 +422,7 @@ struct sdpc_buff *sdp_buff_pool_get(void
*/
void sdp_buff_pool_put(struct sdpc_buff *buff)
{
- unsigned long flags;
-
- if (!buff)
- return;
-
- BUG_ON(buff->pool);
- BUG_ON(buff->next || buff->prev);
- /*
- * reset pointers
- */
- buff->data = buff->head;
- buff->tail = buff->head;
- buff->pool = &main_pool->pool;
-
- spin_lock_irqsave(&main_pool->lock, flags);
-
- if (!main_pool->pool.head) {
- buff->next = buff;
- buff->prev = buff;
- main_pool->pool.head = buff;
- } else {
- buff->next = main_pool->pool.head;
- buff->prev = main_pool->pool.head->prev;
-
- buff->next->prev = buff;
- buff->prev->next = buff;
- }
-
- main_pool->pool.size++;
-
- sdp_buff_pool_release_check(main_pool);
-
- spin_unlock_irqrestore(&main_pool->lock, flags);
+ sdp_buff_pool_release(buff);
}
/*
@@ -630,20 +430,7 @@ void sdp_buff_pool_put(struct sdpc_buff
*/
void sdp_buff_pool_chain_link(struct sdpc_buff *head, struct sdpc_buff *buff)
{
- buff->data = buff->head;
- buff->tail = buff->head;
- buff->pool = &main_pool->pool;
-
- if (!head) {
- buff->next = buff;
- buff->prev = buff;
- } else {
- buff->next = head;
- buff->prev = head->prev;
-
- buff->next->prev = buff;
- buff->prev->next = buff;
- }
+ sdp_buff_pool_release(buff);
}
/*
@@ -651,38 +438,6 @@ void sdp_buff_pool_chain_link(struct sdp
*/
void sdp_buff_pool_chain_put(struct sdpc_buff *buff, u32 count)
{
- unsigned long flags;
- struct sdpc_buff *next;
- struct sdpc_buff *prev;
- /*
- * return an entire Link of buffers to the queue, this save on
- * lock contention for the buffer pool, for code paths where
- * a number of buffers are processed in a loop, before being
- * returned. (e.g. send completions, recv to userspace.
- */
- if (!buff || count <= 0)
- return;
-
- spin_lock_irqsave(&main_pool->lock, flags);
-
- if (!main_pool->pool.head)
- main_pool->pool.head = buff;
- else {
- prev = buff->prev;
- next = main_pool->pool.head->next;
-
- buff->prev = main_pool->pool.head;
- main_pool->pool.head->next = buff;
-
- prev->next = next;
- next->prev = prev;
- }
-
- main_pool->pool.size += count;
-
- sdp_buff_pool_release_check(main_pool);
-
- spin_unlock_irqrestore(&main_pool->lock, flags);
}
/*
@@ -690,62 +445,5 @@ void sdp_buff_pool_chain_put(struct sdpc
*/
int sdp_buff_pool_buff_size(void)
{
- int result;
-
- if (!main_pool)
- result = -1;
- else
- result = main_pool->buff_size;
-
- return result;
-}
-
-/*
- * sdp_proc_dump_buff_pool - write the buffer pool stats to a file (/proc)
- */
-int sdp_proc_dump_buff_pool(char *buffer, int max_size, off_t start_index,
- long *end_index)
-{
- unsigned long flags;
- int offset = 0;
-
- /*
- * simple table read, without page boundry handling.
- */
- *end_index = 0;
- /*
- * lock the table
- */
- spin_lock_irqsave(&main_pool->lock, flags);
-
- if (!start_index) {
- offset += sprintf(buffer + offset,
- " buffer size: %8d\n",
- main_pool->buff_size);
- offset += sprintf(buffer + offset,
- " buffers maximum: %8d\n",
- main_pool->buff_max);
- offset += sprintf(buffer + offset,
- " buffers minimum: %8d\n",
- main_pool->buff_min);
- offset += sprintf(buffer + offset,
- " buffers increment: %8d\n",
- main_pool->alloc_inc);
- offset += sprintf(buffer + offset,
- " buffers decrement: %8d\n",
- main_pool->free_mark);
- offset += sprintf(buffer + offset,
- " buffers allocated: %8d\n",
- main_pool->buff_cur);
- offset += sprintf(buffer + offset,
- " buffers available: %8d\n",
- main_pool->pool.size);
- offset += sprintf(buffer + offset,
- " buffers outstanding: %8d\n",
- main_pool->buff_cur - main_pool->pool.size);
- }
-
- spin_unlock_irqrestore(&main_pool->lock, flags);
-
- return offset;
+ return PAGE_SIZE;
}
Index: linux-2.6.13/drivers/infiniband/ulp/sdp/sdp_proc.c
===================================================================
--- linux-2.6.13.orig/drivers/infiniband/ulp/sdp/sdp_proc.c 2005-09-08 15:06:20.000000000 +0300
+++ linux-2.6.13/drivers/infiniband/ulp/sdp/sdp_proc.c 2005-09-08 15:57:45.000000000 +0300
@@ -85,12 +85,6 @@ static int sdp_proc_read_parse(char *pag
static struct sdpc_proc_ent file_entry_list[SDP_PROC_ENTRIES] = {
{
.entry = NULL,
- .type = SDP_PROC_ENTRY_MAIN_BUFF,
- .name = "buffer_pool",
- .read = sdp_proc_dump_buff_pool,
- },
- {
- .entry = NULL,
.type = SDP_PROC_ENTRY_MAIN_CONN,
.name = "conn_main",
.read = sdp_proc_dump_conn_main,
Index: linux-2.6.13/drivers/infiniband/ulp/sdp/sdp_proc.h
===================================================================
--- linux-2.6.13.orig/drivers/infiniband/ulp/sdp/sdp_proc.h 2005-09-08 15:06:20.000000000 +0300
+++ linux-2.6.13/drivers/infiniband/ulp/sdp/sdp_proc.h 2005-09-08 15:57:45.000000000 +0300
@@ -48,12 +48,11 @@
* proc filesystem framework table/file entries
*/
enum sdp_proc_ent_list {
- SDP_PROC_ENTRY_MAIN_BUFF = 0, /* buffer pool */
- SDP_PROC_ENTRY_MAIN_CONN = 1, /* connection table */
- SDP_PROC_ENTRY_DATA_CONN = 2, /* connection table */
- SDP_PROC_ENTRY_RDMA_CONN = 3, /* connection table */
- SDP_PROC_ENTRY_OPT_CONN = 4, /* socket option table */
- SDP_PROC_ENTRY_ROOT_TABLE = 5, /* device table */
+ SDP_PROC_ENTRY_MAIN_CONN = 0, /* connection table */
+ SDP_PROC_ENTRY_DATA_CONN = 1, /* connection table */
+ SDP_PROC_ENTRY_RDMA_CONN = 2, /* connection table */
+ SDP_PROC_ENTRY_OPT_CONN = 3, /* socket option table */
+ SDP_PROC_ENTRY_ROOT_TABLE = 4, /* device table */
SDP_PROC_ENTRIES /* number of entries in framework */
};
Index: linux-2.6.13/drivers/infiniband/ulp/sdp/sdp_proto.h
===================================================================
--- linux-2.6.13.orig/drivers/infiniband/ulp/sdp/sdp_proto.h 2005-09-08 15:57:21.000000000 +0300
+++ linux-2.6.13/drivers/infiniband/ulp/sdp/sdp_proto.h 2005-09-08 16:02:50.000000000 +0300
@@ -93,18 +93,10 @@ struct sdpc_buff *sdp_buff_q_fetch(struc
void *arg),
void *usr_arg);
-int sdp_buff_pool_init(int buff_min,
- int buff_max,
- int alloc_inc,
- int free_mark);
+int sdp_buff_pool_init(void);
void sdp_buff_pool_destroy(void);
-int sdp_proc_dump_buff_pool(char *buffer,
- int max_size,
- off_t start_index,
- long *end_index);
-
/*
* Wall between userspace protocol and SDP protocol proper
*/
Index: linux-2.6.13/drivers/infiniband/ulp/sdp/sdp_dev.h
===================================================================
--- linux-2.6.13.orig/drivers/infiniband/ulp/sdp/sdp_dev.h 2005-09-08 16:01:48.000000000 +0300
+++ linux-2.6.13/drivers/infiniband/ulp/sdp/sdp_dev.h 2005-09-08 16:02:06.000000000 +0300
@@ -114,13 +114,6 @@
#define SDP_SEND_POST_SLOW 0x01
#define SDP_SEND_POST_COUNT 0x0A
/*
- * Buffer pool initialization defaul values.
- */
-#define SDP_BUFF_POOL_COUNT_MIN 1024
-#define SDP_BUFF_POOL_COUNT_MAX 1048576
-#define SDP_BUFF_POOL_COUNT_INC 128
-#define SDP_BUFF_POOL_FREE_MARK 1024
-/*
* SDP experimental parameters.
*/
Index: linux-2.6.13/drivers/infiniband/ulp/sdp/sdp_inet.c
===================================================================
--- linux-2.6.13.orig/drivers/infiniband/ulp/sdp/sdp_inet.c 2005-09-08 15:59:32.000000000 +0300
+++ linux-2.6.13/drivers/infiniband/ulp/sdp/sdp_inet.c 2005-09-08 16:00:28.000000000 +0300
@@ -42,10 +42,6 @@
* list of connections waiting for an incoming connection
*/
static int proto_family = AF_INET_SDP;
-static int buff_min = SDP_BUFF_POOL_COUNT_MIN;
-static int buff_max = SDP_BUFF_POOL_COUNT_MAX;
-static int alloc_inc = SDP_BUFF_POOL_COUNT_INC;
-static int free_mark = SDP_BUFF_POOL_FREE_MARK;
static int conn_size = SDP_DEV_SK_LIST_SIZE;
static int recv_post_max = SDP_CQ_RECV_SIZE;
@@ -64,14 +60,6 @@ module_param(proto_family, int, 0);
MODULE_PARM_DESC(proto_family,
"Override the default protocol family value of 27.");
-module_param(buff_min, int, 0);
-MODULE_PARM_DESC(buff_min,
- "Set the minimum number of buffers to allocate.");
-
-module_param(buff_max, int, 0);
-MODULE_PARM_DESC(buff_max,
- "Set the maximum number of buffers to allocate.");
-
module_param(conn_size, int, 0);
MODULE_PARM_DESC(conn_size,
"Set the maximum number of active sockets.");
@@ -1366,10 +1354,7 @@ static int __init sdp_init(void)
/*
* buffer memory
*/
- result = sdp_buff_pool_init(buff_min,
- buff_max,
- alloc_inc,
- free_mark);
+ result = sdp_buff_pool_init();
if (result < 0) {
sdp_warn("Error <%d> initializing buffer pool.", result);
goto error_buff;