[ofw] [Patch][IPoIB_NDIS6_CM] Recv shutter fix when init process fails
Fab Tillier
ftillier at microsoft.com
Wed Nov 10 09:20:44 PST 2010
Seems like you included the wrong patch?
From: ofw-bounces at lists.openfabrics.org [mailto:ofw-bounces at lists.openfabrics.org] On Behalf Of Alex Naslednikov
Sent: Wednesday, November 10, 2010 7:11 AM
To: ofw at lists.openfabrics.org
Subject: [ofw] [Patch][IPoIB_NDIS6_CM] Recv shutter fix when init process fails
This patch fixes the case where the init process fails (say, __ib_mgr_init fails) and ipoib_restart is then called.
The shutter is shut for the first time at the end of the init process. Thus, if init fails, the shutter will never
have been shut, while ipoib_restart will still try to make such a shutter alive.
Signed-off-by: Alexander Naslednikov (xalex at mellanox.co.il)
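For context, the failure pattern described above looks roughly like this (a minimal sketch; the shutter type and the shutter_shut()/shutter_alive() helpers below are illustrative stand-ins, not the actual shutter.h API):

#include <assert.h>
#include <stdbool.h>

/* Illustrative stand-in for the recv shutter: shut() blocks new users,
 * alive() re-opens it, and the two calls must stay balanced. */
typedef struct { bool is_shut; } shutter_t;

static void shutter_shut( shutter_t *p )  { assert( !p->is_shut ); p->is_shut = true;  }
static void shutter_alive( shutter_t *p ) { assert( p->is_shut );  p->is_shut = false; }

static bool port_init( shutter_t *p_shutter, bool ib_mgr_init_ok )
{
	if( !ib_mgr_init_ok )
		return false;              /* early exit: shutter_shut() below is never reached */
	/* ... rest of port bring-up ... */
	shutter_shut( p_shutter );     /* shutter is shut for the first time only here */
	return true;
}

int main( void )
{
	shutter_t recv_shutter = { false };
	if( !port_init( &recv_shutter, false /* simulate __ib_mgr_init failure */ ) )
		shutter_alive( &recv_shutter );   /* ipoib_restart path: fires the assert,
		                                   * since the shutter was never shut */
	return 0;
}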
Index: ulp/ipoib_NDIS6_CM/kernel/ipoib_port.cpp
===================================================================
--- ulp/ipoib_NDIS6_CM/kernel/ipoib_port.cpp (revision 2986)
+++ ulp/ipoib_NDIS6_CM/kernel/ipoib_port.cpp (working copy)
@@ -1110,10 +1110,23 @@
qp_create.rq_sge = 2; /* To support buffers spanning pages. */
qp_create.h_rq_cq = p_port->ib_mgr.h_recv_cq;
qp_create.sq_depth = p_port->p_adapter->params.sq_depth;
-
+
+ // p_ca_attrs->max_sges holds the maximum number of SG elements
+ // supported by the HW. 3 of them are reserved for internal use.
+ // Thus, the maximum SGE count for a UD QP is limited to (p_ca_attrs->max_sges - 3).
#define UD_QP_USED_SGE 3
- qp_create.sq_sge = MAX_SEND_SGE < p_port->p_ca_attrs->max_sges ?
- MAX_SEND_SGE : ( p_port->p_ca_attrs->max_sges - UD_QP_USED_SGE );
+ if ( p_port->p_ca_attrs->max_sges > MAX_SEND_SGE )
+ {
+ p_port->max_sq_sge_supported = MAX_SEND_SGE - UD_QP_USED_SGE;
+ }
+ else
+ {
+ p_port->max_sq_sge_supported = p_port->p_ca_attrs->max_sges - UD_QP_USED_SGE;
+ }
+
+ p_port->p_ca_attrs->max_sges -= UD_QP_USED_SGE;
+ qp_create.sq_sge = p_port->max_sq_sge_supported;
+
if ( !p_port->p_ca_attrs->ipoib_csum )
{
/* checksum is not supported by device
@@ -1162,6 +1175,11 @@
p_port->p_adapter->p_ifc->get_err_str( status )) );
return status;
}
+ if ( qp_attr.sq_sge < qp_create.sq_sge )
+ {
+ ASSERT ( FALSE );
+ p_port->max_sq_sge_supported = qp_attr.sq_sge;
+ }
p_port->ib_mgr.qpn = qp_attr.num;
/* Register all of physical memory */
@@ -3799,7 +3817,7 @@
}
/* Remember that one of the DS entries is reserved for the IPoIB header. */
- if( num_pages >= MAX_SEND_SGE )
+ if( num_pages >= p_port->max_sq_sge_supported )
{
IPOIB_PRINT(TRACE_LEVEL_INFORMATION, IPOIB_DBG_SEND,
("Too many buffers to fit in WR ds_array. Copying data.\n") );
@@ -4270,13 +4288,14 @@
Copy only N+1-MAX_SEND_SGE, where N is the length of the SG list.
Remember that one of the DS entries is reserved for the IPoIB header.
*/
- if( ( p_sgl->NumberOfElements >= MAX_SEND_SGE ||
+ if( ( p_sgl->NumberOfElements >= s_buf->p_port->max_sq_sge_supported ||
p_sgl->Elements[0].Length < sizeof(eth_hdr_t)) )
{
IPOIB_PRINT( TRACE_LEVEL_WARNING, IPOIB_DBG_SEND,
("Too many buffers %d to fit in WR ds_array[%d] \
Or buffer[0] length %d < Eth header. Copying data.\n",
- p_sgl->NumberOfElements, MAX_SEND_SGE, p_sgl->Elements[0].Length ) );
+ p_sgl->NumberOfElements, s_buf->p_port->max_sq_sge_supported,
+ p_sgl->Elements[0].Length ) );
if( !s_buf->p_port->p_adapter->params.cm_enabled )
{
Index: ulp/ipoib_NDIS6_CM/kernel/ipoib_port.h
===================================================================
--- ulp/ipoib_NDIS6_CM/kernel/ipoib_port.h (revision 2982)
+++ ulp/ipoib_NDIS6_CM/kernel/ipoib_port.h (working copy)
@@ -590,6 +590,7 @@
struct _ipoib_adapter *p_adapter;
uint8_t port_num;
+ uint32_t max_sq_sge_supported;
KEVENT sa_event;
@@ -653,6 +654,9 @@
* port_num
* Port number of this adapter.
*
+* max_sq_sge_supported
+* Maximum number of send QP SG elements supported by HW
+*
* ib_mgr
* IB resource manager.
*
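For reference, the net effect of the sq_sge hunk above reduces to the following clamp (a standalone sketch; the value of MAX_SEND_SGE here is an example, not the driver's actual constant, while UD_QP_USED_SGE is 3 as in the patch):

#include <stdint.h>

#define MAX_SEND_SGE    8    /* driver's software limit - example value only */
#define UD_QP_USED_SGE  3    /* SG elements reserved for internal use */

static uint32_t clamp_sq_sge( uint32_t hw_max_sges )
{
	/* Whichever bound wins, UD_QP_USED_SGE entries stay reserved,
	 * so the usable send SGE count is always (bound - UD_QP_USED_SGE). */
	if( hw_max_sges > MAX_SEND_SGE )
		return MAX_SEND_SGE - UD_QP_USED_SGE;
	return hw_max_sges - UD_QP_USED_SGE;
}

Note that the sq_sge actually granted by the verbs layer (qp_attr.sq_sge) may still be lower than requested, which is why the hunk at line 1175 re-checks it and lowers max_sq_sge_supported accordingly.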
Alexander (XaleX) Naslednikov
SW Networking Team
Mellanox Technologies