[Openib-windows] RE: [PATCH] query-cq

Yossi Leybovich sleybo at mellanox.co.il
Wed Sep 7 04:59:21 PDT 2005



> -----Original Message-----
> From: Fab Tillier [mailto:ftillier at silverstorm.com] 
> Sent: Tuesday, September 06, 2005 10:41 PM
> To: 'Yossi Leybovich'
> Cc: openib-windows at openib.org; Ziv Farjun
> Subject: RE: [PATCH] query-cq
> 
> 
> > From: Yossi Leybovich [mailto:sleybo at mellanox.co.il]
> > Sent: Tuesday, September 06, 2005 7:16 AM
> > 
> > Hello Fab,
> >
> > Attached is a patch for query_cq; the patch also includes a few
> > checks for the alts test. Our verification team found that the size
> > returned by query_cq is different from the size that was returned by
> > the create_cq verb.
> > The reason is that when creating a CQ the THH layer allocates some
> > spare CQEs (for DB coalescing) and returns the size that was requested
> > by the user, but the query_cq IOCTL uses the kernel tables for the
> > size, which hold the actual size (usually a bigger size).
> 
> Isn't the bigger size the correct one to return?  This seems 
> to be what current verb implementations do, and what the IB 
> spec suggests.  If it isn't, don't kernel clients end up 
> getting the wrong value when they query their CQ size?

The size that was requested is the size that should be returned by
query_cq/create_cq (both in the kernel and in the user-mode driver);
this enables the driver to use the DB coalescing mechanism.
Both the kernel and user-mode create_cq should return this size:
 
*num_o_cqes_p= (1U << new_cq->cur_buf.log2_num_o_cqes) - 1 -
 	new_cq->cur_buf.spare_cqes;

You can see in poll4wc how we use the spare CQEs to save DBs.
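
As a minimal, self-contained sketch of that arithmetic (the wrapper struct,
function name and the numbers are invented for illustration; only the field
names log2_num_o_cqes and spare_cqes come from the formula above):

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-in for the relevant THHUL_cq_t buffer fields. */
struct cq_buf_sketch {
    uint32_t log2_num_o_cqes; /* buffer holds 2^log2_num_o_cqes CQEs */
    uint32_t spare_cqes;      /* entries reserved for DB coalescing  */
};

/* The size create_cq/query_cq should report: the power-of-two buffer
 * minus one entry and minus the spare CQEs, i.e. the depth the user
 * actually requested. */
static uint32_t reported_cq_size(const struct cq_buf_sketch *buf)
{
    return (1U << buf->log2_num_o_cqes) - 1u - buf->spare_cqes;
}

int main(void)
{
    /* Invented numbers: a 1024-entry buffer with 523 spare CQEs reports
     * 500 usable CQEs, while the kernel tables hold the bigger actual
     * size. */
    struct cq_buf_sketch buf = { .log2_num_o_cqes = 10, .spare_cqes = 523 };
    printf("reported CQ size: %u\n", reported_cq_size(&buf));
    return 0;
}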

> 
> The value should be the same as what was returned in the call 
> to create_cq, which is 
> (hw\mt23108\vapi\Hca\hcahal\tavor\thhul_cqm\thhul_cqm.c at 890):
> 
> *num_o_cqes_p= (1U << new_cq->cur_buf.log2_num_o_cqes) - 1 -
> 	new_cq->cur_buf.spare_cqes;
> 
> If the kernel clients get fixed, then the user-mode clients 
> automatically get the right value and we only need to update 
> the ALTS test.

The issue is that for a user-level (UL) CQ the kernel only holds the final
(actual) size, so the query should not go down to the kernel but should
complete at user level (the kernel does not know about the spare CQEs);
see the sketch below.
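
To make the intended split explicit, here is a simplified, self-contained
sketch of the user-mode query path (all sk_* names and types are invented
stand-ins; the real change is in the ual_cq.c and mlnx_ual_cq.c hunks of
the attached patch):

#include <stdint.h>
#include <stdio.h>

/* Invented, simplified status codes mirroring the AL/UVP ones. */
typedef enum { SK_SUCCESS, SK_VERBS_PROCESSING_DONE, SK_ERROR } sk_status_t;

/* Minimal stand-in for the user-mode CQ context, which caches the size
 * reported at create time (requested size, spare CQEs already deducted). */
typedef struct { uint32_t cached_cq_size; } sk_uvp_cq_t;

/* UVP pre-call: for a user-level CQ the answer is fully known here,
 * so signal that no kernel transition is needed. */
static sk_status_t sk_pre_query_cq(sk_uvp_cq_t *cq) {
    (void)cq;
    return SK_VERBS_PROCESSING_DONE;
}

/* UVP post-call: hand back the cached size. */
static void sk_post_query_cq(sk_uvp_cq_t *cq, uint32_t *p_size) {
    *p_size = cq->cached_cq_size;
}

/* Proxy-side flow: only fall through to the kernel IOCTL when the UVP
 * did not complete the query in user mode. */
static sk_status_t sk_query_cq(sk_uvp_cq_t *cq, uint32_t *p_size) {
    sk_status_t status = sk_pre_query_cq(cq);
    if (status == SK_VERBS_PROCESSING_DONE) {
        sk_post_query_cq(cq, p_size);
        return SK_SUCCESS;
    }
    if (status != SK_SUCCESS)
        return status;
    /* Kernel-created CQ: the IOCTL path (now backed by
     * THHUL_cqm_query_cq) would run here. */
    return SK_ERROR;
}

int main(void) {
    sk_uvp_cq_t cq = { .cached_cq_size = 500 };
    uint32_t size = 0;
    if (sk_query_cq(&cq, &size) == SK_SUCCESS)
        printf("query_cq reports %u CQEs\n", size);
    return 0;
}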

I attached a patch that returns the correct size also for CQs that were
created in the kernel (plus the previous changes).

I didn't manage to test it because I could not make alts work in the
kernel. (BTW, does alts work at kernel level? How?)

Thanks
Yossi 


Signed-off-by: Yossi Leybovich (sleybo at mellanox.co.il)

Index: core/al/user/ual_cq.c
===================================================================
--- core/al/user/ual_cq.c	(revision 351)
+++ core/al/user/ual_cq.c	(working copy)
@@ -306,13 +306,24 @@
 	{
 		/* Pre call to the UVP library */
 		status = uvp_intf.pre_query_cq( h_cq->h_ci_cq, &cq_ioctl.in.umv_buf );
-		if( status != IB_SUCCESS )
+		if( status == IB_VERBS_PROCESSING_DONE )
 		{
+			/* Query is done entirely in user mode.  Issue the post call */
+			if( uvp_intf.post_query_cq)
+			{
+				uvp_intf.post_query_cq( h_cq->h_ci_cq,
+					IB_SUCCESS, p_size, &cq_ioctl.out.umv_buf );
+			}
 			AL_EXIT( AL_DBG_CQ );
+			return IB_SUCCESS;
+		}
+		else if( status != IB_SUCCESS )
+		{
+			AL_EXIT( AL_DBG_CQ );
 			return status;
 		}
 	}
-
+		
 	cq_ioctl.in.h_cq = h_cq->obj.hdl;
 
 	cl_status = do_al_dev_ioctl( UAL_QUERY_CQ,
@@ -329,17 +340,18 @@
 	else
 	{
 		status = cq_ioctl.out.status;
-		if( status == IB_SUCCESS )
-			*p_size = cq_ioctl.out.size;
 	}
 
 	/* Post uvp call */
 	if( h_cq->h_ci_cq && uvp_intf.post_query_cq )
 	{
 		uvp_intf.post_query_cq( h_cq->h_ci_cq,
-			status, cq_ioctl.out.size, &cq_ioctl.out.umv_buf );
+			status, &cq_ioctl.out.size, &cq_ioctl.out.umv_buf );
 	}
 
+	if( status == IB_SUCCESS )
+		*p_size = cq_ioctl.out.size;
+	
 	AL_EXIT( AL_DBG_CQ );
 	return status;
 }
Index: hw/mt23108/kernel/hca_verbs.c
===================================================================
--- hw/mt23108/kernel/hca_verbs.c	(revision 351)
+++ hw/mt23108/kernel/hca_verbs.c	(working copy)
@@ -2119,7 +2119,7 @@
 	ib_api_status_t		status;
 
 	u_int32_t			hca_idx = CQ_HCA_FROM_HNDL(h_cq);
-	u_int32_t			cq_num  = CQ_NUM_FROM_HNDL(h_cq);
+	u_int32_t			cq_num  = CQ_NUM_FROM_HNDL(h_cq);

 	u_int32_t			cq_idx;
 	mlnx_hobul_t		*hobul_p;
 	HHUL_cq_hndl_t		hhul_cq_hndl;
@@ -2130,25 +2130,26 @@
 		status = IB_INVALID_PARAMETER;
 		goto cleanup;
 	}
+
 	VALIDATE_INDEX(hca_idx,   MLNX_MAX_HCA, IB_INVALID_CQ_HANDLE, cleanup);
 	hobul_p = mlnx_hobul_array[hca_idx];
 	if (NULL == hobul_p) {
 		status = IB_INVALID_CQ_HANDLE;
 		goto cleanup;
 	}
-
+	
 	cq_idx = cq_num & hobul_p->cq_idx_mask;
 	VALIDATE_INDEX(cq_idx, hobul_p->max_cq, IB_INVALID_CQ_HANDLE, cleanup);
 	if ( E_MARK_CQ != hobul_p->cq_info_tbl[cq_idx].mark) {
 		status =  IB_INVALID_CQ_HANDLE;
 		goto cleanup;
-	}
+	}	
 
 	cl_mutex_acquire(&hobul_p->cq_info_tbl[cq_idx].mutex);
 
 	hhul_cq_hndl = hobul_p->cq_info_tbl[cq_idx].hhul_cq_hndl;
-
-	if (HH_OK != THH_hob_query_cq(hobul_p->hh_hndl, cq_num, p_size)) {
+	if (HH_OK !=  THHUL_cqm_query_cq(hobul_p->hhul_hndl, hhul_cq_hndl, p_size)){
+//	if (HH_OK != THH_hob_query_cq(hobul_p->hh_hndl, cq_num, p_size)) {
 		status = IB_ERROR;
 		goto cleanup_locked;
 	}
Index: hw/mt23108/user/mlnx_ual_cq.c
===================================================================
--- hw/mt23108/user/mlnx_ual_cq.c	(revision 351)
+++ hw/mt23108/user/mlnx_ual_cq.c	(working copy)
@@ -48,8 +48,8 @@
     p_uvp->pre_create_cq  = mlnx_pre_create_cq;
     p_uvp->post_create_cq = mlnx_post_create_cq;
   
-    p_uvp->pre_query_cq  = NULL;
-    p_uvp->post_query_cq = NULL;
+    p_uvp->pre_query_cq  = mlnx_pre_query_cq;
+    p_uvp->post_query_cq = mlnx_post_query_cq;
 
     p_uvp->pre_resize_cq  = mlnx_pre_resize_cq;
     p_uvp->post_resize_cq = mlnx_post_resize_cq;
@@ -441,22 +441,37 @@
     CL_ASSERT(p_umv_buf);
     p_umv_buf->command = TRUE;
     FUNC_EXIT;
-    return IB_SUCCESS;
+    return IB_VERBS_PROCESSING_DONE;
 }
 
 
 void
 mlnx_post_query_cq (
-    IN		const ib_cq_handle_t		h_uvp_cq,
-    IN		ib_api_status_t			ioctl_status,
-    IN OUT	ci_umv_buf_t			*p_umv_buf)
+    IN		const 	ib_cq_handle_t		h_uvp_cq,
+    IN				ib_api_status_t		ioctl_status,
+    IN OUT			uint32_t*  const 		p_size,
+    IN 				ci_umv_buf_t			*p_umv_buf)
 {
+    ib_api_status_t status;
+    mlnx_ual_cq_info_t *p_cq_info = (mlnx_ual_cq_info_t *)((void*)h_uvp_cq);
+
     FUNC_ENTER;
+    CL_ASSERT(p_umv_buf);
+    CL_ASSERT(p_cq_info);
+
+    status = ioctl_status;
+    if (status == IB_SUCCESS)
+    {
+    	*p_size = p_cq_info->cq_size;
+
+        status = IB_VERBS_PROCESSING_DONE;
+    }
+    
     FUNC_EXIT;
-    return;
+    return;	
 }
+   
 
-
 ib_api_status_t
 mlnx_pre_destroy_cq (
     IN		const ib_cq_handle_t			h_uvp_cq)
Index: hw/mt23108/user/mlnx_ual_main.h
===================================================================
--- hw/mt23108/user/mlnx_ual_main.h	(revision 351)
+++ hw/mt23108/user/mlnx_ual_main.h	(working copy)
@@ -257,6 +257,7 @@
 mlnx_post_query_cq (
     IN		const ib_cq_handle_t		h_uvp_cq,
     IN		ib_api_status_t				ioctl_status,
+    IN OUT	uint32_t* 			const p_size,
     IN OUT	ci_umv_buf_t				*p_umv_buf);
 
 ib_api_status_t  
Index: hw/mt23108/vapi/Hca/hcahal/hhul.h
===================================================================
--- hw/mt23108/vapi/Hca/hcahal/hhul.h	(revision 351)
+++ hw/mt23108/vapi/Hca/hcahal/hhul.h	(working copy)
@@ -229,6 +229,10 @@
                               VAPI_cqe_num_t   cqe_num);
 
 
+  HH_ret_t  (*HHULIF_query_cq)(HHUL_hca_hndl_t  hca_hndl,
+                              HHUL_cq_hndl_t   cq,
+                              VAPI_cqe_num_t*  num_o_cqes_p);
+
   HH_ret_t  (*HHULIF_req_comp_notif)(HHUL_hca_hndl_t       hca_hndl,
                                      HHUL_cq_hndl_t        cq,
                                      VAPI_cq_notif_type_t  notif_type);
Index: hw/mt23108/vapi/Hca/hcahal/tavor/thhul_cqm/thhul_cqm.c
===================================================================
--- hw/mt23108/vapi/Hca/hcahal/tavor/thhul_cqm/thhul_cqm.c	(revision 351)
+++ hw/mt23108/vapi/Hca/hcahal/tavor/thhul_cqm/thhul_cqm.c	(working copy)
@@ -1540,14 +1540,14 @@
 THHUL_cqm_count_cqe( 
 	IN				HHUL_hca_hndl_t		hca_hndl,
 	IN				HHUL_cq_hndl_t		cq,
-		OUT			uint32_t* const		p_n_cqes )
+	OUT				uint32_t* const		p_n_cqes )
 {
   THHUL_cq_t *cq_p= (THHUL_cq_t*)cq;
   VAPI_cqe_num_t	cqe_num;
 
   /* parameters checks */
   if (cq_p == NULL) {
-    MTL_ERROR1("THHUL_cqm_peek_cq: NULL CQ handle.\n");
+    MTL_ERROR1("THHUL_cqm_count_cqe: NULL CQ handle.\n");
     return IB_INVALID_CQ_HANDLE;
   }
 
@@ -1608,6 +1608,31 @@
 }
 
 
+
+HH_ret_t THHUL_cqm_query_cq( 
+  /*IN*/ HHUL_hca_hndl_t hca_hndl, 
+  /*IN*/ HHUL_cq_hndl_t cq, 
+  /*OUT*/ VAPI_cqe_num_t  *num_o_cqes_p)
+{
+  THHUL_cq_t *cq_p= (THHUL_cq_t*)cq;
+  HH_ret_t ret=HH_OK;
+
+
+  /* parameters checks */
+  if (cq_p == NULL) {
+    MTL_ERROR1("THHUL_cqm_query_cq: NULL CQ handle.\n");
+    return HH_EINVAL_CQ_HNDL;
+  }
+
+  /* Find CQE and check ownership */
+  MOSAL_spinlock_dpc_lock(&(cq_p->cq_lock));
+    *num_o_cqes_p=   ((1U << cq_p->cur_buf.log2_num_o_cqes) -1 - cq_p->cur_buf.spare_cqes) ;
+  
+  MOSAL_spinlock_unlock(&(cq_p->cq_lock));    
+
+  return ret; 
+}
+
 static void  rearm_cq(THHUL_cq_t *cq_p, MT_bool solicitedNotification) {
         volatile u_int32_t chimeWords[2];
         THH_uar_t uar = cq_p->uar;
Index: hw/mt23108/vapi/Hca/hcahal/tavor/thhul_cqm/thhul_cqm.h
===================================================================
--- hw/mt23108/vapi/Hca/hcahal/tavor/thhul_cqm/thhul_cqm.h	(revision 351)
+++ hw/mt23108/vapi/Hca/hcahal/tavor/thhul_cqm/thhul_cqm.h	(working copy)
@@ -107,9 +107,15 @@
 THHUL_cqm_count_cqe( 
 	IN				HHUL_hca_hndl_t		hca_hndl,
 	IN				HHUL_cq_hndl_t		cq,
-		OUT			uint32_t* const		p_n_cqes );
+	OUT			uint32_t* const		p_n_cqes );
 #endif
 
+DLL_API HH_ret_t THHUL_cqm_query_cq( 
+  /*IN*/ HHUL_hca_hndl_t hca_hndl, 
+  /*IN*/ HHUL_cq_hndl_t cq, 
+/*OUT*/ VAPI_cqe_num_t  *num_o_cqes_p
+);
+
 DLL_API HH_ret_t THHUL_cqm_peek_cq( 
   /*IN*/ HHUL_hca_hndl_t hca_hndl, 
   /*IN*/ HHUL_cq_hndl_t cq, 
Index: hw/mt23108/vapi/Hca/hcahal/tavor/thhul_hob/thhul_hob.c
===================================================================
--- hw/mt23108/vapi/Hca/hcahal/tavor/thhul_hob/thhul_hob.c	(revision 351)
+++ hw/mt23108/vapi/Hca/hcahal/tavor/thhul_hob/thhul_hob.c	(working copy)
@@ -86,6 +86,7 @@
   NULL        /* HHULIF_poll4cqe          */,
   NULL /* HHULIF_poll_and_rearm_cq */,  
   THHUL_cqm_peek_cq         /* HHULIF_peek_cq           */,
+  THHUL_cqm_query_cq         /* HHULIF_query_cq           */,
   THHUL_cqm_req_comp_notif  /* HHULIF_req_comp_notif    */,
   THHUL_cqm_req_ncomp_notif /* HHULIF_req_ncomp_notif   */,
   THHUL_cqm_destroy_cq_done /* HHULIF_destroy_cq_done   */,
Index: inc/user/iba/ib_uvp.h
===================================================================
--- inc/user/iba/ib_uvp.h	(revision 351)
+++ inc/user/iba/ib_uvp.h	(working copy)
@@ -1789,7 +1789,7 @@
 (AL_API *uvp_post_query_cq_t) (
 	IN		const	ib_cq_handle_t			h_uvp_cq,
 	IN				ib_api_status_t			ioctl_status,
-	IN		const	uint32_t				size,
+	IN OUT			uint32_t				*size,
 	IN				ci_umv_buf_t			*p_umv_buf );
 
 /*
@@ -1803,7 +1803,7 @@
 *	ioctl_status
 *		[in] The ioctl status of the AL API.
 *	size
-*		[in] The size of the CQ retuned by the IOCTL.
+*		[in out] On input, the size of the CQ returned by the IOCTL.
 *	p_umv_buf
 *		[in out] On input, it contains any vendor-specific private information
 *		exchanged with the vendor's Verbs Provider Driver (uvp_pre_query_cq).
