[openib-general] [PATCH]: GSI: Eliminate spinlock wrappers

Hal Rosenstock halr at voltaire.com
Mon Aug 9 09:38:38 PDT 2004


This patch eliminates the use of the spinlock wrappers in RMPP, replacing
them with the native kernel spinlock primitives. Hence, it also eliminates
the need for rmpp_lock.[ch].
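
For reference, the wrapper-to-native mapping applied throughout the patch
is sketched below. This is illustrative only, not part of the patch; the
h_send name and RMPP_LOG/RMPP_IB_ERROR values are taken from rmpp.c. The
_bh variants are used presumably because the locks are also taken from the
timer and MAD dispatcher callbacks.

/*
 * Before (wrapper API from the removed rmpp_lock.[ch]):
 * construct, fallible init, explicit acquire/release.
 */
rmpp_spinlock_construct(&h_send->lock);
if (rmpp_spinlock_init(&h_send->lock) != RMPP_SUCCESS) {
	RMPP_LOG(RMPP_LOG_ERROR, "ERR: rmpp_spinlock_init error\n");
	return RMPP_IB_ERROR;
}
rmpp_spinlock_acquire(&h_send->lock);
/* ... critical section ... */
rmpp_spinlock_release(&h_send->lock);

/*
 * After (native kernel spinlock): spin_lock_init() cannot fail, so the
 * error path disappears, and the field type in rmpp.h changes from
 * struct rmpp_spinlock_t to spinlock_t.
 */
#include <linux/spinlock.h>	/* if not already included indirectly */

spin_lock_init(&h_send->lock);
spin_lock_bh(&h_send->lock);
/* ... critical section ... */
spin_unlock_bh(&h_send->lock);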

Index: rmpp/rmpp.c
===================================================================
--- rmpp/rmpp.c	(revision 598)
+++ rmpp/rmpp.c	(working copy)
@@ -484,7 +484,7 @@
 		 "h_send %p INIT, p_mad_element %p, p_mad_buf %p\n", h_send,
 		 p_mad_element, p_mad_element->p_mad_buf);
 
-	/* version 1 support only currently. */
+	/* version 1 support only. */
 	if (p_mad_element->rmpp_version != DEFAULT_RMPP_VERSION) {
 		RMPP_LOG(RMPP_LOG_ERROR,
 			 "ERR: p_mad_element %p WRONG RMPP VERSION %d\n",
@@ -492,12 +492,7 @@
 		return RMPP_IB_INVALID_SETTING;
 	}
 
-	rmpp_spinlock_construct(&h_send->lock);
-	if (rmpp_spinlock_init(&h_send->lock) != RMPP_SUCCESS) {
-		RMPP_LOG(RMPP_LOG_ERROR, "ERR: rmpp_spinlock_init error\n");
-		return RMPP_IB_ERROR;
-	}
-
+	spin_lock_init(&h_send->lock);
 	INIT_LIST_HEAD(&h_send->req_list);
 	h_send->busy = FALSE;
 
@@ -554,12 +549,7 @@
 	}
 #endif
 
-	rmpp_spinlock_construct(&h_send->lock);
-	if (rmpp_spinlock_init(&h_send->lock) != RMPP_SUCCESS) {
-		RMPP_LOG(RMPP_LOG_ERROR, "ERR: rmpp_spinlock_init error\n");
-		return RMPP_IB_ERROR;
-	}
-
+	spin_lock_init(&h_send->lock);
 	INIT_LIST_HEAD(&h_send->req_list);
 	h_send->busy = FALSE;
 
@@ -613,7 +603,7 @@
 		goto exit;
 	}
 
-	rmpp_spinlock_acquire(&h_send->lock);
+	spin_lock_bh(&h_send->lock);
 
 	/* Reset information to track the send. */
 	h_send->retry_time = MAX_TIME;
@@ -705,7 +695,7 @@
 		RMPP_LOG(RMPP_LOG_DEBUG, "h_send %p DEBUG - drop a packet\n",
 			 h_send);
 
-		rmpp_spinlock_release(&h_send->lock);
+		spin_unlock_bh(&h_send->lock);
 		__set_retry_time(h_send);
 		rmpp_timer_trim(&((struct rmpp_info_t *) (rmpp_h))->send_timer,
 				h_send->p_send_mad->timeout_ms);
@@ -728,7 +718,7 @@
 	 */
 	if (h_send->busy) {
 		RMPP_LOG(RMPP_LOG_DEBUG, "h_send %p BUSY\n", h_send);
-		rmpp_spinlock_release(&h_send->lock);
+		spin_unlock_bh(&h_send->lock);
 		goto exit;
 	} else
 		h_send->busy = TRUE;
@@ -737,7 +727,7 @@
 		send_req = (struct rmpp_send_req_t *) h_send->req_list.next;
 		list_del((struct list_head *) send_req);
 
-		rmpp_spinlock_release(&h_send->lock);
+		spin_unlock_bh(&h_send->lock);
 
 		RMPP_LOG(RMPP_LOG_DEBUG,
 			 "h_send %p SEND, send_req %p, p_send_mad %p\n",
@@ -758,14 +748,14 @@
 
 		rmpp_free(send_req);
 
-		rmpp_spinlock_acquire(&h_send->lock);
+		spin_lock_bh(&h_send->lock);
 	}
 
 	if (h_send->ack_seg == h_send->total_seg)
 		send_done = TRUE;
 
 	h_send->busy = FALSE;
-	rmpp_spinlock_release(&h_send->lock);
+	spin_unlock_bh(&h_send->lock);
 
 	if (send_done)
 		__put_mad_send(h_send);
@@ -885,10 +875,10 @@
 		 p_mad_element, (p_rmpp_mad->common_hdr.trans_id));
 
 	/*
-	 *       Search for the send.  The send may have timed out,
+	 *       Search for the send. The send may have timed out,
 	 *       been canceled, or received a response.
 	 */
-	rmpp_spinlock_acquire(&rmpp_info->lock);
+	spin_lock_bh(&rmpp_info->lock);
 
 	/*
 	 * Check if direction switch is to be performed 
@@ -917,7 +907,7 @@
 			 "h_send %p handle regular ACK, TID 0x%LX\n",
 			 h_send, p_mad_element->p_mad_buf->trans_id);
 	}
-	rmpp_spinlock_release(&rmpp_info->lock);
+	spin_unlock_bh(&rmpp_info->lock);
 
 	if (!h_send) {
 		RMPP_LOG(RMPP_LOG_DEBUG,
@@ -926,7 +916,7 @@
 		return;
 	}
 
-	rmpp_spinlock_acquire(&h_send->lock);
+	spin_lock_bh(&h_send->lock);
 
 	/* Drop old ACKs. */
 	if (ntohl(p_rmpp_mad->seg_num) < h_send->ack_seg) {
@@ -969,12 +959,12 @@
 		 h_send, h_send->ack_seg, h_send->total_seg);
 
 	if (h_send->ack_seg == h_send->total_seg) {
-		rmpp_spinlock_release(&h_send->lock);
+		spin_unlock_bh(&h_send->lock);
 		/* The send is done.  All segments have been ack'ed. */
 		send_done = TRUE;
 	} else if (h_send->ack_seg < h_send->seg_limit) {
 		/* Send the next segment. */
-		rmpp_spinlock_release(&h_send->lock);
+		spin_unlock_bh(&h_send->lock);
 		status = __send_rmpp_seg(rmpp_h, h_send);
 		if (status != RMPP_IB_SUCCESS) {
 			RMPP_LOG(RMPP_LOG_ERROR,
@@ -985,19 +975,19 @@
 			wc_status = RMPP_IB_WCS_TIMEOUT_RETRY_ERR;
 		}
 	} else {
-		rmpp_spinlock_release(&h_send->lock);
+		spin_unlock_bh(&h_send->lock);
 		RMPP_LOG(RMPP_LOG_DEBUG,
 			 "h_send %p INVALID ACK seg %d >= seg_limit %d\n",
 			 h_send, h_send->ack_seg, h_send->seg_limit);
 	}
 
 	if (send_done) {
-		rmpp_spinlock_acquire(&rmpp_info->lock);
+		spin_lock_bh(&rmpp_info->lock);
 		RMPP_LOG(RMPP_LOG_DEBUG,
 			 "h_send %p SEND DONE, delete from list\n", h_send);
 
 		list_del((struct list_head *) h_send);
-		rmpp_spinlock_release(&rmpp_info->lock);
+		spin_unlock_bh(&rmpp_info->lock);
 
 		/*
 		 * Check if we finished sending a request MAD
@@ -1029,7 +1019,7 @@
 	return;
 
 exit:
-	rmpp_spinlock_release(&h_send->lock);
+	spin_unlock_bh(&h_send->lock);
 }
 
 /*
@@ -1046,29 +1036,28 @@
 	    (struct rmpp_ib_rmpp_mad_t *) rmpp_ib_get_mad_buf(p_mad_element);
 
 	/* Search for the send.  The send may have timed out or been canceled. */
-	rmpp_spinlock_acquire(&rmpp_info->lock);
+	spin_lock_bh(&rmpp_info->lock);
 	h_send = __mad_send_match(rmpp_h, p_mad_element);
+	spin_unlock_bh(&rmpp_info->lock);
 	if (!h_send) {
-		rmpp_spinlock_release(&rmpp_info->lock);
 		return;
 	}
-	rmpp_spinlock_release(&rmpp_info->lock);
 
-	rmpp_spinlock_acquire(&h_send->lock);
+	spin_lock_bh(&h_send->lock);
 	/* If the send is active, we will finish processing it once it completes. */
 	if (h_send->retry_time == MAX_TIME) {
 		h_send->canceled = TRUE;
-		rmpp_spinlock_release(&h_send->lock);
+		spin_unlock_bh(&h_send->lock);
 		return;
 	}
 
-	rmpp_spinlock_release(&h_send->lock);
+	spin_unlock_bh(&h_send->lock);
 
 	/* Fail the send operation. */
-	rmpp_spinlock_acquire(&rmpp_info->lock);
+	spin_lock_bh(&rmpp_info->lock);
 	RMPP_LOG(RMPP_LOG_DEBUG, "h_send %p DELETE from list\n", h_send);
 	list_del((struct list_head *) h_send);
-	rmpp_spinlock_release(&rmpp_info->lock);
+	spin_unlock_bh(&rmpp_info->lock);
 
 	if (rmpp_info->send_compl_cb)
 		rmpp_info->send_compl_cb(rmpp_h, h_send->p_send_mad,
@@ -1101,7 +1090,7 @@
 	INIT_LIST_HEAD(&timeout_list);
 	cur_time = rmpp_get_time_stamp();
 
-	rmpp_spinlock_acquire(&rmpp_info->lock);
+	spin_lock_bh(&rmpp_info->lock);
 
 	/* Check all outstanding sends. */
 	list_for_each(h_send, (struct rmpp_mad_send_t *) &rmpp_info->send_list) {
@@ -1110,7 +1099,7 @@
 
 		status = RMPP_IB_SUCCESS;
 
-		rmpp_spinlock_acquire(&h_send_current->lock);
+		spin_lock_bh(&h_send_current->lock);
 
 		if (h_send->p_send_mad == NULL) {
 			RMPP_LOG(RMPP_LOG_DEBUG,
@@ -1167,7 +1156,7 @@
 				/* Resend all unacknowledged segments. */
 				h_send->cur_seg = h_send->ack_seg + 1;
 
-				rmpp_spinlock_release(&h_send_current->lock);
+				spin_unlock_bh(&h_send_current->lock);
 				status = __send_rmpp_seg(rmpp_info, h_send);
 				if (status != RMPP_IB_SUCCESS) {
 					RMPP_LOG(RMPP_LOG_ERROR,
@@ -1205,10 +1194,10 @@
 		h_send = h_send_tmp;
 
 cont:
-		rmpp_spinlock_release(&h_send_current->lock);
+		spin_unlock_bh(&h_send_current->lock);
 	}
 
-	rmpp_spinlock_release(&rmpp_info->lock);
+	spin_unlock_bh(&rmpp_info->lock);
 
 	/* Report all timed out sends to the user. */
 
@@ -1241,7 +1230,7 @@
 
 	rmpp_info = (struct rmpp_info_t *) context;
 
-	rmpp_spinlock_acquire(&rmpp_info->lock);
+	spin_lock_bh(&rmpp_info->lock);
 
 	/* Check all outstanding receives. */
 	list_for_each(p_recv, (struct rmpp_mad_recv_t *) &rmpp_info->recv_list) {
@@ -1273,7 +1262,7 @@
 	}
 
 	restart_timer = !list_empty(&rmpp_info->recv_list);
-	rmpp_spinlock_release(&rmpp_info->lock);
+	spin_unlock_bh(&rmpp_info->lock);
 
 	if (restart_timer)
 		rmpp_timer_start(&rmpp_info->recv_timer,
@@ -1371,7 +1360,7 @@
 	p_rmpp_hdr = rmpp_ib_get_mad_buf(p_mad_element);
 
 	/* Try to find a receive already being reassembled. */
-	rmpp_spinlock_acquire(&rmpp_info->lock);
+	spin_lock_bh(&rmpp_info->lock);
 
 	p_recv = __find_recv(rmpp_h, p_mad_element);
 	if (!p_recv) {
@@ -1380,7 +1369,7 @@
 			 p_mad_element->p_mad_buf->trans_id);
 		/* This receive is not being reassembled. It should be the first seg. */
 		if (ntoh32(p_rmpp_hdr->seg_num) != 1) {
-			rmpp_spinlock_release(&rmpp_info->lock);
+			spin_unlock_bh(&rmpp_info->lock);
 			return RMPP_NOT_FOUND;
 		}
 
@@ -1388,7 +1377,7 @@
 		p_recv = __get_mad_recv(p_mad_element);
 
 		if (!p_recv) {
-			rmpp_spinlock_release(&rmpp_info->lock);
+			spin_unlock_bh(&rmpp_info->lock);
 			return RMPP_INSUFFICIENT_MEMORY;
 		}
 
@@ -1465,13 +1454,13 @@
 		rmpp_status = RMPP_OVERRUN;
 	}
 
-	rmpp_spinlock_release(&rmpp_info->lock);
+	spin_unlock_bh(&rmpp_info->lock);
 
 	/*
-	 * Send any response MAD (ACK, ABORT, etc.) to the sender.  Note that
-	 * we are currently in the callback from the MAD dispatcher.  The
+	 * Send any response MAD (ACK, ABORT, etc.) to the sender. Note that
+	 * we are currently in the callback from the MAD dispatcher. The
 	 * dispatcher holds a reference on the MAD service while in the callback,
-	 * preventing the MAD service from being destroyed.  This allows the
+	 * preventing the MAD service from being destroyed. This allows the
 	 * call to ib_send_mad() to proceed even if the user tries to destroy
 	 * the MAD service.
 	 */
@@ -1708,7 +1697,7 @@
 	RMPP_LOG(RMPP_LOG_DEBUG, "p_mad_element %p p_mad_buf %p\n",
 		 p_mad_element, p_mad_element->p_mad_buf);
 
-	rmpp_spinlock_acquire(&info->lock);
+	spin_lock_bh(&info->lock);
 
 	if ((retval =
 	     __prepare_mad_send(rmpp_h, p_mad_element,
@@ -1727,7 +1716,7 @@
 	}
 
 err:
-	rmpp_spinlock_release(&info->lock);
+	spin_unlock_bh(&info->lock);
 	return retval;
 }
 
@@ -1758,11 +1747,7 @@
 	INIT_LIST_HEAD(&info->send_list);
 	INIT_LIST_HEAD(&info->recv_list);
 
-	rmpp_spinlock_construct(&info->lock);
-	if (rmpp_spinlock_init(&info->lock) != RMPP_SUCCESS) {
-		RMPP_LOG(RMPP_LOG_ERROR, "rmpp_spinlock_init error\n");
-		goto send_spinlock_init_err;
-	}
+	spin_lock_init(&info->lock);
 
 	rmpp_timer_construct(&info->send_timer);
 	if (rmpp_timer_init(&info->send_timer,
@@ -1820,7 +1805,7 @@
 	INIT_LIST_HEAD(&tmp_send_list);
 	INIT_LIST_HEAD(&tmp_recv_list);
 
-	rmpp_spinlock_acquire(&info->lock);
+	spin_lock_bh(&info->lock);
 	while (!list_empty(&info->send_list)) {
 
 		h_send = (struct rmpp_mad_send_t *) info->send_list.next;
@@ -1837,7 +1822,7 @@
 		list_del((struct list_head *) p_recv);
 		list_add_tail((struct list_head *) p_recv, &tmp_recv_list);
 	}
-	rmpp_spinlock_release(&info->lock);
+	spin_unlock_bh(&info->lock);
 
 	/* Have a short sleep here in order to let fast operations (callbacks) finish */
 	RMPP_LOG(RMPP_LOG_DEBUG, "sleep\n");
@@ -1935,7 +1920,7 @@
 		return;
 	}
 
-	rmpp_spinlock_acquire(&h_send->lock);
+	spin_lock_bh(&h_send->lock);
 
 	RMPP_LOG(RMPP_LOG_VERBOSE,
 		 "h_send %p, ref_cnt %d, ack_seg %d, total_seg %d\n",
@@ -1945,13 +1930,13 @@
 
 	if (h_send->ack_seg == h_send->total_seg) {
 #if 1
-		rmpp_spinlock_release(&h_send->lock);
+		spin_unlock_bh(&h_send->lock);
 		/*
 		 * ACK was already received even before the send completion
 		 */
-		rmpp_spinlock_acquire(&rmpp_info->lock);
+		spin_lock_bh(&rmpp_info->lock);
 		list_del((struct list_head *) h_send);
-		rmpp_spinlock_release(&rmpp_info->lock);
+		spin_unlock_bh(&rmpp_info->lock);
 
 		if (rmpp_info->send_compl_cb)
 			rmpp_info->send_compl_cb(rmpp_h, h_send->p_send_mad,
@@ -1967,7 +1952,7 @@
 		rmpp_timer_trim(&rmpp_info->send_timer,
 				h_send->p_send_mad->timeout_ms);
 
-		rmpp_spinlock_release(&h_send->lock);
+		spin_unlock_bh(&h_send->lock);
 #endif
 		return;
 	}
@@ -1980,7 +1965,7 @@
 			 h_send,
 			 rmpp_atomic_read(&h_send->ref_cnt), h_send->cur_seg);
 
-		rmpp_spinlock_release(&h_send->lock);
+		spin_unlock_bh(&h_send->lock);
 
 		/* Send the next segment. */
 		status = __send_rmpp_seg(rmpp_h, h_send);
@@ -1990,9 +1975,9 @@
 				 "ERR: h_send %p DELETE, send status %d\n",
 				 h_send, status);
 
-			rmpp_spinlock_acquire(&rmpp_info->lock);
+			spin_lock_bh(&rmpp_info->lock);
 			list_del((struct list_head *) h_send);
-			rmpp_spinlock_release(&rmpp_info->lock);
+			spin_unlock_bh(&rmpp_info->lock);
 
 #if 0
 			rmpp_put_mad(h_send->p_send_mad);
@@ -2015,7 +2000,7 @@
 		RMPP_LOG(RMPP_LOG_DEBUG,
 			 "h_send %p TRIM TIMER timeout_ms %d\n",
 			 h_send, h_send->p_send_mad->timeout_ms);
-		rmpp_spinlock_release(&h_send->lock);
+		spin_unlock_bh(&h_send->lock);
 	}
 }
 
Index: rmpp/rmpp.h
===================================================================
--- rmpp/rmpp.h	(revision 560)
+++ rmpp/rmpp.h	(working copy)
@@ -111,7 +111,6 @@
 
 #include "rmpp_api.h"
 #include "rmpp_timer.h"
-#include "rmpp_lock.h"
 
 #define RMPP_TYPE_DATA          1
 #define RMPP_TYPE_ACK           2
@@ -150,7 +149,7 @@
 	u64 trans_id;
 
 	struct list_head req_list;
-	struct rmpp_spinlock_t lock;
+	spinlock_t lock;
 	int busy;
 
 	/* Absolute time that the request should be retried. */
@@ -204,6 +203,6 @@
 	void *vendal_p;
 	rmpp_recv_cb_t recv_cb;
 	rmpp_send_compl_cb_t send_compl_cb;
-	struct rmpp_spinlock_t lock;
+	spinlock_t lock;
 };
 #endif				/* __RMPP_H__ */
Index: rmpp/Makefile
===================================================================
--- rmpp/Makefile	(revision 560)
+++ rmpp/Makefile	(working copy)
@@ -8,7 +8,7 @@
 #INCDIRS := -I. -I/usr/src/linux/include 
  
 MODULE := rmpp_module.o
-OBJS := rmpp.o rmpp_timer.o rmpp_lock.o
+OBJS := rmpp.o rmpp_timer.o 
 
 $(MODULE): $(OBJS)
 	$(LD) $(LDFLAGS) -o $@ $(OBJS) 
Index: TODO
===================================================================
--- TODO	(revision 596)
+++ TODO	(working copy)
@@ -1,10 +1,9 @@
-8/6/04
+8/9/04
 
 Add support for (at least) responses to requests with GRH
 Remove #if 0/1 with suitable preprocessor symbols
 Replace ib_reg_mr with ib_reg_phys_mr
 Eliminate static limit on numbers of ports/HCAs
-Get rid of spinlock wrappers
 Makefile needs to use standard kbuild 
 Migrate from /proc to /sysfs
 Static rate handling (low priority)