[openib-general] [PATCH 3/3] iWARP CM - iWARP Connection Manager
Tom Tucker
tom at opengridcomputing.com
Mon Mar 20 08:47:31 PST 2006
This patch file contains the following files:
infiniband/core/Makefile
infiniband/core/iwcm.c
Patch prepared Mon Mar 20 10:22:10 CST 2006
Signed-off-by: Tom Tucker <tom at opengridcomputing.com>
Index: infiniband/core/Makefile
===================================================================
--- infiniband/core/Makefile (revision 5842)
+++ infiniband/core/Makefile (working copy)
@@ -1,8 +1,9 @@
EXTRA_CFLAGS += -Idrivers/infiniband/include -Idrivers/infiniband/ulp/ipoib
-obj-$(CONFIG_INFINIBAND) += ib_core.o ib_mad.o ib_ping.o ib_cm.o \
+obj-$(CONFIG_INFINIBAND) += ib_core.o ib_mad.o ib_ping.o \
+ ib_cm.o iw_cm.o \
ib_sa.o ib_at.o ib_addr.o rdma_cm.o \
- ib_local_sa.o findex.o
+ ib_local_sa.o findex.o
obj-$(CONFIG_INFINIBAND_USER_MAD) += ib_umad.o
obj-$(CONFIG_INFINIBAND_USER_ACCESS) += ib_uverbs.o ib_ucm.o ib_uat.o rdma_ucm.o
@@ -17,6 +18,8 @@
ib_cm-y := cm.o
+iw_cm-y := iwcm.o
+
rdma_cm-y := cma.o
rdma_ucm-y := ucma.o
Index: infiniband/core/iwcm.c
===================================================================
--- infiniband/core/iwcm.c (revision 0)
+++ infiniband/core/iwcm.c (revision 0)
@@ -0,0 +1,795 @@
+/*
+ * Copyright (c) 2004, 2005 Intel Corporation. All rights reserved.
+ * Copyright (c) 2004 Topspin Corporation. All rights reserved.
+ * Copyright (c) 2004, 2005 Voltaire Corporation. All rights reserved.
+ * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
+ * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
+ * Copyright (c) 2005 Network Appliance, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/idr.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/rbtree.h>
+#include <linux/spinlock.h>
+#include <linux/workqueue.h>
+#include <rdma/iw_cm.h>
+#include <rdma/ib_addr.h>
+
+MODULE_AUTHOR("Tom Tucker");
+MODULE_DESCRIPTION("iWARP CM");
+MODULE_LICENSE("Dual BSD/GPL");
+
+struct iwcm_id_private {
+ struct iw_cm_id id;
+
+ unsigned long destroy_flags;
+ wait_queue_head_t destroy_wait;
+
+ struct list_head work_list;
+
+ spinlock_t lock;
+ atomic_t refcount;
+};
+#define IWCM_DESTROY_F_CALLBACK 1
+
+struct iwcm_work {
+ struct work_struct work;
+ struct iwcm_id_private *cm_id;
+ struct list_head list;
+ struct iw_cm_event event;
+};
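+
+/*
+ * Summary of the CM_ID state transitions implemented in this file:
+ *
+ * IDLE --> LISTEN (iw_cm_listen)
+ * IDLE --> CONN_SENT (iw_cm_connect)
+ * LISTEN --> CONN_RECV (cm_conn_req_handler, on the new child cm_id)
+ * CONN_RECV --> ESTABLISHED (iw_cm_accept / cm_conn_est_handler)
+ * CONN_RECV --> IDLE (iw_cm_reject)
+ * CONN_SENT --> ESTABLISHED (cm_conn_rep_handler, peer accepted)
+ * CONN_SENT --> IDLE (cm_conn_rep_handler, peer rejected or error)
+ * ESTABLISHED --> CLOSING (iw_cm_disconnect / cm_disconnect_handler)
+ * ESTABLISHED/CLOSING --> IDLE (cm_close_handler)
+ * any state --> DESTROYING (destroy_cm_id)
+ */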
+
+/*
+ * Release a reference on cm_id. If the last reference is being removed
+ * and iw_destroy_cm_id is waiting, wake up the waiting thread.
+ */
+static inline int iwcm_deref_id(struct iwcm_id_private *cm_id_priv)
+{
+ int ret = 0;
+
+ if (atomic_dec_and_test(&cm_id_priv->refcount)) {
+ BUG_ON(!list_empty(&cm_id_priv->work_list));
+ if (waitqueue_active(&cm_id_priv->destroy_wait)) {
+ BUG_ON(cm_id_priv->id.state != IW_CM_STATE_DESTROYING);
+ BUG_ON(test_bit(IWCM_DESTROY_F_CALLBACK,
+ &cm_id_priv->destroy_flags));
+ ret = 1;
+ wake_up(&cm_id_priv->destroy_wait);
+ }
+ }
+
+ return ret;
+}
+
+static void cm_event_handler(struct iw_cm_id *cm_id, struct iw_cm_event *event);
+
+struct iw_cm_id *iw_create_cm_id(struct ib_device *device,
+ iw_cm_handler cm_handler,
+ void *context)
+{
+ struct iwcm_id_private *iwcm_id_priv;
+
+ iwcm_id_priv = kzalloc(sizeof *iwcm_id_priv, GFP_KERNEL);
+ if (!iwcm_id_priv)
+ return ERR_PTR(-ENOMEM);
+
+ iwcm_id_priv->id.state = IW_CM_STATE_IDLE;
+ iwcm_id_priv->id.device = device;
+ iwcm_id_priv->id.cm_handler = cm_handler;
+ iwcm_id_priv->id.context = context;
+ iwcm_id_priv->id.event_handler = cm_event_handler;
+
+ spin_lock_init(&iwcm_id_priv->lock);
+ atomic_set(&iwcm_id_priv->refcount, 1);
+ init_waitqueue_head(&iwcm_id_priv->destroy_wait);
+ INIT_LIST_HEAD(&iwcm_id_priv->work_list);
+
+ return &iwcm_id_priv->id;
+}
+EXPORT_SYMBOL(iw_create_cm_id);
+
+static int iwcm_modify_qp_err(struct ib_qp *qp)
+{
+ struct ib_qp_attr qp_attr;
+
+ BUG_ON(qp == NULL);
+ qp_attr.qp_state = IB_QPS_ERR;
+ return ib_modify_qp(qp, &qp_attr, IB_QP_STATE);
+}
+
+/*
+ * This is really the RDMAC CLOSING state. It is most similar to the
+ * IB SQD QP state.
+ */
+static int iwcm_modify_qp_sqd(struct ib_qp *qp)
+{
+ struct ib_qp_attr qp_attr;
+
+ BUG_ON(qp == NULL);
+ qp_attr.qp_state = IB_QPS_SQD;
+ return ib_modify_qp(qp, &qp_attr, IB_QP_STATE);
+}
+
+/*
+ * CM_ID <-- CLOSING
+ *
+ * - If we are established, move to CLOSING and modify the QP state
+ * based on the abrupt flag.
+ * - If the connection is already in the CLOSING state, the peer is
+ * disconnecting concurrently with us and we've already seen the
+ * DISCONNECT event -- ignore the request and return 0.
+ * - If the connection was never established, return -EINVAL.
+ */
+int iw_cm_disconnect(struct iw_cm_id *cm_id, int abrupt)
+{
+ struct iwcm_id_private *iwcm_id_priv;
+ unsigned long flags;
+ struct ib_qp *qp;
+ int ret = 0;
+
+ iwcm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
+ spin_lock_irqsave(&iwcm_id_priv->lock, flags);
+ switch (cm_id->state) {
+ case IW_CM_STATE_ESTABLISHED:
+ cm_id->state = IW_CM_STATE_CLOSING;
+ qp = cm_id->qp;
+ spin_unlock_irqrestore(&iwcm_id_priv->lock, flags);
+ if (abrupt)
+ ret = iwcm_modify_qp_err(qp);
+ else
+ ret = iwcm_modify_qp_sqd(qp);
+ break;
+ case IW_CM_STATE_CONN_SENT:
+ case IW_CM_STATE_LISTEN:
+ case IW_CM_STATE_CONN_RECV:
+ spin_unlock_irqrestore(&iwcm_id_priv->lock, flags);
+ ret = -EINVAL;
+ break;
+ case IW_CM_STATE_CLOSING:
+ default:
+ spin_unlock_irqrestore(&iwcm_id_priv->lock, flags);
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL(iw_cm_disconnect);
+
+/*
+ * CM_ID <-- DESTROYING
+ *
+ * Clean up all resources associated with the connection and release
+ * the initial reference taken by iw_create_cm_id.
+ */
+static void destroy_cm_id(struct iw_cm_id *cm_id)
+{
+ struct iwcm_id_private *iwcm_id_priv;
+ unsigned long flags;
+ struct ib_qp *qp;
+ int ret;
+
+ iwcm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
+
+ spin_lock_irqsave(&iwcm_id_priv->lock, flags);
+ switch (cm_id->state) {
+ case IW_CM_STATE_LISTEN:
+ cm_id->state = IW_CM_STATE_DESTROYING;
+ spin_unlock_irqrestore(&iwcm_id_priv->lock, flags);
+ /* destroy the listening endpoint */
+ ret = cm_id->device->iwcm->destroy_listen(cm_id);
+ break;
+ case IW_CM_STATE_CONN_RECV:
+ cm_id->state = IW_CM_STATE_DESTROYING;
+ spin_unlock_irqrestore(&iwcm_id_priv->lock, flags);
+ /* Need to call reject on behalf of client */
+ (void)iw_cm_reject(cm_id, NULL, 0);
+ break;
+ case IW_CM_STATE_ESTABLISHED:
+ cm_id->state = IW_CM_STATE_DESTROYING;
+ qp = cm_id->qp;
+ spin_unlock_irqrestore(&iwcm_id_priv->lock, flags);
+ /* Abrupt close of the connection */
+ ret = iwcm_modify_qp_err(qp);
+ break;
+ default:
+ cm_id->state = IW_CM_STATE_DESTROYING;
+ spin_unlock_irqrestore(&iwcm_id_priv->lock, flags);
+ }
+
+ (void)iwcm_deref_id(iwcm_id_priv);
+}
+
+/*
+ * This function is only called by the application thread and cannot
+ * be called by the event thread. The function will wait for all
+ * references to be released on the cm_id and then kfree the cm_id
+ * object.
+ */
+void iw_destroy_cm_id(struct iw_cm_id *cm_id)
+{
+ struct iwcm_id_private *iwcm_id_priv;
+
+ destroy_cm_id(cm_id);
+
+ iwcm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
+ wait_event(iwcm_id_priv->destroy_wait,
+ !atomic_read(&iwcm_id_priv->refcount));
+
+ kfree(iwcm_id_priv);
+}
+EXPORT_SYMBOL(iw_destroy_cm_id);
+
+/*
+ * CM_ID <-- LISTEN
+ *
+ * Start listening for connect requests. Generates one CONNECT_REQUEST
+ * event for each inbound connect request.
+ */
+int iw_cm_listen(struct iw_cm_id *cm_id, int backlog)
+{
+ struct iwcm_id_private *iwcm_id_priv;
+ unsigned long flags;
+ int ret = 0;
+
+ iwcm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
+ spin_lock_irqsave(&iwcm_id_priv->lock, flags);
+ switch (cm_id->state) {
+ case IW_CM_STATE_IDLE:
+ cm_id->state = IW_CM_STATE_LISTEN;
+ spin_unlock_irqrestore(&iwcm_id_priv->lock, flags);
+ ret = cm_id->device->iwcm->create_listen(cm_id, backlog);
+ if (ret)
+ cm_id->state = IW_CM_STATE_IDLE;
+ break;
+ default:
+ spin_unlock_irqrestore(&iwcm_id_priv->lock, flags);
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL(iw_cm_listen);
+
+/*
+ * CM_ID <-- IDLE
+ *
+ * Rejects an inbound connection request. No events are generated.
+ */
+int iw_cm_reject(struct iw_cm_id *cm_id,
+ const void *private_data,
+ u8 private_data_len)
+{
+ struct iwcm_id_private *iwcm_id_priv;
+ unsigned long flags;
+ int ret;
+
+ iwcm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
+
+ spin_lock_irqsave(&iwcm_id_priv->lock, flags);
+ switch (cm_id->state) {
+ case IW_CM_STATE_CONN_RECV:
+ cm_id->state = IW_CM_STATE_IDLE;
+ spin_unlock_irqrestore(&iwcm_id_priv->lock, flags);
+ ret = cm_id->device->iwcm->reject(cm_id, private_data,
+ private_data_len);
+ break;
+ default:
+ spin_unlock_irqrestore(&iwcm_id_priv->lock, flags);
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL(iw_cm_reject);
+
+/*
+ * CM_ID <-- ESTABLISHED
+ *
+ * Accepts an inbound connection request and generates an ESTABLISHED event.
+ */
+int iw_cm_accept(struct iw_cm_id *cm_id,
+ const void *private_data,
+ u8 private_data_len)
+{
+ struct iwcm_id_private *iwcm_id_priv;
+ unsigned long flags;
+ int ret;
+
+ iwcm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
+
+ spin_lock_irqsave(&iwcm_id_priv->lock, flags);
+ switch (cm_id->state) {
+ case IW_CM_STATE_CONN_RECV:
+ spin_unlock_irqrestore(&iwcm_id_priv->lock, flags);
+ ret = cm_id->device->iwcm->accept(cm_id, private_data,
+ private_data_len);
+ if (ret == 0) {
+ struct iw_cm_event event;
+ event.event = IW_CM_EVENT_ESTABLISHED;
+ event.provider_id = cm_id->provider_id;
+ event.status = 0;
+ event.local_addr = cm_id->local_addr;
+ event.remote_addr = cm_id->remote_addr;
+ event.private_data = NULL;
+ event.private_data_len = 0;
+ cm_event_handler(cm_id, &event);
+ } else
+ cm_id->state = IW_CM_STATE_IDLE;
+
+ break;
+ default:
+ spin_unlock_irqrestore(&iwcm_id_priv->lock, flags);
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL(iw_cm_accept);
+
+/*
+ * Active Side: CM_ID <-- CONN_SENT
+ *
+ * If successful, results in the generation of a CONNECT_REPLY event.
+ */
+int iw_cm_connect(struct iw_cm_id *cm_id,
+ const void *pdata,
+ u8 pdata_len)
+{
+ struct iwcm_id_private *cm_id_priv;
+ int ret = 0;
+ unsigned long flags;
+
+ cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
+ spin_lock_irqsave(&cm_id_priv->lock, flags);
+ if (cm_id->state != IW_CM_STATE_IDLE) {
+ spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+ return -EINVAL;
+ }
+ cm_id->state = IW_CM_STATE_CONN_SENT;
+ spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+
+ ret = cm_id->device->iwcm->connect(cm_id, pdata, pdata_len);
+ if (ret)
+ cm_id->state = IW_CM_STATE_IDLE;
+
+ return ret;
+}
+EXPORT_SYMBOL(iw_cm_connect);
+
+/*
+ * Passive Side: new CM_ID <-- CONN_RECV
+ *
+ * Handles an inbound connect request. The function creates a new
+ * iw_cm_id to represent the new connection and inherits the client
+ * callback function and other attributes from the listening parent.
+ *
+ * The work item contains a pointer to the listen_cm_id and the event. The
+ * listen_cm_id contains the client cm_handler, context and
+ * device. These are copied when the cm_id is cloned. The event
+ * contains the new 4-tuple.
+ *
+ * An error on the child should not affect the parent, so this
+ * function does not return a value.
+ */
+static void cm_conn_req_handler(struct iwcm_id_private *listen_id_priv,
+ struct iw_cm_event *iw_event)
+{
+ unsigned long flags;
+ struct iw_cm_id *cm_id;
+ struct iwcm_id_private *cm_id_priv;
+ iw_cm_handler cm_handler;
+ void *context;
+ struct ib_device *device;
+ int ret;
+
+ /* The provider should never generate a connection request
+ * event with a bad status.
+ */
+ BUG_ON(iw_event->status);
+
+ /* We could be destroying the listening id. If so, ignore this
+ * upcall. */
+ spin_lock_irqsave(&listen_id_priv->lock, flags);
+ if (listen_id_priv->id.state != IW_CM_STATE_LISTEN) {
+ spin_unlock_irqrestore(&listen_id_priv->lock, flags);
+ return;
+ }
+ /* The listen_id can be destroyed the moment we release the
+ * lock, so take the state we need to inherit before releasing
+ * the lock.
+ */
+ device = listen_id_priv->id.device;
+ cm_handler = listen_id_priv->id.cm_handler;
+ context = listen_id_priv->id.context;
+ spin_unlock_irqrestore(&listen_id_priv->lock, flags);
+
+ cm_id = iw_create_cm_id(device, cm_handler, context);
+
+ /* If the cm_id could not be created, ignore the request */
+ if (IS_ERR(cm_id))
+ return;
+
+ cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
+ cm_id_priv->id.local_addr = iw_event->local_addr;
+ cm_id_priv->id.remote_addr = iw_event->remote_addr;
+ cm_id_priv->id.provider_id = iw_event->provider_id;
+ cm_id_priv->id.state = IW_CM_STATE_CONN_RECV;
+
+ /* Call the client CM handler */
+ /* XXX: atomic_inc(&cm_id_priv->refcount); */
+ ret = cm_id->cm_handler(cm_id, iw_event);
+ /* XXX: (void)iwcm_deref_id(cm_id_priv); */
+ if (ret) {
+ set_bit(IWCM_DESTROY_F_CALLBACK, &cm_id_priv->destroy_flags);
+ destroy_cm_id(cm_id);
+ if (atomic_read(&cm_id_priv->refcount) == 0)
+ kfree(cm_id_priv);
+ }
+}
+
+/*
+ * Passive Side: CM_ID <-- ESTABLISHED
+ *
+ * The provider generated an ESTABLISHED event which means that
+ * the MPA negotiation has completed successfully and we are now in MPA
+ * FPDU mode.
+ */
+static int cm_conn_est_handler(struct iwcm_id_private *cm_id_priv,
+ struct iw_cm_event *iw_event)
+{
+ unsigned long flags;
+
+ /* If the cm_id is in the wrong state, ignore the
+ * event. This could happen if the app called disconnect or
+ * destroy.
+ */
+ spin_lock_irqsave(&cm_id_priv->lock, flags);
+ if (cm_id_priv->id.state != IW_CM_STATE_CONN_RECV) {
+ spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+ return 0;
+ }
+
+ /* Take a reference on behalf of the provider. This reference
+ * will be removed when the provider delivers the CLOSE event.
+ */
+ atomic_inc(&cm_id_priv->refcount);
+ cm_id_priv->id.state = IW_CM_STATE_ESTABLISHED;
+ spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+
+ /* Call the client CM handler */
+ return cm_id_priv->id.cm_handler(&cm_id_priv->id, iw_event);
+}
+
+/*
+ * Active Side: CM_ID <-- ESTABLISHED
+ *
+ * Handles the reply to our connect request:
+ * - If the cm_id is in the wrong state when the event is
+ * delivered, the event is ignored.
+ * - If the remote peer accepted the connection, we update the 4-tuple
+ * in the cm_id with the remote peer info, move the cm_id to the
+ * ESTABLISHED state and deliver the event to the client.
+ * - If the remote peer rejected the connection, or there is some
+ * connection error, move the cm_id to the IDLE state, and deliver
+ * the bad news to the client.
+ */
+static int cm_conn_rep_handler(struct iwcm_id_private *cm_id_priv,
+ struct iw_cm_event *iw_event)
+{
+ unsigned long flags;
+
+ /* If the cm_id is in the wrong state, ignore the
+ * event. This could happen if the app called disconnect or
+ * destroy.
+ */
+ spin_lock_irqsave(&cm_id_priv->lock, flags);
+ if (cm_id_priv->id.state != IW_CM_STATE_CONN_SENT) {
+ spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+ return 0;
+ }
+ if (iw_event->status == IW_CM_EVENT_STATUS_ACCEPTED) {
+ cm_id_priv->id.local_addr = iw_event->local_addr;
+ cm_id_priv->id.remote_addr = iw_event->remote_addr;
+ /* Take a reference on behalf of the provider. This reference
+ * will be removed when the provider delivers the CLOSE event.
+ */
+ atomic_inc(&cm_id_priv->refcount);
+ cm_id_priv->id.state = IW_CM_STATE_ESTABLISHED;
+ } else
+ cm_id_priv->id.state = IW_CM_STATE_IDLE;
+ spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+
+ /* Call the client CM handler */
+ return cm_id_priv->id.cm_handler(&cm_id_priv->id, iw_event);
+}
+
+/*
+ * CM_ID <-- CLOSING
+ *
+ * If in the ESTABLISHED state, move to CLOSING.
+ */
+static void cm_disconnect_handler(struct iwcm_id_private *cm_id_priv,
+ struct iw_cm_event *iw_event)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&cm_id_priv->lock, flags);
+ if (cm_id_priv->id.state == IW_CM_STATE_ESTABLISHED)
+ cm_id_priv->id.state = IW_CM_STATE_CLOSING;
+ spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+}
+
+/*
+ * CM_ID <-- IDLE
+ *
+ * If in the ESTABLISHED or CLOSING states, the QP will have been
+ * moved by the provider to the ERR state. Disassociate the CM_ID from
+ * the QP, move to IDLE, and remove the 'connected' reference.
+ *
+ * If in some other state, the cm_id was destroyed asynchronously.
+ * This is the last reference that will result in waking up
+ * the app thread blocked in iw_destroy_cm_id.
+ */
+static int cm_close_handler(struct iwcm_id_private *cm_id_priv,
+ struct iw_cm_event *iw_event)
+{
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&cm_id_priv->lock, flags);
+
+ /* Disassociate the QP from the cm_id */
+ cm_id_priv->id.qp = NULL;
+
+ /* Regardless of the state, we remove the close reference. It
+ * cannot be the last one, because the event reference has not
+ * yet been removed. */
+ (void)iwcm_deref_id(cm_id_priv);
+ BUG_ON(atomic_read(&cm_id_priv->refcount) == 0);
+
+ switch (cm_id_priv->id.state) {
+ case IW_CM_STATE_ESTABLISHED:
+ case IW_CM_STATE_CLOSING:
+ cm_id_priv->id.state = IW_CM_STATE_IDLE;
+ spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+ ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, iw_event);
+ break;
+ default:
+ spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+ ret = 0;
+ }
+
+ return ret;
+}
+
+static int process_event(struct iwcm_id_private *cm_id_priv,
+ struct iw_cm_event *iw_event)
+{
+ int ret = 0;
+
+ switch (iw_event->event) {
+ case IW_CM_EVENT_CONNECT_REQUEST:
+ cm_conn_req_handler(cm_id_priv, iw_event);
+ break;
+ case IW_CM_EVENT_CONNECT_REPLY:
+ ret = cm_conn_rep_handler(cm_id_priv, iw_event);
+ break;
+ case IW_CM_EVENT_ESTABLISHED:
+ ret = cm_conn_est_handler(cm_id_priv, iw_event);
+ break;
+ case IW_CM_EVENT_DISCONNECT:
+ cm_disconnect_handler(cm_id_priv, iw_event);
+ break;
+ case IW_CM_EVENT_CLOSE:
+ ret = cm_close_handler(cm_id_priv, iw_event);
+ break;
+ default:
+ BUG();
+ }
+
+ return ret;
+}
+
+/*
+ * Process events on the work_list for the cm_id. If the callback
+ * function requests that the cm_id be deleted, a flag is set in the
+ * cm_id destroy_flags to indicate that when the last reference is
+ * removed, the cm_id is to be destroyed. This is necessary to
+ * distinguish between an object that will be destroyed by the app
+ * thread asleep on the destroy_wait list vs. an object destroyed
+ * here synchronously when the last reference is removed.
+ */
+static void cm_work_handler(void *arg)
+{
+ struct iwcm_work *work = arg;
+ struct iwcm_id_private *cm_id_priv = work->cm_id;
+ unsigned long flags;
+ int ret = 0;
+
+ spin_lock_irqsave(&cm_id_priv->lock, flags);
+ while (!list_empty(&cm_id_priv->work_list)) {
+ work = list_entry(cm_id_priv->work_list.next,
+ struct iwcm_work, list);
+ list_del_init(&work->list);
+ spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+
+ ret = process_event(cm_id_priv, &work->event);
+ kfree(work);
+ if (ret) {
+ set_bit(IWCM_DESTROY_F_CALLBACK,
+ &cm_id_priv->destroy_flags);
+ destroy_cm_id(&cm_id_priv->id);
+ }
+ BUG_ON(atomic_read(&cm_id_priv->refcount) == 0);
+ if (iwcm_deref_id(cm_id_priv))
+ return;
+
+ if (atomic_read(&cm_id_priv->refcount) == 0
+ && test_bit(IWCM_DESTROY_F_CALLBACK,
+ &cm_id_priv->destroy_flags)) {
+ kfree(cm_id_priv);
+ return;
+ }
+ spin_lock_irqsave(&cm_id_priv->lock, flags);
+ }
+ spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+}
+
+/*
+ * This function is called in interrupt context. Schedule events on
+ * the rdma_wq thread to allow callback functions to downcall into
+ * the CM and/or block. Events are queued to a per-CM_ID
+ * work_list. If this is the first event on the work_list, the work
+ * element is also queued on the rdma_wq thread.
+ *
+ * Each event holds a reference on the cm_id. Until the last posted
+ * event has been delivered and processed, the cm_id cannot be
+ * deleted.
+ */
+static void cm_event_handler(struct iw_cm_id *cm_id,
+ struct iw_cm_event *iw_event)
+{
+ struct iwcm_work *work;
+ struct iwcm_id_private *cm_id_priv;
+ unsigned long flags;
+
+ work = kmalloc(sizeof *work, GFP_ATOMIC);
+ if (!work)
+ return;
+
+ cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
+ atomic_inc(&cm_id_priv->refcount);
+
+ INIT_WORK(&work->work, cm_work_handler, work);
+ work->cm_id = cm_id_priv;
+ work->event = *iw_event;
+
+ spin_lock_irqsave(&cm_id_priv->lock, flags);
+ if (list_empty(&cm_id_priv->work_list)) {
+ list_add_tail(&work->list, &cm_id_priv->work_list);
+ queue_work(rdma_wq, &work->work);
+ } else
+ list_add_tail(&work->list, &cm_id_priv->work_list);
+ spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+}
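+
+/*
+ * Providers deliver events by calling cm_id->event_handler, which
+ * iw_create_cm_id() points at cm_event_handler() above. For example
+ * (a sketch; the event contents are illustrative only):
+ *
+ *	struct iw_cm_event event = {
+ *		.event = IW_CM_EVENT_DISCONNECT,
+ *	};
+ *	cm_id->event_handler(cm_id, &event);
+ */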
+
+static int iwcm_init_qp_init_attr(struct iwcm_id_private *cm_id_priv,
+ struct ib_qp_attr *qp_attr,
+ int *qp_attr_mask)
+{
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&cm_id_priv->lock, flags);
+ switch (cm_id_priv->id.state) {
+ case IW_CM_STATE_IDLE:
+ case IW_CM_STATE_CONN_SENT:
+ case IW_CM_STATE_CONN_RECV:
+ case IW_CM_STATE_ESTABLISHED:
+ *qp_attr_mask = IB_QP_STATE | IB_QP_ACCESS_FLAGS;
+ qp_attr->qp_access_flags = IB_ACCESS_LOCAL_WRITE |
+ IB_ACCESS_REMOTE_WRITE |
+ IB_ACCESS_REMOTE_READ;
+ ret = 0;
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+ spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+ return ret;
+}
+
+static int iwcm_init_qp_rts_attr(struct iwcm_id_private *cm_id_priv,
+ struct ib_qp_attr *qp_attr,
+ int *qp_attr_mask)
+{
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&cm_id_priv->lock, flags);
+ switch (cm_id_priv->id.state) {
+ case IW_CM_STATE_IDLE:
+ case IW_CM_STATE_CONN_SENT:
+ case IW_CM_STATE_CONN_RECV:
+ case IW_CM_STATE_ESTABLISHED:
+ *qp_attr_mask = IB_QP_STATE;
+ ret = 0;
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+ spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+ return ret;
+}
+
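+/*
+ * Set up qp_attr and *qp_attr_mask for the transition named by
+ * qp_attr->qp_state. A ULP would typically pair this with
+ * ib_modify_qp(), e.g. (a sketch, error handling omitted):
+ *
+ *	qp_attr.qp_state = IB_QPS_RTS;
+ *	if (!iw_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask))
+ *		ib_modify_qp(qp, &qp_attr, qp_attr_mask);
+ */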
+int iw_cm_init_qp_attr(struct iw_cm_id *cm_id,
+ struct ib_qp_attr *qp_attr,
+ int *qp_attr_mask)
+{
+ struct iwcm_id_private *cm_id_priv;
+ int ret;
+
+ cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
+ switch (qp_attr->qp_state) {
+ case IB_QPS_INIT:
+ case IB_QPS_RTR:
+ ret = iwcm_init_qp_init_attr(cm_id_priv,
+ qp_attr, qp_attr_mask);
+ break;
+ case IB_QPS_RTS:
+ ret = iwcm_init_qp_rts_attr(cm_id_priv,
+ qp_attr, qp_attr_mask);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+ return ret;
+}
+EXPORT_SYMBOL(iw_cm_init_qp_attr);
+
+static int __init iw_cm_init(void)
+{
+ return 0;
+}
+
+static void __exit iw_cm_cleanup(void)
+{
+}
+
+module_init(iw_cm_init);
+module_exit(iw_cm_cleanup);
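
For reviewers, here is a minimal sketch of how a client might drive the
passive side of this API. It is illustrative only: the handler body, the
backlog value, and the address setup are assumptions rather than part of
the patch, and error handling is abbreviated.

#include <linux/err.h>
#include <rdma/iw_cm.h>

static int example_cm_handler(struct iw_cm_id *cm_id,
			      struct iw_cm_event *event)
{
	switch (event->event) {
	case IW_CM_EVENT_CONNECT_REQUEST:
		/* cm_id is the new child id cloned from the listening
		 * id; a non-zero return asks the CM to destroy it.
		 * Accept with no private data. */
		return iw_cm_accept(cm_id, NULL, 0);
	case IW_CM_EVENT_ESTABLISHED:
		/* MPA negotiation is complete; we are in FPDU mode. */
		return 0;
	default:
		return 0;
	}
}

static struct iw_cm_id *example_listen(struct ib_device *device)
{
	struct iw_cm_id *cm_id;
	int ret;

	cm_id = iw_create_cm_id(device, example_cm_handler, NULL);
	if (IS_ERR(cm_id))
		return cm_id;

	/* The caller must fill in cm_id->local_addr before listening;
	 * how the address is chosen is outside the scope of this
	 * sketch. */
	ret = iw_cm_listen(cm_id, 8 /* backlog */);
	if (ret) {
		iw_destroy_cm_id(cm_id);
		return ERR_PTR(ret);
	}
	return cm_id;
}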