[openib-general] [PATCH] iWARP Support added to the CMA
Tom Tucker
tom at opengridcomputing.com
Thu Dec 15 09:55:36 PST 2005
This is a patch to the iWARP branch that adds:
- A generic iWARP transport CM module
- Support for iWARP transports in the CMA
- Modifications to the AMSO1100 driver for the iWARP transport CM
- ULP add_one/remove_one changes that filter devices based on node_type
The code has been tested on both IB and iWARP adapters with the cmatose and krping applications.
The code can also be checked out from the iWARP branch with these patches applied.
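
For reviewers new to the interface, here is a minimal sketch (not part of
the patch; the handler, port, and backlog values are illustrative) of how
a kernel ULP might drive the iw_cm API added below, passive side:

    static int my_cm_handler(struct iw_cm_id *cm_id,
                             struct iw_cm_event *event)
    {
            /* Dispatch on event->event; a non-zero return tells
             * the IW CM to destroy this cm_id. */
            return 0;
    }

    static int my_listen(struct ib_device *device)
    {
            struct iw_cm_id *cm_id;

            cm_id = iw_create_cm_id(device, my_cm_handler, NULL);
            if (IS_ERR(cm_id))
                    return PTR_ERR(cm_id);

            cm_id->local_addr.sin_family = AF_INET;
            cm_id->local_addr.sin_addr.s_addr = INADDR_ANY;
            cm_id->local_addr.sin_port = htons(9999);

            return iw_cm_listen(cm_id, 10 /* backlog */);
    }

Incoming requests then arrive at my_cm_handler() as
IW_CM_EVENT_CONNECT_REQUEST on a cloned cm_id, which the ULP completes
with iw_cm_bind_qp() followed by iw_cm_accept().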
Signed-off-by: Tom Tucker <tom at opengridcomputing.com>
Index: ulp/ipoib/ipoib_main.c
===================================================================
--- ulp/ipoib/ipoib_main.c (revision 4186)
+++ ulp/ipoib/ipoib_main.c (working copy)
@@ -1024,6 +1024,9 @@
struct ipoib_dev_priv *priv;
int s, e, p;
+ if (device->node_type == IB_NODE_RNIC)
+ return;
+
dev_list = kmalloc(sizeof *dev_list, GFP_KERNEL);
if (!dev_list)
return;
@@ -1054,6 +1057,9 @@
struct ipoib_dev_priv *priv, *tmp;
struct list_head *dev_list;
+ if (device->node_type == IB_NODE_RNIC)
+ return;
+
dev_list = ib_get_client_data(device, &ipoib_client);
list_for_each_entry_safe(priv, tmp, dev_list, list) {
Index: include/rdma/ib_verbs.h
===================================================================
--- include/rdma/ib_verbs.h (revision 4186)
+++ include/rdma/ib_verbs.h (working copy)
@@ -805,7 +805,7 @@
struct ib_gid_cache **gid_cache;
};
-struct iw_cm;
+struct iw_cm_verbs;
struct ib_device {
struct device *dma_device;
@@ -822,7 +822,7 @@
u32 flags;
- struct iw_cm *iwcm;
+ struct iw_cm_verbs *iwcm;
int (*query_device)(struct ib_device *device,
struct ib_device_attr *device_attr);
Index: include/rdma/iw_cm.h
===================================================================
--- include/rdma/iw_cm.h (revision 4186)
+++ include/rdma/iw_cm.h (working copy)
@@ -1,5 +1,7 @@
/*
* Copyright (c) 2005 Ammasso, Inc. All rights reserved.
+ * Copyright (c) 2005 Network Appliance, Inc. All rights reserved.
+ * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -33,112 +35,119 @@
#define IW_CM_H
#include <linux/in.h>
+#include <rdma/ib_cm.h>
-/* iWARP connection attributes. */
+struct iw_cm_id;
+struct iw_cm_event;
-struct iw_conn_attr {
- struct in_addr local_addr;
- struct in_addr remote_addr;
- u16 local_port;
- u16 remote_port;
+enum iw_cm_event_type {
+ IW_CM_EVENT_CONNECT_REQUEST = 1, /* connect request received */
+ IW_CM_EVENT_CONNECT_REPLY, /* reply from active connect request */
+ IW_CM_EVENT_ESTABLISHED,
+ IW_CM_EVENT_LLP_DISCONNECT,
+ IW_CM_EVENT_LLP_RESET,
+ IW_CM_EVENT_LLP_TIMEOUT,
+ IW_CM_EVENT_CLOSE
};
-/* This is provided in the event generated when
- * a remote peer accepts our connect request
- */
-
-enum conn_result {
- IW_CONN_ACCEPT = 0,
- IW_CONN_RESET,
- IW_CONN_PEER_REJECT,
- IW_CONN_TIMEDOUT,
- IW_CONN_NO_ROUTE_TO_HOST,
- IW_CONN_INVALID_PARM
+struct iw_cm_event {
+ enum iw_cm_event_type event;
+ int status;
+ u64 provider_id;
+ struct sockaddr_in local_addr;
+ struct sockaddr_in remote_addr;
+ void *private_data;
+ u8 private_data_len;
};
-
-/* This structure is provided in the event that
- * completes an active connection request.
- */
-struct iw_conn_results {
- enum conn_result result;
- struct iw_conn_attr conn_attr;
- u8 *private_data;
- int private_data_len;
-};
-/* This is provided in the event generated by a remote
- * connect request to a listening endpoint
- */
-struct iw_conn_request {
- u32 cr_id;
- struct iw_conn_attr conn_attr;
- u8 *private_data;
- int private_data_len;
-};
+typedef int (*iw_cm_handler)(struct iw_cm_id *cm_id,
+ struct iw_cm_event *event);
-/* Connection events. */
-enum iw_cm_event_type {
- IW_EVENT_ACTIVE_CONNECT_RESULTS,
- IW_EVENT_CONNECT_REQUEST,
- IW_EVENT_DISCONNECT
+enum iw_cm_state {
+ IW_CM_STATE_IDLE, /* unbound, inactive */
+ IW_CM_STATE_LISTEN, /* listen waiting for connect */
+ IW_CM_STATE_CONN_SENT, /* outbound waiting for peer accept */
+ IW_CM_STATE_CONN_RECV, /* inbound waiting for user accept */
+ IW_CM_STATE_ESTABLISHED, /* established */
};
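+
+/* Typical transitions: IDLE -> LISTEN for a passive endpoint;
+ * IDLE -> CONN_SENT -> ESTABLISHED for an active connect;
+ * CONN_RECV -> ESTABLISHED when the user accepts. Disconnects
+ * and rejects return the cm_id to IDLE.
+ */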
-struct iw_cm_event {
- struct ib_device *device;
- union {
- struct iw_conn_results active_results;
- struct iw_conn_request conn_request;
- } element;
- enum iw_cm_event_type event;
+typedef void (*iw_event_handler)(struct iw_cm_id* cm_id,
+ struct iw_cm_event* event);
+struct iw_cm_id {
+ iw_cm_handler cm_handler; /* client callback function */
+ void *context; /* context to provide to client cb */
+ enum iw_cm_state state;
+ struct ib_device *device;
+ struct ib_qp *qp;
+ struct sockaddr_in local_addr;
+ struct sockaddr_in remote_addr;
+ u64 provider_id; /* device handle for this conn. */
+ iw_event_handler event_handler; /* callback for IW CM Provider events */
};
-/* Listening endpoint. */
-struct iw_listen_ep_attr {
- void (*event_handler)(struct iw_cm_event *, void *);
- void *listen_context;
- struct in_addr addr;
- u16 port;
- int backlog;
-};
+/**
+ * iw_create_cm_id - Allocate a communication identifier.
+ * @device: Device associated with the cm_id. All related communication will
+ * be associated with the specified device.
+ * @cm_handler: Callback invoked to notify the user of CM events.
+ * @context: User specified context associated with the communication
+ * identifier.
+ *
+ * Communication identifiers are used to track connection states,
+ * addr resolution requests, and listen requests.
+ */
+struct iw_cm_id *iw_create_cm_id(struct ib_device *device,
+ iw_cm_handler cm_handler,
+ void *context);
-struct iw_cm {
+/* Transport-specific connection management calls, supplied to the
+ * IW CM by the iWARP device driver.
+ */
- int (*connect_qp)(struct ib_qp *ib_qp,
- struct iw_conn_attr* attr,
- void (*event_handler)(struct iw_cm_event*, void*),
- void* context,
- u8 *pdata,
- int pdata_len
- );
+struct iw_cm_verbs {
+ int (*connect)(struct iw_cm_id* cm_id,
+ const void* private_data,
+ u8 private_data_len);
+
+ int (*disconnect)(struct iw_cm_id* cm_id,
+ int abrupt);
- int (*disconnect_qp)(struct ib_qp *qp,
- int abrupt
- );
+ int (*accept)(struct iw_cm_id* cm_id,
+ const void *private_data,
+ u8 private_data_len);
- int (*accept_cr)(struct ib_device* ibdev,
- u32 cr_id,
- struct ib_qp *qp,
- void (*event_handler)(struct iw_cm_event*, void*),
- void *context,
- u8 *pdata,
- int pdata_len);
+ int (*reject)(struct iw_cm_id* cm_id,
+ const void* private_data,
+ u8 private_data_len);
- int (*reject_cr)(struct ib_device* ibdev,
- u32 cr_id,
- u8 *pdata,
- int pdata_len);
+ int (*getpeername)(struct iw_cm_id* cm_id,
+ struct sockaddr_in* local_addr,
+ struct sockaddr_in* remote_addr);
- int (*query_cr)(struct ib_device* ibdev,
- u32 cr_id,
- struct iw_conn_request* req);
+ int (*create_listen)(struct iw_cm_id* cm_id,
+ int backlog);
- int (*create_listen_ep)(struct ib_device *ibdev,
- struct iw_listen_ep_attr *ep_attrs,
- void **ep_handle);
+ int (*destroy_listen)(struct iw_cm_id* cm_id);
- int (*destroy_listen_ep)(struct ib_device *ibdev,
- void *ep_handle);
-
};
+void iw_destroy_cm_id(struct iw_cm_id *cm_id);
+int iw_cm_listen(struct iw_cm_id *cm_id, int backlog);
+int iw_cm_getpeername(struct iw_cm_id *cm_id,
+ struct sockaddr_in* local_addr,
+ struct sockaddr_in* remote_addr);
+int iw_cm_reject(struct iw_cm_id *cm_id,
+ const void *private_data,
+ u8 private_data_len);
+int iw_cm_accept(struct iw_cm_id *cm_id,
+ const void *private_data,
+ u8 private_data_len);
+int iw_cm_connect(struct iw_cm_id *cm_id,
+ const void *private_data, u8 private_data_len);
+int iw_cm_disconnect(struct iw_cm_id *cm_id);
+int iw_cm_bind_qp(struct iw_cm_id* cm_id, struct ib_qp* qp);
+
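+/*
+ * Usage sketch (active side), mirroring cma_connect_iw() in
+ * core/cma.c; error handling is omitted and the names are
+ * illustrative:
+ *
+ *	cm_id = iw_create_cm_id(device, my_cm_handler, context);
+ *	cm_id->local_addr = *src;
+ *	cm_id->remote_addr = *dst;
+ *	iw_cm_bind_qp(cm_id, qp);
+ *	err = iw_cm_connect(cm_id, private_data, private_data_len);
+ *
+ * my_cm_handler() later receives IW_CM_EVENT_CONNECT_REPLY with
+ * event->status == 0 on success.
+ */
+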
#endif /* IW_CM_H */
Index: core/cm.c
===================================================================
--- core/cm.c (revision 4186)
+++ core/cm.c (working copy)
@@ -3227,6 +3227,10 @@
int ret;
u8 i;
+ /* Ignore RNIC devices */
+ if (device->node_type == IB_NODE_RNIC)
+ return;
+
cm_dev = kmalloc(sizeof(*cm_dev) + sizeof(*port) *
device->phys_port_cnt, GFP_KERNEL);
if (!cm_dev)
@@ -3291,6 +3295,10 @@
if (!cm_dev)
return;
+ /* Ignore RNIC devices */
+ if (device->node_type == IB_NODE_RNIC)
+ return;
+
write_lock_irqsave(&cm.device_lock, flags);
list_del(&cm_dev->list);
write_unlock_irqrestore(&cm.device_lock, flags);
Index: core/iwcm.c
===================================================================
--- core/iwcm.c (revision 0)
+++ core/iwcm.c (revision 0)
@@ -0,0 +1,671 @@
+/*
+ * Copyright (c) 2004, 2005 Intel Corporation. All rights reserved.
+ * Copyright (c) 2004 Topspin Corporation. All rights reserved.
+ * Copyright (c) 2004, 2005 Voltaire Corporation. All rights reserved.
+ * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
+ * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
+ * Copyright (c) 2005 Network Appliance, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/idr.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/rbtree.h>
+#include <linux/spinlock.h>
+#include <linux/workqueue.h>
+
+#include <rdma/ib_cache.h>
+#include <rdma/ib_cm.h>
+#include <rdma/iw_cm.h>
+
+#include "cm_msgs.h"
+
+MODULE_AUTHOR("Tom Tucker");
+MODULE_DESCRIPTION("iWARP CM");
+MODULE_LICENSE("Dual BSD/GPL");
+
+static void iwcm_add_one(struct ib_device *device);
+static void iwcm_remove_one(struct ib_device *device);
+struct iwcm_id_private;
+
+static struct ib_client iwcm_client = {
+ .name = "cm",
+ .add = iwcm_add_one,
+ .remove = iwcm_remove_one
+};
+
+static struct {
+ spinlock_t lock;
+ struct list_head device_list;
+ rwlock_t device_lock;
+ struct workqueue_struct* wq;
+} iwcm;
+
+struct iwcm_device;
+struct iwcm_port {
+ struct iwcm_device *iwcm_dev;
+ struct sockaddr_in local_addr;
+ u8 port_num;
+};
+
+struct iwcm_device {
+ struct list_head list;
+ struct ib_device *device;
+ struct iwcm_port port[0];
+};
+
+struct iwcm_id_private {
+ struct iw_cm_id id;
+
+ spinlock_t lock;
+ wait_queue_head_t wait;
+ atomic_t refcount;
+
+ struct rb_node listen_node;
+
+ struct list_head work_list;
+ atomic_t work_count;
+};
+
+struct iwcm_work {
+ struct work_struct work;
+ struct iwcm_id_private* cm_id;
+ struct iw_cm_event event;
+};
+
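+/* One iwcm_work element is allocated with GFP_ATOMIC per provider
+ * event in cm_event_handler() and freed by whichever cm_*_handler
+ * consumes it on the iw_cm workqueue.
+ */
+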
+/* Called whenever a reference is added to a cm_id */
+static inline void iwcm_addref_id(struct iwcm_id_private *cm_id_priv)
+{
+ atomic_inc(&cm_id_priv->refcount);
+}
+
+/* Called whenever a reference to a cm_id is released */
+static inline void iwcm_deref_id(struct iwcm_id_private *cm_id_priv)
+{
+ if (atomic_dec_and_test(&cm_id_priv->refcount))
+ wake_up(&cm_id_priv->wait);
+}
+
+static void cm_event_handler(struct iw_cm_id* cm_id, struct iw_cm_event* event);
+
+struct iw_cm_id *iw_create_cm_id(struct ib_device *device,
+ iw_cm_handler cm_handler,
+ void *context)
+{
+ struct iwcm_id_private *iwcm_id_priv;
+
+ iwcm_id_priv = kmalloc(sizeof *iwcm_id_priv, GFP_KERNEL);
+ if (!iwcm_id_priv)
+ return ERR_PTR(-ENOMEM);
+
+ memset(iwcm_id_priv, 0, sizeof *iwcm_id_priv);
+ iwcm_id_priv->id.state = IW_CM_STATE_IDLE;
+ iwcm_id_priv->id.device = device;
+ iwcm_id_priv->id.cm_handler = cm_handler;
+ iwcm_id_priv->id.context = context;
+ iwcm_id_priv->id.event_handler = cm_event_handler;
+
+ spin_lock_init(&iwcm_id_priv->lock);
+ init_waitqueue_head(&iwcm_id_priv->wait);
+ atomic_set(&iwcm_id_priv->refcount, 1);
+
+ return &iwcm_id_priv->id;
+
+}
+EXPORT_SYMBOL(iw_create_cm_id);
+
+struct iw_cm_id* iw_clone_id(struct iw_cm_id* parent)
+{
+ return iw_create_cm_id(parent->device,
+ parent->cm_handler,
+ parent->context);
+}
+
+void iw_destroy_cm_id(struct iw_cm_id *cm_id)
+{
+ struct iwcm_id_private *iwcm_id_priv;
+ unsigned long flags;
+ int ret = 0;
+
+
+ iwcm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
+
+ spin_lock_irqsave(&iwcm_id_priv->lock, flags);
+ switch (cm_id->state) {
+ case IW_CM_STATE_LISTEN:
+ cm_id->state = IW_CM_STATE_IDLE;
+ spin_unlock_irqrestore(&iwcm_id_priv->lock, flags);
+ ret = cm_id->device->iwcm->destroy_listen(cm_id);
+ break;
+
+ case IW_CM_STATE_CONN_RECV:
+ case IW_CM_STATE_CONN_SENT:
+ case IW_CM_STATE_ESTABLISHED:
+ cm_id->state = IW_CM_STATE_IDLE;
+ spin_unlock_irqrestore(&iwcm_id_priv->lock, flags);
+ ret = cm_id->device->iwcm->disconnect(cm_id,1);
+ break;
+
+ case IW_CM_STATE_IDLE:
+ spin_unlock_irqrestore(&iwcm_id_priv->lock, flags);
+ break;
+
+ default:
+ spin_unlock_irqrestore(&iwcm_id_priv->lock, flags);
+ printk(KERN_ERR "%s:%s:%u Illegal state %d for iw_cm_id.\n",
+ __FILE__, __FUNCTION__, __LINE__, cm_id->state);
+ break;
+ }
+
+ atomic_dec(&iwcm_id_priv->refcount);
+ wait_event(iwcm_id_priv->wait, !atomic_read(&iwcm_id_priv->refcount));
+
+ kfree(iwcm_id_priv);
+}
+EXPORT_SYMBOL(iw_destroy_cm_id);
+
+int iw_cm_listen(struct iw_cm_id *cm_id, int backlog)
+{
+ struct iwcm_id_private *iwcm_id_priv;
+ unsigned long flags;
+ int ret = 0;
+
+
+ iwcm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
+
+ if (cm_id->device == 0) {
+ printk(KERN_ERR "device is NULL\n");
+ return -EINVAL;
+ }
+
+ if (cm_id->device->iwcm == 0) {
+ printk(KERN_ERR "iwcm is NULL\n");
+ return -EINVAL;
+ }
+
+ spin_lock_irqsave(&iwcm_id_priv->lock, flags);
+ if (cm_id->state != IW_CM_STATE_IDLE) {
+ spin_unlock_irqrestore(&iwcm_id_priv->lock, flags);
+ return -EBUSY;
+ }
+ cm_id->state = IW_CM_STATE_LISTEN;
+ spin_unlock_irqrestore(&iwcm_id_priv->lock, flags);
+
+ ret = cm_id->device->iwcm->create_listen(cm_id, backlog);
+ if (ret != 0) {
+ spin_lock_irqsave(&iwcm_id_priv->lock, flags);
+ cm_id->state = IW_CM_STATE_IDLE;
+ spin_unlock_irqrestore(&iwcm_id_priv->lock, flags);
+ }
+ return ret;
+}
+EXPORT_SYMBOL(iw_cm_listen);
+
+int iw_cm_getpeername(struct iw_cm_id *cm_id,
+ struct sockaddr_in* local_addr,
+ struct sockaddr_in* remote_addr)
+{
+ if (cm_id->device == 0)
+ return -EINVAL;
+
+ if (cm_id->device->iwcm == 0)
+ return -EINVAL;
+
+ /* Make sure there's a connection */
+ if (cm_id->state != IW_CM_STATE_ESTABLISHED)
+ return -ENOTCONN;
+
+ return cm_id->device->iwcm->getpeername(cm_id, local_addr, remote_addr);
+}
+EXPORT_SYMBOL(iw_cm_getpeername);
+
+int iw_cm_reject(struct iw_cm_id *cm_id,
+ const void *private_data,
+ u8 private_data_len)
+{
+ struct iwcm_id_private *iwcm_id_priv;
+ unsigned long flags;
+ int ret;
+
+
+ if (cm_id->device == 0 || cm_id->device->iwcm == 0)
+ return -EINVAL;
+
+ iwcm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
+
+ spin_lock_irqsave(&iwcm_id_priv->lock, flags);
+ switch (cm_id->state) {
+ case IW_CM_STATE_CONN_RECV:
+ ret = cm_id->device->iwcm->reject(cm_id, private_data, private_data_len);
+ cm_id->state = IW_CM_STATE_IDLE;
+ break;
+ default:
+ ret = -EINVAL;
+ goto out;
+ }
+
+out:
+ spin_unlock_irqrestore(&iwcm_id_priv->lock, flags);
+ return ret;
+}
+EXPORT_SYMBOL(iw_cm_reject);
+
+int iw_cm_accept(struct iw_cm_id *cm_id,
+ const void *private_data,
+ u8 private_data_len)
+{
+ struct iwcm_id_private *iwcm_id_priv;
+ int ret;
+
+ if (cm_id->device == 0 || cm_id->device->iwcm == 0)
+ return -EINVAL;
+
+ iwcm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
+
+ switch (cm_id->state) {
+ case IW_CM_STATE_CONN_RECV:
+ ret = cm_id->device->iwcm->accept(cm_id, private_data,
+ private_data_len);
+ if (ret == 0) {
+ struct iw_cm_event event;
+ event.event = IW_CM_EVENT_ESTABLISHED;
+ event.provider_id = cm_id->provider_id;
+ event.status = 0;
+ event.local_addr = cm_id->local_addr;
+ event.remote_addr = cm_id->remote_addr;
+ event.private_data = 0;
+ event.private_data_len = 0;
+ cm_event_handler(cm_id, &event);
+ }
+
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL(iw_cm_accept);
+
+int iw_cm_bind_qp(struct iw_cm_id* cm_id, struct ib_qp* qp)
+{
+ int ret = -EINVAL;
+
+ if (cm_id) {
+ cm_id->qp = qp;
+ ret = 0;
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL(iw_cm_bind_qp);
+
+int iw_cm_connect(struct iw_cm_id *cm_id,
+ const void* pdata, u8 pdata_len)
+{
+ struct iwcm_id_private* cm_id_priv;
+ int ret = 0;
+ unsigned long flags;
+
+ if (cm_id->state != IW_CM_STATE_IDLE)
+ return -EBUSY;
+
+ if (cm_id->device == 0)
+ return -EINVAL;
+
+ if (cm_id->device->iwcm == 0)
+ return -ENOSYS;
+
+ cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
+
+ spin_lock_irqsave(&cm_id_priv->lock, flags);
+ cm_id->state = IW_CM_STATE_CONN_SENT;
+ spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+
+ ret = cm_id->device->iwcm->connect(cm_id, pdata, pdata_len);
+ if (ret != 0) {
+ spin_lock_irqsave(&cm_id_priv->lock, flags);
+ cm_id->state = IW_CM_STATE_IDLE;
+ spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+ }
+ return ret;
+}
+EXPORT_SYMBOL(iw_cm_connect);
+
+int iw_cm_disconnect(struct iw_cm_id *cm_id)
+{
+ struct iwcm_id_private *iwcm_id_priv;
+ int ret;
+
+ if (cm_id->device == 0 || cm_id->device->iwcm == 0 || cm_id->qp == 0)
+ return -EINVAL;
+
+ iwcm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
+
+ switch (cm_id->state) {
+ case IW_CM_STATE_ESTABLISHED:
+ ret = cm_id->device->iwcm->disconnect(cm_id, 1);
+ cm_id->state = IW_CM_STATE_IDLE;
+ if (ret == 0) {
+ struct iw_cm_event event;
+ event.event = IW_CM_EVENT_LLP_DISCONNECT;
+ event.provider_id = cm_id->provider_id;
+ event.status = 0;
+ event.local_addr = cm_id->local_addr;
+ event.remote_addr = cm_id->remote_addr;
+ event.private_data = 0;
+ event.private_data_len = 0;
+ cm_event_handler(cm_id, &event);
+ }
+
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL(iw_cm_disconnect);
+
+static void iwcm_add_one(struct ib_device *device)
+{
+ struct iwcm_device *iwcm_dev;
+ struct iwcm_port *port;
+ unsigned long flags;
+ u8 i;
+
+ if (device->node_type != IB_NODE_RNIC)
+ return;
+
+ iwcm_dev = kmalloc(sizeof(*iwcm_dev) + sizeof(*port) *
+ device->phys_port_cnt, GFP_KERNEL);
+ if (!iwcm_dev)
+ return;
+
+ iwcm_dev->device = device;
+
+ for (i = 1; i <= device->phys_port_cnt; i++) {
+ port = &iwcm_dev->port[i-1];
+ port->iwcm_dev = iwcm_dev;
+ port->port_num = i;
+ }
+
+ ib_set_client_data(device, &iwcm_client, iwcm_dev);
+
+ write_lock_irqsave(&iwcm.device_lock, flags);
+ list_add_tail(&iwcm_dev->list, &iwcm.device_list);
+ write_unlock_irqrestore(&iwcm.device_lock, flags);
+ return;
+}
+
+static void iwcm_remove_one(struct ib_device *device)
+{
+ struct iwcm_device *iwcm_dev;
+ unsigned long flags;
+
+ if (device->node_type != IB_NODE_RNIC)
+ return;
+
+ iwcm_dev = ib_get_client_data(device, &iwcm_client);
+ if (!iwcm_dev)
+ return;
+
+ write_lock_irqsave(&iwcm.device_lock, flags);
+ list_del(&iwcm_dev->list);
+ write_unlock_irqrestore(&iwcm.device_lock, flags);
+
+ kfree(iwcm_dev);
+}
+
+/* Handles an inbound connect request. The function creates a new
+ * iw_cm_id to represent the new connection and inherits the client
+ * callback function and other attributes from the listening parent.
+ *
+ * The work item contains a pointer to the listen_cm_id and the event. The
+ * listen_cm_id contains the client cm_handler, context and device. These are
+ * copied when the cm_id is cloned. The event contains the new four-tuple.
+ */
+static int cm_conn_req_handler(struct iwcm_work* work)
+{
+ struct iw_cm_id* cm_id;
+ struct iwcm_id_private* cm_id_priv;
+ unsigned long flags;
+ int rc;
+
+ /* If the status was not successful, ignore the request */
+ if (work->event.status) {
+ printk(KERN_ERR "Bad status=%d for connection request ... "
+ "should be filtered by provider\n",
+ work->event.status);
+ rc = work->event.status;
+ goto out;
+ }
+ cm_id = iw_clone_id(&work->cm_id->id);
+ if (IS_ERR(cm_id)) {
+ rc = PTR_ERR(cm_id);
+ goto out;
+ }
+
+ cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
+ spin_lock_irqsave(&cm_id_priv->lock, flags);
+ cm_id_priv->id.local_addr = work->event.local_addr;
+ cm_id_priv->id.remote_addr = work->event.remote_addr;
+ cm_id_priv->id.provider_id = work->event.provider_id;
+ cm_id_priv->id.state = IW_CM_STATE_CONN_RECV;
+ spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+
+ /* Call the client CM handler */
+ rc = cm_id->cm_handler(cm_id, &work->event);
+ if (rc) {
+ cm_id->state = IW_CM_STATE_IDLE;
+ iw_destroy_cm_id(cm_id);
+ }
+out:
+ kfree(work);
+ return rc;
+}
+
+/*
+ * Handles the transition to established state on the passive side.
+ */
+static int cm_conn_est_handler(struct iwcm_work* work)
+{
+ struct iwcm_id_private* cm_id_priv;
+ unsigned long flags;
+ int ret = 0;
+
+ cm_id_priv = work->cm_id;
+ spin_lock_irqsave(&cm_id_priv->lock, flags);
+ if (cm_id_priv->id.state != IW_CM_STATE_CONN_RECV) {
+ printk(KERN_ERR "%s:%d Invalid cm_id state=%d for established event\n",
+ __FUNCTION__, __LINE__, cm_id_priv->id.state);
+ spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+ ret = -EINVAL;
+ goto error_out;
+ }
+
+ if (work->event.status == 0) {
+ cm_id_priv->id.local_addr = work->event.local_addr;
+ cm_id_priv->id.remote_addr = work->event.remote_addr;
+ cm_id_priv->id.state = IW_CM_STATE_ESTABLISHED;
+ } else {
+ cm_id_priv->id.state = IW_CM_STATE_IDLE;
+ }
+ spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+
+ /* Call the client CM handler */
+ ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, &work->event);
+ if (ret) {
+ cm_id_priv->id.state = IW_CM_STATE_IDLE;
+ iw_destroy_cm_id(&cm_id_priv->id);
+ }
+
+ error_out:
+ kfree(work);
+ return ret;
+}
+
+/*
+ * Handles the reply to our connect request. There are three
+ * possibilities:
+ * - If the cm_id is in the wrong state when the event is
+ * delivered, the event is ignored. [What should we do when the
+ * provider does something crazy?]
+ * - If the remote peer accepts the connection, we update the 4-tuple
+ * in the cm_id with the remote peer info, move the cm_id to the
+ * ESTABLISHED state and deliver the event to the client.
+ * - If the remote peer rejects the connection, or there is some
+ * connection error, move the cm_id to the IDLE state, and deliver
+ * the event to the client.
+ */
+static int cm_conn_rep_handler(struct iwcm_work* work)
+{
+ struct iwcm_id_private* cm_id_priv;
+ unsigned long flags;
+ int ret = 0;
+
+ cm_id_priv = work->cm_id;
+ spin_lock_irqsave(&cm_id_priv->lock, flags);
+ if (cm_id_priv->id.state != IW_CM_STATE_CONN_SENT) {
+ printk(KERN_ERR "%s:%d Invalid cm_id state=%d for connect reply event\n",
+ __FUNCTION__, __LINE__, cm_id_priv->id.state);
+ spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+ ret = -EINVAL;
+ goto error_out;
+ }
+
+ if (work->event.status == 0) {
+ cm_id_priv->id.local_addr = work->event.local_addr;
+ cm_id_priv->id.remote_addr = work->event.remote_addr;
+ cm_id_priv->id.state = IW_CM_STATE_ESTABLISHED;
+ } else {
+ cm_id_priv->id.state = IW_CM_STATE_IDLE;
+ }
+ spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+
+ /* Call the client CM handler */
+ ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, &work->event);
+ if (ret) {
+ cm_id_priv->id.state = IW_CM_STATE_IDLE;
+ iw_destroy_cm_id(&cm_id_priv->id);
+ }
+
+ error_out:
+ kfree(work);
+ return ret;
+}
+
+static int cm_disconnect_handler(struct iwcm_work* work)
+{
+ struct iwcm_id_private* cm_id_priv;
+ unsigned long flags;
+ int ret = 0;
+
+ cm_id_priv = work->cm_id;
+ spin_lock_irqsave(&cm_id_priv->lock, flags);
+ cm_id_priv->id.state = IW_CM_STATE_IDLE;
+ spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+
+ /* Call the client CM handler */
+ ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, &work->event);
+ if (ret) {
+ cm_id_priv->id.state = IW_CM_STATE_IDLE;
+ iw_destroy_cm_id(&cm_id_priv->id);
+ }
+
+ kfree(work);
+ return ret;
+}
+
+static void cm_work_handler(void* arg)
+{
+ struct iwcm_work* work = (struct iwcm_work*)arg;
+ int rc;
+
+ switch (work->event.event) {
+ case IW_CM_EVENT_CONNECT_REQUEST:
+ rc = cm_conn_req_handler(work);
+ break;
+ case IW_CM_EVENT_CONNECT_REPLY:
+ rc = cm_conn_rep_handler(work);
+ break;
+ case IW_CM_EVENT_ESTABLISHED:
+ rc = cm_conn_est_handler(work);
+ break;
+ case IW_CM_EVENT_LLP_DISCONNECT:
+ case IW_CM_EVENT_LLP_TIMEOUT:
+ case IW_CM_EVENT_LLP_RESET:
+ case IW_CM_EVENT_CLOSE:
+ rc = cm_disconnect_handler(work);
+ break;
+ }
+}
+
+/* IW CM provider event callback handler. This function is called in
+ * interrupt context. It builds a work queue element and enqueues it
+ * for processing by a work queue thread, which allows the CM client
+ * callback functions to block.
+ */
+static void cm_event_handler(struct iw_cm_id* cm_id,
+ struct iw_cm_event* event)
+{
+ struct iwcm_work *work;
+ struct iwcm_id_private* cm_id_priv;
+
+ work = kmalloc(sizeof *work, GFP_ATOMIC);
+ if (!work)
+ return;
+
+ cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
+ INIT_WORK(&work->work, cm_work_handler, work);
+ work->cm_id = cm_id_priv;
+ work->event = *event;
+ queue_work(iwcm.wq, &work->work);
+}
+
+static int __init iw_cm_init(void)
+{
+ memset(&iwcm, 0, sizeof iwcm);
+ INIT_LIST_HEAD(&iwcm.device_list);
+ rwlock_init(&iwcm.device_lock);
+ spin_lock_init(&iwcm.lock);
+ iwcm.wq = create_workqueue("iw_cm");
+ if (!iwcm.wq)
+ return -ENOMEM;
+
+ return ib_register_client(&iwcm_client);
+}
+
+static void __exit iw_cm_cleanup(void)
+{
+ ib_unregister_client(&iwcm_client);
+ destroy_workqueue(iwcm.wq);
+}
+
+module_init(iw_cm_init);
+module_exit(iw_cm_cleanup);
+
Index: core/addr.c
===================================================================
--- core/addr.c (revision 4186)
+++ core/addr.c (working copy)
@@ -73,8 +73,13 @@
if (!dev)
return -EADDRNOTAVAIL;
- *gid = *(union ib_gid *) (dev->dev_addr + 4);
- *pkey = addr_get_pkey(dev);
+ if (dev->type == ARPHRD_INFINIBAND) {
+ *gid = *(union ib_gid *) (dev->dev_addr + 4);
+ *pkey = addr_get_pkey(dev);
+ } else {
+ *gid = *(union ib_gid *) (dev->dev_addr);
+ *pkey = 0;
+ }
dev_put(dev);
return 0;
}
Index: core/Makefile
===================================================================
--- core/Makefile (revision 4186)
+++ core/Makefile (working copy)
@@ -1,6 +1,6 @@
EXTRA_CFLAGS += -Idrivers/infiniband/include -Idrivers/infiniband/ulp/ipoib
-obj-$(CONFIG_INFINIBAND) += ib_core.o ib_mad.o ib_ping.o ib_cm.o \
+obj-$(CONFIG_INFINIBAND) += ib_core.o ib_mad.o ib_ping.o ib_cm.o iw_cm.o \
ib_sa.o ib_at.o ib_addr.o rdma_cm.o
obj-$(CONFIG_INFINIBAND_USER_MAD) += ib_umad.o
obj-$(CONFIG_INFINIBAND_USER_ACCESS) += ib_uverbs.o ib_ucm.o ib_uat.o
@@ -14,6 +14,8 @@
ib_cm-y := cm.o
+iw_cm-y := iwcm.o
+
rdma_cm-y := cma.o
ib_addr-y := addr.o
Index: core/cma.c
===================================================================
--- core/cma.c (revision 4186)
+++ core/cma.c (working copy)
@@ -1,4 +1,5 @@
/*
+ * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
* Copyright (c) 2005 Voltaire Inc. All rights reserved.
* Copyright (c) 2002-2005, Network Appliance, Inc. All rights reserved.
* Copyright (c) 1999-2005, Mellanox Technologies, Inc. All rights reserved.
@@ -30,9 +31,14 @@
*/
#include <linux/in.h>
#include <linux/in6.h>
+#include <linux/inetdevice.h>
+#include <net/route.h>
+#include <net/arp.h>
+#include <net/neighbour.h>
#include <rdma/rdma_cm.h>
#include <rdma/ib_cache.h>
#include <rdma/ib_cm.h>
+#include <rdma/iw_cm.h>
#include <rdma/ib_sa.h>
MODULE_AUTHOR("Guy German");
@@ -100,7 +106,10 @@
int timeout_ms;
struct ib_sa_query *query;
int query_id;
- struct ib_cm_id *cm_id;
+ union {
+ struct ib_cm_id *ib;
+ struct iw_cm_id *iw;
+ } cm_id;
};
struct cma_addr {
@@ -266,6 +275,16 @@
IB_QP_PKEY_INDEX | IB_QP_PORT);
}
+static int cma_init_iw_qp(struct rdma_id_private *id_priv, struct ib_qp *qp)
+{
+ struct ib_qp_attr qp_attr;
+
+ qp_attr.qp_state = IB_QPS_INIT;
+ qp_attr.qp_access_flags = IB_ACCESS_LOCAL_WRITE;
+
+ return ib_modify_qp(qp, &qp_attr, IB_QP_STATE | IB_QP_ACCESS_FLAGS);
+}
+
int rdma_create_qp(struct rdma_cm_id *id, struct ib_pd *pd,
struct ib_qp_init_attr *qp_init_attr)
{
@@ -285,6 +304,9 @@
case IB_NODE_CA:
ret = cma_init_ib_qp(id_priv, qp);
break;
+ case IB_NODE_RNIC:
+ ret = cma_init_iw_qp(id_priv, qp);
+ break;
default:
ret = -ENOSYS;
break;
@@ -314,7 +336,7 @@
/* Need to update QP attributes from default values. */
qp_attr.qp_state = IB_QPS_INIT;
- ret = ib_cm_init_qp_attr(id_priv->cm_id, &qp_attr, &qp_attr_mask);
+ ret = ib_cm_init_qp_attr(id_priv->cm_id.ib, &qp_attr, &qp_attr_mask);
if (ret)
return ret;
@@ -323,7 +345,7 @@
return ret;
qp_attr.qp_state = IB_QPS_RTR;
- ret = ib_cm_init_qp_attr(id_priv->cm_id, &qp_attr, &qp_attr_mask);
+ ret = ib_cm_init_qp_attr(id_priv->cm_id.ib, &qp_attr, &qp_attr_mask);
if (ret)
return ret;
@@ -337,7 +359,7 @@
int qp_attr_mask, ret;
qp_attr.qp_state = IB_QPS_RTS;
- ret = ib_cm_init_qp_attr(id_priv->cm_id, &qp_attr, &qp_attr_mask);
+ ret = ib_cm_init_qp_attr(id_priv->cm_id.ib, &qp_attr, &qp_attr_mask);
if (ret)
return ret;
@@ -419,8 +441,8 @@
{
cma_exch(id_priv, CMA_DESTROYING);
- if (id_priv->cm_id && !IS_ERR(id_priv->cm_id))
- ib_destroy_cm_id(id_priv->cm_id);
+ if (id_priv->cm_id.ib && !IS_ERR(id_priv->cm_id.ib))
+ ib_destroy_cm_id(id_priv->cm_id.ib);
list_del(&id_priv->listen_list);
if (id_priv->cma_dev)
@@ -476,8 +498,22 @@
state = cma_exch(id_priv, CMA_DESTROYING);
cma_cancel_operation(id_priv, state);
- if (id_priv->cm_id && !IS_ERR(id_priv->cm_id))
- ib_destroy_cm_id(id_priv->cm_id);
+ if (id->device) {
+ switch (id->device->node_type) {
+ case IB_NODE_RNIC:
+ if (id_priv->cm_id.iw && !IS_ERR(id_priv->cm_id.iw)) {
+ iw_destroy_cm_id(id_priv->cm_id.iw);
+ id_priv->cm_id.iw = 0;
+ }
+ break;
+ default:
+ if (id_priv->cm_id.ib && !IS_ERR(id_priv->cm_id.ib)) {
+ ib_destroy_cm_id(id_priv->cm_id.ib);
+ id_priv->cm_id.ib = 0;
+ }
+ break;
+ }
+ }
if (id_priv->cma_dev) {
down(&mutex);
@@ -505,14 +541,14 @@
if (ret)
goto reject;
- ret = ib_send_cm_rtu(id_priv->cm_id, NULL, 0);
+ ret = ib_send_cm_rtu(id_priv->cm_id.ib, NULL, 0);
if (ret)
goto reject;
return 0;
reject:
cma_modify_qp_err(&id_priv->id);
- ib_send_cm_rej(id_priv->cm_id, IB_CM_REJ_CONSUMER_DEFINED,
+ ib_send_cm_rej(id_priv->cm_id.ib, IB_CM_REJ_CONSUMER_DEFINED,
NULL, 0, NULL, 0);
return ret;
}
@@ -528,7 +564,7 @@
return 0;
reject:
cma_modify_qp_err(&id_priv->id);
- ib_send_cm_rej(id_priv->cm_id, IB_CM_REJ_CONSUMER_DEFINED,
+ ib_send_cm_rej(id_priv->cm_id.ib, IB_CM_REJ_CONSUMER_DEFINED,
NULL, 0, NULL, 0);
return ret;
}
@@ -586,7 +622,7 @@
private_data_len);
if (ret) {
/* Destroy the CM ID by returning a non-zero value. */
- id_priv->cm_id = NULL;
+ id_priv->cm_id.ib = NULL;
cma_exch(id_priv, CMA_DESTROYING);
cma_release_remove(id_priv);
rdma_destroy_id(&id_priv->id);
@@ -675,7 +711,7 @@
goto out;
}
- conn_id->cm_id = cm_id;
+ conn_id->cm_id.ib = cm_id;
cm_id->context = conn_id;
cm_id->cm_handler = cma_ib_handler;
@@ -685,7 +721,7 @@
IB_CM_REQ_PRIVATE_DATA_SIZE - offset);
if (ret) {
/* Destroy the CM ID by returning a non-zero value. */
- conn_id->cm_id = NULL;
+ conn_id->cm_id.ib = NULL;
cma_exch(conn_id, CMA_DESTROYING);
cma_release_remove(conn_id);
rdma_destroy_id(&conn_id->id);
@@ -695,6 +731,112 @@
return ret;
}
+static int cma_iw_handler(struct iw_cm_id* iw_id, struct iw_cm_event* event)
+{
+ struct rdma_id_private *id_priv = iw_id->context;
+ enum rdma_cm_event_type event_type = 0;
+ int ret = 0;
+
+ atomic_inc(&id_priv->dev_remove);
+
+ switch (event->event) {
+ case IW_CM_EVENT_LLP_DISCONNECT:
+ case IW_CM_EVENT_LLP_RESET:
+ case IW_CM_EVENT_LLP_TIMEOUT:
+ case IW_CM_EVENT_CLOSE:
+ event_type = RDMA_CM_EVENT_DISCONNECTED;
+ break;
+
+ case IW_CM_EVENT_CONNECT_REQUEST:
+ BUG_ON(1);
+ break;
+
+ case IW_CM_EVENT_CONNECT_REPLY: {
+ if (event->status)
+ event_type = RDMA_CM_EVENT_REJECTED;
+ else
+ event_type = RDMA_CM_EVENT_ESTABLISHED;
+ break;
+ }
+
+ case IW_CM_EVENT_ESTABLISHED:
+ event_type = RDMA_CM_EVENT_ESTABLISHED;
+ break;
+ }
+
+ ret = cma_notify_user(id_priv,
+ event_type,
+ event->status,
+ event->private_data,
+ event->private_data_len);
+ if (ret) {
+ /* Destroy the CM ID by returning a non-zero value. */
+ id_priv->cm_id.iw = NULL;
+ cma_exch(id_priv, CMA_DESTROYING);
+ cma_release_remove(id_priv);
+ rdma_destroy_id(&id_priv->id);
+ return ret;
+ }
+
+ cma_release_remove(id_priv);
+ return ret;
+}
+
+static int iw_conn_req_handler(struct iw_cm_id *cm_id,
+ struct iw_cm_event *iw_event)
+{
+ struct rdma_cm_id* new_cm_id;
+ struct rdma_id_private *listen_id, *conn_id;
+ struct sockaddr_in* sin;
+ int ret;
+
+ listen_id = cm_id->context;
+ atomic_inc(&listen_id->dev_remove);
+ if (!cma_comp(listen_id, CMA_LISTEN)) {
+ ret = -ECONNABORTED;
+ goto out;
+ }
+
+ /* Create a new RDMA id for the new IW CM id */
+ new_cm_id = rdma_create_id(listen_id->id.event_handler,
+ listen_id->id.context);
+ if (IS_ERR(new_cm_id)) {
+ ret = PTR_ERR(new_cm_id);
+ goto out;
+ }
+ conn_id = container_of(new_cm_id, struct rdma_id_private, id);
+ atomic_inc(&conn_id->dev_remove);
+ conn_id->state = CMA_CONNECT;
+
+ /* New connection inherits device from parent */
+ cma_attach_to_dev(conn_id, listen_id->cma_dev);
+
+ conn_id->cm_id.iw = cm_id;
+ cm_id->context = conn_id;
+ cm_id->cm_handler = cma_iw_handler;
+
+ sin = (struct sockaddr_in*)&new_cm_id->route.addr.src_addr;
+ *sin = iw_event->local_addr;
+
+ sin = (struct sockaddr_in*)&new_cm_id->route.addr.dst_addr;
+ *sin = iw_event->remote_addr;
+
+ ret = cma_notify_user(conn_id, RDMA_CM_EVENT_CONNECT_REQUEST, 0,
+ iw_event->private_data,
+ iw_event->private_data_len);
+ if (ret) {
+ /* Destroy the CM ID by returning a non-zero value. */
+ conn_id->cm_id.iw = NULL;
+ cma_exch(conn_id, CMA_DESTROYING);
+ cma_release_remove(conn_id);
+ rdma_destroy_id(&conn_id->id);
+ }
+
+out:
+ cma_release_remove(listen_id);
+ return ret;
+}
+
static __be64 cma_get_service_id(struct sockaddr *addr)
{
return cpu_to_be64(((u64)IB_OPENIB_OUI << 48) +
@@ -706,21 +848,44 @@
__be64 svc_id;
int ret;
- id_priv->cm_id = ib_create_cm_id(id_priv->id.device, cma_req_handler,
+ id_priv->cm_id.ib = ib_create_cm_id(id_priv->id.device, cma_req_handler,
id_priv);
- if (IS_ERR(id_priv->cm_id))
- return PTR_ERR(id_priv->cm_id);
+ if (IS_ERR(id_priv->cm_id.ib))
+ return PTR_ERR(id_priv->cm_id.ib);
svc_id = cma_get_service_id(&id_priv->id.route.addr.src_addr);
- ret = ib_cm_listen(id_priv->cm_id, svc_id, 0);
+ ret = ib_cm_listen(id_priv->cm_id.ib, svc_id, 0);
if (ret) {
- ib_destroy_cm_id(id_priv->cm_id);
- id_priv->cm_id = NULL;
+ ib_destroy_cm_id(id_priv->cm_id.ib);
+ id_priv->cm_id.ib = NULL;
}
return ret;
}
+static int cma_iw_listen(struct rdma_id_private *id_priv)
+{
+ int ret;
+ struct sockaddr_in* sin;
+
+ id_priv->cm_id.iw = iw_create_cm_id(id_priv->id.device,
+ iw_conn_req_handler,
+ id_priv);
+ if (IS_ERR(id_priv->cm_id.iw))
+ return PTR_ERR(id_priv->cm_id.iw);
+
+ sin = (struct sockaddr_in*)&id_priv->id.route.addr.src_addr;
+ id_priv->cm_id.iw->local_addr = *sin;
+
+ ret = iw_cm_listen(id_priv->cm_id.iw, 10 /* backlog */);
+ if (ret) {
+ iw_destroy_cm_id(id_priv->cm_id.iw);
+ id_priv->cm_id.iw = NULL;
+ }
+
+ return ret;
+}
+
static int cma_duplicate_listen(struct rdma_id_private *id_priv)
{
struct rdma_id_private *cur_id_priv;
@@ -785,8 +950,9 @@
goto out;
list_add_tail(&id_priv->list, &listen_any_list);
- list_for_each_entry(cma_dev, &dev_list, list)
+ list_for_each_entry(cma_dev, &dev_list, list) {
cma_listen_on_dev(id_priv, cma_dev);
+ }
out:
up(&mutex);
return ret;
@@ -796,7 +962,6 @@
{
struct rdma_id_private *id_priv;
int ret;
-
id_priv = container_of(id, struct rdma_id_private, id);
if (!cma_comp_exch(id_priv, CMA_ADDR_BOUND, CMA_LISTEN))
return -EINVAL;
@@ -806,6 +971,9 @@
case IB_NODE_CA:
ret = cma_ib_listen(id_priv);
break;
+ case IB_NODE_RNIC:
+ ret = cma_iw_listen(id_priv);
+ break;
default:
ret = -ENOSYS;
break;
@@ -890,6 +1058,30 @@
return (id_priv->query_id < 0) ? id_priv->query_id : 0;
}
+static int cma_resolve_iw_route(struct rdma_id_private *id_priv, int timeout_ms)
+{
+ enum rdma_cm_event_type event = RDMA_CM_EVENT_ROUTE_RESOLVED;
+ int rc;
+
+ atomic_inc(&id_priv->dev_remove);
+
+ if (!cma_comp_exch(id_priv, CMA_ROUTE_QUERY, CMA_ROUTE_RESOLVED))
+ BUG_ON(1);
+
+ rc = cma_notify_user(id_priv, event, 0, NULL, 0);
+ if (rc) {
+ cma_exch(id_priv, CMA_DESTROYING);
+ cma_release_remove(id_priv);
+ cma_deref_id(id_priv);
+ rdma_destroy_id(&id_priv->id);
+ return rc;
+ }
+
+ cma_release_remove(id_priv);
+ cma_deref_id(id_priv);
+ return rc;
+}
+
int rdma_resolve_route(struct rdma_cm_id *id, int timeout_ms)
{
struct rdma_id_private *id_priv;
@@ -904,6 +1096,9 @@
case IB_NODE_CA:
ret = cma_resolve_ib_route(id_priv, timeout_ms);
break;
+ case IB_NODE_RNIC:
+ ret = cma_resolve_iw_route(id_priv, timeout_ms);
+ break;
default:
ret = -ENOSYS;
break;
@@ -952,20 +1147,133 @@
cma_deref_id(id_priv);
}
+
+/* Find the local interface with a route to the specified address and
+ * bind the CM ID to this interface's CMA device
+ */
+static int cma_acquire_iw_dev(struct rdma_cm_id* id, struct sockaddr* addr)
+{
+ int ret = -ENOENT;
+ struct cma_device* cma_dev;
+ struct rdma_id_private *id_priv;
+ struct sockaddr_in* sin;
+ struct rtable *rt = 0;
+ struct flowi fl;
+ struct net_device* netdev;
+ struct in_addr src_ip;
+ unsigned char* dev_addr;
+
+ sin = (struct sockaddr_in*)addr;
+ if (sin->sin_family != AF_INET)
+ return -EINVAL;
+
+ id_priv = container_of(id, struct rdma_id_private, id);
+
+ /* If the address is local, use the device. If it is remote,
+ * look up a route to get the local address
+ */
+ netdev = ip_dev_find(sin->sin_addr.s_addr);
+ if (netdev) {
+ src_ip = sin->sin_addr;
+ dev_addr = netdev->dev_addr;
+ dev_put(netdev);
+ } else {
+ memset(&fl, 0, sizeof(fl));
+ fl.nl_u.ip4_u.daddr = sin->sin_addr.s_addr;
+ if (ip_route_output_key(&rt, &fl)) {
+ return -ENETUNREACH;
+ }
+ dev_addr = rt->idev->dev->dev_addr;
+ src_ip.s_addr = rt->rt_src;
+
+ ip_rt_put(rt);
+ }
+
+ down(&mutex);
+
+ list_for_each_entry(cma_dev, &dev_list, list) {
+ if (memcmp(dev_addr,
+ &cma_dev->node_guid,
+ sizeof(cma_dev->node_guid)) == 0) {
+ /* If we find the device, check that it is an
+ * iWARP device. If it is, bind to it now; the
+ * address is already in its native (IP) form,
+ * so no asynchronous resolution is needed.
+ */
+ if (cma_dev->device->node_type == IB_NODE_RNIC) {
+ struct sockaddr_in* cm_sin;
+ /* Set our source address */
+ cm_sin = (struct sockaddr_in*)
+ &id_priv->id.route.addr.src_addr;
+ cm_sin->sin_family = AF_INET;
+ cm_sin->sin_addr.s_addr = src_ip.s_addr;
+
+ /* Claim the device in the mutex */
+ cma_attach_to_dev(id_priv, cma_dev);
+ ret = 0;
+ break;
+ }
+ }
+ }
+ up(&mutex);
+
+ return ret;
+}
+
+
+/**
+ * rdma_resolve_addr - RDMA Resolve Address
+ *
+ * @id: RDMA identifier.
+ * @src_addr: Source IP address
+ * @dst_addr: Destination IP address
+ * @timeout_ms: Timeout to wait for address resolution
+ *
+ * Bind the specified cm_id to a local interface and if this is an IB
+ * CA, determine the GIDs associated with the specified IP addresses.
+ */
int rdma_resolve_addr(struct rdma_cm_id *id, struct sockaddr *src_addr,
struct sockaddr *dst_addr, int timeout_ms)
{
struct rdma_id_private *id_priv;
- int ret;
+ int ret = 0;
id_priv = container_of(id, struct rdma_id_private, id);
if (!cma_comp_exch(id_priv, CMA_IDLE, CMA_ADDR_QUERY))
return -EINVAL;
atomic_inc(&id_priv->refcount);
+
id->route.addr.dst_addr = *dst_addr;
- ret = ib_resolve_addr(src_addr, dst_addr, &id->route.addr.addr.ibaddr,
- timeout_ms, addr_handler, id_priv);
+
+ if (cma_acquire_iw_dev(id, dst_addr) == 0) {
+ enum rdma_cm_event_type event;
+
+ cma_exch(id_priv, CMA_ADDR_RESOLVED);
+ atomic_inc(&id_priv->dev_remove);
+
+ event = RDMA_CM_EVENT_ADDR_RESOLVED;
+ if (cma_notify_user(id_priv, event, 0, NULL, 0)) {
+ cma_exch(id_priv, CMA_DESTROYING);
+ cma_deref_id(id_priv);
+ cma_release_remove(id_priv);
+ rdma_destroy_id(&id_priv->id);
+ return -EINVAL;
+ }
+
+ cma_release_remove(id_priv);
+ cma_deref_id(id_priv);
+ } else {
+ ret = ib_resolve_addr(src_addr, dst_addr,
+ &id->route.addr.addr.ibaddr,
+ timeout_ms, addr_handler, id_priv);
+ }
+
if (ret)
goto err;
@@ -980,10 +1288,13 @@
int rdma_bind_addr(struct rdma_cm_id *id, struct sockaddr *addr)
{
struct rdma_id_private *id_priv;
+ struct sockaddr_in* sin;
struct ib_addr *ibaddr = &id->route.addr.addr.ibaddr;
int ret;
- if (addr->sa_family != AF_INET)
+ sin = (struct sockaddr_in*)addr;
+
+ if (sin->sin_family != AF_INET)
return -EINVAL;
id_priv = container_of(id, struct rdma_id_private, id);
@@ -994,9 +1305,11 @@
id->route.addr.src_addr = *addr;
ret = 0;
} else {
- ret = ib_translate_addr(addr, &ibaddr->sgid, &ibaddr->pkey);
- if (!ret)
- ret = cma_acquire_ib_dev(id_priv, &ibaddr->sgid);
+ if ((ret = cma_acquire_iw_dev(id, addr))) {
+ ret = ib_translate_addr(addr, &ibaddr->sgid, &ibaddr->pkey);
+ if (!ret)
+ ret = cma_acquire_ib_dev(id_priv, &ibaddr->sgid);
+ }
}
if (ret)
@@ -1041,10 +1354,10 @@
if (!private_data)
return -ENOMEM;
- id_priv->cm_id = ib_create_cm_id(id_priv->id.device, cma_ib_handler,
+ id_priv->cm_id.ib = ib_create_cm_id(id_priv->id.device, cma_ib_handler,
id_priv);
- if (IS_ERR(id_priv->cm_id)) {
- ret = PTR_ERR(id_priv->cm_id);
+ if (IS_ERR(id_priv->cm_id.ib)) {
+ ret = PTR_ERR(id_priv->cm_id.ib);
goto out;
}
@@ -1075,25 +1388,61 @@
req.max_cm_retries = CMA_MAX_CM_RETRIES;
req.srq = id_priv->id.qp->srq ? 1 : 0;
- ret = ib_send_cm_req(id_priv->cm_id, &req);
+ ret = ib_send_cm_req(id_priv->cm_id.ib, &req);
out:
kfree(private_data);
return ret;
}
+static int cma_connect_iw(struct rdma_id_private *id_priv,
+ struct rdma_conn_param *conn_param)
+{
+ struct iw_cm_id* cm_id;
+ struct sockaddr_in* sin;
+ int ret;
+
+ if (id_priv->id.qp == NULL)
+ return -EINVAL;
+
+ cm_id = iw_create_cm_id(id_priv->id.device, cma_iw_handler, id_priv);
+ if (IS_ERR(cm_id)) {
+ ret = PTR_ERR(cm_id);
+ goto out;
+ }
+
+ id_priv->cm_id.iw = cm_id;
+
+ sin = (struct sockaddr_in*)&id_priv->id.route.addr.src_addr;
+ cm_id->local_addr = *sin;
+
+ sin = (struct sockaddr_in*)&id_priv->id.route.addr.dst_addr;
+ cm_id->remote_addr = *sin;
+
+ iw_cm_bind_qp(cm_id, id_priv->id.qp);
+
+ ret = iw_cm_connect(cm_id, conn_param->private_data,
+ conn_param->private_data_len);
+
+out:
+ return ret;
+}
+
int rdma_connect(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
{
struct rdma_id_private *id_priv;
int ret;
id_priv = container_of(id, struct rdma_id_private, id);
- if (!cma_comp_exch(id_priv, CMA_ROUTE_RESOLVED, CMA_CONNECT))
+ if (!cma_comp_exch(id_priv, CMA_ROUTE_RESOLVED, CMA_CONNECT))
return -EINVAL;
switch (id->device->node_type) {
case IB_NODE_CA:
ret = cma_connect_ib(id_priv, conn_param);
break;
+ case IB_NODE_RNIC:
+ ret = cma_connect_iw(id_priv, conn_param);
+ break;
default:
ret = -ENOSYS;
break;
@@ -1131,7 +1480,7 @@
rep.rnr_retry_count = conn_param->rnr_retry_count;
rep.srq = id_priv->id.qp->srq ? 1 : 0;
- return ib_send_cm_rep(id_priv->cm_id, &rep);
+ return ib_send_cm_rep(id_priv->cm_id.ib, &rep);
}
int rdma_accept(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
@@ -1147,6 +1496,12 @@
case IB_NODE_CA:
ret = cma_accept_ib(id_priv, conn_param);
break;
+ case IB_NODE_RNIC: {
+ iw_cm_bind_qp(id_priv->cm_id.iw, id_priv->id.qp);
+ ret = iw_cm_accept(id_priv->cm_id.iw, conn_param->private_data,
+ conn_param->private_data_len);
+ break;
+ }
default:
ret = -ENOSYS;
break;
@@ -1175,9 +1530,15 @@
switch (id->device->node_type) {
case IB_NODE_CA:
- ret = ib_send_cm_rej(id_priv->cm_id, IB_CM_REJ_CONSUMER_DEFINED,
+ ret = ib_send_cm_rej(id_priv->cm_id.ib, IB_CM_REJ_CONSUMER_DEFINED,
NULL, 0, private_data, private_data_len);
break;
+
+ case IB_NODE_RNIC:
+ ret = iw_cm_reject(id_priv->cm_id.iw,
+ private_data, private_data_len);
+ break;
+
default:
ret = -ENOSYS;
break;
@@ -1190,7 +1551,6 @@
{
struct rdma_id_private *id_priv;
int ret;
-
id_priv = container_of(id, struct rdma_id_private, id);
if (!cma_comp(id_priv, CMA_CONNECT))
return -EINVAL;
@@ -1202,9 +1562,12 @@
switch (id->device->node_type) {
case IB_NODE_CA:
/* Initiate or respond to a disconnect. */
- if (ib_send_cm_dreq(id_priv->cm_id, NULL, 0))
- ib_send_cm_drep(id_priv->cm_id, NULL, 0);
+ if (ib_send_cm_dreq(id_priv->cm_id.ib, NULL, 0))
+ ib_send_cm_drep(id_priv->cm_id.ib, NULL, 0);
break;
+ case IB_NODE_RNIC:
+ ret = iw_cm_disconnect(id_priv->cm_id.iw);
+ break;
default:
break;
}
Index: Makefile
===================================================================
--- Makefile (revision 4186)
+++ Makefile (working copy)
@@ -7,3 +7,5 @@
obj-$(CONFIG_INFINIBAND_SRP) += ulp/srp/
obj-$(CONFIG_KDAPL) += ulp/kdapl/
obj-$(CONFIG_INFINIBAND_ISER) += ulp/iser/
+obj-$(CONFIG_KRPING) += krping/
+obj-$(CONFIG_RDMA_CMATOSE) += cmatose/
Index: hw/amso1100/c2.c
===================================================================
--- hw/amso1100/c2.c (revision 4482)
+++ hw/amso1100/c2.c (working copy)
@@ -933,7 +933,7 @@
spin_lock_init(&c2_port->tx_lock);
/* Copy our 48-bit ethernet hardware address */
- memcpy_fromio(netdev->dev_addr, mmio_addr + C2_REGS_ENADDR, 6);
+ memcpy_fromio(netdev->dev_addr, mmio_addr + C2_REGS_RDMA_ENADDR, 6);
/* Validate the MAC address */
if(!is_valid_ether_addr(netdev->dev_addr)) {
Index: hw/amso1100/c2_qp.c
===================================================================
--- hw/amso1100/c2_qp.c (revision 4482)
+++ hw/amso1100/c2_qp.c (working copy)
@@ -184,7 +184,7 @@
struct c2_vq_req *vq_req;
ccwr_qp_destroy_req_t wr;
ccwr_qp_destroy_rep_t *reply;
- int err;
+ int err;
/*
* Allocate a verb request message
@@ -343,8 +343,6 @@
qp->send_sgl_depth = qp_attrs->cap.max_send_sge;
qp->rdma_write_sgl_depth = qp_attrs->cap.max_send_sge;
qp->recv_sgl_depth = qp_attrs->cap.max_recv_sge;
- qp->event_handler = NULL;
- qp->context = NULL;
/* Initialize the SQ MQ */
q_size = be32_to_cpu(reply->sq_depth);
Index: hw/amso1100/c2.h
===================================================================
--- hw/amso1100/c2.h (revision 4482)
+++ hw/amso1100/c2.h (working copy)
@@ -113,6 +113,7 @@
C2_REGS_Q2_MSGSIZE = 0x0038,
C2_REGS_Q2_SHARED = 0x0040,
C2_REGS_ENADDR = 0x004C,
+ C2_REGS_RDMA_ENADDR = 0x0054,
C2_REGS_HRX_CUR = 0x006C,
};
@@ -592,16 +593,11 @@
extern int c2_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify notify);
/* CM */
-extern int c2_qp_connect(struct c2_dev *c2dev, struct c2_qp *qp, u32 remote_addr,
- u16 remote_port, u32 pdata_len, u8 *pdata);
-extern int c2_cr_query(struct c2_dev *c2dev, u32 cr_id,
- struct c2_cr_query_attrs *cr_attrs);
-extern int c2_cr_accept(struct c2_dev *c2dev, u32 cr_id, struct c2_qp *qp,
- u32 pdata_len, u8 *pdata);
-extern int c2_cr_reject(struct c2_dev *c2dev, u32 cr_id);
-extern int c2_ep_listen_create(struct c2_dev *c2dev, u32 addr, u16 port,
- u32 backlog, struct c2_ep *ep);
-extern int c2_ep_listen_destroy(struct c2_dev *c2dev, struct c2_ep *ep);
+extern int c2_llp_connect(struct iw_cm_id* cm_id, const void* pdata, u8 pdata_len);
+extern int c2_llp_accept(struct iw_cm_id* cm_id, const void* pdata, u8 pdata_len);
+extern int c2_llp_reject(struct iw_cm_id* cm_id, const void* pdata, u8 pdata_len);
+extern int c2_llp_service_create(struct iw_cm_id* cm_id, int backlog);
+extern int c2_llp_service_destroy(struct iw_cm_id* cm_id);
/* MM */
extern int c2_nsmr_register_phys_kern(struct c2_dev *c2dev, u64 **addr_list,
Index: hw/amso1100/c2_pd.c
===================================================================
--- hw/amso1100/c2_pd.c (revision 4482)
+++ hw/amso1100/c2_pd.c (working copy)
@@ -44,6 +44,8 @@
{
int err = 0;
+ printk(KERN_ERR "%s:%d\n", __FUNCTION__, __LINE__);
+
might_sleep();
atomic_set(&pd->sqp_count, 0);
Index: hw/amso1100/c2_ae.c
===================================================================
--- hw/amso1100/c2_ae.c (revision 4482)
+++ hw/amso1100/c2_ae.c (working copy)
@@ -35,51 +35,37 @@
#include "cc_status.h"
#include "cc_ae.h"
-enum conn_result
-c2_convert_cm_status(u32 cc_status)
+static int c2_convert_cm_status(u32 cc_status)
{
switch (cc_status) {
- case CC_CONN_STATUS_SUCCESS: return IW_CONN_ACCEPT;
- case CC_CONN_STATUS_REJECTED: return IW_CONN_RESET;
- case CC_CONN_STATUS_REFUSED: return IW_CONN_PEER_REJECT;
- case CC_CONN_STATUS_TIMEDOUT: return IW_CONN_TIMEDOUT;
- case CC_CONN_STATUS_NETUNREACH: return IW_CONN_NO_ROUTE_TO_HOST;
- case CC_CONN_STATUS_HOSTUNREACH: return IW_CONN_NO_ROUTE_TO_HOST;
- case CC_CONN_STATUS_INVALID_RNIC: return IW_CONN_INVALID_PARM;
- case CC_CONN_STATUS_INVALID_QP: return IW_CONN_INVALID_PARM;
- case CC_CONN_STATUS_INVALID_QP_STATE: return IW_CONN_INVALID_PARM;
+ case CC_CONN_STATUS_SUCCESS:
+ return 0;
+ case CC_CONN_STATUS_REJECTED:
+ return -ENETRESET;
+ case CC_CONN_STATUS_REFUSED:
+ return -ECONNREFUSED;
+ case CC_CONN_STATUS_TIMEDOUT:
+ return -ETIMEDOUT;
+ case CC_CONN_STATUS_NETUNREACH:
+ return -ENETUNREACH;
+ case CC_CONN_STATUS_HOSTUNREACH:
+ return -EHOSTUNREACH;
+ case CC_CONN_STATUS_INVALID_RNIC:
+ return -EINVAL;
+ case CC_CONN_STATUS_INVALID_QP:
+ return -EINVAL;
+ case CC_CONN_STATUS_INVALID_QP_STATE:
+ return -EINVAL;
default:
panic("Unable to convert CM status: %d\n", cc_status);
break;
}
}
-static int
-is_cm_event(cc_event_id_t id)
-{
- int is_cm;
-
- switch (id) {
- case CCAE_ACTIVE_CONNECT_RESULTS:
- case CCAE_BAD_CLOSE:
- case CCAE_LLP_CLOSE_COMPLETE:
- case CCAE_LLP_CONNECTION_RESET:
- case CCAE_LLP_CONNECTION_LOST:
- is_cm = 1;
- break;
- case CCAE_TERMINATE_MESSAGE_RECEIVED:
- case CCAE_CQ_SQ_COMPLETION_OVERFLOW:
- default:
- is_cm = 0;
- break;
- }
-
- return is_cm;
-}
void c2_ae_event(struct c2_dev *c2dev, u32 mq_index)
{
- ccwr_t *wr;
struct c2_mq *mq = c2dev->qptr_array[mq_index];
+ ccwr_t *wr;
void *resource_user_context;
struct iw_cm_event cm_event;
struct ib_event ib_event;
@@ -94,6 +80,7 @@
if (!wr)
return;
+ memset(&cm_event, 0, sizeof(cm_event));
event_id = c2_wr_get_id(wr);
resource_indicator = be32_to_cpu(wr->ae.ae_generic.resource_type);
resource_user_context = (void *)(unsigned long)wr->ae.ae_generic.user_context;
@@ -102,117 +89,126 @@
case CC_RES_IND_QP: {
struct c2_qp *qp = (struct c2_qp *)resource_user_context;
+ cm_event.status = c2_convert_cm_status(c2_wr_get_result(wr));
- if (is_cm_event(event_id)) {
-
- cm_event.device = &c2dev->ibdev;
- if (event_id == CCAE_ACTIVE_CONNECT_RESULTS) {
- cm_event.event = IW_EVENT_ACTIVE_CONNECT_RESULTS;
- cm_event.element.active_results.result =
- c2_convert_cm_status(c2_wr_get_result(wr));
- cm_event.element.active_results.conn_attr.local_addr.s_addr =
- wr->ae.ae_active_connect_results.laddr;
- cm_event.element.active_results.conn_attr.remote_addr.s_addr =
- wr->ae.ae_active_connect_results.raddr;
- cm_event.element.active_results.conn_attr.local_port =
- wr->ae.ae_active_connect_results.lport;
- cm_event.element.active_results.conn_attr.remote_port =
- wr->ae.ae_active_connect_results.rport;
- cm_event.element.active_results.private_data_len =
+ switch (event_id) {
+ case CCAE_ACTIVE_CONNECT_RESULTS:
+ cm_event.event = IW_CM_EVENT_CONNECT_REPLY;
+ cm_event.local_addr.sin_addr.s_addr =
+ wr->ae.ae_active_connect_results.laddr;
+ cm_event.remote_addr.sin_addr.s_addr =
+ wr->ae.ae_active_connect_results.raddr;
+ cm_event.local_addr.sin_port =
+ wr->ae.ae_active_connect_results.lport;
+ cm_event.remote_addr.sin_port =
+ wr->ae.ae_active_connect_results.rport;
+ cm_event.private_data_len =
be32_to_cpu(wr->ae.ae_active_connect_results.private_data_length);
+ if (cm_event.private_data_len) {
/* XXX */
- pdata = kmalloc(cm_event.element.active_results.private_data_len,
- GFP_ATOMIC);
- if (!pdata)
- break;
+ pdata = kmalloc(cm_event.private_data_len, GFP_ATOMIC);
+ if (!pdata) {
+ /* Ignore the request, maybe the remote peer
+ * will retry */
+ dprintk("Ignored connect request -- no memory for pdata"
+ "private_data_len=%d\n", cm_event.private_data_len);
+ goto ignore_it;
+ }
memcpy(pdata,
wr->ae.ae_active_connect_results.private_data,
- cm_event.element.active_results.private_data_len);
- cm_event.element.active_results.private_data = pdata;
+ cm_event.private_data_len);
- } else {
- cm_event.event = IW_EVENT_DISCONNECT;
+ cm_event.private_data = pdata;
}
+ if (qp->cm_id->event_handler)
+ qp->cm_id->event_handler(qp->cm_id, &cm_event);
- if (qp->event_handler)
- (*qp->event_handler)(&cm_event, qp->context);
+ break;
- if (pdata)
- kfree(pdata);
- } else {
-
+ case CCAE_TERMINATE_MESSAGE_RECEIVED:
+ case CCAE_CQ_SQ_COMPLETION_OVERFLOW:
ib_event.device = &c2dev->ibdev;
ib_event.element.qp = &qp->ibqp;
- /* XXX */
ib_event.event = IB_EVENT_QP_REQ_ERR;
if(qp->ibqp.event_handler)
- (*qp->ibqp.event_handler)(&ib_event, qp->context);
- }
+ (*qp->ibqp.event_handler)(&ib_event,
+ qp->ibqp.qp_context);
+ break;
+ case CCAE_BAD_CLOSE:
+ case CCAE_LLP_CLOSE_COMPLETE:
+ case CCAE_LLP_CONNECTION_RESET:
+ case CCAE_LLP_CONNECTION_LOST:
+ default:
+ cm_event.event = IW_CM_EVENT_CLOSE;
+ if (qp->cm_id->event_handler)
+ qp->cm_id->event_handler(qp->cm_id, &cm_event);
+ }
break;
}
+
case CC_RES_IND_EP: {
- struct c2_ep *ep = (struct c2_ep *)resource_user_context;
+ struct iw_cm_id* cm_id = (struct iw_cm_id*)resource_user_context;
+ dprintk("CC_RES_IND_EP event_id=%d\n", event_id);
if (event_id != CCAE_CONNECTION_REQUEST) {
dprintk("%s: Invalid event_id: %d\n", __FUNCTION__, event_id);
break;
}
- cm_event.device = &c2dev->ibdev;
- cm_event.event = IW_EVENT_CONNECT_REQUEST;
- cm_event.element.conn_request.cr_id =
+ cm_event.event = IW_CM_EVENT_CONNECT_REQUEST;
+ cm_event.provider_id =
wr->ae.ae_connection_request.cr_handle;
- cm_event.element.conn_request.conn_attr.local_addr.s_addr =
+ cm_event.local_addr.sin_addr.s_addr =
wr->ae.ae_connection_request.laddr;
- cm_event.element.conn_request.conn_attr.remote_addr.s_addr =
+ cm_event.remote_addr.sin_addr.s_addr =
wr->ae.ae_connection_request.raddr;
- cm_event.element.conn_request.conn_attr.local_port =
+ cm_event.local_addr.sin_port =
wr->ae.ae_connection_request.lport;
- cm_event.element.conn_request.conn_attr.remote_port =
+ cm_event.remote_addr.sin_port =
wr->ae.ae_connection_request.rport;
- cm_event.element.conn_request.private_data_len =
+ cm_event.private_data_len =
be32_to_cpu(wr->ae.ae_connection_request.private_data_length);
- /* XXX */
- pdata = kmalloc(cm_event.element.conn_request.private_data_len,
- GFP_ATOMIC);
- if (!pdata)
- break;
+ if (cm_event.private_data_len) {
+ pdata = kmalloc(cm_event.private_data_len, GFP_ATOMIC);
+ if (!pdata) {
+ /* Ignore the request, maybe the remote peer
+ * will retry */
+ dprintk("Ignored connect request -- no memory for pdata"
+ "private_data_len=%d\n", cm_event.private_data_len);
+ goto ignore_it;
+ }
+ memcpy(pdata,
+ wr->ae.ae_connection_request.private_data,
+ cm_event.private_data_len);
- memcpy(pdata,
- wr->ae.ae_connection_request.private_data,
- cm_event.element.conn_request.private_data_len);
-
- cm_event.element.conn_request.private_data = pdata;
-
- if (ep->event_handler)
- (*ep->event_handler)(&cm_event, ep->listen_context);
-
- kfree(pdata);
+ cm_event.private_data = pdata;
+ }
+ if (cm_id->event_handler)
+ cm_id->event_handler(cm_id, &cm_event);
break;
}
+
case CC_RES_IND_CQ: {
struct c2_cq *cq = (struct c2_cq *)resource_user_context;
+ dprintk("IB_EVENT_CQ_ERR\n");
ib_event.device = &c2dev->ibdev;
ib_event.element.cq = &cq->ibcq;
ib_event.event = IB_EVENT_CQ_ERR;
if (cq->ibcq.event_handler)
- (*cq->ibcq.event_handler)(&ib_event, cq->ibcq.cq_context);
+ cq->ibcq.event_handler(&ib_event, cq->ibcq.cq_context);
}
+
default:
break;
}
-
- /*
- * free the adapter message
- */
+
+ ignore_it:
c2_mq_free(mq);
}
-
Index: hw/amso1100/c2_provider.c
===================================================================
--- hw/amso1100/c2_provider.c (revision 4482)
+++ hw/amso1100/c2_provider.c (working copy)
@@ -305,8 +305,6 @@
struct c2_cq *cq;
int err;
- dprintk("%s:%s:%u\n", __FILE__, __FUNCTION__, __LINE__);
-
cq = kmalloc(sizeof(*cq), GFP_KERNEL);
if (!cq) {
dprintk("%s: Unable to allocate CQ\n", __FUNCTION__);
@@ -315,6 +313,7 @@
err = c2_init_cq(to_c2dev(ibdev), entries, NULL, cq);
if (err) {
+ dprintk("%s: error initializing CQ\n", __FUNCTION__);
kfree(cq);
return ERR_PTR(err);
}
@@ -540,156 +539,96 @@
return -ENOSYS;
}
-static int c2_connect_qp(struct ib_qp *ib_qp,
- struct iw_conn_attr *attr,
- void (*event_handler)(struct iw_cm_event*, void*),
- void *context,
- u8 *pdata,
- int pdata_len
- )
+static int c2_connect(struct iw_cm_id* cm_id,
+ const void* pdata, u8 pdata_len)
{
- struct c2_qp *qp = to_c2qp(ib_qp);
int err;
+ struct c2_qp* qp = container_of(cm_id->qp, struct c2_qp, ibqp);
dprintk("%s:%s:%u\n", __FILE__, __FUNCTION__, __LINE__);
- if (!event_handler)
+ if (cm_id->qp == NULL)
return -EINVAL;
- /*
- * Store the event handler and the
- * context in the QP.
- */
- qp->event_handler = event_handler;
- qp->context = context;
+ /* Cache the cm_id in the qp */
+ qp->cm_id = cm_id;
- err = c2_qp_connect(to_c2dev(ib_qp->device), qp,
- attr->remote_addr.s_addr, attr->remote_port,
- pdata_len, pdata);
- if (err) {
- qp->event_handler = NULL;
- qp->context = NULL;
- }
+ err = c2_llp_connect(cm_id, pdata, pdata_len);
return err;
}
-static int c2_disconnect_qp(struct ib_qp *qp,
- int abrupt)
+static int c2_disconnect(struct iw_cm_id* cm_id, int abrupt)
{
struct ib_qp_attr attr;
+ struct ib_qp *ib_qp = cm_id->qp;
int err;
dprintk("%s:%s:%u\n", __FILE__, __FUNCTION__, __LINE__);
+ /* If this is a listening endpoint, there is no QP */
+ if (ib_qp == 0)
+ return 0;
+
memset(&attr, 0, sizeof(struct ib_qp_attr));
if (abrupt)
attr.qp_state = IB_QPS_ERR;
else
attr.qp_state = IB_QPS_SQD;
- err = c2_modify_qp(qp, &attr, IB_QP_STATE);
+ err = c2_modify_qp(ib_qp, &attr, IB_QP_STATE);
return err;
}
-static int c2_accept_cr(struct ib_device *ibdev,
- u32 cr_id,
- struct ib_qp *ib_qp,
- void (*event_handler)(struct iw_cm_event*, void*),
- void *context,
- u8 *pdata,
- int pdata_len)
+static int c2_accept(struct iw_cm_id* cm_id, const void *pdata, u8 pdata_len)
{
- struct c2_qp *qp = to_c2qp(ib_qp);
int err;
dprintk("%s:%s:%u\n", __FILE__, __FUNCTION__, __LINE__);
- /*
- * Store the event handler and the
- * context in the QP.
- */
- qp->event_handler = event_handler;
- qp->context = context;
+ err = c2_llp_accept(cm_id, pdata, pdata_len);
- err = c2_cr_accept(to_c2dev(ibdev), cr_id, qp,
- pdata_len, pdata);
-
return err;
}
-static int c2_reject_cr(struct ib_device *ibdev,
- u32 cr_id,
- u8 *pdata,
- int pdata_len)
+static int c2_reject(struct iw_cm_id* cm_id, const void* pdata, u8 pdata_len)
{
int err;
dprintk("%s:%s:%u\n", __FILE__, __FUNCTION__, __LINE__);
- err = c2_cr_reject(to_c2dev(ibdev), cr_id);
+ err = c2_llp_reject(cm_id, pdata, pdata_len);
return err;
}
-static int c2_query_cr(struct ib_device *ibdev,
- u32 cr_id,
- struct iw_conn_request *req)
+static int c2_getpeername(struct iw_cm_id* cm_id,
+ struct sockaddr_in* local_addr,
+ struct sockaddr_in* remote_addr)
{
- int err;
- struct c2_cr_query_attrs cr_attrs;
-
dprintk("%s:%s:%u\n", __FILE__, __FUNCTION__, __LINE__);
- err = c2_cr_query(to_c2dev(ibdev), cr_id, &cr_attrs);
- if (!err) {
- req->cr_id = cr_id;
- req->conn_attr.local_addr.s_addr = cr_attrs.local_addr;
- req->conn_attr.local_port = cr_attrs.local_port;
- req->conn_attr.remote_addr.s_addr = cr_attrs.remote_addr;
- req->conn_attr.remote_port = cr_attrs.remote_port;
- /* XXX pdata? */
- }
- return err;
+ *local_addr = cm_id->local_addr;
+ *remote_addr = cm_id->remote_addr;
+ return 0;
}
-static int c2_create_listen_ep(struct ib_device *ibdev,
- struct iw_listen_ep_attr *ep_attr,
- void **ep_handle)
+static int c2_service_create(struct iw_cm_id* cm_id, int backlog)
{
int err;
- struct c2_ep *ep;
dprintk("%s:%s:%u\n", __FILE__, __FUNCTION__, __LINE__);
-
- ep = kmalloc(sizeof(*ep), GFP_KERNEL);
- if (!ep) {
- dprintk("%s: Unable to allocate EP\n", __FUNCTION__);
- return -ENOMEM;
- }
-
- ep->event_handler = ep_attr->event_handler;
- ep->listen_context = ep_attr->listen_context;
-
- err = c2_ep_listen_create(to_c2dev(ibdev),
- ep_attr->addr.s_addr, ep_attr->port,
- ep_attr->backlog, ep);
- if (err)
- kfree(ep);
- else
- *ep_handle = (void *)ep;
-
+ err = c2_llp_service_create(cm_id, backlog);
return err;
}
-static int c2_destroy_listen_ep(struct ib_device *ibdev, void *ep_handle)
+static int c2_service_destroy(struct iw_cm_id* cm_id)
{
- struct c2_ep *ep = (struct c2_ep *)ep_handle;
-
+ int err;
dprintk("%s:%s:%u\n", __FILE__, __FUNCTION__, __LINE__);
- c2_ep_listen_destroy(to_c2dev(ibdev), ep);
- kfree(ep);
- return 0;
+ err = c2_llp_service_destroy(cm_id);
+
+ return err;
}
int c2_register_device(struct c2_dev *dev)
@@ -742,13 +681,13 @@
dev->ibdev.post_recv = c2_post_receive;
dev->ibdev.iwcm = kmalloc(sizeof(*dev->ibdev.iwcm), GFP_KERNEL);
- dev->ibdev.iwcm->connect_qp = c2_connect_qp;
- dev->ibdev.iwcm->disconnect_qp = c2_disconnect_qp;
- dev->ibdev.iwcm->accept_cr = c2_accept_cr;
- dev->ibdev.iwcm->reject_cr = c2_reject_cr;
- dev->ibdev.iwcm->query_cr = c2_query_cr;
- dev->ibdev.iwcm->create_listen_ep = c2_create_listen_ep;
- dev->ibdev.iwcm->destroy_listen_ep = c2_destroy_listen_ep;
+ dev->ibdev.iwcm->connect = c2_connect;
+ dev->ibdev.iwcm->disconnect = c2_disconnect;
+ dev->ibdev.iwcm->accept = c2_accept;
+ dev->ibdev.iwcm->reject = c2_reject;
+ dev->ibdev.iwcm->getpeername = c2_getpeername;
+ dev->ibdev.iwcm->create_listen = c2_service_create;
+ dev->ibdev.iwcm->destroy_listen = c2_service_destroy;
ret = ib_register_device(&dev->ibdev);
if (ret)
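
For readers following along without the iw_cm.h diff in front of them, the assignments above imply a provider method table shaped roughly like this. This is a sketch inferred from the c2_* function signatures in this file; the authoritative definition is the iw_cm_verbs struct in include/rdma/iw_cm.h:

#include <linux/types.h>	/* u8 */
#include <linux/in.h>		/* struct sockaddr_in */

struct iw_cm_id;		/* from <rdma/iw_cm.h> */

/* Sketch of the iWARP provider method table, inferred from the
 * c2_register_device() assignments above; it may differ field for
 * field from the real struct iw_cm_verbs. */
struct iw_cm_verbs {
	int (*connect)(struct iw_cm_id *cm_id,
		       const void *pdata, u8 pdata_len);
	int (*disconnect)(struct iw_cm_id *cm_id, int abrupt);
	int (*accept)(struct iw_cm_id *cm_id,
		      const void *pdata, u8 pdata_len);
	int (*reject)(struct iw_cm_id *cm_id,
		      const void *pdata, u8 pdata_len);
	int (*getpeername)(struct iw_cm_id *cm_id,
			   struct sockaddr_in *local_addr,
			   struct sockaddr_in *remote_addr);
	int (*create_listen)(struct iw_cm_id *cm_id, int backlog);
	int (*destroy_listen)(struct iw_cm_id *cm_id);
};
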
Index: hw/amso1100/c2_provider.h
===================================================================
--- hw/amso1100/c2_provider.h (revision 4482)
+++ hw/amso1100/c2_provider.h (working copy)
@@ -115,17 +115,15 @@
struct c2_wq {
spinlock_t lock;
};
-
+struct iw_cm_id;
struct c2_qp {
struct ib_qp ibqp;
+ struct iw_cm_id* cm_id;
spinlock_t lock;
atomic_t refcount;
wait_queue_head_t wait;
int qpn;
- void (*event_handler)(struct iw_cm_event *, void *);
- void *context;
-
u32 adapter_handle;
u32 send_sgl_depth;
u32 recv_sgl_depth;
@@ -136,15 +134,6 @@
struct c2_mq rq_mq;
};
-struct c2_ep {
- u32 adapter_handle;
- void (*event_handler)(struct iw_cm_event *, void *);
- void *listen_context;
- u32 addr;
- u16 port;
- int backlog;
-};
-
struct c2_cr_query_attrs {
u32 local_addr;
u32 remote_addr;
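
The cm_id pointer cached in struct c2_qp is what lets the AE handler and the c2_llp_* verbs move between the generic and driver objects; the conversion in the other direction is the usual container_of idiom. A sketch of what to_c2qp() presumably expands to (the wrapper body is an assumption, matching its uses in c2_llp_connect and c2_llp_accept):

#include <linux/kernel.h>	/* container_of */
#include <rdma/ib_verbs.h>	/* struct ib_qp */

/* Assumed definition of to_c2qp(): recover the driver QP from the
 * generic ib_qp embedded in struct c2_qp (declared above). */
static inline struct c2_qp *to_c2qp(struct ib_qp *ibqp)
{
	return container_of(ibqp, struct c2_qp, ibqp);
}
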
Index: hw/amso1100/c2_cm.c
===================================================================
--- hw/amso1100/c2_cm.c (revision 4482)
+++ hw/amso1100/c2_cm.c (working copy)
@@ -35,11 +35,10 @@
#include "c2_vq.h"
#include <rdma/iw_cm.h>
-int
-c2_qp_connect(struct c2_dev *c2dev, struct c2_qp *qp,
- u32 remote_addr, u16 remote_port,
- u32 pdata_len, u8 *pdata)
+int c2_llp_connect(struct iw_cm_id* cm_id, const void* pdata, u8 pdata_len)
{
+ struct c2_dev *c2dev = to_c2dev(cm_id->device);
+ struct c2_qp *qp = to_c2qp(cm_id->qp);
ccwr_qp_connect_req_t *wr; /* variable size needs a malloc. */
struct c2_vq_req *vq_req;
int err;
@@ -70,8 +69,8 @@
wr->rnic_handle = c2dev->adapter_handle;
wr->qp_handle = qp->adapter_handle;
- wr->remote_addr = remote_addr; /* already in Network Byte Order */
- wr->remote_port = remote_port; /* already in Network Byte Order */
+ wr->remote_addr = cm_id->remote_addr.sin_addr.s_addr;
+ wr->remote_port = cm_id->remote_addr.sin_port;
/*
* Move any private data from the caller's buf into
@@ -96,14 +95,18 @@
}
int
-c2_ep_listen_create(struct c2_dev *c2dev, u32 addr,
- u16 port, u32 backlog, struct c2_ep *ep)
+c2_llp_service_create(struct iw_cm_id* cm_id, int backlog)
{
+ struct c2_dev *c2dev;
ccwr_ep_listen_create_req_t wr;
ccwr_ep_listen_create_rep_t *reply;
struct c2_vq_req *vq_req;
int err;
+ c2dev = to_c2dev(cm_id->device);
+ if (c2dev == NULL)
+ return -EINVAL;
+
/*
* Allocate verbs request.
*/
@@ -115,15 +118,15 @@
* Build the WR
*/
c2_wr_set_id(&wr, CCWR_EP_LISTEN_CREATE);
- wr.hdr.context = (unsigned long)vq_req;
+ wr.hdr.context = (u64)(unsigned long)vq_req;
wr.rnic_handle = c2dev->adapter_handle;
- wr.local_addr = addr; /* already in Net Byte Order */
- wr.local_port = port; /* already in Net Byte Order */
+ wr.local_addr = cm_id->local_addr.sin_addr.s_addr;
+ wr.local_port = cm_id->local_addr.sin_port;
wr.backlog = cpu_to_be32(backlog);
- wr.user_context = (unsigned long)ep;
+ wr.user_context = (u64)(unsigned long)cm_id;
/*
- * reference the request struct. dereferenced in the int handler.
+ * Reference the request struct. Dereferenced in the int handler.
*/
vq_req_get(c2dev, vq_req);
@@ -160,12 +163,7 @@
/*
* get the adapter handle
*/
- ep->adapter_handle = reply->ep_handle;
- if (port != reply->local_port)
- {
- // XXX
- //*p_port = reply->local_port;
- }
+ cm_id->provider_id = reply->ep_handle;
/*
* free vq stuff
@@ -184,13 +182,19 @@
int
-c2_ep_listen_destroy(struct c2_dev *c2dev, struct c2_ep *ep)
+c2_llp_service_destroy(struct iw_cm_id* cm_id)
{
+
+ struct c2_dev *c2dev;
ccwr_ep_listen_destroy_req_t wr;
ccwr_ep_listen_destroy_rep_t *reply;
struct c2_vq_req *vq_req;
int err;
+ c2dev = to_c2dev(cm_id->device);
+ if (c2dev == NULL)
+ return -EINVAL;
+
/*
* Allocate verbs request.
*/
@@ -205,7 +209,7 @@
c2_wr_set_id(&wr, CCWR_EP_LISTEN_DESTROY);
wr.hdr.context = (unsigned long)vq_req;
wr.rnic_handle = c2dev->adapter_handle;
- wr.ep_handle = ep->adapter_handle;
+ wr.ep_handle = cm_id->provider_id;
/*
* reference the request struct. dereferenced in the int handler.
@@ -250,87 +254,20 @@
int
-c2_cr_query(struct c2_dev *c2dev, u32 cr_id,
- struct c2_cr_query_attrs *cr_attrs)
+c2_llp_accept(struct iw_cm_id* cm_id, const void* pdata, u8 pdata_len)
{
- ccwr_ep_query_req_t wr;
- ccwr_ep_query_rep_t *reply;
- struct c2_vq_req *vq_req;
- int err;
+ struct c2_dev *c2dev = to_c2dev(cm_id->device);
+ struct c2_qp *qp = to_c2qp(cm_id->qp);
+ ccwr_cr_accept_req_t *wr; /* variable length WR */
+ struct c2_vq_req *vq_req;
+ ccwr_cr_accept_rep_t *reply; /* VQ Reply msg ptr. */
+ int err;
- /*
- * Create and send a WR_EP_CREATE...
- */
- vq_req = vq_req_alloc(c2dev);
- if (!vq_req) {
- return -ENOMEM;
- }
+ /* Make sure there's a bound QP */
+ if (qp == 0)
+ return -EINVAL;
- /*
- * Build the WR
- */
- c2_wr_set_id(&wr, CCWR_EP_QUERY);
- wr.hdr.context = (unsigned long)vq_req;
- wr.rnic_handle = c2dev->adapter_handle;
- wr.ep_handle = cr_id;
-
/*
- * reference the request struct. dereferenced in the int handler.
- */
- vq_req_get(c2dev, vq_req);
-
- /*
- * Send WR to adapter
- */
- err = vq_send_wr(c2dev, (ccwr_t*)&wr);
- if (err) {
- vq_req_put(c2dev, vq_req);
- goto bail0;
- }
-
- /*
- * Wait for reply from adapter
- */
- err = vq_wait_for_reply(c2dev, vq_req);
- if (err) {
- goto bail0;
- }
-
- /*
- * Process reply
- */
- reply = (ccwr_ep_query_rep_t*)(unsigned long)vq_req->reply_msg;
- if (!reply) {
- err = -ENOMEM;
- goto bail0;
- }
- if ( (err = c2_errno(reply)) != 0) {
- goto bail1;
- }
-
- cr_attrs->local_addr = reply->local_addr;
- cr_attrs->local_port = reply->local_port;
- cr_attrs->remote_addr = reply->remote_addr;
- cr_attrs->remote_port = reply->remote_port;
-
-bail1:
- vq_repbuf_free(c2dev, reply);
-bail0:
- vq_req_free(c2dev, vq_req);
- return err;
-}
-
-
-int
-c2_cr_accept(struct c2_dev *c2dev, u32 cr_id, struct c2_qp *qp,
- u32 pdata_len, u8 *pdata)
-{
- ccwr_cr_accept_req_t *wr; /* variable length WR */
- struct c2_vq_req *vq_req;
- ccwr_cr_accept_rep_t* reply; /* VQ Reply msg ptr. */
- int err;
-
- /*
* only support the max private_data length
*/
if (pdata_len > CC_MAX_PRIVATE_DATA_SIZE) {
@@ -357,7 +294,7 @@
c2_wr_set_id(wr, CCWR_CR_ACCEPT);
wr->hdr.context = (unsigned long)vq_req;
wr->rnic_handle = c2dev->adapter_handle;
- wr->ep_handle = cr_id;
+ wr->ep_handle = (u32)cm_id->provider_id;
wr->qp_handle = qp->adapter_handle;
if (pdata) {
wr->private_data_length = cpu_to_be32(pdata_len);
@@ -407,15 +344,17 @@
return err;
}
-
int
-c2_cr_reject(struct c2_dev *c2dev, u32 cr_id)
+c2_llp_reject(struct iw_cm_id* cm_id, const void* pdata, u8 pdata_len)
{
+ struct c2_dev *c2dev;
ccwr_cr_reject_req_t wr;
struct c2_vq_req *vq_req;
ccwr_cr_reject_rep_t *reply;
int err;
+ c2dev = to_c2dev(cm_id->device);
+
/*
* Allocate verbs request.
*/
@@ -430,7 +369,7 @@
c2_wr_set_id(&wr, CCWR_CR_REJECT);
wr.hdr.context = (unsigned long)vq_req;
wr.rnic_handle = c2dev->adapter_handle;
- wr.ep_handle = cr_id;
+ wr.ep_handle = (u32)cm_id->provider_id;
/*
* reference the request struct. dereferenced in the int handler.
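
Every c2_llp_* verb in c2_cm.c follows the same verbs-queue round trip that the deleted c2_cr_query() above spelled out: allocate a request, take a reference for the interrupt handler, post the WR, wait for the adapter's reply, translate the status, and free both sides. A condensed sketch of that shared skeleton (function names as used throughout c2_cm.c; the generic ccwr_t parameter and the "c2.h" header are simplifying assumptions):

#include "c2.h"		/* struct c2_dev, struct c2_vq_req (assumed header) */
#include "c2_vq.h"	/* vq_* helpers, as included by c2_cm.c */

/* Condensed sketch of the request/reply skeleton the c2_llp_* verbs
 * share; each verb builds its own WR type before sending. */
static int c2_vq_round_trip(struct c2_dev *c2dev, ccwr_t *wr)
{
	struct c2_vq_req *vq_req;
	ccwr_t *reply;
	int err;

	vq_req = vq_req_alloc(c2dev);		/* allocate verbs request */
	if (!vq_req)
		return -ENOMEM;

	vq_req_get(c2dev, vq_req);		/* held until the int handler replies */

	/* the real verbs set wr->hdr.context = (unsigned long)vq_req here */
	err = vq_send_wr(c2dev, wr);		/* post the WR to the adapter */
	if (err) {
		vq_req_put(c2dev, vq_req);	/* drop the handler's reference */
		goto bail;
	}

	err = vq_wait_for_reply(c2dev, vq_req);	/* sleep until the reply arrives */
	if (err)
		goto bail;

	reply = (ccwr_t *)(unsigned long)vq_req->reply_msg;
	if (!reply) {
		err = -ENOMEM;
		goto bail;
	}
	err = c2_errno(reply);			/* adapter status -> errno */
	vq_repbuf_free(c2dev, reply);		/* free the adapter's reply buffer */
bail:
	vq_req_free(c2dev, vq_req);
	return err;
}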