[openib-general] [PATCH] My current enumeration/async event diff
Roland Dreier
roland at topspin.com
Sun Sep 12 20:41:59 PDT 2004
Here's the latest version of my tree. I think the main change since
the last version I posted is the addition of ib_get_client_data() and
ib_set_client_data().
I'm planning on committing this on Monday unless someone complains. So
far all the feedback I've gotten has been positive except that there
was some question about register/unregister vs. reg/dereg in function
names. If we decide my function names need to change I have no
problem with it but it doesn't seem worth waiting to resolve the
question before committing.
Thanks,
Roland
Index: infiniband/ulp/ipoib/ipoib_verbs.c
===================================================================
--- infiniband/ulp/ipoib/ipoib_verbs.c (revision 759)
+++ infiniband/ulp/ipoib/ipoib_verbs.c (working copy)
@@ -214,7 +214,7 @@
return -ENODEV;
}
- priv->cq = ib_create_cq(priv->ca, ipoib_ib_completion, dev,
+ priv->cq = ib_create_cq(priv->ca, ipoib_ib_completion, NULL, dev,
IPOIB_TX_RING_SIZE + IPOIB_RX_RING_SIZE + 1);
if (IS_ERR(priv->cq)) {
TS_REPORT_FATAL(MOD_IB_NET, "%s: failed to create CQ",
@@ -256,7 +256,6 @@
out_free_pd:
ib_dealloc_pd(priv->pd);
- module_put(priv->ca->owner);
return -ENODEV;
}
@@ -274,101 +273,37 @@
}
if (ib_dereg_mr(priv->mr))
- TS_REPORT_WARN(MOD_IB_NET,
- "%s: ib_dereg_mr failed", dev->name);
+ printk(KERN_WARNING "%s: ib_dereg_mr failed\n", dev->name);
if (ib_destroy_cq(priv->cq))
- TS_REPORT_WARN(MOD_IB_NET,
- "%s: ib_cq_destroy failed", dev->name);
+ printk(KERN_WARNING "%s: ib_cq_destroy failed\n", dev->name);
if (ib_dealloc_pd(priv->pd))
- TS_REPORT_WARN(MOD_IB_NET,
- "%s: ib_dealloc_pd failed", dev->name);
-
- module_put(priv->ca->owner);
+ printk(KERN_WARNING "%s: ib_dealloc_pd failed\n", dev->name);
}
-static void ipoib_device_notifier(struct ib_device_notifier *self,
- struct ib_device *device, int event)
+static void ipoib_event(struct ib_event_handler *handler,
+ struct ib_event *record)
{
- struct ib_device_attr props;
- int port;
+ struct ipoib_dev_priv *priv =
+ container_of(handler, struct ipoib_dev_priv, event_handler);
- switch (event) {
- case IB_DEVICE_NOTIFIER_ADD:
- if (ib_query_device(device, &props)) {
- TS_REPORT_WARN(MOD_IB_NET, "ib_device_properties_get failed");
- return;
- }
-
- if (device->node_type == IB_NODE_SWITCH) {
- if (try_module_get(device->owner))
- ipoib_add_port("ib%d", device, 0);
- } else {
- for (port = 1; port <= props.phys_port_cnt; ++port)
- if (try_module_get(device->owner))
- ipoib_add_port("ib%d", device, port);
- }
- break;
-
- case IB_DEVICE_NOTIFIER_REMOVE:
- /* Yikes! We don't support devices going away from
- underneath us yet! */
- TS_REPORT_WARN(MOD_IB_NET,
- "IPoIB driver can't handle removal of device %s",
- device->name);
- break;
-
- default:
- TS_REPORT_WARN(MOD_IB_NET, "Unknown device notifier event %d.");
- break;
- }
-}
-
-static struct ib_device_notifier ipoib_notifier = {
- .notifier = ipoib_device_notifier
-};
-
-int ipoib_transport_create_devices(void)
-{
- ib_device_notifier_register(&ipoib_notifier);
- return 0;
-}
-
-void ipoib_transport_cleanup(void)
-{
- ib_device_notifier_deregister(&ipoib_notifier);
-}
-
-static void ipoib_async_event(struct ib_async_event_record *record,
- void *priv_ptr)
-{
- struct ipoib_dev_priv *priv = priv_ptr;
-
if (record->event == IB_EVENT_PORT_ACTIVE) {
TS_TRACE(MOD_IB_NET, T_VERBOSE, TRACE_IB_NET_GEN,
- "%s: Port active Event", priv->dev.name);
-
- ipoib_ib_dev_flush(&priv->dev);
- } else
- TS_REPORT_WARN(MOD_IB_NET,
- "%s: Unexpected event %d", priv->dev.name,
- record->event);
+ "%s: Port active event", priv->dev.name);
+ schedule_work(&priv->flush_task);
+ }
}
int ipoib_port_monitor_dev_start(struct net_device *dev)
{
struct ipoib_dev_priv *priv = dev->priv;
- struct ib_async_event_record event_record = {
- .device = priv->ca,
- .event = IB_EVENT_PORT_ACTIVE,
- };
- if (ib_async_event_handler_register(&event_record,
- ipoib_async_event,
- priv, &priv->active_handler)) {
- TS_REPORT_FATAL(MOD_IB_NET,
- "ib_async_event_handler_register failed for TS_IB_PORT_ACTIVE");
+ INIT_IB_EVENT_HANDLER(&priv->event_handler,
+ priv->ca, ipoib_event);
+
+ if (ib_register_event_handler(&priv->event_handler)) {
+ printk(KERN_WARNING "ib_register_event_handler failed\n");
return -EINVAL;
}
@@ -379,7 +314,7 @@
{
struct ipoib_dev_priv *priv = dev->priv;
- ib_async_event_handler_deregister(priv->active_handler);
+ ib_unregister_event_handler(&priv->event_handler);
}
/*
Index: infiniband/ulp/ipoib/ipoib_main.c
===================================================================
--- infiniband/ulp/ipoib/ipoib_main.c (revision 759)
+++ infiniband/ulp/ipoib/ipoib_main.c (working copy)
@@ -605,6 +605,7 @@
INIT_LIST_HEAD(&priv->child_intfs);
INIT_LIST_HEAD(&priv->multicast_list);
+ INIT_WORK(&priv->flush_task, ipoib_ib_dev_flush, &priv->dev);
INIT_WORK(&priv->restart_task, ipoib_mcast_restart_task, &priv->dev);
priv->dev.priv = priv;
@@ -691,22 +692,59 @@
return result;
}
+static void ipoib_add_one(struct ib_device *device)
+{
+ struct ib_device_attr props;
+ int port;
+
+ if (ib_query_device(device, &props)) {
+ TS_REPORT_WARN(MOD_IB_NET, "ib_query_device failed");
+ return;
+ }
+
+ if (device->node_type == IB_NODE_SWITCH)
+ ipoib_add_port("ib%d", device, 0);
+ else
+ for (port = 1; port <= props.phys_port_cnt; ++port)
+ ipoib_add_port("ib%d", device, port);
+}
+
+static void ipoib_remove_one(struct ib_device *device)
+{
+ struct ipoib_dev_priv *priv, *tmp;
+
+ LIST_HEAD(delete);
+
+ down(&ipoib_device_mutex);
+ list_for_each_entry_safe(priv, tmp, &ipoib_device_list, list) {
+ if (priv->ca == device) {
+ list_del(&priv->list);
+ list_add_tail(&priv->list, &delete);
+ }
+ }
+ up(&ipoib_device_mutex);
+
+ list_for_each_entry_safe(priv, tmp, &delete, list) {
+ unregister_netdev(&priv->dev);
+ ipoib_port_monitor_dev_stop(&priv->dev);
+ ipoib_dev_cleanup(&priv->dev);
+ kfree(priv);
+ }
+}
+
+static struct ib_client ipoib_client = {
+ .add = ipoib_add_one,
+ .remove = ipoib_remove_one
+};
+
static int __init ipoib_init_module(void)
{
int ret;
- ret = ipoib_transport_create_devices();
+ ret = ib_register_client(&ipoib_client);
if (ret)
return ret;
- down(&ipoib_device_mutex);
- if (list_empty(&ipoib_device_list)) {
- up(&ipoib_device_mutex);
- ipoib_transport_cleanup();
- return -ENODEV;
- }
- up(&ipoib_device_mutex);
-
ipoib_vlan_init();
return 0;
@@ -714,22 +752,8 @@
static void __exit ipoib_cleanup_module(void)
{
- struct ipoib_dev_priv *priv, *tpriv;
-
ipoib_vlan_cleanup();
- ipoib_transport_cleanup();
-
- down(&ipoib_device_mutex);
- list_for_each_entry_safe(priv, tpriv, &ipoib_device_list, list) {
- ipoib_port_monitor_dev_stop(&priv->dev);
- ipoib_dev_cleanup(&priv->dev);
- unregister_netdev(&priv->dev);
-
- list_del(&priv->list);
-
- kfree(priv);
- }
- up(&ipoib_device_mutex);
+ ib_unregister_client(&ipoib_client);
}
module_init(ipoib_init_module);
Index: infiniband/ulp/ipoib/ipoib.h
===================================================================
--- infiniband/ulp/ipoib/ipoib.h (revision 759)
+++ infiniband/ulp/ipoib/ipoib.h (working copy)
@@ -117,6 +117,7 @@
atomic_t mcast_joins;
+ struct work_struct flush_task;
struct work_struct restart_task;
struct ib_device *ca;
@@ -152,7 +153,7 @@
struct proc_dir_entry *arp_proc_entry;
struct proc_dir_entry *mcast_proc_entry;
- struct ib_async_event_handler *active_handler;
+ struct ib_event_handler event_handler;
struct net_device_stats stats;
};
@@ -253,10 +254,6 @@
int ipoib_add_port(const char *format, struct ib_device *device,
tTS_IB_PORT port);
-int ipoib_transport_create_devices(void);
-
-void ipoib_transport_cleanup(void);
-
int ipoib_port_monitor_dev_start(struct net_device *dev);
void ipoib_port_monitor_dev_stop(struct net_device *dev);
Index: infiniband/ulp/ipoib/ip2pr_link.c
===================================================================
--- infiniband/ulp/ipoib/ip2pr_link.c (revision 759)
+++ infiniband/ulp/ipoib/ip2pr_link.c (working copy)
@@ -27,8 +27,7 @@
static tTS_KERNEL_TIMER_STRUCT _tsIp2prPathTimer;
static tIP2PR_PATH_LOOKUP_ID _tsIp2prPathLookupId = 0;
-static struct ib_async_event_handler *_tsIp2prAsyncErrHandle[IP2PR_MAX_HCAS];
-static struct ib_async_event_handler *_tsIp2prAsyncActHandle[IP2PR_MAX_HCAS];
+static struct ib_event_handler _tsIp2prEventHandle[IP2PR_MAX_HCAS];
static unsigned int ip2pr_total_req = 0;
static unsigned int ip2pr_arp_timeout = 0;
@@ -1311,9 +1310,9 @@
return 0;
}
-/* ip2pr_async_event_func -- IB async event handler, for clearing caches */
-static void ip2pr_async_event_func(struct ib_async_event_record *record,
- void *arg)
+/* ip2pr_event_func -- IB async event handler, for clearing caches */
+static void ip2pr_event_func(struct ib_event_handler *handler,
+ struct ib_event *record)
{
struct ip2pr_path_element *path_elmt;
s32 result;
@@ -1321,15 +1320,10 @@
unsigned long flags;
struct ip2pr_gid_pr_element *prn_elmt;
- if (NULL == record) {
-
- TS_TRACE(MOD_IP2PR, T_VERBOSE, TRACE_FLOW_WARN,
- "ASYNC: Event with no record of what happened?");
+ if (record->event != IB_EVENT_PORT_ACTIVE &&
+ record->event != IB_EVENT_PORT_ERR)
return;
- }
- /* if */
- TS_TRACE(MOD_IP2PR, T_VERBOSE, TRACE_FLOW_WARN,
- "ASYNC: Event <%d> reported, clearing cache.");
+
/*
* destroy all cached path record elements.
*/
@@ -1346,18 +1340,18 @@
for (sgid_elmt = _tsIp2prLinkRoot.src_gid_list;
NULL != sgid_elmt; sgid_elmt = sgid_elmt->next) {
if ((sgid_elmt->ca == record->device) &&
- (sgid_elmt->port == record->modifier.port)) {
+ (sgid_elmt->port == record->element.port_num)) {
sgid_elmt->port_state =
record->event == IB_EVENT_PORT_ACTIVE ?
IB_PORT_ACTIVE : IB_PORT_DOWN;
/* Gid could have changed. Get the gid */
if (ib_cached_gid_get(record->device,
- record->modifier.port,
+ record->element.port_num,
0, sgid_elmt->gid)) {
TS_TRACE(MOD_IP2PR, T_VERBOSE, TRACE_FLOW_WARN,
"Could not get GID: on hca=<%d>,port=<%d>, event=%d",
- record->device, record->modifier.port,
+ record->device, record->element.port_num,
record->event);
/* for now zero it. Will get it, when user queries */
@@ -1375,7 +1369,7 @@
TS_TRACE(MOD_IP2PR, T_VERBOSE, TRACE_FLOW_WARN,
"Async Port Event on hca=<%d>,port=<%d>, event=%d",
- record->device, record->modifier.port, record->event);
+ record->device, record->element.port_num, record->event);
return;
}
@@ -2074,7 +2068,6 @@
s32 ip2pr_link_addr_init(void)
{
s32 result = 0;
- struct ib_async_event_record evt_rec;
int i;
struct ib_device *hca_device;
@@ -2138,43 +2131,18 @@
* Install async event handler, to clear cache on port down
*/
- for (i = 0; i < IP2PR_MAX_HCAS; i++) {
- _tsIp2prAsyncErrHandle[i] = TS_IP2PR_INVALID_ASYNC_HANDLE;
- _tsIp2prAsyncActHandle[i] = TS_IP2PR_INVALID_ASYNC_HANDLE;
- }
-
for (i = 0; ((hca_device = ib_device_get_by_index(i)) != NULL); ++i) {
- evt_rec.device = hca_device;
- evt_rec.event = IB_PORT_ERROR;
- result = ib_async_event_handler_register(&evt_rec,
- ip2pr_async_event_func,
- NULL,
- &_tsIp2prAsyncErrHandle
- [i]);
- if (0 != result) {
-
+ INIT_IB_EVENT_HANDLER(&_tsIp2prEventHandle[i],
+ hca_device, ip2pr_event_func);
+ result = ib_register_event_handler(&_tsIp2prEventHandle[i]);
+ if (result) {
TS_TRACE(MOD_IP2PR, T_VERBOSE, TRACE_FLOW_WARN,
"INIT: Error <%d> registering event handler.",
result);
goto error_async;
}
- /* if */
- evt_rec.device = hca_device;
- evt_rec.event = IB_EVENT_PORT_ACTIVE;
- result = ib_async_event_handler_register(&evt_rec,
- ip2pr_async_event_func,
- NULL,
- &_tsIp2prAsyncActHandle
- [i]);
- if (0 != result) {
+ }
- TS_TRACE(MOD_IP2PR, T_VERBOSE, TRACE_FLOW_WARN,
- "INIT: Error <%d> registering event handler.",
- result);
- goto error_async;
- } /* if */
- } /* for */
-
/*
* create timer for pruning path record cache.
*/
@@ -2198,16 +2166,9 @@
return 0;
error_async:
- for (i = 0; i < IP2PR_MAX_HCAS; i++) {
- if (_tsIp2prAsyncErrHandle[i] != TS_IP2PR_INVALID_ASYNC_HANDLE) {
- ib_async_event_handler_deregister(_tsIp2prAsyncErrHandle
- [i]);
- }
- if (_tsIp2prAsyncActHandle[i] != TS_IP2PR_INVALID_ASYNC_HANDLE) {
- ib_async_event_handler_deregister(_tsIp2prAsyncActHandle
- [i]);
- }
- }
+ for (i = 0; i < IP2PR_MAX_HCAS; i++)
+ if (_tsIp2prEventHandle[i].device)
+ ib_unregister_event_handler(&_tsIp2prEventHandle[i]);
kmem_cache_destroy(_tsIp2prLinkRoot.user_req);
error_user:
@@ -2243,16 +2204,9 @@
/*
* release async event handler(s)
*/
- for (i = 0; i < IP2PR_MAX_HCAS; i++) {
- if (_tsIp2prAsyncErrHandle[i] != TS_IP2PR_INVALID_ASYNC_HANDLE) {
- ib_async_event_handler_deregister(_tsIp2prAsyncErrHandle
- [i]);
- }
- if (_tsIp2prAsyncActHandle[i] != TS_IP2PR_INVALID_ASYNC_HANDLE) {
- ib_async_event_handler_deregister(_tsIp2prAsyncActHandle
- [i]);
- }
- }
+ for (i = 0; i < IP2PR_MAX_HCAS; i++)
+ if (_tsIp2prEventHandle[i].device)
+ ib_unregister_event_handler(&_tsIp2prEventHandle[i]);
/*
* clear wait list
Index: infiniband/ulp/srp/srp_host.c
===================================================================
--- infiniband/ulp/srp/srp_host.c (revision 759)
+++ infiniband/ulp/srp/srp_host.c (working copy)
@@ -563,6 +563,7 @@
target->cqs_hndl[hca_index] = ib_create_cq(hca->ca_hndl,
cq_send_event,
+ NULL,
target,
MAX_SEND_WQES);
@@ -583,6 +584,7 @@
target->cqr_hndl[hca_index] = ib_create_cq(hca->ca_hndl,
cq_recv_event,
+ NULL,
target,
MAX_RECV_WQES);
Index: infiniband/ulp/srp/srp_dm.c
===================================================================
--- infiniband/ulp/srp/srp_dm.c (revision 759)
+++ infiniband/ulp/srp/srp_dm.c (working copy)
@@ -1388,7 +1388,8 @@
return (status);
}
-void srp_hca_async_event_handler(struct ib_async_event_record *event, void *arg)
+void srp_hca_async_event_handler(struct ib_event_handler *handler,
+ struct ib_event *event)
{
int hca_index;
srp_host_port_params_t *port;
@@ -1406,7 +1407,7 @@
}
hca = &hca_params[hca_index];
- port = &hca->port[event->modifier.port - 1];
+ port = &hca->port[event->element.port_num - 1];
switch (event->event) {
@@ -1418,7 +1419,7 @@
*/
TS_REPORT_WARN(MOD_SRPTP,
"Port active event for hca %d port %d",
- hca_index + 1, event->modifier.port);
+ hca_index + 1, event->element.port_num);
if (!port->valid)
break;
@@ -1434,7 +1435,7 @@
up(&driver_params.sema);
break;
- case IB_LOCAL_CATASTROPHIC_ERROR:
+ case IB_EVENT_DEVICE_FATAL:
{
int port_index;
@@ -1454,17 +1455,17 @@
if (!hca->port[port_index].valid)
break;
- event->event = IB_PORT_ERROR;
+ event->event = IB_EVENT_PORT_ERR;
- event->modifier.port =
+ event->element.port_num =
hca->port[port_index].local_port;
- srp_hca_async_event_handler(event, NULL);
+ srp_hca_async_event_handler(handler, event);
}
}
break;
- case IB_PORT_ERROR:
+ case IB_EVENT_PORT_ERR:
{
u32 i;
int ioc_index;
@@ -1473,7 +1474,7 @@
TS_REPORT_WARN(MOD_SRPTP,
"Port error event for hca %d port %d",
- hca_index + 1, event->modifier.port);
+ hca_index + 1, event->element.port_num);
if (!port->valid)
break;
@@ -1554,24 +1555,15 @@
}
break;
- case IB_LID_CHANGE:
- break;
-
- case IB_PKEY_CHANGE:
- break;
-
default:
- TS_REPORT_FATAL(MOD_SRPTP, "Unsupported event type %d",
- event->event);
break;
}
}
int srp_dm_init(void)
{
- int i, async_event_index, hca_index, status;
+ int hca_index, status;
srp_host_hca_params_t *hca;
- struct ib_async_event_record async_record;
max_path_record_cache = max_srp_targets * MAX_LOCAL_PORTS;
@@ -1610,27 +1602,16 @@
"Registering async events handler for HCA %d",
hca->hca_index);
- async_record.device = hca->ca_hndl;
+ INIT_IB_EVENT_HANDLER(&hca->event_handler, hca->ca_hndl,
+ srp_hca_async_event_handler);
+ status = ib_register_event_handler(&hca->event_handler);
- async_event_index = IB_LOCAL_CATASTROPHIC_ERROR;
- for (i = 0; i < MAX_ASYNC_EVENT_HANDLES; i++) {
- async_record.event = async_event_index;
- status = ib_async_event_handler_register(&async_record,
- srp_hca_async_event_handler,
- hca,
- &hca->
- async_handles
- [i]);
-
- if (status) {
- TS_REPORT_FATAL(MOD_SRPTP,
- "Registration of async event "
- "%d on hca %d failed",
- i, hca->hca_index, status);
- return (-EINVAL);
- }
-
- async_event_index++;
+ if (status) {
+ TS_REPORT_FATAL(MOD_SRPTP,
+ "Registration of async event "
+ "handler for hca %d failed: %d",
+ hca->hca_index, status);
+ return (-EINVAL);
}
}
@@ -1646,7 +1627,7 @@
void srp_dm_unload(void)
{
srp_host_hca_params_t *hca;
- int i, hca_index;
+ int hca_index;
/*
* Unegister for async events on the HCA
@@ -1665,9 +1646,7 @@
* Loop through the async handles for the HCA and
* deregister them.
*/
- for (i = 0; i < MAX_ASYNC_EVENT_HANDLES; i++) {
- ib_async_event_handler_deregister(hca->async_handles[i]);
- }
+ ib_unregister_event_handler(&hca->event_handler);
}
/* Register with DM to register for async notification */
Index: infiniband/ulp/srp/srp_host.h
===================================================================
--- infiniband/ulp/srp/srp_host.h (revision 759)
+++ infiniband/ulp/srp/srp_host.h (working copy)
@@ -161,7 +161,7 @@
struct _srp_host_port_params port[MAX_LOCAL_PORTS_PER_HCA];
- struct ib_async_event_handler *async_handles[MAX_ASYNC_EVENT_HANDLES];
+ struct ib_event_handler event_handler;
} srp_host_hca_params_t;
Index: infiniband/ulp/srp/srptp.c
===================================================================
--- infiniband/ulp/srp/srptp.c (revision 759)
+++ infiniband/ulp/srp/srptp.c (working copy)
@@ -681,6 +681,8 @@
init_attr.rq_sig_type = IB_SIGNAL_ALL_WR;
init_attr.qp_type = IB_QPT_RC;
+ init_attr.event_handler = NULL;
+
conn->qp_hndl = ib_create_qp(hca->pd_hndl, &init_attr, &qp_cap);
if (IS_ERR(conn->qp_hndl)) {
TS_REPORT_FATAL(MOD_SRPTP, "QP Create failed %d",
Index: infiniband/ulp/sdp/sdp_conn.c
===================================================================
--- infiniband/ulp/sdp/sdp_conn.c (revision 759)
+++ infiniband/ulp/sdp/sdp_conn.c (working copy)
@@ -1065,6 +1065,7 @@
if (!conn->send_cq) {
conn->send_cq = ib_create_cq(conn->ca,
sdp_cq_event_handler,
+ NULL,
(void *)(unsigned long)conn->hashent,
conn->send_cq_size);
if (IS_ERR(conn->send_cq)) {
@@ -1091,6 +1092,7 @@
if (!conn->recv_cq) {
conn->recv_cq = ib_create_cq(conn->ca,
sdp_cq_event_handler,
+ NULL,
(void *)(unsigned long)conn->hashent,
conn->recv_cq_size);
Index: infiniband/include/ib_verbs.h
===================================================================
--- infiniband/include/ib_verbs.h (revision 759)
+++ infiniband/include/ib_verbs.h (working copy)
@@ -206,6 +206,46 @@
u8 init_type;
};
+enum ib_event_type {
+ IB_EVENT_CQ_ERR,
+ IB_EVENT_QP_FATAL,
+ IB_EVENT_QP_REQ_ERR,
+ IB_EVENT_QP_ACCESS_ERR,
+ IB_EVENT_COMM_EST,
+ IB_EVENT_SQ_DRAINED,
+ IB_EVENT_PATH_MIG,
+ IB_EVENT_PATH_MIG_ERR,
+ IB_EVENT_DEVICE_FATAL,
+ IB_EVENT_PORT_ACTIVE,
+ IB_EVENT_PORT_ERR,
+ IB_EVENT_LID_CHANGE,
+ IB_EVENT_PKEY_CHANGE,
+ IB_EVENT_SM_CHANGE
+};
+
+struct ib_event {
+ struct ib_device *device;
+ union {
+ struct ib_cq *cq;
+ struct ib_qp *qp;
+ u8 port_num;
+ } element;
+ enum ib_event_type event;
+};
+
+struct ib_event_handler {
+ struct ib_device *device;
+ void (*handler)(struct ib_event_handler *, struct ib_event *);
+ struct list_head list;
+};
+
+#define INIT_IB_EVENT_HANDLER(_ptr, _device, _handler) \
+ do { \
+ (_ptr)->device = _device; \
+ (_ptr)->handler = _handler; \
+ INIT_LIST_HEAD(&(_ptr)->list); \
+ } while (0)
+
struct ib_global_route {
union ib_gid dgid;
u32 flow_label;
@@ -316,6 +356,7 @@
};
struct ib_qp_init_attr {
+ void (*event_handler)(struct ib_event *, void *);
void *qp_context;
struct ib_cq *send_cq;
struct ib_cq *recv_cq;
@@ -549,6 +590,7 @@
struct ib_cq {
struct ib_device *device;
ib_comp_handler comp_handler;
+ void (*event_handler)(struct ib_event *, void *);
void * context;
int cqe;
atomic_t usecnt; /* count number of work queues */
@@ -567,6 +609,7 @@
struct ib_cq *send_cq;
struct ib_cq *recv_cq;
struct ib_srq *srq;
+ void (*event_handler)(struct ib_event *, void *);
void *qp_context;
u32 qp_num;
};
@@ -600,8 +643,14 @@
struct pci_dev *dma_device;
char name[IB_DEVICE_NAME_MAX];
- char *provider;
+
+ struct list_head event_handler_list;
+ spinlock_t event_handler_lock;
+
struct list_head core_list;
+ struct list_head client_data_list;
+ spinlock_t client_data_lock;
+
void *core;
void *mad;
u32 flags;
@@ -709,11 +758,30 @@
u8 node_type;
};
+struct ib_client {
+ void (*add) (struct ib_device *);
+ void (*remove)(struct ib_device *);
+
+ struct list_head list;
+};
+
struct ib_device *ib_alloc_device(size_t size);
void ib_dealloc_device(struct ib_device *device);
-int ib_register_device (struct ib_device *device);
-int ib_deregister_device(struct ib_device *device);
+int ib_register_device (struct ib_device *device);
+void ib_unregister_device(struct ib_device *device);
+
+int ib_register_client (struct ib_client *client);
+void ib_unregister_client(struct ib_client *client);
+
+void *ib_get_client_data(struct ib_device *device, struct ib_client *client);
+int ib_set_client_data(struct ib_device *device, struct ib_client *client,
+ void *data);
+
+int ib_register_event_handler (struct ib_event_handler *event_handler);
+int ib_unregister_event_handler(struct ib_event_handler *event_handler);
+void ib_dispatch_event(struct ib_event *event);
+
int ib_query_device(struct ib_device *device,
struct ib_device_attr *device_attr);
@@ -774,6 +842,7 @@
struct ib_cq *ib_create_cq(struct ib_device *device,
ib_comp_handler comp_handler,
+ void (*event_handler)(struct ib_event *, void *),
void *cq_context, int cqe);
int ib_resize_cq(struct ib_cq *cq, int cqe);
Index: infiniband/include/ts_ib_core_types.h
===================================================================
--- infiniband/include/ts_ib_core_types.h (revision 759)
+++ infiniband/include/ts_ib_core_types.h (working copy)
@@ -74,59 +74,12 @@
#ifdef __KERNEL__
-enum ib_async_event {
- IB_QP_PATH_MIGRATED,
- IB_EEC_PATH_MIGRATED,
- IB_QP_COMMUNICATION_ESTABLISHED,
- IB_EEC_COMMUNICATION_ESTABLISHED,
- IB_SEND_QUEUE_DRAINED,
- IB_CQ_ERROR,
- IB_LOCAL_WQ_INVALID_REQUEST_ERROR,
- IB_LOCAL_WQ_ACCESS_VIOLATION_ERROR,
- IB_LOCAL_WQ_CATASTROPHIC_ERROR,
- IB_PATH_MIGRATION_ERROR,
- IB_LOCAL_EEC_CATASTROPHIC_ERROR,
- IB_LOCAL_CATASTROPHIC_ERROR,
- IB_PORT_ERROR,
- IB_EVENT_PORT_ACTIVE,
- IB_LID_CHANGE,
- IB_PKEY_CHANGE,
-};
-
-struct ib_async_event_handler; /* actual definition in core_async.c */
-
-struct ib_async_event_record {
- struct ib_device *device;
- enum ib_async_event event;
- union {
- struct ib_qp *qp;
- struct ib_eec *eec;
- struct ib_cq *cq;
- int port;
- } modifier;
-};
-
-typedef void (*ib_async_event_handler_func)(struct ib_async_event_record *record,
- void *arg);
-
/* enum definitions */
#define IB_MULTICAST_QPN 0xffffff
/* structures */
-enum {
- IB_DEVICE_NOTIFIER_ADD,
- IB_DEVICE_NOTIFIER_REMOVE
-};
-
-struct ib_device_notifier {
- void (*notifier)(struct ib_device_notifier *self,
- struct ib_device *device,
- int event);
- struct list_head list;
-};
-
struct ib_sm_path {
u16 sm_lid;
tTS_IB_SL sm_sl;
Index: infiniband/include/ts_ib_core.h
===================================================================
--- infiniband/include/ts_ib_core.h (revision 759)
+++ infiniband/include/ts_ib_core.h (working copy)
@@ -38,17 +38,9 @@
}
}
-struct ib_device *ib_device_get_by_name(const char *name);
-struct ib_device *ib_device_get_by_index(int index);
-int ib_device_notifier_register(struct ib_device_notifier *notifier);
-int ib_device_notifier_deregister(struct ib_device_notifier *notifier);
+struct ib_device *ib_device_get_by_name(const char *name) __deprecated;
+struct ib_device *ib_device_get_by_index(int index) __deprecated;
-int ib_async_event_handler_register(struct ib_async_event_record *record,
- ib_async_event_handler_func function,
- void *arg,
- struct ib_async_event_handler **handle);
-int ib_async_event_handler_deregister(struct ib_async_event_handler *handle);
-
int ib_cached_node_guid_get(struct ib_device *device,
tTS_IB_GUID node_guid);
int ib_cached_port_properties_get(struct ib_device *device,
Index: infiniband/include/ts_ib_provider.h
===================================================================
--- infiniband/include/ts_ib_provider.h (revision 715)
+++ infiniband/include/ts_ib_provider.h (working copy)
@@ -1,38 +0,0 @@
-/*
- This software is available to you under a choice of one of two
- licenses. You may choose to be licensed under the terms of the GNU
- General Public License (GPL) Version 2, available at
- <http://www.fsf.org/copyleft/gpl.html>, or the OpenIB.org BSD
- license, available in the LICENSE.TXT file accompanying this
- software. These details are also available at
- <http://openib.org/license.html>.
-
- THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- SOFTWARE.
-
- Copyright (c) 2004 Topspin Communications. All rights reserved.
-
- $Id$
-*/
-
-#ifndef _TS_IB_PROVIDER_H
-#define _TS_IB_PROVIDER_H
-
-#include <ib_verbs.h>
-
-void ib_async_event_dispatch(struct ib_async_event_record *event_record);
-
-#endif /* _TS_IB_PROVIDER_H */
-
-/*
- Local Variables:
- c-file-style: "linux"
- indent-tabs-mode: t
- End:
-*/
Index: infiniband/core/Makefile
===================================================================
--- infiniband/core/Makefile (revision 759)
+++ infiniband/core/Makefile (working copy)
@@ -36,10 +36,9 @@
header_ud.o \
ib_verbs.o \
ib_sysfs.o \
+ ib_device.o \
core_main.o \
- core_device.o \
core_fmr_pool.o \
- core_async.o \
core_cache.o \
core_proc.o
Index: infiniband/core/core_cache.c
===================================================================
--- infiniband/core/core_cache.c (revision 759)
+++ infiniband/core/core_cache.c (working copy)
@@ -260,71 +260,9 @@
}
EXPORT_SYMBOL(ib_cached_pkey_find);
-int ib_cache_setup(struct ib_device *device)
+static void ib_cache_update(struct ib_device *device,
+ tTS_IB_PORT port)
{
- struct ib_device_private *priv = device->core;
- struct ib_port_attr prop;
- int p;
- int ret;
-
- for (p = priv->start_port; p <= priv->end_port; ++p) {
- priv->port_data[p].gid_table = NULL;
- priv->port_data[p].pkey_table = NULL;
- }
-
- for (p = priv->start_port; p <= priv->end_port; ++p) {
- seqcount_init(&priv->port_data[p].lock);
- ret = device->query_port(device, p, &prop);
- if (ret) {
- TS_REPORT_WARN(MOD_KERNEL_IB,
- "query_port failed for %s",
- device->name);
- goto error;
- }
- priv->port_data[p].gid_table_alloc_length = prop.gid_tbl_len;
- priv->port_data[p].gid_table = kmalloc(prop.gid_tbl_len * sizeof (tTS_IB_GID),
- GFP_KERNEL);
- if (!priv->port_data[p].gid_table) {
- ret = -ENOMEM;
- goto error;
- }
-
- priv->port_data[p].pkey_table_alloc_length = prop.pkey_tbl_len;
- priv->port_data[p].pkey_table = kmalloc(prop.pkey_tbl_len * sizeof (u16),
- GFP_KERNEL);
- if (!priv->port_data[p].pkey_table) {
- ret = -ENOMEM;
- goto error;
- }
-
- ib_cache_update(device, p);
- }
-
- return 0;
-
- error:
- for (p = priv->start_port; p <= priv->end_port; ++p) {
- kfree(priv->port_data[p].gid_table);
- kfree(priv->port_data[p].pkey_table);
- }
-
- return ret;
-}
-
-void ib_cache_cleanup(struct ib_device *device)
-{
- struct ib_device_private *priv = device->core;
- int p;
-
- for (p = priv->start_port; p <= priv->end_port; ++p) {
- kfree(priv->port_data[p].gid_table);
- kfree(priv->port_data[p].pkey_table);
- }
-}
-
-void ib_cache_update(struct ib_device *device,
- tTS_IB_PORT port)
-{
struct ib_device_private *priv = device->core;
struct ib_port_data *info = &priv->port_data[port];
struct ib_port_attr *tprops = NULL;
@@ -405,6 +343,104 @@
kfree(tgid);
}
+static void ib_cache_task(void *port_ptr)
+{
+ struct ib_port_data *port_data = port_ptr;
+
+ ib_cache_update(port_data->device, port_data->port_num);
+}
+
+static void ib_cache_event(struct ib_event_handler *handler,
+ struct ib_event *event)
+{
+ if (event->event == IB_EVENT_PORT_ERR ||
+ event->event == IB_EVENT_PORT_ACTIVE ||
+ event->event == IB_EVENT_LID_CHANGE ||
+ event->event == IB_EVENT_PKEY_CHANGE ||
+ event->event == IB_EVENT_SM_CHANGE) {
+ struct ib_device_private *priv = event->device->core;
+ schedule_work(&priv->port_data[event->element.port_num].refresh_task);
+ }
+}
+
+int ib_cache_setup(struct ib_device *device)
+{
+ struct ib_device_private *priv = device->core;
+ struct ib_port_attr prop;
+ int p;
+ int ret;
+
+ for (p = priv->start_port; p <= priv->end_port; ++p) {
+ priv->port_data[p].device = device;
+ priv->port_data[p].port_num = p;
+ INIT_WORK(&priv->port_data[p].refresh_task,
+ ib_cache_task, &priv->port_data[p]);
+ priv->port_data[p].gid_table = NULL;
+ priv->port_data[p].pkey_table = NULL;
+ priv->port_data[p].event_handler.device = NULL;
+ }
+
+ for (p = priv->start_port; p <= priv->end_port; ++p) {
+ seqcount_init(&priv->port_data[p].lock);
+ ret = device->query_port(device, p, &prop);
+ if (ret) {
+ TS_REPORT_WARN(MOD_KERNEL_IB,
+ "query_port failed for %s",
+ device->name);
+ goto error;
+ }
+ priv->port_data[p].gid_table_alloc_length = prop.gid_tbl_len;
+ priv->port_data[p].gid_table = kmalloc(prop.gid_tbl_len * sizeof (tTS_IB_GID),
+ GFP_KERNEL);
+ if (!priv->port_data[p].gid_table) {
+ ret = -ENOMEM;
+ goto error;
+ }
+
+ priv->port_data[p].pkey_table_alloc_length = prop.pkey_tbl_len;
+ priv->port_data[p].pkey_table = kmalloc(prop.pkey_tbl_len * sizeof (u16),
+ GFP_KERNEL);
+ if (!priv->port_data[p].pkey_table) {
+ ret = -ENOMEM;
+ goto error;
+ }
+
+ ib_cache_update(device, p);
+
+ INIT_IB_EVENT_HANDLER(&priv->port_data[p].event_handler,
+ device, ib_cache_event);
+ ret = ib_register_event_handler(&priv->port_data[p].event_handler);
+ if (ret) {
+ priv->port_data[p].event_handler.device = NULL;
+ goto error;
+ }
+ }
+
+ return 0;
+
+ error:
+ for (p = priv->start_port; p <= priv->end_port; ++p) {
+ if (priv->port_data[p].event_handler.device)
+ ib_unregister_event_handler(&priv->port_data[p].event_handler);
+ kfree(priv->port_data[p].gid_table);
+ kfree(priv->port_data[p].pkey_table);
+ }
+
+ return ret;
+}
+
+void ib_cache_cleanup(struct ib_device *device)
+{
+ struct ib_device_private *priv = device->core;
+ int p;
+
+ for (p = priv->start_port; p <= priv->end_port; ++p) {
+ ib_unregister_event_handler(&priv->port_data[p].event_handler);
+ kfree(priv->port_data[p].gid_table);
+ kfree(priv->port_data[p].pkey_table);
+ }
+}
+
/*
Local Variables:
c-file-style: "linux"
Index: infiniband/core/core_priv.h
===================================================================
--- infiniband/core/core_priv.h (revision 759)
+++ infiniband/core/core_priv.h (working copy)
@@ -24,16 +24,14 @@
#ifndef _CORE_PRIV_H
#define _CORE_PRIV_H
+#include <linux/list.h>
+#include <linux/spinlock.h>
+
#include <ib_verbs.h>
-#include "ts_ib_provider.h"
#include "ts_kernel_services.h"
#include "ts_kernel_thread.h"
-#include <linux/list.h>
-#include <linux/spinlock.h>
-#include <linux/wait.h>
-
enum {
IB_PORT_CAP_SM,
IB_PORT_CAP_SNMP_TUN,
@@ -48,18 +46,17 @@
tTS_IB_GUID node_guid;
struct ib_port_data *port_data;
- struct list_head async_handler_list;
- spinlock_t async_handler_lock;
-
- tTS_KERNEL_QUEUE_THREAD async_thread;
-
struct ib_core_proc *proc;
};
struct ib_port_data {
+ struct ib_device *device;
spinlock_t port_cap_lock;
int port_cap_count[IB_PORT_CAP_NUM];
+ struct ib_event_handler event_handler;
+ struct work_struct refresh_task;
+
seqcount_t lock;
struct ib_port_attr properties;
struct ib_sm_path sm_path;
@@ -68,11 +65,11 @@
u16 pkey_table_alloc_length;
union ib_gid *gid_table;
u16 *pkey_table;
+ u8 port_num;
};
int ib_cache_setup(struct ib_device *device);
void ib_cache_cleanup(struct ib_device *device);
-void ib_cache_update(struct ib_device *device, tTS_IB_PORT port);
int ib_proc_setup(struct ib_device *device, int is_switch);
void ib_proc_cleanup(struct ib_device *device);
int ib_create_proc_dir(void);
@@ -81,7 +78,7 @@
void ib_async_thread(struct list_head *entry, void *device_ptr);
int ib_device_register_sysfs(struct ib_device *device);
-void ib_device_deregister_sysfs(struct ib_device *device);
+void ib_device_unregister_sysfs(struct ib_device *device);
int ib_sysfs_setup(void);
void ib_sysfs_cleanup(void);
Index: infiniband/core/ib_device.c
===================================================================
--- infiniband/core/ib_device.c (revision 715)
+++ infiniband/core/ib_device.c (working copy)
@@ -1,28 +1,26 @@
/*
- This software is available to you under a choice of one of two
- licenses. You may choose to be licensed under the terms of the GNU
- General Public License (GPL) Version 2, available at
- <http://www.fsf.org/copyleft/gpl.html>, or the OpenIB.org BSD
- license, available in the LICENSE.TXT file accompanying this
- software. These details are also available at
- <http://openib.org/license.html>.
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available at
+ * <http://www.fsf.org/copyleft/gpl.html>, or the OpenIB.org BSD
+ * license, available in the LICENSE.TXT file accompanying this
+ * software. These details are also available at
+ * <http://openib.org/license.html>.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Copyright (c) 2004 Topspin Communications. All rights reserved.
+ *
+ * $Id$
+ */
- THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- SOFTWARE.
-
- Copyright (c) 2004 Topspin Communications. All rights reserved.
-
- $Id$
-*/
-
-#include "ts_kernel_services.h"
-
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/slab.h>
@@ -32,10 +30,24 @@
#include "core_priv.h"
+struct ib_client_data {
+ struct list_head list;
+ struct ib_client *client;
+ void *data;
+};
+
static LIST_HEAD(device_list);
-static LIST_HEAD(notifier_list);
-static DECLARE_MUTEX(device_lock);
+static LIST_HEAD(client_list);
+/*
+ * device_sem protects access to both device_list and client_list.
+ * There's no real point to using multiple locks or something fancier
+ * like an rwsem: we always access both lists, and we're always
+ * modifying one list or the other list. In any case this is not a
+ * hot path so there's no point in trying to optimize.
+ */
+static DECLARE_MUTEX(device_sem);
+
static int ib_device_check_mandatory(struct ib_device *device)
{
#define IB_MANDATORY_FUNC(x) { offsetof(struct ib_device, x), #x }
@@ -145,7 +157,7 @@
BUG_ON(device->reg_state != IB_DEV_UNREGISTERED);
- ib_device_deregister_sysfs(device);
+ ib_device_unregister_sysfs(device);
}
EXPORT_SYMBOL(ib_dealloc_device);
@@ -156,18 +168,19 @@
int ret;
int p;
- if (ib_device_check_mandatory(device)) {
- return -EINVAL;
- }
+ down(&device_sem);
- down(&device_lock);
-
if (strchr(device->name, '%')) {
ret = alloc_name(device->name);
if (ret)
goto out;
}
+ if (ib_device_check_mandatory(device)) {
+ ret = -EINVAL;
+ goto out;
+ }
+
priv = kmalloc(sizeof *priv, GFP_KERNEL);
if (!priv) {
printk(KERN_WARNING "Couldn't allocate private struct for %s\n",
@@ -209,8 +222,10 @@
device->core = priv;
- INIT_LIST_HEAD(&priv->async_handler_list);
- spin_lock_init(&priv->async_handler_lock);
+ INIT_LIST_HEAD(&device->event_handler_list);
+ INIT_LIST_HEAD(&device->client_data_list);
+ spin_lock_init(&device->event_handler_lock);
+ spin_lock_init(&device->client_data_lock);
ret = ib_cache_setup(device);
if (ret) {
@@ -219,21 +234,11 @@
goto out_free_port;
}
- ret = tsKernelQueueThreadStart("ts_ib_async",
- ib_async_thread,
- device,
- &priv->async_thread);
- if (ret) {
- printk(KERN_WARNING "Couldn't start async thread for %s\n",
- device->name);
- goto out_free_cache;
- }
-
ret = ib_proc_setup(device, device->node_type == IB_NODE_SWITCH);
if (ret) {
printk(KERN_WARNING "Couldn't create /proc dir for %s\n",
device->name);
- goto out_stop_async;
+ goto out_free_cache;
}
if (ib_device_register_sysfs(device)) {
@@ -243,27 +248,23 @@
}
list_add_tail(&device->core_list, &device_list);
+
+ device->reg_state = IB_DEV_REGISTERED;
+
{
- struct list_head *ptr;
- struct ib_device_notifier *notifier;
+ struct ib_client *client;
- list_for_each(ptr, ¬ifier_list) {
- notifier = list_entry(ptr, struct ib_device_notifier, list);
- notifier->notifier(notifier, device, IB_DEVICE_NOTIFIER_ADD);
- }
+ list_for_each_entry(client, &client_list, list)
+ if (client->add)
+ client->add(device);
}
- device->reg_state = IB_DEV_REGISTERED;
-
- up(&device_lock);
+ up(&device_sem);
return 0;
out_proc:
ib_proc_cleanup(device);
- out_stop_async:
- tsKernelQueueThreadStop(priv->async_thread);
-
out_free_cache:
ib_cache_cleanup(device);
@@ -274,54 +275,50 @@
kfree(priv);
out:
- up(&device_lock);
+ up(&device_sem);
return ret;
}
EXPORT_SYMBOL(ib_register_device);
-int ib_deregister_device(struct ib_device *device)
+void ib_unregister_device(struct ib_device *device)
{
- struct ib_device_private *priv;
+ struct ib_device_private *priv = device->core;
+ struct ib_client *client;
+ struct ib_client_data *context, *tmp;
+ unsigned long flags;
- priv = device->core;
+ down(&device_sem);
- if (tsKernelQueueThreadStop(priv->async_thread)) {
- printk(KERN_WARNING "tsKernelThreadStop failed for %s async thread\n",
- device->name);
- }
+ list_for_each_entry_reverse(client, &client_list, list)
+ if (client->remove)
+ client->remove(device);
+ list_del(&device->core_list);
+
+ up(&device_sem);
+
+ spin_lock_irqsave(&device->client_data_lock, flags);
+ list_for_each_entry_safe(context, tmp, &device->client_data_list, list)
+ kfree(context);
+ spin_unlock_irqrestore(&device->client_data_lock, flags);
+
ib_proc_cleanup(device);
ib_cache_cleanup(device);
- down(&device_lock);
- list_del(&device->core_list);
- {
- struct list_head *ptr;
- struct ib_device_notifier *notifier;
-
- list_for_each_prev(ptr, ¬ifier_list) {
- notifier = list_entry(ptr, struct ib_device_notifier, list);
- notifier->notifier(notifier, device, IB_DEVICE_NOTIFIER_REMOVE);
- }
- }
- up(&device_lock);
-
kfree(priv->port_data);
kfree(priv);
device->reg_state = IB_DEV_UNREGISTERED;
-
- return 0;
}
-EXPORT_SYMBOL(ib_deregister_device);
+EXPORT_SYMBOL(ib_unregister_device);
struct ib_device *ib_device_get_by_name(const char *name)
{
struct ib_device *device;
- down(&device_lock);
+ down(&device_sem);
device = __ib_device_get_by_name(name);
- up(&device_lock);
+ up(&device_sem);
return device;
}
@@ -335,7 +332,7 @@
if (index < 0)
return NULL;
- down(&device_lock);
+ down(&device_sem);
list_for_each(ptr, &device_list) {
device = list_entry(ptr, struct ib_device, core_list);
if (!index)
@@ -345,38 +342,142 @@
device = NULL;
out:
- up(&device_lock);
+ up(&device_sem);
return device;
}
EXPORT_SYMBOL(ib_device_get_by_index);
-int ib_device_notifier_register(struct ib_device_notifier *notifier)
+int ib_register_client(struct ib_client *client)
{
- struct list_head *ptr;
struct ib_device *device;
- down(&device_lock);
- list_add_tail(¬ifier->list, ¬ifier_list);
- list_for_each(ptr, &device_list) {
- device = list_entry(ptr, struct ib_device, core_list);
- notifier->notifier(notifier, device, IB_DEVICE_NOTIFIER_ADD);
+ down(&device_sem);
+
+ list_add_tail(&client->list, &client_list);
+ list_for_each_entry(device, &device_list, core_list)
+ if (client->add)
+ client->add(device);
+
+ up(&device_sem);
+
+ return 0;
+}
+EXPORT_SYMBOL(ib_register_client);
+
+void ib_unregister_client(struct ib_client *client)
+{
+ struct ib_client_data *context, *tmp;
+ struct ib_device *device;
+ unsigned long flags;
+
+ down(&device_sem);
+
+ list_for_each_entry(device, &device_list, core_list) {
+ if (client->remove)
+ client->remove(device);
+
+ spin_lock_irqsave(&device->client_data_lock, flags);
+ list_for_each_entry_safe(context, tmp, &device->client_data_list, list)
+ if (context->client == client) {
+ list_del(&context->list);
+ kfree(context);
+ }
+ spin_unlock_irqrestore(&device->client_data_lock, flags);
}
- up(&device_lock);
+ list_del(&client->list);
+ up(&device_sem);
+}
+EXPORT_SYMBOL(ib_unregister_client);
+
+void *ib_get_client_data(struct ib_device *device, struct ib_client *client)
+{
+ struct ib_client_data *context;
+ void *ret = NULL;
+ unsigned long flags;
+
+ spin_lock_irqsave(&device->client_data_lock, flags);
+ list_for_each_entry(context, &device->client_data_list, list)
+ if (context->client == client) {
+ ret = context->data;
+ break;
+ }
+ spin_unlock_irqrestore(&device->client_data_lock, flags);
+
+ return ret;
+}
+EXPORT_SYMBOL(ib_get_client_data);
+
+int ib_set_client_data(struct ib_device *device, struct ib_client *client,
+ void *data)
+{
+ struct ib_client_data *context;
+ int ret = 0;
+ unsigned long flags;
+
+ spin_lock_irqsave(&device->client_data_lock, flags);
+ list_for_each_entry(context, &device->client_data_list, list)
+ if (context->client == client) {
+ context->data = data;
+ spin_unlock_irqrestore(&device->client_data_lock, flags);
+ return 0;
+ }
+
+ spin_unlock_irqrestore(&device->client_data_lock, flags);
+ context = kmalloc(sizeof *context, GFP_KERNEL);
+ if (!context)
+ return -ENOMEM;
+
+ context->client = client;
+ context->data = data;
+
+ spin_lock_irqsave(&device->client_data_lock, flags);
+ list_add(&context->list, &device->client_data_list);
+ spin_unlock_irqrestore(&device->client_data_lock, flags);
+
return 0;
}
-EXPORT_SYMBOL(ib_device_notifier_register);
+EXPORT_SYMBOL(ib_set_client_data);
-int ib_device_notifier_deregister(struct ib_device_notifier *notifier)
+int ib_register_event_handler(struct ib_event_handler *event_handler)
{
- down(&device_lock);
- list_del(¬ifier->list);
- up(&device_lock);
+ unsigned long flags;
+ spin_lock_irqsave(&event_handler->device->event_handler_lock, flags);
+ list_add_tail(&event_handler->list,
+ &event_handler->device->event_handler_list);
+ spin_unlock_irqrestore(&event_handler->device->event_handler_lock, flags);
+
return 0;
}
-EXPORT_SYMBOL(ib_device_notifier_deregister);
+EXPORT_SYMBOL(ib_register_event_handler);
+int ib_unregister_event_handler(struct ib_event_handler *event_handler)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&event_handler->device->event_handler_lock, flags);
+ list_del(&event_handler->list);
+ spin_unlock_irqrestore(&event_handler->device->event_handler_lock, flags);
+
+ return 0;
+}
+EXPORT_SYMBOL(ib_unregister_event_handler);
+
+void ib_dispatch_event(struct ib_event *event)
+{
+ unsigned long flags;
+ struct ib_event_handler *handler;
+
+ spin_lock_irqsave(&event->device->event_handler_lock, flags);
+
+ list_for_each_entry(handler, &event->device->event_handler_list, list)
+ handler->handler(handler, event);
+
+ spin_unlock_irqrestore(&event->device->event_handler_lock, flags);
+}
+EXPORT_SYMBOL(ib_dispatch_event);
+
int ib_query_device(struct ib_device *device,
struct ib_device_attr *device_attr)
{
Index: infiniband/core/core_async.c
===================================================================
--- infiniband/core/core_async.c (revision 715)
+++ infiniband/core/core_async.c (working copy)
@@ -1,251 +0,0 @@
-/*
- This software is available to you under a choice of one of two
- licenses. You may choose to be licensed under the terms of the GNU
- General Public License (GPL) Version 2, available at
- <http://www.fsf.org/copyleft/gpl.html>, or the OpenIB.org BSD
- license, available in the LICENSE.TXT file accompanying this
- software. These details are also available at
- <http://openib.org/license.html>.
-
- THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- SOFTWARE.
-
- Copyright (c) 2004 Topspin Communications. All rights reserved.
-
- $Id$
-*/
-
-#include "core_priv.h"
-
-#include "ts_kernel_trace.h"
-#include "ts_kernel_services.h"
-
-#include <linux/version.h>
-#include <linux/module.h>
-
-#include <linux/errno.h>
-#include <linux/slab.h>
-
-struct ib_async_event_handler {
- struct ib_async_event_record record;
- ib_async_event_handler_func function;
- void *arg;
- struct list_head list;
- spinlock_t *list_lock;
-};
-
-struct ib_async_event_list {
- struct ib_async_event_record record;
- struct list_head list;
-};
-
-/* Table of modifiers for async events */
-static struct {
- enum {
- QP,
- EEC,
- CQ,
- PORT,
- NONE
- } mod;
- char *desc;
-} event_table[] = {
- [IB_QP_PATH_MIGRATED] = { QP, "QP Path Migrated" },
- [IB_EEC_PATH_MIGRATED] = { EEC, "EEC Path Migrated" },
- [IB_QP_COMMUNICATION_ESTABLISHED] = { QP, "QP Communication Established" },
- [IB_EEC_COMMUNICATION_ESTABLISHED] = { EEC, "EEC Communication Established" },
- [IB_SEND_QUEUE_DRAINED] = { QP, "Send Queue Drained" },
- [IB_CQ_ERROR] = { CQ, "CQ Error" },
- [IB_LOCAL_WQ_INVALID_REQUEST_ERROR] = { QP, "Local WQ Invalid Request Error" },
- [IB_LOCAL_WQ_ACCESS_VIOLATION_ERROR] = { QP, "Local WQ Access Violation Error" },
- [IB_LOCAL_WQ_CATASTROPHIC_ERROR] = { QP, "Local WQ Catastrophic Error" },
- [IB_PATH_MIGRATION_ERROR] = { QP, "Path Migration Error" },
- [IB_LOCAL_EEC_CATASTROPHIC_ERROR] = { EEC, "Local EEC Catastrophic Error" },
- [IB_LOCAL_CATASTROPHIC_ERROR] = { NONE, "Local Catastrophic Error" },
- [IB_PORT_ERROR] = { PORT, "Port Error" },
- [IB_EVENT_PORT_ACTIVE] = { PORT, "Port Active" },
- [IB_LID_CHANGE] = { PORT, "LID Change" },
- [IB_PKEY_CHANGE] = { PORT, "P_Key Change" }
-};
-
-int ib_async_event_handler_register(struct ib_async_event_record *record,
- ib_async_event_handler_func function,
- void *arg,
- struct ib_async_event_handler **handle)
-{
- struct ib_async_event_handler *handler;
- int ret;
- unsigned long flags;
-
- if (record->event < 0 || record->event >= ARRAY_SIZE(event_table)) {
- TS_REPORT_WARN(MOD_KERNEL_IB,
- "Attempt to register handler for invalid async event %d",
- record->event);
- return -EINVAL;
- }
-
- handler = kmalloc(sizeof *handler, GFP_KERNEL);
- if (!handler) {
- return -ENOMEM;
- }
-
- handler->record = *record;
- handler->function = function;
- handler->arg = arg;
-
- switch (event_table[record->event].mod) {
- case QP:
- break;
-
- case CQ:
- printk(KERN_WARNING "Async events for CQs not supported\n");
- break;
-
- case EEC:
- TS_REPORT_WARN(MOD_KERNEL_IB,
- "Async events for EECs not supported yet");
- ret = -EINVAL;
- goto error;
-
- case PORT:
- case NONE:
- {
- struct ib_device_private *priv = ((struct ib_device *) record->device)->core;
-
- spin_lock_irqsave(&priv->async_handler_lock, flags);
- handler->list_lock = &priv->async_handler_lock;
- list_add_tail(&handler->list, &priv->async_handler_list);
- spin_unlock_irqrestore(&priv->async_handler_lock, flags);
- }
- break;
- }
-
- *handle = handler;
- return 0;
-
- error:
- kfree(handler);
- return ret;
-}
-EXPORT_SYMBOL(ib_async_event_handler_register);
-
-int ib_async_event_handler_deregister(struct ib_async_event_handler *handle)
-{
- struct ib_async_event_handler *handler = handle;
- unsigned long flags;
-
- spin_lock_irqsave(handler->list_lock, flags);
- list_del(&handler->list);
- spin_unlock_irqrestore(handler->list_lock, flags);
-
- kfree(handle);
- return 0;
-}
-EXPORT_SYMBOL(ib_async_event_handler_deregister);
-
-void ib_async_event_dispatch(struct ib_async_event_record *event_record)
-{
- struct ib_async_event_list *event;
- struct ib_device_private *priv = event_record->device->core;
-
- switch (event_table[event_record->event].mod) {
- default:
- break;
- }
-
- event = kmalloc(sizeof *event, GFP_ATOMIC);
- if (!event) {
- return;
- }
-
- event->record = *event_record;
-
- tsKernelQueueThreadAdd(priv->async_thread, &event->list);
-}
-EXPORT_SYMBOL(ib_async_event_dispatch);
-
-void ib_async_thread(struct list_head *entry,
- void *device_ptr)
-{
- struct ib_async_event_list *event;
- struct ib_device_private *priv;
- char mod_buf[32];
- struct list_head *handler_list = NULL;
- spinlock_t *handler_lock = NULL;
- struct list_head *pos;
- struct list_head *n;
- struct ib_async_event_handler *handler;
- ib_async_event_handler_func function;
- void *arg;
-
- event = list_entry(entry, struct ib_async_event_list, list);
- priv = ((struct ib_device *) event->record.device)->core;
-
- switch (event_table[event->record.event].mod) {
- case QP:
- sprintf(mod_buf, " (QP %p)", event->record.modifier.qp);
- break;
-
- case CQ:
- sprintf(mod_buf, " (CQ %p)", event->record.modifier.cq);
- break;
-
- case EEC:
- sprintf(mod_buf, " (EEC %p)", event->record.modifier.eec);
- break;
-
- case PORT:
- sprintf(mod_buf, " (port %d)", event->record.modifier.port);
- handler_list = &priv->async_handler_list;
- handler_lock = &priv->async_handler_lock;
-
- /* Update cached port info */
- ib_cache_update(event->record.device, event->record.modifier.port);
- break;
-
- case NONE:
- mod_buf[0] = '\0';
- handler_list = &priv->async_handler_list;
- handler_lock = &priv->async_handler_lock;
- break;
- }
-
- TS_TRACE(MOD_KERNEL_IB, T_VERY_VERBOSE, TRACE_KERNEL_IB_GEN,
- "Received %s event for %s%s",
- event_table[event->record.event].desc,
- ((struct ib_device *) event->record.device)->name,
- mod_buf);
-
- if (!handler_list)
- return;
-
- spin_lock_irq(handler_lock);
-
- list_for_each_safe(pos, n, handler_list) {
- handler = list_entry(pos, struct ib_async_event_handler, list);
- if (handler->record.event == event->record.event) {
- function = handler->function;
- arg = handler->arg;
-
- spin_unlock_irq(handler_lock);
- function(&event->record, arg);
- spin_lock_irq(handler_lock);
- }
- }
-
- spin_unlock_irq(handler_lock);
- kfree(event);
-}
-
-/*
- Local Variables:
- c-file-style: "linux"
- indent-tabs-mode: t
- End:
-*/
Index: infiniband/core/mad_main.c
===================================================================
--- infiniband/core/mad_main.c (revision 759)
+++ infiniband/core/mad_main.c (working copy)
@@ -23,11 +23,6 @@
#include <linux/config.h>
-#include "mad_priv.h"
-
-#include "ts_kernel_trace.h"
-#include "ts_kernel_services.h"
-
#include <linux/version.h>
#include <linux/module.h>
@@ -37,10 +32,10 @@
/* Need the definition of high_memory: */
#include <linux/mm.h>
-#ifdef CONFIG_KMOD
-#include <linux/kmod.h>
-#endif
+#include "ts_kernel_services.h"
+#include "mad_priv.h"
+
MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("kernel IB MAD API");
MODULE_LICENSE("Dual BSD/GPL");
@@ -60,11 +55,11 @@
*mr = ib_reg_phys_mr(pd, &buffer_list, 1, /* list_len */
IB_ACCESS_LOCAL_WRITE, &iova);
if (IS_ERR(*mr)) {
- TS_REPORT_WARN(MOD_KERNEL_IB,
- "ib_reg_phys_mr failed "
- "size 0x%016" TS_U64_FMT "x, iova 0x%016" TS_U64_FMT "x"
- " (return code %d)",
- buffer_list.size, iova, PTR_ERR(*mr));
+ printk(KERN_WARNING "ib_reg_phys_mr failed "
+ "size 0x%016llx, iova 0x%016llx "
+ "(return code %ld)\n",
+ (unsigned long long) buffer_list.size,
+ (unsigned long long) iova, PTR_ERR(*mr));
return PTR_ERR(*mr);
}
@@ -82,10 +77,6 @@
int attr_mask;
int ret;
- TS_TRACE(MOD_KERNEL_IB, T_VERY_VERBOSE, TRACE_KERNEL_IB_GEN,
- "Creating port %d QPN %d for device %s",
- port, qpn, device->name);
-
{
struct ib_qp_init_attr init_attr = {
.send_cq = priv->cq,
@@ -105,10 +96,10 @@
priv->qp[port][qpn] = ib_create_qp(priv->pd, &init_attr, &qp_cap);
if (IS_ERR(priv->qp[port][qpn])) {
- TS_REPORT_FATAL(MOD_KERNEL_IB,
- "ib_special_qp_create failed for %s port %d QPN %d (%d)",
- device->name, port, qpn,
- PTR_ERR(priv->qp[port][qpn]));
+ printk(KERN_WARNING "ib_special_qp_create failed "
+ "for %s port %d QPN %d (%ld)\n",
+ device->name, port, qpn,
+ PTR_ERR(priv->qp[port][qpn]));
return PTR_ERR(priv->qp[port][qpn]);
}
}
@@ -125,9 +116,9 @@
ret = ib_modify_qp(priv->qp[port][qpn], &qp_attr, attr_mask, &qp_cap);
if (ret) {
- TS_REPORT_FATAL(MOD_KERNEL_IB,
- "ib_modify_qp -> INIT failed for %s port %d QPN %d (%d)",
- device->name, port, qpn, ret);
+ printk(KERN_WARNING "ib_modify_qp -> INIT failed "
+ "for %s port %d QPN %d (%d)\n",
+ device->name, port, qpn, ret);
return ret;
}
@@ -135,9 +126,9 @@
attr_mask = IB_QP_STATE;
ret = ib_modify_qp(priv->qp[port][qpn], &qp_attr, attr_mask, &qp_cap);
if (ret) {
- TS_REPORT_FATAL(MOD_KERNEL_IB,
- "ib_modify_qp -> RTR failed for %s port %d QPN %d (%d)",
- device->name, port, qpn, ret);
+ printk(KERN_WARNING "ib_modify_qp -> RTR failed "
+ "for %s port %d QPN %d (%d)\n",
+ device->name, port, qpn, ret);
return ret;
}
@@ -148,16 +139,16 @@
IB_QP_SQ_PSN;
ret = ib_modify_qp(priv->qp[port][qpn], &qp_attr, attr_mask, &qp_cap);
if (ret) {
- TS_REPORT_FATAL(MOD_KERNEL_IB,
- "ib_modify_qp -> RTS failed for %s port %d QPN %d (%d)",
- device->name, port, qpn, ret);
+ printk(KERN_WARNING "ib_modify_qp -> RTS failed "
+ "for %s port %d QPN %d (%d)\n",
+ device->name, port, qpn, ret);
return ret;
}
return 0;
}
-static int ib_mad_init_one(struct ib_device *device)
+static void ib_mad_add_one(struct ib_device *device)
{
struct ib_mad_private *priv;
struct ib_device_attr prop;
@@ -165,18 +156,13 @@
ret = ib_query_device(device, &prop);
if (ret)
- return ret;
+ return;
- TS_TRACE(MOD_KERNEL_IB, T_VERY_VERBOSE, TRACE_KERNEL_IB_GEN,
- "Setting up device %s, %d ports",
- device->name, prop.phys_port_cnt);
-
priv = kmalloc(sizeof *priv, GFP_KERNEL);
if (!priv) {
- TS_REPORT_WARN(MOD_KERNEL_IB,
- "Couldn't allocate private structure for %s",
- device->name);
- return -ENOMEM;
+ printk(KERN_WARNING "Couldn't allocate MAD private structure for %s\n",
+ device->name);
+ return;
}
device->mad = priv;
@@ -187,9 +173,8 @@
priv->pd = ib_alloc_pd(device);
if (IS_ERR(priv->pd)) {
- TS_REPORT_FATAL(MOD_KERNEL_IB,
- "Failed to allocate PD for %s",
- device->name);
+ printk(KERN_WARNING "Failed to allocate MAD PD for %s\n",
+ device->name);
goto error;
}
@@ -198,11 +183,10 @@
(IB_MAD_RECEIVES_PER_QP + IB_MAD_SENDS_PER_QP) * priv->num_port;
priv->cq = ib_create_cq(device, ib_mad_completion,
- device, entries);
+ NULL, device, entries);
if (IS_ERR(priv->cq)) {
- TS_REPORT_FATAL(MOD_KERNEL_IB,
- "Failed to allocate CQ for %s",
- device->name);
+ printk(KERN_WARNING "Failed to allocate MAD CQ for %s\n",
+ device->name);
goto error_free_pd;
}
}
@@ -214,9 +198,8 @@
INIT_WORK(&priv->cq_work, ib_mad_drain_cq, device);
if (ib_mad_register_memory(priv->pd, &priv->mr, &priv->lkey)) {
- TS_REPORT_FATAL(MOD_KERNEL_IB,
- "Failed to allocate MR for %s",
- device->name);
+ printk(KERN_WARNING "Failed to allocate MAD MR for %s\n",
+ device->name);
goto error_free_cq;
}
@@ -225,9 +208,8 @@
device,
&priv->work_thread);
if (ret) {
- TS_REPORT_WARN(MOD_KERNEL_IB,
- "Couldn't start completion thread for %s",
- device->name);
+ printk(KERN_WARNING "Couldn't start completion thread for %s\n",
+ device->name);
goto error_free_mr;
}
@@ -272,7 +254,7 @@
}
}
- return 0;
+ return;
error_free_qp:
{
@@ -307,7 +289,6 @@
error:
kfree(priv);
- return ret;
}
static void ib_mad_remove_one(struct ib_device *device)
@@ -346,39 +327,15 @@
}
}
-static void ib_mad_device_notifier(struct ib_device_notifier *self,
- struct ib_device *device,
- int event)
-{
- switch (event) {
- case IB_DEVICE_NOTIFIER_ADD:
- if (ib_mad_init_one(device))
- TS_REPORT_WARN(MOD_KERNEL_IB,
- "Failed to initialize device.");
- break;
-
- case IB_DEVICE_NOTIFIER_REMOVE:
- ib_mad_remove_one(device);
- break;
-
- default:
- TS_REPORT_WARN(MOD_KERNEL_IB,
- "Unknown device notifier event %d.");
- break;
- }
-}
-
-static struct ib_device_notifier mad_notifier = {
- .notifier = ib_mad_device_notifier
+static struct ib_client mad_client = {
+ .add = ib_mad_add_one,
+ .remove = ib_mad_remove_one
};
static int __init ib_mad_init(void)
{
int ret;
- TS_REPORT_INIT(MOD_KERNEL_IB,
- "Initializing IB MAD layer");
-
ret = ib_mad_proc_setup();
if (ret)
return ret;
@@ -391,34 +348,25 @@
NULL,
NULL);
if (!mad_cache) {
- TS_REPORT_FATAL(MOD_KERNEL_IB,
- "Couldn't create MAD slab cache");
+ printk(KERN_ERR "Couldn't create MAD slab cache\n");
ib_mad_proc_cleanup();
return -ENOMEM;
}
- ib_device_notifier_register(&mad_notifier);
+ if (ib_register_client(&mad_client))
- TS_REPORT_INIT(MOD_KERNEL_IB,
- "IB MAD layer initialized");
+ printk(KERN_WARNING "Couldn't register IB MAD client\n");
return 0;
}
static void __exit ib_mad_cleanup(void)
{
- TS_REPORT_CLEANUP(MOD_KERNEL_IB,
- "Unloading IB MAD layer");
-
- ib_device_notifier_deregister(&mad_notifier);
+ ib_unregister_client(&mad_client);
ib_mad_proc_cleanup();
if (kmem_cache_destroy(mad_cache))
- TS_REPORT_WARN(MOD_KERNEL_IB,
- "Failed to destroy MAD slab cache (memory leak?)");
-
- TS_REPORT_CLEANUP(MOD_KERNEL_IB,
- "IB MAD layer unloaded");
+ printk(KERN_WARNING "Failed to destroy MAD slab cache (memory leak?)\n");
}
module_init(ib_mad_init);
Index: infiniband/core/mad_priv.h
===================================================================
--- infiniband/core/mad_priv.h (revision 759)
+++ infiniband/core/mad_priv.h (working copy)
@@ -26,7 +26,6 @@
#include "ts_ib_mad.h"
#include <ib_verbs.h>
-#include "ts_ib_provider.h"
#include <ts_kernel_services.h>
#include "ts_kernel_thread.h"
Index: infiniband/core/core_device.c
===================================================================
--- infiniband/core/core_device.c (revision 715)
+++ infiniband/core/core_device.c (working copy)
@@ -1,432 +0,0 @@
-/*
- This software is available to you under a choice of one of two
- licenses. You may choose to be licensed under the terms of the GNU
- General Public License (GPL) Version 2, available at
- <http://www.fsf.org/copyleft/gpl.html>, or the OpenIB.org BSD
- license, available in the LICENSE.TXT file accompanying this
- software. These details are also available at
- <http://openib.org/license.html>.
-
- THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- SOFTWARE.
-
- Copyright (c) 2004 Topspin Communications. All rights reserved.
-
- $Id$
-*/
-
-#include "ts_kernel_services.h"
-
-#include <linux/string.h>
-#include <linux/errno.h>
-#include <linux/slab.h>
-#include <linux/init.h>
-
-#include <asm/semaphore.h>
-
-#include "core_priv.h"
-
-static LIST_HEAD(device_list);
-static LIST_HEAD(notifier_list);
-static DECLARE_MUTEX(device_lock);
-
-static int ib_device_check_mandatory(struct ib_device *device)
-{
-#define IB_MANDATORY_FUNC(x) { offsetof(struct ib_device, x), #x }
- static const struct {
- size_t offset;
- char *name;
- } mandatory_table[] = {
- IB_MANDATORY_FUNC(query_device),
- IB_MANDATORY_FUNC(query_port),
- IB_MANDATORY_FUNC(query_pkey),
- IB_MANDATORY_FUNC(query_gid),
- IB_MANDATORY_FUNC(alloc_pd),
- IB_MANDATORY_FUNC(dealloc_pd),
- IB_MANDATORY_FUNC(create_ah),
- IB_MANDATORY_FUNC(destroy_ah),
- IB_MANDATORY_FUNC(create_qp),
- IB_MANDATORY_FUNC(modify_qp),
- IB_MANDATORY_FUNC(destroy_qp),
- IB_MANDATORY_FUNC(post_send),
- IB_MANDATORY_FUNC(post_recv),
- IB_MANDATORY_FUNC(create_cq),
- IB_MANDATORY_FUNC(destroy_cq),
- IB_MANDATORY_FUNC(poll_cq),
- IB_MANDATORY_FUNC(req_notify_cq),
- IB_MANDATORY_FUNC(reg_phys_mr),
- IB_MANDATORY_FUNC(dereg_mr)
- };
- int i;
-
- for (i = 0; i < sizeof mandatory_table / sizeof mandatory_table[0]; ++i) {
- if (!*(void **) ((void *) device + mandatory_table[i].offset)) {
- printk(KERN_WARNING "Device %s is missing mandatory function %s\n",
- device->name, mandatory_table[i].name);
- return -EINVAL;
- }
- }
-
- return 0;
-}
-
-static struct ib_device *__ib_device_get_by_name(const char *name)
-{
- struct ib_device *device;
-
- list_for_each_entry(device, &device_list, core_list)
- if (!strncmp(name, device->name, IB_DEVICE_NAME_MAX))
- return device;
-
- return NULL;
-}
-
-
-static int alloc_name(char *name)
-{
- long *inuse;
- char buf[IB_DEVICE_NAME_MAX];
- struct ib_device *device;
- int i;
-
- inuse = (long *) get_zeroed_page(GFP_KERNEL);
- if (!inuse)
- return -ENOMEM;
-
- list_for_each_entry(device, &device_list, core_list) {
- if (!sscanf(device->name, name, &i))
- continue;
- if (i < 0 || i >= PAGE_SIZE * 8)
- continue;
- snprintf(buf, sizeof buf, name, i);
- if (!strncmp(buf, device->name, IB_DEVICE_NAME_MAX))
- set_bit(i, inuse);
- }
-
- i = find_first_zero_bit(inuse, PAGE_SIZE * 8);
- free_page((unsigned long) inuse);
- snprintf(buf, sizeof buf, name, i);
-
- if (__ib_device_get_by_name(buf))
- return -ENFILE;
-
- strlcpy(name, buf, IB_DEVICE_NAME_MAX);
- return 0;
-}
-
-struct ib_device *ib_alloc_device(size_t size)
-{
- void *dev;
-
- BUG_ON(size < sizeof (struct ib_device));
-
- dev = kmalloc(size, GFP_KERNEL);
- if (!dev)
- return NULL;
-
- memset(dev, 0, size);
-
- return dev;
-}
-EXPORT_SYMBOL(ib_alloc_device);
-
-void ib_dealloc_device(struct ib_device *device)
-{
- if (device->reg_state == IB_DEV_UNINITIALIZED) {
- kfree(device);
- return;
- }
-
- BUG_ON(device->reg_state != IB_DEV_UNREGISTERED);
-
- ib_device_deregister_sysfs(device);
-}
-EXPORT_SYMBOL(ib_dealloc_device);
-
-int ib_register_device(struct ib_device *device)
-{
- struct ib_device_private *priv;
- struct ib_device_attr prop;
- int ret;
- int p;
-
- if (ib_device_check_mandatory(device)) {
- return -EINVAL;
- }
-
- down(&device_lock);
-
- if (strchr(device->name, '%')) {
- ret = alloc_name(device->name);
- if (ret)
- goto out;
- }
-
- priv = kmalloc(sizeof *priv, GFP_KERNEL);
- if (!priv) {
- printk(KERN_WARNING "Couldn't allocate private struct for %s\n",
- device->name);
- ret = -ENOMEM;
- goto out;
- }
-
- *priv = (struct ib_device_private) { 0 };
-
- ret = device->query_device(device, &prop);
- if (ret) {
- printk(KERN_WARNING "query_device failed for %s\n",
- device->name);
- goto out_free;
- }
-
- memcpy(priv->node_guid, &prop.node_guid, sizeof (tTS_IB_GUID));
-
- if (device->node_type == IB_NODE_SWITCH) {
- priv->start_port = priv->end_port = 0;
- } else {
- priv->start_port = 1;
- priv->end_port = prop.phys_port_cnt;
- }
-
- priv->port_data = kmalloc((priv->end_port + 1) * sizeof (struct ib_port_data),
- GFP_KERNEL);
- if (!priv->port_data) {
- printk(KERN_WARNING "Couldn't allocate port info for %s\n",
- device->name);
- goto out_free;
- }
-
- for (p = priv->start_port; p <= priv->end_port; ++p) {
- spin_lock_init(&priv->port_data[p].port_cap_lock);
- memset(priv->port_data[p].port_cap_count, 0, IB_PORT_CAP_NUM * sizeof (int));
- }
-
- device->core = priv;
-
- INIT_LIST_HEAD(&priv->async_handler_list);
- spin_lock_init(&priv->async_handler_lock);
-
- ret = ib_cache_setup(device);
- if (ret) {
- printk(KERN_WARNING "Couldn't create device info cache for %s\n",
- device->name);
- goto out_free_port;
- }
-
- ret = tsKernelQueueThreadStart("ts_ib_async",
- ib_async_thread,
- device,
- &priv->async_thread);
- if (ret) {
- printk(KERN_WARNING "Couldn't start async thread for %s\n",
- device->name);
- goto out_free_cache;
- }
-
- ret = ib_proc_setup(device, device->node_type == IB_NODE_SWITCH);
- if (ret) {
- printk(KERN_WARNING "Couldn't create /proc dir for %s\n",
- device->name);
- goto out_stop_async;
- }
-
- if (ib_device_register_sysfs(device)) {
- printk(KERN_WARNING "Couldn't register device %s with driver model\n",
- device->name);
- goto out_proc;
- }
-
- list_add_tail(&device->core_list, &device_list);
- {
- struct list_head *ptr;
- struct ib_device_notifier *notifier;
-
- list_for_each(ptr, &notifier_list) {
- notifier = list_entry(ptr, struct ib_device_notifier, list);
- notifier->notifier(notifier, device, IB_DEVICE_NOTIFIER_ADD);
- }
- }
-
- device->reg_state = IB_DEV_REGISTERED;
-
- up(&device_lock);
- return 0;
-
- out_proc:
- ib_proc_cleanup(device);
-
- out_stop_async:
- tsKernelQueueThreadStop(priv->async_thread);
-
- out_free_cache:
- ib_cache_cleanup(device);
-
- out_free_port:
- kfree(priv->port_data);
-
- out_free:
- kfree(priv);
-
- out:
- up(&device_lock);
- return ret;
-}
-EXPORT_SYMBOL(ib_register_device);
-
-int ib_deregister_device(struct ib_device *device)
-{
- struct ib_device_private *priv;
-
- priv = device->core;
-
- if (tsKernelQueueThreadStop(priv->async_thread)) {
- printk(KERN_WARNING "tsKernelThreadStop failed for %s async thread\n",
- device->name);
- }
-
- ib_proc_cleanup(device);
- ib_cache_cleanup(device);
-
- down(&device_lock);
- list_del(&device->core_list);
- {
- struct list_head *ptr;
- struct ib_device_notifier *notifier;
-
- list_for_each_prev(ptr, &notifier_list) {
- notifier = list_entry(ptr, struct ib_device_notifier, list);
- notifier->notifier(notifier, device, IB_DEVICE_NOTIFIER_REMOVE);
- }
- }
- up(&device_lock);
-
- kfree(priv->port_data);
- kfree(priv);
-
- device->reg_state = IB_DEV_UNREGISTERED;
-
- return 0;
-}
-EXPORT_SYMBOL(ib_deregister_device);
-
-struct ib_device *ib_device_get_by_name(const char *name)
-{
- struct ib_device *device;
-
- down(&device_lock);
- device = __ib_device_get_by_name(name);
- up(&device_lock);
-
- return device;
-}
-EXPORT_SYMBOL(ib_device_get_by_name);
-
-struct ib_device *ib_device_get_by_index(int index)
-{
- struct list_head *ptr;
- struct ib_device *device;
-
- if (index < 0)
- return NULL;
-
- down(&device_lock);
- list_for_each(ptr, &device_list) {
- device = list_entry(ptr, struct ib_device, core_list);
- if (!index)
- goto out;
- --index;
- }
-
- device = NULL;
- out:
- up(&device_lock);
- return device;
-}
-EXPORT_SYMBOL(ib_device_get_by_index);
-
-int ib_device_notifier_register(struct ib_device_notifier *notifier)
-{
- struct list_head *ptr;
- struct ib_device *device;
-
- down(&device_lock);
- list_add_tail(&notifier->list, &notifier_list);
- list_for_each(ptr, &device_list) {
- device = list_entry(ptr, struct ib_device, core_list);
- notifier->notifier(notifier, device, IB_DEVICE_NOTIFIER_ADD);
- }
- up(&device_lock);
-
- return 0;
-}
-EXPORT_SYMBOL(ib_device_notifier_register);
-
-int ib_device_notifier_deregister(struct ib_device_notifier *notifier)
-{
- down(&device_lock);
- list_del(&notifier->list);
- up(&device_lock);
-
- return 0;
-}
-EXPORT_SYMBOL(ib_device_notifier_deregister);
-
-int ib_query_device(struct ib_device *device,
- struct ib_device_attr *device_attr)
-{
- return device->query_device(device, device_attr);
-}
-EXPORT_SYMBOL(ib_query_device);
-
-int ib_query_port(struct ib_device *device,
- u8 port_num,
- struct ib_port_attr *port_attr)
-{
- return device->query_port(device, port_num, port_attr);
-}
-EXPORT_SYMBOL(ib_query_port);
-
-int ib_query_gid(struct ib_device *device,
- u8 port_num, int index, union ib_gid *gid)
-{
- return device->query_gid(device, port_num, index, gid);
-}
-EXPORT_SYMBOL(ib_query_gid);
-
-int ib_query_pkey(struct ib_device *device,
- u8 port_num, u16 index, u16 *pkey)
-{
- return device->query_pkey(device, port_num, index, pkey);
-}
-EXPORT_SYMBOL(ib_query_pkey);
-
-int ib_modify_device(struct ib_device *device,
- int device_modify_mask,
- struct ib_device_modify *device_modify)
-{
- return device->modify_device(device, device_modify_mask,
- device_modify);
-}
-EXPORT_SYMBOL(ib_modify_device);
-
-int ib_modify_port(struct ib_device *device,
- u8 port_num, int port_modify_mask,
- struct ib_port_modify *port_modify)
-{
- return device->modify_port(device, port_num, port_modify_mask,
- port_modify);
-}
-EXPORT_SYMBOL(ib_modify_port);
-
-/*
- Local Variables:
- c-file-style: "linux"
- indent-tabs-mode: t
- End:
-*/
Index: infiniband/core/mad_static.c
===================================================================
--- infiniband/core/mad_static.c (revision 759)
+++ infiniband/core/mad_static.c (working copy)
@@ -22,7 +22,6 @@
*/
#include "mad_priv.h"
-#include "ts_ib_provider.h"
#include "smp_access.h"
#include "ts_kernel_trace.h"
@@ -167,12 +166,12 @@
{
/* Generate an artificial port error event so that cached info is
updated for this port */
- struct ib_async_event_record record;
+ struct ib_event record;
- record.device = device;
- record.event = IB_PORT_ERROR;
- record.modifier.port = port;
- ib_async_event_dispatch(&record);
+ record.device = device;
+ record.event = IB_EVENT_PORT_ERR;
+ record.element.port_num = port;
+ ib_dispatch_event(&record);
}
}
Index: infiniband/core/ib_verbs.c
===================================================================
--- infiniband/core/ib_verbs.c (revision 759)
+++ infiniband/core/ib_verbs.c (working copy)
@@ -113,12 +113,13 @@
qp = pd->device->create_qp(pd, qp_init_attr, qp_cap);
if (!IS_ERR(qp)) {
- qp->device = pd->device;
- qp->pd = pd;
- qp->send_cq = qp_init_attr->send_cq;
- qp->recv_cq = qp_init_attr->recv_cq;
- qp->srq = qp_init_attr->srq;
- qp->qp_context = qp_init_attr->qp_context;
+ qp->device = pd->device;
+ qp->pd = pd;
+ qp->send_cq = qp_init_attr->send_cq;
+ qp->recv_cq = qp_init_attr->recv_cq;
+ qp->srq = qp_init_attr->srq;
+ qp->event_handler = qp_init_attr->event_handler;
+ qp->qp_context = qp_init_attr->qp_context;
atomic_inc(&pd->usecnt);
atomic_inc(&qp_init_attr->send_cq->usecnt);
atomic_inc(&qp_init_attr->recv_cq->usecnt);
@@ -179,6 +180,7 @@
struct ib_cq *ib_create_cq(struct ib_device *device,
ib_comp_handler comp_handler,
+ void (*event_handler)(struct ib_event *, void *),
void *cq_context, int cqe)
{
struct ib_cq *cq;
@@ -186,9 +188,10 @@
cq = device->create_cq(device, cqe);
if (!IS_ERR(cq)) {
- cq->device = device;
- cq->comp_handler = comp_handler;
- cq->context = cq_context;
+ cq->device = device;
+ cq->comp_handler = comp_handler;
+ cq->event_handler = event_handler;
+ cq->context = cq_context;
atomic_set(&cq->usecnt, 0);
}
Index: infiniband/hw/mthca/mthca_dev.h
===================================================================
--- infiniband/hw/mthca/mthca_dev.h (revision 759)
+++ infiniband/hw/mthca/mthca_dev.h (working copy)
@@ -283,7 +283,7 @@
void mthca_cleanup_mcg_table(struct mthca_dev *dev);
int mthca_register_device(struct mthca_dev *dev);
-void mthca_deregister_device(struct mthca_dev *dev);
+void mthca_unregister_device(struct mthca_dev *dev);
int mthca_pd_alloc(struct mthca_dev *dev, struct mthca_pd *pd);
void mthca_pd_free(struct mthca_dev *dev, struct mthca_pd *pd);
@@ -308,7 +308,7 @@
void mthca_cq_clean(struct mthca_dev *dev, u32 cqn, u32 qpn);
void mthca_qp_event(struct mthca_dev *dev, u32 qpn,
- enum ib_async_event event);
+ enum ib_event_type event_type);
int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
int attr_mask, struct ib_qp_cap *qp_cap);
int mthca_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
Index: infiniband/hw/mthca/mthca_main.c
===================================================================
--- infiniband/hw/mthca/mthca_main.c (revision 759)
+++ infiniband/hw/mthca/mthca_main.c (working copy)
@@ -638,7 +638,7 @@
int p;
if (mdev) {
- mthca_deregister_device(mdev);
+ mthca_unregister_device(mdev);
for (p = 1; p <= mdev->limits.num_ports; ++p)
mthca_CLOSE_IB(mdev, p, &status);
Index: infiniband/hw/mthca/mthca_provider.c
===================================================================
--- infiniband/hw/mthca/mthca_provider.c (revision 759)
+++ infiniband/hw/mthca/mthca_provider.c (working copy)
@@ -560,7 +560,6 @@
dev->ib_dev.owner = THIS_MODULE;
dev->ib_dev.dma_device = dev->pdev;
dev->ib_dev.class_dev.dev = &dev->pdev->dev;
- dev->ib_dev.provider = "mthca";
dev->ib_dev.query_device = mthca_query_device;
dev->ib_dev.query_port = mthca_query_port;
dev->ib_dev.modify_port = mthca_modify_port;
@@ -593,7 +592,7 @@
ret = class_device_create_file(&dev->ib_dev.class_dev,
mthca_class_attributes[i]);
if (ret) {
- ib_deregister_device(&dev->ib_dev);
+ ib_unregister_device(&dev->ib_dev);
return ret;
}
}
@@ -601,9 +600,9 @@
return 0;
}
-void mthca_deregister_device(struct mthca_dev *dev)
+void mthca_unregister_device(struct mthca_dev *dev)
{
- ib_deregister_device(&dev->ib_dev);
+ ib_unregister_device(&dev->ib_dev);
}
/*
Index: infiniband/hw/mthca/mthca_provider.h
===================================================================
--- infiniband/hw/mthca/mthca_provider.h (revision 759)
+++ infiniband/hw/mthca/mthca_provider.h (working copy)
@@ -24,7 +24,6 @@
#ifndef MTHCA_PROVIDER_H
#define MTHCA_PROVIDER_H
-#include <ts_ib_provider.h>
#include <ts_ib_header_types.h>
#define MTHCA_MPT_FLAG_ATOMIC (1 << 14)
Index: infiniband/hw/mthca/mthca_mad.c
===================================================================
--- infiniband/hw/mthca/mthca_mad.c (revision 759)
+++ infiniband/hw/mthca/mthca_mad.c (working copy)
@@ -46,24 +46,24 @@
static void smp_snoop(struct ib_device *ibdev,
struct ib_mad *mad)
{
- struct ib_async_event_record record;
+ struct ib_event event;
if (mad->dqpn == 0 &&
(mad->mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED ||
mad->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) &&
mad->r_method == IB_MGMT_METHOD_SET) {
if (mad->attribute_id == cpu_to_be16(IB_SM_PORT_INFO)) {
- record.device = ibdev;
- record.event = IB_LID_CHANGE;
- record.modifier.port = mad->port;
- ib_async_event_dispatch(&record);
+ event.device = ibdev;
+ event.event = IB_EVENT_LID_CHANGE;
+ event.element.port_num = mad->port;
+ ib_dispatch_event(&event);
}
if (mad->attribute_id == cpu_to_be16(IB_SM_PKEY_TABLE)) {
- record.device = ibdev;
- record.event = IB_PKEY_CHANGE;
- record.modifier.port = mad->port;
- ib_async_event_dispatch(&record);
+ event.device = ibdev;
+ event.event = IB_EVENT_PKEY_CHANGE;
+ event.element.port_num = mad->port;
+ ib_dispatch_event(&event);
}
}
}
Index: infiniband/hw/mthca/mthca_eq.c
===================================================================
--- infiniband/hw/mthca/mthca_eq.c (revision 759)
+++ infiniband/hw/mthca/mthca_eq.c (working copy)
@@ -200,16 +200,16 @@
static void port_change(struct mthca_dev *dev, int port, int active)
{
- struct ib_async_event_record record;
+ struct ib_event record;
mthca_dbg(dev, "Port change to %s for port %d\n",
active ? "active" : "down", port);
record.device = &dev->ib_dev;
- record.event = active ? IB_EVENT_PORT_ACTIVE : IB_PORT_ERROR;
- record.modifier.port = port;
+ record.event = active ? IB_EVENT_PORT_ACTIVE : IB_EVENT_PORT_ERR;
+ record.element.port_num = port;
- ib_async_event_dispatch(&record);
+ ib_dispatch_event(&record);
}
static void mthca_eq_int(struct mthca_dev *dev, struct mthca_eq *eq)
@@ -234,37 +234,37 @@
case MTHCA_EVENT_TYPE_PATH_MIG:
mthca_qp_event(dev, be32_to_cpu(eqe->qp.qpn) & 0xffffff,
- IB_QP_PATH_MIGRATED);
+ IB_EVENT_PATH_MIG);
break;
case MTHCA_EVENT_TYPE_COMM_EST:
mthca_qp_event(dev, be32_to_cpu(eqe->qp.qpn) & 0xffffff,
- IB_QP_COMMUNICATION_ESTABLISHED);
+ IB_EVENT_COMM_EST);
break;
case MTHCA_EVENT_TYPE_SQ_DRAINED:
mthca_qp_event(dev, be32_to_cpu(eqe->qp.qpn) & 0xffffff,
- IB_SEND_QUEUE_DRAINED);
+ IB_EVENT_SQ_DRAINED);
break;
case MTHCA_EVENT_TYPE_WQ_CATAS_ERROR:
mthca_qp_event(dev, be32_to_cpu(eqe->qp.qpn) & 0xffffff,
- IB_LOCAL_WQ_CATASTROPHIC_ERROR);
+ IB_EVENT_QP_FATAL);
break;
case MTHCA_EVENT_TYPE_PATH_MIG_FAILED:
mthca_qp_event(dev, be32_to_cpu(eqe->qp.qpn) & 0xffffff,
- IB_PATH_MIGRATION_ERROR);
+ IB_EVENT_PATH_MIG_ERR);
break;
case MTHCA_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
mthca_qp_event(dev, be32_to_cpu(eqe->qp.qpn) & 0xffffff,
- IB_LOCAL_WQ_INVALID_REQUEST_ERROR);
+ IB_EVENT_QP_REQ_ERR);
break;
case MTHCA_EVENT_TYPE_WQ_ACCESS_ERROR:
mthca_qp_event(dev, be32_to_cpu(eqe->qp.qpn) & 0xffffff,
- IB_LOCAL_WQ_ACCESS_VIOLATION_ERROR);
+ IB_EVENT_QP_ACCESS_ERR);
break;
case MTHCA_EVENT_TYPE_CMD:
Index: infiniband/hw/mthca/mthca_qp.c
===================================================================
--- infiniband/hw/mthca/mthca_qp.c (revision 759)
+++ infiniband/hw/mthca/mthca_qp.c (working copy)
@@ -261,10 +261,10 @@
}
void mthca_qp_event(struct mthca_dev *dev, u32 qpn,
- enum ib_async_event event)
+ enum ib_event_type event_type)
{
struct mthca_qp *qp;
- struct ib_async_event_record event_record;
+ struct ib_event event;
spin_lock(&dev->qp_table.lock);
qp = mthca_array_get(&dev->qp_table.qp, qpn & (dev->limits.num_qps - 1));
@@ -277,10 +277,11 @@
return;
}
- event_record.device = &dev->ib_dev;
- event_record.event = event;
- event_record.modifier.qp = (struct ib_qp *) qp;
- ib_async_event_dispatch(&event_record);
+ event.device = &dev->ib_dev;
+ event.event = event_type;
+ event.element.qp = &qp->ibqp;
+ if (qp->ibqp.event_handler)
+ qp->ibqp.event_handler(&event, qp->ibqp.qp_context);
if (atomic_dec_and_test(&qp->refcount))
wake_up(&qp->wait);
More information about the general
mailing list