[openib-general] [PATCH] ib_smi: First working version of SMI/SMA
Hal Rosenstock
halr at voltaire.com
Tue Oct 5 17:18:11 PDT 2004
ib_smi: First working version of SMI/SMA (port gets to active)
There is a workaround for the hop pointer in the response, which I will
work on tomorrow.
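
For reference, the checks in this patch implement the directed-route
hop pointer rules from the IBA spec (C14-9 for outgoing SMPs, C14-13
for returning ones). Below is a minimal userspace sketch of the
outgoing check only, with simplified stand-in types -- the struct and
constants here are abbreviations for illustration, not the real
<ib_smi.h> declarations:

#include <stdio.h>

/* Simplified stand-ins for the real IB definitions (illustration only) */
enum { IB_NODE_CA = 1, IB_NODE_SWITCH = 2 };
#define IB_LID_PERMISSIVE 0xFFFF

struct smp {
	unsigned char hop_ptr, hop_cnt;
	unsigned char initial_path[64];
	unsigned short dr_dlid;
};

/* Outgoing DR SMP check (C14-9:1 .. C14-9:3); returns 0 = discard */
static int dr_smp_send_ok(struct smp *smp, int node_type, int port_num)
{
	/* C14-9:1 -- first hop: advance hop_ptr, check the egress port */
	if (smp->hop_cnt && smp->hop_ptr == 0) {
		smp->hop_ptr++;
		return smp->initial_path[smp->hop_ptr] == port_num;
	}
	/* C14-9:2 -- intermediate hop: only a switch may forward */
	if (smp->hop_ptr && smp->hop_ptr < smp->hop_cnt) {
		if (node_type != IB_NODE_SWITCH)
			return 0;
		smp->hop_ptr++;
		return smp->initial_path[smp->hop_ptr] == port_num;
	}
	/* C14-9:3 -- end of the DR segment of the path */
	if (smp->hop_ptr == smp->hop_cnt) {
		smp->hop_ptr++;
		return node_type != IB_NODE_CA ||
		       smp->dr_dlid == IB_LID_PERMISSIVE;
	}
	return 0;
}

int main(void)
{
	struct smp s = { .hop_ptr = 0, .hop_cnt = 2,
			 .initial_path = { 0, 1, 3 },
			 .dr_dlid = IB_LID_PERMISSIVE };

	/* first hop of a two-hop path, leaving through port 1 of a CA */
	printf("send ok: %d\n", dr_smp_send_ok(&s, IB_NODE_CA, 1));
	return 0;
}

Note that hop_ptr is advanced before the port comparison: initial_path
is indexed by the hop being taken, so the increment selects the entry
for the egress port of the current node.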
Index: ib_smi.c
===================================================================
--- ib_smi.c (revision 923)
+++ ib_smi.c (working copy)
@@ -24,13 +24,24 @@
*/
#include <ib_smi.h>
+#include "ib_smi_priv.h"
#include "ib_mad_priv.h"
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DESCRIPTION("kernel IB SMI");
+MODULE_AUTHOR("Sean Hefty");
+MODULE_AUTHOR("Hal Rosenstock");
+
+
+static spinlock_t ib_smi_port_list_lock = SPIN_LOCK_UNLOCKED;
+static struct list_head ib_smi_port_list;
+
/*
* Fixup a directed route SMP for sending. Return 0 if the SMP should be
* discarded.
*/
-static int smi_handle_dr_smp_send(struct ib_mad_port_private *port_priv,
+static int smi_handle_dr_smp_send(struct ib_mad_agent *mad_agent,
struct ib_smp *smp)
{
u8 hop_ptr, hop_cnt;
@@ -44,25 +55,25 @@
if (hop_cnt && hop_ptr == 0) {
smp->hop_ptr++;
return (smp->initial_path[smp->hop_ptr] ==
- port_priv->port_num);
+ mad_agent->port_num);
}
/* C14-9:2 */
if (hop_ptr && hop_ptr < hop_cnt) {
- if (port_priv->device->node_type != IB_NODE_SWITCH)
+ if (mad_agent->device->node_type != IB_NODE_SWITCH)
return 0;
/* smp->return_path set when received */
smp->hop_ptr++;
return (smp->initial_path[smp->hop_ptr] ==
- port_priv->port_num);
+ mad_agent->port_num);
}
/* C14-9:3 -- We're at the end of the DR segment of path */
if (hop_ptr == hop_cnt) {
/* smp->return_path set when received */
smp->hop_ptr++;
- return (port_priv->device->node_type != IB_NODE_CA ||
+ return (mad_agent->device->node_type != IB_NODE_CA ||
smp->dr_dlid == IB_LID_PERMISSIVE);
}
@@ -75,24 +86,24 @@
if (hop_cnt && hop_ptr == hop_cnt + 1) {
smp->hop_ptr--;
return (smp->return_path[smp->hop_ptr] ==
- port_priv->port_num);
+ mad_agent->port_num);
}
/* C14-13:2 */
if (2 <= hop_ptr && hop_ptr <= hop_cnt) {
- if (port_priv->device->node_type != IB_NODE_SWITCH)
+ if (mad_agent->device->node_type != IB_NODE_SWITCH)
return 0;
smp->hop_ptr--;
return (smp->return_path[smp->hop_ptr] ==
- port_priv->port_num);
+ mad_agent->port_num);
}
/* C14-13:3 -- at the end of the DR segment of path */
if (hop_ptr == 1) {
smp->hop_ptr--;
/* C14-13:3 -- SMPs destined for SM shouldn't be here */
- return (port_priv->device->node_type == IB_NODE_SWITCH &&
+ return (mad_agent->device->node_type == IB_NODE_SWITCH &&
smp->dr_slid != IB_LID_PERMISSIVE);
}
@@ -106,13 +117,13 @@
* Sender side handling of outgoing SMPs. Fixup the SMP as required by
* the spec. Return 0 if the SMP should be dropped.
*/
-static int smi_handle_smp_send(struct ib_mad_port_private *port_priv,
+static int smi_handle_smp_send(struct ib_mad_agent *mad_agent,
struct ib_smp *smp)
{
switch (smp->mgmt_class)
{
case IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE:
- return smi_handle_dr_smp_send(port_priv, smp);
+ return smi_handle_dr_smp_send(mad_agent, smp);
default:
return 0; /* write me... */
}
@@ -121,12 +132,12 @@
/*
* Return 1 if the SMP should be handled by the local SMA via process_mad.
*/
-static inline int smi_check_local_smp(struct ib_mad_port_private *port_priv,
+static inline int smi_check_local_smp(struct ib_mad_agent *mad_agent,
struct ib_smp *smp)
{
/* C14-9:3 -- We're at the end of the DR segment of path */
/* C14-9:4 -- Hop Pointer = Hop Count + 1 -> give to SMA/SM. */
- return (port_priv->device->process_mad &&
+ return (mad_agent->device->process_mad &&
!ib_get_smp_direction(smp) &&
(smp->hop_ptr == smp->hop_cnt + 1));
}
@@ -135,7 +146,7 @@
* Adjust information for a received SMP. Return 0 if the SMP should be
* dropped.
*/
-static int smi_handle_dr_smp_recv(struct ib_mad_port_private *port_priv,
+static int smi_handle_dr_smp_recv(struct ib_mad_agent *mad_agent,
struct ib_smp *smp)
{
u8 hop_ptr, hop_cnt;
@@ -151,22 +162,22 @@
/* C14-9:2 -- intermediate hop */
if (hop_ptr && hop_ptr < hop_cnt) {
- if (port_priv->device->node_type != IB_NODE_SWITCH)
+ if (mad_agent->device->node_type != IB_NODE_SWITCH)
return 0;
- smp->return_path[hop_ptr] = port_priv->port_num;
+ smp->return_path[hop_ptr] = mad_agent->port_num;
/* smp->hop_ptr updated when sending */
return 1; /*(smp->initial_path[hop_ptr+1] <=
- port_priv->device->phys_port_cnt); */
+ mad_agent->device->phys_port_cnt); */
}
/* C14-9:3 -- We're at the end of the DR segment of path */
if (hop_ptr == hop_cnt) {
if (hop_cnt)
- smp->return_path[hop_ptr] = port_priv->port_num;
+ smp->return_path[hop_ptr] = mad_agent->port_num;
/* smp->hop_ptr updated when sending */
- return (port_priv->device->node_type != IB_NODE_CA ||
+ return (mad_agent->device->node_type != IB_NODE_CA ||
smp->dr_dlid == IB_LID_PERMISSIVE);
}
@@ -182,12 +193,12 @@
/* C14-13:2 */
if (2 <= hop_ptr && hop_ptr <= hop_cnt) {
- if (port_priv->device->node_type != IB_NODE_SWITCH)
+ if (mad_agent->device->node_type != IB_NODE_SWITCH)
return 0;
/* smp->hop_ptr updated when sending */
return 1; /*(smp->return_path[hop_ptr-1] <=
- port_priv->device->phys_port_cnt); */
+ mad_agent->device->phys_port_cnt); */
}
/* C14-13:3 -- We're at the end of the DR segment of path */
@@ -198,7 +209,7 @@
return 1;
}
/* smp->hop_ptr updated when sending */
- return (port_priv->device->node_type != IB_NODE_CA);
+ return (mad_agent->device->node_type != IB_NODE_CA);
}
/* C14-13:4 -- hop_ptr = 0 -> give to SM. */
@@ -211,13 +222,13 @@
* Receive side handling SMPs. Save receive information as required by
* the spec. Return 0 if the SMP should be dropped.
*/
-static int smi_handle_smp_recv(struct ib_mad_port_private *port_priv,
+static int smi_handle_smp_recv(struct ib_mad_agent *mad_agent,
struct ib_smp *smp)
{
switch (smp->mgmt_class)
{
case IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE:
- return smi_handle_dr_smp_recv(port_priv, smp);
+ return smi_handle_dr_smp_recv(mad_agent, smp);
default:
return 0; /* write me... */
}
@@ -227,7 +238,7 @@
* Return 1 if the received DR SMP should be forwarded to the send queue.
* Return 0 if the SMP should be completed up the stack.
*/
-static int smi_check_forward_dr_smp(struct ib_mad_port_private *port_priv,
+static int smi_check_forward_dr_smp(struct ib_mad_agent *mad_agent,
struct ib_smp *smp)
{
u8 hop_ptr, hop_cnt;
@@ -263,56 +274,426 @@
* Return 1 if the received SMP should be forwarded to the send queue.
* Return 0 if the SMP should be completed up the stack.
*/
-static int smi_check_forward_smp(struct ib_mad_port_private *port_priv,
+static int smi_check_forward_smp(struct ib_mad_agent *mad_agent,
struct ib_smp *smp)
{
switch (smp->mgmt_class)
{
case IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE:
- return smi_check_forward_dr_smp(port_priv, smp);
+ return smi_check_forward_dr_smp(mad_agent, smp);
default:
return 0; /* write me... */
}
}
-/*
-static int smi_process_local(struct ib_mad_port_private *port_priv,
- struct ib_smp *smp)
+static int smi_process_local(struct ib_mad_agent *mad_agent,
+ struct ib_mad *smp,
+ struct ib_mad *smp_response,
+ u16 slid)
{
- port_priv->device->process_mad( ... );
+ return mad_agent->device->process_mad(mad_agent->device, 0,
+ mad_agent->port_num,
+ slid, smp, smp_response);
}
-int smi_send_smp(struct ib_mad_port_private *port_priv,
- struct ib_smp *smp)
+void smp_send(struct ib_mad_agent *mad_agent,
+ struct ib_mad *smp,
+ struct ib_mad_recv_wc *mad_recv_wc)
{
- if (!smi_handle_smp_send(port_priv, smp)) {
- smi_fail_send()
- return 0;
+ struct ib_smi_port_private *entry, *port_priv = NULL;
+ struct ib_smi_send_wr *smi_send_wr;
+ struct ib_sge gather_list;
+ struct ib_send_wr send_wr;
+ struct ib_send_wr *bad_send_wr;
+ struct ib_ah_attr ah_attr;
+ struct ib_ah *ah;
+ unsigned long flags;
+
+ /* Find matching MAD agent */
+ spin_lock_irqsave(&ib_smi_port_list_lock, flags);
+ list_for_each_entry(entry, &ib_smi_port_list, port_list) {
+ if (entry->mad_agent == mad_agent) {
+ port_priv = entry;
+ break;
+ }
}
+ spin_unlock_irqrestore(&ib_smi_port_list_lock, flags);
+ if (!port_priv) {
+ printk(KERN_ERR "smp_send: no matching MAD agent 0x%x\n", mad_agent);
+ return;
+ }
- if (smi_check_local_smp(port_priv, smp)) {
- smi_process_local(port_priv, smp);
+ smi_send_wr = kmalloc(sizeof(*smi_send_wr), GFP_KERNEL);
+ if (!smi_send_wr) {
+ kfree(smp);
+ return;
+ }
+ smi_send_wr->smp = smp;
+
+ /* PCI mapping */
+ gather_list.addr = pci_map_single(mad_agent->device->dma_device,
+ smp,
+ sizeof(struct ib_mad),
+ PCI_DMA_TODEVICE);
+ gather_list.length = sizeof(struct ib_mad);
+ gather_list.lkey = port_priv->mr->lkey;
+
+ send_wr.next = NULL;
+ send_wr.opcode = IB_WR_SEND;
+ send_wr.sg_list = &gather_list;
+ send_wr.num_sge = 1;
+ send_wr.wr.ud.remote_qpn = mad_recv_wc->wc->src_qp; /* DQPN */
+ send_wr.wr.ud.timeout_ms = 0;
+ send_wr.wr.ud.pkey_index = 0; /* Should only matter for GMPs */
+ send_wr.send_flags = IB_SEND_SIGNALED | IB_SEND_SOLICITED;
+
+ ah_attr.dlid = mad_recv_wc->wc->slid;
+ ah_attr.port_num = mad_agent->port_num;
+ ah_attr.src_path_bits = mad_recv_wc->wc->dlid_path_bits;
+ ah_attr.ah_flags = 0; /* No GRH */
+ ah_attr.sl = mad_recv_wc->wc->sl;
+ ah_attr.static_rate = 0;
+
+ ah = ib_create_ah(mad_agent->qp->pd, &ah_attr);
+ if (IS_ERR(ah)) {
+ printk(KERN_ERR "Couldn't create address handle\n");
+ pci_unmap_single(mad_agent->device->dma_device,
+ gather_list.addr,
+ sizeof(struct ib_mad),
+ PCI_DMA_TODEVICE);
+ kfree(smi_send_wr);
+ kfree(smp);
+ return;
+ }
+
+ send_wr.wr.ud.ah = ah;
+ send_wr.wr.ud.remote_qkey = 0; /* for SMPs */
+ send_wr.wr_id = ++port_priv->wr_id;
+
+ pci_unmap_addr_set(smp, mapping, gather_list.addr);
+
+ /* Send */
+ spin_lock_irqsave(&port_priv->send_list_lock, flags);
+ if (ib_post_send_mad(mad_agent, &send_wr, &bad_send_wr)) {
+ pci_unmap_single(mad_agent->device->dma_device,
+ pci_unmap_addr(smp, mapping),
+ sizeof(struct ib_mad),
+ PCI_DMA_TODEVICE);
+ kfree(smi_send_wr);
+ kfree(smp);
+ } else {
+ list_add_tail(&smi_send_wr->send_list,
+ &port_priv->send_posted_smp_list);
+ }
+ spin_unlock_irqrestore(&port_priv->send_list_lock, flags);
+ ib_destroy_ah(ah);
+}
+
+int smi_send_smp(struct ib_mad_agent *mad_agent,
+ struct ib_smp *smp,
+ struct ib_mad_recv_wc *mad_recv_wc,
+ u16 slid)
+{
+ struct ib_mad *smp_response;
+ int ret;
+
+ if (!smi_handle_smp_send(mad_agent, smp)) {
return 0;
}
- * Post the send on the QP *
+ if (smi_check_local_smp(mad_agent, smp)) {
+ smp_response = kmalloc(sizeof(struct ib_mad), GFP_KERNEL);
+ if (!smp_response)
+ return 0;
+
+ ret = smi_process_local(mad_agent, (struct ib_mad *)smp,
+ smp_response, slid);
+ if (ret & IB_MAD_RESULT_SUCCESS) {
+ /* Workaround !!! */
+ ((struct ib_smp *)smp_response)->hop_ptr--;
+ smp_send(mad_agent, smp_response, mad_recv_wc);
+ } else
+ kfree(smp_response);
+ return 1;
+ }
+
+ /* Post the send on the QP */
return 1;
}
-int smi_recv_smp(struct ib_mad_port_private *port_priv,
- struct ib_smp *smp)
+int smi_recv_smp(struct ib_mad_agent *mad_agent,
+ struct ib_smp *smp,
+ struct ib_mad_recv_wc *mad_recv_wc)
{
- if (!smi_handle_smp_recv(port_priv, smp)) {
- smi_fail_recv();
+ if (!smi_handle_smp_recv(mad_agent, smp)) {
return 0;
}
- if (smi_check_forward_smp(port_priv, smp)) {
- smi_send_smp(port_priv, smp);
+ if (smi_check_forward_smp(mad_agent, smp)) {
+ smi_send_smp(mad_agent, smp, mad_recv_wc, mad_recv_wc->wc->slid);
return 0;
}
- * Complete receive up stack *
+ /* Complete receive up stack */
return 1;
}
-*/
+
+static void smi_send_handler(struct ib_mad_agent *mad_agent,
+ struct ib_mad_send_wc *mad_send_wc)
+{
+ struct ib_smi_port_private *entry, *port_priv = NULL;
+ struct ib_smi_send_wr *smi_send_wr;
+ unsigned long flags;
+
+ /* Find matching MAD agent */
+ spin_lock_irqsave(&ib_smi_port_list_lock, flags);
+ list_for_each_entry(entry, &ib_smi_port_list, port_list) {
+ if (entry->mad_agent == mad_agent) {
+ port_priv = entry;
+ break;
+ }
+ }
+ /* Hold lock longer !!! */
+ spin_unlock_irqrestore(&ib_smi_port_list_lock, flags);
+ if (!port_priv) {
+ printk(KERN_ERR "smi_send_handler: no matching MAD agent 0x%x\n",
mad_agent);
+ return;
+ }
+
+ /* Completion corresponds to first entry on posted MAD send list */
+ spin_lock_irqsave(&port_priv->send_list_lock, flags);
+ if (list_empty(&port_priv->send_posted_smp_list)) {
+ spin_unlock_irqrestore(&port_priv->send_list_lock, flags);
+ printk(KERN_ERR "Send completion WR ID 0x%Lx but send list "
+ "is empty\n", mad_send_wc->wr_id);
+ return;
+ }
+
+ smi_send_wr = list_entry(port_priv->send_posted_smp_list.next,
+ struct ib_smi_send_wr, send_list);
+
+ /* Remove from posted send SMP list */
+ list_del(&smi_send_wr->send_list);
+ spin_unlock_irqrestore(&port_priv->send_list_lock, flags);
+
+ /* Unmap PCI */
+ pci_unmap_single(mad_agent->device->dma_device,
+ pci_unmap_addr(smi_send_wr->smp, mapping),
+ sizeof(struct ib_mad),
+ PCI_DMA_TODEVICE);
+
+ /* Release allocated memory */
+ kfree(smi_send_wr->smp);
+}
+
+static void smi_recv_handler(struct ib_mad_agent *mad_agent,
+ struct ib_mad_recv_wc *mad_recv_wc)
+{
+ smi_recv_smp(mad_agent,
+ (struct ib_smp *)mad_recv_wc->recv_buf->mad,
+ mad_recv_wc);
+
+ /* Free received MAD */
+ ib_free_recv_mad(mad_recv_wc);
+}
+
+static int ib_smi_port_open(struct ib_device *device, int port_num)
+{
+ int ret;
+ u64 iova = 0;
+ struct ib_phys_buf buf_list = {
+ .addr = 0,
+ .size = (unsigned long) high_memory - PAGE_OFFSET
+ };
+ struct ib_smi_port_private *entry, *port_priv = NULL;
+ struct ib_mad_reg_req reg_req;
+ unsigned long flags;
+
+ /* First, check if port already open for SMI */
+ spin_lock_irqsave(&ib_smi_port_list_lock, flags);
+ list_for_each_entry(entry, &ib_smi_port_list, port_list) {
+ if (entry->mad_agent->device == device &&
+ entry->port_num == port_num) {
+ port_priv = entry;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&ib_smi_port_list_lock, flags);
+ if (port_priv) {
+ printk(KERN_DEBUG "%s port %d already open\n",
+ device->name, port_num);
+ return 0;
+ }
+
+ /* Create new device info */
+ port_priv = kmalloc(sizeof *port_priv, GFP_KERNEL);
+ if (!port_priv) {
+ printk(KERN_ERR "No memory for ib_smi_port_private\n");
+ return -ENOMEM;
+ }
+
+ memset(port_priv, 0, sizeof *port_priv);
+ port_priv->port_num = port_num;
+ port_priv->wr_id = 0;
+ spin_lock_init(&port_priv->send_list_lock);
+ INIT_LIST_HEAD(&port_priv->send_posted_smp_list);
+
+ reg_req.mgmt_class = IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE;
+ reg_req.mgmt_class_version = 1;
+ /* All methods for now, even though only some are used by the SMA !!! */
+ bitmap_fill(&reg_req.method_mask, IB_MGMT_MAX_METHODS);
+
+ port_priv->mad_agent = ib_register_mad_agent(device, port_num,
+ IB_QPT_SMI,
+ &reg_req, 0,
+ &smi_send_handler,
+ &smi_recv_handler,
+ NULL);
+ if (IS_ERR(port_priv->mad_agent)) {
+ ret = PTR_ERR(port_priv->mad_agent);
+ kfree(port_priv);
+ return ret;
+ }
+
+ port_priv->mr = ib_reg_phys_mr(port_priv->mad_agent->qp->pd,
+ &buf_list, 1,
+ IB_ACCESS_LOCAL_WRITE, &iova);
+ if (IS_ERR(port_priv->mr)) {
+ printk(KERN_ERR "Couldn't register MR\n");
+ ib_unregister_mad_agent(port_priv->mad_agent);
+ ret = PTR_ERR(port_priv->mr);
+ kfree(port_priv);
+ return ret;
+ }
+
+ spin_lock_irqsave(&ib_smi_port_list_lock, flags);
+ list_add_tail(&port_priv->port_list, &ib_smi_port_list);
+ spin_unlock_irqrestore(&ib_smi_port_list_lock, flags);
+
+ return 0;
+}
+
+static int ib_smi_port_close(struct ib_device *device, int port_num)
+{
+ struct ib_smi_port_private *entry, *port_priv = NULL;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ib_smi_port_list_lock, flags);
+ list_for_each_entry(entry, &ib_smi_port_list, port_list) {
+ if (entry->mad_agent->device == device &&
+ entry->port_num == port_num) {
+ port_priv = entry;
+ break;
+ }
+ }
+
+ if (port_priv == NULL) {
+ printk(KERN_ERR "Port %d not found\n", port_num);
+ spin_unlock_irqrestore(&ib_smi_port_list_lock, flags);
+ return -ENODEV;
+ }
+
+ list_del(&port_priv->port_list);
+ spin_unlock_irqrestore(&ib_smi_port_list_lock, flags);
+
+ ib_dereg_mr(port_priv->mr);
+ ib_unregister_mad_agent(port_priv->mad_agent);
+ kfree(port_priv);
+
+ return 0;
+}
+
+static void ib_smi_init_device(struct ib_device *device)
+{
+ int ret, num_ports, cur_port, i, ret2;
+ struct ib_device_attr device_attr;
+
+ ret = ib_query_device(device, &device_attr);
+ if (ret) {
+ printk(KERN_ERR "Couldn't query device %s\n", device->name);
+ goto error_device_query;
+ }
+
+ if (device->node_type == IB_NODE_SWITCH) {
+ num_ports = 1;
+ cur_port = 0;
+ } else {
+ num_ports = device_attr.phys_port_cnt;
+ cur_port = 1;
+ }
+
+ for (i = 0; i < num_ports; i++, cur_port++) {
+ ret = ib_smi_port_open(device, cur_port);
+ if (ret) {
+ printk(KERN_ERR "Couldn't open %s port %d\n",
+ device->name, cur_port);
+ goto error_device_open;
+ }
+ }
+
+ return;
+
+error_device_open:
+ while (i > 0) {
+ cur_port--;
+ ret2 = ib_smi_port_close(device, cur_port);
+ if (ret2) {
+ printk(KERN_ERR "Couldn't close %s port %d\n",
+ device->name, cur_port);
+ }
+ i--;
+ }
+
+error_device_query:
+ return;
+}
+
+static void ib_smi_remove_device(struct ib_device *device)
+{
+ int ret, i, num_ports, cur_port, ret2;
+ struct ib_device_attr device_attr;
+
+ ret = ib_query_device(device, &device_attr);
+ if (ret) {
+ printk(KERN_ERR "Couldn't query device %s\n", device->name);
+ goto error_device_query;
+ }
+
+ if (device->node_type == IB_NODE_SWITCH) {
+ num_ports = 1;
+ cur_port = 0;
+ } else {
+ num_ports = device_attr.phys_port_cnt;
+ cur_port = 1;
+ }
+ for (i = 0; i < num_ports; i++, cur_port++) {
+ ret2 = ib_smi_port_close(device, cur_port);
+ if (ret2) {
+ printk(KERN_ERR "Couldn't close %s port %d\n",
+ device->name, cur_port);
+ if (!ret)
+ ret = ret2;
+ }
+ }
+
+error_device_query:
+ return;
+}
+
+static struct ib_client ib_smi_client = {
+ .name = "ib_smi",
+ .add = ib_smi_init_device,
+ .remove = ib_smi_remove_device
+};
+
+static int __init ib_smi_init(void)
+{
+ INIT_LIST_HEAD(&ib_smi_port_list);
+ if (ib_register_client(&ib_smi_client)) {
+ printk(KERN_ERR "Couldn't register ib_smi client\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void __exit ib_smi_exit(void)
+{
+ ib_unregister_client(&ib_smi_client);
+}
+
+module_init(ib_smi_init);
+module_exit(ib_smi_exit);
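
A note on the local-processing path above: when smi_check_local_smp
sees a DR SMP that has reached the end of its path, smi_send_smp hands
it to the device's process_mad hook and, if the result has
IB_MAD_RESULT_SUCCESS set, turns the response around via smp_send.
That dispatch is just an indirect call through the device structure; a
simplified userspace sketch, with hypothetical demo types standing in
for the real ib_device and ib_mad:

#include <stdio.h>
#include <string.h>

struct mad { unsigned char data[256]; };

/* stand-in for ib_device and its process_mad hook (demo only) */
struct device {
	const char *name;
	int (*process_mad)(struct device *dev, int port, unsigned short slid,
			   const struct mad *in, struct mad *out);
};

static int demo_process_mad(struct device *dev, int port, unsigned short slid,
			    const struct mad *in, struct mad *out)
{
	memcpy(out, in, sizeof(*out));	/* echo the request as a dummy reply */
	printf("%s port %d: handled SMP from slid 0x%x\n",
	       dev->name, port, slid);
	return 1;			/* nonzero == success, reply present */
}

int main(void)
{
	struct device dev = { "demo_hca", demo_process_mad };
	struct mad req = { { 0 } }, rsp;

	/* the SMA path: give the terminal SMP to the device, send the reply */
	if (dev.process_mad(&dev, 1, 0xffff, &req, &rsp))
		printf("reply ready to send on the reverse path\n");
	return 0;
}
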
Index: ib_smi_priv.h
===================================================================
--- ib_smi_priv.h (revision 930)
+++ ib_smi_priv.h (working copy)
@@ -23,8 +23,15 @@
Copyright (c) 2004 Voltaire Corporation. All rights reserved.
*/
+struct ib_smi_send_wr {
+ struct list_head send_list;
+ struct ib_mad *smp;
+};
+
struct ib_smi_port_private {
struct list_head port_list;
+ struct list_head send_posted_smp_list;
+ spinlock_t send_list_lock;
int port_num;
struct ib_mad_agent *mad_agent;
struct ib_mr *mr;
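
The two fields added above exist so that the send completion handler
can find what it posted: each ib_smi_send_wr is queued FIFO on
send_posted_smp_list under send_list_lock, and smi_send_handler pops
the head, unmaps it, and frees it. This works because a QP's send
queue completes work requests in posting order. A self-contained
sketch of that intrusive-list discipline, with the list helpers as
simplified stand-ins for the kernel's list.h:

#include <stdio.h>
#include <stddef.h>

/* minimal stand-ins for the kernel's struct list_head helpers */
struct list_head { struct list_head *next, *prev; };

static void list_init(struct list_head *h) { h->next = h->prev = h; }
static int list_empty(struct list_head *h) { return h->next == h; }

static void list_add_tail(struct list_head *n, struct list_head *h)
{
	n->prev = h->prev; n->next = h;
	h->prev->next = n; h->prev = n;
}

static void list_del(struct list_head *n)
{
	n->prev->next = n->next;
	n->next->prev = n->prev;
}

#define list_entry(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct send_wr { struct list_head send_list; int wr_id; };

int main(void)
{
	struct list_head posted;
	struct send_wr a = { .wr_id = 1 }, b = { .wr_id = 2 };

	list_init(&posted);
	list_add_tail(&a.send_list, &posted);	/* posted first */
	list_add_tail(&b.send_list, &posted);	/* posted second */

	/* completion handler: match the oldest posted WR (the list head) */
	while (!list_empty(&posted)) {
		struct send_wr *wr = list_entry(posted.next,
						struct send_wr, send_list);
		list_del(&wr->send_list);
		printf("completed wr_id %d\n", wr->wr_id);
	}
	return 0;
}
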
Index: Makefile
===================================================================
--- Makefile (revision 923)
+++ Makefile (working copy)
@@ -1,9 +1,11 @@
EXTRA_CFLAGS += -I. -Idrivers/infiniband/include
obj-$(CONFIG_INFINIBAND_ACCESS_LAYER) += \
- ib_al.o
+ ib_al.o \
+ ib_sma.o
ib_al-objs := \
- ib_mad.o \
+ ib_mad.o
+
+ib_sma-objs := \
ib_smi.o
-