[openib-general] [PATCH] ib_smi: Changes to make ib_sma into ib_agt module since this supports PMA as well as SMA currently
Hal Rosenstock
halr at voltaire.com
Sun Oct 24 11:45:40 PDT 2004
ib_smi: Changes to make ib_sma into an ib_agt module, since it currently supports
the PMA as well as the SMA
Index: ib_smi.c
===================================================================
--- ib_smi.c (revision 1037)
+++ ib_smi.c (working copy)
@@ -1,865 +0,0 @@
-/*
- This software is available to you under a choice of one of two
- licenses. You may choose to be licensed under the terms of the GNU
- General Public License (GPL) Version 2, available at
- <http://www.fsf.org/copyleft/gpl.html>, or the OpenIB.org BSD
- license, available in the LICENSE.TXT file accompanying this
- software. These details are also available at
- <http://openib.org/license.html>.
-
- THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- SOFTWARE.
-
- Copyright (c) 2004 Mellanox Technologies Ltd. All rights reserved.
- Copyright (c) 2004 Infinicon Corporation. All rights reserved.
- Copyright (c) 2004 Intel Corporation. All rights reserved.
- Copyright (c) 2004 Topspin Corporation. All rights reserved.
- Copyright (c) 2004 Voltaire Corporation. All rights reserved.
-*/
-
-#include <ib_smi.h>
-#include "ib_smi_priv.h"
-#include "ib_mad_priv.h"
-
-
-MODULE_LICENSE("Dual BSD/GPL");
-MODULE_DESCRIPTION("kernel IB agents (SMA and PMA)");
-MODULE_AUTHOR("Sean Hefty");
-MODULE_AUTHOR("Hal Rosenstock");
-
-
-static spinlock_t ib_agent_port_list_lock = SPIN_LOCK_UNLOCKED;
-static struct list_head ib_agent_port_list;
-
-/*
- * Fixup a directed route SMP for sending. Return 0 if the SMP should be
- * discarded.
- */
-static int smi_handle_dr_smp_send(struct ib_smp *smp,
- u8 node_type,
- int port_num)
-{
- u8 hop_ptr, hop_cnt;
-
- hop_ptr = smp->hop_ptr;
- hop_cnt = smp->hop_cnt;
-
- /* See section 14.2.2.2, Vol 1 IB spec */
- if (!ib_get_smp_direction(smp)) {
- /* C14-9:1 */
- if (hop_cnt && hop_ptr == 0) {
- smp->hop_ptr++;
- return (smp->initial_path[smp->hop_ptr] ==
- port_num);
- }
-
- /* C14-9:2 */
- if (hop_ptr && hop_ptr < hop_cnt) {
- if (node_type != IB_NODE_SWITCH)
- return 0;
-
- /* smp->return_path set when received */
- smp->hop_ptr++;
- return (smp->initial_path[smp->hop_ptr] ==
- port_num);
- }
-
- /* C14-9:3 -- We're at the end of the DR segment of path */
- if (hop_ptr == hop_cnt) {
- /* smp->return_path set when received */
- smp->hop_ptr++;
- return (node_type == IB_NODE_SWITCH ||
- smp->dr_dlid == IB_LID_PERMISSIVE);
- }
-
- /* C14-9:4 -- hop_ptr = hop_cnt + 1 -> give to SMA/SM. */
- /* C14-9:5 -- Fail unreasonable hop pointer. */
- return (hop_ptr == hop_cnt + 1);
-
- } else {
- /* C14-13:1 */
- if (hop_cnt && hop_ptr == hop_cnt + 1) {
- smp->hop_ptr--;
- return (smp->return_path[smp->hop_ptr] ==
- port_num);
- }
-
- /* C14-13:2 */
- if (2 <= hop_ptr && hop_ptr <= hop_cnt) {
- if (node_type != IB_NODE_SWITCH)
- return 0;
-
- smp->hop_ptr--;
- return (smp->return_path[smp->hop_ptr] ==
- port_num);
- }
-
- /* C14-13:3 -- at the end of the DR segment of path */
- if (hop_ptr == 1) {
- smp->hop_ptr--;
- /* C14-13:3 -- SMPs destined for SM shouldn't be here */
- return (node_type == IB_NODE_SWITCH ||
- smp->dr_slid == IB_LID_PERMISSIVE);
- }
-
- /* C14-13:4 -- hop_ptr = 0 -> should have gone to SM. */
- /* C14-13:5 -- Check for unreasonable hop pointer. */
- return 0;
- }
-}
-
-/*
- * Sender side handling of outgoing SMPs. Fixup the SMP as required by
- * the spec. Return 0 if the SMP should be dropped.
- */
-static int smi_handle_smp_send(struct ib_smp *smp,
- u8 node_type,
- int port_num)
-{
- switch (smp->mgmt_class)
- {
- case IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE:
- return smi_handle_dr_smp_send(smp, node_type, port_num);
- default: /* LR SM class */
- return 1;
- }
-}
-
-/*
- * Return 1 if the SMP should be handled by the local SMA via process_mad.
- */
-static inline int smi_check_local_smp(struct ib_mad_agent *mad_agent,
- struct ib_smp *smp)
-{
- /* C14-9:3 -- We're at the end of the DR segment of path */
- /* C14-9:4 -- Hop Pointer = Hop Count + 1 -> give to SMA/SM. */
- return ((smp->mgmt_class != IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) ||
- (mad_agent->device->process_mad &&
- !ib_get_smp_direction(smp) &&
- (smp->hop_ptr == smp->hop_cnt + 1)));
-}
-
-/*
- * Adjust information for a received SMP. Return 0 if the SMP should be
- * dropped.
- */
-static int smi_handle_dr_smp_recv(struct ib_smp *smp,
- u8 node_type,
- int port_num,
- int phys_port_cnt)
-{
- u8 hop_ptr, hop_cnt;
-
- hop_ptr = smp->hop_ptr;
- hop_cnt = smp->hop_cnt;
-
- /* See section 14.2.2.2, Vol 1 IB spec */
- if (!ib_get_smp_direction(smp)) {
- /* C14-9:1 -- sender should have incremented hop_ptr */
- if (hop_cnt && hop_ptr == 0)
- return 0;
-
- /* C14-9:2 -- intermediate hop */
- if (hop_ptr && hop_ptr < hop_cnt) {
- if (node_type != IB_NODE_SWITCH)
- return 0;
-
- smp->return_path[hop_ptr] = port_num;
- /* smp->hop_ptr updated when sending */
- return (smp->initial_path[hop_ptr+1] <= phys_port_cnt);
- }
-
- /* C14-9:3 -- We're at the end of the DR segment of path */
- if (hop_ptr == hop_cnt) {
- if (hop_cnt)
- smp->return_path[hop_ptr] = port_num;
- /* smp->hop_ptr updated when sending */
-
- return (node_type == IB_NODE_SWITCH ||
- smp->dr_dlid == IB_LID_PERMISSIVE);
- }
-
- /* C14-9:4 -- hop_ptr = hop_cnt + 1 -> give to SMA/SM. */
- /* C14-9:5 -- fail unreasonable hop pointer. */
- return (hop_ptr == hop_cnt + 1);
-
- } else {
-
- /* C14-13:1 */
- if (hop_cnt && hop_ptr == hop_cnt + 1) {
- smp->hop_ptr--;
- return (smp->return_path[smp->hop_ptr] ==
- port_num);
- }
-
- /* C14-13:2 */
- if (2 <= hop_ptr && hop_ptr <= hop_cnt) {
- if (node_type != IB_NODE_SWITCH)
- return 0;
-
- /* smp->hop_ptr updated when sending */
- return (smp->return_path[hop_ptr-1] <= phys_port_cnt);
- }
-
- /* C14-13:3 -- We're at the end of the DR segment of path */
- if (hop_ptr == 1) {
- if (smp->dr_slid == IB_LID_PERMISSIVE) {
- /* giving SMP to SM - update hop_ptr */
- smp->hop_ptr--;
- return 1;
- }
- /* smp->hop_ptr updated when sending */
- return (node_type == IB_NODE_SWITCH);
- }
-
- /* C14-13:4 -- hop_ptr = 0 -> give to SM. */
- /* C14-13:5 -- Check for unreasonable hop pointer. */
- return (hop_ptr == 0);
- }
-}
-
-/*
- * Receive side handling SMPs. Save receive information as required by
- * the spec. Return 0 if the SMP should be dropped.
- */
-static int smi_handle_smp_recv(struct ib_smp *smp,
- u8 node_type,
- int port_num,
- int phys_port_cnt)
-{
- switch (smp->mgmt_class)
- {
- case IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE:
- return smi_handle_dr_smp_recv(smp, node_type,
- port_num, phys_port_cnt);
- default: /* LR SM class */
- return 1;
- }
-}
-
-/*
- * Return 1 if the received DR SMP should be forwarded to the send queue.
- * Return 0 if the SMP should be completed up the stack.
- */
-static int smi_check_forward_dr_smp(struct ib_smp *smp)
-{
- u8 hop_ptr, hop_cnt;
-
- hop_ptr = smp->hop_ptr;
- hop_cnt = smp->hop_cnt;
-
- if (!ib_get_smp_direction(smp)) {
- /* C14-9:2 -- intermediate hop */
- if (hop_ptr && hop_ptr < hop_cnt)
- return 1;
-
- /* C14-9:3 -- at the end of the DR segment of path */
- if (hop_ptr == hop_cnt)
- return (smp->dr_dlid == IB_LID_PERMISSIVE);
-
- /* C14-9:4 -- hop_ptr = hop_cnt + 1 -> give to SMA/SM. */
- if (hop_ptr == hop_cnt + 1)
- return 1;
- } else {
- /* C14-13:2 */
- if (2 <= hop_ptr && hop_ptr <= hop_cnt)
- return 1;
-
- /* C14-13:3 -- at the end of the DR segment of path */
- if (hop_ptr == 1)
- return (smp->dr_slid != IB_LID_PERMISSIVE);
- }
- return 0;
-}
-
-/*
- * Return 1 if the received SMP should be forwarded to the send queue.
- * Return 0 if the SMP should be completed up the stack.
- */
-static int smi_check_forward_smp(struct ib_smp *smp)
-{
- switch (smp->mgmt_class)
- {
- case IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE:
- return smi_check_forward_dr_smp(smp);
- default: /* LR SM class */
- return 1;
- }
-}
-
-static int mad_process_local(struct ib_mad_agent *mad_agent,
- struct ib_mad *mad,
- struct ib_mad *mad_response,
- u16 slid)
-{
- return mad_agent->device->process_mad(mad_agent->device, 0,
- mad_agent->port_num,
- slid, mad, mad_response);
-}
-
-void agent_mad_send(struct ib_mad_agent *mad_agent,
- struct ib_mad *mad,
- struct ib_grh *grh,
- struct ib_mad_recv_wc *mad_recv_wc)
-{
- struct ib_agent_port_private *entry, *port_priv = NULL;
- struct ib_agent_send_wr *agent_send_wr;
- struct ib_sge gather_list;
- struct ib_send_wr send_wr;
- struct ib_send_wr *bad_send_wr;
- struct ib_ah_attr ah_attr;
- struct ib_ah *ah;
- unsigned long flags;
-
- /* Find matching MAD agent */
- spin_lock_irqsave(&ib_agent_port_list_lock, flags);
- list_for_each_entry(entry, &ib_agent_port_list, port_list) {
- if ((entry->dr_smp_agent == mad_agent) ||
- (entry->lr_smp_agent == mad_agent) ||
- (entry->perf_mgmt_agent == mad_agent)) {
- port_priv = entry;
- break;
- }
- }
- spin_unlock_irqrestore(&ib_agent_port_list_lock, flags);
- if (!port_priv) {
- printk(KERN_ERR SPFX "agent_mad_send: no matching MAD agent 0x%x\n",
- (unsigned int)mad_agent);
- return;
- }
-
- agent_send_wr = kmalloc(sizeof(*agent_send_wr), GFP_KERNEL);
- if (!agent_send_wr)
- return;
- agent_send_wr->mad = mad;
-
- /* PCI mapping */
- gather_list.addr = pci_map_single(mad_agent->device->dma_device,
- mad,
- sizeof(struct ib_mad),
- PCI_DMA_TODEVICE);
- gather_list.length = sizeof(struct ib_mad);
- gather_list.lkey = (*port_priv->mr).lkey;
-
- send_wr.next = NULL;
- send_wr.opcode = IB_WR_SEND;
- send_wr.sg_list = &gather_list;
- send_wr.num_sge = 1;
- send_wr.wr.ud.remote_qpn = mad_recv_wc->wc->src_qp; /* DQPN */
- send_wr.wr.ud.timeout_ms = 0;
- send_wr.send_flags = IB_SEND_SIGNALED | IB_SEND_SOLICITED;
-
- ah_attr.dlid = mad_recv_wc->wc->slid;
- ah_attr.port_num = mad_agent->port_num;
- ah_attr.src_path_bits = mad_recv_wc->wc->dlid_path_bits;
- ah_attr.sl = mad_recv_wc->wc->sl;
- ah_attr.static_rate = 0;
- if (mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_PERF_MGMT) {
- if (mad_recv_wc->wc->wc_flags & IB_WC_GRH) {
- ah_attr.ah_flags = IB_AH_GRH;
- ah_attr.grh.sgid_index = 0; /* Should sgid be looked up? */
- ah_attr.grh.hop_limit = grh->hop_limit;
- ah_attr.grh.flow_label = be32_to_cpup(&grh->version_tclass_flow) & 0xfffff;
- ah_attr.grh.traffic_class = (be32_to_cpup(&grh->version_tclass_flow) >> 20) & 0xff;
- memcpy(ah_attr.grh.dgid.raw, grh->sgid.raw, sizeof(struct ib_grh));
- } else {
- ah_attr.ah_flags = 0; /* No GRH */
- }
- } else {
- /* Directed route or LID routed SM class */
- ah_attr.ah_flags = 0; /* No GRH */
- }
-
- ah = ib_create_ah(mad_agent->qp->pd, &ah_attr);
- if (IS_ERR(ah)) {
- printk(KERN_ERR SPFX "No memory for address handle\n");
- kfree(mad);
- return;
- }
-
- send_wr.wr.ud.ah = ah;
- if (mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_PERF_MGMT) {
- send_wr.wr.ud.pkey_index = mad_recv_wc->wc->pkey_index;
- send_wr.wr.ud.remote_qkey = IB_QP1_QKEY;
- } else {
- send_wr.wr.ud.pkey_index = 0; /* Should only matter for GMPs */
- send_wr.wr.ud.remote_qkey = 0; /* for SMPs */
- }
- send_wr.wr.ud.mad_hdr = (struct ib_mad_hdr *)mad;
- send_wr.wr_id = ++port_priv->wr_id;
-
- pci_unmap_addr_set(mad, agent_send_wr->mapping, gather_list.addr);
-
- /* Send */
- spin_lock_irqsave(&port_priv->send_list_lock, flags);
- if (ib_post_send_mad(mad_agent, &send_wr, &bad_send_wr)) {
- pci_unmap_single(mad_agent->device->dma_device,
- pci_unmap_addr(mad, agent_send_wr->mapping),
- sizeof(struct ib_mad),
- PCI_DMA_TODEVICE);
- } else {
- list_add_tail(&agent_send_wr->send_list,
- &port_priv->send_posted_list);
- }
- spin_unlock_irqrestore(&port_priv->send_list_lock, flags);
- ib_destroy_ah(ah);
-}
-
-int smi_send_smp(struct ib_mad_agent *mad_agent,
- struct ib_smp *smp,
- struct ib_mad_recv_wc *mad_recv_wc,
- u16 slid,
- int phys_port_cnt)
-{
- struct ib_mad *smp_response;
- int ret;
-
- if (!smi_handle_smp_send(smp, mad_agent->device->node_type,
- mad_agent->port_num)) {
- /* SMI failed send */
- return 0;
- }
-
- if (smi_check_local_smp(mad_agent, smp)) {
- smp_response = kmalloc(sizeof(struct ib_mad), GFP_KERNEL);
- if (!smp_response)
- return 0;
-
- ret = mad_process_local(mad_agent, (struct ib_mad *)smp,
- smp_response, slid);
- if (ret & IB_MAD_RESULT_SUCCESS) {
- if (!smi_handle_smp_recv((struct ib_smp *)smp_response,
- mad_agent->device->node_type,
- mad_agent->port_num,
- phys_port_cnt)) {
- /* SMI failed receive */
- kfree(smp_response);
- return 0;
- }
- agent_mad_send(mad_agent, smp_response,
- NULL, mad_recv_wc);
- } else
- kfree(smp_response);
- return 1;
- }
-
- /* Post the send on the QP */
- return 1;
-}
-
-int agent_mad_response(struct ib_mad_agent *mad_agent,
- struct ib_mad *mad,
- struct ib_mad_recv_wc *mad_recv_wc,
- u16 slid)
-{
- struct ib_mad *response;
- struct ib_grh *grh;
- int ret;
-
- response = kmalloc(sizeof(struct ib_mad), GFP_KERNEL);
- if (!response)
- return 0;
-
- ret = mad_process_local(mad_agent, mad, response, slid);
- if (ret & IB_MAD_RESULT_SUCCESS) {
- grh = (void *)mad - sizeof(struct ib_grh);
- agent_mad_send(mad_agent, response, grh, mad_recv_wc);
- } else
- kfree(response);
- return 1;
-}
-
-int agent_recv_mad(struct ib_mad_agent *mad_agent,
- struct ib_mad *mad,
- struct ib_mad_recv_wc *mad_recv_wc,
- int phys_port_cnt)
-{
- int port_num;
-
- /* SM Directed Route or LID Routed class */
- if (mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE ||
- mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED) {
- if (mad_agent->device->node_type != IB_NODE_SWITCH)
- port_num = mad_agent->port_num;
- else
- port_num = mad_recv_wc->wc->port_num;
- if (!smi_handle_smp_recv((struct ib_smp *)mad,
- mad_agent->device->node_type,
- port_num, phys_port_cnt)) {
- /* SMI failed receive */
- return 0;
- }
-
- if (smi_check_forward_smp((struct ib_smp *)mad)) {
- smi_send_smp(mad_agent,
- (struct ib_smp *)mad,
- mad_recv_wc,
- mad_recv_wc->wc->slid,
- phys_port_cnt);
- return 0;
- }
-
- } else {
- /* PerfMgmt class */
- if (mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_PERF_MGMT) {
- agent_mad_response(mad_agent, mad, mad_recv_wc,
- mad_recv_wc->wc->slid);
- } else {
- printk(KERN_ERR "agent_recv_mad: Unexpected mgmt class 0x%x
received\n", mad->mad_hdr.mgmt_class);
- }
- return 0;
- }
-
- /* Complete receive up stack */
- return 1;
-}
-
-static void agent_send_handler(struct ib_mad_agent *mad_agent,
- struct ib_mad_send_wc *mad_send_wc)
-{
- struct ib_agent_port_private *entry, *port_priv = NULL;
- struct ib_agent_send_wr *agent_send_wr;
- struct list_head *send_wr;
- unsigned long flags;
-
- /* Find matching MAD agent */
- spin_lock_irqsave(&ib_agent_port_list_lock, flags);
- list_for_each_entry(entry, &ib_agent_port_list, port_list) {
- if ((entry->dr_smp_agent == mad_agent) ||
- (entry->lr_smp_agent == mad_agent) ||
- (entry->perf_mgmt_agent == mad_agent)) {
- port_priv = entry;
- break;
- }
- }
- spin_unlock_irqrestore(&ib_agent_port_list_lock, flags);
- if (!port_priv) {
- printk(KERN_ERR SPFX "agent_send_handler: no matching MAD agent "
- "0x%x\n", (unsigned int)mad_agent);
- return;
- }
-
- /* Completion corresponds to first entry on posted MAD send list */
- spin_lock_irqsave(&port_priv->send_list_lock, flags);
- if (list_empty(&port_priv->send_posted_list)) {
- spin_unlock_irqrestore(&port_priv->send_list_lock, flags);
- printk(KERN_ERR SPFX "Send completion WR ID 0x%Lx but send list "
- "is empty\n", mad_send_wc->wr_id);
- return;
- }
-
- agent_send_wr = list_entry(&port_priv->send_posted_list,
- struct ib_agent_send_wr,
- send_list);
- send_wr = agent_send_wr->send_list.next;
- agent_send_wr = container_of(send_wr, struct ib_agent_send_wr,
- send_list);
-
- /* Remove from posted send SMP list */
- list_del(&agent_send_wr->send_list);
- spin_unlock_irqrestore(&port_priv->send_list_lock, flags);
-
- /* Unmap PCI */
- pci_unmap_single(mad_agent->device->dma_device,
- pci_unmap_addr(agent_send_wr->smp,
- agent_send_wr->mapping),
- sizeof(struct ib_mad),
- PCI_DMA_TODEVICE);
-
- /* Release allocated memory */
- kfree(agent_send_wr->mad);
-}
-
-static void agent_recv_handler(struct ib_mad_agent *mad_agent,
- struct ib_mad_recv_wc *mad_recv_wc)
-{
- struct ib_agent_port_private *entry, *port_priv = NULL;
- unsigned long flags;
-
- /* Find matching MAD agent */
- spin_lock_irqsave(&ib_agent_port_list_lock, flags);
- list_for_each_entry(entry, &ib_agent_port_list, port_list) {
- if ((entry->dr_smp_agent == mad_agent) ||
- (entry->lr_smp_agent == mad_agent) ||
- (entry->perf_mgmt_agent == mad_agent)) {
- port_priv = entry;
- break;
- }
- }
- spin_unlock_irqrestore(&ib_agent_port_list_lock, flags);
- if (!port_priv) {
- printk(KERN_ERR SPFX "agent_recv_handler: no matching MAD agent
0x%x\n",
- (unsigned int)mad_agent);
-
- } else {
- agent_recv_mad(mad_agent,
- mad_recv_wc->recv_buf->mad,
- mad_recv_wc, port_priv->phys_port_cnt);
- }
-
- /* Free received MAD */
- ib_free_recv_mad(mad_recv_wc);
-}
-
-static int ib_agent_port_open(struct ib_device *device, int port_num,
- int phys_port_cnt)
-{
- int ret;
- u64 iova = 0;
- struct ib_phys_buf buf_list = {
- .addr = 0,
- .size = (unsigned long) high_memory - PAGE_OFFSET
- };
- struct ib_agent_port_private *entry, *port_priv = NULL;
- struct ib_mad_reg_req reg_req;
- unsigned long flags;
-
- /* First, check if port already open for SMI */
- spin_lock_irqsave(&ib_agent_port_list_lock, flags);
- list_for_each_entry(entry, &ib_agent_port_list, port_list) {
- if (entry->dr_smp_agent->device == device &&
- entry->port_num == port_num) {
- port_priv = entry;
- break;
- }
- }
- spin_unlock_irqrestore(&ib_agent_port_list_lock, flags);
- if (port_priv) {
- printk(KERN_DEBUG SPFX "%s port %d already open\n",
- device->name, port_num);
- return 0;
- }
-
- /* Create new device info */
- port_priv = kmalloc(sizeof *port_priv, GFP_KERNEL);
- if (!port_priv) {
- printk(KERN_ERR SPFX "No memory for ib_agent_port_private\n");
- ret = -ENOMEM;
- goto error1;
- }
-
- memset(port_priv, 0, sizeof *port_priv);
- port_priv->port_num = port_num;
- port_priv->phys_port_cnt = phys_port_cnt;
- port_priv->wr_id = 0;
- spin_lock_init(&port_priv->send_list_lock);
- INIT_LIST_HEAD(&port_priv->send_posted_list);
-
- /* Obtain MAD agent for directed route SM class */
- reg_req.mgmt_class = IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE;
- reg_req.mgmt_class_version = 1;
-
- /* SMA needs to receive Get, Set, and TrapRepress methods */
- bitmap_zero((unsigned long *)&reg_req.method_mask, IB_MGMT_MAX_METHODS);
- set_bit(IB_MGMT_METHOD_GET, (unsigned long *)&reg_req.method_mask);
- set_bit(IB_MGMT_METHOD_SET, (unsigned long *)&reg_req.method_mask);
- set_bit(IB_MGMT_METHOD_TRAP_REPRESS,
- (unsigned long *)&reg_req.method_mask);
-
- port_priv->dr_smp_agent = ib_register_mad_agent(device, port_num,
- IB_QPT_SMI,
- &reg_req, 0,
- &agent_send_handler,
- &agent_recv_handler,
- NULL);
- if (IS_ERR(port_priv->dr_smp_agent)) {
- ret = PTR_ERR(port_priv->dr_smp_agent);
- goto error2;
- }
-
- /* Obtain MAD agent for LID routed SM class */
- reg_req.mgmt_class = IB_MGMT_CLASS_SUBN_LID_ROUTED;
- port_priv->lr_smp_agent = ib_register_mad_agent(device, port_num,
- IB_QPT_SMI,
- &reg_req, 0,
- &agent_send_handler,
- &agent_recv_handler,
- NULL);
- if (IS_ERR(port_priv->lr_smp_agent)) {
- ret = PTR_ERR(port_priv->lr_smp_agent);
- goto error3;
- }
-
- /* Obtain MAD agent for PerfMgmt class */
- reg_req.mgmt_class = IB_MGMT_CLASS_PERF_MGMT;
- clear_bit(IB_MGMT_METHOD_TRAP_REPRESS,
- (unsigned long *)&reg_req.method_mask);
- port_priv->perf_mgmt_agent = ib_register_mad_agent(device, port_num,
- IB_QPT_GSI,
- &reg_req, 0,
- &agent_send_handler,
- &agent_recv_handler,
- NULL);
- if (IS_ERR(port_priv->perf_mgmt_agent)) {
- ret = PTR_ERR(port_priv->perf_mgmt_agent);
- goto error4;
- }
-
- port_priv->mr = ib_reg_phys_mr(port_priv->dr_smp_agent->qp->pd,
- &buf_list, 1,
- IB_ACCESS_LOCAL_WRITE, &iova);
- if (IS_ERR(port_priv->mr)) {
- printk(KERN_ERR SPFX "Couldn't register MR\n");
- ret = PTR_ERR(port_priv->mr);
- goto error5;
- }
-
- spin_lock_irqsave(&ib_agent_port_list_lock, flags);
- list_add_tail(&port_priv->port_list, &ib_agent_port_list);
- spin_unlock_irqrestore(&ib_agent_port_list_lock, flags);
-
- return 0;
-
-error5:
- ib_unregister_mad_agent(port_priv->perf_mgmt_agent);
-error4:
- ib_unregister_mad_agent(port_priv->lr_smp_agent);
-error3:
- ib_unregister_mad_agent(port_priv->dr_smp_agent);
-error2:
- kfree(port_priv);
-error1:
- return ret;
-}
-
-static int ib_agent_port_close(struct ib_device *device, int port_num)
-{
- struct ib_agent_port_private *entry, *port_priv = NULL;
- unsigned long flags;
-
- spin_lock_irqsave(&ib_agent_port_list_lock, flags);
- list_for_each_entry(entry, &ib_agent_port_list, port_list) {
- if (entry->dr_smp_agent->device == device &&
- entry->port_num == port_num) {
- port_priv = entry;
- break;
- }
- }
-
- if (port_priv == NULL) {
- printk(KERN_ERR SPFX "Port %d not found\n", port_num);
- spin_unlock_irqrestore(&ib_agent_port_list_lock, flags);
- return -ENODEV;
- }
-
- list_del(&port_priv->port_list);
- spin_unlock_irqrestore(&ib_agent_port_list_lock, flags);
-
- ib_dereg_mr(port_priv->mr);
-
- ib_unregister_mad_agent(port_priv->perf_mgmt_agent);
- ib_unregister_mad_agent(port_priv->lr_smp_agent);
- ib_unregister_mad_agent(port_priv->dr_smp_agent);
- kfree(port_priv);
-
- return 0;
-}
-
-static void ib_agent_init_device(struct ib_device *device)
-{
- int ret, num_ports, cur_port, i, ret2;
- struct ib_device_attr device_attr;
-
- ret = ib_query_device(device, &device_attr);
- if (ret) {
- printk(KERN_ERR SPFX "Couldn't query device %s\n", device->name);
- goto error_device_query;
- }
-
- if (device->node_type == IB_NODE_SWITCH) {
- num_ports = 1;
- cur_port = 0;
- } else {
- num_ports = device_attr.phys_port_cnt;
- cur_port = 1;
- }
-
- for (i = 0; i < num_ports; i++, cur_port++) {
- ret = ib_agent_port_open(device, cur_port, num_ports);
- if (ret) {
- printk(KERN_ERR SPFX "Couldn't open %s port %d\n",
- device->name, cur_port);
- goto error_device_open;
- }
- }
-
- goto error_device_query;
-
-error_device_open:
- while (i > 0) {
- cur_port--;
- ret2 = ib_agent_port_close(device, cur_port);
- if (ret2) {
- printk(KERN_ERR SPFX "Couldn't close %s port %d\n",
- device->name, cur_port);
- }
- i--;
- }
-
-error_device_query:
- return;
-}
-
-static void ib_agent_remove_device(struct ib_device *device)
-{
- int ret, i, num_ports, cur_port, ret2;
- struct ib_device_attr device_attr;
-
- ret = ib_query_device(device, &device_attr);
- if (ret) {
- printk(KERN_ERR SPFX "Couldn't query device %s\n", device->name);
- goto error_device_query;
- }
-
- if (device->node_type == IB_NODE_SWITCH) {
- num_ports = 1;
- cur_port = 0;
- } else {
- num_ports = device_attr.phys_port_cnt;
- cur_port = 1;
- }
- for (i = 0; i < num_ports; i++, cur_port++) {
- ret2 = ib_agent_port_close(device, cur_port);
- if (ret2) {
- printk(KERN_ERR SPFX "Couldn't close %s port %d\n",
- device->name, cur_port);
- if (!ret)
- ret = ret2;
- }
- }
-
-error_device_query:
- return;
-}
-
-static struct ib_client ib_agent_client = {
- .name = "ib_agent",
- .add = ib_agent_init_device,
- .remove = ib_agent_remove_device
-};
-
-static int __init ib_agent_init(void)
-{
- INIT_LIST_HEAD(&ib_agent_port_list);
- if (ib_register_client(&ib_agent_client)) {
- printk(KERN_ERR SPFX "Couldn't register ib_agent
client\n");
- return -EINVAL;
- }
-
- return 0;
-}
-
-static void __exit ib_agent_exit(void)
-{
- ib_unregister_client(&ib_agent_client);
-}
-
-module_init(ib_agent_init);
-module_exit(ib_agent_exit);
Index: ib_smi_priv.h
===================================================================
--- ib_smi_priv.h (revision 1037)
+++ ib_smi_priv.h (working copy)
@@ -1,52 +0,0 @@
-/*
- This software is available to you under a choice of one of two
- licenses. You may choose to be licensed under the terms of the GNU
- General Public License (GPL) Version 2, available at
- <http://www.fsf.org/copyleft/gpl.html>, or the OpenIB.org BSD
- license, available in the LICENSE.TXT file accompanying this
- software. These details are also available at
- <http://openib.org/license.html>.
-
- THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- SOFTWARE.
-
- Copyright (c) 2004 Mellanox Technologies Ltd. All rights reserved.
- Copyright (c) 2004 Infinicon Corporation. All rights reserved.
- Copyright (c) 2004 Intel Corporation. All rights reserved.
- Copyright (c) 2004 Topspin Corporation. All rights reserved.
- Copyright (c) 2004 Voltaire Corporation. All rights reserved.
-*/
-
-#ifndef __IB_AGENT_PRIV_H__
-#define __IB_AGENT_PRIV_H__
-
-#include <linux/pci.h>
-
-#define SPFX "ib_agent: "
-
-struct ib_agent_send_wr {
- struct list_head send_list;
- struct ib_mad *mad;
- DECLARE_PCI_UNMAP_ADDR(mapping)
-};
-
-struct ib_agent_port_private {
- struct list_head port_list;
- struct list_head send_posted_list;
- spinlock_t send_list_lock;
- int port_num;
- int phys_port_cnt;
- struct ib_mad_agent *dr_smp_agent; /* DR SM class */
- struct ib_mad_agent *lr_smp_agent; /* LR SM class */
- struct ib_mad_agent *perf_mgmt_agent; /* PerfMgmt class */
- struct ib_mr *mr;
- u64 wr_id;
-};
-
-#endif /* __IB_AGENT_PRIV_H__ */
Index: ib_agent.c
===================================================================
--- ib_agent.c (revision 1037)
+++ ib_agent.c (working copy)
@@ -24,7 +24,7 @@
*/
#include <ib_smi.h>
-#include "ib_smi_priv.h"
+#include "ib_agent_priv.h"
#include "ib_mad_priv.h"
Index: Makefile
===================================================================
--- Makefile (revision 1037)
+++ Makefile (working copy)
@@ -2,10 +2,10 @@
obj-$(CONFIG_INFINIBAND_ACCESS_LAYER) += \
ib_al.o \
- ib_sma.o
+ ib_agt.o
ib_al-objs := \
ib_mad.o
-ib_sma-objs := \
- ib_smi.o
+ib_agt-objs := \
+ ib_agent.o
Index: README
===================================================================
--- README (revision 1037)
+++ README (working copy)
@@ -43,6 +43,6 @@
6. You are now ready to run the new access layer as follows:
/sbin/modprobe ib_mthca
/sbin/modprobe ib_al (This can be skipped)
- /sbin/modprobe ib_sma
+ /sbin/modprobe ib_agt
Note that starting ib_al does not cause ib_mthca to be started.
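
As a rough sanity check after the modprobe steps above (a minimal sketch, assuming the
usual module utilities are installed; the exact kernel log text will vary by build):
/sbin/lsmod | grep ib_agt       # the renamed agent module should now be resident
dmesg | grep 'ib_agent:'        # agent messages carry the SPFX prefix "ib_agent: "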