[openib-general] [PATCH] OpenSM: Add multicast destination handling into SA PathRecord support
Hal Rosenstock
halr at voltaire.com
Mon Feb 21 06:15:40 PST 2005
OpenSM: Add multicast destination handling into SA PathRecord support
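The PathRecord SA handler now recognizes multicast destinations and answers
them from the matching MC group rather than through the unicast endpoint
lookup. For reviewers, here is a minimal standalone sketch (not part of the
patch) of the classification the new __osm_pr_rcv_check_mcast_dest routine
performs; the helper name and parameters are hypothetical, and the real code
uses ib_gid_is_multicast() and IB_LID_MCAST_START/END on the fields selected
by the component mask:

#include <stdint.h>

/* Multicast LID range (the values behind IB_LID_MCAST_START/END). */
#define MCAST_LID_START 0xC000
#define MCAST_LID_END   0xFFFE

/* Returns 1 when a PathRecord destination is multicast: the DGID starts
 * with the 0xFF multicast prefix, or the host-order DLID falls in the
 * multicast LID range. */
static int pr_dest_is_multicast(const uint8_t dgid_raw[16], uint16_t dlid_ho)
{
	return (dgid_raw[0] == 0xFF) ||
	       (dlid_ho >= MCAST_LID_START && dlid_ho <= MCAST_LID_END);
}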
Index: osm_sa_path_record.c
===================================================================
--- osm_sa_path_record.c (revision 1842)
+++ osm_sa_path_record.c (working copy)
@@ -69,6 +69,7 @@
#include <vendor/osm_vendor_api.h>
#include <opensm/osm_helper.h>
#include <opensm/osm_pkey.h>
+#include <opensm/osm_multicast.h>
#define OSM_PR_RCV_POOL_MIN_SIZE 64
#define OSM_PR_RCV_POOL_GROW_SIZE 64
@@ -90,6 +91,12 @@
boolean_t reversible;
} osm_path_parms_t;
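+/* Context used when scanning the MC group table for a group whose MGID
+   matches a PathRecord destination GID. */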
+typedef struct osm_sa_pr_mcmr_search_ctxt {
+ ib_gid_t *p_mgid;
+ osm_mgrp_t *p_mgrp;
+ osm_pr_rcv_t *p_rcv;
+} osm_sa_pr_mcmr_search_ctxt_t;
+
/**********************************************************************
**********************************************************************/
void
@@ -1153,8 +1160,277 @@
}
/**********************************************************************
+ *********************************************************************/
+static
+void
+__search_mgrp_by_mgid(
+ IN cl_map_item_t* const p_map_item,
+ IN void* context )
+{
+ osm_mgrp_t* p_mgrp = (osm_mgrp_t*)p_map_item;
+ osm_sa_pr_mcmr_search_ctxt_t *p_ctxt = (osm_sa_pr_mcmr_search_ctxt_t *) context;
+ const ib_gid_t *p_recvd_mgid;
+ osm_pr_rcv_t *p_rcv;
+
+ p_recvd_mgid = p_ctxt->p_mgid;
+ p_rcv = p_ctxt->p_rcv;
+
+ /* Compare the full MGID rather than only the group ID so that a
+    different scope cannot match the same multicast group. */
+ if (cl_memcmp(&p_mgrp->mcmember_rec.mgid,
+ p_recvd_mgid,
+ sizeof(ib_gid_t)))
+ return;
+
+ if(p_ctxt->p_mgrp)
+ {
+ osm_log( p_rcv->p_log, OSM_LOG_ERROR,
+ "__search_mgrp_by_mgid: ERR 1B03: "
+ "Multiple MCGROUP for same MGID.\n" );
+ return;
+ }
+ p_ctxt->p_mgrp = p_mgrp;
+
+}
+
+/**********************************************************************
**********************************************************************/
+static ib_api_status_t
+__get_mgrp_by_mgid(
+ IN osm_pr_rcv_t* const p_rcv,
+ IN ib_path_rec_t* p_recvd_path_rec,
+ OUT osm_mgrp_t **pp_mgrp)
+{
+ osm_sa_pr_mcmr_search_ctxt_t mcmr_search_context;
+
+ mcmr_search_context.p_mgid = &p_recvd_path_rec->dgid;
+ mcmr_search_context.p_rcv = p_rcv;
+ mcmr_search_context.p_mgrp = NULL;
+
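+ /* The subnet group table is keyed by MLID, so a lookup by MGID must walk
+    every entry; __search_mgrp_by_mgid does the per-entry compare. */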
+ cl_qmap_apply_func( &p_rcv->p_subn->mgrp_mlid_tbl,
+ __search_mgrp_by_mgid,
+ &mcmr_search_context);
+
+ if(mcmr_search_context.p_mgrp == NULL)
+ {
+ return IB_NOT_FOUND;
+ }
+
+ *pp_mgrp = mcmr_search_context.p_mgrp;
+ return IB_SUCCESS;
+}
+
+/**********************************************************************
+ **********************************************************************/
+static
+osm_mgrp_t *
+__get_mgrp_by_mlid(
+ IN const osm_pr_rcv_t* const p_rcv,
+ IN ib_net16_t const mlid)
+{
+ cl_map_item_t *map_item;
+
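+ /* The group table is keyed by MLID, so this is a direct map lookup. */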
+ map_item = cl_qmap_get(&p_rcv->p_subn->mgrp_mlid_tbl,
+ mlid);
+ if(map_item == cl_qmap_end(&p_rcv->p_subn->mgrp_mlid_tbl))
+ {
+ return NULL;
+ }
+
+ return (osm_mgrp_t *)map_item;
+}
+
+/**********************************************************************
+ **********************************************************************/
static void
+__osm_pr_get_mgrp(
+ IN osm_pr_rcv_t* const p_rcv,
+ IN const osm_madw_t* const p_madw,
+ OUT osm_mgrp_t **pp_mgrp )
+{
+ ib_path_rec_t* p_pr;
+ const ib_sa_mad_t* p_sa_mad;
+ ib_net64_t comp_mask;
+ ib_api_status_t status;
+
+ OSM_LOG_ENTER( p_rcv->p_log, __osm_pr_get_mgrp );
+
+ p_sa_mad = osm_madw_get_sa_mad_ptr( p_madw );
+ p_pr = (ib_path_rec_t*)ib_sa_mad_get_payload_ptr( p_sa_mad );
+
+ comp_mask = p_sa_mad->comp_mask;
+
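+ /* Resolve the group by MGID when the DGID component is present; otherwise
+    fall back to the DLID below, and cross-check the two when both are given. */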
+ if( comp_mask & IB_PR_COMPMASK_DGID )
+ {
+ status = __get_mgrp_by_mgid( p_rcv, p_pr, pp_mgrp );
+ if( status != IB_SUCCESS )
+ {
+ osm_log( p_rcv->p_log, OSM_LOG_ERROR,
+ "__osm_pr_get_mgrp: "
+ "No MC group found for PathRecord destination GID.\n" );
+ goto Exit;
+ }
+ }
+
+ if( comp_mask & IB_PR_COMPMASK_DLID )
+ {
+ if( *pp_mgrp)
+ {
+ /* check that the MLID in the MC group is */
+ /* the same as the DLID in the PathRecord */
+ if( (*pp_mgrp)->mlid != p_pr->dlid )
+ {
+ /* Note: this might be better reported to the requester as an invalid request */
+ osm_log( p_rcv->p_log, OSM_LOG_ERROR,
+ "__osm_pr_get_mgrp: "
+ "MC group MLID does not match PathRecord destination LID.\n" );
+ *pp_mgrp = NULL;
+ goto Exit;
+ }
+ }
+ else
+ {
+ *pp_mgrp = __get_mgrp_by_mlid( p_rcv, p_pr->dlid );
+ if( *pp_mgrp == NULL)
+ {
+ osm_log( p_rcv->p_log, OSM_LOG_ERROR,
+ "__osm_pr_get_mgrp: "
+ "No MC group found for PathRecord destination LID.\n" );
+ }
+ }
+ }
+
+ Exit:
+ OSM_LOG_EXIT( p_rcv->p_log );
+}
+
+/**********************************************************************
+ **********************************************************************/
+static ib_api_status_t
+__osm_pr_match_mgrp_attributes(
+ IN osm_pr_rcv_t* const p_rcv,
+ IN const osm_madw_t* const p_madw,
+ IN const osm_mgrp_t* const p_mgrp )
+{
+ const ib_path_rec_t* p_pr;
+ const ib_sa_mad_t* p_sa_mad;
+ ib_net64_t comp_mask;
+ ib_api_status_t status = IB_ERROR;
+ uint32_t flow_label;
+ uint8_t sl;
+ uint8_t hop_limit;
+
+ OSM_LOG_ENTER( p_rcv->p_log, __osm_pr_match_mgrp_attributes );
+
+ p_sa_mad = osm_madw_get_sa_mad_ptr( p_madw );
+ p_pr = (ib_path_rec_t*)ib_sa_mad_get_payload_ptr( p_sa_mad );
+
+ comp_mask = p_sa_mad->comp_mask;
+
+ /* If SGID and/or SLID are specified, they should be validated against the MC group */
+ /* MTU, rate, packet lifetime, and raw traffic are not checked currently */
+ if( comp_mask & IB_PR_COMPMASK_PKEY )
+ {
+ if( p_pr->pkey != p_mgrp->mcmember_rec.pkey )
+ goto Exit;
+ }
+
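+ /* sl_flow_hop packs SL (4 bits), flow label (20 bits), and hop limit
+    (8 bits); unpack it for the individual comparisons below. */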
+ ib_member_get_sl_flow_hop( p_mgrp->mcmember_rec.sl_flow_hop,
+ &sl, &flow_label, &hop_limit );
+
+ if( comp_mask & IB_PR_COMPMASK_SL )
+ {
+ if( ( p_pr->sl & 0xf ) != sl )
+ goto Exit;
+ }
+
+ if( comp_mask & IB_PR_COMPMASK_NUMBPATH )
+ {
+ if( ( p_pr->num_path & 0x7f ) == 0 )
+ goto Exit;
+ }
+
+ if( comp_mask & IB_PR_COMPMASK_FLOWLABEL )
+ {
+ if( ib_path_rec_flow_lbl( p_pr ) != flow_label )
+ goto Exit;
+ }
+
+ if( comp_mask & IB_PR_COMPMASK_HOPLIMIT )
+ {
+ if( ib_path_rec_hop_limit( p_pr ) != hop_limit )
+ goto Exit;
+ }
+
+ if( comp_mask & IB_PR_COMPMASK_TCLASS )
+ {
+ if( p_pr->tclass != p_mgrp->mcmember_rec.tclass )
+ goto Exit;
+ }
+
+ status = IB_SUCCESS;
+
+ Exit:
+ OSM_LOG_EXIT( p_rcv->p_log );
+ return( status );
+}
+
+/**********************************************************************
+ **********************************************************************/
+static boolean_t
+__osm_pr_rcv_check_mcast_dest(
+ IN osm_pr_rcv_t* const p_rcv,
+ IN const osm_madw_t* const p_madw )
+{
+ const ib_path_rec_t* p_pr;
+ const ib_sa_mad_t* p_sa_mad;
+ ib_net64_t comp_mask;
+ boolean_t is_multicast = FALSE;
+
+ OSM_LOG_ENTER( p_rcv->p_log, __osm_pr_rcv_check_mcast_dest );
+
+ p_sa_mad = osm_madw_get_sa_mad_ptr( p_madw );
+ p_pr = (ib_path_rec_t*)ib_sa_mad_get_payload_ptr( p_sa_mad );
+
+ comp_mask = p_sa_mad->comp_mask;
+
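+ /* A destination is treated as multicast when the DGID is a multicast GID
+    or the DLID falls in the multicast LID range. */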
+ if( comp_mask & IB_PR_COMPMASK_DGID )
+ {
+ is_multicast = ib_gid_is_multicast( &p_pr->dgid );
+ }
+
+ if( comp_mask & IB_PR_COMPMASK_DLID )
+ {
+ if( cl_ntoh16( p_pr->dlid ) >= IB_LID_MCAST_START &&
+ cl_ntoh16( p_pr->dlid ) <= IB_LID_MCAST_END )
+ is_multicast = TRUE;
+ else if( is_multicast )
+ osm_log( p_rcv->p_log, OSM_LOG_ERROR,
+ "__osm_pr_rcv_check_mcast_dest: "
+ "PathRecord request indicates MGID but not MLID.\n" );
+ }
+
+ OSM_LOG_EXIT( p_rcv->p_log );
+ return( is_multicast );
+}
+
+/**********************************************************************
+ **********************************************************************/
+static void
__osm_pr_rcv_respond(
IN osm_pr_rcv_t* const p_rcv,
IN const osm_madw_t* const p_madw,
@@ -1214,7 +1490,7 @@
if( osm_log_is_active( p_rcv->p_log, OSM_LOG_DEBUG ) )
{
osm_log( p_rcv->p_log, OSM_LOG_DEBUG,
- "__osm_pr_rcv_respond:"
+ "__osm_pr_rcv_respond: "
"Generating response with %u records.\n", num_rec );
}
@@ -1329,7 +1605,7 @@
{
osm_log( p_rcv->p_log, OSM_LOG_ERROR,
"osm_pr_rcv_process: "
- "Cannot find requestor physical port. \n" );
+ "Cannot find requestor physical port.\n" );
goto Exit;
}
@@ -1359,6 +1635,14 @@
*/
cl_plock_acquire( p_rcv->p_lock );
+ /* Handle multicast destinations separately */
+ if( __osm_pr_rcv_check_mcast_dest( p_rcv, p_madw ) )
+ goto McastDest;
+
+ osm_log( p_rcv->p_log, OSM_LOG_DEBUG,
+ "osm_pr_rcv_process: "
+ "Unicast destination requested.\n" );
+
sa_status = __osm_pr_rcv_get_end_points( p_rcv, p_madw,
&p_src_port, &p_dest_port );
@@ -1394,7 +1678,78 @@
__osm_pr_rcv_process_world( p_rcv, p_pr, requestor_port,
p_sa_mad->comp_mask, &pr_list );
}
+ goto Unlock;
+ McastDest:
+ osm_log(p_rcv->p_log, OSM_LOG_DEBUG,
+ "osm_pr_rcv_process: "
+ "Multicast destination requested.\n" );
+
+ osm_mgrp_t *p_mgrp = NULL;
+ ib_api_status_t status;
+ osm_pr_item_t* p_pr_item;
+ uint32_t flow_label;
+ uint8_t sl;
+ uint8_t hop_limit;
+
+ /* First, get the MC info */
+ __osm_pr_get_mgrp( p_rcv, p_madw, &p_mgrp );
+
+ if ( p_mgrp )
+ {
+ /* Make sure the rest of the PathRecord matches the MC group attributes */
+ status = __osm_pr_match_mgrp_attributes( p_rcv, p_madw, p_mgrp);
+ if ( status == IB_SUCCESS )
+ {
+ p_pr_item = (osm_pr_item_t*)cl_qlock_pool_get( &p_rcv->pr_pool );
+ if( p_pr_item == NULL )
+ {
+ osm_log( p_rcv->p_log, OSM_LOG_ERROR,
+ "osm_pr_rcv_process: "
+ "Unable to allocate path record for MC group.\n" );
+ }
+ else
+ {
+ /* Copy PathRecord request into response */
+ p_sa_mad = osm_madw_get_sa_mad_ptr( p_madw );
+ p_pr = (ib_path_rec_t*)ib_sa_mad_get_payload_ptr( p_sa_mad );
+ p_pr_item->path_rec = *p_pr;
+
+ /* Now, use the MC info to cruft up the PathRecord response */
+ p_pr_item->path_rec.dgid = p_mgrp->mcmember_rec.mgid;
+ p_pr_item->path_rec.dlid = p_mgrp->mcmember_rec.mlid;
+ p_pr_item->path_rec.tclass = p_mgrp->mcmember_rec.tclass;
+ p_pr_item->path_rec.num_path = 1;
+ p_pr_item->path_rec.pkey = p_mgrp->mcmember_rec.pkey;
+
+ /* MTU, rate, and packet lifetime use the 'exactly' (2) selector in the top two bits */
+ p_pr_item->path_rec.mtu = (2<<6) | p_mgrp->mcmember_rec.mtu;
+ p_pr_item->path_rec.rate = (2<<6) | p_mgrp->mcmember_rec.rate;
+ p_pr_item->path_rec.pkt_life = (2<<6) | p_mgrp->mcmember_rec.pkt_life;
+
+ /* SL, Hop Limit, and Flow Label */
+ ib_member_get_sl_flow_hop( p_mgrp->mcmember_rec.sl_flow_hop,
+ &sl, &flow_label, &hop_limit );
+ p_pr_item->path_rec.sl = sl;
+ p_pr_item->path_rec.hop_flow_raw = (uint32_t)(hop_limit) |
+ (flow_label << 8);
+
+ cl_qlist_insert_tail( &pr_list,
+ (cl_list_item_t*)&p_pr_item->pool_item );
+
+ }
+ }
+ else
+ {
+ osm_log( p_rcv->p_log, OSM_LOG_ERROR,
+ "osm_pr_rcv_process: "
+ "MC group attributes don't match PathRecord request.\n" );
+ }
+ }
+
+ /* Now, (finally) respond to the PathRecord request */
+
+ Unlock:
cl_plock_release( p_rcv->p_lock );
__osm_pr_rcv_respond( p_rcv, p_madw, &pr_list );
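
As a follow-up note for reviewers, the field encodings used when building the
multicast response above can be summarized with this small sketch (not part of
the patch; the function names are illustrative only, plain integers stand in
for the ib_path_rec_t/ib_member_rec_t wire structures, and byte ordering is
not shown):

#include <stdint.h>

/* "Exactly" selector carried in the top two bits of the PathRecord MTU,
 * rate, and packet lifetime bytes. */
#define SELECTOR_EXACTLY 2

/* Mirrors the (2 << 6) | value encoding used when copying the group MTU,
 * rate, and packet lifetime into the response. */
static uint8_t encode_exactly(uint8_t value)
{
	return (uint8_t)((SELECTOR_EXACTLY << 6) | value);
}

/* Mirrors the hop_flow_raw assembly: hop limit in the low 8 bits and the
 * 20-bit flow label in the bits just above it. */
static uint32_t pack_hop_flow(uint8_t hop_limit, uint32_t flow_label)
{
	return (uint32_t)hop_limit | ((flow_label & 0xFFFFF) << 8);
}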