[openib-general] [PATCH 3/7] IB/ipoib - Use the new verbs DMA mapping functions
Ralph Campbell
ralph.campbell at qlogic.com
Mon Nov 6 10:13:07 PST 2006
There is a very slight overhead since there is a test and
branch. Given modern CPU architecture, this is in the noise.
On Sun, 2006-11-05 at 09:42 +0200, Eitan Zahavi wrote:
> Hi Ralph,
>
> Is there any performance penalty for using the IB version of the DMA
> mapping functions?
>
> Thanks
>
> Eitan
>
> Ralph Campbell wrote:
> > IB/ipoib - Use the new verbs DMA mapping functions
> >
> > This patch converts IPoIB to use the new DMA mapping functions
> > for kernel verbs consumers.
> >
> > From: Ralph Campbell <ralph.campbell at qlogic.com>
> >
> > diff -r f37bd0e41fec drivers/infiniband/ulp/ipoib/ipoib_ib.c
> > --- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c Thu Oct 26 21:44:41 2006 +0700
> > +++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c Thu Oct 26 12:37:09 2006 -0800
> > @@ -109,9 +109,8 @@ static int ipoib_ib_post_receive(struct
> > ret = ib_post_recv(priv->qp, &param, &bad_wr);
> > if (unlikely(ret)) {
> > ipoib_warn(priv, "receive failed for buf %d (%d)\n", id, ret);
> > - dma_unmap_single(priv->ca->dma_device,
> > - priv->rx_ring[id].mapping,
> > - IPOIB_BUF_SIZE, DMA_FROM_DEVICE);
> > + ib_dma_unmap_single(priv->ca, priv->rx_ring[id].mapping,
> > + IPOIB_BUF_SIZE, DMA_FROM_DEVICE);
> > dev_kfree_skb_any(priv->rx_ring[id].skb);
> > priv->rx_ring[id].skb = NULL;
> > }
> > @@ -136,10 +135,9 @@ static int ipoib_alloc_rx_skb(struct net
> > */
> > skb_reserve(skb, 4);
> >
> > - addr = dma_map_single(priv->ca->dma_device,
> > - skb->data, IPOIB_BUF_SIZE,
> > - DMA_FROM_DEVICE);
> > - if (unlikely(dma_mapping_error(addr))) {
> > + addr = ib_dma_map_single(priv->ca, skb->data, IPOIB_BUF_SIZE,
> > + DMA_FROM_DEVICE);
> > + if (unlikely(ib_dma_mapping_error(priv->ca, addr))) {
> > dev_kfree_skb_any(skb);
> > return -EIO;
> > }
> > @@ -193,8 +191,8 @@ static void ipoib_ib_handle_rx_wc(struct
> > ipoib_warn(priv, "failed recv event "
> > "(status=%d, wrid=%d vend_err %x)\n",
> > wc->status, wr_id, wc->vendor_err);
> > - dma_unmap_single(priv->ca->dma_device, addr,
> > - IPOIB_BUF_SIZE, DMA_FROM_DEVICE);
> > + ib_dma_unmap_single(priv->ca, addr,
> > + IPOIB_BUF_SIZE, DMA_FROM_DEVICE);
> > dev_kfree_skb_any(skb);
> > priv->rx_ring[wr_id].skb = NULL;
> > return;
> > @@ -212,8 +210,7 @@ static void ipoib_ib_handle_rx_wc(struct
> > ipoib_dbg_data(priv, "received %d bytes, SLID 0x%04x\n",
> > wc->byte_len, wc->slid);
> >
> > - dma_unmap_single(priv->ca->dma_device, addr,
> > - IPOIB_BUF_SIZE, DMA_FROM_DEVICE);
> > + ib_dma_unmap_single(priv->ca, addr, IPOIB_BUF_SIZE, DMA_FROM_DEVICE);
> >
> > skb_put(skb, wc->byte_len);
> > skb_pull(skb, IB_GRH_BYTES);
> > @@ -261,10 +258,8 @@ static void ipoib_ib_handle_tx_wc(struct
> >
> > tx_req = &priv->tx_ring[wr_id];
> >
> > - dma_unmap_single(priv->ca->dma_device,
> > - pci_unmap_addr(tx_req, mapping),
> > - tx_req->skb->len,
> > - DMA_TO_DEVICE);
> > + ib_dma_unmap_single(priv->ca, pci_unmap_addr(tx_req, mapping),
> > + tx_req->skb->len, DMA_TO_DEVICE);
> >
> > ++priv->stats.tx_packets;
> > priv->stats.tx_bytes += tx_req->skb->len;
> > @@ -353,9 +348,9 @@ void ipoib_send(struct net_device *dev,
> > */
> > tx_req = &priv->tx_ring[priv->tx_head & (ipoib_sendq_size - 1)];
> > tx_req->skb = skb;
> > - addr = dma_map_single(priv->ca->dma_device, skb->data, skb->len,
> > - DMA_TO_DEVICE);
> > - if (unlikely(dma_mapping_error(addr))) {
> > + addr = ib_dma_map_single(priv->ca, skb->data, skb->len,
> > + DMA_TO_DEVICE);
> > + if (unlikely(ib_dma_mapping_error(priv->ca, addr))) {
> > ++priv->stats.tx_errors;
> > dev_kfree_skb_any(skb);
> > return;
> > @@ -366,8 +361,7 @@ void ipoib_send(struct net_device *dev,
> > address->ah, qpn, addr, skb->len))) {
> > ipoib_warn(priv, "post_send failed\n");
> > ++priv->stats.tx_errors;
> > - dma_unmap_single(priv->ca->dma_device, addr, skb->len,
> > - DMA_TO_DEVICE);
> > + ib_dma_unmap_single(priv->ca, addr, skb->len, DMA_TO_DEVICE);
> > dev_kfree_skb_any(skb);
> > } else {
> > dev->trans_start = jiffies;
> > @@ -537,24 +531,28 @@ int ipoib_ib_dev_stop(struct net_device
> > while ((int) priv->tx_tail - (int) priv->tx_head < 0) {
> > tx_req = &priv->tx_ring[priv->tx_tail &
> > (ipoib_sendq_size - 1)];
> > - dma_unmap_single(priv->ca->dma_device,
> > - pci_unmap_addr(tx_req, mapping),
> > - tx_req->skb->len,
> > - DMA_TO_DEVICE);
> > + ib_dma_unmap_single(priv->ca,
> > + pci_unmap_addr(tx_req,
> > + mapping),
> > + tx_req->skb->len,
> > + DMA_TO_DEVICE);
> > dev_kfree_skb_any(tx_req->skb);
> > ++priv->tx_tail;
> > }
> >
> > - for (i = 0; i < ipoib_recvq_size; ++i)
> > - if (priv->rx_ring[i].skb) {
> > - dma_unmap_single(priv->ca->dma_device,
> > - pci_unmap_addr(&priv->rx_ring[i],
> > - mapping),
> > - IPOIB_BUF_SIZE,
> > - DMA_FROM_DEVICE);
> > - dev_kfree_skb_any(priv->rx_ring[i].skb);
> > - priv->rx_ring[i].skb = NULL;
> > - }
> > + for (i = 0; i < ipoib_recvq_size; ++i) {
> > + struct ipoib_rx_buf *rx_req;
> > +
> > + rx_req = &priv->rx_ring[i];
> > + if (!rx_req->skb)
> > + continue;
> > + ib_dma_unmap_single(priv->ca,
> > + rx_req->mapping,
> > + IPOIB_BUF_SIZE,
> > + DMA_FROM_DEVICE);
> > + dev_kfree_skb_any(rx_req->skb);
> > + rx_req->skb = NULL;
> > + }
> >
> > goto timeout;
> > }
> >
> >
> >
> > _______________________________________________
> > openib-general mailing list
> > openib-general at openib.org
> > http://openib.org/mailman/listinfo/openib-general
> >
> > To unsubscribe, please visit http://openib.org/mailman/listinfo/openib-general
> >
>
>
> _______________________________________________
> openib-general mailing list
> openib-general at openib.org
> http://openib.org/mailman/listinfo/openib-general
>
> To unsubscribe, please visit http://openib.org/mailman/listinfo/openib-general
>
More information about the general mailing list