[openib-general] [PATCH 6/7] IB/sdp - Use the new verbs DMA mapping functions
Ralph Campbell
ralph.campbell at qlogic.com
Thu Nov 2 14:36:01 PST 2006
IB/sdp - Use the new verbs DMA mapping functions
This patch converts SDP to use the new DMA mapping functions
for kernel verbs consumers.
From: Ralph Campbell <ralph.campbell at qlogic.com>
Index: src/linux-kernel/infiniband/ulp/sdp/sdp_bcopy.c
===================================================================
--- src/linux-kernel/infiniband/ulp/sdp/sdp_bcopy.c (revision 9441)
+++ src/linux-kernel/infiniband/ulp/sdp/sdp_bcopy.c (working copy)
@@ -67,7 +67,7 @@ void sdp_post_send(struct sdp_sock *ssk,
unsigned mseq = ssk->tx_head;
int i, rc, frags;
dma_addr_t addr;
- struct device *hwdev;
+ struct ib_device *dev;
struct ib_sge *sge;
struct ib_send_wr *bad_wr;
@@ -80,15 +80,14 @@ void sdp_post_send(struct sdp_sock *ssk,
tx_req = &ssk->tx_ring[mseq & (SDP_TX_SIZE - 1)];
tx_req->skb = skb;
- hwdev = ssk->dma_device;
+ dev = ssk->mr->device;
sge = ssk->ibsge;
- addr = dma_map_single(hwdev,
- skb->data, skb->len - skb->data_len,
- DMA_TO_DEVICE);
+ addr = ib_dma_map_single(dev, skb->data, skb->len - skb->data_len,
+ DMA_TO_DEVICE);
tx_req->mapping[0] = addr;
/* TODO: proper error handling */
- BUG_ON(dma_mapping_error(addr));
+ BUG_ON(ib_dma_mapping_error(dev, addr));
sge->addr = (u64)addr;
sge->length = skb->len - skb->data_len;
@@ -96,11 +95,11 @@ void sdp_post_send(struct sdp_sock *ssk,
frags = skb_shinfo(skb)->nr_frags;
for (i = 0; i < frags; ++i) {
++sge;
- addr = dma_map_page(hwdev, skb_shinfo(skb)->frags[i].page,
- skb_shinfo(skb)->frags[i].page_offset,
- skb_shinfo(skb)->frags[i].size,
- DMA_TO_DEVICE);
- BUG_ON(dma_mapping_error(addr));
+ addr = ib_dma_map_page(dev, skb_shinfo(skb)->frags[i].page,
+ skb_shinfo(skb)->frags[i].page_offset,
+ skb_shinfo(skb)->frags[i].size,
+ DMA_TO_DEVICE);
+ BUG_ON(ib_dma_mapping_error(dev, addr));
tx_req->mapping[i + 1] = addr;
sge->addr = addr;
sge->length = skb_shinfo(skb)->frags[i].size;
@@ -124,7 +123,7 @@ void sdp_post_send(struct sdp_sock *ssk,
struct sk_buff *sdp_send_completion(struct sdp_sock *ssk, int mseq)
{
- struct device *hwdev;
+ struct ib_device *dev;
struct sdp_buf *tx_req;
struct sk_buff *skb;
int i, frags;
@@ -135,16 +134,16 @@ struct sk_buff *sdp_send_completion(stru
return NULL;
}
- hwdev = ssk->dma_device;
+ dev = ssk->mr->device;
tx_req = &ssk->tx_ring[mseq & (SDP_TX_SIZE - 1)];
skb = tx_req->skb;
- dma_unmap_single(hwdev, tx_req->mapping[0], skb->len - skb->data_len,
- DMA_TO_DEVICE);
+ ib_dma_unmap_single(dev, tx_req->mapping[0], skb->len - skb->data_len,
+ DMA_TO_DEVICE);
frags = skb_shinfo(skb)->nr_frags;
for (i = 0; i < frags; ++i) {
- dma_unmap_page(hwdev, tx_req->mapping[i + 1],
- skb_shinfo(skb)->frags[i].size,
- DMA_TO_DEVICE);
+ ib_dma_unmap_page(dev, tx_req->mapping[i + 1],
+ skb_shinfo(skb)->frags[i].size,
+ DMA_TO_DEVICE);
}
++ssk->tx_tail;
@@ -157,7 +156,7 @@ static void sdp_post_recv(struct sdp_soc
struct sdp_buf *rx_req;
int i, rc, frags;
dma_addr_t addr;
- struct device *hwdev;
+ struct ib_device *dev;
struct ib_sge *sge;
struct ib_recv_wr *bad_wr;
struct sk_buff *skb;
@@ -188,11 +187,10 @@ static void sdp_post_recv(struct sdp_soc
rx_req = ssk->rx_ring + (id & (SDP_RX_SIZE - 1));
rx_req->skb = skb;
- hwdev = ssk->dma_device;
+ dev = ssk->mr->device;
sge = ssk->ibsge;
- addr = dma_map_single(hwdev, h, skb_headlen(skb),
- DMA_FROM_DEVICE);
- BUG_ON(dma_mapping_error(addr));
+ addr = ib_dma_map_single(dev, h, skb_headlen(skb), DMA_FROM_DEVICE);
+ BUG_ON(ib_dma_mapping_error(dev, addr));
rx_req->mapping[0] = addr;
@@ -203,11 +201,11 @@ static void sdp_post_recv(struct sdp_soc
frags = skb_shinfo(skb)->nr_frags;
for (i = 0; i < frags; ++i) {
++sge;
- addr = dma_map_page(hwdev, skb_shinfo(skb)->frags[i].page,
- skb_shinfo(skb)->frags[i].page_offset,
- skb_shinfo(skb)->frags[i].size,
- DMA_FROM_DEVICE);
- BUG_ON(dma_mapping_error(addr));
+ addr = ib_dma_map_page(dev, skb_shinfo(skb)->frags[i].page,
+ skb_shinfo(skb)->frags[i].page_offset,
+ skb_shinfo(skb)->frags[i].size,
+ DMA_FROM_DEVICE);
+ BUG_ON(ib_dma_mapping_error(dev, addr));
rx_req->mapping[i + 1] = addr;
sge->addr = addr;
sge->length = skb_shinfo(skb)->frags[i].size;
@@ -242,7 +240,7 @@ void sdp_post_recvs(struct sdp_sock *ssk
struct sk_buff *sdp_recv_completion(struct sdp_sock *ssk, int id)
{
struct sdp_buf *rx_req;
- struct device *hwdev;
+ struct ib_device *dev;
struct sk_buff *skb;
int i, frags;
@@ -252,16 +250,16 @@ struct sk_buff *sdp_recv_completion(stru
return NULL;
}
- hwdev = ssk->dma_device;
+ dev = ssk->mr->device;
rx_req = &ssk->rx_ring[id & (SDP_RX_SIZE - 1)];
skb = rx_req->skb;
- dma_unmap_single(hwdev, rx_req->mapping[0], skb_headlen(skb),
- DMA_FROM_DEVICE);
+ ib_dma_unmap_single(dev, rx_req->mapping[0], skb_headlen(skb),
+ DMA_FROM_DEVICE);
frags = skb_shinfo(skb)->nr_frags;
for (i = 0; i < frags; ++i)
- dma_unmap_page(hwdev, rx_req->mapping[i + 1],
- skb_shinfo(skb)->frags[i].size,
- DMA_TO_DEVICE);
+ ib_dma_unmap_page(dev, rx_req->mapping[i + 1],
+ skb_shinfo(skb)->frags[i].size,
+ DMA_TO_DEVICE);
++ssk->rx_tail;
--ssk->remote_credits;
return skb;
Index: src/linux-kernel/infiniband/ulp/sdp/sdp_cma.c
===================================================================
--- src/linux-kernel/infiniband/ulp/sdp/sdp_cma.c (revision 9441)
+++ src/linux-kernel/infiniband/ulp/sdp/sdp_cma.c (working copy)
@@ -159,7 +159,6 @@ int sdp_init_qp(struct sock *sk, struct
}
sdp_sk(sk)->cq = cq;
sdp_sk(sk)->qp = id->qp;
- sdp_sk(sk)->dma_device = device->dma_device;
init_waitqueue_head(&sdp_sk(sk)->wq);
Index: src/linux-kernel/infiniband/ulp/sdp/sdp.h
===================================================================
--- src/linux-kernel/infiniband/ulp/sdp/sdp.h (revision 9441)
+++ src/linux-kernel/infiniband/ulp/sdp/sdp.h (working copy)
@@ -79,7 +79,6 @@ struct sdp_sock {
struct ib_qp *qp;
struct ib_cq *cq;
struct ib_mr *mr;
- struct device *dma_device;
/* Like tcp_sock */
__u16 urg_data;
int offset; /* like seq in tcp */
More information about the general mailing list