[openfabrics-ewg] [PATCH] ehca backport openib_dma_addr_t_ofed_1.1_to_2_6_9.patch
Hoang-Nam Nguyen
hnguyen at de.ibm.com
Mon Sep 18 09:59:31 PDT 2006
Hi,
the patch below is required to run IPoIB, MAD, etc. on the ehca device driver
with kernel 2.6.9 on ppc64. The main changes replace dma_addr_t with
dma64_addr_t and dma_map/unmap_single() with dma64_map/unmap_single().
I have committed it to contrib/ibm/gen2/backport/2.6.9.
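For reference, the dma64 wrapper interface the patch relies on has the
following shape. This is a sketch only: the prototypes are inferred from the
call sites below (device, CPU address, length, direction), not copied from
the backport header, so treat the exact declarations as an assumption:

  /* Sketch: 64-bit variants of the streaming DMA mapping calls.
   * dma64_addr_t is the kernel's 64-bit DMA address type (defined
   * per-arch in <asm/types.h>); plain dma_addr_t on 2.6.9 ppc64 is
   * presumably too narrow for the bus addresses ehca produces,
   * hence the 64-bit wrappers. */
  dma64_addr_t dma64_map_single(struct device *dev, void *cpu_addr,
                                size_t size,
                                enum dma_data_direction direction);

  void dma64_unmap_single(struct device *dev, dma64_addr_t dma_addr,
                          size_t size,
                          enum dma_data_direction direction);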
Thanks!
Nam Nguyen
Signed-off-by: Hoang-Nam Nguyen <hnguyen at de.ibm.com>
---
core/mad.c | 79 +++++++++++++++++++++++++--------------------------
ulp/ipoib/ipoib.h | 6 ++-
ulp/ipoib/ipoib_ib.c | 58 ++++++++++++++++++-------------------
ulp/srp/ib_srp.c | 8 ++---
ulp/srp/ib_srp.h | 2 -
diff -Nurp linux-2.6.9_orig/drivers/infiniband/core/mad.c linux-2.6.9_work/drivers/infiniband/core/mad.c
--- linux-2.6.9_orig/drivers/infiniband/core/mad.c 2006-09-18 04:42:09.226902264 -0700
+++ linux-2.6.9_work/drivers/infiniband/core/mad.c 2006-09-18 04:43:45.976967928 -0700
@@ -999,16 +999,16 @@ int ib_send_mad(struct ib_mad_send_wr_pr
mad_agent = mad_send_wr->send_buf.mad_agent;
sge = mad_send_wr->sg_list;
- sge[0].addr = dma_map_single(mad_agent->device->dma_device,
- mad_send_wr->send_buf.mad,
- sge[0].length,
- DMA_TO_DEVICE);
+ sge[0].addr = dma64_map_single(mad_agent->device->dma_device,
+ mad_send_wr->send_buf.mad,
+ sge[0].length,
+ DMA_TO_DEVICE);
pci_unmap_addr_set(mad_send_wr, header_mapping, sge[0].addr);
- sge[1].addr = dma_map_single(mad_agent->device->dma_device,
- ib_get_payload(mad_send_wr),
- sge[1].length,
- DMA_TO_DEVICE);
+ sge[1].addr = dma64_map_single(mad_agent->device->dma_device,
+ ib_get_payload(mad_send_wr),
+ sge[1].length,
+ DMA_TO_DEVICE);
pci_unmap_addr_set(mad_send_wr, payload_mapping, sge[1].addr);
spin_lock_irqsave(&qp_info->send_queue.lock, flags);
@@ -1027,12 +1027,12 @@ int ib_send_mad(struct ib_mad_send_wr_pr
}
spin_unlock_irqrestore(&qp_info->send_queue.lock, flags);
if (ret) {
- dma_unmap_single(mad_agent->device->dma_device,
- pci_unmap_addr(mad_send_wr, header_mapping),
- sge[0].length, DMA_TO_DEVICE);
- dma_unmap_single(mad_agent->device->dma_device,
- pci_unmap_addr(mad_send_wr, payload_mapping),
- sge[1].length, DMA_TO_DEVICE);
+ dma64_unmap_single(mad_agent->device->dma_device,
+ pci_unmap_addr(mad_send_wr, header_mapping),
+ sge[0].length, DMA_TO_DEVICE);
+ dma64_unmap_single(mad_agent->device->dma_device,
+ pci_unmap_addr(mad_send_wr, payload_mapping),
+ sge[1].length, DMA_TO_DEVICE);
}
return ret;
}
@@ -1851,11 +1851,11 @@ static void ib_mad_recv_done_handler(str
mad_priv_hdr = container_of(mad_list, struct ib_mad_private_header,
mad_list);
recv = container_of(mad_priv_hdr, struct ib_mad_private, header);
- dma_unmap_single(port_priv->device->dma_device,
- pci_unmap_addr(&recv->header, mapping),
- sizeof(struct ib_mad_private) -
- sizeof(struct ib_mad_private_header),
- DMA_FROM_DEVICE);
+ dma64_unmap_single(port_priv->device->dma_device,
+ pci_unmap_addr(&recv->header, mapping),
+ sizeof(struct ib_mad_private) -
+ sizeof(struct ib_mad_private_header),
+ DMA_FROM_DEVICE);
/* Setup MAD receive work completion from "normal" work completion */
recv->header.wc = *wc;
@@ -2081,12 +2081,12 @@ static void ib_mad_send_done_handler(str
qp_info = send_queue->qp_info;
retry:
- dma_unmap_single(mad_send_wr->send_buf.mad_agent->device->dma_device,
- pci_unmap_addr(mad_send_wr, header_mapping),
- mad_send_wr->sg_list[0].length, DMA_TO_DEVICE);
- dma_unmap_single(mad_send_wr->send_buf.mad_agent->device->dma_device,
- pci_unmap_addr(mad_send_wr, payload_mapping),
- mad_send_wr->sg_list[1].length, DMA_TO_DEVICE);
+ dma64_unmap_single(mad_send_wr->send_buf.mad_agent->device->dma_device,
+ pci_unmap_addr(mad_send_wr, header_mapping),
+ mad_send_wr->sg_list[0].length, DMA_TO_DEVICE);
+ dma64_unmap_single(mad_send_wr->send_buf.mad_agent->device->dma_device,
+ pci_unmap_addr(mad_send_wr, payload_mapping),
+ mad_send_wr->sg_list[1].length, DMA_TO_DEVICE);
queued_send_wr = NULL;
spin_lock_irqsave(&send_queue->lock, flags);
list_del(&mad_list->list);
@@ -2527,12 +2527,12 @@ static int ib_mad_post_receive_mads(stru
break;
}
}
- sg_list.addr = dma_map_single(qp_info->port_priv->
+ sg_list.addr = dma64_map_single(qp_info->port_priv->
device->dma_device,
- &mad_priv->grh,
- sizeof *mad_priv -
+ &mad_priv->grh,
+ sizeof *mad_priv -
sizeof mad_priv->header,
- DMA_FROM_DEVICE);
+ DMA_FROM_DEVICE);
pci_unmap_addr_set(&mad_priv->header, mapping, sg_list.addr);
recv_wr.wr_id = (unsigned long)&mad_priv->header.mad_list;
mad_priv->header.mad_list.mad_queue = recv_queue;
@@ -2548,12 +2548,12 @@ static int ib_mad_post_receive_mads(stru
list_del(&mad_priv->header.mad_list.list);
recv_queue->count--;
spin_unlock_irqrestore(&recv_queue->lock, flags);
- dma_unmap_single(qp_info->port_priv->device->dma_device,
- pci_unmap_addr(&mad_priv->header,
- mapping),
- sizeof *mad_priv -
+ dma64_unmap_single(qp_info->port_priv->device->dma_device,
+ pci_unmap_addr(&mad_priv->header,
+ mapping),
+ sizeof *mad_priv -
sizeof mad_priv->header,
- DMA_FROM_DEVICE);
+ DMA_FROM_DEVICE);
kmem_cache_free(ib_mad_cache, mad_priv);
printk(KERN_ERR PFX "ib_post_recv failed: %d\n", ret);
break;
@@ -2585,11 +2585,11 @@ static void cleanup_recv_queue(struct ib
/* Remove from posted receive MAD list */
list_del(&mad_list->list);
- dma_unmap_single(qp_info->port_priv->device->dma_device,
- pci_unmap_addr(&recv->header, mapping),
- sizeof(struct ib_mad_private) -
- sizeof(struct ib_mad_private_header),
- DMA_FROM_DEVICE);
+ dma64_unmap_single(qp_info->port_priv->device->dma_device,
+ pci_unmap_addr(&recv->header, mapping),
+ sizeof(struct ib_mad_private) -
+ sizeof(struct ib_mad_private_header),
+ DMA_FROM_DEVICE);
kmem_cache_free(ib_mad_cache, recv);
}
@@ -2992,4 +2992,3 @@ static void __exit ib_mad_cleanup_module
module_init(ib_mad_init_module);
module_exit(ib_mad_cleanup_module);
-
diff -Nurp linux-2.6.9_orig/drivers/infiniband/ulp/ipoib/ipoib.h linux-2.6.9_work/drivers/infiniband/ulp/ipoib/ipoib.h
--- linux-2.6.9_orig/drivers/infiniband/ulp/ipoib/ipoib.h 2006-09-18 04:42:09.129917008 -0700
+++ linux-2.6.9_work/drivers/infiniband/ulp/ipoib/ipoib.h 2006-09-18 04:43:44.997984584 -0700
@@ -46,6 +46,8 @@
#include <linux/if_infiniband.h>
#include <linux/mutex.h>
+#include <linux/version.h>
+
#include <net/neighbour.h>
#include <asm/atomic.h>
@@ -105,12 +107,12 @@ struct ipoib_mcast;
struct ipoib_rx_buf {
struct sk_buff *skb;
- dma_addr_t mapping;
+ dma64_addr_t mapping;
};
struct ipoib_tx_buf {
struct sk_buff *skb;
- DECLARE_PCI_UNMAP_ADDR(mapping)
+ dma64_addr_t mapping;
};
/*
diff -Nurp linux-2.6.9_orig/drivers/infiniband/ulp/ipoib/ipoib_ib.c linux-2.6.9_work/drivers/infiniband/ulp/ipoib/ipoib_ib.c
--- linux-2.6.9_orig/drivers/infiniband/ulp/ipoib/ipoib_ib.c 2006-09-18 04:42:09.128917160 -0700
+++ linux-2.6.9_work/drivers/infiniband/ulp/ipoib/ipoib_ib.c 2006-09-18 04:43:44.996984736 -0700
@@ -109,9 +109,9 @@ static int ipoib_ib_post_receive(struct
ret = ib_post_recv(priv->qp, &param, &bad_wr);
if (unlikely(ret)) {
ipoib_warn(priv, "receive failed for buf %d (%d)\n", id, ret);
- dma_unmap_single(priv->ca->dma_device,
- priv->rx_ring[id].mapping,
- IPOIB_BUF_SIZE, DMA_FROM_DEVICE);
+ dma64_unmap_single(priv->ca->dma_device,
+ priv->rx_ring[id].mapping,
+ IPOIB_BUF_SIZE, DMA_FROM_DEVICE);
dev_kfree_skb_any(priv->rx_ring[id].skb);
priv->rx_ring[id].skb = NULL;
}
@@ -123,7 +123,7 @@ static int ipoib_alloc_rx_skb(struct net
{
struct ipoib_dev_priv *priv = netdev_priv(dev);
struct sk_buff *skb;
- dma_addr_t addr;
+ dma64_addr_t addr;
skb = dev_alloc_skb(IPOIB_BUF_SIZE + 4);
if (!skb)
@@ -136,9 +136,9 @@ static int ipoib_alloc_rx_skb(struct net
*/
skb_reserve(skb, 4);
- addr = dma_map_single(priv->ca->dma_device,
- skb->data, IPOIB_BUF_SIZE,
- DMA_FROM_DEVICE);
+ addr = dma64_map_single(priv->ca->dma_device,
+ skb->data, IPOIB_BUF_SIZE,
+ DMA_FROM_DEVICE);
if (unlikely(dma_mapping_error(addr))) {
dev_kfree_skb_any(skb);
return -EIO;
@@ -183,15 +183,15 @@ static void ipoib_ib_handle_wc(struct ne
if (wr_id < ipoib_recvq_size) {
struct sk_buff *skb = priv->rx_ring[wr_id].skb;
- dma_addr_t addr = priv->rx_ring[wr_id].mapping;
+ dma64_addr_t addr = priv->rx_ring[wr_id].mapping;
if (unlikely(wc->status != IB_WC_SUCCESS)) {
if (wc->status != IB_WC_WR_FLUSH_ERR)
ipoib_warn(priv, "failed recv event "
"(status=%d, wrid=%d vend_err %x)\n",
wc->status, wr_id, wc->vendor_err);
- dma_unmap_single(priv->ca->dma_device, addr,
- IPOIB_BUF_SIZE, DMA_FROM_DEVICE);
+ dma64_unmap_single(priv->ca->dma_device, addr,
+ IPOIB_BUF_SIZE, DMA_FROM_DEVICE);
dev_kfree_skb_any(skb);
priv->rx_ring[wr_id].skb = NULL;
return;
@@ -209,8 +209,8 @@ static void ipoib_ib_handle_wc(struct ne
ipoib_dbg_data(priv, "received %d bytes, SLID 0x%04x\n",
wc->byte_len, wc->slid);
- dma_unmap_single(priv->ca->dma_device, addr,
- IPOIB_BUF_SIZE, DMA_FROM_DEVICE);
+ dma64_unmap_single(priv->ca->dma_device, addr,
+ IPOIB_BUF_SIZE, DMA_FROM_DEVICE);
skb_put(skb, wc->byte_len);
skb_pull(skb, IB_GRH_BYTES);
@@ -256,10 +256,10 @@ static void ipoib_ib_handle_wc(struct ne
tx_req = &priv->tx_ring[wr_id];
- dma_unmap_single(priv->ca->dma_device,
- pci_unmap_addr(tx_req, mapping),
- tx_req->skb->len,
- DMA_TO_DEVICE);
+ dma64_unmap_single(priv->ca->dma_device,
+ pci_unmap_addr(tx_req, mapping),
+ tx_req->skb->len,
+ DMA_TO_DEVICE);
++priv->stats.tx_packets;
priv->stats.tx_bytes += tx_req->skb->len;
@@ -299,7 +299,7 @@ void ipoib_ib_completion(struct ib_cq *c
static inline int post_send(struct ipoib_dev_priv *priv,
unsigned int wr_id,
struct ib_ah *address, u32 qpn,
- dma_addr_t addr, int len)
+ dma64_addr_t addr, int len)
{
struct ib_send_wr *bad_wr;
@@ -318,7 +318,7 @@ void ipoib_send(struct net_device *dev,
{
struct ipoib_dev_priv *priv = netdev_priv(dev);
struct ipoib_tx_buf *tx_req;
- dma_addr_t addr;
+ dma64_addr_t addr;
if (skb->len > dev->mtu + INFINIBAND_ALEN) {
ipoib_warn(priv, "packet len %d (> %d) too long to send, dropping\n",
@@ -341,16 +341,16 @@ void ipoib_send(struct net_device *dev,
*/
tx_req = &priv->tx_ring[priv->tx_head & (ipoib_sendq_size - 1)];
tx_req->skb = skb;
- addr = dma_map_single(priv->ca->dma_device, skb->data, skb->len,
- DMA_TO_DEVICE);
+ addr = dma64_map_single(priv->ca->dma_device, skb->data, skb->len,
+ DMA_TO_DEVICE);
pci_unmap_addr_set(tx_req, mapping, addr);
if (unlikely(post_send(priv, priv->tx_head & (ipoib_sendq_size - 1),
address->ah, qpn, addr, skb->len))) {
ipoib_warn(priv, "post_send failed\n");
++priv->stats.tx_errors;
- dma_unmap_single(priv->ca->dma_device, addr, skb->len,
- DMA_TO_DEVICE);
+ dma64_unmap_single(priv->ca->dma_device, addr, skb->len,
+ DMA_TO_DEVICE);
dev_kfree_skb_any(skb);
} else {
dev->trans_start = jiffies;
@@ -520,19 +520,19 @@ int ipoib_ib_dev_stop(struct net_device
while ((int) priv->tx_tail - (int) priv->tx_head < 0) {
tx_req = &priv->tx_ring[priv->tx_tail &
(ipoib_sendq_size - 1)];
- dma_unmap_single(priv->ca->dma_device,
- pci_unmap_addr(tx_req, mapping),
- tx_req->skb->len,
- DMA_TO_DEVICE);
+ dma64_unmap_single(priv->ca->dma_device,
+ pci_unmap_addr(tx_req, mapping),
+ tx_req->skb->len,
+ DMA_TO_DEVICE);
dev_kfree_skb_any(tx_req->skb);
++priv->tx_tail;
}
for (i = 0; i < ipoib_recvq_size; ++i)
if (priv->rx_ring[i].skb) {
- dma_unmap_single(priv->ca->dma_device,
- pci_unmap_addr(&priv->rx_ring[i],
- mapping),
+ dma64_unmap_single(priv->ca->dma_device,
+ pci_unmap_addr(&priv->rx_ring[i],
+ mapping),
IPOIB_BUF_SIZE,
DMA_FROM_DEVICE);
dev_kfree_skb_any(priv->rx_ring[i].skb);
diff -Nurp linux-2.6.9_orig/drivers/infiniband/ulp/srp/ib_srp.c linux-2.6.9_work/drivers/infiniband/ulp/srp/ib_srp.c
--- linux-2.6.9_orig/drivers/infiniband/ulp/srp/ib_srp.c 2006-09-18 04:42:09.139915488 -0700
+++ linux-2.6.9_work/drivers/infiniband/ulp/srp/ib_srp.c 2006-09-18 04:43:45.233948712 -0700
@@ -120,8 +120,8 @@ static struct srp_iu *srp_alloc_iu(struc
if (!iu->buf)
goto out_free_iu;
- iu->dma = dma_map_single(host->dev->dev->dma_device,
- iu->buf, size, direction);
+ iu->dma = dma64_map_single(host->dev->dev->dma_device,
+ iu->buf, size, direction);
if (dma_mapping_error(iu->dma))
goto out_free_buf;
@@ -143,8 +143,8 @@ static void srp_free_iu(struct srp_host
if (!iu)
return;
- dma_unmap_single(host->dev->dev->dma_device,
- iu->dma, iu->size, iu->direction);
+ dma64_unmap_single(host->dev->dev->dma_device,
+ iu->dma, iu->size, iu->direction);
kfree(iu->buf);
kfree(iu);
}
diff -Nurp linux-2.6.9_orig/drivers/infiniband/ulp/srp/ib_srp.h linux-2.6.9_work/drivers/infiniband/ulp/srp/ib_srp.h
--- linux-2.6.9_orig/drivers/infiniband/ulp/srp/ib_srp.h 2006-09-18 04:42:09.138915640 -0700
+++ linux-2.6.9_work/drivers/infiniband/ulp/srp/ib_srp.h 2006-09-18 04:43:45.229949320 -0700
@@ -161,7 +161,7 @@ struct srp_target_port {
};
struct srp_iu {
- dma_addr_t dma;
+ dma64_addr_t dma;
void *buf;
size_t size;
enum dma_data_direction direction;