[ewg] [PATCH 1/10 VNIC] DMA interface changes to support iPath

Ramachandra K ramachandra.kuchimanchi at qlogic.com
Tue Aug 21 07:22:43 PDT 2007


Use the ib_dma interface for compatibility with ipath driver.

From: Poornima Kamath <poornima.kamath at qlogic.com>
Signed-off-by: Ramachandra K <ramachandra.kuchimanchi at qlogic.com>

---

 drivers/infiniband/ulp/vnic/vnic_control.c |  352 ++++++++++++++--------------
 drivers/infiniband/ulp/vnic/vnic_data.c    |  192 ++++++++-------
 2 files changed, 276 insertions(+), 268 deletions(-)

diff --git a/drivers/infiniband/ulp/vnic/vnic_control.c b/drivers/infiniband/ulp/vnic/vnic_control.c
index a199380..02e8fa5 100644
--- a/drivers/infiniband/ulp/vnic/vnic_control.c
+++ b/drivers/infiniband/ulp/vnic/vnic_control.c
@@ -78,9 +78,9 @@ static void control_recv_complete(struct
 	CONTROL_FUNCTION("%s: control_recv_complete()\n",
 			 control_ifcfg_name(control));
 
-	dma_sync_single_for_cpu(control->parent->config->ibdev->dma_device,
-				control->recv_dma, control->recv_len,
-				DMA_FROM_DEVICE);
+	ib_dma_sync_single_for_cpu(control->parent->config->ibdev,
+				   control->recv_dma, control->recv_len,
+				   DMA_FROM_DEVICE);
 	control_note_rsptime_stats(&response_time);
 	CONTROL_PACKET(pkt);
 	spin_lock_irqsave(&control->io_lock, flags);
@@ -110,9 +110,9 @@ static void control_recv_complete(struct
 		spin_unlock_irqrestore(&control->io_lock, flags);
 		viport_kick(control->parent);
 	}
-	dma_sync_single_for_device(control->parent->config->ibdev->dma_device,
-				   control->recv_dma, control->recv_len,
-				   DMA_FROM_DEVICE);
+	ib_dma_sync_single_for_device(control->parent->config->ibdev,
+				      control->recv_dma, control->recv_len,
+				      DMA_FROM_DEVICE);
 }
 
 static void control_timeout(unsigned long data)
@@ -196,9 +196,9 @@ void control_process_async(struct contro
 
 	CONTROL_FUNCTION("%s: control_process_async()\n",
 			 control_ifcfg_name(control));
-	dma_sync_single_for_cpu(control->parent->config->ibdev->dma_device,
-				control->recv_dma, control->recv_len,
-				DMA_FROM_DEVICE);
+	ib_dma_sync_single_for_cpu(control->parent->config->ibdev,
+				   control->recv_dma, control->recv_len,
+				   DMA_FROM_DEVICE);
 
 	spin_lock_irqsave(&control->io_lock, flags);
 	recv_io = control->info;
@@ -262,9 +262,9 @@ void control_process_async(struct contro
 		spin_lock_irqsave(&control->io_lock, flags);
 	}
 	spin_unlock_irqrestore(&control->io_lock, flags);
-	dma_sync_single_for_device(control->parent->config->ibdev->dma_device,
-				   control->recv_dma, control->recv_len,
-				   DMA_FROM_DEVICE);
+	ib_dma_sync_single_for_device(control->parent->config->ibdev,
+				      control->recv_dma, control->recv_len,
+				      DMA_FROM_DEVICE);
 
 	CONTROL_INFO("%s: done control_process_async\n",
 		     control_ifcfg_name(control));
@@ -340,9 +340,9 @@ int control_init_vnic_req(struct control
 	struct vnic_control_packet	*pkt;
 	struct vnic_cmd_init_vnic_req	*init_vnic_req;
 
-	dma_sync_single_for_cpu(control->parent->config->ibdev->dma_device,
-				control->send_dma, control->send_len,
-				DMA_TO_DEVICE);
+	ib_dma_sync_single_for_cpu(control->parent->config->ibdev,
+				   control->send_dma, control->send_len,
+				   DMA_TO_DEVICE);
 
 	send_io = control_init_hdr(control, CMD_INIT_VNIC);
 	if (!send_io)
@@ -363,15 +363,15 @@ int control_init_vnic_req(struct control
 
 	control->rsp_expected = pkt->hdr.pkt_cmd;
 
-	dma_sync_single_for_device(control->parent->config->ibdev->dma_device,
-				   control->send_dma, control->send_len,
-				   DMA_TO_DEVICE);
+	ib_dma_sync_single_for_device(control->parent->config->ibdev,
+				      control->send_dma, control->send_len,
+				      DMA_TO_DEVICE);
 
 	return control_send(control, send_io);
 failure:
-	dma_sync_single_for_device(control->parent->config->ibdev->dma_device,
-				   control->send_dma, control->send_len,
-				   DMA_TO_DEVICE);
+	ib_dma_sync_single_for_device(control->parent->config->ibdev,
+				      control->send_dma, control->send_len,
+				      DMA_TO_DEVICE);
 	return -1;
 }
 
@@ -434,9 +434,9 @@ int control_init_vnic_rsp(struct control
 
 	CONTROL_FUNCTION("%s: control_init_vnic_rsp()\n",
 			 control_ifcfg_name(control));
-	dma_sync_single_for_cpu(control->parent->config->ibdev->dma_device,
-				control->recv_dma, control->recv_len,
-				DMA_FROM_DEVICE);
+	ib_dma_sync_single_for_cpu(control->parent->config->ibdev,
+				   control->recv_dma, control->recv_len,
+				   DMA_FROM_DEVICE);
 
 	recv_io = control_get_rsp(control);
 	if (!recv_io)
@@ -479,16 +479,16 @@ int control_init_vnic_rsp(struct control
 	       ETH_ALEN);
 
 	control_recv(control, recv_io);
-	dma_sync_single_for_device(control->parent->config->ibdev->dma_device,
-				   control->recv_dma, control->recv_len,
-				   DMA_FROM_DEVICE);
+	ib_dma_sync_single_for_device(control->parent->config->ibdev,
+				      control->recv_dma, control->recv_len,
+				      DMA_FROM_DEVICE);
 	return 0;
 failure:
 	viport_failure(control->parent);
 out:
-	dma_sync_single_for_device(control->parent->config->ibdev->dma_device,
-				   control->recv_dma, control->recv_len,
-				   DMA_FROM_DEVICE);
+	ib_dma_sync_single_for_device(control->parent->config->ibdev,
+				      control->recv_dma, control->recv_len,
+				      DMA_FROM_DEVICE);
 	return -1;
 }
 
@@ -622,9 +622,9 @@ int control_config_data_path_req(struct
 
 	CONTROL_FUNCTION("%s: control_config_data_path_req()\n",
 			 control_ifcfg_name(control));
-	dma_sync_single_for_cpu(control->parent->config->ibdev->dma_device,
-				control->send_dma, control->send_len,
-				DMA_TO_DEVICE);
+	ib_dma_sync_single_for_cpu(control->parent->config->ibdev,
+				   control->send_dma, control->send_len,
+				   DMA_TO_DEVICE);
 
 	send_io = control_init_hdr(control, CMD_CONFIG_DATA_PATH);
 	if (!send_io)
@@ -642,15 +642,15 @@ int control_config_data_path_req(struct
 
 	control->rsp_expected = pkt->hdr.pkt_cmd;
 
-	dma_sync_single_for_device(control->parent->config->ibdev->dma_device,
-				   control->send_dma, control->send_len,
-				   DMA_TO_DEVICE);
+	ib_dma_sync_single_for_device(control->parent->config->ibdev,
+				      control->send_dma, control->send_len,
+				      DMA_TO_DEVICE);
 
 	return control_send(control, send_io);
 failure:
-	dma_sync_single_for_device(control->parent->config->ibdev->dma_device,
-				   control->send_dma, control->send_len,
-				   DMA_TO_DEVICE);
+	ib_dma_sync_single_for_device(control->parent->config->ibdev,
+				      control->send_dma, control->send_len,
+				      DMA_TO_DEVICE);
 	return -1;
 }
 
@@ -668,9 +668,9 @@ int control_config_data_path_rsp(struct
 
 	CONTROL_FUNCTION("%s: control_config_data_path_rsp()\n",
 			 control_ifcfg_name(control));
-	dma_sync_single_for_cpu(control->parent->config->ibdev->dma_device,
-				control->recv_dma, control->recv_len,
-				DMA_FROM_DEVICE);
+	ib_dma_sync_single_for_cpu(control->parent->config->ibdev,
+				   control->recv_dma, control->recv_len,
+				   DMA_FROM_DEVICE);
 
 	recv_io = control_get_rsp(control);
 	if (!recv_io)
@@ -706,17 +706,17 @@ int control_config_data_path_rsp(struct
 	}
 
 	control_recv(control, recv_io);
-	dma_sync_single_for_device(control->parent->config->ibdev->dma_device,
-				   control->recv_dma, control->recv_len,
-				   DMA_FROM_DEVICE);
+	ib_dma_sync_single_for_device(control->parent->config->ibdev,
+				      control->recv_dma, control->recv_len,
+				      DMA_FROM_DEVICE);
 
 	return 0;
 failure:
 	viport_failure(control->parent);
 out:
-	dma_sync_single_for_device(control->parent->config->ibdev->dma_device,
-				   control->recv_dma, control->recv_len,
-				   DMA_FROM_DEVICE);
+	ib_dma_sync_single_for_device(control->parent->config->ibdev,
+				      control->recv_dma, control->recv_len,
+				      DMA_FROM_DEVICE);
 	return -1;
 }
 
@@ -728,9 +728,9 @@ int control_exchange_pools_req(struct co
 
 	CONTROL_FUNCTION("%s: control_exchange_pools_req()\n",
 			 control_ifcfg_name(control));
-	dma_sync_single_for_cpu(control->parent->config->ibdev->dma_device,
-				control->send_dma, control->send_len,
-				DMA_TO_DEVICE);
+	ib_dma_sync_single_for_cpu(control->parent->config->ibdev,
+				   control->send_dma, control->send_len,
+				   DMA_TO_DEVICE);
 
 	send_io = control_init_hdr(control, CMD_EXCHANGE_POOLS);
 	if (!send_io)
@@ -744,14 +744,14 @@ int control_exchange_pools_req(struct co
 
 	control->rsp_expected = pkt->hdr.pkt_cmd;
 
-	dma_sync_single_for_device(control->parent->config->ibdev->dma_device,
-				   control->send_dma, control->send_len,
-				   DMA_TO_DEVICE);
+	ib_dma_sync_single_for_device(control->parent->config->ibdev,
+				      control->send_dma, control->send_len,
+				      DMA_TO_DEVICE);
 	return control_send(control, send_io);
 failure:
-	dma_sync_single_for_device(control->parent->config->ibdev->dma_device,
-				   control->send_dma, control->send_len,
-				   DMA_TO_DEVICE);
+	ib_dma_sync_single_for_device(control->parent->config->ibdev,
+				      control->send_dma, control->send_len,
+				      DMA_TO_DEVICE);
 	return -1;
 }
 
@@ -764,9 +764,9 @@ int control_exchange_pools_rsp(struct co
 
 	CONTROL_FUNCTION("%s: control_exchange_pools_rsp()\n",
 			 control_ifcfg_name(control));
-	dma_sync_single_for_cpu(control->parent->config->ibdev->dma_device,
-				control->recv_dma, control->recv_len,
-				DMA_FROM_DEVICE);
+	ib_dma_sync_single_for_cpu(control->parent->config->ibdev,
+				   control->recv_dma, control->recv_len,
+				   DMA_FROM_DEVICE);
 
 	recv_io = control_get_rsp(control);
 	if (!recv_io)
@@ -796,16 +796,16 @@ int control_exchange_pools_rsp(struct co
 	}
 
 	control_recv(control, recv_io);
-	dma_sync_single_for_device(control->parent->config->ibdev->dma_device,
-				   control->recv_dma, control->recv_len,
-				   DMA_FROM_DEVICE);
+	ib_dma_sync_single_for_device(control->parent->config->ibdev,
+				      control->recv_dma, control->recv_len,
+				      DMA_FROM_DEVICE);
 	return 0;
 failure:
 	viport_failure(control->parent);
 out:
-	dma_sync_single_for_device(control->parent->config->ibdev->dma_device,
-				   control->recv_dma, control->recv_len,
-				   DMA_FROM_DEVICE);
+	ib_dma_sync_single_for_device(control->parent->config->ibdev,
+				      control->recv_dma, control->recv_len,
+				      DMA_FROM_DEVICE);
 	return -1;
 }
 
@@ -817,9 +817,9 @@ int control_config_link_req(struct contr
 
 	CONTROL_FUNCTION("%s: control_config_link_req()\n",
 			 control_ifcfg_name(control));
-	dma_sync_single_for_cpu(control->parent->config->ibdev->dma_device,
-				control->send_dma, control->send_len,
-				DMA_TO_DEVICE);
+	ib_dma_sync_single_for_cpu(control->parent->config->ibdev,
+				   control->send_dma, control->send_len,
+				   DMA_TO_DEVICE);
 
 	send_io = control_init_hdr(control, CMD_CONFIG_LINK);
 	if (!send_io)
@@ -853,14 +853,14 @@ int control_config_link_req(struct contr
 	config_link_req->mtu_size = cpu_to_be16(mtu);
 
 	control->rsp_expected = pkt->hdr.pkt_cmd;
-	dma_sync_single_for_device(control->parent->config->ibdev->dma_device,
-				   control->send_dma, control->send_len,
-				   DMA_TO_DEVICE);
+	ib_dma_sync_single_for_device(control->parent->config->ibdev,
+				      control->send_dma, control->send_len,
+				      DMA_TO_DEVICE);
 	return control_send(control, send_io);
 failure:
-	dma_sync_single_for_device(control->parent->config->ibdev->dma_device,
-				   control->send_dma, control->send_len,
-				   DMA_TO_DEVICE);
+	ib_dma_sync_single_for_device(control->parent->config->ibdev,
+				      control->send_dma, control->send_len,
+				      DMA_TO_DEVICE);
 	return -1;
 }
 
@@ -873,9 +873,9 @@ int control_config_link_rsp(struct contr
 
 	CONTROL_FUNCTION("%s: control_config_link_rsp()\n",
 			 control_ifcfg_name(control));
-	dma_sync_single_for_cpu(control->parent->config->ibdev->dma_device,
-				control->recv_dma, control->recv_len,
-				DMA_FROM_DEVICE);
+	ib_dma_sync_single_for_cpu(control->parent->config->ibdev,
+				   control->recv_dma, control->recv_len,
+				   DMA_FROM_DEVICE);
 
 	recv_io = control_get_rsp(control);
 	if (!recv_io)
@@ -902,16 +902,16 @@ int control_config_link_rsp(struct contr
 	*mtu = be16_to_cpu(config_link_rsp->mtu_size);
 
 	control_recv(control, recv_io);
-	dma_sync_single_for_device(control->parent->config->ibdev->dma_device,
-				   control->recv_dma, control->recv_len,
-				   DMA_FROM_DEVICE);
+	ib_dma_sync_single_for_device(control->parent->config->ibdev,
+				      control->recv_dma, control->recv_len,
+				      DMA_FROM_DEVICE);
 	return 0;
 failure:
 	viport_failure(control->parent);
 out:
-	dma_sync_single_for_device(control->parent->config->ibdev->dma_device,
-				   control->recv_dma, control->recv_len,
-				   DMA_FROM_DEVICE);
+	ib_dma_sync_single_for_device(control->parent->config->ibdev,
+				      control->recv_dma, control->recv_len,
+				      DMA_FROM_DEVICE);
 	return -1;
 }
 
@@ -934,9 +934,9 @@ int control_config_addrs_req(struct cont
 
 	CONTROL_FUNCTION("%s: control_config_addrs_req()\n",
 			 control_ifcfg_name(control));
-	dma_sync_single_for_cpu(control->parent->config->ibdev->dma_device,
-				control->send_dma, control->send_len,
-				DMA_TO_DEVICE);
+	ib_dma_sync_single_for_cpu(control->parent->config->ibdev,
+				   control->send_dma, control->send_len,
+				   DMA_TO_DEVICE);
 
 	send_io = control_init_hdr(control, CMD_CONFIG_ADDRESSES);
 	if (!send_io)
@@ -968,17 +968,17 @@ int control_config_addrs_req(struct cont
 	config_addrs_req->num_address_ops = j;
 
 	control->rsp_expected = pkt->hdr.pkt_cmd;
-	dma_sync_single_for_device(control->parent->config->ibdev->dma_device,
-				   control->send_dma, control->send_len,
-				   DMA_TO_DEVICE);
+	ib_dma_sync_single_for_device(control->parent->config->ibdev,
+				      control->send_dma, control->send_len,
+				      DMA_TO_DEVICE);
 
 	if (control_send(control, send_io))
 		return -1;
 	return ret;
 failure:
-	dma_sync_single_for_device(control->parent->config->ibdev->dma_device,
-				   control->send_dma, control->send_len,
-				   DMA_TO_DEVICE);
+	ib_dma_sync_single_for_device(control->parent->config->ibdev,
+				      control->send_dma, control->send_len,
+				      DMA_TO_DEVICE);
 	return -1;
 }
 
@@ -990,9 +990,9 @@ int control_config_addrs_rsp(struct cont
 
 	CONTROL_FUNCTION("%s: control_config_addrs_rsp()\n",
 			 control_ifcfg_name(control));
-	dma_sync_single_for_cpu(control->parent->config->ibdev->dma_device,
-				control->recv_dma, control->recv_len,
-				DMA_FROM_DEVICE);
+	ib_dma_sync_single_for_cpu(control->parent->config->ibdev,
+				   control->recv_dma, control->recv_len,
+				   DMA_FROM_DEVICE);
 
 	recv_io = control_get_rsp(control);
 	if (!recv_io)
@@ -1011,16 +1011,16 @@ int control_config_addrs_rsp(struct cont
 	config_addrs_rsp = &pkt->cmd.config_addresses_rsp;
 
 	control_recv(control, recv_io);
-	dma_sync_single_for_device(control->parent->config->ibdev->dma_device,
-				   control->recv_dma, control->recv_len,
-				   DMA_FROM_DEVICE);
+	ib_dma_sync_single_for_device(control->parent->config->ibdev,
+				      control->recv_dma, control->recv_len,
+				      DMA_FROM_DEVICE);
 	return 0;
 failure:
 	viport_failure(control->parent);
 out:
-	dma_sync_single_for_device(control->parent->config->ibdev->dma_device,
-				   control->recv_dma, control->recv_len,
-				   DMA_FROM_DEVICE);
+	ib_dma_sync_single_for_device(control->parent->config->ibdev,
+				      control->recv_dma, control->recv_len,
+				      DMA_FROM_DEVICE);
 	return -1;
 }
 
@@ -1032,9 +1032,9 @@ int control_report_statistics_req(struct
 
 	CONTROL_FUNCTION("%s: control_report_statistics_req()\n",
 			 control_ifcfg_name(control));
-	dma_sync_single_for_cpu(control->parent->config->ibdev->dma_device,
-				control->send_dma, control->send_len,
-				DMA_TO_DEVICE);
+	ib_dma_sync_single_for_cpu(control->parent->config->ibdev,
+				   control->send_dma, control->send_len,
+				   DMA_TO_DEVICE);
 
 	send_io = control_init_hdr(control, CMD_REPORT_STATISTICS);
 	if (!send_io)
@@ -1046,14 +1046,14 @@ int control_report_statistics_req(struct
 	    control->lan_switch.lan_switch_num;
 
 	control->rsp_expected = pkt->hdr.pkt_cmd;
-	dma_sync_single_for_device(control->parent->config->ibdev->dma_device,
-				   control->send_dma, control->send_len,
-				   DMA_TO_DEVICE);
+	ib_dma_sync_single_for_device(control->parent->config->ibdev,
+				      control->send_dma, control->send_len,
+				      DMA_TO_DEVICE);
 	return control_send(control, send_io);
 failure:
-	dma_sync_single_for_device(control->parent->config->ibdev->dma_device,
-				   control->send_dma, control->send_len,
-				   DMA_TO_DEVICE);
+	ib_dma_sync_single_for_device(control->parent->config->ibdev,
+				      control->send_dma, control->send_len,
+				      DMA_TO_DEVICE);
 	return -1;
 }
 
@@ -1066,9 +1066,9 @@ int control_report_statistics_rsp(struct
 
 	CONTROL_FUNCTION("%s: control_report_statistics_rsp()\n",
 			 control_ifcfg_name(control));
-	dma_sync_single_for_cpu(control->parent->config->ibdev->dma_device,
-				control->recv_dma, control->recv_len,
-				DMA_FROM_DEVICE);
+	ib_dma_sync_single_for_cpu(control->parent->config->ibdev,
+				   control->recv_dma, control->recv_len,
+				   DMA_FROM_DEVICE);
 
 	recv_io = control_get_rsp(control);
 	if (!recv_io)
@@ -1111,17 +1111,17 @@ int control_report_statistics_rsp(struct
 	stats->ethernet_status	      = rep_stat_rsp->ethernet_status;
 
 	control_recv(control, recv_io);
-	dma_sync_single_for_device(control->parent->config->ibdev->dma_device,
-				   control->recv_dma, control->recv_len,
-				   DMA_FROM_DEVICE);
+	ib_dma_sync_single_for_device(control->parent->config->ibdev,
+				      control->recv_dma, control->recv_len,
+				      DMA_FROM_DEVICE);
 
 	return 0;
 failure:
 	viport_failure(control->parent);
 out:
-	dma_sync_single_for_device(control->parent->config->ibdev->dma_device,
-				   control->recv_dma, control->recv_len,
-				   DMA_FROM_DEVICE);
+	ib_dma_sync_single_for_device(control->parent->config->ibdev,
+				      control->recv_dma, control->recv_len,
+				      DMA_FROM_DEVICE);
 	return -1;
 }
 
@@ -1132,9 +1132,9 @@ int control_reset_req(struct control * c
 
 	CONTROL_FUNCTION("%s: control_reset_req()\n",
 			 control_ifcfg_name(control));
-	dma_sync_single_for_cpu(control->parent->config->ibdev->dma_device,
-				control->send_dma, control->send_len,
-				DMA_TO_DEVICE);
+	ib_dma_sync_single_for_cpu(control->parent->config->ibdev,
+				   control->send_dma, control->send_len,
+				   DMA_TO_DEVICE);
 
 	send_io = control_init_hdr(control, CMD_RESET);
 	if (!send_io)
@@ -1143,14 +1143,14 @@ int control_reset_req(struct control * c
 	pkt = control_packet(send_io);
 
 	control->rsp_expected = pkt->hdr.pkt_cmd;
-	dma_sync_single_for_device(control->parent->config->ibdev->dma_device,
-				   control->send_dma, control->send_len,
-				   DMA_TO_DEVICE);
+	ib_dma_sync_single_for_device(control->parent->config->ibdev,
+				      control->send_dma, control->send_len,
+				      DMA_TO_DEVICE);
 	return control_send(control, send_io);
 failure:
-	dma_sync_single_for_device(control->parent->config->ibdev->dma_device,
-				   control->send_dma, control->send_len,
-				   DMA_TO_DEVICE);
+	ib_dma_sync_single_for_device(control->parent->config->ibdev,
+				      control->send_dma, control->send_len,
+				      DMA_TO_DEVICE);
 	return -1;
 }
 
@@ -1161,9 +1161,9 @@ int control_reset_rsp(struct control * c
 
 	CONTROL_FUNCTION("%s: control_reset_rsp()\n",
 			 control_ifcfg_name(control));
-	dma_sync_single_for_cpu(control->parent->config->ibdev->dma_device,
-				control->recv_dma, control->recv_len,
-				DMA_FROM_DEVICE);
+	ib_dma_sync_single_for_cpu(control->parent->config->ibdev,
+				   control->recv_dma, control->recv_len,
+				   DMA_FROM_DEVICE);
 
 	recv_io = control_get_rsp(control);
 	if (!recv_io)
@@ -1181,16 +1181,16 @@ int control_reset_rsp(struct control * c
 	}
 
 	control_recv(control, recv_io);
-	dma_sync_single_for_device(control->parent->config->ibdev->dma_device,
-				   control->recv_dma, control->recv_len,
-				   DMA_FROM_DEVICE);
+	ib_dma_sync_single_for_device(control->parent->config->ibdev,
+				      control->recv_dma, control->recv_len,
+				      DMA_FROM_DEVICE);
 	return 0;
 failure:
 	viport_failure(control->parent);
 out:
-	dma_sync_single_for_device(control->parent->config->ibdev->dma_device,
-				   control->recv_dma, control->recv_len,
-				   DMA_FROM_DEVICE);
+	ib_dma_sync_single_for_device(control->parent->config->ibdev,
+				      control->recv_dma, control->recv_len,
+				      DMA_FROM_DEVICE);
 	return -1;
 }
 
@@ -1202,9 +1202,9 @@ int control_heartbeat_req(struct control
 
 	CONTROL_FUNCTION("%s: control_heartbeat_req()\n",
 			 control_ifcfg_name(control));
-	dma_sync_single_for_cpu(control->parent->config->ibdev->dma_device,
-				control->send_dma, control->send_len,
-				DMA_TO_DEVICE);
+	ib_dma_sync_single_for_cpu(control->parent->config->ibdev,
+				   control->send_dma, control->send_len,
+				   DMA_TO_DEVICE);
 
 	send_io = control_init_hdr(control, CMD_HEARTBEAT);
 	if (!send_io)
@@ -1215,14 +1215,14 @@ int control_heartbeat_req(struct control
 	heartbeat_req->hb_interval = cpu_to_be32(hb_interval);
 
 	control->rsp_expected = pkt->hdr.pkt_cmd;
-	dma_sync_single_for_device(control->parent->config->ibdev->dma_device,
-				   control->send_dma, control->send_len,
-				   DMA_TO_DEVICE);
+	ib_dma_sync_single_for_device(control->parent->config->ibdev,
+				      control->send_dma, control->send_len,
+				      DMA_TO_DEVICE);
 	return control_send(control, send_io);
 failure:
-	dma_sync_single_for_device(control->parent->config->ibdev->dma_device,
-				   control->send_dma, control->send_len,
-				   DMA_TO_DEVICE);
+	ib_dma_sync_single_for_device(control->parent->config->ibdev,
+				      control->send_dma, control->send_len,
+				      DMA_TO_DEVICE);
 	return -1;
 }
 
@@ -1234,9 +1234,9 @@ int control_heartbeat_rsp(struct control
 
 	CONTROL_FUNCTION("%s: control_heartbeat_rsp()\n",
 			 control_ifcfg_name(control));
-	dma_sync_single_for_cpu(control->parent->config->ibdev->dma_device,
-				control->recv_dma, control->recv_len,
-				DMA_FROM_DEVICE);
+	ib_dma_sync_single_for_cpu(control->parent->config->ibdev,
+				   control->recv_dma, control->recv_len,
+				   DMA_FROM_DEVICE);
 
 	recv_io = control_get_rsp(control);
 	if (!recv_io)
@@ -1256,16 +1256,16 @@ int control_heartbeat_rsp(struct control
 	heartbeat_rsp = &pkt->cmd.heartbeat_rsp;
 
 	control_recv(control, recv_io);
-	dma_sync_single_for_device(control->parent->config->ibdev->dma_device,
-				   control->recv_dma, control->recv_len,
-				   DMA_FROM_DEVICE);
+	ib_dma_sync_single_for_device(control->parent->config->ibdev,
+				      control->recv_dma, control->recv_len,
+				      DMA_FROM_DEVICE);
 	return 0;
 failure:
 	viport_failure(control->parent);
 out:
-	dma_sync_single_for_device(control->parent->config->ibdev->dma_device,
-				   control->recv_dma, control->recv_len,
-				   DMA_FROM_DEVICE);
+	ib_dma_sync_single_for_device(control->parent->config->ibdev,
+				      control->recv_dma, control->recv_len,
+				      DMA_FROM_DEVICE);
 	return -1;
 }
 
@@ -1281,11 +1281,11 @@ static int control_init_recv_ios(struct
 
 
 	control->recv_len = sizeof *pkt * config->num_recvs;
-	control->recv_dma = dma_map_single(ibdev->dma_device,
-					   pkt, control->recv_len,
-					   DMA_FROM_DEVICE);
+	control->recv_dma = ib_dma_map_single(ibdev,
+					      pkt, control->recv_len,
+					      DMA_FROM_DEVICE);
 
-	if (dma_mapping_error(control->recv_dma)) {
+	if (ib_dma_mapping_error(ibdev, control->recv_dma)) {
 		CONTROL_ERROR("control recv dma map error\n");
 		goto failure;
 	}
@@ -1314,9 +1314,9 @@ static int control_init_recv_ios(struct
 
 	return 0;
 unmap_recv:
-	dma_unmap_single(control->parent->config->ibdev->dma_device,
-			 control->recv_dma, control->send_len,
-			 DMA_FROM_DEVICE);
+	ib_dma_unmap_single(control->parent->config->ibdev,
+			    control->recv_dma, control->send_len,
+			    DMA_FROM_DEVICE);
 failure:
 	return -1;
 }
@@ -1330,10 +1330,10 @@ static int control_init_send_ios(struct
 
 	control->send_io.virtual_addr = (u8*)pkt;
 	control->send_len = sizeof *pkt;
-	control->send_dma = dma_map_single(ibdev->dma_device, pkt,
-					   control->send_len,
-					   DMA_TO_DEVICE);
-	if (dma_mapping_error(control->send_dma)) {
+	control->send_dma = ib_dma_map_single(ibdev, pkt,
+					      control->send_len,
+					      DMA_TO_DEVICE);
+	if (ib_dma_mapping_error(ibdev, control->send_dma)) {
 		CONTROL_ERROR("control send dma map error\n");
 		goto failure;
 	}
@@ -1434,9 +1434,9 @@ int control_init(struct control * contro
 	return 0;
 
 unmap_send:
-	dma_unmap_single(control->parent->config->ibdev->dma_device,
-			 control->send_dma, control->send_len,
-			 DMA_TO_DEVICE);
+	ib_dma_unmap_single(control->parent->config->ibdev,
+			    control->send_dma, control->send_len,
+			    DMA_TO_DEVICE);
 free_storage:
 	vfree(control->recv_ios);
 	kfree(control->local_storage);
@@ -1460,12 +1460,12 @@ void control_cleanup(struct control *con
 	ib_destroy_qp(control->ib_conn.qp);
 	ib_destroy_cq(control->ib_conn.cq);
 	ib_dereg_mr(control->mr);
-	dma_unmap_single(control->parent->config->ibdev->dma_device,
-			 control->send_dma, control->send_len,
-			 DMA_TO_DEVICE);
-	dma_unmap_single(control->parent->config->ibdev->dma_device,
-			 control->recv_dma, control->send_len,
-			 DMA_FROM_DEVICE);
+	ib_dma_unmap_single(control->parent->config->ibdev,
+			    control->send_dma, control->send_len,
+			    DMA_TO_DEVICE);
+	ib_dma_unmap_single(control->parent->config->ibdev,
+			    control->recv_dma, control->send_len,
+			    DMA_FROM_DEVICE);
 	vfree(control->recv_ios);
 	kfree(control->local_storage);
 
diff --git a/drivers/infiniband/ulp/vnic/vnic_data.c b/drivers/infiniband/ulp/vnic/vnic_data.c
index 33fa914..66fc15d 100644
--- a/drivers/infiniband/ulp/vnic/vnic_data.c
+++ b/drivers/infiniband/ulp/vnic/vnic_data.c
@@ -250,11 +250,11 @@ static int data_init_buf_pools(struct da
 	}
 
 	recv_pool->buf_pool_dma =
-	    dma_map_single(viport->config->ibdev->dma_device,
-			   recv_pool->buf_pool, recv_pool->buf_pool_len,
-			   DMA_TO_DEVICE);
+	    ib_dma_map_single(viport->config->ibdev,
+			      recv_pool->buf_pool, recv_pool->buf_pool_len,
+			      DMA_TO_DEVICE);
 
-	if (dma_mapping_error(recv_pool->buf_pool_dma)) {
+	if (ib_dma_mapping_error(viport->config->ibdev, recv_pool->buf_pool_dma)) {
 		DATA_ERROR("xmit buf_pool dma map error\n");
 		goto free_recv_pool;
 	}
@@ -271,11 +271,11 @@ static int data_init_buf_pools(struct da
 	}
 
 	xmit_pool->buf_pool_dma =
-	    dma_map_single(viport->config->ibdev->dma_device,
-			   xmit_pool->buf_pool, xmit_pool->buf_pool_len,
-			   DMA_FROM_DEVICE);
+	    ib_dma_map_single(viport->config->ibdev,
+			      xmit_pool->buf_pool, xmit_pool->buf_pool_len,
+			      DMA_FROM_DEVICE);
 
-	if (dma_mapping_error(xmit_pool->buf_pool_dma)) {
+	if (ib_dma_mapping_error(viport->config->ibdev, xmit_pool->buf_pool_dma)) {
 		DATA_ERROR("xmit buf_pool dma map error\n");
 		goto free_xmit_pool;
 	}
@@ -289,11 +289,11 @@ static int data_init_buf_pools(struct da
 	}
 
 	xmit_pool->xmitdata_dma =
-	    dma_map_single(viport->config->ibdev->dma_device,
-			   xmit_pool->xmit_data, xmit_pool->xmitdata_len,
-			   DMA_TO_DEVICE);
+	    ib_dma_map_single(viport->config->ibdev,
+			      xmit_pool->xmit_data, xmit_pool->xmitdata_len,
+			      DMA_TO_DEVICE);
 
-	if (dma_mapping_error(xmit_pool->xmitdata_dma)) {
+	if (ib_dma_mapping_error(viport->config->ibdev, xmit_pool->xmitdata_dma)) {
 		DATA_ERROR("xmit data dma map error\n");
 		goto free_xmit_data;
 	}
@@ -303,15 +303,15 @@ static int data_init_buf_pools(struct da
 free_xmit_data:
 	kfree(xmit_pool->xmit_data);
 unmap_xmit_pool:
-	dma_unmap_single(data->parent->config->ibdev->dma_device,
-			 xmit_pool->buf_pool_dma,
-			 xmit_pool->buf_pool_len, DMA_FROM_DEVICE);
+	ib_dma_unmap_single(data->parent->config->ibdev,
+			    xmit_pool->buf_pool_dma,
+			    xmit_pool->buf_pool_len, DMA_FROM_DEVICE);
 free_xmit_pool:
 	kfree(xmit_pool->buf_pool);
 unmap_recv_pool:
-	dma_unmap_single(data->parent->config->ibdev->dma_device,
-			 recv_pool->buf_pool_dma,
-			 recv_pool->buf_pool_len, DMA_TO_DEVICE);
+	ib_dma_unmap_single(data->parent->config->ibdev,
+			    recv_pool->buf_pool_dma,
+			    recv_pool->buf_pool_len, DMA_TO_DEVICE);
 free_recv_pool:
 	kfree(recv_pool->buf_pool);
 failure:
@@ -422,10 +422,10 @@ int data_connect(struct data * data)
 	}
 
 	data->region_data_dma =
-	    dma_map_single(viport->config->ibdev->dma_device,
-			   data->region_data, 4, DMA_BIDIRECTIONAL);
+	    ib_dma_map_single(viport->config->ibdev,
+			      data->region_data, 4, DMA_BIDIRECTIONAL);
 
-	if (dma_mapping_error(data->region_data_dma)) {
+	if (ib_dma_mapping_error(viport->config->ibdev, data->region_data_dma)) {
 		DATA_ERROR("region data dma map error\n");
 		goto free_region_data;
 	}
@@ -444,8 +444,8 @@ int data_connect(struct data * data)
 	return 0;
 
 unmap_region_data:
-	dma_unmap_single(data->parent->config->ibdev->dma_device,
-			 data->region_data_dma, 4, DMA_BIDIRECTIONAL);
+	ib_dma_unmap_single(data->parent->config->ibdev,
+			    data->region_data_dma, 4, DMA_BIDIRECTIONAL);
 free_region_data:
 		kfree(data->region_data);
 free_local_storage:
@@ -459,24 +459,29 @@ static void data_add_free_buffer(struct
 {
 	struct recv_pool *pool = &data->recv_pool;
 	struct buff_pool_entry *bpe;
+	dma_addr_t vaddr_dma;
 
 	DATA_FUNCTION("data_add_free_buffer()\n");
 	rdma_dest->trailer->connection_hash_and_valid = 0;
-	dma_sync_single_for_cpu(data->parent->config->ibdev->dma_device,
-				pool->buf_pool_dma, pool->buf_pool_len,
-				DMA_TO_DEVICE);
+	ib_dma_sync_single_for_cpu(data->parent->config->ibdev,
+				   pool->buf_pool_dma, pool->buf_pool_len,
+				   DMA_TO_DEVICE);
 
 	bpe = &pool->buf_pool[index];
 	bpe->rkey = cpu_to_be32(data->mr->rkey);
-
-	bpe->remote_addr = cpu_to_be64((unsigned long long)
-					virt_to_phys(rdma_dest->data));
+	vaddr_dma = ib_dma_map_single(data->parent->config->ibdev,
+				      rdma_dest->data, pool->buffer_sz, DMA_FROM_DEVICE);
+	if (ib_dma_mapping_error(data->parent->config->ibdev, vaddr_dma)) {
+		DATA_ERROR("rdma_dest->data dma map error\n");
+		goto failure;
+	}
+	bpe->remote_addr = cpu_to_be64(vaddr_dma);
 	bpe->valid = (u32) (rdma_dest - &pool->recv_bufs[0]) + 1;
 	++pool->num_free_bufs;
-
-	dma_sync_single_for_device(data->parent->config->ibdev->dma_device,
-				   pool->buf_pool_dma, pool->buf_pool_len,
-				   DMA_TO_DEVICE);
+failure:
+	ib_dma_sync_single_for_device(data->parent->config->ibdev,
+				      pool->buf_pool_dma, pool->buf_pool_len,
+				      DMA_TO_DEVICE);
 }
 
 /* NOTE: this routine is not reentrant */
@@ -637,30 +642,30 @@ void data_disconnect(struct data *data)
 	}
 	vfree(data->local_storage);
 	if (data->region_data) {
-		dma_unmap_single(data->parent->config->ibdev->dma_device,
-				 data->region_data_dma, 4,
-				 DMA_BIDIRECTIONAL);
+		ib_dma_unmap_single(data->parent->config->ibdev,
+				    data->region_data_dma, 4,
+				    DMA_BIDIRECTIONAL);
 		kfree(data->region_data);
 	}
 
 	if (recv_pool->buf_pool) {
-		dma_unmap_single(data->parent->config->ibdev->dma_device,
-				 recv_pool->buf_pool_dma,
-				 recv_pool->buf_pool_len, DMA_TO_DEVICE);
+		ib_dma_unmap_single(data->parent->config->ibdev,
+				    recv_pool->buf_pool_dma,
+				    recv_pool->buf_pool_len, DMA_TO_DEVICE);
 		kfree(recv_pool->buf_pool);
 	}
 
 	if (xmit_pool->buf_pool) {
-		dma_unmap_single(data->parent->config->ibdev->dma_device,
-				 xmit_pool->buf_pool_dma,
-				 xmit_pool->buf_pool_len, DMA_FROM_DEVICE);
+		ib_dma_unmap_single(data->parent->config->ibdev,
+				    xmit_pool->buf_pool_dma,
+				    xmit_pool->buf_pool_len, DMA_FROM_DEVICE);
 		kfree(xmit_pool->buf_pool);
 	}
 
 	if (xmit_pool->xmit_data) {
-		dma_unmap_single(data->parent->config->ibdev->dma_device,
-				 xmit_pool->xmitdata_dma,
-				 xmit_pool->xmitdata_len, DMA_TO_DEVICE);
+		ib_dma_unmap_single(data->parent->config->ibdev,
+				    xmit_pool->xmitdata_dma,
+				    xmit_pool->xmitdata_len, DMA_TO_DEVICE);
 		kfree(xmit_pool->xmit_data);
 	}
 }
@@ -689,9 +694,9 @@ static int data_alloc_xmit_buffer(struct
 	DATA_FUNCTION("data_alloc_xmit_buffer()\n");
 
 	spin_lock_irqsave(&data->xmit_buf_lock, flags);
-	dma_sync_single_for_cpu(data->parent->config->ibdev->dma_device,
-				pool->buf_pool_dma, pool->buf_pool_len,
-				DMA_TO_DEVICE);
+	ib_dma_sync_single_for_cpu(data->parent->config->ibdev,
+				   pool->buf_pool_dma, pool->buf_pool_len,
+				   DMA_TO_DEVICE);
 	*last = 0;
 	*pp_rdma_io = &pool->xmit_bufs[pool->next_xmit_buf];
 	*pp_bpe = &pool->buf_pool[pool->next_xmit_pool];
@@ -725,9 +730,9 @@ static int data_alloc_xmit_buffer(struct
 		ret = -1;
 	}
 
-	dma_sync_single_for_device(data->parent->config->ibdev->
-				   dma_device, pool->buf_pool_dma,
-				   pool->buf_pool_len, DMA_TO_DEVICE);
+	ib_dma_sync_single_for_device(data->parent->config->ibdev,
+				      pool->buf_pool_dma,
+				      pool->buf_pool_len, DMA_TO_DEVICE);
 	spin_unlock_irqrestore(&data->xmit_buf_lock, flags);
 	return ret;
 }
@@ -751,9 +756,9 @@ static void data_rdma_packet(struct data
 	len = ALIGN(rdma_io->len, VIPORT_TRAILER_ALIGNMENT);
 	fill_len = len - skb->len;
 
-	dma_sync_single_for_cpu(data->parent->config->ibdev->dma_device,
-				xmit_pool->xmitdata_dma,
-				xmit_pool->xmitdata_len, DMA_TO_DEVICE);
+	ib_dma_sync_single_for_cpu(data->parent->config->ibdev,
+				   xmit_pool->xmitdata_dma,
+				   xmit_pool->xmitdata_len, DMA_TO_DEVICE);
 
 	d = (u8 *) rdma_io->trailer - fill_len;
 	trailer_data_dma = rdma_io->trailer_dma - fill_len;
@@ -769,11 +774,11 @@ static void data_rdma_packet(struct data
 	} else {
 		swr->sg_list[0].lkey = data->mr->lkey;
 
-		skb_data_dma = dma_map_single(viport->config->ibdev->dma_device,
-					      skb->data, skb->len,
-					      DMA_TO_DEVICE);
+		skb_data_dma = ib_dma_map_single(viport->config->ibdev,
+					         skb->data, skb->len,
+					         DMA_TO_DEVICE);
 
-		if (dma_mapping_error(skb_data_dma)) {
+		if (ib_dma_mapping_error(viport->config->ibdev, skb_data_dma)) {
 			DATA_ERROR("skb data dma map error\n");
 			goto failure;
 		}
@@ -783,9 +788,9 @@ static void data_rdma_packet(struct data
 		swr->sg_list[0].addr = skb_data_dma;
 		skb_orphan(skb);
 	}
-	dma_sync_single_for_cpu(data->parent->config->ibdev->dma_device,
-				xmit_pool->buf_pool_dma,
-				xmit_pool->buf_pool_len, DMA_TO_DEVICE);
+	ib_dma_sync_single_for_cpu(data->parent->config->ibdev,
+				   xmit_pool->buf_pool_dma,
+				   xmit_pool->buf_pool_len, DMA_TO_DEVICE);
 
 	swr->sg_list[1].addr = trailer_data_dma;
 	swr->sg_list[1].length = fill_len + sizeof(struct viport_trailer);
@@ -795,9 +800,9 @@ static void data_rdma_packet(struct data
 	swr->wr.rdma.remote_addr -= (sizeof(struct viport_trailer) + len);
 	swr->wr.rdma.rkey = be32_to_cpu(bpe->rkey);
 
-	dma_sync_single_for_device(data->parent->config->ibdev->dma_device,
-				   xmit_pool->buf_pool_dma,
-				   xmit_pool->buf_pool_len, DMA_TO_DEVICE);
+	ib_dma_sync_single_for_device(data->parent->config->ibdev,
+				      xmit_pool->buf_pool_dma,
+				      xmit_pool->buf_pool_len, DMA_TO_DEVICE);
 
 	data->xmit_pool.notify_count++;
 	if (data->xmit_pool.notify_count >= data->xmit_pool.notify_bundle) {
@@ -806,9 +811,9 @@ static void data_rdma_packet(struct data
 	} else {
 		swr->send_flags = 0;
 	}
-	dma_sync_single_for_device(data->parent->config->ibdev->dma_device,
-				   xmit_pool->xmitdata_dma,
-				   xmit_pool->xmitdata_len, DMA_TO_DEVICE);
+	ib_dma_sync_single_for_device(data->parent->config->ibdev,
+				      xmit_pool->xmitdata_dma,
+				      xmit_pool->xmitdata_len, DMA_TO_DEVICE);
 	if (vnic_ib_post_send(&data->ib_conn, &rdma_io->io)) {
 		DATA_ERROR("failed to post send for data RDMA write\n");
 		viport_failure(data->parent);
@@ -817,9 +822,9 @@ static void data_rdma_packet(struct data
 
 	data_xmits_stats(data);
 failure:
-	dma_sync_single_for_device(data->parent->config->ibdev->dma_device,
-				   xmit_pool->xmitdata_dma,
-				   xmit_pool->xmitdata_len, DMA_TO_DEVICE);
+	ib_dma_sync_single_for_device(data->parent->config->ibdev,
+				      xmit_pool->xmitdata_dma,
+				      xmit_pool->xmitdata_len, DMA_TO_DEVICE);
 }
 
 static void data_kick_timeout_handler(unsigned long arg)
@@ -851,9 +856,9 @@ int data_xmit_packet(struct data *data,
 		return -1;
 	}
 
-	dma_sync_single_for_cpu(data->parent->config->ibdev->dma_device,
-				pool->xmitdata_dma, pool->xmitdata_len,
-				DMA_TO_DEVICE);
+	ib_dma_sync_single_for_cpu(data->parent->config->ibdev,
+				   pool->xmitdata_dma, pool->xmitdata_len,
+				   DMA_TO_DEVICE);
 	trailer = rdma_io->trailer;
 
 	memset(trailer, 0, sizeof *trailer);
@@ -893,10 +898,9 @@ int data_xmit_packet(struct data *data,
 		    | TX_CHKSUM_FLAGS_UDP_CHECKSUM;
 	}
 
-	dma_sync_single_for_device(data->parent->config->ibdev->dma_device,
-				   pool->xmitdata_dma, pool->xmitdata_len,
-				   DMA_TO_DEVICE);
-
+	ib_dma_sync_single_for_device(data->parent->config->ibdev,
+				      pool->xmitdata_dma, pool->xmitdata_len,
+				      DMA_TO_DEVICE);
 	data_rdma_packet(data, bpe, rdma_io);
 
 	if (pool->send_kicks) {
@@ -929,9 +933,9 @@ void data_check_xmit_buffers(struct data
 
 	DATA_FUNCTION("data_check_xmit_buffers()\n");
 	spin_lock_irqsave(&data->xmit_buf_lock, flags);
-	dma_sync_single_for_cpu(data->parent->config->ibdev->dma_device,
-				pool->buf_pool_dma, pool->buf_pool_len,
-				DMA_TO_DEVICE);
+	ib_dma_sync_single_for_cpu(data->parent->config->ibdev,
+				   pool->buf_pool_dma, pool->buf_pool_len,
+				   DMA_TO_DEVICE);
 
 	if (data->xmit_pool.need_buffers
 	    && pool->buf_pool[pool->next_xmit_pool].valid
@@ -941,9 +945,9 @@ void data_check_xmit_buffers(struct data
 				  data->parent->parent);
 		DATA_INFO("there are free xmit buffers\n");
 	}
-	dma_sync_single_for_device(data->parent->config->ibdev->dma_device,
-				   pool->buf_pool_dma, pool->buf_pool_len,
-				   DMA_TO_DEVICE);
+	ib_dma_sync_single_for_device(data->parent->config->ibdev,
+				      pool->buf_pool_dma, pool->buf_pool_len,
+				      DMA_TO_DEVICE);
 
 	spin_unlock_irqrestore(&data->xmit_buf_lock, flags);
 }
@@ -1025,11 +1029,13 @@ static int data_incoming_recv(struct dat
 	struct viport_trailer *trailer;
 	struct buff_pool_entry *bpe;
 	struct sk_buff *skb;
+	dma_addr_t vaddr_dma;
 
 	DATA_FUNCTION("data_incoming_recv()\n");
 	if (pool->next_full_buf == pool->next_free_buf)
 		return -1;
 	bpe = &pool->buf_pool[pool->next_full_buf];
+	vaddr_dma = be64_to_cpu(bpe->remote_addr);
 	rdma_dest = &pool->recv_bufs[bpe->valid - 1];
 	trailer = rdma_dest->trailer;
 
@@ -1049,14 +1055,16 @@ static int data_incoming_recv(struct dat
 		list_add(&rdma_dest->list_ptrs, &pool->avail_recv_bufs);
 	}
 
-	dma_sync_single_for_cpu(data->parent->config->ibdev->dma_device,
-				pool->buf_pool_dma, pool->buf_pool_len,
-				DMA_TO_DEVICE);
+	ib_dma_unmap_single(data->parent->config->ibdev,
+			    vaddr_dma, pool->buffer_sz,
+			    DMA_FROM_DEVICE);
+	ib_dma_sync_single_for_cpu(data->parent->config->ibdev,
+				   pool->buf_pool_dma, pool->buf_pool_len,
+				   DMA_TO_DEVICE);
 
 	bpe->valid = 0;
-	dma_sync_single_for_device(data->parent->config->ibdev->
-				   dma_device, pool->buf_pool_dma,
-				   pool->buf_pool_len, DMA_TO_DEVICE);
+	ib_dma_sync_single_for_device(data->parent->config->ibdev,
+			pool->buf_pool_dma, pool->buf_pool_len, DMA_TO_DEVICE);
 
 	INC(pool->next_full_buf, 1, pool->eioc_pool_sz);
 	pool->num_posted_bufs--;
@@ -1096,9 +1104,9 @@ static void data_xmit_complete(struct io
 	DATA_FUNCTION("data_xmit_complete()\n");
 
 	if (rdma_io->skb)
-		dma_unmap_single(data->parent->config->ibdev->dma_device,
-				 rdma_io->skb_data_dma, rdma_io->skb->len,
-				 DMA_TO_DEVICE);
+		ib_dma_unmap_single(data->parent->config->ibdev,
+				    rdma_io->skb_data_dma, rdma_io->skb->len,
+				    DMA_TO_DEVICE);
 
 	while (pool->last_comp_buf != rdma_io->index) {
 		INC(pool->last_comp_buf, 1, pool->num_xmit_bufs);



More information about the ewg mailing list