[ofa-general] [PATCH 1/3] IB/srp: respect target credit limit
David Dillow
dillowda at ornl.gov
Wed Dec 19 14:08:43 PST 2007
The SRP initiator will currently send requests even if we have no
credits available. The result of sending these extra requests is vendor
specific, but on some devices, overrunning our credits will cost us 85%
of peak performance -- e.g. 100 MB/s vs 720 MB/s. Other devices may
simply drop the extra requests.
This patch will tell the SCSI mid-layer to queue requests if fewer than
two credits remain, and will not issue a task management request if no
credits remain; the last credit is reserved so that a task management
request can still be sent even under load. The mid-layer will retry a
queued command once an outstanding command completes (see the sketch
below).
This removes the unlikely() in __srp_get_tx_iu(), as it is not at all
unlikely to hit this limit under heavy load.
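For reference, the mid-layer behaviour described above relies on the
existing error path in srp_queuecommand(); the fragment below is an
illustrative sketch paraphrasing the current code, not part of this
patch:

	iu = __srp_get_tx_iu(target, SRP_REQ_NORMAL);
	if (!iu)
		goto err;	/* TX ring full or fewer than two credits */

	/* ... build, map, and post the SRP_CMD ... */

err:
	return SCSI_MLQUEUE_HOST_BUSY;	/* mid-layer requeues and retries */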
Signed-off-by: David Dillow <dillowda at ornl.gov>
---
ib_srp.c | 16 +++++++++-------
ib_srp.h | 5 +++++
2 files changed, 14 insertions(+), 7 deletions(-)
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index 950228f..17ad144 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -930,13 +930,18 @@ static int srp_post_recv(struct srp_target_port *target)
  * req_lim and tx_head. Lock cannot be dropped between call here and
  * call to __srp_post_send().
  */
-static struct srp_iu *__srp_get_tx_iu(struct srp_target_port *target)
+static struct srp_iu *__srp_get_tx_iu(struct srp_target_port *target,
+                                      enum srp_request_type req_type)
 {
+        s32 min = (req_type == SRP_REQ_TASK_MGMT) ? 1 : 2;
+
         if (target->tx_head - target->tx_tail >= SRP_SQ_SIZE)
                 return NULL;
 
-        if (unlikely(target->req_lim < 1))
+        if (target->req_lim < min) {
                 ++target->zero_req_lim;
+                return NULL;
+        }
 
         return target->tx_ring[target->tx_head & SRP_SQ_SIZE];
 }
@@ -993,7 +998,7 @@ static int srp_queuecommand(struct scsi_cmnd *scmnd,
                 return 0;
         }
 
-        iu = __srp_get_tx_iu(target);
+        iu = __srp_get_tx_iu(target, SRP_REQ_NORMAL);
         if (!iu)
                 goto err;
@@ -1180,9 +1185,6 @@ static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
                         target->max_ti_iu_len = be32_to_cpu(rsp->max_ti_iu_len);
                         target->req_lim       = be32_to_cpu(rsp->req_lim_delta);
-
-                        target->scsi_host->can_queue = min(target->req_lim,
-                                                           target->scsi_host->can_queue);
                 } else {
                         printk(KERN_WARNING PFX "Unhandled RSP opcode %#x\n", opcode);
                         target->status = -ECONNRESET;
@@ -1283,7 +1285,7 @@ static int srp_send_tsk_mgmt(struct srp_target_port *target,
         init_completion(&req->done);
 
-        iu = __srp_get_tx_iu(target);
+        iu = __srp_get_tx_iu(target, SRP_REQ_TASK_MGMT);
         if (!iu)
                 goto out;
diff --git a/drivers/infiniband/ulp/srp/ib_srp.h b/drivers/infiniband/ulp/srp/ib_srp.h
index e3573e7..4a3c1f3 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.h
+++ b/drivers/infiniband/ulp/srp/ib_srp.h
@@ -79,6 +79,11 @@ enum srp_target_state {
         SRP_TARGET_REMOVED
 };
 
+enum srp_request_type {
+        SRP_REQ_NORMAL,
+        SRP_REQ_TASK_MGMT,
+};
+
 struct srp_device {
         struct list_head        dev_list;
         struct ib_device       *dev;
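As noted in the changelog, one credit is reserved for task management.
The credit bookkeeping itself is not changed by this patch; roughly,
the existing driver consumes one credit per successfully posted send
and replenishes req_lim from the REQ_LIM_DELTA field of each SRP
response. The fragments below are an illustrative sketch paraphrasing
the current code:

	/* __srp_post_send(): each successfully posted request
	 * consumes one credit.
	 */
	ret = ib_post_send(target->qp, &wr, &bad_wr);
	if (!ret) {
		++target->tx_head;
		--target->req_lim;
	}

	/* srp_process_rsp(): the target grants credits back via the
	 * REQ_LIM_DELTA field of each SRP_RSP it sends.
	 */
	target->req_lim += (s32) be32_to_cpu(rsp->req_lim_delta);

Requiring two free credits for a normal command therefore keeps at
least one credit in hand for a task management request even when the
host is fully loaded.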