[ewg] [PATCH 6/6] nes: Cosmetic changes; support virtual WQs and PPC
Glenn Grundstrom NetEffect
glenn at lists.openfabrics.org
Wed Nov 14 14:40:19 PST 2007
Updated code for the NetEffect NE020 adapter.
Updates include:
- Support for userspace/virtual WQs.
- Support for PowerPC.
- Support for multiple debugging levels.
- Many cosmetic changes in line with kernel.org coding standards.
Diffs for nes_verbs.c and nes_verbs.h
Signed-off-by: Glenn Grundstrom <ggrundstrom at neteffect.com>
---
diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
index 7a0aee7..311127e 100644
--- a/drivers/infiniband/hw/nes/nes_verbs.c
+++ b/drivers/infiniband/hw/nes/nes_verbs.c
@@ -34,15 +34,16 @@
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/random.h>
+#include <linux/highmem.h>
#include <asm/byteorder.h>
#include <rdma/ib_verbs.h>
#include <rdma/iw_cm.h>
#include <rdma/ib_user_verbs.h>
+
#include "nes.h"
-#ifndef OFED_1_2
+
#include <rdma/ib_umem.h>
-#endif
atomic_t mod_qp_timouts;
atomic_t qps_created;
@@ -83,7 +84,7 @@ static struct ib_mw *nes_alloc_mw(struct ib_pd *ibpd) {
return ERR_PTR(ret);
}
- nesmr = kmalloc(sizeof(*nesmr), GFP_KERNEL);
+ nesmr = kzalloc(sizeof(*nesmr), GFP_KERNEL);
if (!nesmr) {
nes_free_resource(nesadapter, nesadapter->allocated_mrs, stag_index);
return ERR_PTR(-ENOMEM);
@@ -97,12 +98,13 @@ static struct ib_mw *nes_alloc_mw(struct ib_pd *ibpd) {
stag, stag_index);
/* Register the region with the adapter */
- cqp_request = nes_get_cqp_request(nesdev, NES_CQP_REQUEST_NOT_HOLDING_LOCK);
- if (NULL == cqp_request) {
+ cqp_request = nes_get_cqp_request(nesdev);
+ if (cqp_request == NULL) {
kfree(nesmr);
nes_free_resource(nesadapter, nesadapter->allocated_mrs, stag_index);
return ERR_PTR(-ENOMEM);
}
+
cqp_request->waiting = 1;
cqp_wqe = &cqp_request->cqp_wqe;
@@ -120,7 +122,7 @@ static struct ib_mw *nes_alloc_mw(struct ib_pd *ibpd) {
cqp_wqe->wqe_words[NES_CQP_STAG_WQE_LEN_LOW_IDX] = 0;
cqp_wqe->wqe_words[NES_CQP_STAG_WQE_LEN_HIGH_PD_IDX] =
- cpu_to_le32(nespd->pd_id&0x00007fff);
+ cpu_to_le32(nespd->pd_id & 0x00007fff);
cqp_wqe->wqe_words[NES_CQP_STAG_WQE_STAG_IDX] = cpu_to_le32(stag);
cqp_wqe->wqe_words[NES_CQP_STAG_WQE_PA_LOW_IDX] = 0;
@@ -129,11 +131,10 @@ static struct ib_mw *nes_alloc_mw(struct ib_pd *ibpd) {
cqp_wqe->wqe_words[NES_CQP_STAG_WQE_PBL_LEN_IDX] = 0;
atomic_set(&cqp_request->refcount, 2);
- nes_post_cqp_request(nesdev, cqp_request, NES_CQP_REQUEST_NOT_HOLDING_LOCK,
- NES_CQP_REQUEST_RING_DOORBELL);
+ nes_post_cqp_request(nesdev, cqp_request, NES_CQP_REQUEST_RING_DOORBELL);
/* Wait for CQP */
- ret = wait_event_timeout(cqp_request->waitq, (0 != cqp_request->request_done),
+ ret = wait_event_timeout(cqp_request->waitq, (cqp_request->request_done != 0),
NES_EVENT_TIMEOUT);
nes_debug(NES_DBG_MR, "Register STag 0x%08X completed, wait_event_timeout ret = %u,"
" CQP Major:Minor codes = 0x%04X:0x%04X.\n",
@@ -141,10 +142,8 @@ static struct ib_mw *nes_alloc_mw(struct ib_pd *ibpd) {
if ((!ret) || (cqp_request->major_code)) {
if (atomic_dec_and_test(&cqp_request->refcount)) {
if (cqp_request->dynamic) {
- atomic_inc(&cqp_reqs_dynfreed);
kfree(cqp_request);
} else {
- atomic_inc(&cqp_reqs_freed);
spin_lock_irqsave(&nesdev->cqp.lock, flags);
list_add_tail(&cqp_request->list, &nesdev->cqp_avail_reqs);
spin_unlock_irqrestore(&nesdev->cqp.lock, flags);
@@ -160,10 +159,8 @@ static struct ib_mw *nes_alloc_mw(struct ib_pd *ibpd) {
} else {
if (atomic_dec_and_test(&cqp_request->refcount)) {
if (cqp_request->dynamic) {
- atomic_inc(&cqp_reqs_dynfreed);
kfree(cqp_request);
} else {
- atomic_inc(&cqp_reqs_freed);
spin_lock_irqsave(&nesdev->cqp.lock, flags);
list_add_tail(&cqp_request->list, &nesdev->cqp_avail_reqs);
spin_unlock_irqrestore(&nesdev->cqp.lock, flags);
@@ -197,8 +194,8 @@ static int nes_dealloc_mw(struct ib_mw *ibmw)
int ret;
/* Deallocate the window with the adapter */
- cqp_request = nes_get_cqp_request(nesdev, NES_CQP_REQUEST_NOT_HOLDING_LOCK);
- if (NULL == cqp_request) {
+ cqp_request = nes_get_cqp_request(nesdev);
+ if (cqp_request == NULL) {
nes_debug(NES_DBG_MR, "Failed to get a cqp_request.\n");
return -ENOMEM;
}
@@ -217,8 +214,7 @@ static int nes_dealloc_mw(struct ib_mw *ibmw)
cqp_wqe->wqe_words[NES_CQP_STAG_WQE_STAG_IDX] = cpu_to_le32(ibmw->rkey);
atomic_set(&cqp_request->refcount, 2);
- nes_post_cqp_request(nesdev, cqp_request, NES_CQP_REQUEST_NOT_HOLDING_LOCK,
- NES_CQP_REQUEST_RING_DOORBELL);
+ nes_post_cqp_request(nesdev, cqp_request, NES_CQP_REQUEST_RING_DOORBELL);
/* Wait for CQP */
nes_debug(NES_DBG_MR, "Waiting for deallocate STag 0x%08X to complete.\n",
@@ -231,10 +227,8 @@ static int nes_dealloc_mw(struct ib_mw *ibmw)
if ((!ret) || (cqp_request->major_code)) {
if (atomic_dec_and_test(&cqp_request->refcount)) {
if (cqp_request->dynamic) {
- atomic_inc(&cqp_reqs_dynfreed);
kfree(cqp_request);
} else {
- atomic_inc(&cqp_reqs_freed);
spin_lock_irqsave(&nesdev->cqp.lock, flags);
list_add_tail(&cqp_request->list, &nesdev->cqp_avail_reqs);
spin_unlock_irqrestore(&nesdev->cqp.lock, flags);
@@ -248,10 +242,8 @@ static int nes_dealloc_mw(struct ib_mw *ibmw)
} else {
if (atomic_dec_and_test(&cqp_request->refcount)) {
if (cqp_request->dynamic) {
- atomic_inc(&cqp_reqs_dynfreed);
kfree(cqp_request);
} else {
- atomic_inc(&cqp_reqs_freed);
spin_lock_irqsave(&nesdev->cqp.lock, flags);
list_add_tail(&cqp_request->list, &nesdev->cqp_avail_reqs);
spin_unlock_irqrestore(&nesdev->cqp.lock, flags);
@@ -260,7 +252,7 @@ static int nes_dealloc_mw(struct ib_mw *ibmw)
}
nes_free_resource(nesadapter, nesadapter->allocated_mrs,
- (ibmw->rkey&0x0fffff00) >> 8);
+ (ibmw->rkey & 0x0fffff00) >> 8);
kfree(nesmr);
return err;
@@ -294,6 +286,7 @@ static int nes_bind_mw(struct ib_qp *ibqp, struct ib_mw *ibmw,
/* Check for SQ overflow */
if (((head + (2 * qsize) - nesqp->hwqp.sq_tail) % qsize) == (qsize - 1)) {
+ spin_unlock_irqrestore(&nesqp->lock, flags);
return -EINVAL;
}
@@ -301,8 +294,10 @@ static int nes_bind_mw(struct ib_qp *ibqp, struct ib_mw *ibmw,
/* nes_debug(NES_DBG_MR, "processing sq wqe at %p, head = %u.\n", wqe, head); */
u64temp = (u64)ibmw_bind->wr_id;
wqe->wqe_words[NES_IWARP_SQ_WQE_COMP_SCRATCH_LOW_IDX] = cpu_to_le32((u32)u64temp);
- wqe->wqe_words[NES_IWARP_SQ_WQE_COMP_SCRATCH_HIGH_IDX] = cpu_to_le32((u32)((u64temp)>>32));
- wqe->wqe_words[NES_IWARP_SQ_WQE_COMP_CTX_HIGH_IDX] = cpu_to_le32((u32)(((u64)nesqp)>>32));
+ wqe->wqe_words[NES_IWARP_SQ_WQE_COMP_SCRATCH_HIGH_IDX] =
+ cpu_to_le32((u32)((u64temp)>>32));
+ wqe->wqe_words[NES_IWARP_SQ_WQE_COMP_CTX_HIGH_IDX] =
+ cpu_to_le32((u32)(((u64)nesqp)>>32));
wqe->wqe_words[NES_IWARP_SQ_WQE_COMP_CTX_LOW_IDX] = (u32)((u64)nesqp);
wqe->wqe_words[NES_IWARP_SQ_WQE_COMP_CTX_LOW_IDX] |= head;
wqe->wqe_words[NES_IWARP_SQ_WQE_COMP_CTX_LOW_IDX] =
@@ -329,7 +324,8 @@ static int nes_bind_mw(struct ib_qp *ibqp, struct ib_mw *ibmw,
wqe->wqe_words[NES_IWARP_SQ_BIND_WQE_LENGTH_HIGH_IDX] = 0;
u64temp = (u64)ibmw_bind->addr;
wqe->wqe_words[NES_IWARP_SQ_BIND_WQE_VA_FBO_LOW_IDX] = cpu_to_le32((u32)u64temp);
- wqe->wqe_words[NES_IWARP_SQ_BIND_WQE_VA_FBO_HIGH_IDX] = cpu_to_le32((u32)(u64temp>>32));
+ wqe->wqe_words[NES_IWARP_SQ_BIND_WQE_VA_FBO_HIGH_IDX] =
+ cpu_to_le32((u32)(u64temp >> 32));
head++;
if (head >= qsize)
@@ -338,7 +334,7 @@ static int nes_bind_mw(struct ib_qp *ibqp, struct ib_mw *ibmw,
nesqp->hwqp.sq_head = head;
barrier();
- nes_write32(nesdev->regs + NES_WQE_ALLOC,
+ nes_write32(nesdev->regs+NES_WQE_ALLOC,
(1 << 24) | 0x00800000 | nesqp->hwqp.qp_id);
spin_unlock_irqrestore(&nesqp->lock, flags);
@@ -385,7 +381,7 @@ static struct ib_fmr *nes_alloc_fmr(struct ib_pd *ibpd,
goto failed_resource_alloc;
}
- nesfmr = kmalloc(sizeof(*nesfmr), GFP_KERNEL);
+ nesfmr = kzalloc(sizeof(*nesfmr), GFP_KERNEL);
if (!nesfmr) {
ret = -ENOMEM;
goto failed_fmr_alloc;
@@ -408,17 +404,17 @@ static struct ib_fmr *nes_alloc_fmr(struct ib_pd *ibpd,
/* use two level 4K PBLs */
/* add support for two level 256B PBLs */
nesfmr->nesmr.pbl_4k = 1;
- nesfmr->nesmr.pbls_used = 1 + (ibfmr_attr->max_pages>>9) +
- ((ibfmr_attr->max_pages&511)?1:0);
+ nesfmr->nesmr.pbls_used = 1 + (ibfmr_attr->max_pages >> 9) +
+ ((ibfmr_attr->max_pages & 511) ? 1 : 0);
}
/* Register the region with the adapter */
- spin_lock_irqsave(&nesdev->cqp.lock, flags);
+ spin_lock_irqsave(&nesadapter->pbl_lock, flags);
/* track PBL resources */
if (nesfmr->nesmr.pbls_used != 0) {
if (nesfmr->nesmr.pbl_4k) {
if (nesfmr->nesmr.pbls_used > nesadapter->free_4kpbl) {
- spin_unlock_irqrestore(&nesdev->cqp.lock, flags);
+ spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);
ret = -ENOMEM;
goto failed_vpbl_alloc;
} else {
@@ -426,7 +422,7 @@ static struct ib_fmr *nes_alloc_fmr(struct ib_pd *ibpd,
}
} else {
if (nesfmr->nesmr.pbls_used > nesadapter->free_256pbl) {
- spin_unlock_irqrestore(&nesdev->cqp.lock, flags);
+ spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);
ret = -ENOMEM;
goto failed_vpbl_alloc;
} else {
@@ -444,7 +440,7 @@ static struct ib_fmr *nes_alloc_fmr(struct ib_pd *ibpd,
nesfmr->root_vpbl.pbl_vbase = pci_alloc_consistent(nesdev->pcidev, 4096,
&nesfmr->root_vpbl.pbl_pbase);
if (!nesfmr->root_vpbl.pbl_vbase) {
- spin_unlock_irqrestore(&nesdev->cqp.lock, flags);
+ spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);
ret = -ENOMEM;
goto failed_vpbl_alloc;
}
@@ -457,14 +453,14 @@ static struct ib_fmr *nes_alloc_fmr(struct ib_pd *ibpd,
nesfmr->root_vpbl.pbl_vbase = pci_alloc_consistent(nesdev->pcidev, 8192,
&nesfmr->root_vpbl.pbl_pbase);
if (!nesfmr->root_vpbl.pbl_vbase) {
- spin_unlock_irqrestore(&nesdev->cqp.lock, flags);
+ spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);
ret = -ENOMEM;
goto failed_vpbl_alloc;
}
- nesfmr->root_vpbl.leaf_vpbl = kmalloc(sizeof(*nesfmr->root_vpbl.leaf_vpbl)*1024, GFP_KERNEL);
+ nesfmr->root_vpbl.leaf_vpbl = kzalloc(sizeof(*nesfmr->root_vpbl.leaf_vpbl)*1024, GFP_KERNEL);
if (!nesfmr->root_vpbl.leaf_vpbl) {
- spin_unlock_irqrestore(&nesdev->cqp.lock, flags);
+ spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);
ret = -ENOMEM;
goto failed_leaf_vpbl_alloc;
}
@@ -503,9 +499,9 @@ static struct ib_fmr *nes_alloc_fmr(struct ib_pd *ibpd,
stag |= driver_key;
stag += (u32)stag_key;
- spin_unlock_irqrestore(&nesdev->cqp.lock, flags);
- cqp_request = nes_get_cqp_request(nesdev, NES_CQP_REQUEST_NOT_HOLDING_LOCK);
- if (NULL == cqp_request) {
+ spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);
+ cqp_request = nes_get_cqp_request(nesdev);
+ if (cqp_request == NULL) {
nes_debug(NES_DBG_MR, "Failed to get a cqp_request.\n");
ret = -ENOMEM;
goto failed_leaf_vpbl_pages_alloc;
@@ -517,9 +513,7 @@ static struct ib_fmr *nes_alloc_fmr(struct ib_pd *ibpd,
stag, stag_index);
cqp_wqe->wqe_words[NES_CQP_WQE_OPCODE_IDX] = cpu_to_le32(
- NES_CQP_ALLOCATE_STAG |
- NES_CQP_STAG_VA_TO |
- NES_CQP_STAG_MR);
+ NES_CQP_ALLOCATE_STAG | NES_CQP_STAG_VA_TO | NES_CQP_STAG_MR);
if (nesfmr->nesmr.pbl_4k == 1)
cqp_wqe->wqe_words[NES_CQP_WQE_OPCODE_IDX] |= cpu_to_le32(NES_CQP_STAG_PBL_BLK_SIZE);
@@ -535,7 +529,7 @@ static struct ib_fmr *nes_alloc_fmr(struct ib_pd *ibpd,
if (ibmr_access_flags & IB_ACCESS_REMOTE_READ) {
cqp_wqe->wqe_words[NES_CQP_WQE_OPCODE_IDX] |=
- cpu_to_le32( NES_CQP_STAG_RIGHTS_REMOTE_READ |
+ cpu_to_le32(NES_CQP_STAG_RIGHTS_REMOTE_READ |
NES_CQP_STAG_RIGHTS_LOCAL_READ | NES_CQP_STAG_REM_ACC_EN);
nesfmr->access_rights |=
NES_CQP_STAG_RIGHTS_REMOTE_READ | NES_CQP_STAG_RIGHTS_LOCAL_READ |
@@ -545,11 +539,11 @@ static struct ib_fmr *nes_alloc_fmr(struct ib_pd *ibpd,
cqp_wqe->wqe_words[NES_CQP_WQE_COMP_CTX_LOW_IDX] =
cpu_to_le32((u32)((u64)(&nesdev->cqp)));
cqp_wqe->wqe_words[NES_CQP_WQE_COMP_CTX_HIGH_IDX] =
- cpu_to_le32((u32)(((u64)(&nesdev->cqp))>>32));
- cqp_wqe->wqe_words[NES_CQP_WQE_COMP_SCRATCH_LOW_IDX] = 0;
- cqp_wqe->wqe_words[NES_CQP_WQE_COMP_SCRATCH_HIGH_IDX] = 0;
+ cpu_to_le32((u32)(((u64)(&nesdev->cqp)) >> 32));
+ cqp_wqe->wqe_words[NES_CQP_WQE_COMP_SCRATCH_LOW_IDX] = 0;
+ cqp_wqe->wqe_words[NES_CQP_WQE_COMP_SCRATCH_HIGH_IDX] = 0;
- cqp_wqe->wqe_words[NES_CQP_STAG_WQE_LEN_LOW_IDX] = 0;
+ cqp_wqe->wqe_words[NES_CQP_STAG_WQE_LEN_LOW_IDX] = 0;
cqp_wqe->wqe_words[NES_CQP_STAG_WQE_LEN_HIGH_PD_IDX] =
cpu_to_le32(nespd->pd_id & 0x00007fff);
cqp_wqe->wqe_words[NES_CQP_STAG_WQE_STAG_IDX] = cpu_to_le32(stag);
@@ -562,11 +556,10 @@ static struct ib_fmr *nes_alloc_fmr(struct ib_pd *ibpd,
cqp_wqe->wqe_words[NES_CQP_STAG_WQE_PBL_LEN_IDX] = 0;
atomic_set(&cqp_request->refcount, 2);
- nes_post_cqp_request(nesdev, cqp_request, NES_CQP_REQUEST_NOT_HOLDING_LOCK,
- NES_CQP_REQUEST_RING_DOORBELL);
+ nes_post_cqp_request(nesdev, cqp_request, NES_CQP_REQUEST_RING_DOORBELL);
/* Wait for CQP */
- ret = wait_event_timeout(cqp_request->waitq, (0 != cqp_request->request_done),
+ ret = wait_event_timeout(cqp_request->waitq, (cqp_request->request_done != 0),
NES_EVENT_TIMEOUT);
nes_debug(NES_DBG_MR, "Register STag 0x%08X completed, wait_event_timeout ret = %u,"
" CQP Major:Minor codes = 0x%04X:0x%04X.\n",
@@ -575,10 +568,8 @@ static struct ib_fmr *nes_alloc_fmr(struct ib_pd *ibpd,
if ((!ret) || (cqp_request->major_code)) {
if (atomic_dec_and_test(&cqp_request->refcount)) {
if (cqp_request->dynamic) {
- atomic_inc(&cqp_reqs_dynfreed);
kfree(cqp_request);
} else {
- atomic_inc(&cqp_reqs_freed);
spin_lock_irqsave(&nesdev->cqp.lock, flags);
list_add_tail(&cqp_request->list, &nesdev->cqp_avail_reqs);
spin_unlock_irqrestore(&nesdev->cqp.lock, flags);
@@ -589,10 +580,8 @@ static struct ib_fmr *nes_alloc_fmr(struct ib_pd *ibpd,
} else {
if (atomic_dec_and_test(&cqp_request->refcount)) {
if (cqp_request->dynamic) {
- atomic_inc(&cqp_reqs_dynfreed);
kfree(cqp_request);
} else {
- atomic_inc(&cqp_reqs_freed);
spin_lock_irqsave(&nesdev->cqp.lock, flags);
list_add_tail(&cqp_request->list, &nesdev->cqp_avail_reqs);
spin_unlock_irqrestore(&nesdev->cqp.lock, flags);
@@ -605,6 +594,7 @@ static struct ib_fmr *nes_alloc_fmr(struct ib_pd *ibpd,
nesfmr->attr = *ibfmr_attr;
return &nesfmr->nesmr.ibfmr;
+
failed_leaf_vpbl_pages_alloc:
/* unroll all allocated pages */
for (i=0; i<nesfmr->leaf_pbl_cnt; i++) {
@@ -614,7 +604,8 @@ static struct ib_fmr *nes_alloc_fmr(struct ib_pd *ibpd,
}
}
if (nesfmr->root_vpbl.leaf_vpbl)
- kfree( nesfmr->root_vpbl.leaf_vpbl );
+ kfree(nesfmr->root_vpbl.leaf_vpbl);
+
failed_leaf_vpbl_alloc:
if (nesfmr->leaf_pbl_cnt == 0) {
if (nesfmr->root_vpbl.pbl_vbase)
@@ -623,10 +614,13 @@ static struct ib_fmr *nes_alloc_fmr(struct ib_pd *ibpd,
} else
pci_free_consistent(nesdev->pcidev, 8192, nesfmr->root_vpbl.pbl_vbase,
nesfmr->root_vpbl.pbl_pbase);
+
failed_vpbl_alloc:
kfree(nesfmr);
+
failed_fmr_alloc:
nes_free_resource(nesadapter, nesadapter->allocated_mrs, stag_index);
+
failed_resource_alloc:
return ERR_PTR(ret);
}
@@ -656,7 +650,7 @@ static int nes_dealloc_fmr(struct ib_fmr *ibfmr)
pci_free_consistent(nesdev->pcidev, 4096, nesfmr->root_vpbl.pbl_vbase,
nesfmr->root_vpbl.pbl_pbase);
} else {
- for (i=0; i<nesfmr->leaf_pbl_cnt; i++) {
+ for (i = 0; i < nesfmr->leaf_pbl_cnt; i++) {
pci_free_consistent(nesdev->pcidev, 4096, nesfmr->root_vpbl.leaf_vpbl[i].pbl_vbase,
nesfmr->root_vpbl.leaf_vpbl[i].pbl_pbase);
}
@@ -781,7 +775,7 @@ static int nes_modify_port(struct ib_device *ibdev, u8 port,
/**
* nes_query_pkey
*/
-static int nes_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 * pkey)
+static int nes_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 *pkey)
{
*pkey = 0;
return 0;
@@ -812,34 +806,51 @@ static struct ib_ucontext *nes_alloc_ucontext(struct ib_device *ibdev,
{
struct nes_vnic *nesvnic = to_nesvnic(ibdev);
struct nes_device *nesdev = nesvnic->nesdev;
+ struct nes_adapter *nesadapter = nesdev->nesadapter;
+ struct nes_alloc_ucontext_req req;
struct nes_alloc_ucontext_resp uresp;
struct nes_ucontext *nes_ucontext;
struct nes_ib_device *nesibdev = nesvnic->nesibdev;
+
+ if (ib_copy_from_udata(&req, udata, sizeof(struct nes_alloc_ucontext_req))) {
+ printk(KERN_ERR PFX "Invalid structure size on allocate user context.\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (req.userspace_ver != NES_ABI_USERSPACE_VER) {
+ printk(KERN_ERR PFX "Invalid userspace driver version detected. Detected version %d, should be %d\n",
+ req.userspace_ver, NES_ABI_USERSPACE_VER);
+ return ERR_PTR(-EINVAL);
+ }
+
memset(&uresp, 0, sizeof uresp);
uresp.max_qps = nesibdev->max_qp;
uresp.max_pds = nesibdev->max_pd;
- uresp.wq_size = nesdev->nesadapter->max_qp_wr*2;
+ uresp.wq_size = nesdev->nesadapter->max_qp_wr * 2;
+ uresp.virtwq = nesadapter->virtwq;
+ uresp.kernel_ver = NES_ABI_KERNEL_VER;
- nes_ucontext = kmalloc(sizeof *nes_ucontext, GFP_KERNEL);
+ nes_ucontext = kzalloc(sizeof *nes_ucontext, GFP_KERNEL);
if (!nes_ucontext)
return ERR_PTR(-ENOMEM);
- memset(nes_ucontext, 0, sizeof(struct nes_ucontext));
-
nes_ucontext->nesdev = nesdev;
nes_ucontext->mmap_wq_offset = ((uresp.max_pds * 4096) + PAGE_SIZE-1) / PAGE_SIZE;
nes_ucontext->mmap_cq_offset = nes_ucontext->mmap_wq_offset +
((sizeof(struct nes_hw_qp_wqe) * uresp.max_qps * 2) + PAGE_SIZE-1) /
PAGE_SIZE;
+
if (ib_copy_to_udata(udata, &uresp, sizeof uresp)) {
kfree(nes_ucontext);
return ERR_PTR(-EFAULT);
}
INIT_LIST_HEAD(&nes_ucontext->cq_reg_mem_list);
+ INIT_LIST_HEAD(&nes_ucontext->qp_reg_mem_list);
return &nes_ucontext->ibucontext;
}
@@ -882,7 +893,7 @@ static int nes_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
return -EFAULT;
}
nesqp = nes_ucontext->mmap_nesqp[index];
- if (NULL == nesqp) {
+ if (nesqp == NULL) {
nes_debug(NES_DBG_MMAP, "wq %lu has a NULL QP base.\n", index);
return -EFAULT;
}
@@ -903,7 +914,7 @@ static int nes_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
if (io_remap_pfn_range(vma, vma->vm_start,
(nesdev->doorbell_start +
- ((nes_ucontext->mmap_db_index[index]-nesdev->base_doorbell_index) * 4096))
+ ((nes_ucontext->mmap_db_index[index] - nesdev->base_doorbell_index) * 4096))
>> PAGE_SHIFT, PAGE_SIZE, vma->vm_page_prot))
return -EAGAIN;
vma->vm_private_data = nes_ucontext;
@@ -929,7 +940,8 @@ static struct ib_pd *nes_alloc_pd(struct ib_device *ibdev,
u32 pd_num = 0;
int err;
- nes_debug(NES_DBG_PD, "netdev refcnt=%u\n",
+ nes_debug(NES_DBG_PD, "nesvnic=%p, netdev=%p %s, ibdev=%p, context=%p, netdev refcnt=%u\n",
+ nesvnic, nesdev->netdev[0], nesdev->netdev[0]->name, ibdev, context,
atomic_read(&nesvnic->netdev->refcnt));
err = nes_alloc_resource(nesadapter, nesadapter->allocated_pds,
@@ -938,12 +950,12 @@ static struct ib_pd *nes_alloc_pd(struct ib_device *ibdev,
return ERR_PTR(err);
}
- nespd = kmalloc(sizeof (struct nes_pd), GFP_KERNEL);
+ nespd = kzalloc(sizeof (struct nes_pd), GFP_KERNEL);
if (!nespd) {
nes_free_resource(nesadapter, nesadapter->allocated_pds, pd_num);
return ERR_PTR(-ENOMEM);
}
- memset(nespd, 0, sizeof(struct nes_pd));
+
nes_debug(NES_DBG_PD, "Allocating PD (%p) for ib device %s\n",
nespd, nesvnic->nesibdev->ibdev.name);
@@ -1031,6 +1043,221 @@ static int nes_destroy_ah(struct ib_ah *ah)
/**
+ * nes_get_encoded_size
+ */
+static inline u8 nes_get_encoded_size(u32 *size)
+{
+ u8 encoded_size = 0;
+ if (*size <= 32) {
+ *size = 32;
+ encoded_size = 1;
+ } else if (*size <= 128) {
+ *size = 128;
+ encoded_size = 2;
+ } else if (*size <= 512) {
+ *size = 512;
+ encoded_size = 3;
+ }
+ return (encoded_size);
+}
+
+
+
+/**
+ * nes_setup_virt_qp
+ */
+static int nes_setup_virt_qp(struct nes_qp *nesqp, struct nes_pbl *nespbl,
+ struct nes_vnic *nesvnic, int sq_size, int rq_size)
+{
+ unsigned long flags;
+ void *mem;
+ u64 *pbl = NULL;
+ u64 *tpbl;
+ u64 *pblbuffer;
+ struct nes_device *nesdev = nesvnic->nesdev;
+ struct nes_adapter *nesadapter = nesdev->nesadapter;
+ u32 pbl_entries;
+ u8 rq_pbl_entries;
+ u8 sq_pbl_entries;
+
+ pbl_entries = nespbl->pbl_size >> 3;
+ nes_debug(NES_DBG_QP, "Userspace PBL, pbl_size=%u, pbl_entries = %d pbl_vbase=%p, pbl_pbase=%p\n",
+ nespbl->pbl_size, pbl_entries,
+ (void *)nespbl->pbl_vbase,
+ (void *)nespbl->pbl_pbase);
+ pbl = nespbl->pbl_vbase; /* points to first pbl entry */
+ /* now let's set the sq_vbase as well as rq_vbase addrs we will assign */
+ /* the first pbl to be for the rq_vbase... */
+ rq_pbl_entries = (rq_size * sizeof(struct nes_hw_qp_wqe)) >> PAGE_SHIFT;
+ sq_pbl_entries = (sq_size * sizeof(struct nes_hw_qp_wqe)) >> PAGE_SHIFT;
+ nesqp->hwqp.sq_pbase = le32_to_cpu(((u32 *)pbl)[0]) | ((u64)le32_to_cpu(((u32 *)pbl)[1]) << 32);
+ if (!nespbl->page) {
+ nes_debug(NES_DBG_QP, "QP nespbl->page is NULL \n");
+ kfree(nespbl);
+ return -ENOMEM;
+ }
+
+ nesqp->hwqp.sq_vbase = kmap(nespbl->page);
+ nesqp->page = nespbl->page;
+ if (!nesqp->hwqp.sq_vbase) {
+ nes_debug(NES_DBG_QP, "QP sq_vbase kmap failed\n");
+ kfree(nespbl);
+ return -ENOMEM;
+ }
+
+ /* Now to get to sq.. we need to calculate how many */
+ /* PBL entries were used by the rq.. */
+ pbl += sq_pbl_entries;
+ nesqp->hwqp.rq_pbase = le32_to_cpu(((u32 *)pbl)[0]) | ((u64)le32_to_cpu(((u32 *)pbl)[1]) << 32);
+ /* nesqp->hwqp.rq_vbase = bus_to_virt(*pbl); */
+ /*nesqp->hwqp.rq_vbase = phys_to_virt(*pbl); */
+
+ nes_debug(NES_DBG_QP, "QP sq_vbase= %p sq_pbase=%p rq_vbase=%p rq_pbase=%p\n",
+ nesqp->hwqp.sq_vbase, (void *)nesqp->hwqp.sq_pbase,
+ nesqp->hwqp.rq_vbase, (void *)nesqp->hwqp.rq_pbase);
+ spin_lock_irqsave(&nesadapter->pbl_lock, flags);
+ if (!nesadapter->free_256pbl) {
+ pci_free_consistent(nesdev->pcidev, nespbl->pbl_size, nespbl->pbl_vbase,
+ nespbl->pbl_pbase);
+ spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);
+ kunmap(nesqp->page);
+ kfree(nespbl);
+ return -ENOMEM;
+ }
+ nesadapter->free_256pbl--;
+ spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);
+
+ nesqp->pbl_vbase = pci_alloc_consistent(nesdev->pcidev, 256, &nesqp->pbl_pbase);
+ pblbuffer = nesqp->pbl_vbase;
+ if (!nesqp->pbl_vbase) {
+ /* memory allocated during nes_reg_user_mr() */
+ pci_free_consistent(nesdev->pcidev, nespbl->pbl_size, nespbl->pbl_vbase,
+ nespbl->pbl_pbase);
+ kfree(nespbl);
+ spin_lock_irqsave(&nesadapter->pbl_lock, flags);
+ nesadapter->free_256pbl++;
+ spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);
+ kunmap(nesqp->page);
+ return -ENOMEM;
+ }
+ memset(nesqp->pbl_vbase, 0, 256);
+ /* fill in the page address in the pbl buffer.. */
+ tpbl = pblbuffer + 16;
+ pbl = nespbl->pbl_vbase;
+ while (sq_pbl_entries--)
+ *tpbl++ = *pbl++;
+ tpbl = pblbuffer;
+ while (rq_pbl_entries--)
+ *tpbl++ = *pbl++;
+
+ /* done with memory allocated during nes_reg_user_mr() */
+ pci_free_consistent(nesdev->pcidev, nespbl->pbl_size, nespbl->pbl_vbase,
+ nespbl->pbl_pbase);
+ kfree(nespbl);
+
+ nesqp->qp_mem_size =
+ max((u32)sizeof(struct nes_qp_context), ((u32)256)) + 256; /* this is Q2 */
+ /* Round up to a multiple of a page */
+ nesqp->qp_mem_size += PAGE_SIZE - 1;
+ nesqp->qp_mem_size &= ~(PAGE_SIZE - 1);
+
+ mem = pci_alloc_consistent(nesdev->pcidev, nesqp->qp_mem_size,
+ &nesqp->hwqp.q2_pbase);
+
+ if (!mem) {
+ pci_free_consistent(nesdev->pcidev, 256, nesqp->pbl_vbase, nesqp->pbl_pbase);
+ nesqp->pbl_vbase = NULL;
+ spin_lock_irqsave(&nesadapter->pbl_lock, flags);
+ nesadapter->free_256pbl++;
+ spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);
+ kunmap(nesqp->page);
+ return -ENOMEM;
+ }
+ nesqp->hwqp.q2_vbase = mem;
+ mem += 256;
+ memset(nesqp->hwqp.q2_vbase, 0, 256);
+ nesqp->nesqp_context = mem;
+ memset(nesqp->nesqp_context, 0, sizeof(*nesqp->nesqp_context));
+ nesqp->nesqp_context_pbase = nesqp->hwqp.q2_pbase + 256;
+
+ return 0;
+}
+
+
+/**
+ * nes_setup_mmap_qp
+ */
+static int nes_setup_mmap_qp(struct nes_qp *nesqp, struct nes_vnic *nesvnic,
+ int sq_size, int rq_size)
+{
+ void *mem;
+ struct nes_device *nesdev = nesvnic->nesdev;
+
+ nesqp->qp_mem_size = (sizeof(struct nes_hw_qp_wqe) * sq_size) +
+ (sizeof(struct nes_hw_qp_wqe) * rq_size) +
+ max((u32)sizeof(struct nes_qp_context), ((u32)256)) +
+ 256; /* this is Q2 */
+ /* Round up to a multiple of a page */
+ nesqp->qp_mem_size += PAGE_SIZE - 1;
+ nesqp->qp_mem_size &= ~(PAGE_SIZE - 1);
+
+ mem = pci_alloc_consistent(nesdev->pcidev, nesqp->qp_mem_size,
+ &nesqp->hwqp.sq_pbase);
+ if (!mem)
+ return -ENOMEM;
+ nes_debug(NES_DBG_QP, "PCI consistent memory for "
+ "host descriptor rings located @ %p (pa = 0x%08lX.) size = %u.\n",
+ mem, (unsigned long)nesqp->hwqp.sq_pbase, nesqp->qp_mem_size);
+
+ memset(mem, 0, nesqp->qp_mem_size);
+
+ nesqp->hwqp.sq_vbase = mem;
+ mem += sizeof(struct nes_hw_qp_wqe) * sq_size;
+
+ nesqp->hwqp.rq_vbase = mem;
+ nesqp->hwqp.rq_pbase = nesqp->hwqp.sq_pbase +
+ sizeof(struct nes_hw_qp_wqe) * sq_size;
+ mem += sizeof(struct nes_hw_qp_wqe) * rq_size;
+
+ nesqp->hwqp.q2_vbase = mem;
+ nesqp->hwqp.q2_pbase = nesqp->hwqp.rq_pbase +
+ sizeof(struct nes_hw_qp_wqe) * rq_size;
+ mem += 256;
+ memset(nesqp->hwqp.q2_vbase, 0, 256);
+
+ nesqp->nesqp_context = mem;
+ nesqp->nesqp_context_pbase = nesqp->hwqp.q2_pbase + 256;
+ memset(nesqp->nesqp_context, 0, sizeof(*nesqp->nesqp_context));
+ return 0;
+}
+
+
+/**
+ * nes_free_qp_mem() is to free up the qp's pci_alloc_consistent() memory.
+ */
+static inline void nes_free_qp_mem(struct nes_device *nesdev,
+ struct nes_qp *nesqp, int virt_wqs)
+{
+ unsigned long flags;
+ struct nes_adapter *nesadapter = nesdev->nesadapter;
+ if (!virt_wqs) {
+ pci_free_consistent(nesdev->pcidev, nesqp->qp_mem_size,
+ nesqp->hwqp.sq_vbase, nesqp->hwqp.sq_pbase);
+ } else {
+ spin_lock_irqsave(&nesadapter->pbl_lock, flags);
+ nesadapter->free_256pbl++;
+ spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);
+ pci_free_consistent(nesdev->pcidev, nesqp->qp_mem_size, nesqp->hwqp.q2_vbase, nesqp->hwqp.q2_pbase);
+ pci_free_consistent(nesdev->pcidev, 256, nesqp->pbl_vbase, nesqp->pbl_pbase);
+ nesqp->pbl_vbase = NULL;
+ kunmap(nesqp->page);
+ }
+}
+
+
+/**
* nes_create_qp
*/
static struct ib_qp *nes_create_qp(struct ib_pd *ibpd,
@@ -1047,12 +1274,16 @@ static struct ib_qp *nes_create_qp(struct ib_pd *ibpd,
struct nes_ucontext *nes_ucontext;
struct nes_hw_cqp_wqe *cqp_wqe;
struct nes_cqp_request *cqp_request;
+ struct nes_create_qp_req req;
struct nes_create_qp_resp uresp;
+ struct nes_pbl *nespbl = NULL;
u32 qp_num = 0;
/* u32 counter = 0; */
void *mem;
unsigned long flags;
int ret;
+ int err;
+ int virt_wqs = 0;
int sq_size;
int rq_size;
u8 sq_encoded_size;
@@ -1067,36 +1298,20 @@ static struct ib_qp *nes_create_qp(struct ib_pd *ibpd,
} else {
init_attr->cap.max_inline_data = 64;
}
+ sq_size = init_attr->cap.max_send_wr;
+ rq_size = init_attr->cap.max_recv_wr;
- if (init_attr->cap.max_send_wr < 32) {
- sq_size = 32;
- sq_encoded_size = 1;
- } else if (init_attr->cap.max_send_wr < 128) {
- sq_size = 128;
- sq_encoded_size = 2;
- } else if (init_attr->cap.max_send_wr < 512) {
- sq_size = 512;
- sq_encoded_size = 3;
- } else {
- printk(KERN_ERR PFX "%s: SQ size (%u) too large.\n",
- __FUNCTION__, init_attr->cap.max_send_wr);
- return ERR_PTR(-EINVAL);
- }
- init_attr->cap.max_send_wr = sq_size - 2;
- if (init_attr->cap.max_recv_wr < 32) {
- rq_size = 32;
- rq_encoded_size = 1;
- } else if (init_attr->cap.max_recv_wr < 128) {
- rq_size = 128;
- rq_encoded_size = 2;
- } else if (init_attr->cap.max_recv_wr < 512) {
- rq_size = 512;
- rq_encoded_size = 3;
- } else {
- printk(KERN_ERR PFX "%s: RQ size (%u) too large.\n",
- __FUNCTION__, init_attr->cap.max_recv_wr);
+ /* check if the encoded sizes are OK or not */
+ sq_encoded_size = nes_get_encoded_size(&sq_size);
+ rq_encoded_size = nes_get_encoded_size(&rq_size);
+
+ if ((!sq_encoded_size) || (!rq_encoded_size)) {
+ nes_debug(NES_DBG_QP, "ERROR bad rq (%u) or sq (%u) size\n",
+ rq_size, sq_size);
return ERR_PTR(-EINVAL);
}
+
+ init_attr->cap.max_send_wr = sq_size - 2;
init_attr->cap.max_recv_wr = rq_size -1;
nes_debug(NES_DBG_QP, "RQ size=%u, SQ Size=%u\n", rq_size, sq_size);
@@ -1123,18 +1338,53 @@ static struct ib_qp *nes_create_qp(struct ib_pd *ibpd,
nesqp->allocated_buffer = mem;
if (udata) {
+ if (ib_copy_from_udata(&req, udata, sizeof(struct nes_create_qp_req))) {
+ nes_free_resource(nesadapter, nesadapter->allocated_qps, qp_num);
+ kfree(nesqp->allocated_buffer);
+ nes_debug(NES_DBG_QP, "ib_copy_from_udata() Failed \n");
+ return NULL;
+ }
+ if (req.user_wqe_buffers) {
+ virt_wqs = 1;
+ }
if ((ibpd->uobject) && (ibpd->uobject->context)) {
nesqp->user_mode = 1;
nes_ucontext = to_nesucontext(ibpd->uobject->context);
+ if (virt_wqs) {
+ err = 1;
+ list_for_each_entry(nespbl, &nes_ucontext->qp_reg_mem_list, list) {
+ if (nespbl->user_base == (unsigned long)req.user_wqe_buffers) {
+ list_del(&nespbl->list);
+ err = 0;
+ nes_debug(NES_DBG_QP, "Found PBL for virtual QP. nespbl=%p. user_base=0x%lx\n",
+ nespbl, nespbl->user_base);
+ break;
+ }
+ }
+ if (err) {
+ nes_debug(NES_DBG_QP, "Didn't Find PBL for virtual QP. address = %llx.\n",
+ (long long unsigned int)req.user_wqe_buffers);
+ nes_free_resource(nesadapter, nesadapter->allocated_qps, qp_num);
+ kfree(nesqp->allocated_buffer);
+ return ERR_PTR(-ENOMEM);
+ }
+ }
+
+ nes_ucontext = to_nesucontext(ibpd->uobject->context);
nesqp->mmap_sq_db_index =
- find_next_zero_bit(nes_ucontext->allocated_wqs,
- NES_MAX_USER_WQ_REGIONS, nes_ucontext->first_free_wq);
+ find_next_zero_bit(nes_ucontext->allocated_wqs,
+ NES_MAX_USER_WQ_REGIONS, nes_ucontext->first_free_wq);
/* nes_debug(NES_DBG_QP, "find_first_zero_biton wqs returned %u\n",
- nespd->mmap_db_index); */
+ nespd->mmap_db_index); */
if (nesqp->mmap_sq_db_index > NES_MAX_USER_WQ_REGIONS) {
nes_debug(NES_DBG_QP,
- "db index > max user regions, failing create QP\n");
+ "db index > max user regions, failing create QP\n");
nes_free_resource(nesadapter, nesadapter->allocated_qps, qp_num);
+ if (virt_wqs) {
+ pci_free_consistent(nesdev->pcidev, nespbl->pbl_size, nespbl->pbl_vbase,
+ nespbl->pbl_pbase);
+ kfree(nespbl);
+ }
kfree(nesqp->allocated_buffer);
return ERR_PTR(-ENOMEM);
}
@@ -1147,53 +1397,21 @@ static struct ib_qp *nes_create_qp(struct ib_pd *ibpd,
return ERR_PTR(-EFAULT);
}
}
-
- nesqp->qp_mem_size = (sizeof(struct nes_hw_qp_wqe) * sq_size) +
- (sizeof(struct nes_hw_qp_wqe) * rq_size) +
- max((u32)sizeof(struct nes_qp_context), ((u32)256)) +
- 256; /* this is Q2 */
- /* Round up to a multiple of a page */
- nesqp->qp_mem_size += PAGE_SIZE - 1;
- nesqp->qp_mem_size &= ~(PAGE_SIZE - 1);
-
- mem = pci_alloc_consistent(nesdev->pcidev, nesqp->qp_mem_size,
- &nesqp->hwqp.sq_pbase);
- if (!mem) {
- nes_free_resource(nesadapter, nesadapter->allocated_qps, qp_num);
+ err = (!virt_wqs) ? nes_setup_mmap_qp(nesqp, nesvnic, sq_size, rq_size) :
+ nes_setup_virt_qp(nesqp, nespbl, nesvnic, sq_size, rq_size);
+ if (err) {
nes_debug(NES_DBG_QP,
- "Unable to allocate memory for host descriptor rings\n");
+ "error geting qp mem code = %d\n", err);
+ nes_free_resource(nesadapter, nesadapter->allocated_qps, qp_num);
kfree(nesqp->allocated_buffer);
return ERR_PTR(-ENOMEM);
}
- nes_debug(NES_DBG_QP, "PCI consistent memory for "
- "host descriptor rings located @ %p (pa = 0x%08lX.) size = %u.\n",
- mem, (unsigned long)nesqp->hwqp.sq_pbase, nesqp->qp_mem_size);
-
- memset(mem, 0, nesqp->qp_mem_size);
- nesqp->hwqp.sq_vbase = mem;
nesqp->hwqp.sq_size = sq_size;
nesqp->hwqp.sq_encoded_size = sq_encoded_size;
nesqp->hwqp.sq_head = 1;
- mem += sizeof(struct nes_hw_qp_wqe) * sq_size;
-
- nesqp->hwqp.rq_vbase = mem;
nesqp->hwqp.rq_size = rq_size;
nesqp->hwqp.rq_encoded_size = rq_encoded_size;
- nesqp->hwqp.rq_pbase = nesqp->hwqp.sq_pbase +
- sizeof(struct nes_hw_qp_wqe) * sq_size;
- mem += sizeof(struct nes_hw_qp_wqe)*rq_size;
-
- nesqp->hwqp.q2_vbase = mem;
- nesqp->hwqp.q2_pbase = nesqp->hwqp.rq_pbase +
- sizeof(struct nes_hw_qp_wqe) * rq_size;
- mem += 256;
- memset(nesqp->hwqp.q2_vbase, 0, 256);
-
- nesqp->nesqp_context = mem;
- nesqp->nesqp_context_pbase = nesqp->hwqp.q2_pbase + 256;
- memset(nesqp->nesqp_context, 0, sizeof(*nesqp->nesqp_context));
-
/* nes_debug(NES_DBG_QP, "nesqp->nesqp_context_pbase = %p\n",
(void *)nesqp->nesqp_context_pbase);
*/
@@ -1219,13 +1437,25 @@ static struct ib_qp *nes_create_qp(struct ib_pd *ibpd,
u64temp = (u64)nesqp->hwqp.sq_pbase;
nesqp->nesqp_context->sq_addr_low = cpu_to_le32((u32)u64temp);
nesqp->nesqp_context->sq_addr_high = cpu_to_le32((u32)(u64temp >> 32));
- u64temp = (u64)nesqp->hwqp.rq_pbase;
- nesqp->nesqp_context->rq_addr_low = cpu_to_le32((u32)u64temp);
- nesqp->nesqp_context->rq_addr_high = cpu_to_le32((u32)(u64temp >> 32));
- spin_lock_irqsave(&nesdev->cqp.lock, flags);
+
+
+ if (!virt_wqs) {
+ u64temp = (u64)nesqp->hwqp.sq_pbase;
+ nesqp->nesqp_context->sq_addr_low = cpu_to_le32((u32)u64temp);
+ nesqp->nesqp_context->sq_addr_high = cpu_to_le32((u32)(u64temp >> 32));
+ u64temp = (u64)nesqp->hwqp.rq_pbase;
+ nesqp->nesqp_context->rq_addr_low = cpu_to_le32((u32)u64temp);
+ nesqp->nesqp_context->rq_addr_high = cpu_to_le32((u32)(u64temp >> 32));
+ } else {
+ u64temp = (u64)nesqp->pbl_pbase;
+ nesqp->nesqp_context->rq_addr_low = cpu_to_le32((u32)u64temp);
+ nesqp->nesqp_context->rq_addr_high = cpu_to_le32((u32)(u64temp >> 32));
+ }
+
/* nes_debug(NES_DBG_QP, "next_qp_nic_index=%u, using nic_index=%d\n",
nesvnic->next_qp_nic_index,
nesvnic->qp_nic_index[nesvnic->next_qp_nic_index]); */
+ spin_lock_irqsave(&nesdev->cqp.lock, flags);
nesqp->nesqp_context->misc2 |= cpu_to_le32(
(u32)nesvnic->qp_nic_index[nesvnic->next_qp_nic_index] <<
NES_QPCONTEXT_MISC2_NIC_INDEX_SHIFT);
@@ -1239,9 +1469,9 @@ static struct ib_qp *nes_create_qp(struct ib_pd *ibpd,
nesqp->nesqp_context->pd_index_wscale |= cpu_to_le32((u32)nesqp->nespd->pd_id << 16);
u64temp = (u64)nesqp->hwqp.q2_pbase;
nesqp->nesqp_context->q2_addr_low = cpu_to_le32((u32)u64temp);
- nesqp->nesqp_context->q2_addr_high = cpu_to_le32((u32)(u64temp>>32));
+ nesqp->nesqp_context->q2_addr_high = cpu_to_le32((u32)(u64temp >> 32));
nesqp->nesqp_context->aeq_token_low = cpu_to_le32((u32)((u64)(nesqp)));
- nesqp->nesqp_context->aeq_token_high = cpu_to_le32((u32)(((u64)(nesqp))>>32));
+ nesqp->nesqp_context->aeq_token_high = cpu_to_le32((u32)(((u64)(nesqp)) >> 32));
nesqp->nesqp_context->ird_ord_sizes = cpu_to_le32(NES_QPCONTEXT_ORDIRD_ALSMM |
((((u32)nesadapter->max_irrq_wr) <<
NES_QPCONTEXT_ORDIRD_IRDSIZE_SHIFT) & NES_QPCONTEXT_ORDIRD_IRDSIZE_MASK));
@@ -1252,21 +1482,26 @@ static struct ib_qp *nes_create_qp(struct ib_pd *ibpd,
/* Create the QP */
- cqp_request = nes_get_cqp_request(nesdev, NES_CQP_REQUEST_NOT_HOLDING_LOCK);
- if (NULL == cqp_request) {
+ cqp_request = nes_get_cqp_request(nesdev);
+ if (cqp_request == NULL) {
nes_debug(NES_DBG_QP, "Failed to get a cqp_request\n");
nes_free_resource(nesadapter, nesadapter->allocated_qps, qp_num);
- pci_free_consistent(nesdev->pcidev, nesqp->qp_mem_size,
- nesqp->hwqp.sq_vbase, nesqp->hwqp.sq_pbase);
+ nes_free_qp_mem(nesdev, nesqp, virt_wqs);
kfree(nesqp->allocated_buffer);
return ERR_PTR(-ENOMEM);
}
cqp_request->waiting = 1;
cqp_wqe = &cqp_request->cqp_wqe;
- cqp_wqe->wqe_words[NES_CQP_WQE_OPCODE_IDX] = cpu_to_le32(
+ if (!virt_wqs) {
+ cqp_wqe->wqe_words[NES_CQP_WQE_OPCODE_IDX] = cpu_to_le32(
NES_CQP_CREATE_QP | NES_CQP_QP_TYPE_IWARP |
NES_CQP_QP_IWARP_STATE_IDLE);
+ } else {
+ cqp_wqe->wqe_words[NES_CQP_WQE_OPCODE_IDX] = cpu_to_le32(
+ NES_CQP_CREATE_QP | NES_CQP_QP_TYPE_IWARP | NES_CQP_QP_VIRT_WQS |
+ NES_CQP_QP_IWARP_STATE_IDLE);
+ }
cqp_wqe->wqe_words[NES_CQP_WQE_OPCODE_IDX] |= cpu_to_le32(NES_CQP_QP_CQS_VALID);
cqp_wqe->wqe_words[NES_CQP_WQE_ID_IDX] = cpu_to_le32(nesqp->hwqp.qp_id);
cqp_wqe->wqe_words[NES_CQP_WQE_COMP_CTX_LOW_IDX] =
@@ -1281,14 +1516,13 @@ static struct ib_qp *nes_create_qp(struct ib_pd *ibpd,
cpu_to_le32((u32)(u64temp >> 32));
atomic_set(&cqp_request->refcount, 2);
- nes_post_cqp_request(nesdev, cqp_request, NES_CQP_REQUEST_NOT_HOLDING_LOCK,
- NES_CQP_REQUEST_RING_DOORBELL);
+ nes_post_cqp_request(nesdev, cqp_request, NES_CQP_REQUEST_RING_DOORBELL);
/* Wait for CQP */
nes_debug(NES_DBG_QP, "Waiting for create iWARP QP%u to complete.\n",
nesqp->hwqp.qp_id);
ret = wait_event_timeout(cqp_request->waitq,
- (0 != cqp_request->request_done), NES_EVENT_TIMEOUT);
+ (cqp_request->request_done != 0), NES_EVENT_TIMEOUT);
nes_debug(NES_DBG_QP, "Create iwarp QP%u completed, wait_event_timeout ret=%u,"
" nesdev->cqp_head = %u, nesdev->cqp.sq_tail = %u,"
" CQP Major:Minor codes = 0x%04X:0x%04X.\n",
@@ -1297,18 +1531,15 @@ static struct ib_qp *nes_create_qp(struct ib_pd *ibpd,
if ((!ret) || (cqp_request->major_code)) {
if (atomic_dec_and_test(&cqp_request->refcount)) {
if (cqp_request->dynamic) {
- atomic_inc(&cqp_reqs_dynfreed);
kfree(cqp_request);
} else {
- atomic_inc(&cqp_reqs_freed);
spin_lock_irqsave(&nesdev->cqp.lock, flags);
list_add_tail(&cqp_request->list, &nesdev->cqp_avail_reqs);
spin_unlock_irqrestore(&nesdev->cqp.lock, flags);
}
}
nes_free_resource(nesadapter, nesadapter->allocated_qps, qp_num);
- pci_free_consistent(nesdev->pcidev, nesqp->qp_mem_size,
- nesqp->hwqp.sq_vbase, nesqp->hwqp.sq_pbase);
+ nes_free_qp_mem(nesdev, nesqp, virt_wqs);
kfree(nesqp->allocated_buffer);
if (!ret) {
return ERR_PTR(-ETIME);
@@ -1318,10 +1549,8 @@ static struct ib_qp *nes_create_qp(struct ib_pd *ibpd,
} else {
if (atomic_dec_and_test(&cqp_request->refcount)) {
if (cqp_request->dynamic) {
- atomic_inc(&cqp_reqs_dynfreed);
kfree(cqp_request);
} else {
- atomic_inc(&cqp_reqs_freed);
spin_lock_irqsave(&nesdev->cqp.lock, flags);
list_add_tail(&cqp_request->list, &nesdev->cqp_avail_reqs);
spin_unlock_irqrestore(&nesdev->cqp.lock, flags);
@@ -1336,9 +1565,8 @@ static struct ib_qp *nes_create_qp(struct ib_pd *ibpd,
uresp.qp_id = nesqp->hwqp.qp_id;
uresp.nes_drv_opt = nes_drv_opt;
if (ib_copy_to_udata(udata, &uresp, sizeof uresp)) {
- pci_free_consistent(nesdev->pcidev, nesqp->qp_mem_size,
- nesqp->hwqp.sq_vbase, nesqp->hwqp.sq_pbase);
nes_free_resource(nesadapter, nesadapter->allocated_qps, qp_num);
+ nes_free_qp_mem(nesdev, nesqp, virt_wqs);
kfree(nesqp->allocated_buffer);
return ERR_PTR(-EFAULT);
}
@@ -1430,9 +1658,7 @@ static int nes_destroy_qp(struct ib_qp *ibqp)
* nes_create_cq
*/
static struct ib_cq *nes_create_cq(struct ib_device *ibdev, int entries,
-#ifndef OFED_1_2
int comp_vector,
-#endif
struct ib_ucontext *context, struct ib_udata *udata)
{
u64 u64temp;
@@ -1459,13 +1685,12 @@ static struct ib_cq *nes_create_cq(struct ib_device *ibdev, int entries,
return ERR_PTR(err);
}
- nescq = kmalloc(sizeof(struct nes_cq), GFP_KERNEL);
+ nescq = kzalloc(sizeof(struct nes_cq), GFP_KERNEL);
if (!nescq) {
nes_free_resource(nesadapter, nesadapter->allocated_cqs, cq_num);
nes_debug(NES_DBG_CQ, "Unable to allocate nes_cq struct\n");
return ERR_PTR(-ENOMEM);
}
- memset(nescq, 0, sizeof(struct nes_cq));
nescq->hw_cq.cq_size = max(entries + 1, 5);
nescq->hw_cq.cq_number = cq_num;
@@ -1523,8 +1748,8 @@ static struct ib_cq *nes_create_cq(struct ib_device *ibdev, int entries,
spin_lock_init(&nescq->lock);
/* send CreateCQ request to CQP */
- cqp_request = nes_get_cqp_request(nesdev, NES_CQP_REQUEST_NOT_HOLDING_LOCK);
- if (NULL == cqp_request) {
+ cqp_request = nes_get_cqp_request(nesdev);
+ if (cqp_request == NULL) {
nes_debug(NES_DBG_CQ, "Failed to get a cqp_request.\n");
if (!context)
pci_free_consistent(nesdev->pcidev, nescq->cq_mem_size, mem,
@@ -1540,20 +1765,20 @@ static struct ib_cq *nes_create_cq(struct ib_device *ibdev, int entries,
NES_CQP_CREATE_CQ | NES_CQP_CQ_CEQ_VALID |
NES_CQP_CQ_CHK_OVERFLOW |
NES_CQP_CQ_CEQE_MASK |((u32)nescq->hw_cq.cq_size << 16));
- spin_lock_irqsave(&nesdev->cqp.lock, flags);
- if (1 != pbl_entries) {
+
+ spin_lock_irqsave(&nesadapter->pbl_lock, flags);
+
+ if (pbl_entries != 1) {
if (pbl_entries > 32) {
/* use 4k pbl */
nes_debug(NES_DBG_CQ, "pbl_entries=%u, use a 4k PBL\n", pbl_entries);
- if (0 == nesadapter->free_4kpbl) {
+ if (nesadapter->free_4kpbl == 0) {
if (cqp_request->dynamic) {
- spin_unlock_irqrestore(&nesdev->cqp.lock, flags);
- atomic_inc(&cqp_reqs_dynfreed);
+ spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);
kfree(cqp_request);
} else {
- atomic_inc(&cqp_reqs_freed);
list_add_tail(&cqp_request->list, &nesdev->cqp_avail_reqs);
- spin_unlock_irqrestore(&nesdev->cqp.lock, flags);
+ spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);
}
if (!context)
pci_free_consistent(nesdev->pcidev, nescq->cq_mem_size, mem,
@@ -1570,15 +1795,13 @@ static struct ib_cq *nes_create_cq(struct ib_device *ibdev, int entries,
} else {
/* use 256 byte pbl */
nes_debug(NES_DBG_CQ, "pbl_entries=%u, use a 256 byte PBL\n", pbl_entries);
- if (0 == nesadapter->free_256pbl) {
+ if (nesadapter->free_256pbl == 0) {
if (cqp_request->dynamic) {
- spin_unlock_irqrestore(&nesdev->cqp.lock, flags);
- atomic_inc(&cqp_reqs_dynfreed);
+ spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);
kfree(cqp_request);
} else {
- atomic_inc(&cqp_reqs_freed);
list_add_tail(&cqp_request->list, &nesdev->cqp_avail_reqs);
- spin_unlock_irqrestore(&nesdev->cqp.lock, flags);
+ spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);
}
if (!context)
pci_free_consistent(nesdev->pcidev, nescq->cq_mem_size, mem,
@@ -1594,12 +1817,16 @@ static struct ib_cq *nes_create_cq(struct ib_device *ibdev, int entries,
}
}
+ spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);
+
cqp_wqe->wqe_words[NES_CQP_WQE_ID_IDX] =
cpu_to_le32(nescq->hw_cq.cq_number | ((u32)nesdev->ceq_index << 16));
- cqp_wqe->wqe_words[NES_CQP_WQE_COMP_CTX_LOW_IDX] = cpu_to_le32((u32)((u64)(&nesdev->cqp)));
- cqp_wqe->wqe_words[NES_CQP_WQE_COMP_CTX_HIGH_IDX] = cpu_to_le32((u32)(((u64)(&nesdev->cqp))>>32));
+ cqp_wqe->wqe_words[NES_CQP_WQE_COMP_CTX_LOW_IDX] =
+ cpu_to_le32((u32)((u64)(&nesdev->cqp)));
+ cqp_wqe->wqe_words[NES_CQP_WQE_COMP_CTX_HIGH_IDX] =
+ cpu_to_le32((u32)(((u64)(&nesdev->cqp)) >> 32));
if (context) {
- if (1 != pbl_entries)
+ if (pbl_entries != 1)
u64temp = (u64)nespbl->pbl_pbase;
else
u64temp = le64_to_cpu(nespbl->pbl_vbase[0]);
@@ -1613,13 +1840,13 @@ static struct ib_cq *nes_create_cq(struct ib_device *ibdev, int entries,
cqp_wqe->wqe_words[NES_CQP_CQ_WQE_PBL_HIGH_IDX] = cpu_to_le32((u32)(u64temp >> 32));
cqp_wqe->wqe_words[NES_CQP_CQ_WQE_CQ_CONTEXT_HIGH_IDX] = 0;
u64temp = (u64)&nescq->hw_cq;
- cqp_wqe->wqe_words[NES_CQP_CQ_WQE_CQ_CONTEXT_LOW_IDX] = cpu_to_le32((u32)(u64temp>>1));
- cqp_wqe->wqe_words[NES_CQP_CQ_WQE_CQ_CONTEXT_HIGH_IDX] = cpu_to_le32(((u32)((u64temp)>>33))&0x7FFFFFFF);
+ cqp_wqe->wqe_words[NES_CQP_CQ_WQE_CQ_CONTEXT_LOW_IDX] =
+ cpu_to_le32((u32)(u64temp >> 1));
+ cqp_wqe->wqe_words[NES_CQP_CQ_WQE_CQ_CONTEXT_HIGH_IDX] =
+ cpu_to_le32(((u32)((u64temp) >> 33)) & 0x7FFFFFFF);
atomic_set(&cqp_request->refcount, 2);
- nes_post_cqp_request(nesdev, cqp_request, NES_CQP_REQUEST_HOLDING_LOCK,
- NES_CQP_REQUEST_RING_DOORBELL);
- spin_unlock_irqrestore(&nesdev->cqp.lock, flags);
+ nes_post_cqp_request(nesdev, cqp_request, NES_CQP_REQUEST_RING_DOORBELL);
/* Wait for CQP */
nes_debug(NES_DBG_CQ, "Waiting for create iWARP CQ%u to complete.\n",
@@ -1631,10 +1858,8 @@ static struct ib_cq *nes_create_cq(struct ib_device *ibdev, int entries,
if ((!ret) || (cqp_request->major_code)) {
if (atomic_dec_and_test(&cqp_request->refcount)) {
if (cqp_request->dynamic) {
- atomic_inc(&cqp_reqs_dynfreed);
kfree(cqp_request);
} else {
- atomic_inc(&cqp_reqs_freed);
spin_lock_irqsave(&nesdev->cqp.lock, flags);
list_add_tail(&cqp_request->list, &nesdev->cqp_avail_reqs);
spin_unlock_irqrestore(&nesdev->cqp.lock, flags);
@@ -1652,10 +1877,8 @@ static struct ib_cq *nes_create_cq(struct ib_device *ibdev, int entries,
} else {
if (atomic_dec_and_test(&cqp_request->refcount)) {
if (cqp_request->dynamic) {
- atomic_inc(&cqp_reqs_dynfreed);
kfree(cqp_request);
} else {
- atomic_inc(&cqp_reqs_freed);
spin_lock_irqsave(&nesdev->cqp.lock, flags);
list_add_tail(&cqp_request->list, &nesdev->cqp_avail_reqs);
spin_unlock_irqrestore(&nesdev->cqp.lock, flags);
@@ -1707,8 +1930,8 @@ static int nes_destroy_cq(struct ib_cq *ib_cq)
nes_debug(NES_DBG_CQ, "Destroy CQ%u\n", nescq->hw_cq.cq_number);
/* Send DestroyCQ request to CQP */
- cqp_request = nes_get_cqp_request(nesdev, NES_CQP_REQUEST_NOT_HOLDING_LOCK);
- if (NULL == cqp_request) {
+ cqp_request = nes_get_cqp_request(nesdev);
+ if (cqp_request == NULL) {
nes_debug(NES_DBG_CQ, "Failed to get a cqp_request.\n");
return -ENOMEM;
}
@@ -1718,7 +1941,7 @@ static int nes_destroy_cq(struct ib_cq *ib_cq)
cqp_wqe->wqe_words[NES_CQP_WQE_OPCODE_IDX] = cpu_to_le32(
NES_CQP_DESTROY_CQ | (nescq->hw_cq.cq_size << 16));
- spin_lock_irqsave(&nesdev->cqp.lock, flags);
+ spin_lock_irqsave(&nesadapter->pbl_lock, flags);
if (nescq->virtual_cq == 1) {
nesadapter->free_256pbl++;
if (nesadapter->free_256pbl > nesadapter->max_256pbl) {
@@ -1734,23 +1957,24 @@ static int nes_destroy_cq(struct ib_cq *ib_cq)
cqp_wqe->wqe_words[NES_CQP_WQE_OPCODE_IDX] |= cpu_to_le32(NES_CQP_CQ_4KB_CHUNK);
}
+ spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);
+
cqp_wqe->wqe_words[NES_CQP_WQE_ID_IDX] = cpu_to_le32(
nescq->hw_cq.cq_number | ((u32)PCI_FUNC(nesdev->pcidev->devfn) << 16));
- cqp_wqe->wqe_words[NES_CQP_WQE_COMP_CTX_LOW_IDX] = cpu_to_le32((u32)((u64)(&nesdev->cqp)));
- cqp_wqe->wqe_words[NES_CQP_WQE_COMP_CTX_HIGH_IDX] = cpu_to_le32((u32)(((u64)(&nesdev->cqp))>>32));
+ cqp_wqe->wqe_words[NES_CQP_WQE_COMP_CTX_LOW_IDX] = cpu_to_le32((u32)((u64)(&nesdev->cqp)));
+ cqp_wqe->wqe_words[NES_CQP_WQE_COMP_CTX_HIGH_IDX] =
+ cpu_to_le32((u32)(((u64)(&nesdev->cqp)) >> 32));
cqp_wqe->wqe_words[NES_CQP_WQE_COMP_SCRATCH_LOW_IDX] = 0;
cqp_wqe->wqe_words[NES_CQP_WQE_COMP_SCRATCH_HIGH_IDX] = 0;
- atomic_set(&cqp_request->refcount, 2);
nes_free_resource(nesadapter, nesadapter->allocated_cqs, nescq->hw_cq.cq_number);
- nes_post_cqp_request(nesdev, cqp_request, NES_CQP_REQUEST_HOLDING_LOCK,
- NES_CQP_REQUEST_RING_DOORBELL);
- spin_unlock_irqrestore(&nesdev->cqp.lock, flags);
+
+ atomic_set(&cqp_request->refcount, 2);
+ nes_post_cqp_request(nesdev, cqp_request, NES_CQP_REQUEST_RING_DOORBELL);
/* Wait for CQP */
nes_debug(NES_DBG_CQ, "Waiting for destroy iWARP CQ%u to complete.\n",
nescq->hw_cq.cq_number);
- /* cqp_head = (cqp_head+1)&(nesdev->cqp.sq_size-1); */
ret = wait_event_timeout(cqp_request->waitq, (0 != cqp_request->request_done),
NES_EVENT_TIMEOUT);
nes_debug(NES_DBG_CQ, "Destroy iWARP CQ%u completed, wait_event_timeout ret = %u,"
@@ -1760,10 +1984,8 @@ static int nes_destroy_cq(struct ib_cq *ib_cq)
if ((!ret) || (cqp_request->major_code)) {
if (atomic_dec_and_test(&cqp_request->refcount)) {
if (cqp_request->dynamic) {
- atomic_inc(&cqp_reqs_dynfreed);
kfree(cqp_request);
} else {
- atomic_inc(&cqp_reqs_freed);
spin_lock_irqsave(&nesdev->cqp.lock, flags);
list_add_tail(&cqp_request->list, &nesdev->cqp_avail_reqs);
spin_unlock_irqrestore(&nesdev->cqp.lock, flags);
@@ -1782,10 +2004,8 @@ static int nes_destroy_cq(struct ib_cq *ib_cq)
ret = 0;
if (atomic_dec_and_test(&cqp_request->refcount)) {
if (cqp_request->dynamic) {
- atomic_inc(&cqp_reqs_dynfreed);
kfree(cqp_request);
} else {
- atomic_inc(&cqp_reqs_freed);
spin_lock_irqsave(&nesdev->cqp.lock, flags);
list_add_tail(&cqp_request->list, &nesdev->cqp_avail_reqs);
spin_unlock_irqrestore(&nesdev->cqp.lock, flags);
@@ -1819,15 +2039,15 @@ static int nes_reg_mr(struct nes_device *nesdev, struct nes_pd *nespd,
u16 major_code;
/* Register the region with the adapter */
- cqp_request = nes_get_cqp_request(nesdev, NES_CQP_REQUEST_NOT_HOLDING_LOCK);
- if (NULL == cqp_request) {
+ cqp_request = nes_get_cqp_request(nesdev);
+ if (cqp_request == NULL) {
nes_debug(NES_DBG_MR, "Failed to get a cqp_request.\n");
return -ENOMEM;
}
cqp_request->waiting = 1;
cqp_wqe = &cqp_request->cqp_wqe;
- spin_lock_irqsave(&nesdev->cqp.lock, flags);
+ spin_lock_irqsave(&nesadapter->pbl_lock, flags);
/* track PBL resources */
if (pbl_count != 0) {
if (pbl_count > 1) {
@@ -1835,13 +2055,11 @@ static int nes_reg_mr(struct nes_device *nesdev, struct nes_pd *nespd,
if ((pbl_count+1) > nesadapter->free_4kpbl) {
nes_debug(NES_DBG_MR, "Out of 4KB Pbls for two level request.\n");
if (cqp_request->dynamic) {
- spin_unlock_irqrestore(&nesdev->cqp.lock, flags);
- atomic_inc(&cqp_reqs_dynfreed);
+ spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);
kfree(cqp_request);
} else {
- atomic_inc(&cqp_reqs_freed);
list_add_tail(&cqp_request->list, &nesdev->cqp_avail_reqs);
- spin_unlock_irqrestore(&nesdev->cqp.lock, flags);
+ spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);
}
return -ENOMEM;
} else {
@@ -1851,13 +2069,11 @@ static int nes_reg_mr(struct nes_device *nesdev, struct nes_pd *nespd,
if (pbl_count > nesadapter->free_4kpbl) {
nes_debug(NES_DBG_MR, "Out of 4KB Pbls.\n");
if (cqp_request->dynamic) {
- spin_unlock_irqrestore(&nesdev->cqp.lock, flags);
- atomic_inc(&cqp_reqs_dynfreed);
+ spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);
kfree(cqp_request);
} else {
- atomic_inc(&cqp_reqs_freed);
list_add_tail(&cqp_request->list, &nesdev->cqp_avail_reqs);
- spin_unlock_irqrestore(&nesdev->cqp.lock, flags);
+ spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);
}
return -ENOMEM;
} else {
@@ -1867,13 +2083,11 @@ static int nes_reg_mr(struct nes_device *nesdev, struct nes_pd *nespd,
if (pbl_count > nesadapter->free_256pbl) {
nes_debug(NES_DBG_MR, "Out of 256B Pbls.\n");
if (cqp_request->dynamic) {
- spin_unlock_irqrestore(&nesdev->cqp.lock, flags);
- atomic_inc(&cqp_reqs_dynfreed);
+ spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);
kfree(cqp_request);
} else {
- atomic_inc(&cqp_reqs_freed);
list_add_tail(&cqp_request->list, &nesdev->cqp_avail_reqs);
- spin_unlock_irqrestore(&nesdev->cqp.lock, flags);
+ spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);
}
return -ENOMEM;
} else {
@@ -1881,7 +2095,8 @@ static int nes_reg_mr(struct nes_device *nesdev, struct nes_pd *nespd,
}
}
}
- spin_unlock_irqrestore(&nesdev->cqp.lock, flags);
+
+ spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);
cqp_wqe->wqe_words[NES_CQP_WQE_OPCODE_IDX] = cpu_to_le32(
NES_CQP_REGISTER_STAG | NES_CQP_STAG_RIGHTS_LOCAL_READ);
@@ -1902,8 +2117,9 @@ static int nes_reg_mr(struct nes_device *nesdev, struct nes_pd *nespd,
cqp_wqe->wqe_words[NES_CQP_WQE_OPCODE_IDX] |= cpu_to_le32(
NES_CQP_STAG_RIGHTS_WINDOW_BIND | NES_CQP_STAG_REM_ACC_EN);
}
- cqp_wqe->wqe_words[NES_CQP_WQE_COMP_CTX_LOW_IDX] = cpu_to_le32((u32)((u64)(&nesdev->cqp)));
- cqp_wqe->wqe_words[NES_CQP_WQE_COMP_CTX_HIGH_IDX] = cpu_to_le32((u32)(((u64)(&nesdev->cqp))>>32));
+ cqp_wqe->wqe_words[NES_CQP_WQE_COMP_CTX_LOW_IDX] = cpu_to_le32((u32)((u64)(&nesdev->cqp)));
+ cqp_wqe->wqe_words[NES_CQP_WQE_COMP_CTX_HIGH_IDX] =
+ cpu_to_le32((u32)(((u64)(&nesdev->cqp)) >> 32));
cqp_wqe->wqe_words[NES_CQP_WQE_COMP_SCRATCH_LOW_IDX] = 0;
cqp_wqe->wqe_words[NES_CQP_WQE_COMP_SCRATCH_HIGH_IDX] = 0;
cqp_wqe->wqe_words[NES_CQP_STAG_WQE_VA_LOW_IDX] = cpu_to_le32((u32)*iova_start);
@@ -1938,8 +2154,7 @@ static int nes_reg_mr(struct nes_device *nesdev, struct nes_pd *nespd,
barrier();
atomic_set(&cqp_request->refcount, 2);
- nes_post_cqp_request(nesdev, cqp_request, NES_CQP_REQUEST_NOT_HOLDING_LOCK,
- NES_CQP_REQUEST_RING_DOORBELL);
+ nes_post_cqp_request(nesdev, cqp_request, NES_CQP_REQUEST_RING_DOORBELL);
/* Wait for CQP */
ret = wait_event_timeout(cqp_request->waitq, (0 != cqp_request->request_done),
@@ -1950,10 +2165,8 @@ static int nes_reg_mr(struct nes_device *nesdev, struct nes_pd *nespd,
major_code = cqp_request->major_code;
if (atomic_dec_and_test(&cqp_request->refcount)) {
if (cqp_request->dynamic) {
- atomic_inc(&cqp_reqs_dynfreed);
kfree(cqp_request);
} else {
- atomic_inc(&cqp_reqs_freed);
spin_lock_irqsave(&nesdev->cqp.lock, flags);
list_add_tail(&cqp_request->list, &nesdev->cqp_avail_reqs);
spin_unlock_irqrestore(&nesdev->cqp.lock, flags);
@@ -1975,7 +2188,8 @@ static int nes_reg_mr(struct nes_device *nesdev, struct nes_pd *nespd,
*/
static struct ib_mr *nes_reg_phys_mr(struct ib_pd *ib_pd,
struct ib_phys_buf *buffer_list, int num_phys_buf, int acc,
- u64 * iova_start) {
+ u64 *iova_start)
+{
u64 region_length;
struct nes_pd *nespd = to_nespd(ib_pd);
struct nes_vnic *nesvnic = to_nesvnic(ib_pd->device);
@@ -2021,7 +2235,7 @@ static struct ib_mr *nes_reg_phys_mr(struct ib_pd *ib_pd,
return ERR_PTR(err);
}
- nesmr = kmalloc(sizeof(*nesmr), GFP_KERNEL);
+ nesmr = kzalloc(sizeof(*nesmr), GFP_KERNEL);
if (!nesmr) {
nes_free_resource(nesadapter, nesadapter->allocated_mrs, stag_index);
return ERR_PTR(-ENOMEM);
@@ -2030,7 +2244,7 @@ static struct ib_mr *nes_reg_phys_mr(struct ib_pd *ib_pd,
for (i = 0; i < num_phys_buf; i++) {
if ((i & 0x01FF) == 0) {
- if (1 == root_pbl_index) {
+ if (root_pbl_index == 1) {
/* Allocate the root PBL */
root_vpbl.pbl_vbase = pci_alloc_consistent(nesdev->pcidev, 8192,
&root_vpbl.pbl_pbase);
@@ -2043,7 +2257,7 @@ static struct ib_mr *nes_reg_phys_mr(struct ib_pd *ib_pd,
kfree(nesmr);
return ERR_PTR(-ENOMEM);
}
- root_vpbl.leaf_vpbl = kmalloc(sizeof(*root_vpbl.leaf_vpbl)*1024, GFP_KERNEL);
+ root_vpbl.leaf_vpbl = kzalloc(sizeof(*root_vpbl.leaf_vpbl)*1024, GFP_KERNEL);
if (!root_vpbl.leaf_vpbl) {
pci_free_consistent(nesdev->pcidev, 8192, root_vpbl.pbl_vbase,
root_vpbl.pbl_pbase);
@@ -2168,7 +2382,8 @@ static struct ib_mr *nes_reg_phys_mr(struct ib_pd *ib_pd,
/**
* nes_get_dma_mr
*/
-static struct ib_mr *nes_get_dma_mr(struct ib_pd *pd, int acc) {
+static struct ib_mr *nes_get_dma_mr(struct ib_pd *pd, int acc)
+{
struct ib_phys_buf bl;
u64 kva = 0;
@@ -2183,14 +2398,9 @@ static struct ib_mr *nes_get_dma_mr(struct ib_pd *pd, int acc) {
/**
* nes_reg_user_mr
*/
-#ifdef OFED_1_2
-static struct ib_mr *nes_reg_user_mr(struct ib_pd *pd, struct ib_umem *region,
- int acc, struct ib_udata *udata)
-#else
static struct ib_mr *nes_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
u64 virt, int acc, struct ib_udata *udata)
-#endif
- {
+{
u64 iova_start;
u64 *pbl;
u64 region_length;
@@ -2205,15 +2415,14 @@ static struct ib_mr *nes_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
struct nes_ucontext *nes_ucontext;
struct nes_pbl *nespbl;
struct nes_mr *nesmr;
-#ifndef OFED_1_2
struct ib_umem *region;
-#endif
struct nes_mem_reg_req req;
struct nes_vpbl vpbl;
struct nes_root_vpbl root_vpbl;
- int j;
+ int nmap_index, page_index;
int page_count = 0;
int err, pbl_depth = 0;
+ int chunk_pages;
int ret;
u32 stag;
u32 stag_index = 0;
@@ -2225,15 +2434,6 @@ static struct ib_mr *nes_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
u8 single_page = 1;
u8 stag_key;
-
- nes_debug(NES_DBG_MR, "\n");
-
-#ifdef OFED_1_2
- nes_debug(NES_DBG_MR, "User base = 0x%lX, Virt base = 0x%lX, length = %u,"
- " offset = %u, page size = %u.\n",
- region->user_base, region->virt_base, (u32)region->length,
- region->offset, region->page_size);
-#else
region = ib_umem_get(pd->uobject->context, start, length, acc);
if (IS_ERR(region)) {
return (struct ib_mr *)region;
@@ -2241,7 +2441,6 @@ static struct ib_mr *nes_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
nes_debug(NES_DBG_MR, "User base = 0x%lX, Virt base = 0x%lX, length = %u\n",
(unsigned long int)start, (unsigned long int)virt, (u32)length);
-#endif
if (ib_copy_from_udata(&req, udata, sizeof(req)))
return ERR_PTR(-EFAULT);
@@ -2266,33 +2465,25 @@ static struct ib_mr *nes_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
err = nes_alloc_resource(nesadapter, nesadapter->allocated_mrs,
nesadapter->max_mr, &stag_index, &next_stag_index);
if (err) {
-#ifndef OFED_1_2
ib_umem_release(region);
-#endif
return ERR_PTR(err);
}
- nesmr = kmalloc(sizeof(*nesmr), GFP_KERNEL);
+ nesmr = kzalloc(sizeof(*nesmr), GFP_KERNEL);
if (!nesmr) {
-#ifndef OFED_1_2
ib_umem_release(region);
-#endif
nes_free_resource(nesadapter, nesadapter->allocated_mrs, stag_index);
return ERR_PTR(-ENOMEM);
}
-#ifndef OFED_1_2
nesmr->region = region;
-#endif
list_for_each_entry(chunk, &region->chunk_list, list) {
nes_debug(NES_DBG_MR, "Chunk: nents = %u, nmap = %u .\n",
chunk->nents, chunk->nmap);
- for (j = 0; j < chunk->nmap; ++j) {
+ for (nmap_index = 0; nmap_index < chunk->nmap; ++nmap_index) {
if ((page_count&0x01FF) == 0) {
if (page_count>(1024*512)) {
-#ifndef OFED_1_2
ib_umem_release(region);
-#endif
pci_free_consistent(nesdev->pcidev, 4096, vpbl.pbl_vbase,
vpbl.pbl_pbase);
nes_free_resource(nesadapter,
@@ -2300,15 +2491,13 @@ static struct ib_mr *nes_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
kfree(nesmr);
return ERR_PTR(-E2BIG);
}
- if (1 == root_pbl_index) {
+ if (root_pbl_index == 1) {
root_vpbl.pbl_vbase = pci_alloc_consistent(nesdev->pcidev,
8192, &root_vpbl.pbl_pbase);
nes_debug(NES_DBG_MR, "Allocating root PBL, va = %p, pa = 0x%08X\n",
root_vpbl.pbl_vbase, (unsigned int)root_vpbl.pbl_pbase);
if (!root_vpbl.pbl_vbase) {
-#ifndef OFED_1_2
ib_umem_release(region);
-#endif
pci_free_consistent(nesdev->pcidev, 4096, vpbl.pbl_vbase,
vpbl.pbl_pbase);
nes_free_resource(nesadapter, nesadapter->allocated_mrs,
@@ -2316,12 +2505,10 @@ static struct ib_mr *nes_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
kfree(nesmr);
return ERR_PTR(-ENOMEM);
}
- root_vpbl.leaf_vpbl = kmalloc(sizeof(*root_vpbl.leaf_vpbl)*1024,
+ root_vpbl.leaf_vpbl = kzalloc(sizeof(*root_vpbl.leaf_vpbl)*1024,
GFP_KERNEL);
if (!root_vpbl.leaf_vpbl) {
-#ifndef OFED_1_2
ib_umem_release(region);
-#endif
pci_free_consistent(nesdev->pcidev, 8192, root_vpbl.pbl_vbase,
root_vpbl.pbl_pbase);
pci_free_consistent(nesdev->pcidev, 4096, vpbl.pbl_vbase,
@@ -2342,9 +2529,7 @@ static struct ib_mr *nes_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
nes_debug(NES_DBG_MR, "Allocating leaf PBL, va = %p, pa = 0x%08X\n",
vpbl.pbl_vbase, (unsigned int)vpbl.pbl_pbase);
if (!vpbl.pbl_vbase) {
-#ifndef OFED_1_2
ib_umem_release(region);
-#endif
nes_free_resource(nesadapter, nesadapter->allocated_mrs, stag_index);
ibmr = ERR_PTR(-ENOMEM);
kfree(nesmr);
@@ -2360,22 +2545,18 @@ static struct ib_mr *nes_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
root_pbl_index++;
cur_pbl_index = 0;
}
- if (sg_dma_address(&chunk->page_list[j]) & ~PAGE_MASK) {
-#ifndef OFED_1_2
+ if (sg_dma_address(&chunk->page_list[nmap_index]) & ~PAGE_MASK) {
ib_umem_release(region);
-#endif
nes_free_resource(nesadapter, nesadapter->allocated_mrs, stag_index);
nes_debug(NES_DBG_MR, "Unaligned Memory Buffer: 0x%x\n",
- (unsigned int) sg_dma_address(&chunk->page_list[j]));
+ (unsigned int) sg_dma_address(&chunk->page_list[nmap_index]));
ibmr = ERR_PTR(-EINVAL);
kfree(nesmr);
goto reg_user_mr_err;
}
- if (!sg_dma_len(&chunk->page_list[j])) {
-#ifndef OFED_1_2
+ if (!sg_dma_len(&chunk->page_list[nmap_index])) {
ib_umem_release(region);
-#endif
nes_free_resource(nesadapter, nesadapter->allocated_mrs,
stag_index);
nes_debug(NES_DBG_MR, "Invalid Buffer Size\n");
@@ -2384,25 +2565,33 @@ static struct ib_mr *nes_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
goto reg_user_mr_err;
}
- region_length += sg_dma_len(&chunk->page_list[j]);
- if (single_page) {
- if (page_count != 0) {
- if ((last_dma_addr+PAGE_SIZE) !=
- sg_dma_address(&chunk->page_list[j]))
- single_page = 0;
- last_dma_addr = sg_dma_address(&chunk->page_list[j]);
- } else {
- first_dma_addr = sg_dma_address(&chunk->page_list[j]);
- last_dma_addr = first_dma_addr;
+ region_length += sg_dma_len(&chunk->page_list[nmap_index]);
+ chunk_pages = sg_dma_len(&chunk->page_list[nmap_index]) >> PAGE_SHIFT;
+ for (page_index=0; page_index < chunk_pages; page_index++) {
+ if (single_page) {
+ if (page_count != 0) {
+ if ((last_dma_addr+PAGE_SIZE) !=
+ (sg_dma_address(&chunk->page_list[nmap_index])+
+ (page_index*PAGE_SIZE)))
+ single_page = 0;
+ last_dma_addr = sg_dma_address(&chunk->page_list[nmap_index])+
+ (page_index*PAGE_SIZE);
+ } else {
+ first_dma_addr = sg_dma_address(&chunk->page_list[nmap_index])+
+ (page_index*PAGE_SIZE);
+ last_dma_addr = first_dma_addr;
+ }
}
- }
- vpbl.pbl_vbase[cur_pbl_index].pa_low =
- cpu_to_le32((u32)sg_dma_address(&chunk->page_list[j]));
- vpbl.pbl_vbase[cur_pbl_index].pa_high =
- cpu_to_le32((u32)((((u64)sg_dma_address(&chunk->page_list[j]))>>32)));
- cur_pbl_index++;
- page_count++;
+ vpbl.pbl_vbase[cur_pbl_index].pa_low =
+ cpu_to_le32((u32)(sg_dma_address(&chunk->page_list[nmap_index])+
+ (page_index*PAGE_SIZE)));
+ vpbl.pbl_vbase[cur_pbl_index].pa_high =
+ cpu_to_le32((u32)((((u64)(sg_dma_address(&chunk->page_list[nmap_index])+
+ (page_index*PAGE_SIZE))) >> 32)));
+ cur_pbl_index++;
+ page_count++;
+ }
}
}
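
The rewritten loop above walks each DMA-mapped scatter entry one PAGE_SIZE page at a time, checks whether consecutive pages stay physically contiguous for the single-page case, and records every page address in the PBL as a low/high pair of 32-bit words. A minimal userspace sketch of that per-page address math, assuming 4K pages; the names (pbl_entry, fill_pbl) are illustrative, not the driver's, and the cpu_to_le32 conversion is omitted:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE  4096u
#define PAGE_SHIFT 12

struct pbl_entry {
	uint32_t pa_low;   /* low 32 bits of the page's DMA address */
	uint32_t pa_high;  /* high 32 bits of the page's DMA address */
};

/* Split one DMA-mapped chunk of dma_len bytes into PAGE_SIZE pages and
 * record each page's address in the PBL, as the driver loop does. */
static int fill_pbl(struct pbl_entry *pbl, uint64_t dma_addr, uint32_t dma_len)
{
	int chunk_pages = dma_len >> PAGE_SHIFT;
	int page_index;

	for (page_index = 0; page_index < chunk_pages; page_index++) {
		uint64_t page_addr = dma_addr + (uint64_t)page_index * PAGE_SIZE;

		pbl[page_index].pa_low  = (uint32_t)page_addr;
		pbl[page_index].pa_high = (uint32_t)(page_addr >> 32);
	}
	return chunk_pages;
}

int main(void)
{
	struct pbl_entry pbl[4];
	int n = fill_pbl(pbl, 0x12345678000ULL, 4 * PAGE_SIZE);
	int i;

	for (i = 0; i < n; i++)
		printf("entry %d: 0x%08x%08x\n", i, pbl[i].pa_high, pbl[i].pa_low);
	return 0;
}
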
@@ -2416,11 +2605,7 @@ static struct ib_mr *nes_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
stag = 1;
}
-#ifdef OFED_1_2
- iova_start = (u64)region->virt_base;
-#else
iova_start = virt;
-#endif
nes_debug(NES_DBG_MR, "Registering STag 0x%08X, VA = 0x%08X, length = 0x%08X,"
" index = 0x%08X, region->length=0x%08llx\n",
stag, (unsigned int)iova_start,
@@ -2454,9 +2639,7 @@ static struct ib_mr *nes_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
nesmr->pbls_used++;
}
} else {
-#ifndef OFED_1_2
ib_umem_release(region);
-#endif
kfree(nesmr);
ibmr = ERR_PTR(-ENOMEM);
}
@@ -2467,10 +2650,10 @@ static struct ib_mr *nes_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
pci_free_consistent(nesdev->pcidev, 4096, vpbl.pbl_vbase,
vpbl.pbl_pbase);
} else {
- for (j=0; j<root_pbl_index; j++) {
+ for (page_index=0; page_index<root_pbl_index; page_index++) {
pci_free_consistent(nesdev->pcidev, 4096,
- root_vpbl.leaf_vpbl[j].pbl_vbase,
- root_vpbl.leaf_vpbl[j].pbl_pbase);
+ root_vpbl.leaf_vpbl[page_index].pbl_vbase,
+ root_vpbl.leaf_vpbl[page_index].pbl_pbase);
}
kfree(root_vpbl.leaf_vpbl);
pci_free_consistent(nesdev->pcidev, 8192, root_vpbl.pbl_vbase,
@@ -2482,75 +2665,76 @@ static struct ib_mr *nes_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
return ibmr;
break;
case IWNES_MEMREG_TYPE_QP:
-#ifndef OFED_1_2
- ib_umem_release(region);
-#endif
- return ERR_PTR(-ENOSYS);
- break;
case IWNES_MEMREG_TYPE_CQ:
- nespbl = kmalloc(sizeof(*nespbl), GFP_KERNEL);
+ nespbl = kzalloc(sizeof(*nespbl), GFP_KERNEL);
if (!nespbl) {
nes_debug(NES_DBG_MR, "Unable to allocate PBL\n");
-#ifndef OFED_1_2
ib_umem_release(region);
-#endif
return ERR_PTR(-ENOMEM);
}
- memset(nespbl, 0, sizeof(*nespbl));
- nesmr = kmalloc(sizeof(*nesmr), GFP_KERNEL);
+ nesmr = kzalloc(sizeof(*nesmr), GFP_KERNEL);
if (!nesmr) {
-#ifndef OFED_1_2
ib_umem_release(region);
-#endif
kfree(nespbl);
nes_debug(NES_DBG_MR, "Unable to allocate nesmr\n");
return ERR_PTR(-ENOMEM);
}
- memset(nesmr, 0, sizeof(*nesmr));
-#ifndef OFED_1_2
nesmr->region = region;
-#endif
nes_ucontext = to_nesucontext(pd->uobject->context);
pbl_depth = region->length >> PAGE_SHIFT;
pbl_depth += (region->length & ~PAGE_MASK) ? 1 : 0;
nespbl->pbl_size = pbl_depth*sizeof(u64);
- nes_debug(NES_DBG_MR, "Attempting to allocate CQ PBL memory, %u bytes, %u entries.\n",
+ if (req.reg_type == IWNES_MEMREG_TYPE_QP) {
+ nes_debug(NES_DBG_MR, "Attempting to allocate QP PBL memory");
+ } else {
+ nes_debug(NES_DBG_MR, "Attempting to allocate CQ PBL memory");
+ }
+
+ nes_debug(NES_DBG_MR, " %u bytes, %u entries.\n",
nespbl->pbl_size, pbl_depth);
pbl = pci_alloc_consistent(nesdev->pcidev, nespbl->pbl_size,
&nespbl->pbl_pbase);
if (!pbl) {
-#ifndef OFED_1_2
ib_umem_release(region);
-#endif
kfree(nesmr);
kfree(nespbl);
- nes_debug(NES_DBG_MR, "Unable to allocate cq PBL memory\n");
+ nes_debug(NES_DBG_MR, "Unable to allocate PBL memory\n");
return ERR_PTR(-ENOMEM);
}
nespbl->pbl_vbase = pbl;
-#ifdef OFED_1_2
- nespbl->user_base = region->user_base;
-#else
nespbl->user_base = start;
-#endif
- nes_debug(NES_DBG_MR, "Allocated CQ PBL memory, %u bytes, pbl_pbase=%p,"
+ nes_debug(NES_DBG_MR, "Allocated PBL memory, %u bytes, pbl_pbase=%p,"
" pbl_vbase=%p user_base=0x%lx\n",
nespbl->pbl_size, (void *)nespbl->pbl_pbase,
(void*)nespbl->pbl_vbase, nespbl->user_base);
list_for_each_entry(chunk, &region->chunk_list, list) {
- for (j = 0; j < chunk->nmap; ++j) {
- ((u32 *)pbl)[0] = cpu_to_le32((u32)sg_dma_address(&chunk->page_list[j]));
- ((u32 *)pbl)[1] = cpu_to_le32(((u64)sg_dma_address(&chunk->page_list[j]))>>32);
- nes_debug(NES_DBG_MR, "pbl=%p, *pbl=0x%016llx, 0x%08x%08x\n", pbl, *pbl, le32_to_cpu(((u32 *)pbl)[1]), le32_to_cpu(((u32 *)pbl)[0]));
- pbl++;
+ for (nmap_index = 0; nmap_index < chunk->nmap; ++nmap_index) {
+ chunk_pages = sg_dma_len(&chunk->page_list[nmap_index]) >> PAGE_SHIFT;
+ nespbl->page = sg_page(&chunk->page_list[0]);
+ for (page_index=0; page_index<chunk_pages; page_index++) {
+ ((u32 *)pbl)[0] = cpu_to_le32((u32)
+ (sg_dma_address(&chunk->page_list[nmap_index])+
+ (page_index*PAGE_SIZE)));
+ ((u32 *)pbl)[1] = cpu_to_le32(((u64)
+ (sg_dma_address(&chunk->page_list[nmap_index])+
+ (page_index*PAGE_SIZE)))>>32);
+ nes_debug(NES_DBG_MR, "pbl=%p, *pbl=0x%016llx, 0x%08x%08x\n", pbl,
+ (unsigned long long)*pbl,
+ le32_to_cpu(((u32 *)pbl)[1]), le32_to_cpu(((u32 *)pbl)[0]));
+ pbl++;
+ }
}
}
- list_add_tail(&nespbl->list, &nes_ucontext->cq_reg_mem_list);
+ if (req.reg_type == IWNES_MEMREG_TYPE_QP) {
+ list_add_tail(&nespbl->list, &nes_ucontext->qp_reg_mem_list);
+ } else {
+ list_add_tail(&nespbl->list, &nes_ucontext->cq_reg_mem_list);
+ }
nesmr->ibmr.rkey = -1;
nesmr->ibmr.lkey = -1;
- nesmr->mode = IWNES_MEMREG_TYPE_CQ;
+ nesmr->mode = req.reg_type;
return &nesmr->ibmr;
break;
}
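
For the shared QP/CQ registration path above, the PBL depth is simply the region length rounded up to whole pages. A small standalone sketch of that calculation, assuming 4K pages and an illustrative helper name:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1u << PAGE_SHIFT)
#define PAGE_MASK  (~(PAGE_SIZE - 1))

/* Number of PBL entries needed to cover a region of 'length' bytes. */
static uint32_t pbl_depth_for(uint64_t length)
{
	uint32_t depth = length >> PAGE_SHIFT;

	/* One extra entry if the region does not end on a page boundary. */
	depth += (length & ~PAGE_MASK) ? 1 : 0;
	return depth;
}

int main(void)
{
	printf("%u\n", pbl_depth_for(8192));   /* 2 pages */
	printf("%u\n", pbl_depth_for(10000));  /* 3 pages: 2 full + 1 partial */
	return 0;
}
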
@@ -2575,11 +2759,9 @@ static int nes_dereg_mr(struct ib_mr *ib_mr)
u16 major_code;
u16 minor_code;
-#ifndef OFED_1_2
if (nesmr->region) {
ib_umem_release(nesmr->region);
}
-#endif
if (nesmr->mode != IWNES_MEMREG_TYPE_MEM) {
kfree(nesmr);
return 0;
@@ -2587,16 +2769,16 @@ static int nes_dereg_mr(struct ib_mr *ib_mr)
/* Deallocate the region with the adapter */
- cqp_request = nes_get_cqp_request(nesdev, NES_CQP_REQUEST_NOT_HOLDING_LOCK);
- if (NULL == cqp_request) {
+ cqp_request = nes_get_cqp_request(nesdev);
+ if (cqp_request == NULL) {
nes_debug(NES_DBG_MR, "Failed to get a cqp_request.\n");
return -ENOMEM;
}
cqp_request->waiting = 1;
cqp_wqe = &cqp_request->cqp_wqe;
- spin_lock_irqsave(&nesdev->cqp.lock, flags);
- if (0 != nesmr->pbls_used) {
+ spin_lock_irqsave(&nesadapter->pbl_lock, flags);
+ if (nesmr->pbls_used != 0) {
if (nesmr->pbl_4k) {
nesadapter->free_4kpbl += nesmr->pbls_used;
if (nesadapter->free_4kpbl > nesadapter->max_4kpbl) {
@@ -2612,11 +2794,15 @@ static int nes_dereg_mr(struct ib_mr *ib_mr)
}
}
+ spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);
+
cqp_wqe->wqe_words[NES_CQP_WQE_OPCODE_IDX] = cpu_to_le32(
NES_CQP_DEALLOCATE_STAG | NES_CQP_STAG_VA_TO |
NES_CQP_STAG_DEALLOC_PBLS | NES_CQP_STAG_MR);
- cqp_wqe->wqe_words[NES_CQP_WQE_COMP_CTX_LOW_IDX] = cpu_to_le32((u32)((u64)(&nesdev->cqp)));
- cqp_wqe->wqe_words[NES_CQP_WQE_COMP_CTX_HIGH_IDX] = cpu_to_le32((u32)(((u64)(&nesdev->cqp))>>32));
+ cqp_wqe->wqe_words[NES_CQP_WQE_COMP_CTX_LOW_IDX] =
+ cpu_to_le32((u32)((u64)(&nesdev->cqp)));
+ cqp_wqe->wqe_words[NES_CQP_WQE_COMP_CTX_HIGH_IDX] =
+ cpu_to_le32((u32)(((u64)(&nesdev->cqp)) >> 32));
cqp_wqe->wqe_words[NES_CQP_WQE_COMP_SCRATCH_LOW_IDX] = 0;
cqp_wqe->wqe_words[NES_CQP_WQE_COMP_SCRATCH_HIGH_IDX] = 0;
cqp_wqe->wqe_words[NES_CQP_STAG_WQE_PBL_BLK_COUNT_IDX] = 0;
@@ -2624,13 +2810,11 @@ static int nes_dereg_mr(struct ib_mr *ib_mr)
cqp_wqe->wqe_words[NES_CQP_STAG_WQE_STAG_IDX] = cpu_to_le32(ib_mr->rkey);
atomic_set(&cqp_request->refcount, 2);
- nes_post_cqp_request(nesdev, cqp_request, NES_CQP_REQUEST_HOLDING_LOCK,
- NES_CQP_REQUEST_RING_DOORBELL);
+ nes_post_cqp_request(nesdev, cqp_request, NES_CQP_REQUEST_RING_DOORBELL);
/* Wait for CQP */
- spin_unlock_irqrestore(&nesdev->cqp.lock, flags);
nes_debug(NES_DBG_MR, "Waiting for deallocate STag 0x%08X completed\n", ib_mr->rkey);
- ret = wait_event_timeout(cqp_request->waitq, (0 != cqp_request->request_done),
+ ret = wait_event_timeout(cqp_request->waitq, (cqp_request->request_done != 0),
NES_EVENT_TIMEOUT);
nes_debug(NES_DBG_MR, "Deallocate STag 0x%08X completed, wait_event_timeout ret = %u,"
" CQP Major:Minor codes = 0x%04X:0x%04X\n",
@@ -2645,10 +2829,8 @@ static int nes_dereg_mr(struct ib_mr *ib_mr)
minor_code = cqp_request->minor_code;
if (atomic_dec_and_test(&cqp_request->refcount)) {
if (cqp_request->dynamic) {
- atomic_inc(&cqp_reqs_dynfreed);
kfree(cqp_request);
} else {
- atomic_inc(&cqp_reqs_freed);
spin_lock_irqsave(&nesdev->cqp.lock, flags);
list_add_tail(&cqp_request->list, &nesdev->cqp_avail_reqs);
spin_unlock_irqrestore(&nesdev->cqp.lock, flags);
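
The deregistration hunk above moves the PBL accounting under nesadapter->pbl_lock and returns the MR's PBLs to the adapter's free pool, checking the free count against the pool maximum to catch an over-free. A standalone sketch of that bookkeeping; the structure and the recovery taken in the over-free branch are illustrative assumptions, and only the counter update plus the bounds check mirror the hunk:

#include <stdio.h>

struct pbl_pool {
	unsigned int free_4kpbl;   /* PBLs currently available */
	unsigned int max_4kpbl;    /* total size of the pool */
};

/* Return an MR's PBLs to the pool; report an over-free as an error. */
static int pbl_pool_put(struct pbl_pool *pool, unsigned int pbls_used)
{
	pool->free_4kpbl += pbls_used;
	if (pool->free_4kpbl > pool->max_4kpbl) {
		pool->free_4kpbl -= pbls_used;   /* illustrative recovery only */
		return -1;
	}
	return 0;
}

int main(void)
{
	struct pbl_pool pool = { .free_4kpbl = 10, .max_4kpbl = 16 };

	printf("%d free=%u\n", pbl_pool_put(&pool, 4), pool.free_4kpbl);
	printf("%d free=%u\n", pbl_pool_put(&pool, 8), pool.free_4kpbl);
	return 0;
}
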
@@ -2767,7 +2949,8 @@ static int nes_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
/**
* nes_hw_modify_qp
*/
-int nes_hw_modify_qp(struct nes_device *nesdev, struct nes_qp *nesqp, u32 next_iwarp_state, u32 wait_completion)
+int nes_hw_modify_qp(struct nes_device *nesdev, struct nes_qp *nesqp,
+ u32 next_iwarp_state, u32 wait_completion)
{
u64 u64temp;
struct nes_hw_cqp_wqe *cqp_wqe;
@@ -2781,8 +2964,8 @@ int nes_hw_modify_qp(struct nes_device *nesdev, struct nes_qp *nesqp, u32 next_i
nes_debug(NES_DBG_MOD_QP, "QP%u, refcount=%d\n",
nesqp->hwqp.qp_id, atomic_read(&nesqp->refcount));
- cqp_request = nes_get_cqp_request(nesdev, NES_CQP_REQUEST_NOT_HOLDING_LOCK);
- if (NULL == cqp_request) {
+ cqp_request = nes_get_cqp_request(nesdev);
+ if (cqp_request == NULL) {
nes_debug(NES_DBG_MOD_QP, "Failed to get a cqp_request.\n");
return -ENOMEM;
}
@@ -2808,14 +2991,13 @@ int nes_hw_modify_qp(struct nes_device *nesdev, struct nes_qp *nesqp, u32 next_i
cqp_wqe->wqe_words[NES_CQP_QP_WQE_CONTEXT_HIGH_IDX] = cpu_to_le32((u32)(u64temp >> 32));
atomic_set(&cqp_request->refcount, 2);
- nes_post_cqp_request(nesdev, cqp_request, NES_CQP_REQUEST_NOT_HOLDING_LOCK,
- NES_CQP_REQUEST_RING_DOORBELL);
+ nes_post_cqp_request(nesdev, cqp_request, NES_CQP_REQUEST_RING_DOORBELL);
/* Wait for CQP */
if (wait_completion) {
/* nes_debug(NES_DBG_MOD_QP, "Waiting for modify iWARP QP%u to complete.\n",
nesqp->hwqp.qp_id); */
- ret = wait_event_timeout(cqp_request->waitq, (0 != cqp_request->request_done),
+ ret = wait_event_timeout(cqp_request->waitq, (cqp_request->request_done != 0),
NES_EVENT_TIMEOUT);
nes_debug(NES_DBG_MOD_QP, "Modify iwarp QP%u completed, wait_event_timeout ret=%u, "
"CQP Major:Minor codes = 0x%04X:0x%04X.\n",
@@ -2829,10 +3011,8 @@ int nes_hw_modify_qp(struct nes_device *nesdev, struct nes_qp *nesqp, u32 next_i
}
if (atomic_dec_and_test(&cqp_request->refcount)) {
if (cqp_request->dynamic) {
- atomic_inc(&cqp_reqs_dynfreed);
kfree(cqp_request);
} else {
- atomic_inc(&cqp_reqs_freed);
spin_lock_irqsave(&nesdev->cqp.lock, flags);
list_add_tail(&cqp_request->list, &nesdev->cqp_avail_reqs);
spin_unlock_irqrestore(&nesdev->cqp.lock, flags);
@@ -2935,7 +3115,7 @@ int nes_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
issue_modify_qp = 1;
nes_debug(NES_DBG_MOD_QP, "QP%u: new state=closing. SQ head=%u, SQ tail=%u\n",
nesqp->hwqp.qp_id, nesqp->hwqp.sq_head, nesqp->hwqp.sq_tail);
- if (nesqp->iwarp_state==(u32)NES_CQP_QP_IWARP_STATE_CLOSING) {
+ if (nesqp->iwarp_state == (u32)NES_CQP_QP_IWARP_STATE_CLOSING) {
spin_unlock_irqrestore(&nesqp->lock, qplockflags);
nes_rem_ref(&nesqp->ibqp);
return 0;
@@ -2988,7 +3168,7 @@ int nes_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
break;
case IB_QPS_ERR:
case IB_QPS_RESET:
- if (nesqp->iwarp_state==(u32)NES_CQP_QP_IWARP_STATE_ERROR) {
+ if (nesqp->iwarp_state == (u32)NES_CQP_QP_IWARP_STATE_ERROR) {
spin_unlock_irqrestore(&nesqp->lock, qplockflags);
nes_rem_ref(&nesqp->ibqp);
return -EINVAL;
@@ -3111,7 +3291,7 @@ int nes_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
spin_lock_irqsave(&nesqp->lock, qplockflags);
if (nesqp->cm_id) {
/* These two are for the timer thread */
- if (atomic_inc_return(&nesqp->close_timer_started)==1) {
+ if (atomic_inc_return(&nesqp->close_timer_started) == 1) {
nes_add_ref(&nesqp->ibqp);
nesqp->cm_id->add_ref(nesqp->cm_id);
nes_debug(NES_DBG_MOD_QP, "QP%u Not decrementing QP refcount (%d),"
@@ -3213,7 +3393,6 @@ static int nes_post_send(struct ib_qp *ibqp, struct ib_send_wr *ib_wr,
wqe_count = 0;
total_payload_length = 0;
- nes_debug(NES_DBG_IW_TX, "\n");
if (nesqp->ibqp_state > IB_QPS_RTS)
return -EINVAL;
@@ -3232,10 +3411,12 @@ static int nes_post_send(struct ib_qp *ibqp, struct ib_send_wr *ib_wr,
/* nes_debug(NES_DBG_IW_TX, "processing sq wqe for QP%u at %p, head = %u.\n",
nesqp->hwqp.qp_id, wqe, head); */
u64temp = (u64)(ib_wr->wr_id);
- wqe->wqe_words[NES_IWARP_SQ_WQE_COMP_SCRATCH_LOW_IDX] = cpu_to_le32((u32)u64temp);
- wqe->wqe_words[NES_IWARP_SQ_WQE_COMP_SCRATCH_HIGH_IDX] = cpu_to_le32((u32)((u64temp)>>32));
- wqe->wqe_words[NES_IWARP_SQ_WQE_COMP_CTX_LOW_IDX] = cpu_to_le32((u32)((u64)(nesqp)));
- wqe->wqe_words[NES_IWARP_SQ_WQE_COMP_CTX_HIGH_IDX] = cpu_to_le32((u32)(((u64)(nesqp))>>32));
+ wqe->wqe_words[NES_IWARP_SQ_WQE_COMP_SCRATCH_LOW_IDX] = cpu_to_le32((u32)u64temp);
+ wqe->wqe_words[NES_IWARP_SQ_WQE_COMP_SCRATCH_HIGH_IDX] =
+ cpu_to_le32((u32)((u64temp) >> 32));
+ wqe->wqe_words[NES_IWARP_SQ_WQE_COMP_CTX_LOW_IDX] = cpu_to_le32((u32)((u64)(nesqp)));
+ wqe->wqe_words[NES_IWARP_SQ_WQE_COMP_CTX_HIGH_IDX] =
+ cpu_to_le32((u32)(((u64)(nesqp)) >> 32));
wqe->wqe_words[NES_IWARP_SQ_WQE_COMP_CTX_LOW_IDX] |= cpu_to_le32(head);
switch (ib_wr->opcode) {
@@ -3253,7 +3434,7 @@ static int nes_post_send(struct ib_qp *ibqp, struct ib_send_wr *ib_wr,
wqe_misc |= NES_IWARP_SQ_WQE_LOCAL_FENCE;
}
if ((ib_wr->send_flags & IB_SEND_INLINE) &&
- (0 == (nes_drv_opt & NES_DRV_OPT_NO_INLINE_DATA)) &&
+ ((nes_drv_opt & NES_DRV_OPT_NO_INLINE_DATA) == 0) &&
(ib_wr->sg_list[0].length <= 64)) {
memcpy(&wqe->wqe_words[NES_IWARP_SQ_WQE_IMM_DATA_START_IDX],
(void *)ib_wr->sg_list[0].addr, ib_wr->sg_list[0].length);
@@ -3298,12 +3479,12 @@ static int nes_post_send(struct ib_qp *ibqp, struct ib_send_wr *ib_wr,
cpu_to_le32((u32)(ib_wr->wr.rdma.remote_addr >> 32));
if ((ib_wr->send_flags & IB_SEND_INLINE) &&
- (0 == (nes_drv_opt & NES_DRV_OPT_NO_INLINE_DATA)) &&
+ ((nes_drv_opt & NES_DRV_OPT_NO_INLINE_DATA) == 0) &&
(ib_wr->sg_list[0].length <= 64)) {
memcpy(&wqe->wqe_words[NES_IWARP_SQ_WQE_IMM_DATA_START_IDX],
(void *)ib_wr->sg_list[0].addr, ib_wr->sg_list[0].length);
- wqe->wqe_words[NES_IWARP_SQ_WQE_TOTAL_PAYLOAD_IDX] = cpu_to_le32(
- ib_wr->sg_list[0].length);
+ wqe->wqe_words[NES_IWARP_SQ_WQE_TOTAL_PAYLOAD_IDX] =
+ cpu_to_le32(ib_wr->sg_list[0].length);
wqe_misc |= NES_IWARP_SQ_WQE_IMM_DATA;
} else {
total_payload_length = 0;
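
In the send path above the 64-bit wr_id is stored across the WQE's two 32-bit scratch words, and the completion path later reassembles it from the high and low halves. A userspace round-trip sketch of that split; the helper names are hypothetical and the cpu_to_le32/le32_to_cpu conversions done by the driver are omitted:

#include <stdint.h>
#include <stdio.h>

/* Store a 64-bit work request id as two 32-bit scratch words. */
static void store_wrid(uint32_t *scratch_low, uint32_t *scratch_high, uint64_t wr_id)
{
	*scratch_low  = (uint32_t)wr_id;
	*scratch_high = (uint32_t)(wr_id >> 32);
}

/* Reassemble the wr_id from the two scratch words on completion. */
static uint64_t load_wrid(uint32_t scratch_low, uint32_t scratch_high)
{
	return ((uint64_t)scratch_high << 32) | (uint64_t)scratch_low;
}

int main(void)
{
	uint32_t lo, hi;
	uint64_t wr_id = 0xdeadbeefcafef00dULL;

	store_wrid(&lo, &hi, wr_id);
	printf("round trip ok: %d\n", load_wrid(lo, hi) == wr_id);
	return 0;
}
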
@@ -3405,7 +3586,6 @@ static int nes_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *ib_wr,
u32 counter;
u32 total_payload_length;
- nes_debug(NES_DBG_IW_RX, "\n");
if (nesqp->ibqp_state > IB_QPS_RTS)
return -EINVAL;
@@ -3503,7 +3683,7 @@ static int nes_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry)
head = nescq->hw_cq.cq_head;
cq_size = nescq->hw_cq.cq_size;
- while (cqe_count<num_entries) {
+ while (cqe_count < num_entries) {
if (le32_to_cpu(nescq->hw_cq.cq_vbase[head].cqe_words[NES_CQE_OPCODE_IDX]) &
NES_CQE_VALID) {
cqe = nescq->hw_cq.cq_vbase[head];
@@ -3517,7 +3697,7 @@ static int nes_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry)
((u64)u32temp);
nesqp = *((struct nes_qp **)&u64temp);
memset(entry, 0, sizeof *entry);
- if (0 == cqe.cqe_words[NES_CQE_ERROR_CODE_IDX]) {
+ if (cqe.cqe_words[NES_CQE_ERROR_CODE_IDX] == 0) {
entry->status = IB_WC_SUCCESS;
} else {
entry->status = IB_WC_WR_FLUSH_ERR;
@@ -3535,8 +3715,10 @@ static int nes_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry)
/* Working on a SQ Completion*/
wq_tail = wqe_index;
nesqp->hwqp.sq_tail = (wqe_index+1)&(nesqp->hwqp.sq_size - 1);
- wrid = (((u64)(cpu_to_le32((u32)nesqp->hwqp.sq_vbase[wq_tail].wqe_words[NES_IWARP_SQ_WQE_COMP_SCRATCH_HIGH_IDX])))<<32) |
- ((u64)(cpu_to_le32((u32)nesqp->hwqp.sq_vbase[wq_tail].wqe_words[NES_IWARP_SQ_WQE_COMP_SCRATCH_LOW_IDX])));
+ wrid = (((u64)(cpu_to_le32((u32)nesqp->hwqp.sq_vbase[wq_tail].
+ wqe_words[NES_IWARP_SQ_WQE_COMP_SCRATCH_HIGH_IDX]))) << 32) |
+ ((u64)(cpu_to_le32((u32)nesqp->hwqp.sq_vbase[wq_tail].
+ wqe_words[NES_IWARP_SQ_WQE_COMP_SCRATCH_LOW_IDX])));
entry->byte_len = le32_to_cpu(nesqp->hwqp.sq_vbase[wq_tail].
wqe_words[NES_IWARP_SQ_WQE_TOTAL_PAYLOAD_IDX]);
@@ -3575,7 +3757,7 @@ static int nes_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry)
head = 0;
cqe_count++;
nescq->polled_completions++;
- if ((nescq->polled_completions > (cq_size/2)) ||
+ if ((nescq->polled_completions > (cq_size / 2)) ||
(nescq->polled_completions == 255)) {
nes_debug(NES_DBG_CQ, "CQ%u Issuing CQE Allocate since more than half of cqes"
" are pending %u of %u.\n",
@@ -3608,11 +3790,7 @@ static int nes_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry)
/**
* nes_req_notify_cq
*/
-#ifdef OFED_1_2
-static int nes_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify notify)
-#else
static int nes_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags notify_flags)
-#endif
{
struct nes_vnic *nesvnic = to_nesvnic(ibcq->device);
struct nes_device *nesdev = nesvnic->nesdev;
@@ -3623,17 +3801,10 @@ static int nes_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags notify_
nescq->hw_cq.cq_number);
cq_arm = nescq->hw_cq.cq_number;
-#ifdef OFED_1_2
- if (notify == IB_CQ_NEXT_COMP)
- cq_arm |= NES_CQE_ALLOC_NOTIFY_NEXT;
- else if (notify == IB_CQ_SOLICITED)
- cq_arm |= NES_CQE_ALLOC_NOTIFY_SE;
-#else
if ((notify_flags & IB_CQ_SOLICITED_MASK) == IB_CQ_NEXT_COMP)
cq_arm |= NES_CQE_ALLOC_NOTIFY_NEXT;
else if ((notify_flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED)
cq_arm |= NES_CQE_ALLOC_NOTIFY_SE;
-#endif
else
return -EINVAL;
@@ -3688,9 +3859,7 @@ struct nes_ib_device *nes_init_ofa_device(struct net_device *netdev) {
(1ull << IB_USER_VERBS_CMD_POST_SEND);
nesibdev->ibdev.phys_port_cnt = 1;
-#ifndef OFED_1_2
nesibdev->ibdev.num_comp_vectors = 1;
-#endif
nesibdev->ibdev.dma_device = &nesdev->pcidev->dev;
nesibdev->ibdev.class_dev.dev = &nesdev->pcidev->dev;
nesibdev->ibdev.query_device = nes_query_device;
@@ -3733,7 +3902,7 @@ struct nes_ib_device *nes_init_ofa_device(struct net_device *netdev) {
nesibdev->ibdev.post_send = nes_post_send;
nesibdev->ibdev.post_recv = nes_post_recv;
- nesibdev->ibdev.iwcm = kmalloc(sizeof(*nesibdev->ibdev.iwcm), GFP_KERNEL);
+ nesibdev->ibdev.iwcm = kzalloc(sizeof(*nesibdev->ibdev.iwcm), GFP_KERNEL);
if (nesibdev->ibdev.iwcm == NULL) {
ib_dealloc_device(&nesibdev->ibdev);
return NULL;
@@ -3756,15 +3925,13 @@ struct nes_ib_device *nes_init_ofa_device(struct net_device *netdev) {
*/
void nes_destroy_ofa_device(struct nes_ib_device *nesibdev)
{
- if (NULL == nesibdev)
+ if (nesibdev == NULL)
return;
nes_unregister_ofa_device(nesibdev);
kfree(nesibdev->ibdev.iwcm);
ib_dealloc_device(&nesibdev->ibdev);
-
- nes_debug(NES_DBG_SHUTDOWN, "\n");
}
@@ -3780,7 +3947,6 @@ int nes_register_ofa_device(struct nes_ib_device *nesibdev)
ret = ib_register_device(&nesvnic->nesibdev->ibdev);
if (ret) {
- nes_debug(NES_DBG_INIT, "\n");
return ret;
}
@@ -3791,7 +3957,6 @@ int nes_register_ofa_device(struct nes_ib_device *nesibdev)
nesibdev->max_pd = nesadapter->max_pd / nesadapter->port_count;
for (i = 0; i < ARRAY_SIZE(nes_class_attributes); ++i) {
- nes_debug(NES_DBG_INIT, "call class_device_create_file\n");
ret = class_device_create_file(&nesibdev->ibdev.class_dev, nes_class_attributes[i]);
if (ret) {
while (i > 0) {
@@ -3818,7 +3983,7 @@ void nes_unregister_ofa_device(struct nes_ib_device *nesibdev)
struct nes_vnic *nesvnic = nesibdev->nesvnic;
int i;
- if (NULL == nesibdev)
+ if (nesibdev == NULL)
return;
for (i = 0; i < ARRAY_SIZE(nes_class_attributes); ++i) {
@@ -3826,11 +3991,8 @@ void nes_unregister_ofa_device(struct nes_ib_device *nesibdev)
}
if (nesvnic->of_device_registered) {
- nes_debug(NES_DBG_SHUTDOWN, "call ib_unregister_device()\n");
ib_unregister_device(&nesibdev->ibdev);
}
nesvnic->of_device_registered = 0;
-
}
-
diff --git a/drivers/infiniband/hw/nes/nes_verbs.h b/drivers/infiniband/hw/nes/nes_verbs.h
index ef358f2..b53e492 100644
--- a/drivers/infiniband/hw/nes/nes_verbs.h
+++ b/drivers/infiniband/hw/nes/nes_verbs.h
@@ -42,38 +42,37 @@ struct nes_device;
struct nes_ucontext {
struct ib_ucontext ibucontext;
- struct nes_device *nesdev;
- unsigned long mmap_wq_offset;
- unsigned long mmap_cq_offset; /* to be removed */
- int index; /* rnic index (minor) */
- unsigned long allocated_doorbells[BITS_TO_LONGS(NES_MAX_USER_DB_REGIONS)];
- u16 mmap_db_index[NES_MAX_USER_DB_REGIONS];
- u16 first_free_db;
- unsigned long allocated_wqs[BITS_TO_LONGS(NES_MAX_USER_WQ_REGIONS)];
- struct nes_qp * mmap_nesqp[NES_MAX_USER_WQ_REGIONS];
- u16 first_free_wq;
- struct list_head cq_reg_mem_list;
+ struct nes_device *nesdev;
+ unsigned long mmap_wq_offset;
+ unsigned long mmap_cq_offset; /* to be removed */
+ int index; /* rnic index (minor) */
+ unsigned long allocated_doorbells[BITS_TO_LONGS(NES_MAX_USER_DB_REGIONS)];
+ u16 mmap_db_index[NES_MAX_USER_DB_REGIONS];
+ u16 first_free_db;
+ unsigned long allocated_wqs[BITS_TO_LONGS(NES_MAX_USER_WQ_REGIONS)];
+ struct nes_qp *mmap_nesqp[NES_MAX_USER_WQ_REGIONS];
+ u16 first_free_wq;
+ struct list_head cq_reg_mem_list;
+ struct list_head qp_reg_mem_list;
};
struct nes_pd {
struct ib_pd ibpd;
- u16 pd_id;
- atomic_t sqp_count;
- u16 mmap_db_index;
+ u16 pd_id;
+ atomic_t sqp_count;
+ u16 mmap_db_index;
};
struct nes_mr {
union {
- struct ib_mr ibmr;
- struct ib_mw ibmw;
+ struct ib_mr ibmr;
+ struct ib_mw ibmw;
struct ib_fmr ibfmr;
};
-#ifndef OFED_1_2
- struct ib_umem *region;
-#endif
- u16 pbls_used;
- u8 mode;
- u8 pbl_4k;
+ struct ib_umem *region;
+ u16 pbls_used;
+ u8 mode;
+ u8 pbl_4k;
};
struct nes_hw_pb {
@@ -82,35 +81,35 @@ struct nes_hw_pb {
};
struct nes_vpbl {
- dma_addr_t pbl_pbase;
+ dma_addr_t pbl_pbase;
struct nes_hw_pb *pbl_vbase;
};
struct nes_root_vpbl {
- dma_addr_t pbl_pbase;
+ dma_addr_t pbl_pbase;
struct nes_hw_pb *pbl_vbase;
- struct nes_vpbl *leaf_vpbl;
+ struct nes_vpbl *leaf_vpbl;
};
struct nes_fmr {
- struct nes_mr nesmr;
- u32 leaf_pbl_cnt;
+ struct nes_mr nesmr;
+ u32 leaf_pbl_cnt;
struct nes_root_vpbl root_vpbl;
- struct ib_qp* ib_qp;
- int access_rights;
- struct ib_fmr_attr attr;
+ struct ib_qp *ib_qp;
+ int access_rights;
+ struct ib_fmr_attr attr;
};
struct nes_av;
struct nes_cq {
- struct ib_cq ibcq;
+ struct ib_cq ibcq;
struct nes_hw_cq hw_cq;
- u32 polled_completions;
- u32 cq_mem_size;
- spinlock_t lock;
- u8 virtual_cq;
- u8 pad[3];
+ u32 polled_completions;
+ u32 cq_mem_size;
+ spinlock_t lock;
+ u8 virtual_cq;
+ u8 pad[3];
};
struct nes_wq {
@@ -121,45 +120,48 @@ struct iw_cm_id;
struct ietf_mpa_frame;
struct nes_qp {
- struct ib_qp ibqp;
- void * allocated_buffer;
- struct iw_cm_id *cm_id;
+ struct ib_qp ibqp;
+ void *allocated_buffer;
+ struct iw_cm_id *cm_id;
struct workqueue_struct *wq;
- struct work_struct disconn_work;
- struct nes_cq *nesscq;
- struct nes_cq *nesrcq;
- struct nes_pd *nespd;
+ struct work_struct disconn_work;
+ struct nes_cq *nesscq;
+ struct nes_cq *nesrcq;
+ struct nes_pd *nespd;
void *cm_node; /* handle of the node this QP is associated with */
struct ietf_mpa_frame *ietf_frame;
- dma_addr_t ietf_frame_pbase;
- wait_queue_head_t state_waitq;
- unsigned long socket;
- struct nes_hw_qp hwqp;
- struct work_struct work;
- struct work_struct ae_work;
- enum ib_qp_state ibqp_state;
- u32 iwarp_state;
- u32 hte_index;
- u32 last_aeq;
- u32 qp_mem_size;
- atomic_t refcount;
- atomic_t close_timer_started;
- u32 mmap_sq_db_index;
- u32 mmap_rq_db_index;
- spinlock_t lock;
+ dma_addr_t ietf_frame_pbase;
+ wait_queue_head_t state_waitq;
+ unsigned long socket;
+ struct nes_hw_qp hwqp;
+ struct work_struct work;
+ struct work_struct ae_work;
+ enum ib_qp_state ibqp_state;
+ u32 iwarp_state;
+ u32 hte_index;
+ u32 last_aeq;
+ u32 qp_mem_size;
+ atomic_t refcount;
+ atomic_t close_timer_started;
+ u32 mmap_sq_db_index;
+ u32 mmap_rq_db_index;
+ spinlock_t lock;
struct nes_qp_context *nesqp_context;
- dma_addr_t nesqp_context_pbase;
- wait_queue_head_t kick_waitq;
- u16 in_disconnect;
- u16 private_data_len;
- u8 active_conn;
- u8 skip_lsmm;
- u8 user_mode;
- u8 hte_added;
- u8 hw_iwarp_state;
- u8 flush_issued;
- u8 hw_tcp_state;
- u8 disconn_pending;
- u8 destroyed;
+ dma_addr_t nesqp_context_pbase;
+ void *pbl_vbase;
+ dma_addr_t pbl_pbase;
+ struct page *page;
+ wait_queue_head_t kick_waitq;
+ u16 in_disconnect;
+ u16 private_data_len;
+ u8 active_conn;
+ u8 skip_lsmm;
+ u8 user_mode;
+ u8 hte_added;
+ u8 hw_iwarp_state;
+ u8 flush_issued;
+ u8 hw_tcp_state;
+ u8 disconn_pending;
+ u8 destroyed;
};
#endif /* NES_VERBS_H */