[openib-general] InfiniPath driver announcement
Roland Dreier
rolandd at cisco.com
Tue Oct 11 12:46:36 PDT 2005
I started working on some final cleanups of the uverbs interface
before merging stuff onto the trunk. The patch below mixes some
simple cleanups with a slight change to the work request posting
interface. I changed the ABI so that the work requests are passed as
part of the same write as the command, and modified the implementation
to copy the work requests one by one instead of in one giant chunk.
I also added a WQE size field to allow for future device-specific
extensions to work request posting.
The following is compile-tested only, and I haven't modified the
userspace library to match, but I wanted to give you some idea of what
I was doing in case you had some comments or started working on it too.
What do you think?
- R.
--- infiniband/include/rdma/ib_user_verbs.h (revision 3725)
+++ infiniband/include/rdma/ib_user_verbs.h (working copy)
@@ -89,8 +89,11 @@ enum {
* Make sure that all structs defined in this file remain laid out so
* that they pack the same way on 32-bit and 64-bit architectures (to
* avoid incompatibility between 32-bit userspace and 64-bit kernels).
- * In particular do not use pointer types -- pass pointers in __u64
- * instead.
+ * Specifically:
+ * - Do not use pointer types -- pass pointers in __u64 instead.
+ * - Make sure that any structure larger than 4 bytes is padded to a
+ * multiple of 8 bytes. Otherwise the structure size will be
+ * different between 32-bit and 64-bit architectures.
*/
struct ib_uverbs_async_event_desc {
@@ -284,12 +287,12 @@ struct ib_uverbs_wc {
__u8 sl;
__u8 dlid_path_bits;
__u8 port_num;
- __u8 reserved; /* Align struct to 8 bytes */
+ __u8 reserved;
};
struct ib_uverbs_poll_cq_resp {
__u32 count;
- __u32 reserved; /* Align struct to 8 bytes */
+ __u32 reserved;
struct ib_uverbs_wc wc[];
};
@@ -417,20 +420,20 @@ struct ib_uverbs_send_wr {
struct {
__u64 remote_addr;
__u32 rkey;
- __u32 reserved; /* Align struct to 8 bytes */
+ __u32 reserved;
} rdma;
struct {
__u64 remote_addr;
__u64 compare_add;
__u64 swap;
__u32 rkey;
- __u32 reserved; /* Align struct to 8 bytes */
+ __u32 reserved;
} atomic;
struct {
__u32 ah;
__u32 remote_qpn;
__u32 remote_qkey;
- __u32 reserved; /* Align struct to 8 bytes */
+ __u32 reserved;
} ud;
} wr;
};
@@ -440,8 +443,7 @@ struct ib_uverbs_post_send {
__u32 qp_handle;
__u32 wr_count;
__u32 sge_count;
- __u32 reserved; /* Align struct to 8 bytes */
- __u64 wr;
+ __u32 wqe_size;
};
struct ib_uverbs_post_send_resp {
@@ -451,7 +453,7 @@ struct ib_uverbs_post_send_resp {
struct ib_uverbs_recv_wr {
__u64 wr_id;
__u32 num_sge;
- __u32 reserved; /* Align struct to 8 bytes */
+ __u32 reserved;
};
struct ib_uverbs_post_recv {
@@ -459,8 +461,7 @@ struct ib_uverbs_post_recv {
__u32 qp_handle;
__u32 wr_count;
__u32 sge_count;
- __u32 reserved; /* Align struct to 8 bytes */
- __u64 wr;
+ __u32 wqe_size;
};
struct ib_uverbs_post_recv_resp {
@@ -472,47 +473,38 @@ struct ib_uverbs_post_srq_recv {
__u32 srq_handle;
__u32 wr_count;
__u32 sge_count;
- __u32 reserved; /* Align struct to 8 bytes */
- __u64 wr;
+ __u32 wqe_size;
};
struct ib_uverbs_post_srq_recv_resp {
__u32 bad_wr;
};
-union ib_uverbs_gid {
- __u8 raw[16];
- struct {
- __u64 subnet_prefix;
- __u64 interface_id;
- } global;
-};
-
-struct ibv_m_global_route {
- union ib_uverbs_gid dgid;
+struct ib_uverbs_global_route {
+ __u8 dgid[16];
__u32 flow_label;
__u8 sgid_index;
__u8 hop_limit;
__u8 traffic_class;
- __u8 reserved; /* Align struct to 8 bytes */
+ __u8 reserved;
};
struct ib_uverbs_ah_attr {
- struct ibv_m_global_route grh;
+ struct ib_uverbs_global_route grh;
__u16 dlid;
__u8 sl;
__u8 src_path_bits;
__u8 static_rate;
__u8 is_global;
__u8 port_num;
- __u8 reserved; /* Align struct to 8 bytes */
+ __u8 reserved;
};
struct ib_uverbs_create_ah {
__u64 response;
__u64 user_handle;
__u32 pd_handle;
- __u32 reserved; /* Align struct to 8 bytes */
+ __u32 reserved;
struct ib_uverbs_ah_attr attr;
};
--- infiniband/core/uverbs_cmd.c (revision 3725)
+++ infiniband/core/uverbs_cmd.c (working copy)
@@ -680,6 +680,10 @@ ssize_t ib_uverbs_poll_cq(struct ib_uver
if (copy_from_user(&cmd, buf, sizeof cmd))
return -EFAULT;
+ /* Don't let userspace make us allocate a huge buffer */
+ if (cmd.ne > 256)
+ return -ENOMEM;
+
wc = kmalloc(cmd.ne * sizeof *wc, GFP_KERNEL);
if (!wc)
return -ENOMEM;
@@ -699,24 +703,25 @@ ssize_t ib_uverbs_poll_cq(struct ib_uver
}
resp->count = ib_poll_cq(cq, cmd.ne, wc);
- for(i = 0; i < cmd.ne; i++) {
- resp->wc[i].wr_id = wc[i].wr_id;
- resp->wc[i].status = wc[i].status;
- resp->wc[i].opcode = wc[i].opcode;
- resp->wc[i].vendor_err = wc[i].vendor_err;
- resp->wc[i].byte_len = wc[i].byte_len;
- resp->wc[i].imm_data = wc[i].imm_data;
- resp->wc[i].qp_num = wc[i].qp_num;
- resp->wc[i].src_qp = wc[i].src_qp;
- resp->wc[i].wc_flags = wc[i].wc_flags;
- resp->wc[i].pkey_index = wc[i].pkey_index;
- resp->wc[i].slid = wc[i].slid;
- resp->wc[i].sl = wc[i].sl;
+ for (i = 0; i < resp->count; i++) {
+ resp->wc[i].wr_id = wc[i].wr_id;
+ resp->wc[i].status = wc[i].status;
+ resp->wc[i].opcode = wc[i].opcode;
+ resp->wc[i].vendor_err = wc[i].vendor_err;
+ resp->wc[i].byte_len = wc[i].byte_len;
+ resp->wc[i].imm_data = wc[i].imm_data;
+ resp->wc[i].qp_num = wc[i].qp_num;
+ resp->wc[i].src_qp = wc[i].src_qp;
+ resp->wc[i].wc_flags = wc[i].wc_flags;
+ resp->wc[i].pkey_index = wc[i].pkey_index;
+ resp->wc[i].slid = wc[i].slid;
+ resp->wc[i].sl = wc[i].sl;
resp->wc[i].dlid_path_bits = wc[i].dlid_path_bits;
- resp->wc[i].port_num = wc[i].port_num;
+ resp->wc[i].port_num = wc[i].port_num;
}
- if (copy_to_user((void __user *)cmd.response, resp, rsize))
+ if (copy_to_user((void __user *) (unsigned long) cmd.response,
+ &resp, sizeof resp))
ret = -EFAULT;
out:
@@ -741,15 +746,12 @@ ssize_t ib_uverbs_req_notify_cq(struct i
down(&ib_uverbs_idr_mutex);
cq = idr_find(&ib_uverbs_cq_idr, cmd.cq_handle);
- if (!cq || cq->uobject->context != file->ucontext)
- goto out;
-
- ib_req_notify_cq(cq, cmd.solicited ? IB_CQ_SOLICITED : IB_CQ_NEXT_COMP);
-
- ret = in_len;
-
-out:
+ if (cq && cq->uobject->context == file->ucontext) {
+ ib_req_notify_cq(cq, cmd.solicited ? IB_CQ_SOLICITED : IB_CQ_NEXT_COMP);
+ ret = in_len;
+ }
up(&ib_uverbs_idr_mutex);
+
return ret;
}
@@ -1097,195 +1099,296 @@ ssize_t ib_uverbs_post_send(struct ib_uv
{
struct ib_uverbs_post_send cmd;
struct ib_uverbs_post_send_resp resp;
- struct ib_uverbs_send_wr *m_wr, *j;
- struct ib_send_wr *wr, *i, *bad_wr;
- struct ib_sge *s;
+ struct ib_uverbs_send_wr *user_wr;
+ struct ib_send_wr *wr = NULL, *last, *next, *bad_wr;
struct ib_qp *qp;
- int size;
- int count;
+ int i, sg_ind;
ssize_t ret = -EINVAL;
- resp.bad_wr = 0;
if (copy_from_user(&cmd, buf, sizeof cmd))
return -EFAULT;
+ if (in_len < sizeof cmd + cmd.wqe_size * cmd.wr_count +
+ cmd.sge_count * sizeof (struct ib_uverbs_sge))
+ return -EINVAL;
+
+ if (cmd.wqe_size < sizeof (struct ib_uverbs_send_wr))
+ return -EINVAL;
+
+ /* Don't let userspace make us allocate a huge buffer */
+ if (cmd.wqe_size > 4096)
+ return -ENOMEM;
+
+ user_wr = kmalloc(cmd.wqe_size, GFP_KERNEL);
+ if (!user_wr)
+ return -ENOMEM;
+
down(&ib_uverbs_idr_mutex);
qp = idr_find(&ib_uverbs_qp_idr, cmd.qp_handle);
if (!qp || qp->uobject->context != file->ucontext)
goto out;
- size = (cmd.wr_count * sizeof *wr) + (cmd.sge_count * sizeof *s);
- m_wr = kmalloc(size, GFP_KERNEL);
- if (!m_wr) {
- ret = -ENOMEM;
- goto out;
- }
-
- if (copy_from_user(m_wr, (void __user *)cmd.wr, size)) {
- ret = -EFAULT;
- goto wrout;
- }
+ sg_ind = 0;
+ last = NULL;
+ for (i = 0; i < cmd.wr_count; ++i) {
+ if (copy_from_user(user_wr,
+ buf + sizeof cmd + i * cmd.wqe_size,
+ cmd.wqe_size)) {
+ ret = -EFAULT;
+ goto out;
+ }
- wr = kmalloc(cmd.wr_count * sizeof *wr, GFP_KERNEL);
- if (!wr) {
- ret = -ENOMEM;
- goto wrout;
- }
+ /* Don't let userspace make us allocate a huge buffer */
+ if (user_wr->num_sge > 256) {
+ ret = -ENOMEM;
+ goto out;
+ }
- s = (struct ib_sge *)(m_wr + cmd.wr_count);
+ if (user_wr->num_sge + sg_ind > cmd.sge_count) {
+ ret = -EINVAL;
+ goto out;
+ }
- i = wr;
- j = m_wr;
- count = 0;
- while (count++ < cmd.wr_count) {
- struct ib_send_wr *t = i++;
- struct ib_uverbs_send_wr *u = j++;
+ next = kmalloc(ALIGN(sizeof *next, sizeof (struct ib_sge)) +
+ user_wr->num_sge * sizeof (struct ib_sge),
+ GFP_KERNEL);
+ if (!next) {
+ ret = -ENOMEM;
+ goto out;
+ }
- if (count < cmd.wr_count)
- t->next = i;
+ if (!last)
+ wr = next;
else
- t->next = NULL;
+ last->next = next;
+ last = next;
- t->wr_id = u->wr_id;
- t->num_sge = u->num_sge;
- t->opcode = u->opcode;
- t->send_flags = u->send_flags;
- t->imm_data = u->imm_data;
+ next->next = NULL;
+ next->wr_id = user_wr->wr_id;
+ next->num_sge = user_wr->num_sge;
+ next->opcode = user_wr->opcode;
+ next->send_flags = user_wr->send_flags;
+ next->imm_data = user_wr->imm_data;
if (qp->qp_type == IB_QPT_UD) {
- t->wr.ud.ah = idr_find(&ib_uverbs_ah_idr, u->wr.ud.ah);
- if (!t->wr.ud.ah)
- goto kwrout;
- t->wr.ud.remote_qpn = u->wr.ud.remote_qpn;
- t->wr.ud.remote_qkey = u->wr.ud.remote_qkey;
+ next->wr.ud.ah = idr_find(&ib_uverbs_ah_idr,
+ user_wr->wr.ud.ah);
+ if (!next->wr.ud.ah) {
+ ret = -EINVAL;
+ goto out;
+ }
+ next->wr.ud.remote_qpn = user_wr->wr.ud.remote_qpn;
+ next->wr.ud.remote_qkey = user_wr->wr.ud.remote_qkey;
} else {
- switch (t->opcode) {
+ switch (next->opcode) {
case IB_WR_RDMA_WRITE:
case IB_WR_RDMA_WRITE_WITH_IMM:
case IB_WR_RDMA_READ:
- t->wr.rdma.remote_addr = u->wr.rdma.remote_addr;
- t->wr.rdma.rkey = u->wr.rdma.rkey;
+ next->wr.rdma.remote_addr =
+ user_wr->wr.rdma.remote_addr;
+ next->wr.rdma.rkey =
+ user_wr->wr.rdma.rkey;
break;
case IB_WR_ATOMIC_CMP_AND_SWP:
case IB_WR_ATOMIC_FETCH_AND_ADD:
- t->wr.atomic.remote_addr =
- u->wr.atomic.remote_addr;
- t->wr.atomic.compare_add =
- u->wr.atomic.compare_add;
- t->wr.atomic.swap = u->wr.atomic.swap;
- t->wr.atomic.rkey = u->wr.atomic.rkey;
+ next->wr.atomic.remote_addr =
+ user_wr->wr.atomic.remote_addr;
+ next->wr.atomic.compare_add =
+ user_wr->wr.atomic.compare_add;
+ next->wr.atomic.swap = user_wr->wr.atomic.swap;
+ next->wr.atomic.rkey = user_wr->wr.atomic.rkey;
break;
default:
break;
}
}
- if (t->num_sge) {
- t->sg_list = s;
- s += t->num_sge;
+ if (next->num_sge) {
+ next->sg_list = (void *) next +
+ ALIGN(sizeof *next, sizeof (struct ib_sge));
+ if (copy_from_user(next->sg_list,
+ buf + sizeof cmd +
+ cmd.wr_count * cmd.wqe_size +
+ sg_ind * sizeof (struct ib_sge),
+ next->num_sge * sizeof (struct ib_sge))) {
+ ret = -EFAULT;
+ goto out;
+ }
+ sg_ind += next->num_sge;
} else
- t->sg_list = NULL;
+ next->sg_list = NULL;
}
+ resp.bad_wr = 0;
ret = qp->device->post_send(qp, wr, &bad_wr);
- resp.bad_wr = ret ? (bad_wr - wr) + 1 : 0;
-
-kwrout:
- kfree(wr);
+ if (ret) {
+ for (next = wr; next; next = next->next) {
+ if (next == bad_wr)
+ break;
+ ++resp.bad_wr;
+ }
+ }
-wrout:
- kfree(m_wr);
+ if (copy_to_user((void __user *) (unsigned long) cmd.response,
+ &resp, sizeof resp))
+ ret = -EFAULT;
out:
up(&ib_uverbs_idr_mutex);
- if (copy_to_user((void __user *) (unsigned long) cmd.response,
- &resp, sizeof resp))
- ret = -EFAULT;
+ while (wr) {
+ next = wr->next;
+ kfree(wr);
+ wr = next;
+ }
+
+ kfree(user_wr);
return ret ? ret : in_len;
}
+static struct ib_recv_wr *ib_uverbs_unmarshall_recv(const char __user *buf,
+ int in_len,
+ u32 wr_count,
+ u32 sge_count,
+ u32 wqe_size)
+{
+ struct ib_uverbs_recv_wr *user_wr;
+ struct ib_recv_wr *wr = NULL, *last, *next;
+ int sg_ind;
+ int i;
+ int ret;
+
+ if (in_len < wqe_size * wr_count +
+ sge_count * sizeof (struct ib_uverbs_sge))
+ return ERR_PTR(-EINVAL);
+
+ if (wqe_size < sizeof (struct ib_uverbs_recv_wr))
+ return ERR_PTR(-EINVAL);
+
+ /* Don't let userspace make us allocate a huge buffer */
+ if (wqe_size > 4096)
+ return ERR_PTR(-ENOMEM);
+
+ user_wr = kmalloc(wqe_size, GFP_KERNEL);
+ if (!user_wr)
+ return ERR_PTR(-ENOMEM);
+
+ sg_ind = 0;
+ last = NULL;
+ for (i = 0; i < wr_count; ++i) {
+ if (copy_from_user(user_wr, buf + i * wqe_size,
+ wqe_size)) {
+ ret = -EFAULT;
+ goto err;
+ }
+
+ /* Don't let userspace make us allocate a huge buffer */
+ if (user_wr->num_sge > 256) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ if (user_wr->num_sge + sg_ind > sge_count) {
+ ret = -EINVAL;
+ goto err;
+ }
+
+ next = kmalloc(ALIGN(sizeof *next, sizeof (struct ib_sge)) +
+ user_wr->num_sge * sizeof (struct ib_sge),
+ GFP_KERNEL);
+ if (!next) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ if (!last)
+ wr = next;
+ else
+ last->next = next;
+ last = next;
+
+ next->next = NULL;
+ next->wr_id = user_wr->wr_id;
+ next->num_sge = user_wr->num_sge;
+
+ if (next->num_sge) {
+ next->sg_list = (void *) next +
+ ALIGN(sizeof *next, sizeof (struct ib_sge));
+ if (copy_from_user(next->sg_list,
+ buf + wr_count * wqe_size +
+ sg_ind * sizeof (struct ib_sge),
+ next->num_sge * sizeof (struct ib_sge))) {
+ ret = -EFAULT;
+ goto err;
+ }
+ sg_ind += next->num_sge;
+ } else
+ next->sg_list = NULL;
+ }
+
+ kfree(user_wr);
+ return wr;
+
+err:
+ kfree(user_wr);
+
+ while (wr) {
+ next = wr->next;
+ kfree(wr);
+ wr = next;
+ }
+
+ return ERR_PTR(ret);
+}
+
ssize_t ib_uverbs_post_recv(struct ib_uverbs_file *file,
const char __user *buf, int in_len,
int out_len)
{
struct ib_uverbs_post_recv cmd;
struct ib_uverbs_post_recv_resp resp;
- struct ib_uverbs_recv_wr *m_wr, *j;
- struct ib_recv_wr *wr, *i, *bad_wr;
- struct ib_sge *s;
+ struct ib_recv_wr *wr, *next, *bad_wr;
struct ib_qp *qp;
- int size;
- int count;
ssize_t ret = -EINVAL;
if (copy_from_user(&cmd, buf, sizeof cmd))
return -EFAULT;
+ wr = ib_uverbs_unmarshall_recv(buf + sizeof cmd,
+ in_len - sizeof cmd, cmd.wr_count,
+ cmd.sge_count, cmd.wqe_size);
+ if (IS_ERR(wr))
+ return PTR_ERR(wr);
+
down(&ib_uverbs_idr_mutex);
qp = idr_find(&ib_uverbs_qp_idr, cmd.qp_handle);
if (!qp || qp->uobject->context != file->ucontext)
goto out;
- size = (cmd.wr_count * sizeof *m_wr) + (cmd.sge_count * sizeof *s);
- m_wr = kmalloc(size, GFP_KERNEL);
- if (!m_wr) {
- ret = -ENOMEM;
- goto out;
- }
-
- if (copy_from_user(m_wr, (void __user *)cmd.wr, size)) {
- ret = -EFAULT;
- goto wrout;
- }
-
- wr = kmalloc(cmd.wr_count * sizeof *wr, GFP_KERNEL);
- if (!wr) {
- ret = -ENOMEM;
- goto wrout;
- }
-
- s = (struct ib_sge *)(m_wr + cmd.wr_count);
-
- i = wr;
- j = m_wr;
- count = 0;
- while (count++ < cmd.wr_count) {
- struct ib_recv_wr *t = i++;
- struct ib_uverbs_recv_wr *u = j++;
-
- if (count < cmd.wr_count)
- t->next = i;
- else
- t->next = NULL;
-
- t->wr_id = u->wr_id;
- t->num_sge = u->num_sge;
-
- if (t->num_sge) {
- t->sg_list = s;
- s += t->num_sge;
- } else
- t->sg_list = NULL;
- }
-
+ resp.bad_wr = 0;
ret = qp->device->post_recv(qp, wr, &bad_wr);
- resp.bad_wr = ret ? (bad_wr - wr) + 1 : 0;
+ if (ret)
+ for (next = wr; next; next = next->next) {
+ if (next == bad_wr)
+ break;
+ ++resp.bad_wr;
+ }
+
+ up(&ib_uverbs_idr_mutex);
if (copy_to_user((void __user *) (unsigned long) cmd.response,
&resp, sizeof resp))
ret = -EFAULT;
- kfree(wr);
-
-wrout:
- kfree(m_wr);
-
out:
- up(&ib_uverbs_idr_mutex);
+ while (wr) {
+ next = wr->next;
+ kfree(wr);
+ wr = next;
+ }
return ret ? ret : in_len;
}
@@ -1294,80 +1397,48 @@ ssize_t ib_uverbs_post_srq_recv(struct i
const char __user *buf, int in_len,
int out_len)
{
- struct ib_uverbs_post_srq_recv cmd;
+ struct ib_uverbs_post_srq_recv cmd;
struct ib_uverbs_post_srq_recv_resp resp;
- struct ib_uverbs_recv_wr *m_wr, *j;
- struct ib_recv_wr *wr, *i, *bad_wr;
- struct ib_sge *s;
- struct ib_srq *srq;
- int size;
- int count;
- ssize_t ret = -EFAULT;
+ struct ib_recv_wr *wr, *next, *bad_wr;
+ struct ib_srq *srq;
+ ssize_t ret = -EINVAL;
if (copy_from_user(&cmd, buf, sizeof cmd))
return -EFAULT;
+ wr = ib_uverbs_unmarshall_recv(buf + sizeof cmd,
+ in_len - sizeof cmd, cmd.wr_count,
+ cmd.sge_count, cmd.wqe_size);
+ if (IS_ERR(wr))
+ return PTR_ERR(wr);
+
down(&ib_uverbs_idr_mutex);
srq = idr_find(&ib_uverbs_srq_idr, cmd.srq_handle);
if (!srq || srq->uobject->context != file->ucontext)
goto out;
- size = (cmd.wr_count * sizeof *m_wr) + (cmd.sge_count * sizeof *s);
- m_wr = kmalloc(size, GFP_KERNEL);
- if (!m_wr) {
- ret = -ENOMEM;
- goto out;
- }
-
- if (copy_from_user(m_wr, (void __user *)cmd.wr, size)) {
- goto wrout;
- }
-
- wr = kmalloc(cmd.wr_count * sizeof *wr, GFP_KERNEL);
- if (!wr) {
- ret = -ENOMEM;
- goto wrout;
- }
-
- s = (struct ib_sge *)(m_wr + cmd.wr_count);
-
- i = wr;
- j = m_wr;
- count = 0;
- while (count++ < cmd.wr_count) {
- struct ib_recv_wr *t = i++;
- struct ib_uverbs_recv_wr *u = j++;
-
- if (count < cmd.wr_count)
- t->next = i;
- else
- t->next = NULL;
-
- t->wr_id = u->wr_id;
- t->num_sge = u->num_sge;
-
- if (t->num_sge) {
- t->sg_list = s;
- s += t->num_sge;
- } else
- t->sg_list = NULL;
- }
-
+ resp.bad_wr = 0;
ret = srq->device->post_srq_recv(srq, wr, &bad_wr);
- resp.bad_wr = ret ? (bad_wr - wr) + 1 : 0;
+ if (ret)
+ for (next = wr; next; next = next->next) {
+ if (next == bad_wr)
+ break;
+ ++resp.bad_wr;
+ }
+
+ up(&ib_uverbs_idr_mutex);
if (copy_to_user((void __user *) (unsigned long) cmd.response,
&resp, sizeof resp))
ret = -EFAULT;
- kfree(wr);
-
-wrout:
- kfree(m_wr);
-
out:
- up(&ib_uverbs_idr_mutex);
+ while (wr) {
+ next = wr->next;
+ kfree(wr);
+ wr = next;
+ }
return ret ? ret : in_len;
}
@@ -1405,19 +1476,16 @@ ssize_t ib_uverbs_create_ah(struct ib_uv
uobj->user_handle = cmd.user_handle;
uobj->context = file->ucontext;
- attr.dlid = cmd.attr.dlid;
- attr.sl = cmd.attr.sl;
- attr.src_path_bits = cmd.attr.src_path_bits;
- attr.static_rate = cmd.attr.static_rate;
- attr.port_num = cmd.attr.port_num;
- attr.grh.flow_label = cmd.attr.grh.flow_label;
- attr.grh.sgid_index = cmd.attr.grh.sgid_index;
- attr.grh.hop_limit = cmd.attr.grh.hop_limit;
+ attr.dlid = cmd.attr.dlid;
+ attr.sl = cmd.attr.sl;
+ attr.src_path_bits = cmd.attr.src_path_bits;
+ attr.static_rate = cmd.attr.static_rate;
+ attr.port_num = cmd.attr.port_num;
+ attr.grh.flow_label = cmd.attr.grh.flow_label;
+ attr.grh.sgid_index = cmd.attr.grh.sgid_index;
+ attr.grh.hop_limit = cmd.attr.grh.hop_limit;
attr.grh.traffic_class = cmd.attr.grh.traffic_class;
- attr.grh.dgid.global.subnet_prefix =
- cmd.attr.grh.dgid.global.subnet_prefix;
- attr.grh.dgid.global.interface_id =
- cmd.attr.grh.dgid.global.interface_id;
+ memcpy(attr.grh.dgid.raw, cmd.attr.grh.dgid, 16);
ah = ib_create_ah(pd, &attr);
if (IS_ERR(ah)) {
More information about the general mailing list