@@ -88,6 +88,7 @@ static void rxe_init_device_param(struct rxe_dev *rxe)
rxe->attr.odp_caps.per_transport_caps.rc_odp_caps |= IB_ODP_SUPPORT_RECV;
rxe->attr.odp_caps.per_transport_caps.rc_odp_caps |= IB_ODP_SUPPORT_WRITE;
rxe->attr.odp_caps.per_transport_caps.rc_odp_caps |= IB_ODP_SUPPORT_READ;
+ rxe->attr.odp_caps.per_transport_caps.rc_odp_caps |= IB_ODP_SUPPORT_ATOMIC;
rxe->attr.odp_caps.per_transport_caps.rc_odp_caps |= IB_ODP_SUPPORT_SRQ_RECV;
}
}
@@ -194,6 +194,9 @@ int rxe_odp_mr_init_user(struct rxe_dev *rxe, u64 start, u64 length,
u64 iova, int access_flags, struct rxe_mr *mr);
int rxe_odp_mr_copy(struct rxe_mr *mr, u64 iova, void *addr, int length,
enum rxe_mr_copy_dir dir);
+int rxe_odp_mr_atomic_op(struct rxe_mr *mr, u64 iova, int opcode,
+ u64 compare, u64 swap_add, u64 *orig_val);
+
#else /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */
static inline int
rxe_odp_mr_init_user(struct rxe_dev *rxe, u64 start, u64 length, u64 iova,
@@ -207,6 +210,12 @@ rxe_odp_mr_copy(struct rxe_mr *mr, u64 iova, void *addr,
{
return -EOPNOTSUPP;
}
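+/* Report the atomic request as an unsupported opcode when ODP is disabled. */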
+static inline int
+rxe_odp_mr_atomic_op(struct rxe_mr *mr, u64 iova, int opcode,
+ u64 compare, u64 swap_add, u64 *orig_val)
+{
+ return RESPST_ERR_UNSUPPORTED_OPCODE;
+}
#endif /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */
@@ -498,7 +498,12 @@ int rxe_mr_do_atomic_op(struct rxe_mr *mr, u64 iova, int opcode,
}
page_offset = rxe_mr_iova_to_page_offset(mr, iova);
index = rxe_mr_iova_to_index(mr, iova);
- page = xa_load(&mr->page_list, index);
+
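+ /*
+ * For ODP MRs the page_list entries are stored as tagged pointers,
+ * so the tag must be stripped before the page can be used.
+ */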
+ if (mr->umem->is_odp)
+ page = xa_untag_pointer(xa_load(&mr->page_list, index));
+ else
+ page = xa_load(&mr->page_list, index);
+
if (!page)
return RESPST_ERR_RKEY_VIOLATION;
}
@@ -254,3 +254,36 @@ int rxe_odp_mr_copy(struct rxe_mr *mr, u64 iova, void *addr, int length,
return err;
}
+
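+/*
+ * rxe_odp_mr_atomic_op() - carry out an atomic operation on an ODP MR.
+ *
+ * Ensures the target page is present, faulting it in if necessary, and
+ * keeps xa_lock held while rxe_mr_do_atomic_op() runs so that the
+ * page_list cannot change underneath the operation.
+ */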
+int rxe_odp_mr_atomic_op(struct rxe_mr *mr, u64 iova, int opcode,
+ u64 compare, u64 swap_add, u64 *orig_val)
+{
+ struct ib_umem_odp *umem_odp = to_ib_umem_odp(mr->umem);
+ int err;
+
+ spin_lock(&mr->page_list.xa_lock);
+
+ /*
+ * It is enough to check a single byte here: the atomic operand is
+ * 8 bytes and must be 8-byte aligned, so it never crosses a page
+ * boundary.
+ */
+ if (rxe_odp_check_pages(mr, iova, sizeof(char), 0)) {
+ spin_unlock(&mr->page_list.xa_lock);
+
+ /* On success, umem_mutex is returned held; it is released again below. */
+ err = rxe_odp_do_pagefault_and_lock(mr, iova, sizeof(char), 0);
+ if (err < 0)
+ return err;
+
+ /*
+ * Outside of MR initialization, xa_lock is only ever taken while
+ * umem_mutex is already held, so acquiring it here in the same
+ * order cannot deadlock.
+ */
+ spin_lock(&mr->page_list.xa_lock);
+ mutex_unlock(&umem_odp->umem_mutex);
+ }
+
+ err = rxe_mr_do_atomic_op(mr, iova, opcode, compare,
+ swap_add, orig_val);
+
+ spin_unlock(&mr->page_list.xa_lock);
+
+ return err;
+}
@@ -693,7 +693,10 @@ static enum resp_states atomic_reply(struct rxe_qp *qp,
u64 iova = qp->resp.va + qp->resp.offset;
if (mr->umem->is_odp)
- err = RESPST_ERR_UNSUPPORTED_OPCODE;
+ err = rxe_odp_mr_atomic_op(mr, iova, pkt->opcode,
+ atmeth_comp(pkt),
+ atmeth_swap_add(pkt),
+ &res->atomic.orig_val);
else
err = rxe_mr_do_atomic_op(mr, iova, pkt->opcode,
atmeth_comp(pkt),