@@ -88,6 +88,7 @@ static void rxe_init_device_param(struct rxe_dev *rxe)
rxe->attr.odp_caps.per_transport_caps.rc_odp_caps |= IB_ODP_SUPPORT_RECV;
rxe->attr.odp_caps.per_transport_caps.rc_odp_caps |= IB_ODP_SUPPORT_WRITE;
rxe->attr.odp_caps.per_transport_caps.rc_odp_caps |= IB_ODP_SUPPORT_READ;
+ rxe->attr.odp_caps.per_transport_caps.rc_odp_caps |= IB_ODP_SUPPORT_ATOMIC;
rxe->attr.odp_caps.per_transport_caps.rc_odp_caps |= IB_ODP_SUPPORT_SRQ_RECV;
}
}
@@ -194,6 +194,9 @@ int rxe_odp_mr_init_user(struct rxe_dev *rxe, u64 start, u64 length,
u64 iova, int access_flags, struct rxe_mr *mr);
int rxe_odp_mr_copy(struct rxe_mr *mr, u64 iova, void *addr, int length,
enum rxe_mr_copy_dir dir);
+int rxe_odp_mr_atomic_op(struct rxe_mr *mr, u64 iova, int opcode,
+ u64 compare, u64 swap_add, u64 *orig_val);
+
#else /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */
static inline int
rxe_odp_mr_init_user(struct rxe_dev *rxe, u64 start, u64 length, u64 iova,
@@ -207,6 +210,12 @@ rxe_odp_mr_copy(struct rxe_mr *mr, u64 iova, void *addr,
{
return -EOPNOTSUPP;
}
+static inline int
+rxe_odp_mr_atomic_op(struct rxe_mr *mr, u64 iova, int opcode,
+		     u64 compare, u64 swap_add, u64 *orig_val)
+{
+	return RESPST_ERR_UNSUPPORTED_OPCODE; /* resp_states value, not -errno: fed to responder FSM */
+}
#endif /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */
@@ -268,3 +268,52 @@ int rxe_odp_mr_copy(struct rxe_mr *mr, u64 iova, void *addr, int length,
 	return err;
 }
+
+/* Execute an atomic operation (cmp&swp / fetch&add) on an ODP-backed MR.
+ *
+ * Returns 0 or a RESPST_ERR_* responder state; the caller (atomic_reply())
+ * feeds the return value straight back into the responder state machine
+ * together with rxe_mr_do_atomic_op()'s result, so a raw -errno must never
+ * escape from this function.
+ */
+int rxe_odp_mr_atomic_op(struct rxe_mr *mr, u64 iova, int opcode,
+			 u64 compare, u64 swap_add, u64 *orig_val)
+{
+	int err;
+	int retry = 0;
+	struct ib_umem_odp *umem_odp = to_ib_umem_odp(mr->umem);
+
+	mutex_lock(&umem_odp->umem_mutex);
+
+	/* Atomic operations manipulate a single char. */
+	if (rxe_odp_check_pages(mr, iova, sizeof(char), 0))
+		goto need_fault;
+
+	err = rxe_mr_do_atomic_op(mr, iova, opcode, compare,
+				  swap_add, orig_val);
+
+	mutex_unlock(&umem_odp->umem_mutex);
+
+	return err;
+
+need_fault:
+	/* allow max 3 tries for pagefault */
+	do {
+		mutex_unlock(&umem_odp->umem_mutex);
+
+		if (retry > 2)
+			return RESPST_ERR_RKEY_VIOLATION;
+
+		/* umem_mutex is locked on success */
+		err = rxe_odp_do_pagefault_and_lock(mr, iova, sizeof(char), 0);
+		if (err < 0)
+			return RESPST_ERR_RKEY_VIOLATION;
+		retry++;
+	} while (rxe_odp_check_pages(mr, iova, sizeof(char), 0));
+
+	err = rxe_mr_do_atomic_op(mr, iova, opcode, compare,
+				  swap_add, orig_val);
+
+	mutex_unlock(&umem_odp->umem_mutex);
+
+	return err;
+}
@@ -693,7 +693,10 @@ static enum resp_states atomic_reply(struct rxe_qp *qp,
u64 iova = qp->resp.va + qp->resp.offset;
if (mr->umem->is_odp)
- err = RESPST_ERR_UNSUPPORTED_OPCODE;
+ err = rxe_odp_mr_atomic_op(mr, iova, pkt->opcode,
+ atmeth_comp(pkt),
+ atmeth_swap_add(pkt),
+ &res->atomic.orig_val);
else
err = rxe_mr_do_atomic_op(mr, iova, pkt->opcode,
atmeth_comp(pkt),