@@ -1042,7 +1042,7 @@ static void cleanup_status_page(struct intel_engine_cs *engine)
/* Prevent writes into HWSP after returning the page to the system */
intel_engine_set_hwsp_writemask(engine, ~0u);
- vma = fetch_and_zero(&engine->status_page.vma);
+ vma = __xchg(&engine->status_page.vma, NULL);
if (!vma)
return;
@@ -229,7 +229,7 @@ static void heartbeat(struct work_struct *wrk)
mutex_unlock(&ce->timeline->mutex);
out:
if (!engine->i915->params.enable_hangcheck || !next_heartbeat(engine))
- i915_request_put(fetch_and_zero(&engine->heartbeat.systole));
+ i915_request_put(__xchg(&engine->heartbeat.systole, NULL));
intel_engine_pm_put(engine);
}
@@ -244,7 +244,7 @@ void intel_engine_unpark_heartbeat(struct intel_engine_cs *engine)
void intel_engine_park_heartbeat(struct intel_engine_cs *engine)
{
if (cancel_delayed_work(&engine->heartbeat.work))
- i915_request_put(fetch_and_zero(&engine->heartbeat.systole));
+ i915_request_put(__xchg(&engine->heartbeat.systole, NULL));
}
void intel_gt_unpark_heartbeats(struct intel_gt *gt)
@@ -3199,7 +3199,7 @@ static void execlists_reset_cancel(struct intel_engine_cs *engine)
RB_CLEAR_NODE(rb);
spin_lock(&ve->base.sched_engine->lock);
- rq = fetch_and_zero(&ve->request);
+ rq = __xchg(&ve->request, NULL);
if (rq) {
if (i915_request_mark_eio(rq)) {
rq->engine = engine;
@@ -3604,7 +3604,7 @@ static void rcu_virtual_context_destroy(struct work_struct *wrk)
spin_lock_irq(&ve->base.sched_engine->lock);
- old = fetch_and_zero(&ve->request);
+ old = __xchg(&ve->request, NULL);
if (old) {
GEM_BUG_ON(!__i915_request_is_complete(old));
__i915_request_submit(old);
@@ -684,7 +684,7 @@ static void fini_aliasing_ppgtt(struct i915_ggtt *ggtt)
{
struct i915_ppgtt *ppgtt;
- ppgtt = fetch_and_zero(&ggtt->alias);
+ ppgtt = __xchg(&ggtt->alias, NULL);
if (!ppgtt)
return;
@@ -1238,7 +1238,7 @@ bool i915_ggtt_resume_vm(struct i915_address_space *vm)
was_bound);
if (obj) { /* only used during resume => exclusive access */
- write_domain_objs |= fetch_and_zero(&obj->write_domain);
+ write_domain_objs |= __xchg(&obj->write_domain, 0);
obj->read_domains |= I915_GEM_DOMAIN_GTT;
}
}
@@ -70,7 +70,7 @@ gsc_ext_om_alloc(struct intel_gsc *gsc, struct intel_gsc_intf *intf, size_t size
static void gsc_ext_om_destroy(struct intel_gsc_intf *intf)
{
- struct drm_i915_gem_object *obj = fetch_and_zero(&intf->gem_obj);
+ struct drm_i915_gem_object *obj = __xchg(&intf->gem_obj, NULL);
if (!obj)
return;
@@ -753,7 +753,7 @@ int intel_gt_init(struct intel_gt *gt)
intel_uc_fini(>->uc);
err_engines:
intel_engines_release(gt);
- i915_vm_put(fetch_and_zero(>->vm));
+ i915_vm_put(__xchg(&gt->vm, NULL));
err_pm:
intel_gt_pm_fini(gt);
intel_gt_fini_scratch(gt);
@@ -800,7 +800,7 @@ void intel_gt_driver_release(struct intel_gt *gt)
{
struct i915_address_space *vm;
- vm = fetch_and_zero(>->vm);
+ vm = __xchg(&gt->vm, NULL);
if (vm) /* FIXME being called twice on error paths :( */
i915_vm_put(vm);
@@ -124,7 +124,7 @@ static int __gt_unpark(struct intel_wakeref *wf)
static int __gt_park(struct intel_wakeref *wf)
{
struct intel_gt *gt = container_of(wf, typeof(*gt), wakeref);
- intel_wakeref_t wakeref = fetch_and_zero(>->awake);
+ intel_wakeref_t wakeref = __xchg(>->awake, 0);
struct drm_i915_private *i915 = gt->i915;
GT_TRACE(gt, "\n");
@@ -1144,7 +1144,7 @@ __lrc_alloc_state(struct intel_context *ce, struct intel_engine_cs *engine)
static struct intel_timeline *
pinned_timeline(struct intel_context *ce, struct intel_engine_cs *engine)
{
- struct intel_timeline *tl = fetch_and_zero(&ce->timeline);
+ struct intel_timeline *tl = __xchg(&ce->timeline, NULL);
return intel_timeline_create_from_engine(engine, page_unmask_bits(tl));
}
@@ -1261,8 +1261,8 @@ void lrc_fini(struct intel_context *ce)
if (!ce->state)
return;
- intel_ring_put(fetch_and_zero(&ce->ring));
- i915_vma_put(fetch_and_zero(&ce->state));
+ intel_ring_put(__xchg(&ce->ring, NULL));
+ i915_vma_put(__xchg(&ce->state, NULL));
}
void lrc_destroy(struct kref *kref)
@@ -1147,7 +1147,7 @@ void intel_migrate_fini(struct intel_migrate *m)
{
struct intel_context *ce;
- ce = fetch_and_zero(&m->context);
+ ce = __xchg(&m->context, NULL);
if (!ce)
return;
@@ -702,7 +702,7 @@ void intel_rc6_fini(struct intel_rc6 *rc6)
intel_rc6_disable(rc6);
- pctx = fetch_and_zero(&rc6->pctx);
+ pctx = __xchg(&rc6->pctx, NULL);
if (pctx)
i915_gem_object_put(pctx);
@@ -1831,7 +1831,7 @@ static void rps_work(struct work_struct *work)
u32 pm_iir = 0;
spin_lock_irq(gt->irq_lock);
- pm_iir = fetch_and_zero(&rps->pm_iir) & rps->pm_events;
+ pm_iir = __xchg(&rps->pm_iir, 0) & rps->pm_events;
client_boost = atomic_read(&rps->num_waiters);
spin_unlock_irq(gt->irq_lock);
@@ -171,7 +171,7 @@ static int live_context_size(void *arg)
* active state is sufficient, we are only checking that we
* don't use more than we planned.
*/
- saved = fetch_and_zero(&engine->default_state);
+ saved = __xchg(&engine->default_state, NULL);
/* Overlaps with the execlists redzone */
engine->context_size += I915_GTT_PAGE_SIZE;
@@ -269,7 +269,7 @@ static int live_ctx_switch_wa(void *arg)
if (IS_GRAPHICS_VER(gt->i915, 4, 5))
continue; /* MI_STORE_DWORD is privileged! */
- saved_wa = fetch_and_zero(&engine->wa_ctx.vma);
+ saved_wa = __xchg(&engine->wa_ctx.vma, NULL);
intel_engine_pm_get(engine);
err = __live_ctx_switch_wa(engine);
@@ -892,7 +892,7 @@ static int create_watcher(struct hwsp_watcher *w,
static int check_watcher(struct hwsp_watcher *w, const char *name,
bool (*op)(u32 hwsp, u32 seqno))
{
- struct i915_request *rq = fetch_and_zero(&w->rq);
+ struct i915_request *rq = __xchg(&w->rq, NULL);
u32 offset, end;
int err;
@@ -110,7 +110,7 @@ void intel_gsc_uc_fini(struct intel_gsc_uc *gsc)
flush_work(&gsc->work);
if (gsc->ce)
- intel_engine_destroy_pinned_context(fetch_and_zero(&gsc->ce));
+ intel_engine_destroy_pinned_context(__xchg(&gsc->ce, NULL));
i915_vma_unpin_and_release(&gsc->local, 0);
@@ -169,7 +169,7 @@ static void __uc_capture_load_err_log(struct intel_uc *uc)
static void __uc_free_load_err_log(struct intel_uc *uc)
{
- struct drm_i915_gem_object *log = fetch_and_zero(&uc->load_err_log);
+ struct drm_i915_gem_object *log = __xchg(&uc->load_err_log, NULL);
if (log)
i915_gem_object_put(log);
@@ -1119,7 +1119,7 @@ void intel_uc_fw_cleanup_fetch(struct intel_uc_fw *uc_fw)
if (!intel_uc_fw_is_available(uc_fw))
return;
- i915_gem_object_put(fetch_and_zero(&uc_fw->obj));
+ i915_gem_object_put(__xchg(&uc_fw->obj, NULL));
intel_uc_fw_change_status(uc_fw, INTEL_UC_FIRMWARE_SELECTED);
}
@@ -26,6 +26,7 @@
#define __I915_UTILS_H
#include <linux/list.h>
+#include <linux/non-atomic/xchg.h>
#include <linux/overflow.h>
#include <linux/sched.h>
#include <linux/string_helpers.h>