@@ -45,10 +45,12 @@ static struct arm_smmu_ctx_desc *
arm_smmu_share_asid(struct mm_struct *mm, u16 asid)
{
int ret;
+ unsigned long flags;
u32 new_asid;
struct arm_smmu_ctx_desc *cd;
struct arm_smmu_device *smmu;
struct arm_smmu_domain *smmu_domain;
+ struct arm_smmu_master *master;
cd = xa_load(&arm_smmu_asid_xa, asid);
if (!cd)
@@ -80,7 +82,11 @@ arm_smmu_share_asid(struct mm_struct *mm, u16 asid)
* be some overlap between use of both ASIDs, until we invalidate the
* TLB.
*/
- arm_smmu_write_ctx_desc(smmu_domain, 0, cd);
+ spin_lock_irqsave(&smmu_domain->devices_lock, flags);
+ list_for_each_entry(master, &smmu_domain->devices, domain_head) {
+ arm_smmu_write_ctx_desc(smmu, master->s1_cfg, master, 0, cd);
+ }
+ spin_unlock_irqrestore(&smmu_domain->devices_lock, flags);
/* Invalidate TLB entries previously associated with that context */
arm_smmu_tlb_inv_asid(smmu, asid);
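
The locked walk over smmu_domain->devices introduced above is repeated in
arm_smmu_mm_release(), arm_smmu_mmu_notifier_get() and
arm_smmu_mmu_notifier_put() below. A follow-up could factor it into a helper
along these lines; a minimal sketch, where the helper name
arm_smmu_update_ctx_desc_all() is hypothetical and not part of this patch:

static int arm_smmu_update_ctx_desc_all(struct arm_smmu_domain *smmu_domain,
					int ssid, struct arm_smmu_ctx_desc *cd)
{
	int ret = 0;
	unsigned long flags;
	struct arm_smmu_master *master;

	spin_lock_irqsave(&smmu_domain->devices_lock, flags);
	list_for_each_entry(master, &smmu_domain->devices, domain_head) {
		ret = arm_smmu_write_ctx_desc(master->smmu, master->s1_cfg,
					      master, ssid, cd);
		if (ret)
			break;
	}
	spin_unlock_irqrestore(&smmu_domain->devices_lock, flags);

	return ret;
}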
@@ -211,6 +217,8 @@ static void arm_smmu_mm_release(struct mmu_notifier *mn, struct mm_struct *mm)
{
struct arm_smmu_mmu_notifier *smmu_mn = mn_to_smmu(mn);
struct arm_smmu_domain *smmu_domain = smmu_mn->domain;
+ struct arm_smmu_master *master;
+ unsigned long flags;
mutex_lock(&sva_lock);
if (smmu_mn->cleared) {
@@ -222,7 +230,12 @@ static void arm_smmu_mm_release(struct mmu_notifier *mn, struct mm_struct *mm)
* DMA may still be running. Keep the cd valid to avoid C_BAD_CD events,
* but disable translation.
*/
- arm_smmu_write_ctx_desc(smmu_domain, mm->pasid, &quiet_cd);
+ spin_lock_irqsave(&smmu_domain->devices_lock, flags);
+ list_for_each_entry(master, &smmu_domain->devices, domain_head) {
+ arm_smmu_write_ctx_desc(master->smmu, master->s1_cfg, master,
+ mm->pasid, &quiet_cd);
+ }
+ spin_unlock_irqrestore(&smmu_domain->devices_lock, flags);
arm_smmu_tlb_inv_asid(smmu_domain->smmu, smmu_mn->cd->asid);
arm_smmu_atc_inv_domain(smmu_domain, mm->pasid, 0, 0);
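
For reference, the quiet_cd case is handled inside arm_smmu_write_ctx_desc()
by keeping the descriptor valid while turning table walks off. Roughly,
simplified from the current driver and not part of this diff:

	val = le64_to_cpu(cdptr[0]);	/* live CD, word 0 */
	val |= CTXDESC_CD_0_TCR_EPD0;	/* disable TTB0 walks... */
	/* ...but leave V set, so transactions don't raise C_BAD_CD */
	WRITE_ONCE(cdptr[0], cpu_to_le64(val));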
@@ -248,8 +261,10 @@ arm_smmu_mmu_notifier_get(struct arm_smmu_domain *smmu_domain,
struct mm_struct *mm)
{
int ret;
+ unsigned long flags;
struct arm_smmu_ctx_desc *cd;
struct arm_smmu_mmu_notifier *smmu_mn;
+ struct arm_smmu_master *master;
list_for_each_entry(smmu_mn, &smmu_domain->mmu_notifiers, list) {
if (smmu_mn->mn.mm == mm) {
@@ -279,7 +294,14 @@ arm_smmu_mmu_notifier_get(struct arm_smmu_domain *smmu_domain,
goto err_free_cd;
}
- ret = arm_smmu_write_ctx_desc(smmu_domain, mm->pasid, cd);
+ spin_lock_irqsave(&smmu_domain->devices_lock, flags);
+ list_for_each_entry(master, &smmu_domain->devices, domain_head) {
+		ret = arm_smmu_write_ctx_desc(master->smmu, master->s1_cfg,
+					      master, mm->pasid, cd);
+		if (ret)
+			break;
+	}
+ spin_unlock_irqrestore(&smmu_domain->devices_lock, flags);
if (ret)
goto err_put_notifier;
@@ -296,15 +316,23 @@ arm_smmu_mmu_notifier_get(struct arm_smmu_domain *smmu_domain,
static void arm_smmu_mmu_notifier_put(struct arm_smmu_mmu_notifier *smmu_mn)
{
+ unsigned long flags;
struct mm_struct *mm = smmu_mn->mn.mm;
struct arm_smmu_ctx_desc *cd = smmu_mn->cd;
+ struct arm_smmu_master *master;
struct arm_smmu_domain *smmu_domain = smmu_mn->domain;
if (!refcount_dec_and_test(&smmu_mn->refs))
return;
list_del(&smmu_mn->list);
- arm_smmu_write_ctx_desc(smmu_domain, mm->pasid, NULL);
+
+ spin_lock_irqsave(&smmu_domain->devices_lock, flags);
+ list_for_each_entry(master, &smmu_domain->devices, domain_head) {
+ arm_smmu_write_ctx_desc(master->smmu, master->s1_cfg, master,
+ mm->pasid, NULL);
+ }
+ spin_unlock_irqrestore(&smmu_domain->devices_lock, flags);
/*
* If we went through clear(), we've already invalidated, and no
@@ -965,14 +965,13 @@ void arm_smmu_tlb_inv_asid(struct arm_smmu_device *smmu, u16 asid)
arm_smmu_cmdq_issue_cmd_with_sync(smmu, &cmd);
}
-static void arm_smmu_sync_cd(struct arm_smmu_domain *smmu_domain,
+/* master may be NULL */
+static void arm_smmu_sync_cd(struct arm_smmu_master *master,
int ssid, bool leaf)
{
size_t i;
- unsigned long flags;
- struct arm_smmu_master *master;
struct arm_smmu_cmdq_batch cmds;
- struct arm_smmu_device *smmu = smmu_domain->smmu;
+ struct arm_smmu_device *smmu;
struct arm_smmu_cmdq_ent cmd = {
.opcode = CMDQ_OP_CFGI_CD,
.cfgi = {
@@ -981,16 +980,15 @@ static void arm_smmu_sync_cd(struct arm_smmu_domain *smmu_domain,
},
};
- cmds.num = 0;
+ if (!master)
+ return;
- spin_lock_irqsave(&smmu_domain->devices_lock, flags);
- list_for_each_entry(master, &smmu_domain->devices, domain_head) {
- for (i = 0; i < master->num_streams; i++) {
- cmd.cfgi.sid = master->streams[i].id;
- arm_smmu_cmdq_batch_add(smmu, &cmds, &cmd);
- }
+ smmu = master->smmu;
+ cmds.num = 0;
+ for (i = 0; i < master->num_streams; i++) {
+ cmd.cfgi.sid = master->streams[i].id;
+ arm_smmu_cmdq_batch_add(smmu, &cmds, &cmd);
}
- spin_unlock_irqrestore(&smmu_domain->devices_lock, flags);
arm_smmu_cmdq_batch_submit(smmu, &cmds);
}
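
The NULL-master case lets call sites that operate on a CD table no STE points
at read naturally. The intended contract, illustrated (these two calls are
examples, not additions to the patch):

	/* CD table not yet (or no longer) referenced by any STE: no-op */
	arm_smmu_sync_cd(NULL, ssid, true);

	/* table is live behind this master's STEs: CFGI_CD per StreamID */
	arm_smmu_sync_cd(master, ssid, true);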
@@ -1020,16 +1018,18 @@ static void arm_smmu_write_cd_l1_desc(__le64 *dst,
WRITE_ONCE(*dst, cpu_to_le64(val));
}
-static __le64 *arm_smmu_get_cd_ptr(struct arm_smmu_domain *smmu_domain,
+/* master may be NULL */
+static __le64 *arm_smmu_get_cd_ptr(struct arm_smmu_device *smmu,
+ struct arm_smmu_s1_cfg *s1_cfg,
+ struct arm_smmu_master *master,
u32 ssid)
{
__le64 *l1ptr;
unsigned int idx;
struct arm_smmu_l1_ctx_desc *l1_desc;
- struct arm_smmu_device *smmu = smmu_domain->smmu;
- struct arm_smmu_ctx_desc_cfg *cdcfg = &smmu_domain->s1_cfg.cdcfg;
+ struct arm_smmu_ctx_desc_cfg *cdcfg = &s1_cfg->cdcfg;
- if (smmu_domain->s1_cfg.s1fmt == STRTAB_STE_0_S1FMT_LINEAR)
+ if (s1_cfg->s1fmt == STRTAB_STE_0_S1FMT_LINEAR)
return cdcfg->cdtab + ssid * CTXDESC_CD_DWORDS;
idx = ssid >> CTXDESC_SPLIT;
@@ -1041,13 +1041,21 @@ static __le64 *arm_smmu_get_cd_ptr(struct arm_smmu_domain *smmu_domain,
l1ptr = cdcfg->cdtab + idx * CTXDESC_L1_DESC_DWORDS;
arm_smmu_write_cd_l1_desc(l1ptr, l1_desc);
/* An invalid L1CD can be cached */
- arm_smmu_sync_cd(smmu_domain, ssid, false);
+ arm_smmu_sync_cd(master, ssid, false);
}
idx = ssid & (CTXDESC_L2_ENTRIES - 1);
return l1_desc->l2ptr + idx * CTXDESC_CD_DWORDS;
}
-int arm_smmu_write_ctx_desc(struct arm_smmu_domain *smmu_domain, int ssid,
+/*
+ * master must be provided if a CD sync is required, but may be NULL otherwise
+ * (such as when the CD table isn't inserted into the STE yet, or is about to
+ * be detached).
+ */
+int arm_smmu_write_ctx_desc(struct arm_smmu_device *smmu,
+ struct arm_smmu_s1_cfg *s1_cfg,
+ struct arm_smmu_master *master,
+ int ssid,
struct arm_smmu_ctx_desc *cd)
{
/*
@@ -1065,10 +1073,10 @@ int arm_smmu_write_ctx_desc(struct arm_smmu_domain *smmu_domain, int ssid,
bool cd_live;
__le64 *cdptr;
- if (WARN_ON(ssid >= (1 << smmu_domain->s1_cfg.s1cdmax)))
+ if (WARN_ON(ssid >= (1 << s1_cfg->s1cdmax)))
return -E2BIG;
- cdptr = arm_smmu_get_cd_ptr(smmu_domain, ssid);
+ cdptr = arm_smmu_get_cd_ptr(smmu, s1_cfg, master, ssid);
if (!cdptr)
return -ENOMEM;
@@ -1092,11 +1100,11 @@ int arm_smmu_write_ctx_desc(struct arm_smmu_domain *smmu_domain, int ssid,
cdptr[3] = cpu_to_le64(cd->mair);
/*
- * STE is live, and the SMMU might read dwords of this CD in any
- * order. Ensure that it observes valid values before reading
- * V=1.
+ * STE may be live, and the SMMU might read dwords of this CD
+ * in any order. Ensure that it observes valid values before
+ * reading V=1.
*/
- arm_smmu_sync_cd(smmu_domain, ssid, true);
+ arm_smmu_sync_cd(master, ssid, true);
val = cd->tcr |
#ifdef __BIG_ENDIAN
@@ -1108,7 +1116,7 @@ int arm_smmu_write_ctx_desc(struct arm_smmu_domain *smmu_domain, int ssid,
FIELD_PREP(CTXDESC_CD_0_ASID, cd->asid) |
CTXDESC_CD_0_V;
- if (smmu_domain->stall_enabled)
+ if (s1_cfg->stall_enabled)
val |= CTXDESC_CD_0_S;
}
@@ -1122,7 +1130,7 @@ int arm_smmu_write_ctx_desc(struct arm_smmu_domain *smmu_domain, int ssid,
* without first making the structure invalid.
*/
WRITE_ONCE(cdptr[0], cpu_to_le64(val));
- arm_smmu_sync_cd(smmu_domain, ssid, true);
+ arm_smmu_sync_cd(master, ssid, true);
return 0;
}
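
Condensing the function above, the two syncs enforce the CD update protocol
in miniature (a restatement of the code, not new behaviour):

	/* 1. Fill in everything except the valid bit */
	cdptr[1] = cpu_to_le64(cd->ttbr & CTXDESC_CD_1_TTB0_MASK);
	cdptr[2] = 0;
	cdptr[3] = cpu_to_le64(cd->mair);

	/* 2. Ensure the SMMU observes these words before it can see V=1 */
	arm_smmu_sync_cd(master, ssid, true);

	/* 3. Publish word 0 atomically, then flush cached copies */
	WRITE_ONCE(cdptr[0], cpu_to_le64(val));
	arm_smmu_sync_cd(master, ssid, true);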
@@ -1136,6 +1144,7 @@ static int arm_smmu_init_s1_cfg(struct arm_smmu_master *master,
struct arm_smmu_ctx_desc_cfg *cdcfg = &cfg->cdcfg;
cfg->s1cdmax = master->ssid_bits;
+ cfg->stall_enabled = master->stall_enabled;
max_contexts = 1 << cfg->s1cdmax;
if (!(smmu->features & ARM_SMMU_FEAT_2_LVL_CDTAB) ||
@@ -2093,8 +2102,6 @@ static int arm_smmu_domain_finalise_s1(struct arm_smmu_domain *smmu_domain,
if (ret)
goto out_unlock;
- smmu_domain->stall_enabled = master->stall_enabled;
-
ret = arm_smmu_init_s1_cfg(master, cfg);
if (ret)
goto out_free_asid;
@@ -2110,12 +2117,9 @@ static int arm_smmu_domain_finalise_s1(struct arm_smmu_domain *smmu_domain,
CTXDESC_CD_0_TCR_EPD1 | CTXDESC_CD_0_AA64;
cd->mair = pgtbl_cfg->arm_lpae_s1_cfg.mair;
- /*
- * Note that this will end up calling arm_smmu_sync_cd() before
- * the master has been added to the devices list for this domain.
- * This isn't an issue because the STE hasn't been installed yet.
- */
- ret = arm_smmu_write_ctx_desc(smmu_domain, 0, cd);
+ ret = arm_smmu_write_ctx_desc(smmu, cfg,
+				      NULL /* Not attached to a master yet */,
+ 0, cd);
if (ret)
goto out_free_cd_tables;
@@ -2386,6 +2390,11 @@ static void arm_smmu_detach_dev(struct arm_smmu_master *master)
master->domain = NULL;
master->ats_enabled = false;
+ if (master->s1_cfg)
+ arm_smmu_write_ctx_desc(
+ master->smmu, master->s1_cfg,
+			NULL /* Skip sync since we detach the CD table next */,
+ 0, NULL);
master->s1_cfg = NULL;
master->s2_cfg = NULL;
arm_smmu_install_ste_for_dev(master);
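
Passing a NULL master above relies on what happens next: rewriting the STE
via arm_smmu_install_ste_for_dev() issues its own CFGI_STE, which supersedes
a per-CD sync. The detach sequence, restated:

	/* 1. Zap the CD without a CFGI_CD sync (NULL master) */
	arm_smmu_write_ctx_desc(master->smmu, master->s1_cfg, NULL, 0, NULL);
	master->s1_cfg = NULL;

	/* 2. The STE rewrite invalidates cached config for the stream */
	arm_smmu_install_ste_for_dev(master);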
@@ -2435,7 +2444,8 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
ret = -EINVAL;
goto out_unlock;
} else if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1 &&
- smmu_domain->stall_enabled != master->stall_enabled) {
+ smmu_domain->s1_cfg.stall_enabled !=
+ master->stall_enabled) {
ret = -EINVAL;
goto out_unlock;
}
@@ -595,6 +595,7 @@ struct arm_smmu_s1_cfg {
struct arm_smmu_ctx_desc_cfg cdcfg;
u8 s1fmt;
u8 s1cdmax;
+ bool stall_enabled;
};
struct arm_smmu_s2_cfg {
@@ -713,7 +714,6 @@ struct arm_smmu_domain {
struct mutex init_mutex; /* Protects smmu pointer */
struct io_pgtable_ops *pgtbl_ops;
- bool stall_enabled;
atomic_t nr_ats_masters;
enum arm_smmu_domain_stage stage;
@@ -742,7 +742,9 @@ extern struct xarray arm_smmu_asid_xa;
extern struct mutex arm_smmu_asid_lock;
extern struct arm_smmu_ctx_desc quiet_cd;
-int arm_smmu_write_ctx_desc(struct arm_smmu_domain *smmu_domain, int ssid,
+int arm_smmu_write_ctx_desc(struct arm_smmu_device *smmu,
+ struct arm_smmu_s1_cfg *s1_cfg,
+			    struct arm_smmu_master *master, int ssid,
struct arm_smmu_ctx_desc *cd);
void arm_smmu_tlb_inv_asid(struct arm_smmu_device *smmu, u16 asid);
void arm_smmu_tlb_inv_range_asid(unsigned long iova, size_t size, int asid,