@@ -2181,6 +2181,76 @@ static int arm_smmu_domain_finalise_s2(struct arm_smmu_domain *smmu_domain,
return 0;
}
+/*
+ * arm_smmu_domain_finalise_nested - Validate a user-provided STE and capture
+ * its stage-1 configuration for a nested (IOMMU_DOMAIN_NESTED) domain.
+ *
+ * On an unsupported or out-of-range STE, a C_BAD_STE event record is
+ * (optionally) copied back to userspace via user_cfg->out_event_uptr and
+ * -EINVAL is returned. -EFAULT is returned on a failed user copy.
+ */
+static int
+arm_smmu_domain_finalise_nested(struct iommu_domain *domain,
+ struct arm_smmu_master *master,
+ const struct iommu_hwpt_arm_smmuv3 *user_cfg)
+{
+ struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
+ struct arm_smmu_device *smmu = smmu_domain->smmu;
+ bool feat_2lvl_cdtab = smmu->features & ARM_SMMU_FEAT_2_LVL_CDTAB;
+ bool feat_s1 = smmu->features & ARM_SMMU_FEAT_TRANS_S1;
+ bool feat_s2 = smmu->features & ARM_SMMU_FEAT_TRANS_S2;
+ size_t event_len = EVTQ_ENT_DWORDS * sizeof(u64);
+ size_t ste_len = STRTAB_STE_DWORDS * sizeof(u64);
+ struct device *dev = master->dev;
+ void __user *event_user = NULL;
+ u64 event[EVTQ_ENT_DWORDS];
+ u64 ste[STRTAB_STE_DWORDS];
+ u8 s1dss, s1fmt, s1cdmax;
+ u64 s1ctxptr;
+
+ /*
+ * Event reporting is best-effort: a mismatched event_len silently
+ * disables it instead of failing the ioctl. NOTE(review): confirm
+ * this is intentional vs. returning -EINVAL on a bad event_len.
+ */
+ if (user_cfg->out_event_uptr && user_cfg->event_len == event_len)
+ event_user = u64_to_user_ptr(user_cfg->out_event_uptr);
+ event[0] = FIELD_PREP(EVTQ_0_ID, EVT_ID_BAD_STE);
+
+ /* Nesting requires both translation stages in hardware. */
+ if (!feat_s1 || !feat_s2) {
+ dev_dbg(dev, "does not implement two stages\n");
+ if (event_user && copy_to_user(event_user, event, event_len))
+ return -EFAULT;
+ return -EINVAL;
+ }
+
+ if (!user_cfg->ste_uptr || user_cfg->ste_len != ste_len)
+ return -EINVAL;
+ if (copy_from_user(ste, u64_to_user_ptr(user_cfg->ste_uptr), ste_len))
+ return -EFAULT;
+
+ s1dss = FIELD_GET(STRTAB_STE_1_S1DSS, ste[1]);
+
+ s1fmt = FIELD_GET(STRTAB_STE_0_S1FMT, ste[0]);
+ if (!feat_2lvl_cdtab && s1fmt != STRTAB_STE_0_S1FMT_LINEAR) {
+ dev_dbg(dev, "unsupported format (0x%x)\n", s1fmt);
+ if (event_user && copy_to_user(event_user, event, event_len))
+ return -EFAULT;
+ return -EINVAL;
+ }
+
+ s1cdmax = FIELD_GET(STRTAB_STE_0_S1CDMAX, ste[0]);
+ if (s1cdmax > master->ssid_bits) {
+ dev_dbg(dev, "s1cdmax (%d-bit) is out of range (%d-bit)\n",
+ s1cdmax, master->ssid_bits);
+ if (event_user && copy_to_user(event_user, event, event_len))
+ return -EFAULT;
+ return -EINVAL;
+ }
+
+ s1ctxptr = ste[0] & STRTAB_STE_0_S1CTXPTR_MASK;
+ /*
+ * An ias-bit address space spans bits [ias-1:0]; GENMASK_ULL(ias, 0)
+ * would cover ias+1 bits and let an address with bit 'ias' set slip
+ * through the range check.
+ */
+ if (s1ctxptr & ~GENMASK_ULL(smmu->ias - 1, 0)) {
+ dev_dbg(dev, "s1ctxptr (0x%llx) is out of range (%lu-bit)\n",
+ s1ctxptr, smmu->ias);
+ if (event_user && copy_to_user(event_user, event, event_len))
+ return -EFAULT;
+ return -EINVAL;
+ }
+
+ smmu_domain->s1_cfg.s1dss = s1dss;
+ smmu_domain->s1_cfg.s1fmt = s1fmt;
+ smmu_domain->s1_cfg.s1cdmax = s1cdmax;
+ smmu_domain->s1_cfg.cdcfg.cdtab_dma = s1ctxptr;
+ return 0;
+}
+
static int arm_smmu_domain_finalise(struct iommu_domain *domain,
struct arm_smmu_master *master,
const struct iommu_hwpt_arm_smmuv3 *user_cfg)
@@ -2202,6 +2272,11 @@ static int arm_smmu_domain_finalise(struct iommu_domain *domain,
return 0;
}
+ /* A nested domain is always stage-1 (its stage 2 comes from the parent). */
+ if (domain->type == IOMMU_DOMAIN_NESTED) {
+ smmu_domain->stage = ARM_SMMU_DOMAIN_S1;
+ return arm_smmu_domain_finalise_nested(domain, master, user_cfg);
+ }
+
if (user_cfg_s2 && !(smmu->features & ARM_SMMU_FEAT_TRANS_S2))
return -EINVAL;
if (user_cfg_s2)
@@ -2898,9 +2973,16 @@ static void arm_smmu_remove_dev_pasid(struct device *dev, ioasid_t pasid)
arm_smmu_sva_remove_dev_pasid(domain, dev, pasid);
}
+/*
+ * Ops for IOMMU_DOMAIN_NESTED domains: only attach and free are implemented.
+ * There are no map/unmap ops — the stage-1 CD table pointer comes from the
+ * user-provided STE (see arm_smmu_domain_finalise_nested), so the kernel does
+ * not manage these page tables itself.
+ */
+static const struct iommu_domain_ops arm_smmu_nested_domain_ops = {
+ .attach_dev = arm_smmu_attach_dev,
+ .free = arm_smmu_domain_free,
+};
+
/**
* __arm_smmu_domain_alloc - Allocate a customizable iommu_domain
* @type: Type of the new iommu_domain, in form of IOMMU_DOMAIN_*
+ * @s2: Optional pointer to a stage-2 domain, used when allocating a stage-1
+ * nested domain; must be paired with valid user_cfg data to configure it.
* @master: Optional master pointer for the allocation. If given, this will be
* used to call arm_smmu_domain_finalise at the end of the allocation.
* Otherwise, arm_smmu_domain_finalise will be done when the domain is
@@ -2918,6 +3000,7 @@ static void arm_smmu_remove_dev_pasid(struct device *dev, ioasid_t pasid)
*/
static struct iommu_domain *
__arm_smmu_domain_alloc(unsigned type,
+ struct arm_smmu_domain *s2,
struct arm_smmu_master *master,
const struct iommu_hwpt_arm_smmuv3 *user_cfg)
{
@@ -2929,11 +3012,15 @@ __arm_smmu_domain_alloc(unsigned type,
return arm_smmu_sva_domain_alloc();
if (type != IOMMU_DOMAIN_UNMANAGED &&
+ type != IOMMU_DOMAIN_NESTED &&
type != IOMMU_DOMAIN_DMA &&
type != IOMMU_DOMAIN_DMA_FQ &&
type != IOMMU_DOMAIN_IDENTITY)
return NULL;
+ if (s2 && (s2->stage != ARM_SMMU_DOMAIN_S2 || !user_cfg))
+ return NULL;
+
/*
* Allocate the domain and initialise some of its data structures.
* We can't really finalise the domain unless a master is given.
@@ -2941,10 +3028,14 @@ __arm_smmu_domain_alloc(unsigned type,
smmu_domain = kzalloc(sizeof(*smmu_domain), GFP_KERNEL);
if (!smmu_domain)
return NULL;
+ smmu_domain->s2 = s2;
domain = &smmu_domain->domain;
domain->type = type;
- domain->ops = arm_smmu_ops.default_domain_ops;
+ if (s2)
+ domain->ops = &arm_smmu_nested_domain_ops;
+ else
+ domain->ops = arm_smmu_ops.default_domain_ops;
mutex_init(&smmu_domain->init_mutex);
INIT_LIST_HEAD(&smmu_domain->devices);
@@ -2965,7 +3056,7 @@ __arm_smmu_domain_alloc(unsigned type,
static struct iommu_domain *arm_smmu_domain_alloc(unsigned type)
{
- return __arm_smmu_domain_alloc(type, NULL, NULL);
+ return __arm_smmu_domain_alloc(type, NULL, NULL, NULL);
}
static struct iommu_domain *
@@ -2975,8 +3066,23 @@ arm_smmu_domain_alloc_user(struct device *dev, struct iommu_domain *parent,
const struct iommu_hwpt_arm_smmuv3 *user_cfg = user_data;
struct arm_smmu_master *master = dev_iommu_priv_get(dev);
unsigned type = IOMMU_DOMAIN_UNMANAGED;
+ struct arm_smmu_domain *s2 = NULL;
+
+ /*
+ * The new domain is IOMMU_DOMAIN_UNMANAGED by default; it becomes
+ * IOMMU_DOMAIN_NESTED only when a valid parent domain is given. The
+ * "stage" of an IOMMU_DOMAIN_UNMANAGED domain, by contrast, is decided
+ * later in arm_smmu_domain_finalise, which reads user_cfg->flags and
+ * sets the stage accordingly.
+ */
+ if (parent) {
+ if (parent->ops != arm_smmu_ops.default_domain_ops)
+ return NULL;
+ type = IOMMU_DOMAIN_NESTED;
+ s2 = to_smmu_domain(parent);
+ }
- return __arm_smmu_domain_alloc(type, master, user_cfg);
+ return __arm_smmu_domain_alloc(type, s2, master, user_cfg);
}
static const size_t arm_smmu_domain_user_data_len[] = {
@@ -380,6 +380,7 @@
#define EVTQ_0_ID GENMASK_ULL(7, 0)
+#define EVT_ID_BAD_STE 0x4
#define EVT_ID_TRANSLATION_FAULT 0x10
#define EVT_ID_ADDR_SIZE_FAULT 0x11
#define EVT_ID_ACCESS_FAULT 0x12