@@ -388,6 +388,11 @@ static inline int domain_type_is_si(struct dmar_domain *domain)
return domain->domain.type == IOMMU_DOMAIN_IDENTITY;
}
+static inline int domain_type_is_sva(struct dmar_domain *domain)
+{
+ return domain->domain.type == IOMMU_DOMAIN_SVA;
+}
+
static inline int domain_pfn_supported(struct dmar_domain *domain,
unsigned long pfn)
{
@@ -562,6 +567,14 @@ static unsigned long domain_super_pgsize_bitmap(struct dmar_domain *domain)
/* Some capabilities may be different across iommus */
static void domain_update_iommu_cap(struct dmar_domain *domain)
{
+	/*
+	 * Caps for maintaining the I/O page table are unnecessary for an SVA
+	 * domain: the device shares the processor's virtual page table, and
+	 * the processor is the owner of that page table.
+	 */
+ if (domain_type_is_sva(domain))
+ return;
+
domain_update_iommu_coherency(domain);
domain->iommu_superpage = domain_update_iommu_superpage(domain, NULL);
@@ -1798,14 +1811,18 @@ static int domain_attach_iommu(struct dmar_domain *domain,
return 0;
}
- ndomains = cap_ndoms(iommu->cap);
- num = find_first_zero_bit(iommu->domain_ids, ndomains);
- if (num >= ndomains) {
- pr_err("%s: No free domain ids\n", iommu->name);
- goto err_unlock;
+ if (!domain_type_is_sva(domain)) {
+ ndomains = cap_ndoms(iommu->cap);
+ num = find_first_zero_bit(iommu->domain_ids, ndomains);
+ if (num >= ndomains) {
+ pr_err("%s: No free domain ids\n", iommu->name);
+ goto err_unlock;
+ }
+ set_bit(num, iommu->domain_ids);
+ } else {
+ num = FLPT_DEFAULT_DID;
}
- set_bit(num, iommu->domain_ids);
info->refcnt = 1;
info->did = num;
info->iommu = iommu;
@@ -1821,7 +1838,8 @@ static int domain_attach_iommu(struct dmar_domain *domain,
return 0;
err_clear:
- clear_bit(info->did, iommu->domain_ids);
+ if (!domain_type_is_sva(domain))
+ clear_bit(info->did, iommu->domain_ids);
err_unlock:
spin_unlock(&iommu->lock);
kfree(info);
@@ -4064,6 +4082,14 @@ static int prepare_domain_attach_device(struct iommu_domain *domain,
struct intel_iommu *iommu;
int addr_width;
+	/*
+	 * In the SVA case there is no need to check the fields used for
+	 * maintaining the I/O page table: the device shares the processor's
+	 * virtual page table, and the processor is the owner of that table.
+	 */
+ if (domain_type_is_sva(dmar_domain))
+ return 0;
+
iommu = device_to_iommu(dev, NULL, NULL);
if (!iommu)
return -ENODEV;
@@ -4685,7 +4711,7 @@ static void intel_iommu_remove_dev_pasid(struct device *dev, ioasid_t pasid)
* notification. Before consolidating that code into iommu core, let
* the intel sva code handle it.
*/
- if (domain->type == IOMMU_DOMAIN_SVA) {
+ if (domain_type_is_sva(dmar_domain)) {
intel_svm_remove_dev_pasid(dev, pasid);
goto out_tear_down;
}
@@ -4709,7 +4735,7 @@ static void intel_iommu_remove_dev_pasid(struct device *dev, ioasid_t pasid)
intel_drain_pasid_prq(dev, pasid);
}
-static int intel_iommu_set_dev_pasid(struct iommu_domain *domain,
+int intel_iommu_set_dev_pasid(struct iommu_domain *domain,
struct device *dev, ioasid_t pasid)
{
struct device_domain_info *info = dev_iommu_priv_get(dev);
@@ -4733,6 +4759,16 @@ static int intel_iommu_set_dev_pasid(struct iommu_domain *domain,
if (!dev_pasid)
return -ENOMEM;
+ if (domain_type_is_sva(dmar_domain)) {
+ dev_pasid->sid = PCI_DEVID(info->bus, info->devfn);
+ init_rcu_head(&dev_pasid->rcu);
+ if (info->ats_enabled) {
+ dev_pasid->qdep = info->ats_qdep;
+ if (dev_pasid->qdep >= QI_DEV_EIOTLB_MAX_INVS)
+ dev_pasid->qdep = 0;
+ }
+ }
+
ret = domain_attach_iommu(dmar_domain, iommu);
if (ret)
goto out_free;
@@ -4743,6 +4779,10 @@ static int intel_iommu_set_dev_pasid(struct iommu_domain *domain,
else if (dmar_domain->use_first_level)
ret = domain_setup_first_level(iommu, dmar_domain,
dev, pasid);
+ else if (domain_type_is_sva(dmar_domain))
+ ret = intel_pasid_setup_first_level(iommu, dev,
+ domain->mm->pgd, pasid, FLPT_DEFAULT_DID,
+ cpu_feature_enabled(X86_FEATURE_LA57) ? PASID_FLAG_FL5LP : 0);
else
ret = intel_pasid_setup_second_level(iommu, dmar_domain,
dev, pasid);
@@ -850,6 +850,8 @@ static inline struct intel_iommu *dev_to_intel_iommu(struct device *dev)
return container_of(iommu_dev, struct intel_iommu, iommu);
}
+int intel_iommu_set_dev_pasid(struct iommu_domain *domain,
+ struct device *dev, ioasid_t pasid);
#ifdef CONFIG_INTEL_IOMMU_SVM
void intel_svm_check(struct intel_iommu *iommu);