@@ -722,6 +722,8 @@ struct dev_pasid_info {
struct list_head link_domain; /* link to domain siblings */
struct device *dev;
ioasid_t pasid;
+ struct rcu_head rcu;
+ u16 sid, qdep;
};

static inline void __iommu_flush_cache(
@@ -859,15 +861,6 @@ struct iommu_domain *intel_svm_domain_alloc(void);
void intel_svm_remove_dev_pasid(struct device *dev, ioasid_t pasid);
void intel_drain_pasid_prq(struct device *dev, u32 pasid);

-struct intel_svm_dev {
- struct list_head list;
- struct rcu_head rcu;
- struct device *dev;
- struct intel_iommu *iommu;
- u16 did;
- u16 sid, qdep;
-};
-
struct intel_svm {
struct mmu_notifier notifier;
struct mm_struct *mm;
--- a/drivers/iommu/intel/svm.c
+++ b/drivers/iommu/intel/svm.c
@@ -44,21 +44,21 @@ static void *pasid_private_find(ioasid_t pasid)
return xa_load(&pasid_private_array, pasid);
}

-static struct intel_svm_dev *
-svm_lookup_device_by_dev(struct intel_svm *svm, struct device *dev)
+static struct dev_pasid_info *
+svm_lookup_dev_pasid_info_by_dev(struct intel_svm *svm, struct device *dev)
{
- struct intel_svm_dev *sdev = NULL, *t;
+ struct dev_pasid_info *dev_pasid = NULL, *t;
rcu_read_lock();
- list_for_each_entry_rcu(t, &svm->devs, list) {
+ list_for_each_entry_rcu(t, &svm->devs, link_domain) {
if (t->dev == dev) {
- sdev = t;
+ dev_pasid = t;
break;
}
}
rcu_read_unlock();
- return sdev;
+ return dev_pasid;
}

int intel_svm_enable_prq(struct intel_iommu *iommu)
@@ -170,27 +170,28 @@ void intel_svm_check(struct intel_iommu *iommu)
}

static void __flush_svm_range_dev(struct intel_svm *svm,
- struct intel_svm_dev *sdev,
+ struct dev_pasid_info *dev_pasid,
unsigned long address,
unsigned long pages, int ih)
{
- struct device_domain_info *info = dev_iommu_priv_get(sdev->dev);
+ struct device_domain_info *info = dev_iommu_priv_get(dev_pasid->dev);
+ struct intel_iommu *iommu = dev_to_intel_iommu(dev_pasid->dev);
if (WARN_ON(!pages))
return;
- qi_flush_piotlb(sdev->iommu, sdev->did, svm->pasid, address, pages, ih);
+ qi_flush_piotlb(iommu, FLPT_DEFAULT_DID, svm->pasid, address, pages, ih);
if (info->ats_enabled) {
- qi_flush_dev_iotlb_pasid(sdev->iommu, sdev->sid, info->pfsid,
- svm->pasid, sdev->qdep, address,
+ qi_flush_dev_iotlb_pasid(iommu, dev_pasid->sid, info->pfsid,
+ svm->pasid, dev_pasid->qdep, address,
order_base_2(pages));
quirk_extra_dev_tlb_flush(info, address, order_base_2(pages),
- svm->pasid, sdev->qdep);
+ svm->pasid, dev_pasid->qdep);
}
}

static void intel_flush_svm_range_dev(struct intel_svm *svm,
- struct intel_svm_dev *sdev,
+ struct dev_pasid_info *dev_pasid,
unsigned long address,
unsigned long pages, int ih)
{
@@ -200,7 +201,7 @@ static void intel_flush_svm_range_dev(struct intel_svm *svm,
unsigned long end = ALIGN(address + (pages << VTD_PAGE_SHIFT), align);
while (start < end) {
- __flush_svm_range_dev(svm, sdev, start, align >> VTD_PAGE_SHIFT, ih);
+ __flush_svm_range_dev(svm, dev_pasid, start, align >> VTD_PAGE_SHIFT, ih);
start += align;
}
}
@@ -208,11 +209,11 @@ static void intel_flush_svm_range_dev(struct intel_svm *svm,
static void intel_flush_svm_range(struct intel_svm *svm, unsigned long address,
unsigned long pages, int ih)
{
- struct intel_svm_dev *sdev;
+ struct dev_pasid_info *dev_pasid;
rcu_read_lock();
- list_for_each_entry_rcu(sdev, &svm->devs, list)
- intel_flush_svm_range_dev(svm, sdev, address, pages, ih);
+ list_for_each_entry_rcu(dev_pasid, &svm->devs, link_domain)
+ intel_flush_svm_range_dev(svm, dev_pasid, address, pages, ih);
rcu_read_unlock();
}
@@ -230,7 +231,8 @@ static void intel_arch_invalidate_secondary_tlbs(struct mmu_notifier *mn,
static void intel_mm_release(struct mmu_notifier *mn, struct mm_struct *mm)
{
struct intel_svm *svm = container_of(mn, struct intel_svm, notifier);
- struct intel_svm_dev *sdev;
+ struct dev_pasid_info *dev_pasid;
+ struct intel_iommu *iommu;
/* This might end up being called from exit_mmap(), *before* the page
* tables are cleared. And __mmu_notifier_release() will delete us from
@@ -245,9 +247,11 @@ static void intel_mm_release(struct mmu_notifier *mn, struct mm_struct *mm)
* *has* to handle gracefully without affecting other processes.
*/
rcu_read_lock();
- list_for_each_entry_rcu(sdev, &svm->devs, list)
- intel_pasid_tear_down_entry(sdev->iommu, sdev->dev,
+ list_for_each_entry_rcu(dev_pasid, &svm->devs, link_domain) {
+ iommu = dev_to_intel_iommu(dev_pasid->dev);
+ intel_pasid_tear_down_entry(iommu, dev_pasid->dev,
svm->pasid, true);
+ }
rcu_read_unlock();
}
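Aside (illustration only, not part of the diff): the rcu_head that the first hunk adds to struct dev_pasid_info exists for the lockless list usage visible here. Readers walk svm->devs under rcu_read_lock(), while the removal hunk further down unlinks entries with list_del_rcu() and frees them with kfree_rcu(). A minimal kernel-C sketch of that pattern, using hypothetical demo_* names that do not appear in the patch:

#include <linux/device.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

/* Illustration only: hypothetical entry type mirroring the dev/link/rcu trio. */
struct demo_entry {
        struct list_head link;          /* linked into demo_list */
        struct device *dev;
        struct rcu_head rcu;            /* lets the free be deferred past readers */
};

static LIST_HEAD(demo_list);            /* writers are assumed to serialize externally */

/* Reader side: traversal takes no lock on the list itself. */
static struct demo_entry *demo_lookup(struct device *dev)
{
        struct demo_entry *e, *found = NULL;

        rcu_read_lock();
        list_for_each_entry_rcu(e, &demo_list, link) {
                if (e->dev == dev) {
                        found = e;
                        break;
                }
        }
        rcu_read_unlock();

        return found;
}

/* Writer side: unlink now, free only after an RCU grace period. */
static void demo_remove(struct demo_entry *e)
{
        list_del_rcu(&e->link);
        kfree_rcu(e, rcu);
}

kfree_rcu() hands the memory back only after every reader that started before the unlink has left its read-side critical section, so a concurrent demo_lookup() never dereferences freed memory. As with the lookup helper in this patch, a caller that keeps using the returned pointer afterwards still needs some external serialization against removal.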
@@ -257,11 +261,11 @@ static const struct mmu_notifier_ops intel_mmuops = {
.arch_invalidate_secondary_tlbs = intel_arch_invalidate_secondary_tlbs,
};

-static int pasid_to_svm_sdev(struct device *dev, unsigned int pasid,
+static int pasid_to_dev_pasid_info(struct device *dev, unsigned int pasid,
struct intel_svm **rsvm,
- struct intel_svm_dev **rsdev)
+ struct dev_pasid_info **rsdev_pasid_info)
{
- struct intel_svm_dev *sdev = NULL;
+ struct dev_pasid_info *dev_pasid = NULL;
struct intel_svm *svm;
if (pasid == IOMMU_PASID_INVALID || pasid >= PASID_MAX)
@@ -280,11 +284,11 @@ static int pasid_to_svm_sdev(struct device *dev, unsigned int pasid,
*/
if (WARN_ON(list_empty(&svm->devs)))
return -EINVAL;
- sdev = svm_lookup_device_by_dev(svm, dev);
+ dev_pasid = svm_lookup_dev_pasid_info_by_dev(svm, dev);
out:
*rsvm = svm;
- *rsdev = sdev;
+ *rsdev_pasid_info = dev_pasid;
return 0;
}
@@ -295,7 +299,7 @@ static int intel_svm_set_dev_pasid(struct iommu_domain *domain,
struct device_domain_info *info = dev_iommu_priv_get(dev);
struct intel_iommu *iommu = info->iommu;
struct mm_struct *mm = domain->mm;
- struct intel_svm_dev *sdev;
+ struct dev_pasid_info *dev_pasid;
struct intel_svm *svm;
unsigned long sflags;
int ret = 0;
@@ -325,20 +329,18 @@ static int intel_svm_set_dev_pasid(struct iommu_domain *domain,
}
}
- sdev = kzalloc(sizeof(*sdev), GFP_KERNEL);
- if (!sdev) {
+ dev_pasid = kzalloc(sizeof(*dev_pasid), GFP_KERNEL);
+ if (!dev_pasid) {
ret = -ENOMEM;
goto free_svm;
}
- sdev->dev = dev;
- sdev->iommu = iommu;
- sdev->did = FLPT_DEFAULT_DID;
- sdev->sid = PCI_DEVID(info->bus, info->devfn);
+ dev_pasid->dev = dev;
+ dev_pasid->sid = PCI_DEVID(info->bus, info->devfn);
if (info->ats_enabled) {
- sdev->qdep = info->ats_qdep;
- if (sdev->qdep >= QI_DEV_EIOTLB_MAX_INVS)
- sdev->qdep = 0;
+ dev_pasid->qdep = info->ats_qdep;
+ if (dev_pasid->qdep >= QI_DEV_EIOTLB_MAX_INVS)
+ dev_pasid->qdep = 0;
}
/* Setup the pasid table: */
@@ -346,14 +348,14 @@ static int intel_svm_set_dev_pasid(struct iommu_domain *domain,
ret = intel_pasid_setup_first_level(iommu, dev, mm->pgd, pasid,
FLPT_DEFAULT_DID, sflags);
if (ret)
- goto free_sdev;
+ goto free_dev_pasid;
- list_add_rcu(&sdev->list, &svm->devs);
+ list_add_rcu(&dev_pasid->link_domain, &svm->devs);
return 0;
-free_sdev:
- kfree(sdev);
+free_dev_pasid:
+ kfree(dev_pasid);
free_svm:
if (list_empty(&svm->devs)) {
mmu_notifier_unregister(&svm->notifier, mm);
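Aside (illustration only, not part of the diff): the sid carried over into dev_pasid_info is the device's PCI requester ID in its usual packed form, bus number in bits 15:8 and devfn in bits 7:0, which is what PCI_DEVID() builds from info->bus and info->devfn. A small worked example with made-up numbers:

#include <linux/pci.h>

/* Illustration only: arbitrary example values, nothing from the patch. */
static u16 demo_source_id(void)
{
        u8 bus = 0x3a;                  /* example bus number */
        u8 devfn = PCI_DEVFN(0x1b, 3);  /* slot 0x1b, function 3: (0x1b << 3) | 3 = 0xdb */

        return PCI_DEVID(bus, devfn);   /* (0x3a << 8) | 0xdb = 0x3adb */
}

The qdep field is the ATS invalidate queue depth already cached in device_domain_info; the hunk above keeps the old behaviour of resetting it to 0 when it is at least QI_DEV_EIOTLB_MAX_INVS.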
@@ -366,26 +368,24 @@ static int intel_svm_set_dev_pasid(struct iommu_domain *domain,
void intel_svm_remove_dev_pasid(struct device *dev, u32 pasid)
{
- struct intel_svm_dev *sdev;
+ struct dev_pasid_info *dev_pasid;
struct intel_iommu *iommu;
struct intel_svm *svm;
- struct mm_struct *mm;
iommu = device_to_iommu(dev, NULL, NULL);
if (!iommu)
return;
- if (pasid_to_svm_sdev(dev, pasid, &svm, &sdev))
+ if (pasid_to_dev_pasid_info(dev, pasid, &svm, &dev_pasid))
return;
- mm = svm->mm;
- if (sdev) {
- list_del_rcu(&sdev->list);
- kfree_rcu(sdev, rcu);
+ if (dev_pasid) {
+ list_del_rcu(&dev_pasid->link_domain);
+ kfree_rcu(dev_pasid, rcu);
if (list_empty(&svm->devs)) {
if (svm->notifier.ops)
- mmu_notifier_unregister(&svm->notifier, mm);
+ mmu_notifier_unregister(&svm->notifier, svm->mm);
pasid_private_remove(svm->pasid);
kfree(svm);
}