[RFC,3/6] iommu/amd: Introduce Guest-ID struct amd_iommu_vminfo
Commit Message
The AMD HW-vIOMMU feature requires the IOMMU driver to assign a unique
16-bit Guest ID (GID) to each VM. This ID is used to index into various
data structures when configuring the hardware.

Introduce the amd_iommu_vminfo_hash hashtable, which stores per-VM
configuration and uses the 16-bit GID as the hash key, along with helper
functions to allocate, look up, and free entries.
Signed-off-by: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
---
drivers/iommu/amd/amd_iommu.h | 6 +++
drivers/iommu/amd/amd_iommu_types.h | 6 +++
drivers/iommu/amd/iommu.c | 66 +++++++++++++++++++++++++++++
3 files changed, 78 insertions(+)
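For reviewers, here is a minimal sketch of how a caller is expected to use the
new helpers. The caller, its error handling, and the kzalloc()-based lifetime
of struct amd_iommu_vminfo are illustrative assumptions and not part of this
patch:

static struct amd_iommu_vminfo *example_vm_init(struct amd_iommu *iommu)
{
	struct amd_iommu_vminfo *vminfo;
	int ret;

	vminfo = kzalloc(sizeof(*vminfo), GFP_KERNEL);
	if (!vminfo)
		return ERR_PTR(-ENOMEM);

	/* Allocate a unique 16-bit GID and insert vminfo into the hash. */
	ret = amd_iommu_vminfo_alloc(iommu, vminfo);
	if (ret) {
		kfree(vminfo);
		return ERR_PTR(ret);
	}
	return vminfo;
}

static void example_vm_fini(struct amd_iommu *iommu,
			    struct amd_iommu_vminfo *vminfo)
{
	/* Remove vminfo from the hash and release its GID. */
	amd_iommu_vminfo_free(iommu, vminfo);
	kfree(vminfo);
}

A later lookup by GID, e.g. when servicing a guest request, would use
amd_iommu_get_vminfo(vminfo->gid) to translate the 16-bit GID back into the
per-VM structure.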
Comments
> From: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
> Sent: Wednesday, December 13, 2023 12:02 AM
>
> AMD HW-vIOMMU feature requires IOMMU driver to specify a unique 16-bit
> Guest ID (GID) for each VM. This ID is used to index into various
> data structures for configuring the hardware.
>
> Introduce amd_iommu_vminfo_hash hashtable to store per-vm
> configuration,
> which uses 16-bit GID as a hash key along with helper functions.
>
somehow it's unclear to me whether this series is only for hw
supporting vf or broader hw supporting nested capability. for
the latter case is GID still necessary?
Hi Kevin
On 12/15/2023 2:35 PM, Tian, Kevin wrote:
>> From: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
>> Sent: Wednesday, December 13, 2023 12:02 AM
>>
>> AMD HW-vIOMMU feature requires IOMMU driver to specify a unique 16-bit
>> Guest ID (GID) for each VM. This ID is used to index into various
>> data structures for configuring the hardware.
>>
>> Introduce amd_iommu_vminfo_hash hashtable to store per-vm
>> configuration,
>> which uses 16-bit GID as a hash key along with helper functions.
>>
>
> somehow it's unclear to me whether this series is only for hw
> supporting vf or broader hw supporting nested capability. for
> the latter case is GID still necessary?
I am restructuring the series and might move the GID support to a later
series, when broader hw support for AMD vIOMMU is introduced.
Suravee
On Fri, Jan 05, 2024 at 08:39:56PM +0700, Suthikulpanit, Suravee wrote:
> Hi Kevin
>
> On 12/15/2023 2:35 PM, Tian, Kevin wrote:
> > > From: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
> > > Sent: Wednesday, December 13, 2023 12:02 AM
> > >
> > > AMD HW-vIOMMU feature requires IOMMU driver to specify a unique 16-bit
> > > Guest ID (GID) for each VM. This ID is used to index into various
> > > data structures for configuring the hardware.
> > >
> > > Introduce amd_iommu_vminfo_hash hashtable to store per-vm
> > > configuration,
> > > which uses 16-bit GID as a hash key along with helper functions.
> > >
> >
> > somehow it's unclear to me whether this series is only for hw
> > supporting vf or broader hw supporting nested capability. for
> > the latter case is GID still necessary?
>
> I am restructuring the series and might move the GID support to a later
> series, when broader hw support for AMD vIOMMU is introduced.
I'm hoping you can just skip enabling the viommu features and still
have nesting? That should be OK right? The SW will manage the
invalidations.
I'd like to do ARM and AMD accelerated viommu nesting together since
they are so similar it will help to make the APIs correct.
Jason
On 1/5/2024 9:38 PM, Jason Gunthorpe wrote:
> On Fri, Jan 05, 2024 at 08:39:56PM +0700, Suthikulpanit, Suravee wrote:
>> Hi Kevin
>>
>> On 12/15/2023 2:35 PM, Tian, Kevin wrote:
>>>> From: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
>>>> Sent: Wednesday, December 13, 2023 12:02 AM
>>>>
>>>> AMD HW-vIOMMU feature requires IOMMU driver to specify a unique 16-bit
>>>> Guest ID (GID) for each VM. This ID is used to index into various
>>>> data structures for configuring the hardware.
>>>>
>>>> Introduce amd_iommu_vminfo_hash hashtable to store per-vm
>>>> configuration,
>>>> which uses 16-bit GID as a hash key along with helper functions.
>>>>
>>>
>>> somehow it's unclear to me whether this series is only for hw
>>> supporting vf or broader hw supporting nested capability. for
>>> the latter case is GID still necessary?
>>
>> I am restructuring the series and might move the GID support to a later
>> series, when broader hw support for AMD vIOMMU is introduced.
>
> I'm hoping you can just skip enabling the viommu features and still
> have nesting? That should be OK right? The SW will manage the
> invalidations.
Correct, part 1 only adds support for nested translation (w/o the
HW-vIOMMU feature), so SW would need to manage the invalidations.
Suravee
@@ -182,4 +182,10 @@ void amd_iommu_domain_set_pgtable(struct protection_domain *domain,
struct dev_table_entry *get_dev_table(struct amd_iommu *iommu);
extern bool amd_iommu_snp_en;
+
+/* AMD IOMMU GID */
+int amd_iommu_vminfo_alloc(struct amd_iommu *iommu, struct amd_iommu_vminfo *vminfo);
+void amd_iommu_vminfo_free(struct amd_iommu *iommu, struct amd_iommu_vminfo *vminfo);
+struct amd_iommu_vminfo *amd_iommu_get_vminfo(int gid);
+
#endif
@@ -16,6 +16,7 @@
#include <linux/pci.h>
#include <linux/irqreturn.h>
#include <linux/io-pgtable.h>
+#include <linux/hashtable.h>
/*
* Maximum number of IOMMUs supported
@@ -1053,6 +1054,11 @@ struct amd_irte_ops {
void (*clear_allocated)(struct irq_remap_table *, int);
};
+struct amd_iommu_vminfo {
+ u16 gid; /* 16-bit Guest ID, used as the hash key */
+ struct hlist_node hnode; /* Entry in amd_iommu_vminfo_hash */
+};
+
#ifdef CONFIG_IRQ_REMAP
extern struct amd_irte_ops irte_32_ops;
extern struct amd_irte_ops irte_128_ops;
@@ -23,6 +23,7 @@
#include <linux/amd-iommu.h>
#include <linux/notifier.h>
#include <linux/export.h>
+#include <linux/idr.h>
#include <linux/irq.h>
#include <linux/msi.h>
#include <linux/irqdomain.h>
@@ -66,6 +67,16 @@ LIST_HEAD(acpihid_map);
const struct iommu_ops amd_iommu_ops;
static const struct iommu_dirty_ops amd_dirty_ops;
+/* VMInfo Hashtable */
+#define AMD_IOMMU_VMINFO_HASH_BITS 16
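+/* 1 << AMD_IOMMU_VMINFO_HASH_BITS buckets, i.e. one per possible 16-bit GID */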
+DEFINE_HASHTABLE(amd_iommu_vminfo_hash, AMD_IOMMU_VMINFO_HASH_BITS);
+DEFINE_SPINLOCK(amd_iommu_vminfo_hash_lock);
+
+/* Global VMID */
+#define AMD_IOMMU_VMID_INVALID (-1U)
+static DEFINE_IDA(amd_iommu_global_vmid_ida);
+static u32 amd_iommu_latest_gid;
+
int amd_iommu_max_glx_val = -1;
/*
@@ -101,6 +112,61 @@ static inline bool domain_id_is_per_dev(struct protection_domain *pdom)
return (pdom && pdom->pd_mode != PD_MODE_V1);
}
+/* Allocate a unique GID in the range 1..0xFFFF (0 is never handed out). */
+static u32 get_vmid(void)
+{
+ int ret;
+
+ ret = ida_alloc_range(&amd_iommu_global_vmid_ida, 1, 0xFFFF, GFP_KERNEL);
+ return ret < 0 ? AMD_IOMMU_VMID_INVALID : ret;
+}
+
+int amd_iommu_vminfo_alloc(struct amd_iommu *iommu, struct amd_iommu_vminfo *vminfo)
+{
+ u32 gid;
+ unsigned long flags;
+
+ /*
+  * Allocate the GID before taking the hash lock: ida_alloc_range()
+  * with GFP_KERNEL may sleep, and the error path must not return
+  * with the lock held.
+  */
+ gid = get_vmid();
+ if (gid == AMD_IOMMU_VMID_INVALID)
+ return -EINVAL;
+
+ pr_debug("%s: gid=%u\n", __func__, gid);
+ vminfo->gid = gid;
+
+ spin_lock_irqsave(&amd_iommu_vminfo_hash_lock, flags);
+ amd_iommu_latest_gid = gid;
+ hash_add(amd_iommu_vminfo_hash, &vminfo->hnode, vminfo->gid);
+ spin_unlock_irqrestore(&amd_iommu_vminfo_hash_lock, flags);
+ return 0;
+}
+
+void amd_iommu_vminfo_free(struct amd_iommu *iommu,
+ struct amd_iommu_vminfo *vminfo)
+{
+ unsigned long flags;
+
+ pr_debug("%s: gid=%u\n", __func__, vminfo->gid);
+ spin_lock_irqsave(&amd_iommu_vminfo_hash_lock, flags);
+ hash_del(&vminfo->hnode);
+ ida_free(&amd_iommu_global_vmid_ida, vminfo->gid);
+ spin_unlock_irqrestore(&amd_iommu_vminfo_hash_lock, flags);
+}
+
+struct amd_iommu_vminfo *amd_iommu_get_vminfo(int gid)
+{
+ unsigned long flags;
+ struct amd_iommu_vminfo *tmp, *ptr = NULL;
+
+ spin_lock_irqsave(&amd_iommu_vminfo_hash_lock, flags);
+ hash_for_each_possible(amd_iommu_vminfo_hash, tmp, hnode, gid) {
+ if (tmp->gid == gid) {
+ ptr = tmp;
+ break;
+ }
+ }
+ if (!ptr)
+ pr_debug("%s : gid=%u not found\n", __func__, gid);
+ spin_unlock_irqrestore(&amd_iommu_vminfo_hash_lock, flags);
+ return ptr;
+}
+
static inline int get_acpihid_device_id(struct device *dev,
struct acpihid_map_entry **entry)
{