[v4,2/4] KVM: SVM: Use unsigned integers when dealing with ASIDs
Commit Message
Convert all local ASID variables and parameters throughout the SEV code
from signed integers to unsigned integers. ASIDs are fundamentally
unsigned values, and the global min/max variables are appropriately
unsigned integers, too.
Functionally, this is a glorified nop as KVM guarantees min_sev_asid is
non-zero, and no CPU supports -1u as the _only_ asid, i.e. the signed vs.
unsigned goof won't cause problems in practice.
Signed-off-by: Sean Christopherson <seanjc@google.com>
---
arch/x86/kvm/svm/sev.c | 18 ++++++++++--------
arch/x86/kvm/trace.h | 10 +++++-----
2 files changed, 15 insertions(+), 13 deletions(-)
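For illustration only (this sketch is not part of the patch), the C usual-arithmetic-conversion
rule is what makes mixing signed ASID locals with the unsigned min/max globals fragile: when a
signed value is compared against an unsigned one, the signed operand is converted to unsigned,
so a negative value sails past a lower-bound check. The names below are local stand-ins, not
the kernel globals.

	#include <stdio.h>

	int main(void)
	{
		unsigned int min_sev_asid = 1;	/* stand-in for the unsigned global min */
		int asid = -1;			/* signed local that went negative */

		/*
		 * -1 is converted to UINT_MAX for the comparison (gcc warns under
		 * -Wsign-compare), so the lower-bound check never rejects it.
		 */
		if (asid < min_sev_asid)
			puts("rejected");
		else
			puts("accepted");	/* this is what actually prints */

		return 0;
	}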
Comments
On 1/31/24 17:56, Sean Christopherson wrote:
> Convert all local ASID variables and parameters throughout the SEV code
> from signed integers to unsigned integers. ASIDs are fundamentally
> unsigned values, and the global min/max variables are appropriately
> unsigned integers, too.
>
> Functionally, this is a glorified nop as KVM guarantees min_sev_asid is
> non-zero, and no CPU supports -1u as the _only_ asid, i.e. the signed vs.
> unsigned goof won't cause problems in practice.
>
> Signed-off-by: Sean Christopherson <seanjc@google.com>
Just one minor comment below, but either way...
Reviewed-by: Tom Lendacky <thomas.lendacky@amd.com>
> ---
> arch/x86/kvm/svm/sev.c | 18 ++++++++++--------
> arch/x86/kvm/trace.h | 10 +++++-----
> 2 files changed, 15 insertions(+), 13 deletions(-)
>
> diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
> index 7c000088bca6..04c4c14473fd 100644
> --- a/arch/x86/kvm/svm/sev.c
> +++ b/arch/x86/kvm/svm/sev.c
> @@ -84,9 +84,10 @@ struct enc_region {
> };
>
> /* Called with the sev_bitmap_lock held, or on shutdown */
> -static int sev_flush_asids(int min_asid, int max_asid)
> +static int sev_flush_asids(unsigned int min_asid, unsigned int max_asid)
> {
> - int ret, asid, error = 0;
> + int ret, error = 0;
> + unsigned int asid;
>
> /* Check if there are any ASIDs to reclaim before performing a flush */
> asid = find_next_bit(sev_reclaim_asid_bitmap, nr_asids, min_asid);
> @@ -116,7 +117,7 @@ static inline bool is_mirroring_enc_context(struct kvm *kvm)
> }
>
> /* Must be called with the sev_bitmap_lock held */
> -static bool __sev_recycle_asids(int min_asid, int max_asid)
> +static bool __sev_recycle_asids(unsigned int min_asid, unsigned int max_asid)
> {
> if (sev_flush_asids(min_asid, max_asid))
> return false;
> @@ -143,8 +144,9 @@ static void sev_misc_cg_uncharge(struct kvm_sev_info *sev)
>
> static int sev_asid_new(struct kvm_sev_info *sev)
> {
> - int asid, min_asid, max_asid, ret;
> + unsigned int asid, min_asid, max_asid;
> bool retry = true;
> + int ret;
>
> WARN_ON(sev->misc_cg);
> sev->misc_cg = get_current_misc_cg();
> @@ -188,7 +190,7 @@ static int sev_asid_new(struct kvm_sev_info *sev)
> return ret;
> }
>
> -static int sev_get_asid(struct kvm *kvm)
> +static unsigned int sev_get_asid(struct kvm *kvm)
> {
> struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
>
> @@ -284,8 +286,8 @@ static int sev_guest_init(struct kvm *kvm, struct kvm_sev_cmd *argp)
>
> static int sev_bind_asid(struct kvm *kvm, unsigned int handle, int *error)
> {
> + unsigned int asid = sev_get_asid(kvm);
> struct sev_data_activate activate;
> - int asid = sev_get_asid(kvm);
> int ret;
>
> /* activate ASID on the given handle */
> @@ -2312,7 +2314,7 @@ int sev_cpu_init(struct svm_cpu_data *sd)
> */
> static void sev_flush_encrypted_page(struct kvm_vcpu *vcpu, void *va)
> {
> - int asid = to_kvm_svm(vcpu->kvm)->sev_info.asid;
> + unsigned int asid = to_kvm_svm(vcpu->kvm)->sev_info.asid;
Since you're touching this, you could switch this to:
unsigned int asid = sev_get_asid(vcpu->kvm);
>
> /*
> * Note! The address must be a kernel address, as regular page walk
> @@ -2630,7 +2632,7 @@ void sev_es_unmap_ghcb(struct vcpu_svm *svm)
> void pre_sev_run(struct vcpu_svm *svm, int cpu)
> {
> struct svm_cpu_data *sd = per_cpu_ptr(&svm_data, cpu);
> - int asid = sev_get_asid(svm->vcpu.kvm);
> + unsigned int asid = sev_get_asid(svm->vcpu.kvm);
>
> /* Assign the asid allocated with this SEV guest */
> svm->asid = asid;
> diff --git a/arch/x86/kvm/trace.h b/arch/x86/kvm/trace.h
> index 83843379813e..b82e6ed4f024 100644
> --- a/arch/x86/kvm/trace.h
> +++ b/arch/x86/kvm/trace.h
> @@ -732,13 +732,13 @@ TRACE_EVENT(kvm_nested_intr_vmexit,
> * Tracepoint for nested #vmexit because of interrupt pending
> */
> TRACE_EVENT(kvm_invlpga,
> - TP_PROTO(__u64 rip, int asid, u64 address),
> + TP_PROTO(__u64 rip, unsigned int asid, u64 address),
> TP_ARGS(rip, asid, address),
>
> TP_STRUCT__entry(
> - __field( __u64, rip )
> - __field( int, asid )
> - __field( __u64, address )
> + __field( __u64, rip )
> + __field( unsigned int, asid )
> + __field( __u64, address )
> ),
>
> TP_fast_assign(
> @@ -747,7 +747,7 @@ TRACE_EVENT(kvm_invlpga,
> __entry->address = address;
> ),
>
> - TP_printk("rip: 0x%016llx asid: %d address: 0x%016llx",
> + TP_printk("rip: 0x%016llx asid: %u address: 0x%016llx",
> __entry->rip, __entry->asid, __entry->address)
> );
>
On Thu, Feb 01, 2024, Tom Lendacky wrote:
> On 1/31/24 17:56, Sean Christopherson wrote:
> > @@ -2312,7 +2314,7 @@ int sev_cpu_init(struct svm_cpu_data *sd)
> > */
> > static void sev_flush_encrypted_page(struct kvm_vcpu *vcpu, void *va)
> > {
> > - int asid = to_kvm_svm(vcpu->kvm)->sev_info.asid;
> > + unsigned int asid = to_kvm_svm(vcpu->kvm)->sev_info.asid;
>
> Since you're touching this, you could switch this to:
>
> unsigned int asid = sev_get_asid(vcpu->kvm);
Ah, good call, will do when applying.
Thanks!
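For reference, with that suggestion folded in, the sev_flush_encrypted_page() hunk would
presumably read as follows (a sketch based on the review above, not the applied commit):

 static void sev_flush_encrypted_page(struct kvm_vcpu *vcpu, void *va)
 {
-	int asid = to_kvm_svm(vcpu->kvm)->sev_info.asid;
+	unsigned int asid = sev_get_asid(vcpu->kvm);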