@@ -5170,7 +5170,7 @@ static __init void intel_arch_events_quirk(void)
static __init void intel_nehalem_quirk(void)
{
- union cpuid10_ebx ebx;
+ union cpuid_0xa_ebx ebx;

ebx.full = x86_pmu.events_maskl;
if (ebx.split.no_branch_misses_retired) {
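/*
 * Not part of the patch — a stand-alone sketch of what the quirk
 * above tests. CPUID.0xA:EBX is a vector of "event NOT available"
 * flags, so a set bit means the architectural event is missing.
 * The union is a local mirror of the kernel's (renamed)
 * cpuid_0xa_ebx; __get_cpuid() is the GCC/Clang cpuid.h helper.
 */
#include <stdio.h>
#include <cpuid.h>

union cpuid_0xa_ebx {
	struct {
		unsigned int no_unhalted_core_cycles:1;
		unsigned int no_instructions_retired:1;
		unsigned int no_unhalted_reference_cycles:1;
		unsigned int no_llc_reference:1;
		unsigned int no_llc_misses:1;
		unsigned int no_branch_instruction_retired:1;
		unsigned int no_branch_misses_retired:1;
	} split;
	unsigned int full;
};

int main(void)
{
	union cpuid_0xa_ebx ebx;
	unsigned int eax, ecx, edx;

	if (!__get_cpuid(0xA, &eax, &ebx.full, &ecx, &edx))
		return 1;	/* leaf 0xA not enumerated */

	printf("branch-misses-retired event %savailable\n",
	       ebx.split.no_branch_misses_retired ? "NOT " : "");
	return 0;
}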
@@ -5878,9 +5878,9 @@ __init int intel_pmu_init(void)
struct attribute **td_attr = &empty_attrs;
struct attribute **mem_attr = &empty_attrs;
struct attribute **tsx_attr = &empty_attrs;
- union cpuid10_edx edx;
- union cpuid10_eax eax;
- union cpuid10_ebx ebx;
+ union cpuid_0xa_edx edx;
+ union cpuid_0xa_eax eax;
+ union cpuid_0xa_ebx ebx;
unsigned int fixed_mask;
bool pmem = false;
int version, i;
@@ -5903,7 +5903,7 @@ __init int intel_pmu_init(void)
* Check whether the Architectural PerfMon supports
* Branch Misses Retired hw_event or not.
*/
- cpuid(10, &eax.full, &ebx.full, &fixed_mask, &edx.full);
+ cpuid(0xA, &eax.full, &ebx.full, &fixed_mask, &edx.full);

if (eax.split.mask_length < ARCH_PERFMON_EVENTS_COUNT)
return -ENODEV;
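/*
 * Not part of the patch — a user-space sketch of the leaf 0xA decode
 * performed above. EAX packs the PMU version, the number and width
 * of general-purpose counters, and the length of the EBX
 * event-availability vector; the union mirrors the renamed
 * cpuid_0xa_eax, so the fields fall out of .split after one cpuid.
 * Compile with GCC/Clang on x86.
 */
#include <stdio.h>
#include <cpuid.h>

union cpuid_0xa_eax {
	struct {
		unsigned int version_id:8;
		unsigned int num_counters:8;
		unsigned int bit_width:8;
		unsigned int mask_length:8;
	} split;
	unsigned int full;
};

int main(void)
{
	union cpuid_0xa_eax eax;
	unsigned int ebx, ecx, edx;

	if (!__get_cpuid(0xA, &eax.full, &ebx, &ecx, &edx))
		return 1;	/* no architectural PMU */

	printf("PMU v%u: %u counters, %u bits wide, mask length %u\n",
	       eax.split.version_id, eax.split.num_counters,
	       eax.split.bit_width, eax.split.mask_length);
	return 0;
}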
@@ -1497,16 +1497,16 @@ static bool is_arch_lbr_xsave_available(void)
void __init intel_pmu_arch_lbr_init(void)
{
struct pmu *pmu = x86_get_pmu(smp_processor_id());
- union cpuid28_eax eax;
- union cpuid28_ebx ebx;
- union cpuid28_ecx ecx;
+ union cpuid_0x1c_eax eax;
+ union cpuid_0x1c_ebx ebx;
+ union cpuid_0x1c_ecx ecx;
unsigned int unused_edx;
bool arch_lbr_xsave;
size_t size;
u64 lbr_nr;

/* Arch LBR Capabilities */
- cpuid(28, &eax.full, &ebx.full, &ecx.full, &unused_edx);
+ cpuid(0x1C, &eax.full, &ebx.full, &ecx.full, &unused_edx);

lbr_nr = fls(eax.split.lbr_depth_mask) * 8;
if (!lbr_nr)
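/*
 * Not part of the patch — the depth computation above, spelled out.
 * CPUID.0x1C:EAX[7:0] is a bitmask in which bit n set means an LBR
 * depth of (n + 1) * 8 entries is supported, so fls() of the mask
 * times 8 yields the deepest supported depth. fls() is re-created
 * here with __builtin_clz since the kernel helper is not available
 * in user space.
 */
#include <stdio.h>
#include <cpuid.h>

static int fls32(unsigned int x)
{
	return x ? 32 - __builtin_clz(x) : 0;	/* 1-based index of MSB */
}

int main(void)
{
	unsigned int eax, ebx, ecx, edx;

	if (!__get_cpuid_count(0x1C, 0, &eax, &ebx, &ecx, &edx))
		return 1;	/* arch LBRs not enumerated */

	printf("maximum arch LBR depth: %d entries\n",
	       fls32(eax & 0xff) * 8);
	return 0;
}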
@@ -235,7 +235,7 @@ static int __init pt_pmu_hw_init(void)
}

for (i = 0; i < PT_CPUID_LEAVES; i++) {
- cpuid_count(20, i,
+ cpuid_count(0x14, i,
&pt_pmu.caps[CPUID_EAX + i*PT_CPUID_REGS_NUM],
&pt_pmu.caps[CPUID_EBX + i*PT_CPUID_REGS_NUM],
&pt_pmu.caps[CPUID_ECX + i*PT_CPUID_REGS_NUM],
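/*
 * Not part of the patch — a sketch of walking the Intel PT leaf that
 * the loop above caches. Sub-leaf 0 of CPUID.0x14 reports in EAX the
 * index of the highest valid sub-leaf, which bounds the walk; the
 * kernel instead caches a fixed number of sub-leaves
 * (PT_CPUID_LEAVES).
 */
#include <stdio.h>
#include <cpuid.h>

int main(void)
{
	unsigned int eax, ebx, ecx, edx, max_subleaf, i;

	if (!__get_cpuid_count(0x14, 0, &eax, &ebx, &ecx, &edx))
		return 1;	/* Intel PT not enumerated */

	max_subleaf = eax;
	for (i = 0; i <= max_subleaf; i++) {
		__get_cpuid_count(0x14, i, &eax, &ebx, &ecx, &edx);
		printf("0x14.%u: %08x %08x %08x %08x\n",
		       i, eax, ebx, ecx, edx);
	}
	return 0;
}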
@@ -504,9 +504,9 @@ static __init void zhaoxin_arch_events_quirk(void)
__init int zhaoxin_pmu_init(void)
{
- union cpuid10_edx edx;
- union cpuid10_eax eax;
- union cpuid10_ebx ebx;
+ union cpuid_0xa_edx edx;
+ union cpuid_0xa_eax eax;
+ union cpuid_0xa_ebx ebx;
struct event_constraint *c;
unsigned int unused;
int version;
@@ -517,7 +517,7 @@ __init int zhaoxin_pmu_init(void)
* Check whether the Architectural PerfMon supports
* hw_event or not.
*/
- cpuid(10, &eax.full, &ebx.full, &unused, &edx.full);
+ cpuid(0xA, &eax.full, &ebx.full, &unused, &edx.full);

if (eax.split.mask_length < ARCH_PERFMON_EVENTS_COUNT - 1)
return -ENODEV;
@@ -125,7 +125,7 @@
* Intel "Architectural Performance Monitoring" CPUID
* detection/enumeration details:
*/
-union cpuid10_eax {
+union cpuid_0xa_eax {
struct {
unsigned int version_id:8;
unsigned int num_counters:8;
@@ -135,7 +135,7 @@ union cpuid10_eax {
unsigned int full;
};

-union cpuid10_ebx {
+union cpuid_0xa_ebx {
struct {
unsigned int no_unhalted_core_cycles:1;
unsigned int no_instructions_retired:1;
@@ -148,7 +148,7 @@ union cpuid10_ebx {
unsigned int full;
};

-union cpuid10_edx {
+union cpuid_0xa_edx {
struct {
unsigned int num_counters_fixed:5;
unsigned int bit_width_fixed:8;
@@ -170,7 +170,7 @@ union cpuid10_edx {
/*
* Intel Architectural LBR CPUID detection/enumeration details:
*/
-union cpuid28_eax {
+union cpuid_0x1c_eax {
struct {
/* Supported LBR depth values */
unsigned int lbr_depth_mask:8;
@@ -183,7 +183,7 @@ union cpuid28_eax {
unsigned int full;
};

-union cpuid28_ebx {
+union cpuid_0x1c_ebx {
struct {
/* CPL Filtering Supported */
unsigned int lbr_cpl:1;
@@ -195,7 +195,7 @@ union cpuid28_ebx {
unsigned int full;
};

-union cpuid28_ecx {
+union cpuid_0x1c_ecx {
struct {
/* Mispredict Bit Supported */
unsigned int lbr_mispred:1;
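/*
 * Not part of the patch — the usage pattern these unions exist for:
 * raw register bits go in through .full, named fields come out
 * through .split, so a rename like cpuid10_* -> cpuid_0xa_* touches
 * only the type name, never the decoding. Shown for the
 * fixed-counter fields of leaf 0xA EDX; the struct is a local
 * mirror of the kernel layout, trimmed to the fields used here.
 */
#include <stdio.h>
#include <cpuid.h>

union cpuid_0xa_edx {
	struct {
		unsigned int num_counters_fixed:5;
		unsigned int bit_width_fixed:8;
		unsigned int reserved:19;
	} split;
	unsigned int full;
};

int main(void)
{
	union cpuid_0xa_edx edx;
	unsigned int eax, ebx, ecx;

	if (!__get_cpuid(0xA, &eax, &ebx, &ecx, &edx.full))
		return 1;

	printf("%u fixed counters, %u bits wide\n",
	       edx.split.num_counters_fixed, edx.split.bit_width_fixed);
	return 0;
}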
@@ -936,8 +936,8 @@ static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function)
}
break;
case 0xa: { /* Architectural Performance Monitoring */
- union cpuid10_eax eax;
- union cpuid10_edx edx;
+ union cpuid_0xa_eax eax;
+ union cpuid_0xa_edx edx;

if (!static_cpu_has(X86_FEATURE_ARCH_PERFMON)) {
entry->eax = entry->ebx = entry->ecx = entry->edx = 0;
@@ -507,8 +507,8 @@ static void intel_pmu_refresh(struct kvm_vcpu *vcpu)
struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
struct lbr_desc *lbr_desc = vcpu_to_lbr_desc(vcpu);
struct kvm_cpuid_entry2 *entry;
- union cpuid10_eax eax;
- union cpuid10_edx edx;
+ union cpuid_0xa_eax eax;
+ union cpuid_0xa_edx edx;
u64 perf_capabilities;
u64 counter_mask;
int i;
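/*
 * Not part of the patch — the reverse direction the KVM code above
 * relies on: build a value field by field through .split, then hand
 * the packed .full word back out, e.g. to clamp what a guest sees in
 * CPUID.0xA. The union mirrors cpuid_0xa_eax as in the earlier
 * sketches; the cap of 4 counters is purely illustrative.
 */
#include <stdio.h>
#include <cpuid.h>

union cpuid_0xa_eax {
	struct {
		unsigned int version_id:8;
		unsigned int num_counters:8;
		unsigned int bit_width:8;
		unsigned int mask_length:8;
	} split;
	unsigned int full;
};

int main(void)
{
	union cpuid_0xa_eax eax;
	unsigned int ebx, ecx, edx;

	if (!__get_cpuid(0xA, &eax.full, &ebx, &ecx, &edx))
		return 1;

	if (eax.split.num_counters > 4)	/* illustrative cap */
		eax.split.num_counters = 4;

	printf("leaf 0xA EAX to expose to the guest: 0x%08x\n", eax.full);
	return 0;
}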