@@ -239,6 +239,7 @@
#endif
#define SISELECT_SSCCFG_BASE 0x40
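+/* hpmevent bits [63:56] are control/filter bits (OF and the privilege-mode inhibits); bits [55:0] carry the event encoding */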
+#define HPMEVENT_MASK GENMASK_ULL(63, 56)
/* symbolic CSR names: */
#define CSR_CYCLE 0xc00
@@ -147,7 +147,7 @@ union sbi_pmu_ctr_info {
};
};
-#define RISCV_PMU_RAW_EVENT_MASK GENMASK_ULL(47, 0)
+#define RISCV_PMU_SBI_RAW_EVENT_MASK GENMASK_ULL(47, 0)
#define RISCV_PMU_RAW_EVENT_IDX 0x20000
/** General pmu event codes specified in SBI PMU extension */
@@ -125,7 +125,7 @@ static u64 kvm_pmu_get_perf_event_config(unsigned long eidx, uint64_t evt_data)
config = kvm_pmu_get_perf_event_cache_config(ecode);
break;
case SBI_PMU_EVENT_TYPE_RAW:
- config = evt_data & RISCV_PMU_RAW_EVENT_MASK;
+ config = evt_data & RISCV_PMU_SBI_RAW_EVENT_MASK;
break;
case SBI_PMU_EVENT_TYPE_FW:
if (ecode < SBI_PMU_FW_MAX)
@@ -23,6 +23,8 @@
#include <asm/errata_list.h>
#include <asm/sbi.h>
#include <asm/cpufeature.h>
+#include <asm/hwcap.h>
+#include <asm/csr_ind.h>
#define SYSCTL_NO_USER_ACCESS 0
#define SYSCTL_USER_ACCESS 1
@@ -32,7 +34,20 @@
#define PERF_EVENT_FLAG_USER_ACCESS BIT(SYSCTL_USER_ACCESS)
#define PERF_EVENT_FLAG_LEGACY BIT(SYSCTL_LEGACY)
-PMU_FORMAT_ATTR(event, "config:0-47");
+#define RVPMU_SBI_PMU_FORMAT_ATTR "config:0-47"
+#define RVPMU_CDELEG_PMU_FORMAT_ATTR "config:0-55"
+
+static ssize_t __maybe_unused rvpmu_format_show(struct device *dev,
+ struct device_attribute *attr, char *buf);
+
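+/*
+ * Build a read-only sysfs attribute whose ->var carries the format string
+ * that rvpmu_format_show() emits.
+ */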
+#define RVPMU_ATTR_ENTRY(_name, _func, _config) ( \
+ &((struct dev_ext_attribute[]) { \
+ { __ATTR(_name, 0444, _func, NULL), (void *)_config } \
+ })[0].attr.attr)
+
+#define RVPMU_FORMAT_ATTR_ENTRY(_name, _config) \
+ RVPMU_ATTR_ENTRY(_name, rvpmu_format_show, (char *)_config)
+
PMU_FORMAT_ATTR(firmware, "config:63");
static DEFINE_STATIC_KEY_FALSE(riscv_pmu_sbi_available);
@@ -40,19 +55,35 @@ static DEFINE_STATIC_KEY_FALSE(riscv_pmu_cdeleg_available);
static bool cdeleg_available;
static bool sbi_available;
-static struct attribute *riscv_arch_formats_attr[] = {
- &format_attr_event.attr,
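+/*
+ * Two format groups are exposed: SBI-managed PMUs take a 48-bit raw event
+ * encoding (config:0-47) while counter-delegation PMUs take a 56-bit one
+ * (config:0-55). Bit 63 marks firmware events in both cases.
+ */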
+static struct attribute *riscv_sbi_pmu_formats_attr[] = {
+ RVPMU_FORMAT_ATTR_ENTRY(event, RVPMU_SBI_PMU_FORMAT_ATTR),
+ &format_attr_firmware.attr,
+ NULL,
+};
+
+static struct attribute_group riscv_sbi_pmu_format_group = {
+ .name = "format",
+ .attrs = riscv_sbi_pmu_formats_attr,
+};
+
+static const struct attribute_group *riscv_sbi_pmu_attr_groups[] = {
+ &riscv_sbi_pmu_format_group,
+ NULL,
+};
+
+static struct attribute *riscv_cdeleg_pmu_formats_attr[] = {
+ RVPMU_FORMAT_ATTR_ENTRY(event, RVPMU_CDELEG_PMU_FORMAT_ATTR),
&format_attr_firmware.attr,
NULL,
};
-static struct attribute_group riscv_pmu_format_group = {
+static struct attribute_group riscv_cdeleg_pmu_format_group = {
.name = "format",
- .attrs = riscv_arch_formats_attr,
+ .attrs = riscv_cdeleg_pmu_formats_attr,
};
-static const struct attribute_group *riscv_pmu_attr_groups[] = {
- &riscv_pmu_format_group,
+static const struct attribute_group *riscv_cdeleg_pmu_attr_groups[] = {
+ &riscv_cdeleg_pmu_format_group,
NULL,
};
@@ -275,6 +306,14 @@ static const struct sbi_pmu_event_data pmu_cache_event_map[PERF_COUNT_HW_CACHE_M
},
};
+static ssize_t rvpmu_format_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct dev_ext_attribute *eattr = container_of(attr,
+ struct dev_ext_attribute, attr);
+
+ return sysfs_emit(buf, "%s\n", (char *)eattr->var);
+}
+
static int rvpmu_ctr_get_width(int idx)
{
return pmu_ctr_list[idx].width;
@@ -327,6 +366,39 @@ static uint8_t rvpmu_csr_index(struct perf_event *event)
return pmu_ctr_list[event->hw.idx].csr - CSR_CYCLE;
}
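+/*
+ * Translate the perf exclude_* attributes into hpmevent privilege-mode
+ * inhibit bits (Sscofpmf). For guest events the supervisor/user inhibits
+ * map to their VS/VU-mode counterparts.
+ */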
+static uint64_t get_deleg_priv_filter_bits(struct perf_event *event)
+{
+ uint64_t priv_filter_bits = 0;
+ bool guest_events = false;
+
+ if (event->attr.config1 & RISCV_PMU_CONFIG1_GUEST_EVENTS)
+ guest_events = true;
+ if (event->attr.exclude_kernel)
+ priv_filter_bits |= guest_events ? HPMEVENT_VSINH : HPMEVENT_SINH;
+ if (event->attr.exclude_user)
+ priv_filter_bits |= guest_events ? HPMEVENT_VUINH : HPMEVENT_UINH;
+ if (guest_events && event->attr.exclude_hv)
+ priv_filter_bits |= HPMEVENT_SINH;
+ if (event->attr.exclude_host)
+ priv_filter_bits |= HPMEVENT_UINH | HPMEVENT_SINH;
+ if (event->attr.exclude_guest)
+ priv_filter_bits |= HPMEVENT_VSINH | HPMEVENT_VUINH;
+
+ return priv_filter_bits;
+}
+
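+/*
+ * Firmware events are requested as raw events with bit 63 of the event
+ * config set; they are always handled through the SBI PMU extension.
+ */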
+static bool pmu_sbi_is_fw_event(struct perf_event *event)
+{
+ u32 type = event->attr.type;
+ u64 config = event->attr.config;
+
+ if ((type == PERF_TYPE_RAW) && ((config >> 63) == 1))
+ return true;
+ else
+ return false;
+}
+
static unsigned long rvpmu_sbi_get_filter_flags(struct perf_event *event)
{
unsigned long cflags = 0;
@@ -355,7 +427,8 @@ static int rvpmu_sbi_ctr_get_idx(struct perf_event *event)
struct cpu_hw_events *cpuc = this_cpu_ptr(rvpmu->hw_events);
struct sbiret ret;
int idx;
- uint64_t cbase = 0, cmask = rvpmu->cmask;
+ uint64_t cbase = 0;
+ unsigned long ctr_mask = rvpmu->cmask;
unsigned long cflags = 0;
cflags = rvpmu_sbi_get_filter_flags(event);
@@ -368,21 +441,24 @@ static int rvpmu_sbi_ctr_get_idx(struct perf_event *event)
if (hwc->flags & PERF_EVENT_FLAG_LEGACY) {
if (event->attr.config == PERF_COUNT_HW_CPU_CYCLES) {
cflags |= SBI_PMU_CFG_FLAG_SKIP_MATCH;
- cmask = 1;
+ ctr_mask = 1;
} else if (event->attr.config == PERF_COUNT_HW_INSTRUCTIONS) {
cflags |= SBI_PMU_CFG_FLAG_SKIP_MATCH;
- cmask = 1UL << (CSR_INSTRET - CSR_CYCLE);
+ ctr_mask = 1UL << (CSR_INSTRET - CSR_CYCLE);
}
}
+ if (pmu_sbi_is_fw_event(event) && cdeleg_available)
+ ctr_mask = firmware_cmask;
+
/* retrieve the available counter index */
#if defined(CONFIG_32BIT)
ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_CFG_MATCH, cbase,
- cmask, cflags, hwc->event_base, hwc->config,
+ ctr_mask, cflags, hwc->event_base, hwc->config,
hwc->config >> 32);
#else
ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_CFG_MATCH, cbase,
- cmask, cflags, hwc->event_base, hwc->config, 0);
+ ctr_mask, cflags, hwc->event_base, hwc->config, 0);
#endif
if (ret.error) {
pr_debug("Not able to find a counter for event %lx config %llx\n",
@@ -391,7 +467,7 @@ static int rvpmu_sbi_ctr_get_idx(struct perf_event *event)
}
idx = ret.value;
- if (!test_bit(idx, &rvpmu->cmask) || !pmu_ctr_list[idx].value)
+ if (!test_bit(idx, &ctr_mask) || !pmu_ctr_list[idx].value)
return -ENOENT;
/* Additional sanity check for the counter id */
@@ -441,15 +517,20 @@ static int pmu_event_find_cache(u64 config)
return ret;
}
-static bool pmu_sbi_is_fw_event(struct perf_event *event)
+static int rvpmu_deleg_event_map(struct perf_event *event, u64 *econfig)
{
- u32 type = event->attr.type;
u64 config = event->attr.config;
- if ((type == PERF_TYPE_RAW) && ((config >> 63) == 1))
- return true;
- else
- return false;
+ /*
+ * The perf tool for RISC-V is expected to remap standard perf events to
+ * platform-specific encodings when the counter delegation extension is
+ * present. Thus, the mapped value is the event encoding supplied from
+ * userspace; no additional mapping/validation can be done in the driver.
+ */
+ *econfig = config & RISCV_PMU_DELEG_RAW_EVENT_MASK;
+
+ /* event_base is not used for counter delegation; config alone is sufficient for the event encoding */
+ return 0;
}
static int rvpmu_sbi_event_map(struct perf_event *event, u64 *econfig)
@@ -476,7 +557,7 @@ static int rvpmu_sbi_event_map(struct perf_event *event, u64 *econfig)
* raw event and firmware events.
*/
bSoftware = config >> 63;
- raw_config_val = config & RISCV_PMU_RAW_EVENT_MASK;
+ raw_config_val = config & RISCV_PMU_SBI_RAW_EVENT_MASK;
if (bSoftware) {
ret = (raw_config_val & 0xFFFF) |
(SBI_PMU_EVENT_TYPE_FW << 16);
@@ -493,7 +574,7 @@ static int rvpmu_sbi_event_map(struct perf_event *event, u64 *econfig)
return ret;
}
-static u64 rvpmu_sbi_ctr_read(struct perf_event *event)
+static u64 rvpmu_ctr_read(struct perf_event *event)
{
struct hw_perf_event *hwc = &event->hw;
int idx = hwc->idx;
@@ -550,10 +631,6 @@ static void rvpmu_sbi_ctr_start(struct perf_event *event, u64 ival)
if (ret.error && (ret.error != SBI_ERR_ALREADY_STARTED))
pr_err("Starting counter idx %d failed with error %d\n",
hwc->idx, sbi_err_map_linux_errno(ret.error));
-
- if ((hwc->flags & PERF_EVENT_FLAG_USER_ACCESS) &&
- (hwc->flags & PERF_EVENT_FLAG_USER_READ_CNT))
- rvpmu_set_scounteren((void *)event);
}
static void rvpmu_sbi_ctr_stop(struct perf_event *event, unsigned long flag)
@@ -561,10 +638,6 @@ static void rvpmu_sbi_ctr_stop(struct perf_event *event, unsigned long flag)
struct sbiret ret;
struct hw_perf_event *hwc = &event->hw;
- if ((hwc->flags & PERF_EVENT_FLAG_USER_ACCESS) &&
- (hwc->flags & PERF_EVENT_FLAG_USER_READ_CNT))
- rvpmu_reset_scounteren((void *)event);
-
ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_STOP, hwc->idx, 1, flag, 0, 0, 0);
if (ret.error && (ret.error != SBI_ERR_ALREADY_STOPPED) &&
flag != SBI_PMU_STOP_FLAG_RESET)
@@ -583,12 +656,6 @@ static int rvpmu_sbi_find_num_ctrs(void)
return sbi_err_map_linux_errno(ret.error);
}
-static int rvpmu_deleg_find_ctrs(void)
-{
- /* TODO */
- return -1;
-}
-
static int rvpmu_sbi_get_ctrinfo(int nsbi_ctr, int ndeleg_ctr)
{
struct sbiret ret;
@@ -647,19 +714,85 @@ static inline void rvpmu_sbi_stop_hw_ctrs(struct riscv_pmu *pmu)
cpu_hw_evt->used_hw_ctrs[0], 0, 0, 0, 0);
}
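+/*
+ * With counter delegation, counters are started and stopped by clearing or
+ * setting the corresponding bits in the scountinhibit CSR.
+ */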
+static void rvpmu_deleg_ctr_start_mask(unsigned long mask)
+{
+ unsigned long scountinhibit_val = 0;
+
+ scountinhibit_val = csr_read(CSR_SCOUNTINHIBIT);
+ scountinhibit_val &= ~mask;
+
+ csr_write(CSR_SCOUNTINHIBIT, scountinhibit_val);
+}
+
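+/*
+ * Arm the overflow interrupt for a sampling event by clearing the OF bit in
+ * the counter's hpmevent: per Sscofpmf, an LCOF interrupt is raised when OF
+ * transitions from 0 to 1 on overflow. Nothing to do if an overflow
+ * interrupt is already pending.
+ */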
+static void rvpmu_deleg_ctr_enable_irq(struct perf_event *event)
+{
+ unsigned long hpmevent_curr;
+ unsigned long of_mask;
+ struct hw_perf_event *hwc = &event->hw;
+ int counter_idx = hwc->idx;
+ unsigned long sip_val = csr_read(CSR_SIP);
+
+ if (!is_sampling_event(event) || (sip_val & SIP_LCOFIP))
+ return;
+
+#if defined(CONFIG_32BIT)
+ hpmevent_curr = csr_ind_read(CSR_SIREG5, SISELECT_SSCCFG_BASE, counter_idx);
+ of_mask = (u32)~HPMEVENTH_OF;
+#else
+ hpmevent_curr = csr_ind_read(CSR_SIREG2, SISELECT_SSCCFG_BASE, counter_idx);
+ of_mask = ~HPMEVENT_OF;
+#endif
+
+ hpmevent_curr &= of_mask;
+#if defined(CONFIG_32BIT)
+ csr_ind_write(CSR_SIREG5, SISELECT_SSCCFG_BASE, counter_idx, hpmevent_curr);
+#else
+ csr_ind_write(CSR_SIREG2, SISELECT_SSCCFG_BASE, counter_idx, hpmevent_curr);
+#endif
+}
+
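+/*
+ * Write the initial counter value through the indirect counter alias
+ * (sireg, plus sireg4 for the high half on RV32), arm the overflow
+ * interrupt if needed, then start the counter by clearing its
+ * scountinhibit bit.
+ */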
+static void rvpmu_deleg_ctr_start(struct perf_event *event, u64 ival)
+{
+ unsigned long scountinhibit_val = 0;
+ struct hw_perf_event *hwc = &event->hw;
+
+#if defined(CONFIG_32BIT)
+ csr_ind_write(CSR_SIREG, SISELECT_SSCCFG_BASE, hwc->idx, ival & 0xFFFFFFFF);
+ csr_ind_write(CSR_SIREG4, SISELECT_SSCCFG_BASE, hwc->idx, ival >> BITS_PER_LONG);
+#else
+ csr_ind_write(CSR_SIREG, SISELECT_SSCCFG_BASE, hwc->idx, ival);
+#endif
+
+ rvpmu_deleg_ctr_enable_irq(event);
+
+ scountinhibit_val = csr_read(CSR_SCOUNTINHIBIT);
+ scountinhibit_val &= ~BIT(hwc->idx);
+
+ csr_write(CSR_SCOUNTINHIBIT, scountinhibit_val);
+}
+
+static void rvpmu_deleg_ctr_stop_mask(unsigned long mask)
+{
+ unsigned long scountinhibit_val = 0;
+
+ scountinhibit_val = csr_read(CSR_SCOUNTINHIBIT);
+ scountinhibit_val |= mask;
+
+ csr_write(CSR_SCOUNTINHIBIT, scountinhibit_val);
+}
+
/*
 * This function starts all the used counters in a two-step approach.
 * Any counter that did not overflow can be started in a single step,
 * while the overflowed counters need to be started with an updated
 * initialization value.
*/
-static inline void rvpmu_sbi_start_overflow_mask(struct riscv_pmu *pmu,
+static inline void rvpmu_start_overflow_mask(struct riscv_pmu *pmu,
unsigned long ctr_ovf_mask)
{
int idx = 0;
struct cpu_hw_events *cpu_hw_evt = this_cpu_ptr(pmu->hw_events);
struct perf_event *event;
- unsigned long flag = SBI_PMU_START_FLAG_SET_INIT_VALUE;
unsigned long ctr_start_mask = 0;
uint64_t max_period;
struct hw_perf_event *hwc;
@@ -667,9 +800,12 @@ static inline void rvpmu_sbi_start_overflow_mask(struct riscv_pmu *pmu,
ctr_start_mask = cpu_hw_evt->used_hw_ctrs[0] & ~ctr_ovf_mask;
- /* Start all the counters that did not overflow in a single shot */
- sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_START, 0, ctr_start_mask,
- 0, 0, 0, 0);
+ if (static_branch_likely(&riscv_pmu_cdeleg_available))
+ rvpmu_deleg_ctr_start_mask(ctr_start_mask);
+ else
+ /* Start all the counters that did not overflow in a single shot */
+ sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_START, 0, ctr_start_mask,
+ 0, 0, 0, 0);
/* Reinitialize and start all the counter that overflowed */
while (ctr_ovf_mask) {
@@ -678,13 +814,10 @@ static inline void rvpmu_sbi_start_overflow_mask(struct riscv_pmu *pmu,
hwc = &event->hw;
max_period = riscv_pmu_ctr_get_width_mask(event);
init_val = local64_read(&hwc->prev_count) & max_period;
-#if defined(CONFIG_32BIT)
- sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_START, idx, 1,
- flag, init_val, init_val >> 32, 0);
-#else
- sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_START, idx, 1,
- flag, init_val, 0, 0);
-#endif
+ if (static_branch_likely(&riscv_pmu_cdeleg_available))
+ rvpmu_deleg_ctr_start(event, init_val);
+ else
+ rvpmu_sbi_ctr_start(event, init_val);
perf_event_update_userpage(event);
}
ctr_ovf_mask = ctr_ovf_mask >> 1;
@@ -723,7 +856,10 @@ static irqreturn_t rvpmu_ovf_handler(int irq, void *dev)
}
pmu = to_riscv_pmu(event->pmu);
- rvpmu_sbi_stop_hw_ctrs(pmu);
+ if (static_branch_likely(&riscv_pmu_cdeleg_available))
+ rvpmu_deleg_ctr_stop_mask(cpu_hw_evt->used_hw_ctrs[0]);
+ else
+ rvpmu_sbi_stop_hw_ctrs(pmu);
/* Overflow status register should only be read after counters are stopped */
ALT_SBI_PMU_OVERFLOW(overflow);
@@ -779,22 +915,174 @@ static irqreturn_t rvpmu_ovf_handler(int irq, void *dev)
}
}
- rvpmu_sbi_start_overflow_mask(pmu, overflowed_ctrs);
+ rvpmu_start_overflow_mask(pmu, overflowed_ctrs);
perf_sample_event_took(sched_clock() - start_clock);
return IRQ_HANDLED;
}
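+/*
+ * Probe the implemented width of a delegated hpmcounter with a WARL write of
+ * all-ones through the indirect counter alias and count the bits that stick
+ * (__fls() returns the MSB index, hence the +1).
+ */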
+static int get_deleg_hw_ctr_width(int counter_offset)
+{
+ unsigned long hpm_warl;
+ int num_bits;
+
+ if (counter_offset < 3 || counter_offset > 31)
+ return 0;
+
+ hpm_warl = csr_ind_warl(CSR_SIREG, SISELECT_SSCCFG_BASE, counter_offset, -1);
+ num_bits = __fls(hpm_warl) + 1;
+
+#if defined(CONFIG_32BIT)
+ hpm_warl = csr_ind_warl(CSR_SIREG4, SISELECT_SSCCFG_BASE, counter_offset, -1);
+ if (hpm_warl)
+ num_bits += __fls(hpm_warl) + 1;
+#endif
+ return num_bits;
+}
+
+static int rvpmu_deleg_find_ctrs(void)
+{
+ int i, num_hw_ctr = 0;
+ union sbi_pmu_ctr_info cinfo;
+ unsigned long scountinhibit_old = 0;
+
+ /* Do a WARL write/read to detect which hpmcounters have been delegated */
+ scountinhibit_old = csr_read(CSR_SCOUNTINHIBIT);
+ csr_write(CSR_SCOUNTINHIBIT, -1);
+ cmask = csr_read(CSR_SCOUNTINHIBIT);
+
+ csr_write(CSR_SCOUNTINHIBIT, scountinhibit_old);
+
+ for_each_set_bit(i, &cmask, RISCV_MAX_HW_COUNTERS) {
+ if (unlikely(i == 1))
+ continue; /* This should never happen as TM is read only */
+ cinfo.value = 0;
+ cinfo.type = SBI_PMU_CTR_TYPE_HW;
+ /*
+ * With counter delegation enabled, the CSR recorded in cinfo is the
+ * counter CSR that delegated accesses will read.
+ */
+ cinfo.csr = CSR_CYCLE + i;
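+ /* cycle and instret are fixed 64-bit counters; probe the width of hpmcounters */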
+ if (i == 0 || i == 2)
+ cinfo.width = 63;
+ else
+ cinfo.width = get_deleg_hw_ctr_width(i) - 1;
+
+ num_hw_ctr++;
+ pmu_ctr_list[i].value = cinfo.value;
+ }
+
+ return num_hw_ctr;
+}
+
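+/*
+ * TODO: Fixed-counter (cycle/instret) allocation is not implemented yet, so
+ * every event currently falls back to the hpmcounters.
+ */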
+static int get_deleg_fixed_hw_idx(struct cpu_hw_events *cpuc, struct perf_event *event)
+{
+ return -EINVAL;
+}
+
+static int get_deleg_next_hpm_hw_idx(struct cpu_hw_events *cpuc, struct perf_event *event)
+{
+ unsigned long hw_ctr_mask = 0;
+
+ /*
+ * TODO: For now, assume every hpmcounter can monitor every event.
+ * The event-to-counter mapping should come from the platform JSON file,
+ * which should also indicate whether sampling is supported.
+ */
+
+ /* Select only hpmcounters that are not already in use */
+ hw_ctr_mask = cmask & (~0x7);
+ hw_ctr_mask &= ~(cpuc->used_hw_ctrs[0]);
+ if (!hw_ctr_mask)
+ return -ENOENT;
+
+ return __ffs(hw_ctr_mask);
+}
+
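+/*
+ * Program the event encoding plus filter bits into the delegated hpmevent
+ * register through the indirect alias (sireg2, plus sireg5 for the high
+ * half on RV32 when Sscofpmf is present).
+ */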
+static void update_deleg_hpmevent(int counter_idx, uint64_t event_value, uint64_t filter_bits)
+{
+ uint64_t hpmevent_value = 0;
+
+ /*
+ * Keep the OF bit set while configuring the event so that no overflow
+ * interrupt fires yet; it is cleared at counter start when sampling is
+ * requested.
+ */
+ hpmevent_value = (event_value & ~HPMEVENT_MASK) | filter_bits | HPMEVENT_OF;
+#if defined(CONFIG_32BIT)
+ csr_ind_write(CSR_SIREG2, SISELECT_SSCCFG_BASE, counter_idx, hpmevent_value & 0xFFFFFFFF);
+ if (riscv_isa_extension_available(NULL, SSCOFPMF))
+ csr_ind_write(CSR_SIREG5, SISELECT_SSCCFG_BASE, counter_idx,
+ hpmevent_value >> BITS_PER_LONG);
+#else
+ csr_ind_write(CSR_SIREG2, SISELECT_SSCCFG_BASE, counter_idx, hpmevent_value);
+#endif
+}
+
+static int rvpmu_deleg_ctr_get_idx(struct perf_event *event)
+{
+ struct hw_perf_event *hwc = &event->hw;
+ struct riscv_pmu *rvpmu = to_riscv_pmu(event->pmu);
+ struct cpu_hw_events *cpuc = this_cpu_ptr(rvpmu->hw_events);
+ unsigned long hw_ctr_max_id;
+ uint64_t priv_filter;
+ int idx;
+
+ /*
+ * TODO: We should not rely on SBI Perf encoding to check if the event
+ * is a fixed one or not.
+ */
+ if (!is_sampling_event(event)) {
+ idx = get_deleg_fixed_hw_idx(cpuc, event);
+ if (idx == 0 || idx == 2) {
+ /* Priv mode filter bits are only available if smcntrpmf is present */
+ if (riscv_isa_extension_available(NULL, SMCNTRPMF))
+ goto found_idx;
+ else
+ goto skip_update;
+ }
+ }
+
+ hw_ctr_max_id = __fls(cmask);
+ idx = get_deleg_next_hpm_hw_idx(cpuc, event);
+ if (idx < 3 || idx > hw_ctr_max_id)
+ goto out_err;
+found_idx:
+ priv_filter = get_deleg_priv_filter_bits(event);
+ update_deleg_hpmevent(idx, hwc->config, priv_filter);
+skip_update:
+ if (!test_and_set_bit(idx, cpuc->used_hw_ctrs))
+ return idx;
+out_err:
+ return -ENOENT;
+}
+
static void rvpmu_ctr_start(struct perf_event *event, u64 ival)
{
- rvpmu_sbi_ctr_start(event, ival);
- /* TODO: Counter delegation implementation */
+ struct hw_perf_event *hwc = &event->hw;
+
+ if (static_branch_likely(&riscv_pmu_cdeleg_available) && !pmu_sbi_is_fw_event(event))
+ rvpmu_deleg_ctr_start(event, ival);
+ else
+ rvpmu_sbi_ctr_start(event, ival);
+
+ if ((hwc->flags & PERF_EVENT_FLAG_USER_ACCESS) &&
+ (hwc->flags & PERF_EVENT_FLAG_USER_READ_CNT))
+ rvpmu_set_scounteren((void *)event);
}
static void rvpmu_ctr_stop(struct perf_event *event, unsigned long flag)
{
- rvpmu_sbi_ctr_stop(event, flag);
- /* TODO: Counter delegation implementation */
+ struct hw_perf_event *hwc = &event->hw;
+
+ if ((hwc->flags & PERF_EVENT_FLAG_USER_ACCESS) &&
+ (hwc->flags & PERF_EVENT_FLAG_USER_READ_CNT))
+ rvpmu_reset_scounteren((void *)event);
+
+ if (static_branch_likely(&riscv_pmu_cdeleg_available) &&
+ !pmu_sbi_is_fw_event(event)) {
+ /*
+ * With the RESET flag the counter has already been stopped and its
+ * mapping will be cleared in the clear_idx callback, so only a plain
+ * stop request needs to inhibit the counter here.
+ */
+ if (flag != RISCV_PMU_STOP_FLAG_RESET)
+ rvpmu_deleg_ctr_stop_mask(BIT(hwc->idx));
+ } else {
+ rvpmu_sbi_ctr_stop(event, flag);
+ }
}
static int rvpmu_find_ctrs(void)
@@ -828,20 +1116,18 @@ static int rvpmu_find_ctrs(void)
static int rvpmu_event_map(struct perf_event *event, u64 *econfig)
{
- return rvpmu_sbi_event_map(event, econfig);
- /* TODO: Counter delegation implementation */
+ if (static_branch_likely(&riscv_pmu_cdeleg_available) && !pmu_sbi_is_fw_event(event))
+ return rvpmu_deleg_event_map(event, econfig);
+ else
+ return rvpmu_sbi_event_map(event, econfig);
}
static int rvpmu_ctr_get_idx(struct perf_event *event)
{
- return rvpmu_sbi_ctr_get_idx(event);
- /* TODO: Counter delegation implementation */
-}
-
-static u64 rvpmu_ctr_read(struct perf_event *event)
-{
- return rvpmu_sbi_ctr_read(event);
- /* TODO: Counter delegation implementation */
+ if (static_branch_likely(&riscv_pmu_cdeleg_available) && !pmu_sbi_is_fw_event(event))
+ return rvpmu_deleg_ctr_get_idx(event);
+ else
+ return rvpmu_sbi_ctr_get_idx(event);
}
static int rvpmu_starting_cpu(unsigned int cpu, struct hlist_node *node)
@@ -859,7 +1145,16 @@ static int rvpmu_starting_cpu(unsigned int cpu, struct hlist_node *node)
csr_write(CSR_SCOUNTEREN, 0x2);
/* Stop all the counters so that they can be enabled from perf */
- rvpmu_sbi_stop_all(pmu);
+ if (static_branch_likely(&riscv_pmu_cdeleg_available)) {
+ rvpmu_deleg_ctr_stop_mask(cmask);
+ if (static_branch_likely(&riscv_pmu_sbi_available)) {
+ /* Stop the firmware counters as well */
+ sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_STOP, 0, firmware_cmask,
+ 0, 0, 0, 0);
+ }
+ } else {
+ rvpmu_sbi_stop_all(pmu);
+ }
if (riscv_pmu_use_irq) {
cpu_hw_evt->irq = riscv_pmu_irq;
@@ -1142,7 +1437,10 @@ static int rvpmu_device_probe(struct platform_device *pdev)
pmu->pmu.capabilities |= PERF_PMU_CAP_NO_EXCLUDE;
}
- pmu->pmu.attr_groups = riscv_pmu_attr_groups;
+ if (cdeleg_available)
+ pmu->pmu.attr_groups = riscv_cdeleg_pmu_attr_groups;
+ else
+ pmu->pmu.attr_groups = riscv_sbi_pmu_attr_groups;
pmu->cmask = cmask;
pmu->ctr_start = rvpmu_ctr_start;
pmu->ctr_stop = rvpmu_ctr_stop;
@@ -20,6 +20,7 @@
*/
#define RISCV_MAX_COUNTERS 64
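+/* cycle, time, instret and hpmcounter3..31: the 32 counters addressable through scountinhibit */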
+#define RISCV_MAX_HW_COUNTERS 32
#define RISCV_OP_UNSUPP (-EOPNOTSUPP)
#define RISCV_PMU_PDEV_NAME "riscv-pmu"
#define RISCV_PMU_LEGACY_PDEV_NAME "riscv-pmu-legacy"
@@ -28,6 +29,8 @@
#define RISCV_PMU_CONFIG1_GUEST_EVENTS 0x1
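+/* With counter delegation, hpmevent bits [55:0] hold the raw event encoding; [63:56] are control/filter bits */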
+#define RISCV_PMU_DELEG_RAW_EVENT_MASK GENMASK_ULL(55, 0)
+
struct cpu_hw_events {
/* currently enabled events */
int n_events;