[v4,3/8] riscv/kprobe: Prepare the skeleton for optimized kprobe
From: Liao Chang <liaochang1@huawei.com>
This patch provides the skeleton for preparing the optimized kprobe
instruction slot. It consists of two major parts. The first part checks
whether the current kprobe satisfies the requirements for optimization.
A kprobe based on a breakpoint only requires that the instrumented
instruction supports execute-out-of-line or simulation; an optimized
kprobe based on a long jump has additional requirements:

- The target of the long jump is within the range of 'AUIPC/JALR'.
- No nearby instruction jumps into any of the instructions replaced by
  'AUIPC/JALR'.
- One free register can be found to form the 'AUIPC/JALR' jumping to
  the detour buffer.
- One free register can be found to form the 'JR' jumping back from
  the detour buffer.

The second part allocates a larger instruction slot for each optimized
kprobe. Its payload is patched with the assembly code defined in
opt_trampoline.S, a call to the kprobe pre_handler, and the
instructions replaced by 'AUIPC/JALR'.
Signed-off-by: Liao Chang <liaochang1@huawei.com>
Co-developed-by: Chen Guokai <chenguokai17@mails.ucas.ac.cn>
Signed-off-by: Chen Guokai <chenguokai17@mails.ucas.ac.cn>
---
arch/riscv/kernel/probes/opt.c | 107 ++++++++++++++++++++++++++++++++-
1 file changed, 106 insertions(+), 1 deletion(-)
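
Note (illustration only, not part of this patch): a minimal sketch of how
an AUIPC/JALR pair splits a pc-relative offset into a 20-bit upper
immediate and a sign-extended 12-bit lower immediate; the helper name is
hypothetical and exists only to show why in_auipc_jalr_range() below
checks against [-2^31 - 2^11, 2^31 - 2^11 - 1]:

  /* Hypothetical helper, for illustration only. */
  static void split_auipc_jalr_offset(long offset, int *hi20, int *lo12)
  {
          /*
           * Bias by 2^11 so that the sign-extended low part added by JALR
           * reconstructs the offset: offset == (*hi20 << 12) + sext(*lo12).
           */
          *hi20 = (offset + (1 << 11)) >> 12;     /* AUIPC immediate */
          *lo12 = offset & 0xfff;                 /* JALR immediate, sign-extended by hw */
  }

For *hi20 to fit in a 20-bit signed immediate, the biased offset must fit
in a signed 32-bit value, which yields exactly the range checked by
in_auipc_jalr_range().
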
@@ -10,6 +10,54 @@
#include <linux/kprobes.h>
#include <asm/kprobes.h>
+#include <asm/patch.h>
+
+static inline int in_auipc_jalr_range(long val)
+{
+#ifdef CONFIG_ARCH_RV32I
+ return 1;
+#else
+ /*
+ * Note that the set of address offsets that can be formed
+ * by pairing LUI with LD, AUIPC with JALR, etc. in RV64I is
+	 * [-2^31 - 2^11, 2^31 - 2^11 - 1].
+ */
+ return ((-(1L << 31) - (1L << 11)) <= val) &&
+ (val < ((1L << 31) - (1L << 11)));
+#endif
+}
+
+/*
+ * Copy the optprobe assembly code template into the detour buffer and
+ * modify some of its instructions for each kprobe.
+ */
+static void prepare_detour_buffer(kprobe_opcode_t *code, kprobe_opcode_t *slot,
+ int rd, struct optimized_kprobe *op,
+ kprobe_opcode_t opcode)
+{
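+	/* Skeleton only; the template copy and fixups land in a later patch. */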
+}
+
+/*
+ * In the RISC-V ISA, an AUIPC/JALR pair clobbers one register to form the
+ * target address. Inspired by register renaming in out-of-order processors,
+ * this searches for a register that is not used as a source register but is
+ * used as a destination register before any branch or jump instruction.
+ */
+static void find_free_registers(struct kprobe *kp, struct optimized_kprobe *op,
+ int *rd1, int *rd2)
+{
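+	/* Skeleton only; the search lands in a later patch (0 means not found). */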
+}
+
+/*
+ * The kprobe can be optimized if one free register can be found at the
+ * start of the replaced code and another at its end. In addition,
+ * in-function jumps need to be checked to make sure that no jump targets
+ * the second instruction to be replaced.
+ */
+static bool can_optimize(unsigned long paddr, struct optimized_kprobe *op)
+{
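+	/* Always reject for now; the actual checks are added by a later patch. */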
+ return false;
+}
int arch_prepared_optinsn(struct arch_optimized_insn *optinsn)
{
@@ -24,7 +72,64 @@ int arch_check_optimized_kprobe(struct optimized_kprobe *op)
int arch_prepare_optimized_kprobe(struct optimized_kprobe *op,
struct kprobe *orig)
{
- return 0;
+ long rel;
+	int rd = 0, ra = 0, ret;
+ kprobe_opcode_t *code = NULL, *slot = NULL;
+
+ if (!can_optimize((unsigned long)orig->addr, op))
+ return -EILSEQ;
+
+ code = kzalloc(MAX_OPTINSN_SIZE, GFP_KERNEL);
+ slot = get_optinsn_slot();
+ if (!code || !slot) {
+ ret = -ENOMEM;
+ goto on_error;
+ }
+
+ /*
+	 * Verify that the address gap is within the range reachable by an
+	 * AUIPC/JALR pair (roughly +/-2GB).
+ */
+ rel = (unsigned long)slot - (unsigned long)orig->addr;
+ if (!in_auipc_jalr_range(rel)) {
+ /*
+		 * Unlike x86, free the code buffer directly instead of calling
+		 * __arch_remove_optimized_kprobe(), because no field of 'op'
+		 * has been filled in yet.
+ */
+ ret = -ERANGE;
+ goto on_error;
+ }
+
+ /*
+	 * Search for two free registers: 'rd' is used to form the AUIPC/JALR
+	 * jumping to the detour buffer, 'ra' is used to form the JR jumping
+	 * back from the detour buffer.
+ */
+ find_free_registers(orig, op, &rd, &ra);
+ if (rd == 0 || ra == 0) {
+ ret = -EILSEQ;
+ goto on_error;
+ }
+
+ op->optinsn.rd = rd;
+ prepare_detour_buffer(code, slot, ra, op, orig->opcode);
+
+ ret = patch_text_nosync((void *)slot, code, MAX_OPTINSN_SIZE);
+ if (!ret) {
+ op->optinsn.insn = slot;
+ kfree(code);
+ return 0;
+ }
+
+on_error:
+ if (slot) {
+ free_optinsn_slot(slot, 0);
+ op->optinsn.insn = NULL;
+ op->optinsn.length = 0;
+ }
+ kfree(code);
+ return ret;
}
void arch_remove_optimized_kprobe(struct optimized_kprobe *op)