@@ -268,6 +268,8 @@ void arm64_notify_die(const char *str, struct pt_regs *regs,
}
}
+static void advance_itstate(struct pt_regs *regs);
+
void arm64_skip_faulting_instruction(struct pt_regs *regs, unsigned long size)
{
regs->pc += size;
@@ -278,6 +280,9 @@ void arm64_skip_faulting_instruction(struct pt_regs *regs, unsigned long size)
*/
if (user_mode(regs))
user_fastforward_single_step(current);
+
+ if (regs->pstate & PSR_MODE32_BIT)
+ advance_itstate(regs);
}
static LIST_HEAD(undef_hook);
@@ -629,19 +634,12 @@ static void advance_itstate(struct pt_regs *regs)
compat_set_it_state(regs, it);
}
-static void arm64_compat_skip_faulting_instruction(struct pt_regs *regs,
- unsigned int sz)
-{
- advance_itstate(regs);
- arm64_skip_faulting_instruction(regs, sz);
-}
-
static void compat_cntfrq_read_handler(unsigned int esr, struct pt_regs *regs)
{
int reg = (esr & ESR_ELx_CP15_32_ISS_RT_MASK) >> ESR_ELx_CP15_32_ISS_RT_SHIFT;
pt_regs_write_reg(regs, reg, arch_timer_get_rate());
- arm64_compat_skip_faulting_instruction(regs, 4);
+ arm64_skip_faulting_instruction(regs, 4);
}
static const struct sys64_hook cp15_32_hooks[] = {
@@ -661,7 +659,7 @@ static void compat_cntvct_read_handler(unsigned int esr, struct pt_regs *regs)
pt_regs_write_reg(regs, rt, lower_32_bits(val));
pt_regs_write_reg(regs, rt2, upper_32_bits(val));
- arm64_compat_skip_faulting_instruction(regs, 4);
+ arm64_skip_faulting_instruction(regs, 4);
}
static const struct sys64_hook cp15_64_hooks[] = {
@@ -682,7 +680,7 @@ asmlinkage void __exception do_cp15instr(unsigned int esr, struct pt_regs *regs)
* There is no T16 variant of a CP access, so we
* always advance PC by 4 bytes.
*/
- arm64_compat_skip_faulting_instruction(regs, 4);
+ arm64_skip_faulting_instruction(regs, 4);
return;
}
Correct skipping of an instruction on AArch32 works a bit differently from AArch64, mainly due to the different CPSR/PSTATE semantics. There have been various attempts to get this right. Currently arm64_skip_faulting_instruction() mostly does the right thing, but does not advance the IT state machine for the AArch32 case. arm64_compat_skip_faulting_instruction() handles the IT state machine but is local to traps.c, and porting other code to use it will make a mess since there are some call sites that apply for both the compat and native cases. Since manual instruction skipping implies a trap, it's a relatively slow path. So, make arm64_skip_faulting_instruction() handle both compat and native, and get rid of the arm64_compat_skip_faulting_instruction() special case. Fixes: 32a3e635fb0e ("arm64: compat: Add CNTFRQ trap handler") Fixes: 1f1c014035a8 ("arm64: compat: Add condition code checks and IT advance") Fixes: 6436beeee572 ("arm64: Fix single stepping in kernel traps") Fixes: bd35a4adc413 ("arm64: Port SWP/SWPB emulation support from arm") Signed-off-by: Dave Martin <Dave.Martin@arm.com> --- **NOTE** Despite discussions on the v2 series to the effect that the prior behaviour is not broken, I'm now not so sure: Taking another look, I now can't track down for example where SWP in an IT block is specified to be UNPREDICTABLE. I only see e.g., ARM DDI 0487E.a Section 1.8.2 ("F1.8.2 Partial deprecation of IT"), which only deprecates the affected instructions. The legacy AArch32 SWP{B} insn is obsoleted by ARMv8, but the whole point of the armv8_deprecated stuff is to provide some backwards compatibility with v7. So, this looks like it needs a closer look. I'll leave the Fixes tags for now, so that the archaeology doesn't need to be repeated if we conclude that this patch really is a fix. --- arch/arm64/kernel/traps.c | 18 ++++++++---------- 1 file changed, 8 insertions(+), 10 deletions(-) -- 2.1.4