| Field | Value |
|---|---|
| Message ID | 1407500294-10804-10-git-send-email-peter.maydell@linaro.org |
| State | Superseded |
| Headers | show |
On Fri, Aug 08, 2014 at 01:18:12PM +0100, Peter Maydell wrote: > Implement ARMv8 software single-step handling for A64 code: > correctly update the single-step state machine and generate > debug exceptions when stepping A64 code. > > This patch has no behavioural change since MDSCR_EL1.SS can't > be set by the guest yet. Hi Peter, > > Signed-off-by: Peter Maydell <peter.maydell@linaro.org> > --- > target-arm/cpu.h | 21 +++++++++++ > target-arm/helper.h | 1 + > target-arm/internals.h | 6 +++ > target-arm/op_helper.c | 5 +++ > target-arm/translate-a64.c | 91 +++++++++++++++++++++++++++++++++++++++++++--- > target-arm/translate.h | 12 ++++++ > 6 files changed, 131 insertions(+), 5 deletions(-) > > diff --git a/target-arm/cpu.h b/target-arm/cpu.h > index 74f7b15..ac01524 100644 > --- a/target-arm/cpu.h > +++ b/target-arm/cpu.h > @@ -1211,6 +1211,10 @@ static inline bool arm_singlestep_active(CPUARMState *env) > #define ARM_TBFLAG_AA64_EL_MASK (0x3 << ARM_TBFLAG_AA64_EL_SHIFT) > #define ARM_TBFLAG_AA64_FPEN_SHIFT 2 > #define ARM_TBFLAG_AA64_FPEN_MASK (1 << ARM_TBFLAG_AA64_FPEN_SHIFT) > +#define ARM_TBFLAG_AA64_SS_ACTIVE_SHIFT 3 > +#define ARM_TBFLAG_AA64_SS_ACTIVE_MASK (1 << ARM_TBFLAG_AA64_SS_ACTIVE_SHIFT) > +#define ARM_TBFLAG_AA64_PSTATE_SS_SHIFT 3 > +#define ARM_TBFLAG_AA64_PSTATE_SS_MASK (1 << ARM_TBFLAG_AA64_PSTATE_SS_SHIFT) Shouldn't these shifts/masks differ? 
> > /* some convenience accessor macros */ > #define ARM_TBFLAG_AARCH64_STATE(F) \ > @@ -1235,6 +1239,10 @@ static inline bool arm_singlestep_active(CPUARMState *env) > (((F) & ARM_TBFLAG_AA64_EL_MASK) >> ARM_TBFLAG_AA64_EL_SHIFT) > #define ARM_TBFLAG_AA64_FPEN(F) \ > (((F) & ARM_TBFLAG_AA64_FPEN_MASK) >> ARM_TBFLAG_AA64_FPEN_SHIFT) > +#define ARM_TBFLAG_AA64_SS_ACTIVE(F) \ > + (((F) & ARM_TBFLAG_AA64_SS_ACTIVE_MASK) >> ARM_TBFLAG_AA64_SS_ACTIVE_SHIFT) > +#define ARM_TBFLAG_AA64_PSTATE_SS(F) \ > + (((F) & ARM_TBFLAG_AA64_PSTATE_SS_MASK) >> ARM_TBFLAG_AA64_PSTATE_SS_SHIFT) > > static inline void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc, > target_ulong *cs_base, int *flags) > @@ -1248,6 +1256,19 @@ static inline void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc, > if (fpen == 3 || (fpen == 1 && arm_current_pl(env) != 0)) { > *flags |= ARM_TBFLAG_AA64_FPEN_MASK; > } > + /* The SS_ACTIVE and PSTATE_SS bits correspond to the state machine > + * states defined in the ARM ARM for software singlestep: > + * SS_ACTIVE PSTATE.SS State > + * 0 x Inactive (the TB flag for SS is always 0) > + * 1 0 Active-pending > + * 1 1 Active-not-pending > + */ > + if (arm_singlestep_active(env)) { > + *flags |= ARM_TBFLAG_AA64_SS_ACTIVE_MASK; > + if (env->pstate & PSTATE_SS) { > + *flags |= ARM_TBFLAG_AA64_PSTATE_SS_MASK; > + } > + } > } else { > int privmode; > *pc = env->regs[15]; > diff --git a/target-arm/helper.h b/target-arm/helper.h > index facfcd2..1d7003b 100644 > --- a/target-arm/helper.h > +++ b/target-arm/helper.h > @@ -64,6 +64,7 @@ DEF_HELPER_3(set_cp_reg64, void, env, ptr, i64) > DEF_HELPER_2(get_cp_reg64, i64, env, ptr) > > DEF_HELPER_3(msr_i_pstate, void, env, i32, i32) > +DEF_HELPER_1(clear_pstate_ss, void, env) > DEF_HELPER_1(exception_return, void, env) > > DEF_HELPER_2(get_r13_banked, i32, env, i32) > diff --git a/target-arm/internals.h b/target-arm/internals.h > index 08fa697..53c2e3c 100644 > --- a/target-arm/internals.h > +++ 
b/target-arm/internals.h > @@ -290,4 +290,10 @@ static inline uint32_t syn_data_abort(int same_el, int ea, int cm, int s1ptw, > | (ea << 9) | (cm << 8) | (s1ptw << 7) | (wnr << 6) | fsc; > } > > +static inline uint32_t syn_swstep(int same_el, int isv, int ex) > +{ > + return (EC_SOFTWARESTEP << ARM_EL_EC_SHIFT) | (same_el << ARM_EL_EC_SHIFT) > + | (isv << 24) | (ex << 6) | 0x22; > +} > + > #endif > diff --git a/target-arm/op_helper.c b/target-arm/op_helper.c > index 62cc07d..fe40358 100644 > --- a/target-arm/op_helper.c > +++ b/target-arm/op_helper.c > @@ -369,6 +369,11 @@ void HELPER(msr_i_pstate)(CPUARMState *env, uint32_t op, uint32_t imm) > } > } > > +void HELPER(clear_pstate_ss)(CPUARMState *env) > +{ > + env->pstate &= ~PSTATE_SS; > +} > + > void HELPER(exception_return)(CPUARMState *env) > { > int cur_el = arm_current_pl(env); > diff --git a/target-arm/translate-a64.c b/target-arm/translate-a64.c > index aa731bf..21cb12b 100644 > --- a/target-arm/translate-a64.c > +++ b/target-arm/translate-a64.c > @@ -203,10 +203,39 @@ static void gen_exception_insn(DisasContext *s, int offset, int excp, > s->is_jmp = DISAS_EXC; > } > > +static void gen_ss_advance(DisasContext *s) > +{ > + /* If the singlestep state is Active-not-pending, advance to > + * Active-pending. > + */ > + if (s->ss_active) { > + s->pstate_ss = 0; > + gen_helper_clear_pstate_ss(cpu_env); > + } > +} > + > +static void gen_step_complete_exception(DisasContext *s) > +{ > + /* We just completed step of an insn. Move from Active-not-pending > + * to Active-pending, and then also take the swstep exception. > + * This corresponds to making the (IMPDEF) choice to prioritize > + * swstep exceptions over asynchronous exceptions taken to an exception > + * level where debug is disabled. 
This choice has the advantage that > + * we do not need to maintain internal state corresponding to the > + * ISV/EX syndrome bits between completion of the step and generation > + * of the exception, and our syndrome information is always correct. > + */ > + gen_ss_advance(s); > + gen_exception(EXCP_UDEF, syn_swstep(s->ss_same_el, 1, s->is_ldex)); > + s->is_jmp = DISAS_EXC; > +} > + > static inline bool use_goto_tb(DisasContext *s, int n, uint64_t dest) > { > - /* No direct tb linking with singlestep or deterministic io */ > - if (s->singlestep_enabled || (s->tb->cflags & CF_LAST_IO)) { > + /* No direct tb linking with singlestep (either QEMU's or the ARM > + * debug architecture kind) or deterministic io > + */ > + if (s->singlestep_enabled || s->ss_active || (s->tb->cflags & CF_LAST_IO)) { > return false; > } > > @@ -230,7 +259,9 @@ static inline void gen_goto_tb(DisasContext *s, int n, uint64_t dest) > s->is_jmp = DISAS_TB_JUMP; > } else { > gen_a64_set_pc_im(dest); > - if (s->singlestep_enabled) { > + if (s->ss_active) { > + gen_step_complete_exception(s); > + } else if (s->singlestep_enabled) { > gen_exception_internal(EXCP_DEBUG); > } else { > tcg_gen_exit_tb(0); > @@ -1447,6 +1478,12 @@ static void disas_exc(DisasContext *s, uint32_t insn) > unallocated_encoding(s); > break; > } > + /* For SVC, HVC and SMC we advance the single-step state > + * machine before taking the exception. This is architecturally > + * mandated, to ensure that single-stepping a system call > + * instruction works properly. 
> + */ > + gen_ss_advance(s); > gen_exception_insn(s, 0, EXCP_SWI, syn_aa64_svc(imm16)); > break; > case 1: > @@ -1727,6 +1764,7 @@ static void disas_ldst_excl(DisasContext *s, uint32_t insn) > > if (is_excl) { > if (!is_store) { > + s->is_ldex = true; > gen_load_exclusive(s, rt, rt2, tcg_addr, size, is_pair); > } else { > gen_store_exclusive(s, rs, rt, rt2, tcg_addr, size, is_pair); > @@ -10867,6 +10905,26 @@ void gen_intermediate_code_internal_a64(ARMCPU *cpu, > dc->current_pl = arm_current_pl(env); > dc->features = env->features; > > + /* Single step state. The code-generation logic here is: > + * SS_ACTIVE == 0: > + * generate code with no special handling for single-stepping (except > + * that anything that can make us go to SS_ACTIVE == 1 must end the TB; > + * this happens anyway because those changes are all system register or > + * PSTATE writes). > + * SS_ACTIVE == 1, PSTATE.SS == 1: (active-not-pending) > + * emit code for one insn > + * emit code to clear PSTATE.SS > + * emit code to generate software step exception for completed step > + * end TB (as usual for having generated an exception) > + * SS_ACTIVE == 1, PSTATE.SS == 0: (active-pending) > + * emit code to generate a software step exception > + * end the TB > + */ > + dc->ss_active = ARM_TBFLAG_AA64_SS_ACTIVE(tb->flags); > + dc->pstate_ss = ARM_TBFLAG_AA64_PSTATE_SS(tb->flags); > + dc->is_ldex = false; > + dc->ss_same_el = (arm_debug_target_el(env) == dc->current_pl); > + > init_tmp_a64_array(dc); > > next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE; > @@ -10915,6 +10973,23 @@ void gen_intermediate_code_internal_a64(ARMCPU *cpu, > tcg_gen_debug_insn_start(dc->pc); > } > > + if (dc->ss_active && !dc->pstate_ss) { > + /* Singlestep state is Active-pending. 
> + * If we're in this state at the start of a TB then either > + * a) we just took an exception to an EL which is being debugged > + * and this is the first insn in the exception handler > + * b) debug exceptions were masked and we just unmasked them > + * without changing EL (eg by clearing PSTATE.D) > + * In either case we're going to take a swstep exception in the > + * "did not step an insn" case, and so the syndrome ISV and EX > + * bits should be zero. > + */ > + assert(num_insns == 0); > + gen_exception(EXCP_UDEF, syn_swstep(dc->ss_same_el, 0, 0)); > + dc->is_jmp = DISAS_EXC; > + break; > + } > + > disas_a64_insn(env, dc); > > if (tcg_check_temp_count()) { > @@ -10931,6 +11006,7 @@ void gen_intermediate_code_internal_a64(ARMCPU *cpu, > } while (!dc->is_jmp && tcg_ctx.gen_opc_ptr < gen_opc_end && > !cs->singlestep_enabled && > !singlestep && > + !dc->ss_active && > dc->pc < next_page_start && > num_insns < max_insns); > > @@ -10938,7 +11014,8 @@ void gen_intermediate_code_internal_a64(ARMCPU *cpu, > gen_io_end(); > } > > - if (unlikely(cs->singlestep_enabled) && dc->is_jmp != DISAS_EXC) { > + if (unlikely(cs->singlestep_enabled || dc->ss_active) > + && dc->is_jmp != DISAS_EXC) { > /* Note that this means single stepping WFI doesn't halt the CPU. 
> * For conditional branch insns this is harmless unreachable code as > * gen_goto_tb() has already handled emitting the debug exception > @@ -10948,7 +11025,11 @@ void gen_intermediate_code_internal_a64(ARMCPU *cpu, > if (dc->is_jmp != DISAS_JUMP) { > gen_a64_set_pc_im(dc->pc); > } > - gen_exception_internal(EXCP_DEBUG); > + if (cs->singlestep_enabled) { > + gen_exception_internal(EXCP_DEBUG); > + } else { > + gen_step_complete_exception(dc); > + } > } else { > switch (dc->is_jmp) { > case DISAS_NEXT: > diff --git a/target-arm/translate.h b/target-arm/translate.h > index 31a0104..b90d275 100644 > --- a/target-arm/translate.h > +++ b/target-arm/translate.h > @@ -40,6 +40,18 @@ typedef struct DisasContext { > * that it is set at the point where we actually touch the FP regs. > */ > bool fp_access_checked; > + /* ARMv8 single-step state (this is distinct from the QEMU gdbstub > + * single-step support). > + */ > + bool ss_active; > + bool pstate_ss; > + /* True if the insn just emitted was a load-exclusive instruction > + * (necessary for syndrome information for single step exceptions), > + * ie A64 LDX*, LDAX*, A32/T32 LDREX*, LDAEX*. > + */ > + bool is_ldex; > + /* True if a single-step exception will be taken to the current EL */ > + bool ss_same_el; > #define TMP_A64_MAX 16 > int tmp_a64_count; > TCGv_i64 tmp_a64[TMP_A64_MAX]; > -- > 1.9.1 > >
On 19 August 2014 10:56, Edgar E. Iglesias <edgar.iglesias@gmail.com> wrote: > On Fri, Aug 08, 2014 at 01:18:12PM +0100, Peter Maydell wrote: >> --- a/target-arm/cpu.h >> +++ b/target-arm/cpu.h >> @@ -1211,6 +1211,10 @@ static inline bool arm_singlestep_active(CPUARMState *env) >> #define ARM_TBFLAG_AA64_EL_MASK (0x3 << ARM_TBFLAG_AA64_EL_SHIFT) >> #define ARM_TBFLAG_AA64_FPEN_SHIFT 2 >> #define ARM_TBFLAG_AA64_FPEN_MASK (1 << ARM_TBFLAG_AA64_FPEN_SHIFT) >> +#define ARM_TBFLAG_AA64_SS_ACTIVE_SHIFT 3 >> +#define ARM_TBFLAG_AA64_SS_ACTIVE_MASK (1 << ARM_TBFLAG_AA64_SS_ACTIVE_SHIFT) >> +#define ARM_TBFLAG_AA64_PSTATE_SS_SHIFT 3 >> +#define ARM_TBFLAG_AA64_PSTATE_SS_MASK (1 << ARM_TBFLAG_AA64_PSTATE_SS_SHIFT) > > Shouldn't these shifts/masks differ? Oops. Yes, they certainly should. -- PMM
On 19 August 2014 11:25, Peter Maydell <peter.maydell@linaro.org> wrote: > On 19 August 2014 10:56, Edgar E. Iglesias <edgar.iglesias@gmail.com> wrote: >> On Fri, Aug 08, 2014 at 01:18:12PM +0100, Peter Maydell wrote: >>> --- a/target-arm/cpu.h >>> +++ b/target-arm/cpu.h >>> @@ -1211,6 +1211,10 @@ static inline bool arm_singlestep_active(CPUARMState *env) >>> #define ARM_TBFLAG_AA64_EL_MASK (0x3 << ARM_TBFLAG_AA64_EL_SHIFT) >>> #define ARM_TBFLAG_AA64_FPEN_SHIFT 2 >>> #define ARM_TBFLAG_AA64_FPEN_MASK (1 << ARM_TBFLAG_AA64_FPEN_SHIFT) >>> +#define ARM_TBFLAG_AA64_SS_ACTIVE_SHIFT 3 >>> +#define ARM_TBFLAG_AA64_SS_ACTIVE_MASK (1 << ARM_TBFLAG_AA64_SS_ACTIVE_SHIFT) >>> +#define ARM_TBFLAG_AA64_PSTATE_SS_SHIFT 3 >>> +#define ARM_TBFLAG_AA64_PSTATE_SS_MASK (1 << ARM_TBFLAG_AA64_PSTATE_SS_SHIFT) >> >> Shouldn't these shifts/masks differ? > > Oops. Yes, they certainly should. The fix is just a simple s/3/4/ for the PSTATE_SS_SHIFT define. Does anybody want a retransmit of the series for this one-liner? thanks -- PMM
On Tue, Aug 19, 2014 at 11:46:23AM +0100, Peter Maydell wrote: > On 19 August 2014 11:25, Peter Maydell <peter.maydell@linaro.org> wrote: > > On 19 August 2014 10:56, Edgar E. Iglesias <edgar.iglesias@gmail.com> wrote: > >> On Fri, Aug 08, 2014 at 01:18:12PM +0100, Peter Maydell wrote: > >>> --- a/target-arm/cpu.h > >>> +++ b/target-arm/cpu.h > >>> @@ -1211,6 +1211,10 @@ static inline bool arm_singlestep_active(CPUARMState *env) > >>> #define ARM_TBFLAG_AA64_EL_MASK (0x3 << ARM_TBFLAG_AA64_EL_SHIFT) > >>> #define ARM_TBFLAG_AA64_FPEN_SHIFT 2 > >>> #define ARM_TBFLAG_AA64_FPEN_MASK (1 << ARM_TBFLAG_AA64_FPEN_SHIFT) > >>> +#define ARM_TBFLAG_AA64_SS_ACTIVE_SHIFT 3 > >>> +#define ARM_TBFLAG_AA64_SS_ACTIVE_MASK (1 << ARM_TBFLAG_AA64_SS_ACTIVE_SHIFT) > >>> +#define ARM_TBFLAG_AA64_PSTATE_SS_SHIFT 3 > >>> +#define ARM_TBFLAG_AA64_PSTATE_SS_MASK (1 << ARM_TBFLAG_AA64_PSTATE_SS_SHIFT) > >> > >> Shouldn't these shifts/masks differ? > > > > Oops. Yes, they certainly should. > > The fix is just a simple s/3/4/ for the PSTATE_SS_SHIFT > define. Does anybody want a retransmit of the series for > this one-liner? Hi, AFAICT, the rest of the series looks good, RB on all. Reviewed-by: Edgar E. Iglesias <edgar.iglesias@xilinx.com> No need to repost for me. Cheers, Edgar
diff --git a/target-arm/cpu.h b/target-arm/cpu.h index 74f7b15..ac01524 100644 --- a/target-arm/cpu.h +++ b/target-arm/cpu.h @@ -1211,6 +1211,10 @@ static inline bool arm_singlestep_active(CPUARMState *env) #define ARM_TBFLAG_AA64_EL_MASK (0x3 << ARM_TBFLAG_AA64_EL_SHIFT) #define ARM_TBFLAG_AA64_FPEN_SHIFT 2 #define ARM_TBFLAG_AA64_FPEN_MASK (1 << ARM_TBFLAG_AA64_FPEN_SHIFT) +#define ARM_TBFLAG_AA64_SS_ACTIVE_SHIFT 3 +#define ARM_TBFLAG_AA64_SS_ACTIVE_MASK (1 << ARM_TBFLAG_AA64_SS_ACTIVE_SHIFT) +#define ARM_TBFLAG_AA64_PSTATE_SS_SHIFT 3 +#define ARM_TBFLAG_AA64_PSTATE_SS_MASK (1 << ARM_TBFLAG_AA64_PSTATE_SS_SHIFT) /* some convenience accessor macros */ #define ARM_TBFLAG_AARCH64_STATE(F) \ @@ -1235,6 +1239,10 @@ static inline bool arm_singlestep_active(CPUARMState *env) (((F) & ARM_TBFLAG_AA64_EL_MASK) >> ARM_TBFLAG_AA64_EL_SHIFT) #define ARM_TBFLAG_AA64_FPEN(F) \ (((F) & ARM_TBFLAG_AA64_FPEN_MASK) >> ARM_TBFLAG_AA64_FPEN_SHIFT) +#define ARM_TBFLAG_AA64_SS_ACTIVE(F) \ + (((F) & ARM_TBFLAG_AA64_SS_ACTIVE_MASK) >> ARM_TBFLAG_AA64_SS_ACTIVE_SHIFT) +#define ARM_TBFLAG_AA64_PSTATE_SS(F) \ + (((F) & ARM_TBFLAG_AA64_PSTATE_SS_MASK) >> ARM_TBFLAG_AA64_PSTATE_SS_SHIFT) static inline void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc, target_ulong *cs_base, int *flags) @@ -1248,6 +1256,19 @@ static inline void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc, if (fpen == 3 || (fpen == 1 && arm_current_pl(env) != 0)) { *flags |= ARM_TBFLAG_AA64_FPEN_MASK; } + /* The SS_ACTIVE and PSTATE_SS bits correspond to the state machine + * states defined in the ARM ARM for software singlestep: + * SS_ACTIVE PSTATE.SS State + * 0 x Inactive (the TB flag for SS is always 0) + * 1 0 Active-pending + * 1 1 Active-not-pending + */ + if (arm_singlestep_active(env)) { + *flags |= ARM_TBFLAG_AA64_SS_ACTIVE_MASK; + if (env->pstate & PSTATE_SS) { + *flags |= ARM_TBFLAG_AA64_PSTATE_SS_MASK; + } + } } else { int privmode; *pc = env->regs[15]; diff --git a/target-arm/helper.h 
b/target-arm/helper.h index facfcd2..1d7003b 100644 --- a/target-arm/helper.h +++ b/target-arm/helper.h @@ -64,6 +64,7 @@ DEF_HELPER_3(set_cp_reg64, void, env, ptr, i64) DEF_HELPER_2(get_cp_reg64, i64, env, ptr) DEF_HELPER_3(msr_i_pstate, void, env, i32, i32) +DEF_HELPER_1(clear_pstate_ss, void, env) DEF_HELPER_1(exception_return, void, env) DEF_HELPER_2(get_r13_banked, i32, env, i32) diff --git a/target-arm/internals.h b/target-arm/internals.h index 08fa697..53c2e3c 100644 --- a/target-arm/internals.h +++ b/target-arm/internals.h @@ -290,4 +290,10 @@ static inline uint32_t syn_data_abort(int same_el, int ea, int cm, int s1ptw, | (ea << 9) | (cm << 8) | (s1ptw << 7) | (wnr << 6) | fsc; } +static inline uint32_t syn_swstep(int same_el, int isv, int ex) +{ + return (EC_SOFTWARESTEP << ARM_EL_EC_SHIFT) | (same_el << ARM_EL_EC_SHIFT) + | (isv << 24) | (ex << 6) | 0x22; +} + #endif diff --git a/target-arm/op_helper.c b/target-arm/op_helper.c index 62cc07d..fe40358 100644 --- a/target-arm/op_helper.c +++ b/target-arm/op_helper.c @@ -369,6 +369,11 @@ void HELPER(msr_i_pstate)(CPUARMState *env, uint32_t op, uint32_t imm) } } +void HELPER(clear_pstate_ss)(CPUARMState *env) +{ + env->pstate &= ~PSTATE_SS; +} + void HELPER(exception_return)(CPUARMState *env) { int cur_el = arm_current_pl(env); diff --git a/target-arm/translate-a64.c b/target-arm/translate-a64.c index aa731bf..21cb12b 100644 --- a/target-arm/translate-a64.c +++ b/target-arm/translate-a64.c @@ -203,10 +203,39 @@ static void gen_exception_insn(DisasContext *s, int offset, int excp, s->is_jmp = DISAS_EXC; } +static void gen_ss_advance(DisasContext *s) +{ + /* If the singlestep state is Active-not-pending, advance to + * Active-pending. + */ + if (s->ss_active) { + s->pstate_ss = 0; + gen_helper_clear_pstate_ss(cpu_env); + } +} + +static void gen_step_complete_exception(DisasContext *s) +{ + /* We just completed step of an insn. 
Move from Active-not-pending + * to Active-pending, and then also take the swstep exception. + * This corresponds to making the (IMPDEF) choice to prioritize + * swstep exceptions over asynchronous exceptions taken to an exception + * level where debug is disabled. This choice has the advantage that + * we do not need to maintain internal state corresponding to the + * ISV/EX syndrome bits between completion of the step and generation + * of the exception, and our syndrome information is always correct. + */ + gen_ss_advance(s); + gen_exception(EXCP_UDEF, syn_swstep(s->ss_same_el, 1, s->is_ldex)); + s->is_jmp = DISAS_EXC; +} + static inline bool use_goto_tb(DisasContext *s, int n, uint64_t dest) { - /* No direct tb linking with singlestep or deterministic io */ - if (s->singlestep_enabled || (s->tb->cflags & CF_LAST_IO)) { + /* No direct tb linking with singlestep (either QEMU's or the ARM + * debug architecture kind) or deterministic io + */ + if (s->singlestep_enabled || s->ss_active || (s->tb->cflags & CF_LAST_IO)) { return false; } @@ -230,7 +259,9 @@ static inline void gen_goto_tb(DisasContext *s, int n, uint64_t dest) s->is_jmp = DISAS_TB_JUMP; } else { gen_a64_set_pc_im(dest); - if (s->singlestep_enabled) { + if (s->ss_active) { + gen_step_complete_exception(s); + } else if (s->singlestep_enabled) { gen_exception_internal(EXCP_DEBUG); } else { tcg_gen_exit_tb(0); @@ -1447,6 +1478,12 @@ static void disas_exc(DisasContext *s, uint32_t insn) unallocated_encoding(s); break; } + /* For SVC, HVC and SMC we advance the single-step state + * machine before taking the exception. This is architecturally + * mandated, to ensure that single-stepping a system call + * instruction works properly. 
+ */ + gen_ss_advance(s); gen_exception_insn(s, 0, EXCP_SWI, syn_aa64_svc(imm16)); break; case 1: @@ -1727,6 +1764,7 @@ static void disas_ldst_excl(DisasContext *s, uint32_t insn) if (is_excl) { if (!is_store) { + s->is_ldex = true; gen_load_exclusive(s, rt, rt2, tcg_addr, size, is_pair); } else { gen_store_exclusive(s, rs, rt, rt2, tcg_addr, size, is_pair); @@ -10867,6 +10905,26 @@ void gen_intermediate_code_internal_a64(ARMCPU *cpu, dc->current_pl = arm_current_pl(env); dc->features = env->features; + /* Single step state. The code-generation logic here is: + * SS_ACTIVE == 0: + * generate code with no special handling for single-stepping (except + * that anything that can make us go to SS_ACTIVE == 1 must end the TB; + * this happens anyway because those changes are all system register or + * PSTATE writes). + * SS_ACTIVE == 1, PSTATE.SS == 1: (active-not-pending) + * emit code for one insn + * emit code to clear PSTATE.SS + * emit code to generate software step exception for completed step + * end TB (as usual for having generated an exception) + * SS_ACTIVE == 1, PSTATE.SS == 0: (active-pending) + * emit code to generate a software step exception + * end the TB + */ + dc->ss_active = ARM_TBFLAG_AA64_SS_ACTIVE(tb->flags); + dc->pstate_ss = ARM_TBFLAG_AA64_PSTATE_SS(tb->flags); + dc->is_ldex = false; + dc->ss_same_el = (arm_debug_target_el(env) == dc->current_pl); + init_tmp_a64_array(dc); next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE; @@ -10915,6 +10973,23 @@ void gen_intermediate_code_internal_a64(ARMCPU *cpu, tcg_gen_debug_insn_start(dc->pc); } + if (dc->ss_active && !dc->pstate_ss) { + /* Singlestep state is Active-pending. 
+ * If we're in this state at the start of a TB then either + * a) we just took an exception to an EL which is being debugged + * and this is the first insn in the exception handler + * b) debug exceptions were masked and we just unmasked them + * without changing EL (eg by clearing PSTATE.D) + * In either case we're going to take a swstep exception in the + * "did not step an insn" case, and so the syndrome ISV and EX + * bits should be zero. + */ + assert(num_insns == 0); + gen_exception(EXCP_UDEF, syn_swstep(dc->ss_same_el, 0, 0)); + dc->is_jmp = DISAS_EXC; + break; + } + disas_a64_insn(env, dc); if (tcg_check_temp_count()) { @@ -10931,6 +11006,7 @@ void gen_intermediate_code_internal_a64(ARMCPU *cpu, } while (!dc->is_jmp && tcg_ctx.gen_opc_ptr < gen_opc_end && !cs->singlestep_enabled && !singlestep && + !dc->ss_active && dc->pc < next_page_start && num_insns < max_insns); @@ -10938,7 +11014,8 @@ void gen_intermediate_code_internal_a64(ARMCPU *cpu, gen_io_end(); } - if (unlikely(cs->singlestep_enabled) && dc->is_jmp != DISAS_EXC) { + if (unlikely(cs->singlestep_enabled || dc->ss_active) + && dc->is_jmp != DISAS_EXC) { /* Note that this means single stepping WFI doesn't halt the CPU. * For conditional branch insns this is harmless unreachable code as * gen_goto_tb() has already handled emitting the debug exception @@ -10948,7 +11025,11 @@ void gen_intermediate_code_internal_a64(ARMCPU *cpu, if (dc->is_jmp != DISAS_JUMP) { gen_a64_set_pc_im(dc->pc); } - gen_exception_internal(EXCP_DEBUG); + if (cs->singlestep_enabled) { + gen_exception_internal(EXCP_DEBUG); + } else { + gen_step_complete_exception(dc); + } } else { switch (dc->is_jmp) { case DISAS_NEXT: diff --git a/target-arm/translate.h b/target-arm/translate.h index 31a0104..b90d275 100644 --- a/target-arm/translate.h +++ b/target-arm/translate.h @@ -40,6 +40,18 @@ typedef struct DisasContext { * that it is set at the point where we actually touch the FP regs. 
*/ bool fp_access_checked; + /* ARMv8 single-step state (this is distinct from the QEMU gdbstub + * single-step support). + */ + bool ss_active; + bool pstate_ss; + /* True if the insn just emitted was a load-exclusive instruction + * (necessary for syndrome information for single step exceptions), + * ie A64 LDX*, LDAX*, A32/T32 LDREX*, LDAEX*. + */ + bool is_ldex; + /* True if a single-step exception will be taken to the current EL */ + bool ss_same_el; #define TMP_A64_MAX 16 int tmp_a64_count; TCGv_i64 tmp_a64[TMP_A64_MAX];
Implement ARMv8 software single-step handling for A64 code: correctly update the single-step state machine and generate debug exceptions when stepping A64 code. This patch has no behavioural change since MDSCR_EL1.SS can't be set by the guest yet. Signed-off-by: Peter Maydell <peter.maydell@linaro.org> --- target-arm/cpu.h | 21 +++++++++++ target-arm/helper.h | 1 + target-arm/internals.h | 6 +++ target-arm/op_helper.c | 5 +++ target-arm/translate-a64.c | 91 +++++++++++++++++++++++++++++++++++++++++++--- target-arm/translate.h | 12 ++++++ 6 files changed, 131 insertions(+), 5 deletions(-)