Message ID: 20190616210546.17221-1-richard.henderson@linaro.org
State: Superseded
Series: tcg: Fix mmap lock assert on translation failure
On Sun, 16 Jun 2019 at 23:05, Richard Henderson <richard.henderson@linaro.org> wrote:
>
> Check page flags before letting an invalid pc cause a SIGSEGV.
>
> Prepare for eventually validating PROT_EXEC. The current wrinkle is
> that we have a problem with our implementation of signals. We should
> be using a vdso like the kernel, but we instead put the trampoline on
> the stack. In the meantime, let PROT_READ match PROT_EXEC.

Thanks for the quick fix, I can confirm it works for my testcase.

Christophe

> Fixes: https://bugs.launchpad.net/qemu/+bug/1832353
> Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
> ---
>  include/exec/cpu-all.h                    |  1 +
>  include/exec/cpu_ldst_useronly_template.h |  8 +++++--
>  accel/tcg/translate-all.c                 | 29 +++++++++++++++++++++++
>  3 files changed, 36 insertions(+), 2 deletions(-)
>
> diff --git a/include/exec/cpu-all.h b/include/exec/cpu-all.h
> index 536ea58f81..58b8915617 100644
> --- a/include/exec/cpu-all.h
> +++ b/include/exec/cpu-all.h
> @@ -259,6 +259,7 @@ int walk_memory_regions(void *, walk_memory_regions_fn);
>  int page_get_flags(target_ulong address);
>  void page_set_flags(target_ulong start, target_ulong end, int flags);
>  int page_check_range(target_ulong start, target_ulong len, int flags);
> +void validate_exec_access(CPUArchState *env, target_ulong s, target_ulong l);
>  #endif
>
>  CPUArchState *cpu_copy(CPUArchState *env);
> diff --git a/include/exec/cpu_ldst_useronly_template.h b/include/exec/cpu_ldst_useronly_template.h
> index bc45e2b8d4..f095415149 100644
> --- a/include/exec/cpu_ldst_useronly_template.h
> +++ b/include/exec/cpu_ldst_useronly_template.h
> @@ -64,7 +64,9 @@
>  static inline RES_TYPE
>  glue(glue(cpu_ld, USUFFIX), MEMSUFFIX)(CPUArchState *env, abi_ptr ptr)
>  {
> -#if !defined(CODE_ACCESS)
> +#ifdef CODE_ACCESS
> +    validate_exec_access(env, ptr, DATA_SIZE);
> +#else
>      trace_guest_mem_before_exec(
>          env_cpu(env), ptr,
>          trace_mem_build_info(SHIFT, false, MO_TE, false));
> @@ -88,7 +90,9 @@ glue(glue(glue(cpu_ld, USUFFIX), MEMSUFFIX), _ra)(CPUArchState *env,
>  static inline int
>  glue(glue(cpu_lds, SUFFIX), MEMSUFFIX)(CPUArchState *env, abi_ptr ptr)
>  {
> -#if !defined(CODE_ACCESS)
> +#ifdef CODE_ACCESS
> +    validate_exec_access(env, ptr, DATA_SIZE);
> +#else
>      trace_guest_mem_before_exec(
>          env_cpu(env), ptr,
>          trace_mem_build_info(SHIFT, true, MO_TE, false));
> diff --git a/accel/tcg/translate-all.c b/accel/tcg/translate-all.c
> index 5d1e08b169..1d4a8a260f 100644
> --- a/accel/tcg/translate-all.c
> +++ b/accel/tcg/translate-all.c
> @@ -2600,10 +2600,39 @@ int page_check_range(target_ulong start, target_ulong len, int flags)
>                  }
>              }
>          }
> +        /*
> +         * FIXME: We place the signal trampoline on the stack,
> +         * even when the guest expects that to be in the vdso.
> +         * Until we fix that, allow execute on any readable page.
> +         */
> +        if ((flags & PAGE_EXEC) && !(p->flags & (PAGE_EXEC | PAGE_READ))) {
> +            return -1;
> +        }
>      }
>      return 0;
>  }
>
> +/*
> + * Called for each code read, longjmp out to issue SIGSEGV if the page(s)
> + * do not have execute access.
> + */
> +void validate_exec_access(CPUArchState *env,
> +                          target_ulong ptr, target_ulong len)
> +{
> +    if (page_check_range(ptr, len, PAGE_EXEC) < 0) {
> +        CPUState *cs = env_cpu(env);
> +        CPUClass *cc = CPU_GET_CLASS(cs);
> +
> +        /* Like tb_gen_code, release the memory lock before cpu_loop_exit. */
> +        assert_memory_lock();
> +        mmap_unlock();
> +
> +        /* This is user-only. The target must raise an exception. */
> +        cc->tlb_fill(cs, ptr, 0, MMU_INST_FETCH, MMU_USER_IDX, false, 0);
> +        g_assert_not_reached();
> +    }
> +}
> +
>  /* called from signal handler: invalidate the code and unprotect the
>   * page. Return 0 if the fault was not handled, 1 if it was handled,
>   * and 2 if it was handled but the caller must cause the TB to be
> --
> 2.17.1
>
Richard Henderson <richard.henderson@linaro.org> writes:

> Check page flags before letting an invalid pc cause a SIGSEGV.
>
> Prepare for eventually validating PROT_EXEC. The current wrinkle is
> that we have a problem with our implementation of signals. We should
> be using a vdso like the kernel, but we instead put the trampoline on
> the stack. In the meantime, let PROT_READ match PROT_EXEC.

We can come up with a test case for this, right? Would it be triggered
by having:

    __attribute__((aligned(PAGE_SIZE)))
    void some_func(void) {
        /* does something */
    }

    __attribute__((aligned(PAGE_SIZE)))
    ... rest of code ...

    main () {
        mmap(&some_func, PAGE_SIZE, PROT_READ, MAP_ANONYMOUS, 0, 0);
        some_func();  /* causes SEGV */
        mmap(&some_func, PAGE_SIZE, PROT_READ|PROT_EXEC, MAP_ANONYMOUS, 0, 0);
        some_func();  /* works */
    }

Or is it trickier to mess with your own mapped memory?

>
> Fixes: https://bugs.launchpad.net/qemu/+bug/1832353
> Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
> ---
>  include/exec/cpu-all.h                    |  1 +
>  include/exec/cpu_ldst_useronly_template.h |  8 +++++--
>  accel/tcg/translate-all.c                 | 29 +++++++++++++++++++++++
>  3 files changed, 36 insertions(+), 2 deletions(-)
>
> diff --git a/include/exec/cpu-all.h b/include/exec/cpu-all.h
> index 536ea58f81..58b8915617 100644
> --- a/include/exec/cpu-all.h
> +++ b/include/exec/cpu-all.h
> @@ -259,6 +259,7 @@ int walk_memory_regions(void *, walk_memory_regions_fn);
>  int page_get_flags(target_ulong address);
>  void page_set_flags(target_ulong start, target_ulong end, int flags);
>  int page_check_range(target_ulong start, target_ulong len, int flags);
> +void validate_exec_access(CPUArchState *env, target_ulong s, target_ulong l);
>  #endif
>
>  CPUArchState *cpu_copy(CPUArchState *env);
> diff --git a/include/exec/cpu_ldst_useronly_template.h b/include/exec/cpu_ldst_useronly_template.h
> index bc45e2b8d4..f095415149 100644
> --- a/include/exec/cpu_ldst_useronly_template.h
> +++ b/include/exec/cpu_ldst_useronly_template.h
> @@ -64,7 +64,9 @@
>  static inline RES_TYPE
>  glue(glue(cpu_ld, USUFFIX), MEMSUFFIX)(CPUArchState *env, abi_ptr ptr)
>  {
> -#if !defined(CODE_ACCESS)
> +#ifdef CODE_ACCESS
> +    validate_exec_access(env, ptr, DATA_SIZE);
> +#else
>      trace_guest_mem_before_exec(
>          env_cpu(env), ptr,
>          trace_mem_build_info(SHIFT, false, MO_TE, false));
> @@ -88,7 +90,9 @@ glue(glue(glue(cpu_ld, USUFFIX), MEMSUFFIX), _ra)(CPUArchState *env,
>  static inline int
>  glue(glue(cpu_lds, SUFFIX), MEMSUFFIX)(CPUArchState *env, abi_ptr ptr)
>  {
> -#if !defined(CODE_ACCESS)
> +#ifdef CODE_ACCESS
> +    validate_exec_access(env, ptr, DATA_SIZE);
> +#else
>      trace_guest_mem_before_exec(
>          env_cpu(env), ptr,
>          trace_mem_build_info(SHIFT, true, MO_TE, false));
> diff --git a/accel/tcg/translate-all.c b/accel/tcg/translate-all.c
> index 5d1e08b169..1d4a8a260f 100644
> --- a/accel/tcg/translate-all.c
> +++ b/accel/tcg/translate-all.c
> @@ -2600,10 +2600,39 @@ int page_check_range(target_ulong start, target_ulong len, int flags)
>                  }
>              }
>          }
> +        /*
> +         * FIXME: We place the signal trampoline on the stack,
> +         * even when the guest expects that to be in the vdso.
> +         * Until we fix that, allow execute on any readable page.
> +         */
> +        if ((flags & PAGE_EXEC) && !(p->flags & (PAGE_EXEC | PAGE_READ))) {
> +            return -1;
> +        }
>      }
>      return 0;
>  }
>
> +/*
> + * Called for each code read, longjmp out to issue SIGSEGV if the page(s)
> + * do not have execute access.
> + */
> +void validate_exec_access(CPUArchState *env,
> +                          target_ulong ptr, target_ulong len)
> +{
> +    if (page_check_range(ptr, len, PAGE_EXEC) < 0) {
> +        CPUState *cs = env_cpu(env);
> +        CPUClass *cc = CPU_GET_CLASS(cs);
> +
> +        /* Like tb_gen_code, release the memory lock before cpu_loop_exit. */
> +        assert_memory_lock();
> +        mmap_unlock();
> +
> +        /* This is user-only. The target must raise an exception. */
> +        cc->tlb_fill(cs, ptr, 0, MMU_INST_FETCH, MMU_USER_IDX, false, 0);
> +        g_assert_not_reached();
> +    }
> +}
> +
>  /* called from signal handler: invalidate the code and unprotect the
>   * page. Return 0 if the fault was not handled, 1 if it was handled,
>   * and 2 if it was handled but the caller must cause the TB to be

--
Alex Bennée
On 6/17/19 9:18 AM, Alex Bennée wrote:
>
> Richard Henderson <richard.henderson@linaro.org> writes:
>
>> Check page flags before letting an invalid pc cause a SIGSEGV.
>>
>> Prepare for eventually validating PROT_EXEC. The current wrinkle is
>> that we have a problem with our implementation of signals. We should
>> be using a vdso like the kernel, but we instead put the trampoline on
>> the stack. In the meantime, let PROT_READ match PROT_EXEC.
>
> We can come up with a test case for this, right? Would it be triggered
> by having:
>
>     __attribute__((aligned(PAGE_SIZE)))
>     void some_func(void) {
>         /* does something */
>     }
>
>     __attribute__((aligned(PAGE_SIZE)))
>     ... rest of code ...
>
>     main () {
>         mmap(&some_func, PAGE_SIZE, PROT_READ, MAP_ANONYMOUS, 0, 0);
>         some_func();  /* causes SEGV */
>         mmap(&some_func, PAGE_SIZE, PROT_READ|PROT_EXEC, MAP_ANONYMOUS, 0, 0);
>         some_func();  /* works */
>     }
>
> Or is it trickier to mess with your own mapped memory?

It's trickier than that, but I do have a simple test case.

https://bugs.launchpad.net/qemu/+bug/1832916

But fixing that, as I mention above, makes signal trampolines fail.

Or did you mean Christophe's failure? That's easier -- just make a NULL
function call.


r~
Richard Henderson <richard.henderson@linaro.org> writes:

> On 6/17/19 9:18 AM, Alex Bennée wrote:
>>
>> Richard Henderson <richard.henderson@linaro.org> writes:
>>
>>> Check page flags before letting an invalid pc cause a SIGSEGV.
>>>
>>> Prepare for eventually validating PROT_EXEC. The current wrinkle is
>>> that we have a problem with our implementation of signals. We should
>>> be using a vdso like the kernel, but we instead put the trampoline on
>>> the stack. In the meantime, let PROT_READ match PROT_EXEC.
>>
>> We can come up with a test case for this, right? Would it be triggered
>> by having:
>>
>>     __attribute__((aligned(PAGE_SIZE)))
>>     void some_func(void) {
>>         /* does something */
>>     }
>>
>>     __attribute__((aligned(PAGE_SIZE)))
>>     ... rest of code ...
>>
>>     main () {
>>         mmap(&some_func, PAGE_SIZE, PROT_READ, MAP_ANONYMOUS, 0, 0);
>>         some_func();  /* causes SEGV */
>>         mmap(&some_func, PAGE_SIZE, PROT_READ|PROT_EXEC, MAP_ANONYMOUS, 0, 0);
>>         some_func();  /* works */
>>     }
>>
>> Or is it trickier to mess with your own mapped memory?
>
> It's trickier than that, but I do have a simple test case.
>
> https://bugs.launchpad.net/qemu/+bug/1832916
>
> But fixing that, as I mention above, makes signal trampolines fail.

Ahh, I missed that. I guess we add it once we have the full solution.

> Or did you mean Christophe's failure? That's easier -- just make a NULL
> function call.
>
> r~

--
Alex Bennée
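For reference, the NULL-call reproducer Richard describes can be as
small as the sketch below. This is an illustration, not the testcase
attached to either bug report; it assumes a static qemu-user guest
build (for example with arm-linux-gnueabihf-gcc -static). The volatile
qualifier keeps the compiler from optimizing the undefined call away.

    int main(void)
    {
        /* No page is mapped at address 0 in a normal guest image, so
         * fetching code from there should deliver SIGSEGV to the
         * guest instead of tripping QEMU's mmap-lock assertion. */
        void (*volatile fn)(void) = 0;
        fn();
        return 0;
    }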
diff --git a/include/exec/cpu-all.h b/include/exec/cpu-all.h
index 536ea58f81..58b8915617 100644
--- a/include/exec/cpu-all.h
+++ b/include/exec/cpu-all.h
@@ -259,6 +259,7 @@ int walk_memory_regions(void *, walk_memory_regions_fn);
 int page_get_flags(target_ulong address);
 void page_set_flags(target_ulong start, target_ulong end, int flags);
 int page_check_range(target_ulong start, target_ulong len, int flags);
+void validate_exec_access(CPUArchState *env, target_ulong s, target_ulong l);
 #endif

 CPUArchState *cpu_copy(CPUArchState *env);
diff --git a/include/exec/cpu_ldst_useronly_template.h b/include/exec/cpu_ldst_useronly_template.h
index bc45e2b8d4..f095415149 100644
--- a/include/exec/cpu_ldst_useronly_template.h
+++ b/include/exec/cpu_ldst_useronly_template.h
@@ -64,7 +64,9 @@
 static inline RES_TYPE
 glue(glue(cpu_ld, USUFFIX), MEMSUFFIX)(CPUArchState *env, abi_ptr ptr)
 {
-#if !defined(CODE_ACCESS)
+#ifdef CODE_ACCESS
+    validate_exec_access(env, ptr, DATA_SIZE);
+#else
     trace_guest_mem_before_exec(
         env_cpu(env), ptr,
         trace_mem_build_info(SHIFT, false, MO_TE, false));
@@ -88,7 +90,9 @@ glue(glue(glue(cpu_ld, USUFFIX), MEMSUFFIX), _ra)(CPUArchState *env,
 static inline int
 glue(glue(cpu_lds, SUFFIX), MEMSUFFIX)(CPUArchState *env, abi_ptr ptr)
 {
-#if !defined(CODE_ACCESS)
+#ifdef CODE_ACCESS
+    validate_exec_access(env, ptr, DATA_SIZE);
+#else
     trace_guest_mem_before_exec(
         env_cpu(env), ptr,
         trace_mem_build_info(SHIFT, true, MO_TE, false));
diff --git a/accel/tcg/translate-all.c b/accel/tcg/translate-all.c
index 5d1e08b169..1d4a8a260f 100644
--- a/accel/tcg/translate-all.c
+++ b/accel/tcg/translate-all.c
@@ -2600,10 +2600,39 @@ int page_check_range(target_ulong start, target_ulong len, int flags)
                 }
             }
         }
+        /*
+         * FIXME: We place the signal trampoline on the stack,
+         * even when the guest expects that to be in the vdso.
+         * Until we fix that, allow execute on any readable page.
+         */
+        if ((flags & PAGE_EXEC) && !(p->flags & (PAGE_EXEC | PAGE_READ))) {
+            return -1;
+        }
     }
     return 0;
 }

+/*
+ * Called for each code read, longjmp out to issue SIGSEGV if the page(s)
+ * do not have execute access.
+ */
+void validate_exec_access(CPUArchState *env,
+                          target_ulong ptr, target_ulong len)
+{
+    if (page_check_range(ptr, len, PAGE_EXEC) < 0) {
+        CPUState *cs = env_cpu(env);
+        CPUClass *cc = CPU_GET_CLASS(cs);
+
+        /* Like tb_gen_code, release the memory lock before cpu_loop_exit. */
+        assert_memory_lock();
+        mmap_unlock();
+
+        /* This is user-only. The target must raise an exception. */
+        cc->tlb_fill(cs, ptr, 0, MMU_INST_FETCH, MMU_USER_IDX, false, 0);
+        g_assert_not_reached();
+    }
+}
+
 /* called from signal handler: invalidate the code and unprotect the
  * page. Return 0 if the fault was not handled, 1 if it was handled,
  * and 2 if it was handled but the caller must cause the TB to be
Check page flags before letting an invalid pc cause a SIGSEGV.

Prepare for eventually validating PROT_EXEC. The current wrinkle is
that we have a problem with our implementation of signals. We should
be using a vdso like the kernel, but we instead put the trampoline on
the stack. In the meantime, let PROT_READ match PROT_EXEC.

Fixes: https://bugs.launchpad.net/qemu/+bug/1832353
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 include/exec/cpu-all.h                    |  1 +
 include/exec/cpu_ldst_useronly_template.h |  8 +++++--
 accel/tcg/translate-all.c                 | 29 +++++++++++++++++++++++
 3 files changed, 36 insertions(+), 2 deletions(-)

--
2.17.1
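To make the new check concrete: with this patch, each code fetch goes
through validate_exec_access(), which fails for a page that has neither
PAGE_EXEC nor (because of the signal-trampoline FIXME) PAGE_READ, and
then delivers SIGSEGV via cc->tlb_fill. A guest program exercising that
path might look like the following sketch; it is an illustration under
those assumptions, not a testcase shipped with the patch.

    #include <sys/mman.h>
    #include <unistd.h>

    int main(void)
    {
        long pagesize = sysconf(_SC_PAGESIZE);

        /* A PROT_NONE page has neither PAGE_EXEC nor the PAGE_READ
         * fallback, so a fetch from it fails page_check_range(). */
        void *p = mmap(NULL, pagesize, PROT_NONE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (p == MAP_FAILED) {
            return 1;
        }

        /* Jumping here should now raise SIGSEGV in the guest rather
         * than assert inside QEMU's translator. */
        ((void (*)(void))p)();
        return 0;
    }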