| Message ID | 20170201150553.9381-26-alex.bennee@linaro.org |
|---|---|
| State | New |
| Series | MTTCG Base enabling patches with ARM enablement |
On 1 February 2017 at 15:05, Alex Bennée <alex.bennee@linaro.org> wrote:
> This enables the multi-threaded system emulation by default for ARMv7
> and ARMv8 guests using the x86_64 TCG backend. This is because on the
> guest side:
>
>   - The ARM translate.c/translate-64.c have been converted to
>       - use MTTCG safe atomic primitives
>       - emit the appropriate barrier ops
>   - The ARM machine has been updated to
>       - hold the BQL when modifying shared cross-vCPU state
>       - defer cpu_reset to async safe work
>
> All the host backends support the barrier and atomic primitives but
> need to provide same-or-better support for normal load/store
> operations.

> diff --git a/cpus.c b/cpus.c
> index e3d9f3fe21..e1b82bcd49 100644
> --- a/cpus.c
> +++ b/cpus.c
> @@ -176,8 +176,8 @@ bool mttcg_enabled;
>
>  static bool check_tcg_memory_orders_compatible(void)
>  {
> -#if defined(TCG_DEFAULT_MO) && defined(TCG_TARGET_DEFAULT_MO)
> -    return (TCG_DEFAULT_MO & ~TCG_TARGET_DEFAULT_MO) == 0;
> +#if defined(TCG_GUEST_DEFAULT_MO) && defined(TCG_TARGET_DEFAULT_MO)
> +    return (TCG_GUEST_DEFAULT_MO & ~TCG_TARGET_DEFAULT_MO) == 0;

This looks like maybe something that should have been squashed
into an earlier part of the patchset?

>  #else
>      return false;
>  #endif

> diff --git a/target/arm/cpu.h b/target/arm/cpu.h
> index a3c4d07817..0ef31db3e0 100644
> --- a/target/arm/cpu.h
> +++ b/target/arm/cpu.h
> @@ -30,6 +30,9 @@
>  # define TARGET_LONG_BITS 32
>  #endif
>
> +/* ARM processors have a weak memory model */
> +#define TCG_GUEST_DEFAULT_MO (0)

Do you need the () ?

That said, if Richard is happy with turning this on then
I'm happy to do so.

Acked-by: Peter Maydell <peter.maydell@linaro.org>

thanks
-- PMM
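For readers skimming the review: the cpus.c check quoted above treats the memory-order macros as bitmasks of required orderings, and a guest/host pairing is only MTTCG-safe when every ordering the guest architecture demands is already guaranteed by the host backend for plain loads and stores. Below is a minimal standalone sketch of that subset test; the helper name `memory_orders_compatible` and the flag values are illustrative stand-ins modelled on the TCG_MO_* bits in tcg-mo.h, not QEMU's actual definitions.

```c
#include <stdbool.h>
#include <stdio.h>

/* Illustrative ordering bits, modelled on the TCG_MO_* flags from
 * tcg-mo.h (values here are for this sketch only).  Each bit set
 * means "accesses of this pair must not be visibly reordered". */
#define MO_LD_LD 0x01   /* load  followed by load  */
#define MO_ST_LD 0x02   /* store followed by load  */
#define MO_LD_ST 0x04   /* load  followed by store */
#define MO_ST_ST 0x08   /* store followed by store */
#define MO_ALL   (MO_LD_LD | MO_ST_LD | MO_LD_ST | MO_ST_ST)

/* Safe only if every ordering the guest requires is already provided
 * by the host backend for plain loads/stores, i.e. the guest's set is
 * a subset of the host's set. */
static bool memory_orders_compatible(unsigned guest_mo, unsigned host_mo)
{
    return (guest_mo & ~host_mo) == 0;
}

int main(void)
{
    /* Hypothetical strongly ordered guest on a weakly ordered host:
     * the guest needs orderings the host does not give, so the check
     * fails and MTTCG would stay off by default. */
    unsigned tso_guest = MO_LD_LD | MO_LD_ST | MO_ST_ST;
    unsigned weak_host = 0;

    printf("TSO guest on weak host: %s\n",
           memory_orders_compatible(tso_guest, weak_host)
           ? "compatible" : "incompatible");
    return 0;
}
```

Clearing the host's bits out of the guest's mask must leave nothing behind; any surviving bit is an ordering the guest relies on that the host cannot give without extra barriers.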
Peter Maydell <peter.maydell@linaro.org> writes:

> On 1 February 2017 at 15:05, Alex Bennée <alex.bennee@linaro.org> wrote:
>> This enables the multi-threaded system emulation by default for ARMv7
>> and ARMv8 guests using the x86_64 TCG backend. This is because on the
>> guest side:
>>
>>   - The ARM translate.c/translate-64.c have been converted to
>>       - use MTTCG safe atomic primitives
>>       - emit the appropriate barrier ops
>>   - The ARM machine has been updated to
>>       - hold the BQL when modifying shared cross-vCPU state
>>       - defer cpu_reset to async safe work
>>
>> All the host backends support the barrier and atomic primitives but
>> need to provide same-or-better support for normal load/store
>> operations.
>
>> diff --git a/cpus.c b/cpus.c
>> index e3d9f3fe21..e1b82bcd49 100644
>> --- a/cpus.c
>> +++ b/cpus.c
>> @@ -176,8 +176,8 @@ bool mttcg_enabled;
>>
>>  static bool check_tcg_memory_orders_compatible(void)
>>  {
>> -#if defined(TCG_DEFAULT_MO) && defined(TCG_TARGET_DEFAULT_MO)
>> -    return (TCG_DEFAULT_MO & ~TCG_TARGET_DEFAULT_MO) == 0;
>> +#if defined(TCG_GUEST_DEFAULT_MO) && defined(TCG_TARGET_DEFAULT_MO)
>> +    return (TCG_GUEST_DEFAULT_MO & ~TCG_TARGET_DEFAULT_MO) == 0;
>
> This looks like maybe something that should have been squashed
> into an earlier part of the patchset?

Good catch. I shall fix that up.

>
>>  #else
>>      return false;
>>  #endif

>> diff --git a/target/arm/cpu.h b/target/arm/cpu.h
>> index a3c4d07817..0ef31db3e0 100644
>> --- a/target/arm/cpu.h
>> +++ b/target/arm/cpu.h
>> @@ -30,6 +30,9 @@
>>  # define TARGET_LONG_BITS 32
>>  #endif
>>
>> +/* ARM processors have a weak memory model */
>> +#define TCG_GUEST_DEFAULT_MO (0)
>
> Do you need the () ?
>
>
> That said, if Richard is happy with turning this on then
> I'm happy to do so.
>
> Acked-by: Peter Maydell <peter.maydell@linaro.org>
>
> thanks
> -- PMM

--
Alex Bennée
diff --git a/configure b/configure
index 86fd833feb..9f2a665f5b 100755
--- a/configure
+++ b/configure
@@ -5879,6 +5879,7 @@ mkdir -p $target_dir
 echo "# Automatically generated by configure - do not modify" > $config_target_mak
 
 bflt="no"
+mttcg="no"
 interp_prefix1=$(echo "$interp_prefix" | sed "s/%M/$target_name/g")
 gdb_xml_files=""
 
@@ -5897,11 +5898,13 @@ case "$target_name" in
   arm|armeb)
     TARGET_ARCH=arm
     bflt="yes"
+    mttcg="yes"
     gdb_xml_files="arm-core.xml arm-vfp.xml arm-vfp3.xml arm-neon.xml"
   ;;
   aarch64)
     TARGET_BASE_ARCH=arm
     bflt="yes"
+    mttcg="yes"
     gdb_xml_files="aarch64-core.xml aarch64-fpu.xml arm-core.xml arm-vfp.xml arm-vfp3.xml arm-neon.xml"
   ;;
   cris)
@@ -6066,6 +6069,9 @@ if test "$target_bigendian" = "yes" ; then
 fi
 if test "$target_softmmu" = "yes" ; then
   echo "CONFIG_SOFTMMU=y" >> $config_target_mak
+  if test "$mttcg" = "yes" ; then
+    echo "TARGET_SUPPORTS_MTTCG=y" >> $config_target_mak
+  fi
 fi
 if test "$target_user_only" = "yes" ; then
   echo "CONFIG_USER_ONLY=y" >> $config_target_mak
diff --git a/cpus.c b/cpus.c
index e3d9f3fe21..e1b82bcd49 100644
--- a/cpus.c
+++ b/cpus.c
@@ -176,8 +176,8 @@ bool mttcg_enabled;
 
 static bool check_tcg_memory_orders_compatible(void)
 {
-#if defined(TCG_DEFAULT_MO) && defined(TCG_TARGET_DEFAULT_MO)
-    return (TCG_DEFAULT_MO & ~TCG_TARGET_DEFAULT_MO) == 0;
+#if defined(TCG_GUEST_DEFAULT_MO) && defined(TCG_TARGET_DEFAULT_MO)
+    return (TCG_GUEST_DEFAULT_MO & ~TCG_TARGET_DEFAULT_MO) == 0;
 #else
     return false;
 #endif
diff --git a/target/arm/cpu.h b/target/arm/cpu.h
index a3c4d07817..0ef31db3e0 100644
--- a/target/arm/cpu.h
+++ b/target/arm/cpu.h
@@ -30,6 +30,9 @@
 # define TARGET_LONG_BITS 32
 #endif
 
+/* ARM processors have a weak memory model */
+#define TCG_GUEST_DEFAULT_MO (0)
+
 #define CPUArchState struct CPUARMState
 
 #include "qemu-common.h"
diff --git a/tcg/i386/tcg-target.h b/tcg/i386/tcg-target.h
index 21d96ec35c..4275787db9 100644
--- a/tcg/i386/tcg-target.h
+++ b/tcg/i386/tcg-target.h
@@ -165,4 +165,15 @@ static inline void flush_icache_range(uintptr_t start, uintptr_t stop)
 {
 }
 
+/* This defines the natural memory order supported by this
+ * architecture before guarantees made by various barrier
+ * instructions.
+ *
+ * The x86 has a pretty strong memory ordering which only really
+ * allows for some stores to be re-ordered after loads.
+ */
+#include "tcg-mo.h"
+
+#define TCG_TARGET_DEFAULT_MO (TCG_MO_ALL & ~TCG_MO_ST_LD)
+
 #endif
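Taken together, the cpu.h and tcg-target.h hunks above are what make the check pass for ARM guests on an x86 host: the guest declares no mandatory orderings for ordinary loads and stores, while the backend declares everything except store-followed-by-load reordering. A quick sketch of how that combination evaluates, again using illustrative stand-in values rather than the real TCG_MO_* constants:

```c
#include <assert.h>

/* Illustrative stand-ins for the TCG_MO_* bits from tcg-mo.h; the
 * real values live in the QEMU tree. */
enum {
    MO_LD_LD = 0x01,
    MO_ST_LD = 0x02,
    MO_LD_ST = 0x04,
    MO_ST_ST = 0x08,
    MO_ALL   = MO_LD_LD | MO_ST_LD | MO_LD_ST | MO_ST_ST,
};

int main(void)
{
    /* ARM guest: TCG_GUEST_DEFAULT_MO is 0, no mandatory orderings. */
    unsigned guest_mo = 0;

    /* x86 backend: TCG_TARGET_DEFAULT_MO keeps every ordering except
     * store-followed-by-load, i.e. roughly TSO. */
    unsigned host_mo = MO_ALL & ~MO_ST_LD;

    /* Same test as check_tcg_memory_orders_compatible(): nothing the
     * guest needs is missing from the host, so MTTCG can default on. */
    assert((guest_mo & ~host_mo) == 0);
    return 0;
}
```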