This partially reverts commit 5bdcd510c2ac9efaf55c4cbd8d46421d8e2320cd.

The in-kernel workarounds will be replaced with GCC's new "asm inline"
syntax.

Only the asm_volatile_goto parts were reverted. The other cleanups
(removal of unneeded #error, replacement of STATIC_JUMP_IF_TRUE/FALSE
with STATIC_JUMP_IF_NOP/JMP) are kept.

Signed-off-by: Masahiro Yamada <yamada.masahiro@socionext.com>
---
 arch/x86/include/asm/jump_label.h | 22 +++++++++++++++++-----
 arch/x86/kernel/macros.S          |  1 -
 2 files changed, 17 insertions(+), 6 deletions(-)

diff --git a/arch/x86/include/asm/jump_label.h b/arch/x86/include/asm/jump_label.h
--- a/arch/x86/include/asm/jump_label.h
+++ b/arch/x86/include/asm/jump_label.h
@@ -20,9 +20,15 @@
 
 static __always_inline bool arch_static_branch(struct static_key *key, bool branch)
 {
-	asm_volatile_goto("STATIC_BRANCH_NOP l_yes=\"%l[l_yes]\" key=\"%c0\" "
-			  "branch=\"%c1\""
-			: : "i" (key), "i" (branch) : : l_yes);
+	asm_volatile_goto("1:"
+		".byte " __stringify(STATIC_KEY_INIT_NOP) "\n\t"
+		".pushsection __jump_table, \"aw\" \n\t"
+		_ASM_ALIGN "\n\t"
+		".long 1b - ., %l[l_yes] - . \n\t"
+		_ASM_PTR "%c0 + %c1 - .\n\t"
+		".popsection \n\t"
+		: : "i" (key), "i" (branch) : : l_yes);
+
 	return false;
 l_yes:
 	return true;
@@ -30,8 +36,14 @@ static __always_inline bool arch_static_branch(struct static_key *key, bool bran
 
 static __always_inline bool arch_static_branch_jump(struct static_key *key, bool branch)
 {
-	asm_volatile_goto("STATIC_BRANCH_JMP l_yes=\"%l[l_yes]\" key=\"%c0\" "
-			  "branch=\"%c1\""
+	asm_volatile_goto("1:"
+		".byte 0xe9\n\t .long %l[l_yes] - 2f\n\t"
+		"2:\n\t"
+		".pushsection __jump_table, \"aw\" \n\t"
+		_ASM_ALIGN "\n\t"
+		".long 1b - ., %l[l_yes] - . \n\t"
+		_ASM_PTR "%c0 + %c1 - .\n\t"
+		".popsection \n\t"
 		: : "i" (key), "i" (branch) : : l_yes);
 	return false;
 l_yes:
diff --git a/arch/x86/kernel/macros.S b/arch/x86/kernel/macros.S
--- a/arch/x86/kernel/macros.S
+++ b/arch/x86/kernel/macros.S
@@ -13,4 +13,3 @@
 #include <asm/paravirt.h>
 #include <asm/asm.h>
 #include <asm/cpufeature.h>
-#include <asm/jump_label.h>
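
For reference, the asm_volatile_goto sequences restored above are not
called directly; they are reached through the generic jump-label API in
<linux/jump_label.h>. A minimal usage sketch (the key name, hot-path
function, and my_slow_feature() helper are hypothetical, chosen only
for illustration):

	#include <linux/jump_label.h>

	extern void my_slow_feature(void);	/* hypothetical helper */

	/* Hypothetical key; it starts disabled, so the branch below
	 * compiles to the 5-byte NOP emitted by arch_static_branch(). */
	static DEFINE_STATIC_KEY_FALSE(my_feature_key);

	void my_hot_path(void)
	{
		if (static_branch_unlikely(&my_feature_key)) {
			/* Reached only after static_branch_enable()
			 * patches the NOP into a jump at runtime. */
			my_slow_feature();
		}
	}

The "asm inline" named in the commit message is the asm qualifier added
in GCC 9: the optimizer then costs the asm statement as minimum size,
so a long .pushsection/.popsection template no longer makes the
containing function look too expensive to inline. A sketch of the raw
syntax, assuming a GCC 9+ compiler (not the kernel's eventual wrapper):

	#include <stdbool.h>

	static inline bool nop_branch(void)
	{
		/* "inline" keeps the inliner's size estimate tiny even
		 * though the template expands to five bytes of NOP. */
		asm inline goto("1: .byte 0x0f, 0x1f, 0x44, 0x00, 0x00\n\t"
				: : : : l_yes);
		return false;
	l_yes:
		return true;
	}
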
-- 
2.7.4