===================================================================
@@ -23,3 +23,19 @@
#if defined __aarch64__ && !defined __LP64__
# define REG_VALUE_IN_UNWIND_CONTEXT
#endif
+
+/* Return the value of the pseudo VG register. This should only be
+ called if we know this is an SVE host. */
+static inline int
+aarch64_vg (void)
+{
+ register int vg asm ("x0");
+  /* CNTD X0.  Using a raw .inst encoding means this file assembles
+     even if the assembler has no SVE support.  */
+ asm (".inst 0x04e0e3e0" : "=r" (vg));
+ return vg;
+}
+
+/* Lazily provide a value for VG, so that we don't try to execute SVE
+ instructions unless we know they're needed. */
+#define DWARF_LAZY_REGISTER_VALUE(REGNO, VALUE) \
+ ((REGNO) == AARCH64_DWARF_VG && ((*VALUE) = aarch64_vg (), 1))
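
A minimal usage sketch for review, assuming an SVE host and that the
aarch64_vg helper above is in scope: CNTD writes the number of 64-bit
doublewords per SVE vector to its destination register, so VG * 8 is
the current vector length in bytes.  (AARCH64_DWARF_VG is the DWARF
register number, 46, that the AArch64 DWARF ABI assigns to VG.)

    /* Sketch only, not part of the patch.  On a 256-bit SVE
       implementation this prints "VG = 4 (32-byte vectors)".  */
    #include <stdio.h>

    int
    main (void)
    {
      int vg = aarch64_vg ();
      printf ("VG = %d (%d-byte vectors)\n", vg, vg * 8);
      return 0;
    }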
===================================================================
@@ -216,12 +216,12 @@ _Unwind_IsExtendedContext (struct _Unwin
|| (context->flags & EXTENDED_CONTEXT_BIT));
}
-/* Get the value of register INDEX as saved in CONTEXT. */
+/* Get the value of register REGNO as saved in CONTEXT. */
inline _Unwind_Word
-_Unwind_GetGR (struct _Unwind_Context *context, int index)
+_Unwind_GetGR (struct _Unwind_Context *context, int regno)
{
- int size;
+ int size, index;
_Unwind_Context_Reg_Val val;
#ifdef DWARF_ZERO_REG
@@ -229,7 +229,7 @@ _Unwind_GetGR (struct _Unwind_Context *c
return 0;
#endif
- index = DWARF_REG_TO_UNWIND_COLUMN (index);
+ index = DWARF_REG_TO_UNWIND_COLUMN (regno);
gcc_assert (index < (int) sizeof(dwarf_reg_size_table));
size = dwarf_reg_size_table[index];
val = context->reg[index];
@@ -237,6 +237,14 @@ _Unwind_GetGR (struct _Unwind_Context *c
if (_Unwind_IsExtendedContext (context) && context->by_value[index])
return _Unwind_Get_Unwind_Word (val);
+#ifdef DWARF_LAZY_REGISTER_VALUE
+ {
+ _Unwind_Word value;
+ if (DWARF_LAZY_REGISTER_VALUE (regno, &value))
+ return value;
+ }
+#endif
+
/* This will segfault if the register hasn't been saved. */
if (size == sizeof(_Unwind_Ptr))
return * (_Unwind_Ptr *) (_Unwind_Internal_Ptr) val;
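
For other ports, the contract of the new hook is: evaluate to nonzero
and store through the second argument only for registers that can be
recomputed on demand; all other registers fall through to the saved
slot lookup below.  An illustrative definition, with hypothetical
names (HOST_REG_FOO and host_read_foo are not from any real port):

    /* Hypothetical port definition, for illustration only:
       recompute HOST_REG_FOO at unwind time instead of requiring
       every frame's CFI to save it.  */
    #define DWARF_LAZY_REGISTER_VALUE(REGNO, VALUE) \
      ((REGNO) == HOST_REG_FOO && ((*VALUE) = host_read_foo (), 1))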
===================================================================
@@ -0,0 +1,38 @@
+# Specific regression driver for AArch64.
+# Copyright (C) 2009-2017 Free Software Foundation, Inc.
+# Contributed by ARM Ltd.
+#
+# This file is part of GCC.
+#
+# GCC is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3, or (at your option)
+# any later version.
+#
+# GCC is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with GCC; see the file COPYING3. If not see
+# <http://www.gnu.org/licenses/>.
+
+# GCC testsuite that uses the `dg.exp' driver.
+
+# Exit immediately if this isn't an AArch64 target.
+if {![istarget aarch64*-*-*] } then {
+ return
+}
+
+# Load support procs.
+load_lib g++-dg.exp
+
+# Initialize `dg'.
+dg-init
+
+# Main loop.
+dg-runtest [lsort [glob -nocomplain $srcdir/$subdir/*.C]] "" ""
+
+# All done.
+dg-finish
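
As a usage note (standard GCC testsuite practice rather than anything
this file defines), the new driver can be exercised on its own from
the gcc build directory with something like:

    make check-g++ RUNTESTFLAGS="aarch64.exp=sve_catch_*.C"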
===================================================================
@@ -0,0 +1,70 @@
+/* { dg-do run } */
+/* { dg-options "-O3 -fopenmp-simd -fno-omit-frame-pointer" } */
+/* { dg-options "-O3 -fopenmp-simd -fno-omit-frame-pointer -march=armv8-a+sve" { target aarch64_sve_hw } } */
+
+/* Invoke X (P##n) for n in [0, 7]. */
+#define REPEAT8(X, P) \
+ X (P##0) X (P##1) X (P##2) X (P##3) X (P##4) X (P##5) X (P##6) X (P##7)
+
+/* Invoke X (n) for all octal n in [0, 39]. */
+#define REPEAT40(X) \
+ REPEAT8 (X, 0) REPEAT8 (X, 1) REPEAT8 (X, 2) REPEAT8 (X, 3) REPEAT8 (X, 4)
+
+volatile int testi;
+
+/* Throw to f3. */
+void __attribute__ ((weak))
+f1 (int x[40][100], int *y)
+{
+ /* A wild write to x and y. */
+ asm volatile ("" ::: "memory");
+ if (y[testi] == x[testi][testi])
+ throw 100;
+}
+
+/* Expect vector work to be done, with spilling of vector registers. */
+void __attribute__ ((weak))
+f2 (int x[40][100], int *y)
+{
+ /* Try to force some spilling. */
+#define DECLARE(N) int y##N = y[N];
+ REPEAT40 (DECLARE);
+ for (int j = 0; j < 20; ++j)
+ {
+ f1 (x, y);
+#pragma omp simd
+ for (int i = 0; i < 100; ++i)
+ {
+#define INC(N) x[N][i] += y##N;
+ REPEAT40 (INC);
+ }
+ }
+}
+
+/* Catch an exception thrown from f1, via f2. */
+void __attribute__ ((weak))
+f3 (int x[40][100], int *y, int *z)
+{
+ volatile int extra = 111;
+ try
+ {
+ f2 (x, y);
+ }
+ catch (int val)
+ {
+ *z = val + extra;
+ }
+}
+
+static int x[40][100];
+static int y[40];
+static int z;
+
+int
+main (void)
+{
+ f3 (x, y, &z);
+ if (z != 211)
+ __builtin_abort ();
+ return 0;
+}
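
To make the macros above easier to review: REPEAT40 (DECLARE) pastes
two-digit tokens whose uses as array indices are octal constants, so
the forty locals really do cover y[0] through y[39].  An abridged
expansion:

    /* Abridged expansion of REPEAT40 (DECLARE).  The indices are
       octal, so 07 is followed by 10 (decimal 8), and the last,
       47, is decimal 39.  */
    int y00 = y[00];   /* y[0]  */
    int y01 = y[01];
    /* ...  */
    int y07 = y[07];   /* y[7]  */
    int y10 = y[10];   /* y[8]  */
    /* ...  */
    int y47 = y[47];   /* y[39] */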
===================================================================
@@ -0,0 +1,5 @@
+/* { dg-do run } */
+/* { dg-options "-O3 -fopenmp-simd -fomit-frame-pointer" } */
+/* { dg-options "-O3 -fopenmp-simd -fomit-frame-pointer -march=armv8-a+sve" { target aarch64_sve_hw } } */
+
+#include "sve_catch_1.C"
===================================================================
@@ -0,0 +1,79 @@
+/* { dg-do run } */
+/* { dg-options "-O3 -fopenmp-simd -fno-omit-frame-pointer" } */
+/* { dg-options "-O3 -fopenmp-simd -fno-omit-frame-pointer -march=armv8-a+sve" { target aarch64_sve_hw } } */
+
+/* Invoke X (P##n) for n in [0, 7]. */
+#define REPEAT8(X, P) \
+ X (P##0) X (P##1) X (P##2) X (P##3) X (P##4) X (P##5) X (P##6) X (P##7)
+
+/* Invoke X (n) for all octal n in [0, 39]. */
+#define REPEAT40(X) \
+ REPEAT8 (X, 0) REPEAT8 (X, 1) REPEAT8 (X, 2) REPEAT8 (X, 3) REPEAT8 (X, 4)
+
+volatile int testi, sink;
+
+/* Take 2 stack arguments and throw to f3. */
+void __attribute__ ((weak))
+f1 (int x[40][100], int *y, int z1, int z2, int z3, int z4,
+ int z5, int z6, int z7, int z8)
+{
+ /* A wild write to x and y. */
+ sink = z1;
+ sink = z2;
+ sink = z3;
+ sink = z4;
+ sink = z5;
+ sink = z6;
+ sink = z7;
+ sink = z8;
+ asm volatile ("" ::: "memory");
+ if (y[testi] == x[testi][testi])
+ throw 100;
+}
+
+/* Expect vector work to be done, with spilling of vector registers. */
+void __attribute__ ((weak))
+f2 (int x[40][100], int *y)
+{
+ /* Try to force some spilling. */
+#define DECLARE(N) int y##N = y[N];
+ REPEAT40 (DECLARE);
+ for (int j = 0; j < 20; ++j)
+ {
+ f1 (x, y, 1, 2, 3, 4, 5, 6, 7, 8);
+#pragma omp simd
+ for (int i = 0; i < 100; ++i)
+ {
+#define INC(N) x[N][i] += y##N;
+ REPEAT40 (INC);
+ }
+ }
+}
+
+/* Catch an exception thrown from f1, via f2. */
+void __attribute__ ((weak))
+f3 (int x[40][100], int *y, int *z)
+{
+ volatile int extra = 111;
+ try
+ {
+ f2 (x, y);
+ }
+ catch (int val)
+ {
+ *z = val + extra;
+ }
+}
+
+static int x[40][100];
+static int y[40];
+static int z;
+
+int
+main (void)
+{
+ f3 (x, y, &z);
+ if (z != 211)
+ __builtin_abort ();
+ return 0;
+}
===================================================================
@@ -0,0 +1,5 @@
+/* { dg-do run } */
+/* { dg-options "-O3 -fopenmp-simd -fomit-frame-pointer" } */
+/* { dg-options "-O3 -fopenmp-simd -fomit-frame-pointer -march=armv8-a+sve" { target aarch64_sve_hw } } */
+
+#include "sve_catch_3.C"
===================================================================
@@ -0,0 +1,82 @@
+/* { dg-do run } */
+/* { dg-options "-O3 -fopenmp-simd -fno-omit-frame-pointer" } */
+/* { dg-options "-O3 -fopenmp-simd -fno-omit-frame-pointer -march=armv8-a+sve" { target aarch64_sve_hw } } */
+
+/* Invoke X (P##n) for n in [0, 7]. */
+#define REPEAT8(X, P) \
+ X (P##0) X (P##1) X (P##2) X (P##3) X (P##4) X (P##5) X (P##6) X (P##7)
+
+/* Invoke X (n) for all octal n in [0, 39]. */
+#define REPEAT40(X) \
+ REPEAT8 (X, 0) REPEAT8 (X, 1) REPEAT8 (X, 2) REPEAT8 (X, 3) REPEAT8 (X, 4)
+
+volatile int testi, sink;
+volatile void *ptr;
+
+/* Take 2 stack arguments and throw to f3. */
+void __attribute__ ((weak))
+f1 (int x[40][100], int *y, int z1, int z2, int z3, int z4,
+ int z5, int z6, int z7, int z8)
+{
+ /* A wild write to x and y. */
+ sink = z1;
+ sink = z2;
+ sink = z3;
+ sink = z4;
+ sink = z5;
+ sink = z6;
+ sink = z7;
+ sink = z8;
+ asm volatile ("" ::: "memory");
+ if (y[testi] == x[testi][testi])
+ throw 100;
+}
+
+/* Expect vector work to be done, with spilling of vector registers. */
+void __attribute__ ((weak))
+f2 (int x[40][100], int *y)
+{
+ /* Create a true variable-sized frame. */
+ ptr = __builtin_alloca (testi + 40);
+ /* Try to force some spilling. */
+#define DECLARE(N) int y##N = y[N];
+ REPEAT40 (DECLARE);
+ for (int j = 0; j < 20; ++j)
+ {
+ f1 (x, y, 1, 2, 3, 4, 5, 6, 7, 8);
+#pragma omp simd
+ for (int i = 0; i < 100; ++i)
+ {
+#define INC(N) x[N][i] += y##N;
+ REPEAT40 (INC);
+ }
+ }
+}
+
+/* Catch an exception thrown from f1, via f2. */
+void __attribute__ ((weak))
+f3 (int x[40][100], int *y, int *z)
+{
+ volatile int extra = 111;
+ try
+ {
+ f2 (x, y);
+ }
+ catch (int val)
+ {
+ *z = val + extra;
+ }
+}
+
+static int x[40][100];
+static int y[40];
+static int z;
+
+int
+main (void)
+{
+ f3 (x, y, &z);
+ if (z != 211)
+ __builtin_abort ();
+ return 0;
+}
===================================================================
@@ -0,0 +1,5 @@
+/* { dg-do run } */
+/* { dg-options "-O3 -fopenmp-simd -fomit-frame-pointer" } */
+/* { dg-options "-O3 -fopenmp-simd -fomit-frame-pointer -march=armv8-a+sve" { target aarch64_sve_hw } } */
+
+#include "sve_catch_5.C"