7.0.1-0.11

Jakub Jelinek 2017-03-08 19:02:18 +01:00
parent 4708d93f41
commit 88ff65fe51
8 changed files with 2010 additions and 6 deletions

.gitignore (1 line changed)

@@ -9,3 +9,4 @@
 /gcc-7.0.1-20170211.tar.bz2
 /gcc-7.0.1-20170219.tar.bz2
 /gcc-7.0.1-20170225.tar.bz2
+/gcc-7.0.1-20170308.tar.bz2

gcc.spec

@@ -1,10 +1,10 @@
-%global DATE 20170225
-%global SVNREV 245736
+%global DATE 20170308
+%global SVNREV 245981
 %global gcc_version 7.0.1
 %global gcc_major 7
 # Note, gcc_release must be integer, if you want to add suffixes to
 # %{release}, append them after %{gcc_release} on Release: line.
-%global gcc_release 0.10
+%global gcc_release 0.11
 %global nvptx_tools_gitrev c28050f60193b3b95a18866a96f03334e874e78f
 %global nvptx_newlib_gitrev aadc8eb0ec43b7cd0dd2dfb484bae63c8b05ef24
 %global _unpackaged_files_terminate_build 0
@@ -232,6 +232,10 @@ Patch8: gcc7-no-add-needed.patch
 Patch9: gcc7-aarch64-async-unw-tables.patch
 Patch10: gcc7-foffload-default.patch
 Patch11: gcc7-Wno-format-security.patch
+Patch12: gcc7-pr79932-1.patch
+Patch13: gcc7-pr79932-2.patch
+Patch14: gcc7-pr79941.patch
+Patch15: gcc7-pr79944.patch
 
 Patch1000: nvptx-tools-no-ptxas.patch
 Patch1001: nvptx-tools-build.patch
@@ -820,6 +824,10 @@ package or when debugging this package.
 %patch9 -p0 -b .aarch64-async-unw-tables~
 %patch10 -p0 -b .foffload-default~
 %patch11 -p0 -b .Wno-format-security~
+%patch12 -p0 -b .pr79932-1~
+%patch13 -p0 -b .pr79932-2~
+%patch14 -p0 -b .pr79941~
+%patch15 -p0 -b .pr79944~
 
 cd nvptx-tools-%{nvptx_tools_gitrev}
 %patch1000 -p1 -b .nvptx-tools-no-ptxas~
@@ -3236,6 +3244,37 @@ fi
 %endif
 
 %changelog
+* Wed Mar 8 2017 Jakub Jelinek <jakub@redhat.com> 7.0.1-0.11
+- update from the trunk
+- PRs ada/79903, ada/79945, c++/42000, c++/64574, c++/70266, c++/71568,
+  c++/79414, c++/79681, c++/79746, c++/79782, c++/79791, c++/79796,
+  c++/79821, c++/79822, c++/79825, c/79756, c/79758, c/79834, c/79836,
+  c/79837, c/79847, c/79855, c/79940, demangler/67264, demangler/70909,
+  fortran/51119, fortran/78379, fortran/79739, fortran/79841,
+  fortran/79894, libstdc++/79789, libstdc++/79798, lto/78140, lto/79625,
+  lto/79760, middle-end/68270, middle-end/79692, middle-end/79721,
+  middle-end/79731, middle-end/79805, middle-end/79809,
+  middle-end/79818, rtl-optimization/79571, rtl-optimization/79584,
+  rtl-optimization/79780, rtl-optimization/79901, sanitizer/79783,
+  sanitizer/79897, sanitizer/79904, target/43763, target/68739,
+  target/79395, target/79439, target/79514, target/79544, target/79729,
+  target/79742, target/79749, target/79752, target/79793, target/79807,
+  target/79812, tree-optimization/45397, tree-optimization/66768,
+  tree-optimization/77536, tree-optimization/79345,
+  tree-optimization/79690, tree-optimization/79691,
+  tree-optimization/79699, tree-optimization/79723,
+  tree-optimization/79732, tree-optimization/79734,
+  tree-optimization/79737, tree-optimization/79740,
+  tree-optimization/79777, tree-optimization/79803,
+  tree-optimization/79824, tree-optimization/79894,
+  tree-optimization/79920, tree-optimization/79943,
+  tree-optimization/79955
+- fix 64 avx512vl and 6 avx512bw intrinsics that were not available with -O0
+  (PR target/79932)
+- temporarily disable incorrect folding of Altivec vmul[oe]u[bh] intrinsics
+  (#1429961, PR middle-end/79941)
+- fix -fsanitize=address with some atomic/sync builtins (PR sanitizer/79944)
 * Sat Feb 25 2017 Jakub Jelinek <jakub@redhat.com> 7.0.1-0.10
 - update from the trunk
 - PRs c++/17729, c++/41727, c++/50308, c++/69523, c++/78139, c++/78282,

gcc7-pr79932-1.patch (new file, 181 lines)

@@ -0,0 +1,181 @@
2017-03-07  Jakub Jelinek  <jakub@redhat.com>

	PR target/79932
	* config/i386/avx512bwintrin.h (_mm512_packs_epi32,
	_mm512_maskz_packs_epi32, _mm512_mask_packs_epi32,
	_mm512_packus_epi32, _mm512_maskz_packus_epi32,
	_mm512_mask_packus_epi32): Move definitions outside of __OPTIMIZE__
	guarded section.

	* gcc.target/i386/pr79932-1.c: New test.
--- gcc/config/i386/avx512bwintrin.h.jj 2017-01-23 18:09:48.000000000 +0100
+++ gcc/config/i386/avx512bwintrin.h 2017-03-07 07:47:28.900049849 +0100
@@ -2656,6 +2656,72 @@ _mm512_cmple_epi16_mask (__m512i __X, __
(__mmask32) -1);
}
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_packs_epi32 (__m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_packssdw512_mask ((__v16si) __A,
+ (__v16si) __B,
+ (__v32hi)
+ _mm512_setzero_si512 (),
+ (__mmask32) -1);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_maskz_packs_epi32 (__mmask32 __M, __m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_packssdw512_mask ((__v16si) __A,
+ (__v16si) __B,
+ (__v32hi)
+ _mm512_setzero_si512 (),
+ __M);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_mask_packs_epi32 (__m512i __W, __mmask32 __M, __m512i __A,
+ __m512i __B)
+{
+ return (__m512i) __builtin_ia32_packssdw512_mask ((__v16si) __A,
+ (__v16si) __B,
+ (__v32hi) __W,
+ __M);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_packus_epi32 (__m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_packusdw512_mask ((__v16si) __A,
+ (__v16si) __B,
+ (__v32hi)
+ _mm512_setzero_si512 (),
+ (__mmask32) -1);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_maskz_packus_epi32 (__mmask32 __M, __m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_packusdw512_mask ((__v16si) __A,
+ (__v16si) __B,
+ (__v32hi)
+ _mm512_setzero_si512 (),
+ __M);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_mask_packus_epi32 (__m512i __W, __mmask32 __M, __m512i __A,
+ __m512i __B)
+{
+ return (__m512i) __builtin_ia32_packusdw512_mask ((__v16si) __A,
+ (__v16si) __B,
+ (__v32hi) __W,
+ __M);
+}
+
#ifdef __OPTIMIZE__
extern __inline __mmask32
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
@@ -3012,72 +3078,6 @@ _mm512_cmp_epu8_mask (__m512i __X, __m51
}
extern __inline __m512i
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_packs_epi32 (__m512i __A, __m512i __B)
-{
- return (__m512i) __builtin_ia32_packssdw512_mask ((__v16si) __A,
- (__v16si) __B,
- (__v32hi)
- _mm512_setzero_si512 (),
- (__mmask32) -1);
-}
-
-extern __inline __m512i
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_maskz_packs_epi32 (__mmask32 __M, __m512i __A, __m512i __B)
-{
- return (__m512i) __builtin_ia32_packssdw512_mask ((__v16si) __A,
- (__v16si) __B,
- (__v32hi)
- _mm512_setzero_si512 (),
- __M);
-}
-
-extern __inline __m512i
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_mask_packs_epi32 (__m512i __W, __mmask32 __M, __m512i __A,
- __m512i __B)
-{
- return (__m512i) __builtin_ia32_packssdw512_mask ((__v16si) __A,
- (__v16si) __B,
- (__v32hi) __W,
- __M);
-}
-
-extern __inline __m512i
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_packus_epi32 (__m512i __A, __m512i __B)
-{
- return (__m512i) __builtin_ia32_packusdw512_mask ((__v16si) __A,
- (__v16si) __B,
- (__v32hi)
- _mm512_setzero_si512 (),
- (__mmask32) -1);
-}
-
-extern __inline __m512i
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_maskz_packus_epi32 (__mmask32 __M, __m512i __A, __m512i __B)
-{
- return (__m512i) __builtin_ia32_packusdw512_mask ((__v16si) __A,
- (__v16si) __B,
- (__v32hi)
- _mm512_setzero_si512 (),
- __M);
-}
-
-extern __inline __m512i
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_mask_packus_epi32 (__m512i __W, __mmask32 __M, __m512i __A,
- __m512i __B)
-{
- return (__m512i) __builtin_ia32_packusdw512_mask ((__v16si) __A,
- (__v16si) __B,
- (__v32hi) __W,
- __M);
-}
-
-extern __inline __m512i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm512_bslli_epi128 (__m512i __A, const int __N)
{
--- gcc/testsuite/gcc.target/i386/pr79932-1.c.jj 2017-03-07 07:54:45.181339418 +0100
+++ gcc/testsuite/gcc.target/i386/pr79932-1.c 2017-03-07 07:54:24.000000000 +0100
@@ -0,0 +1,19 @@
+/* PR target/79932 */
+/* { dg-do compile } */
+/* { dg-options "-O0 -mavx512bw" } */
+
+#include <x86intrin.h>
+
+__m512i a, b, c, d, e, f, g, h, i;
+__mmask32 m;
+
+void
+foo (void)
+{
+ d = _mm512_packs_epi32 (a, b);
+ e = _mm512_maskz_packs_epi32 (m, a, b);
+ f = _mm512_mask_packs_epi32 (c, m, a, b);
+ g = _mm512_packus_epi32 (a, b);
+ h = _mm512_maskz_packus_epi32 (m, a, b);
+ i = _mm512_mask_packus_epi32 (c, m, a, b);
+}
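
The mechanism behind PR target/79932 is purely preprocessor-level: the six pack intrinsics above used to live only inside the #ifdef __OPTIMIZE__ region of avx512bwintrin.h, so a translation unit compiled at -O0 never saw their definitions. A minimal sketch of that failure pattern, with a made-up name standing in for the real header contents:

/* guarded.c - illustration only; guarded_intrin is a hypothetical
   stand-in for an intrinsic defined under __OPTIMIZE__.
   "gcc -O2 guarded.c" builds cleanly, while "gcc -O0 guarded.c"
   breaks with an implicit-declaration diagnostic -- the same shape
   of breakage the patch fixes for _mm512_pack[u]s_epi32.  */
#ifdef __OPTIMIZE__
static inline int
guarded_intrin (int a, int b)
{
  return a + b;
}
#endif

int
main (void)
{
  return guarded_intrin (1, 2) == 3 ? 0 : 1;
}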

gcc7-pr79932-2.patch (new file, 1286 lines)

File diff suppressed because it is too large

gcc7-pr79941.patch (new file, 92 lines)

@@ -0,0 +1,92 @@
2017-03-07  Will Schmidt  <will_schmidt@vnet.ibm.com>

	PR middle-end/79941
	* config/rs6000/rs6000.c (rs6000_gimple_fold_builtin): Remove multiply
	even and multiply odd unsigned (vmule/vmulo) intrinsics from the
	multiply folding sequence.

	* gcc.target/powerpc/fold-vec-mult-even_odd_misc.c: New test.
--- gcc/config/rs6000/rs6000.c 2017-03-03 20:31:01.000000000 +0100
+++ gcc/config/rs6000/rs6000.c 2017-03-08 18:52:48.409967121 +0100
@@ -16855,9 +16855,6 @@ rs6000_gimple_fold_builtin (gimple_stmt_
/* Even element flavors of vec_mul (signed). */
case ALTIVEC_BUILTIN_VMULESB:
case ALTIVEC_BUILTIN_VMULESH:
- /* Even element flavors of vec_mul (unsigned). */
- case ALTIVEC_BUILTIN_VMULEUB:
- case ALTIVEC_BUILTIN_VMULEUH:
{
arg0 = gimple_call_arg (stmt, 0);
arg1 = gimple_call_arg (stmt, 1);
@@ -16870,9 +16867,6 @@ rs6000_gimple_fold_builtin (gimple_stmt_
/* Odd element flavors of vec_mul (signed). */
case ALTIVEC_BUILTIN_VMULOSB:
case ALTIVEC_BUILTIN_VMULOSH:
- /* Odd element flavors of vec_mul (unsigned). */
- case ALTIVEC_BUILTIN_VMULOUB:
- case ALTIVEC_BUILTIN_VMULOUH:
{
arg0 = gimple_call_arg (stmt, 0);
arg1 = gimple_call_arg (stmt, 1);
--- gcc/testsuite/gcc.target/powerpc/fold-vec-mult-even_odd_misc.c
+++ gcc/testsuite/gcc.target/powerpc/fold-vec-mult-even_odd_misc.c
@@ -0,0 +1,58 @@
+
+/* { dg-do run } */
+/* { dg-require-effective-target powerpc_vsx_ok } */
+/* { dg-options "-maltivec -mvsx -O2 -save-temps" } */
+
+#include <altivec.h>
+
+__attribute__((noinline)) void
+test_eub_char ()
+{
+ vector unsigned char v0 = {1, 0, 0, 0, 0, 0, 0, 0};
+ vector unsigned char v1 = {0xff, 0, 0, 0, 0, 0, 0, 0};
+ vector unsigned short res = vec_vmuleub (v0, v1);
+ if (res[0] != (unsigned short)v0[0] * (unsigned short)v1[0])
+ __builtin_abort ();
+}
+__attribute__((noinline)) void
+test_oub_char ()
+{
+ vector unsigned char v0 = {0, 1, 0, 0, 0, 0, 0, 0};
+ vector unsigned char v1 = {0, 0xff, 0, 0, 0, 0, 0, 0};
+ vector unsigned short res = vec_vmuloub (v0, v1);
+ if (res[0] != (unsigned short)v0[1] * (unsigned short)v1[1])
+ __builtin_abort ();
+}
+
+__attribute__((noinline)) void
+test_euh_short ()
+{
+ vector unsigned short v0 = {1, 0, 0, 0};
+ vector unsigned short v1 = {0xff, 0, 0, 0};
+ vector unsigned int res = vec_vmuleuh (v0, v1);
+ if (res[0] != (unsigned int)v0[0] * (unsigned int)v1[0])
+ __builtin_abort ();
+}
+__attribute__((noinline)) void
+test_ouh_short ()
+{
+ vector unsigned short v0 = {0, 1, 0, 0};
+ vector unsigned short v1 = {0, 0xff, 0, 0};
+ vector unsigned int res = vec_vmulouh (v0, v1);
+ if (res[0] != (unsigned int)v0[1] * (unsigned int)v1[1])
+ __builtin_abort ();
+}
+
+int main ()
+{
+ test_eub_char();
+ test_oub_char();
+ test_euh_short();
+ test_ouh_short();
+}
+
+/* { dg-final { scan-assembler-times "vmuleub" 1 } } */
+/* { dg-final { scan-assembler-times "vmuloub" 1 } } */
+/* { dg-final { scan-assembler-times "vmuleuh" 1 } } */
+/* { dg-final { scan-assembler-times "vmulouh" 1 } } */
+
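
For reference, the semantics the disabled folding must eventually reproduce: the vmule/vmulo unsigned variants take every second element (even- or odd-indexed), widen it, and multiply pairwise. A scalar sketch of vec_vmuleub under the same element numbering the test above checks (an assumption drawn from the test, not code from the patch); compile with -maltivec:

#include <altivec.h>

/* Reference model: widen the even-indexed unsigned char elements of
   each operand to unsigned short and multiply them pairwise.  */
static vector unsigned short
ref_vmuleub (vector unsigned char a, vector unsigned char b)
{
  vector unsigned short r;
  for (int i = 0; i < 8; i++)
    r[i] = (unsigned short) a[2 * i] * (unsigned short) b[2 * i];
  return r;
}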

gcc7-pr79944.patch (new file, 406 lines)

@@ -0,0 +1,406 @@
2017-03-08  Jakub Jelinek  <jakub@redhat.com>

	PR sanitizer/79944
	* asan.c (get_mem_refs_of_builtin_call): For BUILT_IN_ATOMIC* and
	BUILT_IN_SYNC*, determine the access type from the size suffix and
	always build a MEM_REF with that type.  Handle forgotten
	BUILT_IN_SYNC_FETCH_AND_NAND_16 and BUILT_IN_SYNC_NAND_AND_FETCH_16.

	* c-c++-common/asan/pr79944.c: New test.
--- gcc/asan.c.jj 2017-03-06 12:32:28.000000000 +0100
+++ gcc/asan.c 2017-03-08 12:24:11.151353229 +0100
@@ -603,218 +603,208 @@ get_mem_refs_of_builtin_call (const gcal
case BUILT_IN_STRLEN:
source0 = gimple_call_arg (call, 0);
len = gimple_call_lhs (call);
- break ;
+ break;
/* And now the __atomic* and __sync builtins.
These are handled differently from the classical memory memory
access builtins above. */
case BUILT_IN_ATOMIC_LOAD_1:
- case BUILT_IN_ATOMIC_LOAD_2:
- case BUILT_IN_ATOMIC_LOAD_4:
- case BUILT_IN_ATOMIC_LOAD_8:
- case BUILT_IN_ATOMIC_LOAD_16:
is_store = false;
- /* fall through. */
-
+ /* FALLTHRU */
case BUILT_IN_SYNC_FETCH_AND_ADD_1:
- case BUILT_IN_SYNC_FETCH_AND_ADD_2:
- case BUILT_IN_SYNC_FETCH_AND_ADD_4:
- case BUILT_IN_SYNC_FETCH_AND_ADD_8:
- case BUILT_IN_SYNC_FETCH_AND_ADD_16:
-
case BUILT_IN_SYNC_FETCH_AND_SUB_1:
- case BUILT_IN_SYNC_FETCH_AND_SUB_2:
- case BUILT_IN_SYNC_FETCH_AND_SUB_4:
- case BUILT_IN_SYNC_FETCH_AND_SUB_8:
- case BUILT_IN_SYNC_FETCH_AND_SUB_16:
-
case BUILT_IN_SYNC_FETCH_AND_OR_1:
- case BUILT_IN_SYNC_FETCH_AND_OR_2:
- case BUILT_IN_SYNC_FETCH_AND_OR_4:
- case BUILT_IN_SYNC_FETCH_AND_OR_8:
- case BUILT_IN_SYNC_FETCH_AND_OR_16:
-
case BUILT_IN_SYNC_FETCH_AND_AND_1:
- case BUILT_IN_SYNC_FETCH_AND_AND_2:
- case BUILT_IN_SYNC_FETCH_AND_AND_4:
- case BUILT_IN_SYNC_FETCH_AND_AND_8:
- case BUILT_IN_SYNC_FETCH_AND_AND_16:
-
case BUILT_IN_SYNC_FETCH_AND_XOR_1:
- case BUILT_IN_SYNC_FETCH_AND_XOR_2:
- case BUILT_IN_SYNC_FETCH_AND_XOR_4:
- case BUILT_IN_SYNC_FETCH_AND_XOR_8:
- case BUILT_IN_SYNC_FETCH_AND_XOR_16:
-
case BUILT_IN_SYNC_FETCH_AND_NAND_1:
- case BUILT_IN_SYNC_FETCH_AND_NAND_2:
- case BUILT_IN_SYNC_FETCH_AND_NAND_4:
- case BUILT_IN_SYNC_FETCH_AND_NAND_8:
-
case BUILT_IN_SYNC_ADD_AND_FETCH_1:
- case BUILT_IN_SYNC_ADD_AND_FETCH_2:
- case BUILT_IN_SYNC_ADD_AND_FETCH_4:
- case BUILT_IN_SYNC_ADD_AND_FETCH_8:
- case BUILT_IN_SYNC_ADD_AND_FETCH_16:
-
case BUILT_IN_SYNC_SUB_AND_FETCH_1:
- case BUILT_IN_SYNC_SUB_AND_FETCH_2:
- case BUILT_IN_SYNC_SUB_AND_FETCH_4:
- case BUILT_IN_SYNC_SUB_AND_FETCH_8:
- case BUILT_IN_SYNC_SUB_AND_FETCH_16:
-
case BUILT_IN_SYNC_OR_AND_FETCH_1:
- case BUILT_IN_SYNC_OR_AND_FETCH_2:
- case BUILT_IN_SYNC_OR_AND_FETCH_4:
- case BUILT_IN_SYNC_OR_AND_FETCH_8:
- case BUILT_IN_SYNC_OR_AND_FETCH_16:
-
case BUILT_IN_SYNC_AND_AND_FETCH_1:
- case BUILT_IN_SYNC_AND_AND_FETCH_2:
- case BUILT_IN_SYNC_AND_AND_FETCH_4:
- case BUILT_IN_SYNC_AND_AND_FETCH_8:
- case BUILT_IN_SYNC_AND_AND_FETCH_16:
-
case BUILT_IN_SYNC_XOR_AND_FETCH_1:
- case BUILT_IN_SYNC_XOR_AND_FETCH_2:
- case BUILT_IN_SYNC_XOR_AND_FETCH_4:
- case BUILT_IN_SYNC_XOR_AND_FETCH_8:
- case BUILT_IN_SYNC_XOR_AND_FETCH_16:
-
case BUILT_IN_SYNC_NAND_AND_FETCH_1:
- case BUILT_IN_SYNC_NAND_AND_FETCH_2:
- case BUILT_IN_SYNC_NAND_AND_FETCH_4:
- case BUILT_IN_SYNC_NAND_AND_FETCH_8:
-
case BUILT_IN_SYNC_BOOL_COMPARE_AND_SWAP_1:
- case BUILT_IN_SYNC_BOOL_COMPARE_AND_SWAP_2:
- case BUILT_IN_SYNC_BOOL_COMPARE_AND_SWAP_4:
- case BUILT_IN_SYNC_BOOL_COMPARE_AND_SWAP_8:
- case BUILT_IN_SYNC_BOOL_COMPARE_AND_SWAP_16:
-
case BUILT_IN_SYNC_VAL_COMPARE_AND_SWAP_1:
- case BUILT_IN_SYNC_VAL_COMPARE_AND_SWAP_2:
- case BUILT_IN_SYNC_VAL_COMPARE_AND_SWAP_4:
- case BUILT_IN_SYNC_VAL_COMPARE_AND_SWAP_8:
- case BUILT_IN_SYNC_VAL_COMPARE_AND_SWAP_16:
-
case BUILT_IN_SYNC_LOCK_TEST_AND_SET_1:
- case BUILT_IN_SYNC_LOCK_TEST_AND_SET_2:
- case BUILT_IN_SYNC_LOCK_TEST_AND_SET_4:
- case BUILT_IN_SYNC_LOCK_TEST_AND_SET_8:
- case BUILT_IN_SYNC_LOCK_TEST_AND_SET_16:
-
case BUILT_IN_SYNC_LOCK_RELEASE_1:
- case BUILT_IN_SYNC_LOCK_RELEASE_2:
- case BUILT_IN_SYNC_LOCK_RELEASE_4:
- case BUILT_IN_SYNC_LOCK_RELEASE_8:
- case BUILT_IN_SYNC_LOCK_RELEASE_16:
-
case BUILT_IN_ATOMIC_EXCHANGE_1:
- case BUILT_IN_ATOMIC_EXCHANGE_2:
- case BUILT_IN_ATOMIC_EXCHANGE_4:
- case BUILT_IN_ATOMIC_EXCHANGE_8:
- case BUILT_IN_ATOMIC_EXCHANGE_16:
-
case BUILT_IN_ATOMIC_COMPARE_EXCHANGE_1:
- case BUILT_IN_ATOMIC_COMPARE_EXCHANGE_2:
- case BUILT_IN_ATOMIC_COMPARE_EXCHANGE_4:
- case BUILT_IN_ATOMIC_COMPARE_EXCHANGE_8:
- case BUILT_IN_ATOMIC_COMPARE_EXCHANGE_16:
-
case BUILT_IN_ATOMIC_STORE_1:
- case BUILT_IN_ATOMIC_STORE_2:
- case BUILT_IN_ATOMIC_STORE_4:
- case BUILT_IN_ATOMIC_STORE_8:
- case BUILT_IN_ATOMIC_STORE_16:
-
case BUILT_IN_ATOMIC_ADD_FETCH_1:
- case BUILT_IN_ATOMIC_ADD_FETCH_2:
- case BUILT_IN_ATOMIC_ADD_FETCH_4:
- case BUILT_IN_ATOMIC_ADD_FETCH_8:
- case BUILT_IN_ATOMIC_ADD_FETCH_16:
-
case BUILT_IN_ATOMIC_SUB_FETCH_1:
- case BUILT_IN_ATOMIC_SUB_FETCH_2:
- case BUILT_IN_ATOMIC_SUB_FETCH_4:
- case BUILT_IN_ATOMIC_SUB_FETCH_8:
- case BUILT_IN_ATOMIC_SUB_FETCH_16:
-
case BUILT_IN_ATOMIC_AND_FETCH_1:
- case BUILT_IN_ATOMIC_AND_FETCH_2:
- case BUILT_IN_ATOMIC_AND_FETCH_4:
- case BUILT_IN_ATOMIC_AND_FETCH_8:
- case BUILT_IN_ATOMIC_AND_FETCH_16:
-
case BUILT_IN_ATOMIC_NAND_FETCH_1:
- case BUILT_IN_ATOMIC_NAND_FETCH_2:
- case BUILT_IN_ATOMIC_NAND_FETCH_4:
- case BUILT_IN_ATOMIC_NAND_FETCH_8:
- case BUILT_IN_ATOMIC_NAND_FETCH_16:
-
case BUILT_IN_ATOMIC_XOR_FETCH_1:
- case BUILT_IN_ATOMIC_XOR_FETCH_2:
- case BUILT_IN_ATOMIC_XOR_FETCH_4:
- case BUILT_IN_ATOMIC_XOR_FETCH_8:
- case BUILT_IN_ATOMIC_XOR_FETCH_16:
-
case BUILT_IN_ATOMIC_OR_FETCH_1:
- case BUILT_IN_ATOMIC_OR_FETCH_2:
- case BUILT_IN_ATOMIC_OR_FETCH_4:
- case BUILT_IN_ATOMIC_OR_FETCH_8:
- case BUILT_IN_ATOMIC_OR_FETCH_16:
-
case BUILT_IN_ATOMIC_FETCH_ADD_1:
- case BUILT_IN_ATOMIC_FETCH_ADD_2:
- case BUILT_IN_ATOMIC_FETCH_ADD_4:
- case BUILT_IN_ATOMIC_FETCH_ADD_8:
- case BUILT_IN_ATOMIC_FETCH_ADD_16:
-
case BUILT_IN_ATOMIC_FETCH_SUB_1:
- case BUILT_IN_ATOMIC_FETCH_SUB_2:
- case BUILT_IN_ATOMIC_FETCH_SUB_4:
- case BUILT_IN_ATOMIC_FETCH_SUB_8:
- case BUILT_IN_ATOMIC_FETCH_SUB_16:
-
case BUILT_IN_ATOMIC_FETCH_AND_1:
- case BUILT_IN_ATOMIC_FETCH_AND_2:
- case BUILT_IN_ATOMIC_FETCH_AND_4:
- case BUILT_IN_ATOMIC_FETCH_AND_8:
- case BUILT_IN_ATOMIC_FETCH_AND_16:
-
case BUILT_IN_ATOMIC_FETCH_NAND_1:
- case BUILT_IN_ATOMIC_FETCH_NAND_2:
- case BUILT_IN_ATOMIC_FETCH_NAND_4:
- case BUILT_IN_ATOMIC_FETCH_NAND_8:
- case BUILT_IN_ATOMIC_FETCH_NAND_16:
-
case BUILT_IN_ATOMIC_FETCH_XOR_1:
- case BUILT_IN_ATOMIC_FETCH_XOR_2:
- case BUILT_IN_ATOMIC_FETCH_XOR_4:
- case BUILT_IN_ATOMIC_FETCH_XOR_8:
- case BUILT_IN_ATOMIC_FETCH_XOR_16:
-
case BUILT_IN_ATOMIC_FETCH_OR_1:
+ access_size = 1;
+ goto do_atomic;
+
+ case BUILT_IN_ATOMIC_LOAD_2:
+ is_store = false;
+ /* FALLTHRU */
+ case BUILT_IN_SYNC_FETCH_AND_ADD_2:
+ case BUILT_IN_SYNC_FETCH_AND_SUB_2:
+ case BUILT_IN_SYNC_FETCH_AND_OR_2:
+ case BUILT_IN_SYNC_FETCH_AND_AND_2:
+ case BUILT_IN_SYNC_FETCH_AND_XOR_2:
+ case BUILT_IN_SYNC_FETCH_AND_NAND_2:
+ case BUILT_IN_SYNC_ADD_AND_FETCH_2:
+ case BUILT_IN_SYNC_SUB_AND_FETCH_2:
+ case BUILT_IN_SYNC_OR_AND_FETCH_2:
+ case BUILT_IN_SYNC_AND_AND_FETCH_2:
+ case BUILT_IN_SYNC_XOR_AND_FETCH_2:
+ case BUILT_IN_SYNC_NAND_AND_FETCH_2:
+ case BUILT_IN_SYNC_BOOL_COMPARE_AND_SWAP_2:
+ case BUILT_IN_SYNC_VAL_COMPARE_AND_SWAP_2:
+ case BUILT_IN_SYNC_LOCK_TEST_AND_SET_2:
+ case BUILT_IN_SYNC_LOCK_RELEASE_2:
+ case BUILT_IN_ATOMIC_EXCHANGE_2:
+ case BUILT_IN_ATOMIC_COMPARE_EXCHANGE_2:
+ case BUILT_IN_ATOMIC_STORE_2:
+ case BUILT_IN_ATOMIC_ADD_FETCH_2:
+ case BUILT_IN_ATOMIC_SUB_FETCH_2:
+ case BUILT_IN_ATOMIC_AND_FETCH_2:
+ case BUILT_IN_ATOMIC_NAND_FETCH_2:
+ case BUILT_IN_ATOMIC_XOR_FETCH_2:
+ case BUILT_IN_ATOMIC_OR_FETCH_2:
+ case BUILT_IN_ATOMIC_FETCH_ADD_2:
+ case BUILT_IN_ATOMIC_FETCH_SUB_2:
+ case BUILT_IN_ATOMIC_FETCH_AND_2:
+ case BUILT_IN_ATOMIC_FETCH_NAND_2:
+ case BUILT_IN_ATOMIC_FETCH_XOR_2:
case BUILT_IN_ATOMIC_FETCH_OR_2:
+ access_size = 2;
+ goto do_atomic;
+
+ case BUILT_IN_ATOMIC_LOAD_4:
+ is_store = false;
+ /* FALLTHRU */
+ case BUILT_IN_SYNC_FETCH_AND_ADD_4:
+ case BUILT_IN_SYNC_FETCH_AND_SUB_4:
+ case BUILT_IN_SYNC_FETCH_AND_OR_4:
+ case BUILT_IN_SYNC_FETCH_AND_AND_4:
+ case BUILT_IN_SYNC_FETCH_AND_XOR_4:
+ case BUILT_IN_SYNC_FETCH_AND_NAND_4:
+ case BUILT_IN_SYNC_ADD_AND_FETCH_4:
+ case BUILT_IN_SYNC_SUB_AND_FETCH_4:
+ case BUILT_IN_SYNC_OR_AND_FETCH_4:
+ case BUILT_IN_SYNC_AND_AND_FETCH_4:
+ case BUILT_IN_SYNC_XOR_AND_FETCH_4:
+ case BUILT_IN_SYNC_NAND_AND_FETCH_4:
+ case BUILT_IN_SYNC_BOOL_COMPARE_AND_SWAP_4:
+ case BUILT_IN_SYNC_VAL_COMPARE_AND_SWAP_4:
+ case BUILT_IN_SYNC_LOCK_TEST_AND_SET_4:
+ case BUILT_IN_SYNC_LOCK_RELEASE_4:
+ case BUILT_IN_ATOMIC_EXCHANGE_4:
+ case BUILT_IN_ATOMIC_COMPARE_EXCHANGE_4:
+ case BUILT_IN_ATOMIC_STORE_4:
+ case BUILT_IN_ATOMIC_ADD_FETCH_4:
+ case BUILT_IN_ATOMIC_SUB_FETCH_4:
+ case BUILT_IN_ATOMIC_AND_FETCH_4:
+ case BUILT_IN_ATOMIC_NAND_FETCH_4:
+ case BUILT_IN_ATOMIC_XOR_FETCH_4:
+ case BUILT_IN_ATOMIC_OR_FETCH_4:
+ case BUILT_IN_ATOMIC_FETCH_ADD_4:
+ case BUILT_IN_ATOMIC_FETCH_SUB_4:
+ case BUILT_IN_ATOMIC_FETCH_AND_4:
+ case BUILT_IN_ATOMIC_FETCH_NAND_4:
+ case BUILT_IN_ATOMIC_FETCH_XOR_4:
case BUILT_IN_ATOMIC_FETCH_OR_4:
+ access_size = 4;
+ goto do_atomic;
+
+ case BUILT_IN_ATOMIC_LOAD_8:
+ is_store = false;
+ /* FALLTHRU */
+ case BUILT_IN_SYNC_FETCH_AND_ADD_8:
+ case BUILT_IN_SYNC_FETCH_AND_SUB_8:
+ case BUILT_IN_SYNC_FETCH_AND_OR_8:
+ case BUILT_IN_SYNC_FETCH_AND_AND_8:
+ case BUILT_IN_SYNC_FETCH_AND_XOR_8:
+ case BUILT_IN_SYNC_FETCH_AND_NAND_8:
+ case BUILT_IN_SYNC_ADD_AND_FETCH_8:
+ case BUILT_IN_SYNC_SUB_AND_FETCH_8:
+ case BUILT_IN_SYNC_OR_AND_FETCH_8:
+ case BUILT_IN_SYNC_AND_AND_FETCH_8:
+ case BUILT_IN_SYNC_XOR_AND_FETCH_8:
+ case BUILT_IN_SYNC_NAND_AND_FETCH_8:
+ case BUILT_IN_SYNC_BOOL_COMPARE_AND_SWAP_8:
+ case BUILT_IN_SYNC_VAL_COMPARE_AND_SWAP_8:
+ case BUILT_IN_SYNC_LOCK_TEST_AND_SET_8:
+ case BUILT_IN_SYNC_LOCK_RELEASE_8:
+ case BUILT_IN_ATOMIC_EXCHANGE_8:
+ case BUILT_IN_ATOMIC_COMPARE_EXCHANGE_8:
+ case BUILT_IN_ATOMIC_STORE_8:
+ case BUILT_IN_ATOMIC_ADD_FETCH_8:
+ case BUILT_IN_ATOMIC_SUB_FETCH_8:
+ case BUILT_IN_ATOMIC_AND_FETCH_8:
+ case BUILT_IN_ATOMIC_NAND_FETCH_8:
+ case BUILT_IN_ATOMIC_XOR_FETCH_8:
+ case BUILT_IN_ATOMIC_OR_FETCH_8:
+ case BUILT_IN_ATOMIC_FETCH_ADD_8:
+ case BUILT_IN_ATOMIC_FETCH_SUB_8:
+ case BUILT_IN_ATOMIC_FETCH_AND_8:
+ case BUILT_IN_ATOMIC_FETCH_NAND_8:
+ case BUILT_IN_ATOMIC_FETCH_XOR_8:
case BUILT_IN_ATOMIC_FETCH_OR_8:
+ access_size = 8;
+ goto do_atomic;
+
+ case BUILT_IN_ATOMIC_LOAD_16:
+ is_store = false;
+ /* FALLTHRU */
+ case BUILT_IN_SYNC_FETCH_AND_ADD_16:
+ case BUILT_IN_SYNC_FETCH_AND_SUB_16:
+ case BUILT_IN_SYNC_FETCH_AND_OR_16:
+ case BUILT_IN_SYNC_FETCH_AND_AND_16:
+ case BUILT_IN_SYNC_FETCH_AND_XOR_16:
+ case BUILT_IN_SYNC_FETCH_AND_NAND_16:
+ case BUILT_IN_SYNC_ADD_AND_FETCH_16:
+ case BUILT_IN_SYNC_SUB_AND_FETCH_16:
+ case BUILT_IN_SYNC_OR_AND_FETCH_16:
+ case BUILT_IN_SYNC_AND_AND_FETCH_16:
+ case BUILT_IN_SYNC_XOR_AND_FETCH_16:
+ case BUILT_IN_SYNC_NAND_AND_FETCH_16:
+ case BUILT_IN_SYNC_BOOL_COMPARE_AND_SWAP_16:
+ case BUILT_IN_SYNC_VAL_COMPARE_AND_SWAP_16:
+ case BUILT_IN_SYNC_LOCK_TEST_AND_SET_16:
+ case BUILT_IN_SYNC_LOCK_RELEASE_16:
+ case BUILT_IN_ATOMIC_EXCHANGE_16:
+ case BUILT_IN_ATOMIC_COMPARE_EXCHANGE_16:
+ case BUILT_IN_ATOMIC_STORE_16:
+ case BUILT_IN_ATOMIC_ADD_FETCH_16:
+ case BUILT_IN_ATOMIC_SUB_FETCH_16:
+ case BUILT_IN_ATOMIC_AND_FETCH_16:
+ case BUILT_IN_ATOMIC_NAND_FETCH_16:
+ case BUILT_IN_ATOMIC_XOR_FETCH_16:
+ case BUILT_IN_ATOMIC_OR_FETCH_16:
+ case BUILT_IN_ATOMIC_FETCH_ADD_16:
+ case BUILT_IN_ATOMIC_FETCH_SUB_16:
+ case BUILT_IN_ATOMIC_FETCH_AND_16:
+ case BUILT_IN_ATOMIC_FETCH_NAND_16:
+ case BUILT_IN_ATOMIC_FETCH_XOR_16:
case BUILT_IN_ATOMIC_FETCH_OR_16:
+ access_size = 16;
+ /* FALLTHRU */
+ do_atomic:
{
dest = gimple_call_arg (call, 0);
/* DEST represents the address of a memory location.
instrument_derefs wants the memory location, so lets
dereference the address DEST before handing it to
instrument_derefs. */
- if (TREE_CODE (dest) == ADDR_EXPR)
- dest = TREE_OPERAND (dest, 0);
- else if (TREE_CODE (dest) == SSA_NAME || TREE_CODE (dest) == INTEGER_CST)
- dest = build2 (MEM_REF, TREE_TYPE (TREE_TYPE (dest)),
- dest, build_int_cst (TREE_TYPE (dest), 0));
- else
- gcc_unreachable ();
-
- access_size = int_size_in_bytes (TREE_TYPE (dest));
+ tree type = build_nonstandard_integer_type (access_size
+ * BITS_PER_UNIT, 1);
+ dest = build2 (MEM_REF, type, dest,
+ build_int_cst (build_pointer_type (char_type_node), 0));
+ break;
}
default:
--- gcc/testsuite/c-c++-common/asan/pr79944.c.jj 2017-03-08 12:50:40.428617273 +0100
+++ gcc/testsuite/c-c++-common/asan/pr79944.c 2017-03-08 12:34:47.000000000 +0100
@@ -0,0 +1,18 @@
+/* PR sanitizer/79944 */
+/* { dg-do run } */
+
+struct S { int i; char p[1024]; };
+
+int
+main ()
+{
+ struct S *p = (struct S *) __builtin_malloc (__builtin_offsetof (struct S, p) + 64);
+ p->i = 5;
+ asm volatile ("" : "+r" (p) : : "memory");
+ __atomic_fetch_add ((int *) p, 5, __ATOMIC_RELAXED);
+ asm volatile ("" : "+r" (p) : : "memory");
+ if (p->i != 10)
+ __builtin_abort ();
+ __builtin_free (p);
+ return 0;
+}
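
The testcase works because the cast from struct S * to int * is useless at the GIMPLE level and gets stripped, so the old code, which sized the access from the pointee type of the builtin's first argument, treated the 4-byte atomic as a sizeof (struct S) access and reported a bogus heap-buffer-overflow on the short allocation; sized from the builtin's _4 suffix, only the int is instrumented. A reduced sketch of the shape that used to trip asan (hypothetical function name, reasoning drawn from the ChangeLog above); compile with -fsanitize=address:

/* p may point at an allocation holding only the leading int; the
   atomic touches exactly 4 bytes, which is what the _N size suffix
   of the expanded builtin now tells asan to check.  */
struct S { int i; char tail[1024]; };

int
bump (struct S *p)
{
  return __atomic_fetch_add ((int *) p, 5, __ATOMIC_RELAXED);
}

int
main (void)
{
  struct S *p = (struct S *) __builtin_malloc (sizeof (int));
  p->i = 5;
  if (bump (p) != 5 || p->i != 10)
    __builtin_abort ();
  __builtin_free (p);
  return 0;
}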

nvptx-tools-no-ptxas.patch

@@ -211,13 +211,12 @@
break;
case 'v':
verbose = true;
-@@ -948,7 +1071,9 @@ Usage: nvptx-none-as [option...] [asmfile]\n\
+@@ -948,7 +1071,8 @@ Usage: nvptx-none-as [option...] [asmfile]\n\
Options:\n\
-o FILE Write output to FILE\n\
-v Be verbose\n\
+ --verify Do verify output is acceptable to ptxas\n\
--no-verify Do not verify output is acceptable to ptxas\n\
+ --verify Do verify output is acceptable to ptxas\n\
--help Print this help and exit\n\
--version Print version number and exit\n\
\n\

sources

@@ -1,3 +1,3 @@
-SHA512 (gcc-7.0.1-20170225.tar.bz2) = de6413101567ede96422f49d007c4072314ace58bf09886949858c315df54e8480289967cdf70636a4469a21705c6cddced9ff85c950eef6fd9e6fcad2533be0
+SHA512 (gcc-7.0.1-20170308.tar.bz2) = 8fdadf218ac0d50107516cd9ea72ac03606b79db44f7a3d1fe665d1f1c51b7a54c5f37df3f6af9ab070de1b3990989745762ce4d3c2a4aa4d12bdfe29fe00dcd
 SHA512 (nvptx-newlib-aadc8eb0ec43b7cd0dd2dfb484bae63c8b05ef24.tar.bz2) = 38f97c9297ad108568352a4d28277455a3c01fd8b7864e798037e5006b6f757022e874bbf3f165775fe3b873781bc108137bbeb42dd5ed3c7d3e6747746fa918
 SHA512 (nvptx-tools-c28050f60193b3b95a18866a96f03334e874e78f.tar.bz2) = 95b577a06a93bb044dbc8033e550cb36bcf2ab2687da030a7318cdc90e7467ed49665e247dcafb5ff4a7e92cdc264291d19728bd17fab902fb64b22491269330