diff --git a/.gitignore b/.gitignore
index ea7b0d5..b7dd852 100644
--- a/.gitignore
+++ b/.gitignore
@@ -10,3 +10,4 @@
 /gcc-7.0.1-20170219.tar.bz2
 /gcc-7.0.1-20170225.tar.bz2
 /gcc-7.0.1-20170308.tar.bz2
+/gcc-7.0.1-20170309.tar.bz2
diff --git a/gcc.spec b/gcc.spec
index 2413868..38a482c 100644
--- a/gcc.spec
+++ b/gcc.spec
@@ -4,7 +4,7 @@
 %global gcc_major 7
 # Note, gcc_release must be integer, if you want to add suffixes to
 # %{release}, append them after %{gcc_release} on Release: line.
-%global gcc_release 0.11
+%global gcc_release 0.12
 %global nvptx_tools_gitrev c28050f60193b3b95a18866a96f03334e874e78f
 %global nvptx_newlib_gitrev aadc8eb0ec43b7cd0dd2dfb484bae63c8b05ef24
 %global _unpackaged_files_terminate_build 0
@@ -232,10 +232,9 @@ Patch8: gcc7-no-add-needed.patch
 Patch9: gcc7-aarch64-async-unw-tables.patch
 Patch10: gcc7-foffload-default.patch
 Patch11: gcc7-Wno-format-security.patch
-Patch12: gcc7-pr79932-1.patch
-Patch13: gcc7-pr79932-2.patch
-Patch14: gcc7-pr79941.patch
-Patch15: gcc7-pr79944.patch
+Patch12: gcc7-pr79941.patch
+Patch13: gcc7-pr79969.patch
+Patch14: gcc7-pr79972.patch
 
 Patch1000: nvptx-tools-no-ptxas.patch
 Patch1001: nvptx-tools-build.patch
@@ -824,10 +823,9 @@ package or when debugging this package.
 %patch9 -p0 -b .aarch64-async-unw-tables~
 %patch10 -p0 -b .foffload-default~
 %patch11 -p0 -b .Wno-format-security~
-%patch12 -p0 -b .pr79932-1~
-%patch13 -p0 -b .pr79932-2~
-%patch14 -p0 -b .pr79941~
-%patch15 -p0 -b .pr79944~
+%patch12 -p0 -b .pr79941~
+%patch13 -p0 -b .pr79969~
+%patch14 -p0 -b .pr79972~
 
 cd nvptx-tools-%{nvptx_tools_gitrev}
 %patch1000 -p1 -b .nvptx-tools-no-ptxas~
@@ -3244,6 +3242,16 @@ fi
 %endif
 
 %changelog
+* Thu Mar 9 2017 Jakub Jelinek 7.0.1-0.12
+- update from the trunk
+  - PRs c++/71966, c++/79672, c++/79797, c++/79900, ipa/79761, ipa/79764,
+    ipa/79970, middle-end/79971, rtl-optimization/79949, target/65705,
+    target/69804, target/79913, target/79928, tree-optimization/79631,
+    tree-optimization/79977
+- fix DW_AT_decl_line on DW_TAG_enumeration_type for C enumeration
+  definitions following forward declarations (#1423460, PR c/79969)
+- fix ICE with -Walloca (PR tree-optimization/79972)
+
 * Wed Mar 8 2017 Jakub Jelinek 7.0.1-0.11
 - update from the trunk
   - PRs ada/79903, ada/79945, c++/42000, c++/64574, c++/70266, c++/71568,
diff --git a/gcc7-pr79932-1.patch b/gcc7-pr79932-1.patch
deleted file mode 100644
index 2d822f7..0000000
--- a/gcc7-pr79932-1.patch
+++ /dev/null
@@ -1,181 +0,0 @@
-2017-03-07 Jakub Jelinek
-
-	PR target/79932
-	* config/i386/avx512bwintrin.h (_mm512_packs_epi32,
-	_mm512_maskz_packs_epi32, _mm512_mask_packs_epi32,
-	_mm512_packus_epi32, _mm512_maskz_packus_epi32,
-	_mm512_mask_packus_epi32): Move definitions outside of __OPTIMIZE__
-	guarded section.
-
-	* gcc.target/i386/pr79932-1.c: New test.
- ---- gcc/config/i386/avx512bwintrin.h.jj 2017-01-23 18:09:48.000000000 +0100 -+++ gcc/config/i386/avx512bwintrin.h 2017-03-07 07:47:28.900049849 +0100 -@@ -2656,6 +2656,72 @@ _mm512_cmple_epi16_mask (__m512i __X, __ - (__mmask32) -1); - } - -+extern __inline __m512i -+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) -+_mm512_packs_epi32 (__m512i __A, __m512i __B) -+{ -+ return (__m512i) __builtin_ia32_packssdw512_mask ((__v16si) __A, -+ (__v16si) __B, -+ (__v32hi) -+ _mm512_setzero_si512 (), -+ (__mmask32) -1); -+} -+ -+extern __inline __m512i -+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) -+_mm512_maskz_packs_epi32 (__mmask32 __M, __m512i __A, __m512i __B) -+{ -+ return (__m512i) __builtin_ia32_packssdw512_mask ((__v16si) __A, -+ (__v16si) __B, -+ (__v32hi) -+ _mm512_setzero_si512 (), -+ __M); -+} -+ -+extern __inline __m512i -+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) -+_mm512_mask_packs_epi32 (__m512i __W, __mmask32 __M, __m512i __A, -+ __m512i __B) -+{ -+ return (__m512i) __builtin_ia32_packssdw512_mask ((__v16si) __A, -+ (__v16si) __B, -+ (__v32hi) __W, -+ __M); -+} -+ -+extern __inline __m512i -+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) -+_mm512_packus_epi32 (__m512i __A, __m512i __B) -+{ -+ return (__m512i) __builtin_ia32_packusdw512_mask ((__v16si) __A, -+ (__v16si) __B, -+ (__v32hi) -+ _mm512_setzero_si512 (), -+ (__mmask32) -1); -+} -+ -+extern __inline __m512i -+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) -+_mm512_maskz_packus_epi32 (__mmask32 __M, __m512i __A, __m512i __B) -+{ -+ return (__m512i) __builtin_ia32_packusdw512_mask ((__v16si) __A, -+ (__v16si) __B, -+ (__v32hi) -+ _mm512_setzero_si512 (), -+ __M); -+} -+ -+extern __inline __m512i -+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) -+_mm512_mask_packus_epi32 (__m512i __W, __mmask32 __M, __m512i __A, -+ __m512i __B) -+{ -+ return (__m512i) __builtin_ia32_packusdw512_mask ((__v16si) __A, -+ (__v16si) __B, -+ (__v32hi) __W, -+ __M); -+} -+ - #ifdef __OPTIMIZE__ - extern __inline __mmask32 - __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) -@@ -3012,72 +3078,6 @@ _mm512_cmp_epu8_mask (__m512i __X, __m51 - } - - extern __inline __m512i --__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) --_mm512_packs_epi32 (__m512i __A, __m512i __B) --{ -- return (__m512i) __builtin_ia32_packssdw512_mask ((__v16si) __A, -- (__v16si) __B, -- (__v32hi) -- _mm512_setzero_si512 (), -- (__mmask32) -1); --} -- --extern __inline __m512i --__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) --_mm512_maskz_packs_epi32 (__mmask32 __M, __m512i __A, __m512i __B) --{ -- return (__m512i) __builtin_ia32_packssdw512_mask ((__v16si) __A, -- (__v16si) __B, -- (__v32hi) -- _mm512_setzero_si512 (), -- __M); --} -- --extern __inline __m512i --__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) --_mm512_mask_packs_epi32 (__m512i __W, __mmask32 __M, __m512i __A, -- __m512i __B) --{ -- return (__m512i) __builtin_ia32_packssdw512_mask ((__v16si) __A, -- (__v16si) __B, -- (__v32hi) __W, -- __M); --} -- --extern __inline __m512i --__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) --_mm512_packus_epi32 (__m512i __A, __m512i __B) --{ -- return (__m512i) __builtin_ia32_packusdw512_mask ((__v16si) __A, -- (__v16si) __B, -- (__v32hi) -- _mm512_setzero_si512 (), -- (__mmask32) -1); --} -- --extern __inline __m512i --__attribute__ ((__gnu_inline__, 
__always_inline__, __artificial__)) --_mm512_maskz_packus_epi32 (__mmask32 __M, __m512i __A, __m512i __B) --{ -- return (__m512i) __builtin_ia32_packusdw512_mask ((__v16si) __A, -- (__v16si) __B, -- (__v32hi) -- _mm512_setzero_si512 (), -- __M); --} -- --extern __inline __m512i --__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) --_mm512_mask_packus_epi32 (__m512i __W, __mmask32 __M, __m512i __A, -- __m512i __B) --{ -- return (__m512i) __builtin_ia32_packusdw512_mask ((__v16si) __A, -- (__v16si) __B, -- (__v32hi) __W, -- __M); --} -- --extern __inline __m512i - __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) - _mm512_bslli_epi128 (__m512i __A, const int __N) - { ---- gcc/testsuite/gcc.target/i386/pr79932-1.c.jj 2017-03-07 07:54:45.181339418 +0100 -+++ gcc/testsuite/gcc.target/i386/pr79932-1.c 2017-03-07 07:54:24.000000000 +0100 -@@ -0,0 +1,19 @@ -+/* PR target/79932 */ -+/* { dg-do compile } */ -+/* { dg-options "-O0 -mavx512bw" } */ -+ -+#include -+ -+__m512i a, b, c, d, e, f, g, h, i; -+__mmask32 m; -+ -+void -+foo (void) -+{ -+ d = _mm512_packs_epi32 (a, b); -+ e = _mm512_maskz_packs_epi32 (m, a, b); -+ f = _mm512_mask_packs_epi32 (c, m, a, b); -+ g = _mm512_packus_epi32 (a, b); -+ h = _mm512_maskz_packus_epi32 (m, a, b); -+ i = _mm512_mask_packus_epi32 (c, m, a, b); -+} diff --git a/gcc7-pr79932-2.patch b/gcc7-pr79932-2.patch deleted file mode 100644 index 93d3b7b..0000000 --- a/gcc7-pr79932-2.patch +++ /dev/null @@ -1,1286 +0,0 @@ -2017-03-07 Jakub Jelinek - - PR target/79932 - * config/i386/avx512vlintrin.h (_mm256_cmpge_epi32_mask, - _mm256_cmpge_epi64_mask, _mm256_cmpge_epu32_mask, - _mm256_cmpge_epu64_mask, _mm256_cmple_epi32_mask, - _mm256_cmple_epi64_mask, _mm256_cmple_epu32_mask, - _mm256_cmple_epu64_mask, _mm256_cmplt_epi32_mask, - _mm256_cmplt_epi64_mask, _mm256_cmplt_epu32_mask, - _mm256_cmplt_epu64_mask, _mm256_cmpneq_epi32_mask, - _mm256_cmpneq_epi64_mask, _mm256_cmpneq_epu32_mask, - _mm256_cmpneq_epu64_mask, _mm256_mask_cmpge_epi32_mask, - _mm256_mask_cmpge_epi64_mask, _mm256_mask_cmpge_epu32_mask, - _mm256_mask_cmpge_epu64_mask, _mm256_mask_cmple_epi32_mask, - _mm256_mask_cmple_epi64_mask, _mm256_mask_cmple_epu32_mask, - _mm256_mask_cmple_epu64_mask, _mm256_mask_cmplt_epi32_mask, - _mm256_mask_cmplt_epi64_mask, _mm256_mask_cmplt_epu32_mask, - _mm256_mask_cmplt_epu64_mask, _mm256_mask_cmpneq_epi32_mask, - _mm256_mask_cmpneq_epi64_mask, _mm256_mask_cmpneq_epu32_mask, - _mm256_mask_cmpneq_epu64_mask, _mm_cmpge_epi32_mask, - _mm_cmpge_epi64_mask, _mm_cmpge_epu32_mask, _mm_cmpge_epu64_mask, - _mm_cmple_epi32_mask, _mm_cmple_epi64_mask, _mm_cmple_epu32_mask, - _mm_cmple_epu64_mask, _mm_cmplt_epi32_mask, _mm_cmplt_epi64_mask, - _mm_cmplt_epu32_mask, _mm_cmplt_epu64_mask, _mm_cmpneq_epi32_mask, - _mm_cmpneq_epi64_mask, _mm_cmpneq_epu32_mask, _mm_cmpneq_epu64_mask, - _mm_mask_cmpge_epi32_mask, _mm_mask_cmpge_epi64_mask, - _mm_mask_cmpge_epu32_mask, _mm_mask_cmpge_epu64_mask, - _mm_mask_cmple_epi32_mask, _mm_mask_cmple_epi64_mask, - _mm_mask_cmple_epu32_mask, _mm_mask_cmple_epu64_mask, - _mm_mask_cmplt_epi32_mask, _mm_mask_cmplt_epi64_mask, - _mm_mask_cmplt_epu32_mask, _mm_mask_cmplt_epu64_mask, - _mm_mask_cmpneq_epi32_mask, _mm_mask_cmpneq_epi64_mask, - _mm_mask_cmpneq_epu32_mask, _mm_mask_cmpneq_epu64_mask): Move - definitions outside of __OPTIMIZE__ guarded section. - - * gcc.target/i386/pr79932-2.c: New test. 
- ---- gcc/config/i386/avx512vlintrin.h.jj 2017-01-17 18:40:59.000000000 +0100 -+++ gcc/config/i386/avx512vlintrin.h 2017-03-07 08:27:31.071641043 +0100 -@@ -9172,6 +9172,582 @@ _mm256_mask_permutexvar_epi32 (__m256i _ - __M); - } - -+extern __inline __mmask8 -+ __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) -+_mm256_mask_cmpneq_epu32_mask (__mmask8 __M, __m256i __X, __m256i __Y) -+{ -+ return (__mmask8) __builtin_ia32_ucmpd256_mask ((__v8si) __X, -+ (__v8si) __Y, 4, -+ (__mmask8) __M); -+} -+ -+extern __inline __mmask8 -+ __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) -+_mm256_cmpneq_epu32_mask (__m256i __X, __m256i __Y) -+{ -+ return (__mmask8) __builtin_ia32_ucmpd256_mask ((__v8si) __X, -+ (__v8si) __Y, 4, -+ (__mmask8) -1); -+} -+ -+extern __inline __mmask8 -+ __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) -+_mm256_mask_cmplt_epu32_mask (__mmask8 __M, __m256i __X, __m256i __Y) -+{ -+ return (__mmask8) __builtin_ia32_ucmpd256_mask ((__v8si) __X, -+ (__v8si) __Y, 1, -+ (__mmask8) __M); -+} -+ -+extern __inline __mmask8 -+ __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) -+_mm256_cmplt_epu32_mask (__m256i __X, __m256i __Y) -+{ -+ return (__mmask8) __builtin_ia32_ucmpd256_mask ((__v8si) __X, -+ (__v8si) __Y, 1, -+ (__mmask8) -1); -+} -+ -+extern __inline __mmask8 -+ __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) -+_mm256_mask_cmpge_epu32_mask (__mmask8 __M, __m256i __X, __m256i __Y) -+{ -+ return (__mmask8) __builtin_ia32_ucmpd256_mask ((__v8si) __X, -+ (__v8si) __Y, 5, -+ (__mmask8) __M); -+} -+ -+extern __inline __mmask8 -+ __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) -+_mm256_cmpge_epu32_mask (__m256i __X, __m256i __Y) -+{ -+ return (__mmask8) __builtin_ia32_ucmpd256_mask ((__v8si) __X, -+ (__v8si) __Y, 5, -+ (__mmask8) -1); -+} -+ -+extern __inline __mmask8 -+ __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) -+_mm256_mask_cmple_epu32_mask (__mmask8 __M, __m256i __X, __m256i __Y) -+{ -+ return (__mmask8) __builtin_ia32_ucmpd256_mask ((__v8si) __X, -+ (__v8si) __Y, 2, -+ (__mmask8) __M); -+} -+ -+extern __inline __mmask8 -+ __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) -+_mm256_cmple_epu32_mask (__m256i __X, __m256i __Y) -+{ -+ return (__mmask8) __builtin_ia32_ucmpd256_mask ((__v8si) __X, -+ (__v8si) __Y, 2, -+ (__mmask8) -1); -+} -+ -+extern __inline __mmask8 -+ __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) -+_mm256_mask_cmpneq_epu64_mask (__mmask8 __M, __m256i __X, __m256i __Y) -+{ -+ return (__mmask8) __builtin_ia32_ucmpq256_mask ((__v4di) __X, -+ (__v4di) __Y, 4, -+ (__mmask8) __M); -+} -+ -+extern __inline __mmask8 -+ __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) -+_mm256_cmpneq_epu64_mask (__m256i __X, __m256i __Y) -+{ -+ return (__mmask8) __builtin_ia32_ucmpq256_mask ((__v4di) __X, -+ (__v4di) __Y, 4, -+ (__mmask8) -1); -+} -+ -+extern __inline __mmask8 -+ __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) -+_mm256_mask_cmplt_epu64_mask (__mmask8 __M, __m256i __X, __m256i __Y) -+{ -+ return (__mmask8) __builtin_ia32_ucmpq256_mask ((__v4di) __X, -+ (__v4di) __Y, 1, -+ (__mmask8) __M); -+} -+ -+extern __inline __mmask8 -+ __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) -+_mm256_cmplt_epu64_mask (__m256i __X, __m256i __Y) -+{ -+ return (__mmask8) __builtin_ia32_ucmpq256_mask ((__v4di) __X, -+ (__v4di) __Y, 1, -+ (__mmask8) -1); -+} -+ 
-+extern __inline __mmask8 -+ __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) -+_mm256_mask_cmpge_epu64_mask (__mmask8 __M, __m256i __X, __m256i __Y) -+{ -+ return (__mmask8) __builtin_ia32_ucmpq256_mask ((__v4di) __X, -+ (__v4di) __Y, 5, -+ (__mmask8) __M); -+} -+ -+extern __inline __mmask8 -+ __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) -+_mm256_cmpge_epu64_mask (__m256i __X, __m256i __Y) -+{ -+ return (__mmask8) __builtin_ia32_ucmpq256_mask ((__v4di) __X, -+ (__v4di) __Y, 5, -+ (__mmask8) -1); -+} -+ -+extern __inline __mmask8 -+ __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) -+_mm256_mask_cmple_epu64_mask (__mmask8 __M, __m256i __X, __m256i __Y) -+{ -+ return (__mmask8) __builtin_ia32_ucmpq256_mask ((__v4di) __X, -+ (__v4di) __Y, 2, -+ (__mmask8) __M); -+} -+ -+extern __inline __mmask8 -+ __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) -+_mm256_cmple_epu64_mask (__m256i __X, __m256i __Y) -+{ -+ return (__mmask8) __builtin_ia32_ucmpq256_mask ((__v4di) __X, -+ (__v4di) __Y, 2, -+ (__mmask8) -1); -+} -+ -+extern __inline __mmask8 -+ __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) -+_mm256_mask_cmpneq_epi32_mask (__mmask8 __M, __m256i __X, __m256i __Y) -+{ -+ return (__mmask8) __builtin_ia32_cmpd256_mask ((__v8si) __X, -+ (__v8si) __Y, 4, -+ (__mmask8) __M); -+} -+ -+extern __inline __mmask8 -+ __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) -+_mm256_cmpneq_epi32_mask (__m256i __X, __m256i __Y) -+{ -+ return (__mmask8) __builtin_ia32_cmpd256_mask ((__v8si) __X, -+ (__v8si) __Y, 4, -+ (__mmask8) -1); -+} -+ -+extern __inline __mmask8 -+ __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) -+_mm256_mask_cmplt_epi32_mask (__mmask8 __M, __m256i __X, __m256i __Y) -+{ -+ return (__mmask8) __builtin_ia32_cmpd256_mask ((__v8si) __X, -+ (__v8si) __Y, 1, -+ (__mmask8) __M); -+} -+ -+extern __inline __mmask8 -+ __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) -+_mm256_cmplt_epi32_mask (__m256i __X, __m256i __Y) -+{ -+ return (__mmask8) __builtin_ia32_cmpd256_mask ((__v8si) __X, -+ (__v8si) __Y, 1, -+ (__mmask8) -1); -+} -+ -+extern __inline __mmask8 -+ __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) -+_mm256_mask_cmpge_epi32_mask (__mmask8 __M, __m256i __X, __m256i __Y) -+{ -+ return (__mmask8) __builtin_ia32_cmpd256_mask ((__v8si) __X, -+ (__v8si) __Y, 5, -+ (__mmask8) __M); -+} -+ -+extern __inline __mmask8 -+ __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) -+_mm256_cmpge_epi32_mask (__m256i __X, __m256i __Y) -+{ -+ return (__mmask8) __builtin_ia32_cmpd256_mask ((__v8si) __X, -+ (__v8si) __Y, 5, -+ (__mmask8) -1); -+} -+ -+extern __inline __mmask8 -+ __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) -+_mm256_mask_cmple_epi32_mask (__mmask8 __M, __m256i __X, __m256i __Y) -+{ -+ return (__mmask8) __builtin_ia32_cmpd256_mask ((__v8si) __X, -+ (__v8si) __Y, 2, -+ (__mmask8) __M); -+} -+ -+extern __inline __mmask8 -+ __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) -+_mm256_cmple_epi32_mask (__m256i __X, __m256i __Y) -+{ -+ return (__mmask8) __builtin_ia32_cmpd256_mask ((__v8si) __X, -+ (__v8si) __Y, 2, -+ (__mmask8) -1); -+} -+ -+extern __inline __mmask8 -+ __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) -+_mm256_mask_cmpneq_epi64_mask (__mmask8 __M, __m256i __X, __m256i __Y) -+{ -+ return (__mmask8) __builtin_ia32_cmpq256_mask ((__v4di) __X, -+ 
(__v4di) __Y, 4, -+ (__mmask8) __M); -+} -+ -+extern __inline __mmask8 -+ __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) -+_mm256_cmpneq_epi64_mask (__m256i __X, __m256i __Y) -+{ -+ return (__mmask8) __builtin_ia32_cmpq256_mask ((__v4di) __X, -+ (__v4di) __Y, 4, -+ (__mmask8) -1); -+} -+ -+extern __inline __mmask8 -+ __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) -+_mm256_mask_cmplt_epi64_mask (__mmask8 __M, __m256i __X, __m256i __Y) -+{ -+ return (__mmask8) __builtin_ia32_cmpq256_mask ((__v4di) __X, -+ (__v4di) __Y, 1, -+ (__mmask8) __M); -+} -+ -+extern __inline __mmask8 -+ __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) -+_mm256_cmplt_epi64_mask (__m256i __X, __m256i __Y) -+{ -+ return (__mmask8) __builtin_ia32_cmpq256_mask ((__v4di) __X, -+ (__v4di) __Y, 1, -+ (__mmask8) -1); -+} -+ -+extern __inline __mmask8 -+ __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) -+_mm256_mask_cmpge_epi64_mask (__mmask8 __M, __m256i __X, __m256i __Y) -+{ -+ return (__mmask8) __builtin_ia32_cmpq256_mask ((__v4di) __X, -+ (__v4di) __Y, 5, -+ (__mmask8) __M); -+} -+ -+extern __inline __mmask8 -+ __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) -+_mm256_cmpge_epi64_mask (__m256i __X, __m256i __Y) -+{ -+ return (__mmask8) __builtin_ia32_cmpq256_mask ((__v4di) __X, -+ (__v4di) __Y, 5, -+ (__mmask8) -1); -+} -+ -+extern __inline __mmask8 -+ __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) -+_mm256_mask_cmple_epi64_mask (__mmask8 __M, __m256i __X, __m256i __Y) -+{ -+ return (__mmask8) __builtin_ia32_cmpq256_mask ((__v4di) __X, -+ (__v4di) __Y, 2, -+ (__mmask8) __M); -+} -+ -+extern __inline __mmask8 -+ __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) -+_mm256_cmple_epi64_mask (__m256i __X, __m256i __Y) -+{ -+ return (__mmask8) __builtin_ia32_cmpq256_mask ((__v4di) __X, -+ (__v4di) __Y, 2, -+ (__mmask8) -1); -+} -+ -+extern __inline __mmask8 -+ __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) -+_mm_mask_cmpneq_epu32_mask (__mmask8 __M, __m128i __X, __m128i __Y) -+{ -+ return (__mmask8) __builtin_ia32_ucmpd128_mask ((__v4si) __X, -+ (__v4si) __Y, 4, -+ (__mmask8) __M); -+} -+ -+extern __inline __mmask8 -+ __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) -+_mm_cmpneq_epu32_mask (__m128i __X, __m128i __Y) -+{ -+ return (__mmask8) __builtin_ia32_ucmpd128_mask ((__v4si) __X, -+ (__v4si) __Y, 4, -+ (__mmask8) -1); -+} -+ -+extern __inline __mmask8 -+ __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) -+_mm_mask_cmplt_epu32_mask (__mmask8 __M, __m128i __X, __m128i __Y) -+{ -+ return (__mmask8) __builtin_ia32_ucmpd128_mask ((__v4si) __X, -+ (__v4si) __Y, 1, -+ (__mmask8) __M); -+} -+ -+extern __inline __mmask8 -+ __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) -+_mm_cmplt_epu32_mask (__m128i __X, __m128i __Y) -+{ -+ return (__mmask8) __builtin_ia32_ucmpd128_mask ((__v4si) __X, -+ (__v4si) __Y, 1, -+ (__mmask8) -1); -+} -+ -+extern __inline __mmask8 -+ __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) -+_mm_mask_cmpge_epu32_mask (__mmask8 __M, __m128i __X, __m128i __Y) -+{ -+ return (__mmask8) __builtin_ia32_ucmpd128_mask ((__v4si) __X, -+ (__v4si) __Y, 5, -+ (__mmask8) __M); -+} -+ -+extern __inline __mmask8 -+ __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) -+_mm_cmpge_epu32_mask (__m128i __X, __m128i __Y) -+{ -+ return (__mmask8) __builtin_ia32_ucmpd128_mask ((__v4si) __X, 
-+ (__v4si) __Y, 5, -+ (__mmask8) -1); -+} -+ -+extern __inline __mmask8 -+ __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) -+_mm_mask_cmple_epu32_mask (__mmask8 __M, __m128i __X, __m128i __Y) -+{ -+ return (__mmask8) __builtin_ia32_ucmpd128_mask ((__v4si) __X, -+ (__v4si) __Y, 2, -+ (__mmask8) __M); -+} -+ -+extern __inline __mmask8 -+ __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) -+_mm_cmple_epu32_mask (__m128i __X, __m128i __Y) -+{ -+ return (__mmask8) __builtin_ia32_ucmpd128_mask ((__v4si) __X, -+ (__v4si) __Y, 2, -+ (__mmask8) -1); -+} -+ -+extern __inline __mmask8 -+ __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) -+_mm_mask_cmpneq_epu64_mask (__mmask8 __M, __m128i __X, __m128i __Y) -+{ -+ return (__mmask8) __builtin_ia32_ucmpq128_mask ((__v2di) __X, -+ (__v2di) __Y, 4, -+ (__mmask8) __M); -+} -+ -+extern __inline __mmask8 -+ __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) -+_mm_cmpneq_epu64_mask (__m128i __X, __m128i __Y) -+{ -+ return (__mmask8) __builtin_ia32_ucmpq128_mask ((__v2di) __X, -+ (__v2di) __Y, 4, -+ (__mmask8) -1); -+} -+ -+extern __inline __mmask8 -+ __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) -+_mm_mask_cmplt_epu64_mask (__mmask8 __M, __m128i __X, __m128i __Y) -+{ -+ return (__mmask8) __builtin_ia32_ucmpq128_mask ((__v2di) __X, -+ (__v2di) __Y, 1, -+ (__mmask8) __M); -+} -+ -+extern __inline __mmask8 -+ __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) -+_mm_cmplt_epu64_mask (__m128i __X, __m128i __Y) -+{ -+ return (__mmask8) __builtin_ia32_ucmpq128_mask ((__v2di) __X, -+ (__v2di) __Y, 1, -+ (__mmask8) -1); -+} -+ -+extern __inline __mmask8 -+ __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) -+_mm_mask_cmpge_epu64_mask (__mmask8 __M, __m128i __X, __m128i __Y) -+{ -+ return (__mmask8) __builtin_ia32_ucmpq128_mask ((__v2di) __X, -+ (__v2di) __Y, 5, -+ (__mmask8) __M); -+} -+ -+extern __inline __mmask8 -+ __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) -+_mm_cmpge_epu64_mask (__m128i __X, __m128i __Y) -+{ -+ return (__mmask8) __builtin_ia32_ucmpq128_mask ((__v2di) __X, -+ (__v2di) __Y, 5, -+ (__mmask8) -1); -+} -+ -+extern __inline __mmask8 -+ __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) -+_mm_mask_cmple_epu64_mask (__mmask8 __M, __m128i __X, __m128i __Y) -+{ -+ return (__mmask8) __builtin_ia32_ucmpq128_mask ((__v2di) __X, -+ (__v2di) __Y, 2, -+ (__mmask8) __M); -+} -+ -+extern __inline __mmask8 -+ __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) -+_mm_cmple_epu64_mask (__m128i __X, __m128i __Y) -+{ -+ return (__mmask8) __builtin_ia32_ucmpq128_mask ((__v2di) __X, -+ (__v2di) __Y, 2, -+ (__mmask8) -1); -+} -+ -+extern __inline __mmask8 -+ __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) -+_mm_mask_cmpneq_epi32_mask (__mmask8 __M, __m128i __X, __m128i __Y) -+{ -+ return (__mmask8) __builtin_ia32_cmpd128_mask ((__v4si) __X, -+ (__v4si) __Y, 4, -+ (__mmask8) __M); -+} -+ -+extern __inline __mmask8 -+ __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) -+_mm_cmpneq_epi32_mask (__m128i __X, __m128i __Y) -+{ -+ return (__mmask8) __builtin_ia32_cmpd128_mask ((__v4si) __X, -+ (__v4si) __Y, 4, -+ (__mmask8) -1); -+} -+ -+extern __inline __mmask8 -+ __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) -+_mm_mask_cmplt_epi32_mask (__mmask8 __M, __m128i __X, __m128i __Y) -+{ -+ return (__mmask8) __builtin_ia32_cmpd128_mask ((__v4si) 
__X, -+ (__v4si) __Y, 1, -+ (__mmask8) __M); -+} -+ -+extern __inline __mmask8 -+ __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) -+_mm_cmplt_epi32_mask (__m128i __X, __m128i __Y) -+{ -+ return (__mmask8) __builtin_ia32_cmpd128_mask ((__v4si) __X, -+ (__v4si) __Y, 1, -+ (__mmask8) -1); -+} -+ -+extern __inline __mmask8 -+ __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) -+_mm_mask_cmpge_epi32_mask (__mmask8 __M, __m128i __X, __m128i __Y) -+{ -+ return (__mmask8) __builtin_ia32_cmpd128_mask ((__v4si) __X, -+ (__v4si) __Y, 5, -+ (__mmask8) __M); -+} -+ -+extern __inline __mmask8 -+ __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) -+_mm_cmpge_epi32_mask (__m128i __X, __m128i __Y) -+{ -+ return (__mmask8) __builtin_ia32_cmpd128_mask ((__v4si) __X, -+ (__v4si) __Y, 5, -+ (__mmask8) -1); -+} -+ -+extern __inline __mmask8 -+ __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) -+_mm_mask_cmple_epi32_mask (__mmask8 __M, __m128i __X, __m128i __Y) -+{ -+ return (__mmask8) __builtin_ia32_cmpd128_mask ((__v4si) __X, -+ (__v4si) __Y, 2, -+ (__mmask8) __M); -+} -+ -+extern __inline __mmask8 -+ __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) -+_mm_cmple_epi32_mask (__m128i __X, __m128i __Y) -+{ -+ return (__mmask8) __builtin_ia32_cmpd128_mask ((__v4si) __X, -+ (__v4si) __Y, 2, -+ (__mmask8) -1); -+} -+ -+extern __inline __mmask8 -+ __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) -+_mm_mask_cmpneq_epi64_mask (__mmask8 __M, __m128i __X, __m128i __Y) -+{ -+ return (__mmask8) __builtin_ia32_cmpq128_mask ((__v2di) __X, -+ (__v2di) __Y, 4, -+ (__mmask8) __M); -+} -+ -+extern __inline __mmask8 -+ __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) -+_mm_cmpneq_epi64_mask (__m128i __X, __m128i __Y) -+{ -+ return (__mmask8) __builtin_ia32_cmpq128_mask ((__v2di) __X, -+ (__v2di) __Y, 4, -+ (__mmask8) -1); -+} -+ -+extern __inline __mmask8 -+ __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) -+_mm_mask_cmplt_epi64_mask (__mmask8 __M, __m128i __X, __m128i __Y) -+{ -+ return (__mmask8) __builtin_ia32_cmpq128_mask ((__v2di) __X, -+ (__v2di) __Y, 1, -+ (__mmask8) __M); -+} -+ -+extern __inline __mmask8 -+ __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) -+_mm_cmplt_epi64_mask (__m128i __X, __m128i __Y) -+{ -+ return (__mmask8) __builtin_ia32_cmpq128_mask ((__v2di) __X, -+ (__v2di) __Y, 1, -+ (__mmask8) -1); -+} -+ -+extern __inline __mmask8 -+ __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) -+_mm_mask_cmpge_epi64_mask (__mmask8 __M, __m128i __X, __m128i __Y) -+{ -+ return (__mmask8) __builtin_ia32_cmpq128_mask ((__v2di) __X, -+ (__v2di) __Y, 5, -+ (__mmask8) __M); -+} -+ -+extern __inline __mmask8 -+ __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) -+_mm_cmpge_epi64_mask (__m128i __X, __m128i __Y) -+{ -+ return (__mmask8) __builtin_ia32_cmpq128_mask ((__v2di) __X, -+ (__v2di) __Y, 5, -+ (__mmask8) -1); -+} -+ -+extern __inline __mmask8 -+ __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) -+_mm_mask_cmple_epi64_mask (__mmask8 __M, __m128i __X, __m128i __Y) -+{ -+ return (__mmask8) __builtin_ia32_cmpq128_mask ((__v2di) __X, -+ (__v2di) __Y, 2, -+ (__mmask8) __M); -+} -+ -+extern __inline __mmask8 -+ __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) -+_mm_cmple_epi64_mask (__m128i __X, __m128i __Y) -+{ -+ return (__mmask8) __builtin_ia32_cmpq128_mask ((__v2di) __X, -+ (__v2di) __Y, 2, 
-+ (__mmask8) -1); -+} -+ - #ifdef __OPTIMIZE__ - extern __inline __m256i - __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) -@@ -11784,582 +12360,6 @@ _mm256_permutex_pd (__m256d __X, const i - (__mmask8) -1); - } - --extern __inline __mmask8 -- __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) --_mm256_mask_cmpneq_epu32_mask (__mmask8 __M, __m256i __X, __m256i __Y) --{ -- return (__mmask8) __builtin_ia32_ucmpd256_mask ((__v8si) __X, -- (__v8si) __Y, 4, -- (__mmask8) __M); --} -- --extern __inline __mmask8 -- __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) --_mm256_cmpneq_epu32_mask (__m256i __X, __m256i __Y) --{ -- return (__mmask8) __builtin_ia32_ucmpd256_mask ((__v8si) __X, -- (__v8si) __Y, 4, -- (__mmask8) -1); --} -- --extern __inline __mmask8 -- __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) --_mm256_mask_cmplt_epu32_mask (__mmask8 __M, __m256i __X, __m256i __Y) --{ -- return (__mmask8) __builtin_ia32_ucmpd256_mask ((__v8si) __X, -- (__v8si) __Y, 1, -- (__mmask8) __M); --} -- --extern __inline __mmask8 -- __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) --_mm256_cmplt_epu32_mask (__m256i __X, __m256i __Y) --{ -- return (__mmask8) __builtin_ia32_ucmpd256_mask ((__v8si) __X, -- (__v8si) __Y, 1, -- (__mmask8) -1); --} -- --extern __inline __mmask8 -- __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) --_mm256_mask_cmpge_epu32_mask (__mmask8 __M, __m256i __X, __m256i __Y) --{ -- return (__mmask8) __builtin_ia32_ucmpd256_mask ((__v8si) __X, -- (__v8si) __Y, 5, -- (__mmask8) __M); --} -- --extern __inline __mmask8 -- __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) --_mm256_cmpge_epu32_mask (__m256i __X, __m256i __Y) --{ -- return (__mmask8) __builtin_ia32_ucmpd256_mask ((__v8si) __X, -- (__v8si) __Y, 5, -- (__mmask8) -1); --} -- --extern __inline __mmask8 -- __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) --_mm256_mask_cmple_epu32_mask (__mmask8 __M, __m256i __X, __m256i __Y) --{ -- return (__mmask8) __builtin_ia32_ucmpd256_mask ((__v8si) __X, -- (__v8si) __Y, 2, -- (__mmask8) __M); --} -- --extern __inline __mmask8 -- __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) --_mm256_cmple_epu32_mask (__m256i __X, __m256i __Y) --{ -- return (__mmask8) __builtin_ia32_ucmpd256_mask ((__v8si) __X, -- (__v8si) __Y, 2, -- (__mmask8) -1); --} -- --extern __inline __mmask8 -- __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) --_mm256_mask_cmpneq_epu64_mask (__mmask8 __M, __m256i __X, __m256i __Y) --{ -- return (__mmask8) __builtin_ia32_ucmpq256_mask ((__v4di) __X, -- (__v4di) __Y, 4, -- (__mmask8) __M); --} -- --extern __inline __mmask8 -- __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) --_mm256_cmpneq_epu64_mask (__m256i __X, __m256i __Y) --{ -- return (__mmask8) __builtin_ia32_ucmpq256_mask ((__v4di) __X, -- (__v4di) __Y, 4, -- (__mmask8) -1); --} -- --extern __inline __mmask8 -- __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) --_mm256_mask_cmplt_epu64_mask (__mmask8 __M, __m256i __X, __m256i __Y) --{ -- return (__mmask8) __builtin_ia32_ucmpq256_mask ((__v4di) __X, -- (__v4di) __Y, 1, -- (__mmask8) __M); --} -- --extern __inline __mmask8 -- __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) --_mm256_cmplt_epu64_mask (__m256i __X, __m256i __Y) --{ -- return (__mmask8) __builtin_ia32_ucmpq256_mask ((__v4di) __X, -- (__v4di) __Y, 1, -- (__mmask8) -1); --} -- 
--extern __inline __mmask8 -- __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) --_mm256_mask_cmpge_epu64_mask (__mmask8 __M, __m256i __X, __m256i __Y) --{ -- return (__mmask8) __builtin_ia32_ucmpq256_mask ((__v4di) __X, -- (__v4di) __Y, 5, -- (__mmask8) __M); --} -- --extern __inline __mmask8 -- __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) --_mm256_cmpge_epu64_mask (__m256i __X, __m256i __Y) --{ -- return (__mmask8) __builtin_ia32_ucmpq256_mask ((__v4di) __X, -- (__v4di) __Y, 5, -- (__mmask8) -1); --} -- --extern __inline __mmask8 -- __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) --_mm256_mask_cmple_epu64_mask (__mmask8 __M, __m256i __X, __m256i __Y) --{ -- return (__mmask8) __builtin_ia32_ucmpq256_mask ((__v4di) __X, -- (__v4di) __Y, 2, -- (__mmask8) __M); --} -- --extern __inline __mmask8 -- __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) --_mm256_cmple_epu64_mask (__m256i __X, __m256i __Y) --{ -- return (__mmask8) __builtin_ia32_ucmpq256_mask ((__v4di) __X, -- (__v4di) __Y, 2, -- (__mmask8) -1); --} -- --extern __inline __mmask8 -- __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) --_mm256_mask_cmpneq_epi32_mask (__mmask8 __M, __m256i __X, __m256i __Y) --{ -- return (__mmask8) __builtin_ia32_cmpd256_mask ((__v8si) __X, -- (__v8si) __Y, 4, -- (__mmask8) __M); --} -- --extern __inline __mmask8 -- __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) --_mm256_cmpneq_epi32_mask (__m256i __X, __m256i __Y) --{ -- return (__mmask8) __builtin_ia32_cmpd256_mask ((__v8si) __X, -- (__v8si) __Y, 4, -- (__mmask8) -1); --} -- --extern __inline __mmask8 -- __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) --_mm256_mask_cmplt_epi32_mask (__mmask8 __M, __m256i __X, __m256i __Y) --{ -- return (__mmask8) __builtin_ia32_cmpd256_mask ((__v8si) __X, -- (__v8si) __Y, 1, -- (__mmask8) __M); --} -- --extern __inline __mmask8 -- __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) --_mm256_cmplt_epi32_mask (__m256i __X, __m256i __Y) --{ -- return (__mmask8) __builtin_ia32_cmpd256_mask ((__v8si) __X, -- (__v8si) __Y, 1, -- (__mmask8) -1); --} -- --extern __inline __mmask8 -- __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) --_mm256_mask_cmpge_epi32_mask (__mmask8 __M, __m256i __X, __m256i __Y) --{ -- return (__mmask8) __builtin_ia32_cmpd256_mask ((__v8si) __X, -- (__v8si) __Y, 5, -- (__mmask8) __M); --} -- --extern __inline __mmask8 -- __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) --_mm256_cmpge_epi32_mask (__m256i __X, __m256i __Y) --{ -- return (__mmask8) __builtin_ia32_cmpd256_mask ((__v8si) __X, -- (__v8si) __Y, 5, -- (__mmask8) -1); --} -- --extern __inline __mmask8 -- __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) --_mm256_mask_cmple_epi32_mask (__mmask8 __M, __m256i __X, __m256i __Y) --{ -- return (__mmask8) __builtin_ia32_cmpd256_mask ((__v8si) __X, -- (__v8si) __Y, 2, -- (__mmask8) __M); --} -- --extern __inline __mmask8 -- __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) --_mm256_cmple_epi32_mask (__m256i __X, __m256i __Y) --{ -- return (__mmask8) __builtin_ia32_cmpd256_mask ((__v8si) __X, -- (__v8si) __Y, 2, -- (__mmask8) -1); --} -- --extern __inline __mmask8 -- __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) --_mm256_mask_cmpneq_epi64_mask (__mmask8 __M, __m256i __X, __m256i __Y) --{ -- return (__mmask8) __builtin_ia32_cmpq256_mask ((__v4di) __X, -- 
(__v4di) __Y, 4, -- (__mmask8) __M); --} -- --extern __inline __mmask8 -- __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) --_mm256_cmpneq_epi64_mask (__m256i __X, __m256i __Y) --{ -- return (__mmask8) __builtin_ia32_cmpq256_mask ((__v4di) __X, -- (__v4di) __Y, 4, -- (__mmask8) -1); --} -- --extern __inline __mmask8 -- __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) --_mm256_mask_cmplt_epi64_mask (__mmask8 __M, __m256i __X, __m256i __Y) --{ -- return (__mmask8) __builtin_ia32_cmpq256_mask ((__v4di) __X, -- (__v4di) __Y, 1, -- (__mmask8) __M); --} -- --extern __inline __mmask8 -- __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) --_mm256_cmplt_epi64_mask (__m256i __X, __m256i __Y) --{ -- return (__mmask8) __builtin_ia32_cmpq256_mask ((__v4di) __X, -- (__v4di) __Y, 1, -- (__mmask8) -1); --} -- --extern __inline __mmask8 -- __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) --_mm256_mask_cmpge_epi64_mask (__mmask8 __M, __m256i __X, __m256i __Y) --{ -- return (__mmask8) __builtin_ia32_cmpq256_mask ((__v4di) __X, -- (__v4di) __Y, 5, -- (__mmask8) __M); --} -- --extern __inline __mmask8 -- __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) --_mm256_cmpge_epi64_mask (__m256i __X, __m256i __Y) --{ -- return (__mmask8) __builtin_ia32_cmpq256_mask ((__v4di) __X, -- (__v4di) __Y, 5, -- (__mmask8) -1); --} -- --extern __inline __mmask8 -- __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) --_mm256_mask_cmple_epi64_mask (__mmask8 __M, __m256i __X, __m256i __Y) --{ -- return (__mmask8) __builtin_ia32_cmpq256_mask ((__v4di) __X, -- (__v4di) __Y, 2, -- (__mmask8) __M); --} -- --extern __inline __mmask8 -- __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) --_mm256_cmple_epi64_mask (__m256i __X, __m256i __Y) --{ -- return (__mmask8) __builtin_ia32_cmpq256_mask ((__v4di) __X, -- (__v4di) __Y, 2, -- (__mmask8) -1); --} -- --extern __inline __mmask8 -- __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) --_mm_mask_cmpneq_epu32_mask (__mmask8 __M, __m128i __X, __m128i __Y) --{ -- return (__mmask8) __builtin_ia32_ucmpd128_mask ((__v4si) __X, -- (__v4si) __Y, 4, -- (__mmask8) __M); --} -- --extern __inline __mmask8 -- __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) --_mm_cmpneq_epu32_mask (__m128i __X, __m128i __Y) --{ -- return (__mmask8) __builtin_ia32_ucmpd128_mask ((__v4si) __X, -- (__v4si) __Y, 4, -- (__mmask8) -1); --} -- --extern __inline __mmask8 -- __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) --_mm_mask_cmplt_epu32_mask (__mmask8 __M, __m128i __X, __m128i __Y) --{ -- return (__mmask8) __builtin_ia32_ucmpd128_mask ((__v4si) __X, -- (__v4si) __Y, 1, -- (__mmask8) __M); --} -- --extern __inline __mmask8 -- __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) --_mm_cmplt_epu32_mask (__m128i __X, __m128i __Y) --{ -- return (__mmask8) __builtin_ia32_ucmpd128_mask ((__v4si) __X, -- (__v4si) __Y, 1, -- (__mmask8) -1); --} -- --extern __inline __mmask8 -- __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) --_mm_mask_cmpge_epu32_mask (__mmask8 __M, __m128i __X, __m128i __Y) --{ -- return (__mmask8) __builtin_ia32_ucmpd128_mask ((__v4si) __X, -- (__v4si) __Y, 5, -- (__mmask8) __M); --} -- --extern __inline __mmask8 -- __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) --_mm_cmpge_epu32_mask (__m128i __X, __m128i __Y) --{ -- return (__mmask8) __builtin_ia32_ucmpd128_mask ((__v4si) __X, 
-- (__v4si) __Y, 5, -- (__mmask8) -1); --} -- --extern __inline __mmask8 -- __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) --_mm_mask_cmple_epu32_mask (__mmask8 __M, __m128i __X, __m128i __Y) --{ -- return (__mmask8) __builtin_ia32_ucmpd128_mask ((__v4si) __X, -- (__v4si) __Y, 2, -- (__mmask8) __M); --} -- --extern __inline __mmask8 -- __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) --_mm_cmple_epu32_mask (__m128i __X, __m128i __Y) --{ -- return (__mmask8) __builtin_ia32_ucmpd128_mask ((__v4si) __X, -- (__v4si) __Y, 2, -- (__mmask8) -1); --} -- --extern __inline __mmask8 -- __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) --_mm_mask_cmpneq_epu64_mask (__mmask8 __M, __m128i __X, __m128i __Y) --{ -- return (__mmask8) __builtin_ia32_ucmpq128_mask ((__v2di) __X, -- (__v2di) __Y, 4, -- (__mmask8) __M); --} -- --extern __inline __mmask8 -- __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) --_mm_cmpneq_epu64_mask (__m128i __X, __m128i __Y) --{ -- return (__mmask8) __builtin_ia32_ucmpq128_mask ((__v2di) __X, -- (__v2di) __Y, 4, -- (__mmask8) -1); --} -- --extern __inline __mmask8 -- __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) --_mm_mask_cmplt_epu64_mask (__mmask8 __M, __m128i __X, __m128i __Y) --{ -- return (__mmask8) __builtin_ia32_ucmpq128_mask ((__v2di) __X, -- (__v2di) __Y, 1, -- (__mmask8) __M); --} -- --extern __inline __mmask8 -- __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) --_mm_cmplt_epu64_mask (__m128i __X, __m128i __Y) --{ -- return (__mmask8) __builtin_ia32_ucmpq128_mask ((__v2di) __X, -- (__v2di) __Y, 1, -- (__mmask8) -1); --} -- --extern __inline __mmask8 -- __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) --_mm_mask_cmpge_epu64_mask (__mmask8 __M, __m128i __X, __m128i __Y) --{ -- return (__mmask8) __builtin_ia32_ucmpq128_mask ((__v2di) __X, -- (__v2di) __Y, 5, -- (__mmask8) __M); --} -- --extern __inline __mmask8 -- __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) --_mm_cmpge_epu64_mask (__m128i __X, __m128i __Y) --{ -- return (__mmask8) __builtin_ia32_ucmpq128_mask ((__v2di) __X, -- (__v2di) __Y, 5, -- (__mmask8) -1); --} -- --extern __inline __mmask8 -- __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) --_mm_mask_cmple_epu64_mask (__mmask8 __M, __m128i __X, __m128i __Y) --{ -- return (__mmask8) __builtin_ia32_ucmpq128_mask ((__v2di) __X, -- (__v2di) __Y, 2, -- (__mmask8) __M); --} -- --extern __inline __mmask8 -- __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) --_mm_cmple_epu64_mask (__m128i __X, __m128i __Y) --{ -- return (__mmask8) __builtin_ia32_ucmpq128_mask ((__v2di) __X, -- (__v2di) __Y, 2, -- (__mmask8) -1); --} -- --extern __inline __mmask8 -- __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) --_mm_mask_cmpneq_epi32_mask (__mmask8 __M, __m128i __X, __m128i __Y) --{ -- return (__mmask8) __builtin_ia32_cmpd128_mask ((__v4si) __X, -- (__v4si) __Y, 4, -- (__mmask8) __M); --} -- --extern __inline __mmask8 -- __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) --_mm_cmpneq_epi32_mask (__m128i __X, __m128i __Y) --{ -- return (__mmask8) __builtin_ia32_cmpd128_mask ((__v4si) __X, -- (__v4si) __Y, 4, -- (__mmask8) -1); --} -- --extern __inline __mmask8 -- __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) --_mm_mask_cmplt_epi32_mask (__mmask8 __M, __m128i __X, __m128i __Y) --{ -- return (__mmask8) __builtin_ia32_cmpd128_mask ((__v4si) 
__X, -- (__v4si) __Y, 1, -- (__mmask8) __M); --} -- --extern __inline __mmask8 -- __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) --_mm_cmplt_epi32_mask (__m128i __X, __m128i __Y) --{ -- return (__mmask8) __builtin_ia32_cmpd128_mask ((__v4si) __X, -- (__v4si) __Y, 1, -- (__mmask8) -1); --} -- --extern __inline __mmask8 -- __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) --_mm_mask_cmpge_epi32_mask (__mmask8 __M, __m128i __X, __m128i __Y) --{ -- return (__mmask8) __builtin_ia32_cmpd128_mask ((__v4si) __X, -- (__v4si) __Y, 5, -- (__mmask8) __M); --} -- --extern __inline __mmask8 -- __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) --_mm_cmpge_epi32_mask (__m128i __X, __m128i __Y) --{ -- return (__mmask8) __builtin_ia32_cmpd128_mask ((__v4si) __X, -- (__v4si) __Y, 5, -- (__mmask8) -1); --} -- --extern __inline __mmask8 -- __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) --_mm_mask_cmple_epi32_mask (__mmask8 __M, __m128i __X, __m128i __Y) --{ -- return (__mmask8) __builtin_ia32_cmpd128_mask ((__v4si) __X, -- (__v4si) __Y, 2, -- (__mmask8) __M); --} -- --extern __inline __mmask8 -- __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) --_mm_cmple_epi32_mask (__m128i __X, __m128i __Y) --{ -- return (__mmask8) __builtin_ia32_cmpd128_mask ((__v4si) __X, -- (__v4si) __Y, 2, -- (__mmask8) -1); --} -- --extern __inline __mmask8 -- __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) --_mm_mask_cmpneq_epi64_mask (__mmask8 __M, __m128i __X, __m128i __Y) --{ -- return (__mmask8) __builtin_ia32_cmpq128_mask ((__v2di) __X, -- (__v2di) __Y, 4, -- (__mmask8) __M); --} -- --extern __inline __mmask8 -- __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) --_mm_cmpneq_epi64_mask (__m128i __X, __m128i __Y) --{ -- return (__mmask8) __builtin_ia32_cmpq128_mask ((__v2di) __X, -- (__v2di) __Y, 4, -- (__mmask8) -1); --} -- --extern __inline __mmask8 -- __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) --_mm_mask_cmplt_epi64_mask (__mmask8 __M, __m128i __X, __m128i __Y) --{ -- return (__mmask8) __builtin_ia32_cmpq128_mask ((__v2di) __X, -- (__v2di) __Y, 1, -- (__mmask8) __M); --} -- --extern __inline __mmask8 -- __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) --_mm_cmplt_epi64_mask (__m128i __X, __m128i __Y) --{ -- return (__mmask8) __builtin_ia32_cmpq128_mask ((__v2di) __X, -- (__v2di) __Y, 1, -- (__mmask8) -1); --} -- --extern __inline __mmask8 -- __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) --_mm_mask_cmpge_epi64_mask (__mmask8 __M, __m128i __X, __m128i __Y) --{ -- return (__mmask8) __builtin_ia32_cmpq128_mask ((__v2di) __X, -- (__v2di) __Y, 5, -- (__mmask8) __M); --} -- --extern __inline __mmask8 -- __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) --_mm_cmpge_epi64_mask (__m128i __X, __m128i __Y) --{ -- return (__mmask8) __builtin_ia32_cmpq128_mask ((__v2di) __X, -- (__v2di) __Y, 5, -- (__mmask8) -1); --} -- --extern __inline __mmask8 -- __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) --_mm_mask_cmple_epi64_mask (__mmask8 __M, __m128i __X, __m128i __Y) --{ -- return (__mmask8) __builtin_ia32_cmpq128_mask ((__v2di) __X, -- (__v2di) __Y, 2, -- (__mmask8) __M); --} -- --extern __inline __mmask8 -- __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) --_mm_cmple_epi64_mask (__m128i __X, __m128i __Y) --{ -- return (__mmask8) __builtin_ia32_cmpq128_mask ((__v2di) __X, -- (__v2di) __Y, 2, 
-- (__mmask8) -1); --} -- - #else - #define _mm256_permutex_pd(X, M) \ - ((__m256d) __builtin_ia32_permdf256_mask ((__v4df)(__m256d)(X), (int)(M), \ ---- gcc/testsuite/gcc.target/i386/pr79932-2.c.jj 2017-03-07 08:37:10.835990732 +0100 -+++ gcc/testsuite/gcc.target/i386/pr79932-2.c 2017-03-07 08:36:13.000000000 +0100 -@@ -0,0 +1,78 @@ -+/* PR target/79932 */ -+/* { dg-do compile } */ -+/* { dg-options "-O0 -mavx512vl" } */ -+ -+#include -+ -+__m256i a, b; -+__m128i c, d; -+__mmask32 e, f[64]; -+ -+void -+foo (void) -+{ -+ f[0] = _mm256_cmpge_epi32_mask (a, b); -+ f[1] = _mm256_cmpge_epi64_mask (a, b); -+ f[2] = _mm256_cmpge_epu32_mask (a, b); -+ f[3] = _mm256_cmpge_epu64_mask (a, b); -+ f[4] = _mm256_cmple_epi32_mask (a, b); -+ f[5] = _mm256_cmple_epi64_mask (a, b); -+ f[6] = _mm256_cmple_epu32_mask (a, b); -+ f[7] = _mm256_cmple_epu64_mask (a, b); -+ f[8] = _mm256_cmplt_epi32_mask (a, b); -+ f[9] = _mm256_cmplt_epi64_mask (a, b); -+ f[10] = _mm256_cmplt_epu32_mask (a, b); -+ f[11] = _mm256_cmplt_epu64_mask (a, b); -+ f[12] = _mm256_cmpneq_epi32_mask (a, b); -+ f[13] = _mm256_cmpneq_epi64_mask (a, b); -+ f[14] = _mm256_cmpneq_epu32_mask (a, b); -+ f[15] = _mm256_cmpneq_epu64_mask (a, b); -+ f[16] = _mm256_mask_cmpge_epi32_mask (e, a, b); -+ f[17] = _mm256_mask_cmpge_epi64_mask (e, a, b); -+ f[18] = _mm256_mask_cmpge_epu32_mask (e, a, b); -+ f[19] = _mm256_mask_cmpge_epu64_mask (e, a, b); -+ f[20] = _mm256_mask_cmple_epi32_mask (e, a, b); -+ f[21] = _mm256_mask_cmple_epi64_mask (e, a, b); -+ f[22] = _mm256_mask_cmple_epu32_mask (e, a, b); -+ f[23] = _mm256_mask_cmple_epu64_mask (e, a, b); -+ f[24] = _mm256_mask_cmplt_epi32_mask (e, a, b); -+ f[25] = _mm256_mask_cmplt_epi64_mask (e, a, b); -+ f[26] = _mm256_mask_cmplt_epu32_mask (e, a, b); -+ f[27] = _mm256_mask_cmplt_epu64_mask (e, a, b); -+ f[28] = _mm256_mask_cmpneq_epi32_mask (e, a, b); -+ f[29] = _mm256_mask_cmpneq_epi64_mask (e, a, b); -+ f[30] = _mm256_mask_cmpneq_epu32_mask (e, a, b); -+ f[31] = _mm256_mask_cmpneq_epu64_mask (e, a, b); -+ f[32] = _mm_cmpge_epi32_mask (c, d); -+ f[33] = _mm_cmpge_epi64_mask (c, d); -+ f[34] = _mm_cmpge_epu32_mask (c, d); -+ f[35] = _mm_cmpge_epu64_mask (c, d); -+ f[36] = _mm_cmple_epi32_mask (c, d); -+ f[37] = _mm_cmple_epi64_mask (c, d); -+ f[38] = _mm_cmple_epu32_mask (c, d); -+ f[39] = _mm_cmple_epu64_mask (c, d); -+ f[40] = _mm_cmplt_epi32_mask (c, d); -+ f[41] = _mm_cmplt_epi64_mask (c, d); -+ f[42] = _mm_cmplt_epu32_mask (c, d); -+ f[43] = _mm_cmplt_epu64_mask (c, d); -+ f[44] = _mm_cmpneq_epi32_mask (c, d); -+ f[45] = _mm_cmpneq_epi64_mask (c, d); -+ f[46] = _mm_cmpneq_epu32_mask (c, d); -+ f[47] = _mm_cmpneq_epu64_mask (c, d); -+ f[48] = _mm_mask_cmpge_epi32_mask (e, c, d); -+ f[49] = _mm_mask_cmpge_epi64_mask (e, c, d); -+ f[50] = _mm_mask_cmpge_epu32_mask (e, c, d); -+ f[51] = _mm_mask_cmpge_epu64_mask (e, c, d); -+ f[52] = _mm_mask_cmple_epi32_mask (e, c, d); -+ f[53] = _mm_mask_cmple_epi64_mask (e, c, d); -+ f[54] = _mm_mask_cmple_epu32_mask (e, c, d); -+ f[55] = _mm_mask_cmple_epu64_mask (e, c, d); -+ f[56] = _mm_mask_cmplt_epi32_mask (e, c, d); -+ f[57] = _mm_mask_cmplt_epi64_mask (e, c, d); -+ f[58] = _mm_mask_cmplt_epu32_mask (e, c, d); -+ f[59] = _mm_mask_cmplt_epu64_mask (e, c, d); -+ f[60] = _mm_mask_cmpneq_epi32_mask (e, c, d); -+ f[61] = _mm_mask_cmpneq_epi64_mask (e, c, d); -+ f[62] = _mm_mask_cmpneq_epu32_mask (e, c, d); -+ f[63] = _mm_mask_cmpneq_epu64_mask (e, c, d); -+} diff --git a/gcc7-pr79944.patch b/gcc7-pr79944.patch deleted file mode 100644 index ffcc829..0000000 --- 
a/gcc7-pr79944.patch +++ /dev/null @@ -1,406 +0,0 @@ -2017-03-08 Jakub Jelinek - - PR sanitizer/79944 - * asan.c (get_mem_refs_of_builtin_call): For BUILT_IN_ATOMIC* and - BUILT_IN_SYNC*, determine the access type from the size suffix and - always build a MEM_REF with that type. Handle forgotten - BUILT_IN_SYNC_FETCH_AND_NAND_16 and BUILT_IN_SYNC_NAND_AND_FETCH_16. - - * c-c++-common/asan/pr79944.c: New test. - ---- gcc/asan.c.jj 2017-03-06 12:32:28.000000000 +0100 -+++ gcc/asan.c 2017-03-08 12:24:11.151353229 +0100 -@@ -603,218 +603,208 @@ get_mem_refs_of_builtin_call (const gcal - case BUILT_IN_STRLEN: - source0 = gimple_call_arg (call, 0); - len = gimple_call_lhs (call); -- break ; -+ break; - - /* And now the __atomic* and __sync builtins. - These are handled differently from the classical memory memory - access builtins above. */ - - case BUILT_IN_ATOMIC_LOAD_1: -- case BUILT_IN_ATOMIC_LOAD_2: -- case BUILT_IN_ATOMIC_LOAD_4: -- case BUILT_IN_ATOMIC_LOAD_8: -- case BUILT_IN_ATOMIC_LOAD_16: - is_store = false; -- /* fall through. */ -- -+ /* FALLTHRU */ - case BUILT_IN_SYNC_FETCH_AND_ADD_1: -- case BUILT_IN_SYNC_FETCH_AND_ADD_2: -- case BUILT_IN_SYNC_FETCH_AND_ADD_4: -- case BUILT_IN_SYNC_FETCH_AND_ADD_8: -- case BUILT_IN_SYNC_FETCH_AND_ADD_16: -- - case BUILT_IN_SYNC_FETCH_AND_SUB_1: -- case BUILT_IN_SYNC_FETCH_AND_SUB_2: -- case BUILT_IN_SYNC_FETCH_AND_SUB_4: -- case BUILT_IN_SYNC_FETCH_AND_SUB_8: -- case BUILT_IN_SYNC_FETCH_AND_SUB_16: -- - case BUILT_IN_SYNC_FETCH_AND_OR_1: -- case BUILT_IN_SYNC_FETCH_AND_OR_2: -- case BUILT_IN_SYNC_FETCH_AND_OR_4: -- case BUILT_IN_SYNC_FETCH_AND_OR_8: -- case BUILT_IN_SYNC_FETCH_AND_OR_16: -- - case BUILT_IN_SYNC_FETCH_AND_AND_1: -- case BUILT_IN_SYNC_FETCH_AND_AND_2: -- case BUILT_IN_SYNC_FETCH_AND_AND_4: -- case BUILT_IN_SYNC_FETCH_AND_AND_8: -- case BUILT_IN_SYNC_FETCH_AND_AND_16: -- - case BUILT_IN_SYNC_FETCH_AND_XOR_1: -- case BUILT_IN_SYNC_FETCH_AND_XOR_2: -- case BUILT_IN_SYNC_FETCH_AND_XOR_4: -- case BUILT_IN_SYNC_FETCH_AND_XOR_8: -- case BUILT_IN_SYNC_FETCH_AND_XOR_16: -- - case BUILT_IN_SYNC_FETCH_AND_NAND_1: -- case BUILT_IN_SYNC_FETCH_AND_NAND_2: -- case BUILT_IN_SYNC_FETCH_AND_NAND_4: -- case BUILT_IN_SYNC_FETCH_AND_NAND_8: -- - case BUILT_IN_SYNC_ADD_AND_FETCH_1: -- case BUILT_IN_SYNC_ADD_AND_FETCH_2: -- case BUILT_IN_SYNC_ADD_AND_FETCH_4: -- case BUILT_IN_SYNC_ADD_AND_FETCH_8: -- case BUILT_IN_SYNC_ADD_AND_FETCH_16: -- - case BUILT_IN_SYNC_SUB_AND_FETCH_1: -- case BUILT_IN_SYNC_SUB_AND_FETCH_2: -- case BUILT_IN_SYNC_SUB_AND_FETCH_4: -- case BUILT_IN_SYNC_SUB_AND_FETCH_8: -- case BUILT_IN_SYNC_SUB_AND_FETCH_16: -- - case BUILT_IN_SYNC_OR_AND_FETCH_1: -- case BUILT_IN_SYNC_OR_AND_FETCH_2: -- case BUILT_IN_SYNC_OR_AND_FETCH_4: -- case BUILT_IN_SYNC_OR_AND_FETCH_8: -- case BUILT_IN_SYNC_OR_AND_FETCH_16: -- - case BUILT_IN_SYNC_AND_AND_FETCH_1: -- case BUILT_IN_SYNC_AND_AND_FETCH_2: -- case BUILT_IN_SYNC_AND_AND_FETCH_4: -- case BUILT_IN_SYNC_AND_AND_FETCH_8: -- case BUILT_IN_SYNC_AND_AND_FETCH_16: -- - case BUILT_IN_SYNC_XOR_AND_FETCH_1: -- case BUILT_IN_SYNC_XOR_AND_FETCH_2: -- case BUILT_IN_SYNC_XOR_AND_FETCH_4: -- case BUILT_IN_SYNC_XOR_AND_FETCH_8: -- case BUILT_IN_SYNC_XOR_AND_FETCH_16: -- - case BUILT_IN_SYNC_NAND_AND_FETCH_1: -- case BUILT_IN_SYNC_NAND_AND_FETCH_2: -- case BUILT_IN_SYNC_NAND_AND_FETCH_4: -- case BUILT_IN_SYNC_NAND_AND_FETCH_8: -- - case BUILT_IN_SYNC_BOOL_COMPARE_AND_SWAP_1: -- case BUILT_IN_SYNC_BOOL_COMPARE_AND_SWAP_2: -- case BUILT_IN_SYNC_BOOL_COMPARE_AND_SWAP_4: -- case BUILT_IN_SYNC_BOOL_COMPARE_AND_SWAP_8: 
-- case BUILT_IN_SYNC_BOOL_COMPARE_AND_SWAP_16: -- - case BUILT_IN_SYNC_VAL_COMPARE_AND_SWAP_1: -- case BUILT_IN_SYNC_VAL_COMPARE_AND_SWAP_2: -- case BUILT_IN_SYNC_VAL_COMPARE_AND_SWAP_4: -- case BUILT_IN_SYNC_VAL_COMPARE_AND_SWAP_8: -- case BUILT_IN_SYNC_VAL_COMPARE_AND_SWAP_16: -- - case BUILT_IN_SYNC_LOCK_TEST_AND_SET_1: -- case BUILT_IN_SYNC_LOCK_TEST_AND_SET_2: -- case BUILT_IN_SYNC_LOCK_TEST_AND_SET_4: -- case BUILT_IN_SYNC_LOCK_TEST_AND_SET_8: -- case BUILT_IN_SYNC_LOCK_TEST_AND_SET_16: -- - case BUILT_IN_SYNC_LOCK_RELEASE_1: -- case BUILT_IN_SYNC_LOCK_RELEASE_2: -- case BUILT_IN_SYNC_LOCK_RELEASE_4: -- case BUILT_IN_SYNC_LOCK_RELEASE_8: -- case BUILT_IN_SYNC_LOCK_RELEASE_16: -- - case BUILT_IN_ATOMIC_EXCHANGE_1: -- case BUILT_IN_ATOMIC_EXCHANGE_2: -- case BUILT_IN_ATOMIC_EXCHANGE_4: -- case BUILT_IN_ATOMIC_EXCHANGE_8: -- case BUILT_IN_ATOMIC_EXCHANGE_16: -- - case BUILT_IN_ATOMIC_COMPARE_EXCHANGE_1: -- case BUILT_IN_ATOMIC_COMPARE_EXCHANGE_2: -- case BUILT_IN_ATOMIC_COMPARE_EXCHANGE_4: -- case BUILT_IN_ATOMIC_COMPARE_EXCHANGE_8: -- case BUILT_IN_ATOMIC_COMPARE_EXCHANGE_16: -- - case BUILT_IN_ATOMIC_STORE_1: -- case BUILT_IN_ATOMIC_STORE_2: -- case BUILT_IN_ATOMIC_STORE_4: -- case BUILT_IN_ATOMIC_STORE_8: -- case BUILT_IN_ATOMIC_STORE_16: -- - case BUILT_IN_ATOMIC_ADD_FETCH_1: -- case BUILT_IN_ATOMIC_ADD_FETCH_2: -- case BUILT_IN_ATOMIC_ADD_FETCH_4: -- case BUILT_IN_ATOMIC_ADD_FETCH_8: -- case BUILT_IN_ATOMIC_ADD_FETCH_16: -- - case BUILT_IN_ATOMIC_SUB_FETCH_1: -- case BUILT_IN_ATOMIC_SUB_FETCH_2: -- case BUILT_IN_ATOMIC_SUB_FETCH_4: -- case BUILT_IN_ATOMIC_SUB_FETCH_8: -- case BUILT_IN_ATOMIC_SUB_FETCH_16: -- - case BUILT_IN_ATOMIC_AND_FETCH_1: -- case BUILT_IN_ATOMIC_AND_FETCH_2: -- case BUILT_IN_ATOMIC_AND_FETCH_4: -- case BUILT_IN_ATOMIC_AND_FETCH_8: -- case BUILT_IN_ATOMIC_AND_FETCH_16: -- - case BUILT_IN_ATOMIC_NAND_FETCH_1: -- case BUILT_IN_ATOMIC_NAND_FETCH_2: -- case BUILT_IN_ATOMIC_NAND_FETCH_4: -- case BUILT_IN_ATOMIC_NAND_FETCH_8: -- case BUILT_IN_ATOMIC_NAND_FETCH_16: -- - case BUILT_IN_ATOMIC_XOR_FETCH_1: -- case BUILT_IN_ATOMIC_XOR_FETCH_2: -- case BUILT_IN_ATOMIC_XOR_FETCH_4: -- case BUILT_IN_ATOMIC_XOR_FETCH_8: -- case BUILT_IN_ATOMIC_XOR_FETCH_16: -- - case BUILT_IN_ATOMIC_OR_FETCH_1: -- case BUILT_IN_ATOMIC_OR_FETCH_2: -- case BUILT_IN_ATOMIC_OR_FETCH_4: -- case BUILT_IN_ATOMIC_OR_FETCH_8: -- case BUILT_IN_ATOMIC_OR_FETCH_16: -- - case BUILT_IN_ATOMIC_FETCH_ADD_1: -- case BUILT_IN_ATOMIC_FETCH_ADD_2: -- case BUILT_IN_ATOMIC_FETCH_ADD_4: -- case BUILT_IN_ATOMIC_FETCH_ADD_8: -- case BUILT_IN_ATOMIC_FETCH_ADD_16: -- - case BUILT_IN_ATOMIC_FETCH_SUB_1: -- case BUILT_IN_ATOMIC_FETCH_SUB_2: -- case BUILT_IN_ATOMIC_FETCH_SUB_4: -- case BUILT_IN_ATOMIC_FETCH_SUB_8: -- case BUILT_IN_ATOMIC_FETCH_SUB_16: -- - case BUILT_IN_ATOMIC_FETCH_AND_1: -- case BUILT_IN_ATOMIC_FETCH_AND_2: -- case BUILT_IN_ATOMIC_FETCH_AND_4: -- case BUILT_IN_ATOMIC_FETCH_AND_8: -- case BUILT_IN_ATOMIC_FETCH_AND_16: -- - case BUILT_IN_ATOMIC_FETCH_NAND_1: -- case BUILT_IN_ATOMIC_FETCH_NAND_2: -- case BUILT_IN_ATOMIC_FETCH_NAND_4: -- case BUILT_IN_ATOMIC_FETCH_NAND_8: -- case BUILT_IN_ATOMIC_FETCH_NAND_16: -- - case BUILT_IN_ATOMIC_FETCH_XOR_1: -- case BUILT_IN_ATOMIC_FETCH_XOR_2: -- case BUILT_IN_ATOMIC_FETCH_XOR_4: -- case BUILT_IN_ATOMIC_FETCH_XOR_8: -- case BUILT_IN_ATOMIC_FETCH_XOR_16: -- - case BUILT_IN_ATOMIC_FETCH_OR_1: -+ access_size = 1; -+ goto do_atomic; -+ -+ case BUILT_IN_ATOMIC_LOAD_2: -+ is_store = false; -+ /* FALLTHRU */ -+ case BUILT_IN_SYNC_FETCH_AND_ADD_2: -+ case 
BUILT_IN_SYNC_FETCH_AND_SUB_2: -+ case BUILT_IN_SYNC_FETCH_AND_OR_2: -+ case BUILT_IN_SYNC_FETCH_AND_AND_2: -+ case BUILT_IN_SYNC_FETCH_AND_XOR_2: -+ case BUILT_IN_SYNC_FETCH_AND_NAND_2: -+ case BUILT_IN_SYNC_ADD_AND_FETCH_2: -+ case BUILT_IN_SYNC_SUB_AND_FETCH_2: -+ case BUILT_IN_SYNC_OR_AND_FETCH_2: -+ case BUILT_IN_SYNC_AND_AND_FETCH_2: -+ case BUILT_IN_SYNC_XOR_AND_FETCH_2: -+ case BUILT_IN_SYNC_NAND_AND_FETCH_2: -+ case BUILT_IN_SYNC_BOOL_COMPARE_AND_SWAP_2: -+ case BUILT_IN_SYNC_VAL_COMPARE_AND_SWAP_2: -+ case BUILT_IN_SYNC_LOCK_TEST_AND_SET_2: -+ case BUILT_IN_SYNC_LOCK_RELEASE_2: -+ case BUILT_IN_ATOMIC_EXCHANGE_2: -+ case BUILT_IN_ATOMIC_COMPARE_EXCHANGE_2: -+ case BUILT_IN_ATOMIC_STORE_2: -+ case BUILT_IN_ATOMIC_ADD_FETCH_2: -+ case BUILT_IN_ATOMIC_SUB_FETCH_2: -+ case BUILT_IN_ATOMIC_AND_FETCH_2: -+ case BUILT_IN_ATOMIC_NAND_FETCH_2: -+ case BUILT_IN_ATOMIC_XOR_FETCH_2: -+ case BUILT_IN_ATOMIC_OR_FETCH_2: -+ case BUILT_IN_ATOMIC_FETCH_ADD_2: -+ case BUILT_IN_ATOMIC_FETCH_SUB_2: -+ case BUILT_IN_ATOMIC_FETCH_AND_2: -+ case BUILT_IN_ATOMIC_FETCH_NAND_2: -+ case BUILT_IN_ATOMIC_FETCH_XOR_2: - case BUILT_IN_ATOMIC_FETCH_OR_2: -+ access_size = 2; -+ goto do_atomic; -+ -+ case BUILT_IN_ATOMIC_LOAD_4: -+ is_store = false; -+ /* FALLTHRU */ -+ case BUILT_IN_SYNC_FETCH_AND_ADD_4: -+ case BUILT_IN_SYNC_FETCH_AND_SUB_4: -+ case BUILT_IN_SYNC_FETCH_AND_OR_4: -+ case BUILT_IN_SYNC_FETCH_AND_AND_4: -+ case BUILT_IN_SYNC_FETCH_AND_XOR_4: -+ case BUILT_IN_SYNC_FETCH_AND_NAND_4: -+ case BUILT_IN_SYNC_ADD_AND_FETCH_4: -+ case BUILT_IN_SYNC_SUB_AND_FETCH_4: -+ case BUILT_IN_SYNC_OR_AND_FETCH_4: -+ case BUILT_IN_SYNC_AND_AND_FETCH_4: -+ case BUILT_IN_SYNC_XOR_AND_FETCH_4: -+ case BUILT_IN_SYNC_NAND_AND_FETCH_4: -+ case BUILT_IN_SYNC_BOOL_COMPARE_AND_SWAP_4: -+ case BUILT_IN_SYNC_VAL_COMPARE_AND_SWAP_4: -+ case BUILT_IN_SYNC_LOCK_TEST_AND_SET_4: -+ case BUILT_IN_SYNC_LOCK_RELEASE_4: -+ case BUILT_IN_ATOMIC_EXCHANGE_4: -+ case BUILT_IN_ATOMIC_COMPARE_EXCHANGE_4: -+ case BUILT_IN_ATOMIC_STORE_4: -+ case BUILT_IN_ATOMIC_ADD_FETCH_4: -+ case BUILT_IN_ATOMIC_SUB_FETCH_4: -+ case BUILT_IN_ATOMIC_AND_FETCH_4: -+ case BUILT_IN_ATOMIC_NAND_FETCH_4: -+ case BUILT_IN_ATOMIC_XOR_FETCH_4: -+ case BUILT_IN_ATOMIC_OR_FETCH_4: -+ case BUILT_IN_ATOMIC_FETCH_ADD_4: -+ case BUILT_IN_ATOMIC_FETCH_SUB_4: -+ case BUILT_IN_ATOMIC_FETCH_AND_4: -+ case BUILT_IN_ATOMIC_FETCH_NAND_4: -+ case BUILT_IN_ATOMIC_FETCH_XOR_4: - case BUILT_IN_ATOMIC_FETCH_OR_4: -+ access_size = 4; -+ goto do_atomic; -+ -+ case BUILT_IN_ATOMIC_LOAD_8: -+ is_store = false; -+ /* FALLTHRU */ -+ case BUILT_IN_SYNC_FETCH_AND_ADD_8: -+ case BUILT_IN_SYNC_FETCH_AND_SUB_8: -+ case BUILT_IN_SYNC_FETCH_AND_OR_8: -+ case BUILT_IN_SYNC_FETCH_AND_AND_8: -+ case BUILT_IN_SYNC_FETCH_AND_XOR_8: -+ case BUILT_IN_SYNC_FETCH_AND_NAND_8: -+ case BUILT_IN_SYNC_ADD_AND_FETCH_8: -+ case BUILT_IN_SYNC_SUB_AND_FETCH_8: -+ case BUILT_IN_SYNC_OR_AND_FETCH_8: -+ case BUILT_IN_SYNC_AND_AND_FETCH_8: -+ case BUILT_IN_SYNC_XOR_AND_FETCH_8: -+ case BUILT_IN_SYNC_NAND_AND_FETCH_8: -+ case BUILT_IN_SYNC_BOOL_COMPARE_AND_SWAP_8: -+ case BUILT_IN_SYNC_VAL_COMPARE_AND_SWAP_8: -+ case BUILT_IN_SYNC_LOCK_TEST_AND_SET_8: -+ case BUILT_IN_SYNC_LOCK_RELEASE_8: -+ case BUILT_IN_ATOMIC_EXCHANGE_8: -+ case BUILT_IN_ATOMIC_COMPARE_EXCHANGE_8: -+ case BUILT_IN_ATOMIC_STORE_8: -+ case BUILT_IN_ATOMIC_ADD_FETCH_8: -+ case BUILT_IN_ATOMIC_SUB_FETCH_8: -+ case BUILT_IN_ATOMIC_AND_FETCH_8: -+ case BUILT_IN_ATOMIC_NAND_FETCH_8: -+ case BUILT_IN_ATOMIC_XOR_FETCH_8: -+ case BUILT_IN_ATOMIC_OR_FETCH_8: -+ 
-+    case BUILT_IN_ATOMIC_FETCH_ADD_8:
-+    case BUILT_IN_ATOMIC_FETCH_SUB_8:
-+    case BUILT_IN_ATOMIC_FETCH_AND_8:
-+    case BUILT_IN_ATOMIC_FETCH_NAND_8:
-+    case BUILT_IN_ATOMIC_FETCH_XOR_8:
-     case BUILT_IN_ATOMIC_FETCH_OR_8:
-+      access_size = 8;
-+      goto do_atomic;
-+
-+    case BUILT_IN_ATOMIC_LOAD_16:
-+      is_store = false;
-+      /* FALLTHRU */
-+    case BUILT_IN_SYNC_FETCH_AND_ADD_16:
-+    case BUILT_IN_SYNC_FETCH_AND_SUB_16:
-+    case BUILT_IN_SYNC_FETCH_AND_OR_16:
-+    case BUILT_IN_SYNC_FETCH_AND_AND_16:
-+    case BUILT_IN_SYNC_FETCH_AND_XOR_16:
-+    case BUILT_IN_SYNC_FETCH_AND_NAND_16:
-+    case BUILT_IN_SYNC_ADD_AND_FETCH_16:
-+    case BUILT_IN_SYNC_SUB_AND_FETCH_16:
-+    case BUILT_IN_SYNC_OR_AND_FETCH_16:
-+    case BUILT_IN_SYNC_AND_AND_FETCH_16:
-+    case BUILT_IN_SYNC_XOR_AND_FETCH_16:
-+    case BUILT_IN_SYNC_NAND_AND_FETCH_16:
-+    case BUILT_IN_SYNC_BOOL_COMPARE_AND_SWAP_16:
-+    case BUILT_IN_SYNC_VAL_COMPARE_AND_SWAP_16:
-+    case BUILT_IN_SYNC_LOCK_TEST_AND_SET_16:
-+    case BUILT_IN_SYNC_LOCK_RELEASE_16:
-+    case BUILT_IN_ATOMIC_EXCHANGE_16:
-+    case BUILT_IN_ATOMIC_COMPARE_EXCHANGE_16:
-+    case BUILT_IN_ATOMIC_STORE_16:
-+    case BUILT_IN_ATOMIC_ADD_FETCH_16:
-+    case BUILT_IN_ATOMIC_SUB_FETCH_16:
-+    case BUILT_IN_ATOMIC_AND_FETCH_16:
-+    case BUILT_IN_ATOMIC_NAND_FETCH_16:
-+    case BUILT_IN_ATOMIC_XOR_FETCH_16:
-+    case BUILT_IN_ATOMIC_OR_FETCH_16:
-+    case BUILT_IN_ATOMIC_FETCH_ADD_16:
-+    case BUILT_IN_ATOMIC_FETCH_SUB_16:
-+    case BUILT_IN_ATOMIC_FETCH_AND_16:
-+    case BUILT_IN_ATOMIC_FETCH_NAND_16:
-+    case BUILT_IN_ATOMIC_FETCH_XOR_16:
-     case BUILT_IN_ATOMIC_FETCH_OR_16:
-+      access_size = 16;
-+      /* FALLTHRU */
-+    do_atomic:
-       {
-         dest = gimple_call_arg (call, 0);
-         /* DEST represents the address of a memory location.
-            instrument_derefs wants the memory location, so lets
-            dereference the address DEST before handing it to
-            instrument_derefs.  */
--        if (TREE_CODE (dest) == ADDR_EXPR)
--          dest = TREE_OPERAND (dest, 0);
--        else if (TREE_CODE (dest) == SSA_NAME || TREE_CODE (dest) == INTEGER_CST)
--          dest = build2 (MEM_REF, TREE_TYPE (TREE_TYPE (dest)),
--                         dest, build_int_cst (TREE_TYPE (dest), 0));
--        else
--          gcc_unreachable ();
--
--        access_size = int_size_in_bytes (TREE_TYPE (dest));
-+        tree type = build_nonstandard_integer_type (access_size
-+                                                    * BITS_PER_UNIT, 1);
-+        dest = build2 (MEM_REF, type, dest,
-+                       build_int_cst (build_pointer_type (char_type_node), 0));
-+        break;
-       }
-
-     default:
---- gcc/testsuite/c-c++-common/asan/pr79944.c.jj  2017-03-08 12:50:40.428617273 +0100
-+++ gcc/testsuite/c-c++-common/asan/pr79944.c  2017-03-08 12:34:47.000000000 +0100
-@@ -0,0 +1,18 @@
-+/* PR sanitizer/79944 */
-+/* { dg-do run } */
-+
-+struct S { int i; char p[1024]; };
-+
-+int
-+main ()
-+{
-+  struct S *p = (struct S *) __builtin_malloc (__builtin_offsetof (struct S, p) + 64);
-+  p->i = 5;
-+  asm volatile ("" : "+r" (p) : : "memory");
-+  __atomic_fetch_add ((int *) p, 5, __ATOMIC_RELAXED);
-+  asm volatile ("" : "+r" (p) : : "memory");
-+  if (p->i != 10)
-+    __builtin_abort ();
-+  __builtin_free (p);
-+  return 0;
-+}
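The dropped gcc7-pr79944.patch above taught the address sanitizer to take the access size of __sync_*/__atomic_* builtins from their _1/_2/_4/_8/_16 suffix rather than from the pointed-to type of the address argument. A minimal sketch of the kind of code this affects (illustrative only, not part of this diff; the names are made up):

struct big { int counter; char payload[1024]; };

int
bump_counter (struct big *p)
{
  /* Expands to __atomic_fetch_add_4, so ASAN checks only the 4 bytes
     actually accessed instead of requiring the whole pointed-to struct
     to be addressable.  */
  return __atomic_fetch_add ((int *) p, 1, __ATOMIC_RELAXED);
}
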
diff --git a/gcc7-pr79969.patch b/gcc7-pr79969.patch
new file mode 100644
index 0000000..f2d6d89
--- /dev/null
+++ b/gcc7-pr79969.patch
@@ -0,0 +1,43 @@
+2017-03-09  Jakub Jelinek  <jakub@redhat.com>
+
+	PR c/79969
+	* c-decl.c (start_enum): Adjust DECL_SOURCE_LOCATION of
+	TYPE_STUB_DECL.
+
+	* gcc.dg/debug/dwarf2/enum-loc1.c: New test.
+
+--- gcc/c/c-decl.c.jj	2017-03-05 22:39:45.000000000 +0100
++++ gcc/c/c-decl.c	2017-03-09 08:19:33.100042166 +0100
+@@ -8201,6 +8201,10 @@ start_enum (location_t loc, struct c_enu
+       enumtype = make_node (ENUMERAL_TYPE);
+       pushtag (loc, name, enumtype);
+     }
++  /* Update type location to the one of the definition, instead of e.g.
++     a forward declaration.  */
++  else if (TYPE_STUB_DECL (enumtype))
++    DECL_SOURCE_LOCATION (TYPE_STUB_DECL (enumtype)) = loc;
+
+   if (C_TYPE_BEING_DEFINED (enumtype))
+     error_at (loc, "nested redefinition of %<enum %E%>", name);
+--- gcc/testsuite/gcc.dg/debug/dwarf2/enum-loc1.c.jj	2017-03-09 08:09:30.742037844 +0100
++++ gcc/testsuite/gcc.dg/debug/dwarf2/enum-loc1.c	2017-03-09 08:16:45.202268438 +0100
+@@ -0,0 +1,19 @@
++/* PR c/79969 */
++/* { dg-do compile } */
++/* { dg-options "-gdwarf -dA -fno-merge-debug-strings" } */
++
++enum ENUMTAG;
++
++enum ENUMTAG
++{
++  B = 1,
++  C = 2
++};
++
++void
++bar (void)
++{
++  enum ENUMTAG a = C;
++}
++
++/* { dg-final { scan-assembler "DW_TAG_enumeration_type\[^\\r\\n\]*\[\\r\\n\]+\[^\\r\\n\]*\"ENUMTAG\[^\\r\\n\]*DW_AT_name(\[^\\r\\n\]*\[\\r\\n\]+\[^\\r\\n\]*DW_AT_)*\[^\\r\\n\]*\[\\r\\n\]+\[^\\r\\n\]*\[^0-9a-fA-FxX](0x)?7\[^0-9a-fA-FxX]\[^\\r\\n\]*DW_AT_decl_line" } } */
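The c-decl.c hunk in gcc7-pr79969.patch moves the TYPE_STUB_DECL source location of an enum to its defining declaration, which is what dwarf2out uses for DW_AT_decl_file/DW_AT_decl_line on the DW_TAG_enumeration_type DIE. A small illustrative example (not part of this diff; compile with -g):

enum color;       /* forward declaration */

enum color        /* with the fix, DW_AT_decl_line for "color" refers to
                     this defining declaration, not to the forward
                     declaration above */
{
  RED,
  GREEN
};

enum color
pick (void)
{
  return GREEN;
}
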
diff --git a/gcc7-pr79972.patch b/gcc7-pr79972.patch
new file mode 100644
index 0000000..89259ff
--- /dev/null
+++ b/gcc7-pr79972.patch
@@ -0,0 +1,58 @@
+2017-03-09  Jakub Jelinek  <jakub@redhat.com>
+
+	PR tree-optimization/79972
+	* gimple-ssa-warn-alloca.c (alloca_call_type): Only call
+	get_range_info on SSA_NAMEs.  Formatting fixes.
+
+	* gcc.dg/pr79972.c: New test.
+
+--- gcc/gimple-ssa-warn-alloca.c.jj	2017-03-07 20:04:52.000000000 +0100
++++ gcc/gimple-ssa-warn-alloca.c	2017-03-09 12:11:41.943934314 +0100
+@@ -300,8 +300,9 @@ alloca_call_type (gimple *stmt, bool is_
+           ret = alloca_type_and_limit (ALLOCA_OK);
+         }
+       // Check the range info if available.
+-      else if (value_range_type range_type = get_range_info (len, &min, &max))
++      else if (TREE_CODE (len) == SSA_NAME)
+         {
++          value_range_type range_type = get_range_info (len, &min, &max);
+           if (range_type == VR_RANGE)
+             {
+               if (wi::leu_p (max, max_size))
+@@ -328,7 +329,6 @@ alloca_call_type (gimple *stmt, bool is_
+               gimple *def = SSA_NAME_DEF_STMT (len);
+               if (gimple_assign_cast_p (def)
+                   && TYPE_UNSIGNED (TREE_TYPE (gimple_assign_rhs1 (def))))
+-
+                 {
+                   len_casted = gimple_assign_rhs1 (def);
+                   range_type = get_range_info (len_casted, &min, &max);
+@@ -344,8 +344,7 @@ alloca_call_type (gimple *stmt, bool is_
+               else if (range_type == VR_ANTI_RANGE)
+                 return alloca_type_and_limit (ALLOCA_UNBOUNDED);
+               else if (range_type != VR_VARYING)
+-                return
+-                  alloca_type_and_limit (ALLOCA_BOUND_MAYBE_LARGE, max);
++                return alloca_type_and_limit (ALLOCA_BOUND_MAYBE_LARGE, max);
+             }
+         }
+       else if (range_type == VR_ANTI_RANGE)
+--- gcc/testsuite/gcc.dg/pr79972.c.jj	2017-03-09 12:14:24.188800592 +0100
++++ gcc/testsuite/gcc.dg/pr79972.c	2017-03-09 12:13:38.000000000 +0100
+@@ -0,0 +1,16 @@
++/* PR tree-optimization/79972 */
++/* { dg-do compile } */
++/* { dg-require-effective-target alloca } */
++/* { dg-options "-Walloca -Wvla-larger-than=10000" } */
++
++int
++f (int dim, int *b, int *c)
++{
++  int newcentroid[3][dim];	/* { dg-warning "unbounded use of variable-length array" } */
++  int *a = newcentroid[2];
++  int i, dist = 0;
++  __builtin_memcpy (newcentroid, c, sizeof (newcentroid));
++  for (i = 0; i < dim; i++)
++    dist += (a[i] - b[i]) * (a[i] - b[i]);
++  return dist;
++}
diff --git a/sources b/sources
index fc2bdf0..f003944 100644
--- a/sources
+++ b/sources
@@ -1,3 +1,3 @@
-SHA512 (gcc-7.0.1-20170308.tar.bz2) = 8fdadf218ac0d50107516cd9ea72ac03606b79db44f7a3d1fe665d1f1c51b7a54c5f37df3f6af9ab070de1b3990989745762ce4d3c2a4aa4d12bdfe29fe00dcd
+SHA512 (gcc-7.0.1-20170309.tar.bz2) = d65c12060cfbe6aa685a1d8f768be6ec2dde733cb7c82ed3ac8cba839b33cfd16174f61f9fb7da3cec6ac967c0bcc9b664d71129a609e924e544edec4d7024ff
 SHA512 (nvptx-newlib-aadc8eb0ec43b7cd0dd2dfb484bae63c8b05ef24.tar.bz2) = 38f97c9297ad108568352a4d28277455a3c01fd8b7864e798037e5006b6f757022e874bbf3f165775fe3b873781bc108137bbeb42dd5ed3c7d3e6747746fa918
 SHA512 (nvptx-tools-c28050f60193b3b95a18866a96f03334e874e78f.tar.bz2) = 95b577a06a93bb044dbc8033e550cb36bcf2ab2687da030a7318cdc90e7467ed49665e247dcafb5ff4a7e92cdc264291d19728bd17fab902fb64b22491269330
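As a side note on gcc7-pr79972.patch above: alloca_call_type now only consults get_range_info when the length operand is an SSA_NAME. An illustrative sketch (not part of this diff; made-up function) of a VLA whose bound carries usable range information:

void
fill (unsigned int n, const char *src)
{
  if (n > 512)
    n = 512;
  /* When optimizing, the bound below is an SSA_NAME whose value range is
     known to be at most 512, so -Wvla-larger-than= can compare it against
     the given limit instead of reporting an unbounded use.  */
  char buf[n];
  __builtin_memcpy (buf, src, n);
}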