diff -up chromium-61.0.3163.79/base/numerics/safe_math_clang_gcc_impl.h.fixgccagain chromium-61.0.3163.79/base/numerics/safe_math_clang_gcc_impl.h
--- chromium-61.0.3163.79/base/numerics/safe_math_clang_gcc_impl.h.fixgccagain	2017-09-06 15:48:27.560803028 -0400
+++ chromium-61.0.3163.79/base/numerics/safe_math_clang_gcc_impl.h	2017-09-06 15:50:08.715853695 -0400
@@ -126,6 +126,7 @@ struct ClampedAddFastOp {
   }
 };
 
+#if defined(__clang__)  // Not supported on GCC.
 // This is the fastest negation on Intel, and a decent fallback on arm.
 __attribute__((always_inline)) inline int8_t ClampedNegate(uint8_t value) {
   uint8_t carry;
@@ -169,6 +170,7 @@ __attribute__((always_inline)) inline in
 __attribute__((always_inline)) inline int64_t ClampedNegate(int64_t value) {
   return ClampedNegate(static_cast<uint64_t>(value));
 }
+#endif
 
 template <typename T, typename U>
 struct ClampedSubFastOp {
@@ -180,6 +182,7 @@ struct ClampedSubFastOp {
       return ClampedSubFastAsmOp<T, U>::template Do<V>(x, y);
     }
 
+#if defined(__clang__)  // Not supported on GCC.
     // Fast path for generic clamped negation.
     if (std::is_same<T, U>::value && std::is_same<U, V>::value &&
         IsCompileTimeConstant(x) && x == 0 && !IsCompileTimeConstant(y)) {
@@ -190,6 +193,7 @@ struct ClampedSubFastOp {
             IntegerBitsPlusSign<T>::value, std::is_signed<T>::value>::type>(
             y));
     }
+#endif
     V result;
     return !__builtin_sub_overflow(x, y, &result)
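
Background note (not part of the patch): the #if defined(__clang__) guards compile out the Clang-only fast paths, which rely on Clang's carry-flag builtins (the __builtin_subc family) that GCC does not provide. What remains on GCC is the generic fallback built on __builtin_sub_overflow, which both modern GCC (5+) and Clang support. The sketch below is our own illustration of that kind of portable clamped subtraction; the name ClampedSubPortable and the saturation logic are ours, not Chromium's exact code.

#include <cstdint>
#include <limits>
#include <type_traits>

// Illustrative only: a portable clamped (saturating) subtraction built on
// the type-generic __builtin_sub_overflow, the same builtin the unguarded
// tail of ClampedSubFastOp::Do falls back to on GCC.
template <typename T,
          typename = typename std::enable_if<std::is_integral<T>::value>::type>
T ClampedSubPortable(T x, T y) {
  T result;
  if (!__builtin_sub_overflow(x, y, &result))
    return result;  // No overflow: the exact difference fits in T.
  // On overflow, saturate toward the sign of the true result: if y > x the
  // mathematical x - y is below T's minimum, so clamp low; otherwise clamp
  // high. (x == y never overflows, so the comparison is exhaustive.)
  return y > x ? std::numeric_limits<T>::min() : std::numeric_limits<T>::max();
}

int main() {
  // INT8_MIN - 1 would wrap to +127 with plain two's-complement arithmetic;
  // clamping pins the result at INT8_MIN instead.
  int8_t clamped = ClampedSubPortable<int8_t>(INT8_MIN, 1);
  return clamped == INT8_MIN ? 0 : 1;
}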