2015-03-13  Jakub Jelinek

	PR rtl-optimization/65401
	* combine.c (rtx_equal_for_field_assignment_p): Add widen_x
	argument.  If true, adjust_address_nv of x with big-endian
	correction for the mode widening to GET_MODE (y).
	(make_field_assignment): Don't do MEM mode widening here.
	Use MEM_P instead of GET_CODE == MEM.

	* gcc.c-torture/execute/pr65401.c: New test.

--- gcc/combine.c.jj	2015-02-03 10:38:46.000000000 +0100
+++ gcc/combine.c	2015-03-13 18:46:45.710940306 +0100
@@ -475,7 +475,7 @@ static rtx force_to_mode (rtx, machine_m
 			  unsigned HOST_WIDE_INT, int);
 static rtx if_then_else_cond (rtx, rtx *, rtx *);
 static rtx known_cond (rtx, enum rtx_code, rtx, rtx);
-static int rtx_equal_for_field_assignment_p (rtx, rtx);
+static int rtx_equal_for_field_assignment_p (rtx, rtx, bool = false);
 static rtx make_field_assignment (rtx);
 static rtx apply_distributive_law (rtx);
 static rtx distribute_and_simplify_rtx (rtx, int);
@@ -9184,8 +9184,23 @@ known_cond (rtx x, enum rtx_code cond, r
    assignment as a field assignment.  */
 
 static int
-rtx_equal_for_field_assignment_p (rtx x, rtx y)
+rtx_equal_for_field_assignment_p (rtx x, rtx y, bool widen_x)
 {
+  if (widen_x && GET_MODE (x) != GET_MODE (y))
+    {
+      if (GET_MODE_SIZE (GET_MODE (x)) > GET_MODE_SIZE (GET_MODE (y)))
+	return 0;
+      if (BYTES_BIG_ENDIAN != WORDS_BIG_ENDIAN)
+	return 0;
+      /* For big endian, adjust the memory offset.  */
+      if (BYTES_BIG_ENDIAN)
+	x = adjust_address_nv (x, GET_MODE (y),
+			       -subreg_lowpart_offset (GET_MODE (x),
+						       GET_MODE (y)));
+      else
+	x = adjust_address_nv (x, GET_MODE (y), 0);
+    }
+
   if (x == y || rtx_equal_p (x, y))
     return 1;
 
@@ -9339,16 +9354,15 @@ make_field_assignment (rtx x)
   /* The second SUBREG that might get in the way is a paradoxical
      SUBREG around the first operand of the AND.  We want to
      pretend the operand is as wide as the destination here.  We
-     do this by creating a new MEM in the wider mode for the sole
+     do this by adjusting the MEM to wider mode for the sole
      purpose of the call to rtx_equal_for_field_assignment_p.  Also
      note this trick only works for MEMs.  */
   else if (GET_CODE (rhs) == AND
 	   && paradoxical_subreg_p (XEXP (rhs, 0))
-	   && GET_CODE (SUBREG_REG (XEXP (rhs, 0))) == MEM
+	   && MEM_P (SUBREG_REG (XEXP (rhs, 0)))
 	   && CONST_INT_P (XEXP (rhs, 1))
-	   && rtx_equal_for_field_assignment_p (gen_rtx_MEM (GET_MODE (dest),
-			XEXP (SUBREG_REG (XEXP (rhs, 0)), 0)),
-						dest))
+	   && rtx_equal_for_field_assignment_p (SUBREG_REG (XEXP (rhs, 0)),
+						dest, true))
     c1 = INTVAL (XEXP (rhs, 1)), other = lhs;
   else if (GET_CODE (lhs) == AND
 	   && CONST_INT_P (XEXP (lhs, 1))
@@ -9357,16 +9371,15 @@ make_field_assignment (rtx x)
   /* The second SUBREG that might get in the way is a paradoxical
      SUBREG around the first operand of the AND.  We want to
      pretend the operand is as wide as the destination here.  We
-     do this by creating a new MEM in the wider mode for the sole
+     do this by adjusting the MEM to wider mode for the sole
      purpose of the call to rtx_equal_for_field_assignment_p.  Also
      note this trick only works for MEMs.  */
   else if (GET_CODE (lhs) == AND
 	   && paradoxical_subreg_p (XEXP (lhs, 0))
-	   && GET_CODE (SUBREG_REG (XEXP (lhs, 0))) == MEM
+	   && MEM_P (SUBREG_REG (XEXP (lhs, 0)))
 	   && CONST_INT_P (XEXP (lhs, 1))
-	   && rtx_equal_for_field_assignment_p (gen_rtx_MEM (GET_MODE (dest),
-			XEXP (SUBREG_REG (XEXP (lhs, 0)), 0)),
-						dest))
+	   && rtx_equal_for_field_assignment_p (SUBREG_REG (XEXP (lhs, 0)),
+						dest, true))
     c1 = INTVAL (XEXP (lhs, 1)), other = rhs;
   else
     return x;
--- gcc/testsuite/gcc.c-torture/execute/pr65401.c.jj	2015-03-13 18:36:30.639817393 +0100
+++ gcc/testsuite/gcc.c-torture/execute/pr65401.c	2015-03-13 18:42:02.693485127 +0100
@@ -0,0 +1,59 @@
+/* PR rtl-optimization/65401 */
+
+struct S { unsigned short s[64]; };
+
+__attribute__((noinline, noclone)) void
+foo (struct S *x)
+{
+  unsigned int i;
+  unsigned char *s;
+
+  s = (unsigned char *) x->s;
+  for (i = 0; i < 64; i++)
+    x->s[i] = s[i * 2] | (s[i * 2 + 1] << 8);
+}
+
+__attribute__((noinline, noclone)) void
+bar (struct S *x)
+{
+  unsigned int i;
+  unsigned char *s;
+
+  s = (unsigned char *) x->s;
+  for (i = 0; i < 64; i++)
+    x->s[i] = (s[i * 2] << 8) | s[i * 2 + 1];
+}
+
+int
+main ()
+{
+  unsigned int i;
+  struct S s;
+  if (sizeof (unsigned short) != 2)
+    return 0;
+  for (i = 0; i < 64; i++)
+    s.s[i] = i + ((64 - i) << 8);
+  foo (&s);
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+  for (i = 0; i < 64; i++)
+    if (s.s[i] != (64 - i) + (i << 8))
+      __builtin_abort ();
+#elif __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+  for (i = 0; i < 64; i++)
+    if (s.s[i] != i + ((64 - i) << 8))
+      __builtin_abort ();
+#endif
+  for (i = 0; i < 64; i++)
+    s.s[i] = i + ((64 - i) << 8);
+  bar (&s);
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+  for (i = 0; i < 64; i++)
+    if (s.s[i] != (64 - i) + (i << 8))
+      __builtin_abort ();
+#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+  for (i = 0; i < 64; i++)
+    if (s.s[i] != i + ((64 - i) << 8))
+      __builtin_abort ();
+#endif
+  return 0;
+}
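
To illustrate the big-endian correction in the new widen_x path, here is a
minimal standalone sketch (plain C, not GCC internals; the helper name
lowpart_offset and the sample address are illustrative stand-ins for what
subreg_lowpart_offset returns for integer modes and for the narrow MEM's
address):

/* Standalone sketch of the address arithmetic behind the widen_x path.
   lowpart_offset mimics subreg_lowpart_offset for integer modes under the
   patch's precondition BYTES_BIG_ENDIAN == WORDS_BIG_ENDIAN.  */
#include <stdio.h>

/* Byte offset of an `inner`-byte low part within an `outer`-byte value:
   0 on little-endian, outer - inner on big-endian, where the low-order
   bytes sit at the highest addresses.  */
static unsigned
lowpart_offset (unsigned inner, unsigned outer, int big_endian)
{
  return big_endian ? outer - inner : 0;
}

int
main (void)
{
  unsigned narrow = 2, wide = 4;	/* e.g. a HImode MEM widened to SImode */
  unsigned long addr = 0x1000;		/* illustrative address of the narrow MEM */

  /* Widening the narrow MEM means finding the wide object whose low part
     is the narrow one; on big-endian that object starts *before* the
     narrow MEM, hence the negated subreg_lowpart_offset passed to
     adjust_address_nv in the patch.  */
  printf ("little-endian wide MEM at 0x%lx\n",
	  addr - lowpart_offset (narrow, wide, 0));	/* prints 0x1000 */
  printf ("big-endian    wide MEM at 0x%lx\n",
	  addr - lowpart_offset (narrow, wide, 1));	/* prints 0xffe */
  return 0;
}

This is also why the new code punts when BYTES_BIG_ENDIAN != WORDS_BIG_ENDIAN:
in that case the low part of the wider object is not at a single constant
byte offset, so no adjust_address_nv call can express the widening.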