/* Fold a constant sub-tree into a single node for C-compiler
Copyright (C) 1987, 1988, 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999,
2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009
Free Software Foundation, Inc.
This file is part of GCC.
GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.
GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3. If not see
<http://www.gnu.org/licenses/>. */
/*@@ This file should be rewritten to use an arbitrary precision
@@ representation for "struct tree_int_cst" and "struct tree_real_cst".
@@ Perhaps the routines could also be used for bc/dc, and made a lib.
@@ The routines that translate from the ap rep should
@@ warn if precision etc. is lost.
@@ This would also make life easier when this technology is used
@@ for cross-compilers. */
/* The entry points in this file are fold, size_int_wide, size_binop
and force_fit_type_double.
fold takes a tree as argument and returns a simplified tree.
size_binop takes a tree code for an arithmetic operation
and two operands that are trees, and produces a tree for the
result, assuming the type comes from `sizetype'.
size_int takes an integer value, and creates a tree constant
with type from `sizetype'.
force_fit_type_double takes a constant, an overflowable flag and a
prior overflow indicator. It forces the value to fit the type and
sets TREE_OVERFLOW.
Note: Since the folders get called on non-gimple code as well as
gimple code, we need to handle GIMPLE tuples as well as their
corresponding tree equivalents. */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "flags.h"
#include "tree.h"
#include "real.h"
#include "fixed-value.h"
#include "rtl.h"
#include "expr.h"
#include "tm_p.h"
#include "target.h"
#include "toplev.h"
#include "intl.h"
#include "ggc.h"
#include "hashtab.h"
#include "langhooks.h"
#include "md5.h"
#include "gimple.h"
/* Nonzero if we are folding constants inside an initializer; zero
otherwise. */
int folding_initializer = 0;
/* The following constants represent a bit-based encoding of GCC's
comparison operators. This encoding simplifies transformations
on relational comparisons, such as combining them with AND and OR. */
enum comparison_code {
COMPCODE_FALSE = 0,
COMPCODE_LT = 1,
COMPCODE_EQ = 2,
COMPCODE_LE = 3,
COMPCODE_GT = 4,
COMPCODE_LTGT = 5,
COMPCODE_GE = 6,
COMPCODE_ORD = 7,
COMPCODE_UNORD = 8,
COMPCODE_UNLT = 9,
COMPCODE_UNEQ = 10,
COMPCODE_UNLE = 11,
COMPCODE_UNGT = 12,
COMPCODE_NE = 13,
COMPCODE_UNGE = 14,
COMPCODE_TRUE = 15
};
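/* Each code above is a bitmask over the four primitive outcomes: bit 0
is "less than", bit 1 is "equal", bit 2 is "greater than" and bit 3
is "unordered". Combining two comparisons of the same operands is
then plain bit arithmetic. For example (illustrative only):
(a <= b) && (a >= b) maps to COMPCODE_LE & COMPCODE_GE == COMPCODE_EQ
(a < b) || (a == b) maps to COMPCODE_LT | COMPCODE_EQ == COMPCODE_LE */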
static void encode (HOST_WIDE_INT *, unsigned HOST_WIDE_INT, HOST_WIDE_INT);
static void decode (HOST_WIDE_INT *, unsigned HOST_WIDE_INT *, HOST_WIDE_INT *);
static bool negate_mathfn_p (enum built_in_function);
static bool negate_expr_p (tree);
static tree negate_expr (tree);
static tree split_tree (tree, enum tree_code, tree *, tree *, tree *, int);
static tree associate_trees (tree, tree, enum tree_code, tree);
static tree const_binop (enum tree_code, tree, tree, int);
static enum comparison_code comparison_to_compcode (enum tree_code);
static enum tree_code compcode_to_comparison (enum comparison_code);
static tree combine_comparisons (enum tree_code, enum tree_code,
enum tree_code, tree, tree, tree);
static int operand_equal_for_comparison_p (tree, tree, tree);
static int twoval_comparison_p (tree, tree *, tree *, int *);
static tree eval_subst (tree, tree, tree, tree, tree);
static tree pedantic_omit_one_operand (tree, tree, tree);
static tree distribute_bit_expr (enum tree_code, tree, tree, tree);
static tree make_bit_field_ref (tree, tree, HOST_WIDE_INT, HOST_WIDE_INT, int);
static tree optimize_bit_field_compare (enum tree_code, tree, tree, tree);
static tree decode_field_reference (tree, HOST_WIDE_INT *, HOST_WIDE_INT *,
enum machine_mode *, int *, int *,
tree *, tree *);
static int all_ones_mask_p (const_tree, int);
static tree sign_bit_p (tree, const_tree);
static int simple_operand_p (const_tree);
static tree range_binop (enum tree_code, tree, tree, int, tree, int);
static tree range_predecessor (tree);
static tree range_successor (tree);
static tree make_range (tree, int *, tree *, tree *, bool *);
static tree build_range_check (tree, tree, int, tree, tree);
static int merge_ranges (int *, tree *, tree *, int, tree, tree, int, tree,
tree);
static tree fold_range_test (enum tree_code, tree, tree, tree);
static tree fold_cond_expr_with_comparison (tree, tree, tree, tree);
static tree unextend (tree, int, int, tree);
static tree fold_truthop (enum tree_code, tree, tree, tree);
static tree optimize_minmax_comparison (enum tree_code, tree, tree, tree);
static tree extract_muldiv (tree, tree, enum tree_code, tree, bool *);
static tree extract_muldiv_1 (tree, tree, enum tree_code, tree, bool *);
static tree fold_binary_op_with_conditional_arg (enum tree_code, tree,
tree, tree,
tree, tree, int);
static tree fold_mathfn_compare (enum built_in_function, enum tree_code,
tree, tree, tree);
static tree fold_inf_compare (enum tree_code, tree, tree, tree);
static tree fold_div_compare (enum tree_code, tree, tree, tree);
static bool reorder_operands_p (const_tree, const_tree);
static tree fold_negate_const (tree, tree);
static tree fold_not_const (tree, tree);
static tree fold_relational_const (enum tree_code, tree, tree, tree);
/* We know that A1 + B1 = SUM1, using 2's complement arithmetic and ignoring
overflow. Suppose A, B and SUM have the same respective signs as A1, B1,
and SUM1. Then this yields nonzero if overflow occurred during the
addition.
Overflow occurs if A and B have the same sign, but A and SUM differ in
sign. Use `^' to test whether signs differ, and `< 0' to isolate the
sign. */
#define OVERFLOW_SUM_SIGN(a, b, sum) ((~((a) ^ (b)) & ((a) ^ (sum))) < 0)
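/* Worked example (a sketch, assuming a 32-bit HOST_WIDE_INT): with
a == 0x7fffffff, b == 1 and sum == 0x80000000, ~(a ^ b) is
0x80000001 and a ^ sum is 0xffffffff; their AND is negative, so the
macro correctly reports the signed overflow. */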
/* To do constant folding on INTEGER_CST nodes requires two-word arithmetic.
We do that by representing the two-word integer in 4 words, with only
HOST_BITS_PER_WIDE_INT / 2 bits stored in each word, as a positive
number. The value of each word is LOWPART + HIGHPART * BASE. */
#define LOWPART(x) \
((x) & (((unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2)) - 1))
#define HIGHPART(x) \
((unsigned HOST_WIDE_INT) (x) >> HOST_BITS_PER_WIDE_INT / 2)
#define BASE ((unsigned HOST_WIDE_INT) 1 << HOST_BITS_PER_WIDE_INT / 2)
/* Unpack a two-word integer into 4 words.
LOW and HI are the integer, as two `HOST_WIDE_INT' pieces.
WORDS points to the array of HOST_WIDE_INTs. */
static void
encode (HOST_WIDE_INT *words, unsigned HOST_WIDE_INT low, HOST_WIDE_INT hi)
{
words[0] = LOWPART (low);
words[1] = HIGHPART (low);
words[2] = LOWPART (hi);
words[3] = HIGHPART (hi);
}
/* Pack an array of 4 words into a two-word integer.
WORDS points to the array of words.
The integer is stored into *LOW and *HI as two `HOST_WIDE_INT' pieces. */
static void
decode (HOST_WIDE_INT *words, unsigned HOST_WIDE_INT *low,
HOST_WIDE_INT *hi)
{
*low = words[0] + words[1] * BASE;
*hi = words[2] + words[3] * BASE;
}
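/* Example of the representation (illustrative, assuming a 32-bit
HOST_WIDE_INT so that BASE == 1 << 16):
encode (words, 0x12345678, 0x0abcdef0)
stores words[] = { 0x5678, 0x1234, 0xdef0, 0x0abc }, and
decode (words, &low, &hi)
recovers low == 0x12345678 and hi == 0x0abcdef0 exactly. */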
/* Force the double-word integer L1, H1 to be within the range of the
integer type TYPE. Stores the properly truncated and sign-extended
double-word integer in *LV, *HV. Returns true if the operation
overflows, that is, argument and result are different. */
int
fit_double_type (unsigned HOST_WIDE_INT l1, HOST_WIDE_INT h1,
unsigned HOST_WIDE_INT *lv, HOST_WIDE_INT *hv, const_tree type)
{
unsigned HOST_WIDE_INT low0 = l1;
HOST_WIDE_INT high0 = h1;
unsigned int prec;
int sign_extended_type;
if (POINTER_TYPE_P (type)
|| TREE_CODE (type) == OFFSET_TYPE)
prec = POINTER_SIZE;
else
prec = TYPE_PRECISION (type);
/* Size types *are* sign extended. */
sign_extended_type = (!TYPE_UNSIGNED (type)
|| (TREE_CODE (type) == INTEGER_TYPE
&& TYPE_IS_SIZETYPE (type)));
/* First clear all bits that are beyond the type's precision. */
if (prec >= 2 * HOST_BITS_PER_WIDE_INT)
;
else if (prec > HOST_BITS_PER_WIDE_INT)
h1 &= ~((HOST_WIDE_INT) (-1) << (prec - HOST_BITS_PER_WIDE_INT));
else
{
h1 = 0;
if (prec < HOST_BITS_PER_WIDE_INT)
l1 &= ~((HOST_WIDE_INT) (-1) << prec);
}
/* Then do sign extension if necessary. */
if (!sign_extended_type)
/* No sign extension */;
else if (prec >= 2 * HOST_BITS_PER_WIDE_INT)
/* Correct width already. */;
else if (prec > HOST_BITS_PER_WIDE_INT)
{
/* Sign extend top half? */
if (h1 & ((unsigned HOST_WIDE_INT)1
<< (prec - HOST_BITS_PER_WIDE_INT - 1)))
h1 |= (HOST_WIDE_INT) (-1) << (prec - HOST_BITS_PER_WIDE_INT);
}
else if (prec == HOST_BITS_PER_WIDE_INT)
{
if ((HOST_WIDE_INT)l1 < 0)
h1 = -1;
}
else
{
/* Sign extend bottom half? */
if (l1 & ((unsigned HOST_WIDE_INT)1 << (prec - 1)))
{
h1 = -1;
l1 |= (HOST_WIDE_INT)(-1) << prec;
}
}
*lv = l1;
*hv = h1;
/* If the value didn't fit, signal overflow. */
return l1 != low0 || h1 != high0;
}
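/* For instance (illustrative, with a hypothetical 8-bit signed TYPE):
fitting L1 == 0x1ff, H1 == 0 first masks the value down to 0xff and
then sign extends it, so *LV and *HV together represent -1. Since
the result differs from the argument, the function returns nonzero
to signal overflow. */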
/* Force the double-int HIGH:LOW into the range of the type TYPE by
sign or zero extending it.
OVERFLOWABLE indicates whether we care about overflow of the value:
if it is >0 we care only about signed overflow, and if it is <0 we
care about any overflow. OVERFLOWED indicates whether overflow has
already occurred. We set TREE_OVERFLOW on the result if
OVERFLOWED is nonzero,
or OVERFLOWABLE is >0 and signed overflow occurs,
or OVERFLOWABLE is <0 and any overflow occurs.
We return a new tree node for the extended double-int. The node
is shared if no overflow flags are set. */
tree
force_fit_type_double (tree type, unsigned HOST_WIDE_INT low,
HOST_WIDE_INT high, int overflowable,
bool overflowed)
{
int sign_extended_type;
bool overflow;
/* Size types *are* sign extended. */
sign_extended_type = (!TYPE_UNSIGNED (type)
|| (TREE_CODE (type) == INTEGER_TYPE
&& TYPE_IS_SIZETYPE (type)));
overflow = fit_double_type (low, high, &low, &high, type);
/* If we need to set overflow flags, return a new unshared node. */
if (overflowed || overflow)
{
if (overflowed
|| overflowable < 0
|| (overflowable > 0 && sign_extended_type))
{
tree t = make_node (INTEGER_CST);
TREE_INT_CST_LOW (t) = low;
TREE_INT_CST_HIGH (t) = high;
TREE_TYPE (t) = type;
TREE_OVERFLOW (t) = 1;
return t;
}
}
/* Else build a shared node. */
return build_int_cst_wide (type, low, high);
}
/* Add two doubleword integers with doubleword result.
Return nonzero if the operation overflows according to UNSIGNED_P.
Each argument is given as two `HOST_WIDE_INT' pieces.
One argument is L1 and H1; the other, L2 and H2.
The value is stored as two `HOST_WIDE_INT' pieces in *LV and *HV. */
int
add_double_with_sign (unsigned HOST_WIDE_INT l1, HOST_WIDE_INT h1,
unsigned HOST_WIDE_INT l2, HOST_WIDE_INT h2,
unsigned HOST_WIDE_INT *lv, HOST_WIDE_INT *hv,
bool unsigned_p)
{
unsigned HOST_WIDE_INT l;
HOST_WIDE_INT h;
l = l1 + l2;
h = (HOST_WIDE_INT) ((unsigned HOST_WIDE_INT) h1
+ (unsigned HOST_WIDE_INT) h2
+ (l < l1));
*lv = l;
*hv = h;
if (unsigned_p)
return ((unsigned HOST_WIDE_INT) h < (unsigned HOST_WIDE_INT) h1
|| (h == h1
&& l < l1));
else
return OVERFLOW_SUM_SIGN (h1, h2, h);
}
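/* Example (a sketch, assuming a 32-bit HOST_WIDE_INT): adding
L1 == 0xffffffff, H1 == 0 to L2 == 1, H2 == 0 wraps the low word,
so the carry (l < l1) propagates and the result is *LV == 0,
*HV == 1; neither the signed nor the unsigned check reports
overflow. */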
/* Negate a doubleword integer with doubleword result.
Return nonzero if the operation overflows, assuming it's signed.
The argument is given as two `HOST_WIDE_INT' pieces in L1 and H1.
The value is stored as two `HOST_WIDE_INT' pieces in *LV and *HV. */
int
neg_double (unsigned HOST_WIDE_INT l1, HOST_WIDE_INT h1,
unsigned HOST_WIDE_INT *lv, HOST_WIDE_INT *hv)
{
if (l1 == 0)
{
*lv = 0;
*hv = - h1;
return (*hv & h1) < 0;
}
else
{
*lv = -l1;
*hv = ~h1;
return 0;
}
}
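/* E.g. (illustrative, 32-bit HOST_WIDE_INT): negating the doubleword
minimum L1 == 0, H1 == 0x80000000 yields the same bit pattern back,
and (*hv & h1) < 0 makes the function return nonzero, since that
value has no representable negation. */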
/* Multiply two doubleword integers with doubleword result.
Return nonzero if the operation overflows according to UNSIGNED_P.
Each argument is given as two `HOST_WIDE_INT' pieces.
One argument is L1 and H1; the other, L2 and H2.
The value is stored as two `HOST_WIDE_INT' pieces in *LV and *HV. */
int
mul_double_with_sign (unsigned HOST_WIDE_INT l1, HOST_WIDE_INT h1,
unsigned HOST_WIDE_INT l2, HOST_WIDE_INT h2,
unsigned HOST_WIDE_INT *lv, HOST_WIDE_INT *hv,
bool unsigned_p)
{
HOST_WIDE_INT arg1[4];
HOST_WIDE_INT arg2[4];
HOST_WIDE_INT prod[4 * 2];
unsigned HOST_WIDE_INT carry;
int i, j, k;
unsigned HOST_WIDE_INT toplow, neglow;
HOST_WIDE_INT tophigh, neghigh;
encode (arg1, l1, h1);
encode (arg2, l2, h2);
memset (prod, 0, sizeof prod);
for (i = 0; i < 4; i++)
{
carry = 0;
for (j = 0; j < 4; j++)
{
k = i + j;
/* This product is <= 0xFFFE0001, the sum <= 0xFFFF0000. */
carry += arg1[i] * arg2[j];
/* Since prod[p] < 0xFFFF, this sum <= 0xFFFFFFFF. */
carry += prod[k];
prod[k] = LOWPART (carry);
carry = HIGHPART (carry);
}
prod[i + 4] = carry;
}
decode (prod, lv, hv);
decode (prod + 4, &toplow, &tophigh);
/* Unsigned overflow is immediate. */
if (unsigned_p)
return (toplow | tophigh) != 0;
/* Check for signed overflow by calculating the signed representation of the
top half of the result; it should agree with the low half's sign bit. */
if (h1 < 0)
{
neg_double (l2, h2, &neglow, &neghigh);
add_double (neglow, neghigh, toplow, tophigh, &toplow, &tophigh);
}
if (h2 < 0)
{
neg_double (l1, h1, &neglow, &neghigh);
add_double (neglow, neghigh, toplow, tophigh, &toplow, &tophigh);
}
return (*hv < 0 ? ~(toplow & tophigh) : toplow | tophigh) != 0;
}
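/* Example (a sketch, assuming a 32-bit HOST_WIDE_INT): squaring the
doubleword value 1 << 32 (L1 == 0, H1 == 1) leaves *LV == 0 and
*HV == 0, with the only nonzero product bits in the discarded top
half; the function therefore reports overflow. */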
/* Shift the doubleword integer in L1, H1 left by COUNT places
keeping only PREC bits of result.
Shift right if COUNT is negative.
ARITH nonzero specifies arithmetic shifting; otherwise use logical shift.
Store the value as two `HOST_WIDE_INT' pieces in *LV and *HV. */
void
lshift_double (unsigned HOST_WIDE_INT l1, HOST_WIDE_INT h1,
HOST_WIDE_INT count, unsigned int prec,
unsigned HOST_WIDE_INT *lv, HOST_WIDE_INT *hv, int arith)
{
unsigned HOST_WIDE_INT signmask;
if (count < 0)
{
rshift_double (l1, h1, -count, prec, lv, hv, arith);
return;
}
if (SHIFT_COUNT_TRUNCATED)
count %= prec;
if (count >= 2 * HOST_BITS_PER_WIDE_INT)
{
/* Shifting by the host word size is undefined according to the
ANSI standard, so we must handle this as a special case. */
*hv = 0;
*lv = 0;
}
else if (count >= HOST_BITS_PER_WIDE_INT)
{
*hv = l1 << (count - HOST_BITS_PER_WIDE_INT);
*lv = 0;
}
else
{
*hv = (((unsigned HOST_WIDE_INT) h1 << count)
| (l1 >> (HOST_BITS_PER_WIDE_INT - count - 1) >> 1));
*lv = l1 << count;
}
/* Sign extend all bits that are beyond the precision. */
signmask = -((prec > HOST_BITS_PER_WIDE_INT
? ((unsigned HOST_WIDE_INT) *hv
>> (prec - HOST_BITS_PER_WIDE_INT - 1))
: (*lv >> (prec - 1))) & 1);
if (prec >= 2 * HOST_BITS_PER_WIDE_INT)
;
else if (prec >= HOST_BITS_PER_WIDE_INT)
{
*hv &= ~((HOST_WIDE_INT) (-1) << (prec - HOST_BITS_PER_WIDE_INT));
*hv |= signmask << (prec - HOST_BITS_PER_WIDE_INT);
}
else
{
*hv = signmask;
*lv &= ~((unsigned HOST_WIDE_INT) (-1) << prec);
*lv |= signmask << prec;
}
}
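/* For example (illustrative, 32-bit HOST_WIDE_INT and PREC == 64):
shifting L1 == 1, H1 == 0 left by 33 places crosses the word
boundary and gives *LV == 0, *HV == 2. A negative COUNT delegates
to rshift_double instead. */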
/* Shift the doubleword integer in L1, H1 right by COUNT places
keeping only PREC bits of result. COUNT must be positive.
ARITH nonzero specifies arithmetic shifting; otherwise use logical shift.
Store the value as two `HOST_WIDE_INT' pieces in *LV and *HV. */
void
rshift_double (unsigned HOST_WIDE_INT l1, HOST_WIDE_INT h1,
HOST_WIDE_INT count, unsigned int prec,
unsigned HOST_WIDE_INT *lv, HOST_WIDE_INT *hv,
int arith)
{
unsigned HOST_WIDE_INT signmask;
signmask = (arith
? -((unsigned HOST_WIDE_INT) h1 >> (HOST_BITS_PER_WIDE_INT - 1))
: 0);
if (SHIFT_COUNT_TRUNCATED)
count %= prec;
if (count >= 2 * HOST_BITS_PER_WIDE_INT)
{
/* Shifting by the host word size is undefined according to the
ANSI standard, so we must handle this as a special case. */
*hv = 0;
*lv = 0;
}
else if (count >= HOST_BITS_PER_WIDE_INT)
{
*hv = 0;
*lv = (unsigned HOST_WIDE_INT) h1 >> (count - HOST_BITS_PER_WIDE_INT);
}
else
{
*hv = (unsigned HOST_WIDE_INT) h1 >> count;
*lv = ((l1 >> count)
| ((unsigned HOST_WIDE_INT) h1 << (HOST_BITS_PER_WIDE_INT - count - 1) << 1));
}
/* Zero / sign extend all bits that are beyond the precision. */
if (count >= (HOST_WIDE_INT)prec)
{
*hv = signmask;
*lv = signmask;
}
else if ((prec - count) >= 2 * HOST_BITS_PER_WIDE_INT)
;
else if ((prec - count) >= HOST_BITS_PER_WIDE_INT)
{
*hv &= ~((HOST_WIDE_INT) (-1) << (prec - count - HOST_BITS_PER_WIDE_INT));
*hv |= signmask << (prec - count - HOST_BITS_PER_WIDE_INT);
}
else
{
*hv = signmask;
*lv &= ~((unsigned HOST_WIDE_INT) (-1) << (prec - count));
*lv |= signmask << (prec - count);
}
}
/* Rotate the doubleword integer in L1, H1 left by COUNT places
keeping only PREC bits of result.
Rotate right if COUNT is negative.
Store the value as two `HOST_WIDE_INT' pieces in *LV and *HV. */
void
lrotate_double (unsigned HOST_WIDE_INT l1, HOST_WIDE_INT h1,
HOST_WIDE_INT count, unsigned int prec,
unsigned HOST_WIDE_INT *lv, HOST_WIDE_INT *hv)
{
unsigned HOST_WIDE_INT s1l, s2l;
HOST_WIDE_INT s1h, s2h;
count %= prec;
if (count < 0)
count += prec;
lshift_double (l1, h1, count, prec, &s1l, &s1h, 0);
rshift_double (l1, h1, prec - count, prec, &s2l, &s2h, 0);
*lv = s1l | s2l;
*hv = s1h | s2h;
}
/* Rotate the doubleword integer in L1, H1 right by COUNT places
keeping only PREC bits of result. COUNT must be positive.
Store the value as two `HOST_WIDE_INT' pieces in *LV and *HV. */
void
rrotate_double (unsigned HOST_WIDE_INT l1, HOST_WIDE_INT h1,
HOST_WIDE_INT count, unsigned int prec,
unsigned HOST_WIDE_INT *lv, HOST_WIDE_INT *hv)
{
unsigned HOST_WIDE_INT s1l, s2l;
HOST_WIDE_INT s1h, s2h;
count %= prec;
if (count < 0)
count += prec;
rshift_double (l1, h1, count, prec, &s1l, &s1h, 0);
lshift_double (l1, h1, prec - count, prec, &s2l, &s2h, 0);
*lv = s1l | s2l;
*hv = s1h | s2h;
}
/* Divide doubleword integer LNUM, HNUM by doubleword integer LDEN, HDEN
for a quotient (stored in *LQUO, *HQUO) and remainder (in *LREM, *HREM).
CODE is a tree code for a kind of division, one of
TRUNC_DIV_EXPR, FLOOR_DIV_EXPR, CEIL_DIV_EXPR, ROUND_DIV_EXPR
or EXACT_DIV_EXPR.
It controls how the quotient is rounded to an integer.
Return nonzero if the operation overflows.
UNS nonzero says do unsigned division. */
int
div_and_round_double (enum tree_code code, int uns,
unsigned HOST_WIDE_INT lnum_orig, /* num == numerator == dividend */
HOST_WIDE_INT hnum_orig,
unsigned HOST_WIDE_INT lden_orig, /* den == denominator == divisor */
HOST_WIDE_INT hden_orig,
unsigned HOST_WIDE_INT *lquo,
HOST_WIDE_INT *hquo, unsigned HOST_WIDE_INT *lrem,
HOST_WIDE_INT *hrem)
{
int quo_neg = 0;
HOST_WIDE_INT num[4 + 1]; /* extra element for scaling. */
HOST_WIDE_INT den[4], quo[4];
int i, j;
unsigned HOST_WIDE_INT work;
unsigned HOST_WIDE_INT carry = 0;
unsigned HOST_WIDE_INT lnum = lnum_orig;
HOST_WIDE_INT hnum = hnum_orig;
unsigned HOST_WIDE_INT lden = lden_orig;
HOST_WIDE_INT hden = hden_orig;
int overflow = 0;
if (hden == 0 && lden == 0)
overflow = 1, lden = 1;
/* Calculate quotient sign and convert operands to unsigned. */
if (!uns)
{
if (hnum < 0)
{
quo_neg = ~ quo_neg;
/* (minimum integer) / (-1) is the only overflow case. */
if (neg_double (lnum, hnum, &lnum, &hnum)
&& ((HOST_WIDE_INT) lden & hden) == -1)
overflow = 1;
}
if (hden < 0)
{
quo_neg = ~ quo_neg;
neg_double (lden, hden, &lden, &hden);
}
}
if (hnum == 0 && hden == 0)
{ /* single precision */
*hquo = *hrem = 0;
/* This unsigned division rounds toward zero. */
*lquo = lnum / lden;
goto finish_up;
}
if (hnum == 0)
{ /* trivial case: dividend < divisor */
/* hden != 0 already checked. */
*hquo = *lquo = 0;
*hrem = hnum;
*lrem = lnum;
goto finish_up;
}
memset (quo, 0, sizeof quo);
memset (num, 0, sizeof num); /* to zero the extra scaling element */
memset (den, 0, sizeof den);
encode (num, lnum, hnum);
encode (den, lden, hden);
/* Special code for when the divisor < BASE. */
if (hden == 0 && lden < (unsigned HOST_WIDE_INT) BASE)
{
/* hnum != 0 already checked. */
for (i = 4 - 1; i >= 0; i--)
{
work = num[i] + carry * BASE;
quo[i] = work / lden;
carry = work % lden;
}
}
else
{
/* Full double precision division,
with thanks to Don Knuth's "Seminumerical Algorithms". */
int num_hi_sig, den_hi_sig;
unsigned HOST_WIDE_INT quo_est, scale;
/* Find the highest nonzero divisor digit. */
for (i = 4 - 1;; i--)
if (den[i] != 0)
{
den_hi_sig = i;
break;
}
/* Ensure that the first digit of the divisor is at least BASE/2.
This is required by the quotient digit estimation algorithm. */
scale = BASE / (den[den_hi_sig] + 1);
if (scale > 1)
{ /* scale divisor and dividend */
carry = 0;
for (i = 0; i <= 4 - 1; i++)
{
work = (num[i] * scale) + carry;
num[i] = LOWPART (work);
carry = HIGHPART (work);
}
num[4] = carry;
carry = 0;
for (i = 0; i <= 4 - 1; i++)
{
work = (den[i] * scale) + carry;
den[i] = LOWPART (work);
carry = HIGHPART (work);
if (den[i] != 0) den_hi_sig = i;
}
}
num_hi_sig = 4;
/* Main loop */
for (i = num_hi_sig - den_hi_sig - 1; i >= 0; i--)
{
/* Guess the next quotient digit, quo_est, by dividing the first
two remaining dividend digits by the high order divisor digit.
quo_est is never low and is at most 2 high. */
unsigned HOST_WIDE_INT tmp;
num_hi_sig = i + den_hi_sig + 1;
work = num[num_hi_sig] * BASE + num[num_hi_sig - 1];
if (num[num_hi_sig] != den[den_hi_sig])
quo_est = work / den[den_hi_sig];
else
quo_est = BASE - 1;
/* Refine quo_est so it's usually correct, and at most one high. */
tmp = work - quo_est * den[den_hi_sig];
if (tmp < BASE
&& (den[den_hi_sig - 1] * quo_est
> (tmp * BASE + num[num_hi_sig - 2])))
quo_est--;
/* Try QUO_EST as the quotient digit, by multiplying the
divisor by QUO_EST and subtracting from the remaining dividend.
Keep in mind that QUO_EST is the I - 1st digit. */
carry = 0;
for (j = 0; j <= den_hi_sig; j++)
{
work = quo_est * den[j] + carry;
carry = HIGHPART (work);
work = num[i + j] - LOWPART (work);
num[i + j] = LOWPART (work);
carry += HIGHPART (work) != 0;
}
/* If quo_est was high by one, then num[i] went negative and
we need to correct things. */
if (num[num_hi_sig] < (HOST_WIDE_INT) carry)
{
quo_est--;
carry = 0; /* add divisor back in */
for (j = 0; j <= den_hi_sig; j++)
{
work = num[i + j] + den[j] + carry;
carry = HIGHPART (work);
num[i + j] = LOWPART (work);
}
num [num_hi_sig] += carry;
}
/* Store the quotient digit. */
quo[i] = quo_est;
}
}
decode (quo, lquo, hquo);
finish_up:
/* If result is negative, make it so. */
if (quo_neg)
neg_double (*lquo, *hquo, lquo, hquo);
/* Compute trial remainder: rem = num - (quo * den) */
mul_double (*lquo, *hquo, lden_orig, hden_orig, lrem, hrem);
neg_double (*lrem, *hrem, lrem, hrem);
add_double (lnum_orig, hnum_orig, *lrem, *hrem, lrem, hrem);
switch (code)
{
case TRUNC_DIV_EXPR:
case TRUNC_MOD_EXPR: /* round toward zero */
case EXACT_DIV_EXPR: /* for this one, it shouldn't matter */
return overflow;
case FLOOR_DIV_EXPR:
case FLOOR_MOD_EXPR: /* round toward negative infinity */
if (quo_neg && (*lrem != 0 || *hrem != 0)) /* ratio < 0 && rem != 0 */
{
/* quo = quo - 1; */
add_double (*lquo, *hquo, (HOST_WIDE_INT) -1, (HOST_WIDE_INT) -1,
lquo, hquo);
}
else
return overflow;
break;
case CEIL_DIV_EXPR:
case CEIL_MOD_EXPR: /* round toward positive infinity */
if (!quo_neg && (*lrem != 0 || *hrem != 0)) /* ratio > 0 && rem != 0 */
{
add_double (*lquo, *hquo, (HOST_WIDE_INT) 1, (HOST_WIDE_INT) 0,
lquo, hquo);
}
else
return overflow;
break;
case ROUND_DIV_EXPR:
case ROUND_MOD_EXPR: /* round to closest integer */
{
unsigned HOST_WIDE_INT labs_rem = *lrem;
HOST_WIDE_INT habs_rem = *hrem;
unsigned HOST_WIDE_INT labs_den = lden, ltwice;
HOST_WIDE_INT habs_den = hden, htwice;
/* Get absolute values. */
if (*hrem < 0)
neg_double (*lrem, *hrem, &labs_rem, &habs_rem);
if (hden < 0)
neg_double (lden, hden, &labs_den, &habs_den);
/* If (2 * abs (lrem) >= abs (lden)), adjust the quotient. */
mul_double ((HOST_WIDE_INT) 2, (HOST_WIDE_INT) 0,
labs_rem, habs_rem, &ltwice, &htwice);
if (((unsigned HOST_WIDE_INT) habs_den
< (unsigned HOST_WIDE_INT) htwice)
|| (((unsigned HOST_WIDE_INT) habs_den
== (unsigned HOST_WIDE_INT) htwice)
&& (labs_den <= ltwice)))
{
if (*hquo < 0)
/* quo = quo - 1; */
add_double (*lquo, *hquo,
(HOST_WIDE_INT) -1, (HOST_WIDE_INT) -1, lquo, hquo);
else
/* quo = quo + 1; */
add_double (*lquo, *hquo, (HOST_WIDE_INT) 1, (HOST_WIDE_INT) 0,
lquo, hquo);
}
else
return overflow;
}
break;
default:
gcc_unreachable ();
}
/* Compute true remainder: rem = num - (quo * den) */
mul_double (*lquo, *hquo, lden_orig, hden_orig, lrem, hrem);
neg_double (*lrem, *hrem, lrem, hrem);
add_double (lnum_orig, hnum_orig, *lrem, *hrem, lrem, hrem);
return overflow;
}
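/* Worked example of the rounding modes (illustrative only): dividing
-7 by 2 first produces the truncated quotient -3 with remainder -1.
TRUNC_DIV_EXPR keeps -3; FLOOR_DIV_EXPR sees a negative quotient
with a nonzero remainder and adjusts to -4; CEIL_DIV_EXPR keeps -3;
ROUND_DIV_EXPR compares 2 * |rem| == 2 against |den| == 2 and,
since they are equal, rounds away from zero to -4. */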
/* If ARG2 divides ARG1 with zero remainder, carries out the division
of type CODE and returns the quotient.
Otherwise returns NULL_TREE. */
static tree
div_if_zero_remainder (enum tree_code code, const_tree arg1, const_tree arg2)
{
unsigned HOST_WIDE_INT int1l, int2l;
HOST_WIDE_INT int1h, int2h;
unsigned HOST_WIDE_INT quol, reml;
HOST_WIDE_INT quoh, remh;
tree type = TREE_TYPE (arg1);
int uns = TYPE_UNSIGNED (type);
int1l = TREE_INT_CST_LOW (arg1);
int1h = TREE_INT_CST_HIGH (arg1);
/* &obj[0] + -128 really should be compiled as &obj[-8] rather than
&obj[some_exotic_number]. */
if (POINTER_TYPE_P (type))
{
uns = false;
type = signed_type_for (type);
fit_double_type (int1l, int1h, &int1l, &int1h,
type);
}
else
fit_double_type (int1l, int1h, &int1l, &int1h, type);
int2l = TREE_INT_CST_LOW (arg2);
int2h = TREE_INT_CST_HIGH (arg2);
div_and_round_double (code, uns, int1l, int1h, int2l, int2h,
&quol, &quoh, &reml, &remh);
if (remh != 0 || reml != 0)
return NULL_TREE;
return build_int_cst_wide (type, quol, quoh);
}
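/* E.g. (illustrative): with CODE == TRUNC_DIV_EXPR, dividing the
constants 12 and 4 returns the constant 3, while 13 and 4 leave a
nonzero remainder and return NULL_TREE. */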
/* This is nonzero if we should defer warnings about undefined
overflow. This facility exists because these warnings are a
special case. The code to estimate loop iterations does not want
to issue any warnings, since it works with expressions which do not
occur in user code. Various bits of cleanup code call fold(), but
only use the result if it has certain characteristics (e.g., is a
constant); that code only wants to issue a warning if the result is
used. */
static int fold_deferring_overflow_warnings;
/* If a warning about undefined overflow is deferred, this is the
warning. Note that this may cause us to turn two warnings into
one, but that is fine since it is sufficient to only give one
warning per expression. */
static const char* fold_deferred_overflow_warning;
/* If a warning about undefined overflow is deferred, this is the
level at which the warning should be emitted. */
static enum warn_strict_overflow_code fold_deferred_overflow_code;
/* Start deferring overflow warnings. We could use a stack here to
permit nested calls, but at present it is not necessary. */
void
fold_defer_overflow_warnings (void)
{
++fold_deferring_overflow_warnings;
}
/* Stop deferring overflow warnings. If there is a pending warning,
and ISSUE is true, then issue the warning if appropriate. STMT is
the statement with which the warning should be associated (used for
location information); STMT may be NULL. CODE is the level of the
warning--a warn_strict_overflow_code value. This function will use
the smaller of CODE and the deferred code when deciding whether to
issue the warning. CODE may be zero to mean to always use the
deferred code. */
void
fold_undefer_overflow_warnings (bool issue, const_gimple stmt, int code)
{
const char *warnmsg;
location_t locus;
gcc_assert (fold_deferring_overflow_warnings > 0);
--fold_deferring_overflow_warnings;
if (fold_deferring_overflow_warnings > 0)
{
if (fold_deferred_overflow_warning != NULL
&& code != 0
&& code < (int) fold_deferred_overflow_code)
fold_deferred_overflow_code = code;
return;
}
warnmsg = fold_deferred_overflow_warning;
fold_deferred_overflow_warning = NULL;
if (!issue || warnmsg == NULL)
return;
if (gimple_no_warning_p (stmt))
return;
/* Use the smallest code level when deciding to issue the
warning. */
if (code == 0 || code > (int) fold_deferred_overflow_code)
code = fold_deferred_overflow_code;
if (!issue_strict_overflow_warning (code))
return;
if (stmt == NULL)
locus = input_location;
else
locus = gimple_location (stmt);
warning (OPT_Wstrict_overflow, "%H%s", &locus, warnmsg);
}
/* Stop deferring overflow warnings, ignoring any deferred
warnings. */
void
fold_undefer_and_ignore_overflow_warnings (void)
{
fold_undefer_overflow_warnings (false, NULL, 0);
}
/* Whether we are deferring overflow warnings. */
bool
fold_deferring_overflow_warnings_p (void)
{
return fold_deferring_overflow_warnings > 0;
}
/* This is called when we fold something based on the fact that signed
overflow is undefined. */
static void
fold_overflow_warning (const char* gmsgid, enum warn_strict_overflow_code wc)
{
if (fold_deferring_overflow_warnings > 0)
{
if (fold_deferred_overflow_warning == NULL
|| wc < fold_deferred_overflow_code)
{
fold_deferred_overflow_warning = gmsgid;
fold_deferred_overflow_code = wc;
}
}
else if (issue_strict_overflow_warning (wc))
warning (OPT_Wstrict_overflow, gmsgid);
}
/* Return true if the built-in mathematical function specified by CODE
is odd, i.e. -f(x) == f(-x). */
static bool
negate_mathfn_p (enum built_in_function code)
{
switch (code)
{
CASE_FLT_FN (BUILT_IN_ASIN):
CASE_FLT_FN (BUILT_IN_ASINH):
CASE_FLT_FN (BUILT_IN_ATAN):
CASE_FLT_FN (BUILT_IN_ATANH):
CASE_FLT_FN (BUILT_IN_CASIN):
CASE_FLT_FN (BUILT_IN_CASINH):
CASE_FLT_FN (BUILT_IN_CATAN):
CASE_FLT_FN (BUILT_IN_CATANH):
CASE_FLT_FN (BUILT_IN_CBRT):
CASE_FLT_FN (BUILT_IN_CPROJ):
CASE_FLT_FN (BUILT_IN_CSIN):
CASE_FLT_FN (BUILT_IN_CSINH):
CASE_FLT_FN (BUILT_IN_CTAN):
CASE_FLT_FN (BUILT_IN_CTANH):
CASE_FLT_FN (BUILT_IN_ERF):
CASE_FLT_FN (BUILT_IN_LLROUND):
CASE_FLT_FN (BUILT_IN_LROUND):
CASE_FLT_FN (BUILT_IN_ROUND):
CASE_FLT_FN (BUILT_IN_SIN):
CASE_FLT_FN (BUILT_IN_SINH):
CASE_FLT_FN (BUILT_IN_TAN):
CASE_FLT_FN (BUILT_IN_TANH):
CASE_FLT_FN (BUILT_IN_TRUNC):
return true;
CASE_FLT_FN (BUILT_IN_LLRINT):
CASE_FLT_FN (BUILT_IN_LRINT):
CASE_FLT_FN (BUILT_IN_NEARBYINT):
CASE_FLT_FN (BUILT_IN_RINT):
return !flag_rounding_math;
default:
break;
}
return false;
}
/* Check whether we may negate an integer constant T without causing
overflow. */
bool
may_negate_without_overflow_p (const_tree t)
{
unsigned HOST_WIDE_INT val;
unsigned int prec;
tree type;
gcc_assert (TREE_CODE (t) == INTEGER_CST);
type = TREE_TYPE (t);
if (TYPE_UNSIGNED (type))
return false;
prec = TYPE_PRECISION (type);
if (prec > HOST_BITS_PER_WIDE_INT)
{
if (TREE_INT_CST_LOW (t) != 0)
return true;
prec -= HOST_BITS_PER_WIDE_INT;
val = TREE_INT_CST_HIGH (t);
}
else
val = TREE_INT_CST_LOW (t);
if (prec < HOST_BITS_PER_WIDE_INT)
val &= ((unsigned HOST_WIDE_INT) 1 << prec) - 1;
return val != ((unsigned HOST_WIDE_INT) 1 << (prec - 1));
}
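/* E.g. (illustrative): for a 32-bit signed type the only value this
rejects is 0x80000000 (INT_MIN), whose negation is not
representable; every other constant, including INT_MAX, may be
negated safely. */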
/* Determine whether an expression T can be cheaply negated using
the function negate_expr without introducing undefined overflow. */
static bool
negate_expr_p (tree t)
{
tree type;
if (t == 0)
return false;
type = TREE_TYPE (t);
STRIP_SIGN_NOPS (t);
switch (TREE_CODE (t))
{
case INTEGER_CST:
if (TYPE_OVERFLOW_WRAPS (type))
return true;
/* Check that -CST will not overflow type. */
return may_negate_without_overflow_p (t);
case BIT_NOT_EXPR:
return (INTEGRAL_TYPE_P (type)
&& TYPE_OVERFLOW_WRAPS (type));
case FIXED_CST:
case REAL_CST:
case NEGATE_EXPR:
return true;
case COMPLEX_CST:
return negate_expr_p (TREE_REALPART (t))
&& negate_expr_p (TREE_IMAGPART (t));
case COMPLEX_EXPR:
return negate_expr_p (TREE_OPERAND (t, 0))
&& negate_expr_p (TREE_OPERAND (t, 1));
case CONJ_EXPR:
return negate_expr_p (TREE_OPERAND (t, 0));
case PLUS_EXPR:
if (HONOR_SIGN_DEPENDENT_ROUNDING (TYPE_MODE (type))
|| HONOR_SIGNED_ZEROS (TYPE_MODE (type)))
return false;
/* -(A + B) -> (-B) - A. */
if (negate_expr_p (TREE_OPERAND (t, 1))
&& reorder_operands_p (TREE_OPERAND (t, 0),
TREE_OPERAND (t, 1)))
return true;
/* -(A + B) -> (-A) - B. */
return negate_expr_p (TREE_OPERAND (t, 0));
case MINUS_EXPR:
/* We can't turn -(A-B) into B-A when we honor signed zeros. */
return !HONOR_SIGN_DEPENDENT_ROUNDING (TYPE_MODE (type))
&& !HONOR_SIGNED_ZEROS (TYPE_MODE (type))
&& reorder_operands_p (TREE_OPERAND (t, 0),
TREE_OPERAND (t, 1));
case MULT_EXPR:
if (TYPE_UNSIGNED (TREE_TYPE (t)))
break;
/* Fall through. */
case RDIV_EXPR:
if (! HONOR_SIGN_DEPENDENT_ROUNDING (TYPE_MODE (TREE_TYPE (t))))
return negate_expr_p (TREE_OPERAND (t, 1))
|| negate_expr_p (TREE_OPERAND (t, 0));
break;
case TRUNC_DIV_EXPR:
case ROUND_DIV_EXPR:
case FLOOR_DIV_EXPR:
case CEIL_DIV_EXPR:
case EXACT_DIV_EXPR:
/* In general we can't negate A / B, because if A is INT_MIN and
B is 1, we may turn this into INT_MIN / -1 which is undefined
and actually traps on some architectures. But if overflow is
undefined, we can negate, because - (INT_MIN / 1) is an
overflow. */
if (INTEGRAL_TYPE_P (TREE_TYPE (t))
&& !TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (t)))
break;
return negate_expr_p (TREE_OPERAND (t, 1))
|| negate_expr_p (TREE_OPERAND (t, 0));
case NOP_EXPR:
/* Negate -((double)float) as (double)(-float). */
if (TREE_CODE (type) == REAL_TYPE)
{
tree tem = strip_float_extensions (t);
if (tem != t)
return negate_expr_p (tem);
}
break;
case CALL_EXPR:
/* Negate -f(x) as f(-x). */
if (negate_mathfn_p (builtin_mathfn_code (t)))
return negate_expr_p (CALL_EXPR_ARG (t, 0));
break;
case RSHIFT_EXPR:
/* Optimize -((int)x >> 31) into (unsigned)x >> 31. */
if (TREE_CODE (TREE_OPERAND (t, 1)) == INTEGER_CST)
{
tree op1 = TREE_OPERAND (t, 1);
if (TREE_INT_CST_HIGH (op1) == 0
&& (unsigned HOST_WIDE_INT) (TYPE_PRECISION (type) - 1)
== TREE_INT_CST_LOW (op1))
return true;
}
break;
default:
break;
}
return false;
}
/* Given T, an expression, return a folded tree for -T or NULL_TREE, if no
simplification is possible.
If negate_expr_p would return true for T, NULL_TREE will never be
returned. */
static tree
fold_negate_expr (tree t)
{
tree type = TREE_TYPE (t);
tree tem;
switch (TREE_CODE (t))
{
/* Convert - (~A) to A + 1. */
case BIT_NOT_EXPR:
if (INTEGRAL_TYPE_P (type))
return fold_build2 (PLUS_EXPR, type, TREE_OPERAND (t, 0),
build_int_cst (type, 1));
break;
case INTEGER_CST:
tem = fold_negate_const (t, type);
if (TREE_OVERFLOW (tem) == TREE_OVERFLOW (t)
|| !TYPE_OVERFLOW_TRAPS (type))
return tem;
break;
case REAL_CST:
tem = fold_negate_const (t, type);
/* Two's complement FP formats, such as c4x, may overflow. */
if (!TREE_OVERFLOW (tem) || !flag_trapping_math)
return tem;
break;
case FIXED_CST:
tem = fold_negate_const (t, type);
return tem;
case COMPLEX_CST:
{
tree rpart = negate_expr (TREE_REALPART (t));
tree ipart = negate_expr (TREE_IMAGPART (t));
if ((TREE_CODE (rpart) == REAL_CST
&& TREE_CODE (ipart) == REAL_CST)
|| (TREE_CODE (rpart) == INTEGER_CST
&& TREE_CODE (ipart) == INTEGER_CST))
return build_complex (type, rpart, ipart);
}
break;
case COMPLEX_EXPR:
if (negate_expr_p (t))
return fold_build2 (COMPLEX_EXPR, type,
fold_negate_expr (TREE_OPERAND (t, 0)),
fold_negate_expr (TREE_OPERAND (t, 1)));
break;
case CONJ_EXPR:
if (negate_expr_p (t))
return fold_build1 (CONJ_EXPR, type,
fold_negate_expr (TREE_OPERAND (t, 0)));
break;
case NEGATE_EXPR:
return TREE_OPERAND (t, 0);
case PLUS_EXPR:
if (!HONOR_SIGN_DEPENDENT_ROUNDING (TYPE_MODE (type))
&& !HONOR_SIGNED_ZEROS (TYPE_MODE (type)))
{
/* -(A + B) -> (-B) - A. */
if (negate_expr_p (TREE_OPERAND (t, 1))
&& reorder_operands_p (TREE_OPERAND (t, 0),
TREE_OPERAND (t, 1)))
{
tem = negate_expr (TREE_OPERAND (t, 1));
return fold_build2 (MINUS_EXPR, type,
tem, TREE_OPERAND (t, 0));
}
/* -(A + B) -> (-A) - B. */
if (negate_expr_p (TREE_OPERAND (t, 0)))
{
tem = negate_expr (TREE_OPERAND (t, 0));
return fold_build2 (MINUS_EXPR, type,
tem, TREE_OPERAND (t, 1));
}
}
break;
case MINUS_EXPR:
/* - (A - B) -> B - A */
if (!HONOR_SIGN_DEPENDENT_ROUNDING (TYPE_MODE (type))
&& !HONOR_SIGNED_ZEROS (TYPE_MODE (type))
&& reorder_operands_p (TREE_OPERAND (t, 0), TREE_OPERAND (t, 1)))
return fold_build2 (MINUS_EXPR, type,
TREE_OPERAND (t, 1), TREE_OPERAND (t, 0));
break;
case MULT_EXPR:
if (TYPE_UNSIGNED (type))
break;
/* Fall through. */
case RDIV_EXPR:
if (! HONOR_SIGN_DEPENDENT_ROUNDING (TYPE_MODE (type)))
{
tem = TREE_OPERAND (t, 1);
if (negate_expr_p (tem))
return fold_build2 (TREE_CODE (t), type,
TREE_OPERAND (t, 0), negate_expr (tem));
tem = TREE_OPERAND (t, 0);
if (negate_expr_p (tem))
return fold_build2 (TREE_CODE (t), type,
negate_expr (tem), TREE_OPERAND (t, 1));
}
break;
case TRUNC_DIV_EXPR:
case ROUND_DIV_EXPR:
case FLOOR_DIV_EXPR:
case CEIL_DIV_EXPR:
case EXACT_DIV_EXPR:
/* In general we can't negate A / B, because if A is INT_MIN and
B is 1, we may turn this into INT_MIN / -1 which is undefined
and actually traps on some architectures. But if overflow is
undefined, we can negate, because - (INT_MIN / 1) is an
overflow. */
if (!INTEGRAL_TYPE_P (type) || TYPE_OVERFLOW_UNDEFINED (type))
{
const char * const warnmsg = G_("assuming signed overflow does not "
"occur when negating a division");
tem = TREE_OPERAND (t, 1);
if (negate_expr_p (tem))
{
if (INTEGRAL_TYPE_P (type)
&& (TREE_CODE (tem) != INTEGER_CST
|| integer_onep (tem)))
fold_overflow_warning (warnmsg, WARN_STRICT_OVERFLOW_MISC);
return fold_build2 (TREE_CODE (t), type,
TREE_OPERAND (t, 0), negate_expr (tem));
}
tem = TREE_OPERAND (t, 0);
if (negate_expr_p (tem))
{
if (INTEGRAL_TYPE_P (type)
&& (TREE_CODE (tem) != INTEGER_CST
|| tree_int_cst_equal (tem, TYPE_MIN_VALUE (type))))
fold_overflow_warning (warnmsg, WARN_STRICT_OVERFLOW_MISC);
return fold_build2 (TREE_CODE (t), type,
negate_expr (tem), TREE_OPERAND (t, 1));
}
}
break;
case NOP_EXPR:
/* Convert -((double)float) into (double)(-float). */
if (TREE_CODE (type) == REAL_TYPE)
{
tem = strip_float_extensions (t);
if (tem != t && negate_expr_p (tem))
return fold_convert (type, negate_expr (tem));
}
break;
case CALL_EXPR:
/* Negate -f(x) as f(-x). */
if (negate_mathfn_p (builtin_mathfn_code (t))
&& negate_expr_p (CALL_EXPR_ARG (t, 0)))
{
tree fndecl, arg;
fndecl = get_callee_fndecl (t);
arg = negate_expr (CALL_EXPR_ARG (t, 0));
return build_call_expr (fndecl, 1, arg);
}
break;
case RSHIFT_EXPR:
/* Optimize -((int)x >> 31) into (unsigned)x >> 31. */
if (TREE_CODE (TREE_OPERAND (t, 1)) == INTEGER_CST)
{
tree op1 = TREE_OPERAND (t, 1);
if (TREE_INT_CST_HIGH (op1) == 0
&& (unsigned HOST_WIDE_INT) (TYPE_PRECISION (type) - 1)
== TREE_INT_CST_LOW (op1))
{
tree ntype = TYPE_UNSIGNED (type)
? signed_type_for (type)
: unsigned_type_for (type);
tree temp = fold_convert (ntype, TREE_OPERAND (t, 0));
temp = fold_build2 (RSHIFT_EXPR, ntype, temp, op1);
return fold_convert (type, temp);
}
}
break;
default:
break;
}
return NULL_TREE;
}
/* Like fold_negate_expr, but return a NEGATE_EXPR tree, if T cannot be
negated in a simpler way. Also allow for T to be NULL_TREE, in which case
return NULL_TREE. */
static tree
negate_expr (tree t)
{
tree type, tem;
if (t == NULL_TREE)
return NULL_TREE;
type = TREE_TYPE (t);
STRIP_SIGN_NOPS (t);
tem = fold_negate_expr (t);
if (!tem)
tem = build1 (NEGATE_EXPR, TREE_TYPE (t), t);
return fold_convert (type, tem);
}
/* Split a tree IN into constant, literal and variable parts that could be
combined with CODE to make IN. "constant" means an expression with
TREE_CONSTANT but that isn't an actual constant. CODE must be a
commutative arithmetic operation. Store the constant part into *CONP,
the literal in *LITP and return the variable part. If a part isn't
present, set it to null. If the tree does not decompose in this way,
return the entire tree as the variable part and the other parts as null.
If CODE is PLUS_EXPR we also split trees that use MINUS_EXPR. In that
case, we negate an operand that was subtracted. Except if it is a
literal for which we use *MINUS_LITP instead.
If NEGATE_P is true, we are negating all of IN, again except a literal
for which we use *MINUS_LITP instead.
If IN is itself a literal or constant, return it as appropriate.
Note that we do not guarantee that any of the three values will be the
same type as IN, but they will have the same signedness and mode. */
static tree
split_tree (tree in, enum tree_code code, tree *conp, tree *litp,
tree *minus_litp, int negate_p)
{
tree var = 0;
*conp = 0;
*litp = 0;
*minus_litp = 0;
/* Strip any conversions that don't change the machine mode or signedness. */
STRIP_SIGN_NOPS (in);
if (TREE_CODE (in) == INTEGER_CST || TREE_CODE (in) == REAL_CST
|| TREE_CODE (in) == FIXED_CST)
*litp = in;
else if (TREE_CODE (in) == code
|| ((! FLOAT_TYPE_P (TREE_TYPE (in)) || flag_associative_math)
&& ! SAT_FIXED_POINT_TYPE_P (TREE_TYPE (in))
/* We can associate addition and subtraction together (even
though the C standard doesn't say so) for integers because
the value is not affected. For reals, the value might be
affected, so we can't. */
&& ((code == PLUS_EXPR && TREE_CODE (in) == MINUS_EXPR)
|| (code == MINUS_EXPR && TREE_CODE (in) == PLUS_EXPR))))
{
tree op0 = TREE_OPERAND (in, 0);
tree op1 = TREE_OPERAND (in, 1);
int neg1_p = TREE_CODE (in) == MINUS_EXPR;
int neg_litp_p = 0, neg_conp_p = 0, neg_var_p = 0;
/* First see if either of the operands is a literal, then a constant. */
if (TREE_CODE (op0) == INTEGER_CST || TREE_CODE (op0) == REAL_CST
|| TREE_CODE (op0) == FIXED_CST)
*litp = op0, op0 = 0;
else if (TREE_CODE (op1) == INTEGER_CST || TREE_CODE (op1) == REAL_CST
|| TREE_CODE (op1) == FIXED_CST)
*litp = op1, neg_litp_p = neg1_p, op1 = 0;
if (op0 != 0 && TREE_CONSTANT (op0))
*conp = op0, op0 = 0;
else if (op1 != 0 && TREE_CONSTANT (op1))
*conp = op1, neg_conp_p = neg1_p, op1 = 0;
/* If we haven't dealt with either operand, this is not a case we can
decompose. Otherwise, VAR is either of the ones remaining, if any. */
if (op0 != 0 && op1 != 0)
var = in;
else if (op0 != 0)
var = op0;
else
var = op1, neg_var_p = neg1_p;
/* Now do any needed negations. */
if (neg_litp_p)
*minus_litp = *litp, *litp = 0;
if (neg_conp_p)
*conp = negate_expr (*conp);
if (neg_var_p)
var = negate_expr (var);
}
else if (TREE_CONSTANT (in))
*conp = in;
else
var = in;
if (negate_p)
{
if (*litp)
*minus_litp = *litp, *litp = 0;
else if (*minus_litp)
*litp = *minus_litp, *minus_litp = 0;
*conp = negate_expr (*conp);
var = negate_expr (var);
}
return var;
}
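/* For example (a sketch): splitting IN == a + 5 with CODE == PLUS_EXPR
returns the variable part a with *LITP == 5 and *CONP == 0, whereas
splitting IN == a - 5 sets *MINUS_LITP == 5 instead, because the
literal was subtracted. */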
/* Re-associate trees split by the above function. T1 and T2 are either
expressions to associate or null. Return the new expression, if any. If
we build an operation, do it in TYPE and with CODE. */
static tree
associate_trees (tree t1, tree t2, enum tree_code code, tree type)
{
if (t1 == 0)
return t2;
else if (t2 == 0)
return t1;
/* If either input is CODE, a PLUS_EXPR, or a MINUS_EXPR, don't
try to fold this since we will have infinite recursion. But do
deal with any NEGATE_EXPRs. */
if (TREE_CODE (t1) == code || TREE_CODE (t2) == code
|| TREE_CODE (t1) == MINUS_EXPR || TREE_CODE (t2) == MINUS_EXPR)
{
if (code == PLUS_EXPR)
{
if (TREE_CODE (t1) == NEGATE_EXPR)
return build2 (MINUS_EXPR, type, fold_convert (type, t2),
fold_convert (type, TREE_OPERAND (t1, 0)));
else if (TREE_CODE (t2) == NEGATE_EXPR)
return build2 (MINUS_EXPR, type, fold_convert (type, t1),
fold_convert (type, TREE_OPERAND (t2, 0)));
else if (integer_zerop (t2))
return fold_convert (type, t1);
}
else if (code == MINUS_EXPR)
{
if (integer_zerop (t2))
return fold_convert (type, t1);
}
return build2 (code, type, fold_convert (type, t1),
fold_convert (type, t2));
}
return fold_build2 (code, type, fold_convert (type, t1),
fold_convert (type, t2));
}
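/* E.g. (illustrative): re-associating T1 == a - b with T2 == c under
PLUS_EXPR rebuilds (a - b) + c with build2 rather than fold_build2,
because folding an operand that is itself a MINUS_EXPR could recurse
back into the splitter indefinitely. */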
/* Check whether TYPE1 and TYPE2 are equivalent integer types, suitable
for use in int_const_binop, size_binop and size_diffop. */
static bool
int_binop_types_match_p (enum tree_code code, const_tree type1, const_tree type2)
{
if (TREE_CODE (type1) != INTEGER_TYPE && !POINTER_TYPE_P (type1))
return false;
if (TREE_CODE (type2) != INTEGER_TYPE && !POINTER_TYPE_P (type2))
return false;
switch (code)
{
case LSHIFT_EXPR:
case RSHIFT_EXPR:
case LROTATE_EXPR:
case RROTATE_EXPR:
return true;
default:
break;
}
return TYPE_UNSIGNED (type1) == TYPE_UNSIGNED (type2)
&& TYPE_PRECISION (type1) == TYPE_PRECISION (type2)
&& TYPE_MODE (type1) == TYPE_MODE (type2);
}
/* Combine two integer constants ARG1 and ARG2 under operation CODE
to produce a new constant. Return NULL_TREE if we don't know how
to evaluate CODE at compile-time.
If NOTRUNC is nonzero, do not truncate the result to fit the data type. */
tree
int_const_binop (enum tree_code code, const_tree arg1, const_tree arg2, int notrunc)
{
unsigned HOST_WIDE_INT int1l, int2l;
HOST_WIDE_INT int1h, int2h;
unsigned HOST_WIDE_INT low;
HOST_WIDE_INT hi;
unsigned HOST_WIDE_INT garbagel;
HOST_WIDE_INT garbageh;
tree t;
tree type = TREE_TYPE (arg1);
int uns = TYPE_UNSIGNED (type);
int is_sizetype
= (TREE_CODE (type) == INTEGER_TYPE && TYPE_IS_SIZETYPE (type));
int overflow = 0;
int1l = TREE_INT_CST_LOW (arg1);
int1h = TREE_INT_CST_HIGH (arg1);
int2l = TREE_INT_CST_LOW (arg2);
int2h = TREE_INT_CST_HIGH (arg2);
switch (code)
{
case BIT_IOR_EXPR:
low = int1l | int2l, hi = int1h | int2h;
break;
case BIT_XOR_EXPR:
low = int1l ^ int2l, hi = int1h ^ int2h;
break;
case BIT_AND_EXPR:
low = int1l & int2l, hi = int1h & int2h;
break;
case RSHIFT_EXPR:
int2l = -int2l;
case LSHIFT_EXPR:
/* It's unclear from the C standard whether shifts can overflow.
The following code ignores overflow; perhaps a C standard
interpretation ruling is needed. */
lshift_double (int1l, int1h, int2l, TYPE_PRECISION (type),
&low, &hi, !uns);
break;
case RROTATE_EXPR:
int2l = - int2l;
case LROTATE_EXPR:
lrotate_double (int1l, int1h, int2l, TYPE_PRECISION (type),
&low, &hi);
break;
case PLUS_EXPR:
overflow = add_double (int1l, int1h, int2l, int2h, &low, &hi);
break;
case MINUS_EXPR:
neg_double (int2l, int2h, &low, &hi);
add_double (int1l, int1h, low, hi, &low, &hi);
overflow = OVERFLOW_SUM_SIGN (hi, int2h, int1h);
break;
case MULT_EXPR:
overflow = mul_double (int1l, int1h, int2l, int2h, &low, &hi);
break;
case TRUNC_DIV_EXPR:
case FLOOR_DIV_EXPR: case CEIL_DIV_EXPR:
case EXACT_DIV_EXPR:
/* This is a shortcut for a common special case. */
if (int2h == 0 && (HOST_WIDE_INT) int2l > 0
&& !TREE_OVERFLOW (arg1)
&& !TREE_OVERFLOW (arg2)
&& int1h == 0 && (HOST_WIDE_INT) int1l >= 0)
{
if (code == CEIL_DIV_EXPR)
int1l += int2l - 1;
low = int1l / int2l, hi = 0;
break;
}
/* ... fall through ... */
case ROUND_DIV_EXPR:
if (int2h == 0 && int2l == 0)
return NULL_TREE;
if (int2h == 0 && int2l == 1)
{
low = int1l, hi = int1h;
break;
}
if (int1l == int2l && int1h == int2h
&& ! (int1l == 0 && int1h == 0))
{
low = 1, hi = 0;
break;
}
overflow = div_and_round_double (code, uns, int1l, int1h, int2l, int2h,
&low, &hi, &garbagel, &garbageh);
break;
case TRUNC_MOD_EXPR:
case FLOOR_MOD_EXPR: case CEIL_MOD_EXPR:
/* This is a shortcut for a common special case. */
if (int2h == 0 && (HOST_WIDE_INT) int2l > 0
&& !TREE_OVERFLOW (arg1)
&& !TREE_OVERFLOW (arg2)
&& int1h == 0 && (HOST_WIDE_INT) int1l >= 0)
{
if (code == CEIL_MOD_EXPR)
int1l += int2l - 1;
low = int1l % int2l, hi = 0;
break;
}
/* ... fall through ... */
case ROUND_MOD_EXPR:
if (int2h == 0 && int2l == 0)
return NULL_TREE;
overflow = div_and_round_double (code, uns,
int1l, int1h, int2l, int2h,
&garbagel, &garbageh, &low, &hi);
break;
case MIN_EXPR:
case MAX_EXPR:
if (uns)
low = (((unsigned HOST_WIDE_INT) int1h
< (unsigned HOST_WIDE_INT) int2h)
|| (((unsigned HOST_WIDE_INT) int1h
== (unsigned HOST_WIDE_INT) int2h)
&& int1l < int2l));
else
low = (int1h < int2h
|| (int1h == int2h && int1l < int2l));
if (low == (code == MIN_EXPR))
low = int1l, hi = int1h;
else
low = int2l, hi = int2h;
break;
default:
return NULL_TREE;
}
if (notrunc)
{
t = build_int_cst_wide (TREE_TYPE (arg1), low, hi);
/* Propagate overflow flags ourselves. */
if (((!uns || is_sizetype) && overflow)
| TREE_OVERFLOW (arg1) | TREE_OVERFLOW (arg2))
{
t = copy_node (t);
TREE_OVERFLOW (t) = 1;
}
}
else
t = force_fit_type_double (TREE_TYPE (arg1), low, hi, 1,
((!uns || is_sizetype) && overflow)
| TREE_OVERFLOW (arg1) | TREE_OVERFLOW (arg2));
return t;
}
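/* E.g. (a sketch): applying PLUS_EXPR to INTEGER_CSTs holding INT_MAX
and 1 of a 32-bit signed type wraps to INT_MIN, and
force_fit_type_double marks the resulting node with TREE_OVERFLOW;
with NOTRUNC nonzero the untruncated doubleword result is kept
instead. */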
/* Combine two constants ARG1 and ARG2 under operation CODE to produce a new
constant. We assume ARG1 and ARG2 have the same data type, or at least
are the same kind of constant and the same machine mode. Return zero if
combining the constants is not allowed in the current operating mode.
If NOTRUNC is nonzero, do not truncate the result to fit the data type. */
static tree
const_binop (enum tree_code code, tree arg1, tree arg2, int notrunc)
{
/* Sanity check for the recursive cases. */
if (!arg1 || !arg2)
return NULL_TREE;
STRIP_NOPS (arg1);
STRIP_NOPS (arg2);
if (TREE_CODE (arg1) == INTEGER_CST)
return int_const_binop (code, arg1, arg2, notrunc);
if (TREE_CODE (arg1) == REAL_CST)
{
enum machine_mode mode;
REAL_VALUE_TYPE d1;
REAL_VALUE_TYPE d2;
REAL_VALUE_TYPE value;
REAL_VALUE_TYPE result;
bool inexact;
tree t, type;
/* The following codes are handled by real_arithmetic. */
switch (code)
{
case PLUS_EXPR:
case MINUS_EXPR:
case MULT_EXPR:
case RDIV_EXPR:
case MIN_EXPR:
case MAX_EXPR:
break;
default:
return NULL_TREE;
}
d1 = TREE_REAL_CST (arg1);
d2 = TREE_REAL_CST (arg2);
type = TREE_TYPE (arg1);
mode = TYPE_MODE (type);
/* Don't perform operation if we honor signaling NaNs and
either operand is a NaN. */
if (HONOR_SNANS (mode)
&& (REAL_VALUE_ISNAN (d1) || REAL_VALUE_ISNAN (d2)))
return NULL_TREE;
/* Don't perform operation if it would raise a division
by zero exception. */
if (code == RDIV_EXPR
&& REAL_VALUES_EQUAL (d2, dconst0)
&& (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
return NULL_TREE;
/* If either operand is a NaN, just return it. Otherwise, set up
for floating-point trap; we return an overflow. */
if (REAL_VALUE_ISNAN (d1))
return arg1;
else if (REAL_VALUE_ISNAN (d2))
return arg2;
inexact = real_arithmetic (&value, code, &d1, &d2);
real_convert (&result, mode, &value);
/* Don't constant fold this floating point operation if
the result has overflowed and flag_trapping_math. */
if (flag_trapping_math
&& MODE_HAS_INFINITIES (mode)
&& REAL_VALUE_ISINF (result)
&& !REAL_VALUE_ISINF (d1)
&& !REAL_VALUE_ISINF (d2))
return NULL_TREE;
/* Don't constant fold this floating point operation if the
result may depend upon the run-time rounding mode and
flag_rounding_math is set, or if GCC's software emulation
is unable to accurately represent the result. */
if ((flag_rounding_math
|| (MODE_COMPOSITE_P (mode) && !flag_unsafe_math_optimizations))
&& (inexact || !real_identical (&result, &value)))
return NULL_TREE;
t = build_real (type, result);
TREE_OVERFLOW (t) = TREE_OVERFLOW (arg1) | TREE_OVERFLOW (arg2);
return t;
}
if (TREE_CODE (arg1) == FIXED_CST)
{
FIXED_VALUE_TYPE f1;
FIXED_VALUE_TYPE f2;
FIXED_VALUE_TYPE result;
tree t, type;
int sat_p;
bool overflow_p;
/* The following codes are handled by fixed_arithmetic. */
switch (code)
{
case PLUS_EXPR:
case MINUS_EXPR:
case MULT_EXPR:
case TRUNC_DIV_EXPR:
f2 = TREE_FIXED_CST (arg2);
break;
case LSHIFT_EXPR:
case RSHIFT_EXPR:
f2.data.high = TREE_INT_CST_HIGH (arg2);
f2.data.low = TREE_INT_CST_LOW (arg2);
f2.mode = SImode;
break;
default:
return NULL_TREE;
}
f1 = TREE_FIXED_CST (arg1);
type = TREE_TYPE (arg1);
sat_p = TYPE_SATURATING (type);
overflow_p = fixed_arithmetic (&result, code, &f1, &f2, sat_p);
t = build_fixed (type, result);
/* Propagate overflow flags. */
if (overflow_p | TREE_OVERFLOW (arg1) | TREE_OVERFLOW (arg2))
{
TREE_OVERFLOW (t) = 1;
TREE_CONSTANT_OVERFLOW (t) = 1;
}
else if (TREE_CONSTANT_OVERFLOW (arg1) | TREE_CONSTANT_OVERFLOW (arg2))
TREE_CONSTANT_OVERFLOW (t) = 1;
return t;
}
if (TREE_CODE (arg1) == COMPLEX_CST)
{
tree type = TREE_TYPE (arg1);
tree r1 = TREE_REALPART (arg1);
tree i1 = TREE_IMAGPART (arg1);
tree r2 = TREE_REALPART (arg2);
tree i2 = TREE_IMAGPART (arg2);
tree real, imag;
switch (code)
{
case PLUS_EXPR:
case MINUS_EXPR:
real = const_binop (code, r1, r2, notrunc);
imag = const_binop (code, i1, i2, notrunc);
break;
case MULT_EXPR:
real = const_binop (MINUS_EXPR,
const_binop (MULT_EXPR, r1, r2, notrunc),
const_binop (MULT_EXPR, i1, i2, notrunc),
notrunc);
imag = const_binop (PLUS_EXPR,
const_binop (MULT_EXPR, r1, i2, notrunc),
const_binop (MULT_EXPR, i1, r2, notrunc),
notrunc);
break;
case RDIV_EXPR:
{
tree magsquared
= const_binop (PLUS_EXPR,
const_binop (MULT_EXPR, r2, r2, notrunc),
const_binop (MULT_EXPR, i2, i2, notrunc),
notrunc);
tree t1
= const_binop (PLUS_EXPR,
const_binop (MULT_EXPR, r1, r2, notrunc),
const_binop (MULT_EXPR, i1, i2, notrunc),
notrunc);
tree t2
= const_binop (MINUS_EXPR,
const_binop (MULT_EXPR, i1, r2, notrunc),
const_binop (MULT_EXPR, r1, i2, notrunc),
notrunc);
if (INTEGRAL_TYPE_P (TREE_TYPE (r1)))
code = TRUNC_DIV_EXPR;
real = const_binop (code, t1, magsquared, notrunc);
imag = const_binop (code, t2, magsquared, notrunc);
}
break;
default:
return NULL_TREE;
}
if (real && imag)
return build_complex (type, real, imag);
}
return NULL_TREE;
}
/* Create a size type INT_CST node with NUMBER sign extended. KIND
indicates which particular sizetype to create. */
tree
size_int_kind (HOST_WIDE_INT number, enum size_type_kind kind)
{
return build_int_cst (sizetype_tab[(int) kind], number);
}
/* Combine operands OP1 and OP2 with arithmetic operation CODE. CODE
is a tree code. The type of the result is taken from the operands.
Both must be equivalent integer types, ala int_binop_types_match_p.
If the operands are constant, so is the result. */
tree
size_binop (enum tree_code code, tree arg0, tree arg1)
{
tree type = TREE_TYPE (arg0);
if (arg0 == error_mark_node || arg1 == error_mark_node)
return error_mark_node;
gcc_assert (int_binop_types_match_p (code, TREE_TYPE (arg0),
TREE_TYPE (arg1)));
/* Handle the special case of two integer constants faster. */
if (TREE_CODE (arg0) == INTEGER_CST && TREE_CODE (arg1) == INTEGER_CST)
{
/* And some specific cases even faster than that. */
if (code == PLUS_EXPR)
{
if (integer_zerop (arg0) && !TREE_OVERFLOW (arg0))
return arg1;
if (integer_zerop (arg1) && !TREE_OVERFLOW (arg1))
return arg0;
}
else if (code == MINUS_EXPR)
{
if (integer_zerop (arg1) && !TREE_OVERFLOW (arg1))
return arg0;
}
else if (code == MULT_EXPR)
{
if (integer_onep (arg0) && !TREE_OVERFLOW (arg0))
return arg1;
}
/* Handle general case of two integer constants. */
return int_const_binop (code, arg0, arg1, 0);
}
return fold_build2 (code, type, arg0, arg1);
}
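/* Typical use (illustrative): size_binop (PLUS_EXPR, size_int (4),
size_int (8)) folds immediately to the sizetype constant 12, while
non-constant operands fall through to fold_build2. */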
/* Given two values, either both of sizetype or both of bitsizetype,
compute the difference between the two values. Return the value
in signed type corresponding to the type of the operands. */
tree
size_diffop (tree arg0, tree arg1)
{
tree type = TREE_TYPE (arg0);
tree ctype;
gcc_assert (int_binop_types_match_p (MINUS_EXPR, TREE_TYPE (arg0),
TREE_TYPE (arg1)));
/* If the type is already signed, just do the simple thing. */
if (!TYPE_UNSIGNED (type))
return size_binop (MINUS_EXPR, arg0, arg1);
if (type == sizetype)
ctype = ssizetype;
else if (type == bitsizetype)
ctype = sbitsizetype;
else
ctype = signed_type_for (type);
/* If either operand is not a constant, do the conversions to the signed
type and subtract. The hardware will do the right thing with any
overflow in the subtraction. */
if (TREE_CODE (arg0) != INTEGER_CST || TREE_CODE (arg1) != INTEGER_CST)
return size_binop (MINUS_EXPR, fold_convert (ctype, arg0),
fold_convert (ctype, arg1));
/* If ARG0 is larger than ARG1, subtract and return the result in CTYPE.
Otherwise, subtract the other way, convert to CTYPE (we know that can't
overflow) and negate (which can't either). Special-case a result
of zero while we're here. */
if (tree_int_cst_equal (arg0, arg1))
return build_int_cst (ctype, 0);
else if (tree_int_cst_lt (arg1, arg0))
return fold_convert (ctype, size_binop (MINUS_EXPR, arg0, arg1));
else
return size_binop (MINUS_EXPR, build_int_cst (ctype, 0),
fold_convert (ctype, size_binop (MINUS_EXPR,
arg1, arg0)));
}
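/* E.g. (illustrative): size_diffop on the sizetype constants 2 and 5
computes 5 - 2 == 3, converts it to ssizetype and negates, yielding
-3, which the unsigned input type could not represent directly. */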
/* A subroutine of fold_convert_const handling conversions of an
INTEGER_CST to another integer type. */
static tree
fold_convert_const_int_from_int (tree type, const_tree arg1)
{
tree t;
/* Given an integer constant, make new constant with new type,
appropriately sign-extended or truncated. */
t = force_fit_type_double (type, TREE_INT_CST_LOW (arg1),
TREE_INT_CST_HIGH (arg1),
/* Don't set the overflow when
converting from a pointer, */
!POINTER_TYPE_P (TREE_TYPE (arg1))
/* or to a sizetype with same signedness
and the precision is unchanged.
??? sizetype is always sign-extended,
but its signedness depends on the
frontend. Thus we see spurious overflows
here if we do not check this. */
&& !((TYPE_PRECISION (TREE_TYPE (arg1))
== TYPE_PRECISION (type))
&& (TYPE_UNSIGNED (TREE_TYPE (arg1))
== TYPE_UNSIGNED (type))
&& ((TREE_CODE (TREE_TYPE (arg1)) == INTEGER_TYPE
&& TYPE_IS_SIZETYPE (TREE_TYPE (arg1)))
|| (TREE_CODE (type) == INTEGER_TYPE
&& TYPE_IS_SIZETYPE (type)))),
(TREE_INT_CST_HIGH (arg1) < 0
&& (TYPE_UNSIGNED (type)
< TYPE_UNSIGNED (TREE_TYPE (arg1))))
| TREE_OVERFLOW (arg1));
return t;
}
/* A subroutine of fold_convert_const handling conversions of a REAL_CST
to an integer type. */
static tree
fold_convert_const_int_from_real (enum tree_code code, tree type, const_tree arg1)
{
int overflow = 0;
tree t;
/* The following code implements the floating point to integer
conversion rules required by the Java Language Specification,
that IEEE NaNs are mapped to zero and values that overflow
the target precision saturate, i.e. values greater than
INT_MAX are mapped to INT_MAX, and values less than INT_MIN
are mapped to INT_MIN. These semantics are allowed by the
C and C++ standards, which simply state that the behavior of
FP-to-integer conversion is undefined upon overflow. */
HOST_WIDE_INT high, low;
REAL_VALUE_TYPE r;
REAL_VALUE_TYPE x = TREE_REAL_CST (arg1);
switch (code)
{
case FIX_TRUNC_EXPR:
real_trunc (&r, VOIDmode, &x);
break;
default:
gcc_unreachable ();
}
/* If R is NaN, return zero and show we have an overflow. */
if (REAL_VALUE_ISNAN (r))
{
overflow = 1;
high = 0;
low = 0;
}
/* See if R is less than the lower bound or greater than the
upper bound. */
if (! overflow)
{
tree lt = TYPE_MIN_VALUE (type);
REAL_VALUE_TYPE l = real_value_from_int_cst (NULL_TREE, lt);
if (REAL_VALUES_LESS (r, l))
{
overflow = 1;
high = TREE_INT_CST_HIGH (lt);
low = TREE_INT_CST_LOW (lt);
}
}
if (! overflow)
{
tree ut = TYPE_MAX_VALUE (type);
if (ut)
{
REAL_VALUE_TYPE u = real_value_from_int_cst (NULL_TREE, ut);
if (REAL_VALUES_LESS (u, r))
{
overflow = 1;
high = TREE_INT_CST_HIGH (ut);
low = TREE_INT_CST_LOW (ut);
}
}
}
if (! overflow)
REAL_VALUE_TO_INT (&low, &high, r);
t = force_fit_type_double (type, low, high, -1,
overflow | TREE_OVERFLOW (arg1));
return t;
}
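/* Illustrative results of the saturating semantics above (assuming a
32-bit int): folding (int) 1.0e19 yields INT_MAX with TREE_OVERFLOW
set, (int) -1.0e19 yields INT_MIN, and (int) NaN yields 0, each
flagged as overflowed. */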
/* A subroutine of fold_convert_const handling conversions of a
FIXED_CST to an integer type. */
static tree
fold_convert_const_int_from_fixed (tree type, const_tree arg1)
{
tree t;
double_int temp, temp_trunc;
unsigned int mode;
/* Right shift FIXED_CST to temp by fbit. */
temp = TREE_FIXED_CST (arg1).data;
mode = TREE_FIXED_CST (arg1).mode;
if (GET_MODE_FBIT (mode) < 2 * HOST_BITS_PER_WIDE_INT)
{
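/* Note: lshift_double interprets a negative shift count as a right
shift, so passing -FBIT here shifts TEMP right by FBIT bits. */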
lshift_double (temp.low, temp.high,
- GET_MODE_FBIT (mode), 2 * HOST_BITS_PER_WIDE_INT,
&temp.low, &temp.high, SIGNED_FIXED_POINT_MODE_P (mode));
/* Left shift temp to temp_trunc by fbit. */
lshift_double (temp.low, temp.high,
GET_MODE_FBIT (mode), 2 * HOST_BITS_PER_WIDE_INT,
&temp_trunc.low, &temp_trunc.high,
SIGNED_FIXED_POINT_MODE_P (mode));
}
else
{
temp.low = 0;
temp.high = 0;
temp_trunc.low = 0;
temp_trunc.high = 0;
}
/* If FIXED_CST is negative, we need to round the value toward 0.
If any fractional bits were truncated by the shift above (i.e.
TEMP_TRUNC differs from the original value), add 1 to TEMP. */
if (SIGNED_FIXED_POINT_MODE_P (mode) && temp_trunc.high < 0
&& !double_int_equal_p (TREE_FIXED_CST (arg1).data, temp_trunc))
{
double_int one;
one.low = 1;
one.high = 0;
temp = double_int_add (temp, one);
}
/* Given a fixed-point constant, make a new constant with the new type,
appropriately sign-extended or truncated. */
t = force_fit_type_double (type, temp.low, temp.high, -1,
(temp.high < 0
&& (TYPE_UNSIGNED (type)
< TYPE_UNSIGNED (TREE_TYPE (arg1))))
| TREE_OVERFLOW (arg1));
return t;
}
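/* Worked example (illustrative): for a signed fixed-point value -2.5,
the arithmetic right shift truncates toward negative infinity, giving
-3; the truncated fractional bits are nonzero, so 1 is added back,
rounding toward zero to -2 as required. */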
/* A subroutine of fold_convert_const handling conversions of a REAL_CST
to another floating-point type. */
static tree
fold_convert_const_real_from_real (tree type, const_tree arg1)
{
REAL_VALUE_TYPE value;
tree t;
real_convert (&value, TYPE_MODE (type), &TREE_REAL_CST (arg1));
t = build_real (type, value);
TREE_OVERFLOW (t) = TREE_OVERFLOW (arg1);
return t;
}
/* A subroutine of fold_convert_const handling conversions of a FIXED_CST
to a floating-point type. */
static tree
fold_convert_const_real_from_fixed (tree type, const_tree arg1)
{
REAL_VALUE_TYPE value;
tree t;
real_convert_from_fixed (&value, TYPE_MODE (type), &TREE_FIXED_CST (arg1));
t = build_real (type, value);
TREE_OVERFLOW (t) = TREE_OVERFLOW (arg1);
TREE_CONSTANT_OVERFLOW (t)
= TREE_OVERFLOW (t) | TREE_CONSTANT_OVERFLOW (arg1);
return t;
}
/* A subroutine of fold_convert_const handling conversions of a FIXED_CST
to another fixed-point type. */
static tree
fold_convert_const_fixed_from_fixed (tree type, const_tree arg1)
{
FIXED_VALUE_TYPE value;
tree t;
bool overflow_p;
overflow_p = fixed_convert (&value, TYPE_MODE (type), &TREE_FIXED_CST (arg1),
TYPE_SATURATING (type));
t = build_fixed (type, value);
/* Propagate overflow flags. */
if (overflow_p | TREE_OVERFLOW (arg1))
{
TREE_OVERFLOW (t) = 1;
TREE_CONSTANT_OVERFLOW (t) = 1;
}
else if (TREE_CONSTANT_OVERFLOW (arg1))
TREE_CONSTANT_OVERFLOW (t) = 1;
return t;
}
/* A subroutine of fold_convert_const handling conversions of an INTEGER_CST
to a fixed-point type. */
static tree
fold_convert_const_fixed_from_int (tree type, const_tree arg1)
{
FIXED_VALUE_TYPE value;
tree t;
bool overflow_p;
overflow_p = fixed_convert_from_int (&value, TYPE_MODE (type),
TREE_INT_CST (arg1),
TYPE_UNSIGNED (TREE_TYPE (arg1)),
TYPE_SATURATING (type));
t = build_fixed (type, value);
/* Propagate overflow flags. */
if (overflow_p | TREE_OVERFLOW (arg1))
{
TREE_OVERFLOW (t) = 1;
TREE_CONSTANT_OVERFLOW (t) = 1;
}
else if (TREE_CONSTANT_OVERFLOW (arg1))
TREE_CONSTANT_OVERFLOW (t) = 1;
return t;
}
/* A subroutine of fold_convert_const handling conversions of a REAL_CST
to a fixed-point type. */
static tree
fold_convert_const_fixed_from_real (tree type, const_tree arg1)
{
FIXED_VALUE_TYPE value;
tree t;
bool overflow_p;
overflow_p = fixed_convert_from_real (&value, TYPE_MODE (type),
&TREE_REAL_CST (arg1),
TYPE_SATURATING (type));
t = build_fixed (type, value);
/* Propagate overflow flags. */
if (overflow_p | TREE_OVERFLOW (arg1))
{
TREE_OVERFLOW (t) = 1;
TREE_CONSTANT_OVERFLOW (t) = 1;
}
else if (TREE_CONSTANT_OVERFLOW (arg1))
TREE_CONSTANT_OVERFLOW (t) = 1;
return t;
}
/* Attempt to fold type conversion operation CODE of expression ARG1 to
type TYPE. If no simplification can be done, return NULL_TREE. */
static tree
fold_convert_const (enum tree_code code, tree type, tree arg1)
{
if (TREE_TYPE (arg1) == type)
return arg1;
if (POINTER_TYPE_P (type) || INTEGRAL_TYPE_P (type)
|| TREE_CODE (type) == OFFSET_TYPE)
{
if (TREE_CODE (arg1) == INTEGER_CST)
return fold_convert_const_int_from_int (type, arg1);
else if (TREE_CODE (arg1) == REAL_CST)
return fold_convert_const_int_from_real (code, type, arg1);
else if (TREE_CODE (arg1) == FIXED_CST)
return fold_convert_const_int_from_fixed (type, arg1);
}
else if (TREE_CODE (type) == REAL_TYPE)
{
if (TREE_CODE (arg1) == INTEGER_CST)
return build_real_from_int_cst (type, arg1);
else if (TREE_CODE (arg1) == REAL_CST)
return fold_convert_const_real_from_real (type, arg1);
else if (TREE_CODE (arg1) == FIXED_CST)
return fold_convert_const_real_from_fixed (type, arg1);
}
else if (TREE_CODE (type) == FIXED_POINT_TYPE)
{
if (TREE_CODE (arg1) == FIXED_CST)
return fold_convert_const_fixed_from_fixed (type, arg1);
else if (TREE_CODE (arg1) == INTEGER_CST)
return fold_convert_const_fixed_from_int (type, arg1);
else if (TREE_CODE (arg1) == REAL_CST)
return fold_convert_const_fixed_from_real (type, arg1);
}
return NULL_TREE;
}
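/* Example dispatch (illustrative): fold_convert_const (FIX_TRUNC_EXPR,
integer_type_node, <REAL_CST 3.5>) reaches
fold_convert_const_int_from_real and yields an INTEGER_CST 3;
combinations not matched above return NULL_TREE. */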
/* Construct a vector of vector type TYPE whose elements are all zero. */
static tree
build_zero_vector (tree type)
{
tree elem, list;
int i, units;
elem = fold_convert_const (NOP_EXPR, TREE_TYPE (type), integer_zero_node);
units = TYPE_VECTOR_SUBPARTS (type);
list = NULL_TREE;
for (i = 0; i < units; i++)
list = tree_cons (NULL_TREE, elem, list);
return build_vector (type, list);
}
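/* E.g. for a four-element integer vector type this yields the constant
vector { 0, 0, 0, 0 }. */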
/* Returns true if ARG is convertible to TYPE using a NOP_EXPR. */
bool
fold_convertible_p (const_tree type, const_tree arg)
{
tree orig = TREE_TYPE (arg);
if (type == orig)
return true;
if (TREE_CODE (arg) == ERROR_MARK
|| TREE_CODE (type) == ERROR_MARK
|| TREE_CODE (orig) == ERROR_MARK)
return false;
if (TYPE_MAIN_VARIANT (type) == TYPE_MAIN_VARIANT (orig))
return true;
switch (TREE_CODE (type))
{
case INTEGER_TYPE: case ENUMERAL_TYPE: case BOOLEAN_TYPE:
case POINTER_TYPE: case REFERENCE_TYPE:
case OFFSET_TYPE:
if (INTEGRAL_TYPE_P (orig) || POINTER_TYPE_P (orig)
|| TREE_CODE (orig) == OFFSET_TYPE)
return true;
return (TREE_CODE (orig) == VECTOR_TYPE
&& tree_int_cst_equal (TYPE_SIZE (type), TYPE_SIZE (orig)));
case REAL_TYPE:
case FIXED_POINT_TYPE:
case COMPLEX_TYPE:
case VECTOR_TYPE:
case VOID_TYPE:
return TREE_CODE (type) == TREE_CODE (orig);
default:
return false;
}
}
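/* Illustrative answers: an ENUMERAL_TYPE argument is convertible to an
INTEGER_TYPE; a REAL_TYPE argument is not convertible to a
POINTER_TYPE; a VECTOR_TYPE argument converts to an integral type
only when the two TYPE_SIZEs match. */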
/* Convert expression ARG to type TYPE. Used by the middle-end for
simple conversions in preference to calling the front-end's convert. */
tree
fold_convert (tree type, tree arg)
{
tree orig = TREE_TYPE (arg);
tree tem;
if (type == orig)
return arg;
if (TREE_CODE (arg) == ERROR_MARK
|| TREE_CODE (type) == ERROR_MARK
|| TREE_CODE (orig) == ERROR_MARK)
return error_mark_node;
if (TYPE_MAIN_VARIANT (type) == TYPE_MAIN_VARIANT (orig))
return fold_build1 (NOP_EXPR, type, arg);
switch (TREE_CODE (type))
{
case INTEGER_TYPE: case ENUMERAL_TYPE: case BOOLEAN_TYPE:
case POINTER_TYPE: case REFERENCE_TYPE:
case OFFSET_TYPE:
if (TREE_CODE (arg) == INTEGER_CST)
{
tem = fold_convert_const (NOP_EXPR, type, arg);
if (tem != NULL_TREE)
return tem;
}
if (INTEGRAL_TYPE_P (orig) || POINTER_TYPE_P (orig)
|| TREE_CODE (orig) == OFFSET_TYPE)
return fold_build1 (NOP_EXPR, type, arg);
if (TREE_CODE (orig) == COMPLEX_TYPE)
{
tem = fold_build1 (REALPART_EXPR, TREE_TYPE (orig), arg);
return fold_convert (type, tem);
}
gcc_assert (TREE_CODE (orig) == VECTOR_TYPE
&& tree_int_cst_equal (TYPE_SIZE (type), TYPE_SIZE (orig)));
return fold_build1 (NOP_EXPR, type, arg);
case REAL_TYPE:
if (TREE_CODE (arg) == INTEGER_CST)
{
tem = fold_convert_const (FLOAT_EXPR, type, arg);
if (tem != NULL_TREE)
return tem;
}
else if (TREE_CODE (arg) == REAL_CST)
{
tem = fold_convert_const (NOP_EXPR, type, arg);
if (tem != NULL_TREE)
return tem;
}
else if (TREE_CODE (arg) == FIXED_CST)
{
tem = fold_convert_const (FIXED_CONVERT_EXPR, type, arg);
if (tem != NULL_TREE)
return tem;
}
switch (TREE_CODE (orig))
{
case INTEGER_TYPE:
case BOOLEAN_TYPE: case ENUMERAL_TYPE:
case POINTER_TYPE: case REFERENCE_TYPE:
return fold_build1 (FLOAT_EXPR, type, arg);
case REAL_TYPE:
return fold_build1 (NOP_EXPR, type, arg);
case FIXED_POINT_TYPE:
return fold_build1 (FIXED_CONVERT_EXPR, type, arg);
case COMPLEX_TYPE:
tem = fold_build1 (REALPART_EXPR, TREE_TYPE (orig), arg);
return fold_convert (type, tem);
default:
gcc_unreachable ();
}
case FIXED_POINT_TYPE:
if (TREE_CODE (arg) == FIXED_CST || TREE_CODE (arg) == INTEGER_CST
|| TREE_CODE (arg) == REAL_CST)
{
tem = fold_convert_const (FIXED_CONVERT_EXPR, type, arg);
if (tem != NULL_TREE)
return tem;
}
switch (TREE_CODE (orig))
{
case FIXED_POINT_TYPE:
case INTEGER_TYPE:
case ENUMERAL_TYPE:
case BOOLEAN_TYPE:
case REAL_TYPE:
return fold_build1 (FIXED_CONVERT_EXPR, type, arg);
case COMPLEX_TYPE:
tem = fold_build1 (REALPART_EXPR, TREE_TYPE (orig), arg);
return fold_convert (type, tem);
default:
gcc_unreachable ();
}
case COMPLEX_TYPE:
switch (TREE_CODE (orig))
{
case INTEGER_TYPE:
case BOOLEAN_TYPE: case ENUMERAL_TYPE:
case POINTER_TYPE: case REFERENCE_TYPE:
case REAL_TYPE:
case FIXED_POINT_TYPE:
return fold_build2 (COMPLEX_EXPR, type,
fold_convert (TREE_TYPE (type), arg),
fold_convert (TREE_TYPE (type),
integer_zero_node));
case COMPLEX_TYPE:
{
tree rpart, ipart;
if (TREE_CODE (arg) == COMPLEX_EXPR)
{
rpart = fold_convert (TREE_TYPE (type), TREE_OPERAND (arg, 0));
ipart = fold_convert (TREE_TYPE (type), TREE_OPERAND (arg, 1));
return fold_build2 (COMPLEX_EXPR, type, rpart, ipart);
}
arg = save_expr (arg);
rpart = fold_build1 (REALPART_EXPR, TREE_TYPE (orig), arg);
ipart = fold_build1 (IMAGPART_EXPR, TREE_TYPE (orig), arg);
rpart = fold_convert (TREE_TYPE (type), rpart);
ipart = fold_convert (TREE_TYPE (type), ipart);
return fold_build2 (COMPLEX_EXPR, type, rpart, ipart);
}
default:
gcc_unreachable ();
}
case VECTOR_TYPE:
if (integer_zerop (arg))
return build_zero_vector (type);
gcc_assert (tree_int_cst_equal (TYPE_SIZE (type), TYPE_SIZE (orig)));
gcc_assert (INTEGRAL_TYPE_P (orig) || POINTER_TYPE_P (orig)
|| TREE_CODE (orig) == VECTOR_TYPE);
return fold_build1 (VIEW_CONVERT_EXPR, type, arg);
case VOID_TYPE:
tem = fold_ignored_result (arg);
if (TREE_CODE (tem) == MODIFY_EXPR)
return tem;
return fold_build1 (NOP_EXPR, type, tem);
default:
gcc_unreachable ();
}
}
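/* Illustrative cases of the dispatch above: converting a COMPLEX_TYPE
value to a REAL_TYPE folds to a conversion of its REALPART_EXPR, and
converting anything to VOID_TYPE discards the value via
fold_ignored_result. */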
/* Return false if expr can be assumed not to be an lvalue, true
otherwise. */
static bool
maybe_lvalue_p (const_tree x)
{
/* We only need to wrap lvalue tree codes. */
switch (TREE_CODE (x))
{
case VAR_DECL:
case PARM_DECL:
case RESULT_DECL:
case LABEL_DECL:
case FUNCTION_DECL:
case SSA_NAME:
case COMPONENT_REF:
case INDIRECT_REF:
case ALIGN_INDIRECT_REF:
case MISALIGNED_INDIRECT_REF:
case ARRAY_REF:
case ARRAY_RANGE_REF:
case BIT_FIELD_REF:
case OBJ_TYPE_REF:
case REALPART_EXPR:
case IMAGPART_EXPR:
case PREINCREMENT_EXPR:
case PREDECREMENT_EXPR:
case SAVE_EXPR:
case TRY_CATCH_EXPR:
case WITH_CLEANUP_EXPR:
case COMPOUND_EXPR:
case MODIFY_EXPR:
case TARGET_EXPR:
case COND_EXPR:
case BIND_EXPR:
case MIN_EXPR:
case MAX_EXPR:
break;
default:
/* Assume the worst for front-end tree codes. */
if ((int)TREE_CODE (x) >= NUM_TREE_CODES)
break;
return false;
}
return true;
}
/* Return an expr equal to X but certainly not valid as an lvalue. */
tree
non_lvalue (tree x)
{
/* While we are in GIMPLE, NON_LVALUE_EXPR doesn't mean anything to
us. */
if (in_gimple_form)
return x;
if (! maybe_lvalue_p (x))
return x;
return build1 (NON_LVALUE_EXPR, TREE_TYPE (x), x);
}
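/* E.g. wrapping a VAR_DECL produces (NON_LVALUE_EXPR <var>), while a
PLUS_EXPR is returned unchanged because maybe_lvalue_p already
rejects it. */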
/* Nonzero means lvalues are limited to those valid in pedantic ANSI C.
Zero means allow extended lvalues. */
int pedantic_lvalues;
/* When pedantic, return an expr equal to X but certainly not valid as a
pedantic lvalue. Otherwise, return X. */
static tree
pedantic_non_lvalue (tree x)
{
if (pedantic_lvalues)
return non_lvalue (x);
else
return x;
}
/* Given a tree comparison code, return the code that is the logical inverse
of the given code. It is not safe to do this for floating-point
comparisons, except for NE_EXPR and EQ_EXPR, so we take a flag saying
whether NaNs must be honored rather than a machine mode: if reversing
the comparison is unsafe (in particular, whenever NaNs are honored and
-ftrapping-math is in effect), return ERROR_MARK. */
enum tree_code
invert_tree_comparison (enum tree_code code, bool honor_nans)
{
if (honor_nans && flag_trapping_math)
return ERROR_MARK;
switch (code)
{
case EQ_EXPR:
return NE_EXPR;
case NE_EXPR:
return EQ_EXPR;
case GT_EXPR:
return honor_nans ? UNLE_EXPR : LE_EXPR;
case GE_EXPR:
return honor_nans ? UNLT_EXPR : LT_EXPR;
case LT_EXPR: