3857 lines
110 KiB
C
3857 lines
110 KiB
C
/* Common subexpression elimination for GNU compiler.
|
||
Copyright (C) 1987, 1988, 1989 Free Software Foundation, Inc.
|
||
|
||
This file is part of GNU CC.
|
||
|
||
GNU CC is free software; you can redistribute it and/or modify
|
||
it under the terms of the GNU General Public License as published by
|
||
the Free Software Foundation; either version 1, or (at your option)
|
||
any later version.
|
||
|
||
GNU CC is distributed in the hope that it will be useful,
|
||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||
GNU General Public License for more details.
|
||
|
||
You should have received a copy of the GNU General Public License
|
||
along with GNU CC; see the file COPYING. If not, write to
|
||
the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */
|
||
|
||
|
||
#include "config.h"
|
||
#include "rtl.h"
|
||
#include "regs.h"
|
||
#include "hard-reg-set.h"
|
||
#include "flags.h"
|
||
#include "real.h"
|
||
|
||
/* The basic idea of common subexpression elimination is to go
|
||
through the code, keeping a record of expressions that would
|
||
have the same value at the current scan point, and replacing
|
||
expressions encountered with the cheapest equivalent expression.
|
||
|
||
It is too complicated to keep track of the different possibilities
|
||
when control paths merge; so, at each label, we forget all that is
|
||
known and start fresh. This can be described as processing each
|
||
basic block separately. Note, however, that these are not quite
|
||
the same as the basic blocks found by a later pass and used for
|
||
data flow analysis and register packing. We do not need to start fresh
|
||
after a conditional jump instruction if there is no label there.
|
||
|
||
We use two data structures to record the equivalent expressions:
|
||
a hash table for most expressions, and several vectors together
|
||
with "quantity numbers" to record equivalent (pseudo) registers.
|
||
|
||
The use of the special data structure for registers is desirable
|
||
because it is faster. It is possible because register references
|
||
contain a fairly small number, the register number, taken from
|
||
a contiguously allocated series, and two register references are
|
||
identical if they have the same number. General expressions
|
||
do not have any such thing, so the only way to retrieve the
|
||
information recorded on an expression other than a register
|
||
is to keep it in a hash table.
|
||
|
||
Registers and "quantity numbers":
|
||
|
||
At the start of each basic block, all of the (hardware and pseudo)
|
||
registers used in the function are given distinct quantity
|
||
numbers to indicate their contents. During scan, when the code
|
||
copies one register into another, we copy the quantity number.
|
||
When a register is loaded in any other way, we allocate a new
|
||
quantity number to describe the value generated by this operation.
|
||
`reg_qty' records what quantity a register is currently thought
|
||
of as containing.
|
||
|
||
We also maintain a bidirectional chain of registers for each
|
||
quantity number. `qty_first_reg', `qty_last_reg',
|
||
`reg_next_eqv' and `reg_prev_eqv' hold these chains.
|
||
|
||
The first register in a chain is the one whose lifespan is least local.
|
||
Among equals, it is the one that was seen first.
|
||
We replace any equivalent register with that one.
|
||
|
||
Constants and quantity numbers
|
||
|
||
When a quantity has a known constant value, that value is stored
|
||
in the appropriate element of qty_const. This is in addition to
|
||
putting the constant in the hash table as is usual for non-regs.
|
||
|
||
Regs are preferred to constants as they are to everything else,
|
||
but expressions containing constants can be simplified, by fold_rtx.
|
||
|
||
When a quantity has a known nearly constant value (such as an address
|
||
of a stack slot), that value is stored in the appropriate element
|
||
of qty_const.
|
||
|
||
Integer constants don't have a machine mode. However, cse
|
||
determines the intended machine mode from the destination
|
||
of the instruction that moves the constant. The machine mode
|
||
is recorded in the hash table along with the actual RTL
|
||
constant expression so that different modes are kept separate.
|
||
|
||
Other expressions:
|
||
|
||
To record known equivalences among expressions in general
|
||
we use a hash table called `table'. It has a fixed number of buckets
|
||
that contain chains of `struct table_elt' elements for expressions.
|
||
These chains connect the elements whose expressions have the same
|
||
hash codes.
|
||
|
||
Other chains through the same elements connect the elements which
|
||
currently have equivalent values.
|
||
|
||
Register references in an expression are canonicalized before hashing
|
||
the expression. This is done using `reg_qty' and `qty_first_reg'.
|
||
The hash code of a register reference is computed using the quantity
|
||
number, not the register number.
|
||
|
||
When the value of an expression changes, it is necessary to remove from the
|
||
hash table not just that expression but all expressions whose values
|
||
could be different as a result.
|
||
|
||
1. If the value changing is in memory, except in special cases
|
||
ANYTHING referring to memory could be changed. That is because
|
||
nobody knows where a pointer does not point.
|
||
The function `invalidate_memory' removes what is necessary.
|
||
|
||
The special cases are when the address is constant or is
|
||
a constant plus a fixed register such as the frame pointer
|
||
or a static chain pointer. When such addresses are stored in,
|
||
we can tell exactly which other such addresses must be invalidated
|
||
due to overlap. `invalidate' does this.
|
||
All expressions that refer to non-constant
|
||
memory addresses are also invalidated. `invalidate_memory' does this.
|
||
|
||
2. If the value changing is a register, all expressions
|
||
containing references to that register, and only those,
|
||
must be removed.
|
||
|
||
Because searching the entire hash table for expressions that contain
|
||
a register is very slow, we try to figure out when it isn't necessary.
|
||
Precisely, this is necessary only when expressions have been
|
||
entered in the hash table using this register, and then the value has
|
||
changed, and then another expression wants to be added to refer to
|
||
the register's new value. This sequence of circumstances is rare
|
||
within any one basic block.
|
||
|
||
The vectors `reg_tick' and `reg_in_table' are used to detect this case.
|
||
reg_tick[i] is incremented whenever a value is stored in register i.
|
||
reg_in_table[i] holds -1 if no references to register i have been
|
||
entered in the table; otherwise, it contains the value reg_tick[i] had
|
||
when the references were entered. If we want to enter a reference
|
||
and reg_in_table[i] != reg_tick[i], we must scan and remove old references.
|
||
Until we want to enter a new entry, the mere fact that the two vectors
|
||
don't match makes the entries be ignored if anyone tries to match them.
|
||
|
||
Registers themselves are entered in the hash table as well as in
|
||
the equivalent-register chains. However, the vectors `reg_tick'
|
||
and `reg_in_table' do not apply to expressions which are simple
|
||
register references. These expressions are removed from the table
|
||
immediately when they become invalid, and this can be done even if
|
||
we do not immediately search for all the expressions that refer to
|
||
the register.
|
||
|
||
A CLOBBER rtx in an instruction invalidates its operand for further
|
||
reuse. A CLOBBER or SET rtx whose operand is a MEM:BLK
|
||
invalidates everything that resides in memory.
|
||
|
||
Related expressions:
|
||
|
||
Constant expressions that differ only by an additive integer
|
||
are called related. When a constant expression is put in
|
||
the table, the related expression with no constant term
|
||
is also entered. These are made to point at each other
|
||
so that it is possible to find out if there exists any
|
||
register equivalent to an expression related to a given expression. */
|
||
|
||
/* One plus largest register number used in this function. */
|
||
|
||
static int max_reg;
|
||
|
||
/* Length of vectors indexed by quantity number.
|
||
We know in advance we will not need a quantity number this big. */
|
||
|
||
static int max_qty;
|
||
|
||
/* Next quantity number to be allocated.
|
||
This is 1 + the largest number needed so far. */
|
||
|
||
static int next_qty;
|
||
|
||
/* Indexed by quantity number, gives the first (or last) (pseudo) register
|
||
in the chain of registers that currently contain this quantity. */
|
||
|
||
static int *qty_first_reg;
|
||
static int *qty_last_reg;
|
||
|
||
/* Indexed by quantity number, gives the rtx of the constant value of the
|
||
quantity, or zero if it does not have a known value.
|
||
A sum of the frame pointer (or arg pointer) plus a constant
|
||
can also be entered here. */
|
||
|
||
static rtx *qty_const;
|
||
|
||
/* Indexed by qty number, gives the insn that stored the constant value
|
||
recorded in `qty_const'. */
|
||
|
||
static rtx *qty_const_insn;
|
||
|
||
/* Value stored in CC0 by previous insn:
|
||
0 if previous insn didn't store in CC0.
|
||
else 0100 + (M&7)<<3 + (N&7)
|
||
where M is 1, 0 or -1 if result was >, == or < as signed number
|
||
and N is 1, 0 or -1 if result was >, == or < as unsigned number.
|
||
0200 bit may also be set, meaning that only == and != comparisons
|
||
have known results. */
|
||
|
||
static int prev_insn_cc0;
|
||
|
||
/* For machines where CC0 is one bit, we may see CC0 assigned a
|
||
constant value (after fold_rtx).
|
||
Record here the value stored in the previous insn (0 if none). */
|
||
|
||
static rtx prev_insn_explicit_cc0;
|
||
|
||
/* Previous actual insn. 0 if at first insn of basic block. */
|
||
|
||
static rtx prev_insn;
|
||
|
||
/* Insn being scanned. */
|
||
|
||
static rtx this_insn;
|
||
|
||
/* Index by (pseudo) register number, gives the quantity number
|
||
of the register's current contents. */
|
||
|
||
static int *reg_qty;
|
||
|
||
/* Index by (pseudo) register number, gives the number of the next
|
||
(pseudo) register in the chain of registers sharing the same value.
|
||
Or -1 if this register is at the end of the chain. */
|
||
|
||
static int *reg_next_eqv;
|
||
|
||
/* Index by (pseudo) register number, gives the number of the previous
|
||
(pseudo) register in the chain of registers sharing the same value.
|
||
Or -1 if this register is at the beginning of the chain. */
|
||
|
||
static int *reg_prev_eqv;
|
||
|
||
/* Index by (pseudo) register number, gives the latest rtx
|
||
to use to insert a ref to that register. */
|
||
|
||
static rtx *reg_rtx;
|
||
|
||
/* Index by (pseudo) register number, gives the number of times
|
||
that register has been altered in the current basic block. */
|
||
|
||
static int *reg_tick;
|
||
|
||
/* Index by (pseudo) register number, gives the reg_tick value at which
|
||
rtx's containing this register are valid in the hash table.
|
||
If this does not equal the current reg_tick value, such expressions
|
||
existing in the hash table are invalid.
|
||
If this is -1, no expressions containing this register have been
|
||
entered in the table. */
|
||
|
||
static int *reg_in_table;
|
||
|
||
/* Two vectors of max_reg ints:
|
||
one containing all -1's; in the other, element i contains i.
|
||
These are used to initialize various other vectors fast. */
|
||
|
||
static int *all_minus_one;
|
||
static int *consec_ints;
|
||
|
||
/* Set nonzero in cse_insn to tell cse_basic_block to skip immediately
|
||
to the next basic block and treat it as a continuation of this one. */
|
||
|
||
static int cse_skip_to_next_block;
|
||
|
||
/* CUID of insn that starts the basic block currently being cse-processed. */
|
||
|
||
static int cse_basic_block_start;
|
||
|
||
/* CUID of insn that ends the basic block currently being cse-processed. */
|
||
|
||
static int cse_basic_block_end;
|
||
|
||
/* Vector mapping INSN_UIDs to cuids.
|
||
The cuids are like uids but increase monotonically always.
|
||
We use them to see whether a reg is used outside a given basic block. */
|
||
|
||
static short *uid_cuid;
|
||
|
||
/* Get the cuid of an insn. */
|
||
|
||
#define INSN_CUID(INSN) (uid_cuid[INSN_UID (INSN)])
|
||
|
||
/* Nonzero if cse has altered conditional jump insns
|
||
in such a way that jump optimization should be redone. */
|
||
|
||
static int cse_jumps_altered;
|
||
|
||
/* canon_hash stores 1 in do_not_record
|
||
if it notices a reference to CC0, CC1 or PC. */
|
||
|
||
static int do_not_record;
|
||
|
||
/* canon_hash stores 1 in hash_arg_in_memory
|
||
if it notices a reference to memory within the expression being hashed. */
|
||
|
||
static int hash_arg_in_memory;
|
||
|
||
/* canon_hash stores 1 in hash_arg_in_struct
|
||
if it notices a reference to memory that's part of a structure. */
|
||
|
||
static int hash_arg_in_struct;
|
||
|
||
/* The hash table contains buckets which are chains of `struct table_elt's,
|
||
each recording one expression's information.
|
||
That expression is in the `exp' field.
|
||
|
||
Those elements with the same hash code are chained in both directions
|
||
through the `next_same_hash' and `prev_same_hash' fields.
|
||
|
||
Each set of expressions with equivalent values
|
||
are on a two-way chain through the `next_same_value'
|
||
and `prev_same_value' fields, and all point with
|
||
the `first_same_value' field at the first element in
|
||
that chain. The chain is in order of increasing cost.
|
||
Each element's cost value is in its `cost' field.
|
||
|
||
The `in_memory' field is nonzero for elements that
|
||
involve any reference to memory. These elements are removed
|
||
whenever a write is done to an unidentified location in memory.
|
||
To be safe, we assume that a memory address is unidentified unless
|
||
the address is either a symbol constant or a constant plus
|
||
the frame pointer or argument pointer.
|
||
|
||
The `in_struct' field is nonzero for elements that
|
||
involve any reference to memory inside a structure or array.
|
||
|
||
The `equivalence_only' field means that this expression came from a
|
||
REG_EQUIV or REG_EQUAL note; it is not valid for substitution into an insn.
|
||
|
||
The `related_value' field is used to connect related expressions
|
||
(that differ by adding an integer).
|
||
The related expressions are chained in a circular fashion.
|
||
`related_value' is zero for expressions for which this
|
||
chain is not useful.
|
||
|
||
The `mode' field is usually the same as GET_MODE (`exp'), but
|
||
if `exp' is a CONST_INT and has no machine mode then the `mode'
|
||
field is the mode it was being used as. Each constant is
|
||
recorded separately for each mode it is used with. */
|
||
|
||
|
||
struct table_elt
|
||
{
|
||
rtx exp;
|
||
struct table_elt *next_same_hash;
|
||
struct table_elt *prev_same_hash;
|
||
struct table_elt *next_same_value;
|
||
struct table_elt *prev_same_value;
|
||
struct table_elt *first_same_value;
|
||
struct table_elt *related_value;
|
||
int cost;
|
||
enum machine_mode mode;
|
||
char in_memory;
|
||
char in_struct;
|
||
char equivalence_only;
|
||
};
|
||
|
||
#define HASH(x, m) (canon_hash (x, m) % NBUCKETS)
|
||
/* We don't want a lot of buckets, because we rarely have very many
|
||
things stored in the hash table, and a lot of buckets slows
|
||
down a lot of loops that happen frequently. */
|
||
#define NBUCKETS 31
|
||
|
||
static struct table_elt *table[NBUCKETS];
|
||
|
||
/* Chain of `struct table_elt's made so far for this function
|
||
but currently removed from the table. */
|
||
|
||
static struct table_elt *free_element_chain;
|
||
|
||
/* Number of `struct table_elt' structures made so far for this function. */
|
||
|
||
static int n_elements_made;
|
||
|
||
/* Maximum value `n_elements_made' has had so far in this compilation
|
||
for functions previously processed. */
|
||
|
||
static int max_elements_made;
|
||
|
||
/* Bits describing what kind of values in memory must be invalidated
|
||
for a particular instruction. If all three bits are zero,
|
||
no memory refs need to be invalidated. Each bit is more powerful
|
||
than the preceding ones, and if a bit is set then the preceding
|
||
bits are also set.
|
||
|
||
Here is how the bits are set.
|
||
Writing at a fixed address invalidates only variable addresses,
|
||
writing in a structure element at variable address
|
||
invalidates all but scalar variables,
|
||
and writing in anything else at variable address invalidates everything. */
|
||
|
||
struct write_data
|
||
{
|
||
int var : 1; /* Invalidate variable addresses. */
|
||
int nonscalar : 1; /* Invalidate all but scalar variables. */
|
||
int all : 1; /* Invalidate all memory refs. */
|
||
};
|
||
|
||
/* Nonzero if X has the form (PLUS frame-pointer integer). */
|
||
|
||
#define FIXED_BASE_PLUS_P(X) \
|
||
(GET_CODE (X) == PLUS && GET_CODE (XEXP (X, 1)) == CONST_INT \
|
||
&& (XEXP (X, 0) == frame_pointer_rtx || XEXP (X, 0) == arg_pointer_rtx))
|
||
|
||
static struct table_elt *lookup ();
|
||
static void free_element ();
|
||
|
||
static void remove_invalid_refs ();
|
||
static int exp_equiv_p ();
|
||
int refers_to_p ();
|
||
int refers_to_mem_p ();
|
||
static void invalidate_from_clobbers ();
|
||
static int safe_hash ();
|
||
static int canon_hash ();
|
||
static rtx equiv_constant ();
|
||
static int get_integer_term ();
|
||
static rtx get_related_value ();
|
||
static void note_mem_written ();
|
||
static int cse_rtx_addr_varies_p ();
|
||
static int fold_cc0 ();
|
||
|
||
/* Return an estimate of the cost of computing rtx X.
|
||
The only use of this is to compare the costs of two expressions
|
||
to decide whether to replace one with the other. */
|
||
|
||
static int
|
||
rtx_cost (x)
|
||
rtx x;
|
||
{
|
||
register int i, j;
|
||
register enum rtx_code code;
|
||
register char *fmt;
|
||
register int total;
|
||
|
||
if (x == 0)
|
||
return 0;
|
||
|
||
code = GET_CODE (x);
|
||
switch (code)
|
||
{
|
||
case REG:
|
||
return 1;
|
||
case SUBREG:
|
||
return 2;
|
||
CONST_COSTS (x, code);
|
||
}
|
||
|
||
total = 2;
|
||
|
||
/* Sum the costs of the sub-rtx's, plus 2 just put in. */
|
||
|
||
fmt = GET_RTX_FORMAT (code);
|
||
for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
|
||
if (fmt[i] == 'e')
|
||
total += rtx_cost (XEXP (x, i));
|
||
else if (fmt[i] == 'E')
|
||
for (j = 0; j < XVECLEN (x, i); j++)
|
||
total += rtx_cost (XVECEXP (x, i, j));
|
||
|
||
return total;
|
||
}
|
||
|
||
/* Clear the hash table and initialize each register with its own quantity,
|
||
for a new basic block. */
|
||
|
||
static void
|
||
new_basic_block ()
|
||
{
|
||
register int i;
|
||
register int vecsize = max_reg * sizeof (rtx);
|
||
next_qty = max_reg;
|
||
|
||
bzero (reg_rtx, vecsize);
|
||
bzero (reg_tick, vecsize);
|
||
|
||
bcopy (all_minus_one, reg_in_table, vecsize);
|
||
bcopy (all_minus_one, reg_next_eqv, vecsize);
|
||
bcopy (all_minus_one, reg_prev_eqv, vecsize);
|
||
bcopy (consec_ints, reg_qty, vecsize);
|
||
|
||
for (i = 0; i < max_qty; i++)
|
||
{
|
||
qty_first_reg[i] = i;
|
||
qty_last_reg[i] = i;
|
||
qty_const[i] = 0;
|
||
qty_const_insn[i] = 0;
|
||
}
|
||
|
||
for (i = 0; i < NBUCKETS; i++)
|
||
{
|
||
register struct table_elt *this, *next;
|
||
for (this = table[i]; this; this = next)
|
||
{
|
||
next = this->next_same_hash;
|
||
free_element (this);
|
||
}
|
||
}
|
||
|
||
bzero (table, sizeof table);
|
||
|
||
prev_insn_cc0 = 0;
|
||
prev_insn_explicit_cc0 = 0;
|
||
prev_insn = 0;
|
||
}
|
||
|
||
/* Say that register REG contains a quantity not in any register before. */
|
||
|
||
static void
|
||
make_new_qty (reg)
|
||
register int reg;
|
||
{
|
||
register int q;
|
||
|
||
q = reg_qty[reg] = next_qty++;
|
||
qty_first_reg[q] = reg;
|
||
qty_last_reg[q] = reg;
|
||
}
|
||
|
||
/* Make reg NEW equivalent to reg OLD.
   OLD is not changing; NEW is.
   NEW is spliced into the equivalence chain of OLD's quantity,
   either at the front (becoming the canonical replacement reg
   for the quantity) or toward the rear.  */

static void
make_regs_eqv (new, old)
     register int new, old;
{
  register int lastr, firstr;
  register int q = reg_qty[old];

  /* Nothing should become eqv until it has a "non-invalid" qty number.
     (A reg whose qty number equals its own reg number is invalid.)  */
  if (q == old)
    abort ();

  reg_qty[new] = q;
  firstr = qty_first_reg[q];
  lastr = qty_last_reg[q];

  /* Prefer pseudo regs to hard regs with the same value.
     Among pseudos, if NEW will live longer than any other reg of the same qty,
     and that is beyond the current basic block,
     make it the new canonical replacement for this qty.  */
  if (new >= FIRST_PSEUDO_REGISTER
      && (firstr < FIRST_PSEUDO_REGISTER
	  || ((uid_cuid[regno_last_uid[new]] > cse_basic_block_end
	       || uid_cuid[regno_first_uid[new]] < cse_basic_block_start)
	      && (uid_cuid[regno_last_uid[new]]
		  > uid_cuid[regno_last_uid[firstr]]))))
    {
      /* Link NEW in ahead of FIRSTR so it becomes the canonical reg.  */
      reg_prev_eqv[firstr] = new;
      reg_next_eqv[new] = firstr;
      reg_prev_eqv[new] = -1;
      qty_first_reg[q] = new;
    }
  else
    {
      /* If NEW is a hard reg, insert at end.
	 Otherwise, insert before any hard regs that are at the end.  */
      while (lastr < FIRST_PSEUDO_REGISTER && new >= FIRST_PSEUDO_REGISTER)
	lastr = reg_prev_eqv[lastr];
      /* Splice NEW in immediately after LASTR, fixing up either the
	 following element's back-link or the chain's tail record.  */
      reg_next_eqv[new] = reg_next_eqv[lastr];
      if (reg_next_eqv[lastr] >= 0)
	reg_prev_eqv[reg_next_eqv[lastr]] = new;
      else
	qty_last_reg[q] = new;
      reg_next_eqv[lastr] = new;
      reg_prev_eqv[new] = lastr;
    }
}
|
||
|
||
/* Discard the records of what is in register REG:
   unlink REG from its quantity's equivalence chain and give it
   a quantity number equal to its own reg number, which is the
   "invalid" state meaning nothing is known about its contents.  */

static void
reg_invalidate (reg)
     register int reg;
{
  register int n = reg_next_eqv[reg];
  register int p = reg_prev_eqv[reg];
  register int q = reg_qty[reg];

  /* Any hash-table expression mentioning REG is now stale;
     bumping reg_tick makes reg_in_table comparisons detect that.  */
  reg_tick[reg]++;

  if (q == reg)
    {
      /* Save time if already invalid */
      /* It shouldn't be linked to anything if it's invalid.  */
      if (reg_prev_eqv[q] != -1)
	abort ();
      if (reg_next_eqv[q] != -1)
	abort ();
      return;
    }

  /* Unlink REG from quantity Q's chain, updating the chain's
     recorded first/last reg if REG was at either end.  */
  if (n != -1)
    reg_prev_eqv[n] = p;
  else
    qty_last_reg[q] = p;
  if (p != -1)
    reg_next_eqv[p] = n;
  else
    qty_first_reg[q] = n;

  /* Make REG a one-element chain of its own quantity:
     the canonical "invalid" state tested for above.  */
  reg_qty[reg] = reg;
  qty_first_reg[reg] = reg;
  qty_last_reg[reg] = reg;
  reg_next_eqv[reg] = -1;
  reg_prev_eqv[reg] = -1;
}
|
||
|
||
/* Remove any invalid expressions from the hash table
|
||
that refer to any of the registers contained in expression X.
|
||
|
||
Make sure that newly inserted references to those registers
|
||
as subexpressions will be considered valid.
|
||
|
||
mention_regs is not called when a register itself
|
||
is being stored in the table. */
|
||
|
||
static void
|
||
mention_regs (x)
|
||
rtx x;
|
||
{
|
||
register enum rtx_code code;
|
||
register int i, j;
|
||
register char *fmt;
|
||
|
||
if (x == 0)
|
||
return;
|
||
|
||
code = GET_CODE (x);
|
||
if (code == REG)
|
||
{
|
||
register int regno = REGNO (x);
|
||
reg_rtx[regno] = x;
|
||
|
||
if (reg_in_table[regno] >= 0 && reg_in_table[regno] != reg_tick[regno])
|
||
remove_invalid_refs (regno);
|
||
|
||
reg_in_table[regno] = reg_tick[regno];
|
||
|
||
return;
|
||
}
|
||
|
||
fmt = GET_RTX_FORMAT (code);
|
||
for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
|
||
if (fmt[i] == 'e')
|
||
mention_regs (XEXP (x, i));
|
||
else if (fmt[i] == 'E')
|
||
for (j = 0; j < XVECLEN (x, i); j++)
|
||
mention_regs (XVECEXP (x, i, j));
|
||
}
|
||
|
||
/* Update the register quantities for inserting X into the hash table
   with a value equivalent to CLASSP.
   (If CLASSP is not a REG or a SUBREG, it is irrelevant.)
   If MODIFIED is nonzero, X is a destination; it is being modified.
   Note that reg_invalidate should be called on a register
   before insert_regs is done on that register with MODIFIED != 0.

   Nonzero value means that elements of reg_qty have changed
   so X's hash code may be different.  */

static int
insert_regs (x, classp, modified)
     rtx x;
     struct table_elt *classp;
     int modified;
{
  if (GET_CODE (x) == REG)
    {
      register int regno = REGNO (x);
      reg_rtx[regno] = x;
      /* A reg being stored into, or one still in the "invalid" state
	 (qty number == its own reg number), needs a quantity:
	 either join CLASSP's quantity or get a brand-new one.  */
      if (modified || reg_qty[regno] == regno)
	{
	  if (classp && GET_CODE (classp->exp) == REG)
	    {
	      make_regs_eqv (regno, REGNO (classp->exp));
	      /* Make sure reg_rtx is set up even for regs
		 not explicitly set (such as function value).  */
	      reg_rtx[REGNO (classp->exp)] = classp->exp;
	    }
	  else
	    make_new_qty (regno);
	  return 1;
	}
    }
  /* Copying a subreg into a subreg makes the regs equivalent,
     but only if the entire regs' mode is within one word.
     Copying one reg of a DImode into one reg of another DImode
     does not make them equivalent.  */
  else if (GET_CODE (x) == SUBREG
	   && GET_CODE (SUBREG_REG (x)) == REG
	   && GET_MODE_SIZE (GET_MODE (SUBREG_REG (x))) <= UNITS_PER_WORD
	   && (modified
	       || reg_qty[REGNO (SUBREG_REG (x))] == REGNO (SUBREG_REG (x))))
    {
      /* The equivalence applies to the underlying regs,
	 and only when both subregs cover regs of the same mode.  */
      if (classp && GET_CODE (classp->exp) == SUBREG
	  && GET_CODE (SUBREG_REG (classp->exp)) == REG
	  && GET_MODE (SUBREG_REG (classp->exp)) == GET_MODE (SUBREG_REG (x)))
	{
	  int oregno = REGNO (SUBREG_REG (classp->exp));
	  make_regs_eqv (REGNO (SUBREG_REG (x)), oregno);
	  /* Make sure reg_rtx is set up even for regs
	     not explicitly set (such as function value).  */
	  reg_rtx[oregno] = SUBREG_REG (classp->exp);
	}
      else
	make_new_qty (REGNO (SUBREG_REG (x)));
      return 1;
    }
  /* Anything else: no qty changes, just note the regs used within it.  */
  else
    mention_regs (x);
  return 0;
}
|
||
|
||
/* Look in or update the hash table. */
|
||
|
||
/* Put the element ELT on the list of free elements. */
|
||
|
||
static void
|
||
free_element (elt)
|
||
struct table_elt *elt;
|
||
{
|
||
elt->next_same_hash = free_element_chain;
|
||
free_element_chain = elt;
|
||
}
|
||
|
||
/* Return an element that is free for use. */
|
||
|
||
static struct table_elt *
|
||
get_element ()
|
||
{
|
||
struct table_elt *elt = free_element_chain;
|
||
if (elt)
|
||
{
|
||
free_element_chain = elt->next_same_hash;
|
||
return elt;
|
||
}
|
||
n_elements_made++;
|
||
return (struct table_elt *) oballoc (sizeof (struct table_elt));
|
||
}
|
||
|
||
/* Remove table element ELT from use in the table.
   HASH is its hash code, made using the HASH macro.
   It's an argument because often that is known in advance
   and we save much time not recomputing it.  */

static void
remove (elt, hash)
     register struct table_elt *elt;
     int hash;
{
  if (elt == 0)
    return;

  /* Mark this element as removed.  See cse_insn.  */
  elt->first_same_value = 0;

  /* Remove the table element from its equivalence class.  */

  {
    register struct table_elt *prev = elt->prev_same_value;
    register struct table_elt *next = elt->next_same_value;

    if (next) next->prev_same_value = prev;

    if (prev)
      prev->next_same_value = next;
    else
      {
	/* ELT was the head of its class: the following element (if any)
	   becomes the new head, and every remaining member's
	   first_same_value pointer must be redirected to it.  */
	register struct table_elt *newfirst = next;
	while (next)
	  {
	    next->first_same_value = newfirst;
	    next = next->next_same_value;
	  }
      }
  }

  /* Remove the table element from its hash bucket.  */

  {
    register struct table_elt *prev = elt->prev_same_hash;
    register struct table_elt *next = elt->next_same_hash;

    if (next) next->prev_same_hash = prev;

    if (prev)
      prev->next_same_hash = next;
    else
      /* ELT was first in the bucket; the bucket now starts at NEXT.  */
      table[hash] = next;
  }

  /* Remove the table element from its related-value circular chain.  */

  if (elt->related_value != 0 && elt->related_value != elt)
    {
      /* Walk around the circular chain to find ELT's predecessor,
	 then splice ELT out.  If that leaves a one-element chain,
	 store 0 to mark the chain as no longer useful.  */
      register struct table_elt *p = elt->related_value;
      while (p->related_value != elt)
	p = p->related_value;
      p->related_value = elt->related_value;
      if (p->related_value == p)
	p->related_value = 0;
    }

  free_element (elt);
}
|
||
|
||
/* Look up X in the hash table and return its table element,
|
||
or 0 if X is not in the table.
|
||
|
||
MODE is the machine-mode of X, or if X is an integer constant
|
||
with VOIDmode then MODE is the mode with which X will be used.
|
||
|
||
Here we are satisfied to find an expression whose tree structure
|
||
looks like X. */
|
||
|
||
static struct table_elt *
|
||
lookup (x, hash, mode)
|
||
rtx x;
|
||
int hash;
|
||
enum machine_mode mode;
|
||
{
|
||
register struct table_elt *p;
|
||
|
||
for (p = table[hash]; p; p = p->next_same_hash)
|
||
if (mode == p->mode && (x == p->exp || exp_equiv_p (x, p->exp, 1)))
|
||
return p;
|
||
|
||
return 0;
|
||
}
|
||
|
||
/* Like `lookup' but don't care whether the table element uses invalid regs.
|
||
Also ignore discrepancies in the machine mode of a register. */
|
||
|
||
static struct table_elt *
|
||
lookup_for_remove (x, hash, mode)
|
||
rtx x;
|
||
int hash;
|
||
enum machine_mode mode;
|
||
{
|
||
register struct table_elt *p;
|
||
|
||
if (GET_CODE (x) == REG)
|
||
{
|
||
int regno = REGNO (x);
|
||
/* Don't check the machine mode when comparing registers;
|
||
invalidating (REG:SI 0) also invalidates (REG:DF 0). */
|
||
for (p = table[hash]; p; p = p->next_same_hash)
|
||
if (GET_CODE (p->exp) == REG
|
||
&& REGNO (p->exp) == regno)
|
||
return p;
|
||
}
|
||
else
|
||
{
|
||
for (p = table[hash]; p; p = p->next_same_hash)
|
||
if (mode == p->mode && (x == p->exp || exp_equiv_p (x, p->exp, 0)))
|
||
return p;
|
||
}
|
||
|
||
return 0;
|
||
}
|
||
|
||
/* Look for an expression equivalent to X and with code CODE.
   If one is found, return that expression.  */

static rtx
lookup_as_function (x, code)
     rtx x;
     enum rtx_code code;
{
  register struct table_elt *p = lookup (x, safe_hash (x, 0) % NBUCKETS,
					 GET_MODE (x));
  if (p == 0)
    return 0;

  /* Scan X's whole equivalence class for an element whose code is CODE.  */
  for (p = p->first_same_value; p; p = p->next_same_value)
    {
      if (GET_CODE (p->exp) == code
	  /* Make sure this is a valid entry in the table.
	     (NOTE(review): the operand is compared against itself;
	     presumably exp_equiv_p's third argument of 1 makes this a
	     check that the regs the operand mentions are still valid
	     -- confirm against exp_equiv_p's definition.)  */
	  && (exp_equiv_p (XEXP (p->exp, 0), XEXP (p->exp, 0), 1)))
	return p->exp;
    }

  return 0;
}
|
||
|
||
/* Insert X in the hash table, assuming HASH is its hash code
|
||
and CLASSP is the current first element of the class it should go in
|
||
(or 0 if a new class should be made).
|
||
It is inserted at the proper position to keep the class in
|
||
the order cheapest first.
|
||
|
||
MODE is the machine-mode of X, or if X is an integer constant
|
||
with VOIDmode then MODE is the mode with which X will be used.
|
||
|
||
For elements of equal cheapness, the most recent one
|
||
goes in front, except that the first element in the list
|
||
remains first unless a cheaper element is added.
|
||
|
||
The in_memory field in the hash table element is set to 0.
|
||
The caller must set it nonzero if appropriate.
|
||
|
||
You should call insert_regs (X, CLASSP, MODIFY) before calling here,
|
||
and if insert_regs returns a nonzero value
|
||
you must then recompute its hash code before calling here.
|
||
|
||
If necessary, update table showing constant values of quantities. */
|
||
|
||
#define CHEAPER(X,Y) \
|
||
(((X)->cost < (Y)->cost) || \
|
||
((X)->cost == (Y)->cost \
|
||
&& GET_CODE ((X)->exp) == REG && GET_CODE ((Y)->exp) == REG \
|
||
&& (uid_cuid[regno_last_uid[REGNO ((X)->exp)]] > cse_basic_block_end \
|
||
|| uid_cuid[regno_first_uid[REGNO ((X)->exp)]] < cse_basic_block_start) \
|
||
&& (uid_cuid[regno_last_uid[REGNO ((X)->exp)]] \
|
||
> uid_cuid[regno_last_uid[REGNO ((Y)->exp)]])))
|
||
|
||
/* Insert X into the hash table in bucket HASH, linking it into the
   value class headed by CLASSP (or making a new singleton class if
   CLASSP is 0).  MODE is X's machine mode (or the intended mode for a
   VOIDmode integer constant).  The class is kept ordered cheapest-first
   per the CHEAPER macro.  Also records constant equivalences of
   register quantities in qty_const / qty_const_insn, and links CONST
   expressions that share a symbolic term onto a circular
   `related_value' chain.  Returns the new table element; its in_memory
   field is left 0 for the caller to set.  */
static struct table_elt *
insert (x, classp, hash, mode)
     register rtx x;
     register struct table_elt *classp;
     int hash;
     enum machine_mode mode;
{
  register struct table_elt *elt;

  /* Put an element for X into the right hash bucket.  */

  elt = get_element ();
  elt->exp = x;
  /* Costs are scaled by 2 so the pseudo-reg discount below can be
     a half-step: pseudos sort ahead of equally-priced hard regs.  */
  elt->cost = rtx_cost (x) * 2;
  /* Make pseudo regs a little cheaper than hard regs.  */
  if (GET_CODE (x) == REG && REGNO (x) >= FIRST_PSEUDO_REGISTER)
    elt->cost -= 1;
  elt->next_same_value = 0;
  elt->prev_same_value = 0;
  /* Push ELT on the front of its hash bucket's chain.  */
  elt->next_same_hash = table[hash];
  elt->prev_same_hash = 0;
  elt->related_value = 0;
  elt->in_memory = 0;
  elt->equivalence_only = 0;
  elt->mode = mode;
  if (table[hash])
    table[hash]->prev_same_hash = elt;
  table[hash] = elt;

  /* Put it into the proper value-class.  */
  if (classp)
    {
      if (CHEAPER (elt, classp))
	/** Insert at the head of the class */
	{
	  register struct table_elt *p;
	  elt->next_same_value = classp;
	  classp->prev_same_value = elt;
	  elt->first_same_value = elt;

	  /* Every member of the class must point at the new head.  */
	  for (p = classp; p; p = p->next_same_value)
	    p->first_same_value = elt;
	}
      else
	{
	  /* Insert not at head of the class.  */
	  /* Put it after the last element cheaper than X.  */
	  register struct table_elt *p, *next;
	  for (p = classp; (next = p->next_same_value) && CHEAPER (next, elt);
	       p = next);
	  /* Put it after P and before NEXT.  */
	  elt->next_same_value = next;
	  if (next)
	    next->prev_same_value = elt;
	  elt->prev_same_value = p;
	  p->next_same_value = elt;
	  elt->first_same_value = classp;
	}
    }
  else
    /* No existing class: ELT is a class of one.  */
    elt->first_same_value = elt;

  /* If inserting a constant into a class headed by a register,
     record that the register's quantity now has a known constant.  */
  if ((CONSTANT_P (x) || GET_CODE (x) == CONST_DOUBLE || FIXED_BASE_PLUS_P (x))
      && GET_CODE (elt->first_same_value->exp) == REG)
    {
      qty_const[reg_qty[REGNO (elt->first_same_value->exp)]] = x;
      qty_const_insn[reg_qty[REGNO (elt->first_same_value->exp)]] = this_insn;
    }

  /* If inserting a register, pick up any constant already in its class.
     A constant can sit either right after the new head (if the register
     displaced it) or at the head itself.  */
  if (GET_CODE (x) == REG)
    {
      if (elt->next_same_value != 0
	  && (CONSTANT_P (elt->next_same_value->exp)
	      || GET_CODE (elt->next_same_value->exp) == CONST_DOUBLE
	      || FIXED_BASE_PLUS_P (elt->next_same_value->exp)))
	{
	  qty_const[reg_qty[REGNO (x)]] = elt->next_same_value->exp;
	  qty_const_insn[reg_qty[REGNO (x)]] = this_insn;
	}
      if (CONSTANT_P (elt->first_same_value->exp)
	  || GET_CODE (elt->first_same_value->exp) == CONST_DOUBLE
	  || FIXED_BASE_PLUS_P (elt->first_same_value->exp))
	{
	  qty_const[reg_qty[REGNO (x)]] = elt->first_same_value->exp;
	  qty_const_insn[reg_qty[REGNO (x)]] = this_insn;
	}
    }

  /* If this is a constant with symbolic value,
     and it has a term with an explicit integer value,
     link it up with related expressions.  */
  if (GET_CODE (x) == CONST)
    {
      rtx subexp = get_related_value (x);
      int subhash;
      struct table_elt *subelt, *subelt_prev;

      if (subexp != 0)
	{
	  /* Get the integer-free subexpression in the hash table.  */
	  subhash = safe_hash (subexp, mode) % NBUCKETS;
	  subelt = lookup (subexp, subhash, mode);
	  if (subelt == 0)
	    subelt = insert (subexp, 0, subhash, mode);
	  /* Initialize SUBELT's circular chain if it has none.  */
	  if (subelt->related_value == 0)
	    subelt->related_value = subelt;
	  /* Find the element in the circular chain that precedes SUBELT.  */
	  subelt_prev = subelt;
	  while (subelt_prev->related_value != subelt)
	    subelt_prev = subelt_prev->related_value;
	  /* Put new ELT into SUBELT's circular chain just before SUBELT.
	     This way the element that follows SUBELT is the oldest one.  */
	  elt->related_value = subelt_prev->related_value;
	  subelt_prev->related_value = elt;
	}
    }

  return elt;
}
|
||
|
||
/* Remove from the hash table, or mark as invalid,
|
||
all expressions whose values could be altered by storing in X.
|
||
X is a register, a subreg, or a memory reference with nonvarying address
|
||
(because, when a memory reference with a varying address is stored in,
|
||
all memory references are removed by invalidate_memory
|
||
so specific invalidation is superfluous).
|
||
|
||
A nonvarying address may be just a register or just
|
||
a symbol reference, or it may be either of those plus
|
||
a numeric offset. */
|
||
|
||
/* Remove from the hash table, or mark as invalid, all expressions whose
   values could be altered by storing in X.  X must be a REG, a SUBREG of
   a REG, or a MEM with a nonvarying address (stores through varying
   addresses are handled wholesale by invalidate_memory).  */
static void
invalidate (x)
     rtx x;
{
  register int i;
  register struct table_elt *p;
  register rtx base;
  register int start, end;

  /* If X is a register, dependencies on its contents
     are recorded through the qty number mechanism.
     Just change the qty number of the register,
     mark it as invalid for expressions that refer to it,
     and remove it itself.  */

  if (GET_CODE (x) == REG)
    {
      register int hash = HASH (x, 0);
      reg_invalidate (REGNO (x));
      remove (lookup_for_remove (x, hash, GET_MODE (x)), hash);
      return;
    }

  /* A store into a SUBREG invalidates the whole underlying register.  */
  if (GET_CODE (x) == SUBREG)
    {
      if (GET_CODE (SUBREG_REG (x)) != REG)
	abort ();
      invalidate (SUBREG_REG (x));
      return;
    }

  /* X is not a register; it must be a memory reference with
     a nonvarying address.  Remove all hash table elements
     that refer to overlapping pieces of memory.  */

  if (GET_CODE (x) != MEM)
    abort ();
  base = XEXP (x, 0);
  start = 0;

  /* Registers with nonvarying addresses usually have constant equivalents;
     but the frame pointer register is also possible.  */
  if (GET_CODE (base) == REG
      && qty_const[reg_qty[REGNO (base)]] != 0)
    base = qty_const[reg_qty[REGNO (base)]];

  /* Split the address into BASE plus the byte interval [START, END).  */
  if (GET_CODE (base) == CONST)
    base = XEXP (base, 0);
  if (GET_CODE (base) == PLUS
      && GET_CODE (XEXP (base, 1)) == CONST_INT)
    {
      start = INTVAL (XEXP (base, 1));
      base = XEXP (base, 0);
    }

  end = start + GET_MODE_SIZE (GET_MODE (x));
  /* Scan every bucket; remove any element that mentions memory
     overlapping [START, END) from BASE.  NEXT is fetched before
     remove() unlinks P.  */
  for (i = 0; i < NBUCKETS; i++)
    {
      register struct table_elt *next;
      for (p = table[i]; p; p = next)
	{
	  next = p->next_same_hash;
	  if (refers_to_mem_p (p->exp, base, start, end))
	    remove (p, i);
	}
    }
}
|
||
|
||
/* Remove all expressions that refer to register REGNO,
|
||
since they are already invalid, and we are about to
|
||
mark that register valid again and don't want the old
|
||
expressions to reappear as valid. */
|
||
|
||
static void
|
||
remove_invalid_refs (regno)
|
||
int regno;
|
||
{
|
||
register int i;
|
||
register struct table_elt *p, *next;
|
||
register rtx x = reg_rtx[regno];
|
||
|
||
for (i = 0; i < NBUCKETS; i++)
|
||
for (p = table[i]; p; p = next)
|
||
{
|
||
next = p->next_same_hash;
|
||
if (GET_CODE (p->exp) != REG && refers_to_p (p->exp, x))
|
||
remove (p, i);
|
||
}
|
||
}
|
||
|
||
/* Remove from the hash table all expressions that reference memory,
|
||
or some of them as specified by *WRITES. */
|
||
|
||
static void
|
||
invalidate_memory (writes)
|
||
struct write_data *writes;
|
||
{
|
||
register int i;
|
||
register struct table_elt *p, *next;
|
||
int all = writes->all;
|
||
int nonscalar = writes->nonscalar;
|
||
|
||
for (i = 0; i < NBUCKETS; i++)
|
||
for (p = table[i]; p; p = next)
|
||
{
|
||
next = p->next_same_hash;
|
||
if (p->in_memory
|
||
&& (all
|
||
|| (nonscalar && p->in_struct)
|
||
|| cse_rtx_addr_varies_p (p->exp)))
|
||
remove (p, i);
|
||
}
|
||
}
|
||
|
||
/* Return the value of the integer term in X, if one is apparent;
|
||
otherwise return 0.
|
||
We do not check extremely carefully for the presence of integer terms
|
||
but rather consider only the cases that `insert' notices
|
||
for the `related_value' field. */
|
||
|
||
static int
|
||
get_integer_term (x)
|
||
rtx x;
|
||
{
|
||
if (GET_CODE (x) == CONST)
|
||
x = XEXP (x, 0);
|
||
|
||
if (GET_CODE (x) == MINUS
|
||
&& GET_CODE (XEXP (x, 1)) == CONST_INT)
|
||
return - INTVAL (XEXP (x, 1));
|
||
if (GET_CODE (x) != PLUS)
|
||
return 0;
|
||
if (GET_CODE (XEXP (x, 0)) == CONST_INT)
|
||
return INTVAL (XEXP (x, 0));
|
||
if (GET_CODE (XEXP (x, 1)) == CONST_INT)
|
||
return INTVAL (XEXP (x, 1));
|
||
return 0;
|
||
}
|
||
|
||
static rtx
|
||
get_related_value (x)
|
||
rtx x;
|
||
{
|
||
if (GET_CODE (x) != CONST)
|
||
return 0;
|
||
x = XEXP (x, 0);
|
||
if (GET_CODE (x) == PLUS)
|
||
{
|
||
if (GET_CODE (XEXP (x, 0)) == CONST_INT)
|
||
return XEXP (x, 1);
|
||
if (GET_CODE (XEXP (x, 1)) == CONST_INT)
|
||
return XEXP (x, 0);
|
||
}
|
||
else if (GET_CODE (x) == MINUS
|
||
&& GET_CODE (XEXP (x, 1)) == CONST_INT)
|
||
return XEXP (x, 0);
|
||
return 0;
|
||
}
|
||
|
||
/* Given an expression X of type CONST,
|
||
and ELT which is its table entry (or 0 if it
|
||
is not in the hash table),
|
||
return an alternate expression for X as a register plus integer.
|
||
If none can be found or it would not be a valid address, return 0. */
|
||
|
||
/* Given an expression X of type CONST, and ELT which is its table entry
   (or 0 if it is not in the hash table), return an alternate expression
   for X as a register plus integer.  If none can be found or it would
   not be a valid address, return 0.  */
static rtx
use_related_value (x, elt)
     rtx x;
     struct table_elt *elt;
{
  register struct table_elt *relt = 0;
  register struct table_elt *p;
  int offset;
  rtx addr;

  /* First, is there anything related known?
     If we have a table element, we can tell from that.
     Otherwise, must look it up.  */

  if (elt != 0 && elt->related_value != 0)
    relt = elt;
  else if (elt == 0 && GET_CODE (x) == CONST)
    {
      rtx subexp = get_related_value (x);
      if (subexp != 0)
	relt = lookup (subexp,
		       safe_hash (subexp, GET_MODE (subexp)) % NBUCKETS,
		       GET_MODE (subexp));
    }

  if (relt == 0)
    return 0;

  /* Search all related table entries for one that has an
     equivalent register.  Walk the circular `related_value' chain
     starting at RELT.  */

  p = relt;
  while (1)
    {
      if (p->first_same_value != 0
	  && GET_CODE (p->first_same_value->exp) == REG)
	break;
      p = p->related_value;

      /* We went all the way around, so there is nothing to be found.
	 Return failure.  */
      if (p == relt)
	return 0;
      /* Perhaps RELT was in the table for some other reason and
	 it has no related values recorded.  */
      if (p == 0)
	return 0;
    }

  /* P's expression differs from X only by an integer term.  A zero
     difference would mean X equals P->exp, which should have been
     found directly — treat it as a consistency failure.  */
  offset = (get_integer_term (x) - get_integer_term (p->exp));
  if (offset == 0)
    abort ();

  /* Use P's register equivalent plus the offset, but only if the
     result is a valid memory address.  */
  addr = plus_constant (p->first_same_value->exp, offset);
  if (memory_address_p (QImode, addr))
    return addr;
  return 0;
}
|
||
|
||
/* Hash an rtx. We are careful to make sure the value is never negative.
|
||
Equivalent registers hash identically.
|
||
MODE is used in hashing for CONST_INTs only;
|
||
otherwise the mode of X is used.
|
||
|
||
Store 1 in do_not_record if any subexpression is volatile.
|
||
|
||
Store 1 in hash_arg_in_memory if X contains a MEM rtx
|
||
which does not have the RTX_UNCHANGING_P bit set.
|
||
In this case, also store 1 in hash_arg_in_struct
|
||
if there is a MEM rtx which has the MEM_IN_STRUCT_P bit set.
|
||
|
||
Note that cse_insn knows that the hash code of a MEM expression
|
||
is just (int) MEM plus the hash code of the address.
|
||
It also knows it can use HASHREG to get the hash code of (REG n). */
|
||
|
||
/* Number of low-order bits kept when folding wide values into a hash.  */
#define HASHBITS 16

/* Fast hash for a (REG n) rtx: based on the register's quantity number
   so that equivalent registers hash identically (matches the REG case
   of canon_hash, reduced mod NBUCKETS).  */
#define HASHREG(RTX) \
 ((((int) REG << 7) + reg_qty[REGNO (RTX)]) % NBUCKETS)
|
||
|
||
/* Hash rtx X, treating equivalent registers identically (via reg_qty).
   MODE is folded in for CONST_INTs only.  Side effects: sets the
   globals do_not_record (volatile or unhashable subexpression found),
   hash_arg_in_memory, and hash_arg_in_struct — callers that must not
   see these should use safe_hash instead.  */
static int
canon_hash (x, mode)
     rtx x;
     enum machine_mode mode;
{
  register int i, j;
  register int hash = 0;
  register enum rtx_code code;
  register char *fmt;

  /* repeat is used to turn tail-recursion into iteration.  */
 repeat:
  if (x == 0)
    return hash;

  code = GET_CODE (x);
  switch (code)
    {
    case REG:
      {
	/* We do not invalidate anything on pushing or popping
	   because they cannot change anything but the stack pointer;
	   but that means we must consider the stack pointer volatile
	   since it can be changed "mysteriously".  */

	register int regno = REGNO (x);
	if (regno == STACK_POINTER_REGNUM
	    || (regno < FIRST_PSEUDO_REGISTER && global_regs[regno]))
	  {
	    do_not_record = 1;
	    return 0;
	  }
	/* Hash on the quantity number, not the register number,
	   so equivalent registers hash alike.  */
	return hash + ((int) REG << 7) + reg_qty[regno];
      }

    case CONST_INT:
      hash += ((int) mode + ((int) CONST_INT << 7)
	       + INTVAL (x) + (INTVAL (x) >> HASHBITS));
      /* Mask to HASHBITS bits to keep the result nonnegative.  */
      return ((1 << HASHBITS) - 1) & hash;

    case CONST_DOUBLE:
      /* This is like the general case, except that it only counts
	 the first two elements.  */
      hash += (int) code + (int) GET_MODE (x);
      {
	int i;
	for (i = 2; i < GET_RTX_LENGTH (CONST_DOUBLE); i++)
	  {
	    int tem = XINT (x, i);
	    hash += ((1 << HASHBITS) - 1) & (tem + (tem >> HASHBITS));
	  }
      }
      return hash;

      /* Assume there is only one rtx object for any given label.  */
    case LABEL_REF:
      /* Use `and' to ensure a positive number.  */
      return (hash + ((int) LABEL_REF << 7)
	      + ((int) XEXP (x, 0) & ((1 << HASHBITS) - 1)));

    case SYMBOL_REF:
      /* Hash on the pointer value, as with labels.  */
      return (hash + ((int) SYMBOL_REF << 7)
	      + ((int) XEXP (x, 0) & ((1 << HASHBITS) - 1)));

    case MEM:
      if (MEM_VOLATILE_P (x))
	{
	  do_not_record = 1;
	  return 0;
	}
      if (! RTX_UNCHANGING_P (x))
	{
	  hash_arg_in_memory = 1;
	  if (MEM_IN_STRUCT_P (x)) hash_arg_in_struct = 1;
	}
      /* Now that we have already found this special case,
	 might as well speed it up as much as possible.
	 (cse_insn relies on a MEM hashing to (int) MEM plus
	 the hash of its address.)  */
      hash += (int) MEM;
      x = XEXP (x, 0);
      goto repeat;

    case PRE_DEC:
    case PRE_INC:
    case POST_DEC:
    case POST_INC:
    case PC:
    case CC0:
    case CALL:
      /* Auto-increment addresses, condition codes and calls
	 cannot be usefully recorded.  */
      do_not_record = 1;
      return 0;

    case ASM_OPERANDS:
      if (MEM_VOLATILE_P (x))
	{
	  do_not_record = 1;
	  return 0;
	}
      /* Non-volatile asm falls through to the generic case.  */
    }

  /* Generic case: combine the code, the mode, and every operand
     according to the rtx format string.  */
  i = GET_RTX_LENGTH (code) - 1;
  hash += (int) code + (int) GET_MODE (x);
  fmt = GET_RTX_FORMAT (code);
  for (; i >= 0; i--)
    {
      if (fmt[i] == 'e')
	{
	  /* If we are about to do the last recursive call
	     needed at this level, change it into iteration.
	     This function is called enough to be worth it.  */
	  if (i == 0)
	    {
	      x = XEXP (x, 0);
	      goto repeat;
	    }
	  hash += canon_hash (XEXP (x, i), 0);
	}
      else if (fmt[i] == 'E')
	for (j = 0; j < XVECLEN (x, i); j++)
	  hash += canon_hash (XVECEXP (x, i, j), 0);
      else if (fmt[i] == 's')
	{
	  /* Fold each character of the string into the hash.  */
	  register char *p = XSTR (x, i);
	  if (p)
	    while (*p)
	      {
		register int tem = *p++;
		hash += ((1 << HASHBITS) - 1) & (tem + (tem >> HASHBITS));
	      }
	}
      else
	{
	  register int tem = XINT (x, i);
	  hash += ((1 << HASHBITS) - 1) & (tem + (tem >> HASHBITS));
	}
    }
  return hash;
}
|
||
|
||
/* Like canon_hash but with no side effects. */
|
||
|
||
static int
|
||
safe_hash (x, mode)
|
||
rtx x;
|
||
enum machine_mode mode;
|
||
{
|
||
int save_do_not_record = do_not_record;
|
||
int save_hash_arg_in_memory = hash_arg_in_memory;
|
||
int save_hash_arg_in_struct = hash_arg_in_struct;
|
||
int hash = canon_hash (x, mode);
|
||
hash_arg_in_memory = save_hash_arg_in_memory;
|
||
hash_arg_in_struct = save_hash_arg_in_struct;
|
||
do_not_record = save_do_not_record;
|
||
return hash;
|
||
}
|
||
|
||
/* Return 1 iff X and Y would canonicalize into the same thing,
|
||
without actually constructing the canonicalization of either one.
|
||
If VALIDATE is nonzero,
|
||
we assume X is an expression being processed from the rtl
|
||
and Y was found in the hash table. We check register refs
|
||
in Y for being marked as valid. */
|
||
|
||
/* Return 1 iff X and Y would canonicalize into the same thing, without
   actually constructing the canonicalization of either one.  If
   VALIDATE is nonzero, X is an expression being processed from the rtl
   and Y was found in the hash table; register refs in Y are then
   checked for still being marked valid.  */
static int
exp_equiv_p (x, y, validate)
     rtx x, y;
     int validate;
{
  register int i;
  register enum rtx_code code;
  register char *fmt;

  /* Note: it is incorrect to assume an expression is equivalent to itself
     if VALIDATE is nonzero.  */
  if (x == y && !validate)
    return 1;
  if (x == 0 || y == 0)
    return x == y;
  code = GET_CODE (x);
  if (code != GET_CODE (y))
    return 0;

  switch (code)
    {
    case PC:
    case CC0:
      /* These are unique objects; pointer identity decides.  */
      return x == y;

    case CONST_INT:
      return XINT (x, 0) == XINT (y, 0);

    case LABEL_REF:
    case SYMBOL_REF:
      /* Labels and symbols are shared, so compare the contained
	 pointers directly.  */
      return XEXP (x, 0) == XEXP (y, 0);

    case REG:
      /* Registers are equivalent when they have the same quantity
	 number; under VALIDATE, Y's table entry must also still be
	 current (reg_in_table matches reg_tick).  */
      return (reg_qty[REGNO (x)] == reg_qty[REGNO (y)]
	      && (!validate
		  || reg_in_table[REGNO (y)] == reg_tick[REGNO (y)]));
    }

  /* (MULT:SI x y) and (MULT:HI x y) are NOT equivalent.  */

  if (GET_MODE (x) != GET_MODE (y))
    return 0;

  /* Compare the elements.  If any pair of corresponding elements
     fail to match, return 0 for the whole thing.  */

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
	{
	  if (! exp_equiv_p (XEXP (x, i), XEXP (y, i), validate))
	    return 0;
	}
      else if (fmt[i] == 'E')
	{
	  int j;
	  if (XVECLEN (x, i) != XVECLEN (y, i))
	    return 0;
	  for (j = 0; j < XVECLEN (x, i); j++)
	    if (! exp_equiv_p (XVECEXP (x, i, j), XVECEXP (y, i, j), validate))
	      return 0;
	}
      else if (fmt[i] == 's')
	{
	  if (strcmp (XSTR (x, i), XSTR (y, i)))
	    return 0;
	}
      else
	{
	  if (XINT (x, i) != XINT (y, i))
	    return 0;
	}
    }
  return 1;
}
|
||
|
||
/* Return 1 iff any subexpression of X matches Y.
|
||
Here we do not require that X or Y be valid (for registers referred to)
|
||
for being in the hash table. */
|
||
|
||
int
|
||
refers_to_p (x, y)
|
||
rtx x, y;
|
||
{
|
||
register int i;
|
||
register enum rtx_code code;
|
||
register char *fmt;
|
||
|
||
repeat:
|
||
if (x == y)
|
||
return 1;
|
||
if (x == 0 || y == 0)
|
||
return 0;
|
||
|
||
code = GET_CODE (x);
|
||
/* If X as a whole has the same code as Y, they may match.
|
||
If so, return 1. */
|
||
if (code == GET_CODE (y))
|
||
{
|
||
if (exp_equiv_p (x, y, 0))
|
||
return 1;
|
||
}
|
||
|
||
/* X does not match, so try its subexpressions. */
|
||
|
||
fmt = GET_RTX_FORMAT (code);
|
||
for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
|
||
if (fmt[i] == 'e')
|
||
{
|
||
if (i == 0)
|
||
{
|
||
x = XEXP (x, 0);
|
||
goto repeat;
|
||
}
|
||
else
|
||
if (refers_to_p (XEXP (x, i), y))
|
||
return 1;
|
||
}
|
||
else if (fmt[i] == 'E')
|
||
{
|
||
int j;
|
||
for (j = 0; j < XVECLEN (x, i); j++)
|
||
if (refers_to_p (XVECEXP (x, i, j), y))
|
||
return 1;
|
||
}
|
||
|
||
return 0;
|
||
}
|
||
|
||
/* Return 1 iff any subexpression of X refers to memory
|
||
at an address of REG plus some offset
|
||
such that any of the bytes' offsets fall between START (inclusive)
|
||
and END (exclusive).
|
||
|
||
The value is undefined if X is a varying address.
|
||
This function is not used in such cases.
|
||
|
||
When used in the cse pass, `qty_const' is nonzero, and it is used
|
||
to treat an address that is a register with a known constant value
|
||
as if it were that constant value.
|
||
In the loop pass, `qty_const' is zero, so this is not done. */
|
||
|
||
/* Return 1 iff any subexpression of X refers to memory at an address of
   REG plus some offset such that any of the bytes' offsets fall between
   START (inclusive) and END (exclusive).

   The value is undefined if X is a varying address; this function is
   not used in such cases.

   When used in the cse pass, `qty_const' is nonzero, and it is used to
   treat an address that is a register with a known constant value as if
   it were that constant value.  In the loop pass, `qty_const' is zero,
   so this is not done.  */
int
refers_to_mem_p (x, reg, start, end)
     rtx x, reg;
     int start, end;
{
  register int i;
  register enum rtx_code code;
  register char *fmt;

  /* repeat turns the tail recursion on operand 0 into iteration.  */
 repeat:
  if (x == 0)
    return 0;

  code = GET_CODE (x);
  if (code == MEM)
    {
      register rtx addr = XEXP (x, 0);	/* Get the address.  */
      int myend;
      /* Substitute a known constant value for a register address.  */
      if (GET_CODE (addr) == REG
	  /* qty_const is 0 when outside the cse pass;
	     at such times, this info is not available.  */
	  && qty_const != 0
	  && qty_const[reg_qty[REGNO (addr)]] != 0)
	addr = qty_const[reg_qty[REGNO (addr)]];
      if (GET_CODE (addr) == CONST)
	addr = XEXP (addr, 0);

      /* If ADDR is BASE, or BASE plus an integer, put
	 the integer in I.  */
      if (addr == reg)
	i = 0;
      else if (GET_CODE (addr) == PLUS
	       && XEXP (addr, 0) == reg
	       && GET_CODE (XEXP (addr, 1)) == CONST_INT)
	i = INTVAL (XEXP (addr, 1));
      else
	/* Address not based on REG: no overlap detectable here.  */
	return 0;

      /* Overlap test: this MEM occupies bytes [I, MYEND).  */
      myend = i + GET_MODE_SIZE (GET_MODE (x));
      return myend > start && i < end;
    }

  /* X does not match, so try its subexpressions.  */

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    if (fmt[i] == 'e')
      {
	if (i == 0)
	  {
	    x = XEXP (x, 0);
	    goto repeat;
	  }
	else
	  if (refers_to_mem_p (XEXP (x, i), reg, start, end))
	    return 1;
      }
    else if (fmt[i] == 'E')
      {
	int j;
	for (j = 0; j < XVECLEN (x, i); j++)
	  if (refers_to_mem_p (XVECEXP (x, i, j), reg, start, end))
	    return 1;
      }

  return 0;
}
|
||
|
||
/* Nonzero if X refers to memory at a varying address;
|
||
except that a register which has at the moment a known constant value
|
||
isn't considered variable. */
|
||
|
||
static int
|
||
cse_rtx_addr_varies_p (x)
|
||
rtx x;
|
||
{
|
||
if (GET_CODE (x) == MEM
|
||
&& GET_CODE (XEXP (x, 0)) == REG
|
||
&& qty_const[reg_qty[REGNO (XEXP (x, 0))]] != 0)
|
||
return 0;
|
||
return rtx_addr_varies_p (x);
|
||
}
|
||
|
||
/* Canonicalize an expression:
|
||
replace each register reference inside it
|
||
with the "oldest" equivalent register. */
|
||
|
||
/* Canonicalize an expression: replace each register reference inside it
   with the "oldest" equivalent register.  Rewrites X in place (except
   that a REG leaf is replaced by returning a different rtx) and returns
   the result.  */
static rtx
canon_reg (x)
     rtx x;
{
  register int i;
  register enum rtx_code code;
  register char *fmt;

  if (x == 0)
    return x;

  code = GET_CODE (x);
  switch (code)
    {
    /* Leaves with no register references inside: return unchanged.  */
    case PC:
    case CC0:
    case CONST:
    case CONST_INT:
    case CONST_DOUBLE:
    case SYMBOL_REF:
    case LABEL_REF:
    case ADDR_VEC:
    case ADDR_DIFF_VEC:
      return x;

    case REG:
      {
	register rtx new;
	/* Never replace a hard reg, because hard regs can appear
	   in more than one machine mode, and we must preserve the mode
	   of each occurrence.  Also, some hard regs appear in
	   MEMs that are shared and mustn't be altered.  */
	if (REGNO (x) < FIRST_PSEUDO_REGISTER)
	  return x;
	/* The "oldest" equivalent is the first register recorded
	   for this quantity number.  */
	new = reg_rtx[qty_first_reg[reg_qty[REGNO (x)]]];
	return new ? new : x;
      }
    }

  /* Composite rtx: canonicalize each operand in place.  */
  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      register int j;

      if (fmt[i] == 'e')
	XEXP (x, i) = canon_reg (XEXP (x, i));
      else if (fmt[i] == 'E')
	for (j = 0; j < XVECLEN (x, i); j++)
	  XVECEXP (x, i, j) = canon_reg (XVECEXP (x, i, j));
    }

  return x;
}
|
||
|
||
/* If X is a nontrivial arithmetic operation on an argument
|
||
for which a constant value can be determined, return
|
||
the result of operating on that value, as a constant.
|
||
Otherwise, return X, possibly with one or more operands
|
||
modified by recursive calls to this function.
|
||
|
||
If X is a register whose contents are known, we do NOT
|
||
return those contents. This is because an instruction that
|
||
uses a register is usually faster than one that uses a constant.
|
||
|
||
COPYFLAG is nonzero for memory addresses and subexpressions thereof.
|
||
If COPYFLAG is nonzero, we avoid altering X itself
|
||
by creating new structure when necessary. In this case we
|
||
can risk creating invalid structure because it will be tested.
|
||
If COPYFLAG is zero, be careful not to substitute constants
|
||
into expressions that cannot be simplified. */
|
||
|
||
static rtx
|
||
fold_rtx (x, copyflag)
|
||
rtx x;
|
||
int copyflag;
|
||
{
|
||
register enum rtx_code code;
|
||
register char *fmt;
|
||
register int i, val;
|
||
rtx new = 0;
|
||
int copied = ! copyflag;
|
||
int width;
|
||
|
||
/* Constant equivalents of first three operands of X;
|
||
0 when no such equivalent is known. */
|
||
rtx const_arg0;
|
||
rtx const_arg1;
|
||
rtx const_arg2;
|
||
|
||
if (x == 0)
|
||
return x;
|
||
|
||
width = GET_MODE_BITSIZE (GET_MODE (x));
|
||
|
||
code = GET_CODE (x);
|
||
switch (code)
|
||
{
|
||
case CONST:
|
||
case CONST_INT:
|
||
case CONST_DOUBLE:
|
||
case SYMBOL_REF:
|
||
case LABEL_REF:
|
||
case PC:
|
||
case CC0:
|
||
case REG:
|
||
/* No use simplifying an EXPR_LIST
|
||
since they are used only for lists of args
|
||
in a function call's REG_EQUAL note. */
|
||
case EXPR_LIST:
|
||
return x;
|
||
|
||
/* We must be careful when folding a memory address
|
||
to avoid making it invalid. So fold nondestructively
|
||
and use the result only if it's valid. */
|
||
case MEM:
|
||
{
|
||
rtx newaddr = fold_rtx (XEXP (x, 0), 1);
|
||
/* Save time if no change was made. */
|
||
if (XEXP (x, 0) == newaddr)
|
||
return x;
|
||
|
||
if (! memory_address_p (GET_MODE (x), newaddr)
|
||
&& memory_address_p (GET_MODE (x), XEXP (x, 0)))
|
||
return x;
|
||
|
||
/* Don't replace a value with a more expensive one. */
|
||
if (rtx_cost (XEXP (x, 0)) < rtx_cost (newaddr))
|
||
return x;
|
||
|
||
if (copyflag)
|
||
return gen_rtx (MEM, GET_MODE (x), newaddr);
|
||
XEXP (x, 0) = newaddr;
|
||
return x;
|
||
}
|
||
}
|
||
|
||
const_arg0 = 0;
|
||
const_arg1 = 0;
|
||
const_arg2 = 0;
|
||
|
||
/* Try folding our operands.
|
||
Then see which ones have constant values known. */
|
||
|
||
fmt = GET_RTX_FORMAT (code);
|
||
for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
|
||
if (fmt[i] == 'e')
|
||
{
|
||
register rtx tem = fold_rtx (XEXP (x, i), copyflag);
|
||
|
||
/* If an operand has changed under folding, and we are not supposed to
|
||
alter the original structure, copy X if we haven't yet done so. */
|
||
if (! copied && tem != XEXP (x, i))
|
||
{
|
||
int j;
|
||
rtx new = rtx_alloc (code);
|
||
PUT_MODE (new, GET_MODE (x));
|
||
for (j = 0; j < GET_RTX_LENGTH (code); j++)
|
||
XINT (new, j) = XINT (x, j);
|
||
x = new;
|
||
copied = 1;
|
||
}
|
||
|
||
/* Install the possibly altered folded operand. */
|
||
XEXP (x, i) = tem;
|
||
|
||
/* For the first three operands, see if the operand
|
||
is constant or equivalent to a constant. */
|
||
if (i < 3)
|
||
{
|
||
rtx const_arg = equiv_constant (tem);
|
||
|
||
switch (i)
|
||
{
|
||
case 0:
|
||
const_arg0 = const_arg;
|
||
break;
|
||
case 1:
|
||
const_arg1 = const_arg;
|
||
break;
|
||
case 2:
|
||
const_arg2 = const_arg;
|
||
break;
|
||
}
|
||
}
|
||
}
|
||
else if (fmt[i] == 'E')
|
||
    /* Don't try to fold inside of a vector of expressions.
       Doing nothing is harmless.  */
    ;
|
||
|
||
/* If a commutative operation, place a constant integer as the second
|
||
operand unless the first operand is also a constant integer. Otherwise,
|
||
place any constant second unless the first operand is also a constant. */
|
||
|
||
switch (code)
|
||
{
|
||
case PLUS:
|
||
case MULT:
|
||
case UMULT:
|
||
case AND:
|
||
case IOR:
|
||
case XOR:
|
||
case NE:
|
||
case EQ:
|
||
if (const_arg0 && const_arg0 == XEXP (x, 0)
|
||
&& (! (const_arg1 && const_arg1 == XEXP (x, 1))
|
||
|| (GET_CODE (const_arg0) == CONST_INT
|
||
&& GET_CODE (const_arg1) != CONST_INT)))
|
||
{
|
||
register rtx tem;
|
||
|
||
if (! copied)
|
||
copied = 1, x = copy_rtx (x);
|
||
tem = XEXP (x, 0); XEXP (x, 0) = XEXP (x, 1); XEXP (x, 1) = tem;
|
||
tem = const_arg0; const_arg0 = const_arg1; const_arg1 = tem;
|
||
}
|
||
break;
|
||
}
|
||
|
||
/* Now decode the kind of rtx X is
|
||
and then return X (if nothing can be done)
|
||
or return a folded rtx
|
||
or store a value in VAL and drop through
|
||
(to return a CONST_INT for the integer VAL). */
|
||
|
||
if (GET_RTX_LENGTH (code) == 1)
|
||
{
|
||
if (const_arg0 == 0)
|
||
return x;
|
||
|
||
if (GET_CODE (const_arg0) == CONST_INT)
|
||
{
|
||
register int arg0 = INTVAL (const_arg0);
|
||
|
||
switch (GET_CODE (x))
|
||
{
|
||
case NOT:
|
||
val = ~ arg0;
|
||
break;
|
||
|
||
case NEG:
|
||
val = - arg0;
|
||
break;
|
||
|
||
case TRUNCATE:
|
||
val = arg0;
|
||
break;
|
||
|
||
case ZERO_EXTEND:
|
||
{
|
||
enum machine_mode mode = GET_MODE (XEXP (x, 0));
|
||
if (mode == VOIDmode)
|
||
return x;
|
||
if (GET_MODE_BITSIZE (mode) < HOST_BITS_PER_INT)
|
||
val = arg0 & ~((-1) << GET_MODE_BITSIZE (mode));
|
||
else
|
||
return x;
|
||
break;
|
||
}
|
||
|
||
case SIGN_EXTEND:
|
||
{
|
||
enum machine_mode mode = GET_MODE (XEXP (x, 0));
|
||
if (mode == VOIDmode)
|
||
return x;
|
||
if (GET_MODE_BITSIZE (mode) < HOST_BITS_PER_INT)
|
||
{
|
||
val = arg0 & ~((-1) << GET_MODE_BITSIZE (mode));
|
||
if (val & (1 << (GET_MODE_BITSIZE (mode) - 1)))
|
||
val -= 1 << GET_MODE_BITSIZE (mode);
|
||
}
|
||
else
|
||
return x;
|
||
break;
|
||
}
|
||
|
||
default:
|
||
return x;
|
||
}
|
||
}
|
||
#if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
|
||
else if (GET_CODE (const_arg0) == CONST_DOUBLE
|
||
&& GET_CODE (x) == NEG
|
||
&& GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
|
||
{
|
||
union real_extract u;
|
||
register REAL_VALUE_TYPE arg0;
|
||
bcopy (&CONST_DOUBLE_LOW (const_arg0), &u, sizeof u);
|
||
arg0 = u.d;
|
||
|
||
u.d = REAL_VALUE_NEGATE (arg0);
|
||
return immed_real_const_1 (u.d, GET_MODE (x));
|
||
}
|
||
#endif
|
||
else
|
||
return x;
|
||
}
|
||
else if (GET_RTX_LENGTH (code) == 2)
|
||
{
|
||
register int arg0, arg1, arg0s, arg1s;
|
||
int arithwidth = width;
|
||
|
||
/* If 1st arg is the condition codes, 2nd must be zero
|
||
and this must be a comparison.
|
||
Decode the info on how the previous insn set the cc0
|
||
and use that to deduce result of comparison. */
|
||
if (XEXP (x, 0) == cc0_rtx
|
||
|| GET_CODE (XEXP (x, 0)) == COMPARE)
|
||
{
|
||
if (XEXP (x, 0) == cc0_rtx)
|
||
arg0 = prev_insn_cc0;
|
||
else
|
||
arg0 = fold_cc0 (VOIDmode, XEXP (x, 0));
|
||
|
||
if (arg0 == 0
|
||
|| const_arg1 != const0_rtx
|
||
/* 0200 bit in arg0 means only zeroness is known,
|
||
and sign is not known. */
|
||
|| ((arg0 & 0200) != 0 && code != EQ && code != NE))
|
||
return x;
|
||
|
||
/* Extract either the signed or the unsigned digit from ARG0. */
|
||
if (code == LEU || code == LTU || code == GEU || code == GTU)
|
||
arg0 = arg0 & 7;
|
||
else
|
||
arg0 = (arg0 >> 3) & 7;
|
||
if (arg0 == 7) arg0 = -1;
|
||
|
||
switch (code)
|
||
{
|
||
case LE:
|
||
case LEU:
|
||
return (arg0 <= 0) ? const1_rtx : const0_rtx;
|
||
case LT:
|
||
case LTU:
|
||
return (arg0 < 0) ? const1_rtx : const0_rtx;
|
||
case GE:
|
||
case GEU:
|
||
return (arg0 >= 0) ? const1_rtx : const0_rtx;
|
||
case GT:
|
||
case GTU:
|
||
return (arg0 > 0) ? const1_rtx : const0_rtx;
|
||
case NE:
|
||
return (arg0 != 0) ? const1_rtx : const0_rtx;
|
||
case EQ:
|
||
return (arg0 == 0) ? const1_rtx : const0_rtx;
|
||
default:
|
||
abort ();
|
||
}
|
||
}
|
||
|
||
if (const_arg0 == 0 || const_arg1 == 0
|
||
|| GET_CODE (const_arg0) != CONST_INT
|
||
|| GET_CODE (const_arg1) != CONST_INT)
|
||
{
|
||
/* Even if we can't compute a constant result,
|
||
there are some cases worth simplifying. */
|
||
/* Note that we cannot rely on constant args to come last,
|
||
even for commutative operators,
|
||
because that happens only when the constant is explicit. */
|
||
switch (code)
|
||
{
|
||
case PLUS:
|
||
if (const_arg0 == const0_rtx
|
||
|| const_arg0 == fconst0_rtx
|
||
|| const_arg0 == dconst0_rtx)
|
||
return XEXP (x, 1);
|
||
if (const_arg1 == const0_rtx
|
||
|| const_arg1 == fconst0_rtx
|
||
|| const_arg1 == dconst0_rtx)
|
||
return XEXP (x, 0);
|
||
|
||
/* Handle both-operands-constant cases. */
|
||
if (const_arg0 != 0 && const_arg1 != 0
|
||
&& GET_CODE (const_arg0) != CONST_DOUBLE
|
||
&& GET_CODE (const_arg1) != CONST_DOUBLE
|
||
&& GET_MODE_CLASS (GET_MODE (x)) == MODE_INT)
|
||
{
|
||
if (GET_CODE (const_arg1) == CONST_INT)
|
||
new = plus_constant (const_arg0, INTVAL (const_arg1));
|
||
else
|
||
{
|
||
new = gen_rtx (PLUS, GET_MODE (x), const0_rtx, const0_rtx);
|
||
XEXP (new, 0) = const_arg0;
|
||
if (GET_CODE (const_arg0) == CONST)
|
||
XEXP (new, 0) = XEXP (const_arg0, 0);
|
||
XEXP (new, 1) = const_arg1;
|
||
if (GET_CODE (const_arg1) == CONST)
|
||
XEXP (new, 1) = XEXP (const_arg1, 0);
|
||
new = gen_rtx (CONST, GET_MODE (new), new);
|
||
}
|
||
}
|
||
else if (const_arg1 != 0
|
||
&& GET_CODE (const_arg1) == CONST_INT
|
||
&& GET_CODE (XEXP (x, 0)) == PLUS
|
||
&& (CONSTANT_P (XEXP (XEXP (x, 0), 0))
|
||
|| CONSTANT_P (XEXP (XEXP (x, 0), 1))))
|
||
/* constant + (variable + constant)
|
||
can result if an index register is made constant.
|
||
We simplify this by adding the constants.
|
||
If we did not, it would become an invalid address. */
|
||
new = plus_constant (XEXP (x, 0),
|
||
INTVAL (const_arg1));
|
||
break;
|
||
|
||
case COMPARE:
|
||
if (const_arg1 == const0_rtx)
|
||
return XEXP (x, 0);
|
||
|
||
if (XEXP (x, 0) == XEXP (x, 1)
|
||
|| (const_arg0 != 0 && const_arg0 == const_arg1))
|
||
{
|
||
/* We can't assume x-x is 0 with IEEE floating point. */
|
||
if (GET_MODE_CLASS (GET_MODE (x)) == MODE_INT)
|
||
return const0_rtx;
|
||
}
|
||
break;
|
||
|
||
case MINUS:
|
||
if (const_arg1 == const0_rtx
|
||
|| const_arg1 == fconst0_rtx
|
||
|| const_arg1 == dconst0_rtx)
|
||
return XEXP (x, 0);
|
||
|
||
if (XEXP (x, 0) == XEXP (x, 1)
|
||
|| (const_arg0 != 0 && const_arg0 == const_arg1))
|
||
{
|
||
/* We can't assume x-x is 0 with IEEE floating point. */
|
||
if (GET_MODE_CLASS (GET_MODE (x)) == MODE_INT)
|
||
return const0_rtx;
|
||
}
|
||
|
||
/* Change subtraction from zero into negation. */
|
||
if (const_arg0 == const0_rtx)
|
||
return gen_rtx (NEG, GET_MODE (x), XEXP (x, 1));
|
||
|
||
/* Don't let a relocatable value get a negative coeff. */
|
||
if (const_arg0 != 0 && const_arg1 != 0
|
||
&& GET_CODE (const_arg1) == CONST_INT)
|
||
new = plus_constant (const_arg0, - INTVAL (const_arg1));
|
||
break;
|
||
|
||
case MULT:
|
||
case UMULT:
|
||
if (const_arg1 && GET_CODE (const_arg1) == CONST_INT
|
||
&& INTVAL (const_arg1) == -1
|
||
/* Don't do this in the case of widening multiplication. */
|
||
&& GET_MODE (XEXP (x, 0)) == GET_MODE (x))
|
||
return gen_rtx (NEG, GET_MODE (x), XEXP (x, 0));
|
||
if (const_arg0 && GET_CODE (const_arg0) == CONST_INT
|
||
&& INTVAL (const_arg0) == -1
|
||
&& GET_MODE (XEXP (x, 1)) == GET_MODE (x))
|
||
return gen_rtx (NEG, GET_MODE (x), XEXP (x, 1));
|
||
if (const_arg1 == const0_rtx || const_arg0 == const0_rtx)
|
||
new = const0_rtx;
|
||
if (const_arg1 == fconst0_rtx || const_arg0 == fconst0_rtx)
|
||
new = fconst0_rtx;
|
||
if (const_arg1 == dconst0_rtx || const_arg0 == dconst0_rtx)
|
||
new = dconst0_rtx;
|
||
if (const_arg1 == const1_rtx)
|
||
return XEXP (x, 0);
|
||
if (const_arg0 == const1_rtx)
|
||
return XEXP (x, 1);
|
||
break;
|
||
|
||
case IOR:
|
||
if (const_arg1 == const0_rtx)
|
||
return XEXP (x, 0);
|
||
if (const_arg0 == const0_rtx)
|
||
return XEXP (x, 1);
|
||
if (const_arg1 && GET_CODE (const_arg1) == CONST_INT
|
||
&& (INTVAL (const_arg1) & GET_MODE_MASK (GET_MODE (x)))
|
||
== GET_MODE_MASK (GET_MODE (x)))
|
||
new = const_arg1;
|
||
if (const_arg0 && GET_CODE (const_arg0) == CONST_INT
|
||
&& (INTVAL (const_arg0) & GET_MODE_MASK (GET_MODE (x)))
|
||
== GET_MODE_MASK (GET_MODE (x)))
|
||
new = const_arg0;
|
||
break;
|
||
|
||
case XOR:
|
||
if (const_arg1 == const0_rtx)
|
||
return XEXP (x, 0);
|
||
if (const_arg0 == const0_rtx)
|
||
return XEXP (x, 1);
|
||
if (const_arg1 && GET_CODE (const_arg1) == CONST_INT
|
||
&& (INTVAL (const_arg1) & GET_MODE_MASK (GET_MODE (x)))
|
||
== GET_MODE_MASK (GET_MODE (x)))
|
||
return gen_rtx (NOT, GET_MODE (x), XEXP (x, 0));
|
||
if (const_arg0 && GET_CODE (const_arg0) == CONST_INT
|
||
&& (INTVAL (const_arg0) & GET_MODE_MASK (GET_MODE (x)))
|
||
== GET_MODE_MASK (GET_MODE (x)))
|
||
return gen_rtx (NOT, GET_MODE (x), XEXP (x, 1));
|
||
break;
|
||
|
||
case AND:
|
||
if (const_arg1 == const0_rtx || const_arg0 == const0_rtx)
|
||
new = const0_rtx;
|
||
if (const_arg1 && GET_CODE (const_arg1) == CONST_INT
|
||
&& (INTVAL (const_arg1) & GET_MODE_MASK (GET_MODE (x)))
|
||
== GET_MODE_MASK (GET_MODE (x)))
|
||
return XEXP (x, 0);
|
||
if (const_arg0 && GET_CODE (const_arg0) == CONST_INT
|
||
&& (INTVAL (const_arg0) & GET_MODE_MASK (GET_MODE (x)))
|
||
== GET_MODE_MASK (GET_MODE (x)))
|
||
return XEXP (x, 1);
|
||
break;
|
||
|
||
case DIV:
|
||
case UDIV:
|
||
if (const_arg1 == const1_rtx)
|
||
return XEXP (x, 0);
|
||
if (const_arg0 == const0_rtx)
|
||
new = const0_rtx;
|
||
break;
|
||
|
||
case UMOD:
|
||
case MOD:
|
||
if (const_arg0 == const0_rtx || const_arg1 == const1_rtx)
|
||
new = const0_rtx;
|
||
break;
|
||
|
||
case LSHIFT:
|
||
case ASHIFT:
|
||
case ROTATE:
|
||
case ASHIFTRT:
|
||
case LSHIFTRT:
|
||
case ROTATERT:
|
||
if (const_arg1 == const0_rtx)
|
||
return XEXP (x, 0);
|
||
if (const_arg0 == const0_rtx)
|
||
new = const_arg0;
|
||
break;
|
||
}
|
||
|
||
if (new != 0 && LEGITIMATE_CONSTANT_P (new))
|
||
return new;
|
||
return x;
|
||
}
|
||
|
||
if (arithwidth == 0)
|
||
{
|
||
if (GET_MODE (XEXP (x, 0)) != VOIDmode)
|
||
arithwidth = GET_MODE_BITSIZE (GET_MODE (XEXP (x, 0)));
|
||
if (GET_MODE (XEXP (x, 1)) != VOIDmode)
|
||
arithwidth = GET_MODE_BITSIZE (GET_MODE (XEXP (x, 1)));
|
||
}
|
||
|
||
/* Get the integer argument values in two forms:
|
||
zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S. */
|
||
|
||
arg0 = INTVAL (const_arg0);
|
||
arg1 = INTVAL (const_arg1);
|
||
|
||
if (arithwidth < HOST_BITS_PER_INT && arithwidth > 0)
|
||
{
|
||
arg0 &= (1 << arithwidth) - 1;
|
||
arg1 &= (1 << arithwidth) - 1;
|
||
|
||
arg0s = arg0;
|
||
if (arg0s & (1 << (arithwidth - 1)))
|
||
arg0s |= ((-1) << arithwidth);
|
||
|
||
arg1s = arg1;
|
||
if (arg1s & (1 << (arithwidth - 1)))
|
||
arg1s |= ((-1) << arithwidth);
|
||
}
|
||
else
|
||
{
|
||
arg0s = arg0;
|
||
arg1s = arg1;
|
||
}
|
||
|
||
/* Compute the value of the arithmetic. */
|
||
|
||
switch (code)
|
||
{
|
||
case PLUS:
|
||
val = arg0 + arg1;
|
||
break;
|
||
|
||
case MINUS:
|
||
val = arg0 - arg1;
|
||
break;
|
||
|
||
case MULT:
|
||
val = arg0s * arg1s;
|
||
break;
|
||
|
||
case DIV:
|
||
if (arg1s == 0)
|
||
return x;
|
||
val = arg0s / arg1s;
|
||
break;
|
||
|
||
case MOD:
|
||
if (arg1s == 0)
|
||
return x;
|
||
val = arg0s % arg1s;
|
||
break;
|
||
|
||
case UMULT:
|
||
val = (unsigned) arg0 * arg1;
|
||
break;
|
||
|
||
case UDIV:
|
||
if (arg1 == 0)
|
||
return x;
|
||
val = (unsigned) arg0 / arg1;
|
||
break;
|
||
|
||
case UMOD:
|
||
if (arg1 == 0)
|
||
return x;
|
||
val = (unsigned) arg0 % arg1;
|
||
break;
|
||
|
||
case AND:
|
||
val = arg0 & arg1;
|
||
break;
|
||
|
||
case IOR:
|
||
val = arg0 | arg1;
|
||
break;
|
||
|
||
case XOR:
|
||
val = arg0 ^ arg1;
|
||
break;
|
||
|
||
case NE:
|
||
val = arg0 != arg1;
|
||
break;
|
||
|
||
case EQ:
|
||
val = arg0 == arg1;
|
||
break;
|
||
|
||
case LE:
|
||
val = arg0s <= arg1s;
|
||
break;
|
||
|
||
case LT:
|
||
val = arg0s < arg1s;
|
||
break;
|
||
|
||
case GE:
|
||
val = arg0s >= arg1s;
|
||
break;
|
||
|
||
case GT:
|
||
val = arg0s > arg1s;
|
||
break;
|
||
|
||
case LEU:
|
||
val = ((unsigned) arg0) <= ((unsigned) arg1);
|
||
break;
|
||
|
||
case LTU:
|
||
val = ((unsigned) arg0) < ((unsigned) arg1);
|
||
break;
|
||
|
||
case GEU:
|
||
val = ((unsigned) arg0) >= ((unsigned) arg1);
|
||
break;
|
||
|
||
case GTU:
|
||
val = ((unsigned) arg0) > ((unsigned) arg1);
|
||
break;
|
||
|
||
case LSHIFT:
|
||
val = ((unsigned) arg0) << arg1;
|
||
break;
|
||
|
||
case ASHIFT:
|
||
val = arg0s << arg1;
|
||
break;
|
||
|
||
case ROTATERT:
|
||
arg1 = - arg1;
|
||
case ROTATE:
|
||
{
|
||
int size = GET_MODE_SIZE (GET_MODE (x)) * BITS_PER_UNIT;
|
||
if (arg1 > 0)
|
||
{
|
||
arg1 %= size;
|
||
val = ((((unsigned) arg0) << arg1)
|
||
| (((unsigned) arg0) >> (size - arg1)));
|
||
}
|
||
else if (arg1 < 0)
|
||
{
|
||
arg1 = (- arg1) % size;
|
||
val = ((((unsigned) arg0) >> arg1)
|
||
| (((unsigned) arg0) << (size - arg1)));
|
||
}
|
||
else
|
||
val = arg0;
|
||
}
|
||
break;
|
||
|
||
case LSHIFTRT:
|
||
val = ((unsigned) arg0) >> arg1;
|
||
break;
|
||
|
||
case ASHIFTRT:
|
||
val = arg0s >> arg1;
|
||
break;
|
||
|
||
default:
|
||
return x;
|
||
}
|
||
}
|
||
else if (code == IF_THEN_ELSE && const_arg0 != 0
|
||
&& GET_CODE (const_arg0) == CONST_INT)
|
||
return XEXP (x, ((INTVAL (const_arg0) != 0) ? 1 : 2));
|
||
else if (code == IF_THEN_ELSE && XEXP (x, 0) == cc0_rtx
|
||
&& prev_insn_explicit_cc0 != 0)
|
||
return XEXP (x, ((INTVAL (prev_insn_explicit_cc0) != 0) ? 1 : 2));
|
||
else if (code == SIGN_EXTRACT || code == ZERO_EXTRACT)
|
||
{
|
||
if (const_arg0 != 0 && const_arg1 != 0 && const_arg2 != 0
|
||
&& GET_CODE (const_arg0) == CONST_INT
|
||
&& GET_CODE (const_arg1) == CONST_INT
|
||
&& GET_CODE (const_arg2) == CONST_INT)
|
||
{
|
||
/* Extracting a bit-field from a constant */
|
||
val = INTVAL (const_arg0);
|
||
#ifdef BITS_BIG_ENDIAN
|
||
val >>= (GET_MODE_BITSIZE (GET_MODE (XEXP (x, 0)))
|
||
- INTVAL (const_arg2) - INTVAL (const_arg1));
|
||
#else
|
||
val >>= INTVAL (const_arg2);
|
||
#endif
|
||
if (HOST_BITS_PER_INT != INTVAL (const_arg1))
|
||
{
|
||
/* First zero-extend. */
|
||
val &= (1 << INTVAL (const_arg1)) - 1;
|
||
/* If desired, propagate sign bit. */
|
||
if (code == SIGN_EXTRACT
|
||
&& (val & (1 << (INTVAL (const_arg1) - 1))))
|
||
val |= ~ (1 << INTVAL (const_arg1));
|
||
}
|
||
}
|
||
else
|
||
return x;
|
||
}
|
||
else
|
||
return x;
|
||
|
||
/* Clear the bits that don't belong in our mode,
|
||
unless they and our sign bit are all one.
|
||
So we get either a reasonable negative value or a reasonable
|
||
unsigned value for this mode. */
|
||
if (width < HOST_BITS_PER_INT && width > 0)
|
||
{
|
||
if ((val & ((-1) << (width - 1)))
|
||
!= ((-1) << (width - 1)))
|
||
val &= (1 << width) - 1;
|
||
}
|
||
|
||
/* Now make the new constant. */
|
||
{
|
||
rtx new = gen_rtx (CONST_INT, VOIDmode, val);
|
||
return LEGITIMATE_CONSTANT_P (new) ? new : x;
|
||
}
|
||
}
|
||
|
||
/* Return a constant value currently equivalent to X.
|
||
Return 0 if we don't know one. */
|
||
|
||
static rtx
|
||
equiv_constant (x)
|
||
rtx x;
|
||
{
|
||
rtx tem1;
|
||
|
||
if (CONSTANT_P (x) || GET_CODE (x) == CONST_DOUBLE)
|
||
return x;
|
||
else if (GET_CODE (x) == REG
|
||
&& (tem1 = qty_const[reg_qty[REGNO (x)]]) != 0
|
||
/* Make sure it is really a constant */
|
||
&& GET_CODE (tem1) != REG && GET_CODE (tem1) != PLUS)
|
||
return tem1;
|
||
/* If integer truncation is being done with SUBREG,
|
||
we can compute the result. */
|
||
else if (GET_CODE (x) == SUBREG && SUBREG_WORD (x) == 0
|
||
&& (tem1 = qty_const[reg_qty[REGNO (SUBREG_REG (x))]]) != 0
|
||
/* Make sure it is a known integer. */
|
||
&& GET_CODE (tem1) == CONST_INT
|
||
&& GET_MODE_SIZE (GET_MODE (x)) <= HOST_BITS_PER_INT
|
||
/* Make sure this SUBREG is truncation. */
|
||
&& GET_MODE_SIZE (GET_MODE (x)) < GET_MODE_SIZE (GET_MODE (SUBREG_REG (x))))
|
||
{
|
||
int value = INTVAL (tem1);
|
||
if (GET_MODE_BITSIZE (GET_MODE (x)) != HOST_BITS_PER_INT)
|
||
value &= (1 << GET_MODE_BITSIZE (GET_MODE (x))) - 1;
|
||
|
||
if (value == INTVAL (tem1))
|
||
return tem1;
|
||
else
|
||
return gen_rtx (CONST_INT, VOIDmode, value);
|
||
}
|
||
return 0;
|
||
}
|
||
|
||
/* Given an expression X which is used to set CC0,
   return an integer recording (in the encoding used for prev_insn_cc0)
   how the condition codes would be set by that expression.
   Return 0 if the value is not constant
   or if there is any doubt what condition codes result from it.

   Encoding (octal), matching the decoder in fold_rtx:
     0100 bit    set => the value below is meaningful;
     0200 bit    set => only zero/nonzero is known, not the sign;
     bits 3-5    signed comparison digit (7 = less, 1 = greater, 0 = equal);
     bits 0-2    unsigned comparison digit, same digit values.

   MODE is the machine mode to use to interpret X if it is a CONST_INT.  */

static int
fold_cc0 (mode, x)
     enum machine_mode mode;
     rtx x;
{
  if (GET_CODE (x) == COMPARE)
    {
      /* X compares two operands; fold each one first.  */
      rtx y0 = fold_rtx (XEXP (x, 0), 0);
      rtx y1 = fold_rtx (XEXP (x, 1), 0);
      int u0, u1, s0, s1;
      enum machine_mode m;
      rtx tem;

      /* Take the comparison mode from whichever operand has one.  */
      m = GET_MODE (y0);
      if (m == VOIDmode)
	m = GET_MODE (y1);
      if (m == VOIDmode)
	return 0;

      /* Replace each operand by a known constant equivalent, if any.  */
      tem = equiv_constant (y0);
      if (tem != 0)
	y0 = tem;

      if (y0 == 0)
	return 0;

      tem = equiv_constant (y1);
      if (tem != 0)
	y1 = tem;

      if (y1 == 0)
	return 0;

      /* Compare floats; report the result only for signed compares
	 since that's all there are for floats.  */
      if (GET_CODE (y0) == CONST_DOUBLE
	  && GET_CODE (y1) == CONST_DOUBLE
	  && GET_MODE_CLASS (GET_MODE (y0)) == MODE_FLOAT)
	{
	  union real_extract u0, u1;
	  /* Extract the REAL_VALUE_TYPE payloads from the CONST_DOUBLEs.  */
	  bcopy (&CONST_DOUBLE_LOW (y0), &u0, sizeof u0);
	  bcopy (&CONST_DOUBLE_LOW (y1), &u1, sizeof u1);
	  /* Only the signed digit (bits 3-5) is filled in.  */
	  return 0100 + (REAL_VALUES_LESS (u0.d, u1.d) ? 7 << 3
			 : REAL_VALUES_LESS (u1.d, u0.d) ? 1 << 3 : 0);
	}

      /* Aside from that, demand explicit integers.  */

      if (GET_CODE (y0) != CONST_INT)
	return 0;

      if (GET_CODE (y1) != CONST_INT)
	return 0;

      /* U0/U1 hold the zero-extended values, S0/S1 the sign-extended
	 values, after narrowing to mode M below.  */
      s0 = u0 = INTVAL (y0);
      s1 = u1 = INTVAL (y1);

      {
	int width = GET_MODE_BITSIZE (m);
	if (width < HOST_BITS_PER_INT)
	  {
	    /* Truncate to the mode's width, then manually sign-extend
	       S0/S1 if the mode's sign bit is set.  */
	    s0 = u0 &= ~ ((-1) << width);
	    s1 = u1 &= ~ ((-1) << width);
	    if (u0 & (1 << (width - 1)))
	      s0 |= ((-1) << width);
	    if (u1 & (1 << (width - 1)))
	      s1 |= ((-1) << width);
	  }
      }

      /* Both the signed digit (bits 3-5) and the unsigned digit
	 (bits 0-2) are known here.  */
      return 0100 + ((s0 < s1 ? 7 : s0 > s1) << 3)
	+ (((unsigned) u0 < (unsigned) u1) ? 7
	   : ((unsigned) u0 > (unsigned) u1));
    }

  /* X is not a COMPARE: it sets CC0 from a single value, which is
     implicitly compared against zero.  */
  {
    rtx y0;
    int u0, s0;
    enum machine_mode m;

    y0 = fold_rtx (x, 0);

    /* Prefer the operand's own mode; fall back to the MODE argument.  */
    m = GET_MODE (y0);
    if (m == VOIDmode)
      m = mode;

    if (GET_CODE (y0) == REG)
      y0 = qty_const[reg_qty[REGNO (y0)]];

    /* Register had no constant equivalent?  We can't do anything.  */
    if (y0 == 0)
      return 0;

    /* If we don't know the mode, we can't test the sign.  */
    if (m == VOIDmode)
      return 0;

    /* Value is frame-pointer plus a constant?  Or non-explicit constant?
       That isn't zero, but we don't know its sign.
       0300 = valid (0100) + zeroness-only (0200); the digits say
       "greater than zero" in both signed and unsigned terms, but only
       the nonzero fact may be trusted by the caller.  */
    if (FIXED_BASE_PLUS_P (y0)
	|| GET_CODE (y0) == SYMBOL_REF || GET_CODE (y0) == CONST
	|| GET_CODE (y0) == LABEL_REF)
      return 0300 + (1<<3) + 1;

    /* Otherwise, only integers enable us to optimize.  */
    if (GET_CODE (y0) != CONST_INT)
      return 0;

    s0 = u0 = INTVAL (y0);
    {
      int width = GET_MODE_BITSIZE (m);
      if (width < HOST_BITS_PER_INT)
	{
	  /* Truncate to the mode's width and sign-extend S0.  */
	  s0 = u0 &= ~ ((-1) << GET_MODE_BITSIZE (m));
	  if (u0 & (1 << (GET_MODE_BITSIZE (m) - 1)))
	    s0 |= ((-1) << GET_MODE_BITSIZE (m));
	}
    }
    /* Compare against zero: signed digit in bits 3-5, unsigned
       zero/nonzero in bits 0-2.  */
    return 0100 + ((s0 < 0 ? 7 : s0 > 0) << 3) + (u0 != 0);
  }
}
|
||
|
||
/* Attempt to prove that a loop will be executed >= 1 times,
   or prove it will be executed 0 times.
   If either can be proved, delete some of the code.

   INSN is expected to be followed by the entry jump of the loop
   (the jump to the exit test); the loop-top label follows that.
   We symbolically trace from the exit test's target, folding cc0
   settings and conditional jumps, until we either arrive back at the
   loop top (test provably succeeds the first time: delete the entry
   jump) or fall out at NOTE_INSN_LOOP_END (test provably fails:
   route the entry jump around the loop).  */

static void
predecide_loop_entry (insn)
     register rtx insn;
{
  register rtx jump = NEXT_INSN (insn);
  register rtx p;
  register rtx loop_top_label = NEXT_INSN (jump);
  enum anon1 { UNK, DELETE_LOOP, DELETE_JUMP } disposition = UNK;
  int count = 0;

  /* Find the label at the top of the loop,
     skipping any barriers and notes in between.  */
  while (GET_CODE (loop_top_label) == BARRIER
	 || GET_CODE (loop_top_label) == NOTE)
    {
      loop_top_label = NEXT_INSN (loop_top_label);
      /* No label?  Give up.  */
      if (loop_top_label == 0)
	return;
    }
  if (GET_CODE (loop_top_label) != CODE_LABEL)
    abort ();

  /* Find the label at which the loop is entered:
     the target of the entry jump's (label_ref ...).  */
  p = XEXP (SET_SRC (PATTERN (jump)), 0);
  if (GET_CODE (p) != CODE_LABEL)
    abort ();

  /* Trace the flow of control through the end test,
     propagating constants, to see if result is determined.  */
  prev_insn_cc0 = 0;
  prev_insn_explicit_cc0 = 0;
  /* Avoid infinite loop if we find a cycle of jumps:
     at most 10 jumps are followed.  */
  while (count < 10)
    {
      /* At end of function?  Means rtl is inconsistent,
	 but this can happen when stmt.c gets confused
	 by a syntax error.  */
      if (p == 0)
	break;
      /* Arriving at end of loop means endtest will drop out.  */
      if (GET_CODE (p) == NOTE
	  && NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_END)
	{
	  disposition = DELETE_LOOP;
	  break;
	}
      else if (GET_CODE (p) == CODE_LABEL || GET_CODE (p) == NOTE)
	/* Labels and other notes carry no semantics; step over them.  */
	;
      /* We only know how to handle two kinds of insns:
	 conditional jumps, and those that set the condition codes.  */
      else if (GET_CODE (p) == INSN && GET_CODE (PATTERN (p)) == SET
	       && SET_DEST (PATTERN (p)) == cc0_rtx)
	{
	  /* Record how this insn sets cc0, for use when the following
	     conditional jump is folded.  copy_rtx protects the insn's
	     rtl from modification by the folder.  */
	  prev_insn_cc0 = fold_cc0 (GET_MODE (SET_SRC (PATTERN (p))),
				    copy_rtx (SET_SRC (PATTERN (p))));
	  if (GET_CODE (SET_SRC (PATTERN (p))) == CONST_INT)
	    prev_insn_explicit_cc0 = SET_SRC (PATTERN (p));
	}
      else if (GET_CODE (p) == JUMP_INSN
	       && GET_CODE (PATTERN (p)) == SET
	       && SET_DEST (PATTERN (p)) == pc_rtx)
	{
	  /* Fold the jump condition; a fully-determined jump folds to
	     either a LABEL_REF (taken) or pc_rtx (not taken).  */
	  register rtx target
	    = fold_rtx (SET_SRC (PATTERN (p)), 1);
	  if (GET_CODE (target) == LABEL_REF)
	    p = XEXP (target, 0);
	  else if (target != pc_rtx)
	    /* If destination of jump is not fixed, give up.  */
	    break;
	  count++;
	}
      /* Any other kind of insn means we don't know
	 what result the test will have.  */
      else
	break;

      /* Arriving at top of loop means we can drop straight in.
	 Check here because we can arrive only via a jump insn
	 which would have changed P above.  */
      if (p == loop_top_label)
	{
	  disposition = DELETE_JUMP;
	  break;
	}
      /* We went past one insn; consider the next.  */
      p = NEXT_INSN (p);
    }
  if (disposition == DELETE_JUMP)
    {
      /* We know the loop test will succeed the first time,
	 so delete the jump to the test; drop right into loop.
	 Note that one call to delete_insn gets the BARRIER as well.  */
      delete_insn (jump);
    }
  if (disposition == DELETE_LOOP)
    {
      /* We know the endtest will fail and drop right out of the loop,
	 but it isn't safe to delete the loop here.
	 There could be jumps into it from outside.
	 So make the entry-jump jump around the loop.
	 This will cause find_basic_blocks to delete it if appropriate.  */
      register rtx label = gen_label_rtx ();
      emit_label_after (label, p);
      redirect_jump (jump, label);
    }
}
|
||
|
||
/* CSE processing for one instruction.
|
||
First simplify sources and addresses of all assignments
|
||
in the instruction, using previously-computed equivalents values.
|
||
Then install the new sources and destinations in the table
|
||
of available values. */
|
||
|
||
/* Data on one SET contained in the instruction.
   cse_insn builds one of these per SET it finds, filling in the fields
   as the source is canonicalized, hashed and simplified.  */

struct set
{
  /* The SET rtx itself.  */
  rtx rtl;
  /* The hash-table element for the SET_SRC of the SET.  */
  struct table_elt *src_elt;
  /* Hash code for the SET_SRC.  */
  int src_hash_code;
  /* Hash code for the SET_DEST.  */
  int dest_hash_code;
  /* The SET_DEST, with SUBREG, STRICT_LOW_PART, SIGN_EXTRACT and
     ZERO_EXTRACT wrappers stripped down to the inner MEM or REG.  */
  rtx inner_dest;
  /* Place where the pointer to the INNER_DEST was found,
     so the destination can be replaced in the insn.  */
  rtx *inner_dest_loc;
  /* Nonzero if the SET_SRC is in memory.  */
  char src_in_memory;
  /* Nonzero if the SET_SRC is in a structure.  */
  char src_in_struct;
  /* Nonzero if the SET_SRC contains something
     whose value cannot be predicted and understood.  */
  char src_volatile;
  /* Original machine mode, in case it becomes a CONST_INT.
     (A VOIDmode source constant is hashed in the destination's mode;
     this preserves the mode used.)  */
  enum machine_mode mode;
};
|
||
|
||
static void
|
||
cse_insn (insn)
|
||
rtx insn;
|
||
{
|
||
register rtx x = PATTERN (insn);
|
||
register int i;
|
||
register int n_sets = 0;
|
||
|
||
/* Records what this insn does to set CC0,
|
||
using same encoding used for prev_insn_cc0. */
|
||
int this_insn_cc0 = 0;
|
||
/* Likewise, what to store in prev_insn_explicit_cc0. */
|
||
rtx this_insn_explicit_cc0 = 0;
|
||
struct write_data writes_memory;
|
||
static struct write_data init = {0, 0, 0};
|
||
|
||
rtx src_eqv = 0;
|
||
struct table_elt *src_eqv_elt = 0;
|
||
int src_eqv_in_memory;
|
||
int src_eqv_in_struct;
|
||
int src_eqv_hash_code;
|
||
|
||
struct set *sets;
|
||
|
||
this_insn = insn;
|
||
writes_memory = init;
|
||
|
||
/* Find all the SETs and CLOBBERs in this instruction.
|
||
Record all the SETs in the array `set' and count them.
|
||
Also determine whether there is a CLOBBER that invalidates
|
||
all memory references, or all references at varying addresses. */
|
||
|
||
if (GET_CODE (x) == SET)
|
||
{
|
||
rtx tem;
|
||
n_sets = 1;
|
||
sets = (struct set *) alloca (sizeof (struct set));
|
||
sets[0].rtl = x;
|
||
|
||
if (REG_NOTES (insn) != 0)
|
||
{
|
||
/* Store the equivalent value (re REG_EQUAL or REG_EQUIV) in SRC_EQV. */
|
||
tem = find_reg_note (insn, REG_EQUIV, 0);
|
||
if (tem == 0)
|
||
tem = find_reg_note (insn, REG_EQUAL, 0);
|
||
if (tem) src_eqv = XEXP (tem, 0);
|
||
|
||
/* Ignore the REG_EQUAL or REG_EQUIV note if its contents
|
||
are the same as the source. */
|
||
if (src_eqv && rtx_equal_p (src_eqv, SET_SRC (x)))
|
||
src_eqv = 0;
|
||
}
|
||
|
||
/* Return now for unconditional jumps.
|
||
They never need cse processing, so this does not hurt.
|
||
The reason is not efficiency but rather
|
||
so that we can test at the end for instructions
|
||
that have been simplified to unconditional jumps
|
||
and not be misled by unchanged instructions
|
||
that were unconditional jumps to begin with. */
|
||
if (SET_DEST (x) == pc_rtx
|
||
&& GET_CODE (SET_SRC (x)) == LABEL_REF)
|
||
return;
|
||
|
||
/* Return now for call-insns, (set (reg 0) (call ...)).
|
||
The hard function value register is used only once, to copy to
|
||
someplace else, so it isn't worth cse'ing (and on 80386 is unsafe)! */
|
||
if (GET_CODE (SET_SRC (x)) == CALL)
|
||
{
|
||
canon_reg (SET_SRC (x));
|
||
return;
|
||
}
|
||
}
|
||
else if (GET_CODE (x) == PARALLEL)
|
||
{
|
||
register int lim = XVECLEN (x, 0);
|
||
|
||
sets = (struct set *) alloca (lim * sizeof (struct set));
|
||
|
||
/* Find all regs explicitly clobbered in this insn,
|
||
and ensure they are not replaced with any other regs
|
||
elsewhere in this insn.
|
||
When a reg that is clobbered is also used for input,
|
||
we should presume that that is for a reason,
|
||
and we should not substitute some other register
|
||
which is not supposed to be clobbered. */
|
||
for (i = 0; i < lim; i++)
|
||
{
|
||
register rtx y = XVECEXP (x, 0, i);
|
||
if (GET_CODE (y) == CLOBBER && GET_CODE (XEXP (y, 0)) == REG)
|
||
invalidate (XEXP (y, 0));
|
||
}
|
||
|
||
for (i = 0; i < lim; i++)
|
||
{
|
||
register rtx y = XVECEXP (x, 0, i);
|
||
if (GET_CODE (y) == SET)
|
||
sets[n_sets++].rtl = y;
|
||
else if (GET_CODE (y) == CLOBBER)
|
||
{
|
||
/* If we clobber memory, take note of that,
|
||
and canon the address.
|
||
This does nothing when a register is clobbered
|
||
because we have already invalidated the reg. */
|
||
canon_reg (y);
|
||
note_mem_written (XEXP (y, 0), &writes_memory);
|
||
}
|
||
else if (GET_CODE (y) == USE
|
||
&& ! (GET_CODE (XEXP (y, 0)) == REG
|
||
&& REGNO (XEXP (y, 0)) < FIRST_PSEUDO_REGISTER))
|
||
canon_reg (y);
|
||
else if (GET_CODE (y) == CALL)
|
||
canon_reg (y);
|
||
}
|
||
}
|
||
else if (GET_CODE (x) == CLOBBER)
|
||
note_mem_written (XEXP (x, 0), &writes_memory);
|
||
else if (GET_CODE (x) == CALL)
|
||
canon_reg (x);
|
||
|
||
if (n_sets == 0)
|
||
{
|
||
invalidate_from_clobbers (&writes_memory, x);
|
||
return;
|
||
}
|
||
|
||
/* Canonicalize sources and addresses of destinations.
|
||
set sets[i].src_elt to the class each source belongs to.
|
||
Detect assignments from or to volatile things
|
||
and set set[i] to zero so they will be ignored
|
||
in the rest of this function.
|
||
|
||
Nothing in this loop changes the hash table or the register chains. */
|
||
|
||
for (i = 0; i < n_sets; i++)
|
||
{
|
||
register rtx src, dest;
|
||
register struct table_elt *elt;
|
||
enum machine_mode mode;
|
||
|
||
dest = SET_DEST (sets[i].rtl);
|
||
src = SET_SRC (sets[i].rtl);
|
||
|
||
/* If SRC is a constant that has no machine mode,
|
||
hash it with the destination's machine mode.
|
||
This way we can keep different modes separate. */
|
||
|
||
mode = GET_MODE (src) == VOIDmode ? GET_MODE (dest) : GET_MODE (src);
|
||
sets[i].mode = mode;
|
||
|
||
/* Replace each registers in SRC with oldest equivalent register,
|
||
but if DEST is a register do not replace it if it appears in SRC. */
|
||
|
||
if (GET_CODE (dest) == REG)
|
||
{
|
||
int tem = reg_qty[REGNO (dest)];
|
||
reg_qty[REGNO (dest)] = REGNO (dest);
|
||
src = canon_reg (src);
|
||
|
||
if (src_eqv)
|
||
src_eqv = canon_reg (src_eqv);
|
||
|
||
reg_qty[REGNO (dest)] = tem;
|
||
}
|
||
else
|
||
{
|
||
src = canon_reg (src);
|
||
|
||
if (src_eqv)
|
||
src_eqv = canon_reg (src_eqv);
|
||
}
|
||
|
||
if (src_eqv)
|
||
{
|
||
enum machine_mode eqvmode = mode;
|
||
if (GET_CODE (dest) == STRICT_LOW_PART)
|
||
eqvmode = GET_MODE (SUBREG_REG (XEXP (dest, 0)));
|
||
do_not_record = 0;
|
||
hash_arg_in_memory = 0;
|
||
hash_arg_in_struct = 0;
|
||
src_eqv = fold_rtx (src_eqv, 0);
|
||
src_eqv_hash_code = HASH (src_eqv, eqvmode);
|
||
|
||
/* Replace the src_eqv with its cheapest equivalent. */
|
||
|
||
if (!do_not_record)
|
||
{
|
||
elt = lookup (src_eqv, src_eqv_hash_code, eqvmode);
|
||
if (elt && elt != elt->first_same_value)
|
||
{
|
||
elt = elt->first_same_value;
|
||
/* Find the cheapest one that is still valid. */
|
||
while ((GET_CODE (elt->exp) != REG
|
||
&& !exp_equiv_p (elt->exp, elt->exp, 1))
|
||
|| elt->equivalence_only)
|
||
elt = elt->next_same_value;
|
||
src_eqv = copy_rtx (elt->exp);
|
||
hash_arg_in_memory = 0;
|
||
hash_arg_in_struct = 0;
|
||
src_eqv_hash_code = HASH (src_eqv, elt->mode);
|
||
}
|
||
src_eqv_elt = elt;
|
||
}
|
||
else
|
||
src_eqv = 0;
|
||
|
||
src_eqv_in_memory = hash_arg_in_memory;
|
||
src_eqv_in_struct = hash_arg_in_struct;
|
||
}
|
||
|
||
/* Compute SRC's hash code, and also notice if it
|
||
should not be recorded at all. In that case,
|
||
prevent any further processing of this assignment. */
|
||
do_not_record = 0;
|
||
hash_arg_in_memory = 0;
|
||
hash_arg_in_struct = 0;
|
||
src = fold_rtx (src, 0);
|
||
/* If SRC is a subreg of a reg with a known value,
|
||
perform the truncation now. */
|
||
if (GET_CODE (src) == SUBREG)
|
||
{
|
||
rtx temp = equiv_constant (src);
|
||
if (temp)
|
||
src = temp;
|
||
}
|
||
/* If we have (NOT Y), see if Y is known to be (NOT Z).
|
||
If so, (NOT Y) simplifies to Z. */
|
||
if (GET_CODE (src) == NOT || GET_CODE (src) == NEG)
|
||
{
|
||
rtx y = lookup_as_function (XEXP (src, 0), GET_CODE (src));
|
||
if (y != 0)
|
||
src = copy_rtx (XEXP (y, 0));
|
||
}
|
||
|
||
/* If storing a constant value in a register that
|
||
previously held the constant value 0,
|
||
record this fact with a REG_WAS_0 note on this insn. */
|
||
if (GET_CODE (src) == CONST_INT
|
||
&& GET_CODE (dest) == REG
|
||
&& qty_const[reg_qty[REGNO (dest)]] == const0_rtx)
|
||
REG_NOTES (insn) = gen_rtx (INSN_LIST, REG_WAS_0,
|
||
qty_const_insn[reg_qty[REGNO (dest)]],
|
||
REG_NOTES (insn));
|
||
|
||
sets[i].src_hash_code = HASH (src, mode);
|
||
|
||
sets[i].src_volatile = do_not_record;
|
||
|
||
#if 0
|
||
/* This code caused multiple hash-table entries
|
||
to be created for registers. Invalidation
|
||
would only get one, leaving others that didn't belong.
|
||
I don't know what good this ever did. */
|
||
if (GET_CODE (src) == REG)
|
||
{
|
||
sets[i].src_in_memory = 0;
|
||
sets[i].src_elt = 0;
|
||
}
|
||
else ...;
|
||
#endif
|
||
/* If source is a perverse subreg (such as QI treated as an SI),
|
||
treat it as volatile. It may do the work of an SI in one context
|
||
where the extra bits are not being used, but cannot replace an SI
|
||
in general. */
|
||
if (GET_CODE (src) == SUBREG
|
||
&& (GET_MODE_SIZE (GET_MODE (src))
|
||
> GET_MODE_SIZE (GET_MODE (SUBREG_REG (src)))))
|
||
sets[i].src_volatile = 1;
|
||
else if (!sets[i].src_volatile)
|
||
{
|
||
/* Replace the source with its cheapest equivalent. */
|
||
|
||
elt = lookup (src, sets[i].src_hash_code, mode);
|
||
if (elt && elt != elt->first_same_value)
|
||
{
|
||
elt = elt->first_same_value;
|
||
/* Find the cheapest one that is still valid. */
|
||
while ((GET_CODE (elt->exp) != REG
|
||
&& !exp_equiv_p (elt->exp, elt->exp, 1))
|
||
|| elt->equivalence_only)
|
||
elt = elt->next_same_value;
|
||
src = copy_rtx (elt->exp);
|
||
hash_arg_in_memory = 0;
|
||
hash_arg_in_struct = 0;
|
||
sets[i].src_hash_code = HASH (src, elt->mode);
|
||
}
|
||
|
||
/* If ELT is a constant, is there a register
|
||
linearly related to it? If so, replace it
|
||
with the sum of that register plus an offset. */
|
||
|
||
if (GET_CODE (src) == CONST && n_sets == 1
|
||
&& SET_DEST (sets[i].rtl) != cc0_rtx)
|
||
{
|
||
rtx newsrc = use_related_value (src, elt);
|
||
if (newsrc == 0 && src_eqv != 0)
|
||
newsrc = use_related_value (src_eqv, src_eqv_elt);
|
||
if (newsrc)
|
||
{
|
||
rtx oldsrc = src;
|
||
src = newsrc;
|
||
hash_arg_in_memory = 0;
|
||
hash_arg_in_struct = 0;
|
||
sets[i].src_hash_code = HASH (src, GET_MODE (src));
|
||
/* The new expression for the SRC has the same value
|
||
as the previous one; so if the previous one is in
|
||
the hash table, put the new one in as equivalent. */
|
||
if (elt != 0)
|
||
elt = insert (src, elt->first_same_value, sets[i].src_hash_code,
|
||
elt->mode);
|
||
else
|
||
{
|
||
/* Maybe the new expression is in the table already. */
|
||
elt = lookup (src, sets[i].src_hash_code, mode);
|
||
/* And maybe a register contains the same value. */
|
||
if (elt && elt != elt->first_same_value)
|
||
{
|
||
elt = elt->first_same_value;
|
||
/* Find the cheapest one that is still valid. */
|
||
while ((GET_CODE (elt->exp) != REG
|
||
&& !exp_equiv_p (elt->exp, elt->exp, 1))
|
||
|| elt->equivalence_only)
|
||
elt = elt->next_same_value;
|
||
src = copy_rtx (elt->exp);
|
||
hash_arg_in_memory = 0;
|
||
hash_arg_in_struct = 0;
|
||
sets[i].src_hash_code = HASH (src, elt->mode);
|
||
}
|
||
}
|
||
|
||
/* This would normally be inhibited by the REG_EQUIV
|
||
note we are about to make. */
|
||
#if 0
|
||
/* Deleted because the inhibition was deleted. */
|
||
SET_SRC (sets[i].rtl) = src;
|
||
#endif
|
||
|
||
/* Record the actual constant value in a REG_EQUIV note. */
|
||
if (GET_CODE (SET_DEST (sets[i].rtl)) == REG)
|
||
REG_NOTES (insn) = gen_rtx (EXPR_LIST, REG_EQUIV,
|
||
oldsrc, REG_NOTES (insn));
|
||
}
|
||
}
|
||
|
||
sets[i].src_elt = elt;
|
||
sets[i].src_in_memory = hash_arg_in_memory;
|
||
sets[i].src_in_struct = hash_arg_in_struct;
|
||
}
|
||
|
||
/* Either canon_reg or the copy_rtx may have changed this. */
|
||
/* Note it is not safe to replace the sources if there
|
||
is more than one set. We could get an insn
|
||
[(set (reg) (reg)) (set (reg) (reg))], which is probably
|
||
not in the machine description.
|
||
This case we could handle by breaking into several insns.
|
||
Cases of partial substitution cannot win at all. */
|
||
/* Also, if this insn is setting a "constant" register,
|
||
we may not replace the value that is given to it. */
|
||
if (n_sets == 1)
|
||
#if 0
|
||
/* Now that the REG_EQUIV contains the constant instead of the reg,
|
||
it should be ok to modify the insn's actual source. */
|
||
if (REG_NOTES (insn) == 0
|
||
|| REG_NOTE_KIND (REG_NOTES (insn)) != REG_EQUIV)
|
||
#endif
|
||
SET_SRC (sets[0].rtl) = src;
|
||
|
||
do_not_record = 0;
|
||
sets[i].inner_dest_loc = &SET_DEST (sets[0].rtl);
|
||
|
||
/* Look within any SIGN_EXTRACT or ZERO_EXTRACT
|
||
to the MEM or REG within it. */
|
||
while (1)
|
||
{
|
||
if (GET_CODE (dest) == SIGN_EXTRACT
|
||
|| GET_CODE (dest) == ZERO_EXTRACT)
|
||
{
|
||
XEXP (dest, 1) = canon_reg (XEXP (dest, 1));
|
||
XEXP (dest, 2) = canon_reg (XEXP (dest, 2));
|
||
sets[i].inner_dest_loc = &XEXP (dest, 0);
|
||
dest = XEXP (dest, 0);
|
||
}
|
||
else if (GET_CODE (dest) == SUBREG
|
||
|| GET_CODE (dest) == STRICT_LOW_PART)
|
||
{
|
||
sets[i].inner_dest_loc = &XEXP (dest, 0);
|
||
dest = XEXP (dest, 0);
|
||
}
|
||
else
|
||
break;
|
||
}
|
||
|
||
sets[i].inner_dest = dest;
|
||
|
||
/* If storing into memory, do cse on the memory address.
|
||
Also compute the hash code of the destination now,
|
||
before the effects of this instruction are recorded,
|
||
since the register values used in the address computation
|
||
are those before this instruction. */
|
||
if (GET_CODE (dest) == MEM)
|
||
{
|
||
register rtx addr;
|
||
register int hash;
|
||
|
||
canon_reg (dest);
|
||
dest = fold_rtx (dest, 0);
|
||
addr = XEXP (dest, 0);
|
||
|
||
/* Pushing or popping does not invalidate anything. */
|
||
if ((GET_CODE (addr) == PRE_DEC || GET_CODE (addr) == PRE_INC
|
||
|| GET_CODE (addr) == POST_DEC || GET_CODE (addr) == POST_INC)
|
||
&& GET_CODE (XEXP (addr, 0)) == REG
|
||
&& REGNO (XEXP (addr, 0)) == STACK_POINTER_REGNUM)
|
||
;
|
||
else
|
||
/* Otherwise, decide whether we invalidate
|
||
everything in memory, or just things at non-fixed places.
|
||
Writing a large aggregate must invalidate everything
|
||
because we don't know how long it is. */
|
||
note_mem_written (dest, &writes_memory);
|
||
|
||
/* Do not try to replace addresses of local and argument slots.
|
||
The MEM expressions for args and non-register local variables
|
||
are made only once and inserted in many instructions,
|
||
as well as being used to control symbol table output.
|
||
It is not safe to clobber them. It also doesn't do any good! */
|
||
if ((GET_CODE (addr) == PLUS
|
||
&& GET_CODE (XEXP (addr, 0)) == REG
|
||
&& GET_CODE (XEXP (addr, 1)) == CONST_INT
|
||
&& (hash = REGNO (XEXP (addr, 0)),
|
||
hash == FRAME_POINTER_REGNUM || hash == ARG_POINTER_REGNUM))
|
||
|| (GET_CODE (addr) == REG
|
||
&& (hash = REGNO (addr),
|
||
hash == FRAME_POINTER_REGNUM || hash == ARG_POINTER_REGNUM)))
|
||
sets[i].dest_hash_code = ((int)MEM + canon_hash (addr, GET_MODE (dest))) % NBUCKETS;
|
||
else
|
||
{
|
||
/* Look for a simpler equivalent for the destination address. */
|
||
hash = HASH (addr, Pmode);
|
||
if (! do_not_record)
|
||
{
|
||
elt = lookup (addr, hash, Pmode);
|
||
sets[i].dest_hash_code = ((int) MEM + hash) % NBUCKETS;
|
||
|
||
if (elt && elt != elt->first_same_value)
|
||
{
|
||
elt = elt->first_same_value;
|
||
/* Find the cheapest one that is still valid. */
|
||
while ((GET_CODE (elt->exp) != REG
|
||
&& !exp_equiv_p (elt->exp, elt->exp, 1))
|
||
|| elt->equivalence_only)
|
||
elt = elt->next_same_value;
|
||
|
||
addr = copy_rtx (elt->exp);
|
||
/* Create a new MEM rtx, in case the old one
|
||
is shared somewhere else. */
|
||
dest = gen_rtx (MEM, GET_MODE (dest), addr);
|
||
MEM_VOLATILE_P (dest)
|
||
= MEM_VOLATILE_P (sets[i].inner_dest);
|
||
MEM_IN_STRUCT_P (dest)
|
||
= MEM_IN_STRUCT_P (sets[i].inner_dest);
|
||
*sets[i].inner_dest_loc = dest;
|
||
sets[i].inner_dest = dest;
|
||
}
|
||
}
|
||
}
|
||
}
|
||
|
||
/* Don't enter a bit-field in the hash table
|
||
because the value in it after the store
|
||
may not equal what was stored, due to truncation. */
|
||
|
||
if (GET_CODE (SET_DEST (sets[i].rtl)) == ZERO_EXTRACT
|
||
|| GET_CODE (SET_DEST (sets[i].rtl)) == SIGN_EXTRACT)
|
||
{
|
||
rtx width = XEXP (SET_DEST (sets[i].rtl), 1);
|
||
rtx value = equiv_constant (SET_SRC (sets[i].rtl));
|
||
|
||
if (value != 0 && GET_CODE (value) == CONST_INT
|
||
&& GET_CODE (width) == CONST_INT
|
||
&& INTVAL (width) < HOST_BITS_PER_INT
|
||
&& ! (INTVAL (value) & (-1) << INTVAL (width)))
|
||
/* Exception: if the value is constant,
|
||
we can tell whether truncation would change it. */
|
||
;
|
||
else
|
||
sets[i].src_volatile = 1, src_eqv = 0;
|
||
}
|
||
|
||
/* No further processing for this assignment
|
||
if destination is volatile or if the source and destination
|
||
are the same. */
|
||
|
||
else if (do_not_record
|
||
|| (GET_CODE (dest) == REG
|
||
? REGNO (dest) == STACK_POINTER_REGNUM
|
||
: GET_CODE (dest) != MEM)
|
||
|| rtx_equal_p (SET_SRC (sets[i].rtl), SET_DEST (sets[i].rtl)))
|
||
sets[i].rtl = 0;
|
||
|
||
if (sets[i].rtl != 0 && dest != SET_DEST (sets[i].rtl))
|
||
sets[i].dest_hash_code = HASH (SET_DEST (sets[i].rtl), mode);
|
||
|
||
if (dest == cc0_rtx
|
||
&& (GET_CODE (src) == COMPARE
|
||
|| CONSTANT_P (src)
|
||
|| GET_CODE (src) == REG))
|
||
this_insn_cc0 = fold_cc0 (sets[i].mode, src);
|
||
|
||
if (dest == cc0_rtx && GET_CODE (src) == CONST_INT)
|
||
this_insn_explicit_cc0 = src;
|
||
}
|
||
|
||
/* Now enter all non-volatile source expressions in the hash table
|
||
if they are not already present.
|
||
Record in src_elt the heads of their equivalence classes.
|
||
This way we can insert the corresponding destinations into
|
||
the same classes even if the actual sources are no longer in them
|
||
(having been invalidated). */
|
||
|
||
if (src_eqv && src_eqv_elt == 0 && sets[0].rtl != 0)
|
||
{
|
||
register struct table_elt *elt;
|
||
rtx dest = SET_DEST (sets[0].rtl);
|
||
enum machine_mode eqvmode = GET_MODE (dest);
|
||
|
||
if (GET_CODE (dest) == STRICT_LOW_PART)
|
||
eqvmode = GET_MODE (SUBREG_REG (XEXP (dest, 0)));
|
||
if (insert_regs (src_eqv, 0, 0))
|
||
src_eqv_hash_code = HASH (src_eqv, eqvmode);
|
||
elt = insert (src_eqv, 0, src_eqv_hash_code, eqvmode);
|
||
elt->in_memory = src_eqv_in_memory;
|
||
elt->in_struct = src_eqv_in_struct;
|
||
elt->equivalence_only = 1;
|
||
src_eqv_elt = elt->first_same_value;
|
||
}
|
||
|
||
for (i = 0; i < n_sets; i++)
|
||
if (sets[i].rtl && ! sets[i].src_volatile)
|
||
{
|
||
if (GET_CODE (SET_DEST (sets[i].rtl)) == STRICT_LOW_PART)
|
||
{
|
||
/* REG_EQUAL in setting a STRICT_LOW_PART
|
||
gives an equivalent for the entire destination register,
|
||
not just for the subreg being stored in now.
|
||
This is a more interesting equivalent, so we arrange later
|
||
to treat the entire reg as the destination. */
|
||
sets[i].src_elt = src_eqv_elt;
|
||
sets[i].src_hash_code = src_eqv_hash_code;
|
||
}
|
||
else if (sets[i].src_elt == 0)
|
||
{
|
||
register rtx src = SET_SRC (sets[i].rtl);
|
||
register rtx dest = SET_DEST (sets[i].rtl);
|
||
register struct table_elt *elt;
|
||
enum machine_mode mode
|
||
= GET_MODE (src) == VOIDmode ? GET_MODE (dest) : GET_MODE (src);
|
||
|
||
/* Note that these insert_regs calls cannot remove
|
||
any of the src_elt's, because they would have failed to match
|
||
if not still valid. */
|
||
if (insert_regs (src, 0, 0))
|
||
sets[i].src_hash_code = HASH (src, mode);
|
||
elt = insert (src, src_eqv_elt, sets[i].src_hash_code, mode);
|
||
elt->in_memory = sets[i].src_in_memory;
|
||
elt->in_struct = sets[i].src_in_struct;
|
||
sets[i].src_elt = elt->first_same_value;
|
||
}
|
||
}
|
||
|
||
invalidate_from_clobbers (&writes_memory, x);
|
||
|
||
/* Now invalidate everything set by this instruction.
|
||
If a SUBREG or other funny destination is being set,
|
||
sets[i].rtl is still nonzero, so here we invalidate the reg
|
||
a part of which is being set. */
|
||
|
||
for (i = 0; i < n_sets; i++)
|
||
if (sets[i].rtl)
|
||
{
|
||
register rtx dest = sets[i].inner_dest;
|
||
|
||
/* Needed for registers to remove the register from its
|
||
previous quantity's chain.
|
||
Needed for memory if this is a nonvarying address, unless
|
||
we have just done an invalidate_memory that covers even those. */
|
||
if (GET_CODE (dest) == REG || GET_CODE (dest) == SUBREG
|
||
|| (! writes_memory.all && ! cse_rtx_addr_varies_p (dest)))
|
||
invalidate (dest);
|
||
}
|
||
|
||
/* Make sure registers mentioned in destinations
|
||
are safe for use in an expression to be inserted.
|
||
This removes from the hash table
|
||
any invalid entry that refers to one of these registers. */
|
||
|
||
for (i = 0; i < n_sets; i++)
|
||
if (sets[i].rtl && GET_CODE (SET_DEST (sets[i].rtl)) != REG)
|
||
mention_regs (SET_DEST (sets[i].rtl));
|
||
|
||
/* We may have just removed some of the src_elt's from the hash table.
|
||
So replace each one with the current head of the same class. */
|
||
|
||
for (i = 0; i < n_sets; i++)
|
||
if (sets[i].rtl)
|
||
{
|
||
/* If the source is volatile, its destination goes in
|
||
a class of its own. */
|
||
if (sets[i].src_volatile)
|
||
sets[i].src_elt = 0;
|
||
|
||
if (sets[i].src_elt && sets[i].src_elt->first_same_value == 0)
|
||
/* If elt was removed, find current head of same class,
|
||
or 0 if nothing remains of that class. */
|
||
{
|
||
register struct table_elt *elt = sets[i].src_elt;
|
||
|
||
while (elt && elt->first_same_value == 0)
|
||
elt = elt->next_same_value;
|
||
sets[i].src_elt = elt ? elt->first_same_value : 0;
|
||
}
|
||
}
|
||
|
||
/* Now insert the destinations into their equivalence classes. */
|
||
|
||
for (i = 0; i < n_sets; i++)
|
||
if (sets[i].rtl)
|
||
{
|
||
register rtx dest = SET_DEST (sets[i].rtl);
|
||
register struct table_elt *elt;
|
||
|
||
if (flag_float_store
|
||
&& GET_CODE (dest) == MEM
|
||
&& (GET_MODE (dest) == SFmode || GET_MODE (dest) == DFmode))
|
||
continue;
|
||
|
||
/* STRICT_LOW_PART isn't part of the value BEING set,
|
||
and neither is the SUBREG inside it.
|
||
Note that in this case SETS[I].SRC_ELT is really SRC_EQV_ELT. */
|
||
if (GET_CODE (dest) == STRICT_LOW_PART)
|
||
dest = SUBREG_REG (XEXP (dest, 0));
|
||
|
||
if (GET_CODE (dest) == REG)
|
||
/* Registers must also be inserted into chains for quantities. */
|
||
if (insert_regs (dest, sets[i].src_elt, 1))
|
||
/* If `insert_regs' changes something, the hash code must be
|
||
recalculated. */
|
||
sets[i].dest_hash_code = HASHREG (dest);
|
||
|
||
if (GET_CODE (dest) == SUBREG)
|
||
/* Registers must also be inserted into chains for quantities. */
|
||
if (insert_regs (dest, sets[i].src_elt, 1))
|
||
/* If `insert_regs' changes something, the hash code must be
|
||
recalculated. */
|
||
sets[i].dest_hash_code
|
||
= canon_hash (dest, GET_MODE (dest)) % NBUCKETS;
|
||
|
||
elt = insert (dest, sets[i].src_elt, sets[i].dest_hash_code, GET_MODE (dest));
|
||
elt->in_memory = GET_CODE (sets[i].inner_dest) == MEM;
|
||
if (elt->in_memory)
|
||
{
|
||
elt->in_struct = (MEM_IN_STRUCT_P (sets[i].inner_dest)
|
||
|| sets[i].inner_dest != SET_DEST (sets[i].rtl));
|
||
}
|
||
}
|
||
|
||
/* Special handling for (set REG0 REG1)
|
||
where REG0 is the "cheapest", cheaper than REG1.
|
||
After cse, REG1 will probably not be used in the sequel,
|
||
so (if easily done) change this insn to (set REG1 REG0) and
|
||
replace REG1 with REG0 in the previous insn that computed their value.
|
||
Then REG1 will become a dead store and won't cloud the situation
|
||
for later optimizations. */
|
||
if (n_sets == 1 && sets[0].rtl && GET_CODE (SET_DEST (sets[0].rtl)) == REG
|
||
&& GET_CODE (SET_SRC (sets[0].rtl)) == REG
|
||
&& rtx_equal_p (canon_reg (SET_SRC (sets[0].rtl)), SET_DEST (sets[0].rtl)))
|
||
{
|
||
rtx prev = PREV_INSN (insn);
|
||
while (prev && GET_CODE (prev) == NOTE)
|
||
prev = PREV_INSN (prev);
|
||
|
||
if (prev && GET_CODE (prev) == INSN && GET_CODE (PATTERN (prev)) == SET
|
||
&& SET_DEST (PATTERN (prev)) == SET_SRC (sets[0].rtl))
|
||
{
|
||
rtx dest = SET_DEST (sets[0].rtl);
|
||
SET_DEST (PATTERN (prev)) = dest;
|
||
SET_DEST (sets[0].rtl) = SET_SRC (sets[0].rtl);
|
||
SET_SRC (sets[0].rtl) = dest;
|
||
}
|
||
}
|
||
|
||
/* Did this insn become an unconditional branch or become a no-op? */
|
||
if (GET_CODE (insn) == JUMP_INSN
|
||
&& GET_CODE (x) == SET
|
||
&& SET_DEST (x) == pc_rtx)
|
||
{
|
||
if (SET_SRC (x) == pc_rtx)
|
||
{
|
||
PUT_CODE (insn, NOTE);
|
||
NOTE_LINE_NUMBER (insn) = NOTE_INSN_DELETED;
|
||
NOTE_SOURCE_FILE (insn) = 0;
|
||
cse_jumps_altered = 1;
|
||
/* If previous insn just set CC0 for us, delete it too. */
|
||
if (prev_insn_cc0 != 0 || prev_insn_explicit_cc0 != 0)
|
||
{
|
||
PUT_CODE (prev_insn, NOTE);
|
||
NOTE_LINE_NUMBER (prev_insn) = NOTE_INSN_DELETED;
|
||
NOTE_SOURCE_FILE (prev_insn) = 0;
|
||
}
|
||
/* One less use of the label this insn used to jump to. */
|
||
--LABEL_NUSES (JUMP_LABEL (insn));
|
||
}
|
||
else if (GET_CODE (SET_SRC (x)) == LABEL_REF)
|
||
{
|
||
rtx label;
|
||
|
||
emit_barrier_after (insn);
|
||
cse_jumps_altered = 1;
|
||
/* If previous insn just set CC0 for us, delete it too. */
|
||
if (prev_insn_cc0 != 0 || prev_insn_explicit_cc0 != 0)
|
||
{
|
||
PUT_CODE (prev_insn, NOTE);
|
||
NOTE_LINE_NUMBER (prev_insn) = NOTE_INSN_DELETED;
|
||
NOTE_SOURCE_FILE (prev_insn) = 0;
|
||
}
|
||
/* If jump target is the following label, and this is only use of it,
|
||
skip direct to that label and continue optimizing there. */
|
||
label = insn;
|
||
while (label != 0 && GET_CODE (label) != CODE_LABEL)
|
||
label = NEXT_INSN (label);
|
||
if (label == XEXP (SET_SRC (x), 0)
|
||
&& LABEL_NUSES (label) == 1)
|
||
cse_skip_to_next_block = 1;
|
||
}
|
||
}
|
||
|
||
/* If this insn used to store a value based on CC0 but now value is constant,
|
||
and the previous insn just set CC0 for us, delete previous insn.
|
||
Here we use the fact that nothing expects CC0 to be valid over an insn,
|
||
which is true until the final pass. */
|
||
if (GET_CODE (x) == SET && prev_insn_cc0
|
||
&& CONSTANT_P (SET_SRC (x)))
|
||
{
|
||
PUT_CODE (prev_insn, NOTE);
|
||
NOTE_LINE_NUMBER (prev_insn) = NOTE_INSN_DELETED;
|
||
NOTE_SOURCE_FILE (prev_insn) = 0;
|
||
}
|
||
|
||
prev_insn_explicit_cc0 = this_insn_explicit_cc0;
|
||
prev_insn_cc0 = this_insn_cc0;
|
||
prev_insn = insn;
|
||
}
|
||
|
||
/* Store 1 in *WRITES_PTR for those categories of memory ref
|
||
that must be invalidated when the expression WRITTEN is stored in.
|
||
If WRITTEN is null, say everything must be invalidated. */
|
||
|
||
static void
|
||
note_mem_written (written, writes_ptr)
|
||
rtx written;
|
||
struct write_data *writes_ptr;
|
||
{
|
||
static struct write_data everything = {1, 1, 1};
|
||
|
||
if (written == 0)
|
||
*writes_ptr = everything;
|
||
else if (GET_CODE (written) == MEM)
|
||
{
|
||
/* Pushing or popping the stack invalidates nothing. */
|
||
rtx addr = XEXP (written, 0);
|
||
if ((GET_CODE (addr) == PRE_DEC || GET_CODE (addr) == PRE_INC
|
||
|| GET_CODE (addr) == POST_DEC || GET_CODE (addr) == POST_INC)
|
||
&& GET_CODE (XEXP (addr, 0)) == REG
|
||
&& REGNO (XEXP (addr, 0)) == STACK_POINTER_REGNUM)
|
||
return;
|
||
if (GET_MODE (written) == BLKmode)
|
||
*writes_ptr = everything;
|
||
else if (cse_rtx_addr_varies_p (written))
|
||
{
|
||
/* A varying address that is a sum indicates an array element,
|
||
and that's just as good as a structure element
|
||
in implying that we need not invalidate scalar variables. */
|
||
if (!(MEM_IN_STRUCT_P (written)
|
||
|| GET_CODE (XEXP (written, 0)) == PLUS))
|
||
writes_ptr->all = 1;
|
||
writes_ptr->nonscalar = 1;
|
||
}
|
||
writes_ptr->var = 1;
|
||
}
|
||
}
|
||
|
||
/* Perform invalidation on the basis of everything about an insn
|
||
except for invalidating the actual places that are SET in it.
|
||
This includes the places CLOBBERed, and anything that might
|
||
alias with something that is SET or CLOBBERed.
|
||
|
||
W points to the writes_memory for this insn, a struct write_data
|
||
saying which kinds of memory references must be invalidated.
|
||
X is the pattern of the insn. */
|
||
|
||
static void
|
||
invalidate_from_clobbers (w, x)
|
||
struct write_data *w;
|
||
rtx x;
|
||
{
|
||
/* If W->var is not set, W specifies no action.
|
||
If W->all is set, this step gets all memory refs
|
||
so they can be ignored in the rest of this function. */
|
||
if (w->var)
|
||
invalidate_memory (w);
|
||
|
||
if (GET_CODE (x) == CLOBBER)
|
||
{
|
||
rtx ref = XEXP (x, 0);
|
||
if (ref
|
||
&& (GET_CODE (ref) == REG || GET_CODE (ref) == SUBREG
|
||
|| (GET_CODE (ref) == MEM && ! w->all)))
|
||
invalidate (ref);
|
||
}
|
||
else if (GET_CODE (x) == PARALLEL)
|
||
{
|
||
register int i;
|
||
for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
|
||
{
|
||
register rtx y = XVECEXP (x, 0, i);
|
||
if (GET_CODE (y) == CLOBBER)
|
||
{
|
||
rtx ref = XEXP (y, 0);
|
||
if (ref
|
||
&&(GET_CODE (ref) == REG || GET_CODE (ref) == SUBREG
|
||
|| (GET_CODE (ref) == MEM && !w->all)))
|
||
invalidate (ref);
|
||
}
|
||
}
|
||
}
|
||
}
|
||
|
||
/* Find the end of INSN's basic block, and return the cuid of its last insn
|
||
and the total number of SETs in all the insns of the block. */
|
||
|
||
struct cse_basic_block_data { int cuid, nsets; rtx last; };
|
||
|
||
static struct cse_basic_block_data
|
||
cse_end_of_basic_block (insn)
|
||
rtx insn;
|
||
{
|
||
rtx p = insn;
|
||
struct cse_basic_block_data val;
|
||
int nsets = 0;
|
||
int last_uid = 0;
|
||
|
||
/* Scan to end of this basic block. */
|
||
while (p && GET_CODE (p) != CODE_LABEL)
|
||
{
|
||
/* Don't cse out the end of a loop. This makes a difference
|
||
only for the unusual loops that always execute at least once;
|
||
all other loops have labels there so we will stop in any case.
|
||
Cse'ing out the end of the loop is dangerous because it
|
||
might cause an invariant expression inside the loop
|
||
to be reused after the end of the loop. This would make it
|
||
hard to move the expression out of the loop in loop.c,
|
||
especially if it is one of several equivalent expressions
|
||
and loop.c would like to eliminate it.
|
||
The occasional optimizations lost by this will all come back
|
||
if loop and cse are made to work alternatingly. */
|
||
if (GET_CODE (p) == NOTE
|
||
&& NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_END)
|
||
break;
|
||
|
||
/* Don't cse over a call to setjmp; on some machines (eg vax)
|
||
the regs restored by the longjmp come from
|
||
a later time than the setjmp. */
|
||
if (GET_CODE (p) == NOTE
|
||
&& NOTE_LINE_NUMBER (insn) == NOTE_INSN_SETJMP)
|
||
break;
|
||
|
||
/* A PARALLEL can have lots of SETs in it,
|
||
especially if it is really an ASM_OPERANDS. */
|
||
if (GET_CODE (p) == INSN && GET_CODE (PATTERN (p)) == PARALLEL)
|
||
nsets += XVECLEN (PATTERN (p), 0);
|
||
else
|
||
nsets += 1;
|
||
|
||
last_uid = INSN_UID (p);
|
||
p = NEXT_INSN (p);
|
||
}
|
||
val.cuid = uid_cuid[last_uid];
|
||
val.nsets = nsets;
|
||
val.last = p;
|
||
|
||
return val;
|
||
}
|
||
|
||
static rtx cse_basic_block ();

/* Perform cse on the instructions of a function.
   F is the first instruction.
   NREGS is one plus the highest pseudo-reg number used in the instruction.

   Returns 1 if jump_optimize should be redone due to simplifications
   in conditional jump instructions.  */

int
cse_main (f, nregs)
     /* f is the first instruction of a chain of insns for one function */
     rtx f;
     /* nregs is the total number of registers used in it */
     int nregs;
{
  register rtx insn = f;
  register int i;

  cse_jumps_altered = 0;

  init_recog ();

  max_reg = nregs;

  /* Per-register scratch vectors, all indexed by register number.
     ALL_MINUS_ONE and CONSEC_INTS are constant tables used for
     fast reinitialization elsewhere.  */
  all_minus_one = (int *) alloca (nregs * sizeof (int));
  consec_ints = (int *) alloca (nregs * sizeof (int));
  for (i = 0; i < nregs; i++)
    {
      all_minus_one[i] = -1;
      consec_ints[i] = i;
    }

  reg_next_eqv = (int *) alloca (nregs * sizeof (int));
  reg_prev_eqv = (int *) alloca (nregs * sizeof (int));
  reg_qty = (int *) alloca (nregs * sizeof (int));
  reg_rtx = (rtx *) alloca (nregs * sizeof (rtx));
  reg_in_table = (int *) alloca (nregs * sizeof (int));
  reg_tick = (int *) alloca (nregs * sizeof (int));

  /* Discard all the free elements of the previous function
     since they are allocated in the temporary obstack.  */
  bzero (table, sizeof table);
  free_element_chain = 0;
  n_elements_made = 0;

  /* Find the largest uid.  */

  for (insn = f, i = 0; insn; insn = NEXT_INSN (insn))
    if (INSN_UID (insn) > i)
      i = INSN_UID (insn);

  uid_cuid = (short *) alloca ((i + 1) * sizeof (short));
  bzero (uid_cuid, (i + 1) * sizeof (short));

  /* Compute the mapping from uids to cuids.
     CUIDs are numbers assigned to insns, like uids,
     except that cuids increase monotonically through the code.
     Don't assign cuids to line-number NOTEs, so that the distance in cuids
     between two insns is not affected by -g.  */

  for (insn = f, i = 0; insn; insn = NEXT_INSN (insn))
    {
      if (GET_CODE (insn) != NOTE
	  || NOTE_LINE_NUMBER (insn) < 0)
	INSN_CUID (insn) = ++i;
      else
	/* Give a line number note the same cuid as preceding insn.  */
	INSN_CUID (insn) = i;
    }

  /* Loop over basic blocks.
     Compute the maximum number of qty's needed for each basic block
     (which is 2 for each SET).  */
  insn = f;
  while (insn)
    {
      struct cse_basic_block_data val;

      val = cse_end_of_basic_block (insn);

      cse_basic_block_end = val.cuid;
      cse_basic_block_start = INSN_CUID (insn);
      max_qty = val.nsets * 2;

      /* Make MAX_QTY bigger to give us room to optimize
	 past the end of this basic block, if that should prove useful.  */
      if (max_qty < 500)
	max_qty = 500;

      max_qty += max_reg;

      /* cse_basic_block returns the insn to resume scanning at,
	 which may be past VAL.LAST if it merged following blocks.  */
      insn = cse_basic_block (insn, val.last);
#ifdef USE_C_ALLOCA
      /* With the C alloca, free the block-local vectors now.  */
      alloca (0);
#endif
    }

  /* Tell refers_to_mem_p that qty_const info is not available.  */
  qty_const = 0;

  if (max_elements_made < n_elements_made)
    max_elements_made = n_elements_made;

  return cse_jumps_altered;
}
|
||
|
||
/* Perform cse on the insns from FROM up to (but not including) TO,
   extending past TO into following blocks when that proves safe.
   Return the insn at which scanning should resume, or 0 at end of chain.  */

static rtx
cse_basic_block (from, to)
     register rtx from, to;
{
  register rtx insn;
  /* Vectors for the quantity tables, sized by cse_main's MAX_QTY.  */
  int *qv1 = (int *) alloca (max_qty * sizeof (int));
  int *qv2 = (int *) alloca (max_qty * sizeof (int));
  rtx *qv3 = (rtx *) alloca (max_qty * sizeof (rtx));

  qty_first_reg = qv1;
  qty_last_reg = qv2;
  qty_const = qv3;
  qty_const_insn = (rtx *) alloca (max_qty * sizeof (rtx));

  new_basic_block ();

  cse_skip_to_next_block = 0;

  for (insn = from; insn != to; insn = NEXT_INSN (insn))
    {
      register enum rtx_code code;

      code = GET_CODE (insn);

      if (code == INSN || code == JUMP_INSN || code == CALL_INSN)
	cse_insn (insn);

      /* Memory, and some registers, are invalidated by subroutine calls.  */
      if (code == CALL_INSN)
	{
	  register int i;
	  static struct write_data everything = {1, 1, 1};
	  invalidate_memory (&everything);
	  /* Drop equivalences for all call-clobbered hard registers.  */
	  for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
	    if (call_used_regs[i] && reg_rtx[i]
		&& i != FRAME_POINTER_REGNUM
		&& i != ARG_POINTER_REGNUM)
	      invalidate (reg_rtx[i]);
	}

      /* Loop beginnings are often followed by jumps
	 (that enter the loop above the endtest).
	 See if we can prove the loop will be executed at least once;
	 if so, delete the jump.  Also perhaps we can prove loop
	 will never be executed and delete the entire thing.  */
      if (code == NOTE
	  && NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_BEG
	  && GET_CODE (NEXT_INSN (insn)) == JUMP_INSN)
	{
	  predecide_loop_entry (insn);
	  /* Whether that jump was deleted or not,
	     it certainly is the end of the basic block.
	     Since the jump is unconditional,
	     it requires no further processing here.  */
	  break;
	}

      /* See if it is ok to keep on going past the label
	 which used to end our basic block.  */
      if (cse_skip_to_next_block
	  || (to != 0 && NEXT_INSN (insn) == to && LABEL_NUSES (to) == 0))
	{
	  struct cse_basic_block_data val;

	  /* Skip the remaining insns in this block.  */
	  cse_skip_to_next_block = 0;
	  insn = to;
	  if (insn == 0)
	    break;

	  /* Find the end of the following block.  */
	  val = cse_end_of_basic_block (NEXT_INSN (insn));

	  /* If the tables we allocated have enough space left
	     to handle all the SETs in the next basic block,
	     continue through it.  Otherwise, return,
	     and that block will be scanned individually.  */
	  if (val.nsets * 2 + next_qty > max_qty)
	    break;

	  cse_basic_block_end = val.cuid;
	  to = val.last;
	}
    }

  /* A quantity-number overrun here would mean cse_main's sizing
     of the vectors above was wrong.  */
  if (next_qty > max_qty)
    abort ();

  return to ? NEXT_INSN (to) : 0;
}
|