/* Common subexpression elimination for GNU compiler.
   Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001, 2002, 2003, 2004, 2005 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to the Free
Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301, USA.  */

#include "config.h"
/* stdio.h must precede rtl.h for FFS.  */
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "rtl.h"
#include "tm_p.h"
#include "hard-reg-set.h"
#include "regs.h"
#include "basic-block.h"
#include "flags.h"
#include "real.h"
#include "insn-config.h"
#include "recog.h"
#include "function.h"
#include "expr.h"
#include "toplev.h"
#include "output.h"
#include "ggc.h"
#include "timevar.h"
#include "except.h"
#include "target.h"
#include "params.h"
#include "rtlhooks-def.h"
#include "tree-pass.h"

/* The basic idea of common subexpression elimination is to go
   through the code, keeping a record of expressions that would
   have the same value at the current scan point, and replacing
   expressions encountered with the cheapest equivalent expression.

   It is too complicated to keep track of the different possibilities
   when control paths merge in this code; so, at each label, we forget all
   that is known and start fresh.  This can be described as processing each
   extended basic block separately.  We have a separate pass to perform
   global CSE.

   Note CSE can turn a conditional or computed jump into a nop or
   an unconditional jump.  When this occurs we arrange to run the jump
   optimizer after CSE to delete the unreachable code.

   We use two data structures to record the equivalent expressions:
   a hash table for most expressions, and a vector of "quantity
   numbers" to record equivalent (pseudo) registers.

   The use of the special data structure for registers is desirable
   because it is faster.  It is possible because register references
   contain a fairly small number, the register number, taken from
   a contiguously allocated series, and two register references are
   identical if they have the same number.  General expressions
   do not have any such thing, so the only way to retrieve the
   information recorded on an expression other than a register
   is to keep it in a hash table.

Registers and "quantity numbers":

   At the start of each basic block, all of the (hardware and pseudo)
   registers used in the function are given distinct quantity
   numbers to indicate their contents.  During scan, when the code
   copies one register into another, we copy the quantity number.
   When a register is loaded in any other way, we allocate a new
   quantity number to describe the value generated by this operation.
   `REG_QTY (N)' records what quantity register N is currently thought
   of as containing.

   All real quantity numbers are greater than or equal to zero.
   If register N has not been assigned a quantity, `REG_QTY (N)' will
   equal -N - 1, which is always negative.

   Quantity numbers below zero do not exist and none of the `qty_table'
   entries should be referenced with a negative index.

   We also maintain a bidirectional chain of registers for each
   quantity number.  The `qty_table' members `first_reg' and `last_reg',
   and `reg_eqv_table' members `next' and `prev' hold these chains.

   The first register in a chain is the one whose lifespan is least local.
   Among equals, it is the one that was seen first.
   We replace any equivalent register with that one.

   If two registers have the same quantity number, REG expressions in
   the quantity's `mode' must be in the hash table for both registers
   and must be in the same class.

   The converse is not true.  Since hard registers may be referenced in
   any mode, two REG expressions might be equivalent in the hash table
   but not have the same quantity number if the quantity of one of the
   registers does not have the same mode as those expressions.

Constants and quantity numbers:

   When a quantity has a known constant value, that value is stored
   in the appropriate qty_table `const_rtx'.  This is in addition to
   putting the constant in the hash table as is usual for non-regs.

   Whether a reg or a constant is preferred is determined by the configuration
   macro CONST_COSTS and will often depend on the constant value.  In any
   event, expressions containing constants can be simplified by fold_rtx.

   When a quantity has a known nearly constant value (such as an address
   of a stack slot), that value is stored in the appropriate qty_table
   `const_rtx'.

   Integer constants don't have a machine mode.  However, cse
   determines the intended machine mode from the destination
   of the instruction that moves the constant.  The machine mode
   is recorded in the hash table along with the actual RTL
   constant expression so that different modes are kept separate.

Other expressions:

   To record known equivalences among expressions in general
   we use a hash table called `table'.  It has a fixed number of buckets
   that contain chains of `struct table_elt' elements for expressions.
   These chains connect the elements whose expressions have the same
   hash codes.

   Other chains through the same elements connect the elements which
   currently have equivalent values.

   Register references in an expression are canonicalized before hashing
   the expression.  This is done using `reg_qty' and qty_table `first_reg'.
   The hash code of a register reference is computed using the quantity
   number, not the register number.

   When the value of an expression changes, it is necessary to remove from the
   hash table not just that expression but all expressions whose values
   could be different as a result.

     1. If the value changing is in memory, except in special cases
     ANYTHING referring to memory could be changed.  That is because
     nobody knows where a pointer does not point.
     The function `invalidate_memory' removes what is necessary.

     The special cases are when the address is constant or is
     a constant plus a fixed register such as the frame pointer
     or a static chain pointer.  When such addresses are stored in,
     we can tell exactly which other such addresses must be invalidated
     due to overlap.  `invalidate' does this.
     All expressions that refer to non-constant
     memory addresses are also invalidated.  `invalidate_memory' does this.

     2. If the value changing is a register, all expressions
     containing references to that register, and only those,
     must be removed.

   Because searching the entire hash table for expressions that contain
   a register is very slow, we try to figure out when it isn't necessary.
   Precisely, this is necessary only when expressions have been
   entered in the hash table using this register, and then the value has
   changed, and then another expression wants to be added to refer to
   the register's new value.  This sequence of circumstances is rare
   within any one basic block.

   `REG_TICK' and `REG_IN_TABLE', accessors for members of
   cse_reg_info, are used to detect this case.  REG_TICK (i) is
   incremented whenever a value is stored in register i.
   REG_IN_TABLE (i) holds -1 if no references to register i have been
   entered in the table; otherwise, it contains the value REG_TICK (i)
   had when the references were entered.  If we want to enter a
   reference and REG_IN_TABLE (i) != REG_TICK (i), we must scan and
   remove old references.  Until we want to enter a new entry, the
   mere fact that the two values don't match causes the entries to be
   ignored if anyone tries to match them.

   Registers themselves are entered in the hash table as well as in
   the equivalent-register chains.  However, `REG_TICK' and
   `REG_IN_TABLE' do not apply to expressions which are simple
   register references.  These expressions are removed from the table
   immediately when they become invalid, and this can be done even if
   we do not immediately search for all the expressions that refer to
   the register.

   A CLOBBER rtx in an instruction invalidates its operand for further
   reuse.  A CLOBBER or SET rtx whose operand is a MEM:BLK
   invalidates everything that resides in memory.

Related expressions:

   Constant expressions that differ only by an additive integer
   are called related.  When a constant expression is put in
   the table, the related expression with no constant term
   is also entered.  These are made to point at each other
   so that it is possible to find out if there exists any
   register equivalent to an expression related to a given expression.  */
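
/* A minimal illustrative sketch, kept under "#if 0" so it is never
   compiled: it shows the quantity-number scheme described above applied
   to a single SET.  The function name is invented for illustration; the
   real work is done in cse_insn, which also invalidates the old
   contents of DEST first.  */
#if 0
static void
example_record_set (rtx dest, rtx src)
{
  if (REG_P (dest) && REG_P (src) && REGNO_QTY_VALID_P (REGNO (src)))
    /* A register-to-register copy propagates the quantity number.  */
    make_regs_eqv (REGNO (dest), REGNO (src));
  else if (REG_P (dest))
    /* A load from anything else gets a fresh quantity.  */
    make_new_qty (REGNO (dest), GET_MODE (dest));
}
#endif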

/* Length of qty_table vector.  We know in advance we will not need
   a quantity number this big.  */

static int max_qty;

/* Next quantity number to be allocated.
   This is 1 + the largest number needed so far.  */

static int next_qty;

/* Per-qty information tracking.

   `first_reg' and `last_reg' track the head and tail of the
   chain of registers which currently contain this quantity.

   `mode' contains the machine mode of this quantity.

   `const_rtx' holds the rtx of the constant value of this
   quantity, if known.  A sum of the frame/arg pointer
   and a constant can also be entered here.  When this holds
   a known value, `const_insn' is the insn which stored the
   constant value.

   `comparison_{code,const,qty}' are used to track when a
   comparison between a quantity and some constant or register has
   been passed.  In such a case, we know the results of the comparison
   in case we see it again.  These members record a comparison that
   is known to be true.  `comparison_code' holds the rtx code of such
   a comparison, else it is set to UNKNOWN and the other two
   comparison members are undefined.  `comparison_const' holds
   the constant being compared against, or zero if the comparison
   is not against a constant.  `comparison_qty' holds the quantity
   being compared against when the result is known.  If the comparison
   is not with a register, `comparison_qty' is -1.  */

struct qty_table_elem
{
  rtx const_rtx;
  rtx const_insn;
  rtx comparison_const;
  int comparison_qty;
  unsigned int first_reg, last_reg;
  /* The sizes of these fields should match the sizes of the
     code and mode fields of struct rtx_def (see rtl.h).  */
  ENUM_BITFIELD(rtx_code) comparison_code : 16;
  ENUM_BITFIELD(machine_mode) mode : 8;
};
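
/* An illustrative sketch only (never compiled): one way the recorded
   comparison members might be consulted.  If a comparison of quantity Q
   against a constant was earlier found to be true, an identical later
   test must also be true.  The helper name is invented for
   illustration.  */
#if 0
static int
example_known_true_comparison_p (int q, enum rtx_code code, rtx const_arg)
{
  struct qty_table_elem *ent = &qty_table[q];

  return (ent->comparison_code == code
	  && ent->comparison_const != 0
	  && rtx_equal_p (ent->comparison_const, const_arg));
}
#endif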

/* The table of all qtys, indexed by qty number.  */
static struct qty_table_elem *qty_table;

/* Structure used to pass arguments via for_each_rtx to function
   cse_change_cc_mode.  */
struct change_cc_mode_args
{
  rtx insn;
  rtx newreg;
};

#ifdef HAVE_cc0
/* For machines that have a CC0, we do not record its value in the hash
   table since its use is guaranteed to be the insn immediately following
   its definition and any other insn is presumed to invalidate it.

   Instead, we store below the value last assigned to CC0.  If it should
   happen to be a constant, it is stored in preference to the actual
   assigned value.  In case it is a constant, we store the mode in which
   the constant should be interpreted.  */

static rtx prev_insn_cc0;
static enum machine_mode prev_insn_cc0_mode;

/* Previous actual insn.  0 if at first insn of basic block.  */

static rtx prev_insn;
#endif

/* Insn being scanned.  */

static rtx this_insn;

/* Indexed by register number, gives the number of the next (or
   previous) register in the chain of registers sharing the same
   value.

   Or -1 if this register is at the end of the chain.

   If REG_QTY (N) == -N - 1, reg_eqv_table[N].next is undefined.  */

/* Per-register equivalence chain.  */
struct reg_eqv_elem
{
  int next, prev;
};

/* The table of all register equivalence chains.  */
static struct reg_eqv_elem *reg_eqv_table;
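
/* An illustrative sketch only (never compiled): visiting every
   register currently holding quantity Q by following the chain
   described above, head to tail.  The function name is made up.  */
#if 0
static void
example_walk_qty_regs (int q)
{
  int r;

  for (r = qty_table[q].first_reg; r != -1; r = reg_eqv_table[r].next)
    /* Every register on the chain maps back to Q.  */
    gcc_assert (REG_QTY (r) == q);
}
#endif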

struct cse_reg_info
{
  /* The timestamp at which this register is initialized.  */
  unsigned int timestamp;

  /* The quantity number of the register's current contents.  */
  int reg_qty;

  /* The number of times the register has been altered in the current
     basic block.  */
  int reg_tick;

  /* The REG_TICK value at which rtx's containing this register are
     valid in the hash table.  If this does not equal the current
     reg_tick value, such expressions existing in the hash table are
     invalid.  */
  int reg_in_table;

  /* The SUBREG that was set when REG_TICK was last incremented.  Set
     to -1 if the last store was to the whole register, not a subreg.  */
  unsigned int subreg_ticked;
};
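
/* An illustrative sketch only (never compiled) of the staleness test
   that the reg_tick/reg_in_table pair supports (see the overview
   comment near the top of this file); mention_regs performs exactly
   this check before trusting hashed references to register I.  */
#if 0
static int
example_reg_refs_stale_p (unsigned int i)
{
  /* References were entered at some point, but the register has been
     set again since, so those hash table entries are out of date.  */
  return REG_IN_TABLE (i) >= 0 && REG_IN_TABLE (i) != REG_TICK (i);
}
#endif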

/* A table of cse_reg_info indexed by register numbers.  */
static struct cse_reg_info *cse_reg_info_table;

/* The size of the above table.  */
static unsigned int cse_reg_info_table_size;

/* The index of the first entry that has not been initialized.  */
static unsigned int cse_reg_info_table_first_uninitialized;

/* The timestamp at the beginning of the current run of
   cse_basic_block; it is incremented at the start of each such run.
   The timestamp field of a
   cse_reg_info entry matches the value of this variable if and only
   if the entry has been initialized during the current run of
   cse_basic_block.  */
static unsigned int cse_reg_info_timestamp;

/* A HARD_REG_SET containing all the hard registers for which there is
   currently a REG expression in the hash table.  Note the difference
   from the above variables, which indicate if the REG is mentioned in some
   expression in the table.  */

static HARD_REG_SET hard_regs_in_table;

/* CUID of insn that starts the basic block currently being cse-processed.  */

static int cse_basic_block_start;

/* CUID of insn that ends the basic block currently being cse-processed.  */

static int cse_basic_block_end;

/* Vector mapping INSN_UIDs to cuids.
   The cuids are like uids but always increase monotonically.
   We use them to see whether a reg is used outside a given basic block.  */

static int *uid_cuid;

/* Highest UID in UID_CUID.  */
static int max_uid;

/* Get the cuid of an insn.  */

#define INSN_CUID(INSN) (uid_cuid[INSN_UID (INSN)])
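
/* An illustrative sketch only (never compiled): cuids make "is this
   pseudo used outside the current basic block?" a pair of integer
   comparisons.  make_regs_eqv applies this same test when picking the
   canonical register for a quantity.  The function name is made up.  */
#if 0
static int
example_reg_used_outside_block_p (unsigned int regno)
{
  return (uid_cuid[REGNO_LAST_UID (regno)] > cse_basic_block_end
	  || uid_cuid[REGNO_FIRST_UID (regno)] < cse_basic_block_start);
}
#endif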

/* Nonzero if this pass has made changes, and therefore it's
   worthwhile to run the garbage collector.  */

static int cse_altered;

/* Nonzero if cse has altered conditional jump insns
   in such a way that jump optimization should be redone.  */

static int cse_jumps_altered;

/* Nonzero if we put a LABEL_REF into the hash table for an INSN without a
   REG_LABEL; if so, we have to rerun jump after CSE to put in the note.  */
static int recorded_label_ref;

/* canon_hash stores 1 in do_not_record
   if it notices a reference to CC0, PC, or some other volatile
   subexpression.  */

static int do_not_record;

/* canon_hash stores 1 in hash_arg_in_memory
   if it notices a reference to memory within the expression being hashed.  */

static int hash_arg_in_memory;

/* The hash table contains buckets which are chains of `struct table_elt's,
   each recording one expression's information.
   That expression is in the `exp' field.

   The canon_exp field contains a canonical (from the point of view of
   alias analysis) version of the `exp' field.

   Those elements with the same hash code are chained in both directions
   through the `next_same_hash' and `prev_same_hash' fields.

   Each set of expressions with equivalent values
   are on a two-way chain through the `next_same_value'
   and `prev_same_value' fields, and all point with
   the `first_same_value' field at the first element in
   that chain.  The chain is in order of increasing cost.
   Each element's cost value is in its `cost' field.

   The `in_memory' field is nonzero for elements that
   involve any reference to memory.  These elements are removed
   whenever a write is done to an unidentified location in memory.
   To be safe, we assume that a memory address is unidentified unless
   the address is either a symbol constant or a constant plus
   the frame pointer or argument pointer.

   The `related_value' field is used to connect related expressions
   (that differ by adding an integer).
   The related expressions are chained in a circular fashion.
   `related_value' is zero for expressions for which this
   chain is not useful.

   The `cost' field stores the cost of this element's expression.
   The `regcost' field stores the value returned by approx_reg_cost for
   this element's expression.

   The `is_const' flag is set if the element is a constant (including
   a fixed address).

   The `flag' field is used as a temporary during some search routines.

   The `mode' field is usually the same as GET_MODE (`exp'), but
   if `exp' is a CONST_INT and has no machine mode then the `mode'
   field is the mode it was being used as.  Each constant is
   recorded separately for each mode it is used with.  */

struct table_elt
{
  rtx exp;
  rtx canon_exp;
  struct table_elt *next_same_hash;
  struct table_elt *prev_same_hash;
  struct table_elt *next_same_value;
  struct table_elt *prev_same_value;
  struct table_elt *first_same_value;
  struct table_elt *related_value;
  int cost;
  int regcost;
  /* The size of this field should match the size
     of the mode field of struct rtx_def (see rtl.h).  */
  ENUM_BITFIELD(machine_mode) mode : 8;
  char in_memory;
  char is_const;
  char flag;
};

/* We don't want a lot of buckets, because we rarely have very many
   things stored in the hash table, and a lot of buckets slows
   down a lot of loops that happen frequently.  */
#define HASH_SHIFT	5
#define HASH_SIZE	(1 << HASH_SHIFT)
#define HASH_MASK	(HASH_SIZE - 1)

/* Compute hash code of X in mode M.  Special-case the case where X is a
   pseudo register (hard registers may require `do_not_record' to be set).  */

#define HASH(X, M)	\
 ((REG_P (X) && REGNO (X) >= FIRST_PSEUDO_REGISTER	\
  ? (((unsigned) REG << 7) + (unsigned) REG_QTY (REGNO (X)))	\
  : canon_hash (X, M)) & HASH_MASK)

/* Like HASH, but without side-effects.  */
#define SAFE_HASH(X, M)	\
 ((REG_P (X) && REGNO (X) >= FIRST_PSEUDO_REGISTER	\
  ? (((unsigned) REG << 7) + (unsigned) REG_QTY (REGNO (X)))	\
  : safe_hash (X, M)) & HASH_MASK)
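
/* An illustrative sketch only (never compiled) of the usual lookup
   sequence built on these macros.  do_not_record and hash_arg_in_memory
   are cleared first because canon_hash, which HASH may call, sets them
   as side effects.  The function name is made up.  */
#if 0
static struct table_elt *
example_lookup (rtx x, enum machine_mode mode)
{
  unsigned hash;

  do_not_record = 0;
  hash_arg_in_memory = 0;
  hash = HASH (x, mode);
  if (do_not_record)
    return 0;			/* X contains something volatile.  */
  return lookup (x, hash, mode);
}
#endif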

/* Determine whether register number N is considered a fixed register for the
   purpose of approximating register costs.
   It is desirable to replace other regs with fixed regs, to reduce need for
   non-fixed hard regs.
   A reg wins if it is either the frame pointer or designated as fixed.  */
#define FIXED_REGNO_P(N)  \
  ((N) == FRAME_POINTER_REGNUM || (N) == HARD_FRAME_POINTER_REGNUM \
   || fixed_regs[N] || global_regs[N])

/* Compute cost of X, as stored in the `cost' field of a table_elt.  Fixed
   hard registers and pointers into the frame are the cheapest with a cost
   of 0.  Next come pseudos with a cost of 1 and other hard registers with
   a cost of 2.  Aside from these special cases, call `rtx_cost'.  */

#define CHEAP_REGNO(N)							\
  (REGNO_PTR_FRAME_P(N)							\
   || (HARD_REGISTER_NUM_P (N)						\
       && FIXED_REGNO_P (N) && REGNO_REG_CLASS (N) != NO_REGS))

#define COST(X) (REG_P (X) ? 0 : notreg_cost (X, SET))
#define COST_IN(X,OUTER) (REG_P (X) ? 0 : notreg_cost (X, OUTER))

/* Get the number of times this register has been updated in this
   basic block.  */

#define REG_TICK(N) (get_cse_reg_info (N)->reg_tick)

/* Get the point at which REG was recorded in the table.  */

#define REG_IN_TABLE(N) (get_cse_reg_info (N)->reg_in_table)

/* Get the SUBREG set at the last increment to REG_TICK (-1 if not a
   SUBREG).  */

#define SUBREG_TICKED(N) (get_cse_reg_info (N)->subreg_ticked)

/* Get the quantity number for REG.  */

#define REG_QTY(N) (get_cse_reg_info (N)->reg_qty)

/* Determine if the quantity number for register X represents a valid index
   into the qty_table.  */

#define REGNO_QTY_VALID_P(N) (REG_QTY (N) >= 0)

static struct table_elt *table[HASH_SIZE];

/* Number of elements in the hash table.  */

static unsigned int table_size;

/* Chain of `struct table_elt's made so far for this function
   but currently removed from the table.  */

static struct table_elt *free_element_chain;

/* Set to the cost of a constant pool reference if one was found for a
   symbolic constant.  If this was found, it means we should try to
   convert constants into constant pool entries if they don't fit in
   the insn.  */

static int constant_pool_entries_cost;
static int constant_pool_entries_regcost;

/* This data describes a block that will be processed by cse_basic_block.  */

struct cse_basic_block_data
{
  /* Lowest CUID value of insns in block.  */
  int low_cuid;
  /* Highest CUID value of insns in block.  */
  int high_cuid;
  /* Total number of SETs in block.  */
  int nsets;
  /* Last insn in the block.  */
  rtx last;
  /* Size of current branch path, if any.  */
  int path_size;
  /* Current branch path, indicating which branches will be taken.  */
  struct branch_path
    {
      /* The branch insn.  */
      rtx branch;
      /* Whether it should be taken or not.  AROUND is the same as taken
	 except that it is used when the destination label is not preceded
	 by a BARRIER.  */
      enum taken {PATH_TAKEN, PATH_NOT_TAKEN, PATH_AROUND} status;
    } *path;
};

static bool fixed_base_plus_p (rtx x);
static int notreg_cost (rtx, enum rtx_code);
static int approx_reg_cost_1 (rtx *, void *);
static int approx_reg_cost (rtx);
static int preferable (int, int, int, int);
static void new_basic_block (void);
static void make_new_qty (unsigned int, enum machine_mode);
static void make_regs_eqv (unsigned int, unsigned int);
static void delete_reg_equiv (unsigned int);
static int mention_regs (rtx);
static int insert_regs (rtx, struct table_elt *, int);
static void remove_from_table (struct table_elt *, unsigned);
static void remove_pseudo_from_table (rtx, unsigned);
static struct table_elt *lookup (rtx, unsigned, enum machine_mode);
static struct table_elt *lookup_for_remove (rtx, unsigned, enum machine_mode);
static rtx lookup_as_function (rtx, enum rtx_code);
static struct table_elt *insert (rtx, struct table_elt *, unsigned,
				 enum machine_mode);
static void merge_equiv_classes (struct table_elt *, struct table_elt *);
static void invalidate (rtx, enum machine_mode);
static int cse_rtx_varies_p (rtx, int);
static void remove_invalid_refs (unsigned int);
static void remove_invalid_subreg_refs (unsigned int, unsigned int,
					enum machine_mode);
static void rehash_using_reg (rtx);
static void invalidate_memory (void);
static void invalidate_for_call (void);
static rtx use_related_value (rtx, struct table_elt *);

static inline unsigned canon_hash (rtx, enum machine_mode);
static inline unsigned safe_hash (rtx, enum machine_mode);
static unsigned hash_rtx_string (const char *);

static rtx canon_reg (rtx, rtx);
static void find_best_addr (rtx, rtx *, enum machine_mode);
static enum rtx_code find_comparison_args (enum rtx_code, rtx *, rtx *,
					   enum machine_mode *,
					   enum machine_mode *);
static rtx fold_rtx (rtx, rtx);
static rtx equiv_constant (rtx);
static void record_jump_equiv (rtx, int);
static void record_jump_cond (enum rtx_code, enum machine_mode, rtx, rtx,
			      int);
static void cse_insn (rtx, rtx);
static void cse_end_of_basic_block (rtx, struct cse_basic_block_data *,
				    int, int);
static int addr_affects_sp_p (rtx);
static void invalidate_from_clobbers (rtx);
static rtx cse_process_notes (rtx, rtx);
static void invalidate_skipped_set (rtx, rtx, void *);
static void invalidate_skipped_block (rtx);
static rtx cse_basic_block (rtx, rtx, struct branch_path *);
static void count_reg_usage (rtx, int *, rtx, int);
static int check_for_label_ref (rtx *, void *);
extern void dump_class (struct table_elt*);
static void get_cse_reg_info_1 (unsigned int regno);
static struct cse_reg_info * get_cse_reg_info (unsigned int regno);
static int check_dependence (rtx *, void *);

static void flush_hash_table (void);
static bool insn_live_p (rtx, int *);
static bool set_live_p (rtx, rtx, int *);
static bool dead_libcall_p (rtx, int *);
static int cse_change_cc_mode (rtx *, void *);
static void cse_change_cc_mode_insn (rtx, rtx);
static void cse_change_cc_mode_insns (rtx, rtx, rtx);
static enum machine_mode cse_cc_succs (basic_block, rtx, rtx, bool);


#undef RTL_HOOKS_GEN_LOWPART
#define RTL_HOOKS_GEN_LOWPART		gen_lowpart_if_possible

static const struct rtl_hooks cse_rtl_hooks = RTL_HOOKS_INITIALIZER;

/* Nonzero if X has the form (PLUS frame-pointer integer).  We check for
   virtual regs here because the simplify_*_operation routines are called
   by integrate.c, which is called before virtual register instantiation.  */

static bool
fixed_base_plus_p (rtx x)
{
  switch (GET_CODE (x))
    {
    case REG:
      if (x == frame_pointer_rtx || x == hard_frame_pointer_rtx)
	return true;
      if (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM])
	return true;
      if (REGNO (x) >= FIRST_VIRTUAL_REGISTER
	  && REGNO (x) <= LAST_VIRTUAL_REGISTER)
	return true;
      return false;

    case PLUS:
      if (GET_CODE (XEXP (x, 1)) != CONST_INT)
	return false;
      return fixed_base_plus_p (XEXP (x, 0));

    default:
      return false;
    }
}

/* Dump the expressions in the equivalence class indicated by CLASSP.
   This function is used only for debugging.  */
void
dump_class (struct table_elt *classp)
{
  struct table_elt *elt;

  fprintf (stderr, "Equivalence chain for ");
  print_rtl (stderr, classp->exp);
  fprintf (stderr, ": \n");

  for (elt = classp->first_same_value; elt; elt = elt->next_same_value)
    {
      print_rtl (stderr, elt->exp);
      fprintf (stderr, "\n");
    }
}

/* Subroutine of approx_reg_cost; called through for_each_rtx.  */

static int
approx_reg_cost_1 (rtx *xp, void *data)
{
  rtx x = *xp;
  int *cost_p = data;

  if (x && REG_P (x))
    {
      unsigned int regno = REGNO (x);

      if (! CHEAP_REGNO (regno))
	{
	  if (regno < FIRST_PSEUDO_REGISTER)
	    {
	      if (SMALL_REGISTER_CLASSES)
		return 1;
	      *cost_p += 2;
	    }
	  else
	    *cost_p += 1;
	}
    }

  return 0;
}

/* Return an estimate of the cost of the registers used in an rtx.
   This is mostly the number of different REG expressions in the rtx;
   however for some exceptions like fixed registers we use a cost of
   0.  If any other hard register reference occurs, return MAX_COST.  */

static int
approx_reg_cost (rtx x)
{
  int cost = 0;

  if (for_each_rtx (&x, approx_reg_cost_1, (void *) &cost))
    return MAX_COST;

  return cost;
}

/* Returns a canonical version of X for use in an address, from the point
   of view that all multiplications are represented as MULT instead of
   the multiply by a power of 2 being represented as ASHIFT.  */

static rtx
canon_for_address (rtx x)
{
  enum rtx_code code;
  enum machine_mode mode;
  rtx new = 0;
  int i;
  const char *fmt;
  
  if (!x)
    return x;
  
  code = GET_CODE (x);
  mode = GET_MODE (x);
  
  switch (code)
    {
    case ASHIFT:
      if (GET_CODE (XEXP (x, 1)) == CONST_INT
	  && INTVAL (XEXP (x, 1)) < GET_MODE_BITSIZE (mode)
	  && INTVAL (XEXP (x, 1)) >= 0)
        {
	  new = canon_for_address (XEXP (x, 0));
	  new = gen_rtx_MULT (mode, new,
			      gen_int_mode ((HOST_WIDE_INT) 1
				            << INTVAL (XEXP (x, 1)),
					    mode));
	}
      break;
    default:
      break;
      
    }
  if (new)
    return new;
  
  /* Now recursively process each operand of this operation.  */
  fmt = GET_RTX_FORMAT (code);
  for (i = 0; i < GET_RTX_LENGTH (code); i++)
    if (fmt[i] == 'e')
      {
	new = canon_for_address (XEXP (x, i));
	XEXP (x, i) = new;
      }
  return x;
}

/* Return a negative value if an rtx A, whose costs are given by COST_A
   and REGCOST_A, is more desirable than an rtx B.
   Return a positive value if A is less desirable, or 0 if the two are
   equally good.  */
static int
preferable (int cost_a, int regcost_a, int cost_b, int regcost_b)
{
  /* First, get rid of cases involving expressions that are entirely
     unwanted.  */
  if (cost_a != cost_b)
    {
      if (cost_a == MAX_COST)
	return 1;
      if (cost_b == MAX_COST)
	return -1;
    }

  /* Avoid extending lifetimes of hardregs.  */
  if (regcost_a != regcost_b)
    {
      if (regcost_a == MAX_COST)
	return 1;
      if (regcost_b == MAX_COST)
	return -1;
    }

  /* Normal operation costs take precedence.  */
  if (cost_a != cost_b)
    return cost_a - cost_b;
  /* Only if these are identical consider effects on register pressure.  */
  if (regcost_a != regcost_b)
    return regcost_a - regcost_b;
  return 0;
}

/* Internal function, to compute cost when X is not a register; called
   from COST macro to keep it simple.  */

static int
notreg_cost (rtx x, enum rtx_code outer)
{
  return ((GET_CODE (x) == SUBREG
	   && REG_P (SUBREG_REG (x))
	   && GET_MODE_CLASS (GET_MODE (x)) == MODE_INT
	   && GET_MODE_CLASS (GET_MODE (SUBREG_REG (x))) == MODE_INT
	   && (GET_MODE_SIZE (GET_MODE (x))
	       < GET_MODE_SIZE (GET_MODE (SUBREG_REG (x))))
	   && subreg_lowpart_p (x)
	   && TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (GET_MODE (x)),
				     GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (x)))))
	  ? 0
	  : rtx_cost (x, outer) * 2);
}


/* Initialize CSE_REG_INFO_TABLE.  */

static void
init_cse_reg_info (unsigned int nregs)
{
  /* Do we need to grow the table?  */
  if (nregs > cse_reg_info_table_size)
    {
      unsigned int new_size;

      if (cse_reg_info_table_size < 2048)
	{
	  /* Compute a new size that is a power of 2 and no smaller
	     than the larger of NREGS and 64.  */
	  new_size = (cse_reg_info_table_size
		      ? cse_reg_info_table_size : 64);

	  while (new_size < nregs)
	    new_size *= 2;
	}
      else
	{
	  /* If we need a big table, allocate just enough to hold
	     NREGS registers.  */
	  new_size = nregs;
	}

      /* Reallocate the table with NEW_SIZE entries.  */
      if (cse_reg_info_table)
	free (cse_reg_info_table);
      cse_reg_info_table = XNEWVEC (struct cse_reg_info, new_size);
      cse_reg_info_table_size = new_size;
      cse_reg_info_table_first_uninitialized = 0;
    }

  /* Do we have all of the first NREGS entries initialized?  */
  if (cse_reg_info_table_first_uninitialized < nregs)
    {
      unsigned int old_timestamp = cse_reg_info_timestamp - 1;
      unsigned int i;

      /* Put the old timestamp on newly allocated entries so that they
	 will all be considered out of date.  We do not touch those
	 entries beyond the first NREGS entries to be nice to the
	 virtual memory.  */
      for (i = cse_reg_info_table_first_uninitialized; i < nregs; i++)
	cse_reg_info_table[i].timestamp = old_timestamp;

      cse_reg_info_table_first_uninitialized = nregs;
    }
}

/* Given REGNO, initialize the cse_reg_info entry for REGNO.  */

static void
get_cse_reg_info_1 (unsigned int regno)
{
  /* Set TIMESTAMP field to CSE_REG_INFO_TIMESTAMP so that this
     entry will be considered to have been initialized.  */
  cse_reg_info_table[regno].timestamp = cse_reg_info_timestamp;

  /* Initialize the rest of the entry.  */
  cse_reg_info_table[regno].reg_tick = 1;
  cse_reg_info_table[regno].reg_in_table = -1;
  cse_reg_info_table[regno].subreg_ticked = -1;
  cse_reg_info_table[regno].reg_qty = -regno - 1;
}

/* Find a cse_reg_info entry for REGNO.  */

static inline struct cse_reg_info *
get_cse_reg_info (unsigned int regno)
{
  struct cse_reg_info *p = &cse_reg_info_table[regno];

  /* If this entry has not been initialized, go ahead and initialize
     it.  */
  if (p->timestamp != cse_reg_info_timestamp)
    get_cse_reg_info_1 (regno);

  return p;
}

/* Clear the hash table and initialize each register with its own quantity,
   for a new basic block.  */

static void
new_basic_block (void)
{
  int i;

  next_qty = 0;

  /* Invalidate cse_reg_info_table.  */
  cse_reg_info_timestamp++;

  /* Clear out hash table state for this pass.  */
  CLEAR_HARD_REG_SET (hard_regs_in_table);

  /* The per-quantity values used to be initialized here, but it is
     much faster to initialize each as it is made in `make_new_qty'.  */

  for (i = 0; i < HASH_SIZE; i++)
    {
      struct table_elt *first;

      first = table[i];
      if (first != NULL)
	{
	  struct table_elt *last = first;

	  table[i] = NULL;

	  while (last->next_same_hash != NULL)
	    last = last->next_same_hash;

	  /* Now relink this entire hash chain into
	     the free element list.  */

	  last->next_same_hash = free_element_chain;
	  free_element_chain = first;
	}
    }

  table_size = 0;

#ifdef HAVE_cc0
  prev_insn = 0;
  prev_insn_cc0 = 0;
#endif
}

/* Say that register REG contains a quantity in mode MODE that was not in
   any register before, and initialize that quantity.  */

static void
make_new_qty (unsigned int reg, enum machine_mode mode)
{
  int q;
  struct qty_table_elem *ent;
  struct reg_eqv_elem *eqv;

  gcc_assert (next_qty < max_qty);

  q = REG_QTY (reg) = next_qty++;
  ent = &qty_table[q];
  ent->first_reg = reg;
  ent->last_reg = reg;
  ent->mode = mode;
  ent->const_rtx = ent->const_insn = NULL_RTX;
  ent->comparison_code = UNKNOWN;

  eqv = &reg_eqv_table[reg];
  eqv->next = eqv->prev = -1;
}

/* Make reg NEW equivalent to reg OLD.
   OLD is not changing; NEW is.  */

static void
make_regs_eqv (unsigned int new, unsigned int old)
{
  unsigned int lastr, firstr;
  int q = REG_QTY (old);
  struct qty_table_elem *ent;

  ent = &qty_table[q];

  /* Nothing should become eqv until it has a "non-invalid" qty number.  */
  gcc_assert (REGNO_QTY_VALID_P (old));

  REG_QTY (new) = q;
  firstr = ent->first_reg;
  lastr = ent->last_reg;

  /* Prefer fixed hard registers to anything.  Prefer pseudo regs to other
     hard regs.  Among pseudos, if NEW will live longer than any other reg
     of the same qty, and that is beyond the current basic block,
     make it the new canonical replacement for this qty.  */
  if (! (firstr < FIRST_PSEUDO_REGISTER && FIXED_REGNO_P (firstr))
      /* Certain fixed registers might be of the class NO_REGS.  This means
	 that not only can they not be allocated by the compiler, but
	 they cannot be used in substitutions or canonicalizations
	 either.  */
      && (new >= FIRST_PSEUDO_REGISTER || REGNO_REG_CLASS (new) != NO_REGS)
      && ((new < FIRST_PSEUDO_REGISTER && FIXED_REGNO_P (new))
	  || (new >= FIRST_PSEUDO_REGISTER
	      && (firstr < FIRST_PSEUDO_REGISTER
		  || ((uid_cuid[REGNO_LAST_UID (new)] > cse_basic_block_end
		       || (uid_cuid[REGNO_FIRST_UID (new)]
			   < cse_basic_block_start))
		      && (uid_cuid[REGNO_LAST_UID (new)]
			  > uid_cuid[REGNO_LAST_UID (firstr)]))))))
    {
      reg_eqv_table[firstr].prev = new;
      reg_eqv_table[new].next = firstr;
      reg_eqv_table[new].prev = -1;
      ent->first_reg = new;
    }
  else
    {
      /* If NEW is a hard reg (known to be non-fixed), insert at end.
	 Otherwise, insert before any non-fixed hard regs that are at the
	 end.  Registers of class NO_REGS cannot be used as an
	 equivalent for anything.  */
      while (lastr < FIRST_PSEUDO_REGISTER && reg_eqv_table[lastr].prev >= 0
	     && (REGNO_REG_CLASS (lastr) == NO_REGS || ! FIXED_REGNO_P (lastr))
	     && new >= FIRST_PSEUDO_REGISTER)
	lastr = reg_eqv_table[lastr].prev;
      reg_eqv_table[new].next = reg_eqv_table[lastr].next;
      if (reg_eqv_table[lastr].next >= 0)
	reg_eqv_table[reg_eqv_table[lastr].next].prev = new;
      else
	qty_table[q].last_reg = new;
      reg_eqv_table[lastr].next = new;
      reg_eqv_table[new].prev = lastr;
    }
}

/* Remove REG from its equivalence class.  */

static void
delete_reg_equiv (unsigned int reg)
{
  struct qty_table_elem *ent;
  int q = REG_QTY (reg);
  int p, n;

  /* If invalid, do nothing.  */
  if (! REGNO_QTY_VALID_P (reg))
    return;

  ent = &qty_table[q];

  p = reg_eqv_table[reg].prev;
  n = reg_eqv_table[reg].next;

  if (n != -1)
    reg_eqv_table[n].prev = p;
  else
    ent->last_reg = p;
  if (p != -1)
    reg_eqv_table[p].next = n;
  else
    ent->first_reg = n;

  REG_QTY (reg) = -reg - 1;
}

/* Remove any invalid expressions from the hash table
   that refer to any of the registers contained in expression X.

   Make sure that newly inserted references to those registers
   as subexpressions will be considered valid.

   mention_regs is not called when a register itself
   is being stored in the table.

   Return 1 if we have done something that may have changed the hash code
   of X.  */

static int
mention_regs (rtx x)
{
  enum rtx_code code;
  int i, j;
  const char *fmt;
  int changed = 0;

  if (x == 0)
    return 0;

  code = GET_CODE (x);
  if (code == REG)
    {
      unsigned int regno = REGNO (x);
      unsigned int endregno
	= regno + (regno >= FIRST_PSEUDO_REGISTER ? 1
		   : hard_regno_nregs[regno][GET_MODE (x)]);
      unsigned int i;

      for (i = regno; i < endregno; i++)
	{
	  if (REG_IN_TABLE (i) >= 0 && REG_IN_TABLE (i) != REG_TICK (i))
	    remove_invalid_refs (i);

	  REG_IN_TABLE (i) = REG_TICK (i);
	  SUBREG_TICKED (i) = -1;
	}

      return 0;
    }

  /* If this is a SUBREG, we don't want to discard other SUBREGs of the same
     pseudo if they don't use overlapping words.  We handle only pseudos
     here for simplicity.  */
  if (code == SUBREG && REG_P (SUBREG_REG (x))
      && REGNO (SUBREG_REG (x)) >= FIRST_PSEUDO_REGISTER)
    {
      unsigned int i = REGNO (SUBREG_REG (x));

      if (REG_IN_TABLE (i) >= 0 && REG_IN_TABLE (i) != REG_TICK (i))
	{
	  /* If REG_IN_TABLE (i) differs from REG_TICK (i) by one, and
	     the last store to this register really stored into this
	     subreg, then remove the memory of this subreg.
	     Otherwise, remove any memory of the entire register and
	     all its subregs from the table.  */
	  if (REG_TICK (i) - REG_IN_TABLE (i) > 1
	      || SUBREG_TICKED (i) != REGNO (SUBREG_REG (x)))
	    remove_invalid_refs (i);
	  else
	    remove_invalid_subreg_refs (i, SUBREG_BYTE (x), GET_MODE (x));
	}

      REG_IN_TABLE (i) = REG_TICK (i);
      SUBREG_TICKED (i) = REGNO (SUBREG_REG (x));
      return 0;
    }

  /* If X is a comparison or a COMPARE and either operand is a register
     that does not have a quantity, give it one.  This is so that a later
     call to record_jump_equiv won't cause X to be assigned a different
     hash code and not found in the table after that call.

     It is not necessary to do this here, since rehash_using_reg can
     fix up the table later, but doing this here eliminates the need to
     call that expensive function in the most common case where the only
     use of the register is in the comparison.  */

  if (code == COMPARE || COMPARISON_P (x))
    {
      if (REG_P (XEXP (x, 0))
	  && ! REGNO_QTY_VALID_P (REGNO (XEXP (x, 0))))
	if (insert_regs (XEXP (x, 0), NULL, 0))
	  {
	    rehash_using_reg (XEXP (x, 0));
	    changed = 1;
	  }

      if (REG_P (XEXP (x, 1))
	  && ! REGNO_QTY_VALID_P (REGNO (XEXP (x, 1))))
	if (insert_regs (XEXP (x, 1), NULL, 0))
	  {
	    rehash_using_reg (XEXP (x, 1));
	    changed = 1;
	  }
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    if (fmt[i] == 'e')
      changed |= mention_regs (XEXP (x, i));
    else if (fmt[i] == 'E')
      for (j = 0; j < XVECLEN (x, i); j++)
	changed |= mention_regs (XVECEXP (x, i, j));

  return changed;
}

/* Update the register quantities for inserting X into the hash table
   with a value equivalent to CLASSP.
   (If the class does not contain a REG, it is irrelevant.)
   If MODIFIED is nonzero, X is a destination; it is being modified.
   Note that delete_reg_equiv should be called on a register
   before insert_regs is done on that register with MODIFIED != 0.

   Nonzero value means that elements of reg_qty have changed
   so X's hash code may be different.  */

static int
insert_regs (rtx x, struct table_elt *classp, int modified)
{
  if (REG_P (x))
    {
      unsigned int regno = REGNO (x);
      int qty_valid;

      /* If REGNO is in the equivalence table already but is of the
	 wrong mode for that equivalence, don't do anything here.  */

      qty_valid = REGNO_QTY_VALID_P (regno);
      if (qty_valid)
	{
	  struct qty_table_elem *ent = &qty_table[REG_QTY (regno)];

	  if (ent->mode != GET_MODE (x))
	    return 0;
	}

      if (modified || ! qty_valid)
	{
	  if (classp)
	    for (classp = classp->first_same_value;
		 classp != 0;
		 classp = classp->next_same_value)
	      if (REG_P (classp->exp)
		  && GET_MODE (classp->exp) == GET_MODE (x))
		{
		  unsigned c_regno = REGNO (classp->exp);

		  gcc_assert (REGNO_QTY_VALID_P (c_regno));

		  /* Suppose that 5 is hard reg and 100 and 101 are
		     pseudos.  Consider

		     (set (reg:si 100) (reg:si 5))
		     (set (reg:si 5) (reg:si 100))
		     (set (reg:di 101) (reg:di 5))

		     We would now set REG_QTY (101) = REG_QTY (5), but the
		     entry for 5 is in SImode.  When we use this later in
		     copy propagation, we get the register in wrong mode.  */
		  if (qty_table[REG_QTY (c_regno)].mode != GET_MODE (x))
		    continue;

		  make_regs_eqv (regno, c_regno);
		  return 1;
		}

	  /* Mention_regs for a SUBREG checks if REG_TICK is exactly one larger
	     than REG_IN_TABLE to find out if there was only a single preceding
	     invalidation - for the SUBREG - or another one, which would be
	     for the full register.  However, if we find here that REG_TICK
	     indicates that the register is invalid, it means that it has
	     been invalidated in a separate operation.  The SUBREG might be used
	     now (then this is a recursive call), or we might use the full REG
	     now and a SUBREG of it later.  So bump up REG_TICK so that
	     mention_regs will do the right thing.  */
	  if (! modified
	      && REG_IN_TABLE (regno) >= 0
	      && REG_TICK (regno) == REG_IN_TABLE (regno) + 1)
	    REG_TICK (regno)++;
	  make_new_qty (regno, GET_MODE (x));
	  return 1;
	}

      return 0;
    }

  /* If X is a SUBREG, we will likely be inserting the inner register in the
     table.  If that register doesn't have an assigned quantity number at
     this point but does later, the insertion that we will be doing now will
     not be accessible because its hash code will have changed.  So assign
     a quantity number now.  */

  else if (GET_CODE (x) == SUBREG && REG_P (SUBREG_REG (x))
	   && ! REGNO_QTY_VALID_P (REGNO (SUBREG_REG (x))))
    {
      insert_regs (SUBREG_REG (x), NULL, 0);
      mention_regs (x);
      return 1;
    }
  else
    return mention_regs (x);
}

/* Look in or update the hash table.  */

/* Remove table element ELT from use in the table.
   HASH is its hash code, made using the HASH macro.
   It's an argument because often that is known in advance
   and we save much time not recomputing it.  */

static void
remove_from_table (struct table_elt *elt, unsigned int hash)
{
  if (elt == 0)
    return;

  /* Mark this element as removed.  See cse_insn.  */
  elt->first_same_value = 0;

  /* Remove the table element from its equivalence class.  */

  {
    struct table_elt *prev = elt->prev_same_value;
    struct table_elt *next = elt->next_same_value;

    if (next)
      next->prev_same_value = prev;

    if (prev)
      prev->next_same_value = next;
    else
      {
	struct table_elt *newfirst = next;
	while (next)
	  {
	    next->first_same_value = newfirst;
	    next = next->next_same_value;
	  }
      }
  }

  /* Remove the table element from its hash bucket.  */

  {
    struct table_elt *prev = elt->prev_same_hash;
    struct table_elt *next = elt->next_same_hash;

    if (next)
      next->prev_same_hash = prev;

    if (prev)
      prev->next_same_hash = next;
    else if (table[hash] == elt)
      table[hash] = next;
    else
      {
	/* This entry is not in the proper hash bucket.  This can happen
	   when two classes were merged by `merge_equiv_classes'.  Search
	   for the hash bucket that it heads.  This happens only very
	   rarely, so the cost is acceptable.  */
	for (hash = 0; hash < HASH_SIZE; hash++)
	  if (table[hash] == elt)
	    table[hash] = next;
      }
  }

  /* Remove the table element from its related-value circular chain.  */

  if (elt->related_value != 0 && elt->related_value != elt)
    {
      struct table_elt *p = elt->related_value;

      while (p->related_value != elt)
	p = p->related_value;
      p->related_value = elt->related_value;
      if (p->related_value == p)
	p->related_value = 0;
    }

  /* Now add it to the free element chain.  */
  elt->next_same_hash = free_element_chain;
  free_element_chain = elt;

  table_size--;
}

/* Same as above, but X is a pseudo-register.  */

static void
remove_pseudo_from_table (rtx x, unsigned int hash)
{
  struct table_elt *elt;

  /* Because a pseudo-register can be referenced in more than one
     mode, we might have to remove more than one table entry.  */
  while ((elt = lookup_for_remove (x, hash, VOIDmode)))
    remove_from_table (elt, hash);
}

/* Look up X in the hash table and return its table element,
   or 0 if X is not in the table.

   MODE is the machine-mode of X, or if X is an integer constant
   with VOIDmode then MODE is the mode with which X will be used.

   Here we are satisfied to find an expression whose tree structure
   looks like X.  */

static struct table_elt *
lookup (rtx x, unsigned int hash, enum machine_mode mode)
{
  struct table_elt *p;

  for (p = table[hash]; p; p = p->next_same_hash)
    if (mode == p->mode && ((x == p->exp && REG_P (x))
			    || exp_equiv_p (x, p->exp, !REG_P (x), false)))
      return p;

  return 0;
}

/* Like `lookup' but don't care whether the table element uses invalid regs.
   Also ignore discrepancies in the machine mode of a register.  */

static struct table_elt *
lookup_for_remove (rtx x, unsigned int hash, enum machine_mode mode)
{
  struct table_elt *p;

  if (REG_P (x))
    {
      unsigned int regno = REGNO (x);

      /* Don't check the machine mode when comparing registers;
	 invalidating (REG:SI 0) also invalidates (REG:DF 0).  */
      for (p = table[hash]; p; p = p->next_same_hash)
	if (REG_P (p->exp)
	    && REGNO (p->exp) == regno)
	  return p;
    }
  else
    {
      for (p = table[hash]; p; p = p->next_same_hash)
	if (mode == p->mode
	    && (x == p->exp || exp_equiv_p (x, p->exp, 0, false)))
	  return p;
    }

  return 0;
}

/* Look for an expression equivalent to X and with code CODE.
   If one is found, return that expression.  */

static rtx
lookup_as_function (rtx x, enum rtx_code code)
{
  struct table_elt *p
    = lookup (x, SAFE_HASH (x, VOIDmode), GET_MODE (x));

  /* If we are looking for a CONST_INT, the mode doesn't really matter, as
     long as we are narrowing.  So if we looked in vain for a mode narrower
     than word_mode before, look for word_mode now.  */
  if (p == 0 && code == CONST_INT
      && GET_MODE_SIZE (GET_MODE (x)) < GET_MODE_SIZE (word_mode))
    {
      x = copy_rtx (x);
      PUT_MODE (x, word_mode);
      p = lookup (x, SAFE_HASH (x, VOIDmode), word_mode);
    }

  if (p == 0)
    return 0;

  for (p = p->first_same_value; p; p = p->next_same_value)
    if (GET_CODE (p->exp) == code
	/* Make sure this is a valid entry in the table.  */
	&& exp_equiv_p (p->exp, p->exp, 1, false))
      return p->exp;

  return 0;
}

/* Insert X in the hash table, assuming HASH is its hash code
   and CLASSP is an element of the class it should go in
   (or 0 if a new class should be made).
   It is inserted at the proper position to keep the class in
   the order cheapest first.

   MODE is the machine-mode of X, or if X is an integer constant
   with VOIDmode then MODE is the mode with which X will be used.

   For elements of equal cheapness, the most recent one
   goes in front, except that the first element in the list
   remains first unless a cheaper element is added.  The order of
   pseudo-registers does not matter, as canon_reg will be called to
   find the cheapest when a register is retrieved from the table.

   The in_memory field in the hash table element is set to 0.
   The caller must set it nonzero if appropriate.

   You should call insert_regs (X, CLASSP, MODIFY) before calling here,
   and if insert_regs returns a nonzero value
   you must then recompute its hash code before calling here.

   If necessary, update table showing constant values of quantities.  */

#define CHEAPER(X, Y) \
 (preferable ((X)->cost, (X)->regcost, (Y)->cost, (Y)->regcost) < 0)
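
/* An illustrative sketch only (never compiled) of the calling protocol
   described above: insert_regs may change reg_qty, in which case the
   hash code must be recomputed (and, as merge_equiv_classes shows,
   rehash_using_reg may be needed as well).  The function name is made
   up for illustration.  */
#if 0
static struct table_elt *
example_insert (rtx x, struct table_elt *classp, enum machine_mode mode)
{
  unsigned hash = HASH (x, mode);

  if (insert_regs (x, classp, 0))
    hash = HASH (x, mode);	/* The old hash code is now stale.  */
  return insert (x, classp, hash, mode);
}
#endif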

static struct table_elt *
insert (rtx x, struct table_elt *classp, unsigned int hash, enum machine_mode mode)
{
  struct table_elt *elt;

  /* If X is a register and we haven't made a quantity for it,
     something is wrong.  */
  gcc_assert (!REG_P (x) || REGNO_QTY_VALID_P (REGNO (x)));

  /* If X is a hard register, show it is being put in the table.  */
  if (REG_P (x) && REGNO (x) < FIRST_PSEUDO_REGISTER)
    {
      unsigned int regno = REGNO (x);
      unsigned int endregno = regno + hard_regno_nregs[regno][GET_MODE (x)];
      unsigned int i;

      for (i = regno; i < endregno; i++)
	SET_HARD_REG_BIT (hard_regs_in_table, i);
    }

  /* Put an element for X into the right hash bucket.  */

  elt = free_element_chain;
  if (elt)
    free_element_chain = elt->next_same_hash;
  else
    elt = XNEW (struct table_elt);

  elt->exp = x;
  elt->canon_exp = NULL_RTX;
  elt->cost = COST (x);
  elt->regcost = approx_reg_cost (x);
  elt->next_same_value = 0;
  elt->prev_same_value = 0;
  elt->next_same_hash = table[hash];
  elt->prev_same_hash = 0;
  elt->related_value = 0;
  elt->in_memory = 0;
  elt->mode = mode;
  elt->is_const = (CONSTANT_P (x) || fixed_base_plus_p (x));

  if (table[hash])
    table[hash]->prev_same_hash = elt;
  table[hash] = elt;

  /* Put it into the proper value-class.  */
  if (classp)
    {
      classp = classp->first_same_value;
      if (CHEAPER (elt, classp))
	/* Insert at the head of the class.  */
	{
	  struct table_elt *p;
	  elt->next_same_value = classp;
	  classp->prev_same_value = elt;
	  elt->first_same_value = elt;

	  for (p = classp; p; p = p->next_same_value)
	    p->first_same_value = elt;
	}
      else
	{
	  /* Insert not at head of the class.  */
	  /* Put it after the last element cheaper than X.  */
	  struct table_elt *p, *next;

	  for (p = classp; (next = p->next_same_value) && CHEAPER (next, elt);
	       p = next);

	  /* Put it after P and before NEXT.  */
	  elt->next_same_value = next;
	  if (next)
	    next->prev_same_value = elt;

	  elt->prev_same_value = p;
	  p->next_same_value = elt;
	  elt->first_same_value = classp;
	}
    }
  else
    elt->first_same_value = elt;

  /* If this is a constant being set equivalent to a register or a register
     being set equivalent to a constant, note the constant equivalence.

     If this is a constant, it cannot be equivalent to a different constant,
     and a constant is the only thing that can be cheaper than a register.  So
     we know the register is the head of the class (before the constant was
     inserted).

     If this is a register that is not already known equivalent to a
     constant, we must check the entire class.

     If this is a register that is already known equivalent to an insn,
   update the qty's `const_insn' to show that `this_insn' is the latest
     insn making that quantity equivalent to the constant.  */

  if (elt->is_const && classp && REG_P (classp->exp)
      && !REG_P (x))
    {
      int exp_q = REG_QTY (REGNO (classp->exp));
      struct qty_table_elem *exp_ent = &qty_table[exp_q];

      exp_ent->const_rtx = gen_lowpart (exp_ent->mode, x);
      exp_ent->const_insn = this_insn;
    }

  else if (REG_P (x)
	   && classp
	   && ! qty_table[REG_QTY (REGNO (x))].const_rtx
	   && ! elt->is_const)
    {
      struct table_elt *p;

      for (p = classp; p != 0; p = p->next_same_value)
	{
	  if (p->is_const && !REG_P (p->exp))
	    {
	      int x_q = REG_QTY (REGNO (x));
	      struct qty_table_elem *x_ent = &qty_table[x_q];

	      x_ent->const_rtx
		= gen_lowpart (GET_MODE (x), p->exp);
	      x_ent->const_insn = this_insn;
	      break;
	    }
	}
    }

  else if (REG_P (x)
	   && qty_table[REG_QTY (REGNO (x))].const_rtx
	   && GET_MODE (x) == qty_table[REG_QTY (REGNO (x))].mode)
    qty_table[REG_QTY (REGNO (x))].const_insn = this_insn;

  /* If this is a constant with symbolic value,
     and it has a term with an explicit integer value,
     link it up with related expressions.  */
  if (GET_CODE (x) == CONST)
    {
      rtx subexp = get_related_value (x);
      unsigned subhash;
      struct table_elt *subelt, *subelt_prev;

      if (subexp != 0)
	{
	  /* Get the integer-free subexpression in the hash table.  */
	  subhash = SAFE_HASH (subexp, mode);
	  subelt = lookup (subexp, subhash, mode);
	  if (subelt == 0)
	    subelt = insert (subexp, NULL, subhash, mode);
	  /* Initialize SUBELT's circular chain if it has none.  */
	  if (subelt->related_value == 0)
	    subelt->related_value = subelt;
	  /* Find the element in the circular chain that precedes SUBELT.  */
	  subelt_prev = subelt;
	  while (subelt_prev->related_value != subelt)
	    subelt_prev = subelt_prev->related_value;
	  /* Put new ELT into SUBELT's circular chain just before SUBELT.
	     This way the element that follows SUBELT is the oldest one.  */
	  elt->related_value = subelt_prev->related_value;
	  subelt_prev->related_value = elt;
	}
    }

  table_size++;

  return elt;
}

/* Given two equivalence classes, CLASS1 and CLASS2, put all the entries from
   CLASS2 into CLASS1.  This is done when we have reached an insn which makes
   the two classes equivalent.

   CLASS1 will be the surviving class; CLASS2 should not be used after this
   call.

   Any invalid entries in CLASS2 will not be copied.  */

static void
merge_equiv_classes (struct table_elt *class1, struct table_elt *class2)
{
  struct table_elt *elt, *next, *new;

  /* Ensure we start with the head of the classes.  */
  class1 = class1->first_same_value;
  class2 = class2->first_same_value;

  /* If they were already equal, forget it.  */
  if (class1 == class2)
    return;

  for (elt = class2; elt; elt = next)
    {
      unsigned int hash;
      rtx exp = elt->exp;
      enum machine_mode mode = elt->mode;

      next = elt->next_same_value;

      /* Remove old entry, make a new one in CLASS1's class.
	 Don't do this for invalid entries as we cannot find their
	 hash code (it also isn't necessary).  */
      if (REG_P (exp) || exp_equiv_p (exp, exp, 1, false))
	{
	  bool need_rehash = false;

	  hash_arg_in_memory = 0;
	  hash = HASH (exp, mode);

	  if (REG_P (exp))
	    {
	      need_rehash = REGNO_QTY_VALID_P (REGNO (exp));
	      delete_reg_equiv (REGNO (exp));
	    }

	  if (REG_P (exp) && REGNO (exp) >= FIRST_PSEUDO_REGISTER)
	    remove_pseudo_from_table (exp, hash);
	  else
	    remove_from_table (elt, hash);

	  if (insert_regs (exp, class1, 0) || need_rehash)
	    {
	      rehash_using_reg (exp);
	      hash = HASH (exp, mode);
	    }
	  new = insert (exp, class1, hash, mode);
	  new->in_memory = hash_arg_in_memory;
	}
    }
}

/* Flush the entire hash table.  */

static void
flush_hash_table (void)
{
  int i;
  struct table_elt *p;

  for (i = 0; i < HASH_SIZE; i++)
    for (p = table[i]; p; p = table[i])
      {
	/* Note that invalidate can remove elements
	   after P in the current hash chain.  */
	if (REG_P (p->exp))
	  invalidate (p->exp, VOIDmode);
	else
	  remove_from_table (p, i);
      }
}

/* Function called for each rtx to check whether a true dependence exists.  */
struct check_dependence_data
{
  enum machine_mode mode;
  rtx exp;
  rtx addr;
};

static int
check_dependence (rtx *x, void *data)
{
  struct check_dependence_data *d = (struct check_dependence_data *) data;
  if (*x && MEM_P (*x))
    return canon_true_dependence (d->exp, d->mode, d->addr, *x,
				  cse_rtx_varies_p);
  else
    return 0;
}

/* Remove from the hash table, or mark as invalid, all expressions whose
   values could be altered by storing in X.  X is a register, a subreg, or
   a memory reference with nonvarying address (because, when a memory
   reference with a varying address is stored in, all memory references are
   removed by invalidate_memory so specific invalidation is superfluous).
   FULL_MODE, if not VOIDmode, indicates that this much should be
   invalidated instead of just the amount indicated by the mode of X.  This
   is only used for bitfield stores into memory.

   A nonvarying address may be just a register or just a symbol reference,
   or it may be either of those plus a numeric offset.  */

static void
invalidate (rtx x, enum machine_mode full_mode)
{
  int i;
  struct table_elt *p;
  rtx addr;

  switch (GET_CODE (x))
    {
    case REG:
      {
	/* If X is a register, dependencies on its contents are recorded
	   through the qty number mechanism.  Just change the qty number of
	   the register, mark it as invalid for expressions that refer to it,
	   and remove it itself.  */
	unsigned int regno = REGNO (x);
	unsigned int hash = HASH (x, GET_MODE (x));

	/* Remove REGNO from any quantity list it might be on and indicate
	   that its value might have changed.  If it is a pseudo, remove its
	   entry from the hash table.

	   For a hard register, we do the first two actions above for any
	   additional hard registers corresponding to X.  Then, if any of these
	   registers are in the table, we must remove any REG entries that
	   overlap these registers.  */

	delete_reg_equiv (regno);
	REG_TICK (regno)++;
	SUBREG_TICKED (regno) = -1;

	if (regno >= FIRST_PSEUDO_REGISTER)
	  remove_pseudo_from_table (x, hash);
	else
	  {
	    HOST_WIDE_INT in_table
	      = TEST_HARD_REG_BIT (hard_regs_in_table, regno);
	    unsigned int endregno
	      = regno + hard_regno_nregs[regno][GET_MODE (x)];
	    unsigned int tregno, tendregno, rn;
	    struct table_elt *p, *next;

	    CLEAR_HARD_REG_BIT (hard_regs_in_table, regno);

	    for (rn = regno + 1; rn < endregno; rn++)
	      {
		in_table |= TEST_HARD_REG_BIT (hard_regs_in_table, rn);
		CLEAR_HARD_REG_BIT (hard_regs_in_table, rn);
		delete_reg_equiv (rn);
		REG_TICK (rn)++;
		SUBREG_TICKED (rn) = -1;
	      }

	    if (in_table)
	      for (hash = 0; hash < HASH_SIZE; hash++)
		for (p = table[hash]; p; p = next)
		  {
		    next = p->next_same_hash;

		    if (!REG_P (p->exp)
			|| REGNO (p->exp) >= FIRST_PSEUDO_REGISTER)
		      continue;

		    tregno = REGNO (p->exp);
		    tendregno
		      = tregno + hard_regno_nregs[tregno][GET_MODE (p->exp)];
		    if (tendregno > regno && tregno < endregno)
		      remove_from_table (p, hash);
		  }
	  }
      }
      return;

    case SUBREG:
      invalidate (SUBREG_REG (x), VOIDmode);
      return;

    case PARALLEL:
      for (i = XVECLEN (x, 0) - 1; i >= 0; --i)
	invalidate (XVECEXP (x, 0, i), VOIDmode);
      return;

    case EXPR_LIST:
      /* This is part of a disjoint return value; extract the location in
	 question ignoring the offset.  */
      invalidate (XEXP (x, 0), VOIDmode);
      return;

    case MEM:
      addr = canon_rtx (get_addr (XEXP (x, 0)));
      /* Calculate the canonical version of X here so that
	 true_dependence doesn't generate new RTL for X on each call.  */
      x = canon_rtx (x);

      /* Remove all hash table elements that refer to overlapping pieces of
	 memory.  */
      if (full_mode == VOIDmode)
	full_mode = GET_MODE (x);

      for (i = 0; i < HASH_SIZE; i++)
	{
	  struct table_elt *next;

	  for (p = table[i]; p; p = next)
	    {
	      next = p->next_same_hash;
	      if (p->in_memory)
		{
		  struct check_dependence_data d;

		  /* Just canonicalize the expression once;
		     otherwise each time we call invalidate
		     true_dependence will canonicalize the
		     expression again.  */
		  if (!p->canon_exp)
		    p->canon_exp = canon_rtx (p->exp);
		  d.exp = x;
		  d.addr = addr;
		  d.mode = full_mode;
		  if (for_each_rtx (&p->canon_exp, check_dependence, &d))
		    remove_from_table (p, i);
		}
	    }
	}
      return;

    default:
      gcc_unreachable ();
    }
}
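
/* Illustration of the hard register case above: on a hypothetical
   machine where a DImode value occupies two word registers,
   invalidating (reg:DI 0) also bumps REG_TICK for register 1 and
   removes any table entry, such as (reg:SI 1), that overlaps the
   pair.  */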

/* Remove all expressions that refer to register REGNO,
   since they are already invalid, and we are about to
   mark that register valid again and don't want the old
   expressions to reappear as valid.  */

static void
remove_invalid_refs (unsigned int regno)
{
  unsigned int i;
  struct table_elt *p, *next;

  for (i = 0; i < HASH_SIZE; i++)
    for (p = table[i]; p; p = next)
      {
	next = p->next_same_hash;
	if (!REG_P (p->exp)
	    && refers_to_regno_p (regno, regno + 1, p->exp, (rtx *) 0))
	  remove_from_table (p, i);
      }
}

/* Likewise for a subreg with subreg_reg REGNO, subreg_byte OFFSET,
   and mode MODE.  */
static void
remove_invalid_subreg_refs (unsigned int regno, unsigned int offset,
			    enum machine_mode mode)
{
  unsigned int i;
  struct table_elt *p, *next;
  unsigned int end = offset + (GET_MODE_SIZE (mode) - 1);

  for (i = 0; i < HASH_SIZE; i++)
    for (p = table[i]; p; p = next)
      {
	rtx exp = p->exp;
	next = p->next_same_hash;

	if (!REG_P (exp)
	    && (GET_CODE (exp) != SUBREG
		|| !REG_P (SUBREG_REG (exp))
		|| REGNO (SUBREG_REG (exp)) != regno
		|| (((SUBREG_BYTE (exp)
		      + (GET_MODE_SIZE (GET_MODE (exp)) - 1)) >= offset)
		    && SUBREG_BYTE (exp) <= end))
	    && refers_to_regno_p (regno, regno + 1, p->exp, (rtx *) 0))
	  remove_from_table (p, i);
      }
}

/* Recompute the hash codes of any valid entries in the hash table that
   reference X, if X is a register, or SUBREG_REG (X) if X is a SUBREG.

   This is called when we make a jump equivalence.  */

static void
rehash_using_reg (rtx x)
{
  unsigned int i;
  struct table_elt *p, *next;
  unsigned hash;

  if (GET_CODE (x) == SUBREG)
    x = SUBREG_REG (x);

  /* If X is not a register or if the register is known not to be in any
     valid entries in the table, we have no work to do.  */

  if (!REG_P (x)
      || REG_IN_TABLE (REGNO (x)) < 0
      || REG_IN_TABLE (REGNO (x)) != REG_TICK (REGNO (x)))
    return;

  /* Scan all hash chains looking for valid entries that mention X.
     If we find one and it is in the wrong hash chain, move it.  */

  for (i = 0; i < HASH_SIZE; i++)
    for (p = table[i]; p; p = next)
      {
	next = p->next_same_hash;
	if (reg_mentioned_p (x, p->exp)
	    && exp_equiv_p (p->exp, p->exp, 1, false)
	    && i != (hash = SAFE_HASH (p->exp, p->mode)))
	  {
	    if (p->next_same_hash)
	      p->next_same_hash->prev_same_hash = p->prev_same_hash;

	    if (p->prev_same_hash)
	      p->prev_same_hash->next_same_hash = p->next_same_hash;
	    else
	      table[i] = p->next_same_hash;

	    p->next_same_hash = table[hash];
	    p->prev_same_hash = 0;
	    if (table[hash])
	      table[hash]->prev_same_hash = p;
	    table[hash] = p;
	  }
      }
}

/* Remove from the hash table any expression that is a call-clobbered
   register.  Also update their TICK values.  */

static void
invalidate_for_call (void)
{
  unsigned int regno, endregno;
  unsigned int i;
  unsigned hash;
  struct table_elt *p, *next;
  int in_table = 0;

  /* Go through all the hard registers.  For each that is clobbered in
     a CALL_INSN, remove the register from quantity chains and update
     reg_tick if defined.  Also see if any of these registers is currently
     in the table.  */

  for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
    if (TEST_HARD_REG_BIT (regs_invalidated_by_call, regno))
      {
	delete_reg_equiv (regno);
	if (REG_TICK (regno) >= 0)
	  {
	    REG_TICK (regno)++;
	    SUBREG_TICKED (regno) = -1;
	  }

	in_table |= (TEST_HARD_REG_BIT (hard_regs_in_table, regno) != 0);
      }

  /* In the case where we have no call-clobbered hard registers in the
     table, we are done.  Otherwise, scan the table and remove any
     entry that overlaps a call-clobbered register.  */

  if (in_table)
    for (hash = 0; hash < HASH_SIZE; hash++)
      for (p = table[hash]; p; p = next)
	{
	  next = p->next_same_hash;

	  if (!REG_P (p->exp)
	      || REGNO (p->exp) >= FIRST_PSEUDO_REGISTER)
	    continue;

	  regno = REGNO (p->exp);
	  endregno = regno + hard_regno_nregs[regno][GET_MODE (p->exp)];

	  for (i = regno; i < endregno; i++)
	    if (TEST_HARD_REG_BIT (regs_invalidated_by_call, i))
	      {
		remove_from_table (p, hash);
		break;
	      }
	}
}

/* Given an expression X of type CONST,
   and ELT which is its table entry (or 0 if it
   is not in the hash table),
   return an alternate expression for X as a register plus integer.
   If none can be found, return 0.  */

static rtx
use_related_value (rtx x, struct table_elt *elt)
{
  struct table_elt *relt = 0;
  struct table_elt *p, *q;
  HOST_WIDE_INT offset;

  /* First, is there anything related known?
     If we have a table element, we can tell from that.
     Otherwise, must look it up.  */

  if (elt != 0 && elt->related_value != 0)
    relt = elt;
  else if (elt == 0 && GET_CODE (x) == CONST)
    {
      rtx subexp = get_related_value (x);
      if (subexp != 0)
	relt = lookup (subexp,
		       SAFE_HASH (subexp, GET_MODE (subexp)),
		       GET_MODE (subexp));
    }

  if (relt == 0)
    return 0;

  /* Search all related table entries for one that has an
     equivalent register.  */

  p = relt;
  while (1)
    {
      /* This loop is strange in that it is executed in two different cases.
	 The first is when X is already in the table.  Then it is searching
	 the RELATED_VALUE list of X's class (RELT).  The second case is when
	 X is not in the table.  Then RELT points to a class for the related
	 value.

	 Ensure that, whatever case we are in, we ignore classes that have
	 the same value as X.  */

      if (rtx_equal_p (x, p->exp))
	q = 0;
      else
	for (q = p->first_same_value; q; q = q->next_same_value)
	  if (REG_P (q->exp))
	    break;

      if (q)
	break;

      p = p->related_value;

      /* We went all the way around, so there is nothing to be found.
	 Alternatively, perhaps RELT was in the table for some other reason
	 and it has no related values recorded.  */
      if (p == relt || p == 0)
	break;
    }

  if (q == 0)
    return 0;

  offset = (get_integer_term (x) - get_integer_term (p->exp));
  /* Note: OFFSET may be 0 if P->exp and X are related by commutativity.  */
  return plus_constant (q->exp, offset);
}
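
/* For example, if X is (const (plus (symbol_ref "s") (const_int 8)))
   and the chain built by insert contains (const (plus (symbol_ref "s")
   (const_int 4))) whose class includes (reg 100), then OFFSET is
   8 - 4 = 4 and the result is (plus (reg 100) (const_int 4)).
   (The symbol, offsets and register number are hypothetical.)  */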

/* Hash a string.  Just add its bytes up.  */
static inline unsigned
hash_rtx_string (const char *ps)
{
  unsigned hash = 0;
  const unsigned char *p = (const unsigned char *) ps;

  if (p)
    while (*p)
      hash += *p++;

  return hash;
}

/* Hash an rtx.  We are careful to make sure the value is never negative.
   Equivalent registers hash identically.
   MODE is used in hashing for CONST_INTs only;
   otherwise the mode of X is used.

   Store 1 in DO_NOT_RECORD_P if any subexpression is volatile.

   If HASH_ARG_IN_MEMORY_P is not NULL, store 1 in it if X contains
   a MEM rtx which is not marked MEM_READONLY_P.

   Note that cse_insn knows that the hash code of a MEM expression
   is just (int) MEM plus the hash code of the address.  */

unsigned
hash_rtx (rtx x, enum machine_mode mode, int *do_not_record_p,
	  int *hash_arg_in_memory_p, bool have_reg_qty)
{
  int i, j;
  unsigned hash = 0;
  enum rtx_code code;
  const char *fmt;

  /* Used to turn recursion into iteration.  We can't rely on GCC's
     tail-recursion elimination since we need to keep accumulating values
     in HASH.  */
 repeat:
  if (x == 0)
    return hash;

  code = GET_CODE (x);
  switch (code)
    {
    case REG:
      {
	unsigned int regno = REGNO (x);

	if (!reload_completed)
	  {
	    /* On some machines, we can't record any non-fixed hard register,
	       because extending its life will cause reload problems.  We
	       consider ap, fp, sp, gp to be fixed for this purpose.

	       We also consider CCmode registers to be fixed for this purpose;
	       failing to do so leads to a failure to simplify conditionals
	       such as 0 < 100.

	       On all machines, we can't record any global registers.
	       Nor should we record any register that is in a small
	       class, as defined by CLASS_LIKELY_SPILLED_P.  */
	    bool record;

	    if (regno >= FIRST_PSEUDO_REGISTER)
	      record = true;
	    else if (x == frame_pointer_rtx
		     || x == hard_frame_pointer_rtx
		     || x == arg_pointer_rtx
		     || x == stack_pointer_rtx
		     || x == pic_offset_table_rtx)
	      record = true;
	    else if (global_regs[regno])
	      record = false;
	    else if (fixed_regs[regno])
	      record = true;
	    else if (GET_MODE_CLASS (GET_MODE (x)) == MODE_CC)
	      record = true;
	    else if (SMALL_REGISTER_CLASSES)
	      record = false;
	    else if (CLASS_LIKELY_SPILLED_P (REGNO_REG_CLASS (regno)))
	      record = false;
	    else
	      record = true;

	    if (!record)
	      {
		*do_not_record_p = 1;
		return 0;
	      }
	  }

	hash += ((unsigned int) REG << 7);
        hash += (have_reg_qty ? (unsigned) REG_QTY (regno) : regno);
	return hash;
      }

    /* We handle SUBREG of a REG specially because the underlying
       reg changes its hash value with every value change; we don't
       want to have to forget unrelated subregs when one subreg changes.  */
    case SUBREG:
      {
	if (REG_P (SUBREG_REG (x)))
	  {
	    hash += (((unsigned int) SUBREG << 7)
		     + REGNO (SUBREG_REG (x))
		     + (SUBREG_BYTE (x) / UNITS_PER_WORD));
	    return hash;
	  }
	break;
      }

    case CONST_INT:
      hash += (((unsigned int) CONST_INT << 7) + (unsigned int) mode
               + (unsigned int) INTVAL (x));
      return hash;

    case CONST_DOUBLE:
      /* This is like the general case, except that it only counts
	 the integers representing the constant.  */
      hash += (unsigned int) code + (unsigned int) GET_MODE (x);
      if (GET_MODE (x) != VOIDmode)
	hash += real_hash (CONST_DOUBLE_REAL_VALUE (x));
      else
	hash += ((unsigned int) CONST_DOUBLE_LOW (x)
		 + (unsigned int) CONST_DOUBLE_HIGH (x));
      return hash;

    case CONST_VECTOR:
      {
	int units;
	rtx elt;

	units = CONST_VECTOR_NUNITS (x);

	for (i = 0; i < units; ++i)
	  {
	    elt = CONST_VECTOR_ELT (x, i);
	    hash += hash_rtx (elt, GET_MODE (elt), do_not_record_p,
			      hash_arg_in_memory_p, have_reg_qty);
	  }

	return hash;
      }

      /* Assume there is only one rtx object for any given label.  */
    case LABEL_REF:
      /* We don't hash on the address of the CODE_LABEL to avoid bootstrap
	 differences and differences between each stage's debugging dumps.  */
      hash += (((unsigned int) LABEL_REF << 7)
	       + CODE_LABEL_NUMBER (XEXP (x, 0)));
      return hash;

    case SYMBOL_REF:
      {
	/* Don't hash on the symbol's address to avoid bootstrap differences.
	   Different hash values may cause expressions to be recorded in
	   different orders and thus different registers to be used in the
	   final assembler.  This also avoids differences in the dump files
	   between various stages.  */
	unsigned int h = 0;
	const unsigned char *p = (const unsigned char *) XSTR (x, 0);

	while (*p)
	  h += (h << 7) + *p++; /* ??? revisit */

	hash += ((unsigned int) SYMBOL_REF << 7) + h;
	return hash;
      }

    case MEM:
      /* We don't record if marked volatile or if BLKmode since we don't
	 know the size of the move.  */
      if (MEM_VOLATILE_P (x) || GET_MODE (x) == BLKmode)
	{
	  *do_not_record_p = 1;
	  return 0;
	}
      if (hash_arg_in_memory_p && !MEM_READONLY_P (x))
	*hash_arg_in_memory_p = 1;

      /* Now that we have already found this special case,
	 might as well speed it up as much as possible.  */
      hash += (unsigned) MEM;
      x = XEXP (x, 0);
      goto repeat;

    case USE:
      /* A USE that mentions non-volatile memory needs special
	 handling since the MEM may be BLKmode which normally
	 prevents an entry from being made.  Pure calls are
	 marked by a USE which mentions BLKmode memory.
	 See calls.c:emit_call_1.  */
      if (MEM_P (XEXP (x, 0))
	  && ! MEM_VOLATILE_P (XEXP (x, 0)))
	{
	  hash += (unsigned) USE;
	  x = XEXP (x, 0);

	  if (hash_arg_in_memory_p && !MEM_READONLY_P (x))
	    *hash_arg_in_memory_p = 1;

	  /* Now that we have already found this special case,
	     might as well speed it up as much as possible.  */
	  hash += (unsigned) MEM;
	  x = XEXP (x, 0);
	  goto repeat;
	}
      break;

    case PRE_DEC:
    case PRE_INC:
    case POST_DEC:
    case POST_INC:
    case PRE_MODIFY:
    case POST_MODIFY:
    case PC:
    case CC0:
    case CALL:
    case UNSPEC_VOLATILE:
      *do_not_record_p = 1;
      return 0;

    case ASM_OPERANDS:
      if (MEM_VOLATILE_P (x))
	{
	  *do_not_record_p = 1;
	  return 0;
	}
      else
	{
	  /* We don't want to take the filename and line into account.  */
	  hash += (unsigned) code + (unsigned) GET_MODE (x)
	    + hash_rtx_string (ASM_OPERANDS_TEMPLATE (x))
	    + hash_rtx_string (ASM_OPERANDS_OUTPUT_CONSTRAINT (x))
	    + (unsigned) ASM_OPERANDS_OUTPUT_IDX (x);

	  if (ASM_OPERANDS_INPUT_LENGTH (x))
	    {
	      for (i = 1; i < ASM_OPERANDS_INPUT_LENGTH (x); i++)
		{
		  hash += (hash_rtx (ASM_OPERANDS_INPUT (x, i),
				     GET_MODE (ASM_OPERANDS_INPUT (x, i)),
				     do_not_record_p, hash_arg_in_memory_p,
				     have_reg_qty)
			   + hash_rtx_string
				(ASM_OPERANDS_INPUT_CONSTRAINT (x, i)));
		}

	      hash += hash_rtx_string (ASM_OPERANDS_INPUT_CONSTRAINT (x, 0));
	      x = ASM_OPERANDS_INPUT (x, 0);
	      mode = GET_MODE (x);
	      goto repeat;
	    }

	  return hash;
	}
      break;

    default:
      break;
    }

  i = GET_RTX_LENGTH (code) - 1;
  hash += (unsigned) code + (unsigned) GET_MODE (x);
  fmt = GET_RTX_FORMAT (code);
  for (; i >= 0; i--)
    {
      switch (fmt[i])
	{
	case 'e':
	  /* If we are about to do the last recursive call
	     needed at this level, change it into iteration.
	     This function is called enough to be worth it.  */
	  if (i == 0)
	    {
	      x = XEXP (x, i);
	      goto repeat;
	    }

	  hash += hash_rtx (XEXP (x, i), 0, do_not_record_p,
			    hash_arg_in_memory_p, have_reg_qty);
	  break;

	case 'E':
	  for (j = 0; j < XVECLEN (x, i); j++)
	    hash += hash_rtx (XVECEXP (x, i, j), 0, do_not_record_p,
			      hash_arg_in_memory_p, have_reg_qty);
	  break;

	case 's':
	  hash += hash_rtx_string (XSTR (x, i));
	  break;

	case 'i':
	  hash += (unsigned int) XINT (x, i);
	  break;

	case '0': case 't':
	  /* Unused.  */
	  break;

	default:
	  gcc_unreachable ();
	}
    }

  return hash;
}
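
/* A sketch of the hash computed above when HAVE_REG_QTY is true: for
   (mem:SI (reg:SI 100)) the accumulated value is

       (unsigned) MEM + ((unsigned) REG << 7) + REG_QTY (100)

   i.e. (int) MEM plus the hash of the address, which is exactly the
   invariant cse_insn relies on (see the comment before hash_rtx).
   (Register 100 is hypothetical.)  */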

/* Hash an rtx X for cse via hash_rtx.
   Stores 1 in do_not_record if any subexpression is volatile.
   Stores 1 in hash_arg_in_memory if X contains a mem rtx which
   is not marked MEM_READONLY_P.  */

static inline unsigned
canon_hash (rtx x, enum machine_mode mode)
{
  return hash_rtx (x, mode, &do_not_record, &hash_arg_in_memory, true);
}

/* Like canon_hash but with no side effects, i.e. do_not_record
   and hash_arg_in_memory are not changed.  */

static inline unsigned
safe_hash (rtx x, enum machine_mode mode)
{
  int dummy_do_not_record;
  return hash_rtx (x, mode, &dummy_do_not_record, NULL, true);
}

/* Return 1 iff X and Y would canonicalize into the same thing,
   without actually constructing the canonicalization of either one.
   If VALIDATE is nonzero,
   we assume X is an expression being processed from the rtl
   and Y was found in the hash table.  We check register refs
   in Y for being marked as valid.

   If FOR_GCSE is true, we compare X and Y for equivalence for GCSE.  */

int
exp_equiv_p (rtx x, rtx y, int validate, bool for_gcse)
{
  int i, j;
  enum rtx_code code;
  const char *fmt;

  /* Note: it is incorrect to assume an expression is equivalent to itself
     if VALIDATE is nonzero.  */
  if (x == y && !validate)
    return 1;

  if (x == 0 || y == 0)
    return x == y;

  code = GET_CODE (x);
  if (code != GET_CODE (y))
    return 0;

  /* (MULT:SI x y) and (MULT:HI x y) are NOT equivalent.  */
  if (GET_MODE (x) != GET_MODE (y))
    return 0;

  switch (code)
    {
    case PC:
    case CC0:
    case CONST_INT:
    case CONST_DOUBLE:
      return x == y;

    case LABEL_REF:
      return XEXP (x, 0) == XEXP (y, 0);

    case SYMBOL_REF:
      return XSTR (x, 0) == XSTR (y, 0);

    case REG:
      if (for_gcse)
	return REGNO (x) == REGNO (y);
      else
	{
	  unsigned int regno = REGNO (y);
	  unsigned int i;
	  unsigned int endregno
	    = regno + (regno >= FIRST_PSEUDO_REGISTER ? 1
		       : hard_regno_nregs[regno][GET_MODE (y)]);

	  /* If the quantities are not the same, the expressions are not
	     equivalent.  If they are and we are not to validate, they
	     are equivalent.  Otherwise, ensure all regs are up-to-date.  */

	  if (REG_QTY (REGNO (x)) != REG_QTY (regno))
	    return 0;

	  if (! validate)
	    return 1;

	  for (i = regno; i < endregno; i++)
	    if (REG_IN_TABLE (i) != REG_TICK (i))
	      return 0;

	  return 1;
	}

    case MEM:
      if (for_gcse)
	{
	  /* A volatile mem should not be considered equivalent to any
	     other.  */
	  if (MEM_VOLATILE_P (x) || MEM_VOLATILE_P (y))
	    return 0;

	  /* Can't merge two expressions in different alias sets, since we
	     can decide that the expression is transparent in a block when
	     it isn't, due to it being set with the different alias set.

	     Also, can't merge two expressions with different MEM_ATTRS.
	     They could e.g. be two different entities allocated into the
	     same space on the stack (see e.g. PR25130).  In that case, the
	     MEM addresses can be the same, even though the two MEMs are
	     absolutely not equivalent.

	     But since all MEM attributes really should be the same for
	     equivalent MEMs, we just use the invariant that MEMs that have
	     the same attributes share the same mem_attrs data structure.  */
	  if (MEM_ATTRS (x) != MEM_ATTRS (y))
	    return 0;
	}
      break;

    /*  For commutative operations, check both orders.  */
    case PLUS:
    case MULT:
    case AND:
    case IOR:
    case XOR:
    case NE:
    case EQ:
      return ((exp_equiv_p (XEXP (x, 0), XEXP (y, 0),
			     validate, for_gcse)
	       && exp_equiv_p (XEXP (x, 1), XEXP (y, 1),
				validate, for_gcse))
	      || (exp_equiv_p (XEXP (x, 0), XEXP (y, 1),
				validate, for_gcse)
		  && exp_equiv_p (XEXP (x, 1), XEXP (y, 0),
				   validate, for_gcse)));

    case ASM_OPERANDS:
      /* We don't use the generic code below because we want to
	 disregard filename and line numbers.  */

      /* A volatile asm isn't equivalent to any other.  */
      if (MEM_VOLATILE_P (x) || MEM_VOLATILE_P (y))
	return 0;

      if (GET_MODE (x) != GET_MODE (y)
	  || strcmp (ASM_OPERANDS_TEMPLATE (x), ASM_OPERANDS_TEMPLATE (y))
	  || strcmp (ASM_OPERANDS_OUTPUT_CONSTRAINT (x),
		     ASM_OPERANDS_OUTPUT_CONSTRAINT (y))
	  || ASM_OPERANDS_OUTPUT_IDX (x) != ASM_OPERANDS_OUTPUT_IDX (y)
	  || ASM_OPERANDS_INPUT_LENGTH (x) != ASM_OPERANDS_INPUT_LENGTH (y))
	return 0;

      if (ASM_OPERANDS_INPUT_LENGTH (x))
	{
	  for (i = ASM_OPERANDS_INPUT_LENGTH (x) - 1; i >= 0; i--)
	    if (! exp_equiv_p (ASM_OPERANDS_INPUT (x, i),
			       ASM_OPERANDS_INPUT (y, i),
			       validate, for_gcse)
		|| strcmp (ASM_OPERANDS_INPUT_CONSTRAINT (x, i),
			   ASM_OPERANDS_INPUT_CONSTRAINT (y, i)))
	      return 0;
	}

      return 1;

    default:
      break;
    }

  /* Compare the elements.  If any pair of corresponding elements
     fail to match, return 0 for the whole thing.  */

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      switch (fmt[i])
	{
	case 'e':
	  if (! exp_equiv_p (XEXP (x, i), XEXP (y, i),
			      validate, for_gcse))
	    return 0;
	  break;

	case 'E':
	  if (XVECLEN (x, i) != XVECLEN (y, i))
	    return 0;
	  for (j = 0; j < XVECLEN (x, i); j++)
	    if (! exp_equiv_p (XVECEXP (x, i, j), XVECEXP (y, i, j),
				validate, for_gcse))
	      return 0;
	  break;

	case 's':
	  if (strcmp (XSTR (x, i), XSTR (y, i)))
	    return 0;
	  break;

	case 'i':
	  if (XINT (x, i) != XINT (y, i))
	    return 0;
	  break;

	case 'w':
	  if (XWINT (x, i) != XWINT (y, i))
	    return 0;
	  break;

	case '0':
	case 't':
	  break;

	default:
	  gcc_unreachable ();
	}
    }

  return 1;
}
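
/* As an illustration of the commutative cases above,
   (plus:SI (reg 100) (reg 101)) and (plus:SI (reg 101) (reg 100))
   compare equal because both operand orders are tried, whereas
   (minus:SI ...) falls through to the generic element-by-element
   comparison, which checks only one order.  (Register numbers are
   hypothetical.)  */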

/* Return 1 if X has a value that can vary even between two
   executions of the program.  0 means X can be compared reliably
   against certain constants or near-constants.  */

static int
cse_rtx_varies_p (rtx x, int from_alias)
{
  /* We need not check for X and the equivalence class being of the same
     mode because if X is equivalent to a constant in some mode, it
     doesn't vary in any mode.  */

  if (REG_P (x)
      && REGNO_QTY_VALID_P (REGNO (x)))
    {
      int x_q = REG_QTY (REGNO (x));
      struct qty_table_elem *x_ent = &qty_table[x_q];

      if (GET_MODE (x) == x_ent->mode
	  && x_ent->const_rtx != NULL_RTX)
	return 0;
    }

  if (GET_CODE (x) == PLUS
      && GET_CODE (XEXP (x, 1)) == CONST_INT
      && REG_P (XEXP (x, 0))
      && REGNO_QTY_VALID_P (REGNO (XEXP (x, 0))))
    {
      int x0_q = REG_QTY (REGNO (XEXP (x, 0)));
      struct qty_table_elem *x0_ent = &qty_table[x0_q];

      if ((GET_MODE (XEXP (x, 0)) == x0_ent->mode)
	  && x0_ent->const_rtx != NULL_RTX)
	return 0;
    }

  /* This can happen as the result of virtual register instantiation, if
     the initial constant is too large to be a valid address.  This gives
     us a three instruction sequence, load large offset into a register,
     load fp minus a constant into a register, then a MEM which is the
     sum of the two `constant' registers.  */
  if (GET_CODE (x) == PLUS
      && REG_P (XEXP (x, 0))
      && REG_P (XEXP (x, 1))
      && REGNO_QTY_VALID_P (REGNO (XEXP (x, 0)))
      && REGNO_QTY_VALID_P (REGNO (XEXP (x, 1))))
    {
      int x0_q = REG_QTY (REGNO (XEXP (x, 0)));
      int x1_q = REG_QTY (REGNO (XEXP (x, 1)));
      struct qty_table_elem *x0_ent = &qty_table[x0_q];
      struct qty_table_elem *x1_ent = &qty_table[x1_q];

      if ((GET_MODE (XEXP (x, 0)) == x0_ent->mode)
	  && x0_ent->const_rtx != NULL_RTX
	  && (GET_MODE (XEXP (x, 1)) == x1_ent->mode)
	  && x1_ent->const_rtx != NULL_RTX)
	return 0;
    }

  return rtx_varies_p (x, from_alias);
}

/* Subroutine of canon_reg.  Pass *XLOC through canon_reg, and validate
   the result if necessary.  INSN is as for canon_reg.  */

static void
validate_canon_reg (rtx *xloc, rtx insn)
{
  rtx new = canon_reg (*xloc, insn);

  /* If replacing pseudo with hard reg or vice versa, ensure the
     insn remains valid.  Likewise if the insn has MATCH_DUPs.  */
  if (insn != 0 && new != 0)
    validate_change (insn, xloc, new, 1);
  else
    *xloc = new;
}

/* Canonicalize an expression:
   replace each register reference inside it
   with the "oldest" equivalent register.

   If INSN is nonzero validate_change is used to ensure that INSN remains valid
   after we make our substitution.  The calls are made with IN_GROUP nonzero
   so apply_change_group must be called upon the outermost return from this
   function (unless INSN is zero).  The result of apply_change_group can
   generally be discarded since the changes we are making are optional.  */

static rtx
canon_reg (rtx x, rtx insn)
{
  int i;
  enum rtx_code code;
  const char *fmt;

  if (x == 0)
    return x;

  code = GET_CODE (x);
  switch (code)
    {
    case PC:
    case CC0:
    case CONST:
    case CONST_INT:
    case CONST_DOUBLE:
    case CONST_VECTOR:
    case SYMBOL_REF:
    case LABEL_REF:
    case ADDR_VEC:
    case ADDR_DIFF_VEC:
      return x;

    case REG:
      {
	int first;
	int q;
	struct qty_table_elem *ent;

	/* Never replace a hard reg, because hard regs can appear
	   in more than one machine mode, and we must preserve the mode
	   of each occurrence.  Also, some hard regs appear in
	   MEMs that are shared and mustn't be altered.  Don't try to
	   replace any reg that maps to a reg of class NO_REGS.  */
	if (REGNO (x) < FIRST_PSEUDO_REGISTER
	    || ! REGNO_QTY_VALID_P (REGNO (x)))
	  return x;

	q = REG_QTY (REGNO (x));
	ent = &qty_table[q];
	first = ent->first_reg;
	return (first >= FIRST_PSEUDO_REGISTER ? regno_reg_rtx[first]
		: REGNO_REG_CLASS (first) == NO_REGS ? x
		: gen_rtx_REG (ent->mode, first));
      }

    default:
      break;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      int j;

      if (fmt[i] == 'e')
	validate_canon_reg (&XEXP (x, i), insn);
      else if (fmt[i] == 'E')
	for (j = 0; j < XVECLEN (x, i); j++)
	  validate_canon_reg (&XVECEXP (x, i, j), insn);
    }

  return x;
}
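
/* For example, if pseudo 105 belongs to a quantity whose oldest
   register (first_reg) is pseudo 100, canon_reg rewrites
   (plus:SI (reg 105) (const_int 4)) as (plus:SI (reg 100)
   (const_int 4)), going through validate_change when INSN is nonzero
   so that the insn stays valid.  (Register numbers are
   hypothetical.)  */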

/* LOC is a location within INSN that is an operand address (the contents of
   a MEM).  Find the best equivalent address to use that is valid for this
   insn.

   On most CISC machines, complicated address modes are costly, and rtx_cost
   is a good approximation for that cost.  However, most RISC machines have
   only a few (usually only one) memory reference formats.  If an address is
   valid at all, it is often just as cheap as any other address.  Hence, for
   RISC machines, we use `address_cost' to compare the costs of various
   addresses.  For two addresses of equal cost, choose the one with the
   highest `rtx_cost' value as that has the potential of eliminating the
   most insns.  For equal costs, we choose the first in the equivalence
   class.  Note that we ignore the fact that pseudo registers are cheaper than
   hard registers here because we would also prefer the pseudo registers.  */

static void
find_best_addr (rtx insn, rtx *loc, enum machine_mode mode)
{
  struct table_elt *elt;
  rtx addr = *loc;
  struct table_elt *p;
  int found_better = 1;
  int save_do_not_record = do_not_record;
  int save_hash_arg_in_memory = hash_arg_in_memory;
  int addr_volatile;
  int regno;
  unsigned hash;

  /* Do not try to replace constant addresses or addresses of local and
     argument slots.  These MEM expressions are made only once and inserted
     in many instructions, as well as being used to control symbol table
     output.  It is not safe to clobber them.

     There are some uncommon cases where the address is already in a register
     for some reason, but we cannot take advantage of that because we have
     no easy way to unshare the MEM.  In addition, looking up all stack
     addresses is costly.  */
  if ((GET_CODE (addr) == PLUS
       && REG_P (XEXP (addr, 0))
       && GET_CODE (XEXP (addr, 1)) == CONST_INT
       && (regno = REGNO (XEXP (addr, 0)),
	   regno == FRAME_POINTER_REGNUM || regno == HARD_FRAME_POINTER_REGNUM
	   || regno == ARG_POINTER_REGNUM))
      || (REG_P (addr)
	  && (regno = REGNO (addr), regno == FRAME_POINTER_REGNUM
	      || regno == HARD_FRAME_POINTER_REGNUM
	      || regno == ARG_POINTER_REGNUM))
      || CONSTANT_ADDRESS_P (addr))
    return;

  /* If this address is not simply a register, try to fold it.  This will
     sometimes simplify the expression.  Many simplifications
     will not be valid, but some, usually applying the associative rule, will
     be valid and produce better code.  */
  if (!REG_P (addr))
    {
      rtx folded = canon_for_address (fold_rtx (addr, NULL_RTX));

      if (folded != addr)
	{
	  int addr_folded_cost = address_cost (folded, mode);
	  int addr_cost = address_cost (addr, mode);

	  if ((addr_folded_cost < addr_cost
	       || (addr_folded_cost == addr_cost
		   /* ??? The rtx_cost comparison is left over from an older
		      version of this code.  It is probably no longer helpful.  */
		   && (rtx_cost (folded, MEM) > rtx_cost (addr, MEM)
		       || approx_reg_cost (folded) < approx_reg_cost (addr))))
	      && validate_change (insn, loc, folded, 0))
	    addr = folded;
	}
    }

  /* If this address is not in the hash table, we can't look for equivalences
     of the whole address.  Also, ignore if volatile.  */

  do_not_record = 0;
  hash = HASH (addr, Pmode);
  addr_volatile = do_not_record;
  do_not_record = save_do_not_record;
  hash_arg_in_memory = save_hash_arg_in_memory;

  if (addr_volatile)
    return;

  elt = lookup (addr, hash, Pmode);

  if (elt)
    {
      /* We need to find the best (under the criteria documented above) entry
	 in the class that is valid.  We use the `flag' field to indicate
	 choices that were invalid and iterate until we can't find a better
	 one that hasn't already been tried.  */

      for (p = elt->first_same_value; p; p = p->next_same_value)
	p->flag = 0;

      while (found_better)
	{
	  int best_addr_cost = address_cost (*loc, mode);
	  int best_rtx_cost = (elt->cost + 1) >> 1;
	  int exp_cost;
	  struct table_elt *best_elt = elt;

	  found_better = 0;
	  for (p = elt->first_same_value; p; p = p->next_same_value)
	    if (! p->flag)
	      {
		if ((REG_P (p->exp)
		     || exp_equiv_p (p->exp, p->exp, 1, false))
		    && ((exp_cost = address_cost (p->exp, mode)) < best_addr_cost
			|| (exp_cost == best_addr_cost
			    && ((p->cost + 1) >> 1) > best_rtx_cost)))
		  {
		    found_better = 1;
		    best_addr_cost = exp_cost;
		    best_rtx_cost = (p->cost + 1) >> 1;
		    best_elt = p;
		  }
	      }

	  if (found_better)
	    {
	      if (validate_change (insn, loc,
				   canon_reg (copy_rtx (best_elt->exp),
					      NULL_RTX), 0))
		return;
	      else
		best_elt->flag = 1;
	    }
	}
    }

  /* If the address is a binary operation with the first operand a register
     and the second a constant, do the same as above, but looking for
     equivalences of the register.  Then try to simplify before checking for
     the best address to use.  This catches a few cases:  First is when we
     have REG+const and the register is another REG+const.  We can often merge
     the constants and eliminate one insn and one register.  It may also be
     that a machine has a cheap REG+REG+const.  Finally, this improves the
     code on the Alpha for unaligned byte stores.  */

  if (flag_expensive_optimizations
      && ARITHMETIC_P (*loc)
      && REG_P (XEXP (*loc, 0)))
    {
      rtx op1 = XEXP (*loc, 1);

      do_not_record = 0;
      hash = HASH (XEXP (*loc, 0), Pmode);
      do_not_record = save_do_not_record;
      hash_arg_in_memory = save_hash_arg_in_memory;

      elt = lookup (XEXP (*loc, 0), hash, Pmode);
      if (elt == 0)
	return;

      /* We need to find the best (under the criteria documented above) entry
	 in the class that is valid.  We use the `flag' field to indicate
	 choices that were invalid and iterate until we can't find a better
	 one that hasn't already been tried.  */

      for (p = elt->first_same_value; p; p = p->next_same_value)
	p->flag = 0;

      while (found_better)
	{
	  int best_addr_cost = address_cost (*loc, mode);
	  int best_rtx_cost = (COST (*loc) + 1) >> 1;
	  struct table_elt *best_elt = elt;
	  rtx best_rtx = *loc;
	  int count;

	  /* In the worst case this is an O(n^2) algorithm, so limit our search
	     to the first 32 elements on the list.  This avoids trouble
	     compiling code with very long basic blocks that can easily
	     call simplify_gen_binary so many times that we run out of
	     memory.  */

	  found_better = 0;
	  for (p = elt->first_same_value, count = 0;
	       p && count < 32;
	       p = p->next_same_value, count++)
	    if (! p->flag
		&& (REG_P (p->exp)
		    || (GET_CODE (p->exp) != EXPR_LIST
			&& exp_equiv_p (p->exp, p->exp, 1, false))))

	      {
		rtx new = simplify_gen_binary (GET_CODE (*loc), Pmode,
					       p->exp, op1);
		int new_cost;
		
		/* Get the canonical version of the address so we can accept
		   more.  */
		new = canon_for_address (new);
		
		new_cost = address_cost (new, mode);

		if (new_cost < best_addr_cost
		    || (new_cost == best_addr_cost
			&& (COST (new) + 1) >> 1 > best_rtx_cost))
		  {
		    found_better = 1;
		    best_addr_cost = new_cost;
		    best_rtx_cost = (COST (new) + 1) >> 1;
		    best_elt = p;
		    best_rtx = new;
		  }
	      }

	  if (found_better)
	    {
	      if (validate_change (insn, loc,
				   canon_reg (copy_rtx (best_rtx),
					      NULL_RTX), 0))
		return;
	      else
		best_elt->flag = 1;
	    }
	}
    }
}
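
/* A sketch of the REG+const case above: if *LOC is (plus (reg 101)
   (const_int 8)) and the table shows (reg 101) equivalent to
   (plus (reg 100) (const_int 16)), simplify_gen_binary produces
   (plus (reg 100) (const_int 24)), which replaces the address if its
   address_cost is lower.  (Registers and offsets are
   hypothetical.)  */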

/* Given an operation (CODE, *PARG1, *PARG2), where code is a comparison
   operation (EQ, NE, GT, etc.), follow it back through the hash table to
   find what values are actually being compared.

   *PARG1 and *PARG2 are updated to contain the rtx representing the values
   actually being compared.  For example, if *PARG1 was (cc0) and *PARG2
   was (const_int 0), *PARG1 and *PARG2 will be set to the objects that were
   compared to produce cc0.

   The return value is the comparison operator: either CODE itself or
   the code corresponding to the inverse of the comparison.  */

static enum rtx_code
find_comparison_args (enum rtx_code code, rtx *parg1, rtx *parg2,
		      enum machine_mode *pmode1, enum machine_mode *pmode2)
{
  rtx arg1, arg2;

  arg1 = *parg1, arg2 = *parg2;

  /* If ARG2 is const0_rtx, see what ARG1 is equivalent to.  */

  while (arg2 == CONST0_RTX (GET_MODE (arg1)))
    {
      /* Set nonzero when we find something of interest.  */
      rtx x = 0;
      int reverse_code = 0;
      struct table_elt *p = 0;

      /* If arg1 is a COMPARE, extract the comparison arguments from it.
	 On machines with CC0, this is the only case that can occur, since
	 fold_rtx will return the COMPARE or item being compared with zero
	 when given CC0.  */

      if (GET_CODE (arg1) == COMPARE && arg2 == const0_rtx)
	x = arg1;

      /* If ARG1 is a comparison operator and CODE is testing for
	 STORE_FLAG_VALUE, get the inner arguments.  */

      else if (COMPARISON_P (arg1))
	{
#ifdef FLOAT_STORE_FLAG_VALUE
	  REAL_VALUE_TYPE fsfv;
#endif

	  if (code == NE
	      || (GET_MODE_CLASS (GET_MODE (arg1)) == MODE_INT
		  && code == LT && STORE_FLAG_VALUE == -1)
#ifdef FLOAT_STORE_FLAG_VALUE
	      || (SCALAR_FLOAT_MODE_P (GET_MODE (arg1))
		  && (fsfv = FLOAT_STORE_FLAG_VALUE (GET_MODE (arg1)),
		      REAL_VALUE_NEGATIVE (fsfv)))
#endif
	      )
	    x = arg1;
	  else if (code == EQ
		   || (GET_MODE_CLASS (GET_MODE (arg1)) == MODE_INT
		       && code == GE && STORE_FLAG_VALUE == -1)
#ifdef FLOAT_STORE_FLAG_VALUE
		   || (SCALAR_FLOAT_MODE_P (GET_MODE (arg1))
		       && (fsfv = FLOAT_STORE_FLAG_VALUE (GET_MODE (arg1)),
			   REAL_VALUE_NEGATIVE (fsfv)))
#endif
		   )
	    x = arg1, reverse_code = 1;
	}

      /* ??? We could also check for

	 (ne (and (eq (...) (const_int 1))) (const_int 0))

	 and related forms, but let's wait until we see them occurring.  */

      if (x == 0)
	/* Look up ARG1 in the hash table and see if it has an equivalence
	   that lets us see what is being compared.  */
	p = lookup (arg1, SAFE_HASH (arg1, GET_MODE (arg1)), GET_MODE (arg1));
      if (p)
	{
	  p = p->first_same_value;

	  /* If what we compare is already known to be constant, that is as
	     good as it gets.
	     We need to break the loop in this case, because otherwise we
	     can have an infinite loop when looking at a reg that is known
	     to be a constant which is the same as a comparison of a reg
	     against zero which appears later in the insn stream, which in
	     turn is constant and the same as the comparison of the first reg
	     against zero...  */
	  if (p->is_const)
	    break;
	}

      for (; p; p = p->next_same_value)
	{
	  enum machine_mode inner_mode = GET_MODE (p->exp);
#ifdef FLOAT_STORE_FLAG_VALUE
	  REAL_VALUE_TYPE fsfv;
#endif

	  /* If the entry isn't valid, skip it.  */
	  if (! exp_equiv_p (p->exp, p->exp, 1, false))
	    continue;

	  if (GET_CODE (p->exp) == COMPARE
	      /* Another possibility is that this machine has a compare insn
		 that includes the comparison code.  In that case, ARG1 would
		 be equivalent to a comparison operation that would set ARG1 to
		 either STORE_FLAG_VALUE or zero.  If this is an NE operation,
		 ORIG_CODE is the actual comparison being done; if it is an EQ,
		 we must reverse ORIG_CODE.  On machines with a negative value
		 for STORE_FLAG_VALUE, also look at LT and GE operations.  */
	      || ((code == NE
		   || (code == LT
		       && GET_MODE_CLASS (inner_mode) == MODE_INT
		       && (GET_MODE_BITSIZE (inner_mode)
			   <= HOST_BITS_PER_WIDE_INT)
		       && (STORE_FLAG_VALUE
			   & ((HOST_WIDE_INT) 1
			      << (GET_MODE_BITSIZE (inner_mode) - 1))))
#ifdef FLOAT_STORE_FLAG_VALUE
		   || (code == LT
		       && SCALAR_FLOAT_MODE_P (inner_mode)
		       && (fsfv = FLOAT_STORE_FLAG_VALUE (GET_MODE (arg1)),
			   REAL_VALUE_NEGATIVE (fsfv)))
#endif
		   )
		  && COMPARISON_P (p->exp)))
	    {
	      x = p->exp;
	      break;
	    }
	  else if ((code == EQ
		    || (code == GE
			&& GET_MODE_CLASS (inner_mode) == MODE_INT
			&& (GET_MODE_BITSIZE (inner_mode)
			    <= HOST_BITS_PER_WIDE_INT)
			&& (STORE_FLAG_VALUE
			    & ((HOST_WIDE_INT) 1
			       << (GET_MODE_BITSIZE (inner_mode) - 1))))
#ifdef FLOAT_STORE_FLAG_VALUE
		    || (code == GE
			&& SCALAR_FLOAT_MODE_P (inner_mode)
			&& (fsfv = FLOAT_STORE_FLAG_VALUE (GET_MODE (arg1)),
			    REAL_VALUE_NEGATIVE (fsfv)))
#endif
		    )
		   && COMPARISON_P (p->exp))
	    {
	      reverse_code = 1;
	      x = p->exp;
	      break;
	    }

	  /* If this is a non-trapping address, e.g. fp + constant, the
	     equivalent is a better operand since it may let us predict
	     the value of the comparison.  */
	  else if (!rtx_addr_can_trap_p (p->exp))
	    {
	      arg1 = p->exp;
	      continue;
	    }
	}

      /* If we didn't find a useful equivalence for ARG1, we are done.
	 Otherwise, set up for the next iteration.  */
      if (x == 0)
	break;

      /* If we need to reverse the comparison, make sure that that is
	 possible -- we can't necessarily infer the value of GE from LT
	 with floating-point operands.  */
      if (reverse_code)
	{
	  enum rtx_code reversed = reversed_comparison_code (x, NULL_RTX);
	  if (reversed == UNKNOWN)
	    break;
	  else
	    code = reversed;
	}
      else if (COMPARISON_P (x))
	code = GET_CODE (x);
      arg1 = XEXP (x, 0), arg2 = XEXP (x, 1);
    }

  /* Return our results.  Return the modes from before fold_rtx
     because fold_rtx might produce const_int, and then it's too late.  */
  *pmode1 = GET_MODE (arg1), *pmode2 = GET_MODE (arg2);
  *parg1 = fold_rtx (arg1, 0), *parg2 = fold_rtx (arg2, 0);

  return code;
}
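
/* For example, with CODE == NE, *PARG1 == (reg 100) and *PARG2 ==
   (const_int 0), if the table shows (reg 100) equivalent to
   (lt:SI (reg 101) (reg 102)) and STORE_FLAG_VALUE is 1, the loop
   replaces the arguments with (reg 101) and (reg 102) and returns LT;
   with CODE == EQ it would instead return the reversed code GE.
   (Register numbers are hypothetical.)  */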

/* Fold SUBREG.  */

static rtx
fold_rtx_subreg (rtx x, rtx insn)
{
  enum machine_mode mode = GET_MODE (x);
  rtx folded_arg0;
  rtx const_arg0;
  rtx new;

  /* See if we previously assigned a constant value to this SUBREG.  */
  if ((new = lookup_as_function (x, CONST_INT)) != 0
      || (new = lookup_as_function (x, CONST_DOUBLE)) != 0)
    return new;

  /* If this is a paradoxical SUBREG, we have no idea what value the
     extra bits would have.  However, if the operand is equivalent to
     a SUBREG whose operand is the same as our mode, and all the modes
     are within a word, we can just use the inner operand because
     these SUBREGs just say how to treat the register.

     Similarly if we find an integer constant.  */

  if (GET_MODE_SIZE (mode) > GET_MODE_SIZE (GET_MODE (SUBREG_REG (x))))
    {
      enum machine_mode imode = GET_MODE (SUBREG_REG (x));
      struct table_elt *elt;

      if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD
	  && GET_MODE_SIZE (imode) <= UNITS_PER_WORD
	  && (elt = lookup (SUBREG_REG (x), HASH (SUBREG_REG (x), imode),
			    imode)) != 0)
	for (elt = elt->first_same_value; elt; elt = elt->next_same_value)
	  {
	    if (CONSTANT_P (elt->exp)
		&& GET_MODE (elt->exp) == VOIDmode)
	      return elt->exp;

	    if (GET_CODE (elt->exp) == SUBREG
		&& GET_MODE (SUBREG_REG (elt->exp)) == mode
		&& exp_equiv_p (elt->exp, elt->exp, 1, false))
	      return copy_rtx (SUBREG_REG (elt->exp));
	  }

      return x;
    }

  /* Fold SUBREG_REG.  If it changed, see if we can simplify the
     SUBREG.  We might be able to if the SUBREG is extracting a single
     word in an integral mode or extracting the low part.  */

  folded_arg0 = fold_rtx (SUBREG_REG (x), insn);
  const_arg0 = equiv_constant (folded_arg0);
  if (const_arg0)
    folded_arg0 = const_arg0;

  if (folded_arg0 != SUBREG_REG (x))
    {
      new = simplify_subreg (mode, folded_arg0,
			     GET_MODE (SUBREG_REG (x)), SUBREG_BYTE (x));
      if (new)
	return new;
    }

  if (REG_P (folded_arg0)
      && GET_MODE_SIZE (mode) < GET_MODE_SIZE (GET_MODE (folded_arg0)))
    {
      struct table_elt *elt;

      elt = lookup (folded_arg0,
		    HASH (folded_arg0, GET_MODE (folded_arg0)),
		    GET_MODE (folded_arg0));

      if (elt)
	elt = elt->first_same_value;

      if (subreg_lowpart_p (x))
	/* If this is a narrowing SUBREG and our operand is a REG, see
	   if we can find an equivalence for REG that is an arithmetic
	   operation in a wider mode where both operands are
	   paradoxical SUBREGs from objects of our result mode.  In
	   that case, we couldn't report an equivalent value for that
	   operation, since we don't know what the extra bits will be.
	   But we can find an equivalence for this SUBREG by folding
	   that operation in the narrow mode.  This allows us to fold
	   arithmetic in narrow modes when the machine only supports
	   word-sized arithmetic.

	   Also look for a case where we have a SUBREG whose operand
	   is the same as our result.  If both modes are smaller than
	   a word, we are simply interpreting a register in different
	   modes and we can use the inner value.  */

	for (; elt; elt = elt->next_same_value)
	  {
	    enum rtx_code eltcode = GET_CODE (elt->exp);

	    /* Just check for unary and binary operations.  */
	    if (UNARY_P (elt->exp)
		&& eltcode != SIGN_EXTEND
		&& eltcode != ZERO_EXTEND
		&& GET_CODE (XEXP (elt->exp, 0)) == SUBREG
		&& GET_MODE (SUBREG_REG (XEXP (elt->exp, 0))) == mode
		&& (GET_MODE_CLASS (mode)
		    == GET_MODE_CLASS (GET_MODE (XEXP (elt->exp, 0)))))
	      {
		rtx op0 = SUBREG_REG (XEXP (elt->exp, 0));

		if (!REG_P (op0) && ! CONSTANT_P (op0))
		  op0 = fold_rtx (op0, NULL_RTX);

		op0 = equiv_constant (op0);
		if (op0)
		  new = simplify_unary_operation (GET_CODE (elt->exp), mode,
						  op0, mode);
	      }
	    else if (ARITHMETIC_P (elt->exp)
		     && eltcode != DIV && eltcode != MOD
		     && eltcode != UDIV && eltcode != UMOD
		     && eltcode != ASHIFTRT && eltcode != LSHIFTRT
		     && eltcode != ROTATE && eltcode != ROTATERT
		     && ((GET_CODE (XEXP (elt->exp, 0)) == SUBREG
			  && (GET_MODE (SUBREG_REG (XEXP (elt->exp, 0)))
			      == mode))
			 || CONSTANT_P (XEXP (elt->exp, 0)))
		     && ((GET_CODE (XEXP (elt->exp, 1)) == SUBREG
			  && (GET_MODE (SUBREG_REG (XEXP (elt->exp, 1)))
			      == mode))
			 || CONSTANT_P (XEXP (elt->exp, 1))))
	      {
		rtx op0 = gen_lowpart_common (mode, XEXP (elt->exp, 0));
		rtx op1 = gen_lowpart_common (mode, XEXP (elt->exp, 1));

		if (op0 && !REG_P (op0) && ! CONSTANT_P (op0))
		  op0 = fold_rtx (op0, NULL_RTX);

		if (op0)
		  op0 = equiv_constant (op0);

		if (op1 && !REG_P (op1) && ! CONSTANT_P (op1))
		  op1 = fold_rtx (op1, NULL_RTX);

		if (op1)
		  op1 = equiv_constant (op1);

		/* If we are looking for the low SImode part of
		   (ashift:DI c (const_int 32)), it doesn't work to
		   compute that in SImode, because a 32-bit shift in
		   SImode is unpredictable.  We know the value is
		   0.  */
		if (op0 && op1
		    && GET_CODE (elt->exp) == ASHIFT
		    && GET_CODE (op1) == CONST_INT
		    && INTVAL (op1) >= GET_MODE_BITSIZE (mode))
		  {
		    if (INTVAL (op1)
			< GET_MODE_BITSIZE (GET_MODE (elt->exp)))
		      /* If the count fits in the inner mode's width,
			 but exceeds the outer mode's width, the value
			 will get truncated to 0 by the subreg.  */
		      new = CONST0_RTX (mode);
		    else
		      /* If the count exceeds even the inner mode's width,
			 don't fold this expression.  */
		      new = 0;
		  }
		else if (op0 && op1)
		  new = simplify_binary_operation (GET_CODE (elt->exp),
						   mode, op0, op1);
	      }

	    else if (GET_CODE (elt->exp) == SUBREG
		     && GET_MODE (SUBREG_REG (elt->exp)) == mode
		     && (GET_MODE_SIZE (GET_MODE (folded_arg0))
			 <= UNITS_PER_WORD)
		     && exp_equiv_p (elt->exp, elt->exp, 1, false))
	      new = copy_rtx (SUBREG_REG (elt->exp));

	    if (new)
	      return new;
	  }
      else
	/* A SUBREG resulting from a zero extension may fold to zero
	   if it extracts higher bits than the ZERO_EXTEND's source
	   bits.  FIXME: if combine tried to, er, combine these
	   instructions, this transformation may be moved to
	   simplify_subreg.  */
	for (; elt; elt = elt->next_same_value)
	  {
	    if (GET_CODE (elt->exp) == ZERO_EXTEND
		&& subreg_lsb (x)
		>= GET_MODE_BITSIZE (GET_MODE (XEXP (elt->exp, 0))))
	      return CONST0_RTX (mode);
	  }
    }

  return x;
}
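
/* An illustration of the narrowing case above: for
   (subreg:QI (reg:SI 100) 0), if the table shows (reg:SI 100)
   equivalent to (plus:SI (subreg:SI (reg:QI 101) 0) (const_int 1))
   and (reg:QI 101) is known to equal (const_int 4), the addition is
   redone in QImode and the SUBREG folds to (const_int 5).
   (Register numbers and values are hypothetical.)  */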

/* Fold MEM.  Not to be called directly, see fold_rtx_mem instead.  */

static rtx
fold_rtx_mem_1 (rtx x, rtx insn)
{
  enum machine_mode mode = GET_MODE (x);
  rtx new;

  /* If we are not actually processing an insn, don't try to find the
     best address.  Not only don't we care, but we could modify the
     MEM in an invalid way since we have no insn to validate
     against.  */
  if (insn != 0)
    find_best_addr (insn, &XEXP (x, 0), mode);

  {
    /* Even if we don't fold in the insn itself, we can safely do so
       here, in hopes of getting a constant.  */
    rtx addr = fold_rtx (XEXP (x, 0), NULL_RTX);
    rtx base = 0;
    HOST_WIDE_INT offset = 0;

    if (REG_P (addr)
	&& REGNO_QTY_VALID_P (REGNO (addr)))
      {
	int addr_q = REG_QTY (REGNO (addr));
	struct qty_table_elem *addr_ent = &qty_table[addr_q];

	if (GET_MODE (addr) == addr_ent->mode
	    && addr_ent->const_rtx != NULL_RTX)
	  addr = addr_ent->const_rtx;
      }

    /* Call target hook to avoid the effects of -fpic etc....  */
    addr = targetm.delegitimize_address (addr);

    /* If address is constant, split it into a base and integer
       offset.  */
    if (GET_CODE (addr) == SYMBOL_REF || GET_CODE (addr) == LABEL_REF)
      base = addr;
    else if (GET_CODE (addr) == CONST && GET_CODE (XEXP (addr, 0)) == PLUS
	     && GET_CODE (XEXP (XEXP (addr, 0), 1)) == CONST_INT)
      {
	base = XEXP (XEXP (addr, 0), 0);
	offset = INTVAL (XEXP (XEXP (addr, 0), 1));
      }
    else if (GET_CODE (addr) == LO_SUM
	     && GET_CODE (XEXP (addr, 1)) == SYMBOL_REF)
      base = XEXP (addr, 1);

    /* If this is a constant pool reference, we can fold it into its
       constant to allow better value tracking.  */
    if (base && GET_CODE (base) == SYMBOL_REF
	&& CONSTANT_POOL_ADDRESS_P (base))
      {
	rtx constant = get_pool_constant (base);
	enum machine_mode const_mode = get_pool_mode (base);
	rtx new;

	if (CONSTANT_P (constant) && GET_CODE (constant) != CONST_INT)
	  {
	    constant_pool_entries_cost = COST (constant);
	    constant_pool_entries_regcost = approx_reg_cost (constant);
	  }

	/* If we are loading the full constant, we have an
	   equivalence.  */
	if (offset == 0 && mode == const_mode)
	  return constant;

	/* If this actually isn't a constant (weird!), we can't do
	   anything.  Otherwise, handle the two most common cases:
	   extracting a word from a multi-word constant, and
	   extracting the low-order bits.  Other cases don't seem
	   common enough to worry about.  */
	if (! CONSTANT_P (constant))
	  return x;

	if (GET_MODE_CLASS (mode) == MODE_INT
	    && GET_MODE_SIZE (mode) == UNITS_PER_WORD
	    && offset % UNITS_PER_WORD == 0
	    && (new = operand_subword (constant,
				       offset / UNITS_PER_WORD,
				       0, const_mode)) != 0)
	  return new;

	if (((BYTES_BIG_ENDIAN
	      && offset == GET_MODE_SIZE (GET_MODE (constant)) - 1)
	     || (! BYTES_BIG_ENDIAN && offset == 0))
	    && (new = gen_lowpart (mode, constant)) != 0)
	  return new;
      }

    /* If this is a reference to a label at a known position in a jump
       table, we also know its value.  */
    if (base && GET_CODE (base) == LABEL_REF)
      {
	rtx label = XEXP (base, 0);
	rtx table_insn = NEXT_INSN (label);

	if (table_insn && JUMP_P (table_insn)
	    && GET_CODE (PATTERN (table_insn)) == ADDR_VEC)
	  {
	    rtx table = PATTERN (table_insn);

	    if (offset >= 0
		&& (offset / GET_MODE_SIZE (GET_MODE (table))
		    < XVECLEN (table, 0)))
	      {
		rtx label = XVECEXP
		  (table, 0, offset / GET_MODE_SIZE (GET_MODE (table)));
		rtx set;

		/* If we have an insn that loads the label from the
		   jumptable into a reg, we don't want to set the reg
		   to the label, because this may cause a reference to
		   the label to remain after the label is removed in
		   some very obscure cases (PR middle-end/18628).  */
		if (!insn)
		  return label;

		set = single_set (insn);

		if (! set || SET_SRC (set) != x)
		  return x;

		/* If it's a jump, it's safe to reference the label.  */
		if (SET_DEST (set) == pc_rtx)
		  return label;

		return x;
	      }
	  }
	if (table_insn && JUMP_P (table_insn)
	    && GET_CODE (PATTERN (table_insn)) == ADDR_DIFF_VEC)
	  {
	    rtx table = PATTERN (table_insn);

	    if (offset >= 0
		&& (offset / GET_MODE_SIZE (GET_MODE (table))
		    < XVECLEN (table, 1)))
	      {
		offset /= GET_MODE_SIZE (GET_MODE (table));
		new = gen_rtx_MINUS (Pmode, XVECEXP (table, 1, offset),
				     XEXP (table, 0));

		if (GET_MODE (table) != Pmode)
		  new = gen_rtx_TRUNCATE (GET_MODE (table), new);

		/* Indicate this is a constant.  This isn't a valid
		   form of CONST, but it will only be used to fold the
		   next insns and then discarded, so it should be
		   safe.

		   Note this expression must be explicitly discarded,
		   by cse_insn, else it may end up in a REG_EQUAL note
		   and "escape" to cause problems elsewhere.  */
		return gen_rtx_CONST (GET_MODE (new), new);
	      }
	  }
      }

    return x;
  }
}

/* Fold MEM.  */

static rtx
fold_rtx_mem (rtx x, rtx insn)
{
  /* To avoid infinite oscillations between fold_rtx and fold_rtx_mem,
     refuse to allow recursion of the latter past n levels.  This can
     happen because fold_rtx_mem will try to fold the address of the
     memory reference it is passed, i.e. conceptually throwing away
     the MEM and reinjecting the bare address into fold_rtx.  As a
     result, patterns like

       set (reg1)
	   (plus (reg)
		 (mem (plus (reg2) (const_int))))

       set (reg2)
	   (plus (reg)
		 (mem (plus (reg1) (const_int))))

     will defeat any "first-order" short-circuit put in either
     function to prevent these infinite oscillations.

     The heuristic for determining n is as follows: since each time
     it is invoked fold_rtx_mem throws away a MEM, and since MEMs
     are generically not nested, we assume that each invocation of
     fold_rtx_mem corresponds to a new "top-level" operand, i.e.
     the source or the destination of a SET.  So fold_rtx_mem is
     bound to stop or cycle before n recursions, n being the number
     of expressions recorded in the hash table.  We also leave some
     play to account for the initial steps.  */

  static unsigned int depth;
  rtx ret;

  if (depth > 3 + table_size)
    return x;

  depth++;
  ret = fold_rtx_mem_1 (x, insn);
  depth--;

  return ret;
}

/* If X is a nontrivial arithmetic operation on an argument
   for which a constant value can be determined, return
   the result of operating on that value, as a constant.
   Otherwise, return X, possibly with one or more operands
   modified by recursive calls to this function.

   If X is a register whose contents are known, we do NOT
   return those contents here.  equiv_constant is called to
   perform that task.

   INSN is the insn that we may be modifying.  If it is 0, make a copy
   of X before modifying it.  */

static rtx
fold_rtx (rtx x, rtx insn)
{
  enum rtx_code code;
  enum machine_mode mode;
  const char *fmt;
  int i;
  rtx new = 0;
  int copied = 0;
  int must_swap = 0;

  /* Folded equivalents of first two operands of X.  */
  rtx folded_arg0;
  rtx folded_arg1;

  /* Constant equivalents of first three operands of X;
     0 when no such equivalent is known.  */
  rtx const_arg0;
  rtx const_arg1;
  rtx const_arg2;

  /* The mode of the first operand of X.  We need this for sign and zero
     extends.  */
  enum machine_mode mode_arg0;

  if (x == 0)
    return x;

  mode = GET_MODE (x);
  code = GET_CODE (x);
  switch (code)
    {
    case CONST:
    case CONST_INT:
    case CONST_DOUBLE:
    case CONST_VECTOR:
    case SYMBOL_REF:
    case LABEL_REF:
    case REG:
    case PC:
      /* No use simplifying an EXPR_LIST
	 since they are used only for lists of args
	 in a function call's REG_EQUAL note.  */
    case EXPR_LIST:
      return x;

#ifdef HAVE_cc0
    case CC0:
      return prev_insn_cc0;
#endif

    case SUBREG:
      return fold_rtx_subreg (x, insn);

    case NOT:
    case NEG:
      /* If we have (NOT Y), see if Y is known to be (NOT Z).
	 If so, (NOT Y) simplifies to Z.  Similarly for NEG.  */
      new = lookup_as_function (XEXP (x, 0), code);
      if (new)
	return fold_rtx (copy_rtx (XEXP (new, 0)), insn);
      break;

    case MEM:
      return fold_rtx_mem (x, insn);

#ifdef NO_FUNCTION_CSE
    case CALL:
      if (CONSTANT_P (XEXP (XEXP (x, 0), 0)))
	return x;
      break;
#endif

    case ASM_OPERANDS:
      if (insn)
	{
	  for (i = ASM_OPERANDS_INPUT_LENGTH (x) - 1; i >= 0; i--)
	    validate_change (insn, &ASM_OPERANDS_INPUT (x, i),
			     fold_rtx (ASM_OPERANDS_INPUT (x, i), insn), 0);
	}
      break;

    default:
      break;
    }

  const_arg0 = 0;
  const_arg1 = 0;
  const_arg2 = 0;
  mode_arg0 = VOIDmode;

  /* Try folding our operands.
     Then see which ones have constant values known.  */

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    if (fmt[i] == 'e')
      {
	rtx arg = XEXP (x, i);
	rtx folded_arg = arg, const_arg = 0;
	enum machine_mode mode_arg = GET_MODE (arg);
	rtx cheap_arg, expensive_arg;
	rtx replacements[2];
	int j;
	int old_cost = COST_IN (XEXP (x, i), code);

	/* Most arguments are cheap, so handle them specially.  */
	switch (GET_CODE (arg))
	  {
	  case REG:
	    /* This is the same as calling equiv_constant; it is duplicated
	       here for speed.  */
	    if (REGNO_QTY_VALID_P (REGNO (arg)))
	      {
		int arg_q = REG_QTY (REGNO (arg));
		struct qty_table_elem *arg_ent = &qty_table[arg_q];

		if (arg_ent->const_rtx != NULL_RTX
		    && !REG_P (arg_ent->const_rtx)
		    && GET_CODE (arg_ent->const_rtx) != PLUS)
		  const_arg
		    = gen_lowpart (GET_MODE (arg),
					       arg_ent->const_rtx);
	      }
	    break;

	  case CONST:
	  case CONST_INT:
	  case SYMBOL_REF:
	  case LABEL_REF:
	  case CONST_DOUBLE:
	  case CONST_VECTOR:
	    const_arg = arg;
	    break;

#ifdef HAVE_cc0
	  case CC0:
	    folded_arg = prev_insn_cc0;
	    mode_arg = prev_insn_cc0_mode;
	    const_arg = equiv_constant (folded_arg);
	    break;
#endif

	  default:
	    folded_arg = fold_rtx (arg, insn);
	    const_arg = equiv_constant (folded_arg);
	  }

	/* For the first three operands, see if the operand
	   is constant or equivalent to a constant.  */
	switch (i)
	  {
	  case 0:
	    folded_arg0 = folded_arg;
	    const_arg0 = const_arg;
	    mode_arg0 = mode_arg;
	    break;
	  case 1:
	    folded_arg1 = folded_arg;
	    const_arg1 = const_arg;
	    break;
	  case 2:
	    const_arg2 = const_arg;
	    break;
	  }

	/* Pick the least expensive of the folded argument and an
	   equivalent constant argument.  */
	if (const_arg == 0 || const_arg == folded_arg
	    || COST_IN (const_arg, code) > COST_IN (folded_arg, code))
	  cheap_arg = folded_arg, expensive_arg = const_arg;
	else
	  cheap_arg = const_arg, expensive_arg = folded_arg;

	/* Try to replace the operand with the cheapest of the two
	   possibilities.  If it doesn't work and this is either of the first
	   two operands of a commutative operation, try swapping them.
	   If THAT fails, try the more expensive, provided it is cheaper
	   than what is already there.  */

	if (cheap_arg == XEXP (x, i))
	  continue;

	if (insn == 0 && ! copied)
	  {
	    x = copy_rtx (x);
	    copied = 1;
	  }

	/* Order the replacements from cheapest to most expensive.  */
	replacements[0] = cheap_arg;
	replacements[1] = expensive_arg;

	for (j = 0; j < 2 && replacements[j]; j++)
	  {
	    int new_cost = COST_IN (replacements[j], code);

	    /* Stop if what existed before was cheaper.  Prefer constants
	       in the case of a tie.  */
	    if (new_cost > old_cost
		|| (new_cost == old_cost && CONSTANT_P (XEXP (x, i))))
	      break;

	    /* It's not safe to substitute the operand of a conversion
	       operator with a constant, as the conversion's identity
	       depends upon the mode of its operand.  This optimization
	       is handled by the call to simplify_unary_operation.  */
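	    /* For example, replacing the (reg:QI 100) operand of
	       (zero_extend:SI (reg:QI 100)) with a VOIDmode CONST_INT
	       would lose the information that only the low 8 bits of
	       the operand are significant.  */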
	    if (GET_RTX_CLASS (code) == RTX_UNARY
		&& GET_MODE (replacements[j]) != mode_arg0
		&& (code == ZERO_EXTEND
		    || code == SIGN_EXTEND
		    || code == TRUNCATE
		    || code == FLOAT_TRUNCATE
		    || code == FLOAT_EXTEND
		    || code == FLOAT
		    || code == FIX
		    || code == UNSIGNED_FLOAT
		    || code == UNSIGNED_FIX))
	      continue;

	    if (validate_change (insn, &XEXP (x, i), replacements[j], 0))
	      break;

	    if (GET_RTX_CLASS (code) == RTX_COMM_COMPARE
		|| GET_RTX_CLASS (code) == RTX_COMM_ARITH)
	      {
		validate_change (insn, &XEXP (x, i), XEXP (x, 1 - i), 1);
		validate_change (insn, &XEXP (x, 1 - i), replacements[j], 1);

		if (apply_change_group ())
		  {
		    /* The change worked with the operands swapped.  Swap
		       them back in X (making X momentarily invalid) so
		       that this loop can continue, and flag them to be
		       swapped back later.  */
		    rtx tem;

		    tem = XEXP (x, 0); XEXP (x, 0) = XEXP (x, 1);
				       XEXP (x, 1) = tem;
		    must_swap = 1;
		    break;
		  }
	      }
	  }
      }

    else
      {
	if (fmt[i] == 'E')
	  /* Don't try to fold inside of a vector of expressions.
	     Doing nothing is harmless.  */
	  {;}
      }

  /* If X is a commutative operation, place a constant integer as the second
     operand unless the first operand is also a constant integer.  Otherwise,
     place any constant second unless the first operand is also a constant.  */

  if (COMMUTATIVE_P (x))
    {
      if (must_swap
	  || swap_commutative_operands_p (const_arg0 ? const_arg0
						     : XEXP (x, 0),
					  const_arg1 ? const_arg1
						     : XEXP (x, 1)))
	{
	  rtx tem = XEXP (x, 0);

	  if (insn == 0 && ! copied)
	    {
	      x = copy_rtx (x);
	      copied = 1;
	    }

	  validate_change (insn, &XEXP (x, 0), XEXP (x, 1), 1);
	  validate_change (insn, &XEXP (x, 1), tem, 1);
	  if (apply_change_group ())
	    {
	      tem = const_arg0, const_arg0 = const_arg1, const_arg1 = tem;
	      tem = folded_arg0, folded_arg0 = folded_arg1, folded_arg1 = tem;
	    }
	}
    }

  /* If X is an arithmetic operation, see if we can simplify it.  */

  switch (GET_RTX_CLASS (code))
    {
    case RTX_UNARY:
      {
	int is_const = 0;

	/* We can't simplify extension ops unless we know the
	   original mode.  */
	if ((code == ZERO_EXTEND || code == SIGN_EXTEND)
	    && mode_arg0 == VOIDmode)
	  break;

	/* If we had a CONST, strip it off and put it back later if we
	   fold.  */
	if (const_arg0 != 0 && GET_CODE (const_arg0) == CONST)
	  is_const = 1, const_arg0 = XEXP (const_arg0, 0);

	new = simplify_unary_operation (code, mode,
					const_arg0 ? const_arg0 : folded_arg0,
					mode_arg0);
	/* NEG of PLUS could be converted into MINUS, but that causes
	   expressions of the form
	   (CONST (MINUS (CONST_INT) (SYMBOL_REF)))
	   which many ports mistakenly treat as LEGITIMATE_CONSTANT_P.
	   FIXME: those ports should be fixed.  */
	if (new != 0 && is_const
	    && GET_CODE (new) == PLUS
	    && (GET_CODE (XEXP (new, 0)) == SYMBOL_REF
		|| GET_CODE (XEXP (new, 0)) == LABEL_REF)
	    && GET_CODE (XEXP (new, 1)) == CONST_INT)
	  new = gen_rtx_CONST (mode, new);
      }
      break;

    case RTX_COMPARE:
    case RTX_COMM_COMPARE:
      /* See what items are actually being compared and set FOLDED_ARG[01]
	 to those values and CODE to the actual comparison code.  If any are
	 constant, set CONST_ARG0 and CONST_ARG1 appropriately.  We needn't
	 do anything if both operands are already known to be constant.  */

      /* ??? Vector mode comparisons are not supported yet.  */
      if (VECTOR_MODE_P (mode))
	break;

      if (const_arg0 == 0 || const_arg1 == 0)
	{
	  struct table_elt *p0, *p1;
	  rtx true_rtx = const_true_rtx, false_rtx = const0_rtx;
	  enum machine_mode mode_arg1;

#ifdef FLOAT_STORE_FLAG_VALUE
	  if (SCALAR_FLOAT_MODE_P (mode))
	    {
	      true_rtx = (CONST_DOUBLE_FROM_REAL_VALUE
			  (FLOAT_STORE_FLAG_VALUE (mode), mode));
	      false_rtx = CONST0_RTX (mode);
	    }
#endif

	  code = find_comparison_args (code, &folded_arg0, &folded_arg1,
				       &mode_arg0, &mode_arg1);

	  /* If the mode is VOIDmode or a MODE_CC mode, we don't know
	     what kinds of things are being compared, so we can't do
	     anything with this comparison.  */

	  if (mode_arg0 == VOIDmode || GET_MODE_CLASS (mode_arg0) == MODE_CC)
	    break;

	  const_arg0 = equiv_constant (folded_arg0);
	  const_arg1 = equiv_constant (folded_arg1);

	  /* If we do not now have two constants being compared, see
	     if we can nevertheless deduce some things about the
	     comparison.  */
	  if (const_arg0 == 0 || const_arg1 == 0)
	    {
	      if (const_arg1 != NULL)
		{
		  rtx cheapest_simplification;
		  int cheapest_cost;
		  rtx simp_result;
		  struct table_elt *p;

		  /* See if we can find an equivalent of folded_arg0
		     that gives us a cheaper expression, possibly a
		     constant, through simplification.  */
		  p = lookup (folded_arg0, SAFE_HASH (folded_arg0, mode_arg0),
			      mode_arg0);
		  
		  if (p != NULL)
		    {
		      cheapest_simplification = x;
		      cheapest_cost = COST (x);

		      for (p = p->first_same_value; p != NULL; p = p->next_same_value)
			{
			  int cost;

			  /* If the entry isn't valid, skip it.  */
			  if (! exp_equiv_p (p->exp, p->exp, 1, false))
			    continue;

			  /* Try to simplify using this equivalence.  */
			  simp_result
			    = simplify_relational_operation (code, mode,
							     mode_arg0,
							     p->exp,
							     const_arg1);

			  if (simp_result == NULL)
			    continue;

			  cost = COST (simp_result);
			  if (cost < cheapest_cost)
			    {
			      cheapest_cost = cost;
			      cheapest_simplification = simp_result;
			    }
			}

		      /* If we have a cheaper expression now, use that
			 and try folding it further, from the top.  */
		      if (cheapest_simplification != x)
			return fold_rtx (cheapest_simplification, insn);
		    }
		}

	      /* Some addresses are known to be nonzero.  We don't know
		 their sign, but equality comparisons are known.  */
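	      /* For example, (symbol_ref "foo") compared against
		 (const_int 0) folds to false for EQ and to true for NE,
		 since the address of an ordinary declared object is
		 never zero.  */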
	      if (const_arg1 == const0_rtx
		  && nonzero_address_p (folded_arg0))
		{
		  if (code == EQ)
		    return false_rtx;
		  else if (code == NE)
		    return true_rtx;
		}

	      /* See if the two operands are the same.  */

	      if (folded_arg0 == folded_arg1
		  || (REG_P (folded_arg0)
		      && REG_P (folded_arg1)
		      && (REG_QTY (REGNO (folded_arg0))
			  == REG_QTY (REGNO (folded_arg1))))
		  || ((p0 = lookup (folded_arg0,
				    SAFE_HASH (folded_arg0, mode_arg0),
				    mode_arg0))
		      && (p1 = lookup (folded_arg1,
				       SAFE_HASH (folded_arg1, mode_arg0),
				       mode_arg0))
		      && p0->first_same_value == p1->first_same_value))
		{
		  /* Sadly two equal NaNs are not equivalent.  */
		  if (!HONOR_NANS (mode_arg0))
		    return ((code == EQ || code == LE || code == GE
			     || code == LEU || code == GEU || code == UNEQ
			     || code == UNLE || code == UNGE
			     || code == ORDERED)
			    ? true_rtx : false_rtx);
		  /* Take care of the FP compares we can still resolve.  */
		  if (code == UNEQ || code == UNLE || code == UNGE)
		    return true_rtx;
		  if (code == LTGT || code == LT || code == GT)
		    return false_rtx;
		}

	      /* If FOLDED_ARG0 is a register, see if the comparison we are
		 doing now is either the same as we did before or the reverse
		 (we only check the reverse if not floating-point).  */
	      else if (REG_P (folded_arg0))
		{
		  int qty = REG_QTY (REGNO (folded_arg0));

		  if (REGNO_QTY_VALID_P (REGNO (folded_arg0)))
		    {
		      struct qty_table_elem *ent = &qty_table[qty];

		      if ((comparison_dominates_p (ent->comparison_code, code)
			   || (! FLOAT_MODE_P (mode_arg0)
			       && comparison_dominates_p (ent->comparison_code,
						          reverse_condition (code))))
			  && (rtx_equal_p (ent->comparison_const, folded_arg1)
			      || (const_arg1
				  && rtx_equal_p (ent->comparison_const,
						  const_arg1))
			      || (REG_P (folded_arg1)
				  && (REG_QTY (REGNO (folded_arg1)) == ent->comparison_qty))))
			return (comparison_dominates_p (ent->comparison_code, code)
				? true_rtx : false_rtx);
		    }
		}
	    }
	}

      /* If we are comparing against zero, see if the first operand is
	 equivalent to an IOR with a constant.  If so, we may be able to
	 determine the result of this comparison.  */
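      /* For example, if the first operand is known to be
	 (ior (reg) (const_int 4)), it cannot be zero, so comparing it
	 against zero folds to false for EQ and to true for NE.  */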

      if (const_arg1 == const0_rtx)
	{
	  rtx y = lookup_as_function (folded_arg0, IOR);
	  rtx inner_const;

	  if (y != 0
	      && (inner_const = equiv_constant (XEXP (y, 1))) != 0
	      && GET_CODE (inner_const) == CONST_INT
	      && INTVAL (inner_const) != 0)
	    {
	      int sign_bitnum = GET_MODE_BITSIZE (mode_arg0) - 1;
	      int has_sign = (HOST_BITS_PER_WIDE_INT >= sign_bitnum
			      && (INTVAL (inner_const)
				  & ((HOST_WIDE_INT) 1 << sign_bitnum)));
	      rtx true_rtx = const_true_rtx, false_rtx = const0_rtx;

#ifdef FLOAT_STORE_FLAG_VALUE
	      if (SCALAR_FLOAT_MODE_P (mode))
		{
		  true_rtx = (CONST_DOUBLE_FROM_REAL_VALUE
			  (FLOAT_STORE_FLAG_VALUE (mode), mode));
		  false_rtx = CONST0_RTX (mode);
		}
#endif

	      switch (code)
		{
		case EQ:
		  return false_rtx;
		case NE:
		  return true_rtx;
		case LT:  case LE:
		  if (has_sign)
		    return true_rtx;
		  break;
		case GT:  case GE:
		  if (has_sign)
		    return false_rtx;
		  break;
		default:
		  break;
		}
	    }
	}

      {
	rtx op0 = const_arg0 ? const_arg0 : folded_arg0;
	rtx op1 = const_arg1 ? const_arg1 : folded_arg1;
        new = simplify_relational_operation (code, mode, mode_arg0, op0, op1);
      }
      break;

    case RTX_BIN_ARITH:
    case RTX_COMM_ARITH:
      switch (code)
	{
	case PLUS:
	  /* If the second operand is a LABEL_REF, see if the first is a MINUS
	     with that LABEL_REF as its second operand.  If so, the result is
	     the first operand of that MINUS.  This handles switches with an
	     ADDR_DIFF_VEC table.  */
	  if (const_arg1 && GET_CODE (const_arg1) == LABEL_REF)
	    {
	      rtx y
		= GET_CODE (folded_arg0) == MINUS ? folded_arg0
		: lookup_as_function (folded_arg0, MINUS);

	      if (y != 0 && GET_CODE (XEXP (y, 1)) == LABEL_REF
		  && XEXP (XEXP (y, 1), 0) == XEXP (const_arg1, 0))
		return XEXP (y, 0);

	      /* Now try for a CONST of a MINUS like the above.  */
	      if ((y = (GET_CODE (folded_arg0) == CONST ? folded_arg0
			: lookup_as_function (folded_arg0, CONST))) != 0
		  && GET_CODE (XEXP (y, 0)) == MINUS
		  && GET_CODE (XEXP (XEXP (y, 0), 1)) == LABEL_REF
		  && XEXP (XEXP (XEXP (y, 0), 1), 0) == XEXP (const_arg1, 0))
		return XEXP (XEXP (y, 0), 0);
	    }

	  /* Likewise if the operands are in the other order.  */
	  if (const_arg0 && GET_CODE (const_arg0) == LABEL_REF)
	    {
	      rtx y
		= GET_CODE (folded_arg1) == MINUS ? folded_arg1
		: lookup_as_function (folded_arg1, MINUS);

	      if (y != 0 && GET_CODE (XEXP (y, 1)) == LABEL_REF
		  && XEXP (XEXP (y, 1), 0) == XEXP (const_arg0, 0))
		return XEXP (y, 0);

	      /* Now try for a CONST of a MINUS like the above.  */
	      if ((y = (GET_CODE (folded_arg1) == CONST ? folded_arg1
			: lookup_as_function (folded_arg1, CONST))) != 0
		  && GET_CODE (XEXP (y, 0)) == MINUS
		  && GET_CODE (XEXP (XEXP (y, 0), 1)) == LABEL_REF
		  && XEXP (XEXP (XEXP (y, 0), 1), 0) == XEXP (const_arg0, 0))
		return XEXP (XEXP (y, 0), 0);
	    }

	  /* If second operand is a register equivalent to a negative
	     CONST_INT, see if we can find a register equivalent to the
	     positive constant.  Make a MINUS if so.  Don't do this for
	     a non-negative constant since we might then alternate between
	     choosing positive and negative constants.  Having the positive
	     constant previously-used is the more common case.  Be sure
	     the resulting constant is non-negative; if const_arg1 were
	     the smallest negative number this would overflow: depending
	     on the mode, this would either just be the same value (and
	     hence not save anything) or be incorrect.  */
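	  /* For example, if (reg 99) is known to hold (const_int -4) and
	     some (reg 98) is known to hold (const_int 4), then
	     (plus (reg 97) (reg 99)) can be rewritten as
	     (minus (reg 97) (reg 98)).  */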
	  if (const_arg1 != 0 && GET_CODE (const_arg1) == CONST_INT
	      && INTVAL (const_arg1) < 0
	      /* This used to test

	         -INTVAL (const_arg1) >= 0

		 But the Sun V5.0 compilers miscompiled that test.  So
		 instead we test for the problematic value in a more direct
		 manner and hope the Sun compilers get it correct.  */
	      && INTVAL (const_arg1) !=
	        ((HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1))
	      && REG_P (folded_arg1))
	    {
	      rtx new_const = GEN_INT (-INTVAL (const_arg1));
	      struct table_elt *p
		= lookup (new_const, SAFE_HASH (new_const, mode), mode);

	      if (p)
		for (p = p->first_same_value; p; p = p->next_same_value)
		  if (REG_P (p->exp))
		    return simplify_gen_binary (MINUS, mode, folded_arg0,
						canon_reg (p->exp, NULL_RTX));
	    }
	  goto from_plus;

	case MINUS:
	  /* If we have (MINUS Y C), see if Y is known to be (PLUS Z C2).
	     If so, produce (PLUS Z C2-C).  */
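	  /* For example, (minus (reg 100) (const_int 3)) where (reg 100)
	     is known to be (plus (reg 99) (const_int 8)) folds to
	     (plus (reg 99) (const_int 5)).  */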
	  if (const_arg1 != 0 && GET_CODE (const_arg1) == CONST_INT)
	    {
	      rtx y = lookup_as_function (XEXP (x, 0), PLUS);
	      if (y && GET_CODE (XEXP (y, 1)) == CONST_INT)
		return fold_rtx (plus_constant (copy_rtx (y),
						-INTVAL (const_arg1)),
				 NULL_RTX);
	    }

	  /* Fall through.  */

	from_plus:
	case SMIN:    case SMAX:      case UMIN:    case UMAX:
	case IOR:     case AND:       case XOR:
	case MULT:
	case ASHIFT:  case LSHIFTRT:  case ASHIFTRT:
	  /* If we have (<op> <reg> <const_int>) for an associative OP and REG
	     is known to be of similar form, we may be able to replace the
	     operation with a combined operation.  This may eliminate the
	     intermediate operation if every use is simplified in this way.
	     Note that the similar optimization done by combine.c only works
	     if the intermediate operation's result has only one reference.  */
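	  /* For example, if (reg 100) is known to be
	     (ashift (reg 99) (const_int 3)), then
	     (ashift (reg 100) (const_int 2)) can be rewritten as
	     (ashift (reg 99) (const_int 5)), since the shift counts add.  */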

	  if (REG_P (folded_arg0)
	      && const_arg1 && GET_CODE (const_arg1) == CONST_INT)
	    {
	      int is_shift
		= (code == ASHIFT || code == ASHIFTRT || code == LSHIFTRT);
	      rtx y, inner_const, new_const;
	      enum rtx_code associate_code;

	      if (is_shift
		  && (INTVAL (const_arg1) >= GET_MODE_BITSIZE (mode)
		      || INTVAL (const_arg1) < 0))
		{
		  if (SHIFT_COUNT_TRUNCATED)
		    const_arg1 = GEN_INT (INTVAL (const_arg1)
					  & (GET_MODE_BITSIZE (mode) - 1));
		  else
		    break;
		}

	      y = lookup_as_function (folded_arg0, code);
	      if (y == 0)
		break;

	      /* If we have compiled a statement like
		 "if (x == (x & mask1))", and now are looking at
		 "x & mask2", we will have a case where the first operand
		 of Y is the same as our first operand.  Unless we detect
		 this case, an infinite loop will result.  */
	      if (XEXP (y, 0) == folded_arg0)
		break;

	      inner_const = equiv_constant (fold_rtx (XEXP (y, 1), 0));
	      if (!inner_const || GET_CODE (inner_const) != CONST_INT)
		break;

	      /* Don't associate these operations if they are a PLUS with the
		 same constant and it is a power of two.  These might be doable
		 with a pre- or post-increment.  Similarly for two subtracts of
		 identical powers of two with post decrement.  */

	      if (code == PLUS && const_arg1 == inner_const
		  && ((HAVE_PRE_INCREMENT
			  && exact_log2 (INTVAL (const_arg1)) >= 0)
		      || (HAVE_POST_INCREMENT
			  && exact_log2 (INTVAL (const_arg1)) >= 0)
		      || (HAVE_PRE_DECREMENT
			  && exact_log2 (- INTVAL (const_arg1)) >= 0)
		      || (HAVE_POST_DECREMENT
			  && exact_log2 (- INTVAL (const_arg1)) >= 0)))
		break;

	      if (is_shift
		  && (INTVAL (inner_const) >= GET_MODE_BITSIZE (mode)
		      || INTVAL (inner_const) < 0))
		{
		  if (SHIFT_COUNT_TRUNCATED)
		    inner_const = GEN_INT (INTVAL (inner_const)
					   & (GET_MODE_BITSIZE (mode) - 1));
		  else
		    break;
		}

	      /* Compute the code used to compose the constants.  For example,
		 A-C1-C2 is A-(C1 + C2), so if CODE == MINUS, we want PLUS.  */

	      associate_code = (is_shift || code == MINUS ? PLUS : code);

	      new_const = simplify_binary_operation (associate_code, mode,
						     const_arg1, inner_const);

	      if (new_const == 0)
		break;

	      /* If we are associating shift operations, don't let this
		 produce a shift of the size of the object or larger.
		 This could occur when we follow a sign-extend by a right
		 shift on a machine that does a sign-extend as a pair
		 of shifts.  */

	      if (is_shift
		  && GET_CODE (new_const) == CONST_INT
		  && INTVAL (new_const) >= GET_MODE_BITSIZE (mode))
		{
		  /* As an exception, we can turn an ASHIFTRT of this
		     form into a shift of the number of bits - 1.  */
		  if (code == ASHIFTRT)
		    new_const = GEN_INT (GET_MODE_BITSIZE (mode) - 1);
		  else if (!side_effects_p (XEXP (y, 0)))
		    return CONST0_RTX (mode);
		  else
		    break;
		}

	      y = copy_rtx (XEXP (y, 0));

	      /* If Y contains our first operand (the most common way this
		 can happen is if Y is a MEM), we would go into an infinite
		 loop if we tried to fold it.  So don't in that case.  */

	      if (! reg_mentioned_p (folded_arg0, y))
		y = fold_rtx (y, insn);

	      return simplify_gen_binary (code, mode, y, new_const);
	    }
	  break;

	case DIV:       case UDIV:
	  /* ??? The associative optimization performed immediately above is
	     also possible for DIV and UDIV using associate_code of MULT.
	     However, we would need extra code to verify that the
	     multiplication does not overflow, that is, there is no overflow
	     in the calculation of new_const.  */
	  break;

	default:
	  break;
	}

      new = simplify_binary_operation (code, mode,
				       const_arg0 ? const_arg0 : folded_arg0,
				       const_arg1 ? const_arg1 : folded_arg1);
      break;

    case RTX_OBJ:
      /* (lo_sum (high X) X) is simply X.  */
      if (code == LO_SUM && const_arg0 != 0
	  && GET_CODE (const_arg0) == HIGH
	  && rtx_equal_p (XEXP (const_arg0, 0), const_arg1))
	return const_arg1;
      break;

    case RTX_TERNARY:
    case RTX_BITFIELD_OPS:
      new = simplify_ternary_operation (code, mode, mode_arg0,
					const_arg0 ? const_arg0 : folded_arg0,
					const_arg1 ? const_arg1 : folded_arg1,
					const_arg2 ? const_arg2 : XEXP (x, 2));
      break;

    default:
      break;
    }

  return new ? new : x;
}

/* Return a constant value currently equivalent to X.
   Return 0 if we don't know one.  */

static rtx
equiv_constant (rtx x)
{
  if (REG_P (x)
      && REGNO_QTY_VALID_P (REGNO (x)))
    {
      int x_q = REG_QTY (REGNO (x));
      struct qty_table_elem *x_ent = &qty_table[x_q];

      if (x_ent->const_rtx)
	x = gen_lowpart (GET_MODE (x), x_ent->const_rtx);
    }

  if (x == 0 || CONSTANT_P (x))
    return x;

  /* If X is a MEM, try to fold it outside the context of any insn to see if
     it might be equivalent to a constant.  That handles the case where it
     is a constant-pool reference.  Then try to look it up in the hash table
     in case it is something whose value we have seen before.  */

  if (MEM_P (x))
    {
      struct table_elt *elt;

      x = fold_rtx (x, NULL_RTX);
      if (CONSTANT_P (x))
	return x;

      elt = lookup (x, SAFE_HASH (x, GET_MODE (x)), GET_MODE (x));
      if (elt == 0)
	return 0;

      for (elt = elt->first_same_value; elt; elt = elt->next_same_value)
	if (elt->is_const && CONSTANT_P (elt->exp))
	  return elt->exp;
    }

  return 0;
}

/* Given INSN, a jump insn, TAKEN indicates if we are following the
   "taken" branch.  It will be zero if not.

   In certain cases, this can cause us to add an equivalence.  For example,
   if we are following the taken case of
	if (i == 2)
   we can add the fact that `i' and '2' are now equivalent.

   In any case, we can record that this comparison was passed.  If the same
   comparison is seen later, we will know its value.  */

static void
record_jump_equiv (rtx insn, int taken)
{
  int cond_known_true;
  rtx op0, op1;
  rtx set;
  enum machine_mode mode, mode0, mode1;
  int reversed_nonequality = 0;
  enum rtx_code code;

  /* Ensure this is the right kind of insn.  */
  if (! any_condjump_p (insn))
    return;
  set = pc_set (insn);

  /* See if this jump condition is known true or false.  */
  if (taken)
    cond_known_true = (XEXP (SET_SRC (set), 2) == pc_rtx);
  else
    cond_known_true = (XEXP (SET_SRC (set), 1) == pc_rtx);

  /* Get the type of comparison being done and the operands being compared.
     If we had to reverse a non-equality condition, record that fact so we
     know that it isn't valid for floating-point.  */
  code = GET_CODE (XEXP (SET_SRC (set), 0));
  op0 = fold_rtx (XEXP (XEXP (SET_SRC (set), 0), 0), insn);
  op1 = fold_rtx (XEXP (XEXP (SET_SRC (set), 0), 1), insn);

  code = find_comparison_args (code, &op0, &op1, &mode0, &mode1);

  /* If the mode is a MODE_CC mode, we don't know what kinds of things
     are being compared, so we can't do anything with this
     comparison.  */

  if (GET_MODE_CLASS (mode0) == MODE_CC)
    return;

  if (! cond_known_true)
    {
      code = reversed_comparison_code_parts (code, op0, op1, insn);

      /* Don't remember if we can't find the inverse.  */
      if (code == UNKNOWN)
	return;
    }

  /* The mode is the mode of the non-constant.  */
  mode = mode0;
  if (mode1 != VOIDmode)
    mode = mode1;

  record_jump_cond (code, mode, op0, op1, reversed_nonequality);
}

/* Yet another form of subreg creation.  In this case, we want something in
   MODE, and we should assume OP has MODE iff it is naturally modeless.  */

static rtx
record_jump_cond_subreg (enum machine_mode mode, rtx op)
{
  enum machine_mode op_mode = GET_MODE (op);
  if (op_mode == mode || op_mode == VOIDmode)
    return op;
  return lowpart_subreg (mode, op, op_mode);
}

/* We know that comparison CODE applied to OP0 and OP1 in MODE is true.
   REVERSED_NONEQUALITY is nonzero if CODE had to be swapped.
   Make any useful entries we can with that information.  Called from
   above function and called recursively.  */

static void
record_jump_cond (enum rtx_code code, enum machine_mode mode, rtx op0,
		  rtx op1, int reversed_nonequality)
{
  unsigned op0_hash, op1_hash;
  int op0_in_memory, op1_in_memory;
  struct table_elt *op0_elt, *op1_elt;

  /* If OP0 and OP1 are known equal, and either is a paradoxical SUBREG,
     we know that they are also equal in the smaller mode (this is also
     true for all smaller modes whether or not there is a SUBREG, but
     is not worth testing for with no SUBREG).  */
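  /* For example, if (subreg:SI (reg:QI 100) 0) is known equal to
     (reg:SI 101), then (reg:QI 100) must also equal the QImode lowpart
     of (reg:SI 101).  */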

  /* Note that GET_MODE (op0) may not equal MODE.  */
  if (code == EQ && GET_CODE (op0) == SUBREG
      && (GET_MODE_SIZE (GET_MODE (op0))
	  > GET_MODE_SIZE (GET_MODE (SUBREG_REG (op0)))))
    {
      enum machine_mode inner_mode = GET_MODE (SUBREG_REG (op0));
      rtx tem = record_jump_cond_subreg (inner_mode, op1);
      if (tem)
	record_jump_cond (code, mode, SUBREG_REG (op0), tem,
			  reversed_nonequality);
    }

  if (code == EQ && GET_CODE (op1) == SUBREG
      && (GET_MODE_SIZE (GET_MODE (op1))
	  > GET_MODE_SIZE (GET_MODE (SUBREG_REG (op1)))))
    {
      enum machine_mode inner_mode = GET_MODE (SUBREG_REG (op1));
      rtx tem = record_jump_cond_subreg (inner_mode, op0);
      if (tem)
	record_jump_cond (code, mode, SUBREG_REG (op1), tem,
			  reversed_nonequality);
    }

  /* Similarly, if this is an NE comparison, and either is a SUBREG
     making a smaller mode, we know the whole thing is also NE.  */

  /* Note that GET_MODE (op0) may not equal MODE;
     if we test MODE instead, we can get an infinite recursion
     alternating between two modes each wider than MODE.  */
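  /* For example, if (subreg:QI (reg:SI 100) 0) is known unequal to
     (reg:QI 101), then (reg:SI 100) is unequal to any SImode value
     whose low byte is (reg:QI 101), since the two differ in their
     low parts.  */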

  if (code == NE && GET_CODE (op0) == SUBREG
      && subreg_lowpart_p (op0)
      && (GET_MODE_SIZE (GET_MODE (op0))
	  < GET_MODE_SIZE (GET_MODE (SUBREG_REG (op0)))))
    {
      enum machine_mode inner_mode = GET_MODE (SUBREG_REG (op0));
      rtx tem = record_jump_cond_subreg (inner_mode, op1);
      if (tem)
	record_jump_cond (code, mode, SUBREG_REG (op0), tem,
			  reversed_nonequality);
    }

  if (code == NE && GET_CODE (op1) == SUBREG
      && subreg_lowpart_p (op1)
      && (GET_MODE_SIZE (GET_MODE (op1))
	  < GET_MODE_SIZE (GET_MODE (SUBREG_REG (op1)))))
    {
      enum machine_mode inner_mode = GET_MODE (SUBREG_REG (op1));
      rtx tem = record_jump_cond_subreg (inner_mode, op0);
      if (tem)
	record_jump_cond (code, mode, SUBREG_REG (op1), tem,
			  reversed_nonequality);
    }

  /* Hash both operands.  */

  do_not_record = 0;
  hash_arg_in_memory = 0;
  op0_hash = HASH (op0, mode);
  op0_in_memory = hash_arg_in_memory;

  if (do_not_record)
    return;

  do_not_record = 0;
  hash_arg_in_memory = 0;
  op1_hash = HASH (op1, mode);
  op1_in_memory = hash_arg_in_memory;

  if (do_not_record)
    return;

  /* Look up both operands.  */
  op0_elt = lookup (op0, op0_hash, mode);
  op1_elt = lookup (op1, op1_hash, mode);

  /* If both operands are already equivalent or if they are not in the
     table but are identical, do nothing.  */
  if ((op0_elt != 0 && op1_elt != 0
       && op0_elt->first_same_value == op1_elt->first_same_value)
      || op0 == op1 || rtx_equal_p (op0, op1))
    return;

  /* If we aren't setting two things equal, all we can do is save this
     comparison.  Similarly if this is floating-point.  In the latter
     case, OP1 might be zero and both -0.0 and 0.0 are equal to it.
     If we record the equality, we might inadvertently delete code
     whose intent was to change -0 to +0.  */

  if (code != EQ || FLOAT_MODE_P (GET_MODE (op0)))
    {
      struct qty_table_elem *ent;
      int qty;

      /* If we reversed a floating-point comparison, if OP0 is not a
	 register, or if OP1 is neither a register nor a constant, we can't
	 do anything.  */

      if (!REG_P (op1))
	op1 = equiv_constant (op1);

      if ((reversed_nonequality && FLOAT_MODE_P (mode))
	  || !REG_P (op0) || op1 == 0)
	return;

      /* Put OP0 in the hash table if it isn't already.  This gives it a
	 new quantity number.  */
      if (op0_elt == 0)
	{
	  if (insert_regs (op0, NULL, 0))
	    {
	      rehash_using_reg (op0);
	      op0_hash = HASH (op0, mode);

	      /* If OP0 is contained in OP1, this changes its hash code
		 as well.  Faster to rehash than to check, except
		 for the simple case of a constant.  */
	      if (! CONSTANT_P (op1))
		op1_hash = HASH (op1,mode);
	    }

	  op0_elt = insert (op0, NULL, op0_hash, mode);
	  op0_elt->in_memory = op0_in_memory;
	}

      qty = REG_QTY (REGNO (op0));
      ent = &qty_table[qty];

      ent->comparison_code = code;
      if (REG_P (op1))
	{
	  /* Look it up again--in case op0 and op1 are the same.  */
	  op1_elt = lookup (op1, op1_hash, mode);

	  /* Put OP1 in the hash table so it gets a new quantity number.  */
	  if (op1_elt == 0)
	    {
	      if (insert_regs (op1, NULL, 0))
		{
		  rehash_using_reg (op1);
		  op1_hash = HASH (op1, mode);
		}

	      op1_elt = insert (op1, NULL, op1_hash, mode);
	      op1_elt->in_memory = op1_in_memory;
	    }

	  ent->comparison_const = NULL_RTX;
	  ent->comparison_qty = REG_QTY (REGNO (op1));
	}
      else
	{
	  ent->comparison_const = op1;
	  ent->comparison_qty = -1;
	}

      return;
    }

  /* If either side is still missing an equivalence, make it now,
     then merge the equivalences.  */

  if (op0_elt == 0)
    {
      if (insert_regs (op0, NULL, 0))
	{
	  rehash_using_reg (op0);
	  op0_hash = HASH (op0, mode);
	}

      op0_elt = insert (op0, NULL, op0_hash, mode);
      op0_elt->in_memory = op0_in_memory;
    }

  if (op1_elt == 0)
    {
      if (insert_regs (op1, NULL, 0))
	{
	  rehash_using_reg (op1);
	  op1_hash = HASH (op1, mode);
	}

      op1_elt = insert (op1, NULL, op1_hash, mode);
      op1_elt->in_memory = op1_in_memory;
    }

  merge_equiv_classes (op0_elt, op1_elt);
}

/* CSE processing for one instruction.
   First simplify sources and addresses of all assignments
   in the instruction, using previously-computed equivalent values.
   Then install the new sources and destinations in the table
   of available values.

   If LIBCALL_INSN is nonzero, don't record any equivalence made in
   the insn.  It means that INSN is inside a libcall block.  In this
   case LIBCALL_INSN is the corresponding insn with REG_LIBCALL.  */

/* Data on one SET contained in the instruction.  */

struct set
{
  /* The SET rtx itself.  */
  rtx rtl;
  /* The SET_SRC of the rtx (the original value, if it is changing).  */
  rtx src;
  /* The hash-table element for the SET_SRC of the SET.  */
  struct table_elt *src_elt;
  /* Hash value for the SET_SRC.  */
  unsigned src_hash;
  /* Hash value for the SET_DEST.  */
  unsigned dest_hash;
  /* The SET_DEST, with SUBREG, etc., stripped.  */
  rtx inner_dest;
  /* Nonzero if the SET_SRC is in memory.  */
  char src_in_memory;
  /* Nonzero if the SET_SRC contains something
     whose value cannot be predicted and understood.  */
  char src_volatile;
  /* Original machine mode, in case it becomes a CONST_INT.
     The size of this field should match the size of the mode
     field of struct rtx_def (see rtl.h).  */
  ENUM_BITFIELD(machine_mode) mode : 8;
  /* A constant equivalent for SET_SRC, if any.  */
  rtx src_const;
  /* Original SET_SRC value used for libcall notes.  */
  rtx orig_src;
  /* Hash value of constant equivalent for SET_SRC.  */
  unsigned src_const_hash;
  /* Table entry for constant equivalent for SET_SRC, if any.  */
  struct table_elt *src_const_elt;
  /* Table entry for the destination address.  */
  struct table_elt *dest_addr_elt;
};

static void
cse_insn (rtx insn, rtx libcall_insn)
{
  rtx x = PATTERN (insn);
  int i;
  rtx tem;
  int n_sets = 0;

#ifdef HAVE_cc0
  /* Records what this insn does to set CC0.  */
  rtx this_insn_cc0 = 0;
  enum machine_mode this_insn_cc0_mode = VOIDmode;
#endif

  rtx src_eqv = 0;
  struct table_elt *src_eqv_elt = 0;
  int src_eqv_volatile = 0;
  int src_eqv_in_memory = 0;
  unsigned src_eqv_hash = 0;

  struct set *sets = (struct set *) 0;

  this_insn = insn;

  /* Find all the SETs and CLOBBERs in this instruction.
     Record all the SETs in the array `sets' and count them.
     Also determine whether there is a CLOBBER that invalidates
     all memory references, or all references at varying addresses.  */

  if (CALL_P (insn))
    {
      for (tem = CALL_INSN_FUNCTION_USAGE (insn); tem; tem = XEXP (tem, 1))
	{
	  if (GET_CODE (XEXP (tem, 0)) == CLOBBER)
	    invalidate (SET_DEST (XEXP (tem, 0)), VOIDmode);
	  XEXP (tem, 0) = canon_reg (XEXP (tem, 0), insn);
	}
    }

  if (GET_CODE (x) == SET)
    {
      sets = alloca (sizeof (struct set));
      sets[0].rtl = x;

      /* Ignore SETs that are unconditional jumps.
	 They never need cse processing, so this does not hurt.
	 The reason is not efficiency but rather
	 so that we can test at the end for instructions
	 that have been simplified to unconditional jumps
	 and not be misled by unchanged instructions
	 that were unconditional jumps to begin with.  */
      if (SET_DEST (x) == pc_rtx
	  && GET_CODE (SET_SRC (x)) == LABEL_REF)
	;

      /* Don't count call-insns, (set (reg 0) (call ...)), as a set.
	 The hard function value register is used only once, to copy to
	 someplace else, so it isn't worth cse'ing (and on 80386 is unsafe)!
	 Ensure we invalidate the destination register.  On the 80386 no
	 other code would invalidate it since it is a fixed_reg.
	 We need not check the return of apply_change_group; see canon_reg.  */

      else if (GET_CODE (SET_SRC (x)) == CALL)
	{
	  canon_reg (SET_SRC (x), insn);
	  apply_change_group ();
	  fold_rtx (SET_SRC (x), insn);
	  invalidate (SET_DEST (x), VOIDmode);
	}
      else
	n_sets = 1;
    }
  else if (GET_CODE (x) == PARALLEL)
    {
      int lim = XVECLEN (x, 0);

      sets = alloca (lim * sizeof (struct set));

      /* Find all regs explicitly clobbered in this insn,
	 and ensure they are not replaced with any other regs
	 elsewhere in this insn.
	 When a reg that is clobbered is also used for input,
	 we should presume that it is done for a reason,
	 and we should not substitute some other register
	 which is not supposed to be clobbered.
	 Therefore, this loop cannot be merged into the one below
	 because a CALL may precede a CLOBBER and refer to the
	 value clobbered.  We must not let a canonicalization do
	 anything in that case.  */
      for (i = 0; i < lim; i++)
	{
	  rtx y = XVECEXP (x, 0, i);
	  if (GET_CODE (y) == CLOBBER)
	    {
	      rtx clobbered = XEXP (y, 0);

	      if (REG_P (clobbered)
		  || GET_CODE (clobbered) == SUBREG)
		invalidate (clobbered, VOIDmode);
	      else if (GET_CODE (clobbered) == STRICT_LOW_PART
		       || GET_CODE (clobbered) == ZERO_EXTRACT)
		invalidate (XEXP (clobbered, 0), GET_MODE (clobbered));
	    }
	}

      for (i = 0; i < lim; i++)
	{
	  rtx y = XVECEXP (x, 0, i);
	  if (GET_CODE (y) == SET)
	    {
	      /* As above, we ignore unconditional jumps and call-insns and
		 ignore the result of apply_change_group.  */
	      if (GET_CODE (SET_SRC (y)) == CALL)
		{
		  canon_reg (SET_SRC (y), insn);
		  apply_change_group ();
		  fold_rtx (SET_SRC (y), insn);
		  invalidate (SET_DEST (y), VOIDmode);
		}
	      else if (SET_DEST (y) == pc_rtx
		       && GET_CODE (SET_SRC (y)) == LABEL_REF)
		;
	      else
		sets[n_sets++].rtl = y;
	    }
	  else if (GET_CODE (y) == CLOBBER)
	    {
	      /* If we clobber memory, canon the address.
		 This does nothing when a register is clobbered
		 because we have already invalidated the reg.  */
	      if (MEM_P (XEXP (y, 0)))
		canon_reg (XEXP (y, 0), NULL_RTX);
	    }
	  else if (GET_CODE (y) == USE
		   && ! (REG_P (XEXP (y, 0))
			 && REGNO (XEXP (y, 0)) < FIRST_PSEUDO_REGISTER))
	    canon_reg (y, NULL_RTX);
	  else if (GET_CODE (y) == CALL)
	    {
	      /* The result of apply_change_group can be ignored; see
		 canon_reg.  */
	      canon_reg (y, insn);
	      apply_change_group ();
	      fold_rtx (y, insn);
	    }
	}
    }
  else if (GET_CODE (x) == CLOBBER)
    {
      if (MEM_P (XEXP (x, 0)))
	canon_reg (XEXP (x, 0), NULL_RTX);
    }

  /* Canonicalize a USE of a pseudo register or memory location.  */
  else if (GET_CODE (x) == USE
	   && ! (REG_P (XEXP (x, 0))
		 && REGNO (XEXP (x, 0)) < FIRST_PSEUDO_REGISTER))
    canon_reg (XEXP (x, 0), NULL_RTX);
  else if (GET_CODE (x) == CALL)
    {
      /* The result of apply_change_group can be ignored; see canon_reg.  */
      canon_reg (x, insn);
      apply_change_group ();
      fold_rtx (x, insn);
    }

  /* Store the equivalent value in SRC_EQV, if different, or if the DEST
     is a STRICT_LOW_PART.  The latter condition is necessary because SRC_EQV
     is handled specially for this case, and if it isn't set, then there will
     be no equivalence for the destination.  */
  if (n_sets == 1 && REG_NOTES (insn) != 0
      && (tem = find_reg_note (insn, REG_EQUAL, NULL_RTX)) != 0
      && (! rtx_equal_p (XEXP (tem, 0), SET_SRC (sets[0].rtl))
	  || GET_CODE (SET_DEST (sets[0].rtl)) == STRICT_LOW_PART))
    {
      src_eqv = fold_rtx (canon_reg (XEXP (tem, 0), NULL_RTX), insn);
      XEXP (tem, 0) = src_eqv;
    }

  /* Canonicalize sources and addresses of destinations.
     We do this in a separate pass to avoid problems when a MATCH_DUP is
     present in the insn pattern.  In that case, we want to ensure that
     we don't break the duplicate nature of the pattern.  So we will replace
     both operands at the same time.  Otherwise, we would fail to find an
     equivalent substitution in the loop calling validate_change below.

     We used to suppress canonicalization of DEST if it appears in SRC,
     but we don't do this any more.  */

  for (i = 0; i < n_sets; i++)
    {
      rtx dest = SET_DEST (sets[i].rtl);
      rtx src = SET_SRC (sets[i].rtl);
      rtx new = canon_reg (src, insn);

      sets[i].orig_src = src;
      validate_change (insn, &SET_SRC (sets[i].rtl), new, 1);

      if (GET_CODE (dest) == ZERO_EXTRACT)
	{
	  validate_change (insn, &XEXP (dest, 1),
			   canon_reg (XEXP (dest, 1), insn), 1);
	  validate_change (insn, &XEXP (dest, 2),
			   canon_reg (XEXP (dest, 2), insn), 1);
	}

      while (GET_CODE (dest) == SUBREG
	     || GET_CODE (dest) == ZERO_EXTRACT
	     || GET_CODE (dest) == STRICT_LOW_PART)
	dest = XEXP (dest, 0);

      if (MEM_P (dest))
	canon_reg (dest, insn);
    }

  /* Now that we have done all the replacements, we can apply the change
     group and see if they all work.  Note that this will cause some
     canonicalizations that would have worked individually not to be applied
     because some other canonicalization didn't work, but this should not
     occur often.

     The result of apply_change_group can be ignored; see canon_reg.  */

  apply_change_group ();

  /* Set sets[i].src_elt to the class each source belongs to.
     Detect assignments from or to volatile things
     and set sets[i] to zero so they will be ignored
     in the rest of this function.

     Nothing in this loop changes the hash table or the register chains.  */

  for (i = 0; i < n_sets; i++)
    {
      rtx src, dest;
      rtx src_folded;
      struct table_elt *elt = 0, *p;
      enum machine_mode mode;
      rtx src_eqv_here;
      rtx src_const = 0;
      rtx src_related = 0;
      struct table_elt *src_const_elt = 0;
      int src_cost = MAX_COST;
      int src_eqv_cost = MAX_COST;
      int src_folded_cost = MAX_COST;
      int src_related_cost = MAX_COST;
      int src_elt_cost = MAX_COST;
      int src_regcost = MAX_COST;
      int src_eqv_regcost = MAX_COST;
      int src_folded_regcost = MAX_COST;
      int src_related_regcost = MAX_COST;
      int src_elt_regcost = MAX_COST;
      /* Set nonzero if we need to call force_const_mem on the contents
	 of src_folded before using it.  */
      int src_folded_force_flag = 0;

      dest = SET_DEST (sets[i].rtl);
      src = SET_SRC (sets[i].rtl);

      /* If SRC is a constant that has no machine mode,
	 hash it with the destination's machine mode.
	 This way we can keep different modes separate.  */
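      /* A CONST_INT, for instance, has VOIDmode, so a (const_int 1)
	 stored into an SImode register and one stored into a DImode
	 register are hashed under different modes.  */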

      mode = GET_MODE (src) == VOIDmode ? GET_MODE (dest) : GET_MODE (src);
      sets[i].mode = mode;

      if (src_eqv)
	{
	  enum machine_mode eqvmode = mode;
	  if (GET_CODE (dest) == STRICT_LOW_PART)
	    eqvmode = GET_MODE (SUBREG_REG (XEXP (dest, 0)));
	  do_not_record = 0;
	  hash_arg_in_memory = 0;
	  src_eqv_hash = HASH (src_eqv, eqvmode);

	  /* Find the equivalence class for the equivalent expression.  */

	  if (!do_not_record)
	    src_eqv_elt = lookup (src_eqv, src_eqv_hash, eqvmode);

	  src_eqv_volatile = do_not_record;
	  src_eqv_in_memory = hash_arg_in_memory;
	}

      /* If this is a STRICT_LOW_PART assignment, src_eqv corresponds to the
	 value of the INNER register, not the destination.  So it is not
	 a valid substitution for the source.  But save it for later.  */
      if (GET_CODE (dest) == STRICT_LOW_PART)
	src_eqv_here = 0;
      else
	src_eqv_here = src_eqv;

      /* Simplify any foldable subexpressions in SRC.  Then get the
	 fully-simplified result, which may not necessarily be valid.  */
      src_folded = fold_rtx (src, insn);

#if 0
      /* ??? This caused bad code to be generated for the m68k port with -O2.
	 Suppose src is (CONST_INT -1), and that after truncation src_folded
	 is (CONST_INT 3).  Suppose src_folded is then used for src_const.
	 At the end we will add src and src_const to the same equivalence
	 class.  We now have 3 and -1 on the same equivalence class.  This
	 causes later instructions to be mis-optimized.  */
      /* If storing a constant in a bitfield, pre-truncate the constant
	 so we will be able to record it later.  */
      if (GET_CODE (SET_DEST (sets[i].rtl)) == ZERO_EXTRACT)
	{
	  rtx width = XEXP (SET_DEST (sets[i].rtl), 1);

	  if (GET_CODE (src) == CONST_INT
	      && GET_CODE (width) == CONST_INT
	      && INTVAL (width) < HOST_BITS_PER_WIDE_INT
	      && (INTVAL (src) & ((HOST_WIDE_INT) (-1) << INTVAL (width))))
	    src_folded
	      = GEN_INT (INTVAL (src) & (((HOST_WIDE_INT) 1
					  << INTVAL (width)) - 1));
	}
#endif

      /* Compute SRC's hash code, and also notice if it
	 should not be recorded at all.  In that case,
	 prevent any further processing of this assignment.  */
      do_not_record = 0;
      hash_arg_in_memory = 0;

      sets[i].src = src;
      sets[i].src_hash = HASH (src, mode);
      sets[i].src_volatile = do_not_record;
      sets[i].src_in_memory = hash_arg_in_memory;

      /* If SRC is a MEM, there is a REG_EQUIV note for SRC, and DEST is
	 a pseudo, do not record SRC.  Using SRC as a replacement for
	 anything else will be incorrect in that situation.  Note that
	 this usually occurs only for stack slots, in which case all the
	 RTL would be referring to SRC, so we don't lose any optimization
	 opportunities by not having SRC in the hash table.  */

      if (MEM_P (src)
	  && find_reg_note (insn, REG_EQUIV, NULL_RTX) != 0
	  && REG_P (dest)
	  && REGNO (dest) >= FIRST_PSEUDO_REGISTER)
	sets[i].src_volatile = 1;

#if 0
      /* It is no longer clear why we used to do this, but it doesn't
	 appear to still be needed.  So let's try without it since this
	 code hurts cse'ing widened ops.  */
      /* If source is a paradoxical subreg (such as QI treated as an SI),
	 treat it as volatile.  It may do the work of an SI in one context
	 where the extra bits are not being used, but cannot replace an SI
	 in general.  */
      if (GET_CODE (src) == SUBREG
	  && (GET_MODE_SIZE (GET_MODE (src))
	      > GET_MODE_SIZE (GET_MODE (SUBREG_REG (src)))))
	sets[i].src_volatile = 1;
#endif

      /* Locate all possible equivalent forms for SRC.  Try to replace
         SRC in the insn with each cheaper equivalent.

         We have the following types of equivalents: SRC itself, a folded
         version, a value given in a REG_EQUAL note, or a value related
	 to a constant.

         Each of these equivalents may be part of an additional class
         of equivalents (if more than one is in the table, they must be in
         the same class; we check for this).

	 If the source is volatile, we don't do any table lookups.

         We note any constant equivalent for possible later use in a
         REG_NOTE.  */

      if (!sets[i].src_volatile)
	elt = lookup (src, sets[i].src_hash, mode);

      sets[i].src_elt = elt;

      if (elt && src_eqv_here && src_eqv_elt)
	{
	  if (elt->first_same_value != src_eqv_elt->first_same_value)
	    {
	      /* The REG_EQUAL is indicating that two formerly distinct
		 classes are now equivalent.  So merge them.  */
	      merge_equiv_classes (elt, src_eqv_elt);
	      src_eqv_hash = HASH (src_eqv, elt->mode);
	      src_eqv_elt = lookup (src_eqv, src_eqv_hash, elt->mode);
	    }

	  src_eqv_here = 0;
	}

      else if (src_eqv_elt)
	elt = src_eqv_elt;

      /* Try to find a constant somewhere and record it in `src_const'.
	 Record its table element, if any, in `src_const_elt'.  Look in
	 any known equivalences first.  (If the constant is not in the
	 table, also set `sets[i].src_const_hash').  */
      if (elt)
	for (p = elt->first_same_value; p; p = p->next_same_value)
	  if (p->is_const)
	    {
	      src_const = p->exp;
	      src_const_elt = elt;
	      break;
	    }

      if (src_const == 0
	  && (CONSTANT_P (src_folded)
	      /* Consider (minus (label_ref L1) (label_ref L2)) as
		 "constant" here so we will record it. This allows us
		 to fold switch statements when an ADDR_DIFF_VEC is used.  */
	      || (GET_CODE (src_folded) == MINUS
		  && GET_CODE (XEXP (src_folded, 0)) == LABEL_REF
		  && GET_CODE (XEXP (src_folded, 1)) == LABEL_REF)))
	src_const = src_folded, src_const_elt = elt;
      else if (src_const == 0 && src_eqv_here && CONSTANT_P (src_eqv_here))
	src_const = src_eqv_here, src_const_elt = src_eqv_elt;

      /* If we don't know if the constant is in the table, get its
	 hash code and look it up.  */
      if (src_const && src_const_elt == 0)
	{
	  sets[i].src_const_hash = HASH (src_const, mode);
	  src_const_elt = lookup (src_const, sets[i].src_const_hash, mode);
	}

      sets[i].src_const = src_const;
      sets[i].src_const_elt = src_const_elt;

      /* If the constant and our source are both in the table, mark them as
	 equivalent.  Otherwise, if a constant is in the table but the source
	 isn't, set ELT to it.  */
      if (src_const_elt && elt
	  && src_const_elt->first_same_value != elt->first_same_value)
	merge_equiv_classes (elt, src_const_elt);
      else if (src_const_elt && elt == 0)
	elt = src_const_elt;

      /* See if there is a register linearly related to a constant
         equivalent of SRC.  */
      if (src_const
	  && (GET_CODE (src_const) == CONST
	      || (src_const_elt && src_const_elt->related_value != 0)))
	{
	  src_related = use_related_value (src_const, src_const_elt);
	  if (src_related)
	    {
	      struct table_elt *src_related_elt
		= lookup (src_related, HASH (src_related, mode), mode);
	      if (src_related_elt && elt)
		{
		  if (elt->first_same_value
		      != src_related_elt->first_same_value)
		    /* This can occur when we previously saw a CONST
		       involving a SYMBOL_REF and then see the SYMBOL_REF
		       twice.  Merge the involved classes.  */
		    merge_equiv_classes (elt, src_related_elt);

		  src_related = 0;
		  src_related_elt = 0;
		}
	      else if (src_related_elt && elt == 0)
		elt = src_related_elt;
	    }
	}

      /* See if we have a CONST_INT that is already in a register in a
	 wider mode.  */
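      /* For example, if (reg:DI 100) is known to hold (const_int 7) and we
	 now need (const_int 7) in SImode, an SImode lowpart of (reg:DI 100)
	 can be used instead of loading the constant again.  */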

      if (src_const && src_related == 0 && GET_CODE (src_const) == CONST_INT
	  && GET_MODE_CLASS (mode) == MODE_INT
	  && GET_MODE_BITSIZE (mode) < BITS_PER_WORD)
	{
	  enum machine_mode wider_mode;

	  for (wider_mode = GET_MODE_WIDER_MODE (mode);
	       GET_MODE_BITSIZE (wider_mode) <= BITS_PER_WORD
	       && src_related == 0;
	       wider_mode = GET_MODE_WIDER_MODE (wider_mode))
	    {
	      struct table_elt *const_elt
		= lookup (src_const, HASH (src_const, wider_mode), wider_mode);

	      if (const_elt == 0)
		continue;

	      for (const_elt = const_elt->first_same_value;
		   const_elt; const_elt = const_elt->next_same_value)
		if (REG_P (const_elt->exp))
		  {
		    src_related = gen_lowpart (mode,
							   const_elt->exp);
		    break;
		  }
	    }
	}

      /* Another possibility is that we have an AND with a constant in
	 a mode narrower than a word.  If so, it might have been generated
	 as part of an "if" which would narrow the AND.  If we already
	 have done the AND in a wider mode, we can use a SUBREG of that
	 value.  */
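      /* For example, if SRC is a QImode (and ... (const_int 15)) and the
	 same AND has already been computed in SImode into some register,
	 a QImode lowpart of that register has the same value as SRC.  */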

      if (flag_expensive_optimizations && ! src_related
	  && GET_CODE (src) == AND && GET_CODE (XEXP (src, 1)) == CONST_INT
	  && GET_MODE_SIZE (mode) < UNITS_PER_WORD)
	{
	  enum machine_mode tmode;
	  rtx new_and = gen_rtx_AND (VOIDmode, NULL_RTX, XEXP (src, 1));

	  for (tmode = GET_MODE_WIDER_MODE (mode);
	       GET_MODE_SIZE (tmode) <= UNITS_PER_WORD;
	       tmode = GET_MODE_WIDER_MODE (tmode))
	    {
	      rtx inner = gen_lowpart (tmode, XEXP (src, 0));
	      struct table_elt *larger_elt;

	      if (inner)
		{
		  PUT_MODE (new_and, tmode);
		  XEXP (new_and, 0) = inner;
		  larger_elt = lookup (new_and, HASH (new_and, tmode), tmode);
		  if (larger_elt == 0)
		    continue;

		  for (larger_elt = larger_elt->first_same_value;
		       larger_elt; larger_elt = larger_elt->next_same_value)
		    if (REG_P (larger_elt->exp))
		      {
			src_related
			  = gen_lowpart (mode, larger_elt->exp);
			break;
		      }

		  if (src_related)
		    break;
		}
	    }
	}

#ifdef LOAD_EXTEND_OP
      /* See if a MEM has already been loaded with a widening operation;
	 if it has, we can use a subreg of that.  Many CISC machines
	 also have such operations, but this is only likely to be
	 beneficial on machines whose loads always perform the extension.  */
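      /* For example, on a machine where QImode loads always zero extend,
	 if (zero_extend:SI (mem:QI addr)) is already available in a
	 register, (mem:QI addr) can be replaced by a QImode lowpart of
	 that register, avoiding the memory load.  */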

      if (flag_expensive_optimizations && src_related == 0
	  && (GET_MODE_SIZE (mode) < UNITS_PER_WORD)
	  && GET_MODE_CLASS (mode) == MODE_INT
	  && MEM_P (src) && ! do_not_record
	  && LOAD_EXTEND_OP (mode) != UNKNOWN)
	{
	  struct rtx_def memory_extend_buf;
	  rtx memory_extend_rtx = &memory_extend_buf;
	  enum machine_mode tmode;

	  /* Set what we are trying to extend and the operation it might
	     have been extended with.  */
	  memset (memory_extend_rtx, 0, sizeof(*memory_extend_rtx));
	  PUT_CODE (memory_extend_rtx, LOAD_EXTEND_OP (mode));
	  XEXP (memory_extend_rtx, 0) = src;

	  for (tmode = GET_MODE_WIDER_MODE (mode);
	       GET_MODE_SIZE (tmode) <= UNITS_PER_WORD;
	       tmode = GET_MODE_WIDER_MODE (tmode))
	    {
	      struct table_elt *larger_elt;

	      PUT_MODE (memory_extend_rtx, tmode);
	      larger_elt = lookup (memory_extend_rtx,
				   HASH (memory_extend_rtx, tmode), tmode);
	      if (larger_elt == 0)
		continue;

	      for (larger_elt = larger_elt->first_same_value;
		   larger_elt; larger_elt = larger_elt->next_same_value)
		if (REG_P (larger_elt->exp))
		  {
		    src_related = gen_lowpart (mode,
							   larger_elt->exp);
		    break;
		  }

	      if (src_related)
		break;
	    }
	}
#endif /* LOAD_EXTEND_OP */

      if (src == src_folded)
	src_folded = 0;

      /* At this point, ELT, if nonzero, points to a class of expressions
         equivalent to the source of this SET and SRC, SRC_EQV, SRC_FOLDED,
	 and SRC_RELATED, if nonzero, each contain additional equivalent
	 expressions.  Prune these latter expressions by deleting expressions
	 already in the equivalence class.

	 Check for an equivalent identical to the destination.  If found,
	 this is the preferred equivalent since it will likely lead to
	 elimination of the insn.  Indicate this by placing it in
	 `src_related'.  */

      if (elt)
	elt = elt->first_same_value;
      for (p = elt; p; p = p->next_same_value)
	{
	  enum rtx_code code = GET_CODE (p->exp);

	  /* If the expression is not valid, ignore it.  Then we do not
	     have to check for validity below.  In most cases, we can use
	     `rtx_equal_p', since canonicalization has already been done.  */
	  if (code != REG && ! exp_equiv_p (p->exp, p->exp, 1, false))
	    continue;

	  /* Also skip paradoxical subregs, unless that's what we're
	     looking for.  */
	  if (code == SUBREG
	      && (GET_MODE_SIZE (GET_MODE (p->exp))
		  > GET_MODE_SIZE (GET_MODE (SUBREG_REG (p->exp))))
	      && ! (src != 0
		    && GET_CODE (src) == SUBREG
		    && GET_MODE (src) == GET_MODE (p->exp)
		    && (GET_MODE_SIZE (GET_MODE (SUBREG_REG (src)))
			< GET_MODE_SIZE (GET_MODE (SUBREG_REG (p->exp))))))
	    continue;

	  if (src && GET_CODE (src) == code && rtx_equal_p (src, p->exp))
	    src = 0;
	  else if (src_folded && GET_CODE (src_folded) == code
		   && rtx_equal_p (src_folded, p->exp))
	    src_folded = 0;
	  else if (src_eqv_here && GET_CODE (src_eqv_here) == code
		   && rtx_equal_p (src_eqv_here, p->exp))
	    src_eqv_here = 0;
	  else if (src_related && GET_CODE (src_related) == code
		   && rtx_equal_p (src_related, p->exp))
	    src_related = 0;

	  /* If this is the same as the destination of the insn, we want
	     to prefer it.  Copy it to src_related; the code below will
	     then give it a negative cost.  */
	  if (GET_CODE (dest) == code && rtx_equal_p (p->exp, dest))
	    src_related = dest;
	}

      /* Find the cheapest valid equivalent, trying all the available
         possibilities.  Prefer items not in the hash table to ones
         that are when they have equal cost.  Note that we can never
         worsen an insn as the current contents will also succeed.
	 If we find an equivalent identical to the destination, use it as best,
	 since this insn will probably be eliminated in that case.  */
      if (src)
	{
	  if (rtx_equal_p (src, dest))
	    src_cost = src_regcost = -1;
	  else
	    {
	      src_cost = COST (src);
	      src_regcost = approx_reg_cost (src);
	    }
	}

      if (src_eqv_here)
	{
	  if (rtx_equal_p (src_eqv_here, dest))
	    src_eqv_cost = src_eqv_regcost = -1;
	  else
	    {
	      src_eqv_cost = COST (src_eqv_here);
	      src_eqv_regcost = approx_reg_cost (src_eqv_here);
	    }
	}

      if (src_folded)
	{
	  if (rtx_equal_p (src_folded, dest))
	    src_folded_cost = src_folded_regcost = -1;
	  else
	    {
	      src_folded_cost = COST (src_folded);
	      src_folded_regcost = approx_reg_cost (src_folded);
	    }
	}

      if (src_related)
	{
	  if (rtx_equal_p (src_related, dest))
	    src_related_cost = src_related_regcost = -1;
	  else
	    {
	      src_related_cost = COST (src_related);
	      src_related_regcost = approx_reg_cost (src_related);
	    }
	}

      /* If this was an indirect jump insn, a known label will really be
	 cheaper even though it looks more expensive.  */
      if (dest == pc_rtx && src_const && GET_CODE (src_const) == LABEL_REF)
	src_folded = src_const, src_folded_cost = src_folded_regcost = -1;

      /* Terminate loop when replacement made.  This must terminate since
         the current contents will be tested and will always be valid.  */
      while (1)
	{
	  rtx trial;

	  /* Skip invalid entries.  */
	  while (elt && !REG_P (elt->exp)
		 && ! exp_equiv_p (elt->exp, elt->exp, 1, false))
	    elt = elt->next_same_value;

	  /* A paradoxical subreg would be bad here: it'll be the right
	     size, but later may be adjusted so that the upper bits aren't
	     what we want.  So reject it.  */
	  if (elt != 0
	      && GET_CODE (elt->exp) == SUBREG
	      && (GET_MODE_SIZE (GET_MODE (elt->exp))
		  > GET_MODE_SIZE (GET_MODE (SUBREG_REG (elt->exp))))
	      /* It is okay, though, if the rtx we're trying to match
		 will ignore any of the bits we can't predict.  */
	      && ! (src != 0
		    && GET_CODE (src) == SUBREG
		    && GET_MODE (src) == GET_MODE (elt->exp)
		    && (GET_MODE_SIZE (GET_MODE (SUBREG_REG (src)))
			< GET_MODE_SIZE (GET_MODE (SUBREG_REG (elt->exp))))))
	    {
	      elt = elt->next_same_value;
	      continue;
	    }

	  if (elt)
	    {
	      src_elt_cost = elt->cost;
	      src_elt_regcost = elt->regcost;
	    }

	  /* Find the cheapest one and skip it the next time around.  For
	     items of equal cost, use this order:
	     src_folded, src, src_eqv, src_related and hash table entry.  */
	  if (src_folded
	      && preferable (src_folded_cost, src_folded_regcost,
			     src_cost, src_regcost) <= 0
	      && preferable (src_folded_cost, src_folded_regcost,
			     src_eqv_cost, src_eqv_regcost) <= 0
	      && preferable (src_folded_cost, src_folded_regcost,
			     src_related_cost, src_related_regcost) <= 0
	      && preferable (src_folded_cost, src_folded_regcost,
			     src_elt_cost, src_elt_regcost) <= 0)
	    {
	      trial = src_folded, src_folded_cost = MAX_COST;
	      if (src_folded_force_flag)
		{
		  rtx forced = force_const_mem (mode, trial);
		  if (forced)
		    trial = forced;
		}
	    }
	  else if (src
		   && preferable (src_cost, src_regcost,
				  src_eqv_cost, src_eqv_regcost) <= 0
		   && preferable (src_cost, src_regcost,
				  src_related_cost, src_related_regcost) <= 0
		   && preferable (src_cost, src_regcost,
				  src_elt_cost, src_elt_regcost) <= 0)
	    trial = src, src_cost = MAX_COST;
	  else if (src_eqv_here
		   && preferable (src_eqv_cost, src_eqv_regcost,
				  src_related_cost, src_related_regcost) <= 0
		   && preferable (src_eqv_cost, src_eqv_regcost,
				  src_elt_cost, src_elt_regcost) <= 0)
	    trial = copy_rtx (src_eqv_here), src_eqv_cost = MAX_COST;
	  else if (src_related
		   && preferable (src_related_cost, src_related_regcost,
				  src_elt_cost, src_elt_regcost) <= 0)
	    trial = copy_rtx (src_related), src_related_cost = MAX_COST;
	  else
	    {
	      trial = copy_rtx (elt->exp);
	      elt = elt->next_same_value;
	      src_elt_cost = MAX_COST;
	    }

	  /* We don't normally have an insn matching (set (pc) (pc)), so
	     check for this separately here.  We will delete such an
	     insn below.

	     For other cases such as a table jump or conditional jump
	     where we know the ultimate target, go ahead and replace the
	     operand.  While that may not make a valid insn, we will
	     reemit the jump below (and also insert any necessary
	     barriers).  */
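	  /* For example, a conditional jump whose target turns out to be
	     its own fall-through address folds to (set (pc) (pc)), a
	     no-op.  (Illustrative.)  */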
	  if (n_sets == 1 && dest == pc_rtx
	      && (trial == pc_rtx
		  || (GET_CODE (trial) == LABEL_REF
		      && ! condjump_p (insn))))
	    {
	      /* Don't substitute non-local labels, this confuses CFG.  */
	      if (GET_CODE (trial) == LABEL_REF
		  && LABEL_REF_NONLOCAL_P (trial))
		continue;

	      SET_SRC (sets[i].rtl) = trial;
	      cse_jumps_altered = 1;
	      break;
	    }

	  /* Reject certain invalid forms of CONST that we create.  */
	  else if (CONSTANT_P (trial)
		   && GET_CODE (trial) == CONST
		   /* Reject cases that will cause decode_rtx_const to
		      die.  On the alpha when simplifying a switch, we
		      get (const (truncate (minus (label_ref)
		      (label_ref)))).  */
		   && (GET_CODE (XEXP (trial, 0)) == TRUNCATE
		       /* Likewise on IA-64, except without the
			  truncate.  */
		       || (GET_CODE (XEXP (trial, 0)) == MINUS
			   && GET_CODE (XEXP (XEXP (trial, 0), 0)) == LABEL_REF
			   && GET_CODE (XEXP (XEXP (trial, 0), 1)) == LABEL_REF)))
	    /* Do nothing for this case.  */
	    ;

	  /* Look for a substitution that makes a valid insn.  */
	  else if (validate_change (insn, &SET_SRC (sets[i].rtl), trial, 0))
	    {
	      rtx new = canon_reg (SET_SRC (sets[i].rtl), insn);

	      /* If we just made a substitution inside a libcall, then we
		 need to make the same substitution in any notes attached
		 to the RETVAL insn.  */
	      if (libcall_insn
		  && (REG_P (sets[i].orig_src)
		      || GET_CODE (sets[i].orig_src) == SUBREG
		      || MEM_P (sets[i].orig_src)))
		{
	          rtx note = find_reg_equal_equiv_note (libcall_insn);
		  if (note != 0)
		    XEXP (note, 0) = simplify_replace_rtx (XEXP (note, 0),
							   sets[i].orig_src,
							   copy_rtx (new));
		}

	      /* The result of apply_change_group can be ignored; see
		 canon_reg.  */

	      validate_change (insn, &SET_SRC (sets[i].rtl), new, 1);
	      apply_change_group ();
	      break;
	    }

	  /* If we previously found constant pool entries for
	     constants and this is a constant, try making a
	     pool entry.  Put it in src_folded unless we have already done
	     so, since that is where it likely came from.  */

	  else if (constant_pool_entries_cost
		   && CONSTANT_P (trial)
		   && (src_folded == 0
		       || (!MEM_P (src_folded)
			   && ! src_folded_force_flag))
		   && GET_MODE_CLASS (mode) != MODE_CC
		   && mode != VOIDmode)
	    {
	      src_folded_force_flag = 1;
	      src_folded = trial;
	      src_folded_cost = constant_pool_entries_cost;
	      src_folded_regcost = constant_pool_entries_regcost;
	    }
	}

      src = SET_SRC (sets[i].rtl);

      /* In general, it is good to have a SET with SET_SRC == SET_DEST.
	 However, there is an important exception:  If both are registers
	 that are not the head of their equivalence class, replace SET_SRC
	 with the head of the class.  If we do not do this, we will have
	 both registers live over a portion of the basic block.  This way,
	 their lifetimes will likely abut instead of overlapping.  */
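      /* For example, if regs 100 and 101 are equivalent and reg 100 is
	 the head of the class, rewrite (set (reg 101) (reg 101)) as
	 (set (reg 101) (reg 100)) so the two lifetimes abut.
	 (Illustrative register numbers.)  */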
      if (REG_P (dest)
	  && REGNO_QTY_VALID_P (REGNO (dest)))
	{
	  int dest_q = REG_QTY (REGNO (dest));
	  struct qty_table_elem *dest_ent = &qty_table[dest_q];

	  if (dest_ent->mode == GET_MODE (dest)
	      && dest_ent->first_reg != REGNO (dest)
	      && REG_P (src) && REGNO (src) == REGNO (dest)
	      /* Don't do this if the original insn had a hard reg as
		 SET_SRC or SET_DEST.  */
	      && (!REG_P (sets[i].src)
		  || REGNO (sets[i].src) >= FIRST_PSEUDO_REGISTER)
	      && (!REG_P (dest) || REGNO (dest) >= FIRST_PSEUDO_REGISTER))
	    /* We can't call canon_reg here because it won't do anything if
	       SRC is a hard register.  */
	    {
	      int src_q = REG_QTY (REGNO (src));
	      struct qty_table_elem *src_ent = &qty_table[src_q];
	      int first = src_ent->first_reg;
	      rtx new_src
		= (first >= FIRST_PSEUDO_REGISTER
		   ? regno_reg_rtx[first] : gen_rtx_REG (GET_MODE (src), first));

	      /* We must use validate_change even for this, because this
		 might be a special no-op instruction, suitable only to
		 tag notes onto.  */
	      if (validate_change (insn, &SET_SRC (sets[i].rtl), new_src, 0))
		{
		  src = new_src;
		  /* If we had a constant that is cheaper than what we are now
		     setting SRC to, use that constant.  We ignored it when we
		     thought we could make this into a no-op.  */
		  if (src_const && COST (src_const) < COST (src)
		      && validate_change (insn, &SET_SRC (sets[i].rtl),
					  src_const, 0))
		    src = src_const;
		}
	    }
	}

      /* If we made a change, recompute SRC values.  */
      if (src != sets[i].src)
	{
	  cse_altered = 1;
	  do_not_record = 0;
	  hash_arg_in_memory = 0;
	  sets[i].src = src;
	  sets[i].src_hash = HASH (src, mode);
	  sets[i].src_volatile = do_not_record;
	  sets[i].src_in_memory = hash_arg_in_memory;
	  sets[i].src_elt = lookup (src, sets[i].src_hash, mode);
	}

      /* If this is a single SET, we are setting a register, and we have an
	 equivalent constant, we want to add a REG_EQUAL note.  We don't want
	 to write a REG_EQUAL note for a constant pseudo since verifying that
	 that pseudo hasn't been eliminated is a pain.  Such a note also
	 won't help anything.

	 Avoid a REG_EQUAL note for (CONST (MINUS (LABEL_REF) (LABEL_REF)))
	 which can be created for a reference to a compile time computable
	 entry in a jump table.  */

      if (n_sets == 1 && src_const && REG_P (dest)
	  && !REG_P (src_const)
	  && ! (GET_CODE (src_const) == CONST
		&& GET_CODE (XEXP (src_const, 0)) == MINUS
		&& GET_CODE (XEXP (XEXP (src_const, 0), 0)) == LABEL_REF
		&& GET_CODE (XEXP (XEXP (src_const, 0), 1)) == LABEL_REF))
	{
	  /* We only want a REG_EQUAL note if src_const != src.  */
	  if (! rtx_equal_p (src, src_const))
	    {
	      /* Make sure that the rtx is not shared.  */
	      src_const = copy_rtx (src_const);

	      /* Record the actual constant value in a REG_EQUAL note,
		 making a new one if one does not already exist.  */
	      set_unique_reg_note (insn, REG_EQUAL, src_const);
	    }
	}

      /* Now deal with the destination.  */
      do_not_record = 0;

      /* Strip any SUBREG, ZERO_EXTRACT or STRICT_LOW_PART to find
	 the MEM or REG within.  */
      while (GET_CODE (dest) == SUBREG
	     || GET_CODE (dest) == ZERO_EXTRACT
	     || GET_CODE (dest) == STRICT_LOW_PART)
	dest = XEXP (dest, 0);

      sets[i].inner_dest = dest;

      if (MEM_P (dest))
	{
#ifdef PUSH_ROUNDING
	  /* Stack pushes invalidate the stack pointer.  */
	  rtx addr = XEXP (dest, 0);
	  if (GET_RTX_CLASS (GET_CODE (addr)) == RTX_AUTOINC
	      && XEXP (addr, 0) == stack_pointer_rtx)
	    invalidate (stack_pointer_rtx, VOIDmode);
#endif
	  dest = fold_rtx (dest, insn);
	}

      /* Compute the hash code of the destination now,
	 before the effects of this instruction are recorded,
	 since the register values used in the address computation
	 are those before this instruction.  */
      sets[i].dest_hash = HASH (dest, mode);

      /* Don't enter a bit-field in the hash table
	 because the value in it after the store
	 may not equal what was stored, due to truncation.  */
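      /* For instance, storing the constant 0x100 into an 8-bit
	 ZERO_EXTRACT leaves zero in the field, so recording 0x100 as
	 the destination's value would be wrong.  (Illustrative.)  */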

      if (GET_CODE (SET_DEST (sets[i].rtl)) == ZERO_EXTRACT)
	{
	  rtx width = XEXP (SET_DEST (sets[i].rtl), 1);

	  if (src_const != 0 && GET_CODE (src_const) == CONST_INT
	      && GET_CODE (width) == CONST_INT
	      && INTVAL (width) < HOST_BITS_PER_WIDE_INT
	      && ! (INTVAL (src_const)
		    & ((HOST_WIDE_INT) (-1) << INTVAL (width))))
	    /* Exception: if the value is constant,
	       and it won't be truncated, record it.  */
	    ;
	  else
	    {
	      /* This is chosen so that the destination will be invalidated
		 but no new value will be recorded.
		 We must invalidate because sometimes constant
		 values can be recorded for bitfields.  */
	      sets[i].src_elt = 0;
	      sets[i].src_volatile = 1;
	      src_eqv = 0;
	      src_eqv_elt = 0;
	    }
	}

      /* If only one set in a JUMP_INSN and it is now a no-op, we can delete
	 the insn.  */
      else if (n_sets == 1 && dest == pc_rtx && src == pc_rtx)
	{
	  /* One less use of the label this insn used to jump to.  */
	  delete_insn (insn);
	  cse_jumps_altered = 1;
	  /* No more processing for this set.  */
	  sets[i].rtl = 0;
	}

      /* If this SET is now setting PC to a label, we know it used to
	 be a conditional or computed branch.  */
      else if (dest == pc_rtx && GET_CODE (src) == LABEL_REF
	       && !LABEL_REF_NONLOCAL_P (src))
	{
	  /* Now emit a BARRIER after the unconditional jump.  */
	  if (NEXT_INSN (insn) == 0
	      || !BARRIER_P (NEXT_INSN (insn)))
	    emit_barrier_after (insn);

	  /* We reemit the jump in as many cases as possible just in
	     case the form of an unconditional jump is significantly
	     different from that of a computed or conditional jump.

	     If this insn has multiple sets, then reemitting the
	     jump is nontrivial.  So instead we just force rerecognition
	     and hope for the best.  */
	  if (n_sets == 1)
	    {
	      rtx new, note;

	      new = emit_jump_insn_after (gen_jump (XEXP (src, 0)), insn);
	      JUMP_LABEL (new) = XEXP (src, 0);
	      LABEL_NUSES (XEXP (src, 0))++;

	      /* Make sure to copy over REG_NON_LOCAL_GOTO.  */
	      note = find_reg_note (insn, REG_NON_LOCAL_GOTO, 0);
	      if (note)
		{
		  XEXP (note, 1) = NULL_RTX;
		  REG_NOTES (new) = note;
		}

	      delete_insn (insn);
	      insn = new;

	      /* Now emit a BARRIER after the unconditional jump.  */
	      if (NEXT_INSN (insn) == 0
		  || !BARRIER_P (NEXT_INSN (insn)))
		emit_barrier_after (insn);
	    }
	  else
	    INSN_CODE (insn) = -1;

	  /* Do not bother deleting any unreachable code,
	     let jump/flow do that.  */

	  cse_jumps_altered = 1;
	  sets[i].rtl = 0;
	}

      /* If destination is volatile, invalidate it and then do no further
	 processing for this assignment.  */

      else if (do_not_record)
	{
	  if (REG_P (dest) || GET_CODE (dest) == SUBREG)
	    invalidate (dest, VOIDmode);
	  else if (MEM_P (dest))
	    invalidate (dest, VOIDmode);
	  else if (GET_CODE (dest) == STRICT_LOW_PART
		   || GET_CODE (dest) == ZERO_EXTRACT)
	    invalidate (XEXP (dest, 0), GET_MODE (dest));
	  sets[i].rtl = 0;
	}

      if (sets[i].rtl != 0 && dest != SET_DEST (sets[i].rtl))
	sets[i].dest_hash = HASH (SET_DEST (sets[i].rtl), mode);

#ifdef HAVE_cc0
      /* If setting CC0, record what it was set to, or a constant, if it
	 is equivalent to a constant.  If it is being set to a floating-point
	 value, make a COMPARE with the appropriate constant of 0.  If we
	 don't do this, later code can interpret this as a test against
	 const0_rtx, which can cause problems if we try to put it into an
	 insn as a floating-point operand.  */
      if (dest == cc0_rtx)
	{
	  this_insn_cc0 = src_const && mode != VOIDmode ? src_const : src;
	  this_insn_cc0_mode = mode;
	  if (FLOAT_MODE_P (mode))
	    this_insn_cc0 = gen_rtx_COMPARE (VOIDmode, this_insn_cc0,
					     CONST0_RTX (mode));
	}
#endif
    }

  /* Now enter all non-volatile source expressions in the hash table
     if they are not already present.
     Record their equivalence classes in src_elt.
     This way we can insert the corresponding destinations into
     the same classes even if the actual sources are no longer in them
     (having been invalidated).  */

  if (src_eqv && src_eqv_elt == 0 && sets[0].rtl != 0 && ! src_eqv_volatile
      && ! rtx_equal_p (src_eqv, SET_DEST (sets[0].rtl)))
    {
      struct table_elt *elt;
      struct table_elt *classp = sets[0].src_elt;
      rtx dest = SET_DEST (sets[0].rtl);
      enum machine_mode eqvmode = GET_MODE (dest);

      if (GET_CODE (dest) == STRICT_LOW_PART)
	{
	  eqvmode = GET_MODE (SUBREG_REG (XEXP (dest, 0)));
	  classp = 0;
	}
      if (insert_regs (src_eqv, classp, 0))
	{
	  rehash_using_reg (src_eqv);
	  src_eqv_hash = HASH (src_eqv, eqvmode);
	}
      elt = insert (src_eqv, classp, src_eqv_hash, eqvmode);
      elt->in_memory = src_eqv_in_memory;
      src_eqv_elt = elt;

      /* Check to see if src_eqv_elt is the same as a set source which
	 does not yet have an elt, and if so set the elt of the set source
	 to src_eqv_elt.  */
      for (i = 0; i < n_sets; i++)
	if (sets[i].rtl && sets[i].src_elt == 0
	    && rtx_equal_p (SET_SRC (sets[i].rtl), src_eqv))
	  sets[i].src_elt = src_eqv_elt;
    }

  for (i = 0; i < n_sets; i++)
    if (sets[i].rtl && ! sets[i].src_volatile
	&& ! rtx_equal_p (SET_SRC (sets[i].rtl), SET_DEST (sets[i].rtl)))
      {
	if (GET_CODE (SET_DEST (sets[i].rtl)) == STRICT_LOW_PART)
	  {
	    /* REG_EQUAL in setting a STRICT_LOW_PART
	       gives an equivalent for the entire destination register,
	       not just for the subreg being stored in now.
	       This is a more interesting equivalence, so we arrange later
	       to treat the entire reg as the destination.  */
	    sets[i].src_elt = src_eqv_elt;
	    sets[i].src_hash = src_eqv_hash;
	  }
	else
	  {
	    /* Insert source and constant equivalent into hash table, if not
	       already present.  */
	    struct table_elt *classp = src_eqv_elt;
	    rtx src = sets[i].src;
	    rtx dest = SET_DEST (sets[i].rtl);
	    enum machine_mode mode
	      = GET_MODE (src) == VOIDmode ? GET_MODE (dest) : GET_MODE (src);

	    /* It's possible that we have a source value known to be
	       constant but don't have a REG_EQUAL note on the insn.
	       Lack of a note will mean src_eqv_elt will be NULL.  This
	       can happen where we've generated a SUBREG to access a
	       CONST_INT that is already in a register in a wider mode.
	       Ensure that the source expression is put in the proper
	       constant class.  */
	    if (!classp)
	      classp = sets[i].src_const_elt;

	    if (sets[i].src_elt == 0)
	      {
		/* Don't put a hard register source into the table if this is
		   the last insn of a libcall.  In this case, we only need
		   to put src_eqv_elt in src_elt.  */
		if (! find_reg_note (insn, REG_RETVAL, NULL_RTX))
		  {
		    struct table_elt *elt;

		    /* Note that these insert_regs calls cannot remove
		       any of the src_elt's, because they would have failed to
		       match if not still valid.  */
		    if (insert_regs (src, classp, 0))
		      {
			rehash_using_reg (src);
			sets[i].src_hash = HASH (src, mode);
		      }
		    elt = insert (src, classp, sets[i].src_hash, mode);
		    elt->in_memory = sets[i].src_in_memory;
		    sets[i].src_elt = classp = elt;
		  }
		else
		  sets[i].src_elt = classp;
	      }
	    if (sets[i].src_const && sets[i].src_const_elt == 0
		&& src != sets[i].src_const
		&& ! rtx_equal_p (sets[i].src_const, src))
	      sets[i].src_elt = insert (sets[i].src_const, classp,
					sets[i].src_const_hash, mode);
	  }
      }
    else if (sets[i].src_elt == 0)
      /* If we did not insert the source into the hash table (e.g., it was
	 volatile), note the equivalence class for the REG_EQUAL value, if any,
	 so that the destination goes into that class.  */
      sets[i].src_elt = src_eqv_elt;

  /* Record destination addresses in the hash table.  This allows us to
     check if they are invalidated by other sets.  */
  for (i = 0; i < n_sets; i++)
    {
      if (sets[i].rtl)
	{
	  rtx x = sets[i].inner_dest;
	  struct table_elt *elt;
	  enum machine_mode mode;
	  unsigned hash;

	  if (MEM_P (x))
	    {
	      x = XEXP (x, 0);
	      mode = GET_MODE (x);
	      hash = HASH (x, mode);
	      elt = lookup (x, hash, mode);
	      if (!elt)
		{
		  if (insert_regs (x, NULL, 0))
		    {
		      rtx dest = SET_DEST (sets[i].rtl);

		      rehash_using_reg (x);
		      hash = HASH (x, mode);
		      sets[i].dest_hash = HASH (dest, GET_MODE (dest));
		    }
		  elt = insert (x, NULL, hash, mode);
		}

	      sets[i].dest_addr_elt = elt;
	    }
	  else
	    sets[i].dest_addr_elt = NULL;
	}
    }

  invalidate_from_clobbers (x);

  /* Some registers are invalidated by subroutine calls.  Memory is
     invalidated by non-constant calls.  */

  if (CALL_P (insn))
    {
      if (! CONST_OR_PURE_CALL_P (insn))
	invalidate_memory ();
      invalidate_for_call ();
    }

  /* Now invalidate everything set by this instruction.
     If a SUBREG or other funny destination is being set,
     sets[i].rtl is still nonzero, so here we invalidate the reg
     a part of which is being set.  */

  for (i = 0; i < n_sets; i++)
    if (sets[i].rtl)
      {
	/* We can't use the inner dest, because the mode associated with
	   a ZERO_EXTRACT is significant.  */
	rtx dest = SET_DEST (sets[i].rtl);

	/* Needed for registers to remove the register from its
	   previous quantity's chain.
	   Needed for memory if this is a nonvarying address, unless
	   we have just done an invalidate_memory that covers even those.  */
	if (REG_P (dest) || GET_CODE (dest) == SUBREG)
	  invalidate (dest, VOIDmode);
	else if (MEM_P (dest))
	  invalidate (dest, VOIDmode);
	else if (GET_CODE (dest) == STRICT_LOW_PART
		 || GET_CODE (dest) == ZERO_EXTRACT)
	  invalidate (XEXP (dest, 0), GET_MODE (dest));
      }

  /* A volatile ASM invalidates everything.  */
  if (NONJUMP_INSN_P (insn)
      && GET_CODE (PATTERN (insn)) == ASM_OPERANDS
      && MEM_VOLATILE_P (PATTERN (insn)))
    flush_hash_table ();

  /* Make sure registers mentioned in destinations
     are safe for use in an expression to be inserted.
     This removes from the hash table
     any invalid entry that refers to one of these registers.

     We don't care about the return value from mention_regs because
     we are going to hash the SET_DEST values unconditionally.  */

  for (i = 0; i < n_sets; i++)
    {
      if (sets[i].rtl)
	{
	  rtx x = SET_DEST (sets[i].rtl);

	  if (!REG_P (x))
	    mention_regs (x);
	  else
	    {
	      /* We used to rely on all references to a register becoming
		 inaccessible when a register changes to a new quantity,
		 since that changes the hash code.  However, that is not
		 safe, since after HASH_SIZE new quantities we get a
		 hash 'collision' of a register with its own invalid
		 entries.  And since SUBREGs have been changed not to
		 change their hash code with the hash code of the register,
		 it wouldn't work any longer at all.  So we have to check
		 for any invalid references lying around now.
		 This code is similar to the REG case in mention_regs,
		 but it knows that reg_tick has been incremented, and
		 it leaves reg_in_table as -1.  */
	      unsigned int regno = REGNO (x);
	      unsigned int endregno
		= regno + (regno >= FIRST_PSEUDO_REGISTER ? 1
			   : hard_regno_nregs[regno][GET_MODE (x)]);
	      unsigned int i;

	      for (i = regno; i < endregno; i++)
		{
		  if (REG_IN_TABLE (i) >= 0)
		    {
		      remove_invalid_refs (i);
		      REG_IN_TABLE (i) = -1;
		    }
		}
	    }
	}
    }

  /* We may have just removed some of the src_elt's from the hash table.
     So replace each one with the current head of the same class.
     Also check if destination addresses have been removed.  */

  for (i = 0; i < n_sets; i++)
    if (sets[i].rtl)
      {
	if (sets[i].dest_addr_elt
	    && sets[i].dest_addr_elt->first_same_value == 0)
	  {
	    /* The elt was removed, which means this destination is not
	       valid after this instruction.  */
	    sets[i].rtl = NULL_RTX;
	  }
	else if (sets[i].src_elt && sets[i].src_elt->first_same_value == 0)
	  /* If elt was removed, find current head of same class,
	     or 0 if nothing remains of that class.  */
	  {
	    struct table_elt *elt = sets[i].src_elt;

	    while (elt && elt->prev_same_value)
	      elt = elt->prev_same_value;

	    while (elt && elt->first_same_value == 0)
	      elt = elt->next_same_value;
	    sets[i].src_elt = elt ? elt->first_same_value : 0;
	  }
      }

  /* Now insert the destinations into their equivalence classes.  */

  for (i = 0; i < n_sets; i++)
    if (sets[i].rtl)
      {
	rtx dest = SET_DEST (sets[i].rtl);
	struct table_elt *elt;

	/* Don't record value if we are not supposed to risk allocating
	   floating-point values in registers that might be wider than
	   memory.  */
	if ((flag_float_store
	     && MEM_P (dest)
	     && FLOAT_MODE_P (GET_MODE (dest)))
	    /* Don't record BLKmode values, because we don't know the
	       size of it, and can't be sure that other BLKmode values
	       have the same or smaller size.  */
	    || GET_MODE (dest) == BLKmode
	    /* Don't record values of destinations set inside a libcall block
	       since we might delete the libcall.  Things should have been set
	       up so we won't want to reuse such a value, but we play it safe
	       here.  */
	    || libcall_insn
	    /* If we didn't put a REG_EQUAL value or a source into the hash
	       table, there is no point in recording DEST.  */
	    || sets[i].src_elt == 0
	    /* If DEST is a paradoxical SUBREG and SRC is a ZERO_EXTEND
	       or SIGN_EXTEND, don't record DEST since it can cause
	       some tracking to be wrong.

	       ??? Think about this more later.  */
	    || (GET_CODE (dest) == SUBREG
		&& (GET_MODE_SIZE (GET_MODE (dest))
		    > GET_MODE_SIZE (GET_MODE (SUBREG_REG (dest))))
		&& (GET_CODE (sets[i].src) == SIGN_EXTEND
		    || GET_CODE (sets[i].src) == ZERO_EXTEND)))
	  continue;

	/* STRICT_LOW_PART isn't part of the value BEING set,
	   and neither is the SUBREG inside it.
	   Note that in this case SETS[I].SRC_ELT is really SRC_EQV_ELT.  */
	if (GET_CODE (dest) == STRICT_LOW_PART)
	  dest = SUBREG_REG (XEXP (dest, 0));

	if (REG_P (dest) || GET_CODE (dest) == SUBREG)
	  /* Registers must also be inserted into chains for quantities.  */
	  if (insert_regs (dest, sets[i].src_elt, 1))
	    {
	      /* If `insert_regs' changes something, the hash code must be
		 recalculated.  */
	      rehash_using_reg (dest);
	      sets[i].dest_hash = HASH (dest, GET_MODE (dest));
	    }

	elt = insert (dest, sets[i].src_elt,
		      sets[i].dest_hash, GET_MODE (dest));

	elt->in_memory = (MEM_P (sets[i].inner_dest)
			  && !MEM_READONLY_P (sets[i].inner_dest));

	/* If we have (set (subreg:m1 (reg:m2 foo) 0) (bar:m1)), M1 is no
	   narrower than M2, and both M1 and M2 are the same number of words,
	   we are also doing (set (reg:m2 foo) (subreg:m2 (bar:m1) 0)) so
	   make that equivalence as well.

	   However, BAR may have equivalences for which gen_lowpart
	   will produce a simpler value than gen_lowpart applied to
	   BAR (e.g., if BAR was ZERO_EXTENDed from M2), so we will scan all
	   BAR's equivalences.  If we don't get a simplified form, make
	   the SUBREG.  It will not be used in an equivalence, but will
	   cause two similar assignments to be detected.

	   Note the loop below will find SUBREG_REG (DEST) since we have
	   already entered SRC and DEST of the SET in the table.  */

	if (GET_CODE (dest) == SUBREG
	    && (((GET_MODE_SIZE (GET_MODE (SUBREG_REG (dest))) - 1)
		 / UNITS_PER_WORD)
		== (GET_MODE_SIZE (GET_MODE (dest)) - 1) / UNITS_PER_WORD)
	    && (GET_MODE_SIZE (GET_MODE (dest))
		>= GET_MODE_SIZE (GET_MODE (SUBREG_REG (dest))))
	    && sets[i].src_elt != 0)
	  {
	    enum machine_mode new_mode = GET_MODE (SUBREG_REG (dest));
	    struct table_elt *elt, *classp = 0;

	    for (elt = sets[i].src_elt->first_same_value; elt;
		 elt = elt->next_same_value)
	      {
		rtx new_src = 0;
		unsigned src_hash;
		struct table_elt *src_elt;
		int byte = 0;

		/* Ignore invalid entries.  */
		if (!REG_P (elt->exp)
		    && ! exp_equiv_p (elt->exp, elt->exp, 1, false))
		  continue;

		/* We may have already been playing subreg games.  If the
		   mode is already correct for the destination, use it.  */
		if (GET_MODE (elt->exp) == new_mode)
		  new_src = elt->exp;
		else
		  {
		    /* Calculate big endian correction for the SUBREG_BYTE.
		       We have already checked that M1 (GET_MODE (dest))
		       is not narrower than M2 (new_mode).  */
		    if (BYTES_BIG_ENDIAN)
		      byte = (GET_MODE_SIZE (GET_MODE (dest))
			      - GET_MODE_SIZE (new_mode));
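		    /* E.g., for a DImode DEST and an SImode NEW_MODE
		       this yields byte 4, the low word on a big-endian
		       target.  (Illustrative.)  */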

		    new_src = simplify_gen_subreg (new_mode, elt->exp,
					           GET_MODE (dest), byte);
		  }

		/* The call to simplify_gen_subreg fails if the value is
		   VOIDmode and no simplification is possible, e.g. for
		   EXPR_LISTs denoting function call results.  It is
		   invalid to construct a SUBREG with a VOIDmode
		   SUBREG_REG, hence a zero new_src means we can't do
		   this substitution.  */
		if (! new_src)
		  continue;

		src_hash = HASH (new_src, new_mode);
		src_elt = lookup (new_src, src_hash, new_mode);

		/* Put the new source in the hash table if it isn't
		   already there.  */
		if (src_elt == 0)
		  {
		    if (insert_regs (new_src, classp, 0))
		      {
			rehash_using_reg (new_src);
			src_hash = HASH (new_src, new_mode);
		      }
		    src_elt = insert (new_src, classp, src_hash, new_mode);
		    src_elt->in_memory = elt->in_memory;
		  }
		else if (classp && classp != src_elt->first_same_value)
		  /* Show that two things that we've seen before are
		     actually the same.  */
		  merge_equiv_classes (src_elt, classp);

		classp = src_elt->first_same_value;
		/* Ignore invalid entries.  */
		while (classp
		       && !REG_P (classp->exp)
		       && ! exp_equiv_p (classp->exp, classp->exp, 1, false))
		  classp = classp->next_same_value;
	      }
	  }
      }

  /* Special handling for (set REG0 REG1) where REG0 is the
     "cheapest", cheaper than REG1.  After cse, REG1 will probably not
     be used in the sequel, so (if easily done) change this insn to
     (set REG1 REG0) and replace REG1 with REG0 in the previous insn
     that computed their value.  Then REG1 will become a dead store
     and won't cloud the situation for later optimizations.

     Do not make this change if REG1 is a hard register, because it will
     then be used in the sequel and we may be changing a two-operand insn
     into a three-operand insn.

     Also do not do this if we are operating on a copy of INSN.

     Also don't do this if INSN ends a libcall; this would cause an unrelated
     register to be set in the middle of a libcall, and we then get bad code
     if the libcall is deleted.  */

  if (n_sets == 1 && sets[0].rtl && REG_P (SET_DEST (sets[0].rtl))
      && NEXT_INSN (PREV_INSN (insn)) == insn
      && REG_P (SET_SRC (sets[0].rtl))
      && REGNO (SET_SRC (sets[0].rtl)) >= FIRST_PSEUDO_REGISTER
      && REGNO_QTY_VALID_P (REGNO (SET_SRC (sets[0].rtl))))
    {
      int src_q = REG_QTY (REGNO (SET_SRC (sets[0].rtl)));
      struct qty_table_elem *src_ent = &qty_table[src_q];

      if ((src_ent->first_reg == REGNO (SET_DEST (sets[0].rtl)))
	  && ! find_reg_note (insn, REG_RETVAL, NULL_RTX))
	{
	  rtx prev = insn;
	  /* Scan for the previous nonnote insn, but stop at a basic
	     block boundary.  */
	  do
	    {
	      prev = PREV_INSN (prev);
	    }
	  while (prev && NOTE_P (prev)
		 && NOTE_LINE_NUMBER (prev) != NOTE_INSN_BASIC_BLOCK);

	  /* Do not swap the registers around if the previous instruction
	     attaches a REG_EQUIV note to REG1.

	     ??? It's not entirely clear whether we can transfer a REG_EQUIV
	     from the pseudo that originally shadowed an incoming argument
	     to another register.  Some uses of REG_EQUIV might rely on it
	     being attached to REG1 rather than REG2.

	     This section previously turned the REG_EQUIV into a REG_EQUAL
	     note.  We cannot do that because REG_EQUIV may provide an
	     uninitialized stack slot when REG_PARM_STACK_SPACE is used.  */

	  if (prev != 0 && NONJUMP_INSN_P (prev)
	      && GET_CODE (PATTERN (prev)) == SET
	      && SET_DEST (PATTERN (prev)) == SET_SRC (sets[0].rtl)
	      && ! find_reg_note (prev, REG_EQUIV, NULL_RTX))
	    {
	      rtx dest = SET_DEST (sets[0].rtl);
	      rtx src = SET_SRC (sets[0].rtl);
	      rtx note;

	      validate_change (prev, &SET_DEST (PATTERN (prev)), dest, 1);
	      validate_change (insn, &SET_DEST (sets[0].rtl), src, 1);
	      validate_change (insn, &SET_SRC (sets[0].rtl), dest, 1);
	      apply_change_group ();

	      /* If INSN has a REG_EQUAL note, and this note mentions
		 REG0, then we must delete it, because the value in
		 REG0 has changed.  If the note's value is REG1, we must
		 also delete it because that is now this insn's dest.  */
	      note = find_reg_note (insn, REG_EQUAL, NULL_RTX);
	      if (note != 0
		  && (reg_mentioned_p (dest, XEXP (note, 0))
		      || rtx_equal_p (src, XEXP (note, 0))))
		remove_note (insn, note);
	    }
	}
    }

  /* If this is a conditional jump insn, record any known equivalences due to
     the condition being tested.  */

  if (JUMP_P (insn)
      && n_sets == 1 && GET_CODE (x) == SET
      && GET_CODE (SET_SRC (x)) == IF_THEN_ELSE)
    record_jump_equiv (insn, 0);

#ifdef HAVE_cc0
  /* If the previous insn set CC0 and this insn no longer references CC0,
     delete the previous insn.  Here we use the fact that nothing expects CC0
     to be valid over an insn, which is true until the final pass.  */
  if (prev_insn && NONJUMP_INSN_P (prev_insn)
      && (tem = single_set (prev_insn)) != 0
      && SET_DEST (tem) == cc0_rtx
      && ! reg_mentioned_p (cc0_rtx, x))
    delete_insn (prev_insn);

  prev_insn_cc0 = this_insn_cc0;
  prev_insn_cc0_mode = this_insn_cc0_mode;
  prev_insn = insn;
#endif
}

/* Remove from the hash table all expressions that reference memory.  */

static void
invalidate_memory (void)
{
  int i;
  struct table_elt *p, *next;

  for (i = 0; i < HASH_SIZE; i++)
    for (p = table[i]; p; p = next)
      {
	next = p->next_same_hash;
	if (p->in_memory)
	  remove_from_table (p, i);
      }
}

/* If ADDR is an address that implicitly affects the stack pointer, return
   1 and update the register tables to show the effect.  Else, return 0.  */
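/* For example, (mem:SI (pre_dec:SI (reg:SI sp))) -- a stack push --
   implicitly decrements the stack pointer.  (Illustrative.)  */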

static int
addr_affects_sp_p (rtx addr)
{
  if (GET_RTX_CLASS (GET_CODE (addr)) == RTX_AUTOINC
      && REG_P (XEXP (addr, 0))
      && REGNO (XEXP (addr, 0)) == STACK_POINTER_REGNUM)
    {
      if (REG_TICK (STACK_POINTER_REGNUM) >= 0)
	{
	  REG_TICK (STACK_POINTER_REGNUM)++;
	  /* Is it possible to use a subreg of SP?  */
	  SUBREG_TICKED (STACK_POINTER_REGNUM) = -1;
	}

      /* This should be *very* rare.  */
      if (TEST_HARD_REG_BIT (hard_regs_in_table, STACK_POINTER_REGNUM))
	invalidate (stack_pointer_rtx, VOIDmode);

      return 1;
    }

  return 0;
}

/* Perform invalidation on the basis of everything about an insn
   except for invalidating the actual places that are SET in it.
   This includes the places CLOBBERed, and anything that might
   alias with something that is SET or CLOBBERed.

   X is the pattern of the insn.  */

static void
invalidate_from_clobbers (rtx x)
{
  if (GET_CODE (x) == CLOBBER)
    {
      rtx ref = XEXP (x, 0);
      if (ref)
	{
	  if (REG_P (ref) || GET_CODE (ref) == SUBREG
	      || MEM_P (ref))
	    invalidate (ref, VOIDmode);
	  else if (GET_CODE (ref) == STRICT_LOW_PART
		   || GET_CODE (ref) == ZERO_EXTRACT)
	    invalidate (XEXP (ref, 0), GET_MODE (ref));
	}
    }
  else if (GET_CODE (x) == PARALLEL)
    {
      int i;
      for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
	{
	  rtx y = XVECEXP (x, 0, i);
	  if (GET_CODE (y) == CLOBBER)
	    {
	      rtx ref = XEXP (y, 0);
	      if (REG_P (ref) || GET_CODE (ref) == SUBREG
		  || MEM_P (ref))
		invalidate (ref, VOIDmode);
	      else if (GET_CODE (ref) == STRICT_LOW_PART
		       || GET_CODE (ref) == ZERO_EXTRACT)
		invalidate (XEXP (ref, 0), GET_MODE (ref));
	    }
	}
    }
}

/* Process X, part of the REG_NOTES of an insn.  Look at any REG_EQUAL notes
   and replace any registers in them with either an equivalent constant
   or the canonical form of the register.  If we are inside an address,
   only do this if the address remains valid.

   OBJECT is 0 except when within a MEM in which case it is the MEM.

   Return the replacement for X.  */
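/* For example, if (reg:SI 100) is known to hold (const_int 10), a
   REG_EQUAL note of (plus:SI (reg:SI 100) (const_int 4)) is rewritten
   as (plus:SI (const_int 10) (const_int 4)), which later folding can
   simplify.  (Illustrative.)  */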

static rtx
cse_process_notes (rtx x, rtx object)
{
  enum rtx_code code = GET_CODE (x);
  const char *fmt = GET_RTX_FORMAT (code);
  int i;

  switch (code)
    {
    case CONST_INT:
    case CONST:
    case SYMBOL_REF:
    case LABEL_REF:
    case CONST_DOUBLE:
    case CONST_VECTOR:
    case PC:
    case CC0:
    case LO_SUM:
      return x;

    case MEM:
      validate_change (x, &XEXP (x, 0),
		       cse_process_notes (XEXP (x, 0), x), 0);
      return x;

    case EXPR_LIST:
    case INSN_LIST:
      if (REG_NOTE_KIND (x) == REG_EQUAL)
	XEXP (x, 0) = cse_process_notes (XEXP (x, 0), NULL_RTX);
      if (XEXP (x, 1))
	XEXP (x, 1) = cse_process_notes (XEXP (x, 1), NULL_RTX);
      return x;

    case SIGN_EXTEND:
    case ZERO_EXTEND:
    case SUBREG:
      {
	rtx new = cse_process_notes (XEXP (x, 0), object);
	/* We don't substitute VOIDmode constants into these rtx,
	   since they would impede folding.  */
	if (GET_MODE (new) != VOIDmode)
	  validate_change (object, &XEXP (x, 0), new, 0);
	return x;
      }

    case REG:
      i = REG_QTY (REGNO (x));

      /* Return a constant or a constant register.  */
      if (REGNO_QTY_VALID_P (REGNO (x)))
	{
	  struct qty_table_elem *ent = &qty_table[i];

	  if (ent->const_rtx != NULL_RTX
	      && (CONSTANT_P (ent->const_rtx)
		  || REG_P (ent->const_rtx)))
	    {
	      rtx new = gen_lowpart (GET_MODE (x), ent->const_rtx);
	      if (new)
		return new;
	    }
	}

      /* Otherwise, canonicalize this register.  */
      return canon_reg (x, NULL_RTX);

    default:
      break;
    }

  for (i = 0; i < GET_RTX_LENGTH (code); i++)
    if (fmt[i] == 'e')
      validate_change (object, &XEXP (x, i),
		       cse_process_notes (XEXP (x, i), object), 0);

  return x;
}

/* Process one SET of an insn that was skipped.  We ignore CLOBBERs
   since they are done elsewhere.  This function is called via note_stores.  */

static void
invalidate_skipped_set (rtx dest, rtx set, void *data ATTRIBUTE_UNUSED)
{
  enum rtx_code code = GET_CODE (dest);

  if (code == MEM
      && ! addr_affects_sp_p (dest)	/* If this is not a stack push ...  */
      /* There are times when an address can appear varying and be a PLUS
	 during this scan when it would be a fixed address were we to know
	 the proper equivalences.  So invalidate all memory if there is
	 a BLKmode or nonscalar memory reference or a reference to a
	 variable address.  */
      && (MEM_IN_STRUCT_P (dest) || GET_MODE (dest) == BLKmode
	  || cse_rtx_varies_p (XEXP (dest, 0), 0)))
    {
      invalidate_memory ();
      return;
    }

  if (GET_CODE (set) == CLOBBER
      || CC0_P (dest)
      || dest == pc_rtx)
    return;

  if (code == STRICT_LOW_PART || code == ZERO_EXTRACT)
    invalidate (XEXP (dest, 0), GET_MODE (dest));
  else if (code == REG || code == SUBREG || code == MEM)
    invalidate (dest, VOIDmode);
}

/* Invalidate all insns from START up to the end of the function or the
   next label.  This is called when we wish to CSE around a block that is
   conditionally executed.  */

static void
invalidate_skipped_block (rtx start)
{
  rtx insn;

  for (insn = start; insn && !LABEL_P (insn);
       insn = NEXT_INSN (insn))
    {
      if (! INSN_P (insn))
	continue;

      if (CALL_P (insn))
	{
	  if (! CONST_OR_PURE_CALL_P (insn))
	    invalidate_memory ();
	  invalidate_for_call ();
	}

      invalidate_from_clobbers (PATTERN (insn));
      note_stores (PATTERN (insn), invalidate_skipped_set, NULL);
    }
}

/* Find the end of INSN's basic block and return its range,
   the total number of SETs in all the insns of the block, the last insn of the
   block, and the branch path.

   The branch path indicates which branches should be followed.  If a nonzero
   path size is specified, the block should be rescanned and a different set
   of branches will be taken.  The branch path is only used if
   FLAG_CSE_FOLLOW_JUMPS or FLAG_CSE_SKIP_BLOCKS is nonzero.

   DATA is a pointer to a struct cse_basic_block_data, defined below, that is
   used to describe the block.  It is filled in with the information about
   the current block.  The incoming structure's branch path, if any, is used
   to construct the output branch path.  */
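/* For example, with -fcse-follow-jumps a block ending in a conditional
   branch whose target label has exactly one use may be extended through
   the branch; the path records that decision as PATH_TAKEN so that a
   rescan can flip it to PATH_NOT_TAKEN.  (Illustrative.)  */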

static void
cse_end_of_basic_block (rtx insn, struct cse_basic_block_data *data,
			int follow_jumps, int skip_blocks)
{
  rtx p = insn, q;
  int nsets = 0;
  int low_cuid = INSN_CUID (insn), high_cuid = INSN_CUID (insn);
  rtx next = INSN_P (insn) ? insn : next_real_insn (insn);
  int path_size = data->path_size;
  int path_entry = 0;
  int i;

  /* Update the previous branch path, if any.  If the last branch was
     previously PATH_TAKEN, mark it PATH_NOT_TAKEN.
     If it was previously PATH_NOT_TAKEN,
     shorten the path by one and look at the previous branch.  We know that
     at least one branch must have been taken if PATH_SIZE is nonzero.  */
  while (path_size > 0)
    {
      if (data->path[path_size - 1].status != PATH_NOT_TAKEN)
	{
	  data->path[path_size - 1].status = PATH_NOT_TAKEN;
	  break;
	}
      else
	path_size--;
    }

  /* If the first instruction is marked with QImode, that means we've
     already processed this block.  Our caller will look at DATA->LAST
     to figure out where to go next.  We want to return the next block
     in the instruction stream, not some branched-to block somewhere
     else.  We accomplish this by pretending our caller forbade us to
     follow jumps or skip blocks.  */
  if (GET_MODE (insn) == QImode)
    follow_jumps = skip_blocks = 0;

  /* Scan to end of this basic block.  */
  while (p && !LABEL_P (p))
    {
      /* Don't cse over a call to setjmp; on some machines (e.g., VAX)
	 the regs restored by the longjmp come from
	 a later time than the setjmp.  */
      if (PREV_INSN (p) && CALL_P (PREV_INSN (p))
	  && find_reg_note (PREV_INSN (p), REG_SETJMP, NULL))
	break;

      /* A PARALLEL can have lots of SETs in it,
	 especially if it is really an ASM_OPERANDS.  */
      if (INSN_P (p) && GET_CODE (PATTERN (p)) == PARALLEL)
	nsets += XVECLEN (PATTERN (p), 0);
      else if (!NOTE_P (p))
	nsets += 1;

      /* Ignore insns made by CSE; they cannot affect the boundaries of
	 the basic block.  */

      if (INSN_UID (p) <= max_uid && INSN_CUID (p) > high_cuid)
	high_cuid = INSN_CUID (p);
      if (INSN_UID (p) <= max_uid && INSN_CUID (p) < low_cuid)
	low_cuid = INSN_CUID (p);

      /* See if this insn is in our branch path.  If it is and we are to
	 take it, do so.  */
      if (path_entry < path_size && data->path[path_entry].branch == p)
	{
	  if (data->path[path_entry].status != PATH_NOT_TAKEN)
	    p = JUMP_LABEL (p);

	  /* Point to next entry in path, if any.  */
	  path_entry++;
	}

      /* If this is a conditional jump, we can follow it if -fcse-follow-jumps
	 was specified, we haven't reached our maximum path length, there are
	 insns following the target of the jump, this is the only use of the
	 jump label, and the target label is preceded by a BARRIER.

	 Alternatively, we can follow the jump if it branches around a
	 block of code and there are no other branches into the block.
	 In this case invalidate_skipped_block will be called to invalidate any
	 registers set in the block when following the jump.  */

      else if ((follow_jumps || skip_blocks) && path_size < PARAM_VALUE (PARAM_MAX_CSE_PATH_LENGTH) - 1
	       && JUMP_P (p)
	       && GET_CODE (PATTERN (p)) == SET
	       && GET_CODE (SET_SRC (PATTERN (p))) == IF_THEN_ELSE
	       && JUMP_LABEL (p) != 0
	       && LABEL_NUSES (JUMP_LABEL (p)) == 1
	       && NEXT_INSN (JUMP_LABEL (p)) != 0)
	{
	  for (q = PREV_INSN (JUMP_LABEL (p)); q; q = PREV_INSN (q))
	    if ((!NOTE_P (q)
		 || (PREV_INSN (q) && CALL_P (PREV_INSN (q))
		     && find_reg_note (PREV_INSN (q), REG_SETJMP, NULL)))
		&& (!LABEL_P (q) || LABEL_NUSES (q) != 0))
	      break;

	  /* If we ran into a BARRIER, this code is an extension of the
	     basic block when the branch is taken.  */
	  if (follow_jumps && q != 0 && BARRIER_P (q))
	    {
	      /* Don't allow ourselves to keep walking around an
		 always-executed loop.  */
	      if (next_real_insn (q) == next)
		{
		  p = NEXT_INSN (p);
		  continue;
		}

	      /* Similarly, don't put a branch in our path more than once.  */
	      for (i = 0; i < path_entry; i++)
		if (data->path[i].branch == p)
		  break;

	      if (i != path_entry)
		break;

	      data->path[path_entry].branch = p;
	      data->path[path_entry++].status = PATH_TAKEN;

	      /* This branch now ends our path.  It was possible that we
		 didn't see this branch the last time around (when the
		 insn in front of the target was a JUMP_INSN that was
		 turned into a no-op).  */
	      path_size = path_entry;

	      p = JUMP_LABEL (p);
	      /* Mark block so we won't scan it again later.  */
	      PUT_MODE (NEXT_INSN (p), QImode);
	    }
	  /* Detect a branch around a block of code.  */
	  else if (skip_blocks && q != 0 && !LABEL_P (q))
	    {
	      rtx tmp;

	      if (next_real_insn (q) == next)
		{
		  p = NEXT_INSN (p);
		  continue;
		}

	      for (i = 0; i < path_entry; i++)
		if (data->path[i].branch == p)
		  break;

	      if (i != path_entry)
		break;

	      /* This is no_labels_between_p (p, q) with an added check for
		 reaching the end of a function (in case Q precedes P).  */
	      for (tmp = NEXT_INSN (p); tmp && tmp != q; tmp = NEXT_INSN (tmp))
		if (LABEL_P (tmp))
		  break;

	      if (tmp == q)
		{
		  data->path[path_entry].branch = p;
		  data->path[path_entry++].status = PATH_AROUND;

		  path_size = path_entry;

		  p = JUMP_LABEL (p);
		  /* Mark block so we won't scan it again later.  */
		  PUT_MODE (NEXT_INSN (p), QImode);
		}
	    }
	}
      p = NEXT_INSN (p);
    }

  data->low_cuid = low_cuid;
  data->high_cuid = high_cuid;
  data->nsets = nsets;
  data->last = p;

  /* If none of the jumps in the path were taken, set our path length to
     zero so a rescan won't be done.  */
  for (i = path_size - 1; i >= 0; i--)
    if (data->path[i].status != PATH_NOT_TAKEN)
      break;

  if (i == -1)
    data->path_size = 0;
  else
    data->path_size = path_size;

  /* End the current branch path.  */
  data->path[path_size].branch = 0;
}

/* Perform cse on the instructions of a function.
   F is the first instruction.
   NREGS is one plus the highest pseudo-reg number used in the function.

   Returns 1 if jump_optimize should be redone due to simplifications
   in conditional jump instructions.  */

int
cse_main (rtx f, int nregs)
{
  struct cse_basic_block_data val;
  rtx insn = f;
  int i;

  init_cse_reg_info (nregs);

  val.path = XNEWVEC (struct branch_path, PARAM_VALUE (PARAM_MAX_CSE_PATH_LENGTH));

  cse_jumps_altered = 0;
  recorded_label_ref = 0;
  constant_pool_entries_cost = 0;
  constant_pool_entries_regcost = 0;
  val.path_size = 0;
  rtl_hooks = cse_rtl_hooks;

  init_recog ();
  init_alias_analysis ();

  reg_eqv_table = XNEWVEC (struct reg_eqv_elem, nregs);

  /* Find the largest uid.  */

  max_uid = get_max_uid ();
  uid_cuid = XCNEWVEC (int, max_uid + 1);

  /* Compute the mapping from uids to cuids.
     CUIDs are numbers assigned to insns, like uids,
     except that cuids increase monotonically through the code.
     Don't assign cuids to line-number NOTEs, so that the distance in cuids
     between two insns is not affected by -g.  */
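  /* For example, insns with uids 40, 7 and 23 appearing in that order
     receive cuids 1, 2 and 3.  (Illustrative uids.)  */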

  for (insn = f, i = 0; insn; insn = NEXT_INSN (insn))
    {
      if (!NOTE_P (insn)
	  || NOTE_LINE_NUMBER (insn) < 0)
	INSN_CUID (insn) = ++i;
      else
	/* Give a line number note the same cuid as preceding insn.  */
	INSN_CUID (insn) = i;
    }

  /* Loop over basic blocks.
     Compute the maximum number of qty's needed for each basic block
     (which is 2 for each SET).  */
  insn = f;
  while (insn)
    {
      cse_altered = 0;
      cse_end_of_basic_block (insn, &val, flag_cse_follow_jumps,
			      flag_cse_skip_blocks);

      /* If this basic block was already processed or has no sets, skip it.  */
      if (val.nsets == 0 || GET_MODE (insn) == QImode)
	{
	  PUT_MODE (insn, VOIDmode);
	  insn = (val.last ? NEXT_INSN (val.last) : 0);
	  val.path_size = 0;
	  continue;
	}

      cse_basic_block_start = val.low_cuid;
      cse_basic_block_end = val.high_cuid;
      max_qty = val.nsets * 2;

      if (dump_file)
	fprintf (dump_file, ";; Processing block from %d to %d, %d sets.\n",
		 INSN_UID (insn), val.last ? INSN_UID (val.last) : 0,
		 val.nsets);

      /* Make MAX_QTY bigger to give us room to optimize
	 past the end of this basic block, if that should prove useful.  */
      if (max_qty < 500)
	max_qty = 500;

      /* If this basic block is being extended by following certain jumps,
         (see `cse_end_of_basic_block'), we reprocess the code from the start.
         Otherwise, we start after this basic block.  */
      if (val.path_size > 0)
	cse_basic_block (insn, val.last, val.path);
      else
	{
	  int old_cse_jumps_altered = cse_jumps_altered;
	  rtx temp;

	  /* When cse changes a conditional jump to an unconditional
	     jump, we want to reprocess the block, since it will give
	     us a new branch path to investigate.  */
	  cse_jumps_altered = 0;
	  temp = cse_basic_block (insn, val.last, val.path);
	  if (cse_jumps_altered == 0
	      || (flag_cse_follow_jumps == 0 && flag_cse_skip_blocks == 0))
	    insn = temp;

	  cse_jumps_altered |= old_cse_jumps_altered;
	}

      if (cse_altered)
	ggc_collect ();

#ifdef USE_C_ALLOCA
      alloca (0);
#endif
    }

  /* Clean up.  */
  end_alias_analysis ();
  free (uid_cuid);
  free (reg_eqv_table);
  free (val.path);
  rtl_hooks = general_rtl_hooks;

  return cse_jumps_altered || recorded_label_ref;
}

/* Process a single basic block.  FROM and TO are the limits of the basic
   block.  NEXT_BRANCH points to the branch path when following jumps or
   a null path when not following jumps.  */

static rtx
cse_basic_block (rtx from, rtx to, struct branch_path *next_branch)
{
  rtx insn;
  int to_usage = 0;
  rtx libcall_insn = NULL_RTX;
  int num_insns = 0;
  int no_conflict = 0;

  /* Allocate the space needed by qty_table.  */
  qty_table = XNEWVEC (struct qty_table_elem, max_qty);

  new_basic_block ();

  /* TO might be a label.  If so, protect it from being deleted.  */
  if (to != 0 && LABEL_P (to))
    ++LABEL_NUSES (to);

  for (insn = from; insn != to; insn = NEXT_INSN (insn))
    {
      enum rtx_code code = GET_CODE (insn);

      /* Once we have processed PARAM_MAX_CSE_INSNS insns (1,000 by
	 default), flush the hash table to avoid extreme quadratic
	 behavior.  We must not include NOTEs in the count since there
	 may be more of them when generating debugging information.  If
	 we clear the table at different times, code generated with
	 -g -O might be different from code generated with -O but not -g.

	 ??? This is a real kludge and needs to be done some other way.
	 Perhaps for 2.9.  */
      if (code != NOTE && num_insns++ > PARAM_VALUE (PARAM_MAX_CSE_INSNS))
	{
	  flush_hash_table ();
	  num_insns = 0;
	}

      /* See if this is a branch that is part of the path.  If so, and it is
	 to be taken, do so.  */
      if (next_branch->branch == insn)
	{
	  enum taken status = next_branch++->status;
	  if (status != PATH_NOT_TAKEN)
	    {
	      if (status == PATH_TAKEN)
		record_jump_equiv (insn, 1);
	      else
		invalidate_skipped_block (NEXT_INSN (insn));

	      /* Set the last insn as the jump insn; it doesn't affect cc0.
		 Then follow this branch.  */
#ifdef HAVE_cc0
	      prev_insn_cc0 = 0;
	      prev_insn = insn;
#endif
	      insn = JUMP_LABEL (insn);
	      continue;
	    }
	}

      if (GET_MODE (insn) == QImode)
	PUT_MODE (insn, VOIDmode);

      if (GET_RTX_CLASS (code) == RTX_INSN)
	{
	  rtx p;

	  /* Process notes first so we have all notes in canonical forms when
	     looking for duplicate operations.  */

	  if (REG_NOTES (insn))
	    REG_NOTES (insn) = cse_process_notes (REG_NOTES (insn), NULL_RTX);

	  /* Track when we are inside a LIBCALL block.  Inside such a block,
	     we do not want to record destinations.  The last insn of a
	     LIBCALL block is not considered to be part of the block, since
	     its destination is the result of the block and hence should be
	     recorded.  */

	  if (REG_NOTES (insn) != 0)
	    {
	      if ((p = find_reg_note (insn, REG_LIBCALL, NULL_RTX)))
		libcall_insn = XEXP (p, 0);
	      else if (find_reg_note (insn, REG_RETVAL, NULL_RTX))
		{
		  /* Keep libcall_insn for the last SET insn of a no-conflict
		     block to prevent changing the destination.  */
		  if (! no_conflict)
		    libcall_insn = 0;
		  else
		    no_conflict = -1;
		}
	      else if (find_reg_note (insn, REG_NO_CONFLICT, NULL_RTX))
		no_conflict = 1;
	    }

	  cse_insn (insn, libcall_insn);

	  if (no_conflict == -1)
	    {
	      libcall_insn = 0;
	      no_conflict = 0;
	    }
	    
	  /* If we haven't already found an insn where we added a LABEL_REF,
	     check this one.  */
	  if (NONJUMP_INSN_P (insn) && ! recorded_label_ref
	      && for_each_rtx (&PATTERN (insn), check_for_label_ref,
			       (void *) insn))
	    recorded_label_ref = 1;
	}

      /* If INSN is now an unconditional jump, skip to the end of our
	 basic block by pretending that we just did the last insn in the
	 basic block.  If we are jumping to the end of our block, show
	 that we can have one usage of TO.  */

      if (any_uncondjump_p (insn))
	{
	  if (to == 0)
	    {
	      free (qty_table);
	      return 0;
	    }

	  if (JUMP_LABEL (insn) == to)
	    to_usage = 1;

	  /* Maybe TO was deleted because the jump is unconditional.
	     If so, there is nothing left in this basic block.  */
	  /* ??? Perhaps it would be smarter to set TO
	     to whatever follows this insn,
	     and pretend the basic block had always ended here.  */
	  if (INSN_DELETED_P (to))
	    break;

	  insn = PREV_INSN (to);
	}

      /* See if it is ok to keep on going past the label
	 which used to end our basic block.  Remember that we incremented
	 the count of that label, so we decrement it here.  If we made
	 a jump unconditional, TO_USAGE will be one; in that case, we don't
	 want to count the use in that jump.  */

      if (to != 0 && NEXT_INSN (insn) == to
	  && LABEL_P (to) && --LABEL_NUSES (to) == to_usage)
	{
	  struct cse_basic_block_data val;
	  rtx prev;

	  insn = NEXT_INSN (to);

	  /* If TO was the last insn in the function, we are done.  */
	  if (insn == 0)
	    {
	      free (qty_table);
	      return 0;
	    }

	  /* If TO was preceded by a BARRIER we are done with this block
	     because it has no continuation.  */
	  prev = prev_nonnote_insn (to);
	  if (prev && BARRIER_P (prev))
	    {
	      free (qty_table);
	      return insn;
	    }

	  /* Find the end of the following block.  Note that we won't be
	     following branches in this case.  */
	  to_usage = 0;
	  val.path_size = 0;
	  val.path = XNEWVEC (struct branch_path, PARAM_VALUE (PARAM_MAX_CSE_PATH_LENGTH));
	  cse_end_of_basic_block (insn, &val, 0, 0);
	  free (val.path);

	  /* If the tables we allocated have enough space left
	     to handle all the SETs in the next basic block,
	     continue through it.  Otherwise, return,
	     and that block will be scanned individually.  */
	  if (val.nsets * 2 + next_qty > max_qty)
	    break;

	  cse_basic_block_start = val.low_cuid;
	  cse_basic_block_end = val.high_cuid;
	  to = val.last;

	  /* Prevent TO from being deleted if it is a label.  */
	  if (to != 0 && LABEL_P (to))
	    ++LABEL_NUSES (to);

	  /* Back up so we process the first insn in the extension.  */
	  insn = PREV_INSN (insn);
	}
    }

  gcc_assert (next_qty <= max_qty);

  free (qty_table);

  return to ? NEXT_INSN (to) : 0;
}

/* Called via for_each_rtx to see if an insn is using a LABEL_REF for which
   there isn't a REG_LABEL note.  Return one if so.  DATA is the insn.  */

static int
check_for_label_ref (rtx *rtl, void *data)
{
  rtx insn = (rtx) data;

  /* If this insn uses a LABEL_REF and there isn't a REG_LABEL note for it,
     we must rerun jump since it needs to place the note.  If this is a
     LABEL_REF for a CODE_LABEL that isn't in the insn chain, don't do this
     since no REG_LABEL will be added.  */
  return (GET_CODE (*rtl) == LABEL_REF
	  && ! LABEL_REF_NONLOCAL_P (*rtl)
	  && LABEL_P (XEXP (*rtl, 0))
	  && INSN_UID (XEXP (*rtl, 0)) != 0
	  && ! find_reg_note (insn, REG_LABEL, XEXP (*rtl, 0)));
}

/* Count the number of times registers are used (not set) in X.
   COUNTS is an array in which we accumulate the count, INCR is how much
   we count each register usage.

   Don't count a usage of DEST, which is the SET_DEST of a SET which
   contains X in its SET_SRC.  This is because such a SET does not
   modify the liveness of DEST.
   DEST is set to pc_rtx for a trapping insn, which means that we must count
   uses of a SET_DEST regardless because the insn can't be deleted here.  */
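/* For example, (set (reg 1) (plus (reg 1) (reg 2))) increments the
   count for reg 2 but not for reg 1, since deleting the SET would also
   remove that use of reg 1.  (Illustrative.)  */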

static void
count_reg_usage (rtx x, int *counts, rtx dest, int incr)
{
  enum rtx_code code;
  rtx note;
  const char *fmt;
  int i, j;

  if (x == 0)
    return;

  switch (code = GET_CODE (x))
    {
    case REG:
      if (x != dest)
	counts[REGNO (x)] += incr;
      return;

    case PC:
    case CC0:
    case CONST:
    case CONST_INT:
    case CONST_DOUBLE:
    case CONST_VECTOR:
    case SYMBOL_REF:
    case LABEL_REF:
      return;

    case CLOBBER:
      /* If we are clobbering a MEM, mark any registers inside the address
         as being used.  */
      if (MEM_P (XEXP (x, 0)))
	count_reg_usage (XEXP (XEXP (x, 0), 0), counts, NULL_RTX, incr);
      return;

    case SET:
      /* Unless we are setting a REG, count everything in SET_DEST.  */
      if (!REG_P (SET_DEST (x)))
	count_reg_usage (SET_DEST (x), counts, NULL_RTX, incr);
      count_reg_usage (SET_SRC (x), counts,
		       dest ? dest : SET_DEST (x),
		       incr);
      return;

    case CALL_INSN:
    case INSN:
    case JUMP_INSN:
      /* We expect dest to be NULL_RTX here.  If the insn may trap, mark
	 this fact by setting DEST to pc_rtx.  */
      if (flag_non_call_exceptions && may_trap_p (PATTERN (x)))
	dest = pc_rtx;
      if (code == CALL_INSN)
	count_reg_usage (CALL_INSN_FUNCTION_USAGE (x), counts, dest, incr);
      count_reg_usage (PATTERN (x), counts, dest, incr);

      /* Things used in a REG_EQUAL note aren't dead since the loop
	 optimizer may try to use them.  */

      note = find_reg_equal_equiv_note (x);
      if (note)
	{
	  rtx eqv = XEXP (note, 0);

	  if (GET_CODE (eqv) == EXPR_LIST)
	  /* This REG_EQUAL note describes the result of a function call.
	     Process all the arguments.  */
	    do
	      {
		count_reg_usage (XEXP (eqv, 0), counts, dest, incr);
		eqv = XEXP (eqv, 1);
	      }
	    while (eqv && GET_CODE (eqv) == EXPR_LIST);
	  else
	    count_reg_usage (eqv, counts, dest, incr);
	}
      return;

    case EXPR_LIST:
      if (REG_NOTE_KIND (x) == REG_EQUAL
	  || (REG_NOTE_KIND (x) != REG_NONNEG && GET_CODE (XEXP (x, 0)) == USE)
	  /* FUNCTION_USAGE expression lists may include (CLOBBER (mem /u)),
	     involving registers in the address.  */
	  || GET_CODE (XEXP (x, 0)) == CLOBBER)
	count_reg_usage (XEXP (x, 0), counts, NULL_RTX, incr);

      count_reg_usage (XEXP (x, 1), counts, NULL_RTX, incr);
      return;

    case ASM_OPERANDS:
      /* If the asm is volatile, then this insn cannot be deleted,
	 and so the inputs *must* be live.  */
      if (MEM_VOLATILE_P (x))
	dest = NULL_RTX;
      /* Iterate over just the inputs, not the constraints as well.  */
      for (i = ASM_OPERANDS_INPUT_LENGTH (x) - 1; i >= 0; i--)
	count_reg_usage (ASM_OPERANDS_INPUT (x, i), counts, dest, incr);
      return;

    case INSN_LIST:
      gcc_unreachable ();

    default:
      break;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
	count_reg_usage (XEXP (x, i), counts, dest, incr);
      else if (fmt[i] == 'E')
	for (j = XVECLEN (x, i) - 1; j >= 0; j--)
	  count_reg_usage (XVECEXP (x, i, j), counts, dest, incr);
    }
}

/* Return true if SET is live.  */
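/* For example, a no-op move such as (set (reg:SI 100) (reg:SI 100))
   (register number hypothetical) is never live, and a set of a pseudo
   whose count in COUNTS is zero is dead provided its source has no
   side effects.  */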
static bool
set_live_p (rtx set, rtx insn ATTRIBUTE_UNUSED, /* Only used with HAVE_cc0.  */
	    int *counts)
{
#ifdef HAVE_cc0
  rtx tem;
#endif

  if (set_noop_p (set))
    ;

#ifdef HAVE_cc0
  else if (GET_CODE (SET_DEST (set)) == CC0
	   && !side_effects_p (SET_SRC (set))
	   && ((tem = next_nonnote_insn (insn)) == 0
	       || !INSN_P (tem)
	       || !reg_referenced_p (cc0_rtx, PATTERN (tem))))
    return false;
#endif
  else if (!REG_P (SET_DEST (set))
	   || REGNO (SET_DEST (set)) < FIRST_PSEUDO_REGISTER
	   || counts[REGNO (SET_DEST (set))] != 0
	   || side_effects_p (SET_SRC (set)))
    return true;
  return false;
}

/* Return true if INSN is live.  */
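/* For a PARALLEL pattern, the insn is live if any of its SETs is
   live, or if it contains anything other than SETs, CLOBBERs and
   USEs; e.g. (register numbers hypothetical)

	(parallel [(set (reg:SI 100) (reg:SI 101))
		   (clobber (reg:CC 17))])

   is dead when register 100 is unused, since the CLOBBER alone does
   not keep the insn alive.  */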

static bool
insn_live_p (rtx insn, int *counts)
{
  int i;
  if (flag_non_call_exceptions && may_trap_p (PATTERN (insn)))
    return true;
  else if (GET_CODE (PATTERN (insn)) == SET)
    return set_live_p (PATTERN (insn), insn, counts);
  else if (GET_CODE (PATTERN (insn)) == PARALLEL)
    {
      for (i = XVECLEN (PATTERN (insn), 0) - 1; i >= 0; i--)
	{
	  rtx elt = XVECEXP (PATTERN (insn), 0, i);

	  if (GET_CODE (elt) == SET)
	    {
	      if (set_live_p (elt, insn, counts))
		return true;
	    }
	  else if (GET_CODE (elt) != CLOBBER && GET_CODE (elt) != USE)
	    return true;
	}
      return false;
    }
  else
    return true;
}

/* Return true if the libcall ending with INSN is dead as a whole.  */
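/* Schematically (this is a sketch of the note structure, not exact
   RTL), a libcall block looks like

	first insn:  REG_LIBCALL note pointing at the last insn
	    ...      insns computing the value
	last insn:   REG_RETVAL note, usually a reg->reg copy, with a
		     REG_EQUAL note giving the computed value

   If the copy's source can be replaced by the REG_EQUAL expression
   (or by that expression forced into memory), the insns computing the
   value lose their last use and the whole block becomes dead.  */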

static bool
dead_libcall_p (rtx insn, int *counts)
{
  rtx note, set, new;

  /* See if there's a REG_EQUAL note on this insn and try to
     replace the source with the REG_EQUAL expression.

     We assume that insns with REG_RETVALs can only be reg->reg
     copies at this point.  */
  note = find_reg_note (insn, REG_EQUAL, NULL_RTX);
  if (!note)
    return false;

  set = single_set (insn);
  if (!set)
    return false;

  new = simplify_rtx (XEXP (note, 0));
  if (!new)
    new = XEXP (note, 0);

  /* While changing insn, we must update the counts accordingly.  */
  count_reg_usage (insn, counts, NULL_RTX, -1);

  if (validate_change (insn, &SET_SRC (set), new, 0))
    {
      count_reg_usage (insn, counts, NULL_RTX, 1);
      remove_note (insn, find_reg_note (insn, REG_RETVAL, NULL_RTX));
      remove_note (insn, note);
      return true;
    }

  if (CONSTANT_P (new))
    {
      new = force_const_mem (GET_MODE (SET_DEST (set)), new);
      if (new && validate_change (insn, &SET_SRC (set), new, 0))
	{
	  count_reg_usage (insn, counts, NULL_RTX, 1);
	  remove_note (insn, find_reg_note (insn, REG_RETVAL, NULL_RTX));
	  remove_note (insn, note);
	  return true;
	}
    }

  count_reg_usage (insn, counts, NULL_RTX, 1);
  return false;
}

/* Scan all the insns and delete any that are dead; i.e., they store a register
   that is never used or they copy a register to itself.

   This is used to remove insns made obviously dead by cse, loop or other
   optimizations.  It improves the heuristics in the loop optimizer,
   since that pass then won't try to move dead invariants out of loops
   or create general induction variables (givs) for dead quantities.
   The remaining passes of the compilation are also sped up.  */
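/* For example (register numbers are hypothetical), given

	(set (reg:SI 100) (reg:SI 101))
	(set (reg:SI 102) (reg:SI 100))

   with register 102 never used afterwards, the backward scan deletes
   the second insn, which removes the only use of register 100 and
   allows the first insn to be deleted on the same pass.  */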

int
delete_trivially_dead_insns (rtx insns, int nreg)
{
  int *counts;
  rtx insn, prev;
  int in_libcall = 0, dead_libcall = 0;
  int ndead = 0;

  timevar_push (TV_DELETE_TRIVIALLY_DEAD);
  /* First count the number of times each register is used.  */
  counts = XCNEWVEC (int, nreg);
  for (insn = insns; insn; insn = NEXT_INSN (insn))
    if (INSN_P (insn))
      count_reg_usage (insn, counts, NULL_RTX, 1);

  /* Go from the last insn to the first and delete insns that only set unused
     registers or copy a register to itself.  As we delete an insn, remove
     usage counts for registers it uses.

     The first jump optimization pass may leave a real insn as the last
     insn in the function.  We must not skip that insn or we may end
     up deleting code that is not really dead.  */
  for (insn = get_last_insn (); insn; insn = prev)
    {
      int live_insn = 0;

      prev = PREV_INSN (insn);
      if (!INSN_P (insn))
	continue;

      /* Don't delete any insns that are part of a libcall block unless
	 we can delete the whole libcall block.

	 Flow or loop might get confused if we did that.  Remember
	 that we are scanning backwards.  */
      if (find_reg_note (insn, REG_RETVAL, NULL_RTX))
	{
	  in_libcall = 1;
	  live_insn = 1;
	  dead_libcall = dead_libcall_p (insn, counts);
	}
      else if (in_libcall)
	live_insn = ! dead_libcall;
      else
	live_insn = insn_live_p (insn, counts);

      /* If this is a dead insn, delete it and show registers in it aren't
	 being used.  */

      if (! live_insn)
	{
	  count_reg_usage (insn, counts, NULL_RTX, -1);
	  delete_insn_and_edges (insn);
	  ndead++;
	}

      if (in_libcall && find_reg_note (insn, REG_LIBCALL, NULL_RTX))
	{
	  in_libcall = 0;
	  dead_libcall = 0;
	}
    }

  if (dump_file && ndead)
    fprintf (dump_file, "Deleted %i trivially dead insns\n",
	     ndead);
  /* Clean up.  */
  free (counts);
  timevar_pop (TV_DELETE_TRIVIALLY_DEAD);
  return ndead;
}

/* This function is called via for_each_rtx.  The argument, NEWREG, is
   a condition code register with the desired mode.  If we are looking
   at the same register in a different mode, replace it with
   NEWREG.  */
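/* For example, if NEWREG is (reg:CCZ 17) (a hypothetical flags
   register), every occurrence of that register in another mode, such
   as (reg:CC 17), is queued to be replaced by (reg:CCZ 17); other
   registers, and register 17 already in CCZmode, are left alone.  */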

static int
cse_change_cc_mode (rtx *loc, void *data)
{
  struct change_cc_mode_args* args = (struct change_cc_mode_args*)data;

  if (*loc
      && REG_P (*loc)
      && REGNO (*loc) == REGNO (args->newreg)
      && GET_MODE (*loc) != GET_MODE (args->newreg))
    {
      validate_change (args->insn, loc, args->newreg, 1);
      
      return -1;
    }
  return 0;
}

/* Change the mode of any reference to the register REGNO (NEWREG) to
   GET_MODE (NEWREG) in INSN.  */

static void
cse_change_cc_mode_insn (rtx insn, rtx newreg)
{
  struct change_cc_mode_args args;
  int success;

  if (!INSN_P (insn))
    return;

  args.insn = insn;
  args.newreg = newreg;
  
  for_each_rtx (&PATTERN (insn), cse_change_cc_mode, &args);
  for_each_rtx (&REG_NOTES (insn), cse_change_cc_mode, &args);
  
  /* If the following assertion is triggered, there is most probably
     something wrong with the cc_modes_compatible back end function.
     CC modes can only be considered compatible if the insn - with the
     mode replaced by any of the compatible modes - can still be
     recognized.  */
  success = apply_change_group ();
  gcc_assert (success);
}

/* Change the mode of any reference to the register REGNO (NEWREG) to
   GET_MODE (NEWREG), starting at START.  Stop before END.  Stop at
   any instruction which modifies NEWREG.  */

static void
cse_change_cc_mode_insns (rtx start, rtx end, rtx newreg)
{
  rtx insn;

  for (insn = start; insn != end; insn = NEXT_INSN (insn))
    {
      if (! INSN_P (insn))
	continue;

      if (reg_set_p (newreg, insn))
	return;

      cse_change_cc_mode_insn (insn, newreg);
    }
}

/* BB is a basic block which finishes with CC_REG as a condition code
   register which is set to CC_SRC.  Look through the successors of BB
   to find blocks which have a single predecessor (i.e., this one),
   and look through those blocks for an assignment to CC_REG which is
   equivalent to CC_SRC.  CAN_CHANGE_MODE indicates whether we are
   permitted to change the mode of CC_SRC to a compatible mode.  This
   returns VOIDmode if no equivalent assignments were found.
   Otherwise it returns the mode which CC_SRC should wind up with.

   The main complexity in this function is handling the mode issues.
   We may have more than one duplicate which we can eliminate, and we
   try to find a mode which will work for multiple duplicates.  */
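/* As a sketch (modes and register numbers are hypothetical), if BB
   ends after

	(set (reg:CCZ 17) (compare:CCZ (reg:SI 100) (const_int 0)))

   and a successor block with no other predecessor starts with

	(set (reg:CCGC 17) (compare:CCGC (reg:SI 100) (const_int 0)))

   then, provided the target's cc_modes_compatible hook reports a
   common mode for CCZmode and CCGCmode, the second comparison is
   deleted and the surviving one is rewritten in the common mode.  */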

static enum machine_mode
cse_cc_succs (basic_block bb, rtx cc_reg, rtx cc_src, bool can_change_mode)
{
  bool found_equiv;
  enum machine_mode mode;
  unsigned int insn_count;
  edge e;
  rtx insns[2];
  enum machine_mode modes[2];
  rtx last_insns[2];
  unsigned int i;
  rtx newreg;
  edge_iterator ei;

  /* We expect to have two successors.  Look at both before picking
     the final mode for the comparison.  If we have more successors
     (i.e., some sort of table jump, although that seems unlikely),
     then we require all beyond the first two to use the same
     mode.  */

  found_equiv = false;
  mode = GET_MODE (cc_src);
  insn_count = 0;
  FOR_EACH_EDGE (e, ei, bb->succs)
    {
      rtx insn;
      rtx end;

      if (e->flags & EDGE_COMPLEX)
	continue;

      if (EDGE_COUNT (e->dest->preds) != 1
	  || e->dest == EXIT_BLOCK_PTR)
	continue;

      end = NEXT_INSN (BB_END (e->dest));
      for (insn = BB_HEAD (e->dest); insn != end; insn = NEXT_INSN (insn))
	{
	  rtx set;

	  if (! INSN_P (insn))
	    continue;

	  /* If CC_SRC is modified, we have to stop looking for
	     something which uses it.  */
	  if (modified_in_p (cc_src, insn))
	    break;

	  /* Check whether INSN sets CC_REG to CC_SRC.  */
	  set = single_set (insn);
	  if (set
	      && REG_P (SET_DEST (set))
	      && REGNO (SET_DEST (set)) == REGNO (cc_reg))
	    {
	      bool found;
	      enum machine_mode set_mode;
	      enum machine_mode comp_mode;

	      found = false;
	      set_mode = GET_MODE (SET_SRC (set));
	      comp_mode = set_mode;
	      if (rtx_equal_p (cc_src, SET_SRC (set)))
		found = true;
	      else if (GET_CODE (cc_src) == COMPARE
		       && GET_CODE (SET_SRC (set)) == COMPARE
		       && mode != set_mode
		       && rtx_equal_p (XEXP (cc_src, 0),
				       XEXP (SET_SRC (set), 0))
		       && rtx_equal_p (XEXP (cc_src, 1),
				       XEXP (SET_SRC (set), 1)))
		{
		  comp_mode = targetm.cc_modes_compatible (mode, set_mode);
		  if (comp_mode != VOIDmode
		      && (can_change_mode || comp_mode == mode))
		    found = true;
		}

	      if (found)
		{
		  found_equiv = true;
		  if (insn_count < ARRAY_SIZE (insns))
		    {
		      insns[insn_count] = insn;
		      modes[insn_count] = set_mode;
		      last_insns[insn_count] = end;
		      ++insn_count;

		      if (mode != comp_mode)
			{
			  gcc_assert (can_change_mode);
			  mode = comp_mode;

			  /* The modified insn will be re-recognized later.  */
			  PUT_MODE (cc_src, mode);
			}
		    }
		  else
		    {
		      if (set_mode != mode)
			{
			  /* We found a matching expression in the
			     wrong mode, but we don't have room to
			     store it in the array.  Punt.  This case
			     should be rare.  */
			  break;
			}
		      /* INSN sets CC_REG to a value equal to CC_SRC
			 with the right mode.  We can simply delete
			 it.  */
		      delete_insn (insn);
		    }

		  /* We found an instruction to delete.  Keep looking,
		     in the hopes of finding a three-way jump.  */
		  continue;
		}

	      /* We found an instruction which sets the condition
		 code, so don't look any farther.  */
	      break;
	    }

	  /* If INSN sets CC_REG in some other way, don't look any
	     farther.  */
	  if (reg_set_p (cc_reg, insn))
	    break;
	}

      /* If we fell off the bottom of the block, we can keep looking
	 through successors.  We pass CAN_CHANGE_MODE as false because
	 we aren't prepared to handle compatibility between the
	 further blocks and this block.  */
      if (insn == end)
	{
	  enum machine_mode submode;

	  submode = cse_cc_succs (e->dest, cc_reg, cc_src, false);
	  if (submode != VOIDmode)
	    {
	      gcc_assert (submode == mode);
	      found_equiv = true;
	      can_change_mode = false;
	    }
	}
    }

  if (! found_equiv)
    return VOIDmode;

  /* Now INSN_COUNT is the number of instructions we found which set
     CC_REG to a value equivalent to CC_SRC.  The instructions are in
     INSNS.  The modes used by those instructions are in MODES.  */

  newreg = NULL_RTX;
  for (i = 0; i < insn_count; ++i)
    {
      if (modes[i] != mode)
	{
	  /* We need to change the mode of CC_REG in INSNS[i] and
	     subsequent instructions.  */
	  if (! newreg)
	    {
	      if (GET_MODE (cc_reg) == mode)
		newreg = cc_reg;
	      else
		newreg = gen_rtx_REG (mode, REGNO (cc_reg));
	    }
	  cse_change_cc_mode_insns (NEXT_INSN (insns[i]), last_insns[i],
				    newreg);
	}

      delete_insn (insns[i]);
    }

  return mode;
}

/* If we have a fixed condition code register (or two), walk through
   the instructions and try to eliminate duplicate assignments.  */
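/* For instance, if a block ends with a conditional jump on a flags
   register that was set from (compare (reg:SI 100) (const_int 0))
   (register number hypothetical), and a successor reachable only from
   this block recomputes the flags from the same comparison, the
   recomputation is a duplicate which cse_cc_succs deletes.  */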

static void
cse_condition_code_reg (void)
{
  unsigned int cc_regno_1;
  unsigned int cc_regno_2;
  rtx cc_reg_1;
  rtx cc_reg_2;
  basic_block bb;

  if (! targetm.fixed_condition_code_regs (&cc_regno_1, &cc_regno_2))
    return;

  cc_reg_1 = gen_rtx_REG (CCmode, cc_regno_1);
  if (cc_regno_2 != INVALID_REGNUM)
    cc_reg_2 = gen_rtx_REG (CCmode, cc_regno_2);
  else
    cc_reg_2 = NULL_RTX;

  FOR_EACH_BB (bb)
    {
      rtx last_insn;
      rtx cc_reg;
      rtx insn;
      rtx cc_src_insn;
      rtx cc_src;
      enum machine_mode mode;
      enum machine_mode orig_mode;

      /* Look for blocks which end with a conditional jump based on a
	 condition code register.  Then look for the instruction which
	 sets the condition code register.  Then look through the
	 successor blocks for instructions which set the condition
	 code register to the same value.  There are other possible
	 uses of the condition code register, but these are by far the
	 most common and the ones which we are most likely to be able
	 to optimize.  */

      last_insn = BB_END (bb);
      if (!JUMP_P (last_insn))
	continue;

      if (reg_referenced_p (cc_reg_1, PATTERN (last_insn)))
	cc_reg = cc_reg_1;
      else if (cc_reg_2 && reg_referenced_p (cc_reg_2, PATTERN (last_insn)))
	cc_reg = cc_reg_2;
      else
	continue;

      cc_src_insn = NULL_RTX;
      cc_src = NULL_RTX;
      for (insn = PREV_INSN (last_insn);
	   insn && insn != PREV_INSN (BB_HEAD (bb));
	   insn = PREV_INSN (insn))
	{
	  rtx set;

	  if (! INSN_P (insn))
	    continue;
	  set = single_set (insn);
	  if (set
	      && REG_P (SET_DEST (set))
	      && REGNO (SET_DEST (set)) == REGNO (cc_reg))
	    {
	      cc_src_insn = insn;
	      cc_src = SET_SRC (set);
	      break;
	    }
	  else if (reg_set_p (cc_reg, insn))
	    break;
	}

      if (! cc_src_insn)
	continue;

      if (modified_between_p (cc_src, cc_src_insn, NEXT_INSN (last_insn)))
	continue;

      /* Now CC_REG is a condition code register used for a
	 conditional jump at the end of the block, and CC_SRC, in
	 CC_SRC_INSN, is the value to which that condition code
	 register is set, and CC_SRC is still meaningful at the end of
	 the basic block.  */

      orig_mode = GET_MODE (cc_src);
      mode = cse_cc_succs (bb, cc_reg, cc_src, true);
      if (mode != VOIDmode)
	{
	  gcc_assert (mode == GET_MODE (cc_src));
	  if (mode != orig_mode)
	    {
	      rtx newreg = gen_rtx_REG (mode, REGNO (cc_reg));

	      cse_change_cc_mode_insn (cc_src_insn, newreg);

	      /* Do the same in the following insns that use the
		 current value of CC_REG within BB.  */
	      cse_change_cc_mode_insns (NEXT_INSN (cc_src_insn),
					NEXT_INSN (last_insn),
					newreg);
	    }
	}
    }
}


/* Perform common subexpression elimination.  Nonzero value from
   `cse_main' means that jumps were simplified and some code may now
   be unreachable, so do jump optimization again.  */
static bool
gate_handle_cse (void)
{
  return optimize > 0;
}

static unsigned int
rest_of_handle_cse (void)
{
  int tem;

  if (dump_file)
    dump_flow_info (dump_file, dump_flags);

  reg_scan (get_insns (), max_reg_num ());

  tem = cse_main (get_insns (), max_reg_num ());
  if (tem)
    rebuild_jump_labels (get_insns ());
  if (purge_all_dead_edges ())
    delete_unreachable_blocks ();

  delete_trivially_dead_insns (get_insns (), max_reg_num ());

  /* If we are not running more CSE passes, then we are no longer
     expecting CSE to be run.  But always rerun it in a cheap mode.  */
  cse_not_expected = !flag_rerun_cse_after_loop && !flag_gcse;

  if (tem)
    delete_dead_jumptables ();

  if (tem || optimize > 1)
    cleanup_cfg (CLEANUP_EXPENSIVE);
  return 0;
}

struct tree_opt_pass pass_cse =
{
  "cse1",                               /* name */
  gate_handle_cse,                      /* gate */
  rest_of_handle_cse,                   /* execute */
  NULL,                                 /* sub */
  NULL,                                 /* next */
  0,                                    /* static_pass_number */
  TV_CSE,                               /* tv_id */
  0,                                    /* properties_required */
  0,                                    /* properties_provided */
  0,                                    /* properties_destroyed */
  0,                                    /* todo_flags_start */
  TODO_dump_func |
  TODO_ggc_collect,                     /* todo_flags_finish */
  's'                                   /* letter */
};


static bool
gate_handle_cse2 (void)
{
  return optimize > 0 && flag_rerun_cse_after_loop;
}

/* Run second CSE pass after loop optimizations.  */
static unsigned int
rest_of_handle_cse2 (void)
{
  int tem;

  if (dump_file)
    dump_flow_info (dump_file, dump_flags);

  tem = cse_main (get_insns (), max_reg_num ());

  /* Run a pass to eliminate duplicated assignments to condition code
     registers.  We have to run this after bypass_jumps, because
     running it earlier makes it harder for that pass to determine
     whether a jump can be bypassed safely.  */
  cse_condition_code_reg ();

  purge_all_dead_edges ();
  delete_trivially_dead_insns (get_insns (), max_reg_num ());

  if (tem)
    {
      timevar_push (TV_JUMP);
      rebuild_jump_labels (get_insns ());
      delete_dead_jumptables ();
      cleanup_cfg (CLEANUP_EXPENSIVE);
      timevar_pop (TV_JUMP);
    }
  reg_scan (get_insns (), max_reg_num ());
  cse_not_expected = 1;
  return 0;
}


struct tree_opt_pass pass_cse2 =
{
  "cse2",                               /* name */
  gate_handle_cse2,                     /* gate */
  rest_of_handle_cse2,                  /* execute */
  NULL,                                 /* sub */
  NULL,                                 /* next */
  0,                                    /* static_pass_number */
  TV_CSE2,                              /* tv_id */
  0,                                    /* properties_required */
  0,                                    /* properties_provided */
  0,                                    /* properties_destroyed */
  0,                                    /* todo_flags_start */
  TODO_dump_func |
  TODO_ggc_collect,                     /* todo_flags_finish */
  't'                                   /* letter */
};

