Diffstat (limited to 'contrib/gcc/expr.c')

 -rw-r--r--   contrib/gcc/expr.c | 3648
 1 file changed, 1148 insertions(+), 2500 deletions(-)
diff --git a/contrib/gcc/expr.c b/contrib/gcc/expr.c
index 9f7c284..aa80ae6 100644
--- a/contrib/gcc/expr.c
+++ b/contrib/gcc/expr.c
@@ -1,6 +1,6 @@
 /* Convert tree expression to rtl instructions, for GNU compiler.
    Copyright (C) 1988, 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999,
-   2000, 2001, 2002, 2003 Free Software Foundation, Inc.
+   2000, 2001, 2002, 2003, 2004 Free Software Foundation, Inc.

 This file is part of GCC.

@@ -21,6 +21,8 @@ Software Foundation, 59 Temple Place - Suite 330, Boston, MA

 #include "config.h"
 #include "system.h"
+#include "coretypes.h"
+#include "tm.h"
 #include "machmode.h"
 #include "real.h"
 #include "rtl.h"
@@ -45,6 +47,7 @@ Software Foundation, 59 Temple Place - Suite 330, Boston, MA
 #include "langhooks.h"
 #include "intl.h"
 #include "tm_p.h"
+#include "target.h"

 /* Decide whether a function's arguments should be processed
    from first to last or from last to first.
@@ -93,7 +96,7 @@ Software Foundation, 59 Temple Place - Suite 330, Boston, MA
 int cse_not_expected;

 /* Chain of pending expressions for PLACEHOLDER_EXPR to replace.  */
-static tree placeholder_list = 0;
+tree placeholder_list = 0;

 /* This structure is used by move_by_pieces to describe the move to
    be performed.  */
@@ -123,61 +126,53 @@ struct store_by_pieces
   int explicit_inc_to;
   unsigned HOST_WIDE_INT len;
   HOST_WIDE_INT offset;
-  rtx (*constfun) PARAMS ((PTR, HOST_WIDE_INT, enum machine_mode));
-  PTR constfundata;
+  rtx (*constfun) (void *, HOST_WIDE_INT, enum machine_mode);
+  void *constfundata;
   int reverse;
 };

-static rtx enqueue_insn PARAMS ((rtx, rtx));
-static unsigned HOST_WIDE_INT move_by_pieces_ninsns
-                                PARAMS ((unsigned HOST_WIDE_INT,
-                                         unsigned int));
-static void move_by_pieces_1 PARAMS ((rtx (*) (rtx, ...), enum machine_mode,
-                                      struct move_by_pieces *));
-static bool block_move_libcall_safe_for_call_parm PARAMS ((void));
-static bool emit_block_move_via_movstr PARAMS ((rtx, rtx, rtx, unsigned));
-static rtx emit_block_move_via_libcall PARAMS ((rtx, rtx, rtx));
-static tree emit_block_move_libcall_fn PARAMS ((int));
-static void emit_block_move_via_loop PARAMS ((rtx, rtx, rtx, unsigned));
-static rtx clear_by_pieces_1 PARAMS ((PTR, HOST_WIDE_INT,
-                                      enum machine_mode));
-static void clear_by_pieces PARAMS ((rtx, unsigned HOST_WIDE_INT,
-                                     unsigned int));
-static void store_by_pieces_1 PARAMS ((struct store_by_pieces *,
-                                       unsigned int));
-static void store_by_pieces_2 PARAMS ((rtx (*) (rtx, ...),
-                                       enum machine_mode,
-                                       struct store_by_pieces *));
-static bool clear_storage_via_clrstr PARAMS ((rtx, rtx, unsigned));
-static rtx clear_storage_via_libcall PARAMS ((rtx, rtx));
-static tree clear_storage_libcall_fn PARAMS ((int));
-static rtx compress_float_constant PARAMS ((rtx, rtx));
-static rtx get_subtarget PARAMS ((rtx));
-static int is_zeros_p PARAMS ((tree));
-static int mostly_zeros_p PARAMS ((tree));
-static void store_constructor_field PARAMS ((rtx, unsigned HOST_WIDE_INT,
-                                             HOST_WIDE_INT, enum machine_mode,
-                                             tree, tree, int, int));
-static void store_constructor PARAMS ((tree, rtx, int, HOST_WIDE_INT));
-static rtx store_field PARAMS ((rtx, HOST_WIDE_INT,
-                                HOST_WIDE_INT, enum machine_mode,
-                                tree, enum machine_mode, int, tree,
-                                int));
-static rtx var_rtx PARAMS ((tree));
-static HOST_WIDE_INT highest_pow2_factor PARAMS ((tree));
-static HOST_WIDE_INT highest_pow2_factor_for_type PARAMS ((tree, tree));
-static int is_aligning_offset PARAMS ((tree, tree));
-static rtx expand_increment PARAMS ((tree, int, int));
-static void do_jump_by_parts_greater PARAMS ((tree, int, rtx, rtx));
-static void do_jump_by_parts_equality PARAMS ((tree, rtx, rtx));
-static void do_compare_and_jump PARAMS ((tree, enum rtx_code, enum rtx_code,
-                                         rtx, rtx));
-static rtx do_store_flag PARAMS ((tree, rtx, enum machine_mode, int));
+static rtx enqueue_insn (rtx, rtx);
+static unsigned HOST_WIDE_INT move_by_pieces_ninsns (unsigned HOST_WIDE_INT,
+                                                     unsigned int);
+static void move_by_pieces_1 (rtx (*) (rtx, ...), enum machine_mode,
+                              struct move_by_pieces *);
+static bool block_move_libcall_safe_for_call_parm (void);
+static bool emit_block_move_via_movstr (rtx, rtx, rtx, unsigned);
+static rtx emit_block_move_via_libcall (rtx, rtx, rtx);
+static tree emit_block_move_libcall_fn (int);
+static void emit_block_move_via_loop (rtx, rtx, rtx, unsigned);
+static rtx clear_by_pieces_1 (void *, HOST_WIDE_INT, enum machine_mode);
+static void clear_by_pieces (rtx, unsigned HOST_WIDE_INT, unsigned int);
+static void store_by_pieces_1 (struct store_by_pieces *, unsigned int);
+static void store_by_pieces_2 (rtx (*) (rtx, ...), enum machine_mode,
+                               struct store_by_pieces *);
+static bool clear_storage_via_clrstr (rtx, rtx, unsigned);
+static rtx clear_storage_via_libcall (rtx, rtx);
+static tree clear_storage_libcall_fn (int);
+static rtx compress_float_constant (rtx, rtx);
+static rtx get_subtarget (rtx);
+static int is_zeros_p (tree);
+static void store_constructor_field (rtx, unsigned HOST_WIDE_INT,
+                                     HOST_WIDE_INT, enum machine_mode,
+                                     tree, tree, int, int);
+static void store_constructor (tree, rtx, int, HOST_WIDE_INT);
+static rtx store_field (rtx, HOST_WIDE_INT, HOST_WIDE_INT, enum machine_mode,
+                        tree, enum machine_mode, int, tree, int);
+static rtx var_rtx (tree);
+
+static unsigned HOST_WIDE_INT highest_pow2_factor (tree);
+static unsigned HOST_WIDE_INT highest_pow2_factor_for_target (tree, tree);
+
+static int is_aligning_offset (tree, tree);
+static rtx expand_increment (tree, int, int);
+static void expand_operands (tree, tree, rtx, rtx*, rtx*,
+                             enum expand_modifier);
+static rtx do_store_flag (tree, rtx, enum machine_mode, int);
 #ifdef PUSH_ROUNDING
-static void emit_single_push_insn PARAMS ((enum machine_mode, rtx, tree));
+static void emit_single_push_insn (enum machine_mode, rtx, tree);
 #endif
-static void do_tablejump PARAMS ((rtx, enum machine_mode, rtx, rtx, rtx));
-static rtx const_vector_from_tree PARAMS ((tree));
+static void do_tablejump (rtx, enum machine_mode, rtx, rtx, rtx);
+static rtx const_vector_from_tree (tree);
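[The bulk of the prototype hunk above is mechanical: GCC 3.4 dropped support for K&R compilers, so every declaration loses its PARAMS wrapper and PTR becomes plain void *. A minimal sketch of the old idiom, assuming the macro was spelled roughly as in libiberty's ansidecl.h (hedged reconstruction, not GCC source):

    #ifdef __STDC__
    #define PARAMS(args) args   /* ISO C: keep the prototype        */
    #else
    #define PARAMS(args) ()     /* K&R C: empty parameter list      */
    #endif

    static int add_one PARAMS ((int));            /* old declaration style */
    static int add_one (int x) { return x + 1; }  /* new ISO style         */
]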

 /* Record for each mode whether we can move a register directly to or
    from an object of that mode in memory.  If we can't, we won't try
@@ -190,18 +185,6 @@ static char direct_store[NUM_MACHINE_MODES];

 static bool float_extend_from_mem[NUM_MACHINE_MODES][NUM_MACHINE_MODES];

-/* If a memory-to-memory move would take MOVE_RATIO or more simple
-   move-instruction sequences, we will do a movstr or libcall instead.  */
-
-#ifndef MOVE_RATIO
-#if defined (HAVE_movstrqi) || defined (HAVE_movstrhi) || defined (HAVE_movstrsi) || defined (HAVE_movstrdi) || defined (HAVE_movstrti)
-#define MOVE_RATIO 2
-#else
-/* If we are optimizing for space (-Os), cut down the default move ratio.  */
-#define MOVE_RATIO (optimize_size ? 3 : 15)
-#endif
-#endif
-
 /* This macro is used to determine whether move_by_pieces should be
    called to perform a structure copy.  */
 #ifndef MOVE_BY_PIECES_P
@@ -209,18 +192,6 @@ static bool float_extend_from_mem[NUM_MACHINE_MODES][NUM_MACHINE_MODES];
   (move_by_pieces_ninsns (SIZE, ALIGN) < (unsigned int) MOVE_RATIO)
 #endif

-/* If a clear memory operation would take CLEAR_RATIO or more simple
-   move-instruction sequences, we will do a clrstr or libcall instead.  */
-
-#ifndef CLEAR_RATIO
-#if defined (HAVE_clrstrqi) || defined (HAVE_clrstrhi) || defined (HAVE_clrstrsi) || defined (HAVE_clrstrdi) || defined (HAVE_clrstrti)
-#define CLEAR_RATIO 2
-#else
-/* If we are optimizing for space, cut down the default clear ratio.  */
-#define CLEAR_RATIO (optimize_size ? 3 : 15)
-#endif
-#endif
-
 /* This macro is used to determine whether clear_by_pieces should be
    called to clear storage.  */
 #ifndef CLEAR_BY_PIECES_P
@@ -228,12 +199,27 @@ static bool float_extend_from_mem[NUM_MACHINE_MODES][NUM_MACHINE_MODES];
   (move_by_pieces_ninsns (SIZE, ALIGN) < (unsigned int) CLEAR_RATIO)
 #endif

+/* This macro is used to determine whether store_by_pieces should be
+   called to "memset" storage with byte values other than zero, or
+   to "memcpy" storage when the source is a constant string.  */
+#ifndef STORE_BY_PIECES_P
+#define STORE_BY_PIECES_P(SIZE, ALIGN) MOVE_BY_PIECES_P (SIZE, ALIGN)
+#endif
+
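[All three *_BY_PIECES_P macros defer to move_by_pieces_ninsns, which estimates how many single move instructions a by-pieces expansion would take and compares that against the target's MOVE_RATIO or CLEAR_RATIO. A standalone, simplified sketch of the counting idea; max_piece stands in for the MOVE_MAX and alignment caps, and the real routine also checks per-mode insn availability:

    #include <stddef.h>

    /* Greedily cover LEN bytes with power-of-two pieces of at most
       MAX_PIECE bytes; the caller compares the count with MOVE_RATIO.  */
    static unsigned int
    ninsns_sketch (size_t len, size_t max_piece)
    {
      unsigned int n = 0;
      size_t piece;

      for (piece = max_piece; piece >= 1; piece >>= 1)
        {
          n += len / piece;   /* pieces of this size that fit          */
          len %= piece;       /* remainder falls to narrower modes     */
        }
      return n;               /* len == 15, max_piece == 8  ->  4 insns */
    }
]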
 /* This array records the insn_code of insns to perform block moves.  */
 enum insn_code movstr_optab[NUM_MACHINE_MODES];

 /* This array records the insn_code of insns to perform block clears.  */
 enum insn_code clrstr_optab[NUM_MACHINE_MODES];

+/* These arrays record the insn_code of two different kinds of insns
+   to perform block compares.  */
+enum insn_code cmpstr_optab[NUM_MACHINE_MODES];
+enum insn_code cmpmem_optab[NUM_MACHINE_MODES];
+
+/* Stack of EXPR_WITH_FILE_LOCATION nested expressions.  */
+struct file_stack *expr_wfl_stack;
+
 /* SLOW_UNALIGNED_ACCESS is nonzero if unaligned accesses are very slow.  */

 #ifndef SLOW_UNALIGNED_ACCESS
@@ -244,7 +230,7 @@ enum insn_code clrstr_optab[NUM_MACHINE_MODES];
    directly in memory and to initialize the block move optab.  */

 void
-init_expr_once ()
+init_expr_once (void)
 {
   rtx insn, pat;
   enum machine_mode mode;
@@ -337,23 +323,15 @@ init_expr_once ()
 /* This is run at the start of compiling a function.  */

 void
-init_expr ()
+init_expr (void)
 {
-  cfun->expr = (struct expr_status *) ggc_alloc (sizeof (struct expr_status));
-
-  pending_chain = 0;
-  pending_stack_adjust = 0;
-  stack_pointer_delta = 0;
-  inhibit_defer_pop = 0;
-  saveregs_value = 0;
-  apply_args_value = 0;
-  forced_labels = 0;
+  cfun->expr = ggc_alloc_cleared (sizeof (struct expr_status));
 }

 /* Small sanity check that the queue is empty at the end of a function.  */

 void
-finish_expr_for_function ()
+finish_expr_for_function (void)
 {
   if (pending_chain)
     abort ();
@@ -370,8 +348,7 @@ finish_expr_for_function ()
    where you want to guarantee the pre-incrementation value of VAR.  */

 static rtx
-enqueue_insn (var, body)
-     rtx var, body;
+enqueue_insn (rtx var, rtx body)
 {
   pending_chain = gen_rtx_QUEUED (GET_MODE (var), var, NULL_RTX, NULL_RTX,
                                   body, pending_chain);
@@ -394,9 +371,7 @@ enqueue_insn (var, body)
    If the queue is flushed in between, incorrect code will result.  */

 rtx
-protect_from_queue (x, modify)
-     rtx x;
-     int modify;
+protect_from_queue (rtx x, int modify)
 {
   RTX_CODE code = GET_CODE (x);

@@ -480,8 +455,7 @@ protect_from_queue (x, modify)
    since memory addresses generally contain only those.  */

 int
-queued_subexp_p (x)
-     rtx x;
+queued_subexp_p (rtx x)
 {
   enum rtx_code code = GET_CODE (x);
   switch (code)
@@ -500,13 +474,30 @@ queued_subexp_p (x)
     }
 }

-/* Perform all the pending incrementations.  */
+/* Retrieve a mark on the queue.  */
+
+static rtx
+mark_queue (void)
+{
+  return pending_chain;
+}

-void
-emit_queue ()
+/* Perform all the pending incrementations that have been enqueued
+   after MARK was retrieved.  If MARK is null, perform all the
+   pending incrementations.  */
+
+static void
+emit_insns_enqueued_after_mark (rtx mark)
 {
   rtx p;
-  while ((p = pending_chain))
+
+  /* The marked incrementation may have been emitted in the meantime
+     through a call to emit_queue.  In this case, the mark is not valid
+     anymore so do nothing.  */
+  if (mark && ! QUEUED_BODY (mark))
+    return;
+
+  while ((p = pending_chain) != mark)
     {
       rtx body = QUEUED_BODY (p);

@@ -533,9 +524,18 @@ emit_queue ()
           break;
         }

+      QUEUED_BODY (p) = 0;
       pending_chain = QUEUED_NEXT (p);
     }
 }
+
+/* Perform all the pending incrementations.  */
+
+void
+emit_queue (void)
+{
+  emit_insns_enqueued_after_mark (NULL_RTX);
+}
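[mark_queue and emit_insns_enqueued_after_mark let a caller checkpoint the pending-increment queue and later flush only what was enqueued after the checkpoint; emit_queue becomes the degenerate flush-everything case. The control flow, reduced to a self-contained linked-list analogy (QUEUED rtxes play the role of struct pending here; this is not GCC source):

    struct pending { struct pending *next; int body; };
    static struct pending *pending_chain;

    static struct pending *mark (void) { return pending_chain; }

    /* Pop and emit entries down to, but not including, M; a null M
       drains the whole queue, which is all emit_queue now does.  */
    static void
    flush_after_mark (struct pending *m)
    {
      struct pending *p;
      while ((p = pending_chain) != m)
        {
          /* ... emit p->body here ... */
          pending_chain = p->next;
        }
    }
]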

 /* Copy data from FROM to TO, where the machine modes are not the same.
    Both modes may be integer, or both may be floating.
@@ -543,9 +543,7 @@ emit_queue ()
    This causes zero-extension instead of sign-extension.  */

 void
-convert_move (to, from, unsignedp)
-     rtx to, from;
-     int unsignedp;
+convert_move (rtx to, rtx from, int unsignedp)
 {
   enum machine_mode to_mode = GET_MODE (to);
   enum machine_mode from_mode = GET_MODE (from);
@@ -598,248 +596,39 @@ convert_move (to, from, unsignedp)
       return;
     }

-  if (to_real != from_real)
-    abort ();
+  if (GET_CODE (to) == CONCAT && GET_CODE (from) == CONCAT)
+    {
+      convert_move (XEXP (to, 0), XEXP (from, 0), unsignedp);
+      convert_move (XEXP (to, 1), XEXP (from, 1), unsignedp);
+      return;
+    }

   if (to_real)
     {
       rtx value, insns;
+      convert_optab tab;

-      if (GET_MODE_BITSIZE (from_mode) < GET_MODE_BITSIZE (to_mode))
-        {
-          /* Try converting directly if the insn is supported.  */
-          if ((code = can_extend_p (to_mode, from_mode, 0))
-              != CODE_FOR_nothing)
-            {
-              emit_unop_insn (code, to, from, UNKNOWN);
-              return;
-            }
-        }
-
-#ifdef HAVE_trunchfqf2
-      if (HAVE_trunchfqf2 && from_mode == HFmode && to_mode == QFmode)
-        {
-          emit_unop_insn (CODE_FOR_trunchfqf2, to, from, UNKNOWN);
-          return;
-        }
-#endif
-#ifdef HAVE_trunctqfqf2
-      if (HAVE_trunctqfqf2 && from_mode == TQFmode && to_mode == QFmode)
-        {
-          emit_unop_insn (CODE_FOR_trunctqfqf2, to, from, UNKNOWN);
-          return;
-        }
-#endif
-#ifdef HAVE_truncsfqf2
-      if (HAVE_truncsfqf2 && from_mode == SFmode && to_mode == QFmode)
-        {
-          emit_unop_insn (CODE_FOR_truncsfqf2, to, from, UNKNOWN);
-          return;
-        }
-#endif
-#ifdef HAVE_truncdfqf2
-      if (HAVE_truncdfqf2 && from_mode == DFmode && to_mode == QFmode)
-        {
-          emit_unop_insn (CODE_FOR_truncdfqf2, to, from, UNKNOWN);
-          return;
-        }
-#endif
-#ifdef HAVE_truncxfqf2
-      if (HAVE_truncxfqf2 && from_mode == XFmode && to_mode == QFmode)
-        {
-          emit_unop_insn (CODE_FOR_truncxfqf2, to, from, UNKNOWN);
-          return;
-        }
-#endif
-#ifdef HAVE_trunctfqf2
-      if (HAVE_trunctfqf2 && from_mode == TFmode && to_mode == QFmode)
-        {
-          emit_unop_insn (CODE_FOR_trunctfqf2, to, from, UNKNOWN);
-          return;
-        }
-#endif
-
-#ifdef HAVE_trunctqfhf2
-      if (HAVE_trunctqfhf2 && from_mode == TQFmode && to_mode == HFmode)
-        {
-          emit_unop_insn (CODE_FOR_trunctqfhf2, to, from, UNKNOWN);
-          return;
-        }
-#endif
-#ifdef HAVE_truncsfhf2
-      if (HAVE_truncsfhf2 && from_mode == SFmode && to_mode == HFmode)
-        {
-          emit_unop_insn (CODE_FOR_truncsfhf2, to, from, UNKNOWN);
-          return;
-        }
-#endif
-#ifdef HAVE_truncdfhf2
-      if (HAVE_truncdfhf2 && from_mode == DFmode && to_mode == HFmode)
-        {
-          emit_unop_insn (CODE_FOR_truncdfhf2, to, from, UNKNOWN);
-          return;
-        }
-#endif
-#ifdef HAVE_truncxfhf2
-      if (HAVE_truncxfhf2 && from_mode == XFmode && to_mode == HFmode)
-        {
-          emit_unop_insn (CODE_FOR_truncxfhf2, to, from, UNKNOWN);
-          return;
-        }
-#endif
-#ifdef HAVE_trunctfhf2
-      if (HAVE_trunctfhf2 && from_mode == TFmode && to_mode == HFmode)
-        {
-          emit_unop_insn (CODE_FOR_trunctfhf2, to, from, UNKNOWN);
-          return;
-        }
-#endif
+      if (GET_MODE_PRECISION (from_mode) < GET_MODE_PRECISION (to_mode))
+        tab = sext_optab;
+      else if (GET_MODE_PRECISION (from_mode) > GET_MODE_PRECISION (to_mode))
+        tab = trunc_optab;
+      else
+        abort ();

-#ifdef HAVE_truncsftqf2
-      if (HAVE_truncsftqf2 && from_mode == SFmode && to_mode == TQFmode)
-        {
-          emit_unop_insn (CODE_FOR_truncsftqf2, to, from, UNKNOWN);
-          return;
-        }
-#endif
-#ifdef HAVE_truncdftqf2
-      if (HAVE_truncdftqf2 && from_mode == DFmode && to_mode == TQFmode)
-        {
-          emit_unop_insn (CODE_FOR_truncdftqf2, to, from, UNKNOWN);
-          return;
-        }
-#endif
-#ifdef HAVE_truncxftqf2
-      if (HAVE_truncxftqf2 && from_mode == XFmode && to_mode == TQFmode)
-        {
-          emit_unop_insn (CODE_FOR_truncxftqf2, to, from, UNKNOWN);
-          return;
-        }
-#endif
-#ifdef HAVE_trunctftqf2
-      if (HAVE_trunctftqf2 && from_mode == TFmode && to_mode == TQFmode)
-        {
-          emit_unop_insn (CODE_FOR_trunctftqf2, to, from, UNKNOWN);
-          return;
-        }
-#endif
+      /* Try converting directly if the insn is supported.  */
-#ifdef HAVE_truncdfsf2
-      if (HAVE_truncdfsf2 && from_mode == DFmode && to_mode == SFmode)
-        {
-          emit_unop_insn (CODE_FOR_truncdfsf2, to, from, UNKNOWN);
-          return;
-        }
-#endif
-#ifdef HAVE_truncxfsf2
-      if (HAVE_truncxfsf2 && from_mode == XFmode && to_mode == SFmode)
-        {
-          emit_unop_insn (CODE_FOR_truncxfsf2, to, from, UNKNOWN);
-          return;
-        }
-#endif
-#ifdef HAVE_trunctfsf2
-      if (HAVE_trunctfsf2 && from_mode == TFmode && to_mode == SFmode)
-        {
-          emit_unop_insn (CODE_FOR_trunctfsf2, to, from, UNKNOWN);
-          return;
-        }
-#endif
-#ifdef HAVE_truncxfdf2
-      if (HAVE_truncxfdf2 && from_mode == XFmode && to_mode == DFmode)
+      code = tab->handlers[to_mode][from_mode].insn_code;
+      if (code != CODE_FOR_nothing)
         {
-          emit_unop_insn (CODE_FOR_truncxfdf2, to, from, UNKNOWN);
+          emit_unop_insn (code, to, from,
+                          tab == sext_optab ? FLOAT_EXTEND : FLOAT_TRUNCATE);
           return;
         }
-#endif
-#ifdef HAVE_trunctfdf2
-      if (HAVE_trunctfdf2 && from_mode == TFmode && to_mode == DFmode)
-        {
-          emit_unop_insn (CODE_FOR_trunctfdf2, to, from, UNKNOWN);
-          return;
-        }
-#endif

-      libcall = (rtx) 0;
-      switch (from_mode)
-        {
-        case SFmode:
-          switch (to_mode)
-            {
-            case DFmode:
-              libcall = extendsfdf2_libfunc;
-              break;
-
-            case XFmode:
-              libcall = extendsfxf2_libfunc;
-              break;
-
-            case TFmode:
-              libcall = extendsftf2_libfunc;
-              break;
-
-            default:
-              break;
-            }
-          break;
+      /* Otherwise use a libcall.  */
+      libcall = tab->handlers[to_mode][from_mode].libfunc;

-        case DFmode:
-          switch (to_mode)
-            {
-            case SFmode:
-              libcall = truncdfsf2_libfunc;
-              break;
-
-            case XFmode:
-              libcall = extenddfxf2_libfunc;
-              break;
-
-            case TFmode:
-              libcall = extenddftf2_libfunc;
-              break;
-
-            default:
-              break;
-            }
-          break;
-
-        case XFmode:
-          switch (to_mode)
-            {
-            case SFmode:
-              libcall = truncxfsf2_libfunc;
-              break;
-
-            case DFmode:
-              libcall = truncxfdf2_libfunc;
-              break;
-
-            default:
-              break;
-            }
-          break;
-
-        case TFmode:
-          switch (to_mode)
-            {
-            case SFmode:
-              libcall = trunctfsf2_libfunc;
-              break;
-
-            case DFmode:
-              libcall = trunctfdf2_libfunc;
-              break;
-
-            default:
-              break;
-            }
-          break;
-
-        default:
-          break;
-        }
-
-      if (libcall == (rtx) 0)
+      if (!libcall)
         /* This conversion is not implemented yet.  */
        abort ();

@@ -848,11 +637,49 @@ convert_move (to, from, unsignedp)
                                       1, from, from_mode);
       insns = get_insns ();
       end_sequence ();
-      emit_libcall_block (insns, to, value, gen_rtx_FLOAT_TRUNCATE (to_mode,
-                                                                    from));
+      emit_libcall_block (insns, to, value,
+                          tab == trunc_optab ? gen_rtx_FLOAT_TRUNCATE (to_mode,
+                                                                       from)
+                          : gen_rtx_FLOAT_EXTEND (to_mode, from));
       return;
     }

+  /* Handle pointer conversion.  */                     /* SPEE 900220.  */
+  /* Targets are expected to provide conversion insns between PxImode and
+     xImode for all MODE_PARTIAL_INT modes they use, but no others.  */
+  if (GET_MODE_CLASS (to_mode) == MODE_PARTIAL_INT)
+    {
+      enum machine_mode full_mode
+        = smallest_mode_for_size (GET_MODE_BITSIZE (to_mode), MODE_INT);
+
+      if (trunc_optab->handlers[to_mode][full_mode].insn_code
+          == CODE_FOR_nothing)
+        abort ();
+
+      if (full_mode != from_mode)
+        from = convert_to_mode (full_mode, from, unsignedp);
+      emit_unop_insn (trunc_optab->handlers[to_mode][full_mode].insn_code,
+                      to, from, UNKNOWN);
+      return;
+    }
+
+  if (GET_MODE_CLASS (from_mode) == MODE_PARTIAL_INT)
+    {
+      enum machine_mode full_mode
+        = smallest_mode_for_size (GET_MODE_BITSIZE (from_mode), MODE_INT);
+
+      if (sext_optab->handlers[full_mode][from_mode].insn_code
+          == CODE_FOR_nothing)
+        abort ();
+
+      emit_unop_insn (sext_optab->handlers[full_mode][from_mode].insn_code,
+                      to, from, UNKNOWN);
+      if (to_mode == full_mode)
+        return;
+
+      /* else proceed to integer conversions below */
+      from_mode = full_mode;
+    }

   /* Now both modes are integers.  */

   /* Handle expanding beyond a word.  */
@@ -886,7 +713,11 @@ convert_move (to, from, unsignedp)
            != CODE_FOR_nothing))
     {
       if (GET_CODE (to) == REG)
-        emit_insn (gen_rtx_CLOBBER (VOIDmode, to));
+        {
+          if (reg_overlap_mentioned_p (to, from))
+            from = force_reg (from_mode, from);
+          emit_insn (gen_rtx_CLOBBER (VOIDmode, to));
+        }
       convert_move (gen_lowpart (word_mode, to), from, unsignedp);
       emit_unop_insn (code, to, gen_lowpart (word_mode, to), equiv_code);
@@ -975,119 +806,6 @@ convert_move (to, from, unsignedp)
       return;
     }

-  /* Handle pointer conversion.  */                     /* SPEE 900220.  */
-  if (to_mode == PQImode)
-    {
-      if (from_mode != QImode)
-        from = convert_to_mode (QImode, from, unsignedp);
-
-#ifdef HAVE_truncqipqi2
-      if (HAVE_truncqipqi2)
-        {
-          emit_unop_insn (CODE_FOR_truncqipqi2, to, from, UNKNOWN);
-          return;
-        }
-#endif /* HAVE_truncqipqi2 */
-      abort ();
-    }
-
-  if (from_mode == PQImode)
-    {
-      if (to_mode != QImode)
-        {
-          from = convert_to_mode (QImode, from, unsignedp);
-          from_mode = QImode;
-        }
-      else
-        {
-#ifdef HAVE_extendpqiqi2
-          if (HAVE_extendpqiqi2)
-            {
-              emit_unop_insn (CODE_FOR_extendpqiqi2, to, from, UNKNOWN);
-              return;
-            }
-#endif /* HAVE_extendpqiqi2 */
-          abort ();
-        }
-    }
-
-  if (to_mode == PSImode)
-    {
-      if (from_mode != SImode)
-        from = convert_to_mode (SImode, from, unsignedp);
-
-#ifdef HAVE_truncsipsi2
-      if (HAVE_truncsipsi2)
-        {
-          emit_unop_insn (CODE_FOR_truncsipsi2, to, from, UNKNOWN);
-          return;
-        }
-#endif /* HAVE_truncsipsi2 */
-      abort ();
-    }
-
-  if (from_mode == PSImode)
-    {
-      if (to_mode != SImode)
-        {
-          from = convert_to_mode (SImode, from, unsignedp);
-          from_mode = SImode;
-        }
-      else
-        {
-#ifdef HAVE_extendpsisi2
-          if (! unsignedp && HAVE_extendpsisi2)
-            {
-              emit_unop_insn (CODE_FOR_extendpsisi2, to, from, UNKNOWN);
-              return;
-            }
-#endif /* HAVE_extendpsisi2 */
-#ifdef HAVE_zero_extendpsisi2
-          if (unsignedp && HAVE_zero_extendpsisi2)
-            {
-              emit_unop_insn (CODE_FOR_zero_extendpsisi2, to, from, UNKNOWN);
-              return;
-            }
-#endif /* HAVE_zero_extendpsisi2 */
-          abort ();
-        }
-    }
-
-  if (to_mode == PDImode)
-    {
-      if (from_mode != DImode)
-        from = convert_to_mode (DImode, from, unsignedp);
-
-#ifdef HAVE_truncdipdi2
-      if (HAVE_truncdipdi2)
-        {
-          emit_unop_insn (CODE_FOR_truncdipdi2, to, from, UNKNOWN);
-          return;
-        }
-#endif /* HAVE_truncdipdi2 */
-      abort ();
-    }
-
-  if (from_mode == PDImode)
-    {
-      if (to_mode != DImode)
-        {
-          from = convert_to_mode (DImode, from, unsignedp);
-          from_mode = DImode;
-        }
-      else
-        {
-#ifdef HAVE_extendpdidi2
-          if (HAVE_extendpdidi2)
-            {
-              emit_unop_insn (CODE_FOR_extendpdidi2, to, from, UNKNOWN);
-              return;
-            }
-#endif /* HAVE_extendpdidi2 */
-          abort ();
-        }
-    }
-
   /* Now follow all the conversions between integers
      no more than a word long.  */
@@ -1161,140 +879,20 @@ convert_move (to, from, unsignedp)
     }

   /* Support special truncate insns for certain modes.  */
-
-  if (from_mode == DImode && to_mode == SImode)
-    {
-#ifdef HAVE_truncdisi2
-      if (HAVE_truncdisi2)
-        {
-          emit_unop_insn (CODE_FOR_truncdisi2, to, from, UNKNOWN);
-          return;
-        }
-#endif
-      convert_move (to, force_reg (from_mode, from), unsignedp);
-      return;
-    }
-
-  if (from_mode == DImode && to_mode == HImode)
-    {
-#ifdef HAVE_truncdihi2
-      if (HAVE_truncdihi2)
-        {
-          emit_unop_insn (CODE_FOR_truncdihi2, to, from, UNKNOWN);
-          return;
-        }
-#endif
-      convert_move (to, force_reg (from_mode, from), unsignedp);
-      return;
-    }
-
-  if (from_mode == DImode && to_mode == QImode)
+  if (trunc_optab->handlers[to_mode][from_mode].insn_code != CODE_FOR_nothing)
     {
-#ifdef HAVE_truncdiqi2
-      if (HAVE_truncdiqi2)
-        {
-          emit_unop_insn (CODE_FOR_truncdiqi2, to, from, UNKNOWN);
-          return;
-        }
-#endif
-      convert_move (to, force_reg (from_mode, from), unsignedp);
-      return;
-    }
-
-  if (from_mode == SImode && to_mode == HImode)
-    {
-#ifdef HAVE_truncsihi2
-      if (HAVE_truncsihi2)
-        {
-          emit_unop_insn (CODE_FOR_truncsihi2, to, from, UNKNOWN);
-          return;
-        }
-#endif
-      convert_move (to, force_reg (from_mode, from), unsignedp);
-      return;
-    }
-
-  if (from_mode == SImode && to_mode == QImode)
-    {
-#ifdef HAVE_truncsiqi2
-      if (HAVE_truncsiqi2)
-        {
-          emit_unop_insn (CODE_FOR_truncsiqi2, to, from, UNKNOWN);
-          return;
-        }
-#endif
-      convert_move (to, force_reg (from_mode, from), unsignedp);
-      return;
-    }
-
-  if (from_mode == HImode && to_mode == QImode)
-    {
-#ifdef HAVE_trunchiqi2
-      if (HAVE_trunchiqi2)
-        {
-          emit_unop_insn (CODE_FOR_trunchiqi2, to, from, UNKNOWN);
-          return;
-        }
-#endif
-      convert_move (to, force_reg (from_mode, from), unsignedp);
-      return;
-    }
-
-  if (from_mode == TImode && to_mode == DImode)
-    {
-#ifdef HAVE_trunctidi2
-      if (HAVE_trunctidi2)
-        {
-          emit_unop_insn (CODE_FOR_trunctidi2, to, from, UNKNOWN);
-          return;
-        }
-#endif
-      convert_move (to, force_reg (from_mode, from), unsignedp);
-      return;
-    }
-
-  if (from_mode == TImode && to_mode == SImode)
-    {
-#ifdef HAVE_trunctisi2
-      if (HAVE_trunctisi2)
-        {
-          emit_unop_insn (CODE_FOR_trunctisi2, to, from, UNKNOWN);
-          return;
-        }
-#endif
-      convert_move (to, force_reg (from_mode, from), unsignedp);
-      return;
-    }
-
-  if (from_mode == TImode && to_mode == HImode)
-    {
-#ifdef HAVE_trunctihi2
-      if (HAVE_trunctihi2)
-        {
-          emit_unop_insn (CODE_FOR_trunctihi2, to, from, UNKNOWN);
-          return;
-        }
-#endif
-      convert_move (to, force_reg (from_mode, from), unsignedp);
-      return;
-    }
-
-  if (from_mode == TImode && to_mode == QImode)
-    {
-#ifdef HAVE_trunctiqi2
-      if (HAVE_trunctiqi2)
-        {
-          emit_unop_insn (CODE_FOR_trunctiqi2, to, from, UNKNOWN);
-          return;
-        }
-#endif
-      convert_move (to, force_reg (from_mode, from), unsignedp);
+      emit_unop_insn (trunc_optab->handlers[to_mode][from_mode].insn_code,
+                      to, from, UNKNOWN);
       return;
     }

   /* Handle truncation of volatile memrefs, and so on;
      the things that couldn't be truncated directly,
-     and for which there was no special instruction.  */
+     and for which there was no special instruction.
+
+     ??? Code above formerly short-circuited this, for most integer
+     mode pairs, with a force_reg in from_mode followed by a recursive
+     call to this routine.  Appears always to have been wrong.  */
   if (GET_MODE_BITSIZE (to_mode) < GET_MODE_BITSIZE (from_mode))
     {
       rtx temp = force_reg (to_mode, gen_lowpart (to_mode, from));
@@ -1317,10 +915,7 @@ convert_move (to, from, unsignedp)
    except when putting X into an insn (in which case convert_move does it).  */

 rtx
-convert_to_mode (mode, x, unsignedp)
-     enum machine_mode mode;
-     rtx x;
-     int unsignedp;
+convert_to_mode (enum machine_mode mode, rtx x, int unsignedp)
 {
   return convert_modes (mode, VOIDmode, x, unsignedp);
 }
@@ -1339,10 +934,7 @@ convert_to_mode (mode, x, unsignedp)
    except when putting X into an insn (in which case convert_move does it).  */

 rtx
-convert_modes (mode, oldmode, x, unsignedp)
-     enum machine_mode mode, oldmode;
-     rtx x;
-     int unsignedp;
+convert_modes (enum machine_mode mode, enum machine_mode oldmode, rtx x, int unsignedp)
 {
   rtx temp;
@@ -1439,17 +1031,6 @@ convert_modes (mode, oldmode, x, unsignedp)
   return temp;
 }

-/* This macro is used to determine what the largest unit size that
-   move_by_pieces can use is.  */
-
-/* MOVE_MAX_PIECES is the number of bytes at a time which we can
-   move efficiently, as opposed to MOVE_MAX which is the maximum
-   number of bytes we can move with a single instruction.  */
-
-#ifndef MOVE_MAX_PIECES
-#define MOVE_MAX_PIECES   MOVE_MAX
-#endif
-
 /* STORE_MAX_PIECES is the number of bytes at a time that we can
    store efficiently.  Due to internal GCC limitations, this is
    MOVE_MAX_PIECES limited by the number of bytes GCC can represent
@@ -1457,6 +1038,17 @@ convert_modes (mode, oldmode, x, unsignedp)
 #define STORE_MAX_PIECES  MIN (MOVE_MAX_PIECES, 2 * sizeof (HOST_WIDE_INT))

+/* Determine whether the LEN bytes can be moved by using several move
+   instructions.  Return nonzero if a call to move_by_pieces should
+   succeed.  */
+
+int
+can_move_by_pieces (unsigned HOST_WIDE_INT len,
+                    unsigned int align ATTRIBUTE_UNUSED)
+{
+  return MOVE_BY_PIECES_P (len, align);
+}
+
 /* Generate several move instructions to copy LEN bytes from block FROM to
    block TO.  (These are MEM rtx's with BLKmode).  The caller must pass FROM
    and TO through protect_from_queue before calling.
@@ -1464,13 +1056,15 @@ convert_modes (mode, oldmode, x, unsignedp)
    If PUSH_ROUNDING is defined and TO is NULL, emit_single_push_insn is
    used to push FROM to the stack.

-   ALIGN is maximum alignment we can assume.  */
+   ALIGN is maximum stack alignment we can assume.

-void
-move_by_pieces (to, from, len, align)
-     rtx to, from;
-     unsigned HOST_WIDE_INT len;
-     unsigned int align;
+   If ENDP is 0 return to, if ENDP is 1 return memory at the end ala
+   mempcpy, and if ENDP is 2 return memory the end minus one byte ala
+   stpcpy.  */
+
+rtx
+move_by_pieces (rtx to, rtx from, unsigned HOST_WIDE_INT len,
+                unsigned int align, int endp)
 {
   struct move_by_pieces data;
   rtx to_addr, from_addr = XEXP (from, 0);
@@ -1478,6 +1072,8 @@ move_by_pieces (to, from, len, align)
   enum machine_mode mode = VOIDmode, tmode;
   enum insn_code icode;

+  align = MIN (to ? MEM_ALIGN (to) : align, MEM_ALIGN (from));
+
   data.offset = 0;
   data.from_addr = from_addr;
   if (to)
@@ -1582,15 +1178,43 @@ move_by_pieces (to, from, len, align)
   /* The code above should have handled everything.  */
   if (data.len > 0)
     abort ();
+
+  if (endp)
+    {
+      rtx to1;
+
+      if (data.reverse)
+        abort ();
+      if (data.autinc_to)
+        {
+          if (endp == 2)
+            {
+              if (HAVE_POST_INCREMENT && data.explicit_inc_to > 0)
+                emit_insn (gen_add2_insn (data.to_addr, constm1_rtx));
+              else
+                data.to_addr = copy_addr_to_reg (plus_constant (data.to_addr,
+                                                                -1));
+            }
+          to1 = adjust_automodify_address (data.to, QImode, data.to_addr,
+                                           data.offset);
+        }
+      else
+        {
+          if (endp == 2)
+            --data.offset;
+          to1 = adjust_address (data.to, QImode, data.offset);
+        }
+      return to1;
+    }
+  else
+    return data.to;
 }

 /* Return number of insns required to move L bytes by pieces.
    ALIGN (in bits) is maximum alignment we can assume.  */

 static unsigned HOST_WIDE_INT
-move_by_pieces_ninsns (l, align)
-     unsigned HOST_WIDE_INT l;
-     unsigned int align;
+move_by_pieces_ninsns (unsigned HOST_WIDE_INT l, unsigned int align)
 {
   unsigned HOST_WIDE_INT n_insns = 0;
   unsigned HOST_WIDE_INT max_size = MOVE_MAX + 1;
@@ -1629,10 +1253,8 @@ move_by_pieces_ninsns (l, align)
    to make a move insn for that mode.  DATA has all the other info.  */

 static void
-move_by_pieces_1 (genfun, mode, data)
-     rtx (*genfun) PARAMS ((rtx, ...));
-     enum machine_mode mode;
-     struct move_by_pieces *data;
+move_by_pieces_1 (rtx (*genfun) (rtx, ...), enum machine_mode mode,
+                  struct move_by_pieces *data)
 {
   unsigned int size = GET_MODE_SIZE (mode);
   rtx to1 = NULL_RTX, from1;
@@ -1700,9 +1322,7 @@ move_by_pieces_1 (genfun, mode, data)
    0 otherwise.  */

 rtx
-emit_block_move (x, y, size, method)
-     rtx x, y, size;
-     enum block_op_methods method;
+emit_block_move (rtx x, rtx y, rtx size, enum block_op_methods method)
 {
   bool may_use_call;
   rtx retval = 0;
@@ -1752,6 +1372,9 @@ emit_block_move (x, y, size, method)
      can be incorrect is coming from __builtin_memcpy.  */
   if (GET_CODE (size) == CONST_INT)
     {
+      if (INTVAL (size) == 0)
+        return 0;
+
       x = shallow_copy_rtx (x);
       y = shallow_copy_rtx (y);
       set_mem_size (x, size);
@@ -1759,7 +1382,7 @@ emit_block_move (x, y, size, method)
     }

   if (GET_CODE (size) == CONST_INT && MOVE_BY_PIECES_P (INTVAL (size), align))
-    move_by_pieces (x, y, INTVAL (size), align);
+    move_by_pieces (x, y, INTVAL (size), align, 0);
   else if (emit_block_move_via_movstr (x, y, size, align))
     ;
   else if (may_use_call)
@@ -1773,83 +1396,71 @@ emit_block_move (x, y, size, method)
   return retval;
 }

-/* A subroutine of emit_block_move.  Returns true if calling the
+/* A subroutine of emit_block_move.  Returns true if calling the
    block move libcall will not clobber any parameters which may have
    already been placed on the stack.  */

 static bool
-block_move_libcall_safe_for_call_parm ()
+block_move_libcall_safe_for_call_parm (void)
 {
+  /* If arguments are pushed on the stack, then they're safe.  */
   if (PUSH_ARGS)
     return true;
-  else
-    {
-      /* Check to see whether memcpy takes all register arguments.  */
-      static enum {
-        takes_regs_uninit, takes_regs_no, takes_regs_yes
-      } takes_regs = takes_regs_uninit;
-
-      switch (takes_regs)
-        {
-        case takes_regs_uninit:
-          {
-            CUMULATIVE_ARGS args_so_far;
-            tree fn, arg;
-
-            fn = emit_block_move_libcall_fn (false);
-            INIT_CUMULATIVE_ARGS (args_so_far, TREE_TYPE (fn), NULL_RTX, 0);
-            arg = TYPE_ARG_TYPES (TREE_TYPE (fn));
-            for ( ; arg != void_list_node ; arg = TREE_CHAIN (arg))
-              {
-                enum machine_mode mode = TYPE_MODE (TREE_VALUE (arg));
-                rtx tmp = FUNCTION_ARG (args_so_far, mode, NULL_TREE, 1);
-                if (!tmp || !REG_P (tmp))
-                  goto fail_takes_regs;
-#ifdef FUNCTION_ARG_PARTIAL_NREGS
-                if (FUNCTION_ARG_PARTIAL_NREGS (args_so_far, mode,
-                                                NULL_TREE, 1))
-                  goto fail_takes_regs;
+  /* If registers go on the stack anyway, any argument is sure to clobber
+     an outgoing argument.  */
+#if defined (REG_PARM_STACK_SPACE) && defined (OUTGOING_REG_PARM_STACK_SPACE)
+  {
+    tree fn = emit_block_move_libcall_fn (false);
+    (void) fn;
+    if (REG_PARM_STACK_SPACE (fn) != 0)
+      return false;
+  }
 #endif
-                FUNCTION_ARG_ADVANCE (args_so_far, mode, NULL_TREE, 1);
-              }
-          }
-          takes_regs = takes_regs_yes;
-          /* FALLTHRU */

-        case takes_regs_yes:
-          return true;
+  /* If any argument goes in memory, then it might clobber an outgoing
+     argument.  */
+  {
+    CUMULATIVE_ARGS args_so_far;
+    tree fn, arg;

-        fail_takes_regs:
-          takes_regs = takes_regs_no;
-          /* FALLTHRU */
-        case takes_regs_no:
-          return false;
+    fn = emit_block_move_libcall_fn (false);
+    INIT_CUMULATIVE_ARGS (args_so_far, TREE_TYPE (fn), NULL_RTX, 0, 3);

-        default:
-          abort ();
-        }
-    }
+    arg = TYPE_ARG_TYPES (TREE_TYPE (fn));
+    for ( ; arg != void_list_node ; arg = TREE_CHAIN (arg))
+      {
+        enum machine_mode mode = TYPE_MODE (TREE_VALUE (arg));
+        rtx tmp = FUNCTION_ARG (args_so_far, mode, NULL_TREE, 1);
+        if (!tmp || !REG_P (tmp))
+          return false;
+#ifdef FUNCTION_ARG_PARTIAL_NREGS
+        if (FUNCTION_ARG_PARTIAL_NREGS (args_so_far, mode,
+                                        NULL_TREE, 1))
+          return false;
+#endif
+        FUNCTION_ARG_ADVANCE (args_so_far, mode, NULL_TREE, 1);
+      }
+  }
+  return true;
 }

-/* A subroutine of emit_block_move.  Expand a movstr pattern;
+/* A subroutine of emit_block_move.  Expand a movstr pattern;
    return true if successful.  */

 static bool
-emit_block_move_via_movstr (x, y, size, align)
-     rtx x, y, size;
-     unsigned int align;
+emit_block_move_via_movstr (rtx x, rtx y, rtx size, unsigned int align)
 {
-  /* Try the most limited insn first, because there's no point
-     including more than one in the machine description unless
-     the more limited one has some advantage.  */
-
   rtx opalign = GEN_INT (align / BITS_PER_UNIT);
   enum machine_mode mode;

   /* Since this is a move insn, we don't care about volatility.  */
   volatile_ok = 1;

+  /* Try the most limited insn first, because there's no point
+     including more than one in the machine description unless
+     the more limited one has some advantage.  */
+
   for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT); mode != VOIDmode;
        mode = GET_MODE_WIDER_MODE (mode))
     {
@@ -1906,41 +1517,48 @@ emit_block_move_via_movstr (x, y, size, align)
    Return the return value from memcpy, 0 otherwise.  */

 static rtx
-emit_block_move_via_libcall (dst, src, size)
-     rtx dst, src, size;
+emit_block_move_via_libcall (rtx dst, rtx src, rtx size)
 {
+  rtx dst_addr, src_addr;
   tree call_expr, arg_list, fn, src_tree, dst_tree, size_tree;
   enum machine_mode size_mode;
   rtx retval;

   /* DST, SRC, or SIZE may have been passed through protect_from_queue.

-     It is unsafe to save the value generated by protect_from_queue
-     and reuse it later.  Consider what happens if emit_queue is
-     called before the return value from protect_from_queue is used.
+     It is unsafe to save the value generated by protect_from_queue and reuse
+     it later.  Consider what happens if emit_queue is called before the
+     return value from protect_from_queue is used.

-     Expansion of the CALL_EXPR below will call emit_queue before
-     we are finished emitting RTL for argument setup.  So if we are
-     not careful we could get the wrong value for an argument.
+     Expansion of the CALL_EXPR below will call emit_queue before we are
+     finished emitting RTL for argument setup.  So if we are not careful we
+     could get the wrong value for an argument.

-     To avoid this problem we go ahead and emit code to copy X, Y &
-     SIZE into new pseudos.  We can then place those new pseudos
-     into an RTL_EXPR and use them later, even after a call to
+     To avoid this problem we go ahead and emit code to copy the addresses of
+     DST and SRC and SIZE into new pseudos.  We can then place those new
+     pseudos into an RTL_EXPR and use them later, even after a call to
      emit_queue.

-     Note this is not strictly needed for library calls since they
-     do not call emit_queue before loading their arguments.  However,
-     we may need to have library calls call emit_queue in the future
-     since failing to do so could cause problems for targets which
-     define SMALL_REGISTER_CLASSES and pass arguments in registers.  */
+     Note this is not strictly needed for library calls since they do not call
+     emit_queue before loading their arguments.  However, we may need to have
+     library calls call emit_queue in the future since failing to do so could
+     cause problems for targets which define SMALL_REGISTER_CLASSES and pass
+     arguments in registers.  */

-  dst = copy_to_mode_reg (Pmode, XEXP (dst, 0));
-  src = copy_to_mode_reg (Pmode, XEXP (src, 0));
+  dst_addr = copy_to_mode_reg (Pmode, XEXP (dst, 0));
+  src_addr = copy_to_mode_reg (Pmode, XEXP (src, 0));
+
+  dst_addr = convert_memory_address (ptr_mode, dst_addr);
+  src_addr = convert_memory_address (ptr_mode, src_addr);
+
+  dst_tree = make_tree (ptr_type_node, dst_addr);
+  src_tree = make_tree (ptr_type_node, src_addr);

   if (TARGET_MEM_FUNCTIONS)
     size_mode = TYPE_MODE (sizetype);
   else
     size_mode = TYPE_MODE (unsigned_type_node);
+
   size = convert_to_mode (size_mode, size, 1);
   size = copy_to_mode_reg (size_mode, size);

@@ -1952,8 +1570,6 @@ emit_block_move_via_libcall (dst, src, size)
      For convenience, we generate the call to bcopy this way as well.  */

-  dst_tree = make_tree (ptr_type_node, dst);
-  src_tree = make_tree (ptr_type_node, src);
   if (TARGET_MEM_FUNCTIONS)
     size_tree = make_tree (sizetype, size);
   else
@@ -1976,17 +1592,20 @@ emit_block_move_via_libcall (dst, src, size)
   call_expr = build1 (ADDR_EXPR, build_pointer_type (TREE_TYPE (fn)), fn);
   call_expr = build (CALL_EXPR, TREE_TYPE (TREE_TYPE (fn)),
                      call_expr, arg_list, NULL_TREE);
-  TREE_SIDE_EFFECTS (call_expr) = 1;

   retval = expand_expr (call_expr, NULL_RTX, VOIDmode, 0);

-  /* If we are initializing a readonly value, show the above call
-     clobbered it.  Otherwise, a load from it may erroneously be
-     hoisted from a loop.  */
+  /* If we are initializing a readonly value, show the above call clobbered
+     it.  Otherwise, a load from it may erroneously be hoisted from a loop, or
+     the delay slot scheduler might overlook conflicts and take nasty
+     decisions.  */
   if (RTX_UNCHANGING_P (dst))
-    emit_insn (gen_rtx_CLOBBER (VOIDmode, dst));
+    add_function_usage_to
+      (last_call_insn (), gen_rtx_EXPR_LIST (VOIDmode,
+                                             gen_rtx_CLOBBER (VOIDmode, dst),
+                                             NULL_RTX));

-  return (TARGET_MEM_FUNCTIONS ? retval : NULL_RTX);
+  return TARGET_MEM_FUNCTIONS ? retval : NULL_RTX;
 }

 /* A subroutine of emit_block_move_via_libcall.  Create the tree node
@@ -1995,15 +1614,13 @@ emit_block_move_via_libcall (dst, src, size)

 static GTY(()) tree block_move_fn;

-static tree
-emit_block_move_libcall_fn (for_call)
-     int for_call;
+void
+init_block_move_fn (const char *asmspec)
 {
-  static bool emitted_extern;
-  tree fn = block_move_fn, args;
-
-  if (!fn)
+  if (!block_move_fn)
     {
+      tree args, fn;
+
       if (TARGET_MEM_FUNCTIONS)
         {
           fn = get_identifier ("memcpy");
@@ -2028,14 +1645,29 @@ emit_block_move_libcall_fn (for_call)
       block_move_fn = fn;
     }

+  if (asmspec)
+    {
+      SET_DECL_RTL (block_move_fn, NULL_RTX);
+      SET_DECL_ASSEMBLER_NAME (block_move_fn, get_identifier (asmspec));
+    }
+}
+
+static tree
+emit_block_move_libcall_fn (int for_call)
+{
+  static bool emitted_extern;
+
+  if (!block_move_fn)
+    init_block_move_fn (NULL);
+
   if (for_call && !emitted_extern)
     {
       emitted_extern = true;
-      make_decl_rtl (fn, NULL);
-      assemble_external (fn);
+      make_decl_rtl (block_move_fn, NULL);
+      assemble_external (block_move_fn);
     }

-  return fn;
+  return block_move_fn;
 }

 /* A subroutine of emit_block_move.  Copy the data via an explicit
    loop.  This is used only when libcalls are forbidden.  */
 /* ??? It'd be nice to copy in hunks larger than QImode.  */

 static void
-emit_block_move_via_loop (x, y, size, align)
-     rtx x, y, size;
-     unsigned int align ATTRIBUTE_UNUSED;
+emit_block_move_via_loop (rtx x, rtx y, rtx size,
+                          unsigned int align ATTRIBUTE_UNUSED)
 {
   rtx cmp_label, top_label, iter, x_addr, y_addr, tmp;
   enum machine_mode iter_mode;
@@ -2064,7 +1695,7 @@ emit_block_move_via_loop (x, y, size, align)
   y_addr = force_operand (XEXP (y, 0), NULL_RTX);
   do_pending_stack_adjust ();

-  emit_note (NULL, NOTE_INSN_LOOP_BEG);
+  emit_note (NOTE_INSN_LOOP_BEG);

   emit_jump (cmp_label);
   emit_label (top_label);
@@ -2082,24 +1713,20 @@ emit_block_move_via_loop (x, y, size, align)
   if (tmp != iter)
     emit_move_insn (iter, tmp);

-  emit_note (NULL, NOTE_INSN_LOOP_CONT);
+  emit_note (NOTE_INSN_LOOP_CONT);
   emit_label (cmp_label);

   emit_cmp_and_jump_insns (iter, size, LT, NULL_RTX, iter_mode,
                            true, top_label);

-  emit_note (NULL, NOTE_INSN_LOOP_END);
+  emit_note (NOTE_INSN_LOOP_END);
 }

 /* Copy all or part of a value X into registers starting at REGNO.
    The number of registers to be filled is NREGS.  */

 void
-move_block_to_reg (regno, x, nregs, mode)
-     int regno;
-     rtx x;
-     int nregs;
-     enum machine_mode mode;
+move_block_to_reg (int regno, rtx x, int nregs, enum machine_mode mode)
 {
   int i;
 #ifdef HAVE_load_multiple
@@ -2136,61 +1763,23 @@ move_block_to_reg (regno, x, nregs, mode)
 }

 /* Copy all or part of a BLKmode value X out of registers starting at REGNO.
-   The number of registers to be filled is NREGS.  SIZE indicates the number
-   of bytes in the object X.  */
+   The number of registers to be filled is NREGS.  */

 void
-move_block_from_reg (regno, x, nregs, size)
-     int regno;
-     rtx x;
-     int nregs;
-     int size;
+move_block_from_reg (int regno, rtx x, int nregs)
 {
   int i;
-#ifdef HAVE_store_multiple
-  rtx pat;
-  rtx last;
-#endif
-  enum machine_mode mode;

   if (nregs == 0)
     return;

-  /* If SIZE is that of a mode no bigger than a word, just use that
-     mode's store operation.  */
-  if (size <= UNITS_PER_WORD
-      && (mode = mode_for_size (size * BITS_PER_UNIT, MODE_INT, 0)) != BLKmode)
-    {
-      emit_move_insn (adjust_address (x, mode, 0), gen_rtx_REG (mode, regno));
-      return;
-    }
-
-  /* Blocks smaller than a word on a BYTES_BIG_ENDIAN machine must be aligned
-     to the left before storing to memory.  Note that the previous test
-     doesn't handle all cases (e.g. SIZE == 3).  */
-  if (size < UNITS_PER_WORD && BYTES_BIG_ENDIAN)
-    {
-      rtx tem = operand_subword (x, 0, 1, BLKmode);
-      rtx shift;
-
-      if (tem == 0)
-        abort ();
-
-      shift = expand_shift (LSHIFT_EXPR, word_mode,
-                            gen_rtx_REG (word_mode, regno),
-                            build_int_2 ((UNITS_PER_WORD - size)
-                                         * BITS_PER_UNIT, 0), NULL_RTX, 0);
-      emit_move_insn (tem, shift);
-      return;
-    }
-
   /* See if the machine can do this with a store multiple insn.  */
 #ifdef HAVE_store_multiple
   if (HAVE_store_multiple)
     {
-      last = get_last_insn ();
-      pat = gen_store_multiple (x, gen_rtx_REG (word_mode, regno),
-                                GEN_INT (nregs));
+      rtx last = get_last_insn ();
+      rtx pat = gen_store_multiple (x, gen_rtx_REG (word_mode, regno),
+                                    GEN_INT (nregs));
       if (pat)
         {
           emit_insn (pat);
@@ -2219,8 +1808,7 @@ move_block_from_reg (regno, x, nregs, size)
    The new set has the same modes as the original set.  */

 rtx
-gen_group_rtx (orig)
-     rtx orig;
+gen_group_rtx (rtx orig)
 {
   int i, length;
   rtx *tmps;
@@ -2229,7 +1817,7 @@ gen_group_rtx (orig)
     abort ();

   length = XVECLEN (orig, 0);
-  tmps = (rtx *) alloca (sizeof (rtx) * length);
+  tmps = alloca (sizeof (rtx) * length);

   /* Skip a NULL entry in first slot.  */
   i = XEXP (XVECEXP (orig, 0, 0), 0) ? 0 : 1;
@@ -2248,20 +1836,13 @@ gen_group_rtx (orig)
   return gen_rtx_PARALLEL (GET_MODE (orig), gen_rtvec_v (length, tmps));
 }

-/* Emit code to move a block SRC to a block DST, where DST is non-consecutive
-   registers represented by a PARALLEL.  SSIZE represents the total size of
-   block SRC in bytes, or -1 if not known.  */
-/* ??? If SSIZE % UNITS_PER_WORD != 0, we make the blatant assumption that
-   the balance will be in what would be the low-order memory addresses, i.e.
-   left justified for big endian, right justified for little endian.  This
-   happens to be true for the targets currently using this support.  If this
-   ever changes, a new target macro along the lines of FUNCTION_ARG_PADDING
-   would be needed.  */
+/* Emit code to move a block ORIG_SRC of type TYPE to a block DST,
+   where DST is non-consecutive registers represented by a PARALLEL.
+   SSIZE represents the total size of block ORIG_SRC in bytes, or -1
+   if not known.  */

 void
-emit_group_load (dst, orig_src, ssize)
-     rtx dst, orig_src;
-     int ssize;
+emit_group_load (rtx dst, rtx orig_src, tree type ATTRIBUTE_UNUSED, int ssize)
 {
   rtx *tmps, src;
   int start, i;
@@ -2276,7 +1857,7 @@ emit_group_load (dst, orig_src, ssize)
   else
     start = 1;

-  tmps = (rtx *) alloca (sizeof (rtx) * XVECLEN (dst, 0));
+  tmps = alloca (sizeof (rtx) * XVECLEN (dst, 0));

   /* Process the pieces.  */
   for (i = start; i < XVECLEN (dst, 0); i++)
@@ -2289,7 +1870,17 @@ emit_group_load (dst, orig_src, ssize)
       /* Handle trailing fragments that run over the size of the struct.  */
       if (ssize >= 0 && bytepos + (HOST_WIDE_INT) bytelen > ssize)
         {
-          shift = (bytelen - (ssize - bytepos)) * BITS_PER_UNIT;
+          /* Arrange to shift the fragment to where it belongs.
+             extract_bit_field loads to the lsb of the reg.  */
+          if (
+#ifdef BLOCK_REG_PADDING
+              BLOCK_REG_PADDING (GET_MODE (orig_src), type, i == start)
+              == (BYTES_BIG_ENDIAN ? upward : downward)
+#else
+              BYTES_BIG_ENDIAN
+#endif
+              )
+            shift = (bytelen - (ssize - bytepos)) * BITS_PER_UNIT;
           bytelen = ssize - bytepos;
           if (bytelen <= 0)
             abort ();
@@ -2314,7 +1905,8 @@ emit_group_load (dst, orig_src, ssize)

       /* Optimize the access just a bit.  */
       if (GET_CODE (src) == MEM
-          && MEM_ALIGN (src) >= GET_MODE_ALIGNMENT (mode)
+          && (! SLOW_UNALIGNED_ACCESS (mode, MEM_ALIGN (src))
+              || MEM_ALIGN (src) >= GET_MODE_ALIGNMENT (mode))
           && bytepos * BITS_PER_UNIT % GET_MODE_ALIGNMENT (mode) == 0
           && bytelen == GET_MODE_SIZE (mode))
         {
@@ -2349,6 +1941,22 @@ emit_group_load (dst, orig_src, ssize)
           else
             abort ();
         }
+      /* FIXME: A SIMD parallel will eventually lead to a subreg of a
+         SIMD register, which is currently broken.  While we get GCC
+         to emit proper RTL for these cases, let's dump to memory.  */
+      else if (VECTOR_MODE_P (GET_MODE (dst))
+               && GET_CODE (src) == REG)
+        {
+          int slen = GET_MODE_SIZE (GET_MODE (src));
+          rtx mem;
+
+          mem = assign_stack_temp (GET_MODE (src), slen, 0);
+          emit_move_insn (mem, src);
+          tmps[i] = adjust_address (mem, mode, (int) bytepos);
+        }
+      else if (CONSTANT_P (src) && GET_MODE (dst) != BLKmode
+               && XVECLEN (dst, 0) > 1)
+        tmps[i] = simplify_gen_subreg (mode, src, GET_MODE(dst), bytepos);
       else if (CONSTANT_P (src)
                || (GET_CODE (src) == REG && GET_MODE (src) == mode))
         tmps[i] = src;
@@ -2357,7 +1965,7 @@ emit_group_load (dst, orig_src, ssize)
                                      bytepos * BITS_PER_UNIT, 1, NULL_RTX,
                                      mode, mode, ssize);

-      if (BYTES_BIG_ENDIAN && shift)
+      if (shift)
         expand_binop (mode, ashl_optab, tmps[i], GEN_INT (shift),
                       tmps[i], 0, OPTAB_WIDEN);
     }
@@ -2373,8 +1981,7 @@ emit_group_load (dst, orig_src, ssize)
    non-consecutive groups of registers, each represented by a PARALLEL.  */

 void
-emit_group_move (dst, src)
-     rtx dst, src;
+emit_group_move (rtx dst, rtx src)
 {
   int i;

@@ -2389,14 +1996,13 @@ emit_group_move (dst, src)
                     XEXP (XVECEXP (src, 0, i), 0));
 }

-/* Emit code to move a block SRC to a block DST, where SRC is non-consecutive
-   registers represented by a PARALLEL.  SSIZE represents the total size of
-   block DST, or -1 if not known.  */
+/* Emit code to move a block SRC to a block ORIG_DST of type TYPE,
+   where SRC is non-consecutive registers represented by a PARALLEL.
+   SSIZE represents the total size of block ORIG_DST, or -1 if not
+   known.  */

 void
-emit_group_store (orig_dst, src, ssize)
-     rtx orig_dst, src;
-     int ssize;
+emit_group_store (rtx orig_dst, rtx src, tree type ATTRIBUTE_UNUSED, int ssize)
 {
   rtx *tmps, dst;
   int start, i;
@@ -2411,7 +2017,7 @@ emit_group_store (orig_dst, src, ssize)
   else
     start = 1;

-  tmps = (rtx *) alloca (sizeof (rtx) * XVECLEN (src, 0));
+  tmps = alloca (sizeof (rtx) * XVECLEN (src, 0));

   /* Copy the (probable) hard regs into pseudos.  */
   for (i = start; i < XVECLEN (src, 0); i++)
@@ -2440,8 +2046,8 @@ emit_group_store (orig_dst, src, ssize)
          the temporary.  */

       temp = assign_stack_temp (GET_MODE (dst), ssize, 0);
-      emit_group_store (temp, src, ssize);
-      emit_group_load (dst, temp, ssize);
+      emit_group_store (temp, src, type, ssize);
+      emit_group_load (dst, temp, type, ssize);
       return;
     }
   else if (GET_CODE (dst) != MEM && GET_CODE (dst) != CONCAT)
@@ -2462,7 +2068,16 @@ emit_group_store (orig_dst, src, ssize)
       /* Handle trailing fragments that run over the size of the struct.  */
       if (ssize >= 0 && bytepos + (HOST_WIDE_INT) bytelen > ssize)
         {
-          if (BYTES_BIG_ENDIAN)
+          /* store_bit_field always takes its value from the lsb.
+             Move the fragment to the lsb if it's not already there.  */
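[Worked numbers for the trailing-fragment shift, with hypothetical values: a word-sized piece (bytelen == 4) at bytepos == 4 of a struct with ssize == 6 overhangs by two bytes, so

    shift   = (bytelen - (ssize - bytepos)) * BITS_PER_UNIT
            = (4 - 2) * 8                  = 16 bits
    bytelen = ssize - bytepos              = 2 bytes actually transferred

emit_group_load shifts the two loaded bytes left by 16 when the ABI pads the fragment on the left; emit_group_store, just below, performs the mirror-image arithmetic right shift before storing.]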
+          if (
+#ifdef BLOCK_REG_PADDING
+              BLOCK_REG_PADDING (GET_MODE (orig_dst), type, i == start)
+              == (BYTES_BIG_ENDIAN ? upward : downward)
+#else
+              BYTES_BIG_ENDIAN
+#endif
+              )
             {
               int shift = (bytelen - (ssize - bytepos)) * BITS_PER_UNIT;
               expand_binop (mode, ashr_optab, tmps[i], GEN_INT (shift),
                             tmps[i], 0, OPTAB_WIDEN);
@@ -2495,7 +2110,8 @@ emit_group_store (orig_dst, src, ssize)

       /* Optimize the access just a bit.  */
       if (GET_CODE (dest) == MEM
-          && MEM_ALIGN (dest) >= GET_MODE_ALIGNMENT (mode)
+          && (! SLOW_UNALIGNED_ACCESS (mode, MEM_ALIGN (dest))
+              || MEM_ALIGN (dest) >= GET_MODE_ALIGNMENT (mode))
           && bytepos * BITS_PER_UNIT % GET_MODE_ALIGNMENT (mode) == 0
           && bytelen == GET_MODE_SIZE (mode))
         emit_move_insn (adjust_address (dest, mode, bytepos), tmps[i]);
@@ -2515,21 +2131,18 @@ emit_group_store (orig_dst, src, ssize)
    set of registers starting with SRCREG into TGTBLK.  If TGTBLK
    is null, a stack temporary is created.  TGTBLK is returned.

-   The primary purpose of this routine is to handle functions
-   that return BLKmode structures in registers.  Some machines
-   (the PA for example) want to return all small structures
-   in registers regardless of the structure's alignment.  */
+   The purpose of this routine is to handle functions that return
+   BLKmode structures in registers.  Some machines (the PA for example)
+   want to return all small structures in registers regardless of the
+   structure's alignment.  */

 rtx
-copy_blkmode_from_reg (tgtblk, srcreg, type)
-     rtx tgtblk;
-     rtx srcreg;
-     tree type;
+copy_blkmode_from_reg (rtx tgtblk, rtx srcreg, tree type)
 {
   unsigned HOST_WIDE_INT bytes = int_size_in_bytes (type);
   rtx src = NULL, dst = NULL;
   unsigned HOST_WIDE_INT bitsize = MIN (TYPE_ALIGN (type), BITS_PER_WORD);
-  unsigned HOST_WIDE_INT bitpos, xbitpos, big_endian_correction = 0;
+  unsigned HOST_WIDE_INT bitpos, xbitpos, padding_correction = 0;

   if (tgtblk == 0)
     {
@@ -2547,13 +2160,20 @@ copy_blkmode_from_reg (tgtblk, srcreg, type)
       && GET_MODE_SIZE (GET_MODE (srcreg)) < UNITS_PER_WORD)
     srcreg = convert_to_mode (word_mode, srcreg, TREE_UNSIGNED (type));

-  /* Structures whose size is not a multiple of a word are aligned
-     to the least significant byte (to the right).  On a BYTES_BIG_ENDIAN
-     machine, this means we must skip the empty high order bytes when
-     calculating the bit offset.  */
-  if (BYTES_BIG_ENDIAN
-      && bytes % UNITS_PER_WORD)
-    big_endian_correction
+  /* If the structure doesn't take up a whole number of words, see whether
+     SRCREG is padded on the left or on the right.  If it's on the left,
+     set PADDING_CORRECTION to the number of bits to skip.
+
+     In most ABIs, the structure will be returned at the least end of
+     the register, which translates to right padding on little-endian
+     targets and left padding on big-endian targets.  The opposite
+     holds if the structure is returned at the most significant
+     end of the register.  */
+  if (bytes % UNITS_PER_WORD != 0
+      && (targetm.calls.return_in_msb (type)
+          ? !BYTES_BIG_ENDIAN
+          : BYTES_BIG_ENDIAN))
+    padding_correction
       = (BITS_PER_WORD - ((bytes % UNITS_PER_WORD) * BITS_PER_UNIT));

   /* Copy the structure BITSIZE bits at a time.

      We could probably emit more efficient code for machines which do not use
      strict alignment, but it doesn't seem worth the effort at the current
      time.  */
-  for (bitpos = 0, xbitpos = big_endian_correction;
+  for (bitpos = 0, xbitpos = padding_correction;
        bitpos < bytes * BITS_PER_UNIT;
        bitpos += bitsize, xbitpos += bitsize)
     {
       /* We need a new source operand each time xbitpos is on a
-         word boundary and when xbitpos == big_endian_correction
+         word boundary and when xbitpos == padding_correction
          (the first time through).  */
       if (xbitpos % BITS_PER_WORD == 0
-          || xbitpos == big_endian_correction)
+          || xbitpos == padding_correction)
         src = operand_subword_force (srcreg, xbitpos / BITS_PER_WORD,
                                      GET_MODE (srcreg));
@@ -2595,8 +2215,7 @@ copy_blkmode_from_reg (tgtblk, srcreg, type)
    to by CALL_FUSAGE.  REG must denote a hard register.  */

 void
-use_reg (call_fusage, reg)
-     rtx *call_fusage, reg;
+use_reg (rtx *call_fusage, rtx reg)
 {
   if (GET_CODE (reg) != REG
       || REGNO (reg) >= FIRST_PSEUDO_REGISTER)
@@ -2611,10 +2230,7 @@ use_reg (call_fusage, reg)
    starting at REGNO.  All of these registers must be hard registers.  */

 void
-use_regs (call_fusage, regno, nregs)
-     rtx *call_fusage;
-     int regno;
-     int nregs;
+use_regs (rtx *call_fusage, int regno, int nregs)
 {
   int i;

@@ -2630,9 +2246,7 @@ use_regs (call_fusage, regno, nregs)
    non-contiguous locations.  The Irix 6 ABI has examples of this.  */

 void
-use_group_regs (call_fusage, regs)
-     rtx *call_fusage;
-     rtx regs;
+use_group_regs (rtx *call_fusage, rtx regs)
 {
   int i;

@@ -2656,11 +2270,9 @@ use_group_regs (call_fusage, regs)
    call to store_by_pieces should succeed.  */

 int
-can_store_by_pieces (len, constfun, constfundata, align)
-     unsigned HOST_WIDE_INT len;
-     rtx (*constfun) PARAMS ((PTR, HOST_WIDE_INT, enum machine_mode));
-     PTR constfundata;
-     unsigned int align;
+can_store_by_pieces (unsigned HOST_WIDE_INT len,
+                     rtx (*constfun) (void *, HOST_WIDE_INT, enum machine_mode),
+                     void *constfundata, unsigned int align)
 {
   unsigned HOST_WIDE_INT max_size, l;
   HOST_WIDE_INT offset = 0;
@@ -2672,7 +2284,7 @@ can_store_by_pieces (len, constfun, constfundata, align)
   if (len == 0)
     return 1;

-  if (! MOVE_BY_PIECES_P (len, align))
+  if (! STORE_BY_PIECES_P (len, align))
     return 0;

   if (! SLOW_UNALIGNED_ACCESS (word_mode, align)
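[can_store_by_pieces and store_by_pieces share a callback protocol: CONSTFUN must return, for whatever OFFSET and MODE the expander chooses, an rtx holding the constant to store; clear_by_pieces_1 further down is the trivial all-zeros instance. A hypothetical call site (byte_constfun, byte_val and dest_mem are assumed names, not GCC source):

    if (can_store_by_pieces (len, byte_constfun, &byte_val, align))
      store_by_pieces (dest_mem, len, byte_constfun, &byte_val, align, 0);
    /* else fall back to a memset libcall or a clrstr expansion.  */
]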
@@ -2735,22 +2347,26 @@ can_store_by_pieces (len, constfun, constfundata, align)
 /* Generate several move instructions to store LEN bytes generated by
    CONSTFUN to block TO.  (A MEM rtx with BLKmode).  CONSTFUNDATA is a
    pointer which will be passed as argument in every CONSTFUN call.
-   ALIGN is maximum alignment we can assume.  */
+   ALIGN is maximum alignment we can assume.
+   If ENDP is 0 return to, if ENDP is 1 return memory at the end ala
+   mempcpy, and if ENDP is 2 return memory the end minus one byte ala
+   stpcpy.  */

-void
-store_by_pieces (to, len, constfun, constfundata, align)
-     rtx to;
-     unsigned HOST_WIDE_INT len;
-     rtx (*constfun) PARAMS ((PTR, HOST_WIDE_INT, enum machine_mode));
-     PTR constfundata;
-     unsigned int align;
+rtx
+store_by_pieces (rtx to, unsigned HOST_WIDE_INT len,
+                 rtx (*constfun) (void *, HOST_WIDE_INT, enum machine_mode),
+                 void *constfundata, unsigned int align, int endp)
 {
   struct store_by_pieces data;

   if (len == 0)
-    return;
+    {
+      if (endp == 2)
+        abort ();
+      return to;
+    }

-  if (! MOVE_BY_PIECES_P (len, align))
+  if (! STORE_BY_PIECES_P (len, align))
     abort ();
   to = protect_from_queue (to, 1);
   data.constfun = constfun;
@@ -2758,6 +2374,35 @@ store_by_pieces (to, len, constfun, constfundata, align)
   data.len = len;
   data.to = to;
   store_by_pieces_1 (&data, align);
+  if (endp)
+    {
+      rtx to1;
+
+      if (data.reverse)
+        abort ();
+      if (data.autinc_to)
+        {
+          if (endp == 2)
+            {
+              if (HAVE_POST_INCREMENT && data.explicit_inc_to > 0)
+                emit_insn (gen_add2_insn (data.to_addr, constm1_rtx));
+              else
+                data.to_addr = copy_addr_to_reg (plus_constant (data.to_addr,
+                                                                -1));
+            }
+          to1 = adjust_automodify_address (data.to, QImode, data.to_addr,
+                                           data.offset);
+        }
+      else
+        {
+          if (endp == 2)
+            --data.offset;
+          to1 = adjust_address (data.to, QImode, data.offset);
+        }
+      return to1;
+    }
+  else
+    return data.to;
 }

 /* Generate several move instructions to clear LEN bytes of block TO.  (A MEM
@@ -2765,10 +2410,7 @@ store_by_pieces (to, len, constfun, constfundata, align)
    before calling.  ALIGN is maximum alignment we can assume.  */

 static void
-clear_by_pieces (to, len, align)
-     rtx to;
-     unsigned HOST_WIDE_INT len;
-     unsigned int align;
+clear_by_pieces (rtx to, unsigned HOST_WIDE_INT len, unsigned int align)
 {
   struct store_by_pieces data;

@@ -2786,10 +2428,9 @@ clear_by_pieces (to, len, align)
    Return const0_rtx unconditionally.  */

 static rtx
-clear_by_pieces_1 (data, offset, mode)
-     PTR data ATTRIBUTE_UNUSED;
-     HOST_WIDE_INT offset ATTRIBUTE_UNUSED;
-     enum machine_mode mode ATTRIBUTE_UNUSED;
+clear_by_pieces_1 (void *data ATTRIBUTE_UNUSED,
+                   HOST_WIDE_INT offset ATTRIBUTE_UNUSED,
+                   enum machine_mode mode ATTRIBUTE_UNUSED)
 {
   return const0_rtx;
 }
@@ -2800,9 +2441,8 @@ clear_by_pieces_1 (data, offset, mode)
    before calling.  ALIGN is maximum alignment we can assume.  */

 static void
-store_by_pieces_1 (data, align)
-     struct store_by_pieces *data;
-     unsigned int align;
+store_by_pieces_1 (struct store_by_pieces *data ATTRIBUTE_UNUSED,
+                   unsigned int align ATTRIBUTE_UNUSED)
 {
   rtx to_addr = XEXP (data->to, 0);
   unsigned HOST_WIDE_INT max_size = STORE_MAX_PIECES + 1;
@@ -2886,10 +2526,8 @@ store_by_pieces_1 (data, align)
    to make a move insn for that mode.  DATA has all the other info.  */

 static void
-store_by_pieces_2 (genfun, mode, data)
-     rtx (*genfun) PARAMS ((rtx, ...));
-     enum machine_mode mode;
-     struct store_by_pieces *data;
+store_by_pieces_2 (rtx (*genfun) (rtx, ...), enum machine_mode mode,
+                   struct store_by_pieces *data)
 {
   unsigned int size = GET_MODE_SIZE (mode);
   rtx to1, cst;
@@ -2926,9 +2564,7 @@ store_by_pieces_2 (genfun, mode, data)
   its length in bytes.  */

 rtx
-clear_storage (object, size)
-     rtx object;
-     rtx size;
+clear_storage (rtx object, rtx size)
 {
   rtx retval = 0;
   unsigned int align = (GET_CODE (object) == MEM ? MEM_ALIGN (object)
@@ -2945,7 +2581,7 @@ clear_storage (object, size)
   object = protect_from_queue (object, 1);
   size = protect_from_queue (size, 0);

-  if (GET_CODE (size) == CONST_INT && INTVAL (size) == 0)
+  if (size == const0_rtx)
     ;
   else if (GET_CODE (size) == CONST_INT
            && CLEAR_BY_PIECES_P (INTVAL (size), align))
@@ -2963,9 +2599,7 @@ clear_storage (object, size)
    return true if successful.  */

 static bool
-clear_storage_via_clrstr (object, size, align)
-     rtx object, size;
-     unsigned int align;
+clear_storage_via_clrstr (rtx object, rtx size, unsigned int align)
 {
   /* Try the most limited insn first, because there's no point
      including more than one in the machine description unless
@@ -3021,8 +2655,7 @@ clear_storage_via_clrstr (object, size, align)
    Return the return value of memset, 0 otherwise.  */
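[The libcall fallback just mentioned goes through the function that init_block_clear_fn builds below; as with the block-move path, which symbol is used depends on TARGET_MEM_FUNCTIONS. In libc terms the two conventions amount to the following (bzero shown as the BSD-style counterpart by analogy with the bcopy remark earlier; an assumption):

    memset (object, 0, size);   /* TARGET_MEM_FUNCTIONS targets */
    bzero  (object, size);      /* BSD-style targets            */
]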
*/ static rtx -clear_storage_via_libcall (object, size) - rtx object, size; +clear_storage_via_libcall (rtx object, rtx size) { tree call_expr, arg_list, fn, object_tree, size_tree; enum machine_mode size_mode; @@ -3082,7 +2715,6 @@ clear_storage_via_libcall (object, size) call_expr = build1 (ADDR_EXPR, build_pointer_type (TREE_TYPE (fn)), fn); call_expr = build (CALL_EXPR, TREE_TYPE (TREE_TYPE (fn)), call_expr, arg_list, NULL_TREE); - TREE_SIDE_EFFECTS (call_expr) = 1; retval = expand_expr (call_expr, NULL_RTX, VOIDmode, 0); @@ -3101,15 +2733,13 @@ clear_storage_via_libcall (object, size) static GTY(()) tree block_clear_fn; -static tree -clear_storage_libcall_fn (for_call) - int for_call; +void +init_block_clear_fn (const char *asmspec) { - static bool emitted_extern; - tree fn = block_clear_fn, args; - - if (!fn) + if (!block_clear_fn) { + tree fn, args; + if (TARGET_MEM_FUNCTIONS) { fn = get_identifier ("memset"); @@ -3133,14 +2763,29 @@ clear_storage_libcall_fn (for_call) block_clear_fn = fn; } + if (asmspec) + { + SET_DECL_RTL (block_clear_fn, NULL_RTX); + SET_DECL_ASSEMBLER_NAME (block_clear_fn, get_identifier (asmspec)); + } +} + +static tree +clear_storage_libcall_fn (int for_call) +{ + static bool emitted_extern; + + if (!block_clear_fn) + init_block_clear_fn (NULL); + if (for_call && !emitted_extern) { emitted_extern = true; - make_decl_rtl (fn, NULL); - assemble_external (fn); + make_decl_rtl (block_clear_fn, NULL); + assemble_external (block_clear_fn); } - return fn; + return block_clear_fn; } /* Generate code to copy Y into X. @@ -3151,12 +2796,11 @@ clear_storage_libcall_fn (for_call) Return the last instruction emitted. */ rtx -emit_move_insn (x, y) - rtx x, y; +emit_move_insn (rtx x, rtx y) { enum machine_mode mode = GET_MODE (x); rtx y_cst = NULL_RTX; - rtx last_insn; + rtx last_insn, set; x = protect_from_queue (x, 1); y = protect_from_queue (y, 0); @@ -3174,9 +2818,10 @@ emit_move_insn (x, y) && (last_insn = compress_float_constant (x, y))) return last_insn; + y_cst = y; + if (!LEGITIMATE_CONSTANT_P (y)) { - y_cst = y; y = force_const_mem (mode, y); /* If the target's cannot_force_const_mem prevented the spill, @@ -3207,7 +2852,10 @@ emit_move_insn (x, y) last_insn = emit_move_insn_1 (x, y); - if (y_cst && GET_CODE (x) == REG) + if (y_cst && GET_CODE (x) == REG + && (set = single_set (last_insn)) != NULL_RTX + && SET_DEST (set) == x + && ! rtx_equal_p (y_cst, SET_SRC (set))) set_unique_reg_note (last_insn, REG_EQUAL, y_cst); return last_insn; @@ -3218,8 +2866,7 @@ emit_move_insn (x, y) are basically valid. */ rtx -emit_move_insn_1 (x, y) - rtx x, y; +emit_move_insn_1 (rtx x, rtx y) { enum machine_mode mode = GET_MODE (x); enum machine_mode submode; @@ -3242,8 +2889,8 @@ emit_move_insn_1 (x, y) int stack = push_operand (x, GET_MODE (x)); #ifdef PUSH_ROUNDING - /* In case we output to the stack, but the size is smaller machine can - push exactly, we need to use move instructions. */ + /* In case we output to the stack, but the size is smaller than the + machine can push exactly, we need to use move instructions. */ if (stack && (PUSH_ROUNDING (GET_MODE_SIZE (submode)) != GET_MODE_SIZE (submode))) @@ -3300,19 +2947,15 @@ emit_move_insn_1 (x, y) /* Note that the real part always precedes the imag part in memory regardless of machine's endianness. 
*/ #ifdef STACK_GROWS_DOWNWARD - emit_insn (GEN_FCN (mov_optab->handlers[(int) submode].insn_code) - (gen_rtx_MEM (submode, XEXP (x, 0)), - gen_imagpart (submode, y))); - emit_insn (GEN_FCN (mov_optab->handlers[(int) submode].insn_code) - (gen_rtx_MEM (submode, XEXP (x, 0)), - gen_realpart (submode, y))); + emit_move_insn (gen_rtx_MEM (submode, XEXP (x, 0)), + gen_imagpart (submode, y)); + emit_move_insn (gen_rtx_MEM (submode, XEXP (x, 0)), + gen_realpart (submode, y)); #else - emit_insn (GEN_FCN (mov_optab->handlers[(int) submode].insn_code) - (gen_rtx_MEM (submode, XEXP (x, 0)), - gen_realpart (submode, y))); - emit_insn (GEN_FCN (mov_optab->handlers[(int) submode].insn_code) - (gen_rtx_MEM (submode, XEXP (x, 0)), - gen_imagpart (submode, y))); + emit_move_insn (gen_rtx_MEM (submode, XEXP (x, 0)), + gen_realpart (submode, y)); + emit_move_insn (gen_rtx_MEM (submode, XEXP (x, 0)), + gen_imagpart (submode, y)); #endif } else @@ -3387,15 +3030,79 @@ emit_move_insn_1 (x, y) || GET_CODE (imagpart_x) == SUBREG)) emit_insn (gen_rtx_CLOBBER (VOIDmode, x)); - emit_insn (GEN_FCN (mov_optab->handlers[(int) submode].insn_code) - (realpart_x, realpart_y)); - emit_insn (GEN_FCN (mov_optab->handlers[(int) submode].insn_code) - (imagpart_x, imagpart_y)); + emit_move_insn (realpart_x, realpart_y); + emit_move_insn (imagpart_x, imagpart_y); } return get_last_insn (); } + /* Handle MODE_CC modes: If we don't have a special move insn for this mode, + find a mode to do it in. If we have a movcc, use it. Otherwise, + find the MODE_INT mode of the same width. */ + else if (GET_MODE_CLASS (mode) == MODE_CC + && mov_optab->handlers[(int) mode].insn_code == CODE_FOR_nothing) + { + enum insn_code insn_code; + enum machine_mode tmode = VOIDmode; + rtx x1 = x, y1 = y; + + if (mode != CCmode + && mov_optab->handlers[(int) CCmode].insn_code != CODE_FOR_nothing) + tmode = CCmode; + else + for (tmode = QImode; tmode != VOIDmode; + tmode = GET_MODE_WIDER_MODE (tmode)) + if (GET_MODE_SIZE (tmode) == GET_MODE_SIZE (mode)) + break; + + if (tmode == VOIDmode) + abort (); + + /* Get X and Y in TMODE. We can't use gen_lowpart here because it + may call change_address which is not appropriate if we were + called when a reload was in progress. We don't have to worry + about changing the address since the size in bytes is supposed to + be the same. Copy the MEM to change the mode and move any + substitutions from the old MEM to the new one. */ + + if (reload_in_progress) + { + x = gen_lowpart_common (tmode, x1); + if (x == 0 && GET_CODE (x1) == MEM) + { + x = adjust_address_nv (x1, tmode, 0); + copy_replacements (x1, x); + } + + y = gen_lowpart_common (tmode, y1); + if (y == 0 && GET_CODE (y1) == MEM) + { + y = adjust_address_nv (y1, tmode, 0); + copy_replacements (y1, y); + } + } + else + { + x = gen_lowpart (tmode, x); + y = gen_lowpart (tmode, y); + } + + insn_code = mov_optab->handlers[(int) tmode].insn_code; + return emit_insn (GEN_FCN (insn_code) (x, y)); + } + + /* Try using a move pattern for the corresponding integer mode. This is + only safe when simplify_subreg can convert MODE constants into integer + constants. At present, it can only do this reliably if the value + fits within a HOST_WIDE_INT. 
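The MODE_CC fallback above copes with targets that have no move pattern for a condition-code mode by searching for an integer mode of the same byte size and moving through that. A toy version of the size-matching search, with an assumed size table standing in for the machine-mode enumeration:

static int mode_sizes[] = { 1, 2, 4, 8, 16 };   /* QI..TI byte sizes, illustrative */

static int
same_size_int_mode (int size)
{
  /* Mirrors the GET_MODE_WIDER_MODE walk that stops at an equal
     GET_MODE_SIZE.  The index plays the role of the mode.  */
  for (unsigned int i = 0; i < sizeof mode_sizes / sizeof *mode_sizes; i++)
    if (mode_sizes[i] == size)
      return i;
  return -1;               /* the real code aborts in this case */
}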
*/ + else if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT + && (submode = int_mode_for_mode (mode)) != BLKmode + && mov_optab->handlers[submode].insn_code != CODE_FOR_nothing) + return emit_insn (GEN_FCN (mov_optab->handlers[submode].insn_code) + (simplify_gen_subreg (submode, x, mode, 0), + simplify_gen_subreg (submode, y, mode, 0))); + /* This will handle any multi-word or full-word mode that lacks a move_insn pattern. However, you will get better code if you define such patterns, even if they must turn into multiple assembler instructions. */ @@ -3512,8 +3219,7 @@ emit_move_insn_1 (x, y) move as an extension. */ static rtx -compress_float_constant (x, y) - rtx x, y; +compress_float_constant (rtx x, rtx y) { enum machine_mode dstmode = GET_MODE (x); enum machine_mode orig_srcmode = GET_MODE (y); @@ -3556,8 +3262,7 @@ compress_float_constant (x, y) last_insn = get_last_insn (); if (GET_CODE (x) == REG) - REG_NOTES (last_insn) - = gen_rtx_EXPR_LIST (REG_EQUAL, y, REG_NOTES (last_insn)); + set_unique_reg_note (last_insn, REG_EQUAL, y); return last_insn; } @@ -3577,9 +3282,7 @@ compress_float_constant (x, y) otherwise, the padding comes at high addresses. */ rtx -push_block (size, extra, below) - rtx size; - int extra, below; +push_block (rtx size, int extra, int below) { rtx temp; @@ -3628,10 +3331,7 @@ push_block (size, extra, below) /* Emit single push insn. */ static void -emit_single_push_insn (mode, x, type) - rtx x; - enum machine_mode mode; - tree type; +emit_single_push_insn (enum machine_mode mode, rtx x, tree type) { rtx dest_addr; unsigned rounded_size = PUSH_ROUNDING (GET_MODE_SIZE (mode)); @@ -3653,12 +3353,48 @@ emit_single_push_insn (mode, x, type) } if (GET_MODE_SIZE (mode) == rounded_size) dest_addr = gen_rtx_fmt_e (STACK_PUSH_CODE, Pmode, stack_pointer_rtx); + /* If we are to pad downward, adjust the stack pointer first and + then store X into the stack location using an offset. This is + because emit_move_insn does not know how to pad; it does not have + access to type. */ + else if (FUNCTION_ARG_PADDING (mode, type) == downward) + { + unsigned padding_size = rounded_size - GET_MODE_SIZE (mode); + HOST_WIDE_INT offset; + + emit_move_insn (stack_pointer_rtx, + expand_binop (Pmode, +#ifdef STACK_GROWS_DOWNWARD + sub_optab, +#else + add_optab, +#endif + stack_pointer_rtx, + GEN_INT (rounded_size), + NULL_RTX, 0, OPTAB_LIB_WIDEN)); + + offset = (HOST_WIDE_INT) padding_size; +#ifdef STACK_GROWS_DOWNWARD + if (STACK_PUSH_CODE == POST_DEC) + /* We have already decremented the stack pointer, so get the + previous value. */ + offset += (HOST_WIDE_INT) rounded_size; +#else + if (STACK_PUSH_CODE == POST_INC) + /* We have already incremented the stack pointer, so get the + previous value. */ + offset -= (HOST_WIDE_INT) rounded_size; +#endif + dest_addr = gen_rtx_PLUS (Pmode, stack_pointer_rtx, GEN_INT (offset)); + } else { #ifdef STACK_GROWS_DOWNWARD + /* ??? This seems wrong if STACK_PUSH_CODE == POST_DEC. */ dest_addr = gen_rtx_PLUS (Pmode, stack_pointer_rtx, GEN_INT (-(HOST_WIDE_INT) rounded_size)); #else + /* ??? This seems wrong if STACK_PUSH_CODE == POST_INC. */ dest_addr = gen_rtx_PLUS (Pmode, stack_pointer_rtx, GEN_INT (rounded_size)); #endif @@ -3715,21 +3451,10 @@ emit_single_push_insn (mode, x, type) of bytes required. 
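A compact model of the offset arithmetic in the new downward-padding branch of emit_single_push_insn. The parameter names are local stand-ins: stack_grows_down mirrors STACK_GROWS_DOWNWARD, and post_modify mirrors STACK_PUSH_CODE being POST_DEC or POST_INC.

static long
pad_down_offset (long mode_size, long rounded_size,
                 int stack_grows_down, int post_modify)
{
  long offset = rounded_size - mode_size;   /* the padding sits below the value */

  /* If the push code already moved the stack pointer past the slot,
     the store has to reach back across the whole rounded size.  */
  if (post_modify)
    offset = stack_grows_down ? offset + rounded_size
                              : offset - rounded_size;
  return offset;
}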
*/ void -emit_push_insn (x, mode, type, size, align, partial, reg, extra, - args_addr, args_so_far, reg_parm_stack_space, - alignment_pad) - rtx x; - enum machine_mode mode; - tree type; - rtx size; - unsigned int align; - int partial; - rtx reg; - int extra; - rtx args_addr; - rtx args_so_far; - int reg_parm_stack_space; - rtx alignment_pad; +emit_push_insn (rtx x, enum machine_mode mode, tree type, rtx size, + unsigned int align, int partial, rtx reg, int extra, + rtx args_addr, rtx args_so_far, int reg_parm_stack_space, + rtx alignment_pad) { rtx xinner; enum direction stack_direction @@ -3758,9 +3483,19 @@ emit_push_insn (x, mode, type, size, align, partial, reg, extra, rtx temp; int used = partial * UNITS_PER_WORD; - int offset = used % (PARM_BOUNDARY / BITS_PER_UNIT); + int offset; int skip; + if (reg && GET_CODE (reg) == PARALLEL) + { + /* Use the size of the elt to compute offset. */ + rtx elt = XEXP (XVECEXP (reg, 0, 0), 0); + used = partial * GET_MODE_SIZE (GET_MODE (elt)); + offset = used % (PARM_BOUNDARY / BITS_PER_UNIT); + } + else + offset = used % (PARM_BOUNDARY / BITS_PER_UNIT); + if (size == 0) abort (); @@ -3786,6 +3521,7 @@ emit_push_insn (x, mode, type, size, align, partial, reg, extra, && PUSH_ARGS && GET_CODE (size) == CONST_INT && skip == 0 + && MEM_ALIGN (xinner) >= align && (MOVE_BY_PIECES_P ((unsigned) INTVAL (size) - used, align)) /* Here we avoid the case of a structure whose weak alignment forces many pushes of a small amount of data, @@ -3803,7 +3539,7 @@ emit_push_insn (x, mode, type, size, align, partial, reg, extra, && where_pad != none && where_pad != stack_direction) anti_adjust_stack (GEN_INT (extra)); - move_by_pieces (NULL, xinner, INTVAL (size) - used, align); + move_by_pieces (NULL, xinner, INTVAL (size) - used, align, 0); } else #endif /* PUSH_ROUNDING */ @@ -3937,7 +3673,6 @@ emit_push_insn (x, mode, type, size, align, partial, reg, extra, else { rtx addr; - rtx target = NULL_RTX; rtx dest; /* Push padding now if padding above and stack grows down, @@ -3961,7 +3696,6 @@ emit_push_insn (x, mode, type, size, align, partial, reg, extra, else addr = memory_address (mode, gen_rtx_PLUS (Pmode, args_addr, args_so_far)); - target = addr; dest = gen_rtx_MEM (mode, addr); if (type != 0) { @@ -3985,7 +3719,7 @@ emit_push_insn (x, mode, type, size, align, partial, reg, extra, /* Handle calls that pass values in multiple non-contiguous locations. The Irix 6 ABI has examples of this. */ if (GET_CODE (reg) == PARALLEL) - emit_group_load (reg, x, -1); /* ??? size? */ + emit_group_load (reg, x, type, -1); else move_block_to_reg (REGNO (reg), x, partial, mode); } @@ -4001,8 +3735,7 @@ emit_push_insn (x, mode, type, size, align, partial, reg, extra, operations. */ static rtx -get_subtarget (x) - rtx x; +get_subtarget (rtx x) { return ((x == 0 /* Only registers can be subtargets. */ @@ -4021,18 +3754,10 @@ get_subtarget (x) If WANT_VALUE is nonzero, return an rtx for the value of TO. (This may contain a QUEUED rtx; if the value is constant, this rtx is a constant.) - Otherwise, the returned value is NULL_RTX. - - SUGGEST_REG is no longer actually used. - It used to mean, copy the value through a register - and return that register, if that is possible. - We now use WANT_VALUE to decide whether to do this. */ + Otherwise, the returned value is NULL_RTX. 
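A sketch of the revised partial-in-registers bookkeeping in emit_push_insn: for a PARALLEL, the element size rather than UNITS_PER_WORD decides how many bytes the registers already cover. The constants are illustrative stand-ins for UNITS_PER_WORD and PARM_BOUNDARY on a hypothetical 64-bit target.

static int
partial_stack_offset (int partial, int elt_size /* 0 for a plain REG */)
{
  const int units_per_word = 8;        /* UNITS_PER_WORD, assumed */
  const int parm_boundary_bytes = 8;   /* PARM_BOUNDARY / BITS_PER_UNIT, assumed */
  int used = partial * (elt_size ? elt_size : units_per_word);
  return used % parm_boundary_bytes;
}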
*/ rtx -expand_assignment (to, from, want_value, suggest_reg) - tree to, from; - int want_value; - int suggest_reg ATTRIBUTE_UNUSED; +expand_assignment (tree to, tree from, int want_value) { rtx to_rtx = 0; rtx result; @@ -4105,8 +3830,8 @@ expand_assignment (to, from, want_value, suggest_reg) } to_rtx = offset_address (to_rtx, offset_rtx, - highest_pow2_factor_for_type (TREE_TYPE (to), - offset)); + highest_pow2_factor_for_target (to, + offset)); } if (GET_CODE (to_rtx) == MEM) @@ -4128,7 +3853,11 @@ expand_assignment (to, from, want_value, suggest_reg) } if (TREE_CODE (to) == COMPONENT_REF - && TREE_READONLY (TREE_OPERAND (to, 1))) + && TREE_READONLY (TREE_OPERAND (to, 1)) + /* We can't assert that a MEM won't be set more than once + if the component is not addressable because another + non-addressable component may be referenced by the same MEM. */ + && ! (GET_CODE (to_rtx) == MEM && ! can_address_p (to))) { if (to_rtx == orig_to_rtx) to_rtx = copy_rtx (to_rtx); @@ -4173,7 +3902,7 @@ expand_assignment (to, from, want_value, suggest_reg) since it might be a promoted variable where the zero- or sign- extension needs to be done. Handling this in the normal way is safe because no computation is done before the call. */ - if (TREE_CODE (from) == CALL_EXPR && ! aggregate_value_p (from) + if (TREE_CODE (from) == CALL_EXPR && ! aggregate_value_p (from, from) && TREE_CODE (TYPE_SIZE (TREE_TYPE (from))) == INTEGER_CST && ! ((TREE_CODE (to) == VAR_DECL || TREE_CODE (to) == PARM_DECL) && GET_CODE (DECL_RTL (to)) == REG)) @@ -4188,16 +3917,14 @@ expand_assignment (to, from, want_value, suggest_reg) /* Handle calls that return values in multiple non-contiguous locations. The Irix 6 ABI has examples of this. */ if (GET_CODE (to_rtx) == PARALLEL) - emit_group_load (to_rtx, value, int_size_in_bytes (TREE_TYPE (from))); + emit_group_load (to_rtx, value, TREE_TYPE (from), + int_size_in_bytes (TREE_TYPE (from))); else if (GET_MODE (to_rtx) == BLKmode) emit_block_move (to_rtx, value, expr_size (from), BLOCK_OP_NORMAL); else { -#ifdef POINTERS_EXTEND_UNSIGNED - if (POINTER_TYPE_P (TREE_TYPE (to)) - && GET_MODE (to_rtx) != GET_MODE (value)) + if (POINTER_TYPE_P (TREE_TYPE (to))) value = convert_memory_address (GET_MODE (to_rtx), value); -#endif emit_move_insn (to_rtx, value); } preserve_temp_slots (to_rtx); @@ -4222,7 +3949,8 @@ expand_assignment (to, from, want_value, suggest_reg) temp = expand_expr (from, 0, GET_MODE (to_rtx), 0); if (GET_CODE (to_rtx) == PARALLEL) - emit_group_load (to_rtx, temp, int_size_in_bytes (TREE_TYPE (from))); + emit_group_load (to_rtx, temp, TREE_TYPE (from), + int_size_in_bytes (TREE_TYPE (from))); else emit_move_insn (to_rtx, temp); @@ -4304,12 +4032,11 @@ expand_assignment (to, from, want_value, suggest_reg) stack, and block moves may need to be treated specially. */ rtx -store_expr (exp, target, want_value) - tree exp; - rtx target; - int want_value; +store_expr (tree exp, rtx target, int want_value) { rtx temp; + rtx alt_rtl = NULL_RTX; + rtx mark = mark_queue (); int dont_return_target = 0; int dont_store_target = 0; @@ -4317,7 +4044,7 @@ store_expr (exp, target, want_value) { /* C++ can generate ?: expressions with a throw expression in one branch and an rvalue in the other. Here, we resolve attempts to - store the throw expression's nonexistant result. */ + store the throw expression's nonexistent result. 
*/ if (want_value) abort (); expand_expr (exp, const0_rtx, VOIDmode, 0); @@ -4446,12 +4173,12 @@ store_expr (exp, target, want_value) temp = expand_expr (exp, inner_target, VOIDmode, want_value & 2 ? EXPAND_STACK_PARM : EXPAND_NORMAL); - /* If TEMP is a volatile MEM and we want a result value, make - the access now so it gets done only once. Likewise if - it contains TARGET. */ - if (GET_CODE (temp) == MEM && (want_value & 1) != 0 - && (MEM_VOLATILE_P (temp) - || reg_mentioned_p (SUBREG_REG (target), XEXP (temp, 0)))) + /* If TEMP is a MEM and we want a result value, make the access + now so it gets done only once. Strictly speaking, this is + only necessary if the MEM is volatile, or if the address + overlaps TARGET. But not performing the load twice also + reduces the amount of rtl we generate and then have to CSE. */ + if (GET_CODE (temp) == MEM && (want_value & 1) != 0) temp = copy_to_reg (temp); /* If TEMP is a VOIDmode constant, use convert_modes to make @@ -4491,8 +4218,10 @@ store_expr (exp, target, want_value) } else { - temp = expand_expr (exp, target, GET_MODE (target), - want_value & 2 ? EXPAND_STACK_PARM : EXPAND_NORMAL); + temp = expand_expr_real (exp, target, GET_MODE (target), + (want_value & 2 + ? EXPAND_STACK_PARM : EXPAND_NORMAL), + &alt_rtl); /* Return TARGET if it's a specified hardware register. If TARGET is a volatile mem ref, either return TARGET or return a reg copied *from* TARGET; ANSI requires this. @@ -4519,7 +4248,11 @@ store_expr (exp, target, want_value) temp, TREE_UNSIGNED (TREE_TYPE (exp))); /* If value was not generated in the target, store it there. - Convert the value to TARGET's type first if necessary. + Convert the value to TARGET's type first if necessary and emit the + pending incrementations that have been queued when expanding EXP. + Note that we cannot emit the whole queue blindly because this will + effectively disable the POST_INC optimization later. + If TEMP and TARGET compare equal according to rtx_equal_p, but one or both of them are volatile memory refs, we have to distinguish two cases: @@ -4537,18 +4270,19 @@ store_expr (exp, target, want_value) || side_effects_p (target)))) && TREE_CODE (exp) != ERROR_MARK && ! dont_store_target - /* If store_expr stores a DECL whose DECL_RTL(exp) == TARGET, - but TARGET is not valid memory reference, TEMP will differ - from TARGET although it is really the same location. */ - && (TREE_CODE_CLASS (TREE_CODE (exp)) != 'd' - || target != DECL_RTL_IF_SET (exp)) + /* If store_expr stores a DECL whose DECL_RTL(exp) == TARGET, + but TARGET is not valid memory reference, TEMP will differ + from TARGET although it is really the same location. */ + && !(alt_rtl && rtx_equal_p (alt_rtl, target)) /* If there's nothing to copy, don't bother. Don't call expr_size unless necessary, because some front-ends (C++) expr_size-hook aborts on objects that are not supposed to be bit-copied or bit-initialized. */ && expr_size (exp) != const0_rtx) { + emit_insns_enqueued_after_mark (mark); target = protect_from_queue (target, 1); + temp = protect_from_queue (temp, 0); if (GET_MODE (temp) != GET_MODE (target) && GET_MODE (temp) != VOIDmode) { @@ -4635,7 +4369,8 @@ store_expr (exp, target, want_value) /* Handle calls that return values in multiple non-contiguous locations. The Irix 6 ABI has examples of this. 
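The mark_queue / emit_insns_enqueued_after_mark pairing introduced above flushes only the increments queued while expanding EXP, so entries queued earlier keep their chance of becoming POST_INC addressing. A generic model of flushing a queue back to a mark, with toy types rather than GCC's actual rtl queue:

#include <stdio.h>

struct pending { int id; struct pending *next; };
static struct pending *queue_head;          /* newest entry first */

static struct pending *
mark_queue_sketch (void)
{
  return queue_head;                        /* remember the current tail */
}

static void
emit_enqueued_after_mark_sketch (struct pending *mark)
{
  while (queue_head != mark)                /* emit only the new entries */
    {
      struct pending *p = queue_head;
      queue_head = p->next;
      printf ("emit pending increment %d\n", p->id);
    }
}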
*/ else if (GET_CODE (target) == PARALLEL) - emit_group_load (target, temp, int_size_in_bytes (TREE_TYPE (exp))); + emit_group_load (target, temp, TREE_TYPE (exp), + int_size_in_bytes (TREE_TYPE (exp))); else if (GET_MODE (temp) == BLKmode) emit_block_move (target, temp, expr_size (exp), (want_value & 2 @@ -4664,11 +4399,10 @@ store_expr (exp, target, want_value) return target; } -/* Return 1 if EXP just contains zeros. */ +/* Return 1 if EXP just contains zeros. FIXME merge with initializer_zerop. */ static int -is_zeros_p (exp) - tree exp; +is_zeros_p (tree exp) { tree elt; @@ -4714,9 +4448,8 @@ is_zeros_p (exp) /* Return 1 if EXP contains mostly (3/4) zeros. */ -static int -mostly_zeros_p (exp) - tree exp; +int +mostly_zeros_p (tree exp) { if (TREE_CODE (exp) == CONSTRUCTOR) { @@ -4757,15 +4490,9 @@ mostly_zeros_p (exp) clear a substructure if the outer structure has already been cleared. */ static void -store_constructor_field (target, bitsize, bitpos, mode, exp, type, cleared, - alias_set) - rtx target; - unsigned HOST_WIDE_INT bitsize; - HOST_WIDE_INT bitpos; - enum machine_mode mode; - tree exp, type; - int cleared; - int alias_set; +store_constructor_field (rtx target, unsigned HOST_WIDE_INT bitsize, + HOST_WIDE_INT bitpos, enum machine_mode mode, + tree exp, tree type, int cleared, int alias_set) { if (TREE_CODE (exp) == CONSTRUCTOR && bitpos % BITS_PER_UNIT == 0 @@ -4807,11 +4534,7 @@ store_constructor_field (target, bitsize, bitpos, mode, exp, type, cleared, which has been packed to exclude padding bits. */ static void -store_constructor (exp, target, cleared, size) - tree exp; - rtx target; - int cleared; - HOST_WIDE_INT size; +store_constructor (tree exp, rtx target, int cleared, HOST_WIDE_INT size) { tree type = TREE_TYPE (exp); #ifdef WORD_REGISTER_OPERATIONS @@ -4852,14 +4575,36 @@ store_constructor (exp, target, cleared, size) clear the whole structure first. Don't do this if TARGET is a register whose mode size isn't equal to SIZE since clear_storage can't handle this case. */ - else if (((list_length (CONSTRUCTOR_ELTS (exp)) != fields_length (type)) - || mostly_zeros_p (exp)) + else if (size > 0 + && ((list_length (CONSTRUCTOR_ELTS (exp)) != fields_length (type)) + || mostly_zeros_p (exp)) && (GET_CODE (target) != REG || ((HOST_WIDE_INT) GET_MODE_SIZE (GET_MODE (target)) == size))) { - clear_storage (target, GEN_INT (size)); + rtx xtarget = target; + + if (RTX_UNCHANGING_P (target)) + { + xtarget = copy_rtx (target); + RTX_UNCHANGING_P (xtarget) = 0; + } + + clear_storage (xtarget, GEN_INT (size)); cleared = 1; + if (RTX_UNCHANGING_P (target) || readonly_fields_p (type)) + { + /* ??? Emit a blockage to prevent the scheduler from swapping + the memory write issued above without the /u flag and + memory writes that will be issued later with it. + Note that the clearing above cannot be simply disabled + in the unsafe cases because the C front-end relies on + it to implement the semantics of constructors for + automatic objects. However, not all machine descriptions + define a blockage insn, so emit an ASM_INPUT to + act as one. */ + emit_insn (gen_rtx_ASM_INPUT (VOIDmode, "")); + } } if (! 
cleared) @@ -4875,7 +4620,6 @@ store_constructor (exp, target, cleared, size) enum machine_mode mode; HOST_WIDE_INT bitsize; HOST_WIDE_INT bitpos = 0; - int unsignedp; tree offset; rtx to_rtx = target; @@ -4893,7 +4637,6 @@ store_constructor (exp, target, cleared, size) else bitsize = -1; - unsignedp = TREE_UNSIGNED (field); mode = DECL_MODE (field); if (DECL_BIT_FIELD (field)) mode = VOIDmode; @@ -4912,7 +4655,7 @@ store_constructor (exp, target, cleared, size) { rtx offset_rtx; - if (contains_placeholder_p (offset)) + if (CONTAINS_PLACEHOLDER_P (offset)) offset = build (WITH_RECORD_EXPR, sizetype, offset, make_tree (TREE_TYPE (exp), target)); @@ -4994,6 +4737,10 @@ store_constructor (exp, target, cleared, size) int const_bounds_p; HOST_WIDE_INT minelt = 0; HOST_WIDE_INT maxelt = 0; + int icode = 0; + rtx *vector = NULL; + int elt_size = 0; + unsigned n_elts = 0; /* Vectors are like arrays, but the domain is stored via an array type indirectly. */ @@ -5004,6 +4751,22 @@ store_constructor (exp, target, cleared, size) it always will. */ domain = TYPE_DEBUG_REPRESENTATION_TYPE (type); domain = TYPE_DOMAIN (TREE_TYPE (TYPE_FIELDS (domain))); + if (REG_P (target) && VECTOR_MODE_P (GET_MODE (target))) + { + enum machine_mode mode = GET_MODE (target); + + icode = (int) vec_init_optab->handlers[mode].insn_code; + if (icode != CODE_FOR_nothing) + { + unsigned int i; + + elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode)); + n_elts = (GET_MODE_SIZE (mode) / elt_size); + vector = alloca (n_elts); + for (i = 0; i < n_elts; i++) + vector [i] = CONST0_RTX (GET_MODE_INNER (mode)); + } + } } const_bounds_p = (TYPE_MIN_VALUE (domain) @@ -5068,14 +4831,33 @@ store_constructor (exp, target, cleared, size) need_to_clear = 1; } - if (need_to_clear && size > 0) + if (need_to_clear && size > 0 && !vector) { if (! cleared) { if (REG_P (target)) - emit_move_insn (target, CONST0_RTX (GET_MODE (target))); + emit_move_insn (target, CONST0_RTX (GET_MODE (target))); else - clear_storage (target, GEN_INT (size)); + { + rtx xtarget = target; + + if (RTX_UNCHANGING_P (target)) + { + xtarget = copy_rtx (target); + RTX_UNCHANGING_P (xtarget) = 0; + } + + clear_storage (xtarget, GEN_INT (size)); + + if (RTX_UNCHANGING_P (target)) + { + /* ??? Emit a blockage to prevent the scheduler from + swapping the memory write issued above without the + /u flag and memory writes that will be issued later + with it. */ + emit_insn (gen_rtx_ASM_INPUT (VOIDmode, "")); + } + } } cleared = 1; } @@ -5114,11 +4896,14 @@ store_constructor (exp, target, cleared, size) { tree lo_index = TREE_OPERAND (index, 0); tree hi_index = TREE_OPERAND (index, 1); - rtx index_r, pos_rtx, hi_r, loop_top, loop_end; + rtx index_r, pos_rtx, loop_end; struct nesting *loop; HOST_WIDE_INT lo, hi, count; tree position; + if (vector) + abort (); + /* If the range is constant and "small", unroll the loop. 
*/ if (const_bounds_p && host_integerp (lo_index, 0) @@ -5153,8 +4938,7 @@ store_constructor (exp, target, cleared, size) } else { - hi_r = expand_expr (hi_index, NULL_RTX, VOIDmode, 0); - loop_top = gen_label_rtx (); + expand_expr (hi_index, NULL_RTX, VOIDmode, 0); loop_end = gen_label_rtx (); unsignedp = TREE_UNSIGNED (domain); @@ -5211,6 +4995,9 @@ store_constructor (exp, target, cleared, size) { tree position; + if (vector) + abort (); + if (index == 0) index = ssize_int (1); @@ -5228,6 +5015,16 @@ store_constructor (exp, target, cleared, size) xtarget = adjust_address (xtarget, mode, 0); store_expr (value, xtarget, 0); } + else if (vector) + { + int pos; + + if (index != 0) + pos = tree_low_cst (index, 0) - minelt; + else + pos = i; + vector[pos] = expand_expr (value, NULL_RTX, VOIDmode, 0); + } else { if (index != 0) @@ -5243,12 +5040,16 @@ store_constructor (exp, target, cleared, size) target = copy_rtx (target); MEM_KEEP_ALIAS_SET_P (target) = 1; } - store_constructor_field (target, bitsize, bitpos, mode, value, type, cleared, get_alias_set (elttype)); - } } + if (vector) + { + emit_insn (GEN_FCN (icode) (target, + gen_rtx_PARALLEL (GET_MODE (target), + gen_rtvec_v (n_elts, vector)))); + } } /* Set constructor assignments. */ @@ -5293,7 +5094,7 @@ store_constructor (exp, target, cleared, size) { unsigned int set_word_size = TYPE_ALIGN (TREE_TYPE (exp)); enum machine_mode mode = mode_for_size (set_word_size, MODE_INT, 1); - char *bit_buffer = (char *) alloca (nbits); + char *bit_buffer = alloca (nbits); HOST_WIDE_INT word = 0; unsigned int bit_pos = 0; unsigned int ibit = 0; @@ -5315,7 +5116,7 @@ store_constructor (exp, target, cleared, size) { if (word != 0 || ! cleared) { - rtx datum = GEN_INT (word); + rtx datum = gen_int_mode (word, mode); rtx to_rtx; /* The assumption here is that it is safe to use @@ -5415,8 +5216,8 @@ store_constructor (exp, target, cleared, size) TYPE_MODE (sizetype)); } else - emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__setbits"), - LCT_NORMAL, VOIDmode, 4, XEXP (targetx, 0), + emit_library_call (setbits_libfunc, LCT_NORMAL, + VOIDmode, 4, XEXP (targetx, 0), Pmode, bitlength_rtx, TYPE_MODE (sizetype), startbit_rtx, TYPE_MODE (sizetype), endbit_rtx, TYPE_MODE (sizetype)); @@ -5449,17 +5250,9 @@ store_constructor (exp, target, cleared, size) reference to the containing structure. */ static rtx -store_field (target, bitsize, bitpos, mode, exp, value_mode, unsignedp, type, - alias_set) - rtx target; - HOST_WIDE_INT bitsize; - HOST_WIDE_INT bitpos; - enum machine_mode mode; - tree exp; - enum machine_mode value_mode; - int unsignedp; - tree type; - int alias_set; +store_field (rtx target, HOST_WIDE_INT bitsize, HOST_WIDE_INT bitpos, + enum machine_mode mode, tree exp, enum machine_mode value_mode, + int unsignedp, tree type, int alias_set) { HOST_WIDE_INT width_mask = 0; @@ -5470,7 +5263,7 @@ store_field (target, bitsize, bitpos, mode, exp, value_mode, unsignedp, type, side-effects. */ if (bitsize == 0) return expand_expr (exp, const0_rtx, VOIDmode, 0); - else if (bitsize >=0 && bitsize < HOST_BITS_PER_WIDE_INT) + else if (bitsize >= 0 && bitsize < HOST_BITS_PER_WIDE_INT) width_mask = ((HOST_WIDE_INT) 1 << bitsize) - 1; /* If we are storing into an unaligned field of an aligned union that is @@ -5482,15 +5275,13 @@ store_field (target, bitsize, bitpos, mode, exp, value_mode, unsignedp, type, that object. Finally, load from the object into TARGET. 
This is not very efficient in general, but should only be slightly more expensive than the otherwise-required unaligned accesses. Perhaps this can be - cleaned up later. */ + cleaned up later. It's tempting to make OBJECT readonly, but it's set + twice, once with emit_move_insn and once via store_field. */ if (mode == BLKmode && (GET_CODE (target) == REG || GET_CODE (target) == SUBREG)) { - rtx object - = assign_temp - (build_qualified_type (type, TYPE_QUALS (type) | TYPE_QUAL_CONST), - 0, 1, 1); + rtx object = assign_temp (type, 0, 1, 1); rtx blk_object = adjust_address (object, BLKmode, 0); if (bitsize != (HOST_WIDE_INT) GET_MODE_BITSIZE (GET_MODE (target))) @@ -5526,9 +5317,11 @@ store_field (target, bitsize, bitpos, mode, exp, value_mode, unsignedp, type, || GET_CODE (target) == SUBREG /* If the field isn't aligned enough to store as an ordinary memref, store it as a bit field. */ - || (mode != BLKmode && SLOW_UNALIGNED_ACCESS (mode, MEM_ALIGN (target)) - && (MEM_ALIGN (target) < GET_MODE_ALIGNMENT (mode) - || bitpos % GET_MODE_ALIGNMENT (mode))) + || (mode != BLKmode + && ((((MEM_ALIGN (target) < GET_MODE_ALIGNMENT (mode)) + || bitpos % GET_MODE_ALIGNMENT (mode)) + && SLOW_UNALIGNED_ACCESS (mode, MEM_ALIGN (target))) + || (bitpos % BITS_PER_UNIT != 0))) /* If the RHS and field are a constant size and the size of the RHS isn't the same size as the bitfield, we must use bitfield operations. */ @@ -5548,7 +5341,7 @@ store_field (target, bitsize, bitpos, mode, exp, value_mode, unsignedp, type, temp = expand_shift (RSHIFT_EXPR, GET_MODE (temp), temp, size_int (GET_MODE_BITSIZE (GET_MODE (temp)) - bitsize), - temp, 1); + NULL_RTX, 1); /* Unless MODE is VOIDmode or BLKmode, convert TEMP to MODE. */ @@ -5663,15 +5456,10 @@ store_field (target, bitsize, bitpos, mode, exp, value_mode, unsignedp, type, this case, but the address of the object can be found. */ tree -get_inner_reference (exp, pbitsize, pbitpos, poffset, pmode, - punsignedp, pvolatilep) - tree exp; - HOST_WIDE_INT *pbitsize; - HOST_WIDE_INT *pbitpos; - tree *poffset; - enum machine_mode *pmode; - int *punsignedp; - int *pvolatilep; +get_inner_reference (tree exp, HOST_WIDE_INT *pbitsize, + HOST_WIDE_INT *pbitpos, tree *poffset, + enum machine_mode *pmode, int *punsignedp, + int *pvolatilep) { tree size_tree = 0; enum machine_mode mode = VOIDmode; @@ -5730,8 +5518,7 @@ get_inner_reference (exp, pbitsize, pbitpos, poffset, pmode, made during type construction. */ if (this_offset == 0) break; - else if (! TREE_CONSTANT (this_offset) - && contains_placeholder_p (this_offset)) + else if (CONTAINS_PLACEHOLDER_P (this_offset)) this_offset = build (WITH_RECORD_EXPR, sizetype, this_offset, exp); offset = size_binop (PLUS_EXPR, offset, this_offset); @@ -5761,11 +5548,9 @@ get_inner_reference (exp, pbitsize, pbitpos, poffset, pmode, /* If the index has a self-referential type, pass it to a WITH_RECORD_EXPR; if the component size is, pass our component to one. */ - if (! TREE_CONSTANT (index) - && contains_placeholder_p (index)) + if (CONTAINS_PLACEHOLDER_P (index)) index = build (WITH_RECORD_EXPR, TREE_TYPE (index), index, exp); - if (! 
TREE_CONSTANT (unit_size) - && contains_placeholder_p (unit_size)) + if (CONTAINS_PLACEHOLDER_P (unit_size)) unit_size = build (WITH_RECORD_EXPR, sizetype, unit_size, array); offset = size_binop (PLUS_EXPR, offset, @@ -5788,8 +5573,20 @@ get_inner_reference (exp, pbitsize, pbitpos, poffset, pmode, continue; } + + /* We can go inside most conversions: all NON_VALUE_EXPRs, all normal + conversions that don't change the mode, and all view conversions + except those that need to "step up" the alignment. */ else if (TREE_CODE (exp) != NON_LVALUE_EXPR - && TREE_CODE (exp) != VIEW_CONVERT_EXPR + && ! (TREE_CODE (exp) == VIEW_CONVERT_EXPR + && ! ((TYPE_ALIGN (TREE_TYPE (exp)) + > TYPE_ALIGN (TREE_TYPE (TREE_OPERAND (exp, 0)))) + && STRICT_ALIGNMENT + && (TYPE_ALIGN (TREE_TYPE (TREE_OPERAND (exp, 0))) + < BIGGEST_ALIGNMENT) + && (TYPE_ALIGN_OK (TREE_TYPE (exp)) + || TYPE_ALIGN_OK (TREE_TYPE + (TREE_OPERAND (exp, 0)))))) && ! ((TREE_CODE (exp) == NOP_EXPR || TREE_CODE (exp) == CONVERT_EXPR) && (TYPE_MODE (TREE_TYPE (exp)) @@ -5821,8 +5618,7 @@ get_inner_reference (exp, pbitsize, pbitpos, poffset, pmode, /* Return 1 if T is an expression that get_inner_reference handles. */ int -handled_component_p (t) - tree t; +handled_component_p (tree t) { switch (TREE_CODE (t)) { @@ -5834,6 +5630,9 @@ handled_component_p (t) case VIEW_CONVERT_EXPR: return 1; + /* ??? Sure they are handled, but get_inner_reference may return + a different PBITSIZE, depending upon whether the expression is + wrapped up in a NOP_EXPR or not, e.g. for bitfields. */ case NOP_EXPR: case CONVERT_EXPR: return (TYPE_MODE (TREE_TYPE (t)) @@ -5852,8 +5651,7 @@ handled_component_p (t) The returned value may be a REG, SUBREG, MEM or constant. */ rtx -force_operand (value, target) - rtx value, target; +force_operand (rtx value, rtx target) { rtx op1, op2; /* Use subtarget as the target for operand 0 of a binary operation. */ @@ -5984,10 +5782,7 @@ force_operand (value, target) searches for optimization opportunities. */ int -safe_from_p (x, exp, top_p) - rtx x; - tree exp; - int top_p; +safe_from_p (rtx x, tree exp, int top_p) { rtx exp_rtl = 0; int i, nops; @@ -6026,7 +5821,7 @@ safe_from_p (x, exp, top_p) /* A SAVE_EXPR might appear many times in the expression passed to the top-level safe_from_p call, and if it has a complex subexpression, examining it multiple times could result in a combinatorial explosion. - E.g. on an Alpha running at least 200MHz, a Fortran test case compiled + E.g. on an Alpha running at least 200MHz, a Fortran testcase compiled with optimization took about 28 minutes to compile -- even though it was only a few lines long. So we mark each SAVE_EXPR we see with TREE_PRIVATE and turn that off when we are done. We keep a list of the SAVE_EXPRs @@ -6080,7 +5875,7 @@ safe_from_p (x, exp, top_p) case '<': if (!safe_from_p (x, TREE_OPERAND (exp, 1), 0)) return 0; - /* FALLTHRU */ + /* Fall through. */ case '1': return safe_from_p (x, TREE_OPERAND (exp, 0), 0); @@ -6174,10 +5969,6 @@ safe_from_p (x, exp, top_p) part of the expression. */ return safe_from_p (x, TREE_OPERAND (exp, 1), 0); - case METHOD_CALL_EXPR: - /* This takes an rtx argument, but shouldn't appear here. */ - abort (); - default: break; } @@ -6228,8 +6019,7 @@ safe_from_p (x, exp, top_p) variable or parameter; else return 0. 
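safe_from_p's SAVE_EXPR handling above is a visited-flag guard: shared subtrees turn the expression into a DAG, and without the mark a naive walk revisits shared nodes exponentially often, as the Fortran testcase in the comment shows. The idea in miniature, with a toy node standing in for a tree and its TREE_PRIVATE bit:

struct node { int visited; struct node *kid[2]; };

static int
count_nodes (struct node *n)
{
  if (n == 0 || n->visited)
    return 0;                 /* already examined via another path */
  n->visited = 1;             /* plays the role of TREE_PRIVATE */
  return 1 + count_nodes (n->kid[0]) + count_nodes (n->kid[1]);
}
/* As in safe_from_p, the caller is responsible for clearing the
   marks once the top-level query finishes.  */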
*/ static rtx -var_rtx (exp) - tree exp; +var_rtx (tree exp) { STRIP_NOPS (exp); switch (TREE_CODE (exp)) @@ -6241,70 +6031,14 @@ var_rtx (exp) return 0; } } - -#ifdef MAX_INTEGER_COMPUTATION_MODE - -void -check_max_integer_computation_mode (exp) - tree exp; -{ - enum tree_code code; - enum machine_mode mode; - - /* Strip any NOPs that don't change the mode. */ - STRIP_NOPS (exp); - code = TREE_CODE (exp); - - /* We must allow conversions of constants to MAX_INTEGER_COMPUTATION_MODE. */ - if (code == NOP_EXPR - && TREE_CODE (TREE_OPERAND (exp, 0)) == INTEGER_CST) - return; - - /* First check the type of the overall operation. We need only look at - unary, binary and relational operations. */ - if (TREE_CODE_CLASS (code) == '1' - || TREE_CODE_CLASS (code) == '2' - || TREE_CODE_CLASS (code) == '<') - { - mode = TYPE_MODE (TREE_TYPE (exp)); - if (GET_MODE_CLASS (mode) == MODE_INT - && mode > MAX_INTEGER_COMPUTATION_MODE) - internal_error ("unsupported wide integer operation"); - } - - /* Check operand of a unary op. */ - if (TREE_CODE_CLASS (code) == '1') - { - mode = TYPE_MODE (TREE_TYPE (TREE_OPERAND (exp, 0))); - if (GET_MODE_CLASS (mode) == MODE_INT - && mode > MAX_INTEGER_COMPUTATION_MODE) - internal_error ("unsupported wide integer operation"); - } - - /* Check operands of a binary/comparison op. */ - if (TREE_CODE_CLASS (code) == '2' || TREE_CODE_CLASS (code) == '<') - { - mode = TYPE_MODE (TREE_TYPE (TREE_OPERAND (exp, 0))); - if (GET_MODE_CLASS (mode) == MODE_INT - && mode > MAX_INTEGER_COMPUTATION_MODE) - internal_error ("unsupported wide integer operation"); - - mode = TYPE_MODE (TREE_TYPE (TREE_OPERAND (exp, 1))); - if (GET_MODE_CLASS (mode) == MODE_INT - && mode > MAX_INTEGER_COMPUTATION_MODE) - internal_error ("unsupported wide integer operation"); - } -} -#endif /* Return the highest power of two that EXP is known to be a multiple of. This is used in updating alignment of MEMs in array references. */ -static HOST_WIDE_INT -highest_pow2_factor (exp) - tree exp; +static unsigned HOST_WIDE_INT +highest_pow2_factor (tree exp) { - HOST_WIDE_INT c0, c1; + unsigned HOST_WIDE_INT c0, c1; switch (TREE_CODE (exp)) { @@ -6312,7 +6046,7 @@ highest_pow2_factor (exp) /* We can find the lowest bit that's a one. If the low HOST_BITS_PER_WIDE_INT bits are zero, return BIGGEST_ALIGNMENT. We need to handle this case since we can find it in a COND_EXPR, - a MIN_EXPR, or a MAX_EXPR. If the constant overlows, we have an + a MIN_EXPR, or a MAX_EXPR. If the constant overflows, we have an erroneous program, so return BIGGEST_ALIGNMENT to avoid any later ICE. */ if (TREE_CONSTANT_OVERFLOW (exp)) @@ -6367,19 +6101,22 @@ highest_pow2_factor (exp) return 1; } -/* Similar, except that it is known that the expression must be a multiple - of the alignment of TYPE. */ +/* Similar, except that the alignment requirements of TARGET are + taken into account. Assume it is at least as aligned as its + type, unless it is a COMPONENT_REF in which case the layout of + the structure gives the alignment. 
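The INTEGER_CST case of highest_pow2_factor is the classic lowest-set-bit identity: for nonzero c, c & -c is the largest power of two dividing c. A standalone illustration; the real function additionally guards overflowed constants and the case where all low bits are zero.

static unsigned long
pow2_factor (unsigned long c)
{
  return c & -c;   /* e.g. 24 -> 8, 40 -> 8, 7 -> 1; 0 yields 0 */
}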
*/ -static HOST_WIDE_INT -highest_pow2_factor_for_type (type, exp) - tree type; - tree exp; +static unsigned HOST_WIDE_INT +highest_pow2_factor_for_target (tree target, tree exp) { - HOST_WIDE_INT type_align, factor; + unsigned HOST_WIDE_INT target_align, factor; factor = highest_pow2_factor (exp); - type_align = TYPE_ALIGN (type) / BITS_PER_UNIT; - return MAX (factor, type_align); + if (TREE_CODE (target) == COMPONENT_REF) + target_align = DECL_ALIGN (TREE_OPERAND (target, 1)) / BITS_PER_UNIT; + else + target_align = TYPE_ALIGN (TREE_TYPE (target)) / BITS_PER_UNIT; + return MAX (factor, target_align); } /* Return an object on the placeholder list that matches EXP, a @@ -6391,9 +6128,7 @@ highest_pow2_factor_for_type (type, exp) the placeholder list at which the object is found is placed. */ tree -find_placeholder (exp, plist) - tree exp; - tree *plist; +find_placeholder (tree exp, tree *plist) { tree type = TREE_TYPE (exp); tree placeholder_expr; @@ -6447,6 +6182,34 @@ find_placeholder (exp, plist) return 0; } + +/* Subroutine of expand_expr. Expand the two operands of a binary + expression EXP0 and EXP1 placing the results in OP0 and OP1. + The value may be stored in TARGET if TARGET is nonzero. The + MODIFIER argument is as documented by expand_expr. */ + +static void +expand_operands (tree exp0, tree exp1, rtx target, rtx *op0, rtx *op1, + enum expand_modifier modifier) +{ + if (! safe_from_p (target, exp1, 1)) + target = 0; + if (operand_equal_p (exp0, exp1, 0)) + { + *op0 = expand_expr (exp0, target, VOIDmode, modifier); + *op1 = copy_rtx (*op0); + } + else + { + /* If we need to preserve evaluation order, copy exp0 into its own + temporary variable so that it can't be clobbered by exp1. */ + if (flag_evaluation_order && TREE_SIDE_EFFECTS (exp1)) + exp0 = save_expr (exp0); + *op0 = expand_expr (exp0, target, VOIDmode, modifier); + *op1 = expand_expr (exp1, NULL_RTX, VOIDmode, modifier); + } +} + /* expand_expr: generate code for computing expression EXP. An rtx for the computed value is returned. The value is never null. @@ -6488,14 +6251,17 @@ find_placeholder (exp, plist) marked TARGET so that it's safe from being trashed by libcalls. We don't want to use TARGET for anything but the final result; Intermediate values must go elsewhere. Additionally, calls to - emit_block_move will be flagged with BLOCK_OP_CALL_PARM. */ + emit_block_move will be flagged with BLOCK_OP_CALL_PARM. + + If EXP is a VAR_DECL whose DECL_RTL was a MEM with an invalid + address, and ALT_RTL is non-NULL, then *ALT_RTL is set to the + DECL_RTL of the VAR_DECL. *ALT_RTL is also set if EXP is a + COMPOUND_EXPR whose second argument is such a VAR_DECL, and so on + recursively. */ rtx -expand_expr (exp, target, tmode, modifier) - tree exp; - rtx target; - enum machine_mode tmode; - enum expand_modifier modifier; +expand_expr_real (tree exp, rtx target, enum machine_mode tmode, + enum expand_modifier modifier, rtx *alt_rtl) { rtx op0, op1, temp; tree type = TREE_TYPE (exp); @@ -6579,58 +6345,17 @@ expand_expr (exp, target, tmode, modifier) target = 0; } -#ifdef MAX_INTEGER_COMPUTATION_MODE - /* Only check stuff here if the mode we want is different from the mode - of the expression; if it's the same, check_max_integer_computiation_mode - will handle it. Do we really need to check this stuff at all? 
*/ - - if (target - && GET_MODE (target) != mode - && TREE_CODE (exp) != INTEGER_CST - && TREE_CODE (exp) != PARM_DECL - && TREE_CODE (exp) != ARRAY_REF - && TREE_CODE (exp) != ARRAY_RANGE_REF - && TREE_CODE (exp) != COMPONENT_REF - && TREE_CODE (exp) != BIT_FIELD_REF - && TREE_CODE (exp) != INDIRECT_REF - && TREE_CODE (exp) != CALL_EXPR - && TREE_CODE (exp) != VAR_DECL - && TREE_CODE (exp) != RTL_EXPR) - { - enum machine_mode mode = GET_MODE (target); - - if (GET_MODE_CLASS (mode) == MODE_INT - && mode > MAX_INTEGER_COMPUTATION_MODE) - internal_error ("unsupported wide integer operation"); - } - - if (tmode != mode - && TREE_CODE (exp) != INTEGER_CST - && TREE_CODE (exp) != PARM_DECL - && TREE_CODE (exp) != ARRAY_REF - && TREE_CODE (exp) != ARRAY_RANGE_REF - && TREE_CODE (exp) != COMPONENT_REF - && TREE_CODE (exp) != BIT_FIELD_REF - && TREE_CODE (exp) != INDIRECT_REF - && TREE_CODE (exp) != VAR_DECL - && TREE_CODE (exp) != CALL_EXPR - && TREE_CODE (exp) != RTL_EXPR - && GET_MODE_CLASS (tmode) == MODE_INT - && tmode > MAX_INTEGER_COMPUTATION_MODE) - internal_error ("unsupported wide integer operation"); - - check_max_integer_computation_mode (exp); -#endif - /* If will do cse, generate all results into pseudo registers since 1) that allows cse to find more things and 2) otherwise cse could produce an insn the machine - cannot support. And exception is a CONSTRUCTOR into a multi-word - MEM: that's much more likely to be most efficient into the MEM. */ + cannot support. An exception is a CONSTRUCTOR into a multi-word + MEM: that's much more likely to be most efficient into the MEM. + Another is a CALL_EXPR which must return in memory. */ if (! cse_not_expected && mode != BLKmode && target && (GET_CODE (target) != REG || REGNO (target) < FIRST_PSEUDO_REGISTER) - && ! (code == CONSTRUCTOR && GET_MODE_SIZE (mode) > UNITS_PER_WORD)) + && ! (code == CONSTRUCTOR && GET_MODE_SIZE (mode) > UNITS_PER_WORD) + && ! (code == CALL_EXPR && aggregate_value_p (exp, exp))) target = 0; switch (code) @@ -6638,25 +6363,17 @@ expand_expr (exp, target, tmode, modifier) case LABEL_DECL: { tree function = decl_function_context (exp); - /* Handle using a label in a containing function. */ - if (function != current_function_decl - && function != inline_function_decl && function != 0) - { - struct function *p = find_function_data (function); - p->expr->x_forced_labels - = gen_rtx_EXPR_LIST (VOIDmode, label_rtx (exp), - p->expr->x_forced_labels); - } + /* Labels in containing functions, or labels used from initializers, + must be forced. 
*/ + if (modifier == EXPAND_INITIALIZER + || (function != current_function_decl + && function != inline_function_decl + && function != 0)) + temp = force_label_rtx (exp); else - { - if (modifier == EXPAND_INITIALIZER) - forced_labels = gen_rtx_EXPR_LIST (VOIDmode, - label_rtx (exp), - forced_labels); - } + temp = label_rtx (exp); - temp = gen_rtx_MEM (FUNCTION_MODE, - gen_rtx_LABEL_REF (Pmode, label_rtx (exp))); + temp = gen_rtx_MEM (FUNCTION_MODE, gen_rtx_LABEL_REF (Pmode, temp)); if (function != current_function_decl && function != inline_function_decl && function != 0) LABEL_REF_NONLOCAL_P (XEXP (temp, 0)) = 1; @@ -6666,7 +6383,7 @@ expand_expr (exp, target, tmode, modifier) case PARM_DECL: if (!DECL_RTL_SET_P (exp)) { - error_with_decl (exp, "prior parameter's size depends on `%s'"); + error ("%Jprior parameter's size depends on '%D'", exp, exp); return CONST0_RTX (mode); } @@ -6753,8 +6470,12 @@ expand_expr (exp, target, tmode, modifier) XEXP (DECL_RTL (exp), 0)) || (flag_force_addr && GET_CODE (XEXP (DECL_RTL (exp), 0)) != REG))) - temp = replace_equiv_address (DECL_RTL (exp), - copy_rtx (XEXP (DECL_RTL (exp), 0))); + { + if (alt_rtl) + *alt_rtl = DECL_RTL (exp); + temp = replace_equiv_address (DECL_RTL (exp), + copy_rtx (XEXP (DECL_RTL (exp), 0))); + } /* If we got something, return it. But first, set the alignment if the address is a register. */ @@ -6824,37 +6545,64 @@ expand_expr (exp, target, tmode, modifier) TYPE_MODE (TREE_TYPE (exp))); case COMPLEX_CST: + /* Handle evaluating a complex constant in a CONCAT target. */ + if (original_target && GET_CODE (original_target) == CONCAT) + { + enum machine_mode mode = TYPE_MODE (TREE_TYPE (TREE_TYPE (exp))); + rtx rtarg, itarg; + + rtarg = XEXP (original_target, 0); + itarg = XEXP (original_target, 1); + + /* Move the real and imaginary parts separately. */ + op0 = expand_expr (TREE_REALPART (exp), rtarg, mode, 0); + op1 = expand_expr (TREE_IMAGPART (exp), itarg, mode, 0); + + if (op0 != rtarg) + emit_move_insn (rtarg, op0); + if (op1 != itarg) + emit_move_insn (itarg, op1); + + return original_target; + } + + /* ... fall through ... */ + case STRING_CST: - if (! TREE_CST_RTL (exp)) - output_constant_def (exp, 1); + temp = output_constant_def (exp, 1); - /* TREE_CST_RTL probably contains a constant address. + /* temp contains a constant address. On RISC machines where a constant address isn't valid, make some insns to get that address into a register. */ - if (GET_CODE (TREE_CST_RTL (exp)) == MEM - && modifier != EXPAND_CONST_ADDRESS + if (modifier != EXPAND_CONST_ADDRESS && modifier != EXPAND_INITIALIZER && modifier != EXPAND_SUM - && (! memory_address_p (mode, XEXP (TREE_CST_RTL (exp), 0)) - || (flag_force_addr - && GET_CODE (XEXP (TREE_CST_RTL (exp), 0)) != REG))) - return replace_equiv_address (TREE_CST_RTL (exp), - copy_rtx (XEXP (TREE_CST_RTL (exp), 0))); - return TREE_CST_RTL (exp); + && (! 
memory_address_p (mode, XEXP (temp, 0)) + || flag_force_addr)) + return replace_equiv_address (temp, + copy_rtx (XEXP (temp, 0))); + return temp; case EXPR_WITH_FILE_LOCATION: { rtx to_return; - const char *saved_input_filename = input_filename; - int saved_lineno = lineno; + struct file_stack fs; + + fs.location = input_location; + fs.next = expr_wfl_stack; input_filename = EXPR_WFL_FILENAME (exp); - lineno = EXPR_WFL_LINENO (exp); + input_line = EXPR_WFL_LINENO (exp); + expr_wfl_stack = &fs; if (EXPR_WFL_EMIT_LINE_NOTE (exp)) - emit_line_note (input_filename, lineno); + emit_line_note (input_location); /* Possibly avoid switching back and forth here. */ - to_return = expand_expr (EXPR_WFL_NODE (exp), target, tmode, modifier); - input_filename = saved_input_filename; - lineno = saved_lineno; + to_return = expand_expr (EXPR_WFL_NODE (exp), + (ignore ? const0_rtx : target), + tmode, modifier); + if (expr_wfl_stack != &fs) + abort (); + input_location = fs.location; + expr_wfl_stack = fs.next; return to_return; } @@ -7018,7 +6766,6 @@ expand_expr (exp, target, tmode, modifier) case BIND_EXPR: { tree vars = TREE_OPERAND (exp, 0); - int vars_need_expansion = 0; /* Need to open a binding contour here because if there are any cleanups they must be contained here. */ @@ -7033,10 +6780,7 @@ expand_expr (exp, target, tmode, modifier) while (vars) { if (!DECL_RTL_SET_P (vars)) - { - vars_need_expansion = 1; - expand_decl (vars); - } + expand_decl (vars); expand_decl_init (vars); vars = TREE_CHAIN (vars); } @@ -7058,6 +6802,8 @@ expand_expr (exp, target, tmode, modifier) } preserve_rtl_expr_result (RTL_EXPR_RTL (exp)); free_temps_for_rtl_expr (exp); + if (alt_rtl) + *alt_rtl = RTL_EXPR_ALT_RTL (exp); return RTL_EXPR_RTL (exp); case CONSTRUCTOR: @@ -7097,7 +6843,9 @@ expand_expr (exp, target, tmode, modifier) && ((TREE_CODE (type) == VECTOR_TYPE && !is_zeros_p (exp)) || ! mostly_zeros_p (exp))))) - || (modifier == EXPAND_INITIALIZER && TREE_CONSTANT (exp))) + || ((modifier == EXPAND_INITIALIZER + || modifier == EXPAND_CONST_ADDRESS) + && TREE_CONSTANT (exp))) { rtx constructor = output_constant_def (exp, 1); @@ -7228,7 +6976,8 @@ expand_expr (exp, target, tmode, modifier) && modifier != EXPAND_MEMORY && TREE_READONLY (array) && ! TREE_SIDE_EFFECTS (array) && TREE_CODE (array) == VAR_DECL && DECL_INITIAL (array) - && TREE_CODE (DECL_INITIAL (array)) != ERROR_MARK) + && TREE_CODE (DECL_INITIAL (array)) != ERROR_MARK + && targetm.binds_local_p (array)) { if (TREE_CODE (index) == INTEGER_CST) { @@ -7263,18 +7012,12 @@ expand_expr (exp, target, tmode, modifier) } } } - /* Fall through. */ + goto normal_inner_ref; case COMPONENT_REF: - case BIT_FIELD_REF: - case ARRAY_RANGE_REF: /* If the operand is a CONSTRUCTOR, we can just extract the - appropriate field if it is present. Don't do this if we have - already written the data since we want to refer to that copy - and varasm.c assumes that's what we'll do. */ - if (code == COMPONENT_REF - && TREE_CODE (TREE_OPERAND (exp, 0)) == CONSTRUCTOR - && TREE_CST_RTL (TREE_OPERAND (exp, 0)) == 0) + appropriate field if it is present. 
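The COMPONENT_REF-of-CONSTRUCTOR shortcut avoids materializing the whole aggregate: the constructor's element list is scanned for the requested field and only that value is expanded. The lookup in miniature, with a key/value list standing in for tree purpose/value pairs:

#include <string.h>

struct ctor_elt { const char *field; int value; struct ctor_elt *next; };

static const int *
find_ctor_field (const struct ctor_elt *elts, const char *field)
{
  for (; elts; elts = elts->next)
    if (strcmp (elts->field, field) == 0)
      return &elts->value;
  return 0;   /* absent: fall back to the general reference path */
}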
*/ + if (TREE_CODE (TREE_OPERAND (exp, 0)) == CONSTRUCTOR) { tree elt; @@ -7326,7 +7069,11 @@ expand_expr (exp, target, tmode, modifier) return op0; } } + goto normal_inner_ref; + case BIT_FIELD_REF: + case ARRAY_RANGE_REF: + normal_inner_ref: { enum machine_mode mode1; HOST_WIDE_INT bitsize, bitpos; @@ -7371,39 +7118,41 @@ expand_expr (exp, target, tmode, modifier) op0 = validize_mem (force_const_mem (mode, op0)); } + /* Otherwise, if this object not in memory and we either have an + offset or a BLKmode result, put it there. This case can't occur in + C, but can in Ada if we have unchecked conversion of an expression + from a scalar type to an array or record type or for an + ARRAY_RANGE_REF whose type is BLKmode. */ + else if (GET_CODE (op0) != MEM + && (offset != 0 + || (code == ARRAY_RANGE_REF && mode == BLKmode))) + { + /* If the operand is a SAVE_EXPR, we can deal with this by + forcing the SAVE_EXPR into memory. */ + if (TREE_CODE (TREE_OPERAND (exp, 0)) == SAVE_EXPR) + { + put_var_into_stack (TREE_OPERAND (exp, 0), + /*rescan=*/true); + op0 = SAVE_EXPR_RTL (TREE_OPERAND (exp, 0)); + } + else + { + tree nt + = build_qualified_type (TREE_TYPE (tem), + (TYPE_QUALS (TREE_TYPE (tem)) + | TYPE_QUAL_CONST)); + rtx memloc = assign_temp (nt, 1, 1, 1); + + emit_move_insn (memloc, op0); + op0 = memloc; + } + } + if (offset != 0) { rtx offset_rtx = expand_expr (offset, NULL_RTX, VOIDmode, EXPAND_SUM); - /* If this object is in a register, put it into memory. - This case can't occur in C, but can in Ada if we have - unchecked conversion of an expression from a scalar type to - an array or record type. */ - if (GET_CODE (op0) == REG || GET_CODE (op0) == SUBREG - || GET_CODE (op0) == CONCAT || GET_CODE (op0) == ADDRESSOF) - { - /* If the operand is a SAVE_EXPR, we can deal with this by - forcing the SAVE_EXPR into memory. */ - if (TREE_CODE (TREE_OPERAND (exp, 0)) == SAVE_EXPR) - { - put_var_into_stack (TREE_OPERAND (exp, 0), - /*rescan=*/true); - op0 = SAVE_EXPR_RTL (TREE_OPERAND (exp, 0)); - } - else - { - tree nt - = build_qualified_type (TREE_TYPE (tem), - (TYPE_QUALS (TREE_TYPE (tem)) - | TYPE_QUAL_CONST)); - rtx memloc = assign_temp (nt, 1, 1, 1); - - emit_move_insn (memloc, op0); - op0 = memloc; - } - } - if (GET_CODE (op0) != MEM) abort (); @@ -7415,10 +7164,9 @@ expand_expr (exp, target, tmode, modifier) offset_rtx = convert_to_mode (ptr_mode, offset_rtx, 0); #endif - /* A constant address in OP0 can have VOIDmode, we must not try - to call force_reg for that case. Avoid that case. */ - if (GET_CODE (op0) == MEM - && GET_MODE (op0) == BLKmode + if (GET_MODE (op0) == BLKmode + /* A constant address in OP0 can have VOIDmode, we must + not try to call force_reg in that case. */ && GET_MODE (XEXP (op0, 0)) != VOIDmode && bitsize != 0 && (bitpos % bitsize) == 0 @@ -7474,10 +7222,16 @@ expand_expr (exp, target, tmode, modifier) /* If the field isn't aligned enough to fetch as a memref, fetch it as a bit field. */ || (mode1 != BLKmode - && SLOW_UNALIGNED_ACCESS (mode1, MEM_ALIGN (op0)) - && ((TYPE_ALIGN (TREE_TYPE (tem)) - < GET_MODE_ALIGNMENT (mode)) - || (bitpos % GET_MODE_ALIGNMENT (mode) != 0))) + && (((TYPE_ALIGN (TREE_TYPE (tem)) < GET_MODE_ALIGNMENT (mode) + || (bitpos % GET_MODE_ALIGNMENT (mode) != 0) + || (GET_CODE (op0) == MEM + && (MEM_ALIGN (op0) < GET_MODE_ALIGNMENT (mode1) + || (bitpos % GET_MODE_ALIGNMENT (mode1) != 0)))) + && ((modifier == EXPAND_CONST_ADDRESS + || modifier == EXPAND_INITIALIZER) + ? 
STRICT_ALIGNMENT + : SLOW_UNALIGNED_ACCESS (mode1, MEM_ALIGN (op0)))) + || (bitpos % BITS_PER_UNIT != 0))) /* If the type and the field are a constant size and the size of the type isn't the same size as the bitfield, we must use bitfield operations. */ @@ -7497,6 +7251,12 @@ expand_expr (exp, target, tmode, modifier) if (ext_mode == BLKmode) { + if (target == 0) + target = assign_temp (type, 0, 1, 1); + + if (bitsize == 0) + return target; + /* In this case, BITPOS must start at a byte boundary and TARGET, if specified, must be a MEM. */ if (GET_CODE (op0) != MEM @@ -7504,11 +7264,9 @@ expand_expr (exp, target, tmode, modifier) || bitpos % BITS_PER_UNIT != 0) abort (); - op0 = adjust_address (op0, VOIDmode, bitpos / BITS_PER_UNIT); - if (target == 0) - target = assign_temp (type, 0, 1, 1); - - emit_block_move (target, op0, + emit_block_move (target, + adjust_address (op0, VOIDmode, + bitpos / BITS_PER_UNIT), GEN_INT ((bitsize + BITS_PER_UNIT - 1) / BITS_PER_UNIT), (modifier == EXPAND_STACK_PARM @@ -7770,7 +7528,8 @@ expand_expr (exp, target, tmode, modifier) if (DECL_BUILT_IN_CLASS (TREE_OPERAND (TREE_OPERAND (exp, 0), 0)) == BUILT_IN_FRONTEND) return (*lang_hooks.expand_expr) (exp, original_target, - tmode, modifier); + tmode, modifier, + alt_rtl); else return expand_builtin (exp, target, subtarget, tmode, ignore); } @@ -7801,7 +7560,12 @@ expand_expr (exp, target, tmode, modifier) } if (target == 0) - target = assign_temp (type, 0, 1, 1); + { + if (TYPE_MODE (type) != BLKmode) + target = gen_reg_rtx (TYPE_MODE (type)); + else + target = assign_temp (type, 0, 1, 1); + } if (GET_CODE (target) == MEM) /* Store data into beginning of memory target. */ @@ -7875,12 +7639,14 @@ expand_expr (exp, target, tmode, modifier) op0 = expand_expr (TREE_OPERAND (exp, 0), NULL_RTX, mode, modifier); /* If the input and output modes are both the same, we are done. - Otherwise, if neither mode is BLKmode and both are within a word, we - can use gen_lowpart. If neither is true, make sure the operand is - in memory and convert the MEM to the new mode. */ + Otherwise, if neither mode is BLKmode and both are integral and within + a word, we can use gen_lowpart. If neither is true, make sure the + operand is in memory and convert the MEM to the new mode. */ if (TYPE_MODE (type) == GET_MODE (op0)) ; else if (TYPE_MODE (type) != BLKmode && GET_MODE (op0) != BLKmode + && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT + && GET_MODE_CLASS (TYPE_MODE (type)) == MODE_INT && GET_MODE_SIZE (TYPE_MODE (type)) <= UNITS_PER_WORD && GET_MODE_SIZE (GET_MODE (op0)) <= UNITS_PER_WORD) op0 = gen_lowpart (TYPE_MODE (type), op0); @@ -8022,11 +7788,11 @@ expand_expr (exp, target, tmode, modifier) { op1 = expand_expr (TREE_OPERAND (exp, 1), NULL_RTX, VOIDmode, modifier); - /* Don't go to both_summands if modifier - says it's not right to return a PLUS. */ - if (modifier != EXPAND_SUM && modifier != EXPAND_INITIALIZER) - goto binop2; - goto both_summands; + /* Return a PLUS if modifier says it's OK. */ + if (modifier == EXPAND_SUM + || modifier == EXPAND_INITIALIZER) + return simplify_gen_binary (PLUS, mode, op0, op1); + goto binop2; } /* Use immed_double_const to ensure that the constant is truncated according to the mode of OP1, then sign extended @@ -8043,9 +7809,6 @@ expand_expr (exp, target, tmode, modifier) } } - if (! safe_from_p (subtarget, TREE_OPERAND (exp, 1), 1)) - subtarget = 0; - /* No sense saving up arithmetic to be done if it's all in the wrong mode to form part of an address. 
And force_operand won't know whether to sign-extend or @@ -8053,8 +7816,8 @@ expand_expr (exp, target, tmode, modifier) if ((modifier != EXPAND_SUM && modifier != EXPAND_INITIALIZER) || mode != ptr_mode) { - op0 = expand_expr (TREE_OPERAND (exp, 0), subtarget, VOIDmode, 0); - op1 = expand_expr (TREE_OPERAND (exp, 1), NULL_RTX, VOIDmode, 0); + expand_operands (TREE_OPERAND (exp, 0), TREE_OPERAND (exp, 1), + subtarget, &op0, &op1, 0); if (op0 == const0_rtx) return op1; if (op1 == const0_rtx) @@ -8062,57 +7825,9 @@ expand_expr (exp, target, tmode, modifier) goto binop2; } - op0 = expand_expr (TREE_OPERAND (exp, 0), subtarget, VOIDmode, modifier); - op1 = expand_expr (TREE_OPERAND (exp, 1), NULL_RTX, VOIDmode, modifier); - - /* We come here from MINUS_EXPR when the second operand is a - constant. */ - both_summands: - /* Make sure any term that's a sum with a constant comes last. */ - if (GET_CODE (op0) == PLUS - && CONSTANT_P (XEXP (op0, 1))) - { - temp = op0; - op0 = op1; - op1 = temp; - } - /* If adding to a sum including a constant, - associate it to put the constant outside. */ - if (GET_CODE (op1) == PLUS - && CONSTANT_P (XEXP (op1, 1))) - { - rtx constant_term = const0_rtx; - - temp = simplify_binary_operation (PLUS, mode, XEXP (op1, 0), op0); - if (temp != 0) - op0 = temp; - /* Ensure that MULT comes first if there is one. */ - else if (GET_CODE (op0) == MULT) - op0 = gen_rtx_PLUS (mode, op0, XEXP (op1, 0)); - else - op0 = gen_rtx_PLUS (mode, XEXP (op1, 0), op0); - - /* Let's also eliminate constants from op0 if possible. */ - op0 = eliminate_constant_term (op0, &constant_term); - - /* CONSTANT_TERM and XEXP (op1, 1) are known to be constant, so - their sum should be a constant. Form it into OP1, since the - result we want will then be OP0 + OP1. */ - - temp = simplify_binary_operation (PLUS, mode, constant_term, - XEXP (op1, 1)); - if (temp != 0) - op1 = temp; - else - op1 = gen_rtx_PLUS (mode, constant_term, XEXP (op1, 1)); - } - - /* Put a constant term last and put a multiplication first. */ - if (CONSTANT_P (op0) || GET_CODE (op1) == MULT) - temp = op1, op1 = op0, op0 = temp; - - temp = simplify_binary_operation (PLUS, mode, op0, op1); - return temp ? temp : gen_rtx_PLUS (mode, op0, op1); + expand_operands (TREE_OPERAND (exp, 0), TREE_OPERAND (exp, 1), + subtarget, &op0, &op1, modifier); + return simplify_gen_binary (PLUS, mode, op0, op1); case MINUS_EXPR: /* For initializers, we are allowed to return a MINUS of two @@ -8124,10 +7839,8 @@ expand_expr (exp, target, tmode, modifier) && really_constant_p (TREE_OPERAND (exp, 0)) && really_constant_p (TREE_OPERAND (exp, 1))) { - rtx op0 = expand_expr (TREE_OPERAND (exp, 0), NULL_RTX, VOIDmode, - modifier); - rtx op1 = expand_expr (TREE_OPERAND (exp, 1), NULL_RTX, VOIDmode, - modifier); + expand_operands (TREE_OPERAND (exp, 0), TREE_OPERAND (exp, 1), + NULL_RTX, &op0, &op1, modifier); /* If the last operand is a CONST_INT, use plus_constant of the negated constant. Else make the MINUS. */ @@ -8149,17 +7862,14 @@ expand_expr (exp, target, tmode, modifier) || mode != ptr_mode) goto binop; - if (! safe_from_p (subtarget, TREE_OPERAND (exp, 1), 1)) - subtarget = 0; - - op0 = expand_expr (TREE_OPERAND (exp, 0), subtarget, VOIDmode, modifier); - op1 = expand_expr (TREE_OPERAND (exp, 1), NULL_RTX, VOIDmode, modifier); + expand_operands (TREE_OPERAND (exp, 0), TREE_OPERAND (exp, 1), + subtarget, &op0, &op1, modifier); /* Convert A - const to A + (-const). 
*/ if (GET_CODE (op1) == CONST_INT) { op1 = negate_rtx (mode, op1); - goto both_summands; + return simplify_gen_binary (PLUS, mode, op0, op1); } goto binop2; @@ -8186,16 +7896,6 @@ expand_expr (exp, target, tmode, modifier) op0 = expand_expr (TREE_OPERAND (exp, 0), subtarget, VOIDmode, EXPAND_SUM); - /* If we knew for certain that this is arithmetic for an array - reference, and we knew the bounds of the array, then we could - apply the distributive law across (PLUS X C) for constant C. - Without such knowledge, we risk overflowing the computation - when both X and C are large, but X+C isn't. */ - /* ??? Could perhaps special-case EXP being unsigned and C being - positive. In that case we are certain that X+C is no smaller - than X and so the transformed expression will overflow iff the - original would have. */ - if (GET_CODE (op0) != REG) op0 = force_operand (op0, NULL_RTX); if (GET_CODE (op0) != REG) @@ -8206,9 +7906,6 @@ expand_expr (exp, target, tmode, modifier) TYPE_MODE (TREE_TYPE (exp1)))); } - if (! safe_from_p (subtarget, TREE_OPERAND (exp, 1), 1)) - subtarget = 0; - if (modifier == EXPAND_STACK_PARM) target = 0; @@ -8248,14 +7945,14 @@ expand_expr (exp, target, tmode, modifier) { if (this_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing) { - op0 = expand_expr (TREE_OPERAND (TREE_OPERAND (exp, 0), 0), - NULL_RTX, VOIDmode, 0); if (TREE_CODE (TREE_OPERAND (exp, 1)) == INTEGER_CST) - op1 = expand_expr (TREE_OPERAND (exp, 1), NULL_RTX, - VOIDmode, 0); + expand_operands (TREE_OPERAND (TREE_OPERAND (exp, 0), 0), + TREE_OPERAND (exp, 1), + NULL_RTX, &op0, &op1, 0); else - op1 = expand_expr (TREE_OPERAND (TREE_OPERAND (exp, 1), 0), - NULL_RTX, VOIDmode, 0); + expand_operands (TREE_OPERAND (TREE_OPERAND (exp, 0), 0), + TREE_OPERAND (TREE_OPERAND (exp, 1), 0), + NULL_RTX, &op0, &op1, 0); goto binop2; } else if (other_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing @@ -8284,8 +7981,8 @@ expand_expr (exp, target, tmode, modifier) } } } - op0 = expand_expr (TREE_OPERAND (exp, 0), subtarget, VOIDmode, 0); - op1 = expand_expr (TREE_OPERAND (exp, 1), NULL_RTX, VOIDmode, 0); + expand_operands (TREE_OPERAND (exp, 0), TREE_OPERAND (exp, 1), + subtarget, &op0, &op1, 0); return expand_mult (mode, op0, op1, target, unsignedp); case TRUNC_DIV_EXPR: case FLOOR_DIV_EXPR: case CEIL_DIV_EXPR: case ROUND_DIV_EXPR: case EXACT_DIV_EXPR: - if (! safe_from_p (subtarget, TREE_OPERAND (exp, 1), 1)) - subtarget = 0; if (modifier == EXPAND_STACK_PARM) target = 0; /* Possible optimization: compute the dividend with EXPAND_SUM; then, if the divisor is constant, we can optimize the case where some terms of the dividend have coeffs divisible by it. */ - op0 = expand_expr (TREE_OPERAND (exp, 0), subtarget, VOIDmode, 0); - op1 = expand_expr (TREE_OPERAND (exp, 1), NULL_RTX, VOIDmode, 0); + expand_operands (TREE_OPERAND (exp, 0), TREE_OPERAND (exp, 1), + subtarget, &op0, &op1, 0); return expand_divmod (0, code, mode, op0, op1, target, unsignedp); case RDIV_EXPR: @@ -8323,12 +8018,10 @@ expand_expr (exp, target, tmode, modifier) case FLOOR_MOD_EXPR: case CEIL_MOD_EXPR: case ROUND_MOD_EXPR: - if (! 
safe_from_p (subtarget, TREE_OPERAND (exp, 1), 1)) - subtarget = 0; if (modifier == EXPAND_STACK_PARM) target = 0; - op0 = expand_expr (TREE_OPERAND (exp, 0), subtarget, VOIDmode, 0); - op1 = expand_expr (TREE_OPERAND (exp, 1), NULL_RTX, VOIDmode, 0); + expand_operands (TREE_OPERAND (exp, 0), TREE_OPERAND (exp, 1), + subtarget, &op0, &op1, 0); return expand_divmod (1, code, mode, op0, op1, target, unsignedp); case FIX_ROUND_EXPR: @@ -8373,10 +8066,10 @@ expand_expr (exp, target, tmode, modifier) if (modifier == EXPAND_STACK_PARM) target = 0; - /* Handle complex values specially. */ + /* ABS_EXPR is not valid for complex arguments. */ if (GET_MODE_CLASS (mode) == MODE_COMPLEX_INT || GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT) - return expand_complex_abs (mode, op0, target, unsignedp); + abort (); /* Unsigned abs is simply the operand. Testing here means we don't risk generating incorrect code below. */ @@ -8391,19 +8084,18 @@ expand_expr (exp, target, tmode, modifier) target = original_target; if (target == 0 || modifier == EXPAND_STACK_PARM - || ! safe_from_p (target, TREE_OPERAND (exp, 1), 1) || (GET_CODE (target) == MEM && MEM_VOLATILE_P (target)) || GET_MODE (target) != mode || (GET_CODE (target) == REG && REGNO (target) < FIRST_PSEUDO_REGISTER)) target = gen_reg_rtx (mode); - op1 = expand_expr (TREE_OPERAND (exp, 1), NULL_RTX, VOIDmode, 0); - op0 = expand_expr (TREE_OPERAND (exp, 0), target, VOIDmode, 0); + expand_operands (TREE_OPERAND (exp, 0), TREE_OPERAND (exp, 1), + target, &op0, &op1, 0); /* First try to do it with a special MIN or MAX instruction. If that does not win, use a conditional jump to select the proper value. */ - this_optab = (TREE_UNSIGNED (type) + this_optab = (unsignedp ? (code == MIN_EXPR ? umin_optab : umax_optab) : (code == MIN_EXPR ? smin_optab : smax_optab)); @@ -8418,6 +8110,14 @@ expand_expr (exp, target, tmode, modifier) if (GET_CODE (target) == MEM) target = gen_reg_rtx (mode); + /* If op1 was placed in target, swap op0 and op1. */ + if (target != op0 && target == op1) + { + rtx tem = op0; + op0 = op1; + op1 = tem; + } + if (target != op0) emit_move_insn (target, op0); @@ -8429,18 +8129,16 @@ expand_expr (exp, target, tmode, modifier) && ! can_compare_p (GE, mode, ccp_jump)) { if (code == MAX_EXPR) - do_jump_by_parts_greater_rtx (mode, TREE_UNSIGNED (type), - target, op1, NULL_RTX, op0); + do_jump_by_parts_greater_rtx (mode, unsignedp, target, op1, + NULL_RTX, op0); else - do_jump_by_parts_greater_rtx (mode, TREE_UNSIGNED (type), - op1, target, NULL_RTX, op0); + do_jump_by_parts_greater_rtx (mode, unsignedp, op1, target, + NULL_RTX, op0); } else { - int unsignedp = TREE_UNSIGNED (TREE_TYPE (TREE_OPERAND (exp, 1))); do_compare_rtx_and_jump (target, op1, code == MAX_EXPR ? GE : LE, - unsignedp, mode, NULL_RTX, NULL_RTX, - op0); + unsignedp, mode, NULL_RTX, NULL_RTX, op0); } emit_move_insn (target, op1); emit_label (op0); @@ -8455,15 +8153,6 @@ expand_expr (exp, target, tmode, modifier) abort (); return temp; - case FFS_EXPR: - op0 = expand_expr (TREE_OPERAND (exp, 0), subtarget, VOIDmode, 0); - if (modifier == EXPAND_STACK_PARM) - target = 0; - temp = expand_unop (mode, ffs_optab, op0, target, 1); - if (temp == 0) - abort (); - return temp; - /* ??? Can optimize bitwise operations with one arg constant. 
Can optimize (a bitwise1 n) bitwise2 (a bitwise3 b) and (a bitwise1 b) bitwise2 b (etc) @@ -8606,9 +8295,9 @@ expand_expr (exp, target, tmode, modifier) case COMPOUND_EXPR: expand_expr (TREE_OPERAND (exp, 0), const0_rtx, VOIDmode, 0); emit_queue (); - return expand_expr (TREE_OPERAND (exp, 1), - (ignore ? const0_rtx : target), - VOIDmode, modifier); + return expand_expr_real (TREE_OPERAND (exp, 1), + (ignore ? const0_rtx : target), + VOIDmode, modifier, alt_rtl); case COND_EXPR: /* If we would have a "singleton" (see below) were it not for a @@ -8840,8 +8529,12 @@ expand_expr (exp, target, tmode, modifier) jumpif (TREE_OPERAND (exp, 0), op0); start_cleanup_deferral (); - store_expr (TREE_OPERAND (exp, 2), temp, - modifier == EXPAND_STACK_PARM ? 2 : 0); + if (TREE_TYPE (TREE_OPERAND (exp, 2)) != void_type_node) + store_expr (TREE_OPERAND (exp, 2), temp, + modifier == EXPAND_STACK_PARM ? 2 : 0); + else + expand_expr (TREE_OPERAND (exp, 2), + ignore ? const0_rtx : NULL_RTX, VOIDmode, 0); op1 = op0; } else if (temp @@ -8861,8 +8554,12 @@ expand_expr (exp, target, tmode, modifier) jumpifnot (TREE_OPERAND (exp, 0), op0); start_cleanup_deferral (); - store_expr (TREE_OPERAND (exp, 1), temp, - modifier == EXPAND_STACK_PARM ? 2 : 0); + if (TREE_TYPE (TREE_OPERAND (exp, 1)) != void_type_node) + store_expr (TREE_OPERAND (exp, 1), temp, + modifier == EXPAND_STACK_PARM ? 2 : 0); + else + expand_expr (TREE_OPERAND (exp, 1), + ignore ? const0_rtx : NULL_RTX, VOIDmode, 0); op1 = op0; } else @@ -8944,8 +8641,6 @@ expand_expr (exp, target, tmode, modifier) else { target = assign_temp (type, 2, 0, 1); - /* All temp slots at this level must not conflict. */ - preserve_temp_slots (target); SET_DECL_RTL (slot, target); if (TREE_ADDRESSABLE (slot)) put_var_into_stack (slot, /*rescan=*/false); @@ -9007,7 +8702,7 @@ expand_expr (exp, target, tmode, modifier) tree lhs = TREE_OPERAND (exp, 0); tree rhs = TREE_OPERAND (exp, 1); - temp = expand_assignment (lhs, rhs, ! ignore, original_target != 0); + temp = expand_assignment (lhs, rhs, ! ignore); return temp; } @@ -9052,13 +8747,13 @@ expand_expr (exp, target, tmode, modifier) (TREE_CODE (rhs) == BIT_IOR_EXPR ? integer_one_node : integer_zero_node)), - 0, 0); + 0); do_pending_stack_adjust (); emit_label (label); return const0_rtx; } - temp = expand_assignment (lhs, rhs, ! ignore, original_target != 0); + temp = expand_assignment (lhs, rhs, ! ignore); return temp; } @@ -9131,7 +8826,7 @@ expand_expr (exp, target, tmode, modifier) op0); else if (GET_CODE (op0) == REG || GET_CODE (op0) == SUBREG || GET_CODE (op0) == CONCAT || GET_CODE (op0) == ADDRESSOF - || GET_CODE (op0) == PARALLEL) + || GET_CODE (op0) == PARALLEL || GET_CODE (op0) == LO_SUM) { /* If the operand is a SAVE_EXPR, we can deal with this by forcing the SAVE_EXPR into memory. */ @@ -9151,7 +8846,7 @@ expand_expr (exp, target, tmode, modifier) /* Handle calls that pass values in multiple non-contiguous locations. The Irix 6 ABI has examples of this. 
*/ - emit_group_store (memloc, op0, + emit_group_store (memloc, op0, inner_type, int_size_in_bytes (inner_type)); else emit_move_insn (memloc, op0); @@ -9167,11 +8862,8 @@ expand_expr (exp, target, tmode, modifier) if (modifier == EXPAND_SUM || modifier == EXPAND_INITIALIZER) { op0 = XEXP (op0, 0); -#ifdef POINTERS_EXTEND_UNSIGNED - if (GET_MODE (op0) == Pmode && GET_MODE (op0) != mode - && mode == ptr_mode) + if (GET_MODE (op0) == Pmode && mode == ptr_mode) op0 = convert_memory_address (ptr_mode, op0); -#endif return op0; } @@ -9232,11 +8924,8 @@ expand_expr (exp, target, tmode, modifier) && ! REG_USERVAR_P (op0)) mark_reg_pointer (op0, TYPE_ALIGN (TREE_TYPE (type))); -#ifdef POINTERS_EXTEND_UNSIGNED - if (GET_MODE (op0) == Pmode && GET_MODE (op0) != mode - && mode == ptr_mode) + if (GET_MODE (op0) == Pmode && mode == ptr_mode) op0 = convert_memory_address (ptr_mode, op0); -#endif return op0; @@ -9419,16 +9108,15 @@ expand_expr (exp, target, tmode, modifier) abort (); default: - return (*lang_hooks.expand_expr) (exp, original_target, tmode, modifier); + return (*lang_hooks.expand_expr) (exp, original_target, tmode, modifier, + alt_rtl); } /* Here to do an ordinary binary operator, generating an instruction from the optab already placed in `this_optab'. */ binop: - if (! safe_from_p (subtarget, TREE_OPERAND (exp, 1), 1)) - subtarget = 0; - op0 = expand_expr (TREE_OPERAND (exp, 0), subtarget, VOIDmode, 0); - op1 = expand_expr (TREE_OPERAND (exp, 1), NULL_RTX, VOIDmode, 0); + expand_operands (TREE_OPERAND (exp, 0), TREE_OPERAND (exp, 1), + subtarget, &op0, &op1, 0); binop2: if (modifier == EXPAND_STACK_PARM) target = 0; @@ -9444,9 +9132,7 @@ expand_expr (exp, target, tmode, modifier) aligned more than BIGGEST_ALIGNMENT. */ static int -is_aligning_offset (offset, exp) - tree offset; - tree exp; +is_aligning_offset (tree offset, tree exp) { /* Strip off any conversions and WITH_RECORD_EXPR nodes. */ while (TREE_CODE (offset) == NON_LVALUE_EXPR @@ -9459,7 +9145,8 @@ is_aligning_offset (offset, exp) power of 2 and which is larger than BIGGEST_ALIGNMENT. */ if (TREE_CODE (offset) != BIT_AND_EXPR || !host_integerp (TREE_OPERAND (offset, 1), 1) - || compare_tree_int (TREE_OPERAND (offset, 1), BIGGEST_ALIGNMENT) <= 0 + || compare_tree_int (TREE_OPERAND (offset, 1), + BIGGEST_ALIGNMENT / BITS_PER_UNIT) <= 0 || !exact_log2 (tree_low_cst (TREE_OPERAND (offset, 1), 1) + 1) < 0) return 0; @@ -9495,9 +9182,7 @@ is_aligning_offset (offset, exp) offset will be `sizetype'. */ tree -string_constant (arg, ptr_offset) - tree arg; - tree *ptr_offset; +string_constant (tree arg, tree *ptr_offset) { STRIP_NOPS (arg); @@ -9537,9 +9222,7 @@ string_constant (arg, ptr_offset) POST is 1 for postinc/decrements and 0 for preinc/decrements. */ static rtx -expand_increment (exp, post, ignore) - tree exp; - int post, ignore; +expand_increment (tree exp, int post, int ignore) { rtx op0, op1; rtx temp, value; @@ -9667,7 +9350,7 @@ expand_increment (exp, post, ignore) incremented = TREE_OPERAND (incremented, 0); } - temp = expand_assignment (incremented, newexp, ! post && ! ignore , 0); + temp = expand_assignment (incremented, newexp, ! post && ! ignore); return post ? op0 : temp; } @@ -9675,7 +9358,7 @@ expand_increment (exp, post, ignore) { /* We have a true reference to the value in OP0. If there is an insn to add or subtract in this mode, queue it. 
- Queueing the increment insn avoids the register shuffling + Queuing the increment insn avoids the register shuffling that often results if we must increment now and first save the old value for subsequent use. */ @@ -9737,986 +9420,6 @@ expand_increment (exp, post, ignore) return temp; } -/* At the start of a function, record that we have no previously-pushed - arguments waiting to be popped. */ - -void -init_pending_stack_adjust () -{ - pending_stack_adjust = 0; -} - -/* When exiting from function, if safe, clear out any pending stack adjust - so the adjustment won't get done. - - Note, if the current function calls alloca, then it must have a - frame pointer regardless of the value of flag_omit_frame_pointer. */ - -void -clear_pending_stack_adjust () -{ -#ifdef EXIT_IGNORE_STACK - if (optimize > 0 - && (! flag_omit_frame_pointer || current_function_calls_alloca) - && EXIT_IGNORE_STACK - && ! (DECL_INLINE (current_function_decl) && ! flag_no_inline) - && ! flag_inline_functions) - { - stack_pointer_delta -= pending_stack_adjust, - pending_stack_adjust = 0; - } -#endif -} - -/* Pop any previously-pushed arguments that have not been popped yet. */ - -void -do_pending_stack_adjust () -{ - if (inhibit_defer_pop == 0) - { - if (pending_stack_adjust != 0) - adjust_stack (GEN_INT (pending_stack_adjust)); - pending_stack_adjust = 0; - } -} - -/* Expand conditional expressions. */ - -/* Generate code to evaluate EXP and jump to LABEL if the value is zero. - LABEL is an rtx of code CODE_LABEL, in this function and all the - functions here. */ - -void -jumpifnot (exp, label) - tree exp; - rtx label; -{ - do_jump (exp, label, NULL_RTX); -} - -/* Generate code to evaluate EXP and jump to LABEL if the value is nonzero. */ - -void -jumpif (exp, label) - tree exp; - rtx label; -{ - do_jump (exp, NULL_RTX, label); -} - -/* Generate code to evaluate EXP and jump to IF_FALSE_LABEL if - the result is zero, or IF_TRUE_LABEL if the result is one. - Either of IF_FALSE_LABEL and IF_TRUE_LABEL may be zero, - meaning fall through in that case. - - do_jump always does any pending stack adjust except when it does not - actually perform a jump. An example where there is no jump - is when EXP is `(foo (), 0)' and IF_FALSE_LABEL is null. - - This function is responsible for optimizing cases such as - &&, || and comparison operators in EXP. */ - -void -do_jump (exp, if_false_label, if_true_label) - tree exp; - rtx if_false_label, if_true_label; -{ - enum tree_code code = TREE_CODE (exp); - /* Some cases need to create a label to jump to - in order to properly fall through. - These cases set DROP_THROUGH_LABEL nonzero. */ - rtx drop_through_label = 0; - rtx temp; - int i; - tree type; - enum machine_mode mode; - -#ifdef MAX_INTEGER_COMPUTATION_MODE - check_max_integer_computation_mode (exp); -#endif - - emit_queue (); - - switch (code) - { - case ERROR_MARK: - break; - - case INTEGER_CST: - temp = integer_zerop (exp) ? if_false_label : if_true_label; - if (temp) - emit_jump (temp); - break; - -#if 0 - /* This is not true with #pragma weak */ - case ADDR_EXPR: - /* The address of something can never be zero. 
*/ - if (if_true_label) - emit_jump (if_true_label); - break; -#endif - - case UNSAVE_EXPR: - do_jump (TREE_OPERAND (exp, 0), if_false_label, if_true_label); - TREE_OPERAND (exp, 0) - = (*lang_hooks.unsave_expr_now) (TREE_OPERAND (exp, 0)); - break; - - case NOP_EXPR: - if (TREE_CODE (TREE_OPERAND (exp, 0)) == COMPONENT_REF - || TREE_CODE (TREE_OPERAND (exp, 0)) == BIT_FIELD_REF - || TREE_CODE (TREE_OPERAND (exp, 0)) == ARRAY_REF - || TREE_CODE (TREE_OPERAND (exp, 0)) == ARRAY_RANGE_REF) - goto normal; - case CONVERT_EXPR: - /* If we are narrowing the operand, we have to do the compare in the - narrower mode. */ - if ((TYPE_PRECISION (TREE_TYPE (exp)) - < TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (exp, 0))))) - goto normal; - case NON_LVALUE_EXPR: - case REFERENCE_EXPR: - case ABS_EXPR: - case NEGATE_EXPR: - case LROTATE_EXPR: - case RROTATE_EXPR: - /* These cannot change zero->nonzero or vice versa. */ - do_jump (TREE_OPERAND (exp, 0), if_false_label, if_true_label); - break; - - case WITH_RECORD_EXPR: - /* Put the object on the placeholder list, recurse through our first - operand, and pop the list. */ - placeholder_list = tree_cons (TREE_OPERAND (exp, 1), NULL_TREE, - placeholder_list); - do_jump (TREE_OPERAND (exp, 0), if_false_label, if_true_label); - placeholder_list = TREE_CHAIN (placeholder_list); - break; - -#if 0 - /* This is never less insns than evaluating the PLUS_EXPR followed by - a test and can be longer if the test is eliminated. */ - case PLUS_EXPR: - /* Reduce to minus. */ - exp = build (MINUS_EXPR, TREE_TYPE (exp), - TREE_OPERAND (exp, 0), - fold (build1 (NEGATE_EXPR, TREE_TYPE (TREE_OPERAND (exp, 1)), - TREE_OPERAND (exp, 1)))); - /* Process as MINUS. */ -#endif - - case MINUS_EXPR: - /* Nonzero iff operands of minus differ. */ - do_compare_and_jump (build (NE_EXPR, TREE_TYPE (exp), - TREE_OPERAND (exp, 0), - TREE_OPERAND (exp, 1)), - NE, NE, if_false_label, if_true_label); - break; - - case BIT_AND_EXPR: - /* If we are AND'ing with a small constant, do this comparison in the - smallest type that fits. If the machine doesn't have comparisons - that small, it will be converted back to the wider comparison. - This helps if we are testing the sign bit of a narrower object. - combine can't do this for us because it can't know whether a - ZERO_EXTRACT or a compare in a smaller mode exists, but we do. */ - - if (! 
SLOW_BYTE_ACCESS - && TREE_CODE (TREE_OPERAND (exp, 1)) == INTEGER_CST - && TYPE_PRECISION (TREE_TYPE (exp)) <= HOST_BITS_PER_WIDE_INT - && (i = tree_floor_log2 (TREE_OPERAND (exp, 1))) >= 0 - && (mode = mode_for_size (i + 1, MODE_INT, 0)) != BLKmode - && (type = (*lang_hooks.types.type_for_mode) (mode, 1)) != 0 - && TYPE_PRECISION (type) < TYPE_PRECISION (TREE_TYPE (exp)) - && (cmp_optab->handlers[(int) TYPE_MODE (type)].insn_code - != CODE_FOR_nothing)) - { - do_jump (convert (type, exp), if_false_label, if_true_label); - break; - } - goto normal; - - case TRUTH_NOT_EXPR: - do_jump (TREE_OPERAND (exp, 0), if_true_label, if_false_label); - break; - - case TRUTH_ANDIF_EXPR: - if (if_false_label == 0) - if_false_label = drop_through_label = gen_label_rtx (); - do_jump (TREE_OPERAND (exp, 0), if_false_label, NULL_RTX); - start_cleanup_deferral (); - do_jump (TREE_OPERAND (exp, 1), if_false_label, if_true_label); - end_cleanup_deferral (); - break; - - case TRUTH_ORIF_EXPR: - if (if_true_label == 0) - if_true_label = drop_through_label = gen_label_rtx (); - do_jump (TREE_OPERAND (exp, 0), NULL_RTX, if_true_label); - start_cleanup_deferral (); - do_jump (TREE_OPERAND (exp, 1), if_false_label, if_true_label); - end_cleanup_deferral (); - break; - - case COMPOUND_EXPR: - push_temp_slots (); - expand_expr (TREE_OPERAND (exp, 0), const0_rtx, VOIDmode, 0); - preserve_temp_slots (NULL_RTX); - free_temp_slots (); - pop_temp_slots (); - emit_queue (); - do_pending_stack_adjust (); - do_jump (TREE_OPERAND (exp, 1), if_false_label, if_true_label); - break; - - case COMPONENT_REF: - case BIT_FIELD_REF: - case ARRAY_REF: - case ARRAY_RANGE_REF: - { - HOST_WIDE_INT bitsize, bitpos; - int unsignedp; - enum machine_mode mode; - tree type; - tree offset; - int volatilep = 0; - - /* Get description of this reference. We don't actually care - about the underlying object here. */ - get_inner_reference (exp, &bitsize, &bitpos, &offset, &mode, - &unsignedp, &volatilep); - - type = (*lang_hooks.types.type_for_size) (bitsize, unsignedp); - if (! SLOW_BYTE_ACCESS - && type != 0 && bitsize >= 0 - && TYPE_PRECISION (type) < TYPE_PRECISION (TREE_TYPE (exp)) - && (cmp_optab->handlers[(int) TYPE_MODE (type)].insn_code - != CODE_FOR_nothing)) - { - do_jump (convert (type, exp), if_false_label, if_true_label); - break; - } - goto normal; - } - - case COND_EXPR: - /* Do (a ? 1 : 0) and (a ? 0 : 1) as special cases. */ - if (integer_onep (TREE_OPERAND (exp, 1)) - && integer_zerop (TREE_OPERAND (exp, 2))) - do_jump (TREE_OPERAND (exp, 0), if_false_label, if_true_label); - - else if (integer_zerop (TREE_OPERAND (exp, 1)) - && integer_onep (TREE_OPERAND (exp, 2))) - do_jump (TREE_OPERAND (exp, 0), if_true_label, if_false_label); - - else - { - rtx label1 = gen_label_rtx (); - drop_through_label = gen_label_rtx (); - - do_jump (TREE_OPERAND (exp, 0), label1, NULL_RTX); - - start_cleanup_deferral (); - /* Now the THEN-expression. */ - do_jump (TREE_OPERAND (exp, 1), - if_false_label ? if_false_label : drop_through_label, - if_true_label ? if_true_label : drop_through_label); - /* In case the do_jump just above never jumps. */ - do_pending_stack_adjust (); - emit_label (label1); - - /* Now the ELSE-expression. */ - do_jump (TREE_OPERAND (exp, 2), - if_false_label ? if_false_label : drop_through_label, - if_true_label ? 
if_true_label : drop_through_label); - end_cleanup_deferral (); - } - break; - - case EQ_EXPR: - { - tree inner_type = TREE_TYPE (TREE_OPERAND (exp, 0)); - - if (GET_MODE_CLASS (TYPE_MODE (inner_type)) == MODE_COMPLEX_FLOAT - || GET_MODE_CLASS (TYPE_MODE (inner_type)) == MODE_COMPLEX_INT) - { - tree exp0 = save_expr (TREE_OPERAND (exp, 0)); - tree exp1 = save_expr (TREE_OPERAND (exp, 1)); - do_jump - (fold - (build (TRUTH_ANDIF_EXPR, TREE_TYPE (exp), - fold (build (EQ_EXPR, TREE_TYPE (exp), - fold (build1 (REALPART_EXPR, - TREE_TYPE (inner_type), - exp0)), - fold (build1 (REALPART_EXPR, - TREE_TYPE (inner_type), - exp1)))), - fold (build (EQ_EXPR, TREE_TYPE (exp), - fold (build1 (IMAGPART_EXPR, - TREE_TYPE (inner_type), - exp0)), - fold (build1 (IMAGPART_EXPR, - TREE_TYPE (inner_type), - exp1)))))), - if_false_label, if_true_label); - } - - else if (integer_zerop (TREE_OPERAND (exp, 1))) - do_jump (TREE_OPERAND (exp, 0), if_true_label, if_false_label); - - else if (GET_MODE_CLASS (TYPE_MODE (inner_type)) == MODE_INT - && !can_compare_p (EQ, TYPE_MODE (inner_type), ccp_jump)) - do_jump_by_parts_equality (exp, if_false_label, if_true_label); - else - do_compare_and_jump (exp, EQ, EQ, if_false_label, if_true_label); - break; - } - - case NE_EXPR: - { - tree inner_type = TREE_TYPE (TREE_OPERAND (exp, 0)); - - if (GET_MODE_CLASS (TYPE_MODE (inner_type)) == MODE_COMPLEX_FLOAT - || GET_MODE_CLASS (TYPE_MODE (inner_type)) == MODE_COMPLEX_INT) - { - tree exp0 = save_expr (TREE_OPERAND (exp, 0)); - tree exp1 = save_expr (TREE_OPERAND (exp, 1)); - do_jump - (fold - (build (TRUTH_ORIF_EXPR, TREE_TYPE (exp), - fold (build (NE_EXPR, TREE_TYPE (exp), - fold (build1 (REALPART_EXPR, - TREE_TYPE (inner_type), - exp0)), - fold (build1 (REALPART_EXPR, - TREE_TYPE (inner_type), - exp1)))), - fold (build (NE_EXPR, TREE_TYPE (exp), - fold (build1 (IMAGPART_EXPR, - TREE_TYPE (inner_type), - exp0)), - fold (build1 (IMAGPART_EXPR, - TREE_TYPE (inner_type), - exp1)))))), - if_false_label, if_true_label); - } - - else if (integer_zerop (TREE_OPERAND (exp, 1))) - do_jump (TREE_OPERAND (exp, 0), if_false_label, if_true_label); - - else if (GET_MODE_CLASS (TYPE_MODE (inner_type)) == MODE_INT - && !can_compare_p (NE, TYPE_MODE (inner_type), ccp_jump)) - do_jump_by_parts_equality (exp, if_true_label, if_false_label); - else - do_compare_and_jump (exp, NE, NE, if_false_label, if_true_label); - break; - } - - case LT_EXPR: - mode = TYPE_MODE (TREE_TYPE (TREE_OPERAND (exp, 0))); - if (GET_MODE_CLASS (mode) == MODE_INT - && ! can_compare_p (LT, mode, ccp_jump)) - do_jump_by_parts_greater (exp, 1, if_false_label, if_true_label); - else - do_compare_and_jump (exp, LT, LTU, if_false_label, if_true_label); - break; - - case LE_EXPR: - mode = TYPE_MODE (TREE_TYPE (TREE_OPERAND (exp, 0))); - if (GET_MODE_CLASS (mode) == MODE_INT - && ! can_compare_p (LE, mode, ccp_jump)) - do_jump_by_parts_greater (exp, 0, if_true_label, if_false_label); - else - do_compare_and_jump (exp, LE, LEU, if_false_label, if_true_label); - break; - - case GT_EXPR: - mode = TYPE_MODE (TREE_TYPE (TREE_OPERAND (exp, 0))); - if (GET_MODE_CLASS (mode) == MODE_INT - && ! can_compare_p (GT, mode, ccp_jump)) - do_jump_by_parts_greater (exp, 0, if_false_label, if_true_label); - else - do_compare_and_jump (exp, GT, GTU, if_false_label, if_true_label); - break; - - case GE_EXPR: - mode = TYPE_MODE (TREE_TYPE (TREE_OPERAND (exp, 0))); - if (GET_MODE_CLASS (mode) == MODE_INT - && ! 
can_compare_p (GE, mode, ccp_jump)) - do_jump_by_parts_greater (exp, 1, if_true_label, if_false_label); - else - do_compare_and_jump (exp, GE, GEU, if_false_label, if_true_label); - break; - - case UNORDERED_EXPR: - case ORDERED_EXPR: - { - enum rtx_code cmp, rcmp; - int do_rev; - - if (code == UNORDERED_EXPR) - cmp = UNORDERED, rcmp = ORDERED; - else - cmp = ORDERED, rcmp = UNORDERED; - mode = TYPE_MODE (TREE_TYPE (TREE_OPERAND (exp, 0))); - - do_rev = 0; - if (! can_compare_p (cmp, mode, ccp_jump) - && (can_compare_p (rcmp, mode, ccp_jump) - /* If the target doesn't provide either UNORDERED or ORDERED - comparisons, canonicalize on UNORDERED for the library. */ - || rcmp == UNORDERED)) - do_rev = 1; - - if (! do_rev) - do_compare_and_jump (exp, cmp, cmp, if_false_label, if_true_label); - else - do_compare_and_jump (exp, rcmp, rcmp, if_true_label, if_false_label); - } - break; - - { - enum rtx_code rcode1; - enum tree_code tcode2; - - case UNLT_EXPR: - rcode1 = UNLT; - tcode2 = LT_EXPR; - goto unordered_bcc; - case UNLE_EXPR: - rcode1 = UNLE; - tcode2 = LE_EXPR; - goto unordered_bcc; - case UNGT_EXPR: - rcode1 = UNGT; - tcode2 = GT_EXPR; - goto unordered_bcc; - case UNGE_EXPR: - rcode1 = UNGE; - tcode2 = GE_EXPR; - goto unordered_bcc; - case UNEQ_EXPR: - rcode1 = UNEQ; - tcode2 = EQ_EXPR; - goto unordered_bcc; - - unordered_bcc: - mode = TYPE_MODE (TREE_TYPE (TREE_OPERAND (exp, 0))); - if (can_compare_p (rcode1, mode, ccp_jump)) - do_compare_and_jump (exp, rcode1, rcode1, if_false_label, - if_true_label); - else - { - tree op0 = save_expr (TREE_OPERAND (exp, 0)); - tree op1 = save_expr (TREE_OPERAND (exp, 1)); - tree cmp0, cmp1; - - /* If the target doesn't support combined unordered - compares, decompose into UNORDERED + comparison. */ - cmp0 = fold (build (UNORDERED_EXPR, TREE_TYPE (exp), op0, op1)); - cmp1 = fold (build (tcode2, TREE_TYPE (exp), op0, op1)); - exp = build (TRUTH_ORIF_EXPR, TREE_TYPE (exp), cmp0, cmp1); - do_jump (exp, if_false_label, if_true_label); - } - } - break; - - /* Special case: - __builtin_expect (<test>, 0) and - __builtin_expect (<test>, 1) - - We need to do this here, so that <test> is not converted to a SCC - operation on machines that use condition code registers and COMPARE - like the PowerPC, and then the jump is done based on whether the SCC - operation produced a 1 or 0. */ - case CALL_EXPR: - /* Check for a built-in function. */ - if (TREE_CODE (TREE_OPERAND (exp, 0)) == ADDR_EXPR) - { - tree fndecl = TREE_OPERAND (TREE_OPERAND (exp, 0), 0); - tree arglist = TREE_OPERAND (exp, 1); - - if (TREE_CODE (fndecl) == FUNCTION_DECL - && DECL_BUILT_IN (fndecl) - && DECL_FUNCTION_CODE (fndecl) == BUILT_IN_EXPECT - && arglist != NULL_TREE - && TREE_CHAIN (arglist) != NULL_TREE) - { - rtx seq = expand_builtin_expect_jump (exp, if_false_label, - if_true_label); - - if (seq != NULL_RTX) - { - emit_insn (seq); - return; - } - } - } - /* fall through and generate the normal code. */ - - default: - normal: - temp = expand_expr (exp, NULL_RTX, VOIDmode, 0); -#if 0 - /* This is not needed any more and causes poor code since it causes - comparisons and tests from non-SI objects to have different code - sequences. */ - /* Copy to register to avoid generating bad insns by cse - from (set (mem ...) (arithop)) (set (cc0) (mem ...)). */ - if (!cse_not_expected && GET_CODE (temp) == MEM) - temp = copy_to_reg (temp); -#endif - do_pending_stack_adjust (); - /* Do any postincrements in the expression that was tested. 
*/ - emit_queue (); - - if (GET_CODE (temp) == CONST_INT - || (GET_CODE (temp) == CONST_DOUBLE && GET_MODE (temp) == VOIDmode) - || GET_CODE (temp) == LABEL_REF) - { - rtx target = temp == const0_rtx ? if_false_label : if_true_label; - if (target) - emit_jump (target); - } - else if (GET_MODE_CLASS (GET_MODE (temp)) == MODE_INT - && ! can_compare_p (NE, GET_MODE (temp), ccp_jump)) - /* Note swapping the labels gives us not-equal. */ - do_jump_by_parts_equality_rtx (temp, if_true_label, if_false_label); - else if (GET_MODE (temp) != VOIDmode) - do_compare_rtx_and_jump (temp, CONST0_RTX (GET_MODE (temp)), - NE, TREE_UNSIGNED (TREE_TYPE (exp)), - GET_MODE (temp), NULL_RTX, - if_false_label, if_true_label); - else - abort (); - } - - if (drop_through_label) - { - /* If do_jump produces code that might be jumped around, - do any stack adjusts from that code, before the place - where control merges in. */ - do_pending_stack_adjust (); - emit_label (drop_through_label); - } -} - -/* Given a comparison expression EXP for values too wide to be compared - with one insn, test the comparison and jump to the appropriate label. - The code of EXP is ignored; we always test GT if SWAP is 0, - and LT if SWAP is 1. */ - -static void -do_jump_by_parts_greater (exp, swap, if_false_label, if_true_label) - tree exp; - int swap; - rtx if_false_label, if_true_label; -{ - rtx op0 = expand_expr (TREE_OPERAND (exp, swap), NULL_RTX, VOIDmode, 0); - rtx op1 = expand_expr (TREE_OPERAND (exp, !swap), NULL_RTX, VOIDmode, 0); - enum machine_mode mode = TYPE_MODE (TREE_TYPE (TREE_OPERAND (exp, 0))); - int unsignedp = TREE_UNSIGNED (TREE_TYPE (TREE_OPERAND (exp, 0))); - - do_jump_by_parts_greater_rtx (mode, unsignedp, op0, op1, if_false_label, if_true_label); -} - -/* Compare OP0 with OP1, word at a time, in mode MODE. - UNSIGNEDP says to do unsigned comparison. - Jump to IF_TRUE_LABEL if OP0 is greater, IF_FALSE_LABEL otherwise. */ - -void -do_jump_by_parts_greater_rtx (mode, unsignedp, op0, op1, if_false_label, if_true_label) - enum machine_mode mode; - int unsignedp; - rtx op0, op1; - rtx if_false_label, if_true_label; -{ - int nwords = (GET_MODE_SIZE (mode) / UNITS_PER_WORD); - rtx drop_through_label = 0; - int i; - - if (! if_true_label || ! if_false_label) - drop_through_label = gen_label_rtx (); - if (! if_true_label) - if_true_label = drop_through_label; - if (! if_false_label) - if_false_label = drop_through_label; - - /* Compare a word at a time, high order first. */ - for (i = 0; i < nwords; i++) - { - rtx op0_word, op1_word; - - if (WORDS_BIG_ENDIAN) - { - op0_word = operand_subword_force (op0, i, mode); - op1_word = operand_subword_force (op1, i, mode); - } - else - { - op0_word = operand_subword_force (op0, nwords - 1 - i, mode); - op1_word = operand_subword_force (op1, nwords - 1 - i, mode); - } - - /* All but high-order word must be compared as unsigned. */ - do_compare_rtx_and_jump (op0_word, op1_word, GT, - (unsignedp || i > 0), word_mode, NULL_RTX, - NULL_RTX, if_true_label); - - /* Consider lower words only if these are equal. */ - do_compare_rtx_and_jump (op0_word, op1_word, NE, unsignedp, word_mode, - NULL_RTX, NULL_RTX, if_false_label); - } - - if (if_false_label) - emit_jump (if_false_label); - if (drop_through_label) - emit_label (drop_through_label); -} - -/* Given an EQ_EXPR expression EXP for values too wide to be compared - with one insn, test the comparison and jump to the appropriate label. 
*/ - -static void -do_jump_by_parts_equality (exp, if_false_label, if_true_label) - tree exp; - rtx if_false_label, if_true_label; -{ - rtx op0 = expand_expr (TREE_OPERAND (exp, 0), NULL_RTX, VOIDmode, 0); - rtx op1 = expand_expr (TREE_OPERAND (exp, 1), NULL_RTX, VOIDmode, 0); - enum machine_mode mode = TYPE_MODE (TREE_TYPE (TREE_OPERAND (exp, 0))); - int nwords = (GET_MODE_SIZE (mode) / UNITS_PER_WORD); - int i; - rtx drop_through_label = 0; - - if (! if_false_label) - drop_through_label = if_false_label = gen_label_rtx (); - - for (i = 0; i < nwords; i++) - do_compare_rtx_and_jump (operand_subword_force (op0, i, mode), - operand_subword_force (op1, i, mode), - EQ, TREE_UNSIGNED (TREE_TYPE (exp)), - word_mode, NULL_RTX, if_false_label, NULL_RTX); - - if (if_true_label) - emit_jump (if_true_label); - if (drop_through_label) - emit_label (drop_through_label); -} - -/* Jump according to whether OP0 is 0. - We assume that OP0 has an integer mode that is too wide - for the available compare insns. */ - -void -do_jump_by_parts_equality_rtx (op0, if_false_label, if_true_label) - rtx op0; - rtx if_false_label, if_true_label; -{ - int nwords = GET_MODE_SIZE (GET_MODE (op0)) / UNITS_PER_WORD; - rtx part; - int i; - rtx drop_through_label = 0; - - /* The fastest way of doing this comparison on almost any machine is to - "or" all the words and compare the result. If all have to be loaded - from memory and this is a very wide item, it's possible this may - be slower, but that's highly unlikely. */ - - part = gen_reg_rtx (word_mode); - emit_move_insn (part, operand_subword_force (op0, 0, GET_MODE (op0))); - for (i = 1; i < nwords && part != 0; i++) - part = expand_binop (word_mode, ior_optab, part, - operand_subword_force (op0, i, GET_MODE (op0)), - part, 1, OPTAB_WIDEN); - - if (part != 0) - { - do_compare_rtx_and_jump (part, const0_rtx, EQ, 1, word_mode, - NULL_RTX, if_false_label, if_true_label); - - return; - } - - /* If we couldn't do the "or" simply, do this with a series of compares. */ - if (! if_false_label) - drop_through_label = if_false_label = gen_label_rtx (); - - for (i = 0; i < nwords; i++) - do_compare_rtx_and_jump (operand_subword_force (op0, i, GET_MODE (op0)), - const0_rtx, EQ, 1, word_mode, NULL_RTX, - if_false_label, NULL_RTX); - - if (if_true_label) - emit_jump (if_true_label); - - if (drop_through_label) - emit_label (drop_through_label); -} - -/* Generate code for a comparison of OP0 and OP1 with rtx code CODE. - (including code to compute the values to be compared) - and set (CC0) according to the result. - The decision as to signed or unsigned comparison must be made by the caller. - - We force a stack adjustment unless there are currently - things pushed on the stack that aren't yet used. - - If MODE is BLKmode, SIZE is an RTX giving the size of the objects being - compared. */ - -rtx -compare_from_rtx (op0, op1, code, unsignedp, mode, size) - rtx op0, op1; - enum rtx_code code; - int unsignedp; - enum machine_mode mode; - rtx size; -{ - enum rtx_code ucode; - rtx tem; - - /* If one operand is constant, make it the second one. Only do this - if the other operand is not constant as well. */ - - if (swap_commutative_operands_p (op0, op1)) - { - tem = op0; - op0 = op1; - op1 = tem; - code = swap_condition (code); - } - - if (flag_force_mem) - { - op0 = force_not_mem (op0); - op1 = force_not_mem (op1); - } - - do_pending_stack_adjust (); - - ucode = unsignedp ? 
unsigned_condition (code) : code; - if ((tem = simplify_relational_operation (ucode, mode, op0, op1)) != 0) - return tem; - -#if 0 - /* There's no need to do this now that combine.c can eliminate lots of - sign extensions. This can be less efficient in certain cases on other - machines. */ - - /* If this is a signed equality comparison, we can do it as an - unsigned comparison since zero-extension is cheaper than sign - extension and comparisons with zero are done as unsigned. This is - the case even on machines that can do fast sign extension, since - zero-extension is easier to combine with other operations than - sign-extension is. If we are comparing against a constant, we must - convert it to what it would look like unsigned. */ - if ((code == EQ || code == NE) && ! unsignedp - && GET_MODE_BITSIZE (GET_MODE (op0)) <= HOST_BITS_PER_WIDE_INT) - { - if (GET_CODE (op1) == CONST_INT - && (INTVAL (op1) & GET_MODE_MASK (GET_MODE (op0))) != INTVAL (op1)) - op1 = GEN_INT (INTVAL (op1) & GET_MODE_MASK (GET_MODE (op0))); - unsignedp = 1; - } -#endif - - emit_cmp_insn (op0, op1, code, size, mode, unsignedp); - -#if HAVE_cc0 - return gen_rtx_fmt_ee (code, VOIDmode, cc0_rtx, const0_rtx); -#else - return gen_rtx_fmt_ee (code, VOIDmode, op0, op1); -#endif -} - -/* Like do_compare_and_jump but expects the values to compare as two rtx's. - The decision as to signed or unsigned comparison must be made by the caller. - - If MODE is BLKmode, SIZE is an RTX giving the size of the objects being - compared. */ - -void -do_compare_rtx_and_jump (op0, op1, code, unsignedp, mode, size, - if_false_label, if_true_label) - rtx op0, op1; - enum rtx_code code; - int unsignedp; - enum machine_mode mode; - rtx size; - rtx if_false_label, if_true_label; -{ - enum rtx_code ucode; - rtx tem; - int dummy_true_label = 0; - - /* Reverse the comparison if that is safe and we want to jump if it is - false. */ - if (! if_true_label && ! FLOAT_MODE_P (mode)) - { - if_true_label = if_false_label; - if_false_label = 0; - code = reverse_condition (code); - } - - /* If one operand is constant, make it the second one. Only do this - if the other operand is not constant as well. */ - - if (swap_commutative_operands_p (op0, op1)) - { - tem = op0; - op0 = op1; - op1 = tem; - code = swap_condition (code); - } - - if (flag_force_mem) - { - op0 = force_not_mem (op0); - op1 = force_not_mem (op1); - } - - do_pending_stack_adjust (); - - ucode = unsignedp ? unsigned_condition (code) : code; - if ((tem = simplify_relational_operation (ucode, mode, op0, op1)) != 0) - { - if (tem == const_true_rtx) - { - if (if_true_label) - emit_jump (if_true_label); - } - else - { - if (if_false_label) - emit_jump (if_false_label); - } - return; - } - -#if 0 - /* There's no need to do this now that combine.c can eliminate lots of - sign extensions. This can be less efficient in certain cases on other - machines. */ - - /* If this is a signed equality comparison, we can do it as an - unsigned comparison since zero-extension is cheaper than sign - extension and comparisons with zero are done as unsigned. This is - the case even on machines that can do fast sign extension, since - zero-extension is easier to combine with other operations than - sign-extension is. If we are comparing against a constant, we must - convert it to what it would look like unsigned. */ - if ((code == EQ || code == NE) && ! 
unsignedp - && GET_MODE_BITSIZE (GET_MODE (op0)) <= HOST_BITS_PER_WIDE_INT) - { - if (GET_CODE (op1) == CONST_INT - && (INTVAL (op1) & GET_MODE_MASK (GET_MODE (op0))) != INTVAL (op1)) - op1 = GEN_INT (INTVAL (op1) & GET_MODE_MASK (GET_MODE (op0))); - unsignedp = 1; - } -#endif - - if (! if_true_label) - { - dummy_true_label = 1; - if_true_label = gen_label_rtx (); - } - - emit_cmp_and_jump_insns (op0, op1, code, size, mode, unsignedp, - if_true_label); - - if (if_false_label) - emit_jump (if_false_label); - if (dummy_true_label) - emit_label (if_true_label); -} - -/* Generate code for a comparison expression EXP (including code to compute - the values to be compared) and a conditional jump to IF_FALSE_LABEL and/or - IF_TRUE_LABEL. One of the labels can be NULL_RTX, in which case the - generated code will drop through. - SIGNED_CODE should be the rtx operation for this comparison for - signed data; UNSIGNED_CODE, likewise for use if data is unsigned. - - We force a stack adjustment unless there are currently - things pushed on the stack that aren't yet used. */ - -static void -do_compare_and_jump (exp, signed_code, unsigned_code, if_false_label, - if_true_label) - tree exp; - enum rtx_code signed_code, unsigned_code; - rtx if_false_label, if_true_label; -{ - rtx op0, op1; - tree type; - enum machine_mode mode; - int unsignedp; - enum rtx_code code; - - /* Don't crash if the comparison was erroneous. */ - op0 = expand_expr (TREE_OPERAND (exp, 0), NULL_RTX, VOIDmode, 0); - if (TREE_CODE (TREE_OPERAND (exp, 0)) == ERROR_MARK) - return; - - op1 = expand_expr (TREE_OPERAND (exp, 1), NULL_RTX, VOIDmode, 0); - if (TREE_CODE (TREE_OPERAND (exp, 1)) == ERROR_MARK) - return; - - type = TREE_TYPE (TREE_OPERAND (exp, 0)); - mode = TYPE_MODE (type); - if (TREE_CODE (TREE_OPERAND (exp, 0)) == INTEGER_CST - && (TREE_CODE (TREE_OPERAND (exp, 1)) != INTEGER_CST - || (GET_MODE_BITSIZE (mode) - > GET_MODE_BITSIZE (TYPE_MODE (TREE_TYPE (TREE_OPERAND (exp, - 1))))))) - { - /* op0 might have been replaced by promoted constant, in which - case the type of second argument should be used. */ - type = TREE_TYPE (TREE_OPERAND (exp, 1)); - mode = TYPE_MODE (type); - } - unsignedp = TREE_UNSIGNED (type); - code = unsignedp ? unsigned_code : signed_code; - -#ifdef HAVE_canonicalize_funcptr_for_compare - /* If function pointers need to be "canonicalized" before they can - be reliably compared, then canonicalize them. */ - if (HAVE_canonicalize_funcptr_for_compare - && TREE_CODE (TREE_TYPE (TREE_OPERAND (exp, 0))) == POINTER_TYPE - && (TREE_CODE (TREE_TYPE (TREE_TYPE (TREE_OPERAND (exp, 0)))) - == FUNCTION_TYPE)) - { - rtx new_op0 = gen_reg_rtx (mode); - - emit_insn (gen_canonicalize_funcptr_for_compare (new_op0, op0)); - op0 = new_op0; - } - - if (HAVE_canonicalize_funcptr_for_compare - && TREE_CODE (TREE_TYPE (TREE_OPERAND (exp, 1))) == POINTER_TYPE - && (TREE_CODE (TREE_TYPE (TREE_TYPE (TREE_OPERAND (exp, 1)))) - == FUNCTION_TYPE)) - { - rtx new_op1 = gen_reg_rtx (mode); - - emit_insn (gen_canonicalize_funcptr_for_compare (new_op1, op1)); - op1 = new_op1; - } -#endif - - /* Do any postincrements in the expression that was tested. */ - emit_queue (); - - do_compare_rtx_and_jump (op0, op1, code, unsignedp, mode, - ((mode == BLKmode) - ? expr_size (TREE_OPERAND (exp, 0)) : NULL_RTX), - if_false_label, if_true_label); -} - /* Generate code to calculate EXP using a store-flag instruction and return an rtx for the result. EXP is either a comparison or a TRUTH_NOT_EXPR whose operand is a comparison. 
@@ -10738,11 +9441,7 @@ do_compare_and_jump (exp, signed_code, unsigned_code, if_false_label, set/jump/set sequence. */ static rtx -do_store_flag (exp, target, mode, only_cheap) - tree exp; - rtx target; - enum machine_mode mode; - int only_cheap; +do_store_flag (tree exp, rtx target, enum machine_mode mode, int only_cheap) { enum rtx_code code; tree arg0, arg1, type; @@ -10872,64 +9571,19 @@ do_store_flag (exp, target, mode, only_cheap) do this by shifting the bit being tested to the low-order bit and masking the result with the constant 1. If the condition was EQ, we xor it with 1. This does not require an scc insn and is faster - than an scc insn even if we have it. */ + than an scc insn even if we have it. + + The code to make this transformation was moved into fold_single_bit_test, + so we just call into the folder and expand its result. */ if ((code == NE || code == EQ) && TREE_CODE (arg0) == BIT_AND_EXPR && integer_zerop (arg1) && integer_pow2p (TREE_OPERAND (arg0, 1))) { - tree inner = TREE_OPERAND (arg0, 0); - int bitnum = tree_log2 (TREE_OPERAND (arg0, 1)); - int ops_unsignedp; - - /* If INNER is a right shift of a constant and it plus BITNUM does - not overflow, adjust BITNUM and INNER. */ - - if (TREE_CODE (inner) == RSHIFT_EXPR - && TREE_CODE (TREE_OPERAND (inner, 1)) == INTEGER_CST - && TREE_INT_CST_HIGH (TREE_OPERAND (inner, 1)) == 0 - && bitnum < TYPE_PRECISION (type) - && 0 > compare_tree_int (TREE_OPERAND (inner, 1), - bitnum - TYPE_PRECISION (type))) - { - bitnum += TREE_INT_CST_LOW (TREE_OPERAND (inner, 1)); - inner = TREE_OPERAND (inner, 0); - } - - /* If we are going to be able to omit the AND below, we must do our - operations as unsigned. If we must use the AND, we have a choice. - Normally unsigned is faster, but for some machines signed is. */ - ops_unsignedp = (bitnum == TYPE_PRECISION (type) - 1 ? 1 -#ifdef LOAD_EXTEND_OP - : (LOAD_EXTEND_OP (operand_mode) == SIGN_EXTEND ? 0 : 1) -#else - : 1 -#endif - ); - - if (! get_subtarget (subtarget) - || GET_MODE (subtarget) != operand_mode - || ! safe_from_p (subtarget, inner, 1)) - subtarget = 0; - - op0 = expand_expr (inner, subtarget, VOIDmode, 0); - - if (bitnum != 0) - op0 = expand_shift (RSHIFT_EXPR, operand_mode, op0, - size_int (bitnum), subtarget, ops_unsignedp); - - if (GET_MODE (op0) != mode) - op0 = convert_to_mode (mode, op0, ops_unsignedp); - - if ((code == EQ && ! invert) || (code == NE && invert)) - op0 = expand_binop (mode, xor_optab, op0, const1_rtx, subtarget, - ops_unsignedp, OPTAB_LIB_WIDEN); - - /* Put the AND last so it can combine with more things. */ - if (bitnum != TYPE_PRECISION (type) - 1) - op0 = expand_and (mode, op0, const1_rtx, subtarget); - - return op0; + tree type = (*lang_hooks.types.type_for_mode) (mode, unsignedp); + return expand_expr (fold_single_bit_test (code == NE ? NE_EXPR : EQ_EXPR, + arg0, arg1, type), + target, VOIDmode, EXPAND_NORMAL); } /* Now see if we are likely to be able to do this. Return if not. */ @@ -10958,12 +9612,10 @@ do_store_flag (exp, target, mode, only_cheap) } if (! get_subtarget (target) - || GET_MODE (subtarget) != operand_mode - || ! 
safe_from_p (subtarget, arg1, 1)) + || GET_MODE (subtarget) != operand_mode) subtarget = 0; - op0 = expand_expr (arg0, subtarget, VOIDmode, 0); - op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0); + expand_operands (arg0, arg1, subtarget, &op0, &op1, 0); if (target == 0) target = gen_reg_rtx (mode); @@ -11033,7 +9685,7 @@ do_store_flag (exp, target, mode, only_cheap) #endif /* CASE_VALUES_THRESHOLD */ unsigned int -case_values_threshold () +case_values_threshold (void) { return CASE_VALUES_THRESHOLD; } @@ -11041,11 +9693,8 @@ case_values_threshold () /* Attempt to generate a casesi instruction. Returns 1 if successful, 0 otherwise (i.e. if there is no casesi instruction). */ int -try_casesi (index_type, index_expr, minval, range, - table_label, default_label) - tree index_type, index_expr, minval, range; - rtx table_label ATTRIBUTE_UNUSED; - rtx default_label; +try_casesi (tree index_type, tree index_expr, tree minval, tree range, + rtx table_label ATTRIBUTE_UNUSED, rtx default_label) { enum machine_mode index_mode = SImode; int index_bits = GET_MODE_BITSIZE (index_mode); @@ -11132,9 +9781,8 @@ try_casesi (index_type, index_expr, minval, range, index value is out of range. */ static void -do_tablejump (index, mode, range, table_label, default_label) - rtx index, range, table_label, default_label; - enum machine_mode mode; +do_tablejump (rtx index, enum machine_mode mode, rtx range, rtx table_label, + rtx default_label) { rtx temp, vector; @@ -11157,7 +9805,7 @@ do_tablejump (index, mode, range, table_label, default_label) if (mode != Pmode) index = convert_to_mode (Pmode, index, 1); - /* Don't let a MEM slip thru, because then INDEX that comes + /* Don't let a MEM slip through, because then INDEX that comes out of PIC_CASE_VECTOR_ADDRESS won't be a valid address, and break_out_memory_refs will go to work on it and mess it up. */ #ifdef PIC_CASE_VECTOR_ADDRESS @@ -11198,10 +9846,8 @@ do_tablejump (index, mode, range, table_label, default_label) } int -try_tablejump (index_type, index_expr, minval, range, - table_label, default_label) - tree index_type, index_expr, minval, range; - rtx table_label, default_label; +try_tablejump (tree index_type, tree index_expr, tree minval, tree range, + rtx table_label, rtx default_label) { rtx index; @@ -11231,8 +9877,7 @@ try_tablejump (index_type, index_expr, minval, range, vector mode, but we can emulate with narrower modes. */ int -vector_mode_valid_p (mode) - enum machine_mode mode; +vector_mode_valid_p (enum machine_mode mode) { enum mode_class class = GET_MODE_CLASS (mode); enum machine_mode innermode; @@ -11258,8 +9903,7 @@ vector_mode_valid_p (mode) /* Return a CONST_VECTOR rtx for a VECTOR_CST tree. */ static rtx -const_vector_from_tree (exp) - tree exp; +const_vector_from_tree (tree exp) { rtvec v; int units, i; @@ -11290,6 +9934,10 @@ const_vector_from_tree (exp) inner); } + /* Initialize remaining elements to 0. */ + for (; i < units; ++i) + RTVEC_ELT (v, i) = CONST0_RTX (inner); + return gen_rtx_raw_CONST_VECTOR (mode, v); }
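The do_jump_by_parts_greater_rtx loop deleted above compares values too wide for one compare insn a word at a time, high order first: only the high-order word is compared with the signedness of the original comparison, every lower word is compared as unsigned, and a lower word is consulted only when all the words above it are equal. A minimal C sketch of that scheme, assuming a hypothetical NWORDS-word, most-significant-word-first representation (wide_gt and its layout are illustrative, not GCC interfaces):

#include <stdint.h>

#define NWORDS 4   /* hypothetical: a value is 4 machine words, high word first */

/* Return nonzero if A > B.  Only the high-order word honors the requested
   signedness; all lower words are compared as unsigned, and a lower word
   is examined only when every word above it is equal, mirroring the
   deleted word-at-a-time loop.  */
static int
wide_gt (const uintptr_t a[NWORDS], const uintptr_t b[NWORDS], int is_signed)
{
  int i;

  for (i = 0; i < NWORDS; i++)
    {
      if (i == 0 && is_signed)
        {
          if ((intptr_t) a[0] != (intptr_t) b[0])
            return (intptr_t) a[0] > (intptr_t) b[0];
        }
      else if (a[i] != b[i])
        return a[i] > b[i];
      /* Words equal so far; consider the next lower word.  */
    }
  return 0;   /* All words equal, so not greater.  */
}

Returning as soon as two words differ corresponds to the GT/NE jump pair the removed loop emits for each word.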
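Likewise, the deleted do_jump_by_parts_equality_rtx observes that the fastest way to test a wide value against zero on almost any machine is to "or" all the words together and compare the single result, rather than emitting one compare-and-branch per word. A sketch under the same hypothetical NWORDS layout as the previous example:

/* Return nonzero if the NWORDS-word value A is zero.  OR all the words
   together and test once, needing one compare instead of NWORDS
   compare-and-branch pairs.  */
static int
wide_is_zero (const uintptr_t a[NWORDS])
{
  uintptr_t acc = a[0];
  int i;

  for (i = 1; i < NWORDS; i++)
    acc |= a[i];
  return acc == 0;
}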
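The single-bit test that do_store_flag now delegates to fold_single_bit_test avoids a store-flag (scc) instruction entirely: (x & (1 << n)) != 0 becomes a shift of bit N down to the low-order position followed by a mask with 1, and the EQ form additionally xors the result with 1. A sketch with illustrative function names:

/* (x & (1u << n)) != 0 without an scc insn: shift the tested bit down
   to bit 0 and mask with 1.  */
static unsigned int
bit_test_ne (unsigned int x, int n)
{
  return (x >> n) & 1u;
}

/* For the EQ form, additionally invert the low bit with an XOR.  */
static unsigned int
bit_test_eq (unsigned int x, int n)
{
  return ((x >> n) & 1u) ^ 1u;
}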
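Finally, try_casesi and do_tablejump dispatch a case statement by biasing the index by the minimum case value and doing a single unsigned comparison against the range, so one test rejects indices both below the minimum and above the maximum before the table jump. A minimal sketch of that dispatch pattern in plain C (table_dispatch and its parameters are hypothetical, not part of GCC):

/* Dispatch in the style of try_casesi/do_tablejump: subtract MINVAL,
   then one unsigned comparison against RANGE (= max - min) rejects
   everything outside [min, max]; indices below min wrap around to
   large unsigned values and fail the same test.  */
static int
table_dispatch (long index, long minval, unsigned long range,
                int (*const table[])(void), int (*default_fn)(void))
{
  unsigned long i = (unsigned long) (index - minval);

  if (i > range)
    return default_fn ();
  return table[i] ();
}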