本文整理汇总了C++中BB_END函数的典型用法代码示例。如果您正苦于以下问题:C++ BB_END函数的具体用法?C++ BB_END怎么用?C++ BB_END使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了BB_END函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: erase_matching_seqs
/* For every pattern sequence recorded in SEQ_BLOCKS, replace each of its
   matching occurrences (MATCHING_SEQS) in the insn stream with:
     1. an insn saving the return address (a preserved label placed right
        after the deleted sequence) into the link register, and
     2. a jump to the label of the corresponding part of the pattern
        sequence,
   deleting the abstracted insns and patching up the control-flow graph and
   liveness information.  */
static void
erase_matching_seqs (void)
{
  seq_block sb;
  matching_seq mseq;
  rtx insn;
  basic_block bb;
  rtx retlabel, saveinsn, callinsn;
  int i;

  for (sb = seq_blocks; sb; sb = sb->next_seq_block)
    {
      for (mseq = sb->matching_seqs; mseq; mseq = mseq->next_matching_seq)
        {
          insn = mseq->insn;
          bb = BLOCK_FOR_INSN (insn);

          /* Get the label after the sequence.  This will be the return
             address.  The label will be referenced using a symbol_ref so
             protect it from deleting.  */
          retlabel = block_label_after (insn);
          LABEL_PRESERVE_P (retlabel) = 1;

          /* Delete the insns of the sequence.  INSN is walked back to the
             insn just before the sequence; the block is split there and the
             split-off half (presumably the one holding the sequence — the
             split helper is not visible here) is deleted.  */
          for (i = 0; i < sb->length; i++)
            insn = prev_insn_in_block (insn);
          delete_basic_block (split_block_and_df_analyze (bb, insn));

          /* Emit an insn saving the return address to the link register
             before the deleted sequence.  */
          saveinsn = emit_insn_after (gen_move_insn (pattern_seqs->link_reg,
                                                     gen_symbol_ref_rtx_for_label
                                                     (retlabel)),
                                      BB_END (bb));
          BLOCK_FOR_INSN (saveinsn) = bb;

          /* Emit a jump to the appropriate part of the pattern sequence
             after the save insn.  Also update the basic block.  */
          callinsn = emit_jump_insn_after (gen_jump (sb->label), saveinsn);
          JUMP_LABEL (callinsn) = sb->label;
          LABEL_NUSES (sb->label)++;
          BLOCK_FOR_INSN (callinsn) = bb;
          BB_END (bb) = callinsn;

          /* Maintain control flow and liveness information.  The link
             register becomes live out of BB; everything live into the
             pattern block must also be live out of BB.  */
          SET_REGNO_REG_SET (df_get_live_out (bb),
                             REGNO (pattern_seqs->link_reg));
          emit_barrier_after (BB_END (bb));
          make_single_succ_edge (bb, BLOCK_FOR_INSN (sb->label), 0);

          IOR_REG_SET (df_get_live_out (bb),
                       df_get_live_in (BLOCK_FOR_INSN (sb->label)));

          /* Model the "return" from the pattern sequence back to RETLABEL
             as an abnormal edge, since it happens via an indirect jump.  */
          make_edge (BLOCK_FOR_INSN (seq_blocks->label),
                     BLOCK_FOR_INSN (retlabel), EDGE_ABNORMAL);
        }
    }
}
示例2: split_pattern_seq
/* Split the insn stream of the pattern sequence (PATTERN_SEQS) into basic
   blocks according to SEQ_BLOCKS, caching each resulting block's label in
   SB->label so matching occurrences can jump into it.  Also emit the
   return machinery: an indirect jump through the link register after the
   sequence (unless the block ends in a noreturn call), a save of the
   return-address label into the link register before the sequence, and
   the corresponding CFG/liveness updates.

   Fix vs. original: the locals RETJMP and SAVEINSN were assigned but never
   read (triggering -Wunused-but-set-variable); the emit calls are kept,
   only the dead temporaries are removed.  */
static void
split_pattern_seq (void)
{
  rtx insn;
  basic_block bb;
  rtx retlabel;
  int i;
  seq_block sb;

  insn = pattern_seqs->insn;
  bb = BLOCK_FOR_INSN (insn);

  /* Get the label after the sequence.  This will be the return address.
     The label will be referenced using a symbol_ref so protect it from
     deleting.  */
  retlabel = block_label_after (insn);
  LABEL_PRESERVE_P (retlabel) = 1;

  /* Emit an indirect jump via the link register after the sequence acting
     as the return insn.  Also emit a barrier and update the basic block.
     No return jump is needed after a noreturn call.  */
  if (!find_reg_note (BB_END (bb), REG_NORETURN, NULL))
    emit_jump_insn_after (gen_indirect_jump (pattern_seqs->link_reg),
                          BB_END (bb));
  emit_barrier_after (BB_END (bb));

  /* Replace all outgoing edges with a new one to the block of RETLABEL.  */
  while (EDGE_COUNT (bb->succs) != 0)
    remove_edge (EDGE_SUCC (bb, 0));
  make_edge (bb, BLOCK_FOR_INSN (retlabel), EDGE_ABNORMAL);

  /* Split the sequence according to SEQ_BLOCKS and cache the label of the
     resulting basic blocks.  */
  i = 0;
  for (sb = seq_blocks; sb; sb = sb->next_seq_block)
    {
      for (; i < sb->length; i++)
        insn = prev_insn_in_block (insn);
      sb->label = block_label (split_block_and_df_analyze (bb, insn));
    }

  /* Emit an insn saving the return address to the link register before the
     sequence.  */
  emit_insn_after (gen_move_insn (pattern_seqs->link_reg,
                                  gen_symbol_ref_rtx_for_label (retlabel)),
                   BB_END (bb));

  /* Update liveness info.  */
  SET_REGNO_REG_SET (df_get_live_out (bb),
                     REGNO (pattern_seqs->link_reg));
}
示例3: print_rtl_slim_with_bb
/* Provide a slim dump of the instruction chain starting at FIRST to F,
   honoring the dump flags given in FLAGS.  When TDF_BLOCKS is set,
   basic-block header/footer information is interleaved with the insns.  */
void
print_rtl_slim_with_bb (FILE *f, rtx first, int flags)
{
  basic_block bb = NULL;
  rtx insn = first;

  while (insn != NULL)
    {
      /* Entering a new basic block: print its header once.  */
      if ((flags & TDF_BLOCKS)
          && bb == NULL
          && (INSN_P (insn) || GET_CODE (insn) == NOTE)
          && BLOCK_FOR_INSN (insn))
        {
          bb = BLOCK_FOR_INSN (insn);
          dump_bb_info (bb, true, false, flags, ";; ", f);
        }

      dump_insn_slim (f, insn);

      /* Leaving the current basic block: print its footer.  */
      if ((flags & TDF_BLOCKS)
          && bb != NULL
          && insn == BB_END (bb))
        {
          dump_bb_info (bb, false, true, flags, ";; ", f);
          bb = NULL;
        }

      insn = NEXT_INSN (insn);
    }
}
示例4: add_test
/* Emit, after the end of BB, a comparison of the operands of COND followed
   by a conditional jump to DEST.  The jump carries a zero branch
   probability note, marking it as handling an unlikely special case.  */
static void
add_test (rtx cond, basic_block bb, basic_block dest)
{
  enum rtx_code code = GET_CODE (cond);
  rtx lhs = XEXP (cond, 0);
  rtx rhs = XEXP (cond, 1);
  enum machine_mode mode = GET_MODE (lhs);
  rtx seq, jump, label;

  /* Fall back to the second operand's mode if the first is modeless.  */
  if (mode == VOIDmode)
    mode = GET_MODE (rhs);

  start_sequence ();
  lhs = force_operand (lhs, NULL_RTX);
  rhs = force_operand (rhs, NULL_RTX);
  label = block_label (dest);
  do_compare_rtx_and_jump (lhs, rhs, code, 0, mode, NULL_RTX, NULL_RTX, label);

  jump = get_last_insn ();
  JUMP_LABEL (jump) = label;

  /* The jump is supposed to handle an unlikely special case.  */
  REG_NOTES (jump) = gen_rtx_EXPR_LIST (REG_BR_PROB, const0_rtx,
                                        REG_NOTES (jump));
  LABEL_NUSES (label)++;

  seq = get_insns ();
  end_sequence ();
  emit_insn_after (seq, BB_END (bb));
}
示例5: block_label_after
/* Return the label of the basic block that starts right after INSN.  If
   INSN is not the last insn of its block (or its block is the last one
   before the exit block), the block is split just after INSN first so
   that such a label exists.  */
static rtx
block_label_after (rtx insn)
{
  basic_block bb = BLOCK_FOR_INSN (insn);

  if (insn != BB_END (bb) || bb->next_bb == EXIT_BLOCK_PTR)
    return block_label (split_block_and_df_analyze (bb, insn));

  return block_label (bb->next_bb);
}
示例6: new_seginfo
/* Allocate and return a new SEGINFO element for basic block number BB,
   recording MODE and the insertion point INSN; the live hard registers
   REGS_LIVE are copied into the element.  The NEXT link starts out NULL.
   A basic-block note is only acceptable as INSN when it is also the last
   insn of its block.  */
static struct seginfo *
new_seginfo (int mode, rtx_insn *insn, int bb, HARD_REG_SET regs_live)
{
  struct seginfo *info;

  gcc_assert (!NOTE_INSN_BASIC_BLOCK_P (insn)
              || insn == BB_END (NOTE_BASIC_BLOCK (insn)));

  info = XNEW (struct seginfo);
  info->mode = mode;
  info->insn_ptr = insn;
  info->bbnum = bb;
  info->next = NULL;
  COPY_HARD_REG_SET (info->regs_live, regs_live);
  return info;
}
示例7: display_stored_regs
/* Kernel stack-trace helper: scan a function prologue starting at PRO_PC
   and print every register saved to the stack frame at SP.  Returns the
   saved return address (register 26's slot) if one was found.
   NOTE(review): this excerpt is truncated by the source it was scraped
   from — the closing brace, the final `return ret_pc`, and the statement
   advancing PRO_PC through the prologue are not visible here; do not
   assume the loop body shown is complete.  */
static instr *
display_stored_regs(instr * pro_pc, unsigned char * sp)
{
  instr * ret_pc = 0;
  int reg;
  unsigned long value;

  printk("Prologue [<%p>], Frame %p:\n", pro_pc, sp);
  /* Walk prologue insns until BB_END signals the end of the prologue.  */
  while (!BB_END(*pro_pc))
    if (STK_PUSH_MATCH(*pro_pc)) {
      /* Decode the saved register number and fetch its stacked value.  */
      reg = (*pro_pc & MEM_REG) >> 21;
      value = *(unsigned long *)(sp + (*pro_pc & MEM_OFF));
      /* Register 26 presumably holds the return address — confirm against
         the target ABI this fragment was written for.  */
      if (reg == 26)
        ret_pc = (instr *)value;
      printk("\t\t%s / 0x%016lx\n", reg_name[reg], value);
    }
示例8: make_forwarder_block
/* Split BB just after its basic-block note into a forwarder block DUMMY
   followed by the (new) header BB.  Incoming edges that are EXCEPT, or
   that are not selected by REDIRECT_LATCH / REDIRECT_NONLATCH, are moved
   past DUMMY directly onto BB, with DUMMY's frequency and count reduced
   accordingly.  CONN_LATCH is recorded in the fall-through edge's
   LATCH_EDGE aux data.  Returns DUMMY.
   NOTE(review): iterates edges via e->pred_next, i.e. the pre-edge-vector
   GCC CFG representation.  */
static basic_block
make_forwarder_block (basic_block bb, int redirect_latch,
                      int redirect_nonlatch, edge except, int conn_latch)
{
  edge e, next_e, fallthru;
  basic_block dummy;
  rtx insn;

  insn = PREV_INSN (first_insn_after_basic_block_note (bb));

  /* For empty block split_block will return NULL.  Emit a placeholder
     note so there is always an insn to split after.  */
  if (BB_END (bb) == insn)
    emit_note_after (NOTE_INSN_DELETED, insn);

  fallthru = split_block (bb, insn);
  dummy = fallthru->src;
  bb = fallthru->dest;

  /* Allocate aux data for the header and flag which half is which.  */
  bb->aux = xmalloc (sizeof (int));
  HEADER_BLOCK (dummy) = 0;
  HEADER_BLOCK (bb) = 1;

  /* Redirect back edges we want to keep.  */
  for (e = dummy->pred; e; e = next_e)
    {
      next_e = e->pred_next;
      if (e == except
          || !((redirect_latch && LATCH_EDGE (e))
               || (redirect_nonlatch && !LATCH_EDGE (e))))
        {
          /* This edge does not belong on the forwarder: move it straight
             to BB and take its weight out of DUMMY, clamping at zero.  */
          dummy->frequency -= EDGE_FREQUENCY (e);
          dummy->count -= e->count;
          if (dummy->frequency < 0)
            dummy->frequency = 0;
          if (dummy->count < 0)
            dummy->count = 0;
          redirect_edge_with_latch_update (e, bb);
        }
    }

  alloc_aux_for_edge (fallthru, sizeof (int));
  LATCH_EDGE (fallthru) = conn_latch;

  return dummy;
}
示例9: clear_regs_live_in_seq
/* Remove from *REGS every hard register that is live at any point within
   the sequence of LENGTH real insns ending at INSN, or live immediately
   after INSN.  Liveness is obtained by simulating the insns of INSN's
   block backwards from its live-out set.  */
static void
clear_regs_live_in_seq (HARD_REG_SET * regs, rtx insn, int length)
{
  basic_block bb;
  regset_head live;
  HARD_REG_SET hlive;
  rtx x;
  int i;

  /* Initialize liveness propagation.  */
  bb = BLOCK_FOR_INSN (insn);
  INIT_REG_SET (&live);
  bitmap_copy (&live, DF_LR_OUT (bb));
  df_simulate_initialize_backwards (bb, &live);

  /* Propagate until INSN if found.  */
  for (x = BB_END (bb); x != insn; x = PREV_INSN (x))
    df_simulate_one_insn_backwards (bb, x, &live);

  /* Clear registers live after INSN.  */
  renumbered_reg_set_to_hard_reg_set (&hlive, &live);
  AND_COMPL_HARD_REG_SET (*regs, hlive);

  /* Clear registers live in and before the sequence.  Only real insns
     (INSN_P) count towards LENGTH; notes etc. are simulated but not
     counted.  */
  for (i = 0; i < length;)
    {
      rtx prev = PREV_INSN (x);
      df_simulate_one_insn_backwards (bb, x, &live);
      if (INSN_P (x))
        {
          renumbered_reg_set_to_hard_reg_set (&hlive, &live);
          AND_COMPL_HARD_REG_SET (*regs, hlive);
          i++;
        }
      x = prev;
    }

  /* Free unused data.  */
  CLEAR_REG_SET (&live);
}
示例10: optimize_mode_switching
//.........这里部分代码省略.........
{
int e = entity_map[j];
int no_mode = num_modes[e];
struct bb_info *info = bb_info[j];
rtx_insn *insn;
/* Determine what the first use (if any) need for a mode of entity E is.
This will be the mode that is anticipatable for this block.
Also compute the initial transparency settings. */
FOR_EACH_BB_FN (bb, cfun)
{
struct seginfo *ptr;
int last_mode = no_mode;
bool any_set_required = false;
HARD_REG_SET live_now;
info[bb->index].mode_out = info[bb->index].mode_in = no_mode;
REG_SET_TO_HARD_REG_SET (live_now, df_get_live_in (bb));
/* Pretend the mode is clobbered across abnormal edges. */
{
edge_iterator ei;
edge eg;
FOR_EACH_EDGE (eg, ei, bb->preds)
if (eg->flags & EDGE_COMPLEX)
break;
if (eg)
{
rtx_insn *ins_pos = BB_HEAD (bb);
if (LABEL_P (ins_pos))
ins_pos = NEXT_INSN (ins_pos);
gcc_assert (NOTE_INSN_BASIC_BLOCK_P (ins_pos));
if (ins_pos != BB_END (bb))
ins_pos = NEXT_INSN (ins_pos);
ptr = new_seginfo (no_mode, ins_pos, bb->index, live_now);
add_seginfo (info + bb->index, ptr);
for (i = 0; i < no_mode; i++)
clear_mode_bit (transp[bb->index], j, i);
}
}
FOR_BB_INSNS (bb, insn)
{
if (INSN_P (insn))
{
int mode = targetm.mode_switching.needed (e, insn);
rtx link;
if (mode != no_mode && mode != last_mode)
{
any_set_required = true;
last_mode = mode;
ptr = new_seginfo (mode, insn, bb->index, live_now);
add_seginfo (info + bb->index, ptr);
for (i = 0; i < no_mode; i++)
clear_mode_bit (transp[bb->index], j, i);
}
if (targetm.mode_switching.after)
last_mode = targetm.mode_switching.after (e, last_mode,
insn);
/* Update LIVE_NOW. */
for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
if (REG_NOTE_KIND (link) == REG_DEAD)
示例11: create_pre_exit
static basic_block
create_pre_exit (int n_entities, int *entity_map, const int *num_modes)
{
edge eg;
edge_iterator ei;
basic_block pre_exit;
/* The only non-call predecessor at this stage is a block with a
fallthrough edge; there can be at most one, but there could be
none at all, e.g. when exit is called. */
pre_exit = 0;
FOR_EACH_EDGE (eg, ei, EXIT_BLOCK_PTR_FOR_FN (cfun)->preds)
if (eg->flags & EDGE_FALLTHRU)
{
basic_block src_bb = eg->src;
rtx_insn *last_insn;
rtx ret_reg;
gcc_assert (!pre_exit);
/* If this function returns a value at the end, we have to
insert the final mode switch before the return value copy
to its hard register. */
if (EDGE_COUNT (EXIT_BLOCK_PTR_FOR_FN (cfun)->preds) == 1
&& NONJUMP_INSN_P ((last_insn = BB_END (src_bb)))
&& GET_CODE (PATTERN (last_insn)) == USE
&& GET_CODE ((ret_reg = XEXP (PATTERN (last_insn), 0))) == REG)
{
int ret_start = REGNO (ret_reg);
int nregs = hard_regno_nregs[ret_start][GET_MODE (ret_reg)];
int ret_end = ret_start + nregs;
bool short_block = false;
bool multi_reg_return = false;
bool forced_late_switch = false;
rtx_insn *before_return_copy;
do
{
rtx_insn *return_copy = PREV_INSN (last_insn);
rtx return_copy_pat, copy_reg;
int copy_start, copy_num;
int j;
if (NONDEBUG_INSN_P (return_copy))
{
/* When using SJLJ exceptions, the call to the
unregister function is inserted between the
clobber of the return value and the copy.
We do not want to split the block before this
or any other call; if we have not found the
copy yet, the copy must have been deleted. */
if (CALL_P (return_copy))
{
short_block = true;
break;
}
return_copy_pat = PATTERN (return_copy);
switch (GET_CODE (return_copy_pat))
{
case USE:
/* Skip USEs of multiple return registers.
__builtin_apply pattern is also handled here. */
if (GET_CODE (XEXP (return_copy_pat, 0)) == REG
&& (targetm.calls.function_value_regno_p
(REGNO (XEXP (return_copy_pat, 0)))))
{
multi_reg_return = true;
last_insn = return_copy;
continue;
}
break;
case ASM_OPERANDS:
/* Skip barrier insns. */
if (!MEM_VOLATILE_P (return_copy_pat))
break;
/* Fall through. */
case ASM_INPUT:
case UNSPEC_VOLATILE:
last_insn = return_copy;
continue;
default:
break;
}
/* If the return register is not (in its entirety)
likely spilled, the return copy might be
partially or completely optimized away. */
return_copy_pat = single_set (return_copy);
if (!return_copy_pat)
{
return_copy_pat = PATTERN (return_copy);
if (GET_CODE (return_copy_pat) != CLOBBER)
break;
else if (!optimize)
{
/* This might be (clobber (reg [<result>]))
when not optimizing. Then check if
//.........这里部分代码省略.........
示例12: expand_gimple_tailcall
/* Expand the tree statement STMT, which is expected to expand to a
   sibling (tail) call, into RTL at the end of BB.  If no sibcall insn was
   actually produced, set *CAN_FALLTHRU to true and return NULL.
   Otherwise remove the now-dead non-EH/non-abnormal successor edges,
   delete the return-sequence insns emitted after the sibcall, attach an
   abnormal EDGE_SIBCALL edge to the exit block, and return the current
   basic block (a fresh one if insns remain after the sibcall's
   barrier).  */
static basic_block
expand_gimple_tailcall (basic_block bb, tree stmt, bool *can_fallthru)
{
  rtx last2, last;
  edge e;
  edge_iterator ei;
  int probability;
  gcov_type count;

  last2 = last = get_last_insn ();
  expand_expr_stmt (stmt);

  /* Look for the sibcall among the insns just emitted.  */
  for (last = NEXT_INSN (last); last; last = NEXT_INSN (last))
    if (CALL_P (last) && SIBLING_CALL_P (last))
      goto found;

  /* No sibcall was emitted; the block falls through normally.  */
  maybe_dump_rtl_for_tree_stmt (stmt, last2);
  *can_fallthru = true;
  return NULL;

found:
  /* ??? Wouldn't it be better to just reset any pending stack adjust?
     Any instructions emitted here are about to be deleted.  */
  do_pending_stack_adjust ();

  /* Remove any non-eh, non-abnormal edges that don't go to exit.  */
  /* ??? I.e. the fallthrough edge.  HOWEVER!  If there were to be
     EH or abnormal edges, we shouldn't have created a tail call in
     the first place.  So it seems to me we should just be removing
     all edges here, or redirecting the existing fallthru edge to
     the exit block.  */
  probability = 0;
  count = 0;

  for (ei = ei_start (bb->succs); (e = ei_safe_edge (ei)); )
    {
      if (!(e->flags & (EDGE_ABNORMAL | EDGE_EH)))
        {
          if (e->dest != EXIT_BLOCK_PTR)
            {
              /* Take the removed edge's weight out of its destination,
                 clamping at zero.  */
              e->dest->count -= e->count;
              e->dest->frequency -= EDGE_FREQUENCY (e);
              if (e->dest->count < 0)
                e->dest->count = 0;
              if (e->dest->frequency < 0)
                e->dest->frequency = 0;
            }
          count += e->count;
          probability += e->probability;
          remove_edge (e);
        }
      else
        ei_next (&ei);
    }

  /* This is somewhat ugly: the call_expr expander often emits instructions
     after the sibcall (to perform the function return).  These confuse the
     find_sub_basic_blocks code, so we need to get rid of these.  */
  last = NEXT_INSN (last);
  gcc_assert (BARRIER_P (last));

  *can_fallthru = false;
  while (NEXT_INSN (last))
    {
      /* For instance an sqrt builtin expander expands if with
         sibcall in the then and label for `else`.  */
      if (LABEL_P (NEXT_INSN (last)))
        {
          *can_fallthru = true;
          break;
        }
      delete_insn (NEXT_INSN (last));
    }

  /* Model the tail call's transfer to the callee as an abnormal sibcall
     edge to the exit block, carrying the accumulated weight.  */
  e = make_edge (bb, EXIT_BLOCK_PTR, EDGE_ABNORMAL | EDGE_SIBCALL);
  e->probability += probability;
  e->count += count;

  BB_END (bb) = last;
  update_bb_for_insn (bb);

  /* If insns survive past the barrier (e.g. a label we stopped at),
     start a new basic block for them, excluding a trailing barrier.  */
  if (NEXT_INSN (last))
    {
      bb = create_basic_block (NEXT_INSN (last), get_last_insn (), bb);

      last = BB_END (bb);
      if (BARRIER_P (last))
        BB_END (bb) = PREV_INSN (last);
    }

  maybe_dump_rtl_for_tree_stmt (stmt, last2);

  return bb;
}
示例13: may_unswitch_on
/* Check whether LOOP may be unswitched on the conditional jump ending BB.
   On success return the loop-invariant condition (possibly folded to
   const0_rtx/const_true_rtx); for MODE_CC conditions also store the
   compare insn into *CINSN.  Return NULL_RTX if unswitching on BB is not
   possible.  */
static rtx
may_unswitch_on (basic_block bb, struct loop *loop, rtx *cinsn)
{
  rtx test, at, op[2], stest;
  struct rtx_iv iv;
  unsigned i;
  enum machine_mode mode;

  /* BB must end in a simple conditional jump.  */
  if (EDGE_COUNT (bb->succs) != 2)
    return NULL_RTX;
  if (!any_condjump_p (BB_END (bb)))
    return NULL_RTX;

  /* With branches inside loop.  */
  if (!flow_bb_inside_loop_p (loop, EDGE_SUCC (bb, 0)->dest)
      || !flow_bb_inside_loop_p (loop, EDGE_SUCC (bb, 1)->dest))
    return NULL_RTX;

  /* It must be executed just once each iteration (because otherwise we
     are unable to update dominator/irreducible loop information correctly).  */
  if (!just_once_each_iteration_p (loop, bb))
    return NULL_RTX;

  /* Condition must be invariant.  */
  test = get_condition (BB_END (bb), &at, true, false);
  if (!test)
    return NULL_RTX;

  /* Each non-constant operand must be an induction value with zero step,
     i.e. effectively invariant; replace it by its initial value.  */
  for (i = 0; i < 2; i++)
    {
      op[i] = XEXP (test, i);

      if (CONSTANT_P (op[i]))
        continue;

      if (!iv_analyze (at, op[i], &iv))
        return NULL_RTX;
      if (iv.step != const0_rtx
          || iv.first_special)
        return NULL_RTX;

      op[i] = get_iv_value (&iv, const0_rtx);
    }

  mode = GET_MODE (op[0]);
  if (mode == VOIDmode)
    mode = GET_MODE (op[1]);

  /* For condition-code modes the compare insn itself must end BB and its
     operands must match the condition unchanged; hand the insn back via
     *CINSN so the caller can duplicate the compare.  */
  if (GET_MODE_CLASS (mode) == MODE_CC)
    {
      if (at != BB_END (bb))
        return NULL_RTX;

      if (!rtx_equal_p (op[0], XEXP (test, 0))
          || !rtx_equal_p (op[1], XEXP (test, 1)))
        return NULL_RTX;

      *cinsn = BB_END (bb);
      return test;
    }

  /* Try to fold the condition to a constant first.  */
  stest = simplify_gen_relational (GET_CODE (test), SImode,
                                   mode, op[0], op[1]);
  if (stest == const0_rtx
      || stest == const_true_rtx)
    return stest;

  return canon_condition (gen_rtx_fmt_ee (GET_CODE (test), SImode,
                                          op[0], op[1]));
}
示例14: unswitch_loop
/* Unswitch LOOP on the invariant condition COND computed in block
   UNSWITCH_ON (with compare insn CINSN for MODE_CC conditions, or
   NULL_RTX otherwise).  A copy of the loop body is made, a new block
   SWITCH_BB testing COND is placed in front, and each copy keeps only one
   arm of the condition.  Returns the newly constructed loop, or NULL if
   duplication failed.  */
static struct loop *
unswitch_loop (struct loop *loop, basic_block unswitch_on, rtx cond, rtx cinsn)
{
  edge entry, latch_edge, true_edge, false_edge, e;
  basic_block switch_bb, unswitch_on_alt;
  struct loop *nloop;
  int irred_flag, prob;
  rtx seq;

  /* Some sanity checking.  */
  gcc_assert (flow_bb_inside_loop_p (loop, unswitch_on));
  gcc_assert (EDGE_COUNT (unswitch_on->succs) == 2);
  gcc_assert (just_once_each_iteration_p (loop, unswitch_on));
  gcc_assert (!loop->inner);
  gcc_assert (flow_bb_inside_loop_p (loop, EDGE_SUCC (unswitch_on, 0)->dest));
  gcc_assert (flow_bb_inside_loop_p (loop, EDGE_SUCC (unswitch_on, 1)->dest));

  entry = loop_preheader_edge (loop);

  /* Make a copy.  The irreducible flag is temporarily cleared so the
     duplication machinery does not trip over it, then restored.  */
  irred_flag = entry->flags & EDGE_IRREDUCIBLE_LOOP;
  entry->flags &= ~EDGE_IRREDUCIBLE_LOOP;
  if (!duplicate_loop_to_header_edge (loop, entry, 1,
                                      NULL, NULL, NULL, 0))
    return NULL;
  entry->flags |= irred_flag;

  /* Record the block with condition we unswitch on.  */
  unswitch_on_alt = get_bb_copy (unswitch_on);
  true_edge = BRANCH_EDGE (unswitch_on_alt);
  false_edge = FALLTHRU_EDGE (unswitch_on);
  latch_edge = single_succ_edge (get_bb_copy (loop->latch));

  /* Create a block with the condition.  */
  prob = true_edge->probability;
  switch_bb = create_empty_bb (EXIT_BLOCK_PTR->prev_bb);
  seq = compare_and_jump_seq (XEXP (cond, 0), XEXP (cond, 1), GET_CODE (cond),
                              block_label (true_edge->dest),
                              prob, cinsn);
  emit_insn_after (seq, BB_END (switch_bb));

  /* Wire SWITCH_BB's two outcomes: taken branch to the TRUE destination,
     fall-through to the FALSE destination, with counts scaled from the
     latch edge by the branch probability.  */
  e = make_edge (switch_bb, true_edge->dest, 0);
  e->probability = prob;
  e->count = latch_edge->count * prob / REG_BR_PROB_BASE;
  e = make_edge (switch_bb, FALLTHRU_EDGE (unswitch_on)->dest, EDGE_FALLTHRU);
  e->probability = false_edge->probability;
  e->count = latch_edge->count * (false_edge->probability) / REG_BR_PROB_BASE;

  /* Propagate the irreducible-region marking to the new block/edges.  */
  if (irred_flag)
    {
      switch_bb->flags |= BB_IRREDUCIBLE_LOOP;
      EDGE_SUCC (switch_bb, 0)->flags |= EDGE_IRREDUCIBLE_LOOP;
      EDGE_SUCC (switch_bb, 1)->flags |= EDGE_IRREDUCIBLE_LOOP;
    }
  else
    {
      switch_bb->flags &= ~BB_IRREDUCIBLE_LOOP;
      EDGE_SUCC (switch_bb, 0)->flags &= ~EDGE_IRREDUCIBLE_LOOP;
      EDGE_SUCC (switch_bb, 1)->flags &= ~EDGE_IRREDUCIBLE_LOOP;
    }

  /* Loopify from the copy of LOOP body, constructing the new loop.  */
  nloop = loopify (latch_edge,
                   single_pred_edge (get_bb_copy (loop->header)), switch_bb,
                   BRANCH_EDGE (switch_bb), FALLTHRU_EDGE (switch_bb), true,
                   prob, REG_BR_PROB_BASE - prob);
  copy_loop_info (loop, nloop);

  /* Remove branches that are now unreachable in new loops.  */
  remove_path (true_edge);
  remove_path (false_edge);

  /* Preserve the simple loop preheaders.  */
  split_edge (loop_preheader_edge (loop));
  split_edge (loop_preheader_edge (nloop));

  return nloop;
}
示例15: collect_pattern_seqs
static void
collect_pattern_seqs (void)
{
htab_iterator hti0, hti1, hti2;
p_hash_bucket hash_bucket;
p_hash_elem e0, e1;
#if defined STACK_REGS || defined HAVE_cc0
basic_block bb;
bitmap_head dont_collect;
/* Extra initialization step to ensure that no stack registers (if present)
or cc0 code (if present) are live across abnormal edges.
Set a flag in DONT_COLLECT for an insn if a stack register is live
after the insn or the insn is cc0 setter or user. */
bitmap_initialize (&dont_collect, NULL);
#ifdef STACK_REGS
FOR_EACH_BB (bb)
{
regset_head live;
rtx insn;
rtx prev;
/* Initialize liveness propagation. */
INIT_REG_SET (&live);
bitmap_copy (&live, DF_LR_OUT (bb));
df_simulate_initialize_backwards (bb, &live);
/* Propagate liveness info and mark insns where a stack reg is live. */
insn = BB_END (bb);
for (insn = BB_END (bb); ; insn = prev)
{
prev = PREV_INSN (insn);
if (INSN_P (insn))
{
int reg;
for (reg = FIRST_STACK_REG; reg <= LAST_STACK_REG; reg++)
{
if (REGNO_REG_SET_P (&live, reg))
{
bitmap_set_bit (&dont_collect, INSN_UID (insn));
break;
}
}
}
if (insn == BB_HEAD (bb))
break;
df_simulate_one_insn_backwards (bb, insn, &live);
insn = prev;
}
/* Free unused data. */
CLEAR_REG_SET (&live);
}
#endif
#ifdef HAVE_cc0
/* Mark CC0 setters and users as ineligible for collection into sequences.
This is an over-conservative fix, since it is OK to include
a cc0_setter, but only if we also include the corresponding cc0_user,
and vice versa. */
FOR_EACH_BB (bb)
{
rtx insn;
rtx next_tail;
next_tail = NEXT_INSN (BB_END (bb));
for (insn = BB_HEAD (bb); insn != next_tail; insn = NEXT_INSN (insn))
{
if (INSN_P (insn) && reg_mentioned_p (cc0_rtx, PATTERN (insn)))
bitmap_set_bit (&dont_collect, INSN_UID (insn));
}
}
#endif
#endif /* defined STACK_REGS || defined HAVE_cc0 */
/* Initialize PATTERN_SEQS to empty. */
pattern_seqs = 0;
/* Try to match every abstractable insn with every other insn in the same
HASH_BUCKET. */
FOR_EACH_HTAB_ELEMENT (hash_buckets, hash_bucket, p_hash_bucket, hti0)
if (htab_elements (hash_bucket->seq_candidates) > 1)
FOR_EACH_HTAB_ELEMENT (hash_bucket->seq_candidates, e0, p_hash_elem, hti1)
FOR_EACH_HTAB_ELEMENT (hash_bucket->seq_candidates, e1, p_hash_elem,
hti2)
if (e0 != e1
#if defined STACK_REGS || defined HAVE_cc0
&& !bitmap_bit_p (&dont_collect, INSN_UID (e0->insn))
&& !bitmap_bit_p (&dont_collect, INSN_UID (e1->insn))
#endif
)
match_seqs (e0, e1);
#if defined STACK_REGS || defined HAVE_cc0
/* Free unused data. */
bitmap_clear (&dont_collect);
//.........这里部分代码省略.........