/* Perform instruction reorganizations for delay slot filling.
   Copyright (C) 1992, 1993 Free Software Foundation, Inc.
   Contributed by Richard Kenner (kenner@nyu.edu).
   Hacked by Michael Tiemann (tiemann@cygnus.com).

This file is part of GNU CC.

GNU CC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.

GNU CC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GNU CC; see the file COPYING.  If not, write to
the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.  */

/* Instruction reorganization pass.

   This pass runs after register allocation and final jump
   optimization.  It should be the last pass to run before peephole.
   It serves primarily to fill delay slots of insns, typically branch
   and call insns.  Other insns typically involve more complicated
   interactions of data dependencies and resource constraints, and
   are better handled by scheduling before register allocation (by the
   function `schedule_insns').

   The Branch Penalty is the number of extra cycles that are needed to
   execute a branch insn.  On an ideal machine, branches take a single
   cycle, and the Branch Penalty is 0.  Several RISC machines approach
   branch delays differently:

   The MIPS and AMD 29000 have a single branch delay slot.  Most insns
   (except other branches) can be used to fill this slot.  When the
   slot is filled, two insns execute in two cycles, reducing the
   branch penalty to zero.

   The Motorola 88000 conditionally exposes its branch delay slot,
   so code is shorter when it is turned off, but will run faster
   when useful insns are scheduled there.

   The IBM ROMP has two forms of branch and call insns, both with and
   without a delay slot.  Much like the 88k, insns not using the delay
   slot can be shorter (2 bytes vs. 4 bytes), but will run slower.

   The SPARC always has a branch delay slot, but its effects can be
   annulled when the branch is not taken.  This means that, failing to
   find other sources of insns, we can hoist an insn from the branch
   target that would only be safe to execute knowing that the branch
   is taken.

   The HP-PA always has a branch delay slot.  For unconditional branches
   its effects can be annulled when the branch is taken.  The effects
   of the delay slot in a conditional branch can be nullified for forward
   taken branches, or for untaken backward branches.  This means
   we can hoist insns from the fall-through path for forward branches or
   steal insns from the target of backward branches.

   Three techniques for filling delay slots have been implemented so far:

   (1) `fill_simple_delay_slots' is the simplest, most efficient way
   to fill delay slots.  This pass first looks for insns which come
   from before the branch and which are safe to execute after the
   branch.  Then it searches after the insn requiring delay slots or,
   in the case of a branch, for insns that are after the point at
   which the branch merges into the fallthrough code, if such a point
   exists.  When such insns are found, the branch penalty decreases
   and no code expansion takes place.  (A sketch appears after (3)
   below.)

   (2) `fill_eager_delay_slots' is more complicated: it is used for
   scheduling conditional jumps, or for scheduling jumps which cannot
   be filled using (1).  A machine need not have annulled jumps to use
   this strategy, but it helps (by keeping more options open).
   `fill_eager_delay_slots' tries to guess the direction the branch
   will go; if it guesses right 100% of the time, it can reduce the
   branch penalty as much as `fill_simple_delay_slots' does.  If it
   guesses wrong 100% of the time, it might as well schedule nops (or
   on the m88k, unexpose the branch slot).  When
   `fill_eager_delay_slots' takes insns from the fall-through path of
   the jump, usually there is no code expansion; when it takes insns
   from the branch target, there is code expansion if it is not the
   only way to reach that target.

   (3) `relax_delay_slots' uses a set of rules to simplify code that
   has been reorganized by (1) and (2).  It finds cases where a
   conditional test can be eliminated, jumps can be threaded, extra
   insns can be eliminated, etc.  It is the job of (1) and (2) to do a
   good job of scheduling locally; `relax_delay_slots' takes care of
   making the various individual schedules work well together.  It is
   especially tuned to handle the control flow interactions of branch
   insns.  It does nothing for insns with delay slots that do not
   branch.

   On machines that use CC0, we are very conservative.  We will not make
   a copy of an insn involving CC0 since we want to maintain a 1-1
   correspondence between the insn that sets CC0 and the insn that uses
   it.  The insns are allowed to be separated by placing an insn that
   sets CC0 (but not an insn that uses CC0; we could do this, but it
   doesn't seem worthwhile) in a delay slot.  In that case, we point
   each insn at the other with REG_CC_USER and REG_CC_SETTER notes.
   Note that these restrictions affect very few machines because most
   RISC machines with delay slots will not use CC0 (the RT is the only
   known exception at this point).
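
   Schematically, if the CC0-setting insn SETTER ends up in a delay slot
   of the insn USER that tests CC0, SETTER carries a REG_CC_USER note
   pointing at USER, and USER carries a REG_CC_SETTER note pointing back
   at SETTER (an illustration of the linkage just described).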
107 | ||
108 | Not yet implemented: | |
109 | ||
110 | The Acorn Risc Machine can conditionally execute most insns, so | |
111 | it is profitable to move single insns into a position to execute | |
112 | based on the condition code of the previous insn. | |
113 | ||
114 | The HP-PA can conditionally nullify insns, providing a similar | |
115 | effect to the ARM, differing mostly in which insn is "in charge". */ | |
116 | ||
117 | #include <stdio.h> | |
118 | #include "config.h" | |
119 | #include "rtl.h" | |
120 | #include "insn-config.h" | |
121 | #include "conditions.h" | |
122 | #include "hard-reg-set.h" | |
123 | #include "basic-block.h" | |
124 | #include "regs.h" | |
125 | #include "insn-flags.h" | |
126 | #include "recog.h" | |
127 | #include "flags.h" | |
128 | #include "output.h" | |
129 | #include "obstack.h" | |
130 | #include "insn-attr.h" | |
131 | ||
132 | #ifdef DELAY_SLOTS | |
133 | ||
134 | #define obstack_chunk_alloc xmalloc | |
135 | #define obstack_chunk_free free | |
136 | ||
137 | #ifndef ANNUL_IFTRUE_SLOTS | |
138 | #define eligible_for_annul_true(INSN, SLOTS, TRIAL, FLAGS) 0 | |
139 | #endif | |
140 | #ifndef ANNUL_IFFALSE_SLOTS | |
141 | #define eligible_for_annul_false(INSN, SLOTS, TRIAL, FLAGS) 0 | |
142 | #endif | |
143 | ||
/* Insns which have delay slots that have not yet been filled.  */

static struct obstack unfilled_slots_obstack;
static rtx *unfilled_firstobj;

/* Define macros to refer to the first and last slot containing unfilled
   insns.  These are used because the list may move and its address
   should be recomputed at each use.  */

#define unfilled_slots_base \
  ((rtx *) obstack_base (&unfilled_slots_obstack))

#define unfilled_slots_next \
  ((rtx *) obstack_next_free (&unfilled_slots_obstack))

/* This structure is used to indicate which hardware resources are set or
   needed by insns so far.  */

struct resources
{
  char memory;			/* Insn sets or needs a memory location.  */
  char volatil;			/* Insn sets or needs a volatile memory loc.  */
  char cc;			/* Insn sets or needs the condition codes.  */
  HARD_REG_SET regs;		/* Which registers are set or needed.  */
};

/* Macro to clear all resources.  */
#define CLEAR_RESOURCE(RES) \
  do { (RES)->memory = (RES)->volatil = (RES)->cc = 0; \
       CLEAR_HARD_REG_SET ((RES)->regs); } while (0)

/* Indicates what resources are required at the beginning of the epilogue.  */
static struct resources start_of_epilogue_needs;

/* Indicates what resources are required at function end.  */
static struct resources end_of_function_needs;

/* Points to the label before the end of the function.  */
static rtx end_of_function_label;

/* This structure is used to record liveness information at the targets or
   fallthrough insns of branches.  We will most likely need the information
   at targets again, so save them in a hash table rather than recomputing them
   each time.  */

struct target_info
{
  int uid;			/* INSN_UID of target.  */
  struct target_info *next;	/* Next info for same hash bucket.  */
  HARD_REG_SET live_regs;	/* Registers live at target.  */
  int block;			/* Basic block number containing target.  */
  int bb_tick;			/* Generation count of basic block info.  */
};

#define TARGET_HASH_PRIME 257

/* Define the hash table itself.  */
static struct target_info **target_hash_table;
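
/* A target's entry is found by chaining down its bucket, e.g.

     for (tinfo = target_hash_table[INSN_UID (insn) % TARGET_HASH_PRIME];
          tinfo; tinfo = tinfo->next)
       if (tinfo->uid == INSN_UID (insn))
         break;

   (the same lookup used in add_to_delay_list below).  */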
202 | ||
203 | /* For each basic block, we maintain a generation number of its basic | |
204 | block info, which is updated each time we move an insn from the | |
205 | target of a jump. This is the generation number indexed by block | |
206 | number. */ | |
207 | ||
208 | static int *bb_ticks; | |
209 | ||
210 | /* Mapping between INSN_UID's and position in the code since INSN_UID's do | |
211 | not always monotonically increase. */ | |
212 | static int *uid_to_ruid; | |
213 | ||
214 | /* Highest valid index in `uid_to_ruid'. */ | |
215 | static int max_uid; | |
216 | ||
217 | static void mark_referenced_resources PROTO((rtx, struct resources *, int)); | |
218 | static void mark_set_resources PROTO((rtx, struct resources *, int, int)); | |
219 | static int stop_search_p PROTO((rtx, int)); | |
220 | static int resource_conflicts_p PROTO((struct resources *, | |
221 | struct resources *)); | |
222 | static int insn_references_resource_p PROTO((rtx, struct resources *, int)); | |
223 | static int insn_sets_resources_p PROTO((rtx, struct resources *, int)); | |
224 | static rtx find_end_label PROTO((void)); | |
225 | static rtx emit_delay_sequence PROTO((rtx, rtx, int, int)); | |
226 | static rtx add_to_delay_list PROTO((rtx, rtx)); | |
227 | static void delete_from_delay_slot PROTO((rtx)); | |
228 | static void delete_scheduled_jump PROTO((rtx)); | |
229 | static void note_delay_statistics PROTO((int, int)); | |
230 | static rtx optimize_skip PROTO((rtx)); | |
231 | static int get_jump_flags PROTO((rtx, rtx)); | |
232 | static int rare_destination PROTO((rtx)); | |
233 | static int mostly_true_jump PROTO((rtx, rtx)); | |
234 | static rtx get_branch_condition PROTO((rtx, rtx)); | |
235 | static int condition_dominates_p PROTO((rtx, rtx)); | |
236 | static rtx steal_delay_list_from_target PROTO((rtx, rtx, rtx, rtx, | |
237 | struct resources *, | |
238 | struct resources *, | |
239 | struct resources *, | |
240 | int, int *, int *, rtx *)); | |
241 | static rtx steal_delay_list_from_fallthrough PROTO((rtx, rtx, rtx, rtx, | |
242 | struct resources *, | |
243 | struct resources *, | |
244 | struct resources *, | |
245 | int, int *, int *)); | |
246 | static void try_merge_delay_insns PROTO((rtx, rtx)); | |
247 | static int redundant_insn_p PROTO((rtx, rtx, rtx)); | |
248 | static int own_thread_p PROTO((rtx, rtx, int)); | |
249 | static int find_basic_block PROTO((rtx)); | |
250 | static void update_block PROTO((rtx, rtx)); | |
251 | static int reorg_redirect_jump PROTO((rtx, rtx)); | |
252 | static void update_reg_dead_notes PROTO((rtx, rtx)); | |
253 | static void update_live_status PROTO((rtx, rtx)); | |
254 | static rtx next_insn_no_annul PROTO((rtx)); | |
255 | static void mark_target_live_regs PROTO((rtx, struct resources *)); | |
256 | static void fill_simple_delay_slots PROTO((rtx, int)); | |
257 | static rtx fill_slots_from_thread PROTO((rtx, rtx, rtx, rtx, int, int, | |
258 | int, int, int, int *)); | |
259 | static void fill_eager_delay_slots PROTO((rtx)); | |
260 | static void relax_delay_slots PROTO((rtx)); | |
261 | static void make_return_insns PROTO((rtx)); | |
262 | \f | |
/* Given X, some rtl, and RES, a pointer to a `struct resources', mark
   which resources are referenced by the insn.  If INCLUDE_DELAYED_EFFECTS
   is TRUE, resources used by the called routine will be included for
   CALL_INSNs.  */

static void
mark_referenced_resources (x, res, include_delayed_effects)
     register rtx x;
     register struct resources *res;
     register int include_delayed_effects;
{
  register enum rtx_code code = GET_CODE (x);
  register int i, j;
  register char *format_ptr;

  /* Handle leaf items for which we set resource flags.  Also, special-case
     CALL, SET and CLOBBER operators.  */
  switch (code)
    {
    case CONST:
    case CONST_INT:
    case CONST_DOUBLE:
    case PC:
    case SYMBOL_REF:
    case LABEL_REF:
      return;

    case SUBREG:
      if (GET_CODE (SUBREG_REG (x)) != REG)
	mark_referenced_resources (SUBREG_REG (x), res, 0);
      else
	{
	  int regno = REGNO (SUBREG_REG (x)) + SUBREG_WORD (x);
	  int last_regno = regno + HARD_REGNO_NREGS (regno, GET_MODE (x));
	  for (i = regno; i < last_regno; i++)
	    SET_HARD_REG_BIT (res->regs, i);
	}
      return;

    case REG:
      for (i = 0; i < HARD_REGNO_NREGS (REGNO (x), GET_MODE (x)); i++)
	SET_HARD_REG_BIT (res->regs, REGNO (x) + i);
      return;

    case MEM:
      /* If this memory shouldn't change, it really isn't referencing
	 memory.  */
      if (! RTX_UNCHANGING_P (x))
	res->memory = 1;
      res->volatil = MEM_VOLATILE_P (x);

      /* Mark registers used to access memory.  */
      mark_referenced_resources (XEXP (x, 0), res, 0);
      return;

    case CC0:
      res->cc = 1;
      return;

    case UNSPEC_VOLATILE:
    case ASM_INPUT:
      /* Traditional asm's are always volatile.  */
      res->volatil = 1;
      return;

    case ASM_OPERANDS:
      res->volatil = MEM_VOLATILE_P (x);

      /* For all ASM_OPERANDS, we must traverse the vector of input operands.
	 We cannot just fall through here since then we would be confused
	 by the ASM_INPUT rtx inside ASM_OPERANDS, which does not indicate
	 a traditional asm, unlike its normal usage.  */

      for (i = 0; i < ASM_OPERANDS_INPUT_LENGTH (x); i++)
	mark_referenced_resources (ASM_OPERANDS_INPUT (x, i), res, 0);
      return;
339 | ||
340 | case CALL: | |
341 | /* The first operand will be a (MEM (xxx)) but doesn't really reference | |
342 | memory. The second operand may be referenced, though. */ | |
343 | mark_referenced_resources (XEXP (XEXP (x, 0), 0), res, 0); | |
344 | mark_referenced_resources (XEXP (x, 1), res, 0); | |
345 | return; | |
346 | ||
347 | case SET: | |
348 | /* Usually, the first operand of SET is set, not referenced. But | |
349 | registers used to access memory are referenced. SET_DEST is | |
350 | also referenced if it is a ZERO_EXTRACT or SIGN_EXTRACT. */ | |
351 | ||
352 | mark_referenced_resources (SET_SRC (x), res, 0); | |
353 | ||
354 | x = SET_DEST (x); | |
355 | if (GET_CODE (x) == SIGN_EXTRACT || GET_CODE (x) == ZERO_EXTRACT) | |
356 | mark_referenced_resources (x, res, 0); | |
357 | else if (GET_CODE (x) == SUBREG) | |
358 | x = SUBREG_REG (x); | |
359 | if (GET_CODE (x) == MEM) | |
360 | mark_referenced_resources (XEXP (x, 0), res, 0); | |
361 | return; | |
362 | ||
363 | case CLOBBER: | |
364 | return; | |
365 | ||
366 | case CALL_INSN: | |
367 | if (include_delayed_effects) | |
368 | { | |
369 | /* A CALL references memory, the frame pointer if it exists, the | |
370 | stack pointer, any global registers and any registers given in | |
371 | USE insns immediately in front of the CALL. | |
372 | ||
373 | However, we may have moved some of the parameter loading insns | |
374 | into the delay slot of this CALL. If so, the USE's for them | |
375 | don't count and should be skipped. */ | |
376 | rtx insn = PREV_INSN (x); | |
377 | rtx sequence = 0; | |
378 | int seq_size = 0; | |
379 | int i; | |
380 | ||
381 | /* If we are part of a delay slot sequence, point at the SEQUENCE. */ | |
382 | if (NEXT_INSN (insn) != x) | |
383 | { | |
384 | sequence = PATTERN (NEXT_INSN (insn)); | |
385 | seq_size = XVECLEN (sequence, 0); | |
386 | if (GET_CODE (sequence) != SEQUENCE) | |
387 | abort (); | |
388 | } | |
389 | ||
390 | res->memory = 1; | |
391 | SET_HARD_REG_BIT (res->regs, STACK_POINTER_REGNUM); | |
392 | if (frame_pointer_needed) | |
393 | SET_HARD_REG_BIT (res->regs, FRAME_POINTER_REGNUM); | |
394 | ||
395 | for (i = 0; i < FIRST_PSEUDO_REGISTER; i++) | |
396 | if (global_regs[i]) | |
397 | SET_HARD_REG_BIT (res->regs, i); | |
398 | ||
399 | /* Skip any labels between the CALL_INSN and possible USE insns. */ | |
400 | while (GET_CODE (insn) == CODE_LABEL) | |
401 | insn = PREV_INSN (insn); | |
402 | ||
403 | for ( ; (insn && GET_CODE (insn) == INSN | |
404 | && GET_CODE (PATTERN (insn)) == USE); | |
405 | insn = PREV_INSN (insn)) | |
406 | { | |
407 | for (i = 1; i < seq_size; i++) | |
408 | { | |
409 | rtx slot_pat = PATTERN (XVECEXP (sequence, 0, i)); | |
410 | if (GET_CODE (slot_pat) == SET | |
411 | && rtx_equal_p (SET_DEST (slot_pat), | |
412 | XEXP (PATTERN (insn), 0))) | |
413 | break; | |
414 | } | |
415 | if (i >= seq_size) | |
416 | mark_referenced_resources (XEXP (PATTERN (insn), 0), res, 0); | |
417 | } | |
418 | } | |
419 | ||
420 | /* ... fall through to other INSN processing ... */ | |
421 | ||
422 | case INSN: | |
423 | case JUMP_INSN: | |
424 | ||
425 | #ifdef INSN_REFERENCES_ARE_DELAYED | |
426 | if (! include_delayed_effects | |
427 | && INSN_REFERENCES_ARE_DELAYED (x)) | |
428 | return; | |
429 | #endif | |
430 | ||
431 | /* No special processing, just speed up. */ | |
432 | mark_referenced_resources (PATTERN (x), res, include_delayed_effects); | |
433 | return; | |
434 | } | |
435 | ||
436 | /* Process each sub-expression and flag what it needs. */ | |
437 | format_ptr = GET_RTX_FORMAT (code); | |
438 | for (i = 0; i < GET_RTX_LENGTH (code); i++) | |
439 | switch (*format_ptr++) | |
440 | { | |
441 | case 'e': | |
442 | mark_referenced_resources (XEXP (x, i), res, include_delayed_effects); | |
443 | break; | |
444 | ||
445 | case 'E': | |
446 | for (j = 0; j < XVECLEN (x, i); j++) | |
447 | mark_referenced_resources (XVECEXP (x, i, j), res, | |
448 | include_delayed_effects); | |
449 | break; | |
450 | } | |
451 | } | |
452 | \f | |
/* Given X, a part of an insn, and a pointer to a `struct resources', RES,
   indicate which resources are modified by the insn.  If
   INCLUDE_DELAYED_EFFECTS is nonzero, also mark resources potentially
   set by the called routine.

   If IN_DEST is nonzero, it means we are inside a SET.  Otherwise,
   objects are being referenced instead of set.

   We never mark the insn as modifying the condition code unless it explicitly
   SETs CC0 even though this is not totally correct.  The reason for this is
   that we require a SET of CC0 to immediately precede the reference to CC0.
   So if some other insn sets CC0 as a side-effect, we know it cannot affect
   our computation and thus may be placed in a delay slot.  */

static void
mark_set_resources (x, res, in_dest, include_delayed_effects)
     register rtx x;
     register struct resources *res;
     int in_dest;
     int include_delayed_effects;
{
  register enum rtx_code code;
  register int i, j;
  register char *format_ptr;

 restart:

  code = GET_CODE (x);

  switch (code)
    {
    case NOTE:
    case BARRIER:
    case CODE_LABEL:
    case USE:
    case CONST_INT:
    case CONST_DOUBLE:
    case LABEL_REF:
    case SYMBOL_REF:
    case CONST:
    case PC:
      /* These don't set any resources.  */
      return;

    case CC0:
      if (in_dest)
	res->cc = 1;
      return;

    case CALL_INSN:
      /* Called routine modifies the condition code, memory, any registers
	 that aren't saved across calls, global registers and anything
	 explicitly CLOBBERed immediately after the CALL_INSN.  */

      if (include_delayed_effects)
	{
	  rtx next = NEXT_INSN (x);

	  res->cc = res->memory = 1;
	  for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
	    if (call_used_regs[i] || global_regs[i])
	      SET_HARD_REG_BIT (res->regs, i);

	  /* Skip any possible labels between the CALL_INSN and CLOBBERs.  */
	  while (GET_CODE (next) == CODE_LABEL)
	    next = NEXT_INSN (next);

	  for (; (next && GET_CODE (next) == INSN
		  && GET_CODE (PATTERN (next)) == CLOBBER);
	       next = NEXT_INSN (next))
	    mark_set_resources (XEXP (PATTERN (next), 0), res, 1, 0);
	}

      /* ... and also what its RTL says it modifies, if anything.  */

    case JUMP_INSN:
    case INSN:

      /* An insn consisting of just a CLOBBER (or USE) is just for flow
	 and doesn't actually do anything, so we ignore it.  */

#ifdef INSN_SETS_ARE_DELAYED
      if (! include_delayed_effects
	  && INSN_SETS_ARE_DELAYED (x))
	return;
#endif

      x = PATTERN (x);
      if (GET_CODE (x) != USE && GET_CODE (x) != CLOBBER)
	goto restart;
      return;

    case SET:
      /* If the source of a SET is a CALL, this is actually done by
	 the called routine.  So only include it if we are to include the
	 effects of the called routine.  */

      mark_set_resources (SET_DEST (x), res,
			  (include_delayed_effects
			   || GET_CODE (SET_SRC (x)) != CALL),
			  0);

      mark_set_resources (SET_SRC (x), res, 0, 0);
      return;

    case CLOBBER:
      mark_set_resources (XEXP (x, 0), res, 1, 0);
      return;

    case SEQUENCE:
      for (i = 0; i < XVECLEN (x, 0); i++)
	if (! (INSN_ANNULLED_BRANCH_P (XVECEXP (x, 0, 0))
	       && INSN_FROM_TARGET_P (XVECEXP (x, 0, i))))
	  mark_set_resources (XVECEXP (x, 0, i), res, 0,
			      include_delayed_effects);
      return;

    case POST_INC:
    case PRE_INC:
    case POST_DEC:
    case PRE_DEC:
      mark_set_resources (XEXP (x, 0), res, 1, 0);
      return;

    case ZERO_EXTRACT:
      mark_set_resources (XEXP (x, 0), res, in_dest, 0);
      mark_set_resources (XEXP (x, 1), res, 0, 0);
      mark_set_resources (XEXP (x, 2), res, 0, 0);
      return;

    case MEM:
      if (in_dest)
	{
	  res->memory = 1;
	  res->volatil = MEM_VOLATILE_P (x);
	}

      mark_set_resources (XEXP (x, 0), res, 0, 0);
      return;

    case REG:
      if (in_dest)
	for (i = 0; i < HARD_REGNO_NREGS (REGNO (x), GET_MODE (x)); i++)
	  SET_HARD_REG_BIT (res->regs, REGNO (x) + i);
      return;
    }

  /* Process each sub-expression and flag what it needs.  */
  format_ptr = GET_RTX_FORMAT (code);
  for (i = 0; i < GET_RTX_LENGTH (code); i++)
    switch (*format_ptr++)
      {
      case 'e':
	mark_set_resources (XEXP (x, i), res, in_dest, include_delayed_effects);
	break;

      case 'E':
	for (j = 0; j < XVECLEN (x, i); j++)
	  mark_set_resources (XVECEXP (x, i, j), res, in_dest,
			      include_delayed_effects);
	break;
      }
}
\f
/* Return TRUE if this insn should stop the search for insns to fill delay
   slots.  LABELS_P indicates that labels should terminate the search.
   In all cases, jumps terminate the search.  */

static int
stop_search_p (insn, labels_p)
     rtx insn;
     int labels_p;
{
  if (insn == 0)
    return 1;

  switch (GET_CODE (insn))
    {
    case NOTE:
    case CALL_INSN:
      return 0;

    case CODE_LABEL:
      return labels_p;

    case JUMP_INSN:
    case BARRIER:
      return 1;

    case INSN:
      /* OK unless it contains a delay slot or is an `asm' insn of some type.
	 We don't know anything about these.  */
      return (GET_CODE (PATTERN (insn)) == SEQUENCE
	      || GET_CODE (PATTERN (insn)) == ASM_INPUT
	      || asm_noperands (PATTERN (insn)) >= 0);

    default:
      abort ();
    }
}
\f
/* Return TRUE if any resources are marked in both RES1 and RES2 or if either
   resource set contains a volatile memory reference.  Otherwise, return
   FALSE.  */

static int
resource_conflicts_p (res1, res2)
     struct resources *res1, *res2;
{
  if ((res1->cc && res2->cc) || (res1->memory && res2->memory)
      || res1->volatil || res2->volatil)
    return 1;

#ifdef HARD_REG_SET
  return (res1->regs & res2->regs) != HARD_CONST (0);
#else
  {
    int i;

    for (i = 0; i < HARD_REG_SET_LONGS; i++)
      if ((res1->regs[i] & res2->regs[i]) != 0)
	return 1;
    return 0;
  }
#endif
}

/* Return TRUE if any resource marked in RES, a `struct resources', is
   referenced by INSN.  If INCLUDE_DELAYED_EFFECTS is set, return TRUE if
   the called routine uses those resources.

   We compute this by computing all the resources referenced by INSN and
   seeing if this conflicts with RES.  It might be faster to directly check
   ourselves, and this is the way it used to work, but it means duplicating
   a large block of complex code.  */

static int
insn_references_resource_p (insn, res, include_delayed_effects)
     register rtx insn;
     register struct resources *res;
     int include_delayed_effects;
{
  struct resources insn_res;

  CLEAR_RESOURCE (&insn_res);
  mark_referenced_resources (insn, &insn_res, include_delayed_effects);
  return resource_conflicts_p (&insn_res, res);
}

/* Return TRUE if INSN modifies resources that are marked in RES.
   INCLUDE_DELAYED_EFFECTS is set if the actions of the called routine
   should be included.  CC0 is only modified if it is explicitly set; see
   comments in front of mark_set_resources for details.  */

static int
insn_sets_resource_p (insn, res, include_delayed_effects)
     register rtx insn;
     register struct resources *res;
     int include_delayed_effects;
{
  struct resources insn_sets;

  CLEAR_RESOURCE (&insn_sets);
  mark_set_resources (insn, &insn_sets, 0, include_delayed_effects);
  return resource_conflicts_p (&insn_sets, res);
}
\f
/* Find a label at the end of the function or before a RETURN.  If there is
   none, make one.  */

static rtx
find_end_label ()
{
  rtx insn;

  /* If we found one previously, return it.  */
  if (end_of_function_label)
    return end_of_function_label;

  /* Otherwise, see if there is a label at the end of the function.  If there
     is, it must be that RETURN insns aren't needed, so that is our return
     label and we don't have to do anything else.  */

  insn = get_last_insn ();
  while (GET_CODE (insn) == NOTE
	 || (GET_CODE (insn) == INSN
	     && (GET_CODE (PATTERN (insn)) == USE
		 || GET_CODE (PATTERN (insn)) == CLOBBER)))
    insn = PREV_INSN (insn);

  if (GET_CODE (insn) == CODE_LABEL)
    end_of_function_label = insn;
  else
    {
      /* Otherwise, make a new label and emit a RETURN and BARRIER,
	 if needed.  */
      end_of_function_label = gen_label_rtx ();
      LABEL_NUSES (end_of_function_label) = 0;
      emit_label (end_of_function_label);
#ifdef HAVE_return
      if (HAVE_return)
	{
	  /* The return we make may have delay slots too.  */
	  rtx insn = gen_return ();
	  insn = emit_jump_insn (insn);
	  emit_barrier ();
	  if (num_delay_slots (insn) > 0)
	    obstack_ptr_grow (&unfilled_slots_obstack, insn);
	}
#endif
    }

  /* Show one additional use for this label so it won't go away until
     we are done.  */
  ++LABEL_NUSES (end_of_function_label);

  return end_of_function_label;
}
\f
/* Put INSN and LIST together in a SEQUENCE rtx of LENGTH, and replace
   the pattern of INSN with the SEQUENCE.

   Chain the insns so that NEXT_INSN of each insn in the sequence points to
   the next and NEXT_INSN of the last insn in the sequence points to
   the first insn after the sequence.  Similarly for PREV_INSN.  This makes
   it easier to scan all insns.

   Returns the SEQUENCE that replaces INSN.  */
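
/* Schematically, the pattern of the replacement insn is

     (sequence [INSN  slot-1-insn ... slot-LENGTH-insn])

   with the insn needing delay slots at element 0 and the delay insns in
   the remaining elements (an illustration of the layout built below).  */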
779 | ||
780 | static rtx | |
781 | emit_delay_sequence (insn, list, length, avail) | |
782 | rtx insn; | |
783 | rtx list; | |
784 | int length; | |
785 | int avail; | |
786 | { | |
787 | register int i = 1; | |
788 | register rtx li; | |
789 | int had_barrier = 0; | |
790 | ||
791 | /* Allocate the the rtvec to hold the insns and the SEQUENCE. */ | |
792 | rtvec seqv = rtvec_alloc (length + 1); | |
793 | rtx seq = gen_rtx (SEQUENCE, VOIDmode, seqv); | |
794 | rtx seq_insn = make_insn_raw (seq); | |
795 | rtx first = get_insns (); | |
796 | rtx last = get_last_insn (); | |
797 | ||
798 | /* Make a copy of the insn having delay slots. */ | |
799 | rtx delay_insn = copy_rtx (insn); | |
800 | ||
801 | /* If INSN is followed by a BARRIER, delete the BARRIER since it will only | |
802 | confuse further processing. Update LAST in case it was the last insn. | |
803 | We will put the BARRIER back in later. */ | |
804 | if (NEXT_INSN (insn) && GET_CODE (NEXT_INSN (insn)) == BARRIER) | |
805 | { | |
806 | delete_insn (NEXT_INSN (insn)); | |
807 | last = get_last_insn (); | |
808 | had_barrier = 1; | |
809 | } | |
810 | ||
811 | /* Splice our SEQUENCE into the insn stream where INSN used to be. */ | |
812 | NEXT_INSN (seq_insn) = NEXT_INSN (insn); | |
813 | PREV_INSN (seq_insn) = PREV_INSN (insn); | |
814 | ||
815 | if (insn == last) | |
816 | set_new_first_and_last_insn (first, seq_insn); | |
817 | else | |
818 | PREV_INSN (NEXT_INSN (seq_insn)) = seq_insn; | |
819 | ||
820 | if (insn == first) | |
821 | set_new_first_and_last_insn (seq_insn, last); | |
822 | else | |
823 | NEXT_INSN (PREV_INSN (seq_insn)) = seq_insn; | |
824 | ||
825 | /* Build our SEQUENCE and rebuild the insn chain. */ | |
826 | XVECEXP (seq, 0, 0) = delay_insn; | |
827 | INSN_DELETED_P (delay_insn) = 0; | |
828 | PREV_INSN (delay_insn) = PREV_INSN (seq_insn); | |
829 | ||
830 | for (li = list; li; li = XEXP (li, 1), i++) | |
831 | { | |
832 | rtx tem = XEXP (li, 0); | |
833 | rtx note; | |
834 | ||
835 | /* Show that this copy of the insn isn't deleted. */ | |
836 | INSN_DELETED_P (tem) = 0; | |
837 | ||
838 | XVECEXP (seq, 0, i) = tem; | |
839 | PREV_INSN (tem) = XVECEXP (seq, 0, i - 1); | |
840 | NEXT_INSN (XVECEXP (seq, 0, i - 1)) = tem; | |
841 | ||
842 | /* Remove any REG_DEAD notes because we can't rely on them now | |
843 | that the insn has been moved. */ | |
844 | for (note = REG_NOTES (tem); note; note = XEXP (note, 1)) | |
845 | if (REG_NOTE_KIND (note) == REG_DEAD) | |
846 | XEXP (note, 0) = const0_rtx; | |
847 | } | |
848 | ||
849 | NEXT_INSN (XVECEXP (seq, 0, length)) = NEXT_INSN (seq_insn); | |
850 | ||
851 | /* If the previous insn is a SEQUENCE, update the NEXT_INSN pointer on the | |
852 | last insn in that SEQUENCE to point to us. Similarly for the first | |
853 | insn in the following insn if it is a SEQUENCE. */ | |
854 | ||
855 | if (PREV_INSN (seq_insn) && GET_CODE (PREV_INSN (seq_insn)) == INSN | |
856 | && GET_CODE (PATTERN (PREV_INSN (seq_insn))) == SEQUENCE) | |
857 | NEXT_INSN (XVECEXP (PATTERN (PREV_INSN (seq_insn)), 0, | |
858 | XVECLEN (PATTERN (PREV_INSN (seq_insn)), 0) - 1)) | |
859 | = seq_insn; | |
860 | ||
861 | if (NEXT_INSN (seq_insn) && GET_CODE (NEXT_INSN (seq_insn)) == INSN | |
862 | && GET_CODE (PATTERN (NEXT_INSN (seq_insn))) == SEQUENCE) | |
863 | PREV_INSN (XVECEXP (PATTERN (NEXT_INSN (seq_insn)), 0, 0)) = seq_insn; | |
864 | ||
865 | /* If there used to be a BARRIER, put it back. */ | |
866 | if (had_barrier) | |
867 | emit_barrier_after (seq_insn); | |
868 | ||
869 | if (i != length + 1) | |
870 | abort (); | |
871 | ||
872 | return seq_insn; | |
873 | } | |
874 | ||
/* Add INSN to DELAY_LIST and return the head of the new list.  The list must
   be in the order in which the insns are to be executed.  */

static rtx
add_to_delay_list (insn, delay_list)
     rtx insn;
     rtx delay_list;
{
  /* If we have an empty list, just make a new list element.  If
     INSN has its block number recorded, clear it since we may
     be moving the insn to a new block.  */

  if (delay_list == 0)
    {
      struct target_info *tinfo;

      for (tinfo = target_hash_table[INSN_UID (insn) % TARGET_HASH_PRIME];
	   tinfo; tinfo = tinfo->next)
	if (tinfo->uid == INSN_UID (insn))
	  break;

      if (tinfo)
	tinfo->block = -1;

      return gen_rtx (INSN_LIST, VOIDmode, insn, NULL_RTX);
    }

  /* Otherwise this must be an INSN_LIST.  Add INSN to the end of the
     list.  */
  XEXP (delay_list, 1) = add_to_delay_list (insn, XEXP (delay_list, 1));

  return delay_list;
}
\f
/* Delete INSN from the delay slot of the insn that it is in.  This may
   produce an insn without anything in its delay slots.  */

static void
delete_from_delay_slot (insn)
     rtx insn;
{
  rtx trial, seq_insn, seq, prev;
  rtx delay_list = 0;
  int i;

  /* We first must find the insn containing the SEQUENCE with INSN in its
     delay slot.  Do this by finding an insn, TRIAL, where
     PREV_INSN (NEXT_INSN (TRIAL)) != TRIAL.  */
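
  /* (Inside a SEQUENCE the slot insns are chained to one another, so that
     test holds until TRIAL is the last insn in the sequence; the insn
     following the sequence has the SEQUENCE insn itself, not TRIAL, as
     its PREV_INSN.)  */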
923 | ||
924 | for (trial = insn; | |
925 | PREV_INSN (NEXT_INSN (trial)) == trial; | |
926 | trial = NEXT_INSN (trial)) | |
927 | ; | |
928 | ||
929 | seq_insn = PREV_INSN (NEXT_INSN (trial)); | |
930 | seq = PATTERN (seq_insn); | |
931 | ||
932 | /* Create a delay list consisting of all the insns other than the one | |
933 | we are deleting (unless we were the only one). */ | |
934 | if (XVECLEN (seq, 0) > 2) | |
935 | for (i = 1; i < XVECLEN (seq, 0); i++) | |
936 | if (XVECEXP (seq, 0, i) != insn) | |
937 | delay_list = add_to_delay_list (XVECEXP (seq, 0, i), delay_list); | |
938 | ||
939 | /* Delete the old SEQUENCE, re-emit the insn that used to have the delay | |
940 | list, and rebuild the delay list if non-empty. */ | |
941 | prev = PREV_INSN (seq_insn); | |
942 | trial = XVECEXP (seq, 0, 0); | |
943 | delete_insn (seq_insn); | |
944 | add_insn_after (trial, prev); | |
945 | ||
946 | if (GET_CODE (trial) == JUMP_INSN | |
947 | && (simplejump_p (trial) || GET_CODE (PATTERN (trial)) == RETURN)) | |
948 | emit_barrier_after (trial); | |
949 | ||
950 | /* If there are any delay insns, remit them. Otherwise clear the | |
951 | annul flag. */ | |
952 | if (delay_list) | |
953 | trial = emit_delay_sequence (trial, delay_list, XVECLEN (seq, 0) - 2, 0); | |
954 | else | |
955 | INSN_ANNULLED_BRANCH_P (trial) = 0; | |
956 | ||
957 | INSN_FROM_TARGET_P (insn) = 0; | |
958 | ||
959 | /* Show we need to fill this insn again. */ | |
960 | obstack_ptr_grow (&unfilled_slots_obstack, trial); | |
961 | } | |
962 | \f | |
/* Delete INSN, a JUMP_INSN.  If it is a conditional jump, we must track down
   the insn that sets CC0 for it and delete it too.  */

static void
delete_scheduled_jump (insn)
     rtx insn;
{
  /* Delete the insn that sets cc0 for us.  On machines without cc0, we could
     delete the insn that sets the condition code, but it is hard to find it.
     Since this case is rare anyway, don't bother trying; there would likely
     be other insns that became dead anyway, which we wouldn't know to
     delete.  */

#ifdef HAVE_cc0
  if (reg_mentioned_p (cc0_rtx, insn))
    {
      rtx note = find_reg_note (insn, REG_CC_SETTER, NULL_RTX);

      /* If a reg-note was found, it points to an insn to set CC0.  This
	 insn is in the delay list of some other insn.  So delete it from
	 the delay list it was in.  */
      if (note)
	{
	  if (! FIND_REG_INC_NOTE (XEXP (note, 0), NULL_RTX)
	      && sets_cc0_p (PATTERN (XEXP (note, 0))) == 1)
	    delete_from_delay_slot (XEXP (note, 0));
	}
      else
	{
	  /* The insn setting CC0 is our previous insn, but it may be in
	     a delay slot.  It will be the last insn in the delay slot, if
	     it is.  */
	  rtx trial = previous_insn (insn);
	  if (GET_CODE (trial) == NOTE)
	    trial = prev_nonnote_insn (trial);
	  if (sets_cc0_p (PATTERN (trial)) != 1
	      || FIND_REG_INC_NOTE (trial, 0))
	    return;
	  if (PREV_INSN (NEXT_INSN (trial)) == trial)
	    delete_insn (trial);
	  else
	    delete_from_delay_slot (trial);
	}
    }
#endif

  delete_insn (insn);
}
\f
/* Counters for delay-slot filling.  */

#define NUM_REORG_FUNCTIONS 2
#define MAX_DELAY_HISTOGRAM 3
#define MAX_REORG_PASSES 2

static int num_insns_needing_delays[NUM_REORG_FUNCTIONS][MAX_REORG_PASSES];

static int num_filled_delays[NUM_REORG_FUNCTIONS][MAX_DELAY_HISTOGRAM+1][MAX_REORG_PASSES];

static int reorg_pass_number;

static void
note_delay_statistics (slots_filled, index)
     int slots_filled, index;
{
  num_insns_needing_delays[index][reorg_pass_number]++;
  if (slots_filled > MAX_DELAY_HISTOGRAM)
    slots_filled = MAX_DELAY_HISTOGRAM;
  num_filled_delays[index][slots_filled][reorg_pass_number]++;
}
\f
#if defined(ANNUL_IFFALSE_SLOTS) || defined(ANNUL_IFTRUE_SLOTS)

/* Optimize the following cases:

   1.  When a conditional branch skips over only one instruction,
       use an annulling branch and put that insn in the delay slot.
       Use either a branch that annuls when the condition is true or
       invert the test with a branch that annuls when the condition is
       false.  This saves insns, since otherwise we must copy an insn
       from the L1 target.

	(orig)           (skip)          (otherwise)
	Bcc.n L1         Bcc',a L1       Bcc,a L1'
	insn             insn            insn2
      L1:              L1:             L1:
	insn2            insn2           insn2
	insn3            insn3         L1':
					 insn3

   2.  When a conditional branch skips over only one instruction,
       and after that, it unconditionally branches somewhere else,
       perform a similar optimization.  This saves executing the
       second branch in the case where the inverted condition is true.

	Bcc.n L1         Bcc',a L2
	insn             insn
      L1:              L1:
	Bra L2           Bra L2

   INSN is a JUMP_INSN.

   This should be expanded to skip over N insns, where N is the number
   of delay slots required.  */
1067 | ||
1068 | static rtx | |
1069 | optimize_skip (insn) | |
1070 | register rtx insn; | |
1071 | { | |
1072 | register rtx trial = next_nonnote_insn (insn); | |
1073 | rtx next_trial = next_active_insn (trial); | |
1074 | rtx delay_list = 0; | |
1075 | rtx target_label; | |
1076 | int flags; | |
1077 | ||
1078 | flags = get_jump_flags (insn, JUMP_LABEL (insn)); | |
1079 | ||
1080 | if (trial == 0 | |
1081 | || GET_CODE (trial) != INSN | |
1082 | || GET_CODE (PATTERN (trial)) == SEQUENCE | |
1083 | || recog_memoized (trial) < 0 | |
1084 | || (! eligible_for_annul_false (insn, 0, trial, flags) | |
1085 | && ! eligible_for_annul_true (insn, 0, trial, flags))) | |
1086 | return 0; | |
1087 | ||
1088 | /* There are two cases where we are just executing one insn (we assume | |
1089 | here that a branch requires only one insn; this should be generalized | |
1090 | at some point): Where the branch goes around a single insn or where | |
1091 | we have one insn followed by a branch to the same label we branch to. | |
1092 | In both of these cases, inverting the jump and annulling the delay | |
1093 | slot give the same effect in fewer insns. */ | |
1094 | if ((next_trial == next_active_insn (JUMP_LABEL (insn))) | |
1095 | || (next_trial != 0 | |
1096 | && GET_CODE (next_trial) == JUMP_INSN | |
1097 | && JUMP_LABEL (insn) == JUMP_LABEL (next_trial) | |
1098 | && (simplejump_p (next_trial) | |
1099 | || GET_CODE (PATTERN (next_trial)) == RETURN))) | |
1100 | { | |
1101 | if (eligible_for_annul_false (insn, 0, trial, flags)) | |
1102 | { | |
1103 | if (invert_jump (insn, JUMP_LABEL (insn))) | |
1104 | INSN_FROM_TARGET_P (trial) = 1; | |
1105 | else if (! eligible_for_annul_true (insn, 0, trial, flags)) | |
1106 | return 0; | |
1107 | } | |
1108 | ||
1109 | delay_list = add_to_delay_list (trial, NULL_RTX); | |
1110 | next_trial = next_active_insn (trial); | |
1111 | update_block (trial, trial); | |
1112 | delete_insn (trial); | |
1113 | ||
1114 | /* Also, if we are targeting an unconditional | |
1115 | branch, thread our jump to the target of that branch. Don't | |
1116 | change this into a RETURN here, because it may not accept what | |
1117 | we have in the delay slot. We'll fix this up later. */ | |
1118 | if (next_trial && GET_CODE (next_trial) == JUMP_INSN | |
1119 | && (simplejump_p (next_trial) | |
1120 | || GET_CODE (PATTERN (next_trial)) == RETURN)) | |
1121 | { | |
1122 | target_label = JUMP_LABEL (next_trial); | |
1123 | if (target_label == 0) | |
1124 | target_label = find_end_label (); | |
1125 | reorg_redirect_jump (insn, target_label); | |
1126 | } | |
1127 | ||
1128 | INSN_ANNULLED_BRANCH_P (insn) = 1; | |
1129 | } | |
1130 | ||
1131 | return delay_list; | |
1132 | } | |
1133 | #endif | |
1134 | \f | |
1135 | ||
/* Encode and return branch direction and prediction information for
   INSN assuming it will jump to LABEL.

   Unconditional branches return no direction information and
   are predicted as very likely taken.  */
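/* (For example, given the code below, a conditional jump backward to an
   earlier label that is predicted taken would yield
   ATTR_FLAG_backward | ATTR_FLAG_likely.)  */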
static int
get_jump_flags (insn, label)
     rtx insn, label;
{
  int flags;

  /* get_jump_flags can be passed any insn with delay slots; these may
     be INSNs, CALL_INSNs, or JUMP_INSNs.  Only JUMP_INSNs have branch
     direction information, and only if they are conditional jumps.

     If LABEL is zero, then there is no way to determine the branch
     direction.  */
  if (GET_CODE (insn) == JUMP_INSN
      && condjump_p (insn)
      && INSN_UID (insn) <= max_uid
      && label != 0
      && INSN_UID (label) <= max_uid)
    flags
      = (uid_to_ruid[INSN_UID (label)] > uid_to_ruid[INSN_UID (insn)])
	? ATTR_FLAG_forward : ATTR_FLAG_backward;
  /* No valid direction information.  */
  else
    flags = 0;

  /* If INSN is a conditional branch, call mostly_true_jump to
     determine the branch prediction.

     Unconditional branches are predicted as very likely taken.  */
  if (GET_CODE (insn) == JUMP_INSN
      && condjump_p (insn))
    {
      int prediction;

      prediction = mostly_true_jump (insn, get_branch_condition (insn, label));
      switch (prediction)
	{
	case 2:
	  flags |= (ATTR_FLAG_very_likely | ATTR_FLAG_likely);
	  break;
	case 1:
	  flags |= ATTR_FLAG_likely;
	  break;
	case 0:
	  flags |= ATTR_FLAG_unlikely;
	  break;
	case -1:
	  flags |= (ATTR_FLAG_very_unlikely | ATTR_FLAG_unlikely);
	  break;

	default:
	  abort ();
	}
    }
  else
    flags |= (ATTR_FLAG_very_likely | ATTR_FLAG_likely);

  return flags;
}

/* Return 1 if DEST is a destination that will be branched to rarely (the
   return point of a function); return 2 if DEST will be branched to very
   rarely (a call to a function that doesn't return).  Otherwise,
   return 0.  */

static int
rare_destination (insn)
     rtx insn;
{
  int jump_count = 0;

  for (; insn; insn = NEXT_INSN (insn))
    {
      if (GET_CODE (insn) == INSN && GET_CODE (PATTERN (insn)) == SEQUENCE)
	insn = XVECEXP (PATTERN (insn), 0, 0);

      switch (GET_CODE (insn))
	{
	case CODE_LABEL:
	  return 0;
	case BARRIER:
	  /* A BARRIER can either be after a JUMP_INSN or a CALL_INSN.  We
	     don't scan past JUMP_INSNs, so any barrier we find here must
	     have been after a CALL_INSN and hence mean the call doesn't
	     return.  */
	  return 2;
	case JUMP_INSN:
	  if (GET_CODE (PATTERN (insn)) == RETURN)
	    return 1;
	  else if (simplejump_p (insn)
		   && jump_count++ < 10)
	    insn = JUMP_LABEL (insn);
	  else
	    return 0;
	}
    }

  /* If we got here it means we hit the end of the function.  So this
     is an unlikely destination.  */

  return 1;
}

/* Return truth value of the statement that this branch
   is mostly taken.  If we think that the branch is extremely likely
   to be taken, we return 2.  If the branch is slightly more likely to be
   taken, return 1.  If the branch is slightly less likely to be taken,
   return 0; if the branch is highly unlikely to be taken, return -1.

   CONDITION, if non-zero, is the condition that JUMP_INSN is testing.  */

static int
mostly_true_jump (jump_insn, condition)
     rtx jump_insn, condition;
{
  rtx target_label = JUMP_LABEL (jump_insn);
  rtx insn;
  int rare_dest = rare_destination (target_label);
  int rare_fallthrough = rare_destination (NEXT_INSN (jump_insn));

  /* If this is a branch outside a loop, it is highly unlikely.  */
  if (GET_CODE (PATTERN (jump_insn)) == SET
      && GET_CODE (SET_SRC (PATTERN (jump_insn))) == IF_THEN_ELSE
      && ((GET_CODE (XEXP (SET_SRC (PATTERN (jump_insn)), 1)) == LABEL_REF
	   && LABEL_OUTSIDE_LOOP_P (XEXP (SET_SRC (PATTERN (jump_insn)), 1)))
	  || (GET_CODE (XEXP (SET_SRC (PATTERN (jump_insn)), 2)) == LABEL_REF
	      && LABEL_OUTSIDE_LOOP_P (XEXP (SET_SRC (PATTERN (jump_insn)), 2)))))
    return -1;

  if (target_label)
    {
      /* If this is the test of a loop, it is very likely true.  We scan
	 backwards from the target label.  If we find a NOTE_INSN_LOOP_BEG
	 before the next real insn, we assume the branch is to the top of
	 the loop.  */
      for (insn = PREV_INSN (target_label);
	   insn && GET_CODE (insn) == NOTE;
	   insn = PREV_INSN (insn))
	if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_BEG)
	  return 2;

      /* If this is a jump to the test of a loop, it is likely true.  We scan
	 forwards from the target label.  If we find a NOTE_INSN_LOOP_VTOP
	 before the next real insn, we assume the branch is to the loop branch
	 test.  */
      for (insn = NEXT_INSN (target_label);
	   insn && GET_CODE (insn) == NOTE;
	   insn = NEXT_INSN (insn))
	if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_VTOP)
	  return 1;
    }

  /* Look at the relative rarities of the fallthrough and destination.  If
     they differ, we can predict the branch that way.  */
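
  /* For example, if the fallthrough leads to a RETURN (rare_fallthrough
     == 1) while the target is ordinary code (rare_dest == 0), the
     difference is 1 and we predict the branch taken.  */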
1294 | ||
1295 | switch (rare_fallthrough - rare_dest) | |
1296 | { | |
1297 | case -2: | |
1298 | return -1; | |
1299 | case -1: | |
1300 | return 0; | |
1301 | case 0: | |
1302 | break; | |
1303 | case 1: | |
1304 | return 1; | |
1305 | case 2: | |
1306 | return 2; | |
1307 | } | |
1308 | ||
1309 | /* If we couldn't figure out what this jump was, assume it won't be | |
1310 | taken. This should be rare. */ | |
1311 | if (condition == 0) | |
1312 | return 0; | |
1313 | ||
1314 | /* EQ tests are usually false and NE tests are usually true. Also, | |
1315 | most quantities are positive, so we can make the appropriate guesses | |
1316 | about signed comparisons against zero. */ | |
1317 | switch (GET_CODE (condition)) | |
1318 | { | |
1319 | case CONST_INT: | |
1320 | /* Unconditional branch. */ | |
1321 | return 1; | |
1322 | case EQ: | |
1323 | return 0; | |
1324 | case NE: | |
1325 | return 1; | |
1326 | case LE: | |
1327 | case LT: | |
1328 | if (XEXP (condition, 1) == const0_rtx) | |
1329 | return 0; | |
1330 | break; | |
1331 | case GE: | |
1332 | case GT: | |
1333 | if (XEXP (condition, 1) == const0_rtx) | |
1334 | return 1; | |
1335 | break; | |
1336 | } | |
1337 | ||
1338 | /* Predict backward branches usually take, forward branches usually not. If | |
1339 | we don't know whether this is forward or backward, assume the branch | |
1340 | will be taken, since most are. */ | |
1341 | return (target_label == 0 || INSN_UID (jump_insn) > max_uid | |
1342 | || INSN_UID (target_label) > max_uid | |
1343 | || (uid_to_ruid[INSN_UID (jump_insn)] | |
1344 | > uid_to_ruid[INSN_UID (target_label)]));; | |
1345 | } | |
1346 | ||
/* Return the condition under which INSN will branch to TARGET.  If TARGET
   is zero, return the condition under which INSN will return.  If INSN is
   an unconditional branch, return const_true_rtx.  If INSN isn't a simple
   type of jump, or it doesn't go to TARGET, return 0.  */

static rtx
get_branch_condition (insn, target)
     rtx insn;
     rtx target;
{
  rtx pat = PATTERN (insn);
  rtx src;

  if (GET_CODE (pat) == RETURN)
    return target == 0 ? const_true_rtx : 0;

  else if (GET_CODE (pat) != SET || SET_DEST (pat) != pc_rtx)
    return 0;

  src = SET_SRC (pat);
  if (GET_CODE (src) == LABEL_REF && XEXP (src, 0) == target)
    return const_true_rtx;

  else if (GET_CODE (src) == IF_THEN_ELSE
	   && ((target == 0 && GET_CODE (XEXP (src, 1)) == RETURN)
	       || (GET_CODE (XEXP (src, 1)) == LABEL_REF
		   && XEXP (XEXP (src, 1), 0) == target))
	   && XEXP (src, 2) == pc_rtx)
    return XEXP (src, 0);

  else if (GET_CODE (src) == IF_THEN_ELSE
	   && ((target == 0 && GET_CODE (XEXP (src, 2)) == RETURN)
	       || (GET_CODE (XEXP (src, 2)) == LABEL_REF
		   && XEXP (XEXP (src, 2), 0) == target))
	   && XEXP (src, 1) == pc_rtx)
    return gen_rtx (reverse_condition (GET_CODE (XEXP (src, 0))),
		    GET_MODE (XEXP (src, 0)),
		    XEXP (XEXP (src, 0), 0), XEXP (XEXP (src, 0), 1));

  return 0;
}

/* Return non-zero if CONDITION is more strict than the condition of
   INSN, i.e., if INSN will always branch if CONDITION is true.  */

static int
condition_dominates_p (condition, insn)
     rtx condition;
     rtx insn;
{
  rtx other_condition = get_branch_condition (insn, JUMP_LABEL (insn));
  enum rtx_code code = GET_CODE (condition);
  enum rtx_code other_code;

  if (rtx_equal_p (condition, other_condition)
      || other_condition == const_true_rtx)
    return 1;

  else if (condition == const_true_rtx || other_condition == 0)
    return 0;

  other_code = GET_CODE (other_condition);
  if (GET_RTX_LENGTH (code) != 2 || GET_RTX_LENGTH (other_code) != 2
      || ! rtx_equal_p (XEXP (condition, 0), XEXP (other_condition, 0))
      || ! rtx_equal_p (XEXP (condition, 1), XEXP (other_condition, 1)))
    return 0;

  return comparison_dominates_p (code, other_code);
}
\f
/* INSN branches to an insn whose pattern SEQ is a SEQUENCE.  Given that
   the condition tested by INSN is CONDITION and the resources shown in
   OTHER_NEEDED are needed after INSN, see whether INSN can take all the insns
   from SEQ's delay list, in addition to whatever insns it may execute
   (in DELAY_LIST).  SETS and NEEDED denote resources already set and
   needed while searching for delay slot insns.  Return the concatenated
   delay list if possible, otherwise, return 0.

   SLOTS_TO_FILL is the total number of slots required by INSN, and
   PSLOTS_FILLED points to the number filled so far (also the number of
   insns in DELAY_LIST).  It is updated with the number that have been
   filled from the SEQUENCE, if any.

   PANNUL_P points to a non-zero value if we already know that we need
   to annul INSN.  If this routine determines that annulling is needed,
   it may set that value non-zero.

   PNEW_THREAD points to a location that is to receive the place at which
   execution should continue.  */

static rtx
steal_delay_list_from_target (insn, condition, seq, delay_list,
			      sets, needed, other_needed,
			      slots_to_fill, pslots_filled, pannul_p,
			      pnew_thread)
     rtx insn, condition;
     rtx seq;
     rtx delay_list;
     struct resources *sets, *needed, *other_needed;
     int slots_to_fill;
     int *pslots_filled;
     int *pannul_p;
     rtx *pnew_thread;
{
  rtx temp;
  int slots_remaining = slots_to_fill - *pslots_filled;
  int total_slots_filled = *pslots_filled;
  rtx new_delay_list = 0;
  int must_annul = *pannul_p;
  int i;

  /* We can't do anything if there are more delay slots in SEQ than we
     can handle, or if we don't know that it will be a taken branch.

     We know that it will be a taken branch if it is either an unconditional
     branch or a conditional branch with a stricter branch condition.  */

  if (XVECLEN (seq, 0) - 1 > slots_remaining
      || ! condition_dominates_p (condition, XVECEXP (seq, 0, 0)))
    return delay_list;

  for (i = 1; i < XVECLEN (seq, 0); i++)
    {
      rtx trial = XVECEXP (seq, 0, i);
      int flags;

      if (insn_references_resource_p (trial, sets, 0)
	  || insn_sets_resource_p (trial, needed, 0)
	  || insn_sets_resource_p (trial, sets, 0)
#ifdef HAVE_cc0
	  /* If TRIAL sets CC0, we can't copy it, so we can't steal this
	     delay list.  */
	  || find_reg_note (trial, REG_CC_USER, NULL_RTX)
#endif
	  /* If TRIAL is from the fallthrough code of an annulled branch insn
	     in SEQ, we cannot use it.  */
	  || (INSN_ANNULLED_BRANCH_P (XVECEXP (seq, 0, 0))
	      && ! INSN_FROM_TARGET_P (trial)))
	return delay_list;

      /* If this insn was already done (usually in a previous delay slot),
	 pretend we put it in our delay slot.  */
      if (redundant_insn_p (trial, insn, new_delay_list))
	continue;

      /* We will end up re-vectoring this branch, so compute flags
	 based on jumping to the new label.  */
      flags = get_jump_flags (insn, JUMP_LABEL (XVECEXP (seq, 0, 0)));

      if (! must_annul
	  && ((condition == const_true_rtx
	       || (! insn_sets_resource_p (trial, other_needed, 0)
		   && ! may_trap_p (PATTERN (trial)))))
	  ? eligible_for_delay (insn, total_slots_filled, trial, flags)
	  : (must_annul = 1,
	     eligible_for_annul_false (insn, total_slots_filled, trial, flags)))
	{
	  temp = copy_rtx (trial);
	  INSN_FROM_TARGET_P (temp) = 1;
	  new_delay_list = add_to_delay_list (temp, new_delay_list);
	  total_slots_filled++;

	  if (--slots_remaining == 0)
	    break;
	}
      else
	return delay_list;
    }

  /* Show the place to which we will be branching.  */
  *pnew_thread = next_active_insn (JUMP_LABEL (XVECEXP (seq, 0, 0)));

  /* Add any new insns to the delay list and update the count of the
     number of slots filled.  */
  *pslots_filled = total_slots_filled;
  *pannul_p = must_annul;

  if (delay_list == 0)
    return new_delay_list;

  for (temp = new_delay_list; temp; temp = XEXP (temp, 1))
    delay_list = add_to_delay_list (XEXP (temp, 0), delay_list);
1529 | ||
1530 | return delay_list; | |
1531 | } | |
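
/* A sketch of the effect on a made-up example: INSN is a conditional
   branch to L1, and the insn at L1 is a filled unconditional branch to
   L2.  The delay insns of that branch may be copied into INSN's own
   slots (each copy marked INSN_FROM_TARGET_P, and INSN annulled when a
   copy is safe only on the taken path), after which *PNEW_THREAD tells
   the caller to re-vector INSN toward L2.  */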
1532 | \f | |
1533 | /* Similar to steal_delay_list_from_target except that SEQ is on the | |
1534 | fallthrough path of INSN. Here we only do something if the delay insn | |
1535 | of SEQ is an unconditional branch. In that case we steal its delay slot | |
1536 | for INSN since unconditional branches are much easier to fill. */ | |
1537 | ||
1538 | static rtx | |
1539 | steal_delay_list_from_fallthrough (insn, condition, seq, | |
1540 | delay_list, sets, needed, other_needed, | |
1541 | slots_to_fill, pslots_filled, pannul_p) | |
1542 | rtx insn, condition; | |
1543 | rtx seq; | |
1544 | rtx delay_list; | |
1545 | struct resources *sets, *needed, *other_needed; | |
1546 | int slots_to_fill; | |
1547 | int *pslots_filled; | |
1548 | int *pannul_p; | |
1549 | { | |
1550 | int i; | |
1551 | int flags; | |
1552 | ||
1553 | flags = get_jump_flags (insn, JUMP_LABEL (insn)); | |
1554 | ||
1555 | /* We can't do anything if SEQ's delay insn isn't an | |
1556 | unconditional branch. */ | |
1557 | ||
1558 | if (! simplejump_p (XVECEXP (seq, 0, 0)) | |
1559 | && GET_CODE (PATTERN (XVECEXP (seq, 0, 0))) != RETURN) | |
1560 | return delay_list; | |
1561 | ||
1562 | for (i = 1; i < XVECLEN (seq, 0); i++) | |
1563 | { | |
1564 | rtx trial = XVECEXP (seq, 0, i); | |
1565 | ||
1566 | /* If TRIAL sets CC0, stealing it will move it too far from the use | |
1567 | of CC0. */ | |
1568 | if (insn_references_resource_p (trial, sets, 0) | |
1569 | || insn_sets_resource_p (trial, needed, 0) | |
1570 | || insn_sets_resource_p (trial, sets, 0) | |
1571 | #ifdef HAVE_cc0 | |
1572 | || sets_cc0_p (PATTERN (trial)) | |
1573 | #endif | |
1574 | ) | |
1575 | ||
1576 | break; | |
1577 | ||
1578 | /* If this insn was already done, we don't need it. */ | |
1579 | if (redundant_insn_p (trial, insn, delay_list)) | |
1580 | { | |
1581 | delete_from_delay_slot (trial); | |
1582 | continue; | |
1583 | } | |
1584 | ||
1585 | if (! *pannul_p | |
1586 | && ((condition == const_true_rtx | |
1587 | || (! insn_sets_resource_p (trial, other_needed, 0) | |
1588 | && ! may_trap_p (PATTERN (trial))))) | |
1589 | ? eligible_for_delay (insn, *pslots_filled, trial, flags) | |
1590 | : (*pannul_p = 1, | |
1591 | eligible_for_annul_true (insn, *pslots_filled, trial, flags))) | |
1592 | { | |
1593 | delete_from_delay_slot (trial); | |
1594 | delay_list = add_to_delay_list (trial, delay_list); | |
1595 | ||
1596 | if (++(*pslots_filled) == slots_to_fill) | |
1597 | break; | |
1598 | } | |
1599 | else | |
1600 | break; | |
1601 | } | |
1602 | ||
1603 | return delay_list; | |
1604 | } | |
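
/* Sketch on a made-up example: INSN falls through into SEQ, a filled
   unconditional branch.  Each of SEQ's delay insns that passes the
   tests is removed with delete_from_delay_slot and moved into INSN's
   own list; if such an insn is safe only when INSN does not branch,
   INSN is made annulling via eligible_for_annul_true.  */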
1605 | \f | |
1606 | /* Try merging insns starting at THREAD which match exactly the insns in | |
1607 | INSN's delay list. | |
1608 | ||
1609 | If all insns were matched and the insn was previously annulling, the | |
1610 | annul bit will be cleared. | |
1611 | ||
1612 | For each insn that is merged, if the branch is or will be non-annulling, | |
1613 | we delete the merged insn. */ | |
1614 | ||
1615 | static void | |
1616 | try_merge_delay_insns (insn, thread) | |
1617 | rtx insn, thread; | |
1618 | { | |
1619 | rtx trial, next_trial; | |
1620 | rtx delay_insn = XVECEXP (PATTERN (insn), 0, 0); | |
1621 | int annul_p = INSN_ANNULLED_BRANCH_P (delay_insn); | |
1622 | int slot_number = 1; | |
1623 | int num_slots = XVECLEN (PATTERN (insn), 0); | |
1624 | rtx next_to_match = XVECEXP (PATTERN (insn), 0, slot_number); | |
1625 | struct resources set, needed; | |
1626 | rtx merged_insns = 0; | |
1627 | int i; | |
1628 | int flags; | |
1629 | ||
1630 | flags = get_jump_flags (insn, JUMP_LABEL (insn)); | |
1631 | ||
1632 | CLEAR_RESOURCE (&needed); | |
1633 | CLEAR_RESOURCE (&set); | |
1634 | ||
1635 | /* If this is not an annulling branch, take into account anything needed in | |
1636 | NEXT_TO_MATCH. This prevents two increments from being incorrectly | |
1637 | folded into one. If we are annulling, this would be the correct | |
1638 | thing to do. (The alternative, looking at things set in NEXT_TO_MATCH, | |
1639 | will essentially disable this optimization. This method is somewhat of | |
1640 | a kludge, but I don't see a better way.) */ | |
1641 | if (! annul_p) | |
1642 | mark_referenced_resources (next_to_match, &needed, 1); | |
1643 | ||
1644 | for (trial = thread; !stop_search_p (trial, 1); trial = next_trial) | |
1645 | { | |
1646 | rtx pat = PATTERN (trial); | |
1647 | ||
1648 | next_trial = next_nonnote_insn (trial); | |
1649 | ||
1650 | /* TRIAL must be a CALL_INSN or INSN. Skip USE and CLOBBER. */ | |
1651 | if (GET_CODE (trial) == INSN | |
1652 | && (GET_CODE (pat) == USE || GET_CODE (pat) == CLOBBER)) | |
1653 | continue; | |
1654 | ||
1655 | if (GET_CODE (next_to_match) == GET_CODE (trial) | |
1656 | #ifdef HAVE_cc0 | |
1657 | /* We can't share an insn that sets cc0. */ | |
1658 | && ! sets_cc0_p (pat) | |
1659 | #endif | |
1660 | && ! insn_references_resource_p (trial, &set, 1) | |
1661 | && ! insn_sets_resource_p (trial, &set, 1) | |
1662 | && ! insn_sets_resource_p (trial, &needed, 1) | |
1663 | && (trial = try_split (pat, trial, 0)) != 0 | |
1664 | && rtx_equal_p (PATTERN (next_to_match), PATTERN (trial)) | |
1665 | /* Have to test this condition if the annul condition is different | |
1666 | from (and less restrictive than) the non-annulling one. */ | |
1667 | && eligible_for_delay (delay_insn, slot_number - 1, trial, flags)) | |
1668 | { | |
1669 | next_trial = next_nonnote_insn (trial); | |
1670 | ||
1671 | if (! annul_p) | |
1672 | { | |
1673 | update_block (trial, thread); | |
1674 | delete_insn (trial); | |
1675 | INSN_FROM_TARGET_P (next_to_match) = 0; | |
1676 | } | |
1677 | else | |
1678 | merged_insns = gen_rtx (INSN_LIST, VOIDmode, trial, merged_insns); | |
1679 | ||
1680 | if (++slot_number == num_slots) | |
1681 | break; | |
1682 | ||
1683 | next_to_match = XVECEXP (PATTERN (insn), 0, slot_number); | |
1684 | if (! annul_p) | |
1685 | mark_referenced_resources (next_to_match, &needed, 1); | |
1686 | } | |
1687 | ||
1688 | mark_set_resources (trial, &set, 0, 1); | |
1689 | mark_referenced_resources (trial, &needed, 1); | |
1690 | } | |
1691 | ||
1692 | /* See if we stopped on a filled insn. If we did, try to see if its | |
1693 | delay slots match. */ | |
1694 | if (slot_number != num_slots | |
1695 | && trial && GET_CODE (trial) == INSN | |
1696 | && GET_CODE (PATTERN (trial)) == SEQUENCE | |
1697 | && ! INSN_ANNULLED_BRANCH_P (XVECEXP (PATTERN (trial), 0, 0))) | |
1698 | { | |
1699 | rtx pat = PATTERN (trial); | |
1700 | ||
1701 | for (i = 1; i < XVECLEN (pat, 0); i++) | |
1702 | { | |
1703 | rtx dtrial = XVECEXP (pat, 0, i); | |
1704 | ||
1705 | if (! insn_references_resource_p (dtrial, &set, 1) | |
1706 | && ! insn_sets_resource_p (dtrial, &set, 1) | |
1707 | && ! insn_sets_resource_p (dtrial, &needed, 1) | |
1708 | #ifdef HAVE_cc0 | |
1709 | && ! sets_cc0_p (PATTERN (dtrial)) | |
1710 | #endif | |
1711 | && rtx_equal_p (PATTERN (next_to_match), PATTERN (dtrial)) | |
1712 | && eligible_for_delay (delay_insn, slot_number - 1, dtrial, flags)) | |
1713 | { | |
1714 | if (! annul_p) | |
1715 | { | |
1716 | update_block (dtrial, thread); | |
1717 | delete_from_delay_slot (dtrial); | |
1718 | INSN_FROM_TARGET_P (next_to_match) = 0; | |
1719 | } | |
1720 | else | |
1721 | merged_insns = gen_rtx (INSN_LIST, SImode, dtrial, | |
1722 | merged_insns); | |
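/* The SImode above, as opposed to the VOIDmode used in the first loop,
   tags DTRIAL as coming from a delay slot; the cleanup loop at the end
   of this function tests GET_MODE to choose delete_from_delay_slot
   over delete_insn.  */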
1723 | ||
1724 | if (++slot_number == num_slots) | |
1725 | break; | |
1726 | ||
1727 | next_to_match = XVECEXP (PATTERN (insn), 0, slot_number); | |
1728 | } | |
1729 | } | |
1730 | } | |
1731 | ||
1732 | /* If all insns in the delay slot have been matched and we were previously | |
1733 | annulling the branch, we need not do so any more. In that case delete | |
1734 | all the merged insns. Also clear the INSN_FROM_TARGET_P bit of each | |
1735 | insn in the delay list so that we know that it isn't only being used | |
1736 | at the target. */ | |
1737 | if (next_to_match == 0 && annul_p) | |
1738 | { | |
1739 | for (; merged_insns; merged_insns = XEXP (merged_insns, 1)) | |
1740 | { | |
1741 | if (GET_MODE (merged_insns) == SImode) | |
1742 | { | |
1743 | update_block (XEXP (merged_insns, 0), thread); | |
1744 | delete_from_delay_slot (XEXP (merged_insns, 0)); | |
1745 | } | |
1746 | else | |
1747 | { | |
1748 | update_block (XEXP (merged_insns, 0), thread); | |
1749 | delete_insn (XEXP (merged_insns, 0)); | |
1750 | } | |
1751 | } | |
1752 | ||
1753 | INSN_ANNULLED_BRANCH_P (delay_insn) = 0; | |
1754 | ||
1755 | for (i = 0; i < XVECLEN (PATTERN (insn), 0); i++) | |
1756 | INSN_FROM_TARGET_P (XVECEXP (PATTERN (insn), 0, i)) = 0; | |
1757 | } | |
1758 | } | |
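
/* Hypothetical example: INSN is an annulling branch whose first delay
   slot holds an `add', and THREAD begins with an identical `add'.  The
   add is matched and recorded on MERGED_INSNS; if every slot matches
   this way, the delay insns are correct on both paths, so the annul bit
   is cleared and the duplicate copies are deleted.  */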
1759 | \f | |
1760 | /* See if INSN is redundant with an insn in front of TARGET. Often this | |
1761 | is called when INSN is a candidate for a delay slot of TARGET. | |
1762 | DELAY_LIST are insns that will be placed in delay slots of TARGET in front | |
1763 | of INSN. Often INSN will be redundant with an insn in a delay slot of | |
1764 | some previous insn. This happens when we have a series of branches to the | |
1765 | same label; in that case the first insn at the target might want to go | |
1766 | into each of the delay slots. | |
1767 | ||
1768 | If we are not careful, this routine can take up a significant fraction | |
1769 | of the total compilation time (4%), but only wins rarely. Hence we | |
1770 | speed this routine up by making two passes. The first pass goes back | |
1771 | until it hits a label and sees if it finds an insn with an identical | |
1772 | pattern. Only in this (relatively rare) event does it check for | |
1773 | data conflicts. | |
1774 | ||
1775 | We do not split insns we encounter. This could cause us not to find a | |
1776 | redundant insn, but the cost of splitting seems greater than the possible | |
1777 | gain in rare cases. */ | |
1778 | ||
1779 | static int | |
1780 | redundant_insn_p (insn, target, delay_list) | |
1781 | rtx insn; | |
1782 | rtx target; | |
1783 | rtx delay_list; | |
1784 | { | |
1785 | rtx target_main = target; | |
1786 | rtx ipat = PATTERN (insn); | |
1787 | rtx trial, pat; | |
1788 | struct resources needed, set; | |
1789 | int i; | |
1790 | ||
1791 | /* Scan backwards looking for a match. */ | |
1792 | for (trial = PREV_INSN (target); trial; trial = PREV_INSN (trial)) | |
1793 | { | |
1794 | if (GET_CODE (trial) == CODE_LABEL) | |
1795 | return 0; | |
1796 | ||
1797 | if (GET_RTX_CLASS (GET_CODE (trial)) != 'i') | |
1798 | continue; | |
1799 | ||
1800 | pat = PATTERN (trial); | |
1801 | if (GET_CODE (pat) == USE || GET_CODE (pat) == CLOBBER) | |
1802 | continue; | |
1803 | ||
1804 | if (GET_CODE (pat) == SEQUENCE) | |
1805 | { | |
1806 | /* Stop for a CALL and its delay slots because it is difficult to | |
1807 | track its resource needs correctly. */ | |
1808 | if (GET_CODE (XVECEXP (pat, 0, 0)) == CALL_INSN) | |
1809 | return 0; | |
1810 | ||
1811 | /* Stop for an INSN or JUMP_INSN with delayed effects and its delay | |
1812 | slots because it is difficult to track its resource needs | |
1813 | correctly. */ | |
1814 | ||
1815 | #ifdef INSN_SETS_ARE_DELAYED | |
1816 | if (INSN_SETS_ARE_DELAYED (XVECEXP (pat, 0, 0))) | |
1817 | return 0; | |
1818 | #endif | |
1819 | ||
1820 | #ifdef INSN_REFERENCES_ARE_DELAYED | |
1821 | if (INSN_REFERENCES_ARE_DELAYED (XVECEXP (pat, 0, 0))) | |
1822 | return 0; | |
1823 | #endif | |
1824 | ||
1825 | /* See if any of the insns in the delay slot match, updating | |
1826 | resource requirements as we go. */ | |
1827 | for (i = XVECLEN (pat, 0) - 1; i > 0; i--) | |
1828 | if (GET_CODE (XVECEXP (pat, 0, i)) == GET_CODE (insn) | |
1829 | && rtx_equal_p (PATTERN (XVECEXP (pat, 0, i)), ipat)) | |
1830 | break; | |
1831 | ||
1832 | /* If found a match, exit this loop early. */ | |
1833 | if (i > 0) | |
1834 | break; | |
1835 | } | |
1836 | ||
1837 | else if (GET_CODE (trial) == GET_CODE (insn) && rtx_equal_p (pat, ipat)) | |
1838 | break; | |
1839 | } | |
1840 | ||
1841 | /* If we didn't find an insn that matches, return 0. */ | |
1842 | if (trial == 0) | |
1843 | return 0; | |
1844 | ||
1845 | /* See what resources this insn sets and needs. If they overlap, or | |
1846 | if this insn references CC0, it can't be redundant. */ | |
1847 | ||
1848 | CLEAR_RESOURCE (&needed); | |
1849 | CLEAR_RESOURCE (&set); | |
1850 | mark_set_resources (insn, &set, 0, 1); | |
1851 | mark_referenced_resources (insn, &needed, 1); | |
1852 | ||
1853 | /* If TARGET is a SEQUENCE, get the main insn. */ | |
1854 | if (GET_CODE (target) == INSN && GET_CODE (PATTERN (target)) == SEQUENCE) | |
1855 | target_main = XVECEXP (PATTERN (target), 0, 0); | |
1856 | ||
1857 | if (resource_conflicts_p (&needed, &set) | |
1858 | #ifdef HAVE_cc0 | |
1859 | || reg_mentioned_p (cc0_rtx, ipat) | |
1860 | #endif | |
1861 | /* The insn requiring the delay may not set anything needed or set by | |
1862 | INSN. */ | |
1863 | || insn_sets_resource_p (target_main, &needed, 1) | |
1864 | || insn_sets_resource_p (target_main, &set, 1)) | |
1865 | return 0; | |
1866 | ||
1867 | /* Insns we pass may not set either NEEDED or SET, so merge them for | |
1868 | simpler tests. */ | |
1869 | needed.memory |= set.memory; | |
1870 | IOR_HARD_REG_SET (needed.regs, set.regs); | |
1871 | ||
1872 | /* This insn isn't redundant if it conflicts with an insn that either is | |
1873 | or will be in a delay slot of TARGET. */ | |
1874 | ||
1875 | while (delay_list) | |
1876 | { | |
1877 | if (insn_sets_resource_p (XEXP (delay_list, 0), &needed, 1)) | |
1878 | return 0; | |
1879 | delay_list = XEXP (delay_list, 1); | |
1880 | } | |
1881 | ||
1882 | if (GET_CODE (target) == INSN && GET_CODE (PATTERN (target)) == SEQUENCE) | |
1883 | for (i = 1; i < XVECLEN (PATTERN (target), 0); i++) | |
1884 | if (insn_sets_resource_p (XVECEXP (PATTERN (target), 0, i), &needed, 1)) | |
1885 | return 0; | |
1886 | ||
1887 | /* Scan backwards until we reach a label or an insn that uses something | |
1888 | INSN sets or sets something INSN uses or sets. */ | |
1889 | ||
1890 | for (trial = PREV_INSN (target); | |
1891 | trial && GET_CODE (trial) != CODE_LABEL; | |
1892 | trial = PREV_INSN (trial)) | |
1893 | { | |
1894 | if (GET_CODE (trial) != INSN && GET_CODE (trial) != CALL_INSN | |
1895 | && GET_CODE (trial) != JUMP_INSN) | |
1896 | continue; | |
1897 | ||
1898 | pat = PATTERN (trial); | |
1899 | if (GET_CODE (pat) == USE || GET_CODE (pat) == CLOBBER) | |
1900 | continue; | |
1901 | ||
1902 | if (GET_CODE (pat) == SEQUENCE) | |
1903 | { | |
1904 | /* If this is a CALL_INSN and its delay slots, it is hard to track | |
1905 | the resource needs properly, so give up. */ | |
1906 | if (GET_CODE (XVECEXP (pat, 0, 0)) == CALL_INSN) | |
1907 | return 0; | |
1908 | ||
1909 | /* If this is an INSN or JUMP_INSN with delayed effects, it | |
1910 | is hard to track the resource needs properly, so give up. */ | |
1911 | ||
1912 | #ifdef INSN_SETS_ARE_DELAYED | |
1913 | if (INSN_SETS_ARE_DELAYED (XVECEXP (pat, 0, 0))) | |
1914 | return 0; | |
1915 | #endif | |
1916 | ||
1917 | #ifdef INSN_REFERENCES_ARE_DELAYED | |
1918 | if (INSN_REFERENCES_ARE_DELAYED (XVECEXP (pat, 0, 0))) | |
1919 | return 0; | |
1920 | #endif | |
1921 | ||
1922 | /* See if any of the insns in the delay slot match, updating | |
1923 | resource requirements as we go. */ | |
1924 | for (i = XVECLEN (pat, 0) - 1; i > 0; i--) | |
1925 | { | |
1926 | rtx candidate = XVECEXP (pat, 0, i); | |
1927 | ||
1928 | /* If an insn will be annulled if the branch is false, it isn't | |
1929 | considered as a possible duplicate insn. */ | |
1930 | if (rtx_equal_p (PATTERN (candidate), ipat) | |
1931 | && ! (INSN_ANNULLED_BRANCH_P (XVECEXP (pat, 0, 0)) | |
1932 | && INSN_FROM_TARGET_P (candidate))) | |
1933 | { | |
1934 | /* Show that this insn will be used in the sequel. */ | |
1935 | INSN_FROM_TARGET_P (candidate) = 0; | |
1936 | return 1; | |
1937 | } | |
1938 | ||
1939 | /* Unless this is an annulled insn from the target of a branch, | |
1940 | we must stop if it sets anything needed or set by INSN. */ | |
1941 | if ((! INSN_ANNULLED_BRANCH_P (XVECEXP (pat, 0, 0)) | |
1942 | || ! INSN_FROM_TARGET_P (candidate)) | |
1943 | && insn_sets_resource_p (candidate, &needed, 1)) | |
1944 | return 0; | |
1945 | } | |
1946 | ||
1947 | ||
1948 | /* If the insn requiring the delay slot conflicts with INSN, we | |
1949 | must stop. */ | |
1950 | if (insn_sets_resource_p (XVECEXP (pat, 0, 0), &needed, 1)) | |
1951 | return 0; | |
1952 | } | |
1953 | else | |
1954 | { | |
1955 | /* See if TRIAL is the same as INSN. */ | |
1956 | pat = PATTERN (trial); | |
1957 | if (rtx_equal_p (pat, ipat)) | |
1958 | return 1; | |
1959 | ||
1960 | /* Can't go any further if TRIAL conflicts with INSN. */ | |
1961 | if (insn_sets_resource_p (trial, &needed, 1)) | |
1962 | return 0; | |
1963 | } | |
1964 | } | |
1965 | ||
1966 | return 0; | |
1967 | } | |
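
/* Hypothetical example: several branches jump to the same label L, and
   the first insn at L, say `mov 1,r2', was already copied into the
   delay slot of an earlier branch to L.  When a later branch wants the
   same mov, the first backward pass above finds the identical pattern
   in that delay slot, and the second pass checks that nothing in
   between clobbers what the mov uses or sets.  */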
1968 | \f | |
1969 | /* Return 1 if THREAD can only be executed in one way. If LABEL is non-zero, | |
1970 | it is the target of the branch insn being scanned. If ALLOW_FALLTHROUGH | |
1971 | is non-zero, we are allowed to fall into this thread; otherwise, we are | |
1972 | not. | |
1973 | ||
1974 | If LABEL is used more than once or we pass a label other than LABEL before | |
1975 | finding an active insn, we do not own this thread. */ | |
1976 | ||
1977 | static int | |
1978 | own_thread_p (thread, label, allow_fallthrough) | |
1979 | rtx thread; | |
1980 | rtx label; | |
1981 | int allow_fallthrough; | |
1982 | { | |
1983 | rtx active_insn; | |
1984 | rtx insn; | |
1985 | ||
1986 | /* We don't own the function end. */ | |
1987 | if (thread == 0) | |
1988 | return 0; | |
1989 | ||
1990 | /* Get the first active insn, or THREAD, if it is an active insn. */ | |
1991 | active_insn = next_active_insn (PREV_INSN (thread)); | |
1992 | ||
1993 | for (insn = thread; insn != active_insn; insn = NEXT_INSN (insn)) | |
1994 | if (GET_CODE (insn) == CODE_LABEL | |
1995 | && (insn != label || LABEL_NUSES (insn) != 1)) | |
1996 | return 0; | |
1997 | ||
1998 | if (allow_fallthrough) | |
1999 | return 1; | |
2000 | ||
2001 | /* Ensure that we reach a BARRIER before any insn or label. */ | |
2002 | for (insn = prev_nonnote_insn (thread); | |
2003 | insn == 0 || GET_CODE (insn) != BARRIER; | |
2004 | insn = prev_nonnote_insn (insn)) | |
2005 | if (insn == 0 | |
2006 | || GET_CODE (insn) == CODE_LABEL | |
2007 | || (GET_CODE (insn) == INSN | |
2008 | && GET_CODE (PATTERN (insn)) != USE | |
2009 | && GET_CODE (PATTERN (insn)) != CLOBBER)) | |
2010 | return 0; | |
2011 | ||
2012 | return 1; | |
2013 | } | |
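
/* Example: a thread whose label is referenced by exactly one branch
   and is reachable only past a BARRIER is owned, so we may modify it
   freely.  A second use of the label, or a possible fall-through when
   ALLOW_FALLTHROUGH is zero, means other code can reach the thread and
   we do not own it.  */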
2014 | \f | |
2015 | /* Find the number of the basic block that starts closest to INSN. Return -1 | |
2016 | if we couldn't find such a basic block. */ | |
2017 | ||
2018 | static int | |
2019 | find_basic_block (insn) | |
2020 | rtx insn; | |
2021 | { | |
2022 | int i; | |
2023 | ||
2024 | /* Scan backwards to the previous BARRIER. Then see if we can find a | |
2025 | label that starts a basic block. Return the basic block number. */ | |
2026 | ||
2027 | for (insn = prev_nonnote_insn (insn); | |
2028 | insn && GET_CODE (insn) != BARRIER; | |
2029 | insn = prev_nonnote_insn (insn)) | |
2030 | ; | |
2031 | ||
2032 | /* The start of the function is basic block zero. */ | |
2033 | if (insn == 0) | |
2034 | return 0; | |
2035 | ||
2036 | /* See if any of the upcoming CODE_LABELs start a basic block. If we reach | |
2037 | anything other than a CODE_LABEL or note, we can't find this code. */ | |
2038 | for (insn = next_nonnote_insn (insn); | |
2039 | insn && GET_CODE (insn) == CODE_LABEL; | |
2040 | insn = next_nonnote_insn (insn)) | |
2041 | { | |
2042 | for (i = 0; i < n_basic_blocks; i++) | |
2043 | if (insn == basic_block_head[i]) | |
2044 | return i; | |
2045 | } | |
2046 | ||
2047 | return -1; | |
2048 | } | |
2049 | \f | |
2050 | /* Called when INSN is being moved from a location near the target of a jump. | |
2051 | We leave a marker of the form (use (INSN)) immediately in front | |
2052 | of WHERE for mark_target_live_regs. These markers will be deleted when | |
2053 | reorg finishes. | |
2054 | ||
2055 | We used to try to update the live status of registers if WHERE is at | |
2056 | the start of a basic block, but that can't work since we may remove a | |
2057 | BARRIER in relax_delay_slots. */ | |
2058 | ||
2059 | static void | |
2060 | update_block (insn, where) | |
2061 | rtx insn; | |
2062 | rtx where; | |
2063 | { | |
2064 | int b; | |
2065 | ||
2066 | /* Ignore if this was in a delay slot and it came from the target of | |
2067 | a branch. */ | |
2068 | if (INSN_FROM_TARGET_P (insn)) | |
2069 | return; | |
2070 | ||
2071 | emit_insn_before (gen_rtx (USE, VOIDmode, insn), where); | |
2072 | ||
2073 | /* INSN might be making a value live in a block where it didn't use to | |
2074 | be. So recompute liveness information for this block. */ | |
2075 | ||
2076 | b = find_basic_block (insn); | |
2077 | if (b != -1) | |
2078 | bb_ticks[b]++; | |
2079 | } | |
2080 | ||
2081 | /* Similar to REDIRECT_JUMP except that we update the BB_TICKS entry for | |
2082 | the basic block containing the jump. */ | |
2083 | ||
2084 | static int | |
2085 | reorg_redirect_jump (jump, nlabel) | |
2086 | rtx jump; | |
2087 | rtx nlabel; | |
2088 | { | |
2089 | int b = find_basic_block (jump); | |
2090 | ||
2091 | if (b != -1) | |
2092 | bb_ticks[b]++; | |
2093 | ||
2094 | return redirect_jump (jump, nlabel); | |
2095 | } | |
2096 | ||
2097 | /* Called when INSN is being moved forward into a delay slot of DELAYED_INSN. | |
2098 | We check every instruction between INSN and DELAYED_INSN for REG_DEAD notes | |
2099 | that reference values used in INSN. If we find one, then we move the | |
2100 | REG_DEAD note to INSN. | |
2101 | ||
2102 | This is needed to handle the case where a later insn (after INSN) has a | |
2103 | REG_DEAD note for a register used by INSN, and this later insn subsequently | |
2104 | gets moved before a CODE_LABEL because it is a redundant insn. In this | |
2105 | case, mark_target_live_regs may be confused into thinking the register | |
2106 | is dead because it sees a REG_DEAD note immediately before a CODE_LABEL. */ | |
2107 | ||
2108 | static void | |
2109 | update_reg_dead_notes (insn, delayed_insn) | |
2110 | rtx insn, delayed_insn; | |
2111 | { | |
2112 | rtx p, link, next; | |
2113 | ||
2114 | for (p = next_nonnote_insn (insn); p != delayed_insn; | |
2115 | p = next_nonnote_insn (p)) | |
2116 | for (link = REG_NOTES (p); link; link = next) | |
2117 | { | |
2118 | next = XEXP (link, 1); | |
2119 | ||
2120 | if (REG_NOTE_KIND (link) != REG_DEAD | |
2121 | || GET_CODE (XEXP (link, 0)) != REG) | |
2122 | continue; | |
2123 | ||
2124 | if (reg_referenced_p (XEXP (link, 0), PATTERN (insn))) | |
2125 | { | |
2126 | /* Move the REG_DEAD note from P to INSN. */ | |
2127 | remove_note (p, link); | |
2128 | XEXP (link, 1) = REG_NOTES (insn); | |
2129 | REG_NOTES (insn) = link; | |
2130 | } | |
2131 | } | |
2132 | } | |
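
/* Hypothetical example: INSN uses register 4 and is moving into
   DELAYED_INSN's slot, while an insn P between them carries a REG_DEAD
   note for register 4.  The note migrates from P onto INSN, so that if
   P is later moved in front of a CODE_LABEL as a redundant insn,
   mark_target_live_regs will not see the stale REG_DEAD note right
   before the label and wrongly consider register 4 dead.  */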
2133 | \f | |
2134 | /* Marks registers possibly live at the current place being scanned by | |
2135 | mark_target_live_regs. Used only by the next two functions. */ | |
2136 | ||
2137 | static HARD_REG_SET current_live_regs; | |
2138 | ||
2139 | /* Marks registers for which we have seen a REG_DEAD note but no assignment. | |
2140 | Also only used by the next two functions. */ | |
2141 | ||
2142 | static HARD_REG_SET pending_dead_regs; | |
2143 | ||
2144 | /* Utility function called from mark_target_live_regs via note_stores. | |
2145 | It deadens any CLOBBERed registers and livens any SET registers. */ | |
2146 | ||
2147 | static void | |
2148 | update_live_status (dest, x) | |
2149 | rtx dest; | |
2150 | rtx x; | |
2151 | { | |
2152 | int first_regno, last_regno; | |
2153 | int i; | |
2154 | ||
2155 | if (GET_CODE (dest) != REG | |
2156 | && (GET_CODE (dest) != SUBREG || GET_CODE (SUBREG_REG (dest)) != REG)) | |
2157 | return; | |
2158 | ||
2159 | if (GET_CODE (dest) == SUBREG) | |
2160 | first_regno = REGNO (SUBREG_REG (dest)) + SUBREG_WORD (dest); | |
2161 | else | |
2162 | first_regno = REGNO (dest); | |
2163 | ||
2164 | last_regno = first_regno + HARD_REGNO_NREGS (first_regno, GET_MODE (dest)); | |
2165 | ||
2166 | if (GET_CODE (x) == CLOBBER) | |
2167 | for (i = first_regno; i < last_regno; i++) | |
2168 | CLEAR_HARD_REG_BIT (current_live_regs, i); | |
2169 | else | |
2170 | for (i = first_regno; i < last_regno; i++) | |
2171 | { | |
2172 | SET_HARD_REG_BIT (current_live_regs, i); | |
2173 | CLEAR_HARD_REG_BIT (pending_dead_regs, i); | |
2174 | } | |
2175 | } | |
2176 | ||
2177 | /* Similar to next_insn, but ignores insns in the delay slots of | |
2178 | an annulled branch. */ | |
2179 | ||
2180 | static rtx | |
2181 | next_insn_no_annul (insn) | |
2182 | rtx insn; | |
2183 | { | |
2184 | if (insn) | |
2185 | { | |
2186 | /* If INSN is an annulled branch, skip any insns from the target | |
2187 | of the branch. */ | |
2188 | if (INSN_ANNULLED_BRANCH_P (insn) | |
2189 | && NEXT_INSN (PREV_INSN (insn)) != insn) | |
2190 | while (INSN_FROM_TARGET_P (NEXT_INSN (insn))) | |
2191 | insn = NEXT_INSN (insn); | |
2192 | ||
2193 | insn = NEXT_INSN (insn); | |
2194 | if (insn && GET_CODE (insn) == INSN | |
2195 | && GET_CODE (PATTERN (insn)) == SEQUENCE) | |
2196 | insn = XVECEXP (PATTERN (insn), 0, 0); | |
2197 | } | |
2198 | ||
2199 | return insn; | |
2200 | } | |
2201 | \f | |
2202 | /* Set the resources that are live at TARGET. | |
2203 | ||
2204 | If TARGET is zero, we refer to the end of the current function and can | |
2205 | return our precomputed value. | |
2206 | ||
2207 | Otherwise, we try to find out what is live by consulting the basic block | |
2208 | information. This is tricky, because we must consider the actions of | |
2209 | reload and jump optimization, which occur after the basic block information | |
2210 | has been computed. | |
2211 | ||
2212 | Accordingly, we proceed as follows: | |
2213 | ||
2214 | We find the previous BARRIER and look at all immediately following labels | |
2215 | (with no intervening active insns) to see if any of them start a basic | |
2216 | block. If we hit the start of the function first, we use block 0. | |
2217 | ||
2218 | Once we have found a basic block and a corresponding first insn, we can | |
2219 | accurately compute the live status from basic_block_live_regs and | |
2220 | reg_renumber. (By starting at a label following a BARRIER, we are immune | |
2221 | to actions taken by reload and jump.) Then we scan all insns between | |
2222 | that point and our target. For each CLOBBER (or for call-clobbered regs | |
2223 | when we pass a CALL_INSN), mark the appropriate registers as dead. For | |
2224 | a SET, mark them as live. | |
2225 | ||
2226 | We have to be careful when using REG_DEAD notes because they are not | |
2227 | updated by such things as find_equiv_reg. So keep track of registers | |
2228 | marked as dead that haven't been assigned to, and mark them dead at the | |
2229 | next CODE_LABEL since reload and jump won't propagate values across labels. | |
2230 | ||
2231 | If we cannot find the start of a basic block (should be a very rare | |
2232 | case, if it can happen at all), mark everything as potentially live. | |
2233 | ||
2234 | Next, scan forward from TARGET looking for things set or clobbered | |
2235 | before they are used. These are not live. | |
2236 | ||
2237 | Because we can be called many times on the same target, save our results | |
2238 | in a hash table indexed by INSN_UID. */ | |
2239 | ||
2240 | static void | |
2241 | mark_target_live_regs (target, res) | |
2242 | rtx target; | |
2243 | struct resources *res; | |
2244 | { | |
2245 | int b = -1; | |
2246 | int i; | |
2247 | struct target_info *tinfo; | |
2248 | rtx insn, next; | |
2249 | rtx jump_insn = 0; | |
2250 | rtx jump_target; | |
2251 | HARD_REG_SET scratch; | |
2252 | struct resources set, needed; | |
2253 | int jump_count = 0; | |
2254 | ||
2255 | /* Handle end of function. */ | |
2256 | if (target == 0) | |
2257 | { | |
2258 | *res = end_of_function_needs; | |
2259 | return; | |
2260 | } | |
2261 | ||
2262 | /* We have to assume memory is needed, but the CC isn't. */ | |
2263 | res->memory = 1; | |
2264 | res->volatil = 0; | |
2265 | res->cc = 0; | |
2266 | ||
2267 | /* See if we have computed this value already. */ | |
2268 | for (tinfo = target_hash_table[INSN_UID (target) % TARGET_HASH_PRIME]; | |
2269 | tinfo; tinfo = tinfo->next) | |
2270 | if (tinfo->uid == INSN_UID (target)) | |
2271 | break; | |
2272 | ||
2273 | /* Start by getting the basic block number. If we have saved information, | |
2274 | we can get it from there unless the insn at the start of the basic block | |
2275 | has been deleted. */ | |
2276 | if (tinfo && tinfo->block != -1 | |
2277 | && ! INSN_DELETED_P (basic_block_head[tinfo->block])) | |
2278 | b = tinfo->block; | |
2279 | ||
2280 | if (b == -1) | |
2281 | b = find_basic_block (target); | |
2282 | ||
2283 | if (tinfo) | |
2284 | { | |
2285 | /* If the information is up-to-date, use it. Otherwise, we will | |
2286 | update it below. */ | |
2287 | if (b == tinfo->block && b != -1 && tinfo->bb_tick == bb_ticks[b]) | |
2288 | { | |
2289 | COPY_HARD_REG_SET (res->regs, tinfo->live_regs); | |
2290 | return; | |
2291 | } | |
2292 | } | |
2293 | else | |
2294 | { | |
2295 | /* Allocate a place to put our results and chain it into the | |
2296 | hash table. */ | |
2297 | tinfo = (struct target_info *) oballoc (sizeof (struct target_info)); | |
2298 | tinfo->uid = INSN_UID (target); | |
2299 | tinfo->block = b; | |
2300 | tinfo->next = target_hash_table[INSN_UID (target) % TARGET_HASH_PRIME]; | |
2301 | target_hash_table[INSN_UID (target) % TARGET_HASH_PRIME] = tinfo; | |
2302 | } | |
2303 | ||
2304 | CLEAR_HARD_REG_SET (pending_dead_regs); | |
2305 | ||
2306 | /* If we found a basic block, get the live registers from it and update | |
2307 | them with anything set or killed between its start and the insn before | |
2308 | TARGET. Otherwise, we must assume everything is live. */ | |
2309 | if (b != -1) | |
2310 | { | |
2311 | regset regs_live = basic_block_live_at_start[b]; | |
2312 | int offset, j; | |
2313 | REGSET_ELT_TYPE bit; | |
2314 | int regno; | |
2315 | rtx start_insn, stop_insn; | |
2316 | ||
2317 | /* Compute hard regs live at start of block -- this is the real hard regs | |
2318 | marked live, plus live pseudo regs that have been renumbered to | |
2319 | hard regs. */ | |
2320 | ||
2321 | #ifdef HARD_REG_SET | |
2322 | current_live_regs = *regs_live; | |
2323 | #else | |
2324 | COPY_HARD_REG_SET (current_live_regs, regs_live); | |
2325 | #endif | |
2326 | ||
2327 | for (offset = 0, i = 0; offset < regset_size; offset++) | |
2328 | { | |
2329 | if (regs_live[offset] == 0) | |
2330 | i += REGSET_ELT_BITS; | |
2331 | else | |
2332 | for (bit = 1; bit && i < max_regno; bit <<= 1, i++) | |
2333 | if ((regs_live[offset] & bit) | |
2334 | && (regno = reg_renumber[i]) >= 0) | |
2335 | for (j = regno; | |
2336 | j < regno + HARD_REGNO_NREGS (regno, | |
2337 | PSEUDO_REGNO_MODE (i)); | |
2338 | j++) | |
2339 | SET_HARD_REG_BIT (current_live_regs, j); | |
2340 | } | |
2341 | ||
2342 | /* Get starting and ending insn, handling the case where each might | |
2343 | be a SEQUENCE. */ | |
2344 | start_insn = (b == 0 ? get_insns () : basic_block_head[b]); | |
2345 | stop_insn = target; | |
2346 | ||
2347 | if (GET_CODE (start_insn) == INSN | |
2348 | && GET_CODE (PATTERN (start_insn)) == SEQUENCE) | |
2349 | start_insn = XVECEXP (PATTERN (start_insn), 0, 0); | |
2350 | ||
2351 | if (GET_CODE (stop_insn) == INSN | |
2352 | && GET_CODE (PATTERN (stop_insn)) == SEQUENCE) | |
2353 | stop_insn = next_insn (PREV_INSN (stop_insn)); | |
2354 | ||
2355 | for (insn = start_insn; insn != stop_insn; | |
2356 | insn = next_insn_no_annul (insn)) | |
2357 | { | |
2358 | rtx link; | |
2359 | rtx real_insn = insn; | |
2360 | ||
2361 | /* If this insn is from the target of a branch, it isn't going to | |
2362 | be used in the sequel. If it is used in both cases, this | |
2363 | test will not be true. */ | |
2364 | if (INSN_FROM_TARGET_P (insn)) | |
2365 | continue; | |
2366 | ||
2367 | /* If this insn is a USE made by update_block, we care about the | |
2368 | underlying insn. */ | |
2369 | if (GET_CODE (insn) == INSN && GET_CODE (PATTERN (insn)) == USE | |
2370 | && GET_RTX_CLASS (GET_CODE (XEXP (PATTERN (insn), 0))) == 'i') | |
2371 | real_insn = XEXP (PATTERN (insn), 0); | |
2372 | ||
2373 | if (GET_CODE (real_insn) == CALL_INSN) | |
2374 | { | |
2375 | /* CALL clobbers all call-used regs that aren't fixed except | |
2376 | sp, ap, and fp. Do this before setting the result of the | |
2377 | call live. */ | |
2378 | for (i = 0; i < FIRST_PSEUDO_REGISTER; i++) | |
2379 | if (call_used_regs[i] | |
2380 | && i != STACK_POINTER_REGNUM && i != FRAME_POINTER_REGNUM | |
2381 | && i != ARG_POINTER_REGNUM | |
2382 | #if ARG_POINTER_REGNUM != FRAME_POINTER_REGNUM | |
2383 | && ! (i == ARG_POINTER_REGNUM && fixed_regs[i]) | |
2384 | #endif | |
2385 | #ifdef PIC_OFFSET_TABLE_REGNUM | |
2386 | && ! (i == PIC_OFFSET_TABLE_REGNUM && flag_pic) | |
2387 | #endif | |
2388 | ) | |
2389 | CLEAR_HARD_REG_BIT (current_live_regs, i); | |
2390 | ||
2391 | /* A CALL_INSN sets any global register live, since it may | |
2392 | have been modified by the call. */ | |
2393 | for (i = 0; i < FIRST_PSEUDO_REGISTER; i++) | |
2394 | if (global_regs[i]) | |
2395 | SET_HARD_REG_BIT (current_live_regs, i); | |
2396 | } | |
2397 | ||
2398 | /* Mark anything killed in an insn to be deadened at the next | |
2399 | label. Ignore USE insns; the only REG_DEAD notes will be for | |
2400 | parameters. But they might be early. A CALL_INSN will usually | |
2401 | clobber registers used for parameters. It isn't worth bothering | |
2402 | with the unlikely case when it won't. */ | |
2403 | if ((GET_CODE (real_insn) == INSN | |
2404 | && GET_CODE (PATTERN (real_insn)) != USE) | |
2405 | || GET_CODE (real_insn) == JUMP_INSN | |
2406 | || GET_CODE (real_insn) == CALL_INSN) | |
2407 | { | |
2408 | for (link = REG_NOTES (real_insn); link; link = XEXP (link, 1)) | |
2409 | if (REG_NOTE_KIND (link) == REG_DEAD | |
2410 | && GET_CODE (XEXP (link, 0)) == REG | |
2411 | && REGNO (XEXP (link, 0)) < FIRST_PSEUDO_REGISTER) | |
2412 | { | |
2413 | int first_regno = REGNO (XEXP (link, 0)); | |
2414 | int last_regno | |
2415 | = (first_regno | |
2416 | + HARD_REGNO_NREGS (first_regno, | |
2417 | GET_MODE (XEXP (link, 0)))); | |
2418 | ||
2419 | for (i = first_regno; i < last_regno; i++) | |
2420 | SET_HARD_REG_BIT (pending_dead_regs, i); | |
2421 | } | |
2422 | ||
2423 | note_stores (PATTERN (real_insn), update_live_status); | |
2424 | ||
2425 | /* If any registers were unused after this insn, kill them. | |
2426 | These notes will always be accurate. */ | |
2427 | for (link = REG_NOTES (real_insn); link; link = XEXP (link, 1)) | |
2428 | if (REG_NOTE_KIND (link) == REG_UNUSED | |
2429 | && GET_CODE (XEXP (link, 0)) == REG | |
2430 | && REGNO (XEXP (link, 0)) < FIRST_PSEUDO_REGISTER) | |
2431 | { | |
2432 | int first_regno = REGNO (XEXP (link, 0)); | |
2433 | int last_regno | |
2434 | = (first_regno | |
2435 | + HARD_REGNO_NREGS (first_regno, | |
2436 | GET_MODE (XEXP (link, 0)))); | |
2437 | ||
2438 | for (i = first_regno; i < last_regno; i++) | |
2439 | CLEAR_HARD_REG_BIT (current_live_regs, i); | |
2440 | } | |
2441 | } | |
2442 | ||
2443 | else if (GET_CODE (real_insn) == CODE_LABEL) | |
2444 | { | |
2445 | /* A label clobbers the pending dead registers since neither | |
2446 | reload nor jump will propagate a value across a label. */ | |
2447 | AND_COMPL_HARD_REG_SET (current_live_regs, pending_dead_regs); | |
2448 | CLEAR_HARD_REG_SET (pending_dead_regs); | |
2449 | } | |
2450 | ||
2451 | /* The beginning of the epilogue corresponds to the end of the | |
2452 | RTL chain when there are no epilogue insns. Certain resources | |
2453 | are implicitly required at that point. */ | |
2454 | else if (GET_CODE (real_insn) == NOTE | |
2455 | && NOTE_LINE_NUMBER (real_insn) == NOTE_INSN_EPILOGUE_BEG) | |
2456 | IOR_HARD_REG_SET (current_live_regs, start_of_epilogue_needs.regs); | |
2457 | } | |
2458 | ||
2459 | COPY_HARD_REG_SET (res->regs, current_live_regs); | |
2460 | tinfo->block = b; | |
2461 | tinfo->bb_tick = bb_ticks[b]; | |
2462 | } | |
2463 | else | |
2464 | /* We didn't find the start of a basic block. Assume everything | |
2465 | in use. This should happen only extremely rarely. */ | |
2466 | SET_HARD_REG_SET (res->regs); | |
2467 | ||
2468 | /* Now step forward from TARGET looking for registers that are set before | |
2469 | they are used. These are dead. If we pass a label, any pending dead | |
2470 | registers that weren't yet used can be made dead. Stop when we pass a | |
2471 | conditional JUMP_INSN; follow the first few unconditional branches. */ | |
2472 | ||
2473 | CLEAR_RESOURCE (&set); | |
2474 | CLEAR_RESOURCE (&needed); | |
2475 | ||
2476 | for (insn = target; insn; insn = next) | |
2477 | { | |
2478 | rtx this_jump_insn = insn; | |
2479 | ||
2480 | next = NEXT_INSN (insn); | |
2481 | switch (GET_CODE (insn)) | |
2482 | { | |
2483 | case CODE_LABEL: | |
2484 | AND_COMPL_HARD_REG_SET (pending_dead_regs, needed.regs); | |
2485 | AND_COMPL_HARD_REG_SET (res->regs, pending_dead_regs); | |
2486 | CLEAR_HARD_REG_SET (pending_dead_regs); | |
2487 | continue; | |
2488 | ||
2489 | case BARRIER: | |
2490 | case NOTE: | |
2491 | continue; | |
2492 | ||
2493 | case INSN: | |
2494 | if (GET_CODE (PATTERN (insn)) == USE) | |
2495 | { | |
2496 | /* If INSN is a USE made by update_block, we care about the | |
2497 | underlying insn. Any registers set by the underlying insn | |
2498 | are live since the insn is being done somewhere else. */ | |
2499 | if (GET_RTX_CLASS (GET_CODE (XEXP (PATTERN (insn), 0))) == 'i') | |
2500 | mark_set_resources (XEXP (PATTERN (insn), 0), res, 0, 1); | |
2501 | ||
2502 | /* All other USE insns are to be ignored. */ | |
2503 | continue; | |
2504 | } | |
2505 | else if (GET_CODE (PATTERN (insn)) == CLOBBER) | |
2506 | continue; | |
2507 | else if (GET_CODE (PATTERN (insn)) == SEQUENCE) | |
2508 | { | |
2509 | /* An unconditional jump can be used to fill the delay slot | |
2510 | of a call, so search for a JUMP_INSN in any position. */ | |
2511 | for (i = 0; i < XVECLEN (PATTERN (insn), 0); i++) | |
2512 | { | |
2513 | this_jump_insn = XVECEXP (PATTERN (insn), 0, i); | |
2514 | if (GET_CODE (this_jump_insn) == JUMP_INSN) | |
2515 | break; | |
2516 | } | |
2517 | } | |
2518 | } | |
2519 | ||
2520 | if (GET_CODE (this_jump_insn) == JUMP_INSN) | |
2521 | { | |
2522 | if (jump_count++ < 10 | |
2523 | && (simplejump_p (this_jump_insn) | |
2524 | || GET_CODE (PATTERN (this_jump_insn)) == RETURN)) | |
2525 | { | |
2526 | next = next_active_insn (JUMP_LABEL (this_jump_insn)); | |
2527 | if (jump_insn == 0) | |
2528 | { | |
2529 | jump_insn = insn; | |
2530 | jump_target = JUMP_LABEL (this_jump_insn); | |
2531 | } | |
2532 | } | |
2533 | else | |
2534 | break; | |
2535 | } | |
2536 | ||
2537 | mark_referenced_resources (insn, &needed, 1); | |
2538 | mark_set_resources (insn, &set, 0, 1); | |
2539 | ||
2540 | COPY_HARD_REG_SET (scratch, set.regs); | |
2541 | AND_COMPL_HARD_REG_SET (scratch, needed.regs); | |
2542 | AND_COMPL_HARD_REG_SET (res->regs, scratch); | |
2543 | } | |
2544 | ||
2545 | /* If we hit an unconditional branch, we have another way of finding out | |
2546 | what is live: we can see what is live at the branch target and include | |
2547 | anything used but not set before the branch. The only things that are | |
2548 | live are those that are live using the above test and the test below. | |
2549 | ||
2550 | Don't try this if we expired our jump count above, since that would | |
2551 | mean there may be an infinite loop in the function being compiled. */ | |
2552 | ||
2553 | if (jump_insn && jump_count < 10) | |
2554 | { | |
2555 | struct resources new_resources; | |
2556 | rtx stop_insn = next_active_insn (jump_insn); | |
2557 | ||
2558 | mark_target_live_regs (next_active_insn (jump_target), &new_resources); | |
2559 | CLEAR_RESOURCE (&set); | |
2560 | CLEAR_RESOURCE (&needed); | |
2561 | ||
2562 | /* Include JUMP_INSN in the needed registers. */ | |
2563 | for (insn = target; insn != stop_insn; insn = next_active_insn (insn)) | |
2564 | { | |
2565 | mark_referenced_resources (insn, &needed, 1); | |
2566 | ||
2567 | COPY_HARD_REG_SET (scratch, needed.regs); | |
2568 | AND_COMPL_HARD_REG_SET (scratch, set.regs); | |
2569 | IOR_HARD_REG_SET (new_resources.regs, scratch); | |
2570 | ||
2571 | mark_set_resources (insn, &set, 0, 1); | |
2572 | } | |
2573 | ||
2574 | AND_HARD_REG_SET (res->regs, new_resources.regs); | |
2575 | } | |
2576 | ||
2577 | COPY_HARD_REG_SET (tinfo->live_regs, res->regs); | |
2578 | } | |
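
/* Example of the forward scan at the end of the routine above: if the
   insns following TARGET set register 9 before anything reads it, then
   register 9 is not live at TARGET and is removed from RES->regs, even
   if the basic-block data recorded it as live at the block's start.  */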
2579 | \f | |
2580 | /* Scan a function looking for insns that need a delay slot and find insns to | |
2581 | put into the delay slot. | |
2582 | ||
2583 | NON_JUMPS_P is non-zero if we are only to try to fill non-jump insns (such | |
2584 | as calls). We do these first since we don't want jump insns (that are | |
2585 | easier to fill) to get the only insns that could be used for non-jump insns. | |
2586 | When it is zero, only try to fill JUMP_INSNs. | |
2587 | ||
2588 | When slots are filled in this manner, the insns (including the | |
2589 | delay_insn) are put together in a SEQUENCE rtx. In this fashion, | |
2590 | it is possible to tell whether a delay slot has really been filled | |
2591 | or not. `final' knows how to deal with this, by communicating | |
2592 | through FINAL_SEQUENCE. */ | |
2593 | ||
2594 | static void | |
2595 | fill_simple_delay_slots (first, non_jumps_p) | |
2596 | rtx first; | |
2597 | int non_jumps_p; | |
2598 | { | |
2599 | register rtx insn, pat, trial, next_trial; | |
2600 | register int i, j; | |
2601 | int num_unfilled_slots = unfilled_slots_next - unfilled_slots_base; | |
2602 | struct resources needed, set; | |
2603 | register int slots_to_fill, slots_filled; | |
2604 | rtx delay_list; | |
2605 | ||
2606 | for (i = 0; i < num_unfilled_slots; i++) | |
2607 | { | |
2608 | int flags; | |
2609 | /* Get the next insn to fill. If it has already had any slots assigned, | |
2610 | we can't do anything with it. Maybe we'll improve this later. */ | |
2611 | ||
2612 | insn = unfilled_slots_base[i]; | |
2613 | if (insn == 0 | |
2614 | || INSN_DELETED_P (insn) | |
2615 | || (GET_CODE (insn) == INSN | |
2616 | && GET_CODE (PATTERN (insn)) == SEQUENCE) | |
2617 | || (GET_CODE (insn) == JUMP_INSN && non_jumps_p) | |
2618 | || (GET_CODE (insn) != JUMP_INSN && ! non_jumps_p)) | |
2619 | continue; | |
2620 | ||
2621 | flags = get_jump_flags (insn, JUMP_LABEL (insn)); | |
2622 | slots_to_fill = num_delay_slots (insn); | |
2623 | if (slots_to_fill == 0) | |
2624 | abort (); | |
2625 | ||
2626 | /* This insn needs, or can use, some delay slots. SLOTS_TO_FILL | |
2627 | says how many. After initialization, first try optimizing | |
2628 | ||
2629 | call _foo call _foo | |
2630 | nop add %o7,.-L1,%o7 | |
2631 | b,a L1 | |
2632 | nop | |
2633 | ||
2634 | If this case applies, the delay slot of the call is filled with | |
2635 | the unconditional jump. This is done first to avoid having the | |
2636 | delay slot of the call filled in the backward scan. Also, since | |
2637 | the unconditional jump is likely to also have a delay slot, that | |
2638 | insn must exist when it is subsequently scanned. */ | |
2639 | ||
2640 | slots_filled = 0; | |
2641 | delay_list = 0; | |
2642 | ||
2643 | if (GET_CODE (insn) == CALL_INSN | |
2644 | && (trial = next_active_insn (insn)) | |
2645 | && GET_CODE (trial) == JUMP_INSN | |
2646 | && simplejump_p (trial) | |
2647 | && eligible_for_delay (insn, slots_filled, trial, flags) | |
2648 | && no_labels_between_p (insn, trial)) | |
2649 | { | |
2650 | slots_filled++; | |
2651 | delay_list = add_to_delay_list (trial, delay_list); | |
2652 | /* Remove the unconditional jump from consideration for delay slot | |
2653 | filling and unthread it. */ | |
2654 | if (unfilled_slots_base[i + 1] == trial) | |
2655 | unfilled_slots_base[i + 1] = 0; | |
2656 | { | |
2657 | rtx next = NEXT_INSN (trial); | |
2658 | rtx prev = PREV_INSN (trial); | |
2659 | if (prev) | |
2660 | NEXT_INSN (prev) = next; | |
2661 | if (next) | |
2662 | PREV_INSN (next) = prev; | |
2663 | } | |
2664 | } | |
2665 | ||
2666 | /* Now, scan backwards from the insn to search for a potential | |
2667 | delay-slot candidate. Stop searching when a label or jump is hit. | |
2668 | ||
2669 | For each candidate, if it is to go into the delay slot (moved | |
2670 | forward in execution sequence), it must not need or set any resources | |
2671 | that were set by later insns and must not set any resources that | |
2672 | are needed for those insns. | |
2673 | ||
2674 | The delay slot insn itself sets resources unless it is a call | |
2675 | (in which case the called routine, not the insn itself, is doing | |
2676 | the setting). */ | |
2677 | ||
2678 | if (slots_filled < slots_to_fill) | |
2679 | { | |
2680 | CLEAR_RESOURCE (&needed); | |
2681 | CLEAR_RESOURCE (&set); | |
2682 | mark_set_resources (insn, &set, 0, 0); | |
2683 | mark_referenced_resources (insn, &needed, 0); | |
2684 | ||
2685 | for (trial = prev_nonnote_insn (insn); ! stop_search_p (trial, 1); | |
2686 | trial = next_trial) | |
2687 | { | |
2688 | next_trial = prev_nonnote_insn (trial); | |
2689 | ||
2690 | /* This must be an INSN or CALL_INSN. */ | |
2691 | pat = PATTERN (trial); | |
2692 | ||
2693 | /* USE and CLOBBER at this level are just for flow; ignore them. */ | |
2694 | if (GET_CODE (pat) == USE || GET_CODE (pat) == CLOBBER) | |
2695 | continue; | |
2696 | ||
2697 | /* Check for resource conflict first, to avoid unnecessary | |
2698 | splitting. */ | |
2699 | if (! insn_references_resource_p (trial, &set, 1) | |
2700 | && ! insn_sets_resource_p (trial, &set, 1) | |
2701 | && ! insn_sets_resource_p (trial, &needed, 1) | |
2702 | #ifdef HAVE_cc0 | |
2703 | /* Can't separate set of cc0 from its use. */ | |
2704 | && ! (reg_mentioned_p (cc0_rtx, pat) | |
2705 | && ! sets_cc0_p (pat)) | |
2706 | #endif | |
2707 | ) | |
2708 | { | |
2709 | trial = try_split (pat, trial, 1); | |
2710 | next_trial = prev_nonnote_insn (trial); | |
2711 | if (eligible_for_delay (insn, slots_filled, trial, flags)) | |
2712 | { | |
2713 | /* In this case, we are searching backward, so if we | |
2714 | find insns to put on the delay list, we want | |
2715 | to put them at the head, rather than the | |
2716 | tail, of the list. */ | |
2717 | ||
2718 | update_reg_dead_notes (trial, insn); | |
2719 | delay_list = gen_rtx (INSN_LIST, VOIDmode, | |
2720 | trial, delay_list); | |
2721 | update_block (trial, trial); | |
2722 | delete_insn (trial); | |
2723 | if (slots_to_fill == ++slots_filled) | |
2724 | break; | |
2725 | continue; | |
2726 | } | |
2727 | } | |
2728 | ||
2729 | mark_set_resources (trial, &set, 0, 1); | |
2730 | mark_referenced_resources (trial, &needed, 1); | |
2731 | } | |
2732 | } | |
2733 | ||
2734 | /* If we haven't filled all the needed slots, we come here. */ | |
2735 | ||
2736 | /* Try to optimize case of jumping around a single insn. */ | |
2737 | #if defined(ANNUL_IFFALSE_SLOTS) || defined(ANNUL_IFTRUE_SLOTS) | |
2738 | if (slots_filled != slots_to_fill | |
2739 | && delay_list == 0 | |
2740 | && GET_CODE (insn) == JUMP_INSN && condjump_p (insn)) | |
2741 | { | |
2742 | delay_list = optimize_skip (insn); | |
2743 | if (delay_list) | |
2744 | slots_filled += 1; | |
2745 | } | |
2746 | #endif | |
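
/* Illustration of the skip optimization on a hypothetical target:

	beq L1
	add 1,r3
   L1:

   becomes an annulled `beq' with the add in its delay slot, so the add
   still takes effect only on the path that previously fell through to
   it.  */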
2747 | ||
2748 | /* Try to get insns from beyond the insn needing the delay slot. | |
2749 | These insns can neither set nor reference resources set in insns being | |
2750 | skipped, cannot set resources in the insn being skipped, and, if this | |
2751 | is a CALL_INSN (or a CALL_INSN is passed), cannot trap (because the | |
2752 | call might not return). | |
2753 | ||
2754 | If this is a conditional jump, see if it merges back to us early | |
2755 | enough for us to pick up insns from the merge point. Don't do | |
2756 | this if there is another branch to our label unless we pass all of | |
2757 | them. | |
2758 | ||
2759 | Another similar merge is if we jump to the same place that a | |
2760 | later unconditional jump branches to. In that case, we don't | |
2761 | care about the number of uses of our label. */ | |
2762 | ||
2763 | if (slots_filled != slots_to_fill | |
2764 | && (GET_CODE (insn) != JUMP_INSN | |
2765 | || (condjump_p (insn) && ! simplejump_p (insn) | |
2766 | && JUMP_LABEL (insn) != 0))) | |
2767 | { | |
2768 | rtx target = 0; | |
2769 | int maybe_never = 0; | |
2770 | int passed_label = 0; | |
2771 | int target_uses; | |
2772 | struct resources needed_at_jump; | |
2773 | ||
2774 | CLEAR_RESOURCE (&needed); | |
2775 | CLEAR_RESOURCE (&set); | |
2776 | ||
2777 | if (GET_CODE (insn) == CALL_INSN) | |
2778 | { | |
2779 | mark_set_resources (insn, &set, 0, 1); | |
2780 | mark_referenced_resources (insn, &needed, 1); | |
2781 | maybe_never = 1; | |
2782 | } | |
2783 | else | |
2784 | { | |
2785 | mark_set_resources (insn, &set, 0, 1); | |
2786 | mark_referenced_resources (insn, &needed, 1); | |
2787 | if (GET_CODE (insn) == JUMP_INSN) | |
2788 | { | |
2789 | /* Get our target and show how many more uses we want to | |
2790 | see before we hit the label. */ | |
2791 | target = JUMP_LABEL (insn); | |
2792 | target_uses = LABEL_NUSES (target) - 1; | |
2793 | } | |
2794 | ||
2795 | } | |
2796 | ||
2797 | for (trial = next_nonnote_insn (insn); trial; trial = next_trial) | |
2798 | { | |
2799 | rtx pat, trial_delay; | |
2800 | ||
2801 | next_trial = next_nonnote_insn (trial); | |
2802 | ||
2803 | if (GET_CODE (trial) == CODE_LABEL) | |
2804 | { | |
2805 | passed_label = 1; | |
2806 | ||
2807 | /* If this is our target, see if we have seen all its uses. | |
2808 | If so, indicate we have passed our target and ignore it. | |
2809 | All other labels cause us to stop our search. */ | |
2810 | if (trial == target && target_uses == 0) | |
2811 | { | |
2812 | target = 0; | |
2813 | continue; | |
2814 | } | |
2815 | else | |
2816 | break; | |
2817 | } | |
2818 | else if (GET_CODE (trial) == BARRIER) | |
2819 | break; | |
2820 | ||
2821 | /* We must have an INSN, JUMP_INSN, or CALL_INSN. */ | |
2822 | pat = PATTERN (trial); | |
2823 | ||
2824 | /* Stand-alone USE and CLOBBER are just for flow. */ | |
2825 | if (GET_CODE (pat) == USE || GET_CODE (pat) == CLOBBER) | |
2826 | continue; | |
2827 | ||
2828 | /* If this already has filled delay slots, get the insn needing | |
2829 | the delay slots. */ | |
2830 | if (GET_CODE (pat) == SEQUENCE) | |
2831 | trial_delay = XVECEXP (pat, 0, 0); | |
2832 | else | |
2833 | trial_delay = trial; | |
2834 | ||
2835 | /* If this is a jump insn to our target, indicate that we have | |
2836 | seen another jump to it. If we aren't handling a conditional | |
2837 | jump, stop our search. Otherwise, compute the needs at its | |
2838 | target and add them to NEEDED. */ | |
2839 | if (GET_CODE (trial_delay) == JUMP_INSN) | |
2840 | { | |
2841 | if (target == 0) | |
2842 | break; | |
2843 | else if (JUMP_LABEL (trial_delay) == target) | |
2844 | target_uses--; | |
2845 | else | |
2846 | { | |
2847 | mark_target_live_regs | |
2848 | (next_active_insn (JUMP_LABEL (trial_delay)), | |
2849 | &needed_at_jump); | |
2850 | needed.memory |= needed_at_jump.memory; | |
2851 | IOR_HARD_REG_SET (needed.regs, needed_at_jump.regs); | |
2852 | } | |
2853 | } | |
2854 | ||
2855 | /* See if we have a resource problem before we try to | |
2856 | split. */ | |
2857 | if (target == 0 | |
2858 | && GET_CODE (pat) != SEQUENCE | |
2859 | && ! insn_references_resource_p (trial, &set, 1) | |
2860 | && ! insn_sets_resource_p (trial, &set, 1) | |
2861 | && ! insn_sets_resource_p (trial, &needed, 1) | |
2862 | #ifdef HAVE_cc0 | |
2863 | && ! (reg_mentioned_p (cc0_rtx, pat) && ! sets_cc0_p (pat)) | |
2864 | #endif | |
2865 | && ! (maybe_never && may_trap_p (pat)) | |
2866 | && (trial = try_split (pat, trial, 0)) | |
2867 | && eligible_for_delay (insn, slots_filled, trial, flags)) | |
2868 | { | |
2869 | next_trial = next_nonnote_insn (trial); | |
2870 | delay_list = add_to_delay_list (trial, delay_list); | |
2871 | ||
2872 | #ifdef HAVE_cc0 | |
2873 | if (reg_mentioned_p (cc0_rtx, pat)) | |
2874 | link_cc0_insns (trial); | |
2875 | #endif | |
2876 | ||
2877 | if (passed_label) | |
2878 | update_block (trial, trial); | |
2879 | delete_insn (trial); | |
2880 | if (slots_to_fill == ++slots_filled) | |
2881 | break; | |
2882 | continue; | |
2883 | } | |
2884 | ||
2885 | mark_set_resources (trial, &set, 0, 1); | |
2886 | mark_referenced_resources (trial, &needed, 1); | |
2887 | ||
2888 | /* Ensure we don't put insns between the setting of cc and the | |
2889 | comparison by moving a setting of cc into an earlier delay | |
2890 | slot since these insns could clobber the condition code. */ | |
2891 | set.cc = 1; | |
2892 | ||
2893 | /* If this is a call or jump, we might not get here. */ | |
2894 | if (GET_CODE (trial) == CALL_INSN | |
2895 | || GET_CODE (trial) == JUMP_INSN) | |
2896 | maybe_never = 1; | |
2897 | } | |
2898 | ||
2899 | /* If there are slots left to fill and our search was stopped by an | |
2900 | unconditional branch, try the insn at the branch target. We can | |
2901 | redirect the branch if it works. */ | |
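| /* For illustration (hypothetical single-slot machine), when the scan | |
| above stops at an unconditional jump without filling the slot: | |
| ||
| insn  { slot: empty }              insn  { slot: copy of "x = y" } | |
| jmp L1                     =>      jmp L1'      ; redirected | |
| L1: x = y                          L1:  x = y   ; other paths may | |
| rest                               L1': rest    ; still reach this | |
| */ | |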
2902 | if (slots_to_fill != slots_filled | |
2903 | && trial | |
2904 | && GET_CODE (trial) == JUMP_INSN | |
2905 | && simplejump_p (trial) | |
2906 | && (target == 0 || JUMP_LABEL (trial) == target) | |
2907 | && (next_trial = next_active_insn (JUMP_LABEL (trial))) != 0 | |
2908 | && ! (GET_CODE (next_trial) == INSN | |
2909 | && GET_CODE (PATTERN (next_trial)) == SEQUENCE) | |
2910 | && ! insn_references_resource_p (next_trial, &set, 1) | |
2911 | && ! insn_sets_resource_p (next_trial, &set, 1) | |
2912 | && ! insn_sets_resource_p (next_trial, &needed, 1) | |
2913 | #ifdef HAVE_cc0 | |
2914 | && ! reg_mentioned_p (cc0_rtx, PATTERN (next_trial)) | |
2915 | #endif | |
2916 | && ! (maybe_never && may_trap_p (PATTERN (next_trial))) | |
2917 | && (next_trial = try_split (PATTERN (next_trial), next_trial, 0)) | |
2918 | && eligible_for_delay (insn, slots_filled, next_trial, flags)) | |
2919 | { | |
2920 | rtx new_label = next_active_insn (next_trial); | |
2921 | ||
2922 | if (new_label != 0) | |
2923 | new_label = get_label_before (new_label); | |
2924 | ||
2925 | delay_list | |
2926 | = add_to_delay_list (copy_rtx (next_trial), delay_list); | |
2927 | slots_filled++; | |
2928 | reorg_redirect_jump (trial, new_label); | |
2929 | ||
2930 | /* If we merged because we both jumped to the same place, | |
2931 | redirect the original insn also. */ | |
2932 | if (target) | |
2933 | reorg_redirect_jump (insn, new_label); | |
2934 | } | |
2935 | } | |
2936 | ||
2937 | if (delay_list) | |
2938 | unfilled_slots_base[i] | |
2939 | = emit_delay_sequence (insn, delay_list, | |
2940 | slots_filled, slots_to_fill); | |
2941 | ||
2942 | if (slots_to_fill == slots_filled) | |
2943 | unfilled_slots_base[i] = 0; | |
2944 | ||
2945 | note_delay_statistics (slots_filled, 0); | |
2946 | } | |
2947 | ||
2948 | #ifdef DELAY_SLOTS_FOR_EPILOGUE | |
2949 | /* See if the epilogue needs any delay slots. Try to fill them if so. | |
2950 | The only thing we can do is scan backwards from the end of the | |
2951 | function. If we did this in a previous pass, it is incorrect to do it | |
2952 | again. */ | |
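| /* For illustration: with one epilogue delay slot, the last | |
| non-conflicting insn before the end of the function, say "r2 = r3", | |
| is moved onto current_function_epilogue_delay_list and later output | |
| by final inside the epilogue. */ | |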
2953 | if (current_function_epilogue_delay_list) | |
2954 | return; | |
2955 | ||
2956 | slots_to_fill = DELAY_SLOTS_FOR_EPILOGUE; | |
2957 | if (slots_to_fill == 0) | |
2958 | return; | |
2959 | ||
2960 | slots_filled = 0; | |
2961 | CLEAR_RESOURCE (&needed); | |
2962 | CLEAR_RESOURCE (&set); | |
2963 | ||
2964 | for (trial = get_last_insn (); ! stop_search_p (trial, 1); | |
2965 | trial = PREV_INSN (trial)) | |
2966 | { | |
2967 | if (GET_CODE (trial) == NOTE) | |
2968 | continue; | |
2969 | pat = PATTERN (trial); | |
2970 | if (GET_CODE (pat) == USE || GET_CODE (pat) == CLOBBER) | |
2971 | continue; | |
2972 | ||
2973 | if (! insn_references_resource_p (trial, &set, 1) | |
2974 | && ! insn_sets_resource_p (trial, &needed, 1) | |
2975 | #ifdef HAVE_cc0 | |
2976 | /* Don't want to mess with cc0 here. */ | |
2977 | && ! reg_mentioned_p (cc0_rtx, pat) | |
2978 | #endif | |
2979 | ) | |
2980 | { | |
2981 | trial = try_split (pat, trial, 1); | |
2982 | if (ELIGIBLE_FOR_EPILOGUE_DELAY (trial, slots_filled)) | |
2983 | { | |
2984 | /* Here as well we are searching backward, so put the | |
2985 | insns we find on the head of the list. */ | |
2986 | ||
2987 | current_function_epilogue_delay_list | |
2988 | = gen_rtx (INSN_LIST, VOIDmode, trial, | |
2989 | current_function_epilogue_delay_list); | |
2990 | mark_referenced_resources (trial, &end_of_function_needs, 1); | |
2991 | update_block (trial, trial); | |
2992 | delete_insn (trial); | |
2993 | ||
2994 | /* Clear deleted bit so final.c will output the insn. */ | |
2995 | INSN_DELETED_P (trial) = 0; | |
2996 | ||
2997 | if (slots_to_fill == ++slots_filled) | |
2998 | break; | |
2999 | continue; | |
3000 | } | |
3001 | } | |
3002 | ||
3003 | mark_set_resources (trial, &set, 0, 1); | |
3004 | mark_referenced_resources (trial, &needed, 1); | |
3005 | } | |
3006 | ||
3007 | note_delay_statistics (slots_filled, 0); | |
3008 | #endif | |
3009 | } | |
3010 | \f | |
3011 | /* Try to find insns to place in delay slots. | |
3012 | ||
3013 | INSN is the jump needing SLOTS_TO_FILL delay slots. It tests CONDITION | |
3014 | or is an unconditional branch if CONDITION is const_true_rtx. | |
3015 | *PSLOTS_FILLED is updated with the number of slots that we have filled. | |
3016 | ||
3017 | THREAD is a flow-of-control thread: the insns to be executed either | |
3018 | when the branch is true or when it is false; THREAD_IF_TRUE says which. | |
3019 | ||
3020 | OPPOSITE_THREAD is the thread in the opposite direction. It is used | |
3021 | to see if any potential delay slot insns set things needed there. | |
3022 | ||
3023 | LIKELY is non-zero if it is extremely likely that the branch will be | |
3024 | taken and THREAD_IF_TRUE is set. This is used for the branch at the | |
3025 | end of a loop that jumps back up to the top. | |
3026 | ||
3027 | OWN_THREAD and OWN_OPPOSITE_THREAD are true if we are the only user of the | |
3028 | thread. I.e., it is the fallthrough code of our jump or the target of the | |
3029 | jump when we are the only jump going there. | |
3030 | ||
3031 | If OWN_THREAD is false, it must be the "true" thread of a jump. In that | |
3032 | case, we can only take insns from the head of the thread for our delay | |
3033 | slot. We then adjust the jump to point after the insns we have taken. */ | |
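| /* For example, given a conditional branch (hypothetical code): | |
| ||
| beq L1 | |
| insn_f             ; fall-through code | |
| ... | |
| L1: insn_t             ; target code | |
| ||
| with THREAD_IF_TRUE set, THREAD begins at insn_t and OPPOSITE_THREAD | |
| at insn_f; with THREAD_IF_TRUE clear, the roles are reversed. */ | |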
3034 | ||
3035 | static rtx | |
3036 | fill_slots_from_thread (insn, condition, thread, opposite_thread, likely, | |
3037 | thread_if_true, own_thread, own_opposite_thread, | |
3038 | slots_to_fill, pslots_filled) | |
3039 | rtx insn; | |
3040 | rtx condition; | |
3041 | rtx thread, opposite_thread; | |
3042 | int likely; | |
3043 | int thread_if_true; | |
3044 | int own_thread, own_opposite_thread; | |
3045 | int slots_to_fill, *pslots_filled; | |
3046 | { | |
3047 | rtx new_thread; | |
3048 | rtx delay_list = 0; | |
3049 | struct resources opposite_needed, set, needed; | |
3050 | rtx trial; | |
3051 | int lose = 0; | |
3052 | int must_annul = 0; | |
3053 | int flags; | |
3054 | ||
3055 | /* Validate our arguments. */ | |
3056 | if ((condition == const_true_rtx && ! thread_if_true) | |
3057 | || (! own_thread && ! thread_if_true)) | |
3058 | abort (); | |
3059 | ||
3060 | flags = get_jump_flags (insn, JUMP_LABEL (insn)); | |
3061 | ||
3062 | /* If our thread is the end of the subroutine, we can't get any delay | |
3063 | insns from that. */ | |
3064 | if (thread == 0) | |
3065 | return 0; | |
3066 | ||
3067 | /* If this is an unconditional branch, nothing is needed at the | |
3068 | opposite thread. Otherwise, compute what is needed there. */ | |
3069 | if (condition == const_true_rtx) | |
3070 | CLEAR_RESOURCE (&opposite_needed); | |
3071 | else | |
3072 | mark_target_live_regs (opposite_thread, &opposite_needed); | |
3073 | ||
3074 | /* If the insn at THREAD can be split, do it here to avoid having to | |
3075 | update THREAD and NEW_THREAD if it is done in the loop below. Also | |
3076 | initialize NEW_THREAD. */ | |
3077 | ||
3078 | new_thread = thread = try_split (PATTERN (thread), thread, 0); | |
3079 | ||
3080 | /* Scan insns at THREAD. We are looking for an insn that can be removed | |
3081 | from THREAD (it neither sets nor references resources that were set | |
3082 | ahead of it and it doesn't set anything needed by the insns ahead of | |
3083 | it) and that either can be placed in an annulling insn or isn't | |
3084 | needed at OPPOSITE_THREAD. */ | |
3085 | ||
3086 | CLEAR_RESOURCE (&needed); | |
3087 | CLEAR_RESOURCE (&set); | |
3088 | ||
3089 | /* If we do not own this thread, we must stop as soon as we find | |
3090 | something that we can't put in a delay slot, since all we can do | |
3091 | is branch into THREAD at a later point. Therefore, labels stop | |
3092 | the search if this is not the `true' thread. */ | |
3093 | ||
3094 | for (trial = thread; | |
3095 | ! stop_search_p (trial, ! thread_if_true) && (! lose || own_thread); | |
3096 | trial = next_nonnote_insn (trial)) | |
3097 | { | |
3098 | rtx pat, old_trial; | |
3099 | ||
3100 | /* If we have passed a label, we no longer own this thread. */ | |
3101 | if (GET_CODE (trial) == CODE_LABEL) | |
3102 | { | |
3103 | own_thread = 0; | |
3104 | continue; | |
3105 | } | |
3106 | ||
3107 | pat = PATTERN (trial); | |
3108 | if (GET_CODE (pat) == USE || GET_CODE (pat) == CLOBBER) | |
3109 | continue; | |
3110 | ||
3111 | /* If TRIAL conflicts with the insns ahead of it, we lose. Also, | |
3112 | don't separate or copy insns that set and use CC0. */ | |
3113 | if (! insn_references_resource_p (trial, &set, 1) | |
3114 | && ! insn_sets_resource_p (trial, &set, 1) | |
3115 | && ! insn_sets_resource_p (trial, &needed, 1) | |
3116 | #ifdef HAVE_cc0 | |
3117 | && ! (reg_mentioned_p (cc0_rtx, pat) | |
3118 | && (! own_thread || ! sets_cc0_p (pat))) | |
3119 | #endif | |
3120 | ) | |
3121 | { | |
3122 | /* If TRIAL is redundant with some insn before INSN, we don't | |
3123 | actually need to add it to the delay list; we can merely pretend | |
3124 | we did. */ | |
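| /* For example, if "r1 = r2" is known to execute on every path reaching | |
| INSN, an identical copy found at THREAD computes nothing new, so it | |
| can be deleted (or skipped) without consuming a delay slot. */ | |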
3125 | if (redundant_insn_p (trial, insn, delay_list)) | |
3126 | { | |
3127 | if (own_thread) | |
3128 | { | |
3129 | update_block (trial, thread); | |
3130 | delete_insn (trial); | |
3131 | } | |
3132 | else | |
3133 | new_thread = next_active_insn (trial); | |
3134 | ||
3135 | continue; | |
3136 | } | |
3137 | ||
3138 | /* There are two ways we can win: If TRIAL doesn't set anything | |
3139 | needed at the opposite thread and can't trap, or if it can | |
3140 | go into an annulled delay slot. */ | |
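| /* For illustration of the second way: with ANNUL_IFFALSE_SLOTS, an insn | |
| taken from the target can sit in the slot of an annulling branch | |
| (e.g. SPARC "beq,a"); the hardware cancels it when the branch falls | |
| through, so it need not be safe on the opposite thread. */ | |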
3141 | if (condition == const_true_rtx | |
3142 | || (! insn_sets_resource_p (trial, &opposite_needed, 1) | |
3143 | && ! may_trap_p (pat))) | |
3144 | { | |
3145 | old_trial = trial; | |
3146 | trial = try_split (pat, trial, 0); | |
3147 | if (new_thread == old_trial) | |
3148 | new_thread = trial; | |
3149 | pat = PATTERN (trial); | |
3150 | if (eligible_for_delay (insn, *pslots_filled, trial, flags)) | |
3151 | goto winner; | |
3152 | } | |
3153 | else if (0 | |
3154 | #ifdef ANNUL_IFTRUE_SLOTS | |
3155 | || ! thread_if_true | |
3156 | #endif | |
3157 | #ifdef ANNUL_IFFALSE_SLOTS | |
3158 | || thread_if_true | |
3159 | #endif | |
3160 | ) | |
3161 | { | |
3162 | old_trial = trial; | |
3163 | trial = try_split (pat, trial, 0); | |
3164 | if (new_thread == old_trial) | |
3165 | new_thread = trial; | |
3166 | pat = PATTERN (trial); | |
3167 | if ((thread_if_true | |
3168 | ? eligible_for_annul_false (insn, *pslots_filled, trial, flags) | |
3169 | : eligible_for_annul_true (insn, *pslots_filled, trial, flags))) | |
3170 | { | |
3171 | rtx temp; | |
3172 | ||
3173 | must_annul = 1; | |
3174 | winner: | |
3175 | ||
3176 | #ifdef HAVE_cc0 | |
3177 | if (reg_mentioned_p (cc0_rtx, pat)) | |
3178 | link_cc0_insns (trial); | |
3179 | #endif | |
3180 | ||
3181 | /* If we own this thread, delete the insn. If this is the | |
3182 | destination of a branch, show that the basic block's status | |
3183 | may have been updated. In any case, mark the new | |
3184 | starting point of this thread. */ | |
3185 | if (own_thread) | |
3186 | { | |
3187 | update_block (trial, thread); | |
3188 | delete_insn (trial); | |
3189 | } | |
3190 | else | |
3191 | new_thread = next_active_insn (trial); | |
3192 | ||
3193 | temp = own_thread ? trial : copy_rtx (trial); | |
3194 | if (thread_if_true) | |
3195 | INSN_FROM_TARGET_P (temp) = 1; | |
3196 | ||
3197 | delay_list = add_to_delay_list (temp, delay_list); | |
3198 | ||
3199 | if (slots_to_fill == ++(*pslots_filled)) | |
3200 | { | |
3201 | /* Even though we have filled all the slots, we | |
3202 | may be branching to a location that has a | |
3203 | redundant insn. Skip any if so. */ | |
3204 | while (new_thread && ! own_thread | |
3205 | && ! insn_sets_resource_p (new_thread, &set, 1) | |
3206 | && ! insn_sets_resource_p (new_thread, &needed, 1) | |
3207 | && ! insn_references_resource_p (new_thread, | |
3208 | &set, 1) | |
3209 | && redundant_insn_p (new_thread, insn, | |
3210 | delay_list)) | |
3211 | new_thread = next_active_insn (new_thread); | |
3212 | break; | |
3213 | } | |
3214 | ||
3215 | continue; | |
3216 | } | |
3217 | } | |
3218 | } | |
3219 | ||
3220 | /* This insn can't go into a delay slot. */ | |
3221 | lose = 1; | |
3222 | mark_set_resources (trial, &set, 0, 1); | |
3223 | mark_referenced_resources (trial, &needed, 1); | |
3224 | ||
3225 | /* Mark cc as set so that a cc-setting insn is not moved into an earlier | |
3226 | delay slot, where it would sit between the setting of cc and the | |
3227 | comparison and clobber the condition code. */ | |
3228 | set.cc = 1; | |
3229 | ||
3230 | /* If this insn is a register-register copy and the next insn has | |
3231 | a use of our destination, change it to use our source. That way, | |
3232 | it will become a candidate for our delay slot the next time | |
3233 | through this loop. This case occurs commonly in loops that | |
3234 | scan a list. | |
3235 | ||
3236 | We could check for more complex cases than those tested below, | |
3237 | but it doesn't seem worth it. It might also be a good idea to try | |
3238 | to swap the two insns. That might do better. | |
3239 | ||
3240 | We can't do this if the next insn modifies our source, because that | |
3241 | would make the replacement into the insn invalid. This also | |
3242 | prevents updating the contents of a PRE_INC. */ | |
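| /* For example (hypothetical registers): | |
| ||
| r1 = r2            ; TRIAL, a copy we could not take | |
| r3 = r1 + 4        ; NEXT reads r1 and so conflicts | |
| ||
| becomes | |
| ||
| r1 = r2 | |
| r3 = r2 + 4        ; NEXT now reads r2 and may be taken on a | |
| ; later iteration of this loop | |
| */ | |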
3243 | ||
3244 | if (GET_CODE (trial) == INSN && GET_CODE (pat) == SET | |
3245 | && GET_CODE (SET_SRC (pat)) == REG | |
3246 | && GET_CODE (SET_DEST (pat)) == REG) | |
3247 | { | |
3248 | rtx next = next_nonnote_insn (trial); | |
3249 | ||
3250 | if (next && GET_CODE (next) == INSN | |
3251 | && GET_CODE (PATTERN (next)) != USE | |
3252 | && ! reg_set_p (SET_DEST (pat), next) | |
3253 | && reg_referenced_p (SET_DEST (pat), PATTERN (next))) | |
3254 | validate_replace_rtx (SET_DEST (pat), SET_SRC (pat), next); | |
3255 | } | |
3256 | } | |
3257 | ||
3258 | /* If we stopped on a branch insn that has delay slots, see if we can | |
3259 | steal some of the insns in those slots. */ | |
3260 | if (trial && GET_CODE (trial) == INSN | |
3261 | && GET_CODE (PATTERN (trial)) == SEQUENCE | |
3262 | && GET_CODE (XVECEXP (PATTERN (trial), 0, 0)) == JUMP_INSN) | |
3263 | { | |
3264 | /* If this is the `true' thread, we will want to follow the jump, | |
3265 | so we can only do this if we have taken everything up to here. */ | |
3266 | if (thread_if_true && trial == new_thread) | |
3267 | delay_list | |
3268 | = steal_delay_list_from_target (insn, condition, PATTERN (trial), | |
3269 | delay_list, &set, &needed, | |
3270 | &opposite_needed, slots_to_fill, | |
3271 | pslots_filled, &must_annul, | |
3272 | &new_thread); | |
3273 | else if (! thread_if_true) | |
3274 | delay_list | |
3275 | = steal_delay_list_from_fallthrough (insn, condition, | |
3276 | PATTERN (trial), | |
3277 | delay_list, &set, &needed, | |
3278 | &opposite_needed, slots_to_fill, | |
3279 | pslots_filled, &must_annul); | |
3280 | } | |
3281 | ||
3282 | /* If we haven't found anything for this delay slot and it is very | |
3283 | likely that the branch will be taken, see if the insn at our target | |
3284 | increments or decrements a register with an increment that does not | |
3285 | depend on the destination register. If so, try to place the opposite | |
3286 | arithmetic insn after the jump insn and put the arithmetic insn in the | |
3287 | delay slot. If we can't do this, return. */ | |
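| /* For illustration (hypothetical single-slot machine, branch almost | |
| always taken): | |
| ||
| beq L1                           beq L1'  { slot: r1 = r1 + 4 } | |
| ...                     =>       r1 = r1 - 4   ; new insn | |
| L1: r1 = r1 + 4                  ... | |
| rest                         L1': rest | |
| ||
| The slot executes on both paths; the inserted opposite insn undoes | |
| it on the rarely taken fall-through path. */ | |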
3288 | if (delay_list == 0 && likely && new_thread && GET_CODE (new_thread) == INSN) | |
3289 | { | |
3290 | rtx pat; | |
3291 | rtx dest; | |
3292 | rtx src; | |
3293 | ||
3294 | trial = new_thread; | |
3295 | pat = PATTERN (trial); | |
3296 | ||
3297 | if (GET_CODE (trial) != INSN || GET_CODE (pat) != SET | |
3298 | || ! eligible_for_delay (insn, 0, trial, flags)) | |
3299 | return 0; | |
3300 | ||
3301 | dest = SET_DEST (pat), src = SET_SRC (pat); | |
3302 | if ((GET_CODE (src) == PLUS || GET_CODE (src) == MINUS) | |
3303 | && rtx_equal_p (XEXP (src, 0), dest) | |
3304 | && ! reg_overlap_mentioned_p (dest, XEXP (src, 1))) | |
3305 | { | |
3306 | rtx other = XEXP (src, 1); | |
3307 | rtx new_arith; | |
3308 | rtx ninsn; | |
3309 | ||
3310 | /* If this is a constant adjustment, use the same code with | |
3311 | the negated constant. Otherwise, reverse the sense of the | |
3312 | arithmetic. */ | |
3313 | if (GET_CODE (other) == CONST_INT) | |
3314 | new_arith = gen_rtx (GET_CODE (src), GET_MODE (src), dest, | |
3315 | negate_rtx (GET_MODE (src), other)); | |
3316 | else | |
3317 | new_arith = gen_rtx (GET_CODE (src) == PLUS ? MINUS : PLUS, | |
3318 | GET_MODE (src), dest, other); | |
3319 | ||
3320 | ninsn = emit_insn_after (gen_rtx (SET, VOIDmode, dest, new_arith), | |
3321 | insn); | |
3322 | ||
3323 | if (recog_memoized (ninsn) < 0 | |
3324 | || (insn_extract (ninsn), | |
3325 | ! constrain_operands (INSN_CODE (ninsn), 1))) | |
3326 | { | |
3327 | delete_insn (ninsn); | |
3328 | return 0; | |
3329 | } | |
3330 | ||
3331 | if (own_thread) | |
3332 | { | |
3333 | update_block (trial, thread); | |
3334 | delete_insn (trial); | |
3335 | } | |
3336 | else | |
3337 | new_thread = next_active_insn (trial); | |
3338 | ||
3339 | ninsn = own_thread ? trial : copy_rtx (trial); | |
3340 | if (thread_if_true) | |
3341 | INSN_FROM_TARGET_P (ninsn) = 1; | |
3342 | ||
3343 | delay_list = add_to_delay_list (ninsn, NULL_RTX); | |
3344 | (*pslots_filled)++; | |
3345 | } | |
3346 | } | |
3347 | ||
3348 | if (delay_list && must_annul) | |
3349 | INSN_ANNULLED_BRANCH_P (insn) = 1; | |
3350 | ||
3351 | /* If we are to branch into the middle of this thread, find an appropriate | |
3352 | label or make a new one if none, and redirect INSN to it. If we hit the | |
3353 | end of the function, use the end-of-function label. */ | |
3354 | if (new_thread != thread) | |
3355 | { | |
3356 | rtx label; | |
3357 | ||
3358 | if (! thread_if_true) | |
3359 | abort (); | |
3360 | ||
3361 | if (new_thread && GET_CODE (new_thread) == JUMP_INSN | |
3362 | && (simplejump_p (new_thread) | |
3363 | || GET_CODE (PATTERN (new_thread)) == RETURN)) | |
3364 | new_thread = follow_jumps (JUMP_LABEL (new_thread)); | |
3365 | ||
3366 | if (new_thread == 0) | |
3367 | label = find_end_label (); | |
3368 | else if (GET_CODE (new_thread) == CODE_LABEL) | |
3369 | label = new_thread; | |
3370 | else | |
3371 | label = get_label_before (new_thread); | |
3372 | ||
3373 | reorg_redirect_jump (insn, label); | |
3374 | } | |
3375 | ||
3376 | return delay_list; | |
3377 | } | |
3378 | \f | |
3379 | /* Make another attempt to find insns to place in delay slots. | |
3380 | ||
3381 | We previously looked for insns located in front of the delay insn | |
3382 | and, for non-jump delay insns, located behind the delay insn. | |
3383 | ||
3384 | Here we only try to schedule jump insns, moving insns from either the | |
3385 | target or the following insns into the delay slot. If annulling is | |
3386 | supported, we are likely to succeed. Otherwise, we can do this only | |
3387 | when it is safe. */ | |
3388 | ||
3389 | static void | |
3390 | fill_eager_delay_slots (first) | |
3391 | rtx first; | |
3392 | { | |
3393 | register rtx insn; | |
3394 | register int i; | |
3395 | int num_unfilled_slots = unfilled_slots_next - unfilled_slots_base; | |
3396 | ||
3397 | for (i = 0; i < num_unfilled_slots; i++) | |
3398 | { | |
3399 | rtx condition; | |
3400 | rtx target_label, insn_at_target, fallthrough_insn; | |
3401 | rtx delay_list = 0; | |
3402 | int own_target; | |
3403 | int own_fallthrough; | |
3404 | int prediction, slots_to_fill, slots_filled; | |
3405 | ||
3406 | insn = unfilled_slots_base[i]; | |
3407 | if (insn == 0 | |
3408 | || INSN_DELETED_P (insn) | |
3409 | || GET_CODE (insn) != JUMP_INSN | |
3410 | || ! condjump_p (insn)) | |
3411 | continue; | |
3412 | ||
3413 | slots_to_fill = num_delay_slots (insn); | |
3414 | if (slots_to_fill == 0) | |
3415 | abort (); | |
3416 | ||
3417 | slots_filled = 0; | |
3418 | target_label = JUMP_LABEL (insn); | |
3419 | condition = get_branch_condition (insn, target_label); | |
3420 | ||
3421 | if (condition == 0) | |
3422 | continue; | |
3423 | ||
3424 | /* Get the next active fallthrough and target insns and see if we own | |
3425 | them. Then see whether the branch is likely true. We don't need | |
3426 | to do a lot of this for unconditional branches. */ | |
3427 | ||
3428 | insn_at_target = next_active_insn (target_label); | |
3429 | own_target = own_thread_p (target_label, target_label, 0); | |
3430 | ||
3431 | if (condition == const_true_rtx) | |
3432 | { | |
3433 | own_fallthrough = 0; | |
3434 | fallthrough_insn = 0; | |
3435 | prediction = 2; | |
3436 | } | |
3437 | else | |
3438 | { | |
3439 | fallthrough_insn = next_active_insn (insn); | |
3440 | own_fallthrough = own_thread_p (NEXT_INSN (insn), NULL_RTX, 1); | |
3441 | prediction = mostly_true_jump (insn, condition); | |
3442 | } | |
3443 | ||
3444 | /* If this insn is expected to branch, first try to get insns from our | |
3445 | target, then our fallthrough insns. If it is not expected to branch, | |
3446 | try the other order. */ | |
3447 | ||
3448 | if (prediction > 0) | |
3449 | { | |
3450 | delay_list | |
3451 | = fill_slots_from_thread (insn, condition, insn_at_target, | |
3452 | fallthrough_insn, prediction == 2, 1, | |
3453 | own_target, own_fallthrough, | |
3454 | slots_to_fill, &slots_filled); | |
3455 | ||
3456 | if (delay_list == 0 && own_fallthrough) | |
3457 | { | |
3458 | /* Even though we didn't find anything for delay slots, | |
3459 | we might have found a redundant insn which we deleted | |
3460 | from the thread that was filled. So we have to recompute | |
3461 | the next insn at the target. */ | |
3462 | target_label = JUMP_LABEL (insn); | |
3463 | insn_at_target = next_active_insn (target_label); | |
3464 | ||
3465 | delay_list | |
3466 | = fill_slots_from_thread (insn, condition, fallthrough_insn, | |
3467 | insn_at_target, 0, 0, | |
3468 | own_fallthrough, own_target, | |
3469 | slots_to_fill, &slots_filled); | |
3470 | } | |
3471 | } | |
3472 | else | |
3473 | { | |
3474 | if (own_fallthrough) | |
3475 | delay_list | |
3476 | = fill_slots_from_thread (insn, condition, fallthrough_insn, | |
3477 | insn_at_target, 0, 0, | |
3478 | own_fallthrough, own_target, | |
3479 | slots_to_fill, &slots_filled); | |
3480 | ||
3481 | if (delay_list == 0) | |
3482 | delay_list | |
3483 | = fill_slots_from_thread (insn, condition, insn_at_target, | |
3484 | next_active_insn (insn), 0, 1, | |
3485 | own_target, own_fallthrough, | |
3486 | slots_to_fill, &slots_filled); | |
3487 | } | |
3488 | ||
3489 | if (delay_list) | |
3490 | unfilled_slots_base[i] | |
3491 | = emit_delay_sequence (insn, delay_list, | |
3492 | slots_filled, slots_to_fill); | |
3493 | ||
3494 | if (slots_to_fill == slots_filled) | |
3495 | unfilled_slots_base[i] = 0; | |
3496 | ||
3497 | note_delay_statistics (slots_filled, 1); | |
3498 | } | |
3499 | } | |
3500 | \f | |
3501 | /* Once we have tried two ways to fill a delay slot, make a pass over the | |
3502 | code to try to improve the results and to do such things as more jump | |
3503 | threading. */ | |
3504 | ||
3505 | static void | |
3506 | relax_delay_slots (first) | |
3507 | rtx first; | |
3508 | { | |
3509 | register rtx insn, next, pat; | |
3510 | register rtx trial, delay_insn, target_label; | |
3511 | ||
3512 | /* Look at every JUMP_INSN and see if we can improve it. */ | |
3513 | for (insn = first; insn; insn = next) | |
3514 | { | |
3515 | rtx other; | |
3516 | ||
3517 | next = next_active_insn (insn); | |
3518 | ||
3519 | /* If this is a jump insn, see if it now jumps to a jump, jumps to | |
3520 | the next insn, or jumps to a label that is not the last of a | |
3521 | group of consecutive labels. */ | |
3522 | if (GET_CODE (insn) == JUMP_INSN | |
3523 | && condjump_p (insn) | |
3524 | && (target_label = JUMP_LABEL (insn)) != 0) | |
3525 | { | |
3526 | target_label = follow_jumps (target_label); | |
3527 | target_label = prev_label (next_active_insn (target_label)); | |
3528 | ||
3529 | if (target_label == 0) | |
3530 | target_label = find_end_label (); | |
3531 | ||
3532 | if (next_active_insn (target_label) == next) | |
3533 | { | |
3534 | delete_jump (insn); | |
3535 | continue; | |
3536 | } | |
3537 | ||
3538 | if (target_label != JUMP_LABEL (insn)) | |
3539 | reorg_redirect_jump (insn, target_label); | |
3540 | ||
3541 | /* See if this jump branches around an unconditional jump. | |
3542 | If so, invert this jump and point it to the target of the | |
3543 | second jump. */ | |
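| /* For example: | |
| ||
| beq L1                       bne L2 | |
| jmp L2              =>       ; jmp deleted | |
| L1: ...                      L1: ... | |
| */ | |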
3544 | if (next && GET_CODE (next) == JUMP_INSN | |
3545 | && (simplejump_p (next) || GET_CODE (PATTERN (next)) == RETURN) | |
3546 | && next_active_insn (target_label) == next_active_insn (next) | |
3547 | && no_labels_between_p (insn, next)) | |
3548 | { | |
3549 | rtx label = JUMP_LABEL (next); | |
3550 | ||
3551 | /* Be careful how we do this to avoid deleting code or | |
3552 | labels that are momentarily dead. See similar optimization | |
3553 | in jump.c. | |
3554 | ||
3555 | We also need to ensure we properly handle the case when | |
3556 | invert_jump fails. */ | |
3557 | ||
3558 | ++LABEL_NUSES (target_label); | |
3559 | if (label) | |
3560 | ++LABEL_NUSES (label); | |
3561 | ||
3562 | if (invert_jump (insn, label)) | |
3563 | { | |
3564 | delete_insn (next); | |
3565 | next = insn; | |
3566 | } | |
3567 | ||
3568 | if (label) | |
3569 | --LABEL_NUSES (label); | |
3570 | ||
3571 | if (--LABEL_NUSES (target_label) == 0) | |
3572 | delete_insn (target_label); | |
3573 | ||
3574 | continue; | |
3575 | } | |
3576 | } | |
3577 | ||
3578 | /* If this is an unconditional jump and the previous insn is a | |
3579 | conditional jump, try reversing the condition of the previous | |
3580 | insn and swapping our targets. The next pass might be able to | |
3581 | fill the slots. | |
3582 | ||
3583 | Don't do this if we expect the conditional branch to be true, because | |
3584 | we would then be making the more common case longer. */ | |
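| /* For example, when "beq" is mostly not taken: | |
| ||
| beq L1                       bne L2     ; the common path now takes | |
| jmp L2              =>       jmp L1     ; one branch instead of two | |
| */ | |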
3585 | ||
3586 | if (GET_CODE (insn) == JUMP_INSN | |
3587 | && (simplejump_p (insn) || GET_CODE (PATTERN (insn)) == RETURN) | |
3588 | && (other = prev_active_insn (insn)) != 0 | |
3589 | && condjump_p (other) | |
3590 | && no_labels_between_p (other, insn) | |
3591 | && 0 > mostly_true_jump (other, | |
3592 | get_branch_condition (other, | |
3593 | JUMP_LABEL (other)))) | |
3594 | { | |
3595 | rtx other_target = JUMP_LABEL (other); | |
3596 | target_label = JUMP_LABEL (insn); | |
3597 | ||
3598 | /* Increment the count of OTHER_TARGET, so it doesn't get deleted | |
3599 | as we move the label. */ | |
3600 | if (other_target) | |
3601 | ++LABEL_NUSES (other_target); | |
3602 | ||
3603 | if (invert_jump (other, target_label)) | |
3604 | reorg_redirect_jump (insn, other_target); | |
3605 | ||
3606 | if (other_target) | |
3607 | --LABEL_NUSES (other_target); | |
3608 | } | |
3609 | ||
3610 | /* Now look only at cases where we have filled a delay slot. */ | |
3611 | if (GET_CODE (insn) != INSN | |
3612 | || GET_CODE (PATTERN (insn)) != SEQUENCE) | |
3613 | continue; | |
3614 | ||
3615 | pat = PATTERN (insn); | |
3616 | delay_insn = XVECEXP (pat, 0, 0); | |
3617 | ||
3618 | /* See if the first insn in the delay slot is redundant with some | |
3619 | previous insn. Remove it from the delay slot if so; then set up | |
3620 | to reprocess this insn. */ | |
3621 | if (redundant_insn_p (XVECEXP (pat, 0, 1), delay_insn, 0)) | |
3622 | { | |
3623 | delete_from_delay_slot (XVECEXP (pat, 0, 1)); | |
3624 | next = prev_active_insn (next); | |
3625 | continue; | |
3626 | } | |
3627 | ||
3628 | /* Now look only at the cases where we have a filled JUMP_INSN. */ | |
3629 | if (GET_CODE (XVECEXP (PATTERN (insn), 0, 0)) != JUMP_INSN | |
3630 | || ! condjump_p (XVECEXP (PATTERN (insn), 0, 0))) | |
3631 | continue; | |
3632 | ||
3633 | target_label = JUMP_LABEL (delay_insn); | |
3634 | ||
3635 | if (target_label) | |
3636 | { | |
3637 | /* If this jump goes to another unconditional jump, thread it, but | |
3638 | don't convert a jump into a RETURN here. */ | |
3639 | trial = follow_jumps (target_label); | |
3640 | trial = prev_label (next_active_insn (trial)); | |
3641 | if (trial == 0 && target_label != 0) | |
3642 | trial = find_end_label (); | |
3643 | ||
3644 | if (trial != target_label) | |
3645 | { | |
3646 | reorg_redirect_jump (delay_insn, trial); | |
3647 | target_label = trial; | |
3648 | } | |
3649 | ||
3650 | /* If the first insn at TARGET_LABEL is redundant with a previous | |
3651 | insn, redirect the jump to the following insn and process again. */ | |
3652 | trial = next_active_insn (target_label); | |
3653 | if (trial && GET_CODE (PATTERN (trial)) != SEQUENCE | |
3654 | && redundant_insn_p (trial, insn, 0)) | |
3655 | { | |
3656 | trial = next_active_insn (trial); | |
3657 | if (trial == 0) | |
3658 | target_label = find_end_label (); | |
3659 | else | |
3660 | target_label = get_label_before (trial); | |
3661 | reorg_redirect_jump (delay_insn, target_label); | |
3662 | next = insn; | |
3663 | continue; | |
3664 | } | |
3665 | ||
3666 | /* Similarly, if it is an unconditional jump with one insn in its | |
3667 | delay list and that insn is redundant, thread the jump. */ | |
3668 | if (trial && GET_CODE (PATTERN (trial)) == SEQUENCE | |
3669 | && XVECLEN (PATTERN (trial), 0) == 2 | |
3670 | && GET_CODE (XVECEXP (PATTERN (trial), 0, 0)) == JUMP_INSN | |
3671 | && (simplejump_p (XVECEXP (PATTERN (trial), 0, 0)) | |
3672 | || GET_CODE (PATTERN (XVECEXP (PATTERN (trial), 0, 0))) == RETURN) | |
3673 | && redundant_insn_p (XVECEXP (PATTERN (trial), 0, 1), insn, 0)) | |
3674 | { | |
3675 | target_label = JUMP_LABEL (XVECEXP (PATTERN (trial), 0, 0)); | |
3676 | if (target_label == 0) | |
3677 | target_label = find_end_label (); | |
3678 | reorg_redirect_jump (delay_insn, target_label); | |
3679 | next = insn; | |
3680 | continue; | |
3681 | } | |
3682 | } | |
3683 | ||
3684 | if (! INSN_ANNULLED_BRANCH_P (delay_insn) | |
3685 | && prev_active_insn (target_label) == insn | |
3686 | #ifdef HAVE_cc0 | |
3687 | /* If the last insn in the delay slot sets CC0 for some insn, | |
3688 | various code assumes that it is in a delay slot. We could | |
3689 | put it back where it belonged and delete the register notes, | |
3690 | but it doesn't seem worthwhile in this uncommon case. */ | |
3691 | && ! find_reg_note (XVECEXP (pat, 0, XVECLEN (pat, 0) - 1), | |
3692 | REG_CC_USER, NULL_RTX) | |
3693 | #endif | |
3694 | ) | |
3695 | { | |
3696 | int i; | |
3697 | ||
3698 | /* All this insn does is execute its delay list and jump to the | |
3699 | following insn. So delete the jump and just execute the delay | |
3700 | list insns. | |
3701 | ||
3702 | We do this by deleting the INSN containing the SEQUENCE, then | |
3703 | re-emitting the insns separately, and then deleting the jump. | |
3704 | This allows the count of the jump target to be properly | |
3705 | decremented. */ | |
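| /* For example, (sequence [jmp L1; insn1; insn2]) immediately followed | |
| by L1 becomes the straight-line code "insn1; insn2", and the use | |
| count of L1 is properly decremented. */ | |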
3706 | ||
3707 | /* Clear the from target bit, since these insns are no longer | |
3708 | in delay slots. */ | |
3709 | for (i = 0; i < XVECLEN (pat, 0); i++) | |
3710 | INSN_FROM_TARGET_P (XVECEXP (pat, 0, i)) = 0; | |
3711 | ||
3712 | trial = PREV_INSN (insn); | |
3713 | delete_insn (insn); | |
3714 | emit_insn_after (pat, trial); | |
3715 | delete_scheduled_jump (delay_insn); | |
3716 | continue; | |
3717 | } | |
3718 | ||
3719 | /* See if this is an unconditional jump around a single insn which is | |
3720 | identical to the one in its delay slot. In this case, we can just | |
3721 | delete the branch and the insn in its delay slot. */ | |
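| /* For example: | |
| ||
| jmp L1  { slot: x = y } | |
| L0: x = y                ; identical to the slot insn | |
| L1: rest | |
| ||
| Deleting the jump and its slot leaves "x = y" falling into L1, | |
| which is what both paths computed anyway. */ | |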
3722 | if (next && GET_CODE (next) == INSN | |
3723 | && prev_label (next_active_insn (next)) == target_label | |
3724 | && simplejump_p (delay_insn) | |
3725 | && XVECLEN (pat, 0) == 2 | |
3726 | && rtx_equal_p (PATTERN (next), PATTERN (XVECEXP (pat, 0, 1)))) | |
3727 | { | |
3728 | delete_insn (insn); | |
3729 | continue; | |
3730 | } | |
3731 | ||
3732 | /* See if this jump (with its delay slots) branches around another | |
3733 | jump (without delay slots). If so, invert this jump and point | |
3734 | it to the target of the second jump. We cannot do this for | |
3735 | annulled jumps, though. Again, don't convert a jump to a RETURN | |
3736 | here. */ | |
3737 | if (! INSN_ANNULLED_BRANCH_P (delay_insn) | |
3738 | && next && GET_CODE (next) == JUMP_INSN | |
3739 | && (simplejump_p (next) || GET_CODE (PATTERN (next)) == RETURN) | |
3740 | && next_active_insn (target_label) == next_active_insn (next) | |
3741 | && no_labels_between_p (insn, next)) | |
3742 | { | |
3743 | rtx label = JUMP_LABEL (next); | |
3744 | rtx old_label = JUMP_LABEL (delay_insn); | |
3745 | ||
3746 | if (label == 0) | |
3747 | label = find_end_label (); | |
3748 | ||
3749 | /* Be careful how we do this to avoid deleting code or labels | |
3750 | that are momentarily dead. See similar optimization in jump.c */ | |
3751 | if (old_label) | |
3752 | ++LABEL_NUSES (old_label); | |
3753 | ||
3754 | if (invert_jump (delay_insn, label)) | |
3755 | { | |
3756 | delete_insn (next); | |
3757 | next = insn; | |
3758 | } | |
3759 | ||
3760 | if (old_label && --LABEL_NUSES (old_label) == 0) | |
3761 | delete_insn (old_label); | |
3762 | continue; | |
3763 | } | |
3764 | ||
3765 | /* If we own the thread opposite the way this insn branches, see if we | |
3766 | can merge its delay slots with following insns. */ | |
3767 | if (INSN_FROM_TARGET_P (XVECEXP (pat, 0, 1)) | |
3768 | && own_thread_p (NEXT_INSN (insn), 0, 1)) | |
3769 | try_merge_delay_insns (insn, next); | |
3770 | else if (! INSN_FROM_TARGET_P (XVECEXP (pat, 0, 1)) | |
3771 | && own_thread_p (target_label, target_label, 0)) | |
3772 | try_merge_delay_insns (insn, next_active_insn (target_label)); | |
3773 | ||
3774 | /* If we get here, we haven't deleted INSN. But we may have deleted | |
3775 | NEXT, so recompute it. */ | |
3776 | next = next_active_insn (insn); | |
3777 | } | |
3778 | } | |
3779 | \f | |
3780 | #ifdef HAVE_return | |
3781 | ||
3782 | /* Look for filled jumps to the end of function label. We can try to convert | |
3783 | them into RETURN insns if the insns in the delay slot are valid for the | |
3784 | RETURN as well. */ | |
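| /* For example (hypothetical single-slot machine): | |
| ||
| (sequence [jmp end_label; insn])  =>  (sequence [return; insn]) | |
| ||
| provided "insn" is also valid in a RETURN delay slot; otherwise the | |
| jump is redirected to a real RETURN elsewhere in the function. */ | |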
3785 | ||
3786 | static void | |
3787 | make_return_insns (first) | |
3788 | rtx first; | |
3789 | { | |
3790 | rtx insn, jump_insn, pat; | |
3791 | rtx real_return_label = end_of_function_label; | |
3792 | int slots, i; | |
3793 | ||
3794 | /* See if there is a RETURN insn in the function other than the one we | |
3795 | made for END_OF_FUNCTION_LABEL. If so, arrange for anything we can't | |
3796 | convert into a RETURN to jump to it instead. */ | |
3797 | for (insn = first; insn; insn = NEXT_INSN (insn)) | |
3798 | if (GET_CODE (insn) == JUMP_INSN && GET_CODE (PATTERN (insn)) == RETURN) | |
3799 | { | |
3800 | real_return_label = get_label_before (insn); | |
3801 | break; | |
3802 | } | |
3803 | ||
3804 | /* Show an extra usage of REAL_RETURN_LABEL so it won't go away if it | |
3805 | was equal to END_OF_FUNCTION_LABEL. */ | |
3806 | LABEL_NUSES (real_return_label)++; | |
3807 | ||
3808 | /* Clear the list of insns to fill so we can use it. */ | |
3809 | obstack_free (&unfilled_slots_obstack, unfilled_firstobj); | |
3810 | ||
3811 | for (insn = first; insn; insn = NEXT_INSN (insn)) | |
3812 | { | |
3813 | int flags; | |
3814 | ||
3815 | /* Only look at filled JUMP_INSNs that go to the end of function | |
3816 | label. */ | |
3817 | if (GET_CODE (insn) != INSN | |
3818 | || GET_CODE (PATTERN (insn)) != SEQUENCE | |
3819 | || GET_CODE (XVECEXP (PATTERN (insn), 0, 0)) != JUMP_INSN | |
3820 | || JUMP_LABEL (XVECEXP (PATTERN (insn), 0, 0)) != end_of_function_label) | |
3821 | continue; | |
3822 | ||
3823 | pat = PATTERN (insn); | |
3824 | jump_insn = XVECEXP (pat, 0, 0); | |
3825 | ||
3826 | /* If we can't make the jump into a RETURN, redirect it to the best | |
3827 | RETURN and go on to the next insn. */ | |
3828 | if (! reorg_redirect_jump (jump_insn, NULL_RTX)) | |
3829 | { | |
3830 | reorg_redirect_jump (jump_insn, real_return_label); | |
3831 | continue; | |
3832 | } | |
3833 | ||
3834 | /* See if this RETURN can accept the insns currently in its delay slots. | |
3835 | It can if it has at least as many slots and the contents of each | |
3836 | are valid. */ | |
3837 | ||
3838 | flags = get_jump_flags (jump_insn, JUMP_LABEL (jump_insn)); | |
3839 | slots = num_delay_slots (jump_insn); | |
3840 | if (slots >= XVECLEN (pat, 0) - 1) | |
3841 | { | |
3842 | for (i = 1; i < XVECLEN (pat, 0); i++) | |
3843 | if (! ( | |
3844 | #ifdef ANNUL_IFFALSE_SLOTS | |
3845 | (INSN_ANNULLED_BRANCH_P (jump_insn) | |
3846 | && INSN_FROM_TARGET_P (XVECEXP (pat, 0, i))) | |
3847 | ? eligible_for_annul_false (jump_insn, i - 1, | |
3848 | XVECEXP (pat, 0, i), flags) : | |
3849 | #endif | |
3850 | #ifdef ANNUL_IFTRUE_SLOTS | |
3851 | (INSN_ANNULLED_BRANCH_P (jump_insn) | |
3852 | && ! INSN_FROM_TARGET_P (XVECEXP (pat, 0, i))) | |
3853 | ? eligible_for_annul_true (jump_insn, i - 1, | |
3854 | XVECEXP (pat, 0, i), flags) : | |
3855 | #endif | |
3856 | eligible_for_delay (jump_insn, i - 1, XVECEXP (pat, 0, i), flags))) | |
3857 | break; | |
3858 | } | |
3859 | else | |
3860 | i = 0; | |
3861 | ||
3862 | if (i == XVECLEN (pat, 0)) | |
3863 | continue; | |
3864 | ||
3865 | /* We have to do something with this insn. If it is an unconditional | |
3866 | RETURN, delete the SEQUENCE and output the individual insns, | |
3867 | followed by the RETURN. Then set things up so we try to find | |
3868 | insns for its delay slots, if it needs some. */ | |
3869 | if (GET_CODE (PATTERN (jump_insn)) == RETURN) | |
3870 | { | |
3871 | rtx prev = PREV_INSN (insn); | |
3872 | ||
3873 | delete_insn (insn); | |
3874 | for (i = 1; i < XVECLEN (pat, 0); i++) | |
3875 | prev = emit_insn_after (PATTERN (XVECEXP (pat, 0, i)), prev); | |
3876 | ||
3877 | insn = emit_jump_insn_after (PATTERN (jump_insn), prev); | |
3878 | emit_barrier_after (insn); | |
3879 | ||
3880 | if (slots) | |
3881 | obstack_ptr_grow (&unfilled_slots_obstack, insn); | |
3882 | } | |
3883 | else | |
3884 | /* It is probably more efficient to keep this with its current | |
3885 | delay slot as a branch to a RETURN. */ | |
3886 | reorg_redirect_jump (jump_insn, real_return_label); | |
3887 | } | |
3888 | ||
3889 | /* Now delete REAL_RETURN_LABEL if we never used it. Then try to fill any | |
3890 | new delay slots we have created. */ | |
3891 | if (--LABEL_NUSES (real_return_label) == 0) | |
3892 | delete_insn (real_return_label); | |
3893 | ||
3894 | fill_simple_delay_slots (first, 1); | |
3895 | fill_simple_delay_slots (first, 0); | |
3896 | } | |
3897 | #endif | |
3898 | \f | |
3899 | /* Try to find insns to place in delay slots. */ | |
3900 | ||
3901 | void | |
3902 | dbr_schedule (first, file) | |
3903 | rtx first; | |
3904 | FILE *file; | |
3905 | { | |
3906 | rtx insn, next, epilogue_insn = 0; | |
3907 | int i; | |
3908 | #if 0 | |
3909 | int old_flag_no_peephole = flag_no_peephole; | |
3910 | ||
3911 | /* Execute `final' once in prescan mode to delete any insns that won't be | |
3912 | used. Don't let final try to do any peephole optimization--it will | |
3913 | ruin dataflow information for this pass. */ | |
3914 | ||
3915 | flag_no_peephole = 1; | |
3916 | final (first, 0, NO_DEBUG, 1, 1); | |
3917 | flag_no_peephole = old_flag_no_peephole; | |
3918 | #endif | |
3919 | ||
3920 | /* If the current function has no insns other than the prologue and | |
3921 | epilogue, then do not try to fill any delay slots. */ | |
3922 | if (n_basic_blocks == 0) | |
3923 | return; | |
3924 | ||
3925 | /* Find the highest INSN_UID and allocate and initialize our map from | |
3926 | INSN_UID's to position in code. */ | |
3927 | for (max_uid = 0, insn = first; insn; insn = NEXT_INSN (insn)) | |
3928 | { | |
3929 | if (INSN_UID (insn) > max_uid) | |
3930 | max_uid = INSN_UID (insn); | |
3931 | if (GET_CODE (insn) == NOTE | |
3932 | && NOTE_LINE_NUMBER (insn) == NOTE_INSN_EPILOGUE_BEG) | |
3933 | epilogue_insn = insn; | |
3934 | } | |
3935 | ||
3936 | uid_to_ruid = (int *) alloca ((max_uid + 1) * sizeof (int)); | |
3937 | for (i = 0, insn = first; insn; i++, insn = NEXT_INSN (insn)) | |
3938 | uid_to_ruid[INSN_UID (insn)] = i; | |
3939 | ||
3940 | /* Initialize the list of insns that need filling. */ | |
3941 | if (unfilled_firstobj == 0) | |
3942 | { | |
3943 | gcc_obstack_init (&unfilled_slots_obstack); | |
3944 | unfilled_firstobj = (rtx *) obstack_alloc (&unfilled_slots_obstack, 0); | |
3945 | } | |
3946 | ||
3947 | for (insn = next_active_insn (first); insn; insn = next_active_insn (insn)) | |
3948 | { | |
3949 | rtx target; | |
3950 | ||
3951 | INSN_ANNULLED_BRANCH_P (insn) = 0; | |
3952 | INSN_FROM_TARGET_P (insn) = 0; | |
3953 | ||
3954 | /* Skip vector tables. We can't get attributes for them. */ | |
3955 | if (GET_CODE (insn) == JUMP_INSN | |
3956 | && (GET_CODE (PATTERN (insn)) == ADDR_VEC | |
3957 | || GET_CODE (PATTERN (insn)) == ADDR_DIFF_VEC)) | |
3958 | continue; | |
3959 | ||
3960 | if (num_delay_slots (insn) > 0) | |
3961 | obstack_ptr_grow (&unfilled_slots_obstack, insn); | |
3962 | ||
3963 | /* Ensure all jumps go to the last of a set of consecutive labels. */ | |
3964 | if (GET_CODE (insn) == JUMP_INSN && condjump_p (insn) | |
3965 | && JUMP_LABEL (insn) != 0 | |
3966 | && ((target = prev_label (next_active_insn (JUMP_LABEL (insn)))) | |
3967 | != JUMP_LABEL (insn))) | |
3968 | redirect_jump (insn, target); | |
3969 | } | |
3970 | ||
3971 | /* Indicate what resources are required to be valid at the end of the current | |
3972 | function. The condition code never is and memory always is. If the | |
3973 | frame pointer is needed, it is and so is the stack pointer unless | |
3974 | EXIT_IGNORE_STACK is non-zero. If the frame pointer is not needed, the | |
3975 | stack pointer is. Registers used to return the function value are | |
3976 | needed. Registers holding global variables are needed. */ | |
3977 | ||
3978 | end_of_function_needs.cc = 0; | |
3979 | end_of_function_needs.memory = 1; | |
3980 | CLEAR_HARD_REG_SET (end_of_function_needs.regs); | |
3981 | ||
3982 | if (frame_pointer_needed) | |
3983 | { | |
3984 | SET_HARD_REG_BIT (end_of_function_needs.regs, FRAME_POINTER_REGNUM); | |
3985 | #ifdef EXIT_IGNORE_STACK | |
3986 | if (! EXIT_IGNORE_STACK) | |
3987 | #endif | |
3988 | SET_HARD_REG_BIT (end_of_function_needs.regs, STACK_POINTER_REGNUM); | |
3989 | } | |
3990 | else | |
3991 | SET_HARD_REG_BIT (end_of_function_needs.regs, STACK_POINTER_REGNUM); | |
3992 | ||
3993 | if (current_function_return_rtx != 0 | |
3994 | && GET_CODE (current_function_return_rtx) == REG) | |
3995 | mark_referenced_resources (current_function_return_rtx, | |
3996 | &end_of_function_needs, 1); | |
3997 | ||
3998 | for (i = 0; i < FIRST_PSEUDO_REGISTER; i++) | |
3999 | if (global_regs[i]) | |
4000 | SET_HARD_REG_BIT (end_of_function_needs.regs, i); | |
4001 | ||
4002 | /* The registers required to be live at the end of the function are | |
4003 | represented in the flow information as being dead just prior to | |
4004 | reaching the end of the function. For example, the return of a value | |
4005 | might be represented by a USE of the return register immediately | |
4006 | followed by an unconditional jump to the return label where the | |
4007 | return label is the end of the RTL chain. The end of the RTL chain | |
4008 | is then taken to mean that the return register is live. | |
4009 | ||
4010 | This sequence is no longer maintained when epilogue instructions are | |
4011 | added to the RTL chain. To reconstruct the original meaning, the | |
4012 | start of the epilogue (NOTE_INSN_EPILOGUE_BEG) is regarded as the | |
4013 | point where these registers become live (start_of_epilogue_needs). | |
4014 | If epilogue instructions are present, the registers set by those | |
4015 | instructions won't have been processed by flow. Thus, those | |
4016 | registers are additionally required at the end of the RTL chain | |
4017 | (end_of_function_needs). */ | |
4018 | ||
4019 | start_of_epilogue_needs = end_of_function_needs; | |
4020 | ||
4021 | while ((epilogue_insn = next_nonnote_insn (epilogue_insn)) != 0) | |
4022 | mark_set_resources (epilogue_insn, &end_of_function_needs, 0, 1); | |
4023 | ||
4024 | /* Show we haven't computed an end-of-function label yet. */ | |
4025 | end_of_function_label = 0; | |
4026 | ||
4027 | /* Allocate and initialize the tables used by mark_target_live_regs. */ | |
4028 | target_hash_table | |
4029 | = (struct target_info **) alloca ((TARGET_HASH_PRIME | |
4030 | * sizeof (struct target_info *))); | |
4031 | bzero (target_hash_table, TARGET_HASH_PRIME * sizeof (struct target_info *)); | |
4032 | ||
4033 | bb_ticks = (int *) alloca (n_basic_blocks * sizeof (int)); | |
4034 | bzero (bb_ticks, n_basic_blocks * sizeof (int)); | |
4035 | ||
4036 | /* Initialize the statistics for this function. */ | |
4037 | bzero (num_insns_needing_delays, sizeof num_insns_needing_delays); | |
4038 | bzero (num_filled_delays, sizeof num_filled_delays); | |
4039 | ||
4040 | /* Now do the delay slot filling. Try everything twice in case earlier | |
4041 | changes make more slots fillable. */ | |
4042 | ||
4043 | for (reorg_pass_number = 0; | |
4044 | reorg_pass_number < MAX_REORG_PASSES; | |
4045 | reorg_pass_number++) | |
4046 | { | |
4047 | fill_simple_delay_slots (first, 1); | |
4048 | fill_simple_delay_slots (first, 0); | |
4049 | fill_eager_delay_slots (first); | |
4050 | relax_delay_slots (first); | |
4051 | } | |
4052 | ||
4053 | /* Delete any USE insns made by update_block; subsequent passes don't need | |
4054 | them or know how to deal with them. */ | |
4055 | for (insn = first; insn; insn = next) | |
4056 | { | |
4057 | next = NEXT_INSN (insn); | |
4058 | ||
4059 | if (GET_CODE (insn) == INSN && GET_CODE (PATTERN (insn)) == USE | |
4060 | && GET_RTX_CLASS (GET_CODE (XEXP (PATTERN (insn), 0))) == 'i') | |
4061 | next = delete_insn (insn); | |
4062 | } | |
4063 | ||
4064 | /* If we made an end of function label, indicate that it is now | |
4065 | safe to delete it by undoing our prior adjustment to LABEL_NUSES. | |
4066 | If it is now unused, delete it. */ | |
4067 | if (end_of_function_label && --LABEL_NUSES (end_of_function_label) == 0) | |
4068 | delete_insn (end_of_function_label); | |
4069 | ||
4070 | #ifdef HAVE_return | |
4071 | if (HAVE_return && end_of_function_label != 0) | |
4072 | make_return_insns (first); | |
4073 | #endif | |
4074 | ||
4075 | obstack_free (&unfilled_slots_obstack, unfilled_firstobj); | |
4076 | ||
4077 | /* It is not clear why the line below is needed, but it does seem to be. */ | |
4078 | unfilled_firstobj = (rtx *) obstack_alloc (&unfilled_slots_obstack, 0); | |
4079 | ||
4080 | /* Reposition the prologue and epilogue notes in case we moved the | |
4081 | prologue/epilogue insns. */ | |
4082 | reposition_prologue_and_epilogue_notes (first); | |
4083 | ||
4084 | if (file) | |
4085 | { | |
4086 | register int i, j, need_comma; | |
4087 | ||
4088 | for (reorg_pass_number = 0; | |
4089 | reorg_pass_number < MAX_REORG_PASSES; | |
4090 | reorg_pass_number++) | |
4091 | { | |
4092 | fprintf (file, ";; Reorg pass #%d:\n", reorg_pass_number + 1); | |
4093 | for (i = 0; i < NUM_REORG_FUNCTIONS; i++) | |
4094 | { | |
4095 | need_comma = 0; | |
4096 | fprintf (file, ";; Reorg function #%d\n", i); | |
4097 | ||
4098 | fprintf (file, ";; %d insns needing delay slots\n;; ", | |
4099 | num_insns_needing_delays[i][reorg_pass_number]); | |
4100 | ||
4101 | for (j = 0; j < MAX_DELAY_HISTOGRAM; j++) | |
4102 | if (num_filled_delays[i][j][reorg_pass_number]) | |
4103 | { | |
4104 | if (need_comma) | |
4105 | fprintf (file, ", "); | |
4106 | need_comma = 1; | |
4107 | fprintf (file, "%d got %d delays", | |
4108 | num_filled_delays[i][j][reorg_pass_number], j); | |
4109 | } | |
4110 | fprintf (file, "\n"); | |
4111 | } | |
4112 | } | |
4113 | } | |
4114 | } | |
4115 | #endif /* DELAY_SLOTS */ |