/* [unix-history] .ref-BSD-4_3_Net_2/usr/src/usr.bin/gcc/cc1/config/out-sparc.c */
/* Subroutines for insn-output.c for Sun SPARC.
   Copyright (C) 1987, 1988, 1989 Free Software Foundation, Inc.
   Contributed by Michael Tiemann (tiemann@mcc.com)

This file is part of GNU CC.

GNU CC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 1, or (at your option)
any later version.

GNU CC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GNU CC; see the file COPYING.  If not, write to
the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.  */

/* Global variables for machine-dependent things.  */

/* This should go away if we pass floats to regs via
   the stack instead of the frame, and if we learn how
   to renumber all the registers when we don't do a save (hard!).  */
extern int frame_pointer_needed;

static rtx find_addr_reg ();

rtx next_real_insn_no_labels ();

/* Return non-zero only if OP is a register of mode MODE,
   or const0_rtx.  */
int
reg_or_0_operand (op, mode)
     rtx op;
     enum machine_mode mode;
{
  return (op == const0_rtx || register_operand (op, mode));
}

/* Return non-zero if INSN is a conditional insn with a predicate
   valid after an addcc or subcc instruction.  */

int
ignore_overflow_conditional_p (insn)
     rtx insn;
{
  rtx x = SET_SRC (PATTERN (insn));
  RTX_CODE code;
  if (GET_CODE (x) == IF_THEN_ELSE)
    x = XEXP (x, 0);
  code = GET_CODE (x);
  return code == EQ || code == NE || code == GE || code == LT;
}

/* Return non-zero if this pattern can be evaluated safely, even if it
   was not asked for.  */
int
safe_insn_src_p (op, mode)
     rtx op;
     enum machine_mode mode;
{
  /* Just experimenting.  */

  /* No floating point src is safe if it contains an arithmetic
     operation, since that operation may trap.  */
  switch (GET_CODE (op))
    {
    case CONST_INT:
    case LABEL_REF:
    case SYMBOL_REF:
    case CONST:
      return 1;

    case REG:
      return 1;

    case MEM:
      return CONSTANT_ADDRESS_P (XEXP (op, 0));

      /* We never need to negate or complement constants.  */
    case NEG:
      return (mode != SFmode && mode != DFmode);
    case NOT:
      return 1;

    case COMPARE:
    case MINUS:
    case PLUS:
      return (mode != SFmode && mode != DFmode);
    case AND:
    case IOR:
    case XOR:
    case LSHIFT:
    case ASHIFT:
    case ASHIFTRT:
    case LSHIFTRT:
      if ((GET_CODE (XEXP (op, 0)) == CONST_INT && ! SMALL_INT (XEXP (op, 0)))
          || (GET_CODE (XEXP (op, 1)) == CONST_INT && ! SMALL_INT (XEXP (op, 1))))
        return 0;
      return 1;

    default:
      return 0;
    }
}

/* Return 1 if REG is clobbered in IN.
   Return 0 if REG is used in IN (other than being clobbered).
   Return 2 if REG does not appear in IN.  */

static int
reg_clobbered_p (reg, in)
     rtx reg;
     rtx in;
{
  register char *fmt;
  register int i, result = 0;

  register enum rtx_code code;

  if (in == 0)
    return 2;

  code = GET_CODE (in);

  switch (code)
    {
      /* Let these fail out quickly.  */
    case CONST_INT:
    case SYMBOL_REF:
    case CONST:
      return 2;

    case SUBREG:
      if (SUBREG_WORD (in) != 0)
        in = gen_rtx (REG, SImode, REGNO (SUBREG_REG (in)) + SUBREG_WORD (in));
      else
        in = SUBREG_REG (in);
      /* Fall through to the REG case.  */

    case REG:
      if (in == reg
          || refers_to_regno_p (REGNO (reg),
                                REGNO (reg) + HARD_REGNO_NREGS (reg, GET_MODE (reg)),
                                in, 0))
        return 0;
      return 2;

    case SET:
      if (SET_SRC (in) == reg
          || refers_to_regno_p (REGNO (reg),
                                REGNO (reg) + HARD_REGNO_NREGS (reg, GET_MODE (reg)),
                                SET_SRC (in), 0))
        return 0;

      if (SET_DEST (in) == reg)
        return 1;

      if (refers_to_regno_p (REGNO (reg),
                             REGNO (reg) + HARD_REGNO_NREGS (reg, GET_MODE (reg)),
                             SET_DEST (in), 0))
        if (GET_CODE (SET_DEST (in)) == REG
            || GET_CODE (SET_DEST (in)) == SUBREG)
          return 1;
        else
          return 0;
      return 2;

    case USE:
      if (XEXP (in, 0) == reg
          || refers_to_regno_p (REGNO (reg),
                                REGNO (reg) + HARD_REGNO_NREGS (reg, GET_MODE (reg)),
                                XEXP (in, 0), 0))
        return 0;
      return 2;

    case CLOBBER:
      if (XEXP (in, 0) == reg)
        return 1;
      /* If the CLOBBER expression is a SUBREG, accept that as a
         clobber.  But if it is some expression based on this register,
         that is like a USE as far as this register is concerned,
         so we won't take it.  */
      if (refers_to_regno_p (REGNO (reg),
                             REGNO (reg) + HARD_REGNO_NREGS (reg, GET_MODE (reg)),
                             XEXP (in, 0), 0))
        if (GET_CODE (XEXP (in, 0)) == REG
            || GET_CODE (XEXP (in, 0)) == SUBREG)
          return 1;
        else
          return 0;
      return 2;
    }

  fmt = GET_RTX_FORMAT (code);

  result = 2;

  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'E')
        {
          register int j;
          for (j = XVECLEN (in, i) - 1; j >= 0; j--)
            switch (reg_clobbered_p (reg, XVECEXP (in, i, j)))
              {
              case 0:
                return 0;
              case 2:
                continue;
              case 1:
                result = 1;
                break;
              }
        }
      else if (fmt[i] == 'e')
        switch (reg_clobbered_p (reg, XEXP (in, i)))
          {
          case 0:
            return 0;
          case 2:
            continue;
          case 1:
            result = 1;
            break;
          }
    }
  return result;
}
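
/* Illustration of the return convention (editorial; register numbers
   are made up): for IN = (set (reg 5) (plus (reg 3) (const_int 1))),
   reg_clobbered_p gives 1 for (reg 5), 0 for (reg 3), and 2 for any
   register that appears nowhere in IN.  */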

/* Return non-zero if OP can be written to without screwing up
   GCC's model of what's going on.  It is assumed that this operand
   appears in the dest position of a SET insn in a conditional
   branch's delay slot.  AFTER is the label to start looking from.  */
int
operand_clobbered_before_used_after (op, after)
     rtx op;
     rtx after;
{
  extern char call_used_regs[];

  /* Just experimenting.  */
  if (GET_CODE (op) == CC0)
    return 1;
  if (GET_CODE (op) == REG)
    {
      rtx insn;

      if (op == stack_pointer_rtx)
        return 0;

      for (insn = NEXT_INSN (after); insn; insn = NEXT_INSN (insn))
        {
          if (GET_CODE (insn) == NOTE)
            continue;
          if (GET_CODE (insn) == INSN
              || GET_CODE (insn) == JUMP_INSN
              || GET_CODE (insn) == CALL_INSN)
            {
              switch (reg_clobbered_p (op, PATTERN (insn)))
                {
                case 0:
                  return 0;
                case 2:
                  break;
                case 1:
                  return 1;
                }
              if (dead_or_set_p (insn, op))
                return 1;
            }
          else if (GET_CODE (insn) == CODE_LABEL)
            return 0;
          if (GET_CODE (insn) == JUMP_INSN)
            {
              if (condjump_p (insn))
                return 0;
              /* This is a jump insn which has already
                 been mangled.  We can't tell what it does.  */
              if (GET_CODE (PATTERN (insn)) == PARALLEL)
                return 0;
              if (! JUMP_LABEL (insn))
                return 0;
              /* Keep following jumps.  */
              insn = JUMP_LABEL (insn);
            }
        }
      return 1;
    }

  /* In both of these cases, the first insn executed
     for this op will be a sethi %hi(whatever),%g1,
     which is tolerable.  */
  if (GET_CODE (op) == MEM)
    return (CONSTANT_ADDRESS_P (XEXP (op, 0)));

  return 0;
}

/* Return non-zero if this pattern, as a source to a "SET",
   is known to yield an instruction of unit size.  */
int
single_insn_src_p (op, mode)
     rtx op;
     enum machine_mode mode;
{
  switch (GET_CODE (op))
    {
    case CONST_INT:
#if 1
      /* This is not always a single insn src, technically,
         but output_delayed_branch knows how to deal with it.  */
      return 1;
#else
      if (SMALL_INT (op))
        return 1;
      /* We can put this set insn into delay slot, because this is one
         insn; 'sethi'.  */
      if ((INTVAL (op) & 0x3ff) == 0)
        return 1;

      /* This is not a single insn src, technically,
         but output_delayed_branch knows how to deal with it.  */
      return 1;
#endif

#if 1
    case SYMBOL_REF:
      /* This is not a single insn src, technically,
         but output_delayed_branch knows how to deal with it.  */
      return 1;
#else
      return 0;
#endif

    case REG:
      return 1;

    case MEM:
#if 0
      /* This is not a single insn src, technically,
         but output_delayed_branch knows how to deal with it.  */
      if (GET_CODE (XEXP (op, 0)) == SYMBOL_REF)
        return 0;
#endif
      return 1;

      /* We never need to negate or complement constants.  */
    case NEG:
      return (mode != DFmode);
    case NOT:
      return 1;

    case COMPARE:
    case MINUS:
      /* If the target is cc0, then these insns will take
         two insns (one being a nop).  */
      return (mode != SFmode && mode != DFmode);
    case PLUS:
    case AND:
    case IOR:
    case XOR:
    case LSHIFT:
    case ASHIFT:
    case ASHIFTRT:
    case LSHIFTRT:
      if ((GET_CODE (XEXP (op, 0)) == CONST_INT && ! SMALL_INT (XEXP (op, 0)))
          || (GET_CODE (XEXP (op, 1)) == CONST_INT && ! SMALL_INT (XEXP (op, 1))))
        return 0;
      return 1;

    case SUBREG:
      if (SUBREG_WORD (op) != 0)
        return 0;
      return single_insn_src_p (SUBREG_REG (op), mode);

    case SIGN_EXTEND:
    case ZERO_EXTEND:
      /* Lazy... could check for more cases.  */
      if (GET_CODE (XEXP (op, 0)) == MEM
          && ! CONSTANT_ADDRESS_P (XEXP (XEXP (op, 0), 0)))
        return 1;
      return 0;

      /* Not doing floating point, since they probably
         take longer than the branch slot they might fill.  */
    case FLOAT_EXTEND:
    case FLOAT_TRUNCATE:
    case FLOAT:
    case FIX:
    case UNSIGNED_FLOAT:
    case UNSIGNED_FIX:
      return 0;

    default:
      return 0;
    }
}

/* This extra test must be done to verify that a move insn
   really is just one assembler insn.  */

int
single_insn_extra_test (dest, src)
     rtx dest, src;
{
  /* Moves between FP regs and CPU regs are two insns.  */
  return (!(GET_CODE (src) == REG
            && GET_CODE (dest) == REG
            && (FP_REG_P (src) != FP_REG_P (dest))));
}

/* Nonzero only if this *really* is a single insn operand.  */
int
strict_single_insn_op_p (op, mode)
     rtx op;
     enum machine_mode mode;
{
  if (mode == VOIDmode)
    mode = GET_MODE (op);

  switch (GET_CODE (op))
    {
    case CC0:
      return 1;

    case CONST_INT:
      if (SMALL_INT (op))
        return 1;
      /* We can put this set insn into delay slot, because this is one
         insn; 'sethi'.  */
      if ((INTVAL (op) & 0x3ff) == 0)
        return 1;
      return 0;

    case SYMBOL_REF:
      return 0;

    case REG:
      return (mode != DFmode && mode != DImode);

    case MEM:
      if (! CONSTANT_ADDRESS_P (XEXP (op, 0)))
        return (mode != DFmode && mode != DImode);
      return 0;

      /* We never need to negate or complement constants.  */
    case NEG:
      return (mode != DFmode);
    case NOT:
      return 1;

    case COMPARE:
    case MINUS:
      /* If the target is cc0, then these insns will take
         two insns (one being a nop).  */
      return (mode != SFmode && mode != DFmode);
    case PLUS:
    case AND:
    case IOR:
    case XOR:
    case LSHIFT:
    case ASHIFT:
    case ASHIFTRT:
    case LSHIFTRT:
      if ((GET_CODE (XEXP (op, 0)) == CONST_INT && ! SMALL_INT (XEXP (op, 0)))
          || (GET_CODE (XEXP (op, 1)) == CONST_INT && ! SMALL_INT (XEXP (op, 1))))
        return 0;
      return 1;

    case SUBREG:
      if (SUBREG_WORD (op) != 0)
        return 0;
      return strict_single_insn_op_p (SUBREG_REG (op), mode);

    case SIGN_EXTEND:
    case ZERO_EXTEND:
      if (GET_CODE (XEXP (op, 0)) == MEM
          && ! CONSTANT_ADDRESS_P (XEXP (XEXP (op, 0), 0)))
        return 1;
      return 0;

      /* Not doing floating point, since they probably
         take longer than the branch slot they might fill.  */
    case FLOAT_EXTEND:
    case FLOAT_TRUNCATE:
    case FLOAT:
    case FIX:
    case UNSIGNED_FLOAT:
    case UNSIGNED_FIX:
      return 0;

    default:
      return 0;
    }
}
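
/* Notes on the CONST_INT cases above (editorial illustration):
   SMALL_INT accepts constants that fit the signed 13-bit immediate
   field of SPARC arithmetic instructions.  A constant whose low 10
   bits are zero (e.g. 0x1000) is a single `sethi'; something like
   0x1001 would need sethi+or, i.e. two insns, so it is rejected.  */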
\f
/* Return truth value of whether OP is a relational operator.  */
int
relop (op, mode)
     rtx op;
     enum machine_mode mode;
{
  switch (GET_CODE (op))
    {
    case EQ:
    case NE:
    case GT:
    case GE:
    case LT:
    case LE:
    case GTU:
    case GEU:
    case LTU:
    case LEU:
      return 1;
    }
  return 0;
}

/* Return truth value of whether OP is EQ or NE.  */
int
eq_or_neq (op, mode)
     rtx op;
     enum machine_mode mode;
{
  return (GET_CODE (op) == EQ || GET_CODE (op) == NE);
}
\f
/* Return truth value of whether OP can be used as an operand in a
   three-address arithmetic insn (such as add %o1,7,%l2) of mode MODE.  */

int
arith_operand (op, mode)
     rtx op;
     enum machine_mode mode;
{
  return (register_operand (op, mode)
          || (GET_CODE (op) == CONST_INT && SMALL_INT (op)));
}

/* Return truth value of whether OP can be used as an operand in a
   two-address arithmetic insn (such as set 123456,%o4) of mode MODE.  */

int
arith32_operand (op, mode)
     rtx op;
     enum machine_mode mode;
{
  return (register_operand (op, mode) || GET_CODE (op) == CONST_INT);
}

/* Return truth value of whether OP is an integer which fits the
   range constraining immediate operands in three-address insns.  */

int
small_int (op, mode)
     rtx op;
     enum machine_mode mode;
{
  return (GET_CODE (op) == CONST_INT && SMALL_INT (op));
}

/* Return the best assembler insn template
   for moving operands[1] into operands[0] as a fullword.  */

static char *
singlemove_string (operands)
     rtx *operands;
{
  if (GET_CODE (operands[0]) == MEM)
    {
      if (GET_CODE (operands[1]) != MEM)
        if (CONSTANT_ADDRESS_P (XEXP (operands[0], 0)))
          {
            if (! ((cc_prev_status.flags & CC_KNOW_HI_G1)
                   && cc_prev_status.mdep == XEXP (operands[0], 0)))
              output_asm_insn ("sethi %%hi(%m0),%%g1", operands);
            cc_status.flags |= CC_KNOW_HI_G1;
            cc_status.mdep = XEXP (operands[0], 0);
            return "st %1,[%%lo(%m0)+%%g1]";
          }
        else
          return "st %r1,%0";
      else
        {
          rtx xoperands[2];

          cc_status.flags &= ~CC_F0_IS_0;
          xoperands[0] = gen_rtx (REG, SFmode, 32);
          xoperands[1] = operands[1];
          output_asm_insn (singlemove_string (xoperands), xoperands);
          xoperands[1] = xoperands[0];
          xoperands[0] = operands[0];
          output_asm_insn (singlemove_string (xoperands), xoperands);
          return "";
        }
    }
  if (GET_CODE (operands[1]) == MEM)
    {
      if (CONSTANT_ADDRESS_P (XEXP (operands[1], 0)))
        {
          if (! ((cc_prev_status.flags & CC_KNOW_HI_G1)
                 && cc_prev_status.mdep == XEXP (operands[1], 0)))
            output_asm_insn ("sethi %%hi(%m1),%%g1", operands);
          cc_status.flags |= CC_KNOW_HI_G1;
          cc_status.mdep = XEXP (operands[1], 0);
          return "ld [%%lo(%m1)+%%g1],%0";
        }
      return "ld %1,%0";
    }
  return "mov %1,%0";
}
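
/* Illustration (editorial; symbol and register names are made up):
   the CC_KNOW_HI_G1 bookkeeping above lets consecutive references to
   the same constant address share one sethi, e.g. two stores to _x:

        sethi %hi(_x),%g1
        st %o0,[%lo(_x)+%g1]
        st %o1,[%lo(_x)+%g1]    ! reuses %g1; no second sethi

   Anything that invalidates cc_status in between forces the sethi to
   be emitted again.  */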
\f
/* Output assembler code to perform a doubleword move insn
   with operands OPERANDS.  */

char *
output_move_double (operands)
     rtx *operands;
{
  enum { REGOP, OFFSOP, MEMOP, PUSHOP, POPOP, CNSTOP, RNDOP } optype0, optype1;
  rtx latehalf[2];
  rtx addreg0 = 0, addreg1 = 0;

  /* First classify both operands.  */

  if (REG_P (operands[0]))
    optype0 = REGOP;
  else if (offsettable_memref_p (operands[0]))
    optype0 = OFFSOP;
  else if (GET_CODE (operands[0]) == MEM)
    optype0 = MEMOP;
  else
    optype0 = RNDOP;

  if (REG_P (operands[1]))
    optype1 = REGOP;
  else if (CONSTANT_P (operands[1])
           || GET_CODE (operands[1]) == CONST_DOUBLE)
    optype1 = CNSTOP;
  else if (offsettable_memref_p (operands[1]))
    optype1 = OFFSOP;
  else if (GET_CODE (operands[1]) == MEM)
    optype1 = MEMOP;
  else
    optype1 = RNDOP;

  /* Check for the cases that the operand constraints are not
     supposed to allow to happen.  Abort if we get one,
     because generating code for these cases is painful.  */

  if (optype0 == RNDOP || optype1 == RNDOP)
    abort ();

  /* If an operand is an unoffsettable memory ref, find a register
     we can increment temporarily to make it refer to the second word.  */

  if (optype0 == MEMOP)
    addreg0 = find_addr_reg (XEXP (operands[0], 0));

  if (optype1 == MEMOP)
    addreg1 = find_addr_reg (XEXP (operands[1], 0));

  /* Ok, we can do one word at a time.
     Normally we do the low-numbered word first,
     but if either operand is autodecrementing then we
     do the high-numbered word first.

     In either case, set up in LATEHALF the operands to use
     for the high-numbered word and in some cases alter the
     operands in OPERANDS to be suitable for the low-numbered word.  */

  if (optype0 == REGOP)
    latehalf[0] = gen_rtx (REG, SImode, REGNO (operands[0]) + 1);
  else if (optype0 == OFFSOP)
    latehalf[0] = adj_offsettable_operand (operands[0], 4);
  else
    latehalf[0] = operands[0];

  if (optype1 == REGOP)
    latehalf[1] = gen_rtx (REG, SImode, REGNO (operands[1]) + 1);
  else if (optype1 == OFFSOP)
    latehalf[1] = adj_offsettable_operand (operands[1], 4);
  else if (optype1 == CNSTOP)
    {
      if (CONSTANT_P (operands[1]))
        latehalf[1] = const0_rtx;
      else if (GET_CODE (operands[1]) == CONST_DOUBLE)
        {
          latehalf[1] = gen_rtx (CONST_INT, VOIDmode,
                                 CONST_DOUBLE_HIGH (operands[1]));
          operands[1] = gen_rtx (CONST_INT, VOIDmode,
                                 CONST_DOUBLE_LOW (operands[1]));
        }
    }
  else
    latehalf[1] = operands[1];

  /* If the first move would clobber the source of the second one,
     do them in the other order.

     RMS says "This happens only for registers;
     such overlap can't happen in memory unless the user explicitly
     sets it up, and that is an undefined circumstance."

     but it happens on the sparc when loading parameter registers,
     so I am going to define that circumstance, and make it work
     as expected.  */

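  /* Example of the overlap (editorial; register numbers illustrative):
     moving the pair (%i1,%i2) into (%i2,%i3).  Copying the low word
     first would do `mov %i1,%i2' and then read a clobbered %i2 as the
     source of the high word; the REGNO (operands[0]) == REGNO (latehalf[1])
     test below spots this and does the high word first.  */
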
  /* Easy case: try moving both words at once.  */
  /* First check for moving between an even/odd register pair
     and a memory location.  */
  if ((optype0 == REGOP && optype1 != REGOP && optype1 != CNSTOP
       && (REGNO (operands[0]) & 1) == 0)
      || (optype0 != REGOP && optype1 != CNSTOP && optype1 == REGOP
          && (REGNO (operands[1]) & 1) == 0))
    {
      rtx op1, op2;
      rtx base = 0, offset = const0_rtx;

      /* OP1 gets the register pair, and OP2 gets the memory address.  */
      if (optype0 == REGOP)
        op1 = operands[0], op2 = XEXP (operands[1], 0);
      else
        op1 = operands[1], op2 = XEXP (operands[0], 0);

      /* Now see if we can trust the address to be 8-byte aligned.  */
      /* Trust global variables.  */
      if (CONSTANT_ADDRESS_P (op2))
        {
          int pair_is_dest = (optype0 == REGOP);

          operands[0] = op1;
          operands[1] = op2;
          if (! ((cc_prev_status.flags & CC_KNOW_HI_G1)
                 && cc_prev_status.mdep == op2))
            output_asm_insn ("sethi %%hi(%1),%%g1", operands);
          cc_status.flags |= CC_KNOW_HI_G1;
          cc_status.mdep = op2;
          if (pair_is_dest)
            return "ldd [%%lo(%1)+%%g1],%0";
          else
            return "std %0,[%%lo(%1)+%%g1]";
        }

      if (GET_CODE (op2) == PLUS)
        {
          if (GET_CODE (XEXP (op2, 0)) == REG)
            base = XEXP (op2, 0), offset = XEXP (op2, 1);
          else if (GET_CODE (XEXP (op2, 1)) == REG)
            base = XEXP (op2, 1), offset = XEXP (op2, 0);
        }

      /* Trust round enough offsets from the stack or frame pointer.  */
      if (base
          && (REGNO (base) == FRAME_POINTER_REGNUM
              || REGNO (base) == STACK_POINTER_REGNUM))
        {
          if (GET_CODE (offset) == CONST_INT
              && (INTVAL (offset) & 0x7) == 0)
            {
              if (op1 == operands[0])
                return "ldd %1,%0";
              else
                return "std %1,%0";
            }
        }
      else
        {
          /* We know structs not on the stack are properly aligned.
             Since a double asks for 8-byte alignment,
             we know it must have got that if it is in a struct.
             But a DImode need not be 8-byte aligned, because it could be a
             struct containing two ints or pointers.  */

          /* Sun fucks us here.  We cannot trust references
             to doubles via varying addresses.  It might be on the stack
             even if we don't know that it is; and then it might not be
             double-word aligned.  */
#if 0
          if (GET_CODE (operands[1]) == MEM && GET_MODE (operands[1]) == DFmode
              && MEM_IN_STRUCT_P (operands[1]))
            return "ldd %1,%0";
          else if (GET_CODE (operands[0]) == MEM
                   && GET_MODE (operands[0]) == DFmode
                   && MEM_IN_STRUCT_P (operands[0]))
            return "std %1,%0";
#endif
        }
    }

  if (optype0 == REGOP && optype1 == REGOP
      && REGNO (operands[0]) == REGNO (latehalf[1]))
    {
      /* Make any unoffsettable addresses point at high-numbered word.  */
      if (addreg0)
        output_asm_insn ("add %0,0x4,%0", &addreg0);
      if (addreg1)
        output_asm_insn ("add %0,0x4,%0", &addreg1);

      /* Do that word.  */
      output_asm_insn (singlemove_string (latehalf), latehalf);

      /* Undo the adds we just did.  */
      if (addreg0)
        output_asm_insn ("add %0,-0x4,%0", &addreg0);
      if (addreg1)
        output_asm_insn ("add %0,-0x4,%0", &addreg1);

      /* Do low-numbered word.  */
      return singlemove_string (operands);
    }
  else if (optype0 == REGOP && optype1 != REGOP
           && reg_overlap_mentioned_p (operands[0], operands[1]))
    {
      /* Do the late half first.  */
      output_asm_insn (singlemove_string (latehalf), latehalf);
      /* Then clobber.  */
      return singlemove_string (operands);
    }

  /* Normal case: do the two words, low-numbered first.  */

  output_asm_insn (singlemove_string (operands), operands);

  /* Make any unoffsettable addresses point at high-numbered word.  */
  if (addreg0)
    output_asm_insn ("add %0,0x4,%0", &addreg0);
  if (addreg1)
    output_asm_insn ("add %0,0x4,%0", &addreg1);

  /* Do that word.  */
  output_asm_insn (singlemove_string (latehalf), latehalf);

  /* Undo the adds we just did.  */
  if (addreg0)
    output_asm_insn ("add %0,-0x4,%0", &addreg0);
  if (addreg1)
    output_asm_insn ("add %0,-0x4,%0", &addreg1);

  return "";
}
\f
static char *
output_fp_move_double (operands)
     rtx *operands;
{
  if (FP_REG_P (operands[0]))
    {
      if (FP_REG_P (operands[1]))
        {
          output_asm_insn ("fmovs %1,%0", operands);
          operands[0] = gen_rtx (REG, VOIDmode, REGNO (operands[0]) + 1);
          operands[1] = gen_rtx (REG, VOIDmode, REGNO (operands[1]) + 1);
          return "fmovs %1,%0";
        }
      if (GET_CODE (operands[1]) == REG)
        {
          if ((REGNO (operands[1]) & 1) == 0)
            return "std %1,[%%fp-8]\n\tldd [%%fp-8],%0";
          else
            {
              rtx xoperands[3];
              xoperands[0] = operands[0];
              xoperands[1] = operands[1];
              xoperands[2] = gen_rtx (REG, SImode, REGNO (operands[1]) + 1);
              output_asm_insn ("st %2,[%%fp-4]\n\tst %1,[%%fp-8]\n\tldd [%%fp-8],%0", xoperands);
              return "";
            }
        }
      /* Use ldd if known to be aligned.  */
      if (GET_CODE (XEXP (operands[1], 0)) == PLUS
          && (((XEXP (XEXP (operands[1], 0), 0) == frame_pointer_rtx
                || XEXP (XEXP (operands[1], 0), 0) == stack_pointer_rtx)
               && GET_CODE (XEXP (XEXP (operands[1], 0), 1)) == CONST_INT
               && (INTVAL (XEXP (XEXP (operands[1], 0), 1)) & 0x7) == 0)
#if 0 /* An array in a structure that is a parm need not be aligned!  */
              /* Arrays are known to be aligned,
                 and reg+reg addresses are used (on this machine)
                 only for array accesses.  */
              || (REG_P (XEXP (XEXP (operands[1], 0), 0))
                  && REG_P (XEXP (XEXP (operands[1], 0), 1)))
#endif
              ))
        return "ldd %1,%0";
      if (CONSTANT_ADDRESS_P (XEXP (operands[1], 0)))
        {
          if (! ((cc_prev_status.flags & CC_KNOW_HI_G1)
                 && cc_prev_status.mdep == XEXP (operands[1], 0)))
            output_asm_insn ("sethi %%hi(%m1),%%g1", operands);
          cc_status.flags |= CC_KNOW_HI_G1;
          cc_status.mdep = XEXP (operands[1], 0);
          return "ldd [%%lo(%m1)+%%g1],%0";
        }
      /* Otherwise use two ld insns.  */
      {
        rtx xoperands[2];
        output_asm_insn ("ld %1,%0", operands);
        xoperands[0] = gen_rtx (REG, GET_MODE (operands[0]),
                                REGNO (operands[0]) + 1);
        if (GET_CODE (XEXP (operands[1], 0)) == PLUS
            && offsettable_address_p (1, GET_MODE (operands[1]),
                                      XEXP (operands[1], 0)))
          {
            xoperands[1] = adj_offsettable_operand (operands[1], 4);
            output_asm_insn ("ld %1,%0", xoperands);
          }
        else if (GET_CODE (XEXP (operands[1], 0)) == PLUS)
          {
            rtx inc_reg = XEXP (XEXP (operands[1], 0), 0);
            if (inc_reg == frame_pointer_rtx
                && GET_CODE (XEXP (XEXP (operands[1], 0), 1)) == REG
                && XEXP (XEXP (operands[1], 0), 0) != frame_pointer_rtx)
              inc_reg = XEXP (XEXP (operands[1], 0), 1);
            if (inc_reg == frame_pointer_rtx)
              {
                output_asm_insn ("mov %%fp,%%g1", xoperands);
                inc_reg = gen_rtx (REG, SImode, 1);
              }
            xoperands[1] = inc_reg;
            output_asm_insn ("add 4,%1,%1", xoperands);
            xoperands[1] = operands[1];
            output_asm_insn ("ld %1,%0", xoperands);
            xoperands[1] = inc_reg;
            output_asm_insn ("add -4,%1,%1", xoperands);
          }
        else
          {
            xoperands[1] = gen_rtx (MEM, GET_MODE (operands[1]),
                                    plus_constant (XEXP (operands[1], 0), 4));
            output_asm_insn ("ld %1,%0", xoperands);
          }
        return "";
      }
    }
  else if (FP_REG_P (operands[1]))
    {
      if (GET_CODE (operands[0]) == REG)
        {
          if ((REGNO (operands[0]) & 1) == 0)
            return "std %1,[%%fp-8]\n\tldd [%%fp-8],%0";
          else
            {
              rtx xoperands[3];
              xoperands[2] = operands[1];
              xoperands[1] = gen_rtx (REG, SImode, REGNO (operands[0]) + 1);
              xoperands[0] = operands[0];
              output_asm_insn ("std %2,[%%fp-8]\n\tld [%%fp-4],%1\n\tld [%%fp-8],%0", xoperands);
              return "";
            }
        }
      /* Use std if we can be sure it is well-aligned.  */
      if (GET_CODE (XEXP (operands[0], 0)) == PLUS
          && (((XEXP (XEXP (operands[0], 0), 0) == frame_pointer_rtx
                || XEXP (XEXP (operands[0], 0), 0) == stack_pointer_rtx)
               && GET_CODE (XEXP (XEXP (operands[0], 0), 1)) == CONST_INT
               && (INTVAL (XEXP (XEXP (operands[0], 0), 1)) & 0x7) == 0)
#if 0 /* An array in a structure that is a parm need not be aligned!  */
              /* Arrays are known to be aligned,
                 and reg+reg addresses are used (on this machine)
                 only for array accesses.  */
              || (REG_P (XEXP (XEXP (operands[0], 0), 0))
                  && REG_P (XEXP (XEXP (operands[0], 0), 1)))
#endif
              ))
        return "std %1,%0";
      if (CONSTANT_ADDRESS_P (XEXP (operands[0], 0)))
        {
          if (! ((cc_prev_status.flags & CC_KNOW_HI_G1)
                 && cc_prev_status.mdep == XEXP (operands[0], 0)))
            output_asm_insn ("sethi %%hi(%m0),%%g1", operands);
          cc_status.flags |= CC_KNOW_HI_G1;
          cc_status.mdep = XEXP (operands[0], 0);
          return "std %1,[%%lo(%m0)+%%g1]";
        }
      /* Otherwise use two st insns.  */
      {
        rtx xoperands[2];
        output_asm_insn ("st %r1,%0", operands);
        xoperands[1] = gen_rtx (REG, GET_MODE (operands[1]),
                                REGNO (operands[1]) + 1);
        if (GET_CODE (XEXP (operands[0], 0)) == PLUS
            && offsettable_address_p (1, GET_MODE (operands[0]),
                                      XEXP (operands[0], 0)))
          {
            xoperands[0] = adj_offsettable_operand (operands[0], 4);
            output_asm_insn ("st %r1,%0", xoperands);
          }
        else if (GET_CODE (XEXP (operands[0], 0)) == PLUS)
          {
            rtx inc_reg = XEXP (XEXP (operands[0], 0), 0);
            if (inc_reg == frame_pointer_rtx
                && GET_CODE (XEXP (XEXP (operands[0], 0), 1)) == REG
                && XEXP (XEXP (operands[0], 0), 0) != frame_pointer_rtx)
              inc_reg = XEXP (XEXP (operands[0], 0), 1);
            if (inc_reg == frame_pointer_rtx)
              {
                output_asm_insn ("mov %%fp,%%g1", xoperands);
                inc_reg = gen_rtx (REG, SImode, 1);
              }
            xoperands[0] = inc_reg;
            output_asm_insn ("add 4,%0,%0", xoperands);
            xoperands[0] = operands[0];
            output_asm_insn ("st %r1,%0", xoperands);
            xoperands[0] = inc_reg;
            output_asm_insn ("add -4,%0,%0", xoperands);
          }
        else
          {
            xoperands[0] = gen_rtx (MEM, GET_MODE (operands[0]),
                                    plus_constant (XEXP (operands[0], 0), 4));
            output_asm_insn ("st %r1,%0", xoperands);
          }
        return "";
      }
    }
  else
    abort ();
}
\f
/* Return a REG that occurs in ADDR with coefficient 1.
   ADDR can be effectively incremented by incrementing REG.  */

static rtx
find_addr_reg (addr)
     rtx addr;
{
  while (GET_CODE (addr) == PLUS)
    {
      if (GET_CODE (XEXP (addr, 0)) == REG
          && !(GET_CODE (XEXP (addr, 1)) == REG
               && XEXP (addr, 0) == frame_pointer_rtx))
        addr = XEXP (addr, 0);
      else if (GET_CODE (XEXP (addr, 1)) == REG)
        addr = XEXP (addr, 1);
      else if (CONSTANT_P (XEXP (addr, 0)))
        addr = XEXP (addr, 1);
      else if (CONSTANT_P (XEXP (addr, 1)))
        addr = XEXP (addr, 0);
      else
        abort ();
    }
  if (GET_CODE (addr) == REG)
    return addr;
  abort ();
}

void
output_sized_memop (opname, mode)
     char *opname;
     enum machine_mode mode;
{
  extern struct _iobuf *asm_out_file;

  static char *ld_size_suffix[] = { "ub", "uh", "", "?", "d" };
  static char *st_size_suffix[] = { "b", "h", "", "?", "d" };
  char *modename
    = (opname[0] == 'l' ? ld_size_suffix : st_size_suffix)[GET_MODE_SIZE (mode) >> 1];

  fprintf (asm_out_file, "\t%s%s", opname, modename);
}
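
/* The suffix tables above are indexed by GET_MODE_SIZE (mode) >> 1:
   size 1 -> index 0, 2 -> 1, 4 -> 2, 8 -> 4 (index 3 is unused, hence
   the "?").  So, for instance, output_sized_memop ("ld", QImode) emits
   "ldub" and output_sized_memop ("st", HImode) emits "sth".
   (Editorial note; the examples follow directly from the tables.)  */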
\f
/* Output a store-in-memory whose operands are OPERANDS[0,1].
   OPERANDS[0] is a MEM, and OPERANDS[1] is a reg or zero.  */

char *
output_store (operands)
     rtx *operands;
{
  enum machine_mode mode = GET_MODE (operands[0]);
  rtx address = XEXP (operands[0], 0);

  cc_status.flags |= CC_KNOW_HI_G1;
  cc_status.mdep = address;

  if (! ((cc_prev_status.flags & CC_KNOW_HI_G1)
         && address == cc_prev_status.mdep))
    {
      output_asm_insn ("sethi %%hi(%m0),%%g1", operands);
      cc_prev_status.mdep = address;
    }

  /* Store zero in two parts when appropriate.  */
  if (mode == DFmode && operands[1] == dconst0_rtx)
    {
      /* We can't cross a page boundary here because the
         SYMBOL_REF must be double word aligned, and for this
         to be the case, SYMBOL_REF+4 cannot cross.  */
      output_sized_memop ("st", SImode);
      output_asm_insn ("%r1,[%%g1+%%lo(%m0)]", operands);
      output_sized_memop ("st", SImode);
      return "%r1,[%%g1+%%lo(%m0)+4]";
    }

  /* Code below isn't smart enough to move a doubleword in two parts,
     so use output_move_double to do that in the cases that require it.  */
  if ((mode == DImode || mode == DFmode)
      && (GET_CODE (operands[1]) == REG
          && (REGNO (operands[1]) & 1)))
    return output_move_double (operands);

  output_sized_memop ("st", mode);
  return "%r1,[%%g1+%%lo(%m0)]";
}

/* Output a fixed-point load-from-memory whose operands are OPERANDS[0,1].
   OPERANDS[0] is a reg, and OPERANDS[1] is a mem.  */

char *
output_load_fixed (operands)
     rtx *operands;
{
  enum machine_mode mode = GET_MODE (operands[0]);
  rtx address = XEXP (operands[1], 0);

  /* We don't bother trying to see if we know %hi(address).
     This is because we are doing a load, and if we know the
     %hi value, we probably also know that value in memory.  */
  cc_status.flags |= CC_KNOW_HI_G1;
  cc_status.mdep = address;

  if (! ((cc_prev_status.flags & CC_KNOW_HI_G1)
         && address == cc_prev_status.mdep
         && cc_prev_status.mdep == cc_status.mdep))
    {
      output_asm_insn ("sethi %%hi(%m1),%%g1", operands);
      cc_prev_status.mdep = address;
    }

  /* Code below isn't smart enough to do a doubleword in two parts.
     So handle that case the slow way.  */
  if (mode == DImode
      && GET_CODE (operands[0]) == REG /* Moving to nonaligned reg pair */
      && (REGNO (operands[0]) & 1))
    return output_move_double (operands);

  output_sized_memop ("ld", mode);
  if (GET_CODE (operands[0]) == REG)
    return "[%%g1+%%lo(%m1)],%0";
  abort ();
}

/* Output a floating-point load-from-memory whose operands are OPERANDS[0,1].
   OPERANDS[0] is a reg, and OPERANDS[1] is a mem.
   We also handle the case where OPERANDS[0] is a mem.  */

char *
output_load_floating (operands)
     rtx *operands;
{
  enum machine_mode mode = GET_MODE (operands[0]);
  rtx address = XEXP (operands[1], 0);

  /* We don't bother trying to see if we know %hi(address).
     This is because we are doing a load, and if we know the
     %hi value, we probably also know that value in memory.  */
  cc_status.flags |= CC_KNOW_HI_G1;
  cc_status.mdep = address;

  if (! ((cc_prev_status.flags & CC_KNOW_HI_G1)
         && address == cc_prev_status.mdep
         && cc_prev_status.mdep == cc_status.mdep))
    {
      output_asm_insn ("sethi %%hi(%m1),%%g1", operands);
      cc_prev_status.mdep = address;
    }

  if (mode == DFmode)
    {
      if (REG_P (operands[0]))
        {
          if (REGNO (operands[0]) & 1)
            return output_move_double (operands);
          else
            return "ldd [%%g1+%%lo(%m1)],%0";
        }
      cc_status.flags &= ~(CC_F0_IS_0|CC_F1_IS_0);
      output_asm_insn ("ldd [%%g1+%%lo(%m1)],%%f0", operands);
      operands[1] = gen_rtx (REG, DFmode, 32);
      return output_fp_move_double (operands);
    }

  if (GET_CODE (operands[0]) == MEM)
    {
      cc_status.flags &= ~CC_F1_IS_0;
      output_asm_insn ("ld [%%g1+%%lo(%1)],%%f1", operands);
      if (CONSTANT_ADDRESS_P (XEXP (operands[0], 0)))
        {
          cc_status.mdep = XEXP (operands[0], 0);
          return "sethi %%hi(%m0),%%g1\n\tst %%f1,[%%g1+%%lo(%m0)]";
        }
      else
        return "st %%f1,%0";
    }
  return "ld [%%g1+%%lo(%m1)],%0";
}
\f
/* Load the address specified by OPERANDS[3] into the register
   specified by OPERANDS[0].

   OPERANDS[3] may be the result of a sum, hence it could either be:

   (1) CONST
   (2) REG
   (3) REG + CONST_INT
   (4) REG + REG + CONST_INT
   (5) REG + REG (special case of 4).

   Note that (4) is not a legitimate address.
   All cases are handled here.  */

void
output_load_address (operands)
     rtx *operands;
{
  rtx base, offset;

  if (CONSTANT_P (operands[3]))
    {
      output_asm_insn ("set %3,%0", operands);
      return;
    }

  if (REG_P (operands[3]))
    {
      if (REGNO (operands[0]) != REGNO (operands[3]))
        output_asm_insn ("mov %3,%0", operands);
      return;
    }

  if (GET_CODE (operands[3]) != PLUS)
    abort ();

  base = XEXP (operands[3], 0);
  offset = XEXP (operands[3], 1);

  if (GET_CODE (base) == CONST_INT)
    {
      rtx tmp = base;
      base = offset;
      offset = tmp;
    }

  if (GET_CODE (offset) != CONST_INT)
    {
      /* Operand is (PLUS (REG) (REG)).  */
      base = operands[3];
      offset = const0_rtx;
    }

  if (REG_P (base))
    {
      operands[6] = base;
      operands[7] = offset;
      if (SMALL_INT (offset))
        output_asm_insn ("add %6,%7,%0", operands);
      else
        output_asm_insn ("set %7,%0\n\tadd %0,%6,%0", operands);
    }
  else if (GET_CODE (base) == PLUS)
    {
      operands[6] = XEXP (base, 0);
      operands[7] = XEXP (base, 1);
      operands[8] = offset;

      if (SMALL_INT (offset))
        output_asm_insn ("add %6,%7,%0\n\tadd %0,%8,%0", operands);
      else
        output_asm_insn ("set %8,%0\n\tadd %0,%6,%0\n\tadd %0,%7,%0", operands);
    }
  else
    abort ();
}
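
/* For instance (editorial example; register names illustrative), for
   OPERANDS[3] = (plus (plus (reg %o1) (reg %o2)) (const_int 8)) and
   OPERANDS[0] = %o3, the PLUS case above emits

        add %o1,%o2,%o3
        add %o3,8,%o3

   while a large offset is first materialized in %o3 with `set'.  */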

/* Output code to place a size count SIZE in register REG.
   ALIGN is the size of the unit of transfer.

   Because block moves are pipelined, we don't include the
   first element in the transfer of SIZE to REG.  */

static void
output_size_for_block_move (size, reg, align)
     rtx size, reg;
     rtx align;
{
  rtx xoperands[3];

  xoperands[0] = reg;
  xoperands[1] = size;
  xoperands[2] = align;
  if (GET_CODE (size) == REG)
    output_asm_insn ("sub %1,%2,%0", xoperands);
  else
    {
      xoperands[1]
        = gen_rtx (CONST_INT, VOIDmode, INTVAL (size) - INTVAL (align));
      cc_status.flags &= ~ CC_KNOW_HI_G1;
      output_asm_insn ("set %1,%0", xoperands);
    }
}

/* Emit code to perform a block move.

   OPERANDS[0] is the destination.
   OPERANDS[1] is the source.
   OPERANDS[2] is the size.
   OPERANDS[3] is the alignment safe to use.
   OPERANDS[4] is a register we can safely clobber as a temp.  */

char *
output_block_move (operands)
     rtx *operands;
{
  /* A vector for our computed operands.  Note that output_load_address
     makes use of (and can clobber) up to the 8th element of this vector.  */
  rtx xoperands[10];
  rtx zoperands[10];
  static int movstrsi_label = 0;
  int i, j;
  rtx temp1 = operands[4];
  rtx alignrtx = operands[3];
  int align = INTVAL (alignrtx);

  xoperands[0] = operands[0];
  xoperands[1] = operands[1];
  xoperands[2] = temp1;

  /* We can't move more than four bytes at a time
     because we have only one register to move them through.  */
  if (align > 4)
    {
      align = 4;
      alignrtx = gen_rtx (CONST_INT, VOIDmode, 4);
    }

  /* Since we clobber untold things, nix the condition codes.  */
  CC_STATUS_INIT;

  /* Recognize special cases of block moves.  These occur
     when GNU C++ is forced to treat something as BLKmode
     to keep it in memory, when its mode could be represented
     with something smaller.

     We cannot do this for global variables, since we don't know
     what pages they don't cross.  Sigh.  */
  if (GET_CODE (operands[2]) == CONST_INT
      && INTVAL (operands[2]) <= 16
      && ! CONSTANT_ADDRESS_P (operands[0])
      && ! CONSTANT_ADDRESS_P (operands[1]))
    {
      int size = INTVAL (operands[2]);

      cc_status.flags &= ~CC_KNOW_HI_G1;
      if (align == 1)
        {
          if (memory_address_p (QImode, plus_constant (xoperands[0], size))
              && memory_address_p (QImode, plus_constant (xoperands[1], size)))
            {
              /* We will store different integers into this particular RTX.  */
              xoperands[2] = gen_rtx (CONST_INT, VOIDmode, 13);
              for (i = size-1; i >= 0; i--)
                {
                  INTVAL (xoperands[2]) = i;
                  output_asm_insn ("ldub [%a1+%2],%%g1\n\tstb %%g1,[%a0+%2]",
                                   xoperands);
                }
              return "";
            }
        }
      else if (align == 2)
        {
          if (memory_address_p (HImode, plus_constant (xoperands[0], size))
              && memory_address_p (HImode, plus_constant (xoperands[1], size)))
            {
              /* We will store different integers into this particular RTX.  */
              xoperands[2] = gen_rtx (CONST_INT, VOIDmode, 13);
              for (i = (size>>1)-1; i >= 0; i--)
                {
                  INTVAL (xoperands[2]) = i<<1;
                  output_asm_insn ("lduh [%a1+%2],%%g1\n\tsth %%g1,[%a0+%2]",
                                   xoperands);
                }
              return "";
            }
        }
      else
        {
          if (memory_address_p (SImode, plus_constant (xoperands[0], size))
              && memory_address_p (SImode, plus_constant (xoperands[1], size)))
            {
              /* We will store different integers into this particular RTX.  */
              xoperands[2] = gen_rtx (CONST_INT, VOIDmode, 13);
              for (i = (size>>2)-1; i >= 0; i--)
                {
                  INTVAL (xoperands[2]) = i<<2;
                  output_asm_insn ("ld [%a1+%2],%%g1\n\tst %%g1,[%a0+%2]",
                                   xoperands);
                }
              return "";
            }
        }
    }

  /* This is the size of the transfer.
     Either use the register which already contains the size,
     or use a free register (used by no operands).
     Also emit code to decrement the size value by ALIGN.  */
  output_size_for_block_move (operands[2], temp1, alignrtx);

  zoperands[0] = operands[0];
  zoperands[3] = plus_constant (operands[0], align);
  output_load_address (zoperands);

  xoperands[3] = gen_rtx (CONST_INT, VOIDmode, movstrsi_label++);
  xoperands[4] = gen_rtx (CONST_INT, VOIDmode, align);

  if (align == 1)
    output_asm_insn ("\nLm%3:\n\tldub [%1+%2],%%g1\n\tsubcc %2,%4,%2\n\tbge Lm%3\n\tstb %%g1,[%0+%2]", xoperands);
  else if (align == 2)
    output_asm_insn ("\nLm%3:\n\tlduh [%1+%2],%%g1\n\tsubcc %2,%4,%2\n\tbge Lm%3\n\tsth %%g1,[%0+%2]", xoperands);
  else
    output_asm_insn ("\nLm%3:\n\tld [%1+%2],%%g1\n\tsubcc %2,%4,%2\n\tbge Lm%3\n\tst %%g1,[%0+%2]", xoperands);
  return "";
}
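
/* For a word-aligned move of 16 bytes with destination register %o0,
   source %o1, and temp %o2, the loop above comes out roughly as
   (editorial sketch; label and register numbers illustrative):

        set 12,%o2              ! count = size - align
        add %o0,4,%o0           ! bias destination by align
   Lm0:
        ld [%o1+%o2],%g1
        subcc %o2,4,%o2
        bge Lm0
        st %g1,[%o0+%o2]        ! delay slot: count already decremented

   The store lands at dest+align+(count-align) == dest+count, which is
   why the destination pointer is biased up front.  */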
\f
/* What the sparc lacks in hardware, make up for in software.
   Compute a fairly good sequence of shift and add insns
   to make a multiply happen.  */

#define ABS(x) ((x) < 0 ? -(x) : x)

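/* Worked example (editorial): the loop below repeatedly picks the
   power of two nearest ABS(c) and records +1 or -1 for it in SHIFTS.
   For c == 10 it picks 8 (remainder 2) and then 2, so
   shifts[3] = shifts[1] = 1, and the emitted sequence is roughly

        sll %src,1,%g1
        mov %g1,%dst            ! dst = x << 1
        sll %g1,2,%g1
        add %dst,%g1,%dst       ! dst = (x << 1) + (x << 3) = 10x

   (register names illustrative).  */
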
char *
output_mul_by_constant (insn, operands, unsignedp)
     rtx insn;
     rtx *operands;
     int unsignedp;
{
  int c;			/* Size of constant */
  int shifts[BITS_PER_WORD];	/* Table of shifts */
  unsigned int p, log;		/* A power of two, and its log */
  int d1, d2;			/* Differences of c and p */
  int first = 1;		/* True if dst has unknown data in it */
  int i;

  CC_STATUS_INIT;

  c = INTVAL (operands[2]);
  if (c == 0)
    {
      /* Does happen, at least when not optimizing.  */
      if (GET_CODE (operands[0]) == MEM)
        return "st %%g0,%0";
      return "mov %%g0,%0";
    }

  output_asm_insn ("! start open coded multiply");

  /* Clear out the table of shifts.  */
  for (i = 0; i < BITS_PER_WORD; ++i)
    shifts[i] = 0;

  while (c)
    {
      /* Find the power of two nearest ABS(c).  */
      p = 1, log = 0;
      do
        {
          d1 = ABS (c) - p;
          p *= 2;
          ++log;
        }
      while (p < ABS (c));
      d2 = p - ABS (c);

      /* Make an appropriate entry in shifts for p.  */
      if (d2 < d1)
        {
          shifts[log] = c < 0 ? -1 : 1;
          c = c < 0 ? d2 : -d2;
        }
      else
        {
          shifts[log - 1] = c < 0 ? -1 : 1;
          c = c < 0 ? -d1 : d1;
        }
    }

  /* Take care of the first insn in sequence.
     We know we have at least one.  */

  /* A value of -1 in shifts says to subtract that power of two, and a value
     of 1 says to add that power of two.  */
  for (i = 0; ; i++)
    if (shifts[i])
      {
        if (i)
          {
            operands[2] = gen_rtx (CONST_INT, VOIDmode, i);
            output_asm_insn ("sll %1,%2,%%g1", operands);
          }
        else
          output_asm_insn ("mov %1,%%g1", operands);

        log = i;
        if (shifts[i] < 0)
          output_asm_insn ("sub %%g0,%%g1,%0", operands);
        else
          output_asm_insn ("mov %%g1,%0", operands);
        break;
      }

  /* A value of -1 in shifts says to subtract that power of two, and a value
     of 1 says to add that power of two--continued.  */
  for (i += 1; i < BITS_PER_WORD; ++i)
    if (shifts[i])
      {
        if (i - log > 0)
          {
            operands[2] = gen_rtx (CONST_INT, VOIDmode, i - log);
            output_asm_insn ("sll %%g1,%2,%%g1", operands);
          }
        else
          {
            operands[2] = gen_rtx (CONST_INT, VOIDmode, log - i);
            output_asm_insn ("sra %%g1,%2,%%g1", operands);
          }
        log = i;
        if (shifts[i] < 0)
          output_asm_insn ("sub %0,%%g1,%0", operands);
        else
          output_asm_insn ("add %0,%%g1,%0", operands);
      }

  output_asm_insn ("! end open coded multiply");

  return "";
}

char *
output_mul_insn (operands, unsignedp)
     rtx *operands;
     int unsignedp;
{
  int lucky1 = ((unsigned) REGNO (operands[1]) - 8) <= 1;
  int lucky2 = ((unsigned) REGNO (operands[2]) - 8) <= 1;

  CC_STATUS_INIT;

  if (lucky1)
    {
      if (lucky2)
        {
          if (REGNO (operands[1]) == REGNO (operands[2]))
            {
              if (REGNO (operands[1]) == 8)
                output_asm_insn ("mov %%o0,%%o1");
              else
                output_asm_insn ("mov %%o1,%%o0");
            }
          output_asm_insn ("call .mul,2\n\tnop", operands);
        }
      else
        {
          rtx xoperands[2];
          xoperands[0] = gen_rtx (REG, SImode,
                                  8 ^ (REGNO (operands[1]) == 8));
          xoperands[1] = operands[2];
          output_asm_insn ("call .mul,2\n\tmov %1,%0", xoperands);
        }
    }
  else if (lucky2)
    {
      rtx xoperands[2];
      xoperands[0] = gen_rtx (REG, SImode,
                              8 ^ (REGNO (operands[2]) == 8));
      xoperands[1] = operands[1];
      output_asm_insn ("call .mul,2\n\tmov %1,%0", xoperands);
    }
  else
    {
      output_asm_insn ("mov %1,%%o0\n\tcall .mul,2\n\tmov %2,%%o1",
                       operands);
    }

  if (REGNO (operands[0]) == 8)
    return "";
  return "mov %%o0,%0";
}
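
/* "Lucky" above means the operand already lives in %o0 or %o1 (hard
   registers 8 and 9), where the .mul library routine expects its
   arguments; the result comes back in %o0.  In the unlucky case the
   emitted sequence is, e.g. (editorial sketch; source registers
   illustrative):

        mov %i0,%o0
        call .mul,2
        mov %i1,%o1             ! delay slot loads the second argument
        mov %o0,%dst            ! unless the destination is already %o0  */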

/* Make floating point register f0 contain 0.
   SIZE is the number of registers (including f0)
   which should contain 0.  */

void
make_f0_contain_0 (size)
     int size;
{
  if (size == 1)
    {
      if ((cc_status.flags & (CC_F0_IS_0)) == 0)
        output_asm_insn ("ld [%%fp-16],%%f0", 0);
      cc_status.flags |= CC_F0_IS_0;
    }
  else if (size == 2)
    {
      if ((cc_status.flags & CC_F0_IS_0) == 0)
        output_asm_insn ("ld [%%fp-16],%%f0", 0);
      if ((cc_status.flags & (CC_F1_IS_0)) == 0)
        output_asm_insn ("ld [%%fp-12],%%f1", 0);
      cc_status.flags |= CC_F0_IS_0 | CC_F1_IS_0;
    }
}

/* Since condition codes don't have logical links, we need to keep
   their setting and use together for set-cc insns.  */
void
gen_scc_insn (code, mode, operands)
     enum rtx_code code;
     enum machine_mode mode;
     rtx *operands;
{
  extern rtx sequence_stack;
  rtx last_insn = XEXP (XEXP (sequence_stack, 1), 0);
  rtx last_pat;

  /* Skip back over the CLOBBERs that may precede this insn.  */
  while (last_insn && GET_CODE (last_insn) == INSN
         && GET_CODE (PATTERN (last_insn)) == CLOBBER)
    last_insn = PREV_INSN (last_insn);
  /* We should have found the preceding compare.  */
  if (last_insn == 0 || GET_CODE (last_insn) != INSN)
    abort ();
  last_pat = PATTERN (last_insn);
  if (GET_CODE (last_pat) != SET
      || GET_CODE (SET_DEST (last_pat)) != CC0)
    abort ();

  /* Turn off that previous insn, now that we have got the data out of it.  */
  PUT_CODE (last_insn, NOTE);
  NOTE_LINE_NUMBER (last_insn) = NOTE_INSN_DELETED;

  /* Emit one replacement insn to compare operands and store result.  */
  emit_insn (gen_rtx (SET, VOIDmode, operands[0],
                      gen_rtx (code, mode, SET_SRC (last_pat), const0_rtx)));
}

/* Output reasonable peephole for set-on-condition-code insns.
   Note that these insns assume a particular way of defining
   labels.  Therefore, *both* tm-sparc.h and this function must
   be changed if a new syntax is needed.  */

char *
output_scc_insn (code, operand)
     enum rtx_code code;
     rtx operand;
{
  rtx xoperands[2];
  rtx label = gen_label_rtx ();
  int cc_in_fccr = cc_status.flags & CC_IN_FCCR;
  int antisymmetric = 0;

  xoperands[0] = operand;
  xoperands[1] = label;

  switch (code)
    {
    case NE:
      if (cc_in_fccr)
        output_asm_insn ("fbne,a %l0", &label);
      else
        output_asm_insn ("bne,a %l0", &label);
      break;
    case EQ:
      if (cc_in_fccr)
        output_asm_insn ("fbe,a %l0", &label);
      else
        output_asm_insn ("be,a %l0", &label);
      break;
    case GE:
      if (cc_in_fccr)
        output_asm_insn ("fbge,a %l0", &label);
      else
        output_asm_insn ("bge,a %l0", &label);
      antisymmetric = 1;
      break;
    case GT:
      if (cc_in_fccr)
        output_asm_insn ("fbg,a %l0", &label);
      else
        output_asm_insn ("bg,a %l0", &label);
      antisymmetric = 1;
      break;
    case LE:
      if (cc_in_fccr)
        output_asm_insn ("fble,a %l0", &label);
      else
        output_asm_insn ("ble,a %l0", &label);
      antisymmetric = 1;
      break;
    case LT:
      if (cc_in_fccr)
        output_asm_insn ("fbl,a %l0", &label);
      else
        output_asm_insn ("bl,a %l0", &label);
      antisymmetric = 1;
      break;
    case GEU:
      if (cc_in_fccr)
        abort ();
      else
        output_asm_insn ("bgeu,a %l0", &label);
      antisymmetric = 1;
      break;
    case GTU:
      if (cc_in_fccr)
        abort ();
      else
        output_asm_insn ("bgu,a %l0", &label);
      antisymmetric = 1;
      break;
    case LEU:
      if (cc_in_fccr)
        abort ();
      else
        output_asm_insn ("bleu,a %l0", &label);
      antisymmetric = 1;
      break;
    case LTU:
      if (cc_in_fccr)
        abort ();
      else
        output_asm_insn ("blu,a %l0", &label);
      antisymmetric = 1;
      break;
    default:
      abort ();
    }

  if (antisymmetric
      && (cc_status.flags & CC_REVERSED))
    output_asm_insn ("orcc %%g0,0,%0\n\torcc %%g0,1,%0\n%l1:", xoperands);
  else
    output_asm_insn ("orcc %%g0,1,%0\n\torcc %%g0,0,%0\n%l1:", xoperands);
  cc_status.flags &= ~CC_IN_FCCR;

  return "";
}
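
/* The generated sequence relies on annulled branches: the first orcc
   sits in the delay slot of the `,a' branch, so it executes only when
   the branch is taken.  For EQ the output is roughly (editorial
   sketch; label name illustrative):

        be,a L1
        orcc %g0,1,%dst         ! executed only if the branch is taken
        orcc %g0,0,%dst         ! fall-through: condition was false
   L1:
  */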

/* Output a delayed branch insn with the delay insn in its
   branch slot.  The delayed branch insn template is in TEMPLATE,
   with operands OPERANDS.  The insn in its delay slot is INSN.

   As a special case, since we know that all memory transfers are via
   ld/st insns, if we see a (MEM (SYMBOL_REF ...)) we divide the memory
   reference around the branch as

	sethi %hi(x),%%g1
	b ...
	ld/st [%g1+%lo(x)],...

   As another special case, we handle loading (SYMBOL_REF ...) and
   other large constants around branches as well:

	sethi %hi(x),%0
	b ...
	or %0,%lo(x),%1

   */
char *
output_delayed_branch (template, operands, insn)
     char *template;
     rtx *operands;
     rtx insn;
{
  extern rtx recog_operand[];
  rtx src = XVECEXP (PATTERN (insn), 0, 1);
  rtx dest = XVECEXP (PATTERN (insn), 0, 0);

  if (GET_CODE (src) == SYMBOL_REF
      || (GET_CODE (src) == CONST_INT
          && !(SMALL_INT (src) || (INTVAL (src) & 0x3ff) == 0)))
    {
      rtx xoperands[2];
      xoperands[0] = dest;
      xoperands[1] = src;

      /* Output the `sethi' insn.  */
      output_asm_insn ("sethi %%hi(%1),%0", xoperands);

      /* Output the branch instruction next.  */
      output_asm_insn (template, operands);

      /* Now output the `or' insn.  */
      output_asm_insn ("or %0,%%lo(%1),%0", xoperands);
    }
  else if ((GET_CODE (src) == MEM
            && CONSTANT_ADDRESS_P (XEXP (src, 0)))
           || (GET_CODE (dest) == MEM
               && CONSTANT_ADDRESS_P (XEXP (dest, 0))))
    {
      rtx xoperands[2];
      char *split_template;
      xoperands[0] = dest;
      xoperands[1] = src;

      /* Output the `sethi' insn.  */
      if (GET_CODE (src) == MEM)
        {
          if (! ((cc_prev_status.flags & CC_KNOW_HI_G1)
                 && cc_prev_status.mdep == XEXP (operands[1], 0)))
            output_asm_insn ("sethi %%hi(%m1),%%g1", xoperands);
          split_template = "ld [%%g1+%%lo(%m1)],%0";
        }
      else
        {
          if (! ((cc_prev_status.flags & CC_KNOW_HI_G1)
                 && cc_prev_status.mdep == XEXP (operands[0], 0)))
            output_asm_insn ("sethi %%hi(%m0),%%g1", xoperands);
          split_template = "st %r1,[%%g1+%%lo(%m0)]";
        }

      /* Output the branch instruction next.  */
      output_asm_insn (template, operands);

      /* Now output the load or store.
         No need to do a CC_STATUS_INIT, because we are branching anyway.  */
      output_asm_insn (split_template, xoperands);
    }
  else
    {
      extern char *insn_template[];
      extern char *(*insn_outfun[]) ();
      int insn_code_number;
      rtx pat = gen_rtx (SET, VOIDmode, dest, src);
      rtx delay_insn = gen_rtx (INSN, VOIDmode, 0, 0, 0, pat, -1, 0, 0);
      int i;
      extern rtx alter_subreg ();
      extern int insn_n_operands[];

      /* Output the branch instruction first.  */
      output_asm_insn (template, operands);

      /* Now recognize the insn which we put in its delay slot.
         We must do this after outputting the branch insn,
         since operands may just be a pointer to `recog_operand'.  */
      insn_code_number = recog (pat, delay_insn);
      if (insn_code_number == -1)
        abort ();

      for (i = 0; i < insn_n_operands[insn_code_number]; i++)
        {
          if (GET_CODE (recog_operand[i]) == SUBREG)
            recog_operand[i] = alter_subreg (recog_operand[i]);
        }

      /* Now get the template for what this insn would
         have been, without the branch.  Its operands are
         exactly the same as they would be, so we don't
         need to do an insn_extract.  */
      template = insn_template[insn_code_number];
      if (template == 0)
        template = (*insn_outfun[insn_code_number]) (recog_operand, delay_insn);
      output_asm_insn (template, recog_operand);
    }
  CC_STATUS_INIT;
  return "";
}

/* Output a newly constructed insn DELAY_INSN.  */
char *
output_delay_insn (delay_insn)
     rtx delay_insn;
{
  char *template;
  extern rtx recog_operand[];
  extern char call_used_regs[];
  extern char *insn_template[];
  extern int insn_n_operands[];
  extern char *(*insn_outfun[]) ();
  extern rtx alter_subreg ();
  int insn_code_number;
  int i;

  /* Now recognize the insn which we put in its delay slot.
     We must do this after outputting the branch insn,
     since operands may just be a pointer to `recog_operand'.  */
  insn_code_number = recog_memoized (delay_insn);
  if (insn_code_number == -1)
    abort ();

  /* Extract the operands of this delay insn.  */
  INSN_CODE (delay_insn) = insn_code_number;
  insn_extract (delay_insn);

  /* It is possible that this insn has not been properly scanned by final
     yet.  If this insn's operands don't appear in the peephole's
     actual operands, then they won't be fixed up by final, so we
     make sure they get fixed up here.  -- This is a kludge.  */
  for (i = 0; i < insn_n_operands[insn_code_number]; i++)
    {
      if (GET_CODE (recog_operand[i]) == SUBREG)
        recog_operand[i] = alter_subreg (recog_operand[i]);
    }

#ifdef REGISTER_CONSTRAINTS
  if (! constrain_operands (insn_code_number))
    abort ();
#endif

  cc_prev_status = cc_status;

  /* Update `cc_status' for this instruction.
     The instruction's output routine may change it further.
     If the output routine for a jump insn needs to depend
     on the cc status, it should look at cc_prev_status.  */

  NOTICE_UPDATE_CC (PATTERN (delay_insn), delay_insn);

  /* Now get the template for what this insn would
     have been, without the branch.  */

  template = insn_template[insn_code_number];
  if (template == 0)
    template = (*insn_outfun[insn_code_number]) (recog_operand, delay_insn);
  output_asm_insn (template, recog_operand);
  return "";
}

/* Output the insn HEAD, keeping OPERANDS protected (wherever they are).
   HEAD comes from the target of some branch, so before we output it,
   we delete it from the target, lest we execute it twice.  The caller
   of this function promises that such code motion is permissible.  */
char *
output_eager_then_insn (head, operands)
     rtx head;
     rtx *operands;
{
  extern rtx alter_subreg ();
  extern int insn_n_operands[];
  extern rtx recog_operand[];
  rtx xoperands[MAX_RECOG_OPERANDS];
  int insn_code_number, i, nbytes;
  rtx nhead;

  /* Micro-hack: run peephole on head if it looks like a good idea.
     Right now there's only one such case worth doing...

     This could be made smarter if the peephole for ``2-insn combine''
     were also made smarter.  */
  if (GET_CODE (PATTERN (head)) == SET
      && REG_P (SET_SRC (PATTERN (head)))
      && REG_P (SET_DEST (PATTERN (head)))
      && (nhead = next_real_insn_no_labels (head))
      && GET_CODE (nhead) == INSN
      && GET_CODE (PATTERN (nhead)) == SET
      && GET_CODE (SET_DEST (PATTERN (nhead))) == CC0
      && (SET_SRC (PATTERN (nhead)) == SET_SRC (PATTERN (head))
          || SET_SRC (PATTERN (nhead)) == SET_DEST (PATTERN (head))))
    /* Something's wrong if this does not fly.  */
    if (! peephole (head))
      abort ();

  /* Save our contents of `operands', since output_delay_insn sets them.  */
  insn_code_number = recog_memoized (head);
  nbytes = insn_n_operands[insn_code_number] * sizeof (rtx);
  bcopy (operands, xoperands, nbytes);

  /* Output the delay insn, and prevent duplication later.  */
  delete_insn (head);
  output_delay_insn (head);

  /* Restore this insn's operands.  */
  bcopy (xoperands, operands, nbytes);
  return "";
}

/* Return the next INSN, CALL_INSN or JUMP_INSN after LABEL;
   or 0, if there is none.  Also return 0 if we cross a label.  */

rtx
next_real_insn_no_labels (label)
     rtx label;
{
  register rtx insn = NEXT_INSN (label);
  register RTX_CODE code;

  while (insn)
    {
      code = GET_CODE (insn);
      if (code == INSN)
        {
          if (GET_CODE (PATTERN (insn)) != CLOBBER
              && GET_CODE (PATTERN (insn)) != USE)
            return insn;
        }
      if (code == CALL_INSN || code == JUMP_INSN)
        return insn;
      if (code == CODE_LABEL)
        return 0;
      insn = NEXT_INSN (insn);
    }

  return 0;
}

int
operands_satisfy_eager_branch_peephole (operands, conditional)
     rtx *operands;
     int conditional;
{
  rtx label;

  if (conditional)
    {
      if (GET_CODE (operands[0]) != IF_THEN_ELSE)
        return 0;

      if (GET_CODE (XEXP (operands[0], 1)) == LABEL_REF)
        label = XEXP (XEXP (operands[0], 1), 0);
      else if (GET_CODE (XEXP (operands[0], 2)) == LABEL_REF)
        label = XEXP (XEXP (operands[0], 2), 0);
      else
        return 0;
    }
  else
    {
      label = operands[0];
    }

  if (LABEL_NUSES (label) == 1)
    {
      rtx prev = PREV_INSN (label);
      while (prev && GET_CODE (prev) == NOTE)
        prev = PREV_INSN (prev);
      if (prev == 0
          || GET_CODE (prev) == BARRIER)
        {
          rtx head = next_real_insn_no_labels (label);

          if (head
              && ! INSN_DELETED_P (head)
              && GET_CODE (head) == INSN
              && GET_CODE (PATTERN (head)) == SET
              && strict_single_insn_op_p (SET_SRC (PATTERN (head)),
                                          GET_MODE (SET_DEST (PATTERN (head))))
              && strict_single_insn_op_p (SET_DEST (PATTERN (head)),
                                          GET_MODE (SET_DEST (PATTERN (head))))
              /* Moves between FP regs and CPU regs are two insns.  */
              && !(GET_CODE (SET_SRC (PATTERN (head))) == REG
                   && GET_CODE (SET_DEST (PATTERN (head))) == REG
                   && (FP_REG_P (SET_SRC (PATTERN (head)))
                       != FP_REG_P (SET_DEST (PATTERN (head))))))
            {
              if (conditional == 2)
                return (GET_CODE (operands[1]) != PC
                        && safe_insn_src_p (operands[2], VOIDmode)
                        && strict_single_insn_op_p (operands[2], VOIDmode)
                        && operand_clobbered_before_used_after (operands[1], label));
              return 1;
            }
        }
    }

  if (conditional == 1
      && GET_CODE (operands[1]) != PC
      && safe_insn_src_p (operands[2], VOIDmode)
      && strict_single_insn_op_p (operands[2], VOIDmode)
      && operand_clobbered_before_used_after (operands[1], label))
    return 1;

  return 0;
}