/* Subroutines for insn-output.c for Pyramid 90 Series.
   Copyright (C) 1989 Free Software Foundation, Inc.

This file is part of GNU CC.

GNU CC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 1, or (at your option)
any later version.

GNU CC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GNU CC; see the file COPYING.  If not, write to
the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.  */

/* Some output-actions in pyr.md need these.  */
#include <stdio.h>

extern FILE *asm_out_file;
#include "tree.h"

/*
 * Do FUNCTION_ARG.
 * This cannot be defined as a macro on pyramids, because Pyramid Technology's
 * C compiler dies on (several equivalent definitions of) this macro.
 * The only way around this cc bug was to make this a function.
 * While it would be possible to use a macro version for gcc, it seems
 * more reliable to have a single version of the code.
 */
void *
pyr_function_arg (cum, mode, type, named)
     CUMULATIVE_ARGS cum;
     enum machine_mode mode;
     tree type;
     int named;
{
  return (void *) (FUNCTION_ARG_HELPER (cum, mode, type, named));
}
\f
/* Do the hard part of PARAM_SAFE_FOR_REG_P.
 * This cannot be defined as a macro on pyramids, because Pyramid Technology's
 * C compiler dies on (several equivalent definitions of) this macro.
 * The only way around this cc bug was to make this a function.
 */
int
inner_param_safe_helper (type)
     tree type;
{
  return (INNER_PARAM_SAFE_HELPER (type));
}
\f

/* Return 1 if OP is a non-indexed operand of mode MODE.
   This is either a register reference, a memory reference,
   or a constant.  In the case of a memory reference, the address
   is checked to make sure it isn't indexed.

   Register and memory references must have mode MODE in order to be valid,
   but some constants have no machine mode and are valid for any mode.

   If MODE is VOIDmode, OP is checked for validity for whatever mode
   it has.

   The main use of this function is as a predicate in match_operand
   expressions in the machine description.

   It is useful to compare this with general_operand ().  They should
   be identical except for one line.

   This function seems necessary because of the non-orthogonality of
   Pyramid insns.
   For any 2-operand insn, and any combination of operand modes,
   if indexing is valid for the insn's second operand, it is invalid
   for the first operand to be indexed.  */

extern int volatile_ok;

int
nonindexed_operand (op, mode)
     register rtx op;
     enum machine_mode mode;
{
  register RTX_CODE code = GET_CODE (op);
  int mode_altering_drug = 0;

  if (mode == VOIDmode)
    mode = GET_MODE (op);

  if (CONSTANT_P (op))
    return ((GET_MODE (op) == VOIDmode || GET_MODE (op) == mode)
	    && LEGITIMATE_CONSTANT_P (op));

  /* Except for certain constants with VOIDmode, already checked for,
     OP's mode must match MODE if MODE specifies a mode.  */

  if (GET_MODE (op) != mode)
    return 0;

  while (code == SUBREG)
    {
      op = SUBREG_REG (op);
      code = GET_CODE (op);
#if 0
      /* No longer needed, since (SUBREG (MEM...))
	 will load the MEM into a reload reg in the MEM's own mode.  */
      mode_altering_drug = 1;
#endif
    }
  if (code == REG)
    return 1;
  if (code == CONST_DOUBLE)
    return LEGITIMATE_CONSTANT_P (op);
  if (code == MEM)
    {
      register rtx y = XEXP (op, 0);
      if (! volatile_ok && MEM_VOLATILE_P (op))
	return 0;
      GO_IF_NONINDEXED_ADDRESS (y, win);
    }
  return 0;

 win:
  if (mode_altering_drug)
    return ! mode_dependent_address_p (XEXP (op, 0));
  return 1;
}
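
/* Illustration (a sketch; the pattern below is hypothetical, not taken
   from pyr.md): used as a predicate in

     (match_operand:SI 0 "nonindexed_operand" "g")

   this accepts (reg:SI 2), (const_int 4), and a based reference such as
   (mem:SI (plus:SI (reg:SI 2) (const_int 4))), but rejects an indexed
   reference like (mem:SI (plus:SI (mult:SI (reg:SI 3) (const_int 4))
   (reg:SI 2))), assuming GO_IF_NONINDEXED_ADDRESS treats a MULT term as
   indexing, as has_index below does.  */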

/* Return non-zero if the rtx OP has an immediate component.  An
   immediate component or additive term equal to zero is rejected
   due to assembler problems.  */

int
has_direct_base (op)
     rtx op;
{
  if ((CONSTANT_ADDRESS_P (op)
       && op != const0_rtx)
      || (GET_CODE (op) == PLUS
	  && ((CONSTANT_ADDRESS_P (XEXP (op, 1))
	       && XEXP (op, 1) != const0_rtx)
	      || (CONSTANT_ADDRESS_P (XEXP (op, 0))
		  && XEXP (op, 0) != const0_rtx))))
    return 1;

  return 0;
}

/* Return non-zero if the address OP is indexed, i.e. contains a
   MULT term as one side of a PLUS.  */

int
has_index (op)
     rtx op;
{
  if (GET_CODE (op) == PLUS
      && (GET_CODE (XEXP (op, 0)) == MULT
	  || (GET_CODE (XEXP (op, 1)) == MULT)))
    return 1;
  else
    return 0;
}
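
/* Worked examples (illustrative): has_direct_base accepts
   (plus (reg) (const_int 4)) but rejects both (const_int 0) and
   (plus (reg) (const_int 0)), since a zero term runs into the
   assembler problems noted above; has_index accepts
   (plus (mult (reg) (const_int 4)) (reg)) and rejects
   (plus (reg) (const_int 4)).  */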

int swap_operands;

/* weird_memory_memory -- return 1 if OP0 and OP1 can be compared (or
   exchanged with xchw) with one instruction.  If the operands need to
   be swapped, set the global variable SWAP_OPERANDS.  This function
   silently assumes that both OP0 and OP1 are valid memory references.  */

int
weird_memory_memory (op0, op1)
     rtx op0, op1;
{
  RTX_CODE code0, code1;

  op0 = XEXP (op0, 0);
  op1 = XEXP (op1, 0);
  code0 = GET_CODE (op0);
  code1 = GET_CODE (op1);

  swap_operands = 0;

  if (code1 == REG || code1 == SUBREG)
    {
      return 1;
    }
  if (code0 == REG || code0 == SUBREG)
    {
      swap_operands = 1;
      return 1;
    }
  if (has_direct_base (op0) && has_direct_base (op1))
    {
      if (has_index (op1))
	{
	  if (has_index (op0))
	    return 0;
	  swap_operands = 1;
	}

      return 1;
    }
  return 0;
}
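
/* For instance (illustrative): if one address is a plain register, the
   pair is always handled, swapping when that address is the first one;
   if both addresses have a direct base and only op1 is indexed, the
   operands are marked for swapping so the indexed reference lands in
   the position the insn allows; if both are indexed, no single
   instruction works and the result is 0.  */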

int
signed_comparison (x, mode)
     rtx x;
     enum machine_mode mode;
{
  return ! TRULY_UNSIGNED_COMPARE_P (GET_CODE (x));
}

extern rtx force_reg ();

/* Operands and mode of a delayed compare, presumably recorded by the
   test and compare patterns in pyr.md and consumed by
   extend_and_branch below.  */
rtx test_op0, test_op1;
enum machine_mode test_mode;

/* Sign-extend or zero-extend constant X from FROM_MODE to TO_MODE.  */

rtx
extend_const (x, extop, from_mode, to_mode)
     rtx x;
     RTX_CODE extop;
     enum machine_mode from_mode, to_mode;
{
  int val;
  int negative;
  if (from_mode == to_mode)
    return x;
  if (GET_CODE (x) != CONST_INT)
    abort ();
  val = INTVAL (x);
  negative = val & (1 << (GET_MODE_BITSIZE (from_mode) - 1));
  if (GET_MODE_BITSIZE (from_mode) == HOST_BITS_PER_INT)
    abort ();
  if (negative && extop == SIGN_EXTEND)
    val = val | ((-1) << (GET_MODE_BITSIZE (from_mode)));
  else
    val = val & ~((-1) << (GET_MODE_BITSIZE (from_mode)));
  if (GET_MODE_BITSIZE (to_mode) == HOST_BITS_PER_INT)
    return gen_rtx (CONST_INT, VOIDmode, val);
  return gen_rtx (CONST_INT, VOIDmode,
		  val & ~((-1) << (GET_MODE_BITSIZE (to_mode))));
}
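
/* Worked example (illustrative): extending 0xff from QImode to SImode.
   The sign bit (1 << 7) is set, so with extop == SIGN_EXTEND the value
   becomes 0xff | (-1 << 8) == -1, while with ZERO_EXTEND the high bits
   are masked off and the result stays 255.  */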

rtx
ensure_extended (op, extop, from_mode)
     rtx op;
     RTX_CODE extop;
     enum machine_mode from_mode;
{
  if (GET_CODE (op) == CONST_INT)
    return extend_const (op, extop, from_mode, SImode);
  else
    return force_reg (SImode, gen_rtx (extop, SImode, op));
}

/* Emit rtl for a branch, as well as any delayed (integer) compare insns.
   The compare insn to perform is determined by the global variables
   test_op0 and test_op1.  */

void
extend_and_branch (extop)
     RTX_CODE extop;
{
  rtx op0, op1;
  RTX_CODE code0, code1;

  op0 = test_op0, op1 = test_op1;
  if (op0 == 0)
    return;

  code0 = GET_CODE (op0);
  if (op1 != 0)
    code1 = GET_CODE (op1);
  test_op0 = test_op1 = 0;

  if (op1 == 0)
    {
      op0 = ensure_extended (op0, extop, test_mode);
      emit_insn (gen_rtx (SET, VOIDmode, cc0_rtx, op0));
    }
  else
    {
      if (CONSTANT_P (op0) && CONSTANT_P (op1))
	{
	  op0 = ensure_extended (op0, extop, test_mode);
	  op1 = ensure_extended (op1, extop, test_mode);
	}
      else if (extop == ZERO_EXTEND && test_mode == HImode)
	{
	  /* Pyramids have no unsigned "cmphi" instructions.  We need to
	     zero extend unsigned halfwords into temporary registers.  */
	  op0 = ensure_extended (op0, extop, test_mode);
	  op1 = ensure_extended (op1, extop, test_mode);
	}
      else if (CONSTANT_P (op0))
	{
	  op0 = ensure_extended (op0, extop, test_mode);
	  op1 = ensure_extended (op1, extop, test_mode);
	}
      else if (CONSTANT_P (op1))
	{
	  op1 = ensure_extended (op1, extop, test_mode);
	  op0 = ensure_extended (op0, extop, test_mode);
	}
      else if ((code0 == REG || code0 == SUBREG)
	       && (code1 == REG || code1 == SUBREG))
	{
	  /* I could do this case without extension, by using the virtual
	     register address (but that would lose for global regs).  */
	  op0 = ensure_extended (op0, extop, test_mode);
	  op1 = ensure_extended (op1, extop, test_mode);
	}
      else if (code0 == MEM && code1 == MEM)
	{
	  /* Load into a reg if the address combination can't be handled
	     directly.  */
	  if (! weird_memory_memory (op0, op1))
	    op0 = force_reg (test_mode, op0);
	}

      emit_insn (gen_rtx (SET, VOIDmode, cc0_rtx,
			  gen_rtx (COMPARE, VOIDmode, op0, op1)));
    }
}
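
/* Presumably (a sketch of the intended protocol, not taken from pyr.md
   itself): a "cmphi" pattern records its two operands in
   test_op0/test_op1 and HImode in test_mode, and the following signed
   branch pattern calls extend_and_branch (SIGN_EXTEND) while an
   unsigned branch calls extend_and_branch (ZERO_EXTEND), so the
   compare is emitted only once the required extension is known.  */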

/* Return non-zero if the two single-word moves, with operands[0]
   and operands[1] for the first single-word move and operands[2]
   and operands[3] for the second one, can be combined into a
   single double-word move.

   The criterion is whether the operands are in consecutive memory cells,
   registers, etc.  */

int
movdi_possible (operands)
     rtx operands[];
{
  int cnst_diff0, cnst_diff1;
  RTX_CODE code0 = GET_CODE (operands[0]);
  RTX_CODE code1 = GET_CODE (operands[1]);

  /* Don't dare to combine (possibly overlapping) memory -> memory moves.  */
  /* It would be possible to detect the cases where we dare, by using
     constant_diff (operands[0], operands[1])!!!  */
  if (code0 == MEM && code1 == MEM)
    return 0;

  cnst_diff0 = consecutive_operands (operands[0], operands[2]);
  if (cnst_diff0 == 0)
    return 0;

  cnst_diff1 = consecutive_operands (operands[1], operands[3]);
  if (cnst_diff1 == 0)
    return 0;

  if (cnst_diff0 & cnst_diff1)
    {
      /* The source and destination operands are consecutive.  */

      /* If the first move writes into the source of the second move,
	 we cannot combine.  */
      if ((code0 == REG
	   && reg_overlap_mentioned_p (operands[0], operands[3]))
	  || (code0 == SUBREG
	      && subreg_overlap_mentioned_p (operands[0], operands[3])))
	return 0;

      if (cnst_diff0 & 1)
	/* operands[0],[1] have higher addresses than operands[2],[3].  */
	swap_operands = 0;
      else
	/* operands[0],[1] have lower addresses than operands[2],[3].  */
	swap_operands = 1;
      return 1;
    }
  return 0;
}
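
/* For example (illustrative): with

     operands[0] = (reg:SI 3)
     operands[1] = (mem:SI (plus:SI (reg:SI 8) (const_int 4)))
     operands[2] = (reg:SI 2)
     operands[3] = (mem:SI (reg:SI 8))

   both consecutive_operands calls return 1 (reg 3 follows reg 2, and
   the first memory address lies one word above the second), so the two
   moves can become one double-word move with swap_operands left 0.  */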

/* Like reg_overlap_mentioned_p, but accepts a subreg rtx instead
   of a reg.  */

int
subreg_overlap_mentioned_p (subreg, x)
     rtx subreg, x;
{
  rtx reg = SUBREG_REG (subreg);
  int regno = REGNO (reg) + SUBREG_WORD (subreg);
  int endregno = regno + HARD_REGNO_NREGS (regno, GET_MODE (subreg));
  return refers_to_regno_p (regno, endregno, x, 0);
}

/* Return 1 if OP0 is a consecutive operand to OP1, 2 if OP1 is a
   consecutive operand to OP0, and 3 if both orderings work (as for a
   pair of identical all-zero or all-ones constants).

   This function is used to determine if addresses are consecutive,
   and therefore possible to combine to fewer instructions.  */

int
consecutive_operands (op0, op1)
     rtx op0, op1;
{
  RTX_CODE code0, code1;
  int cnst_diff;
  int regno_off0, regno_off1;

  code0 = GET_CODE (op0);
  code1 = GET_CODE (op1);

  regno_off0 = 0;
  if (code0 == SUBREG)
    {
      if (GET_MODE_SIZE (GET_MODE (SUBREG_REG (op0))) <= UNITS_PER_WORD)
	return 0;
      regno_off0 = SUBREG_WORD (op0);
      op0 = SUBREG_REG (op0);
      code0 = REG;
    }

  regno_off1 = 0;
  if (code1 == SUBREG)
    {
      if (GET_MODE_SIZE (GET_MODE (SUBREG_REG (op1))) <= UNITS_PER_WORD)
	return 0;
      regno_off1 = SUBREG_WORD (op1);
      op1 = SUBREG_REG (op1);
      code1 = REG;
    }

  if (code0 != code1)
    return 0;

  switch (code0)
    {
    case CONST_INT:
      /* Cannot permit any symbolic constants, even if the consecutive
	 operand is 0, since a movl really performs sign extension.  */
      if (code1 != CONST_INT)
	return 0;
      if ((INTVAL (op0) == 0 && INTVAL (op1) == 0)
	  || (INTVAL (op0) == -1 && INTVAL (op1) == -1))
	return 3;
      if ((INTVAL (op0) == 0 && INTVAL (op1) > 0)
	  || (INTVAL (op0) == -1 && INTVAL (op1) < 0))
	return 2;
      if ((INTVAL (op1) == 0 && INTVAL (op0) > 0)
	  || (INTVAL (op1) == -1 && INTVAL (op0) < 0))
	return 1;
      break;

    case REG:
      regno_off0 = REGNO (op0) + regno_off0;
      regno_off1 = REGNO (op1) + regno_off1;

      cnst_diff = regno_off0 - regno_off1;
      if (cnst_diff == 1)
	{
	  /* movl with the highest numbered parameter (local) register as
	     source or destination, doesn't wrap to the lowest numbered local
	     (temporary) register.  */

	  if (regno_off0 % 16 != 0)
	    return 1;
	  else
	    return 0;
	}
      else if (cnst_diff == -1)
	{
	  if (regno_off1 % 16 != 0)
	    return 2;
	  else
	    return 0;
	}
      break;

    case MEM:
      op0 = XEXP (op0, 0);
      op1 = XEXP (op1, 0);
      if (GET_CODE (op0) == CONST)
	op0 = XEXP (op0, 0);
      if (GET_CODE (op1) == CONST)
	op1 = XEXP (op1, 0);

      cnst_diff = constant_diff (op0, op1);
      if (cnst_diff)
	{
	  if (cnst_diff == 4)
	    return 1;
	  else if (cnst_diff == -4)
	    return 2;
	}
      break;
    }
  return 0;
}
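
/* For instance (illustrative): consecutive_operands ((reg:SI 3),
   (reg:SI 2)) is 1, since register 3 immediately follows register 2
   and 3 % 16 != 0, so the pair doesn't straddle a 16-register group;
   consecutive_operands ((mem:SI (reg:SI 8)),
   (mem:SI (plus:SI (reg:SI 8) (const_int 4)))) is 2, the first address
   lying one word below the second.  */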

/* Return the constant difference of the rtx expressions OP0 and OP1,
   or 0 if they don't have a constant difference.

   This function is used to determine if addresses are consecutive,
   and therefore possible to combine to fewer instructions.  */

int
constant_diff (op0, op1)
     rtx op0, op1;
{
  RTX_CODE code0, code1;
  int cnst_diff;

  code0 = GET_CODE (op0);
  code1 = GET_CODE (op1);

  if (code0 != code1)
    {
      if (code0 == PLUS)
	{
	  if (GET_CODE (XEXP (op0, 1)) == CONST_INT
	      && rtx_equal_p (op1, XEXP (op0, 0)))
	    return INTVAL (XEXP (op0, 1));
	}
      else if (code1 == PLUS)
	{
	  if (GET_CODE (XEXP (op1, 1)) == CONST_INT
	      && rtx_equal_p (op0, XEXP (op1, 0)))
	    return -INTVAL (XEXP (op1, 1));
	}
      return 0;
    }

  if (code0 == CONST_INT)
    return INTVAL (op0) - INTVAL (op1);

  if (code0 == PLUS)
    {
      cnst_diff = constant_diff (XEXP (op0, 0), XEXP (op1, 0));
      if (cnst_diff)
	return (rtx_equal_p (XEXP (op0, 1), XEXP (op1, 1)))
	  ? cnst_diff : 0;
      cnst_diff = constant_diff (XEXP (op0, 1), XEXP (op1, 1));
      if (cnst_diff)
	return (rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0)))
	  ? cnst_diff : 0;
    }

  return 0;
}
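
/* Worked examples (illustrative):
   constant_diff ((plus (reg 8) (const_int 12)),
                  (plus (reg 8) (const_int 8))) is 4, via the recursive
   PLUS case (equal bases, constant terms differing by 4);
   constant_diff ((plus (reg 8) (const_int 4)), (reg 8)) is likewise 4,
   via the mixed PLUS/non-PLUS case.  */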

/* Return non-zero if the register OP is known to already hold a value
   sign-extended from FROM_MODE, judging from the insns preceding INSN:
   some earlier insn set OP either via a SIGN_EXTEND from FROM_MODE or
   by a load of a FROM_MODE memory operand (which apparently
   sign-extends on the Pyramid).  */

int
already_sign_extended (insn, from_mode, op)
     rtx insn;
     enum machine_mode from_mode;
     rtx op;
{
  rtx xinsn, xdest, xsrc;

  for (;;)
    {
      insn = PREV_INSN (insn);
      if (insn == 0)
	return 0;
      if (GET_CODE (insn) == NOTE || GET_CODE (insn) == JUMP_INSN)
	continue;
      /* A call can't clobber OP if OP lives in a call-saved register.  */
      if (GET_CODE (insn) == CALL_INSN && ! call_used_regs[REGNO (op)])
	continue;
      if (GET_CODE (insn) != INSN)
	return 0;
      xinsn = PATTERN (insn);

      if (GET_CODE (xinsn) != SET)
	return 0;

      xdest = SET_DEST (xinsn);
      xsrc = SET_SRC (xinsn);

      if (GET_CODE (xdest) == SUBREG)
	abort ();

      if ( ! REG_P (xdest))
	continue;

      if (REGNO (op) == REGNO (xdest)
	  && ((GET_CODE (xsrc) == SIGN_EXTEND
	       && GET_MODE (XEXP (xsrc, 0)) == from_mode)
	      || (GET_CODE (xsrc) == MEM
		  && GET_MODE (xsrc) == from_mode)))
	return 1;

      /* The register is modified by another operation.  */
      if (reg_overlap_mentioned_p (xdest, op))
	return 0;
    }
}

/* Output the insns needed to move the double-word value operands[1]
   into operands[0].  A CONST_DOUBLE whose two words can be recovered
   from one word by movl's sign extension is moved with a single movl;
   other CONST_DOUBLEs need a movw for each word.  */

char *
output_move_double (operands)
     rtx *operands;
{
  if (GET_CODE (operands[1]) == CONST_DOUBLE)
    {
      if (GET_MODE_CLASS (GET_MODE (operands[1])) == MODE_INT)
	{
	  /* In an integer, the low-order word is in CONST_DOUBLE_LOW.  */
	  rtx const_op = operands[1];
	  if ((CONST_DOUBLE_HIGH (const_op) == 0
	       && CONST_DOUBLE_LOW (const_op) >= 0)
	      || (CONST_DOUBLE_HIGH (const_op) == -1
		  && CONST_DOUBLE_LOW (const_op) < 0))
	    {
	      operands[1] = gen_rtx (CONST_INT, VOIDmode,
				     CONST_DOUBLE_LOW (const_op));
	      return "movl %1,%0";
	    }
	  operands[1] = gen_rtx (CONST_INT, VOIDmode,
				 CONST_DOUBLE_HIGH (const_op));
	  output_asm_insn ("movw %1,%0", operands);
	  operands[0] = gen_rtx (REG, SImode, REGNO (operands[0]) + 1);
	  operands[1] = gen_rtx (CONST_INT, VOIDmode,
				 CONST_DOUBLE_LOW (const_op));
	  return "movw %1,%0";
	}
      else
	{
	  /* In a real, the low-address word is in CONST_DOUBLE_LOW.  */
	  rtx const_op = operands[1];
	  if ((CONST_DOUBLE_LOW (const_op) == 0
	       && CONST_DOUBLE_HIGH (const_op) >= 0)
	      || (CONST_DOUBLE_LOW (const_op) == -1
		  && CONST_DOUBLE_HIGH (const_op) < 0))
	    {
	      operands[1] = gen_rtx (CONST_INT, VOIDmode,
				     CONST_DOUBLE_HIGH (const_op));
	      return "movl %1,%0";
	    }
	  operands[1] = gen_rtx (CONST_INT, VOIDmode,
				 CONST_DOUBLE_LOW (const_op));
	  output_asm_insn ("movw %1,%0", operands);
	  operands[0] = gen_rtx (REG, SImode, REGNO (operands[0]) + 1);
	  operands[1] = gen_rtx (CONST_INT, VOIDmode,
				 CONST_DOUBLE_HIGH (const_op));
	  return "movw %1,%0";
	}
    }

  return "movl %1,%0";
}
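
/* For instance (illustrative): a DImode move of the integer constant 5
   emits a single "movl" with $5, since the high word (0) is just the
   sign extension of the low word 5; a constant like 2^32 (high word 1,
   low word 0) instead emits "movw" of 1 into the first register and
   "movw" of 0 into the following one.  */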

/* Output a shift insn, after having reduced an integer shift count
   modulo MOD to avoid assembler warnings.  */

char *
output_shift (pattern, op2, mod)
     char *pattern;
     rtx op2;
     int mod;
{
  if (GET_CODE (op2) == CONST_INT)
    {
      int cnt = INTVAL (op2) % mod;
      if (cnt == 0)
	{
	  /* A shift by zero is a no-op; emit nothing and keep the
	     old condition codes.  */
	  cc_status = cc_prev_status;
	  return "";
	}
      /* Note: this rebinds only the local OP2; the caller's operand
	 is left as it was.  */
      op2 = gen_rtx (CONST_INT, VOIDmode, cnt);
    }
  return pattern;
}
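
/* Worked example (illustrative): with mod == 32, a shift count of 37
   reduces to 5, while a count of 32 reduces to 0, so the insn is
   suppressed entirely and cc_status is restored from cc_prev_status.  */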

/* Return non-zero if the code of this rtx pattern is a relop.  */

int
relop (op, mode)
     rtx op;
     enum machine_mode mode;
{
  switch (GET_CODE (op))
    {
    case EQ:
    case NE:
    case LT:
    case LE:
    case GE:
    case GT:
    case LTU:
    case LEU:
    case GEU:
    case GTU:
      return 1;
    }
  return 0;
}

void
notice_update_cc (EXP, INSN)
     rtx EXP, INSN;
{
  switch (GET_CODE (EXP))
    {
    case SET:
      switch (GET_CODE (SET_DEST (EXP)))
	{
	case CC0:
	  cc_status.mdep = 0;
	  cc_status.flags = 0;
	  cc_status.value1 = 0;
	  cc_status.value2 = SET_SRC (EXP);
	  break;

	case PC:
	  break;

	case REG:
	  switch (GET_CODE (SET_SRC (EXP)))
	    {
	    case CALL:
	      goto call;
	    case MEM:
	      if (GET_MODE (SET_SRC (EXP)) == QImode
		  || GET_MODE (SET_SRC (EXP)) == HImode)
		{
		  cc_status.mdep = 0;
		  cc_status.flags = CC_NO_OVERFLOW;
		  cc_status.value1 = SET_DEST (EXP);
		  cc_status.value2 = SET_SRC (EXP);
		  break;
		}
	      /* else: Fall through.  */
	    case CONST_INT:
	    case SYMBOL_REF:
	    case LABEL_REF:
	    case CONST:
	    case CONST_DOUBLE:
	    case REG:
	      if (cc_status.value1
		  && reg_overlap_mentioned_p (SET_DEST (EXP),
					      cc_status.value1))
		cc_status.value1 = 0;
	      if (cc_status.value2
		  && reg_overlap_mentioned_p (SET_DEST (EXP),
					      cc_status.value2))
		cc_status.value2 = 0;
	      break;

	    case UDIV:
	    case UMOD:
	      cc_status.mdep = CC_VALID_FOR_UNSIGNED;
	      cc_status.flags = CC_NO_OVERFLOW;
	      cc_status.value1 = SET_DEST (EXP);
	      cc_status.value2 = SET_SRC (EXP);
	      break;
	    default:
	      cc_status.mdep = 0;
	      cc_status.flags = CC_NO_OVERFLOW;
	      cc_status.value1 = SET_DEST (EXP);
	      cc_status.value2 = SET_SRC (EXP);
	      break;
	    }
	  break;

	case MEM:
	  switch (GET_CODE (SET_SRC (EXP)))
	    {
	    case REG:
	      if (GET_MODE (SET_SRC (EXP)) == QImode
		  || GET_MODE (SET_SRC (EXP)) == HImode)
		{
		  cc_status.flags = CC_NO_OVERFLOW;
		  cc_status.value1 = SET_DEST (EXP);
		  cc_status.value2 = SET_SRC (EXP);
		  cc_status.mdep = 0;
		  break;
		}
	      /* else: Fall through.  */
	    case CONST_INT:
	    case SYMBOL_REF:
	    case LABEL_REF:
	    case CONST:
	    case CONST_DOUBLE:
	    case MEM:
	      /* Need to forget cc_status about memory positions each
		 time a memory store is made, even if the memory store
		 insn in question doesn't modify the condition codes.  */
	      if (cc_status.value1
		  && GET_CODE (cc_status.value1) == MEM)
		cc_status.value1 = 0;
	      if (cc_status.value2
		  && GET_CODE (cc_status.value2) == MEM)
		cc_status.value2 = 0;
	      break;
	    case SIGN_EXTEND:
	    case FLOAT_EXTEND:
	    case FLOAT_TRUNCATE:
	    case FLOAT:
	    case FIX:
	      cc_status.flags = CC_NO_OVERFLOW;
	      cc_status.value1 = SET_DEST (EXP);
	      cc_status.value2 = SET_SRC (EXP);
	      cc_status.mdep = 0;
	      break;

	    default:
	      abort ();
	    }
	  break;

	default:
	  abort ();
	}
      break;

    case CALL:
    call:
      CC_STATUS_INIT;
      break;
#if 0
      /* This more selective alternative is unreachable as the code
	 stands; the CC_STATUS_INIT above forgets everything instead.

	 Do calls preserve the condition codes?  (At least forget
	 cc_status expressions if they refer to registers
	 not preserved across calls.  Also forget expressions
	 about memory contents.)  */
      if (cc_status.value1
	  && (refers_to_regno_p (PYR_TREG (0), PYR_TREG (15),
				 cc_status.value1, 0)
	      || GET_CODE (cc_status.value1) == MEM))
	cc_status.value1 = 0;
      if (cc_status.value2
	  && (refers_to_regno_p (PYR_TREG (0), PYR_TREG (15),
				 cc_status.value2, 0)
	      || GET_CODE (cc_status.value2) == MEM))
	cc_status.value2 = 0;
      break;
#endif

    default:
      CC_STATUS_INIT;
    }
}

/* Restore the cc_status from before the current insn, then forget any
   remembered value that depends on OP.  */

void
forget_cc_if_dependent (op)
     rtx op;
{
  cc_status = cc_prev_status;
  if (cc_status.value1 && reg_overlap_mentioned_p (op, cc_status.value1))
    cc_status.value1 = 0;
  if (cc_status.value2 && reg_overlap_mentioned_p (op, cc_status.value2))
    cc_status.value2 = 0;
}