Commit | Line | Data |
---|---|---|
dea92547 KM |
1 | /* |
2 | * Copyright (c) 1992 Regents of the University of California. | |
3 | * All rights reserved. | |
4 | * | |
5 | * This code is derived from software contributed to Berkeley by | |
6 | * Digital Equipment Corporation and Ralph Campbell. | |
7 | * | |
8 | * %sccs.include.redist.c% | |
9 | * | |
10 | * Copyright (C) 1989 Digital Equipment Corporation. | |
11 | * Permission to use, copy, modify, and distribute this software and | |
12 | * its documentation for any purpose and without fee is hereby granted, | |
13 | * provided that the above copyright notice appears in all copies. | |
14 | * Digital Equipment Corporation makes no representations about the | |
15 | * suitability of this software for any purpose. It is provided "as is" | |
16 | * without express or implied warranty. | |
17 | * | |
18 | * from: $Header: /sprite/src/kernel/mach/ds3100.md/RCS/loMem.s, | |
19 | * v 1.1 89/07/11 17:55:04 nelson Exp $ SPRITE (DECWRL) | |
20 | * from: $Header: /sprite/src/kernel/mach/ds3100.md/RCS/machAsm.s, | |
21 | * v 9.2 90/01/29 18:00:39 shirriff Exp $ SPRITE (DECWRL) | |
22 | * from: $Header: /sprite/src/kernel/vm/ds3100.md/vmPmaxAsm.s, | |
23 | * v 1.1 89/07/10 14:27:41 nelson Exp $ SPRITE (DECWRL) | |
24 | * | |
25 | * @(#)locore.s 7.1 (Berkeley) %G% | |
26 | */ | |
27 | ||
28 | /* | |
29 | * Contains code that is the first executed at boot time plus | |
30 | * assembly language support routines. | |
31 | */ | |
32 | ||
33 | #include "errno.h" | |
34 | ||
35 | #include "machine/regdef.h" | |
36 | #include "machine/param.h" | |
37 | #include "machine/vmparam.h" | |
38 | #include "machine/psl.h" | |
39 | #include "machine/reg.h" | |
40 | #include "machine/machAsmDefs.h" | |
41 | #include "pte.h" | |
42 | #include "assym.h" | |
43 | ||
/*
 * Amount to take off of the stack for the benefit of the debugger:
 * four argument save slots, plus one word each for the old ra and fp.
 */
#define START_FRAME	((4 * 4) + 4 + 4)
48 | ||
/*
 * Kernel entry point at boot time: disable interrupts, invalidate the
 * whole TLB, set up a temporary stack, call mach_init() then main(),
 * and finally drop into user mode to run icode at address zero.
 * NOTE: .set noreorder -- every instruction after a branch below is a
 * hand-filled branch delay slot.
 */
	.globl	start
start:
	.set	noreorder
	mtc0	zero, MACH_COP_0_STATUS_REG	# Disable interrupts
	li	t1, MACH_RESERVED_ADDR		# invalid address
	mtc0	t1, MACH_COP_0_TLB_HI		# Mark entry high as invalid
	mtc0	zero, MACH_COP_0_TLB_LOW	# Zero out low entry.
/*
 * Clear the TLB (just to be safe).
 * Align the starting value (t1), the increment (t2) and the upper bound (t3).
 */
	move	t1, zero
	li	t2, 1 << VMMACH_TLB_INDEX_SHIFT
	li	t3, VMMACH_NUM_TLB_ENTRIES << VMMACH_TLB_INDEX_SHIFT
1:
	mtc0	t1, MACH_COP_0_TLB_INDEX	# Set the index register.
	addu	t1, t1, t2			# Increment index.
	bne	t1, t3, 1b			# NB: always executes next
	tlbwi					# Write the TLB entry (BDSLOT).

	li	sp, MACH_CODE_START - START_FRAME	# temporary boot stack
	la	gp, _gp
	sw	zero, START_FRAME - 4(sp)	# Zero out old ra for debugger
	jal	mach_init			# mach_init(argc, argv, envp)
	sw	zero, START_FRAME - 8(sp)	# Zero out old fp for debugger (BDSLOT)

	li	t0, MACH_SR_COP_1_BIT		# Disable interrupts and
	mtc0	t0, MACH_COP_0_STATUS_REG	# enable the coprocessor (FPU)
	li	sp, KERNELSTACK - START_FRAME	# switch to standard stack
	mfc0	t0, MACH_COP_0_PRID		# read processor ID register
	cfc1	t1, MACH_FPC_ID			# read FPU ID register
	sw	t0, cpu				# save PRID register
	sw	t1, fpu				# save FPU ID register
	jal	main				# main()
	nop

/* proc[1] == /etc/init now running here; run icode */
	li	v0, PSL_USERSET
	mtc0	v0, MACH_COP_0_STATUS_REG	# switch to user mode
	j	zero				# icode is at address zero
	rfe					# restore from exception (BDSLOT)
	.set	reorder
91 | ||
92 | /* | |
93 | * This code is copied to user data space as the first program to run. | |
94 | * Basically, it just calls execve(); | |
95 | */ | |
/*
 * The word constants below are byte offsets from the start of 'icode',
 * which is copied to user virtual address zero -- so offsets double as
 * absolute user addresses.  NOTE(review): they assume the instruction
 * sequence is exactly 9 words; re-count if this code changes.
 */
	.globl	icode
icode:
	.set	noreorder
	li	a1, (9 * 4)		# address of 'icode_argv'
	addu	a0, a1, (3 * 4)		# address of 'icode_fname'
	move	a2, zero		# no environment
	li	v0, 59			# code for execve system call
	syscall
	li	v0, 1			# code for exit system call
	syscall				# execve failed: call exit()
1:	b	1b			# loop if exit returns
	nop
	.set	reorder
icode_argv:
	.word	(12 * 4)		# address of 'icode_fname'
	.word	(15 * 4)		# address of 'icodeEnd'
	.word	0			# NULL terminator for argv
icode_fname:
	.asciiz	"/sbin/init"		# occupies 3 words
	.align	2
	.globl	icodeEnd
icodeEnd:
118 | ||
	.sdata
	.align	2
	.globl	szicode
szicode:
	# size of icode: 9 instruction words + 3 argv words + 3 fname words
	.word	(9 + 3 + 3) * 4		# compute icodeEnd - icode
	.text
125 | ||
126 | /* | |
127 | * Primitives | |
128 | */ | |
129 | ||
130 | /* | |
131 | * This table is indexed by u.u_pcb.pcb_onfault in trap(). | |
132 | * The reason for using this table rather than storing an address in | |
133 | * u.u_pcb.pcb_onfault is simply to make the code faster. | |
134 | */ | |
/*
 * Fault-recovery dispatch table.  A routine that may fault stores one of
 * the small indices below into u.u_pcb.pcb_onfault; trap() uses the index
 * to look up the recovery address here instead of storing a full pointer.
 */
	.globl	onfault_table
	.data
	.align	2
onfault_table:
	.word	0		# invalid index number
#define BADERR		1
	.word	baderr		# badaddr() probe faulted
#define ADDUPCERR	2
	.word	addupcerr	# addupc() profiling buffer faulted
#define COPYERR		3
	.word	copyerr		# copyin/copyout/copystr variants faulted
#define FSWBERR		4
	.word	fswberr		# fu*/su* user access faulted
	.text
149 | ||
150 | /* | |
151 | * See if access to addr with a len type instruction causes a machine check. | |
152 | * len is length of access (1=byte, 2=short, 4=long) | |
153 | * | |
154 | * badaddr(addr, len) | |
155 | * char *addr; | |
156 | * int len; | |
157 | */ | |
/*
 * Probe addr with an access of the given width; trap() redirects a
 * machine check here via onfault_table.  Returns 0 on success, 1 on fault.
 */
LEAF(badaddr)
	li	v0, BADERR
	sw	v0, UADDR+U_PCB_ONFAULT	# arm fault recovery -> baderr
	bne	a1, 1, 2f		# not a byte probe?
	lbu	v0, (a0)		# len == 1: byte access
	b	5f
2:
	bne	a1, 2, 4f		# not a halfword probe? assume word
	lhu	v0, (a0)		# len == 2: halfword access
	b	5f
4:
	lw	v0, (a0)		# len == 4 (default): word access
5:
	sw	zero, UADDR+U_PCB_ONFAULT	# disarm fault recovery
	move	v0, zero		# made it w/o errors
	j	ra
baderr:
	li	v0, 1			# trap sends us here
	j	ra
END(badaddr)
178 | ||
179 | /* | |
180 | * update profiling information for the user | |
181 | * addupc(pc, pr, ticks) | |
182 | * unsigned pc; | |
183 | * struct uprof *pr; | |
184 | * int ticks; | |
185 | */ | |
/*
 * Add 'ticks' to the profiling bucket for 'pc'.  Bucket index is
 * ((pc - pr_off) * pr_scale) >> 16, truncated to a short boundary.
 * A fault while touching the user profile buffer lands in addupcerr,
 * which simply turns profiling off (pr_scale = 0).
 */
LEAF(addupc)
	lw	v1, 8(a1)		# get pr->pr_off
	subu	a0, a0, v1		# pc -= pr->pr_off
	blt	a0, zero, 1f		# ignore if less than zero
	lw	v0, 12(a1)		# get pr->pr_scale
	multu	v0, a0			# compute index into count table
	mflo	v0
	srl	v0, v0, 16		# shift v1,v0 >> 16
	mfhi	v1
	sll	v1, v1, 16
	or	v0, v0, v1		# v0 = 64-bit product >> 16
	addu	v0, v0, 1		# round up and
	and	v0, v0, ~1		#   align to short boundary
	lw	v1, 4(a1)		# get pr->pr_size
	bgeu	v0, v1, 1f		# ignore if index >= size
	lw	v1, 0(a1)		# get pr->pr_base
	addu	v0, v0, v1		# add index and base
	li	v1, ADDUPCERR		# turn off profiling if fault
	bltz	v0, addupcerr		# can this happen?
	sw	v1, UADDR+U_PCB_ONFAULT	# arm fault recovery -> addupcerr
	lh	v1, 0(v0)		# get old count
	addu	v1, v1, a2		# add ticks
	sh	v1, 0(v0)		# save new count
	sw	zero, UADDR+U_PCB_ONFAULT	# disarm fault recovery
1:
	j	ra
addupcerr:
	sw	zero, 12(a1)		# pr->pr_scale = 0, disables profiling
	j	ra
END(addupc)
216 | ||
217 | /* | |
218 | * netorder = htonl(hostorder) | |
219 | * hostorder = ntohl(netorder) | |
220 | */ | |
/* 32-bit byte swap; htonl and ntohl are the same operation. */
LEAF(htonl)				# a0 = 0x11223344, return 0x44332211
ALEAF(ntohl)
	srl	v1, a0, 24		# v1 = 0x00000011
	sll	v0, a0, 24		# v0 = 0x44000000
	or	v0, v0, v1		# merge top and bottom bytes
	and	v1, a0, 0xff00		# isolate byte 1
	sll	v1, v1, 8		# v1 = 0x00330000
	or	v0, v0, v1
	srl	v1, a0, 8		# isolate byte 2
	and	v1, v1, 0xff00		# v1 = 0x00002200
	or	v0, v0, v1		# v0 = fully swapped word
	j	ra
END(htonl)
234 | ||
235 | /* | |
236 | * netorder = htons(hostorder) | |
237 | * hostorder = ntohs(netorder) | |
238 | */ | |
/* 16-bit byte swap; htons and ntohs are the same operation. */
LEAF(htons)
ALEAF(ntohs)
	srl	v0, a0, 8		# high byte -> low position
	and	v0, v0, 0xff
	sll	v1, a0, 8		# low byte -> high position
	and	v1, v1, 0xff00
	or	v0, v0, v1		# v0 = swapped 16-bit value
	j	ra
END(htons)
248 | ||
249 | /* | |
250 | * bit = ffs(value) | |
251 | */ | |
/*
 * Find first (lowest) set bit; returns 1-based bit number,
 * or 0 if no bits are set.
 */
LEAF(ffs)
	move	v0, zero		# result = 0
	beq	a0, zero, 2f		# no bits set at all
1:
	and	v1, a0, 1		# bit set?
	addu	v0, v0, 1		# count this position (1-based)
	srl	a0, a0, 1
	beq	v1, zero, 1b		# no, continue
2:
	j	ra
END(ffs)
263 | ||
264 | /* | |
265 | * strlen(str) | |
266 | */ | |
/* Return the length of a NUL-terminated string (NUL not counted). */
LEAF(strlen)
	addu	v1, a0, 1		# v1 = start + 1, pre-biases the count
1:
	lb	v0, 0(a0)		# get byte from string
	addu	a0, a0, 1		# increment pointer
	bne	v0, zero, 1b		# continue if not end
	subu	v0, a0, v1		# compute length - 1 for '\0' char
	j	ra
END(strlen)
276 | ||
277 | /* | |
278 | * bzero(s1, n) | |
279 | */ | |
/*
 * Zero a1 bytes starting at a0.  Word-aligns the pointer with swr,
 * clears whole words, then finishes the tail byte-by-byte.
 * noreorder: instructions following branches are hand-filled delay slots.
 */
LEAF(bzero)
ALEAF(blkclr)
	.set	noreorder
	blt	a1, 12, smallclr	# small amount to clear?
	subu	a3, zero, a0		# compute # bytes to word align address (BDSLOT)
	and	a3, a3, 3
	beq	a3, zero, 1f		# skip if word aligned
	subu	a1, a1, a3		# subtract from remaining count (BDSLOT)
	swr	zero, 0(a0)		# clear 1, 2, or 3 bytes to align
	addu	a0, a0, a3
1:
	and	v0, a1, 3		# compute number of words left
	subu	a3, a1, v0
	move	a1, v0			# a1 = tail byte count
	addu	a3, a3, a0		# compute ending address
2:
	addu	a0, a0, 4		# clear words
	bne	a0, a3, 2b		#   unrolling loop doesn't help
	sw	zero, -4(a0)		#   since we're limited by memory speed (BDSLOT)
smallclr:
	ble	a1, zero, 2f
	addu	a3, a1, a0		# compute ending address (BDSLOT)
1:
	addu	a0, a0, 1		# clear bytes
	bne	a0, a3, 1b
	sb	zero, -1(a0)		# (BDSLOT)
2:
	j	ra
	nop
	.set	reorder
END(bzero)
311 | ||
312 | /* | |
313 | * bcmp(s1, s2, n) | |
314 | */ | |
/*
 * Compare a2 bytes at a0 and a1; return 0 if equal, 1 if not.
 * Three strategies: both pointers alignable to the same word boundary
 * (word compares), mutually unaligned (lwr/lwl on a0, lw on a1), or a
 * plain byte loop for small/tail counts.
 * noreorder: instructions following branches are hand-filled delay slots.
 */
LEAF(bcmp)
	.set	noreorder
	blt	a2, 16, smallcmp	# is it worth any trouble?
	xor	v0, a0, a1		# compare low two bits of addresses (BDSLOT)
	and	v0, v0, 3
	subu	a3, zero, a1		# compute # bytes to word align address
	bne	v0, zero, unalignedcmp	# not possible to align addresses
	and	a3, a3, 3		# (BDSLOT)

	beq	a3, zero, 1f
	subu	a2, a2, a3		# subtract from remaining count (BDSLOT)
	move	v0, v1			# init v0,v1 so unmodified bytes match
	lwr	v0, 0(a0)		# read 1, 2, or 3 bytes
	lwr	v1, 0(a1)
	addu	a1, a1, a3
	bne	v0, v1, nomatch
	addu	a0, a0, a3		# (BDSLOT)
1:
	and	a3, a2, ~3		# compute number of whole words left
	subu	a2, a2, a3		#   which has to be >= (16-3) & ~3
	addu	a3, a3, a0		# compute ending address
2:
	lw	v0, 0(a0)		# compare words
	lw	v1, 0(a1)
	addu	a0, a0, 4
	bne	v0, v1, nomatch
	addu	a1, a1, 4		# (BDSLOT)
	bne	a0, a3, 2b
	nop
	b	smallcmp		# finish remainder
	nop
unalignedcmp:
	beq	a3, zero, 2f
	subu	a2, a2, a3		# subtract from remaining count (BDSLOT)
	addu	a3, a3, a0		# compute ending address
1:
	lbu	v0, 0(a0)		# compare bytes until a1 word aligned
	lbu	v1, 0(a1)
	addu	a0, a0, 1
	bne	v0, v1, nomatch
	addu	a1, a1, 1		# (BDSLOT)
	bne	a0, a3, 1b
	nop
2:
	and	a3, a2, ~3		# compute number of whole words left
	subu	a2, a2, a3		#   which has to be >= (16-3) & ~3
	addu	a3, a3, a0		# compute ending address
3:
	lwr	v0, 0(a0)		# compare words a0 unaligned, a1 aligned
	lwl	v0, 3(a0)
	lw	v1, 0(a1)
	addu	a0, a0, 4
	bne	v0, v1, nomatch
	addu	a1, a1, 4		# (BDSLOT)
	bne	a0, a3, 3b
	nop
smallcmp:
	ble	a2, zero, match
	addu	a3, a2, a0		# compute ending address (BDSLOT)
1:
	lbu	v0, 0(a0)
	lbu	v1, 0(a1)
	addu	a0, a0, 1
	bne	v0, v1, nomatch
	addu	a1, a1, 1		# (BDSLOT)
	bne	a0, a3, 1b
	nop
match:
	j	ra
	move	v0, zero		# return 0: equal (BDSLOT)
nomatch:
	j	ra
	li	v0, 1			# return 1: differ (BDSLOT)
	.set	reorder
END(bcmp)
390 | ||
391 | /* | |
392 | * {ov}bcopy(from, to, len) | |
393 | */ | |
/*
 * Copy a2 bytes from a0 to a1.  If the destination overlaps the tail of
 * the source (from < to < from+len), copy backwards byte-by-byte;
 * otherwise copy forwards, word-at-a-time when possible (using lwr/lwl
 * for a mutually unaligned source).  Also serves as the tail of
 * copyin/copyout, hence the pcb_onfault clear before returning.
 * noreorder: instructions following branches are hand-filled delay slots.
 */
LEAF(bcopy)
ALEAF(ovbcopy)
	.set	noreorder
	addu	t0, a0, a2		# t0 = end of s1 region
	sltu	t1, a1, t0
	sltu	t2, a0, a1
	and	t1, t1, t2		# t1 = true if from < to < (from+len)
	beq	t1, zero, forward	# non overlapping, do forward copy
	slt	t2, a2, 12		# check for small copy (BDSLOT)

	ble	a2, zero, 2f
	addu	t1, a1, a2		# t1 = end of to region (BDSLOT)
1:
	lb	v0, -1(t0)		# copy bytes backwards,
	subu	t0, t0, 1		#   doesn't happen often so do slow way
	subu	t1, t1, 1
	bne	t0, a0, 1b
	sb	v0, 0(t1)		# (BDSLOT)
2:
	j	ra
	nop
forward:
	bne	t2, zero, smallcpy	# do a small bcopy
	xor	v0, a0, a1		# compare low two bits of addresses (BDSLOT)
	and	v0, v0, 3
	subu	a3, zero, a1		# compute # bytes to word align address
	beq	v0, zero, aligned	# addresses can be word aligned
	and	a3, a3, 3		# (BDSLOT)

	beq	a3, zero, 1f
	subu	a2, a2, a3		# subtract from remaining count (BDSLOT)
	lwr	v0, 0(a0)		# get next 4 bytes (unaligned)
	lwl	v0, 3(a0)
	addu	a0, a0, a3
	swr	v0, 0(a1)		# store 1, 2, or 3 bytes to align a1
	addu	a1, a1, a3
1:
	and	v0, a2, 3		# compute number of words left
	subu	a3, a2, v0
	move	a2, v0			# a2 = tail byte count
	addu	a3, a3, a0		# compute ending address
2:
	lwr	v0, 0(a0)		# copy words a0 unaligned, a1 aligned
	lwl	v0, 3(a0)
	addu	a0, a0, 4
	addu	a1, a1, 4
	bne	a0, a3, 2b
	sw	v0, -4(a1)		# (BDSLOT)
	b	smallcpy
	nop
aligned:
	beq	a3, zero, 1f
	subu	a2, a2, a3		# subtract from remaining count (BDSLOT)
	lwr	v0, 0(a0)		# copy 1, 2, or 3 bytes to align
	addu	a0, a0, a3
	swr	v0, 0(a1)
	addu	a1, a1, a3
1:
	and	v0, a2, 3		# compute number of whole words left
	subu	a3, a2, v0
	move	a2, v0			# a2 = tail byte count
	addu	a3, a3, a0		# compute ending address
2:
	lw	v0, 0(a0)		# copy words
	addu	a0, a0, 4
	addu	a1, a1, 4
	bne	a0, a3, 2b
	sw	v0, -4(a1)		# (BDSLOT)
smallcpy:
	ble	a2, zero, 2f
	addu	a3, a2, a0		# compute ending address (BDSLOT)
1:
	lbu	v0, 0(a0)		# copy bytes
	addu	a0, a0, 1
	addu	a1, a1, 1
	bne	a0, a3, 1b
	sb	v0, -1(a1)		# (BDSLOT)
2:
	sw	zero, UADDR+U_PCB_ONFAULT	# for copyin, copyout
	j	ra
	move	v0, zero		# return 0 (success) for copyin/copyout (BDSLOT)
	.set	reorder
END(bcopy)
477 | ||
478 | /* | |
479 | * Copy a null terminated string within the kernel address space. | |
480 | * Maxlength may be null if count not wanted. | |
481 | * copystr(fromaddr, toaddr, maxlength, &lencopied) | |
482 | * caddr_t fromaddr; | |
483 | * caddr_t toaddr; | |
484 | * u_int maxlength; | |
485 | * u_int *lencopied; | |
486 | */ | |
/*
 * Copy a NUL-terminated string, at most a2 bytes including the NUL.
 * If a3 (&lencopied) is non-null, store the number of bytes copied.
 * Always returns 0; running out of room is not reported as an error
 * here.  Also serves as the tail of copyinstr/copyoutstr, hence the
 * pcb_onfault clear before returning.
 */
LEAF(copystr)
	move	t2, a2			# Save the number of bytes
1:
	lb	t0, 0(a0)
	sb	t0, 0(a1)		# NUL byte is copied before the test below
	sub	a2, a2, 1
	beq	t0, zero, 2f		# hit the terminator
	add	a0, a0, 1
	add	a1, a1, 1
	bne	a2, zero, 1b		# room left?
2:
	beq	a3, zero, 3f		# caller doesn't want the count
	sub	a2, t2, a2		# compute length copied
	sw	a2, 0(a3)
3:
	sw	zero, UADDR+U_PCB_ONFAULT	# for copyinstr, copyoutstr
	move	v0, zero
	j	ra
END(copystr)
506 | ||
507 | /* | |
508 | * Copy a null terminated string from the user address space into | |
509 | * the kernel address space. | |
510 | * | |
511 | * copyinstr(fromaddr, toaddr, maxlength, &lencopied) | |
512 | * caddr_t fromaddr; | |
513 | * caddr_t toaddr; | |
514 | * u_int maxlength; | |
515 | * u_int *lencopied; | |
516 | */ | |
/*
 * copystr from user space: arm fault recovery (-> copyerr, EFAULT),
 * reject kernel addresses (negative == KSEG), then tail-call copystr.
 */
LEAF(copyinstr)
	li	v0, COPYERR
	blt	a0, zero, copyerr	# make sure address is in user space
	sw	v0, UADDR+U_PCB_ONFAULT	# trap() -> copyerr on fault
	b	copystr			# copystr clears pcb_onfault
END(copyinstr)
523 | ||
524 | /* | |
525 | * Copy a null terminated string from the kernel address space into | |
526 | * the user address space. | |
527 | * | |
528 | * copyoutstr(fromaddr, toaddr, maxlength, &lencopied) | |
529 | * caddr_t fromaddr; | |
530 | * caddr_t toaddr; | |
531 | * u_int maxlength; | |
532 | * u_int *lencopied; | |
533 | */ | |
/*
 * copystr to user space: arm fault recovery (-> copyerr, EFAULT),
 * reject kernel destination addresses, then tail-call copystr.
 */
LEAF(copyoutstr)
	li	v0, COPYERR
	blt	a1, zero, copyerr	# make sure address is in user space
	sw	v0, UADDR+U_PCB_ONFAULT	# trap() -> copyerr on fault
	b	copystr			# copystr clears pcb_onfault
END(copyoutstr)
540 | ||
541 | /* | |
542 | * Copy specified amount of data from user space into the kernel | |
543 | * copyin(from, to, len) | |
544 | * caddr_t *from; (user source address) | |
545 | * caddr_t *to; (kernel destination address) | |
546 | * unsigned len; | |
547 | */ | |
/*
 * bcopy from user space: arm fault recovery (-> copyerr, EFAULT),
 * reject kernel source addresses, then tail-call bcopy.
 */
LEAF(copyin)
	li	v0, COPYERR
	blt	a0, zero, copyerr	# make sure address is in user space
	sw	v0, UADDR+U_PCB_ONFAULT	# trap() -> copyerr on fault
	b	bcopy			# bcopy clears pcb_onfault
END(copyin)
554 | ||
555 | /* | |
556 | * Copy specified amount of data from kernel to the user space | |
557 | * copyout(from, to, len) | |
558 | * caddr_t *from; (kernel source address) | |
559 | * caddr_t *to; (user destination address) | |
560 | * unsigned len; | |
561 | */ | |
/*
 * bcopy to user space: arm fault recovery (-> copyerr, EFAULT),
 * reject kernel destination addresses, then tail-call bcopy.
 */
LEAF(copyout)
	li	v0, COPYERR
	blt	a1, zero, copyerr	# make sure address is in user space
	sw	v0, UADDR+U_PCB_ONFAULT	# trap() -> copyerr on fault
	b	bcopy			# bcopy clears pcb_onfault
END(copyout)
568 | ||
/* Common fault target for copyin/copyout/copyinstr/copyoutstr. */
LEAF(copyerr)
	li	v0, EFAULT		# return error
	j	ra
END(copyerr)
573 | ||
574 | /* | |
575 | * Copy data to the DMA buffer. | |
576 | * The DMA bufffer can only be written one short at a time | |
577 | * (and takes ~14 cycles). | |
578 | * | |
579 | * CopyToBuffer(src, dst, length) | |
580 | * u_short *src; NOTE: must be short aligned | |
581 | * u_short *dst; | |
582 | * int length; | |
583 | */ | |
/*
 * Copy a2 bytes of short-aligned data into the DMA buffer.  The buffer
 * accepts one short per word, hence dst advances by 4 per 2 source bytes.
 */
LEAF(CopyToBuffer)
	blez	a2, 2f			# nothing to do
1:
	lhu	t0, 0(a0)		# read 2 bytes of data
	subu	a2, a2, 2
	addu	a0, a0, 2
	addu	a1, a1, 4		# buffer holds one short per word
	sh	t0, -4(a1)		# write 2 bytes of data to buffer
	bgtz	a2, 1b
2:
	j	ra
END(CopyToBuffer)
596 | ||
597 | /* | |
598 | * Copy data from the DMA buffer. | |
599 | * The DMA bufffer can only be read one short at a time | |
600 | * (and takes ~12 cycles). | |
601 | * | |
602 | * CopyFromBuffer(src, dst, length) | |
603 | * u_short *src; | |
604 | * char *dst; | |
605 | * int length; | |
606 | */ | |
/*
 * Copy a2 bytes out of the DMA buffer (one short per buffer word).
 * An odd destination is handled with two byte stores per short; an odd
 * trailing count stores only the low byte of the final buffer short.
 */
LEAF(CopyFromBuffer)
	and	t0, a1, 1		# test for aligned dst
	beq	t0, zero, 3f		# even dst: use halfword stores
	blt	a2, 2, 7f		# at least 2 bytes to copy?
1:
	lhu	t0, 0(a0)		# read 2 bytes of data from buffer
	addu	a0, a0, 4		# keep buffer pointer word aligned
	addu	a1, a1, 2
	subu	a2, a2, 2
	sb	t0, -2(a1)		# store low byte
	srl	t0, t0, 8
	sb	t0, -1(a1)		# then high byte (dst is odd)
	bge	a2, 2, 1b
3:
	blt	a2, 2, 7f		# at least 2 bytes to copy?
6:
	lhu	t0, 0(a0)		# read 2 bytes of data from buffer
	addu	a0, a0, 4		# keep buffer pointer word aligned
	addu	a1, a1, 2
	subu	a2, a2, 2
	sh	t0, -2(a1)
	bge	a2, 2, 6b
7:
	ble	a2, zero, 9f		# done?
	lhu	t0, 0(a0)		# copy one more byte
	sb	t0, 0(a1)
9:
	j	ra
END(CopyFromBuffer)
636 | ||
637 | /* | |
638 | * Copy the kernel stack to the new process and save the current context so | |
639 | * the new process will return nonzero when it is resumed by swtch(). | |
640 | * | |
641 | * copykstack(up) | |
642 | * struct user *up; | |
643 | */ | |
/*
 * Copy the current kernel stack into the new process's u area, then fall
 * through to savectx so the child can be resumed.  Returns 0 here; the
 * resumed context returns 1 via swtch's final 'li v0, 1'.
 */
LEAF(copykstack)
	subu	v0, sp, UADDR		# compute offset into stack
	addu	v0, v0, a0		# v0 = new stack address
	move	v1, sp			# v1 = old stack address
	li	t1, KERNELSTACK		# copy up to the top of the kernel stack
1:
	lw	t0, 0(v1)		# copy stack data
	addu	v1, v1, 4
	sw	t0, 0(v0)
	addu	v0, v0, 4
	bne	v1, t1, 1b
	/* FALLTHROUGH */
/*
 * Save registers and state so we can do a longjmp later.
 * Note: this only works if p != curproc since
 * swtch() will copy over pcb_context.
 *
 * savectx(up)
 *	struct user *up;
 */
ALEAF(savectx)
	.set	noreorder
	sw	s0, U_PCB_CONTEXT+0(a0)		# callee-saved registers...
	sw	s1, U_PCB_CONTEXT+4(a0)
	sw	s2, U_PCB_CONTEXT+8(a0)
	sw	s3, U_PCB_CONTEXT+12(a0)
	mfc0	v0, MACH_COP_0_STATUS_REG	# grab status while stores issue
	sw	s4, U_PCB_CONTEXT+16(a0)
	sw	s5, U_PCB_CONTEXT+20(a0)
	sw	s6, U_PCB_CONTEXT+24(a0)
	sw	s7, U_PCB_CONTEXT+28(a0)
	sw	sp, U_PCB_CONTEXT+32(a0)	# ...plus sp, fp, ra
	sw	s8, U_PCB_CONTEXT+36(a0)
	sw	ra, U_PCB_CONTEXT+40(a0)
	sw	v0, U_PCB_CONTEXT+44(a0)	# and the status register
	j	ra
	move	v0, zero			# return 0 to the caller (BDSLOT)
	.set	reorder
END(copykstack)
683 | ||
684 | /* | |
685 | * _whichqs tells which of the 32 queues _qs | |
686 | * have processes in them. Setrq puts processes into queues, Remrq | |
687 | * removes them from queues. The running process is on no queue, | |
688 | * other processes are on a queue related to p->p_pri, divided by 4 | |
689 | * actually to shrink the 0-127 range of priorities into the 32 available | |
690 | * queues. | |
691 | */ | |
692 | ||
693 | /* | |
694 | * setrq(p) | |
695 | * proc *p; | |
696 | * | |
697 | * Call should be made at splclock(), and p->p_stat should be SRUN. | |
698 | */ | |
/*
 * Insert p at the tail of run queue qs[p->p_pri / 4] and set the
 * corresponding bit in 'whichqs'.  Panics if p is already linked
 * (p->p_rlink != 0 firewall).
 */
NON_LEAF(setrq, STAND_FRAME_SIZE, ra)
	subu	sp, sp, STAND_FRAME_SIZE
	.mask	0x80000000, (STAND_RA_OFFSET - STAND_FRAME_SIZE)
	lw	t0, P_RLINK(a0)		## firewall: p->p_rlink must be 0
	beq	t0, zero, 1f		##
	sw	ra, STAND_RA_OFFSET(sp)	##
	PANIC("setrq")			##
1:
	lbu	t0, P_PRI(a0)		# put on queue which is p->p_pri / 4
	srl	t0, t0, 2		# compute index into 'whichqs'
	li	t1, 1			# compute corresponding bit
	sll	t1, t1, t0
	lw	t2, whichqs		# set corresponding bit
	or	t2, t2, t1
	sw	t2, whichqs
	sll	t0, t0, 3		# compute index into 'qs' (8 bytes/header)
	la	t1, qs
	addu	t0, t0, t1		# t0 = qp = &qs[pri >> 2]
	lw	t1, P_RLINK(t0)		# t1 = qp->ph_rlink
	sw	t0, P_LINK(a0)		# p->p_link = qp
	sw	t1, P_RLINK(a0)		# p->p_rlink = qp->ph_rlink
	sw	a0, P_LINK(t1)		# p->p_rlink->p_link = p;
	sw	a0, P_RLINK(t0)		# qp->ph_rlink = p
	addu	sp, sp, STAND_FRAME_SIZE
	j	ra
END(setrq)
725 | ||
726 | /* | |
727 | * Remrq(p) | |
728 | * | |
729 | * Call should be made at splclock(). | |
730 | */ | |
/*
 * Unlink p from its run queue; if that leaves the queue empty, clear
 * the queue's bit in 'whichqs'.  Panics if the bit was not set.
 */
NON_LEAF(remrq, STAND_FRAME_SIZE, ra)
	subu	sp, sp, STAND_FRAME_SIZE
	.mask	0x80000000, (STAND_RA_OFFSET - STAND_FRAME_SIZE)
	lbu	t0, P_PRI(a0)		# get from queue which is p->p_pri / 4
	srl	t0, t0, 2		# compute index into 'whichqs'
	li	t1, 1			# compute corresponding bit
	sll	t1, t1, t0
	lw	t2, whichqs		# check corresponding bit
	and	v0, t2, t1
	bne	v0, zero, 1f		##
	sw	ra, STAND_RA_OFFSET(sp)	##
	PANIC("remrq")			## it wasn't recorded to be on its q
1:
	lw	v0, P_RLINK(a0)		# v0 = p->p_rlink
	lw	v1, P_LINK(a0)		# v1 = p->p_link
	sw	v1, P_LINK(v0)		# p->p_rlink->p_link = p->p_link;
	sw	v0, P_RLINK(v1)		# p->p_link->p_rlink = p->r_rlink
	sll	t0, t0, 3		# compute index into 'qs' (8 bytes/header)
	la	v0, qs
	addu	t0, t0, v0		# t0 = qp = &qs[pri >> 2]
	lw	v0, P_LINK(t0)		# check if queue empty
	bne	v0, t0, 2f		# No. qp->ph_link != qp
	xor	t2, t2, t1		# clear corresponding bit in 'whichqs'
	sw	t2, whichqs
2:
	sw	zero, P_RLINK(a0)	## for firewall checking
	addu	sp, sp, STAND_FRAME_SIZE
	j	ra
END(remrq)
760 | ||
761 | /* | |
762 | * swtch_exit() | |
763 | * | |
764 | * At exit of a process, do a swtch for the last time. | |
765 | * The mapping of the pcb at p->p_addr has already been deleted, | |
766 | * and the memory for the pcb+stack has been freed. | |
767 | * All interrupts should be blocked at this point. | |
768 | */ | |
/*
 * Final context switch for an exiting process: map nullproc's u pages
 * into TLB entries 0 and 1 (hard coded to UPAGES == 2), move to the
 * standard kernel stack, and enter swtch() for the last time.
 */
LEAF(swtch_exit)
	.set	noreorder
	la	v0, nullproc		# save state into garbage proc
	lw	t0, P_UPTE+0(v0)	# t0 = first u. pte
	lw	t1, P_UPTE+4(v0)	# t1 = 2nd u. pte
	li	v0, UADDR		# v0 = first HI entry
	mtc0	zero, MACH_COP_0_TLB_INDEX	# set the index register
	mtc0	v0, MACH_COP_0_TLB_HI	# init high entry
	mtc0	t0, MACH_COP_0_TLB_LOW	# init low entry
	li	t0, 1 << VMMACH_TLB_INDEX_SHIFT
	tlbwi				# Write the TLB entry.
	addu	v0, v0, NBPG		# 2nd HI entry
	mtc0	t0, MACH_COP_0_TLB_INDEX	# set the index register
	mtc0	v0, MACH_COP_0_TLB_HI	# init high entry
	mtc0	t1, MACH_COP_0_TLB_LOW	# init low entry
	nop				# let the CP0 writes settle
	tlbwi				# Write the TLB entry.
	.set	reorder
	li	sp, KERNELSTACK - START_FRAME	# switch to standard stack
	b	swtch
END(swtch_exit)
790 | ||
791 | /* | |
792 | * When no processes are on the runq, swtch branches to idle | |
793 | * to wait for something to come ready. | |
794 | * Note: this is really a part of swtch() but defined here for kernel profiling. | |
795 | */ | |
/*
 * Spin with all interrupts enabled until some run queue becomes
 * non-empty, then re-enter the scheduler at sw1.
 */
LEAF(idle)
	.set	noreorder
	li	t0, (MACH_INT_MASK | MACH_SR_INT_ENA_CUR)
	mtc0	t0, MACH_COP_0_STATUS_REG	# enable all interrupts
	nop
	.set	reorder
1:
	lw	t0, whichqs		# look for non-empty queue
	beq	t0, zero, 1b
	b	sw1			# found one: pick a process in swtch()
END(idle)
807 | ||
808 | /* | |
809 | * swtch() | |
810 | * Find the highest priority process and resume it. | |
811 | */ | |
/*
 * Context switch: save the current kernel context into the u area at
 * UADDR, pick the highest-priority runnable process, remap UADDR's two
 * TLB entries to the new process's u pages (hard coded to UPAGES == 2),
 * and restore its saved context.  The switched-to context resumes with
 * v0 == 1 (see the final delay slot), which is how savectx() callers
 * distinguish the resume path.
 * noreorder: instructions following branches are hand-filled delay slots.
 */
NON_LEAF(swtch, STAND_FRAME_SIZE, ra)
	.set	noreorder
	sw	sp, UADDR+U_PCB_CONTEXT+32	# save old sp
	subu	sp, sp, STAND_FRAME_SIZE
	sw	ra, STAND_RA_OFFSET(sp)
	.mask	0x80000000, (STAND_RA_OFFSET - STAND_FRAME_SIZE)
	lw	t2, cnt+V_SWTCH		# for statistics
	lw	t1, whichqs		# look for non-empty queue
	mfc0	t0, MACH_COP_0_STATUS_REG	# t0 = saved status register
	sw	ra, UADDR+U_PCB_CONTEXT+40	# save return address
	sw	t0, UADDR+U_PCB_CONTEXT+44	# save status register
	addu	t2, t2, 1		# cnt.v_swtch++
	beq	t1, zero, idle		# if none, idle
	sw	t2, cnt+V_SWTCH		# (BDSLOT)
sw1:
	mtc0	zero, MACH_COP_0_STATUS_REG	# Disable all interrupts
	nop
	lw	t0, whichqs		# look for non-empty queue
	li	t2, -1			# t2 = lowest bit set
	beq	t0, zero, idle		# if none, idle
	move	t3, t0			# t3 = saved whichqs (BDSLOT)
1:
	add	t2, t2, 1		# scan for lowest set bit
	and	t1, t0, 1		# bit set?
	beq	t1, zero, 1b
	srl	t0, t0, 1		# try next bit (BDSLOT)
/*
 * Remove process from queue.
 */
	sll	t0, t2, 3		# 8 bytes per queue header
	la	t1, qs
	addu	t0, t0, t1		# t0 = qp = &qs[highbit]
	lw	a0, P_LINK(t0)		# a0 = p = highest pri process
	nop				# load delay
	lw	v0, P_LINK(a0)		# v0 = p->p_link
	bne	t0, a0, 2f		# make sure something in queue
	sw	v0, P_LINK(t0)		# qp->ph_link = p->p_link; (BDSLOT)
	PANIC("swtch")			# nothing in queue
2:
	sw	t0, P_RLINK(v0)		# p->p_link->p_rlink = qp
	bne	v0, t0, 3f		# queue still not empty
	sw	zero, P_RLINK(a0)	## for firewall checking (BDSLOT)
	li	v1, 1			# compute bit in 'whichqs'
	sll	v1, v1, t2
	xor	t3, t3, v1		# clear bit in 'whichqs'
	sw	t3, whichqs
3:
/*
 * Save old context and switch to new one.
 */
	sw	a0, curproc		# set curproc
	sw	zero, want_resched
	jal	pmap_alloc_tlbpid	# v0 = TLB PID
	sw	a0, STAND_FRAME_SIZE(sp)	# save p (BDSLOT)
	lw	a0, STAND_FRAME_SIZE(sp)	# restore p
	sll	v0, v0, VMMACH_TLB_PID_SHIFT	# v0 = aligned PID
	or	v0, v0, UADDR		# v0 = first HI entry
	lw	t0, P_UPTE+0(a0)	# t0 = first u. pte
	lw	t1, P_UPTE+4(a0)	# t1 = 2nd u. pte
	sw	s0, UADDR+U_PCB_CONTEXT+0	# do a 'savectx()'
	sw	s1, UADDR+U_PCB_CONTEXT+4	# We save s0 to s8 here because
	sw	s2, UADDR+U_PCB_CONTEXT+8	# the TLB trap code uses
	sw	s3, UADDR+U_PCB_CONTEXT+12	# CONTEXT and there should be
	sw	s4, UADDR+U_PCB_CONTEXT+16	# no faults at this point.
	sw	s5, UADDR+U_PCB_CONTEXT+20
	sw	s6, UADDR+U_PCB_CONTEXT+24
	sw	s7, UADDR+U_PCB_CONTEXT+28
	sw	s8, UADDR+U_PCB_CONTEXT+36
/*
 * Resume process indicated by the pte's for its u struct
 * NOTE: This is hard coded to UPAGES == 2.
 * Also, there should be no TLB faults at this point.
 */
	mtc0	zero, MACH_COP_0_TLB_INDEX	# set the index register
	mtc0	v0, MACH_COP_0_TLB_HI	# init high entry
	mtc0	t0, MACH_COP_0_TLB_LOW	# init low entry
	li	t0, 1 << VMMACH_TLB_INDEX_SHIFT
	tlbwi				# Write the TLB entry.
	addu	v0, v0, NBPG		# 2nd HI entry
	mtc0	t0, MACH_COP_0_TLB_INDEX	# set the index register
	mtc0	v0, MACH_COP_0_TLB_HI	# init high entry
	mtc0	t1, MACH_COP_0_TLB_LOW	# init low entry
	nop				# let the CP0 writes settle
	tlbwi				# Write the TLB entry.
/*
 * Now running on new u struct.
 * Restore registers and return.
 */
	lw	v0, UADDR+U_PCB_CONTEXT+44	# restore kernel context
	lw	ra, UADDR+U_PCB_CONTEXT+40
	lw	s0, UADDR+U_PCB_CONTEXT+0
	lw	s1, UADDR+U_PCB_CONTEXT+4
	lw	s2, UADDR+U_PCB_CONTEXT+8
	lw	s3, UADDR+U_PCB_CONTEXT+12
	lw	s4, UADDR+U_PCB_CONTEXT+16
	lw	s5, UADDR+U_PCB_CONTEXT+20
	lw	s6, UADDR+U_PCB_CONTEXT+24
	lw	s7, UADDR+U_PCB_CONTEXT+28
	lw	sp, UADDR+U_PCB_CONTEXT+32
	lw	s8, UADDR+U_PCB_CONTEXT+36
	mtc0	v0, MACH_COP_0_STATUS_REG
	j	ra
	li	v0, 1			# possible return to 'savectx()' (BDSLOT)
	.set	reorder
END(swtch)
917 | ||
918 | /* | |
919 | * {fu,su},{ibyte,iword}, fetch or store a byte or word to user text space. | |
920 | * {fu,su},{byte,word}, fetch or store a byte or word to user data space. | |
921 | */ | |
/* Fetch a word from user data/text space; returns -1 (via fswberr) on fault. */
LEAF(fuword)
ALEAF(fuiword)
	li	v0, FSWBERR
	blt	a0, zero, fswberr	# make sure address is in user space
	sw	v0, UADDR+U_PCB_ONFAULT	# trap() -> fswberr on fault
	lw	v0, 0(a0)		# fetch word
	sw	zero, UADDR+U_PCB_ONFAULT	# disarm fault recovery
	j	ra
END(fuword)
931 | ||
/* Fetch a byte from user data/text space; returns -1 (via fswberr) on fault. */
LEAF(fubyte)
ALEAF(fuibyte)
	li	v0, FSWBERR
	blt	a0, zero, fswberr	# make sure address is in user space
	sw	v0, UADDR+U_PCB_ONFAULT	# trap() -> fswberr on fault
	lbu	v0, 0(a0)		# fetch byte
	sw	zero, UADDR+U_PCB_ONFAULT	# disarm fault recovery
	j	ra
END(fubyte)
941 | ||
/* Store a word to user data/text space; returns 0, or -1 (via fswberr) on fault. */
LEAF(suword)
ALEAF(suiword)
	li	v0, FSWBERR
	blt	a0, zero, fswberr	# make sure address is in user space
	sw	v0, UADDR+U_PCB_ONFAULT	# trap() -> fswberr on fault
	sw	a1, 0(a0)		# store word
	sw	zero, UADDR+U_PCB_ONFAULT	# disarm fault recovery
	move	v0, zero
	j	ra
END(suword)
952 | ||
# subyte/suibyte: store a byte to user space.
# In: a0 = user virtual address, a1 = byte to store (low 8 bits used by sb).
# Out: v0 = 0 on success, -1 via fswberr on bad address or fault.
953 | LEAF(subyte) | |
954 | ALEAF(suibyte) | |
955 | li v0, FSWBERR | |
956 | blt a0, zero, fswberr # make sure address is in user space | |
957 | sw v0, UADDR+U_PCB_ONFAULT | |
958 | sb a1, 0(a0) # store byte | |
959 | sw zero, UADDR+U_PCB_ONFAULT | |
960 | move v0, zero | |
961 | j ra | |
962 | END(subyte) | |
963 | ||
# fswberr: common error exit for the fu*/su* routines — returns -1.
# Reached directly on a bad-address check, or via u.u_pcb.pcb_onfault when
# the access faulted.  NOTE(review): onfault is not cleared here; presumably
# the trap path clears it before dispatching — confirm in trap().
964 | LEAF(fswberr) | |
965 | li v0, -1 | |
966 | j ra | |
967 | END(fswberr) | |
968 | ||
969 | /* | |
970 | * Insert 'p' after 'q'. | |
971 | * _insque(p, q) | |
972 | * caddr_t p, q; | |
973 | */ | |
# _insque(p, q): insert element p after q in a doubly linked queue.
# In: a0 = p, a1 = q.  Layout: offset 0 = next pointer, offset 4 = prev pointer.
# Clobbers: v0.
974 | LEAF(_insque) | |
975 | lw v0, 0(a1) # v0 = q->next | |
976 | sw a1, 4(a0) # p->prev = q | |
977 | sw v0, 0(a0) # p->next = q->next | |
978 | sw a0, 4(v0) # q->next->prev = p | |
979 | sw a0, 0(a1) # q->next = p | |
980 | j ra | |
981 | END(_insque) | |
982 | ||
983 | /* | |
984 | * Remove item 'p' from queue. | |
985 | * _remque(p) | |
986 | * caddr_t p; | |
987 | */ | |
# _remque(p): unlink element p from its doubly linked queue.
# In: a0 = p.  Layout: offset 0 = next pointer, offset 4 = prev pointer.
# Clobbers: v0, v1.  p's own links are left stale (not cleared).
988 | LEAF(_remque) | |
989 | lw v0, 0(a0) # v0 = p->next | |
990 | lw v1, 4(a0) # v1 = p->prev | |
991 | sw v0, 0(v1) # p->prev->next = p->next | |
992 | sw v1, 4(v0) # p->next->prev = p->prev | |
993 | j ra | |
994 | END(_remque) | |
995 | ||
996 | /* | |
997 | * This code is copied to the UTLB exception vector address to | |
998 | * handle user level TLB translation misses. | |
999 | * NOTE: This code must be relocatable!!! | |
1000 | */ | |
# MachUTLBMiss .. MachUTLBMissEnd: user TLB-miss fast path, copied to the
# UTLB exception vector, hence position-independent (only PC-relative
# branches plus the absolute j SlowFault).  Uses only k0/k1 — no state is
# saved.  Hashes the faulting VA into the PTE cache; if the cached high
# entry matches EntryHi, writes the pair into a random TLB slot and returns
# via EPC + rfe; otherwise falls back to the full handler.
1001 | .globl MachUTLBMiss | |
1002 | MachUTLBMiss: | |
1003 | .set noat | |
1004 | .set noreorder | |
1005 | mfc0 k0, MACH_COP_0_BAD_VADDR # get the virtual address | |
1006 | nop | |
1007 | srl k0, k0, PMAP_HASH_SHIFT1 # get page in low bits | |
1008 | srl k1, k0, PMAP_HASH_SHIFT2 - PMAP_HASH_SHIFT1 | |
1009 | and k0, k0, PMAP_HASH_MASK1 | |
1010 | and k1, k1, PMAP_HASH_MASK2 | |
1011 | or k1, k1, k0 | |
1012 | sll k1, k1, PMAP_HASH_SIZE_SHIFT # compute index | |
1013 | lw k0, PMAP_HASH_LOW_OFFSET(k1) # get cached low PTE entry | |
1014 | lw k1, PMAP_HASH_HIGH_OFFSET(k1) # get cached high PTE entry | |
1015 | mtc0 k0, MACH_COP_0_TLB_LOW | |
1016 | mfc0 k0, MACH_COP_0_TLB_HI # get actual high PTE entry | |
1017 | nop | |
1018 | bne k0, k1, 1f # non-matching PTE | |
1019 | mfc0 k0, MACH_COP_0_EXC_PC # get return address (delay slot) | |
1020 | tlbwr # update TLB | |
1021 | j k0 | |
1022 | rfe # restore pre-exception mode (delay slot) | |
1023 | 1: | |
1024 | j SlowFault # handle cache miss | |
1025 | nop | |
1026 | .set reorder | |
1027 | .set at | |
1028 | .globl MachUTLBMissEnd | |
1029 | MachUTLBMissEnd: | |
1030 | ||
1031 | /* | |
1032 | * This code is copied to the general exception vector address to | |
1033 | * handle all exceptions except RESET and UTLBMiss. | |
1034 | * NOTE: This code must be relocatable!!! | |
1035 | */ | |
# MachException .. MachExceptionEnd: general exception vector stub, copied
# to the exception vector address (position-independent; ends with an
# absolute jump through machExceptionTable).  Dispatches on the Cause
# exception code; user-mode exceptions index 0x40 bytes further into the
# table (16 word-sized entries per half).  Uses only k0/k1.
1036 | .globl MachException | |
1037 | MachException: | |
1038 | /* | |
1039 | * Find out what mode we came from and jump to the proper handler. | |
1040 | */ | |
1041 | .set noat | |
1042 | .set noreorder | |
1043 | mfc0 k0, MACH_COP_0_STATUS_REG # Get the status register | |
1044 | mfc0 k1, MACH_COP_0_CAUSE_REG # Get the cause register value. | |
1045 | and k0, k0, MACH_SR_KU_PREV # test for user mode | |
1046 | beq k0, zero, 1f # handle kernel exception | |
1047 | and k1, k1, MACH_CR_EXC_CODE # Mask out the cause bits (delay slot, runs on both paths). | |
1048 | addu k1, k1, 0x40 # change index to user table | |
1049 | 1: | |
1050 | la k0, machExceptionTable # get base of the jump table | |
1051 | add k0, k0, k1 # Get the address of the | |
1052 | # function entry. Note that | |
1053 | # the cause is already | |
1054 | # shifted left by 2 bits so | |
1055 | # we don't have to shift. | |
1056 | lw k0, 0(k0) # Get the function address | |
1057 | nop | |
1058 | j k0 # Jump to the function. | |
1059 | nop | |
1060 | .set reorder | |
1061 | .set at | |
1062 | .globl MachExceptionEnd | |
1063 | MachExceptionEnd: | |
1064 | ||
1065 | /* | |
1066 | * We couldn't find a TLB entry. | |
1067 | * Find out what mode we came from and call the appropriate handler. | |
1068 | */ | |
# SlowFault: TLB-miss slow path.  If the faulting access came from user
# mode (KUp set in SR), dispatch to MachUserGenException; otherwise fall
# through into MachKernGenException below.  Uses only k0.
1069 | SlowFault: | |
1070 | .set noat | |
1071 | .set noreorder | |
1072 | mfc0 k0, MACH_COP_0_STATUS_REG | |
1073 | nop | |
1074 | and k0, k0, MACH_SR_KU_PREV | |
1075 | bne k0, zero, MachUserGenException | |
1076 | nop | |
1077 | .set reorder | |
1078 | .set at | |
1079 | /* | |
1080 | * Fall through ... | |
1081 | */ | |
1082 | ||
1083 | /*---------------------------------------------------------------------------- | |
1084 | * | |
1085 | * MachKernGenException -- | |
1086 | * | |
1087 | * Handle an exception from kernel mode. | |
1088 | * | |
1089 | * Results: | |
1090 | * None. | |
1091 | * | |
1092 | * Side effects: | |
1093 | * None. | |
1094 | * | |
1095 | *---------------------------------------------------------------------------- | |
1096 | */ | |
1097 | ||
1098 | /* | |
1099 | * The kernel exception stack contains 18 saved general registers, | |
1100 | * the status register and the multiply lo and high registers. | |
1101 | * In addition, we set this up for linkage conventions. | |
1102 | */ | |
# Kernel exception frame layout: 18 caller-saved GPRs, then SR, LO, HI,
# on top of a standard calling-convention frame (STAND_FRAME_SIZE).
1103 | #define KERN_REG_SIZE (18 * 4) | |
1104 | #define KERN_REG_OFFSET (STAND_FRAME_SIZE) | |
1105 | #define KERN_SR_OFFSET (STAND_FRAME_SIZE + KERN_REG_SIZE) | |
1106 | #define KERN_MULT_LO_OFFSET (STAND_FRAME_SIZE + KERN_REG_SIZE + 4) | |
1107 | #define KERN_MULT_HI_OFFSET (STAND_FRAME_SIZE + KERN_REG_SIZE + 8) | |
1108 | #define KERN_EXC_FRAME_SIZE (STAND_FRAME_SIZE + KERN_REG_SIZE + 12) | |
1109 | || 
# MachKernGenException: exception taken while in kernel mode.  Saves the
# volatile register set (AT, v0-v1, a0-a3, t0-t9, ra, LO, HI, SR) on the
# current kernel stack, calls trap(SR, Cause, BadVAddr, EPC), then restores
# and resumes at the PC returned in v0.  CP0 reads are interleaved with the
# stores to cover coprocessor access latency (noreorder code).
1110 | NON_LEAF(MachKernGenException, KERN_EXC_FRAME_SIZE, ra) | |
1111 | .set noreorder | |
1112 | .set noat | |
1113 | subu sp, sp, KERN_EXC_FRAME_SIZE | |
1114 | .mask 0x80000000, (STAND_RA_OFFSET - KERN_EXC_FRAME_SIZE) | |
1115 | /* | |
1116 | * Save the relevant kernel registers onto the stack. | |
1117 | * We don't need to save s0 - s8, sp and gp because | |
1118 | * the compiler does it for us. | |
1119 | */ | |
1120 | sw AT, KERN_REG_OFFSET + 0(sp) | |
1121 | sw v0, KERN_REG_OFFSET + 4(sp) | |
1122 | sw v1, KERN_REG_OFFSET + 8(sp) | |
1123 | sw a0, KERN_REG_OFFSET + 12(sp) | |
1124 | mflo v0 | |
1125 | mfhi v1 | |
1126 | sw a1, KERN_REG_OFFSET + 16(sp) | |
1127 | sw a2, KERN_REG_OFFSET + 20(sp) | |
1128 | sw a3, KERN_REG_OFFSET + 24(sp) | |
1129 | sw t0, KERN_REG_OFFSET + 28(sp) | |
1130 | mfc0 a0, MACH_COP_0_STATUS_REG # First arg is the status reg. | |
1131 | sw t1, KERN_REG_OFFSET + 32(sp) | |
1132 | sw t2, KERN_REG_OFFSET + 36(sp) | |
1133 | sw t3, KERN_REG_OFFSET + 40(sp) | |
1134 | sw t4, KERN_REG_OFFSET + 44(sp) | |
1135 | mfc0 a1, MACH_COP_0_CAUSE_REG # Second arg is the cause reg. | |
1136 | sw t5, KERN_REG_OFFSET + 48(sp) | |
1137 | sw t6, KERN_REG_OFFSET + 52(sp) | |
1138 | sw t7, KERN_REG_OFFSET + 56(sp) | |
1139 | sw t8, KERN_REG_OFFSET + 60(sp) | |
1140 | mfc0 a2, MACH_COP_0_BAD_VADDR # Third arg is the fault addr. | |
1141 | sw t9, KERN_REG_OFFSET + 64(sp) | |
1142 | sw ra, KERN_REG_OFFSET + 68(sp) | |
1143 | sw v0, KERN_MULT_LO_OFFSET(sp) | |
1144 | sw v1, KERN_MULT_HI_OFFSET(sp) | |
1145 | mfc0 a3, MACH_COP_0_EXC_PC # Fourth arg is the pc. | |
1146 | sw a0, KERN_SR_OFFSET(sp) | |
1147 | /* | |
1148 | * Call the exception handler. | |
1149 | */ | |
1150 | jal trap | |
1151 | sw a3, STAND_RA_OFFSET(sp) # for debugging (delay slot) | |
1152 | /* | |
1153 | * Restore registers and return from the exception. | |
1154 | * v0 contains the return address. | |
1155 | */ | |
1156 | lw a0, KERN_SR_OFFSET(sp) | |
1157 | lw t0, KERN_MULT_LO_OFFSET(sp) | |
1158 | lw t1, KERN_MULT_HI_OFFSET(sp) | |
1159 | mtc0 a0, MACH_COP_0_STATUS_REG # Restore the SR, disable intrs | |
1160 | mtlo t0 | |
1161 | mthi t1 | |
1162 | move k0, v0 | |
1163 | lw AT, KERN_REG_OFFSET + 0(sp) | |
1164 | lw v0, KERN_REG_OFFSET + 4(sp) | |
1165 | lw v1, KERN_REG_OFFSET + 8(sp) | |
1166 | lw a0, KERN_REG_OFFSET + 12(sp) | |
1167 | lw a1, KERN_REG_OFFSET + 16(sp) | |
1168 | lw a2, KERN_REG_OFFSET + 20(sp) | |
1169 | lw a3, KERN_REG_OFFSET + 24(sp) | |
1170 | lw t0, KERN_REG_OFFSET + 28(sp) | |
1171 | lw t1, KERN_REG_OFFSET + 32(sp) | |
1172 | lw t2, KERN_REG_OFFSET + 36(sp) | |
1173 | lw t3, KERN_REG_OFFSET + 40(sp) | |
1174 | lw t4, KERN_REG_OFFSET + 44(sp) | |
1175 | lw t5, KERN_REG_OFFSET + 48(sp) | |
1176 | lw t6, KERN_REG_OFFSET + 52(sp) | |
1177 | lw t7, KERN_REG_OFFSET + 56(sp) | |
1178 | lw t8, KERN_REG_OFFSET + 60(sp) | |
1179 | lw t9, KERN_REG_OFFSET + 64(sp) | |
1180 | lw ra, KERN_REG_OFFSET + 68(sp) | |
1181 | addu sp, sp, KERN_EXC_FRAME_SIZE | |
1182 | j k0 # Now return from the | |
1183 | rfe # exception. | |
1184 | .set at | |
1185 | .set reorder | |
1186 | END(MachKernGenException) | |
1187 | ||
1188 | /*---------------------------------------------------------------------------- | |
1189 | * | |
1190 | * MachUserGenException -- | |
1191 | * | |
1192 | * Handle an exception from user mode. | |
1193 | * | |
1194 | * Results: | |
1195 | * None. | |
1196 | * | |
1197 | * Side effects: | |
1198 | * None. | |
1199 | * | |
1200 | *---------------------------------------------------------------------------- | |
1201 | */ | |
# MachUserGenException: exception taken from user mode.  Saves the full
# user register set (including callee-saved s0-s8, gp, sp) into
# u.u_pcb.pcb_regs, switches to the kernel stack/gp, disables the FPU bit,
# calls trap(SR, Cause, BadVAddr, EPC), then restores the user registers
# from the pcb and returns to user mode with rfe.
1202 | NON_LEAF(MachUserGenException, STAND_FRAME_SIZE, ra) | |
1203 | .set noreorder | |
1204 | .set noat | |
1205 | .mask 0x80000000, (STAND_RA_OFFSET - STAND_FRAME_SIZE) | |
1206 | /* | |
1207 | * Save all of the registers except for the kernel temporaries in u.u_pcb. | |
1208 | */ | |
1209 | sw AT, UADDR+U_PCB_REGS+(AST * 4) | |
1210 | sw v0, UADDR+U_PCB_REGS+(V0 * 4) | |
1211 | sw v1, UADDR+U_PCB_REGS+(V1 * 4) | |
1212 | sw a0, UADDR+U_PCB_REGS+(A0 * 4) | |
1213 | mflo v0 | |
1214 | sw a1, UADDR+U_PCB_REGS+(A1 * 4) | |
1215 | sw a2, UADDR+U_PCB_REGS+(A2 * 4) | |
1216 | sw a3, UADDR+U_PCB_REGS+(A3 * 4) | |
1217 | sw t0, UADDR+U_PCB_REGS+(T0 * 4) | |
1218 | mfhi v1 | |
1219 | sw t1, UADDR+U_PCB_REGS+(T1 * 4) | |
1220 | sw t2, UADDR+U_PCB_REGS+(T2 * 4) | |
1221 | sw t3, UADDR+U_PCB_REGS+(T3 * 4) | |
1222 | sw t4, UADDR+U_PCB_REGS+(T4 * 4) | |
1223 | mfc0 a0, MACH_COP_0_STATUS_REG # First arg is the status reg. | |
1224 | sw t5, UADDR+U_PCB_REGS+(T5 * 4) | |
1225 | sw t6, UADDR+U_PCB_REGS+(T6 * 4) | |
1226 | sw t7, UADDR+U_PCB_REGS+(T7 * 4) | |
1227 | sw s0, UADDR+U_PCB_REGS+(S0 * 4) | |
1228 | mfc0 a1, MACH_COP_0_CAUSE_REG # Second arg is the cause reg. | |
1229 | sw s1, UADDR+U_PCB_REGS+(S1 * 4) | |
1230 | sw s2, UADDR+U_PCB_REGS+(S2 * 4) | |
1231 | sw s3, UADDR+U_PCB_REGS+(S3 * 4) | |
1232 | sw s4, UADDR+U_PCB_REGS+(S4 * 4) | |
1233 | mfc0 a2, MACH_COP_0_BAD_VADDR # Third arg is the fault addr | |
1234 | sw s5, UADDR+U_PCB_REGS+(S5 * 4) | |
1235 | sw s6, UADDR+U_PCB_REGS+(S6 * 4) | |
1236 | sw s7, UADDR+U_PCB_REGS+(S7 * 4) | |
1237 | sw t8, UADDR+U_PCB_REGS+(T8 * 4) | |
1238 | mfc0 a3, MACH_COP_0_EXC_PC # Fourth arg is the pc. | |
1239 | sw t9, UADDR+U_PCB_REGS+(T9 * 4) | |
1240 | sw gp, UADDR+U_PCB_REGS+(GP * 4) | |
1241 | sw sp, UADDR+U_PCB_REGS+(SP * 4) | |
1242 | sw s8, UADDR+U_PCB_REGS+(S8 * 4) | |
1243 | li sp, KERNELSTACK - STAND_FRAME_SIZE # switch to kernel SP | |
1244 | sw ra, UADDR+U_PCB_REGS+(RA * 4) | |
1245 | sw v0, UADDR+U_PCB_REGS+(MULLO * 4) | |
1246 | sw v1, UADDR+U_PCB_REGS+(MULHI * 4) | |
1247 | sw a0, UADDR+U_PCB_REGS+(SR * 4) | |
1248 | la gp, _gp # switch to kernel GP | |
1249 | sw a3, UADDR+U_PCB_REGS+(PC * 4) | |
1250 | sw a3, STAND_RA_OFFSET(sp) # for debugging | |
1251 | and t0, a0, ~MACH_SR_COP_1_BIT # Turn off the FPU. | |
1252 | /* | |
1253 | * Call the exception handler. | |
1254 | */ | |
1255 | jal trap | |
1256 | mtc0 t0, MACH_COP_0_STATUS_REG # (delay slot) SR with FPU bit clear | |
1257 | /* | |
1258 | * Restore user registers and return. NOTE: interrupts are enabled. | |
1259 | */ | |
1260 | lw a0, UADDR+U_PCB_REGS+(SR * 4) | |
1261 | lw t0, UADDR+U_PCB_REGS+(MULLO * 4) | |
1262 | lw t1, UADDR+U_PCB_REGS+(MULHI * 4) | |
1263 | mtc0 a0, MACH_COP_0_STATUS_REG # this should disable interrupts | |
1264 | mtlo t0 | |
1265 | mthi t1 | |
1266 | lw k0, UADDR+U_PCB_REGS+(PC * 4) | |
1267 | lw AT, UADDR+U_PCB_REGS+(AST * 4) | |
1268 | lw v0, UADDR+U_PCB_REGS+(V0 * 4) | |
1269 | lw v1, UADDR+U_PCB_REGS+(V1 * 4) | |
1270 | lw a0, UADDR+U_PCB_REGS+(A0 * 4) | |
1271 | lw a1, UADDR+U_PCB_REGS+(A1 * 4) | |
1272 | lw a2, UADDR+U_PCB_REGS+(A2 * 4) | |
1273 | lw a3, UADDR+U_PCB_REGS+(A3 * 4) | |
1274 | lw t0, UADDR+U_PCB_REGS+(T0 * 4) | |
1275 | lw t1, UADDR+U_PCB_REGS+(T1 * 4) | |
1276 | lw t2, UADDR+U_PCB_REGS+(T2 * 4) | |
1277 | lw t3, UADDR+U_PCB_REGS+(T3 * 4) | |
1278 | lw t4, UADDR+U_PCB_REGS+(T4 * 4) | |
1279 | lw t5, UADDR+U_PCB_REGS+(T5 * 4) | |
1280 | lw t6, UADDR+U_PCB_REGS+(T6 * 4) | |
1281 | lw t7, UADDR+U_PCB_REGS+(T7 * 4) | |
1282 | lw s0, UADDR+U_PCB_REGS+(S0 * 4) | |
1283 | lw s1, UADDR+U_PCB_REGS+(S1 * 4) | |
1284 | lw s2, UADDR+U_PCB_REGS+(S2 * 4) | |
1285 | lw s3, UADDR+U_PCB_REGS+(S3 * 4) | |
1286 | lw s4, UADDR+U_PCB_REGS+(S4 * 4) | |
1287 | lw s5, UADDR+U_PCB_REGS+(S5 * 4) | |
1288 | lw s6, UADDR+U_PCB_REGS+(S6 * 4) | |
1289 | lw s7, UADDR+U_PCB_REGS+(S7 * 4) | |
1290 | lw t8, UADDR+U_PCB_REGS+(T8 * 4) | |
1291 | lw t9, UADDR+U_PCB_REGS+(T9 * 4) | |
1292 | lw gp, UADDR+U_PCB_REGS+(GP * 4) | |
1293 | lw sp, UADDR+U_PCB_REGS+(SP * 4) | |
1294 | lw s8, UADDR+U_PCB_REGS+(S8 * 4) | |
1295 | lw ra, UADDR+U_PCB_REGS+(RA * 4) | |
1296 | j k0 | |
1297 | rfe | |
1298 | .set at | |
1299 | .set reorder | |
1300 | END(MachUserGenException) | |
1301 | ||
1302 | /*---------------------------------------------------------------------------- | |
1303 | * | |
1304 | * MachKernIntr -- | |
1305 | * | |
1306 | * Handle an interrupt from kernel mode. | |
1307 | * Interrupts must use a separate stack since during exit() | |
1308 | * there is a window of time when there is no kernel stack. | |
1309 | * | |
1310 | * Results: | |
1311 | * None. | |
1312 | * | |
1313 | * Side effects: | |
1314 | * None. | |
1315 | * | |
1316 | *---------------------------------------------------------------------------- | |
1317 | */ | |
# Kernel interrupt frame: reuses KERN_REG_SIZE; additionally records the
# interrupted stack pointer so the handler can run on a dedicated
# interrupt stack (located just below MACH_CODE_START).
1318 | #define KINTR_REG_OFFSET (STAND_FRAME_SIZE) | |
1319 | #define KINTR_SR_OFFSET (STAND_FRAME_SIZE + KERN_REG_SIZE) | |
1320 | #define KINTR_SP_OFFSET (STAND_FRAME_SIZE + KERN_REG_SIZE + 4) | |
1321 | #define KINTR_MULT_LO_OFFSET (STAND_FRAME_SIZE + KERN_REG_SIZE + 8) | |
1322 | #define KINTR_MULT_HI_OFFSET (STAND_FRAME_SIZE + KERN_REG_SIZE + 12) | |
1323 | #define KINTR_FRAME_SIZE (STAND_FRAME_SIZE + KERN_REG_SIZE + 16) | |
1324 | || 
# MachKernIntr: interrupt taken while in kernel mode.  Switches to the
# interrupt stack (unless already on it), saves the volatile registers,
# calls interrupt(SR, Cause, EPC), restores, and resumes at EPC.
1325 | NON_LEAF(MachKernIntr, KINTR_FRAME_SIZE, ra) | |
1326 | .set noreorder | |
1327 | .set noat | |
1328 | .mask 0x80000000, (STAND_RA_OFFSET - KINTR_FRAME_SIZE) | |
1329 | /* | |
1330 | * Check to see if we are already on the interrupt stack. | |
1331 | */ | |
1332 | li k0, MACH_CODE_START # interrupt stack below code | |
1333 | sltu k1, sp, k0 | |
1334 | beq k1, zero, 1f # no, init sp | |
1335 | nop | |
1336 | sw sp, KINTR_SP_OFFSET - KINTR_FRAME_SIZE(sp) # save old sp | |
1337 | b 2f | |
1338 | subu sp, sp, KINTR_FRAME_SIZE # allocate stack frame (delay slot) | |
1339 | 1: | |
1340 | sw sp, KINTR_SP_OFFSET - KINTR_FRAME_SIZE(k0) # save old sp | |
1341 | subu sp, k0, KINTR_FRAME_SIZE # switch to interrupt stack | |
1342 | 2: | |
1343 | /* | |
1344 | * Save the relevant kernel registers onto the stack. | |
1345 | * We don't need to save s0 - s8, sp and gp because | |
1346 | * the compiler does it for us. | |
1347 | */ | |
1348 | sw AT, KINTR_REG_OFFSET + 0(sp) | |
1349 | sw v0, KINTR_REG_OFFSET + 4(sp) | |
1350 | sw v1, KINTR_REG_OFFSET + 8(sp) | |
1351 | sw a0, KINTR_REG_OFFSET + 12(sp) | |
1352 | mflo v0 | |
1353 | mfhi v1 | |
1354 | sw a1, KINTR_REG_OFFSET + 16(sp) | |
1355 | sw a2, KINTR_REG_OFFSET + 20(sp) | |
1356 | sw a3, KINTR_REG_OFFSET + 24(sp) | |
1357 | sw t0, KINTR_REG_OFFSET + 28(sp) | |
1358 | mfc0 a0, MACH_COP_0_STATUS_REG # First arg is the status reg. | |
1359 | sw t1, KINTR_REG_OFFSET + 32(sp) | |
1360 | sw t2, KINTR_REG_OFFSET + 36(sp) | |
1361 | sw t3, KINTR_REG_OFFSET + 40(sp) | |
1362 | sw t4, KINTR_REG_OFFSET + 44(sp) | |
1363 | mfc0 a1, MACH_COP_0_CAUSE_REG # Second arg is the cause reg. | |
1364 | sw t5, KINTR_REG_OFFSET + 48(sp) | |
1365 | sw t6, KINTR_REG_OFFSET + 52(sp) | |
1366 | sw t7, KINTR_REG_OFFSET + 56(sp) | |
1367 | sw t8, KINTR_REG_OFFSET + 60(sp) | |
1368 | mfc0 a2, MACH_COP_0_EXC_PC # Third arg is the pc. | |
1369 | sw t9, KINTR_REG_OFFSET + 64(sp) | |
1370 | sw ra, KINTR_REG_OFFSET + 68(sp) | |
1371 | sw v0, KINTR_MULT_LO_OFFSET(sp) | |
1372 | sw v1, KINTR_MULT_HI_OFFSET(sp) | |
1373 | sw a0, KINTR_SR_OFFSET(sp) | |
1374 | /* | |
1375 | * Call the interrupt handler. | |
1376 | */ | |
1377 | jal interrupt | |
1378 | sw a2, STAND_RA_OFFSET(sp) # for debugging (delay slot) | |
1379 | /* | |
1380 | * Restore registers and return from the interrupt. | |
1381 | */ | |
1382 | lw a0, KINTR_SR_OFFSET(sp) | |
1383 | lw t0, KINTR_MULT_LO_OFFSET(sp) | |
1384 | lw t1, KINTR_MULT_HI_OFFSET(sp) | |
1385 | mtc0 a0, MACH_COP_0_STATUS_REG # Restore the SR, disable intrs | |
1386 | mtlo t0 | |
1387 | mthi t1 | |
1388 | lw k0, STAND_RA_OFFSET(sp) | |
1389 | lw AT, KINTR_REG_OFFSET + 0(sp) | |
1390 | lw v0, KINTR_REG_OFFSET + 4(sp) | |
1391 | lw v1, KINTR_REG_OFFSET + 8(sp) | |
1392 | lw a0, KINTR_REG_OFFSET + 12(sp) | |
1393 | lw a1, KINTR_REG_OFFSET + 16(sp) | |
1394 | lw a2, KINTR_REG_OFFSET + 20(sp) | |
1395 | lw a3, KINTR_REG_OFFSET + 24(sp) | |
1396 | lw t0, KINTR_REG_OFFSET + 28(sp) | |
1397 | lw t1, KINTR_REG_OFFSET + 32(sp) | |
1398 | lw t2, KINTR_REG_OFFSET + 36(sp) | |
1399 | lw t3, KINTR_REG_OFFSET + 40(sp) | |
1400 | lw t4, KINTR_REG_OFFSET + 44(sp) | |
1401 | lw t5, KINTR_REG_OFFSET + 48(sp) | |
1402 | lw t6, KINTR_REG_OFFSET + 52(sp) | |
1403 | lw t7, KINTR_REG_OFFSET + 56(sp) | |
1404 | lw t8, KINTR_REG_OFFSET + 60(sp) | |
1405 | lw t9, KINTR_REG_OFFSET + 64(sp) | |
1406 | lw ra, KINTR_REG_OFFSET + 68(sp) | |
1407 | lw sp, KINTR_SP_OFFSET(sp) # restore orig sp | |
1408 | j k0 # Now return from the | |
1409 | rfe # interrupt. | |
1410 | .set at | |
1411 | .set reorder | |
1412 | END(MachKernIntr) | |
1413 | ||
1414 | /*---------------------------------------------------------------------------- | |
1415 | * | |
1416 | * MachUserIntr -- | |
1417 | * | |
1418 | * Handle an interrupt from user mode. | |
1419 | * Note: we save minimal state in the u.u_pcb struct and use the standard | |
1420 | * kernel stack since there has to be a u page if we came from user mode. | |
1421 | * If there is a pending software interrupt, then save the remaining state | |
1422 | * and call softintr(). This is all because if we call swtch() inside | |
1423 | * interrupt(), not all the user registers have been saved in u.u_pcb. | |
1424 | * | |
1425 | * Results: | |
1426 | * None. | |
1427 | * | |
1428 | * Side effects: | |
1429 | * None. | |
1430 | * | |
1431 | *---------------------------------------------------------------------------- | |
1432 | */ | |
# MachUserIntr: interrupt taken from user mode.  Saves the caller-saved
# user registers into u.u_pcb, switches to the kernel stack/gp, disables
# the FPU bit, and calls interrupt(SR, Cause, EPC).  On return, if
# astpending is set it additionally saves s0-s8, enables interrupts, and
# calls softintr() before restoring the full user state; otherwise it
# restores the partial state and returns directly with rfe.
1433 | NON_LEAF(MachUserIntr, STAND_FRAME_SIZE, ra) | |
1434 | .set noreorder | |
1435 | .set noat | |
1436 | .mask 0x80000000, (STAND_RA_OFFSET - STAND_FRAME_SIZE) | |
1437 | /* | |
1438 | * Save the relevant user registers into the u.u_pcb struct. | |
1439 | * We don't need to save s0 - s8 because | |
1440 | * the compiler does it for us. | |
1441 | */ | |
1442 | sw AT, UADDR+U_PCB_REGS+(AST * 4) | |
1443 | sw v0, UADDR+U_PCB_REGS+(V0 * 4) | |
1444 | sw v1, UADDR+U_PCB_REGS+(V1 * 4) | |
1445 | sw a0, UADDR+U_PCB_REGS+(A0 * 4) | |
1446 | mflo v0 | |
1447 | mfhi v1 | |
1448 | sw a1, UADDR+U_PCB_REGS+(A1 * 4) | |
1449 | sw a2, UADDR+U_PCB_REGS+(A2 * 4) | |
1450 | sw a3, UADDR+U_PCB_REGS+(A3 * 4) | |
1451 | sw t0, UADDR+U_PCB_REGS+(T0 * 4) | |
1452 | mfc0 a0, MACH_COP_0_STATUS_REG # First arg is the status reg. | |
1453 | sw t1, UADDR+U_PCB_REGS+(T1 * 4) | |
1454 | sw t2, UADDR+U_PCB_REGS+(T2 * 4) | |
1455 | sw t3, UADDR+U_PCB_REGS+(T3 * 4) | |
1456 | sw t4, UADDR+U_PCB_REGS+(T4 * 4) | |
1457 | mfc0 a1, MACH_COP_0_CAUSE_REG # Second arg is the cause reg. | |
1458 | sw t5, UADDR+U_PCB_REGS+(T5 * 4) | |
1459 | sw t6, UADDR+U_PCB_REGS+(T6 * 4) | |
1460 | sw t7, UADDR+U_PCB_REGS+(T7 * 4) | |
1461 | sw t8, UADDR+U_PCB_REGS+(T8 * 4) | |
1462 | mfc0 a2, MACH_COP_0_EXC_PC # Third arg is the pc. | |
1463 | sw t9, UADDR+U_PCB_REGS+(T9 * 4) | |
1464 | sw gp, UADDR+U_PCB_REGS+(GP * 4) | |
1465 | sw sp, UADDR+U_PCB_REGS+(SP * 4) | |
1466 | sw ra, UADDR+U_PCB_REGS+(RA * 4) | |
1467 | li sp, KERNELSTACK - STAND_FRAME_SIZE # switch to kernel SP | |
1468 | sw v0, UADDR+U_PCB_REGS+(MULLO * 4) | |
1469 | sw v1, UADDR+U_PCB_REGS+(MULHI * 4) | |
1470 | sw a0, UADDR+U_PCB_REGS+(SR * 4) | |
1471 | sw a2, UADDR+U_PCB_REGS+(PC * 4) | |
1472 | la gp, _gp # switch to kernel GP | |
1473 | and t0, a0, ~MACH_SR_COP_1_BIT # Turn off the FPU. | |
1474 | mtc0 t0, MACH_COP_0_STATUS_REG | |
1475 | /* | |
1476 | * Call the interrupt handler. | |
1477 | */ | |
1478 | jal interrupt | |
1479 | sw a2, STAND_RA_OFFSET(sp) # for debugging (delay slot) | |
1480 | /* | |
1481 | * Restore registers and return from the interrupt. | |
1482 | */ | |
1483 | lw a0, UADDR+U_PCB_REGS+(SR * 4) | |
1484 | lw v0, astpending # any pending software interrupts? | |
1485 | mtc0 a0, MACH_COP_0_STATUS_REG # Restore the SR, disable intrs | |
1486 | bne v0, zero, 1f # don't restore, call softintr | |
1487 | lw t0, UADDR+U_PCB_REGS+(MULLO * 4) | |
1488 | lw t1, UADDR+U_PCB_REGS+(MULHI * 4) | |
1489 | lw k0, UADDR+U_PCB_REGS+(PC * 4) | |
1490 | lw AT, UADDR+U_PCB_REGS+(AST * 4) | |
1491 | lw v0, UADDR+U_PCB_REGS+(V0 * 4) | |
1492 | lw v1, UADDR+U_PCB_REGS+(V1 * 4) | |
1493 | lw a0, UADDR+U_PCB_REGS+(A0 * 4) | |
1494 | lw a1, UADDR+U_PCB_REGS+(A1 * 4) | |
1495 | lw a2, UADDR+U_PCB_REGS+(A2 * 4) | |
1496 | lw a3, UADDR+U_PCB_REGS+(A3 * 4) | |
1497 | mtlo t0 | |
1498 | mthi t1 | |
1499 | lw t0, UADDR+U_PCB_REGS+(T0 * 4) | |
1500 | lw t1, UADDR+U_PCB_REGS+(T1 * 4) | |
1501 | lw t2, UADDR+U_PCB_REGS+(T2 * 4) | |
1502 | lw t3, UADDR+U_PCB_REGS+(T3 * 4) | |
1503 | lw t4, UADDR+U_PCB_REGS+(T4 * 4) | |
1504 | lw t5, UADDR+U_PCB_REGS+(T5 * 4) | |
1505 | lw t6, UADDR+U_PCB_REGS+(T6 * 4) | |
1506 | lw t7, UADDR+U_PCB_REGS+(T7 * 4) | |
1507 | lw t8, UADDR+U_PCB_REGS+(T8 * 4) | |
1508 | lw t9, UADDR+U_PCB_REGS+(T9 * 4) | |
1509 | lw gp, UADDR+U_PCB_REGS+(GP * 4) | |
1510 | lw sp, UADDR+U_PCB_REGS+(SP * 4) | |
1511 | lw ra, UADDR+U_PCB_REGS+(RA * 4) | |
1512 | j k0 # Now return from the | |
1513 | rfe # interrupt. | |
1514 | || 
1515 | 1: | |
1516 | /* | |
1517 | * We have pending software interrupts; save remaining user state in u.u_pcb. | |
1518 | */ | |
1519 | sw s0, UADDR+U_PCB_REGS+(S0 * 4) | |
1520 | sw s1, UADDR+U_PCB_REGS+(S1 * 4) | |
1521 | sw s2, UADDR+U_PCB_REGS+(S2 * 4) | |
1522 | sw s3, UADDR+U_PCB_REGS+(S3 * 4) | |
1523 | sw s4, UADDR+U_PCB_REGS+(S4 * 4) | |
1524 | sw s5, UADDR+U_PCB_REGS+(S5 * 4) | |
1525 | sw s6, UADDR+U_PCB_REGS+(S6 * 4) | |
1526 | sw s7, UADDR+U_PCB_REGS+(S7 * 4) | |
1527 | sw s8, UADDR+U_PCB_REGS+(S8 * 4) | |
1528 | li t0, MACH_HARD_INT_MASK | MACH_SR_INT_ENA_CUR | |
1529 | /* | |
1530 | * Call the software interrupt handler. | |
1531 | */ | |
1532 | jal softintr | |
1533 | mtc0 t0, MACH_COP_0_STATUS_REG # enable interrupts (spl0) (delay slot) | |
1534 | /* | |
1535 | * Restore user registers and return. NOTE: interrupts are enabled. | |
1536 | */ | |
1537 | lw a0, UADDR+U_PCB_REGS+(SR * 4) | |
1538 | lw t0, UADDR+U_PCB_REGS+(MULLO * 4) | |
1539 | lw t1, UADDR+U_PCB_REGS+(MULHI * 4) | |
1540 | mtc0 a0, MACH_COP_0_STATUS_REG # this should disable interrupts | |
1541 | mtlo t0 | |
1542 | mthi t1 | |
1543 | lw k0, UADDR+U_PCB_REGS+(PC * 4) | |
1544 | lw AT, UADDR+U_PCB_REGS+(AST * 4) | |
1545 | lw v0, UADDR+U_PCB_REGS+(V0 * 4) | |
1546 | lw v1, UADDR+U_PCB_REGS+(V1 * 4) | |
1547 | lw a0, UADDR+U_PCB_REGS+(A0 * 4) | |
1548 | lw a1, UADDR+U_PCB_REGS+(A1 * 4) | |
1549 | lw a2, UADDR+U_PCB_REGS+(A2 * 4) | |
1550 | lw a3, UADDR+U_PCB_REGS+(A3 * 4) | |
1551 | lw t0, UADDR+U_PCB_REGS+(T0 * 4) | |
1552 | lw t1, UADDR+U_PCB_REGS+(T1 * 4) | |
1553 | lw t2, UADDR+U_PCB_REGS+(T2 * 4) | |
1554 | lw t3, UADDR+U_PCB_REGS+(T3 * 4) | |
1555 | lw t4, UADDR+U_PCB_REGS+(T4 * 4) | |
1556 | lw t5, UADDR+U_PCB_REGS+(T5 * 4) | |
1557 | lw t6, UADDR+U_PCB_REGS+(T6 * 4) | |
1558 | lw t7, UADDR+U_PCB_REGS+(T7 * 4) | |
1559 | lw s0, UADDR+U_PCB_REGS+(S0 * 4) | |
1560 | lw s1, UADDR+U_PCB_REGS+(S1 * 4) | |
1561 | lw s2, UADDR+U_PCB_REGS+(S2 * 4) | |
1562 | lw s3, UADDR+U_PCB_REGS+(S3 * 4) | |
1563 | lw s4, UADDR+U_PCB_REGS+(S4 * 4) | |
1564 | lw s5, UADDR+U_PCB_REGS+(S5 * 4) | |
1565 | lw s6, UADDR+U_PCB_REGS+(S6 * 4) | |
1566 | lw s7, UADDR+U_PCB_REGS+(S7 * 4) | |
1567 | lw t8, UADDR+U_PCB_REGS+(T8 * 4) | |
1568 | lw t9, UADDR+U_PCB_REGS+(T9 * 4) | |
1569 | lw gp, UADDR+U_PCB_REGS+(GP * 4) | |
1570 | lw sp, UADDR+U_PCB_REGS+(SP * 4) | |
1571 | lw s8, UADDR+U_PCB_REGS+(S8 * 4) | |
1572 | lw ra, UADDR+U_PCB_REGS+(RA * 4) | |
1573 | j k0 | |
1574 | rfe | |
1575 | .set at | |
1576 | .set reorder | |
1577 | END(MachUserIntr) | |
1578 | ||
1579 | /*---------------------------------------------------------------------------- | |
1580 | * | |
1581 | * MachTLBModException -- | |
1582 | * | |
1583 | * Handle a TLB modified exception. | |
1584 | * The BadVAddr, Context, and EntryHi registers contain the failed | |
1585 | * virtual address. | |
1586 | * | |
1587 | * Results: | |
1588 | * None. | |
1589 | * | |
1590 | * Side effects: | |
1591 | * None. | |
1592 | * | |
1593 | *---------------------------------------------------------------------------- | |
1594 | */ | |
# MachTLBModException: TLB-modified exception handler.  The whole body is
# compiled out (#if 0), so as assembled this is an empty routine that falls
# through END(); the disabled code would probe the TLB, set the TLB dirty
# bit, and record the page as modified in pmap_attributes, panicking
# (break 0) if the entry is not found.
1595 | LEAF(MachTLBModException) | |
1596 | #if 0 | |
1597 | .set noreorder | |
1598 | .set noat | |
1599 | tlbp # find the TLB entry | |
1600 | mfc0 k0, MACH_COP_0_TLB_LOW # get the physical address | |
1601 | mfc0 k1, MACH_COP_0_TLB_INDEX # check to be sure its valid | |
1602 | or k0, k0, VMMACH_TLB_MOD_BIT # update TLB | |
1603 | blt k1, zero, 4f # not found!!! | |
1604 | mtc0 k0, MACH_COP_0_TLB_LOW | |
1605 | li k1, MACH_CACHED_MEMORY_ADDR | |
1606 | subu k0, k0, k1 | |
1607 | srl k0, k0, VMMACH_TLB_PHYS_PAGE_SHIFT | |
1608 | la k1, pmap_attributes | |
1609 | add k0, k0, k1 | |
1610 | lbu k1, 0(k0) # fetch old value | |
1611 | nop | |
1612 | or k1, k1, 1 # set modified bit | |
1613 | sb k1, 0(k0) # save new value | |
1614 | mfc0 k0, MACH_COP_0_EXC_PC # get return address | |
1615 | nop | |
1616 | j k0 | |
1617 | rfe | |
1618 | 4: | |
1619 | break 0 # panic | |
1620 | .set reorder | |
1621 | .set at | |
1622 | #endif | |
1623 | END(MachTLBModException) | |
1624 | ||
1625 | /*---------------------------------------------------------------------------- | |
1626 | * | |
1627 | * MachTLBMissException -- | |
1628 | * | |
1629 | * Handle a TLB miss exception from kernel mode. | |
1630 | * The BadVAddr, Context, and EntryHi registers contain the failed | |
1631 | * virtual address. | |
1632 | * | |
1633 | * Results: | |
1634 | * None. | |
1635 | * | |
1636 | * Side effects: | |
1637 | * None. | |
1638 | * | |
1639 | *---------------------------------------------------------------------------- | |
1640 | */ | |
# MachTLBMissException: kernel-mode TLB miss on a kseg2 address.  Computes
# the page index relative to MACH_KSEG2_ADDR, bounds-checks it against the
# kernel PTE array (PMAP_HASH_KPAGES * NPTEPG), loads the PTE, and if valid
# writes it into a random TLB slot and returns via EPC + rfe.  Out-of-range
# or invalid PTEs go to SlowFault.  Uses only k0/k1.
1641 | LEAF(MachTLBMissException) | |
1642 | .set noreorder | |
1643 | .set noat | |
1644 | mfc0 k0, MACH_COP_0_BAD_VADDR # get the fault address | |
1645 | li k1, MACH_KSEG2_ADDR # compute index | |
1646 | subu k0, k0, k1 | |
1647 | srl k0, k0, PGSHIFT | |
1648 | li k1, PMAP_HASH_KPAGES * NPTEPG # index within range? | |
1649 | sltu k1, k0, k1 | |
1650 | beq k1, zero, SlowFault # No. do it the long way | |
1651 | sll k0, k0, 2 # compute offset from index (delay slot) | |
1652 | lw k0, PMAP_HASH_KADDR(k0) # get PTE entry | |
1653 | mfc0 k1, MACH_COP_0_EXC_PC # get return address | |
1654 | mtc0 k0, MACH_COP_0_TLB_LOW # save PTE entry | |
1655 | and k0, k0, PG_V # make sure it's valid | |
1656 | beq k0, zero, SlowFault # No. do it the long way | |
1657 | nop | |
1658 | tlbwr # update TLB | |
1659 | j k1 | |
1660 | rfe | |
1661 | .set reorder | |
1662 | .set at | |
1663 | END(MachTLBMissException) | |
1664 | ||
1665 | /* | |
1666 | * Set/clear software interrupt routines. | |
1667 | */ | |
1668 | ||
# setsoftclock(): post a soft clock interrupt by setting soft interrupt
# bit 0 in the CP0 Cause register.  Clobbers: v0.
1669 | LEAF(setsoftclock) | |
1670 | .set noreorder | |
1671 | mfc0 v0, MACH_COP_0_CAUSE_REG # read cause register | |
1672 | nop | |
1673 | or v0, v0, MACH_SOFT_INT_MASK_0 # set soft clock interrupt | |
1674 | mtc0 v0, MACH_COP_0_CAUSE_REG # save it | |
1675 | j ra | |
1676 | nop | |
1677 | .set reorder | |
1678 | END(setsoftclock) | |
1679 | ||
# clearsoftclock(): acknowledge a soft clock interrupt by clearing soft
# interrupt bit 0 in the CP0 Cause register.  Clobbers: v0.
1680 | LEAF(clearsoftclock) | |
1681 | .set noreorder | |
1682 | mfc0 v0, MACH_COP_0_CAUSE_REG # read cause register | |
1683 | nop | |
1684 | and v0, v0, ~MACH_SOFT_INT_MASK_0 # clear soft clock interrupt | |
1685 | mtc0 v0, MACH_COP_0_CAUSE_REG # save it | |
1686 | j ra | |
1687 | nop | |
1688 | .set reorder | |
1689 | END(clearsoftclock) | |
1690 | ||
# setsoftnet(): post a soft network interrupt by setting soft interrupt
# bit 1 in the CP0 Cause register.  Clobbers: v0.
1691 | LEAF(setsoftnet) | |
1692 | .set noreorder | |
1693 | mfc0 v0, MACH_COP_0_CAUSE_REG # read cause register | |
1694 | nop | |
1695 | or v0, v0, MACH_SOFT_INT_MASK_1 # set soft net interrupt | |
1696 | mtc0 v0, MACH_COP_0_CAUSE_REG # save it | |
1697 | j ra | |
1698 | nop | |
1699 | .set reorder | |
1700 | END(setsoftnet) | |
1701 | ||
# clearsoftnet(): acknowledge a soft network interrupt by clearing soft
# interrupt bit 1 in the CP0 Cause register.  Clobbers: v0.
1702 | LEAF(clearsoftnet) | |
1703 | .set noreorder | |
1704 | mfc0 v0, MACH_COP_0_CAUSE_REG # read cause register | |
1705 | nop | |
1706 | and v0, v0, ~MACH_SOFT_INT_MASK_1 # clear soft net interrupt | |
1707 | mtc0 v0, MACH_COP_0_CAUSE_REG # save it | |
1708 | j ra | |
1709 | nop | |
1710 | .set reorder | |
1711 | END(clearsoftnet) | |
1712 | ||
1713 | /* | |
1714 | * Set/change interrupt priority routines. | |
1715 | */ | |
1716 | ||
# MachEnableIntr(): set the current interrupt-enable bit in the CP0 Status
# register without changing the interrupt mask.  Clobbers: v0.
1717 | LEAF(MachEnableIntr) | |
1718 | .set noreorder | |
1719 | mfc0 v0, MACH_COP_0_STATUS_REG # read status register | |
1720 | nop | |
1721 | or v0, v0, MACH_SR_INT_ENA_CUR | |
1722 | mtc0 v0, MACH_COP_0_STATUS_REG # enable all interrupts | |
1723 | j ra | |
1724 | nop | |
1725 | .set reorder | |
1726 | END(MachEnableIntr) | |
1727 | ||
# spl0(): enable all interrupts (all mask bits + current enable).
# Returns (v0): previous SR masked to the interrupt bits, for splx();
# the masking runs in the jump's delay slot.  Clobbers: t0.
1728 | LEAF(spl0) | |
1729 | .set noreorder | |
1730 | mfc0 v0, MACH_COP_0_STATUS_REG # read status register | |
1731 | nop | |
1732 | or t0, v0, (MACH_INT_MASK | MACH_SR_INT_ENA_CUR) | |
1733 | mtc0 t0, MACH_COP_0_STATUS_REG # enable all interrupts | |
1734 | j ra | |
1735 | and v0, v0, (MACH_INT_MASK | MACH_SR_INT_ENA_CUR) | |
1736 | .set reorder | |
1737 | END(spl0) | |
1738 | ||
# splsoftclock(): block soft clock interrupts.
# BUG FIX: this previously cleared MACH_SOFT_INT_MASK_1, which is the soft
# *network* interrupt bit (see setsoftnet/clearsoftnet above), leaving the
# soft clock interrupt (MASK_0, per setsoftclock/clearsoftclock) enabled.
# Clear MACH_SOFT_INT_MASK_0 instead, matching the comment's stated intent.
# Returns (v0): previous SR masked to the interrupt bits (delay slot).
# Clobbers: t0.
1739 | LEAF(splsoftclock) | |
1740 | .set noreorder | |
1741 | mfc0 v0, MACH_COP_0_STATUS_REG # read status register | |
1742 | li t0, ~MACH_SOFT_INT_MASK_0 # disable soft clock | |
1743 | and t0, t0, v0 | |
1744 | mtc0 t0, MACH_COP_0_STATUS_REG # save it | |
1745 | j ra | |
1746 | and v0, v0, (MACH_INT_MASK | MACH_SR_INT_ENA_CUR) | |
1747 | .set reorder | |
1748 | END(splsoftclock) | |
1749 | ||
# splbio(): block SCSI/disk (hardware interrupt 0) interrupts.
# Returns (v0): previous SR masked to the interrupt bits (delay slot).
# Clobbers: t0.
1750 | LEAF(splbio) | |
1751 | .set noreorder | |
1752 | mfc0 v0, MACH_COP_0_STATUS_REG # read status register | |
1753 | li t0, ~MACH_INT_MASK_0 # disable SCSI interrupts | |
1754 | and t0, t0, v0 | |
1755 | mtc0 t0, MACH_COP_0_STATUS_REG # save it | |
1756 | j ra | |
1757 | and v0, v0, (MACH_INT_MASK | MACH_SR_INT_ENA_CUR) | |
1758 | .set reorder | |
1759 | END(splbio) | |
1760 | ||
1761 | /* | |
1762 | * Block interrupts for any device that could allocate memory at interrupt | |
1763 | * time. | |
1764 | */ | |
# splnet()/splimp(): block network (hardware interrupt 1) interrupts —
# also used for any device that may allocate memory at interrupt time.
# Returns (v0): previous SR masked to the interrupt bits (delay slot).
# Clobbers: t0.
1765 | LEAF(splnet) | |
1766 | ALEAF(splimp) | |
1767 | .set noreorder | |
1768 | mfc0 v0, MACH_COP_0_STATUS_REG # read status register | |
1769 | li t0, ~MACH_INT_MASK_1 # disable network interrupts | |
1770 | and t0, t0, v0 | |
1771 | mtc0 t0, MACH_COP_0_STATUS_REG # save it | |
1772 | j ra | |
1773 | and v0, v0, (MACH_INT_MASK | MACH_SR_INT_ENA_CUR) | |
1774 | .set reorder | |
1775 | END(splnet) | |
1776 | ||
# spltty(): block terminal (hardware interrupt 2) interrupts.
# Returns (v0): previous SR masked to the interrupt bits (delay slot).
# Clobbers: t0.
1777 | LEAF(spltty) | |
1778 | .set noreorder | |
1779 | mfc0 v0, MACH_COP_0_STATUS_REG # read status register | |
1780 | li t0, ~MACH_INT_MASK_2 # disable tty interrupts | |
1781 | and t0, t0, v0 | |
1782 | mtc0 t0, MACH_COP_0_STATUS_REG # save it | |
1783 | j ra | |
1784 | and v0, v0, (MACH_INT_MASK | MACH_SR_INT_ENA_CUR) | |
1785 | .set reorder | |
1786 | END(spltty) | |
1787 | ||
# splclock(): block clock (hardware interrupt 3) interrupts.
# Returns (v0): previous SR masked to the interrupt bits (delay slot).
# Clobbers: t0.
1788 | LEAF(splclock) | |
1789 | .set noreorder | |
1790 | mfc0 v0, MACH_COP_0_STATUS_REG # read status register | |
1791 | li t0, ~MACH_INT_MASK_3 # disable clock interrupts | |
1792 | and t0, t0, v0 | |
1793 | mtc0 t0, MACH_COP_0_STATUS_REG # save it | |
1794 | j ra | |
1795 | and v0, v0, (MACH_INT_MASK | MACH_SR_INT_ENA_CUR) | |
1796 | .set reorder | |
1797 | END(splclock) | |
1798 | ||
 | # splhigh -- block ALL interrupts by clearing the current | |
 | # interrupt-enable bit in the status register. | |
 | # Returns the previous interrupt mask/enable bits (for splx()). | |
1799 | LEAF(splhigh) | |
1800 | .set noreorder | |
1801 | mfc0 v0, MACH_COP_0_STATUS_REG # read status register | |
1802 | li t0, ~MACH_SR_INT_ENA_CUR # disable all interrupts | |
1803 | and t0, t0, v0 | |
1804 | mtc0 t0, MACH_COP_0_STATUS_REG # save it | |
1805 | j ra | |
1806 | and v0, v0, (MACH_INT_MASK | MACH_SR_INT_ENA_CUR) | |
1807 | .set reorder | |
1808 | END(splhigh) | |
1809 | ||
1810 | /* | |
1811 | * Restore saved interrupt mask. | |
 | * a0 = mask/enable bits previously returned by one of the spl*() | |
 | * routines above; they are OR'ed back into the status register. | |
1812 | */ | |
1813 | LEAF(splx) | |
1814 | .set noreorder | |
1815 | mfc0 v0, MACH_COP_0_STATUS_REG | |
1816 | li t0, ~(MACH_INT_MASK | MACH_SR_INT_ENA_CUR) | |
1817 | and t0, t0, v0 # clear current mask/enable bits | |
1818 | or t0, t0, a0 # merge in the saved bits | |
1819 | mtc0 t0, MACH_COP_0_STATUS_REG | |
1820 | j ra | |
1821 | nop | |
1822 | .set reorder | |
1823 | END(splx) | |
1824 | ||
1825 | /*---------------------------------------------------------------------------- | |
1826 | * | |
1827 | * MachEmptyWriteBuffer -- | |
1828 | * | |
1829 | * Return when the write buffer is empty. | |
1830 | * | |
1831 | * MachEmptyWriteBuffer() | |
1832 | * | |
1833 | * Results: | |
1834 | * None. | |
1835 | * | |
1836 | * Side effects: | |
1837 | * None. | |
1838 | * | |
1839 | *---------------------------------------------------------------------------- | |
1840 | */ | |
1841 | LEAF(MachEmptyWriteBuffer) | |
1842 | .set noreorder | |
 | # The leading nops presumably let any pending stores reach the | |
 | # write buffer before we sample its state -- TODO confirm timing. | |
1843 | nop | |
1844 | nop | |
1845 | nop | |
1846 | nop | |
 | # Spin: bc0f loops back while the CP0 condition input is false. | |
 | # NOTE(review): assumes the CpCond0 pin reflects write-buffer | |
 | # status on this machine -- confirm against hardware docs. | |
1847 | 1: bc0f 1b | |
1848 | nop | |
1849 | j ra | |
1850 | nop | |
1851 | .set reorder | |
1852 | END(MachEmptyWriteBuffer) | |
1853 | ||
1854 | /*-------------------------------------------------------------------------- | |
1855 | * | |
1856 | * MachTLBWriteIndexed -- | |
1857 | * | |
1858 | * Write the given entry into the TLB at the given index. | |
1859 | * | |
1860 | * MachTLBWriteIndexed(index, highEntry, lowEntry) | |
1861 | * int index; | |
1862 | * int highEntry; | |
1863 | * int lowEntry; | |
1864 | * | |
1865 | * Results: | |
1866 | * None. | |
1867 | * | |
1868 | * Side effects: | |
1869 | * TLB entry set. | |
1870 | * | |
1871 | *-------------------------------------------------------------------------- | |
1872 | */ | |
1873 | LEAF(MachTLBWriteIndexed) | |
 | # Interrupts stay disabled until the delay-slot mtc0 at the end | |
 | # restores the saved status register. | |
1874 | .set noreorder | |
1875 | mfc0 t1, MACH_COP_0_STATUS_REG # Save the status register. | |
1876 | mfc0 t0, MACH_COP_0_TLB_HI # Save the current PID. | |
1877 | mtc0 zero, MACH_COP_0_STATUS_REG # Disable interrupts | |
1878 | ||
1879 | sll a0, a0, VMMACH_TLB_INDEX_SHIFT | |
1880 | mtc0 a0, MACH_COP_0_TLB_INDEX # Set the index. | |
1881 | mtc0 a1, MACH_COP_0_TLB_HI # Set up entry high. | |
1882 | mtc0 a2, MACH_COP_0_TLB_LOW # Set up entry low. | |
1883 | nop | |
1884 | tlbwi # Write the TLB | |
1885 | ||
1886 | mtc0 t0, MACH_COP_0_TLB_HI # Restore the PID. | |
1887 | j ra | |
1888 | mtc0 t1, MACH_COP_0_STATUS_REG # Restore the status register | |
1889 | .set reorder | |
1890 | END(MachTLBWriteIndexed) | |
1891 | ||
1892 | /*-------------------------------------------------------------------------- | |
1893 | * | |
1894 | * MachTLBWriteRandom -- | |
1895 | * | |
1896 | * Write the given entry into the TLB at a random location. | |
1897 | * | |
1898 | * MachTLBWriteRandom(highEntry, lowEntry) | |
1899 | * unsigned highEntry; | |
1900 | * unsigned lowEntry; | |
1901 | * | |
1902 | * Results: | |
1903 | * None. | |
1904 | * | |
1905 | * Side effects: | |
1906 | * TLB entry set. | |
1907 | * | |
1908 | *-------------------------------------------------------------------------- | |
1909 | */ | |
1910 | LEAF(MachTLBWriteRandom) | |
 | # Like MachTLBWriteIndexed, but lets the hardware pick the slot | |
 | # (tlbwr); interrupts are disabled until the delay-slot mtc0. | |
1911 | .set noreorder | |
1912 | mfc0 v1, MACH_COP_0_STATUS_REG # Save the status register. | |
1913 | mfc0 v0, MACH_COP_0_TLB_HI # Save the current PID. | |
1914 | mtc0 zero, MACH_COP_0_STATUS_REG # Disable interrupts | |
1915 | ||
1916 | mtc0 a0, MACH_COP_0_TLB_HI # Set up entry high. | |
1917 | mtc0 a1, MACH_COP_0_TLB_LOW # Set up entry low. | |
1918 | nop | |
1919 | tlbwr # Write the TLB | |
1920 | ||
1921 | mtc0 v0, MACH_COP_0_TLB_HI # Restore the PID. | |
1922 | j ra | |
1923 | mtc0 v1, MACH_COP_0_STATUS_REG # Restore the status register | |
1924 | .set reorder | |
1925 | END(MachTLBWriteRandom) | |
1926 | ||
1927 | /*-------------------------------------------------------------------------- | |
1928 | * | |
1929 | * MachSetPID -- | |
1930 | * | |
1931 | * Write the given pid into the TLB pid reg. | |
1932 | * | |
1933 | * MachSetPID(pid) | |
1934 | * int pid; | |
1935 | * | |
1936 | * Results: | |
1937 | * None. | |
1938 | * | |
1939 | * Side effects: | |
1940 | * PID set in the entry hi register. | |
1941 | * | |
1942 | *-------------------------------------------------------------------------- | |
1943 | */ | |
1944 | LEAF(MachSetPID) | |
 | # Side effect: the VPN bits of entry-hi are cleared, since only | |
 | # the shifted pid is written (assumes a0 fits in the PID field). | |
1945 | .set noreorder | |
1946 | sll a0, a0, VMMACH_TLB_PID_SHIFT # put PID in right spot | |
1947 | mtc0 a0, MACH_COP_0_TLB_HI # Write the hi reg value | |
1948 | j ra | |
1949 | nop | |
1950 | .set reorder | |
1951 | END(MachSetPID) | |
1952 | ||
1953 | /*-------------------------------------------------------------------------- | |
1954 | * | |
1955 | * MachTLBFlush -- | |
1956 | * | |
1957 | * Flush the "random" entries from the TLB. | |
1958 | * | |
1959 | * MachTLBFlush() | |
1960 | * | |
1961 | * Results: | |
1962 | * None. | |
1963 | * | |
1964 | * Side effects: | |
1965 | * The TLB is flushed. | |
1966 | * | |
1967 | *-------------------------------------------------------------------------- | |
1968 | */ | |
1969 | LEAF(MachTLBFlush) | |
 | # Only the "random" entries (VMMACH_FIRST_RAND_ENTRY and up) are | |
 | # invalidated; entries below that index are left intact. | |
1970 | .set noreorder | |
1971 | mfc0 v1, MACH_COP_0_STATUS_REG # Save the status register. | |
1972 | mfc0 t0, MACH_COP_0_TLB_HI # Save the PID | |
1973 | mtc0 zero, MACH_COP_0_STATUS_REG # Disable interrupts | |
1974 | li t1, MACH_RESERVED_ADDR # invalid address | |
1975 | mtc0 t1, MACH_COP_0_TLB_HI # Mark entry high as invalid | |
1976 | mtc0 zero, MACH_COP_0_TLB_LOW # Zero out low entry. | |
1977 | /* | |
1978 | * Align the starting value (t1), the increment (t2) and the upper bound (t3). | |
1979 | */ | |
1980 | li t1, VMMACH_FIRST_RAND_ENTRY << VMMACH_TLB_INDEX_SHIFT | |
1981 | li t2, 1 << VMMACH_TLB_INDEX_SHIFT | |
1982 | li t3, VMMACH_NUM_TLB_ENTRIES << VMMACH_TLB_INDEX_SHIFT | |
1983 | 1: | |
1984 | mtc0 t1, MACH_COP_0_TLB_INDEX # Set the index register. | |
1985 | addu t1, t1, t2 # Increment index. | |
1986 | bne t1, t3, 1b # NB: always executes next | |
1987 | tlbwi # Write the TLB entry. | |
1988 | ||
1989 | mtc0 t0, MACH_COP_0_TLB_HI # Restore the PID | |
1990 | j ra | |
1991 | mtc0 v1, MACH_COP_0_STATUS_REG # Restore the status register | |
1992 | .set reorder | |
1993 | END(MachTLBFlush) | |
1994 | ||
1995 | /*-------------------------------------------------------------------------- | |
1996 | * | |
1997 | * MachTLBFlushPID -- | |
1998 | * | |
1999 | * Flush all entries with the given PID from the TLB. | |
2000 | * | |
2001 | * MachTLBFlushPID(pid) | |
2002 | * int pid; | |
2003 | * | |
2004 | * Results: | |
2005 | * None. | |
2006 | * | |
2007 | * Side effects: | |
2008 | * All entries corresponding to this PID are flushed. | |
2009 | * | |
2010 | *-------------------------------------------------------------------------- | |
2011 | */ | |
2012 | LEAF(MachTLBFlushPID) | |
2013 | .set noreorder | |
2014 | mfc0 v1, MACH_COP_0_STATUS_REG # Save the status register. | |
2015 | mfc0 t0, MACH_COP_0_TLB_HI # Save the current PID | |
2016 | mtc0 zero, MACH_COP_0_STATUS_REG # Disable interrupts | |
2017 | sll a0, a0, VMMACH_TLB_PID_SHIFT # Align the pid to flush. | |
2018 | /* | |
2019 | * Align the starting value (t1), the increment (t2) and the upper bound (t3). | |
2020 | */ | |
2021 | li t1, VMMACH_FIRST_RAND_ENTRY << VMMACH_TLB_INDEX_SHIFT | |
2022 | li t2, 1 << VMMACH_TLB_INDEX_SHIFT | |
2023 | li t3, VMMACH_NUM_TLB_ENTRIES << VMMACH_TLB_INDEX_SHIFT | |
2024 | mtc0 t1, MACH_COP_0_TLB_INDEX # Set the index register | |
 | # The index register is written in the bne delay slot at the loop | |
 | # bottom, so each tlbr reads the entry selected on the previous | |
 | # iteration even though t1 has already been advanced. | |
2025 | 1: | |
2026 | addu t1, t1, t2 # Increment index. | |
2027 | tlbr # Read from the TLB | |
2028 | mfc0 t4, MACH_COP_0_TLB_HI # Fetch the hi register. | |
2029 | nop | |
2030 | and t4, t4, VMMACH_TLB_PID # compare PID's | |
2031 | bne t4, a0, 2f | |
2032 | li v0, MACH_RESERVED_ADDR # invalid address | |
2033 | mtc0 v0, MACH_COP_0_TLB_HI # Mark entry high as invalid | |
2034 | mtc0 zero, MACH_COP_0_TLB_LOW # Zero out low entry. | |
2035 | nop | |
2036 | tlbwi # Write the entry. | |
2037 | 2: | |
2038 | bne t1, t3, 1b | |
2039 | mtc0 t1, MACH_COP_0_TLB_INDEX # Set the index register | |
2040 | ||
2041 | mtc0 t0, MACH_COP_0_TLB_HI # restore PID | |
2042 | j ra | |
2043 | mtc0 v1, MACH_COP_0_STATUS_REG # Restore the status register | |
2044 | .set reorder | |
2045 | END(MachTLBFlushPID) | |
2046 | ||
2047 | /*-------------------------------------------------------------------------- | |
2048 | * | |
2049 | * MachTLBFlushAddr -- | |
2050 | * | |
2051 | * Flush any TLB entries for the given address. | |
2052 | * | |
2053 | * MachTLBFlushAddr(virtaddr) | |
2054 | * unsigned virtaddr; | |
2055 | * | |
2056 | * Results: | |
2057 | * None. | |
2058 | * | |
2059 | * Side effects: | |
2060 | * The process's page is flushed from the TLB. | |
2061 | * | |
2062 | *-------------------------------------------------------------------------- | |
2063 | */ | |
2064 | LEAF(MachTLBFlushAddr) | |
 | # NOTE(review): a0 is used as the raw entry-hi probe value, so the | |
 | # caller apparently includes the current PID bits along with the | |
 | # virtual page number -- confirm at call sites. | |
2065 | .set noreorder | |
2066 | mfc0 v1, MACH_COP_0_STATUS_REG # Save the status register. | |
2067 | mfc0 t0, MACH_COP_0_TLB_HI # Get current PID | |
2068 | mtc0 zero, MACH_COP_0_STATUS_REG # Disable interrupts | |
2069 | mtc0 a0, MACH_COP_0_TLB_HI # look for addr & PID | |
2070 | mtc0 zero, MACH_COP_0_TLB_LOW # look for matching PID | |
2071 | nop | |
2072 | tlbp # Probe for the entry. | |
2073 | mfc0 v0, MACH_COP_0_TLB_INDEX # See what we got | |
2074 | li t1, MACH_RESERVED_ADDR # Load invalid entry. | |
2075 | bltz v0, 1f # index < 0 => !found | |
2076 | mtc0 t1, MACH_COP_0_TLB_HI # Prepare index hi. | |
2077 | nop | |
2078 | tlbwi # Invalidate the matching entry. | |
2079 | 1: | |
2080 | mtc0 t0, MACH_COP_0_TLB_HI # restore PID | |
2081 | j ra | |
2082 | mtc0 v1, MACH_COP_0_STATUS_REG # Restore the status register | |
2083 | .set reorder | |
2084 | END(MachTLBFlushAddr) | |
2085 | ||
2086 | /*-------------------------------------------------------------------------- | |
2087 | * | |
2088 | * MachTLBUpdate -- | |
2089 | * | |
2090 | * Update the TLB if highreg is found. | |
2091 | * | |
2092 | * MachTLBUpdate(highreg, lowreg) | |
2093 | * unsigned highreg, lowreg; | |
2094 | * | |
2095 | * Results: | |
2096 | * None. | |
2097 | * | |
2098 | * Side effects: | |
2099 | * None. | |
2100 | * | |
2101 | *-------------------------------------------------------------------------- | |
2102 | */ | |
2103 | LEAF(MachTLBUpdate) | |
 | # If an entry whose hi part matches a0 exists, it is overwritten | |
 | # in place with (a0, a1); otherwise the TLB is left unchanged. | |
2104 | .set noreorder | |
2105 | mfc0 v1, MACH_COP_0_STATUS_REG # Save the status register. | |
2106 | mfc0 t0, MACH_COP_0_TLB_HI # Save current PID | |
2107 | mtc0 zero, MACH_COP_0_STATUS_REG # Disable interrupts | |
2108 | mtc0 a0, MACH_COP_0_TLB_HI # init high reg. | |
2109 | mtc0 a1, MACH_COP_0_TLB_LOW # init low reg. | |
2110 | nop | |
2111 | tlbp # Probe for the entry. | |
2112 | mfc0 v0, MACH_COP_0_TLB_INDEX # See what we got | |
2113 | nop | |
2114 | bltz v0, 1f # index < 0 => !found | |
2115 | nop | |
2116 | tlbwi # Rewrite the matching slot. | |
2117 | 1: | |
2118 | mtc0 t0, MACH_COP_0_TLB_HI # restore PID | |
2119 | j ra | |
2120 | mtc0 v1, MACH_COP_0_STATUS_REG # Restore the status register | |
2121 | .set reorder | |
2122 | END(MachTLBUpdate) | |
2123 | ||
2124 | #ifdef DEBUG | |
2125 | /*-------------------------------------------------------------------------- | |
2126 | * | |
2127 | * MachTLBDump -- | |
2128 | * | |
2129 | * Print all entries in the TLB if 'all' is true; otherwise, just | |
2130 | * print valid entries. | |
2131 | * | |
2132 | * MachTLBDump(all) | |
2133 | * int all; | |
2134 | * | |
2135 | * Results: | |
2136 | * None. | |
2137 | * | |
2138 | * Side effects: | |
2139 | * None. | |
2140 | * | |
2141 | *-------------------------------------------------------------------------- | |
2142 | */ | |
2143 | ||
2144 | #define DUMP_FRAME_SIZE (STAND_FRAME_SIZE + 4*4) | |
2145 | ||
 | # DEBUG only. Prints TLB entries via PRINTF, which makes calls, | |
 | # so the values that must survive (index, bounds, saved PID) live | |
 | # in callee-saved s0-s3 and those registers are saved/restored. | |
2146 | NON_LEAF(MachTLBDump, DUMP_FRAME_SIZE, ra) | |
2147 | .set noreorder | |
2148 | subu sp, sp, DUMP_FRAME_SIZE | |
2149 | sw s0, STAND_RA_OFFSET(sp) | |
2150 | sw s1, STAND_RA_OFFSET+4(sp) | |
2151 | sw s2, STAND_RA_OFFSET+8(sp) | |
2152 | sw s3, STAND_RA_OFFSET+12(sp) | |
2153 | sw ra, STAND_RA_OFFSET+16(sp) | |
2154 | .mask 0x80000000, (STAND_RA_OFFSET - STAND_FRAME_SIZE) | |
2155 | ||
2156 | mfc0 s0, MACH_COP_0_TLB_HI # Save the current PID | |
2157 | sw a0, DUMP_FRAME_SIZE(sp) # Save 'all' | |
2158 | /* | |
2159 | * Align the starting value (s1), the increment (s2) and the upper bound (s3). | |
2160 | */ | |
2161 | move s1, zero | |
2162 | li s2, 1 << VMMACH_TLB_INDEX_SHIFT | |
2163 | li s3, VMMACH_NUM_TLB_ENTRIES << VMMACH_TLB_INDEX_SHIFT | |
2164 | mtc0 s1, MACH_COP_0_TLB_INDEX # Set the index register | |
2165 | 1: | |
2166 | addu s1, s1, s2 # Increment index. | |
2167 | tlbr # Read from the TLB | |
2168 | bne a0, zero, 2f # skip valid check if 'all' | |
2169 | mfc0 a3, MACH_COP_0_TLB_LOW # Fetch the low register. | |
2170 | nop | |
2171 | and t0, a3, PG_V # is it valid? | |
2172 | beq t0, zero, 3f | |
2173 | nop | |
2174 | 2: | |
2175 | mfc0 a2, MACH_COP_0_TLB_HI # Fetch the hi register. | |
2176 | PRINTF("%d: hi %x low %x\n") # print entry | |
2177 | srl a1, s1, VMMACH_TLB_INDEX_SHIFT # this is in the delay slot | |
2178 | lw a0, DUMP_FRAME_SIZE(sp) # get 'all' (a0 clobbered by call) | |
2179 | 3: | |
2180 | bne s1, s3, 1b | |
2181 | mtc0 s1, MACH_COP_0_TLB_INDEX # Set the index register | |
2182 | ||
2183 | mtc0 s0, MACH_COP_0_TLB_HI # restore PID | |
2184 | nop | |
2185 | lw ra, STAND_RA_OFFSET+16(sp) | |
2186 | lw s0, STAND_RA_OFFSET(sp) | |
2187 | lw s1, STAND_RA_OFFSET+4(sp) | |
2188 | lw s2, STAND_RA_OFFSET+8(sp) | |
2189 | lw s3, STAND_RA_OFFSET+12(sp) | |
2190 | j ra | |
2191 | addu sp, sp, DUMP_FRAME_SIZE | |
2192 | .set reorder | |
2193 | END(MachTLBDump) | |
2194 | ||
2195 | .comm tlbhi, 4 | |
2196 | .comm tlblo, 4 | |
 | /* | |
 | * MachTLBFind(hi, low) -- DEBUG helper: probe the TLB for an entry | |
 | * matching 'hi'. The hi/low registers after the probe are stored | |
 | * into the globals tlbhi/tlblo, and the index register value is | |
 | * returned in v0 (negative if no match). | |
 | */ | |
2197 | LEAF(MachTLBFind) | |
2198 | .set noreorder | |
2199 | mfc0 v1, MACH_COP_0_STATUS_REG # Save the status register. | |
2200 | mfc0 t0, MACH_COP_0_TLB_HI # Get current PID | |
2201 | mtc0 zero, MACH_COP_0_STATUS_REG # Disable interrupts | |
2202 | mtc0 a0, MACH_COP_0_TLB_HI # Set up entry high. | |
2203 | mtc0 a1, MACH_COP_0_TLB_LOW # Set up entry low. | |
2204 | nop | |
2205 | tlbp # Probe for the entry. | |
2206 | mfc0 v0, MACH_COP_0_TLB_INDEX # See what we got | |
2207 | mfc0 t1, MACH_COP_0_TLB_HI # See what we got | |
2208 | mfc0 t2, MACH_COP_0_TLB_LOW # See what we got | |
2209 | sw t1, tlbhi | |
2210 | sw t2, tlblo | |
2211 | mtc0 t0, MACH_COP_0_TLB_HI # Restore current PID | |
2212 | j ra | |
2213 | mtc0 v1, MACH_COP_0_STATUS_REG # Restore the status register | |
2214 | .set reorder | |
2215 | END(MachTLBFind) | |
2216 | ||
2217 | /*-------------------------------------------------------------------------- | |
2218 | * | |
2219 | * MachGetPID -- | |
2220 | * | |
2221 | * MachGetPID() | |
2222 | * | |
2223 | * Results: | |
2224 | * Returns the current TLB pid reg. | |
2225 | * | |
2226 | * Side effects: | |
2227 | * None. | |
2228 | * | |
2229 | *-------------------------------------------------------------------------- | |
2230 | */ | |
2231 | LEAF(MachGetPID) | |
2232 | .set noreorder | |
2233 | mfc0 v0, MACH_COP_0_TLB_HI # get PID | |
2234 | nop | |
2235 | and v0, v0, VMMACH_TLB_PID # mask off PID | |
 | # The srl below executes in the jump delay slot, right-justifying | |
 | # the PID field before control returns to the caller. | |
2236 | j ra | |
2237 | srl v0, v0, VMMACH_TLB_PID_SHIFT # put PID in right spot | |
2238 | .set reorder | |
2239 | END(MachGetPID) | |
2240 | #endif /* DEBUG */ | |
2241 | ||
2242 | /*---------------------------------------------------------------------------- | |
2243 | * | |
2244 | * MachSwitchFPState -- | |
2245 | * | |
2246 | * Save the current state into 'from' and restore it from 'to'. | |
2247 | * | |
2248 | * MachSwitchFPState(from, to) | |
2249 | * struct proc *from; | |
2250 | * struct user *to; | |
2251 | * | |
2252 | * Results: | |
2253 | * None. | |
2254 | * | |
2255 | * Side effects: | |
2256 | * None. | |
2257 | * | |
2258 | *---------------------------------------------------------------------------- | |
2259 | */ | |
2260 | LEAF(MachSwitchFPState) | |
 | # a0 = 'from' proc pointer (may be NULL: then no state is saved); | |
 | # a1 = user-struct pointer whose FP state is loaded. | |
2261 | .set noreorder | |
2262 | mfc0 t1, MACH_COP_0_STATUS_REG # Save old SR | |
2263 | li t0, MACH_SR_COP_1_BIT # Disable interrupts and | |
2264 | mtc0 t0, MACH_COP_0_STATUS_REG # enable the coprocessor | |
2265 | ||
2266 | beq a0, zero, 1f # skip save if NULL pointer | |
2267 | nop | |
2268 | /* | |
2269 | * First read out the status register to make sure that all FP operations | |
2270 | * have completed. | |
2271 | */ | |
2272 | lw a0, P_ADDR(a0) # get pointer to pcb for proc | |
2273 | cfc1 t0, MACH_FPC_CSR # stall til FP done, get status | |
2274 | li t3, ~MACH_SR_COP_1_BIT | |
2275 | lw t2, U_PCB_REGS+(PS * 4)(a0) # get CPU status register | |
2276 | sw t0, U_PCB_FPREGS+(32 * 4)(a0) # save FP status | |
2277 | and t2, t2, t3 # clear COP_1 enable bit | |
2278 | sw t2, U_PCB_REGS+(PS * 4)(a0) # save new status register | |
2279 | /* | |
2280 | * Save the floating point registers. | |
2281 | */ | |
2282 | swc1 $f0, U_PCB_FPREGS+(0 * 4)(a0) | |
2283 | swc1 $f1, U_PCB_FPREGS+(1 * 4)(a0) | |
2284 | swc1 $f2, U_PCB_FPREGS+(2 * 4)(a0) | |
2285 | swc1 $f3, U_PCB_FPREGS+(3 * 4)(a0) | |
2286 | swc1 $f4, U_PCB_FPREGS+(4 * 4)(a0) | |
2287 | swc1 $f5, U_PCB_FPREGS+(5 * 4)(a0) | |
2288 | swc1 $f6, U_PCB_FPREGS+(6 * 4)(a0) | |
2289 | swc1 $f7, U_PCB_FPREGS+(7 * 4)(a0) | |
2290 | swc1 $f8, U_PCB_FPREGS+(8 * 4)(a0) | |
2291 | swc1 $f9, U_PCB_FPREGS+(9 * 4)(a0) | |
2292 | swc1 $f10, U_PCB_FPREGS+(10 * 4)(a0) | |
2293 | swc1 $f11, U_PCB_FPREGS+(11 * 4)(a0) | |
2294 | swc1 $f12, U_PCB_FPREGS+(12 * 4)(a0) | |
2295 | swc1 $f13, U_PCB_FPREGS+(13 * 4)(a0) | |
2296 | swc1 $f14, U_PCB_FPREGS+(14 * 4)(a0) | |
2297 | swc1 $f15, U_PCB_FPREGS+(15 * 4)(a0) | |
2298 | swc1 $f16, U_PCB_FPREGS+(16 * 4)(a0) | |
2299 | swc1 $f17, U_PCB_FPREGS+(17 * 4)(a0) | |
2300 | swc1 $f18, U_PCB_FPREGS+(18 * 4)(a0) | |
2301 | swc1 $f19, U_PCB_FPREGS+(19 * 4)(a0) | |
2302 | swc1 $f20, U_PCB_FPREGS+(20 * 4)(a0) | |
2303 | swc1 $f21, U_PCB_FPREGS+(21 * 4)(a0) | |
2304 | swc1 $f22, U_PCB_FPREGS+(22 * 4)(a0) | |
2305 | swc1 $f23, U_PCB_FPREGS+(23 * 4)(a0) | |
2306 | swc1 $f24, U_PCB_FPREGS+(24 * 4)(a0) | |
2307 | swc1 $f25, U_PCB_FPREGS+(25 * 4)(a0) | |
2308 | swc1 $f26, U_PCB_FPREGS+(26 * 4)(a0) | |
2309 | swc1 $f27, U_PCB_FPREGS+(27 * 4)(a0) | |
2310 | swc1 $f28, U_PCB_FPREGS+(28 * 4)(a0) | |
2311 | swc1 $f29, U_PCB_FPREGS+(29 * 4)(a0) | |
2312 | swc1 $f30, U_PCB_FPREGS+(30 * 4)(a0) | |
2313 | swc1 $f31, U_PCB_FPREGS+(31 * 4)(a0) | |
2314 | ||
2315 | 1: | |
2316 | /* | |
2317 | * Restore the floating point registers. | |
2318 | */ | |
2319 | lw t0, U_PCB_FPREGS+(32 * 4)(a1) # get status register | |
2320 | lwc1 $f0, U_PCB_FPREGS+(0 * 4)(a1) | |
2321 | lwc1 $f1, U_PCB_FPREGS+(1 * 4)(a1) | |
2322 | lwc1 $f2, U_PCB_FPREGS+(2 * 4)(a1) | |
2323 | lwc1 $f3, U_PCB_FPREGS+(3 * 4)(a1) | |
2324 | lwc1 $f4, U_PCB_FPREGS+(4 * 4)(a1) | |
2325 | lwc1 $f5, U_PCB_FPREGS+(5 * 4)(a1) | |
2326 | lwc1 $f6, U_PCB_FPREGS+(6 * 4)(a1) | |
2327 | lwc1 $f7, U_PCB_FPREGS+(7 * 4)(a1) | |
2328 | lwc1 $f8, U_PCB_FPREGS+(8 * 4)(a1) | |
2329 | lwc1 $f9, U_PCB_FPREGS+(9 * 4)(a1) | |
2330 | lwc1 $f10, U_PCB_FPREGS+(10 * 4)(a1) | |
2331 | lwc1 $f11, U_PCB_FPREGS+(11 * 4)(a1) | |
2332 | lwc1 $f12, U_PCB_FPREGS+(12 * 4)(a1) | |
2333 | lwc1 $f13, U_PCB_FPREGS+(13 * 4)(a1) | |
2334 | lwc1 $f14, U_PCB_FPREGS+(14 * 4)(a1) | |
2335 | lwc1 $f15, U_PCB_FPREGS+(15 * 4)(a1) | |
2336 | lwc1 $f16, U_PCB_FPREGS+(16 * 4)(a1) | |
2337 | lwc1 $f17, U_PCB_FPREGS+(17 * 4)(a1) | |
2338 | lwc1 $f18, U_PCB_FPREGS+(18 * 4)(a1) | |
2339 | lwc1 $f19, U_PCB_FPREGS+(19 * 4)(a1) | |
2340 | lwc1 $f20, U_PCB_FPREGS+(20 * 4)(a1) | |
2341 | lwc1 $f21, U_PCB_FPREGS+(21 * 4)(a1) | |
2342 | lwc1 $f22, U_PCB_FPREGS+(22 * 4)(a1) | |
2343 | lwc1 $f23, U_PCB_FPREGS+(23 * 4)(a1) | |
2344 | lwc1 $f24, U_PCB_FPREGS+(24 * 4)(a1) | |
2345 | lwc1 $f25, U_PCB_FPREGS+(25 * 4)(a1) | |
2346 | lwc1 $f26, U_PCB_FPREGS+(26 * 4)(a1) | |
2347 | lwc1 $f27, U_PCB_FPREGS+(27 * 4)(a1) | |
2348 | lwc1 $f28, U_PCB_FPREGS+(28 * 4)(a1) | |
2349 | lwc1 $f29, U_PCB_FPREGS+(29 * 4)(a1) | |
2350 | lwc1 $f30, U_PCB_FPREGS+(30 * 4)(a1) | |
2351 | lwc1 $f31, U_PCB_FPREGS+(31 * 4)(a1) | |
2352 | ||
2353 | and t0, t0, ~MACH_FPC_EXCEPTION_BITS # don't re-raise saved exceptions | |
2354 | ctc1 t0, MACH_FPC_CSR | |
2355 | nop | |
2356 | ||
2357 | mtc0 t1, MACH_COP_0_STATUS_REG # Restore the status register. | |
2358 | j ra | |
2359 | nop | |
2360 | .set reorder | |
2361 | END(MachSwitchFPState) | |
2362 | ||
2363 | /*---------------------------------------------------------------------------- | |
2364 | * | |
2365 | * MachSaveCurFPState -- | |
2366 | * | |
2367 | * Save the current floating point coprocessor state. | |
2368 | * | |
2369 | * MachSaveCurFPState(p) | |
2370 | * struct proc *p; | |
2371 | * | |
2372 | * Results: | |
2373 | * None. | |
2374 | * | |
2375 | * Side effects: | |
2376 | * None. | |
2377 | * | |
2378 | *---------------------------------------------------------------------------- | |
2379 | */ | |
2380 | LEAF(MachSaveCurFPState) | |
 | # Saves the FP registers and CSR into p's pcb, and clears the | |
 | # COP1-enable bit in the saved user status register so the next | |
 | # FP use by that process will trap. | |
2381 | .set noreorder | |
2382 | lw a0, P_ADDR(a0) # get pointer to pcb for proc | |
2383 | mfc0 t1, MACH_COP_0_STATUS_REG # Disable interrupts and | |
2384 | li t0, MACH_SR_COP_1_BIT # enable the coprocessor | |
2385 | mtc0 t0, MACH_COP_0_STATUS_REG | |
2386 | nop | |
2387 | /* | |
2388 | * First read out the status register to make sure that all FP operations | |
2389 | * have completed. | |
2390 | */ | |
2391 | lw t2, U_PCB_REGS+(PS * 4)(a0) # get CPU status register | |
2392 | li t3, ~MACH_SR_COP_1_BIT | |
2393 | and t2, t2, t3 # clear COP_1 enable bit | |
2394 | cfc1 t0, MACH_FPC_CSR # stall til FP done, get status | |
2395 | sw t2, U_PCB_REGS+(PS * 4)(a0) # save new status register | |
2396 | sw t0, U_PCB_FPREGS+(32 * 4)(a0) # save FP status | |
2397 | /* | |
2398 | * Save the floating point registers. | |
2399 | */ | |
2400 | swc1 $f0, U_PCB_FPREGS+(0 * 4)(a0) | |
2401 | swc1 $f1, U_PCB_FPREGS+(1 * 4)(a0) | |
2402 | swc1 $f2, U_PCB_FPREGS+(2 * 4)(a0) | |
2403 | swc1 $f3, U_PCB_FPREGS+(3 * 4)(a0) | |
2404 | swc1 $f4, U_PCB_FPREGS+(4 * 4)(a0) | |
2405 | swc1 $f5, U_PCB_FPREGS+(5 * 4)(a0) | |
2406 | swc1 $f6, U_PCB_FPREGS+(6 * 4)(a0) | |
2407 | swc1 $f7, U_PCB_FPREGS+(7 * 4)(a0) | |
2408 | swc1 $f8, U_PCB_FPREGS+(8 * 4)(a0) | |
2409 | swc1 $f9, U_PCB_FPREGS+(9 * 4)(a0) | |
2410 | swc1 $f10, U_PCB_FPREGS+(10 * 4)(a0) | |
2411 | swc1 $f11, U_PCB_FPREGS+(11 * 4)(a0) | |
2412 | swc1 $f12, U_PCB_FPREGS+(12 * 4)(a0) | |
2413 | swc1 $f13, U_PCB_FPREGS+(13 * 4)(a0) | |
2414 | swc1 $f14, U_PCB_FPREGS+(14 * 4)(a0) | |
2415 | swc1 $f15, U_PCB_FPREGS+(15 * 4)(a0) | |
2416 | swc1 $f16, U_PCB_FPREGS+(16 * 4)(a0) | |
2417 | swc1 $f17, U_PCB_FPREGS+(17 * 4)(a0) | |
2418 | swc1 $f18, U_PCB_FPREGS+(18 * 4)(a0) | |
2419 | swc1 $f19, U_PCB_FPREGS+(19 * 4)(a0) | |
2420 | swc1 $f20, U_PCB_FPREGS+(20 * 4)(a0) | |
2421 | swc1 $f21, U_PCB_FPREGS+(21 * 4)(a0) | |
2422 | swc1 $f22, U_PCB_FPREGS+(22 * 4)(a0) | |
2423 | swc1 $f23, U_PCB_FPREGS+(23 * 4)(a0) | |
2424 | swc1 $f24, U_PCB_FPREGS+(24 * 4)(a0) | |
2425 | swc1 $f25, U_PCB_FPREGS+(25 * 4)(a0) | |
2426 | swc1 $f26, U_PCB_FPREGS+(26 * 4)(a0) | |
2427 | swc1 $f27, U_PCB_FPREGS+(27 * 4)(a0) | |
2428 | swc1 $f28, U_PCB_FPREGS+(28 * 4)(a0) | |
2429 | swc1 $f29, U_PCB_FPREGS+(29 * 4)(a0) | |
2430 | swc1 $f30, U_PCB_FPREGS+(30 * 4)(a0) | |
2431 | swc1 $f31, U_PCB_FPREGS+(31 * 4)(a0) | |
2432 | ||
2433 | mtc0 t1, MACH_COP_0_STATUS_REG # Restore the status register. | |
2434 | j ra | |
2435 | nop | |
2436 | .set reorder | |
2437 | END(MachSaveCurFPState) | |
2438 | ||
2439 | /*---------------------------------------------------------------------------- | |
2440 | * | |
2441 | * MachFPInterrupt -- | |
2442 | * | |
2443 | * Handle a floating point interrupt. | |
2444 | * | |
2445 | * MachFPInterrupt(statusReg, causeReg, pc) | |
2446 | * unsigned statusReg; | |
2447 | * unsigned causeReg; | |
2448 | * unsigned pc; | |
2449 | * | |
2450 | * Results: | |
2451 | * None. | |
2452 | * | |
2453 | * Side effects: | |
2454 | * None. | |
2455 | * | |
2456 | *---------------------------------------------------------------------------- | |
2457 | */ | |
2458 | NON_LEAF(MachFPInterrupt, STAND_FRAME_SIZE, ra) | |
2459 | .set noreorder | |
2460 | subu sp, sp, STAND_FRAME_SIZE | |
2461 | mfc0 t0, MACH_COP_0_STATUS_REG | |
2462 | sw ra, STAND_RA_OFFSET(sp) | |
2463 | .mask 0x80000000, (STAND_RA_OFFSET - STAND_FRAME_SIZE) | |
2464 | ||
2465 | or t1, t0, MACH_SR_COP_1_BIT # turn FP coprocessor on | |
2466 | mtc0 t1, MACH_COP_0_STATUS_REG | |
2467 | nop | |
2468 | nop | |
2469 | cfc1 t1, MACH_FPC_CSR # stall til FP done, get status | |
2470 | nop | |
2471 | .set reorder | |
2472 | sll t2, t1, (31 - 17) # unimplemented operation? | |
2473 | bgez t2, 3f # no, normal trap | |
2474 | /* | |
2475 | * We got an unimplemented operation trap so | |
2476 | * fetch the instruction, compute the next PC and emulate the instruction. | |
2477 | */ | |
2478 | bgez a1, 1f # Check the branch delay bit (sign bit of cause). | |
2479 | /* | |
2480 | * The instruction is in the branch delay slot so the branch will have to | |
2481 | * be emulated to get the resulting PC. | |
2482 | */ | |
2483 | sw a2, STAND_FRAME_SIZE + 8(sp) | |
2484 | li a0, UADDR+U_PCB_REGS # first arg is ptr to CPU registers | |
2485 | move a1, a2 # second arg is instruction PC | |
2486 | move a2, t1 # third arg is floating point CSR | |
2487 | move a3, zero # fourth arg is FALSE | |
2488 | jal MachEmulateBranch # compute PC after branch | |
2489 | /* | |
2490 | * Now load the floating-point instruction in the branch delay slot | |
2491 | * to be emulated. | |
2492 | */ | |
2493 | lw a2, STAND_FRAME_SIZE + 8(sp) # restore EXC pc | |
2494 | lw a0, 4(a2) # a0 = coproc instruction | |
2495 | b 2f | |
2496 | /* | |
2497 | * This is not in the branch delay slot so calculate the resulting | |
2498 | * PC (epc + 4) into v0 and continue to MachEmulateFP(). | |
2499 | */ | |
2500 | 1: | |
2501 | lw a0, 0(a2) # a0 = coproc instruction | |
2502 | addu v0, a2, 4 # v0 = next pc | |
2503 | 2: | |
2504 | sw v0, UADDR+U_PCB_REGS+(PC * 4) # save new pc | |
2505 | /* | |
2506 | * Check to see if the instruction to be emulated is a floating-point | |
2507 | * instruction. | |
2508 | */ | |
2509 | srl a3, a0, MACH_OPCODE_SHIFT | |
2510 | beq a3, MACH_OPCODE_C1, 4f # this should never fail | |
2511 | /* | |
2512 | * Send a floating point exception signal to the current process. | |
2513 | */ | |
2514 | 3: | |
2515 | lw a0, curproc # get current process | |
2516 | cfc1 a2, MACH_FPC_CSR # code = FP exceptions | |
2517 | li a1, SIGFPE | |
2518 | ctc1 zero, MACH_FPC_CSR # Clear exceptions | |
2519 | jal trapsignal | |
2520 | b FPReturn | |
2521 | ||
2522 | /* | |
2523 | * Finally, we can call MachEmulateFP() where a0 is the instruction to emulate. | |
2524 | */ | |
2525 | 4: | |
2526 | jal MachEmulateFP | |
2527 | ||
2528 | /* | |
2529 | * Turn off the floating point coprocessor and return. | |
2530 | */ | |
2531 | FPReturn: | |
2532 | .set noreorder | |
2533 | mfc0 t0, MACH_COP_0_STATUS_REG | |
2534 | lw ra, STAND_RA_OFFSET(sp) | |
2535 | and t0, t0, ~MACH_SR_COP_1_BIT | |
2536 | mtc0 t0, MACH_COP_0_STATUS_REG | |
2537 | j ra | |
2538 | addu sp, sp, STAND_FRAME_SIZE | |
2539 | .set reorder | |
2540 | END(MachFPInterrupt) | |
2541 | ||
2542 | /*---------------------------------------------------------------------------- | |
2543 | * | |
2544 | * MachConfigCache -- | |
2545 | * | |
2546 | * Size the caches. | |
2547 | * NOTE: should only be called from mach_init(). | |
2548 | * | |
2549 | * Results: | |
2550 | * None. | |
2551 | * | |
2552 | * Side effects: | |
2553 | * The size of the data cache is stored into machDataCacheSize and the | |
2554 | * size of instruction cache is stored into machInstCacheSize. | |
2555 | * | |
2556 | *---------------------------------------------------------------------------- | |
2557 | */ | |
2558 | NON_LEAF(MachConfigCache, STAND_FRAME_SIZE, ra) | |
 | # Runs uncached while probing (code jumps into the uncached KSEG1 | |
 | # alias of itself); interrupts are disabled for the duration. | |
2559 | .set noreorder | |
2560 | subu sp, sp, STAND_FRAME_SIZE | |
2561 | sw ra, STAND_RA_OFFSET(sp) # Save return address. | |
2562 | .mask 0x80000000, (STAND_RA_OFFSET - STAND_FRAME_SIZE) | |
2563 | mtc0 zero, MACH_COP_0_STATUS_REG # Disable interrupts. | |
2564 | la v0, 1f | |
2565 | or v0, MACH_UNCACHED_MEMORY_ADDR # Run uncached. | |
2566 | j v0 | |
2567 | nop | |
2568 | 1: | |
2569 | /* | |
2570 | * This works because jal doesn't change pc[31..28] and the | |
2571 | * linker still thinks SizeCache is in the cached region so it computes | |
2572 | * the correct address without complaining. | |
2573 | */ | |
2574 | jal SizeCache # Get the size of the d-cache. | |
2575 | nop | |
2576 | sw v0, machDataCacheSize | |
2577 | nop # Make sure sw out of pipe | |
2578 | nop | |
2579 | nop | |
2580 | nop | |
2581 | li v0, MACH_SR_SWAP_CACHES # Swap caches | |
2582 | mtc0 v0, MACH_COP_0_STATUS_REG | |
2583 | nop # Insure caches stable | |
2584 | nop | |
2585 | nop | |
2586 | nop | |
2587 | jal SizeCache # Get the size of the i-cache. | |
2588 | nop | |
2589 | sw v0, machInstCacheSize | |
2590 | nop # Make sure sw out of pipe | |
2591 | nop | |
2592 | nop | |
2593 | nop | |
2594 | mtc0 zero, MACH_COP_0_STATUS_REG # Swap back caches. | |
2595 | nop | |
2596 | nop | |
2597 | nop | |
2598 | nop | |
2599 | la t0, 1f | |
2600 | j t0 # Back to cached mode | |
2601 | nop | |
2602 | 1: | |
2603 | lw ra, STAND_RA_OFFSET(sp) # Restore return addr | |
2604 | addu sp, sp, STAND_FRAME_SIZE # Restore sp. | |
2605 | j ra | |
2606 | nop | |
2607 | .set reorder | |
2608 | END(MachConfigCache) | |
2609 | ||
2610 | /*---------------------------------------------------------------------------- | |
2611 | * | |
2612 | * SizeCache -- | |
2613 | * | |
2614 | * Get the size of the cache. | |
2615 | * | |
2616 | * Results: | |
2617 | * The size of the cache. | |
2618 | * | |
2619 | * Side effects: | |
2620 | * None. | |
2621 | * | |
2622 | *---------------------------------------------------------------------------- | |
2623 | */ | |
2624 | LEAF(SizeCache) | |
 | # Returns the cache size in bytes in v0 (0 if no cache found). | |
 | # Expects to be entered running uncached with interrupts disabled | |
 | # (see MachConfigCache); the caches are isolated during the probe | |
 | # and the caller's status register is restored before returning. | |
2625 | .set noreorder | |
2626 | mfc0 t0, MACH_COP_0_STATUS_REG # Save the current status reg. | |
2627 | nop | |
2628 | or v0, t0, MACH_SR_ISOL_CACHES # Isolate the caches. | |
2629 | nop # Make sure no stores in pipe | |
2630 | mtc0 v0, MACH_COP_0_STATUS_REG | |
2631 | nop # Make sure isolated | |
2632 | nop | |
2633 | nop | |
2634 | /* | |
2635 | * Clear cache size boundaries. | |
2636 | */ | |
2637 | li v0, MACH_MIN_CACHE_SIZE | |
2638 | 1: | |
2639 | sw zero, MACH_CACHED_MEMORY_ADDR(v0) # Clear cache memory | |
2640 | sll v0, v0, 1 | |
2641 | bleu v0, +MACH_MAX_CACHE_SIZE, 1b | |
2642 | nop | |
2643 | li v0, -1 | |
2644 | sw v0, MACH_CACHED_MEMORY_ADDR(zero) # Store marker in cache | |
2645 | li v0, MACH_MIN_CACHE_SIZE | |
2646 | 2: | |
2647 | lw v1, MACH_CACHED_MEMORY_ADDR(v0) # Look for marker | |
2648 | nop | |
2649 | bne v1, zero, 3f # Found marker. | |
2650 | nop | |
2651 | sll v0, v0, 1 # cache size * 2 | |
2652 | bleu v0, +MACH_MAX_CACHE_SIZE, 2b # keep looking | |
2653 | nop | |
2654 | move v0, zero # must be no cache | |
2655 | 3: | |
2656 | mtc0 t0, MACH_COP_0_STATUS_REG | |
2657 | nop # Make sure unisolated | |
2658 | nop | |
2659 | nop | |
2660 | nop | |
2661 | j ra | |
2662 | nop | |
2663 | .set reorder | |
2664 | END(SizeCache) | |
2665 | ||
2666 | /*---------------------------------------------------------------------------- | |
2667 | * | |
2668 | * MachFlushCache -- | |
2669 | * | |
2670 | * Flush the caches. | |
2671 | * | |
2672 | * Results: | |
2673 | * None. | |
2674 | * | |
2675 | * Side effects: | |
2676 | * The contents of the caches is flushed. | |
2677 | * | |
2678 | *---------------------------------------------------------------------------- | |
2679 | */ | |
2680 | LEAF(MachFlushCache) | |
# MachFlushCache: invalidate the entire instruction and data caches.
#
# In:	 nothing (reads machInstCacheSize / machDataCacheSize -- presumably
#	 established at boot via SizeCache; confirm, not visible here).
# Out:	 nothing; clobbers v0, t0, t1, t2, t3.
#
# The status register is saved in t3 and restored at the end, so the
# caller's interrupt state is preserved; interrupts are off throughout.
# Mechanism: while a cache is isolated, a partial-word store (sb)
# invalidates the cache word it hits without touching memory.  Setting
# MACH_SR_SWAP_CACHES as well makes the I-cache addressable as data.
# While isolated, instruction fetches must not come from the cache, so
# the code jumps into KSEG1 (address or'ed with MACH_UNCACHED_MEMORY_ADDR)
# before changing SR, and back to the cached alias for the hot loops.
# Each loop pass invalidates 32 bytes: offsets 0..24 plus the -4(t0)
# store in the branch delay slot, which covers offset 28 after the addu.
2681 | .set noreorder | |
2682 | lw t1, machInstCacheSize # Must load before isolating | |
2683 | lw t2, machDataCacheSize # Must load before isolating | |
2684 | mfc0 t3, MACH_COP_0_STATUS_REG # Save the status register. | |
2685 | mtc0 zero, MACH_COP_0_STATUS_REG # Disable interrupts. | |
2686 | la v0, 1f | |
2687 | or v0, MACH_UNCACHED_MEMORY_ADDR # Run uncached. | |
2688 | j v0 | |
2689 | nop | |
2690 | /* | |
2691 | * Flush the instruction cache. | |
2692 | */ | |
2693 | 1: | |
2694 | li v0, MACH_SR_ISOL_CACHES | MACH_SR_SWAP_CACHES | |
2695 | mtc0 v0, MACH_COP_0_STATUS_REG # Isolate and swap caches. | |
2696 | li t0, MACH_UNCACHED_MEMORY_ADDR | |
2697 | subu t0, t0, t1 # start = UNCACHED - icache size | |
2698 | li t1, MACH_UNCACHED_MEMORY_ADDR # loop bound (also reused below) | |
2699 | la v0, 1f # Run cached | |
2700 | j v0 | |
2701 | nop | |
2702 | 1: | |
2703 | sb zero, 0(t0) | |
2704 | sb zero, 4(t0) | |
2705 | sb zero, 8(t0) | |
2706 | sb zero, 12(t0) | |
2707 | sb zero, 16(t0) | |
2708 | sb zero, 20(t0) | |
2709 | sb zero, 24(t0) | |
2710 | addu t0, t0, 32 | |
2711 | bne t0, t1, 1b # loop to end of cache alias region | |
2712 | sb zero, -4(t0) # delay slot: invalidates old offset 28 | |
2713 | ||
2714 | la v0, 1f | |
2715 | or v0, MACH_UNCACHED_MEMORY_ADDR | |
2716 | j v0 # Run uncached | |
2717 | nop | |
2718 | /* | |
2719 | * Flush the data cache. | |
2720 | */ | |
2721 | 1: | |
2722 | li v0, MACH_SR_ISOL_CACHES | |
2723 | mtc0 v0, MACH_COP_0_STATUS_REG # Isolate and swap back caches | |
2724 | li t0, MACH_UNCACHED_MEMORY_ADDR | |
2725 | subu t0, t0, t2 # start = UNCACHED - dcache size | |
2726 | la v0, 1f | |
2727 | j v0 # Back to cached mode | |
2728 | nop | |
2729 | 1: | |
2730 | sb zero, 0(t0) | |
2731 | sb zero, 4(t0) | |
2732 | sb zero, 8(t0) | |
2733 | sb zero, 12(t0) | |
2734 | sb zero, 16(t0) | |
2735 | sb zero, 20(t0) | |
2736 | sb zero, 24(t0) | |
2737 | addu t0, t0, 32 | |
2738 | bne t0, t1, 1b # t1 still = MACH_UNCACHED_MEMORY_ADDR from above | |
2739 | sb zero, -4(t0) # delay slot: invalidates old offset 28 | |
2740 | ||
2741 | nop # Insure isolated stores | |
2742 | nop # out of pipe. | |
2743 | nop | |
2744 | nop | |
2745 | mtc0 t3, MACH_COP_0_STATUS_REG # Restore status reg. | |
2746 | nop # Insure cache unisolated. | |
2747 | nop | |
2748 | nop | |
2749 | nop | |
2750 | j ra | |
2751 | nop | |
2752 | .set reorder | |
2753 | END(MachFlushCache) | |
2754 | ||
2755 | /*---------------------------------------------------------------------------- | |
2756 | * | |
2757 | * MachFlushICache -- | |
2758 | * | |
2759 | * MachFlushICache(addr, len) | |
2760 | * | |
2761 | * Flush instruction cache for range of addr to addr + len - 1. | |
2762 | * | |
2763 | * Results: | |
2764 | * None. | |
2765 | * | |
2766 | * Side effects: | |
2767 | * The contents of the cache are flushed. | |
2768 | * | |
2769 | *---------------------------------------------------------------------------- | |
2770 | */ | |
2771 | LEAF(MachFlushICache) | |
# MachFlushICache(addr, len): invalidate the instruction cache for the
# range [addr, addr + len - 1] (clamped to one full cache's worth).
#
# In:	 a0 = first virtual address, a1 = length in bytes
# Out:	 nothing; clobbers v0, t0, t1, t3, a0, a1.
#
# SR is saved in t3 and restored; interrupts are off throughout.  The
# I-cache is made addressable by isolating and swapping the caches; a
# partial-word store (sb) then invalidates the containing cache word.
# Only address bits below the cache size select a cache word, so the
# range is rebased into the [UNCACHED - size, UNCACHED) alias window,
# the same window MachFlushCache uses.
#
# BUG FIX: the start address formerly read "addu t0, t0, ..." which,
# combined with the following subu, yielded the constant
# MACH_UNCACHED_MEMORY_ADDR - 1 -- the masked addr in a0 was dead and
# the wrong cache words were flushed whenever len < cache size.  The
# base operand must be a0 so the start is UNCACHED - size + (addr mod size).
2772 | .set noreorder | |
2773 | lw t1, machInstCacheSize | |
2774 | mfc0 t3, MACH_COP_0_STATUS_REG # Save SR | |
2775 | subu t0, t1, 1 # t0 = size - 1 | |
2776 | and a0, a0, t0 # mask off address bits: a0 = addr mod size | |
2777 | addu t0, a0, MACH_UNCACHED_MEMORY_ADDR # start from masked addr (was t0: ignored addr) | |
2778 | subu t0, t0, t1 # t0 = UNCACHED - size + (addr mod size) | |
2779 | mtc0 zero, MACH_COP_0_STATUS_REG # Disable interrupts. | |
2780 | ||
2781 | la v0, 1f | |
2782 | or v0, MACH_UNCACHED_MEMORY_ADDR # Run uncached. | |
2783 | j v0 | |
2784 | nop | |
2785 | 1: | |
2786 | li v0, MACH_SR_ISOL_CACHES | MACH_SR_SWAP_CACHES | |
2787 | mtc0 v0, MACH_COP_0_STATUS_REG # isolate + swap: I-cache addressable | |
2788 | bltu t1, a1, 1f # cache is smaller than region | |
2789 | nop | |
2790 | move t1, a1 # t1 = min(len, cache size) | |
2791 | 1: | |
2792 | addu t1, t1, t0 # compute ending address | |
2793 | la v0, 1f # run cached | |
2794 | j v0 | |
2795 | nop | |
2796 | 1: | |
2797 | sb zero, 0(t0) | |
2798 | sb zero, 4(t0) | |
2799 | sb zero, 8(t0) | |
2800 | sb zero, 12(t0) | |
2801 | sb zero, 16(t0) | |
2802 | sb zero, 20(t0) | |
2803 | sb zero, 24(t0) | |
2804 | addu t0, t0, 32 | |
2805 | bltu t0, t1, 1b # loop until past ending address | |
2806 | sb zero, -4(t0) # delay slot: invalidates old offset 28 | |
2807 | ||
2808 | la v0, 1f | |
2809 | or v0, MACH_UNCACHED_MEMORY_ADDR | |
2810 | j v0 # Run uncached | |
2811 | nop | |
2812 | 1: | |
2813 | nop # insure isolated stores out of pipe | |
2814 | mtc0 zero, MACH_COP_0_STATUS_REG # unisolate, unswap | |
2815 | nop # keep pipeline clean | |
2816 | nop | |
2817 | nop | |
2818 | mtc0 t3, MACH_COP_0_STATUS_REG # enable interrupts | |
2819 | j ra # return and run cached | |
2820 | nop | |
2821 | .set reorder | |
2822 | END(MachFlushICache) | |