Major rewrite of npx code (npx-0.5 + 2 patches to it)
[unix-history] / usr/src/sys.386bsd/i386/i386/locore.s
3c40a151
WJ
1/*-
2 * Copyright (c) 1990 The Regents of the University of California.
3 * All rights reserved.
4 *
5 * This code is derived from software contributed to Berkeley by
6 * William Jolitz.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. All advertising materials mentioning features or use of this software
17 * must display the following acknowledgement:
18 * This product includes software developed by the University of
19 * California, Berkeley and its contributors.
20 * 4. Neither the name of the University nor the names of its contributors
21 * may be used to endorse or promote products derived from this software
22 * without specific prior written permission.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
25 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
28 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
29 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
30 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
33 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34 * SUCH DAMAGE.
35 *
36 * @(#)locore.s 7.3 (Berkeley) 5/13/91
f0ee6650
PW
37 *
38 * PATCHES MAGIC LEVEL PATCH THAT GOT US HERE
39 * -------------------- ----- ----------------------
42d41470 40 * CURRENT PATCH LEVEL: 4 00154
f0ee6650
PW
41 * -------------------- ----- ----------------------
42 *
43 * 06 Aug 92 Pace Willisson Allow VGA memory to be mapped
6d829b69
FM
44 * 28 Nov 92 Frank MacLachlan Aligned addresses and data
45 * on 32bit boundaries.
6b9305fd 46 * 25 Mar 93 Kevin Lahey Add syscall counter for vmstat
42d41470 47 * 20 Apr 93 Bruce Evans New npx-0.5 code
3c40a151
WJ
48 */
49
50
51/*
52 * locore.s: 4BSD machine support for the Intel 386
53 * Preliminary version
54 * Written by William F. Jolitz, 386BSD Project
55 */
56
57#include "assym.s"
58#include "machine/psl.h"
59#include "machine/pte.h"
60
61#include "errno.h"
62
63#include "machine/trap.h"
64
42d41470
BE
65#include "machine/specialreg.h"
66
67#define KDSEL 0x10
68
3c40a151
WJ
69/*
70 * Note: This version is greatly munged to avoid various assembler errors
71 * that may be fixed in newer versions of gas. Perhaps newer versions
72 * will allow a more pleasant appearance.
73 */
74
75 .set IDXSHIFT,10
76 .set SYSTEM,0xFE000000 # virtual address of system start
77 /*note: gas copies sign bit (e.g. arithmetic >>), can't do SYSTEM>>22! */
78 .set SYSPDROFF,0x3F8 # Page dir index of System Base
79
80#define NOP inb $0x84, %al ; inb $0x84, %al
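/*
 * NOP above is the usual ISA I/O-delay idiom: two reads of the unused
 * diagnostic port 0x84 simply burn a little bus time between accesses
 * to slow devices (the "check wristwatch" reads further down do the same).
 */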
6d829b69 81#define ALIGN32 .align 2 /* 2^2 = 4 */
3c40a151
WJ
82
83/*
84 * PTmap is recursive pagemap at top of virtual address space.
85 * Within PTmap, the page directory can be found (third indirection).
86 */
87 .set PDRPDROFF,0x3F7 # Page dir index of Page dir
88 .globl _PTmap, _PTD, _PTDpde, _Sysmap
89 .set _PTmap,0xFDC00000
90 .set _PTD,0xFDFF7000
91 .set _Sysmap,0xFDFF8000
92 .set _PTDpde,0xFDFF7000+4*PDRPDROFF
93
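/*
 * e.g. once the PDRPDROFF entry is installed, the pte mapping a virtual
 * address va can itself be read at _PTmap + (va >> PGSHIFT) * 4, and the
 * page directory shows up as the "page table" at _PTD; _PTDpde is the
 * directory entry that creates this self-mapping.
 */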
94/*
95 * APTmap, APTD is the alternate recursive pagemap.
96 * It's used when modifying another process's page tables.
97 */
98 .set APDRPDROFF,0x3FE # Page dir index of Page dir
99 .globl _APTmap, _APTD, _APTDpde
100 .set _APTmap,0xFF800000
101 .set _APTD,0xFFBFE000
102 .set _APTDpde,0xFDFF7000+4*APDRPDROFF
103
104/*
105 * Access to each process's kernel stack is via a region of
106 * per-process address space (at the beginning), immediately above
107 * the user process stack.
108 */
109 .set _kstack, USRSTACK
110 .globl _kstack
111 .set PPDROFF,0x3F6
112 .set PPTEOFF,0x400-UPAGES # 0x3FE
113
114#define ENTRY(name) \
115 .globl _/**/name; _/**/name:
116#define ALTENTRY(name) \
117 .globl _/**/name; _/**/name:
118
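/*
 * ENTRY and ALTENTRY paste an underscore onto the C-level name using the
 * old K&R comment-splice trick, so ENTRY(foo) defines and exports the
 * label _foo.
 */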
119/*
120 * Initialization
121 */
122 .data
123 .globl _cpu,_cold,_boothowto,_bootdev,_cyloffset,_atdevbase,_atdevphys
124_cpu: .long 0 # are we 386, 386sx, or 486
125_cold: .long 1 # cold till we are not
126_atdevbase: .long 0 # location of start of iomem in virtual
127_atdevphys: .long 0 # location of device mapping ptes (phys)
128
129 .globl _IdlePTD, _KPTphys
130_IdlePTD: .long 0
131_KPTphys: .long 0
132
133 .space 512
134tmpstk:
135 .text
136 .globl start
137start: movw $0x1234,%ax
138 movw %ax,0x472 # warm boot
139 jmp 1f
140 .space 0x500 # skip over warm boot shit
141
142 /*
143 * pass parameters on stack (howto, bootdev, unit, cyloffset)
144 * note: 0(%esp) is return address of boot
145 * ( if we want to hold onto /boot, it's physical %esp up to _end)
146 */
147
148 1: movl 4(%esp),%eax
149 movl %eax,_boothowto-SYSTEM
150 movl 8(%esp),%eax
151 movl %eax,_bootdev-SYSTEM
152 movl 12(%esp),%eax
153 movl %eax, _cyloffset-SYSTEM
154
155#ifdef garbage
156 /* count up memory */
157
158 xorl %eax,%eax # start with base memory at 0x0
159 #movl $ 0xA0000/NBPG,%ecx # look every 4K up to 640K
160 movl $ 0xA0,%ecx # look every 4K up to 640K
1611: movl 0(%eax),%ebx # save location to check
162 movl $0xa55a5aa5,0(%eax) # write test pattern
163 /* flush stupid cache here! (with bcopy (0,0,512*1024) ) */
164 cmpl $0xa55a5aa5,0(%eax) # does not check yet for rollover
165 jne 2f
166 movl %ebx,0(%eax) # restore memory
167 addl $ NBPG,%eax
168 loop 1b
1692: shrl $12,%eax
170 movl %eax,_Maxmem-SYSTEM
171
172 movl $0x100000,%eax # next, tally remaining memory
173 #movl $((0xFFF000-0x100000)/NBPG),%ecx
174 movl $(0xFFF-0x100),%ecx
1751: movl 0(%eax),%ebx # save location to check
176 movl $0xa55a5aa5,0(%eax) # write test pattern
177 cmpl $0xa55a5aa5,0(%eax) # does not check yet for rollover
178 jne 2f
179 movl %ebx,0(%eax) # restore memory
180 addl $ NBPG,%eax
181 loop 1b
1822: shrl $12,%eax
183 movl %eax,_Maxmem-SYSTEM
184#endif
185
186/* find end of kernel image */
187 movl $_end-SYSTEM,%ecx
188 addl $ NBPG-1,%ecx
189 andl $~(NBPG-1),%ecx
190 movl %ecx,%esi
191
192/* clear bss and memory for bootstrap pagetables. */
193 movl $_edata-SYSTEM,%edi
194 subl %edi,%ecx
195 addl $(UPAGES+5)*NBPG,%ecx
196/*
197 * Virtual address space of kernel:
198 *
199 * text | data | bss | page dir | proc0 kernel stack | usr stk map | Sysmap
200 * 0 1 2 3 4
201 */
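/*
 * i.e. with UPAGES == 2 (see PPTEOFF above), the pages cleared past _end
 * are used as: page 0 the page directory, pages 1-2 proc0's kernel stack,
 * page 3 the page table mapping that stack, and pages 4-6 the kernel
 * page tables (Sysmap).
 */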
202 xorl %eax,%eax # pattern
203 cld
204 rep
205 stosb
206
207 movl %esi,_IdlePTD-SYSTEM /*physical address of Idle Address space */
208 movl $ tmpstk-SYSTEM,%esp # bootstrap stack end location
209
210#define fillkpt \
2111: movl %eax,0(%ebx) ; \
212 addl $ NBPG,%eax ; /* increment physical address */ \
213 addl $4,%ebx ; /* next pte */ \
214 loop 1b ;
215
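/*
 * fillkpt expects %eax = physical address ORed with the desired pte bits,
 * %ebx = address of the first pte to fill and %ecx = the number of ptes;
 * %eax is bumped by NBPG and %ebx by 4 per entry, which is how the I/O
 * map fill below lands immediately after the kernel ptes.
 */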
216/*
217 * Map Kernel
218 * N.B. don't bother with making kernel text RO, as 386
219 * ignores R/W AND U/S bits on kernel access (only v works) !
220 *
221 * First step - build page tables
222 */
223 movl %esi,%ecx # this much memory,
224 shrl $ PGSHIFT,%ecx # for this many pte s
225 addl $ UPAGES+4,%ecx # including our early context
226 movl $ PG_V,%eax # having these bits set,
227 lea (4*NBPG)(%esi),%ebx # physical address of KPT in proc 0,
228 movl %ebx,_KPTphys-SYSTEM # in the kernel page table,
229 fillkpt
230
231/* map I/O memory map */
232
233 movl $0x100-0xa0,%ecx # for this many pte s,
f0ee6650 234 movl $(0xa0000|PG_V|PG_UW),%eax # having these bits set,(perhaps URW?) XXX 06 Aug 92
3c40a151
WJ
235 movl %ebx,_atdevphys-SYSTEM # remember phys addr of ptes
236 fillkpt
237
238 /* map proc 0's kernel stack into user page table page */
239
240 movl $ UPAGES,%ecx # for this many pte s,
241 lea (1*NBPG)(%esi),%eax # physical address in proc 0
242 lea (SYSTEM)(%eax),%edx
243 movl %edx,_proc0paddr-SYSTEM # remember VA for 0th process init
244 orl $ PG_V|PG_URKW,%eax # having these bits set,
245 lea (3*NBPG)(%esi),%ebx # physical address of stack pt in proc 0
246 addl $(PPTEOFF*4),%ebx
247 fillkpt
248
249/*
250 * Construct a page table directory
251 * (of page directory elements - pde's)
252 */
253 /* install a pde for temporary double map of bottom of VA */
254 lea (4*NBPG)(%esi),%eax # physical address of kernel page table
f0ee6650 255 orl $ PG_V|PG_UW,%eax # pde entry is valid XXX 06 Aug 92
3c40a151
WJ
256 movl %eax,(%esi) # which is where temp maps!
257
258 /* kernel pde's */
259 movl $ 3,%ecx # for this many pde s,
260 lea (SYSPDROFF*4)(%esi), %ebx # offset of pde for kernel
261 fillkpt
262
263 /* install a pde recursively mapping page directory as a page table! */
264 movl %esi,%eax # phys address of ptd in proc 0
f0ee6650 265 orl $ PG_V|PG_UW,%eax # pde entry is valid XXX 06 Aug 92
3c40a151
WJ
266 movl %eax, PDRPDROFF*4(%esi) # which is where PTmap maps!
267
268 /* install a pde to map kernel stack for proc 0 */
269 lea (3*NBPG)(%esi),%eax # physical address of pt in proc 0
270 orl $ PG_V,%eax # pde entry is valid
271 movl %eax,PPDROFF*4(%esi) # which is where kernel stack maps!
272
273 /* load base of page directory, and enable mapping */
274 movl %esi,%eax # phys address of ptd in proc 0
275 orl $ I386_CR3PAT,%eax
276 movl %eax,%cr3 # load ptd addr into mmu
277 movl %cr0,%eax # get control word
278 orl $0x80000001,%eax # and let s page!
279 movl %eax,%cr0 # NOW!
280
281 pushl $begin # jump to high mem!
282 ret
283
284begin: /* now running relocated at SYSTEM where the system is linked to run */
285
286 .globl _Crtat
287 movl _Crtat,%eax
288 subl $0xfe0a0000,%eax
289 movl _atdevphys,%edx # get pte PA
290 subl _KPTphys,%edx # remove base of ptes, now have phys offset
291 shll $ PGSHIFT-2,%edx # corresponding to virt offset
292 addl $ SYSTEM,%edx # add virtual base
293 movl %edx, _atdevbase
294 addl %eax,%edx
295 movl %edx,_Crtat
296
297 /* set up bootstrap stack */
298 movl $ _kstack+UPAGES*NBPG-4*12,%esp # bootstrap stack end location
299 xorl %eax,%eax # mark end of frames
300 movl %eax,%ebp
301 movl _proc0paddr, %eax
302 movl %esi, PCB_CR3(%eax)
303
304 lea 7*NBPG(%esi),%esi # skip past stack.
305 pushl %esi
306
307 call _init386 # wire 386 chip for unix operation
308
309 movl $0,_PTD
310 call _main
311 popl %esi
312
313 .globl __ucodesel,__udatasel
314 movzwl __ucodesel,%eax
315 movzwl __udatasel,%ecx
316 # build outer stack frame
317 pushl %ecx # user ss
318 pushl $ USRSTACK # user esp
319 pushl %eax # user cs
320 pushl $0 # user ip
321 movw %cx,%ds
322 movw %cx,%es
323 movw %ax,%fs # double map cs to fs
324 movw %cx,%gs # and ds to gs
325 lret # goto user!
326
327 pushl $lretmsg1 /* "should never get here!" */
328 call _panic
329lretmsg1:
330 .asciz "lret: toinit\n"
331
332
333 .set exec,59
334 .set exit,1
335 .globl _icode
336 .globl _szicode
337
338#define LCALL(x,y) .byte 0x9a ; .long y; .word x
339/*
340 * Icode is copied out to process 1 to exec /etc/init.
341 * If the exec fails, process 1 exits.
342 */
343_icode:
344 # pushl $argv-_icode # gas fucks up again
345 movl $argv,%eax
346 subl $_icode,%eax
347 pushl %eax
348
349 # pushl $init-_icode
350 movl $init,%eax
351 subl $_icode,%eax
352 pushl %eax
353 pushl %eax # dummy out rta
354
355 movl %esp,%ebp
356 movl $exec,%eax
357 LCALL(0x7,0x0)
358 pushl %eax
359 movl $exit,%eax
360 pushl %eax # dummy out rta
361 LCALL(0x7,0x0)
362
363init:
364 .asciz "/sbin/init"
365 .align 2
366argv:
367 .long init+6-_icode # argv[0] = "init" ("/sbin/init" + 6)
368 .long eicode-_icode # argv[1] follows icode after copyout
369 .long 0
370eicode:
371
372_szicode:
373 .long _szicode-_icode
374
375 .globl _sigcode,_szsigcode
376_sigcode:
377 movl 12(%esp),%eax # unsure if call will dec stack 1st
378 call %eax
379 xorl %eax,%eax # smaller movl $103,%eax
380 movb $103,%al # sigreturn()
381 LCALL(0x7,0) # enter kernel with args on stack
382 hlt # never gets here
383
384_szsigcode:
385 .long _szsigcode-_sigcode
386
387 /*
388 * Support routines for GCC
389 */
390 .globl ___udivsi3
6d829b69 391 ALIGN32
3c40a151
WJ
392___udivsi3:
393 movl 4(%esp),%eax
394 xorl %edx,%edx
395 divl 8(%esp)
396 ret
397
398 .globl ___divsi3
6d829b69 399 ALIGN32
3c40a151
WJ
400___divsi3:
401 movl 4(%esp),%eax
6d829b69 402 #xorl %edx,%edx /* not needed - cltd sign extends into %edx */
3c40a151
WJ
403 cltd
404 idivl 8(%esp)
405 ret
406
407 /*
408 * I/O bus instructions via C
409 */
410 .globl _inb
6d829b69 411 ALIGN32
3c40a151
WJ
412_inb: movl 4(%esp),%edx
413 subl %eax,%eax # clr eax
414 NOP
415 inb %dx,%al
416 ret
417
418
419 .globl _inw
6d829b69 420 ALIGN32
3c40a151
WJ
421_inw: movl 4(%esp),%edx
422 subl %eax,%eax # clr eax
423 NOP
424 inw %dx,%ax
425 ret
426
427
428 .globl _rtcin
6d829b69 429 ALIGN32
3c40a151
WJ
430_rtcin: movl 4(%esp),%eax
431 outb %al,$0x70
432 subl %eax,%eax # clr eax
433 inb $0x71,%al # Compaq SystemPro
434 ret
435
436 .globl _outb
6d829b69 437 ALIGN32
3c40a151
WJ
438_outb: movl 4(%esp),%edx
439 NOP
440 movl 8(%esp),%eax
441 outb %al,%dx
442 NOP
443 ret
444
445 .globl _outw
6d829b69 446 ALIGN32
3c40a151
WJ
447_outw: movl 4(%esp),%edx
448 NOP
449 movl 8(%esp),%eax
450 outw %ax,%dx
451 NOP
452 ret
453
454 /*
455 * void bzero(void *base, u_int cnt)
456 */
457
458 .globl _bzero
6d829b69 459 ALIGN32
3c40a151
WJ
460_bzero:
461 pushl %edi
462 movl 8(%esp),%edi
463 movl 12(%esp),%ecx
464 xorl %eax,%eax
465 shrl $2,%ecx
466 cld
467 rep
468 stosl
469 movl 12(%esp),%ecx
470 andl $3,%ecx
471 rep
472 stosb
473 popl %edi
474 ret
475
476 /*
477 * fillw (pat,base,cnt)
478 */
479
480 .globl _fillw
6d829b69 481 ALIGN32
3c40a151
WJ
482_fillw:
483 pushl %edi
484 movl 8(%esp),%eax
485 movl 12(%esp),%edi
486 movl 16(%esp),%ecx
487 cld
488 rep
489 stosw
490 popl %edi
491 ret
492
493 .globl _bcopyb
6d829b69 494 ALIGN32
3c40a151
WJ
495_bcopyb:
496 pushl %esi
497 pushl %edi
498 movl 12(%esp),%esi
499 movl 16(%esp),%edi
500 movl 20(%esp),%ecx
501 cld
502 rep
503 movsb
504 popl %edi
505 popl %esi
506 xorl %eax,%eax
507 ret
508
509 /*
510 * (ov)bcopy (src,dst,cnt)
511 * ws@tools.de (Wolfgang Solfrank, TooLs GmbH) +49-228-985800
512 */
513
514 .globl _bcopy,_ovbcopy
6d829b69 515 ALIGN32
3c40a151
WJ
516_ovbcopy:
517_bcopy:
518 pushl %esi
519 pushl %edi
520 movl 12(%esp),%esi
521 movl 16(%esp),%edi
522 movl 20(%esp),%ecx
523 cmpl %esi,%edi /* potentially overlapping? */
524 jnb 1f
525 cld /* nope, copy forwards. */
526 shrl $2,%ecx /* copy by words */
527 rep
528 movsl
529 movl 20(%esp),%ecx
530 andl $3,%ecx /* any bytes left? */
531 rep
532 movsb
533 popl %edi
534 popl %esi
535 xorl %eax,%eax
536 ret
6d829b69 537 ALIGN32
3c40a151
WJ
5381:
539 addl %ecx,%edi /* copy backwards. */
540 addl %ecx,%esi
541 std
542 andl $3,%ecx /* any fractional bytes? */
543 decl %edi
544 decl %esi
545 rep
546 movsb
547 movl 20(%esp),%ecx /* copy remainder by words */
548 shrl $2,%ecx
549 subl $3,%esi
550 subl $3,%edi
551 rep
552 movsl
553 popl %edi
554 popl %esi
555 xorl %eax,%eax
556 cld
557 ret
558
559#ifdef notdef
560 .globl _copyout
6d829b69 561 ALIGN32
3c40a151
WJ
562_copyout:
563 movl _curpcb, %eax
564 movl $cpyflt, PCB_ONFAULT(%eax) # in case we page/protection violate
565 pushl %esi
566 pushl %edi
567 pushl %ebx
568 movl 16(%esp), %esi
569 movl 20(%esp), %edi
570 movl 24(%esp), %ebx
571
572 /* first, check to see if "write fault" */
5731: movl %edi, %eax
574#ifdef notyet
575 shrl $IDXSHIFT, %eax /* fetch pte associated with address */
576 andb $0xfc, %al
577 movl _PTmap(%eax), %eax
578
579 andb $7, %al /* if we are the one case that won't trap... */
580 cmpb $5, %al
581 jne 2f
582 /* ... then simulate the trap! */
583 pushl %edi
584 call _trapwrite /* trapwrite(addr) */
585 popl %edx
586
587 cmpl $0, %eax /* if not ok, return */
588 jne cpyflt
589 /* otherwise, continue with reference */
5902:
591 movl %edi, %eax /* calculate remainder this pass */
592 andl $0xfffff000, %eax
593 movl $NBPG, %ecx
594 subl %eax, %ecx
595 cmpl %ecx, %ebx
596 jle 3f
597 movl %ebx, %ecx
5983: subl %ecx, %ebx
599 movl %ecx, %edx
600#else
601 movl %ebx, %ecx
602 movl %ebx, %edx
603#endif
604
605 shrl $2,%ecx /* movem */
606 cld
607 rep
608 movsl
609 movl %edx, %ecx /* don't depend on ecx here! */
610 andl $3, %ecx
611 rep
612 movsb
613
614#ifdef notyet
615 cmpl $0, %ebx
616 jl 1b
617#endif
618
619 popl %ebx
620 popl %edi
621 popl %esi
622 xorl %eax,%eax
623 movl _curpcb,%edx
624 movl %eax,PCB_ONFAULT(%edx)
625 ret
626
627 .globl _copyin
6d829b69 628 ALIGN32
3c40a151
WJ
629_copyin:
630 movl _curpcb,%eax
631 movl $cpyflt,PCB_ONFAULT(%eax) # in case we page/protection violate
632 pushl %esi
633 pushl %edi
634 pushl %ebx
635 movl 12(%esp),%esi
636 movl 16(%esp),%edi
637 movl 20(%esp),%ecx
638 shrl $2,%ecx
639 cld
640 rep
641 movsl
642 movl 20(%esp),%ecx
643 andl $3,%ecx
644 rep
645 movsb
646 popl %ebx
647 popl %edi
648 popl %esi
649 xorl %eax,%eax
650 movl _curpcb,%edx
651 movl %eax,PCB_ONFAULT(%edx)
652 ret
653
6d829b69 654 ALIGN32
3c40a151
WJ
655cpyflt:
656 popl %ebx
657 popl %edi
658 popl %esi
659 movl _curpcb,%edx
660 movl $0,PCB_ONFAULT(%edx)
661 movl $ EFAULT,%eax
662 ret
663#else
664 .globl _copyout
6d829b69 665 ALIGN32
3c40a151
WJ
666_copyout:
667 movl _curpcb,%eax
668 movl $cpyflt,PCB_ONFAULT(%eax) # in case we page/protection violate
669 pushl %esi
670 pushl %edi
671 movl 12(%esp),%esi
672 movl 16(%esp),%edi
673 movl 20(%esp),%ecx
674 shrl $2,%ecx
675 cld
676 rep
677 movsl
678 movl 20(%esp),%ecx
679 andl $3,%ecx
680 rep
681 movsb
682 popl %edi
683 popl %esi
684 xorl %eax,%eax
685 movl _curpcb,%edx
686 movl %eax,PCB_ONFAULT(%edx)
687 ret
688
689 .globl _copyin
6d829b69 690 ALIGN32
3c40a151
WJ
691_copyin:
692 movl _curpcb,%eax
693 movl $cpyflt,PCB_ONFAULT(%eax) # in case we page/protection violate
694 pushl %esi
695 pushl %edi
696 movl 12(%esp),%esi
697 movl 16(%esp),%edi
698 movl 20(%esp),%ecx
699 shrl $2,%ecx
700 cld
701 rep
702 movsl
703 movl 20(%esp),%ecx
704 andl $3,%ecx
705 rep
706 movsb
707 popl %edi
708 popl %esi
709 xorl %eax,%eax
710 movl _curpcb,%edx
711 movl %eax,PCB_ONFAULT(%edx)
712 ret
713
6d829b69 714 ALIGN32
3c40a151
WJ
715cpyflt: popl %edi
716 popl %esi
717 movl _curpcb,%edx
718 movl $0,PCB_ONFAULT(%edx)
719 movl $ EFAULT,%eax
720 ret
721
722#endif
723
724 # insb(port,addr,cnt)
725 .globl _insb
6d829b69 726 ALIGN32
3c40a151
WJ
727_insb:
728 pushl %edi
729 movw 8(%esp),%dx
730 movl 12(%esp),%edi
731 movl 16(%esp),%ecx
732 cld
733 NOP
734 rep
735 insb
736 NOP
737 movl %edi,%eax
738 popl %edi
739 ret
740
741 # insw(port,addr,cnt)
742 .globl _insw
6d829b69 743 ALIGN32
3c40a151
WJ
744_insw:
745 pushl %edi
746 movw 8(%esp),%dx
747 movl 12(%esp),%edi
748 movl 16(%esp),%ecx
749 cld
750 NOP
751 .byte 0x66,0xf2,0x6d # rep insw
752 NOP
753 movl %edi,%eax
754 popl %edi
755 ret
756
757 # outsw(port,addr,cnt)
758 .globl _outsw
6d829b69 759 ALIGN32
3c40a151
WJ
760_outsw:
761 pushl %esi
762 movw 8(%esp),%dx
763 movl 12(%esp),%esi
764 movl 16(%esp),%ecx
765 cld
766 NOP
767 .byte 0x66,0xf2,0x6f # rep outsw
768 NOP
769 movl %esi,%eax
770 popl %esi
771 ret
772
773 # outsb(port,addr,cnt)
774 .globl _outsb
6d829b69 775 ALIGN32
3c40a151
WJ
776_outsb:
777 pushl %esi
778 movw 8(%esp),%dx
779 movl 12(%esp),%esi
780 movl 16(%esp),%ecx
781 cld
782 NOP
783 rep
784 outsb
785 NOP
786 movl %esi,%eax
787 popl %esi
788 ret
789
790 /*
791 * void lgdt(struct region_descriptor *rdp);
792 */
793 .globl _lgdt
6d829b69 794 ALIGN32
3c40a151
WJ
795_lgdt:
796 /* reload the descriptor table */
797 movl 4(%esp),%eax
798 lgdt (%eax)
799 /* flush the prefetch q */
800 jmp 1f
801 nop
8021:
803 /* reload "stale" selectors */
804 # movw $KDSEL,%ax
805 movw $0x10,%ax
806 movw %ax,%ds
807 movw %ax,%es
808 movw %ax,%ss
809
810 /* reload code selector by turning return into intersegmental return */
811 movl 0(%esp),%eax
812 pushl %eax
813 # movl $KCSEL,4(%esp)
814 movl $8,4(%esp)
815 lret
816
817 /*
818 * void lidt(struct region_descriptor *rdp);
819 */
820 .globl _lidt
6d829b69 821 ALIGN32
3c40a151
WJ
822_lidt:
823 movl 4(%esp),%eax
824 lidt (%eax)
825 ret
826
827 /*
828 * void lldt(u_short sel)
829 */
830 .globl _lldt
6d829b69 831 ALIGN32
3c40a151
WJ
832_lldt:
833 lldt 4(%esp)
834 ret
835
836 /*
837 * void ltr(u_short sel)
838 */
839 .globl _ltr
6d829b69 840 ALIGN32
3c40a151
WJ
841_ltr:
842 ltr 4(%esp)
843 ret
844
845 /*
846 * void lcr3(caddr_t cr3)
847 */
848 .globl _lcr3
849 .globl _load_cr3
6d829b69 850 ALIGN32
3c40a151
WJ
851_load_cr3:
852_lcr3:
853 inb $0x84,%al # check wristwatch
854 movl 4(%esp),%eax
855 orl $ I386_CR3PAT,%eax
856 movl %eax,%cr3
857 inb $0x84,%al # check wristwatch
858 ret
859
860 # tlbflush()
861 .globl _tlbflush
6d829b69 862 ALIGN32
3c40a151
WJ
863_tlbflush:
864 inb $0x84,%al # check wristwatch
865 movl %cr3,%eax
866 orl $ I386_CR3PAT,%eax
867 movl %eax,%cr3
868 inb $0x84,%al # check wristwatch
869 ret
870
871 # lcr0(cr0)
872 .globl _lcr0,_load_cr0
6d829b69 873 ALIGN32
3c40a151
WJ
874_lcr0:
875_load_cr0:
876 movl 4(%esp),%eax
877 movl %eax,%cr0
878 ret
879
880 # rcr0()
881 .globl _rcr0
6d829b69 882 ALIGN32
3c40a151
WJ
883_rcr0:
884 movl %cr0,%eax
885 ret
886
887 # rcr2()
888 .globl _rcr2
6d829b69 889 ALIGN32
3c40a151
WJ
890_rcr2:
891 movl %cr2,%eax
892 ret
893
894 # rcr3()
895 .globl _rcr3
896 .globl __cr3
6d829b69 897 ALIGN32
3c40a151
WJ
898__cr3:
899_rcr3:
900 movl %cr3,%eax
901 ret
902
903 # ssdtosd(*ssdp,*sdp)
904 .globl _ssdtosd
6d829b69 905 ALIGN32
3c40a151
WJ
906_ssdtosd:
907 pushl %ebx
908 movl 8(%esp),%ecx
909 movl 8(%ecx),%ebx
910 shll $16,%ebx
911 movl (%ecx),%edx
912 roll $16,%edx
913 movb %dh,%bl
914 movb %dl,%bh
915 rorl $8,%ebx
916 movl 4(%ecx),%eax
917 movw %ax,%dx
918 andl $0xf0000,%eax
919 orl %eax,%ebx
920 movl 12(%esp),%ecx
921 movl %edx,(%ecx)
922 movl %ebx,4(%ecx)
923 popl %ebx
924 ret
925
926/*
927 * {fu,su},{byte,word}
928 */
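/*
 * The bare `.byte 0x65' below is the %gs segment-override prefix: the
 * access that follows goes through %gs, which the kernel leaves holding
 * the user data selector, and a fault vectors through PCB_ONFAULT to
 * fusufault, which returns -1.
 */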
6d829b69 929 ALIGN32
3c40a151
WJ
930ALTENTRY(fuiword)
931ENTRY(fuword)
932 movl _curpcb,%ecx
933 movl $fusufault,PCB_ONFAULT(%ecx)
934 movl 4(%esp),%edx
935 .byte 0x65 # use gs
936 movl 0(%edx),%eax
937 movl $0,PCB_ONFAULT(%ecx)
938 ret
939
6d829b69 940 ALIGN32
3c40a151
WJ
941ENTRY(fusword)
942 movl _curpcb,%ecx
943 movl $fusufault,PCB_ONFAULT(%ecx) #in case we page/protection violate
944 movl 4(%esp),%edx
945 .byte 0x65 # use gs
946 movzwl 0(%edx),%eax
947 movl $0,PCB_ONFAULT(%ecx)
948 ret
949
6d829b69 950 ALIGN32
3c40a151
WJ
951ALTENTRY(fuibyte)
952ENTRY(fubyte)
953 movl _curpcb,%ecx
954 movl $fusufault,PCB_ONFAULT(%ecx) #in case we page/protection violate
955 movl 4(%esp),%edx
956 .byte 0x65 # use gs
957 movzbl 0(%edx),%eax
958 movl $0,PCB_ONFAULT(%ecx)
959 ret
960
6d829b69 961 ALIGN32
3c40a151
WJ
962fusufault:
963 movl _curpcb,%ecx
964 xorl %eax,%eax
965 movl %eax,PCB_ONFAULT(%ecx) #in case we page/protection violate
966 decl %eax
967 ret
968
6d829b69 969 ALIGN32
3c40a151
WJ
970ALTENTRY(suiword)
971ENTRY(suword)
972 movl _curpcb,%ecx
973 movl $fusufault,PCB_ONFAULT(%ecx) #in case we page/protection violate
974 movl 4(%esp),%edx
975 movl 8(%esp),%eax
976
977#ifdef notdef
978 shrl $IDXSHIFT, %edx /* fetch pte associated with address */
979 andb $0xfc, %dl
980 movl _PTmap(%edx), %edx
981
982 andb $7, %dl /* if we are the one case that won't trap... */
983 cmpb $5, %dl
984 jne 1f
985 /* ... then simulate the trap! */
986 pushl %edi
987 call _trapwrite /* trapwrite(addr) */
988 popl %edx
989 cmpl $0, %eax /* if not ok, return */
990 jne fusufault
991 movl 8(%esp),%eax /* otherwise, continue with reference */
9921:
993 movl 4(%esp),%edx
994#endif
995 .byte 0x65 # use gs
996 movl %eax,0(%edx)
997 xorl %eax,%eax
998 movl %eax,PCB_ONFAULT(%ecx) #in case we page/protection violate
999 ret
1000
6d829b69 1001 ALIGN32
3c40a151
WJ
1002ENTRY(susword)
1003 movl _curpcb,%ecx
1004 movl $fusufault,PCB_ONFAULT(%ecx) #in case we page/protection violate
1005 movl 4(%esp),%edx
1006 movl 8(%esp),%eax
1007#ifdef notdef
1008shrl $IDXSHIFT, %edx /* calculate pte address */
1009andb $0xfc, %dl
1010movl _PTmap(%edx), %edx
1011andb $7, %dl /* if we are the one case that won't trap... */
1012cmpb $5, %dl
1013jne 1f
1014/* ..., then simulate the trap! */
1015 pushl %edi
1016 call _trapwrite /* trapwrite(addr) */
1017 popl %edx
1018movl _curpcb, %ecx # restore trashed registers
1019cmpl $0, %eax /* if not ok, return */
1020jne fusufault
1021movl 8(%esp),%eax
10221: movl 4(%esp),%edx
1023#endif
1024 .byte 0x65 # use gs
1025 movw %ax,0(%edx)
1026 xorl %eax,%eax
1027 movl %eax,PCB_ONFAULT(%ecx) #in case we page/protection violate
1028 ret
1029
6d829b69 1030 ALIGN32
3c40a151
WJ
1031ALTENTRY(suibyte)
1032ENTRY(subyte)
1033 movl _curpcb,%ecx
1034 movl $fusufault,PCB_ONFAULT(%ecx) #in case we page/protection violate
1035 movl 4(%esp),%edx
1036 movl 8(%esp),%eax
1037#ifdef notdef
1038shrl $IDXSHIFT, %edx /* calculate pte address */
1039andb $0xfc, %dl
1040movl _PTmap(%edx), %edx
1041andb $7, %dl /* if we are the one case that won't trap... */
1042cmpb $5, %dl
1043jne 1f
1044/* ..., then simulate the trap! */
1045 pushl %edi
1046 call _trapwrite /* trapwrite(addr) */
1047 popl %edx
1048movl _curpcb, %ecx # restore trashed registers
1049cmpl $0, %eax /* if not ok, return */
1050jne fusufault
1051movl 8(%esp),%eax
10521: movl 4(%esp),%edx
1053#endif
1054 .byte 0x65 # use gs
1055 movb %al,0(%edx)
1056 xorl %eax,%eax
1057 movl %eax,PCB_ONFAULT(%ecx) #in case we page/protection violate
1058 ret
1059
6d829b69 1060 ALIGN32
3c40a151
WJ
1061 ENTRY(setjmp)
1062 movl 4(%esp),%eax
1063 movl %ebx, 0(%eax) # save ebx
1064 movl %esp, 4(%eax) # save esp
1065 movl %ebp, 8(%eax) # save ebp
1066 movl %esi,12(%eax) # save esi
1067 movl %edi,16(%eax) # save edi
1068 movl (%esp),%edx # get rta
1069 movl %edx,20(%eax) # save eip
1070 xorl %eax,%eax # return (0);
1071 ret
1072
6d829b69 1073 ALIGN32
3c40a151
WJ
1074 ENTRY(longjmp)
1075 movl 4(%esp),%eax
1076 movl 0(%eax),%ebx # restore ebx
1077 movl 4(%eax),%esp # restore esp
1078 movl 8(%eax),%ebp # restore ebp
1079 movl 12(%eax),%esi # restore esi
1080 movl 16(%eax),%edi # restore edi
1081 movl 20(%eax),%edx # get rta
1082 movl %edx,(%esp) # put in return frame
1083 xorl %eax,%eax # return (1);
1084 incl %eax
1085 ret
1086/*
1087 * The following primitives manipulate the run queues.
1088 * _whichqs tells which of the 32 queues _qs
1089 * have processes in them. Setrq puts processes into queues, Remrq
1090 * removes them from queues. The running process is on no queue,
1091 * other processes are on a queue related to p->p_pri, divided by 4
1092 * actually to shrink the 0-127 range of priorities into the 32 available
1093 * queues.
1094 */
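/*
 * e.g. a process with p_pri 50 goes on queue 50 >> 2 == 12: bit 12 of
 * _whichqs gets set and its queue header is at _qs + 12*8 (each header
 * being a pair of 4-byte link/rlink pointers).
 */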
1095
1096 .globl _whichqs,_qs,_cnt,_panic
1097 .comm _noproc,4
1098 .comm _runrun,4
1099
1100/*
1101 * Setrq(p)
1102 *
1103 * Call should be made at spl6(), and p->p_stat should be SRUN
1104 */
6d829b69 1105 ALIGN32
3c40a151
WJ
1106ENTRY(setrq)
1107 movl 4(%esp),%eax
1108 cmpl $0,P_RLINK(%eax) # should not be on q already
1109 je set1
1110 pushl $set2
1111 call _panic
1112set1:
1113 movzbl P_PRI(%eax),%edx
1114 shrl $2,%edx
1115 btsl %edx,_whichqs # set q full bit
1116 shll $3,%edx
1117 addl $_qs,%edx # locate q hdr
1118 movl %edx,P_LINK(%eax) # link process on tail of q
1119 movl P_RLINK(%edx),%ecx
1120 movl %ecx,P_RLINK(%eax)
1121 movl %eax,P_RLINK(%edx)
1122 movl %eax,P_LINK(%ecx)
1123 ret
1124
1125set2: .asciz "setrq"
1126
1127/*
1128 * Remrq(p)
1129 *
1130 * Call should be made at spl6().
1131 */
6d829b69 1132 ALIGN32
3c40a151
WJ
1133ENTRY(remrq)
1134 movl 4(%esp),%eax
1135 movzbl P_PRI(%eax),%edx
1136 shrl $2,%edx
1137 btrl %edx,_whichqs # clear full bit, panic if clear already
1138 jb rem1
1139 pushl $rem3
1140 call _panic
1141rem1:
1142 pushl %edx
1143 movl P_LINK(%eax),%ecx # unlink process
1144 movl P_RLINK(%eax),%edx
1145 movl %edx,P_RLINK(%ecx)
1146 movl P_RLINK(%eax),%ecx
1147 movl P_LINK(%eax),%edx
1148 movl %edx,P_LINK(%ecx)
1149 popl %edx
1150 movl $_qs,%ecx
1151 shll $3,%edx
1152 addl %edx,%ecx
1153 cmpl P_LINK(%ecx),%ecx # q still has something?
1154 je rem2
1155 shrl $3,%edx # yes, set bit as still full
1156 btsl %edx,_whichqs
1157rem2:
1158 movl $0,P_RLINK(%eax) # zap reverse link to indicate off list
1159 ret
1160
1161rem3: .asciz "remrq"
1162sw0: .asciz "swtch"
1163
1164/*
1165 * When no processes are on the runq, Swtch branches to idle
1166 * to wait for something to come ready.
1167 */
1168 .globl Idle
6d829b69 1169 ALIGN32
3c40a151
WJ
1170Idle:
1171idle:
1172 call _spl0
1173 cmpl $0,_whichqs
1174 jne sw1
1175 hlt # wait for interrupt
1176 jmp idle
1177
1178 .align 4 /* ..so that profiling doesn't lump Idle with swtch().. */
1179badsw:
1180 pushl $sw0
1181 call _panic
1182 /*NOTREACHED*/
1183
1184/*
1185 * Swtch()
1186 */
6d829b69 1187 ALIGN32
3c40a151
WJ
1188ENTRY(swtch)
1189
1190 incl _cnt+V_SWTCH
1191
1192 /* switch to new process. first, save context as needed */
1193
1194 movl _curproc,%ecx
1195
1196 /* if no process to save, don't bother */
1197 cmpl $0,%ecx
1198 je sw1
1199
1200 movl P_ADDR(%ecx),%ecx
1201
1202
1203 movl (%esp),%eax # Hardware registers
1204 movl %eax, PCB_EIP(%ecx)
1205 movl %ebx, PCB_EBX(%ecx)
1206 movl %esp, PCB_ESP(%ecx)
1207 movl %ebp, PCB_EBP(%ecx)
1208 movl %esi, PCB_ESI(%ecx)
1209 movl %edi, PCB_EDI(%ecx)
1210
1211#ifdef NPX
3c40a151 1212 /* have we used fp, and need a save? */
42d41470
BE
1213 mov _curproc,%eax
1214 cmp %eax,_npxproc
3c40a151 1215 jne 1f
42d41470
BE
1216 pushl %ecx /* h/w bugs make saving complicated */
1217 leal PCB_SAVEFPU(%ecx),%eax
1218 pushl %eax
1219 call _npxsave /* do it in a big C function */
1220 popl %eax
1221 popl %ecx
3c40a151
WJ
12221:
1223#endif
1224
1225 movl _CMAP2,%eax # save temporary map PTE
1226 movl %eax,PCB_CMAP2(%ecx) # in our context
1227 movl $0,_curproc # out of process
1228
1229 # movw _cpl, %ax
1230 # movw %ax, PCB_IML(%ecx) # save ipl
1231
1232 /* save is done, now choose a new process or idle */
1233sw1:
1234 movl _whichqs,%edi
12352:
1236 cli
1237 bsfl %edi,%eax # find a full q
1238 jz idle # if none, idle
1239 # XX update whichqs?
1240swfnd:
1241 btrl %eax,%edi # clear q full status
1242 jnb 2b # if it was clear, look for another
1243 movl %eax,%ebx # save which one we are using
1244
1245 shll $3,%eax
1246 addl $_qs,%eax # select q
1247 movl %eax,%esi
1248
1249#ifdef DIAGNOSTIC
1250 cmpl P_LINK(%eax),%eax # linked to self? (e.g. not on list)
1251 je badsw # not possible
1252#endif
1253
1254 movl P_LINK(%eax),%ecx # unlink from front of process q
1255 movl P_LINK(%ecx),%edx
1256 movl %edx,P_LINK(%eax)
1257 movl P_RLINK(%ecx),%eax
1258 movl %eax,P_RLINK(%edx)
1259
1260 cmpl P_LINK(%ecx),%esi # q empty
1261 je 3f
1262 btsl %ebx,%edi # nope, set to indicate full
12633:
1264 movl %edi,_whichqs # update q status
1265
1266 movl $0,%eax
1267 movl %eax,_want_resched
1268
1269#ifdef DIAGNOSTIC
1270 cmpl %eax,P_WCHAN(%ecx)
1271 jne badsw
1272 cmpb $ SRUN,P_STAT(%ecx)
1273 jne badsw
1274#endif
1275
1276 movl %eax,P_RLINK(%ecx) /* isolate process to run */
1277 movl P_ADDR(%ecx),%edx
1278 movl PCB_CR3(%edx),%ebx
1279
1280 /* switch address space */
1281 movl %ebx,%cr3
1282
1283 /* restore context */
1284 movl PCB_EBX(%edx), %ebx
1285 movl PCB_ESP(%edx), %esp
1286 movl PCB_EBP(%edx), %ebp
1287 movl PCB_ESI(%edx), %esi
1288 movl PCB_EDI(%edx), %edi
1289 movl PCB_EIP(%edx), %eax
1290 movl %eax, (%esp)
1291
3c40a151
WJ
1292 movl PCB_CMAP2(%edx),%eax # get temporary map
1293 movl %eax,_CMAP2 # reload temporary map PTE
1294
1295 movl %ecx,_curproc # into next process
1296 movl %edx,_curpcb
1297
1298 /* pushl PCB_IML(%edx)
1299 call _splx
1300 popl %eax*/
1301
1302 movl %edx,%eax # return (1);
1303 ret
1304
1305 .globl _mvesp
6d829b69 1306 ALIGN32
3c40a151
WJ
1307_mvesp: movl %esp,%eax
1308 ret
1309/*
1310 * struct proc *swtch_to_inactive(p) ; struct proc *p;
1311 *
1312 * At exit of a process, move off the address space of the
1313 * process and onto a "safe" one. Then, on a temporary stack
1314 * return and run code that disposes of the old state.
1315 * Since this code requires a parameter from the "old" stack,
1316 * pass it back as a return value.
1317 */
6d829b69 1318 ALIGN32
3c40a151
WJ
1319ENTRY(swtch_to_inactive)
1320 popl %edx # old pc
1321 popl %eax # arg, our return value
1322 movl _IdlePTD,%ecx
1323 movl %ecx,%cr3 # good bye address space
1324 #write buffer?
1325 movl $tmpstk-4,%esp # temporary stack, compensated for call
1326 jmp %edx # return, execute remainder of cleanup
1327
1328/*
1329 * savectx(pcb, altreturn)
1330 * Update pcb, saving current processor state and arranging
1331 * for alternate return ala longjmp in swtch if altreturn is true.
1332 */
6d829b69 1333 ALIGN32
3c40a151
WJ
1334ENTRY(savectx)
1335 movl 4(%esp), %ecx
1336 movw _cpl, %ax
1337 movw %ax, PCB_IML(%ecx)
1338 movl (%esp), %eax
1339 movl %eax, PCB_EIP(%ecx)
1340 movl %ebx, PCB_EBX(%ecx)
1341 movl %esp, PCB_ESP(%ecx)
1342 movl %ebp, PCB_EBP(%ecx)
1343 movl %esi, PCB_ESI(%ecx)
1344 movl %edi, PCB_EDI(%ecx)
42d41470 1345
3c40a151 1346#ifdef NPX
42d41470
BE
1347 /*
1348 * If npxproc == NULL, then the npx h/w state is irrelevant and the
1349 * state had better already be in the pcb. This is true for forks
1350 * but not for dumps (the old book-keeping with FP flags in the pcb
1351 * always lost for dumps because the dump pcb has 0 flags).
1352 *
1353 * If npxproc != NULL, then we have to save the npx h/w state to
1354 * npxproc's pcb and copy it to the requested pcb, or save to the
1355 * requested pcb and reload. Copying is easier because we would
1356 * have to handle h/w bugs for reloading. We used to lose the
1357 * parent's npx state for forks by forgetting to reload.
1358 */
1359 mov _npxproc,%eax
1360 testl %eax,%eax
3c40a151 1361 je 1f
42d41470
BE
1362
1363 pushl %ecx
1364 movl P_ADDR(%eax),%eax
1365 leal PCB_SAVEFPU(%eax),%eax
1366 pushl %eax
1367 pushl %eax
1368 call _npxsave
1369 popl %eax
1370 popl %eax
1371 popl %ecx
1372
1373 pushl %ecx
1374 pushl $108+8*2 /* XXX h/w state size + padding */
1375 leal PCB_SAVEFPU(%ecx),%ecx
1376 pushl %ecx
1377 pushl %eax
1378 call _bcopy
1379 addl $12,%esp
1380 popl %ecx
3c40a151
WJ
13811:
1382#endif
42d41470 1383
3c40a151
WJ
1384 movl _CMAP2, %edx # save temporary map PTE
1385 movl %edx, PCB_CMAP2(%ecx) # in our context
1386
1387 cmpl $0, 8(%esp)
1388 je 1f
1389 movl %esp, %edx # relocate current sp relative to pcb
1390 subl $_kstack, %edx # (sp is relative to kstack):
1391 addl %edx, %ecx # pcb += sp - kstack;
1392 movl %eax, (%ecx) # write return pc at (relocated) sp@
1393 # this mess deals with replicating register state gcc hides
1394 movl 12(%esp),%eax
1395 movl %eax,12(%ecx)
1396 movl 16(%esp),%eax
1397 movl %eax,16(%ecx)
1398 movl 20(%esp),%eax
1399 movl %eax,20(%ecx)
1400 movl 24(%esp),%eax
1401 movl %eax,24(%ecx)
14021:
1403 xorl %eax, %eax # return 0
1404 ret
1405
1406/*
1407 * addupc(int pc, struct uprof *up, int ticks):
1408 * update profiling information for the user process.
1409 */
1410
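/*
 * In C terms the counter updated below is roughly
 * *(int *)(up->pr_base + ((((pc - up->pr_off) >> 1) * up->pr_scale)
 * >> 15 & ~1)) += ticks;
 * and the update is skipped if the scaled offset falls outside pr_size.
 */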
6d829b69 1411 ALIGN32
3c40a151
WJ
1412ENTRY(addupc)
1413 pushl %ebp
1414 movl %esp,%ebp
1415 movl 12(%ebp),%edx /* up */
1416 movl 8(%ebp),%eax /* pc */
1417
1418 subl PR_OFF(%edx),%eax /* pc -= up->pr_off */
1419 jl L1 /* if (pc < 0) return */
1420
1421 shrl $1,%eax /* praddr = pc >> 1 */
1422 imull PR_SCALE(%edx),%eax /* praddr *= up->pr_scale */
1423 shrl $15,%eax /* praddr = praddr >> 15 */
1424 andl $-2,%eax /* praddr &= ~1 */
1425
1426 cmpl PR_SIZE(%edx),%eax /* if (praddr > up->pr_size) return */
1427 ja L1
1428
1429/* addl %eax,%eax /* praddr -> word offset */
1430 addl PR_BASE(%edx),%eax /* praddr += up-> pr_base */
1431 movl 16(%ebp),%ecx /* ticks */
1432
1433 movl _curpcb,%edx
1434 movl $proffault,PCB_ONFAULT(%edx)
1435 addl %ecx,(%eax) /* storage location += ticks */
1436 movl $0,PCB_ONFAULT(%edx)
1437L1:
1438 leave
1439 ret
1440
6d829b69 1441 ALIGN32
3c40a151
WJ
1442proffault:
1443 /* if we get a fault, then kill profiling altogether */
1444 movl $0,PCB_ONFAULT(%edx) /* squish the fault handler */
1445 movl 12(%ebp),%ecx
1446 movl $0,PR_SCALE(%ecx) /* up->pr_scale = 0 */
1447 leave
1448 ret
1449
1450.data
6d829b69 1451 ALIGN32
3c40a151
WJ
1452 .globl _cyloffset, _curpcb
1453_cyloffset: .long 0
1454 .globl _proc0paddr
1455_proc0paddr: .long 0
1456LF: .asciz "swtch %x"
1457
1458.text
1459 # To be done:
1460 .globl _astoff
1461_astoff:
1462 ret
1463
1464#define IDTVEC(name) .align 4; .globl _X/**/name; _X/**/name:
1465#define PANIC(msg) xorl %eax,%eax; movl %eax,_waittime; pushl 1f; \
1466 call _panic; 1: .asciz msg
1467#define PRINTF(n,msg) pushal ; nop ; pushl 1f; call _printf; MSG(msg) ; \
1468 popl %eax ; popal
1469#define MSG(msg) .data; 1: .asciz msg; .text
1470
1471 .text
1472
1473/*
1474 * Trap and fault vector routines
1475 */
1476#define TRAP(a) pushl $(a) ; jmp alltraps
1477#ifdef KGDB
1478#define BPTTRAP(a) pushl $(a) ; jmp bpttraps
1479#else
1480#define BPTTRAP(a) TRAP(a)
1481#endif
1482
1483IDTVEC(div)
1484 pushl $0; TRAP(T_DIVIDE)
1485IDTVEC(dbg)
1486 pushl $0; BPTTRAP(T_TRCTRAP)
1487IDTVEC(nmi)
1488 pushl $0; TRAP(T_NMI)
1489IDTVEC(bpt)
1490 pushl $0; BPTTRAP(T_BPTFLT)
1491IDTVEC(ofl)
1492 pushl $0; TRAP(T_OFLOW)
1493IDTVEC(bnd)
1494 pushl $0; TRAP(T_BOUND)
1495IDTVEC(ill)
1496 pushl $0; TRAP(T_PRIVINFLT)
1497IDTVEC(dna)
1498 pushl $0; TRAP(T_DNA)
1499IDTVEC(dble)
1500 TRAP(T_DOUBLEFLT)
1501 /*PANIC("Double Fault");*/
1502IDTVEC(fpusegm)
1503 pushl $0; TRAP(T_FPOPFLT)
1504IDTVEC(tss)
1505 TRAP(T_TSSFLT)
1506 /*PANIC("TSS not valid");*/
1507IDTVEC(missing)
1508 TRAP(T_SEGNPFLT)
1509IDTVEC(stk)
1510 TRAP(T_STKFLT)
1511IDTVEC(prot)
1512 TRAP(T_PROTFLT)
1513IDTVEC(page)
1514 TRAP(T_PAGEFLT)
1515IDTVEC(rsvd)
1516 pushl $0; TRAP(T_RESERVED)
1517IDTVEC(fpu)
42d41470
BE
1518#ifdef NPX
1519 /*
1520 * Handle like an interrupt so that we can call npxintr to clear the
1521 * error. It would be better to handle npx interrupts as traps but
1522 * this is difficult for nested interrupts.
1523 */
1524 pushl $0 /* dummy error code */
1525 pushl $T_ASTFLT
1526 pushal
1527 nop /* silly, the bug is for popal and it only
1528 * bites when the next instruction has a
1529 * complicated address mode */
1530 pushl %ds
1531 pushl %es /* now the stack frame is a trap frame */
1532 movl $KDSEL,%eax
1533 movl %ax,%ds
1534 movl %ax,%es
1535 pushl _cpl
1536 pushl $0 /* dummy unit to finish building intr frame */
1537 incl _cnt+V_TRAP
1538 call _npxintr
1539 jmp doreti
1540#else
3c40a151 1541 pushl $0; TRAP(T_ARITHTRAP)
42d41470 1542#endif
3c40a151
WJ
1543 /* 17 - 31 reserved for future exp */
1544IDTVEC(rsvd0)
1545 pushl $0; TRAP(17)
1546IDTVEC(rsvd1)
1547 pushl $0; TRAP(18)
1548IDTVEC(rsvd2)
1549 pushl $0; TRAP(19)
1550IDTVEC(rsvd3)
1551 pushl $0; TRAP(20)
1552IDTVEC(rsvd4)
1553 pushl $0; TRAP(21)
1554IDTVEC(rsvd5)
1555 pushl $0; TRAP(22)
1556IDTVEC(rsvd6)
1557 pushl $0; TRAP(23)
1558IDTVEC(rsvd7)
1559 pushl $0; TRAP(24)
1560IDTVEC(rsvd8)
1561 pushl $0; TRAP(25)
1562IDTVEC(rsvd9)
1563 pushl $0; TRAP(26)
1564IDTVEC(rsvd10)
1565 pushl $0; TRAP(27)
1566IDTVEC(rsvd11)
1567 pushl $0; TRAP(28)
1568IDTVEC(rsvd12)
1569 pushl $0; TRAP(29)
1570IDTVEC(rsvd13)
1571 pushl $0; TRAP(30)
1572IDTVEC(rsvd14)
1573 pushl $0; TRAP(31)
1574
6d829b69 1575 ALIGN32
3c40a151
WJ
1576alltraps:
1577 pushal
1578 nop
1579 push %ds
1580 push %es
1581 # movw $KDSEL,%ax
1582 movw $0x10,%ax
1583 movw %ax,%ds
1584 movw %ax,%es
1585calltrap:
1586 incl _cnt+V_TRAP
1587 call _trap
42d41470
BE
1588 /*
1589 * Return through doreti to handle ASTs. Have to change trap frame
1590 * to interrupt frame.
1591 */
1592 movl $T_ASTFLT,4+4+32(%esp) /* new trap type (err code not used) */
1593 pushl _cpl
1594 pushl $0 /* dummy unit */
1595 jmp doreti
3c40a151
WJ
1596
1597#ifdef KGDB
1598/*
1599 * This code checks for a kgdb trap, then falls through
1600 * to the regular trap code.
1601 */
6d829b69 1602 ALIGN32
3c40a151
WJ
1603bpttraps:
1604 pushal
1605 nop
1606 push %es
1607 push %ds
1608 # movw $KDSEL,%ax
1609 movw $0x10,%ax
1610 movw %ax,%ds
1611 movw %ax,%es
1612 movzwl 52(%esp),%eax
1613 test $3,%eax
1614 jne calltrap
1615 call _kgdb_trap_glue
1616 jmp calltrap
1617#endif
1618
1619/*
1620 * Call gate entry for syscall
1621 */
1622
6d829b69 1623 ALIGN32
3c40a151
WJ
1624IDTVEC(syscall)
1625 pushfl # only for stupid carry bit and more stupid wait3 cc kludge
1626 pushal # only need eax,ecx,edx - trap resaves others
1627 nop
42d41470
BE
1628 movl $KDSEL,%eax # switch to kernel segments
1629 movl %ax,%ds
1630 movl %ax,%es
6b9305fd 1631 incl _cnt+V_SYSCALL # kml 3/25/93
3c40a151 1632 call _syscall
42d41470
BE
1633 /*
1634 * Return through doreti to handle ASTs. Have to change syscall frame
1635 * to interrupt frame.
1636 *
1637 * XXX - we should have set up the frame earlier to avoid the
1638 * following popal/pushal (not much can be done to avoid shuffling
1639 * the flags). Consistent frames would simplify things all over.
1640 */
1641 movl 32+0(%esp),%eax /* old flags, shuffle to above cs:eip */
1642 movl 32+4(%esp),%ebx /* `int' frame should have been ef, eip, cs */
1643 movl 32+8(%esp),%ecx
1644 movl %ebx,32+0(%esp)
1645 movl %ecx,32+4(%esp)
1646 movl %eax,32+8(%esp)
3c40a151
WJ
1647 popal
1648 nop
42d41470
BE
1649 pushl $0 /* dummy error code */
1650 pushl $T_ASTFLT
1651 pushal
1652 nop
1653 movl __udatasel,%eax /* switch back to user segments */
1654 push %eax /* XXX - better to preserve originals? */
1655 push %eax
1656 pushl _cpl
1657 pushl $0
1658 jmp doreti
3c40a151 1659
6d829b69 1660 ALIGN32
3c40a151
WJ
1661ENTRY(htonl)
1662ENTRY(ntohl)
1663 movl 4(%esp),%eax
1664 xchgb %al,%ah
1665 roll $16,%eax
1666 xchgb %al,%ah
1667 ret
1668
6d829b69 1669 ALIGN32
3c40a151
WJ
1670ENTRY(htons)
1671ENTRY(ntohs)
1672 movzwl 4(%esp),%eax
1673 xchgb %al,%ah
1674 ret
1675
1676#include "vector.s"
1677#include "i386/isa/icu.s"