/*-
 * Copyright (c) 1989, 1990 William F. Jolitz.
 * Copyright (c) 1990 The Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)icu.s	7.2 (Berkeley) 5/21/91
 *
 * PATCHES MAGIC                LEVEL   PATCH THAT GOT US HERE
 * --------------------         -----   ----------------------
 * CURRENT PATCH LEVEL:         5       00167
 * --------------------         -----   ----------------------
 *
 * 28 Nov 92	Frank MacLachlan	Aligned addresses and data
 *					on 32bit boundaries.
 * 24 Mar 93	Rodney W. Grimes	Added interrupt counters for vmstat
 *					also stray and false intr counters added
 * 20 Apr 93	Bruce Evans		New npx-0.5 code
 * 25 Apr 93	Bruce Evans		Support new interrupt code (intr-0.1)
 *		Rodney W. Grimes	Reimplement above patches..
 * 17 May 93	Rodney W. Grimes	Redid the interrupt counter stuff
 *					moved the counters to vectors.s so
 *					they are next to the name tables.
 * 04 Jun 93	Bruce Evans		Fixed irq_num vs id_num for multiple
 *					devices configed on the same irq with
 *					respect to ipending.  Restructured
 *					not to use BUILD_VECTORS.
 *		Rodney W. Grimes	softsio1 only works if you have sio
 *					serial driver, added #include sio.h
 *					and #ifdef NSIO > 0 to protect it.
 */

/*
 * AT/386
 * Vector interrupt control section
 */

/*
 * XXX - this file is now misnamed.  All spls are now soft and the only thing
 * related to the hardware icu is that the bit numbering is the same in the
 * soft priority masks as in the hard ones.
 */
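
/*
 * Rough sketch of how the pieces fit together (the real interrupt entry
 * code lives in vector.s; this is only a summary):  splXX() just ORs a
 * group mask into the soft mask _cpl.  An interrupt whose bit is set in
 * _cpl is not serviced right away; its entry stub records the bit in
 * _ipending and returns.  When splx(), spl0() or doreti later lower _cpl,
 * unpend_v/unpend_V replay the deferred handlers through their Vresume
 * entries (or through the vec stubs at the end of this file).
 */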

#include "sio.h"
#define	HIGHMASK	0xffff
#define	SOFTCLOCKMASK	0x8000

	.data
	.globl	_cpl
_cpl:	.long	0xffff			# current priority (all off)
	.globl	_imen
_imen:	.long	0xffff			# interrupt mask enable (all off)
/*	.globl	_highmask */
_highmask:	.long	HIGHMASK
	.globl	_ttymask
_ttymask:	.long	0
	.globl	_biomask
_biomask:	.long	0
	.globl	_netmask
_netmask:	.long	0
	.globl	_ipending
_ipending:	.long	0
vec:
	.long	vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7
	.long	vec8, vec9, vec10, vec11, vec12, vec13, vec14, vec15

#define	GENSPL(name, mask, event) \
	.globl	_spl/**/name ; \
	ALIGN_TEXT ; \
_spl/**/name: ; \
	COUNT_EVENT(_intrcnt_spl, event) ; \
	movl	_cpl,%eax ; \
	movl	%eax,%edx ; \
	orl	mask,%edx ; \
	movl	%edx,_cpl ; \
	SHOW_CPL ; \
	ret
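
/*
 * For example, GENSPL(tty, _ttymask, 18) below generates _spltty, which
 * ORs _ttymask into _cpl and returns the previous value of _cpl in %eax
 * so the caller can hand it back to splx() later.
 */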

#define	FASTSPL(mask) \
	movl	mask,_cpl ; \
	SHOW_CPL

#define	FASTSPL_VARMASK(varmask) \
	movl	varmask,%eax ; \
	movl	%eax,_cpl ; \
	SHOW_CPL

	.text

	ALIGN_TEXT
unpend_v:
	COUNT_EVENT(_intrcnt_spl, 0)
	bsfl	%eax,%eax		# slow, but not worth optimizing
	btrl	%eax,_ipending
	jnc	unpend_v_next		# some intr cleared the in-memory bit
	SHOW_IPENDING
	movl	Vresume(,%eax,4),%eax
	testl	%eax,%eax
	je	noresume
	jmp	%eax

	ALIGN_TEXT
/*
 * XXX - must be some fastintr, need to register those too.
 */
noresume:
#if NSIO > 0
	call	_softsio1
#endif
unpend_v_next:
	movl	_cpl,%eax
	movl	%eax,%edx
	notl	%eax
	andl	_ipending,%eax
	je	none_to_unpend
	jmp	unpend_v

/*
 * Handle return from interrupt after device handler finishes
 */
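/*
 * On entry the stack holds the handler's unit argument, then the cpl that
 * was in force when the interrupt was taken, then the interrupt frame
 * itself (which the code below then treats as a trap frame).
 */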
	ALIGN_TEXT
doreti:
	COUNT_EVENT(_intrcnt_spl, 1)
	addl	$4,%esp			# discard unit arg
	popl	%eax			# get previous priority
/*
 * Now interrupt frame is a trap frame!
 *
 * XXX - setting up the interrupt frame to be almost a stack frame is mostly
 * a waste of time.
 */
	movl	%eax,_cpl
	SHOW_CPL
	movl	%eax,%edx
	notl	%eax
	andl	_ipending,%eax
	jne	unpend_v
none_to_unpend:
	testl	%edx,%edx		# returning to zero priority?
	jne	1f			# nope, going to non-zero priority
	movl	_netisr,%eax
	testl	%eax,%eax		# check for softint s/traps
	jne	2f			# there are some
	jmp	test_resched		# XXX - schedule jumps better
	COUNT_EVENT(_intrcnt_spl, 2)	# XXX

	ALIGN_TEXT			# XXX
1:					# XXX
	COUNT_EVENT(_intrcnt_spl, 3)
	popl	%es
	popl	%ds
	popal
	addl	$8,%esp
	iret

#include "../net/netisr.h"

#define DONET(s, c, event) ; \
	.globl	c ; \
	btrl	$s,_netisr ; \
	jnc	1f ; \
	COUNT_EVENT(_intrcnt_spl, event) ; \
	call	c ; \
1:
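
/*
 * For example, DONET(NETISR_IP, _ipintr, 6) clears the NETISR_IP bit in
 * _netisr and, if the bit was set, calls _ipintr to run the IP input
 * routine.
 */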

	ALIGN_TEXT
2:
	COUNT_EVENT(_intrcnt_spl, 4)
/*
 * XXX - might need extra locking while testing reg copy of netisr, but
 * interrupt routines setting it would not cause any new problems (since we
 * don't loop, fresh bits will not be processed until the next doreti or spl0).
 */
	testl	$~((1 << NETISR_SCLK) | (1 << NETISR_AST)),%eax
	je	test_ASTs		# no net stuff, just temporary AST's
	FASTSPL_VARMASK(_netmask)
	DONET(NETISR_RAW, _rawintr, 5)
#ifdef INET
	DONET(NETISR_IP, _ipintr, 6)
#endif
#ifdef IMP
	DONET(NETISR_IMP, _impintr, 7)
#endif
#ifdef NS
	DONET(NETISR_NS, _nsintr, 8)
#endif
	FASTSPL($0)
test_ASTs:
	btrl	$NETISR_SCLK,_netisr
	jnc	test_resched
	COUNT_EVENT(_intrcnt_spl, 9)
	FASTSPL($SOFTCLOCKMASK)
/*
 * Back to an interrupt frame for a moment.
 */
	pushl	$0			# previous cpl (probably not used)
	pushl	$0x7f			# dummy unit number
	call	_softclock
	addl	$8,%esp			# discard dummies
	FASTSPL($0)
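
/*
 * If an AST is pending and we are returning to user mode, take it now by
 * calling _trap, so things like a pending reschedule happen before the
 * return to user code.
 */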
test_resched:
#ifdef notused1
	btrl	$NETISR_AST,_netisr
	jnc	2f
#endif
#ifdef notused2
	cmpl	$0,_want_resched
	je	2f
#endif
	cmpl	$0,_astpending		# XXX - put it back in netisr to
	je	2f			# reduce the number of tests
	testb	$SEL_RPL_MASK,TRAPF_CS_OFF(%esp)
					# to non-kernel (i.e., user)?
	je	2f			# nope, leave
	COUNT_EVENT(_intrcnt_spl, 10)
	movl	$0,_astpending
	call	_trap
2:
	COUNT_EVENT(_intrcnt_spl, 11)
	popl	%es
	popl	%ds
	popal
	addl	$8,%esp
	iret

/*
 * Interrupt priority mechanism
 *	-- soft splXX masks with group mechanism (cpl)
 *	-- h/w masks for currently active or unused interrupts (imen)
 *	-- ipending = active interrupts currently masked by cpl
 */

	GENSPL(bio, _biomask, 12)
	GENSPL(clock, $HIGHMASK, 13)	/* splclock == splhigh except for count */
	GENSPL(high, $HIGHMASK, 14)
	GENSPL(imp, _netmask, 15)	/* splimp == splnet except for count */
	GENSPL(net, _netmask, 16)
	GENSPL(softclock, $SOFTCLOCKMASK, 17)
	GENSPL(tty, _ttymask, 18)
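
/*
 * The group masks (_biomask, _ttymask, _netmask) start out zero above and
 * are presumably filled in when the drivers' interrupts are registered;
 * splhigh/splclock mask everything (HIGHMASK) and splsoftclock masks only
 * the soft clock bit (SOFTCLOCKMASK).
 */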

	.globl	_splnone
	.globl	_spl0
	ALIGN_TEXT
_splnone:
_spl0:
	COUNT_EVENT(_intrcnt_spl, 19)
in_spl0:
	movl	_cpl,%eax
	pushl	%eax			# save old priority
	testl	$(1 << NETISR_RAW) | (1 << NETISR_IP),_netisr
	je	over_net_stuff_for_spl0
	movl	_netmask,%eax		# mask off those network devices
	movl	%eax,_cpl		# set new priority
	SHOW_CPL
/*
 * XXX - what about other net intrs?
 */
	DONET(NETISR_RAW, _rawintr, 20)
#ifdef INET
	DONET(NETISR_IP, _ipintr, 21)
#endif
over_net_stuff_for_spl0:
	movl	$0,_cpl			# set new priority
	SHOW_CPL
	movl	_ipending,%eax
	testl	%eax,%eax
	jne	unpend_V
	popl	%eax			# return old priority
	ret
	.globl	_splx
	ALIGN_TEXT
_splx:
	COUNT_EVENT(_intrcnt_spl, 22)
	movl	4(%esp),%eax		# new priority
	testl	%eax,%eax
	je	in_spl0			# going to "zero level" is special
	COUNT_EVENT(_intrcnt_spl, 23)
	movl	_cpl,%edx		# save old priority
	movl	%eax,_cpl		# set new priority
	SHOW_CPL
	notl	%eax
	andl	_ipending,%eax
	jne	unpend_V_result_edx
	movl	%edx,%eax		# return old priority
	ret
	ALIGN_TEXT
unpend_V_result_edx:
	pushl	%edx
unpend_V:
	COUNT_EVENT(_intrcnt_spl, 24)
	bsfl	%eax,%eax
	btrl	%eax,_ipending
	jnc	unpend_V_next
	SHOW_IPENDING
	movl	Vresume(,%eax,4),%edx
	testl	%edx,%edx
	je	noresumeV
/*
 * We would prefer to call the intr handler directly here but that doesn't
 * work for badly behaved handlers that want the interrupt frame.  Also,
 * there's a problem determining the unit number.  We should change the
 * interface so that the unit number is not determined at config time.
 */
	jmp	*vec(,%eax,4)

	ALIGN_TEXT
/*
 * XXX - must be some fastintr, need to register those too.
 */
noresumeV:
#if NSIO > 0
	call	_softsio1
#endif
unpend_V_next:
	movl	_cpl,%eax
	notl	%eax
	andl	_ipending,%eax
	jne	unpend_V
	popl	%eax
	ret
#define	BUILD_VEC(irq_num) \
	ALIGN_TEXT ; \
vec/**/irq_num: ; \
	int	$ICU_OFFSET + (irq_num) ; \
	popl	%eax ; \
	ret
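
/*
 * Each vec stub re-raises its interrupt with a software "int" through the
 * vector the hardware would have used (ICU_OFFSET + irq), so a handler
 * that wants a real interrupt frame gets one (see the comment above the
 * "jmp *vec" in unpend_V).  When that interrupt returns, the popl/ret
 * finish the splx()/spl0() that started the replay, returning the saved
 * old priority in %eax.
 */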

	BUILD_VEC(0)
	BUILD_VEC(1)
	BUILD_VEC(2)
	BUILD_VEC(3)
	BUILD_VEC(4)
	BUILD_VEC(5)
	BUILD_VEC(6)
	BUILD_VEC(7)
	BUILD_VEC(8)
	BUILD_VEC(9)
	BUILD_VEC(10)
	BUILD_VEC(11)
	BUILD_VEC(12)
	BUILD_VEC(13)
	BUILD_VEC(14)
	BUILD_VEC(15)