Commit | Line | Data |
---|---|---|
90269d47 AM |
1 | /* ==== signal.c ============================================================ |
2 | * Copyright (c) 1993 by Chris Provenzano, proven@athena.mit.edu | |
3 | * | |
4 | * Description : Signal handling functions. | |
5 | * | |
6 | * 1.00 93/07/21 proven | |
7 | * -Started coding this file. | |
8 | */ | |
9 | ||
10 | #include <pthread/copyright.h> | |
11 | #include "pthread.h" | |
12 | #include <signal.h> | |
13 | ||
/*
 * Global for user-kernel lock, and blocked signals.
 *
 * kernel_lock    - nonzero while the user-level "kernel" is locked; signals
 *                  that arrive while it is held are deferred into
 *                  sig_to_process (see sig_handler_real) and replayed when
 *                  the lock is released (see sig_check_and_resume).
 * sig_to_process - set of deferred signals.  Code below assumes sigset_t is
 *                  a plain scalar (it is tested with `if (sig_to_process)`).
 * sig_count      - bumped on each asynchronous signal entry; when nonzero,
 *                  sig_handler restores an empty signal mask before
 *                  context switching.
 */
static volatile sigset_t sig_to_process;
static volatile int kernel_lock = 0;
static volatile int sig_count = 0;

/* Forward declarations. */
static void set_thread_timer();
void sig_prevent(void);
void sig_resume(void);
24 | ||
25 | /* ========================================================================== | |
26 | * context_switch() | |
27 | * | |
28 | * This routine saves the current state of the running thread gets | |
29 | * the next thread to run and restores it's state. To allow different | |
30 | * processors to work with this routine, I allow the machdep_restore_state() | |
31 | * to either return or have it return from machdep_save_state with a value | |
32 | * other than 0, this is for implementations which use setjmp/longjmp. | |
33 | */ | |
/* ==========================================================================
 * fd_kern_wait()
 *
 * Called from context_switch() when no thread is runnable.
 * NOTE(review): the name suggests this should block until a kernel fd
 * becomes ready, but it currently only delegates to a single poll --
 * confirm whether a blocking wait was intended here.
 */
void fd_kern_wait() {
	fd_kern_poll();
}
37 | ||
/*
 * Save the running thread's state, pick the next runnable thread, and
 * restore it.  machdep_save_state()/machdep_restore_state() work like
 * setjmp/longjmp: machdep_save_state() returns nonzero when control
 * comes back here via machdep_restore_state() in another thread.
 */
static void context_switch()
{
	struct pthread **current, *next;

	/* save state of current thread; nonzero return means we were just
	 * resumed -- the switch is already complete, so bail out. */
	if (machdep_save_state()) {
		return;
	}

	/* Fast path: a runnable thread is already queued.
	 * (Assignment in the condition is intentional.) */
	if (pthread_run = pthread_queue_deq(&pthread_current_queue)) {
		/* restore state of new current thread */
		machdep_restore_state();
		return;
	}
	/* Nothing queued: poll all the kernel fds to wake up blocked threads */
	fd_kern_poll();

context_switch_reschedule:;
	/*
	 * Go through the reschedule list once; this is the only place
	 * that goes through the queue without using the queue routines.
	 *
	 * But first delete the current queue.
	 */
	pthread_queue_init(&pthread_current_queue);
	current = &(pthread_link_list);
	while (*current) {
		switch((*current)->state) {
		case PS_RUNNING:
			/* Runnable: put it back on the current queue. */
			pthread_queue_enq(&pthread_current_queue, *current);
			current = &((*current)->pll);
			break;
		case PS_DEAD:
			/* Cleanup thread and unlink it from the global list.
			 * `current` is NOT advanced: *current now names the
			 * successor, which still needs to be examined. */
			next = (*current)->pll;
			pthread_cleanup(current);
			*current = next;
			break;
		default:
			/* Should be on a different queue. Ignore. */
			current = &((*current)->pll);
			break;
		}
	}

	/* Are there any threads at all?  If not, the process is done. */
	if (!pthread_link_list) {
		exit(0);
	}

	/* Try again after the rebuild. */
	if (pthread_run = pthread_queue_deq(&pthread_current_queue)) {
		/* restore state of new current thread */
		machdep_restore_state();
		return;
	}

	/*
	 * Okay, make sure the context switch timer is off, so we don't get any
	 * SIG_VTALRM signals while waiting for a fd to unblock.
	 */
	/* machdep_unset_thread_timer();
	sigdelset(&sig_to_process, SIGVTALRM); */

	/* We'll have to unlock the kernel then relock it, but that should be
	 * ok.  Wait for fd activity, then rescan the thread list. */
	fd_kern_wait();
	goto context_switch_reschedule;
}
105 | ||
106 | /* ========================================================================== | |
107 | * context_switch_done() | |
108 | * | |
109 | * This routine does all the things that are necessary after a context_switch() | |
110 | * calls the machdep_restore_state(). DO NOT put this in the context_switch() | |
111 | * routine because sometimes the machdep_restore_state() doesn't return | |
112 | * to context_switch() but instead ends up in machdep_thread_start() or | |
113 | * some such routine, which will need to call this routine and | |
114 | * sig_check_and_resume(). | |
115 | */ | |
116 | void context_switch_done() | |
117 | { | |
118 | sigdelset(&sig_to_process, SIGVTALRM); | |
119 | set_thread_timer(); | |
120 | } | |
121 | ||
122 | /* ========================================================================== | |
123 | * set_thread_timer() | |
124 | * | |
125 | * Assums kernel is locked. | |
126 | */ | |
127 | static void set_thread_timer() | |
128 | { | |
129 | static int last_sched_attr = SCHED_RR; | |
130 | ||
131 | switch (pthread_run->attr.sched_attr) { | |
132 | case SCHED_RR: | |
133 | machdep_set_thread_timer(&(pthread_run->machdep_data)); | |
134 | break; | |
135 | case SCHED_FIFO: | |
136 | if (last_sched_attr != SCHED_FIFO) { | |
137 | machdep_unset_thread_timer(); | |
138 | } | |
139 | break; | |
140 | case SCHED_IO: | |
141 | if (last_sched_attr != SCHED_IO) { | |
142 | machdep_set_thread_timer(&(pthread_run->machdep_data)); | |
143 | } | |
144 | break; | |
145 | default: | |
146 | machdep_set_thread_timer(&(pthread_run->machdep_data)); | |
147 | break; | |
148 | } | |
149 | } | |
150 | ||
/* ==========================================================================
 * sig_handler()
 *
 * Central dispatcher for (possibly deferred) signals.  sig == 0 means
 * "no new signal, just drain sig_to_process" (see sig_check_and_resume).
 *
 * Assumes the kernel is locked.
 */
static void sig_handler(int sig)
{
sig_handler_top:;

	switch(sig) {
	case 0:
		/* No direct signal; fall out to the drain loop below. */
		break;
	case SIGVTALRM:
		if (sig_count) {
			sigset_t sigall;

			sig_count = 0;

			/* Unblock all signals: we arrived here from a real
			 * handler, which runs with signals masked. */
			sigemptyset(&sigall);
			sigprocmask(SIG_SETMASK, &sigall, NULL);
		}
		context_switch();
		context_switch_done();
		break;
	case SIGALRM:
		/* if (sleep_wakeup()) {
			break;
		} */
		/* Do the default action; no threads were sleeping */
		/* fallthrough */
	default:
		/* Unexpected signal: fatal. */
		PANIC();
	}

	/* Determine if there are any other deferred signals; if so, replay
	 * the lowest-numbered one by jumping back to the dispatcher. */
	if (sig_to_process) {
		for (sig = 1; sig <= SIGMAX; sig++) {
			if (sigismember(&sig_to_process, sig)) {

				/* goto sig_handler_top */
				goto sig_handler_top;
			}
		}
	}
}
196 | ||
/* ==========================================================================
 * sig_handler_real()
 *
 * The handler actually installed via signal() (see sig_init).  If the
 * kernel is locked, the signal is deferred into sig_to_process and
 * replayed later by sig_check_and_resume(); otherwise the kernel is
 * locked and the signal handled immediately.
 *
 * On a multi-processor this would need to use a test-and-set instruction;
 * otherwise the following will work.  Statement order here is
 * deliberate and race-sensitive -- do not reorder.
 */
void sig_handler_real(int sig)
{
	if (kernel_lock) {
		/* Defer: someone is inside the kernel right now. */
		sigaddset(&sig_to_process, sig);
		return;
	}
	sig_prevent();
	/* Mark that we entered from a real (masked) signal context so
	 * sig_handler knows to restore the signal mask. */
	sig_count++;
	sig_handler(sig);
	sig_resume();
}
214 | ||
215 | /* ========================================================================== | |
216 | * sig_handler_fake() | |
217 | */ | |
218 | void sig_handler_fake(int sig) | |
219 | { | |
220 | if (kernel_lock) { | |
221 | /* Currently this should be impossible */ | |
222 | PANIC(); | |
223 | } | |
224 | sig_prevent(); | |
225 | sig_handler(sig); | |
226 | sig_resume(); | |
227 | } | |
228 | ||
229 | /* ========================================================================== | |
230 | * reschedule() | |
231 | * | |
232 | * This routine assumes that the caller is the current pthread, pthread_run | |
233 | * and that it has a lock on itself and that it wants to reschedule itself. | |
234 | */ | |
235 | void reschedule(enum pthread_state state) | |
236 | { | |
237 | semaphore *plock; | |
238 | ||
239 | if (kernel_lock) { | |
240 | /* Currently this should be impossible */ | |
241 | PANIC(); | |
242 | } | |
243 | sig_prevent(); | |
244 | pthread_run->state = state; | |
245 | SEMAPHORE_RESET((plock = &(pthread_run->lock))); | |
246 | sig_handler(SIGVTALRM); | |
247 | sig_resume(); | |
248 | } | |
249 | ||
250 | /* ========================================================================== | |
251 | * sig_prevent() | |
252 | */ | |
253 | void sig_prevent(void) | |
254 | { | |
255 | kernel_lock++; | |
256 | } | |
257 | ||
258 | /* ========================================================================== | |
259 | * sig_resume() | |
260 | */ | |
261 | void sig_resume() | |
262 | { | |
263 | kernel_lock--; | |
264 | } | |
265 | ||
/* ==========================================================================
 * sig_check_and_resume()
 *
 * Drop one level of the kernel lock; if that fully unlocks the kernel and
 * signals were deferred meanwhile, re-take the lock and drain them via
 * sig_handler(0), repeating until no deferred signals remain.  The
 * decrement-in-condition / re-increment dance is deliberate -- do not
 * restructure.
 */
void sig_check_and_resume()
{
	/* Some routine name that is yet to be determined. */

	/* Only bother if we are truly unlocking the kernel */
	while (!(--kernel_lock)) {

		/* Assume sigset_t is not a struct or union */
		if (sig_to_process) {
			/* Relock and drain; the next loop iteration drops
			 * the lock again and re-checks. */
			kernel_lock++;
			sig_handler(0);
		} else {
			break;
		}
	}
}
285 | ||
286 | /* ========================================================================== | |
287 | * sig_init() | |
288 | * | |
289 | * SIGVTALRM (NOT POSIX) needed for thread timeslice timeouts. | |
290 | * Since it's not POSIX I will replace it with a | |
291 | * virtual timer for threads. | |
292 | * SIGALRM (IS POSIX) so some special handling will be | |
293 | * necessary to fake SIGALRM signals | |
294 | */ | |
295 | void sig_init(void) | |
296 | { | |
297 | int sig_to_init[] = { SIGVTALRM, SIGALRM, 0 }; | |
298 | int i; | |
299 | ||
300 | /* Initialize only the necessary signals */ | |
301 | ||
302 | for (i = 0; sig_to_init[i]; i++) { | |
303 | if (signal(sig_to_init[i], sig_handler_real)) { | |
304 | PANIC(); | |
305 | } | |
306 | } | |
307 | } | |
308 |