From 90269d47546a1adf200aaac83ae20caf6ef9975f Mon Sep 17 00:00:00 2001 From: Andrew Moore Date: Sun, 19 Dec 1993 06:21:33 +0000 Subject: [PATCH] added Chris Provenzano's light-weight threading library. from libpthread/README: This pthread package is/will be based on the POSIX1003.4a Draft 7 pthread standard, and Frank Mullers paper on signal handelling presented at the Winter 93 USENIX conference. It is currently being designed and written by me, Chris Provenzano. All bug, comments, and questions can be sent me at either proven@athena.mit.edu or proven@sun-lamp.cs.berkeley.edu Please don't send questions, bugs or patches to any of the [FreeBSD] mailing lists. Copyright (c) 1993 Chris Provenzano. All rights reserved. This product includes software developed by the Univeristy of California, Berkeley and its contributors. --- lib/Makefile | 2 +- lib/libpthread/Makefile | 21 + lib/libpthread/README | 86 +++++ lib/libpthread/arch/i386/Makefile.inc | 18 + lib/libpthread/arch/i386/machdep.c | 103 +++++ lib/libpthread/arch/i386/machdep.h | 31 ++ lib/libpthread/arch/i386/syscall.S | 150 ++++++++ lib/libpthread/include/Makefile.inc | 25 ++ lib/libpthread/include/cond.h | 61 +++ lib/libpthread/include/copyright.h | 32 ++ lib/libpthread/include/engine.h | 59 +++ lib/libpthread/include/fd.h | 76 ++++ lib/libpthread/include/fd_pipe.h | 24 ++ lib/libpthread/include/kernel.h | 20 + lib/libpthread/include/mutex.h | 64 ++++ lib/libpthread/include/posix.h | 28 ++ lib/libpthread/include/pthread.h | 93 +++++ lib/libpthread/include/pthread_attr.h | 45 +++ lib/libpthread/include/queue.h | 39 ++ lib/libpthread/include/util.h | 51 +++ lib/libpthread/pthreads/Makefile.inc | 12 + lib/libpthread/pthreads/cond.c | 177 +++++++++ lib/libpthread/pthreads/fd.c | 482 +++++++++++++++++++++++ lib/libpthread/pthreads/fd_kern.c | 510 +++++++++++++++++++++++++ lib/libpthread/pthreads/fd_pipe.c | 237 ++++++++++++ lib/libpthread/pthreads/file.c | 56 +++ lib/libpthread/pthreads/globals.c | 36 ++ lib/libpthread/pthreads/malloc.c | 363 ++++++++++++++++++ lib/libpthread/pthreads/mutex.c | 196 ++++++++++ lib/libpthread/pthreads/pthread.c | 168 ++++++++ lib/libpthread/pthreads/pthread_attr.c | 69 ++++ lib/libpthread/pthreads/queue.c | 92 +++++ lib/libpthread/pthreads/signal.c | 308 +++++++++++++++ 33 files changed, 3733 insertions(+), 1 deletion(-) create mode 100644 lib/libpthread/Makefile create mode 100644 lib/libpthread/README create mode 100644 lib/libpthread/arch/i386/Makefile.inc create mode 100644 lib/libpthread/arch/i386/machdep.c create mode 100644 lib/libpthread/arch/i386/machdep.h create mode 100644 lib/libpthread/arch/i386/syscall.S create mode 100644 lib/libpthread/include/Makefile.inc create mode 100644 lib/libpthread/include/cond.h create mode 100644 lib/libpthread/include/copyright.h create mode 100644 lib/libpthread/include/engine.h create mode 100644 lib/libpthread/include/fd.h create mode 100644 lib/libpthread/include/fd_pipe.h create mode 100644 lib/libpthread/include/kernel.h create mode 100644 lib/libpthread/include/mutex.h create mode 100644 lib/libpthread/include/posix.h create mode 100644 lib/libpthread/include/pthread.h create mode 100644 lib/libpthread/include/pthread_attr.h create mode 100644 lib/libpthread/include/queue.h create mode 100644 lib/libpthread/include/util.h create mode 100644 lib/libpthread/pthreads/Makefile.inc create mode 100644 lib/libpthread/pthreads/cond.c create mode 100644 lib/libpthread/pthreads/fd.c create mode 100644 lib/libpthread/pthreads/fd_kern.c create mode 100644 
lib/libpthread/pthreads/fd_pipe.c
 create mode 100644 lib/libpthread/pthreads/file.c
 create mode 100644 lib/libpthread/pthreads/globals.c
 create mode 100644 lib/libpthread/pthreads/malloc.c
 create mode 100644 lib/libpthread/pthreads/mutex.c
 create mode 100644 lib/libpthread/pthreads/pthread.c
 create mode 100644 lib/libpthread/pthreads/pthread_attr.c
 create mode 100644 lib/libpthread/pthreads/queue.c
 create mode 100644 lib/libpthread/pthreads/signal.c

diff --git a/lib/Makefile b/lib/Makefile
index 0605427ceb..d1681d19b2 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -1,6 +1,6 @@
 #	@(#)Makefile	5.25.1.1 (Berkeley) 5/7/91
 
-SUBDIR= csu.${MACHINE} libc libcurses libm \
+SUBDIR= csu.${MACHINE} libc libcurses libm libpthread \
 	libresolv librpcsvc libtelnet libterm libutil liby
 
 .if exists(libcrypt)
diff --git a/lib/libpthread/Makefile b/lib/libpthread/Makefile
new file mode 100644
index 0000000000..a6a599f863
--- /dev/null
+++ b/lib/libpthread/Makefile
@@ -0,0 +1,21 @@
+#	from: @(#)Makefile	5.2 (Berkeley) 3/5/91
+
+LIB=pthread
+INSTALL_PIC_ARCHIVE= no
+CPPFLAGS+= -g -DPTHREAD_KERNEL -I. -I${.CURDIR}/include -I${.CURDIR}/arch/${MACHINE}
+CFLAGS+= ${CPPFLAGS}
+
+.include "${.CURDIR}/arch/${MACHINE}/Makefile.inc"
+.include "${.CURDIR}/pthreads/Makefile.inc"
+
+all beforedepend: pthread
+
+CLEANFILES+=pthread
+
+pthread:
+	if [ ! -e pthread ]; then \
+		ln -s ${.CURDIR}/include pthread; \
+	fi
+
+.include <bsd.lib.mk>
diff --git a/lib/libpthread/README b/lib/libpthread/README
new file mode 100644
index 0000000000..7d22223b3f
--- /dev/null
+++ b/lib/libpthread/README
@@ -0,0 +1,86 @@
+This pthread package is/will be based on the POSIX 1003.4a Draft 7 pthread
+standard, and Frank Muller's paper on signal handling presented
+at the Winter '93 USENIX conference.
+
+It is currently being designed and written by me, Chris Provenzano.
+All bugs, comments, and questions can be sent to me at either
+proven@athena.mit.edu or proven@sun-lamp.cs.berkeley.edu.
+Please don't send questions, bugs or patches to any of the FreeBSD mailing lists.
+
+Thanks go to John Carr (jfc@mit.edu) for porting this to the IBM/RT,
+and for his bug reports and fixes.
+
+PORTING
+One of the goals of this user-space implementation of pthreads is that it
+be portable.  I have minimized the amount of assembler code necessary,
+but some is unavoidable.
+
+If you want to port it to another platform, here are a few basic hints.
+
+There are currently three files you'll have to create for your
+architecture: machdep.h, machdep.c and syscall.S.
+The first two are necessary to get the context-switch section of
+the pthread package running; the third is for all the syscalls.
+
+To do an initial port, create an appropriate machdep.h and machdep.c,
+and define PTHREAD_INITIAL_PORT in the Makefile.
+
+Comment out references to the stdio package.
+
+INCLUDE FILES AND PORTING
+To keep this package portable, some basic rules on include
+files must be followed.
+
+pthread.h should be included first (if it is to be included).
+machdep.h should define size_t if the system doesn't define it already.
+
+stdio.h should not be included.  It is included in pthread.h.
+
+posix.h should be included last.  This file is used to correct
+non-POSIX features after everything else has been defined.
+
+INTERNAL LOCKING
+To prevent deadlocks the following rules were used for locks.
+
+1. Local locks for mutex queues and other like things are only locked
+   by running threads; at NO time will a local lock be held by
+   a thread in a non-running state.
+2. Only threads that are in a run state can attempt to lock another thread;
+   this way, we can assume that the lock will be released shortly, and don't
+   have to unlock the local lock.
+3. The only time a thread will hold a pthread->lock and not be in a run
+   state is when it is in the reschedule routine.
+4. The reschedule routine assumes all local locks have been released,
+   there is a lock on the currently running thread (pthread_run),
+   and that this thread is being rescheduled to a non-running state.
+   It is safe to unlock the currently running thread's lock after it
+   has been rescheduled.
+5. The reschedule routine locks the kernel, sets the state of the currently
+   running thread, unlocks the currently running thread, and calls the
+   context-switch routines.
+6. The kernel lock is used only ...
+
+
+7. The order of locking is:
+
+   1. local locks
+   2. pthread->lock	/* Assumes it will get it soon */
+   3. pthread_run->lock	/* Assumes it will get it soon, but must release 2 */
+   4. kernel lock	/* Currently assumes it will ALWAYS get it. */
+
+8. The kernel lock will be changed to a spin lock for systems that
+already support kernel threads; this way we can multiplex threads onto
+kernel threads.
+9. There are points where the kernel is locked and it needs to get
+either a local lock or a pthread lock; if at these points the code
+fails to get the lock, the kernel gives up and sets a flag which will
+be checked at a later point.
+10. Interrupts are disabled while the kernel is locked; the interrupt
+mask must be checked afterwards or cleared in some way after interrupts
+have been re-enabled.  This allows back-to-back interrupts but should always
+avoid missing one.
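
A minimal sketch of how rules 1-7 combine in practice (illustrative only, not
part of the library; "obj" stands in for a hypothetical blocking object such
as a mutex or condition variable, with a local semaphore "lock" and a wait
"queue"):

	while (SEMAPHORE_TEST_AND_SET(&obj->lock))		/* 1. local lock */
		pthread_yield();
	while (SEMAPHORE_TEST_AND_SET(&pthread_run->lock))	/* 3. running thread */
		pthread_yield();
	pthread_queue_enq(&obj->queue, pthread_run);
	SEMAPHORE_RESET(&obj->lock);			/* drop the local lock */
	reschedule(PS_MUTEX_WAIT);	/* 4. locks the kernel, marks the thread
					 * blocked, then unlocks pthread_run */

pthread_cond_wait() in pthreads/cond.c and the fd locking code in
pthreads/fd.c follow this pattern.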
+
+Copyright (c) 1993 Chris Provenzano. All rights reserved.
+
+This product includes software developed by the University of California,
+Berkeley and its contributors.
diff --git a/lib/libpthread/arch/i386/Makefile.inc b/lib/libpthread/arch/i386/Makefile.inc
new file mode 100644
index 0000000000..925a351a53
--- /dev/null
+++ b/lib/libpthread/arch/i386/Makefile.inc
@@ -0,0 +1,18 @@
+# Machine dependent sources
+.PATH: ${.CURDIR}/arch/${MACHINE}
+
+SRCS+= machdep.c syscall.S
+
+# Necessary until someone puts gas-2.1.1 into the release.
+syscall.o: syscall.S
+	cpp ${CPPFLAGS} ${.CURDIR}/arch/${MACHINE}/syscall.S | as
+	mv a.out syscall.o
+
+syscall.so: syscall.S
+	cpp -DPIC ${CPPFLAGS} ${.CURDIR}/arch/${MACHINE}/syscall.S | as
+	mv a.out syscall.so
+
+syscall.po: syscall.S
+	cpp -DPROFILED ${CPPFLAGS} ${.CURDIR}/arch/${MACHINE}/syscall.S | as
+	mv a.out syscall.po
+
diff --git a/lib/libpthread/arch/i386/machdep.c b/lib/libpthread/arch/i386/machdep.c
new file mode 100644
index 0000000000..71779a78bd
--- /dev/null
+++ b/lib/libpthread/arch/i386/machdep.c
@@ -0,0 +1,103 @@
+/* ==== machdep.c ============================================================
+ * Copyright (c) 1993 Chris Provenzano, proven@athena.mit.edu
+ *
+ * Description : Machine dependent functions for FreeBSD on i386
+ *
+ * 1.00 93/08/04 proven
+ *	-Started coding this file.
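+ *
+ * The routines below save and restore a thread's context with _setjmp() and
+ * _longjmp(), arm each thread's time slice with an ITIMER_VIRTUAL timer, and
+ * machdep_pthread_create() builds a jmp_buf by hand so that the first switch
+ * to a new thread "returns" into machdep_pthread_start() on its own stack.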
+ */ + +#include +#include "pthread.h" + +/* ========================================================================== + * machdep_save_state() + */ +int machdep_save_state(void) +{ + return(_setjmp(pthread_run->machdep_data.machdep_state)); +} + +/* ========================================================================== + * machdep_restore_state() + */ +void machdep_restore_state(void) +{ + _longjmp(pthread_run->machdep_data.machdep_state, 1); +} + +/* ========================================================================== + * machdep_set_thread_timer() + */ +void machdep_set_thread_timer(struct machdep_pthread *machdep_pthread) +{ + if (setitimer(ITIMER_VIRTUAL, &(machdep_pthread->machdep_timer), NULL)) { + PANIC(); + } +} + +/* ========================================================================== + * machdep_unset_thread_timer() + */ +void machdep_unset_thread_timer(struct machdep_pthread *machdep_pthread) +{ + struct itimerval zeroval = { { 0, 0 }, { 0, 0} }; + + if (setitimer(ITIMER_VIRTUAL, &zeroval, NULL)) { + PANIC(); + } +} + +/* ========================================================================== + * machdep_pthread_cleanup() + */ +void *machdep_pthread_cleanup(struct machdep_pthread *machdep_pthread) +{ + return(machdep_pthread->machdep_stack); +} + +/* ========================================================================== + * machdep_pthread_start() + */ +void machdep_pthread_start(void) +{ + context_switch_done(); + sig_check_and_resume(); + + /* Run current threads start routine with argument */ + pthread_exit(pthread_run->machdep_data.start_routine + (pthread_run->machdep_data.start_argument)); + + /* should never reach here */ + PANIC(); +} + +/* ========================================================================== + * machdep_pthread_create() + */ +void machdep_pthread_create(struct machdep_pthread *machdep_pthread, + void *(* start_routine)(), void *start_argument, long stack_size, + void *stack_start, long nsec) +{ + machdep_pthread->machdep_stack = stack_start; + + machdep_pthread->start_routine = start_routine; + machdep_pthread->start_argument = start_argument; + + machdep_pthread->machdep_timer.it_value.tv_sec = 0; + machdep_pthread->machdep_timer.it_interval.tv_sec = 0; + machdep_pthread->machdep_timer.it_interval.tv_usec = 0; + machdep_pthread->machdep_timer.it_value.tv_usec = nsec / 1000; + + _setjmp(machdep_pthread->machdep_state); + /* + * Set up new stact frame so that it looks like it + * returned from a longjmp() to the beginning of + * machdep_pthread_start(). + */ + machdep_pthread->machdep_state[0] = (int)machdep_pthread_start; + + /* Stack starts high and builds down. */ + machdep_pthread->machdep_state[2] = + (int)machdep_pthread->machdep_stack + stack_size; +} diff --git a/lib/libpthread/arch/i386/machdep.h b/lib/libpthread/arch/i386/machdep.h new file mode 100644 index 0000000000..80b5df5b81 --- /dev/null +++ b/lib/libpthread/arch/i386/machdep.h @@ -0,0 +1,31 @@ +/* ==== machdep.h ============================================================ + * Copyright (c) 1993 Chris Provenzano, proven@athena.mit.edu + * + */ + +#include + +/* + * The first machine dependent functions are the SEMAPHORES + * needing the test and set instruction. 
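+ *
+ * SEMAPHORE_TEST_AND_SET() atomically exchanges SEMAPHORE_SET into the lock
+ * word and evaluates to the previous value, so callers spin with
+ *	while (SEMAPHORE_TEST_AND_SET(lock)) pthread_yield();
+ * and release the lock again with SEMAPHORE_RESET(lock).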
+ */ +#define SEMAPHORE_CLEAR 0 +#define SEMAPHORE_SET 1 + +#define SEMAPHORE_TEST_AND_SET(lock) \ +({ \ +volatile long temp = SEMAPHORE_SET; \ + \ +__asm__("xchgl %0,(%2)" \ + :"=r" (temp) \ + :"0" (temp),"r" (lock)); \ +temp; \ +}) + +#define SEMAPHORE_RESET(lock) *lock = SEMAPHORE_CLEAR + +/* + * Minimum stack size + */ +#define PTHREAD_STACK_MIN 1024 + diff --git a/lib/libpthread/arch/i386/syscall.S b/lib/libpthread/arch/i386/syscall.S new file mode 100644 index 0000000000..141753e104 --- /dev/null +++ b/lib/libpthread/arch/i386/syscall.S @@ -0,0 +1,150 @@ +/* ==== syscall.S ============================================================ + * Copyright (c) 1990 The Regents of the University of California. + * Copyright (c) 1993 Chris Provenzano, proven@mit.edu + * All rights reserved. + * + * This code is derived from software contributed to Berkeley by + * William Jolitz. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * Description : Machine dependent syscalls for i386/i486/i586 + * + * 1.00 93/08/26 proven + * -Started coding this file. + * + * 1.01 93/11/13 proven + * -The functions readv() and writev() added. + */ + +#if defined(SYSLIBC_SCCS) && !defined(lint) + .asciz "@(#)syscall.s 5.1 (Berkeley) 4/23/90" +#endif /* SYSLIBC_SCCS and not lint */ + +#include +#include +#include + + +#define SYSCALL(x) \ + .globl _machdep_sys_/**/x; \ + \ +_machdep_sys_/**/x:; \ + \ + lea SYS_/**/x, %eax; \ + .byte 0x9a; .long 0; .word 7; \ + jb 1b; \ + ret; + +/* + * Initial asm stuff for all functions. + */ + .text + .align 2 + + +/* ========================================================================== + * error code for all syscalls. The error value is returned as the negative + * of the errno value. 
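+ *
+ * Each SYSCALL(x) stub above loads SYS_x into %eax, traps into the kernel
+ * with a far call through the system call gate (the .byte 0x9a sequence),
+ * and on failure (carry set) branches back to the "1:" label below, which
+ * negates %eax before returning to the caller.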
+ */ + +1: + neg %eax + ret + +/* ========================================================================== + * machdep_sys_write() + */ +SYSCALL(write) + +/* ========================================================================== + * machdep_sys_read() + */ +SYSCALL(read) + +/* ========================================================================== + * machdep_sys_open() + */ +SYSCALL(open) + +/* ========================================================================== + * machdep_sys_close() + */ +SYSCALL(close) + +/* ========================================================================== + * machdep_sys_fcntl() + */ +SYSCALL(fcntl) + +/* ======================================================================= */ +/* ========================================================================== + * machdep_sys_socket() + */ +SYSCALL(socket) + +/* ========================================================================== + * machdep_sys_bind() + */ +SYSCALL(bind) + +/* ========================================================================== + * machdep_sys_connect() + */ +SYSCALL(connect) + +/* ========================================================================== + * machdep_sys_accept() + */ +SYSCALL(accept) + +/* ========================================================================== + * machdep_sys_listen() + */ +SYSCALL(listen) + +/* ========================================================================== + * machdep_sys_getsockopt() + */ +SYSCALL(getsockopt) + +/* ========================================================================== + * machdep_sys_readv() + */ +SYSCALL(readv) + +/* ========================================================================== + * machdep_sys_writev() + */ +SYSCALL(writev) + +/* ========================================================================== + * machdep_sys_getpeername() + */ +SYSCALL(getpeername) diff --git a/lib/libpthread/include/Makefile.inc b/lib/libpthread/include/Makefile.inc new file mode 100644 index 0000000000..3317d56515 --- /dev/null +++ b/lib/libpthread/include/Makefile.inc @@ -0,0 +1,25 @@ +# from: @(#)Makefile 5.45.1.1 (Berkeley) 5/6/91 + +# Doing a make install builds /usr/include/pthread +# +# The ``rm -rf''s used below are safe because rm doesn't follow symbolic +# links. + + +FILES= cond.h copyright.h fd.h fd_pipe.h kernel.h mutex.h posix.h \ + pthread.h pthread_attr.h queue.h util.h engine.h + +realinstall: + if [ ! -d ${DESTDIR}/usr/include/pthread ]; then \ + mkdir ${DESTDIR}/usr/include/pthread; \ + fi + @echo installing ${FILES} + @-for i in ${FILES}; do \ + cmp -s $$i ${DESTDIR}/usr/include/pthread/$$i || \ + install -c -m 644 $$i ${DESTDIR}/usr/include/$$i; \ + done + rm -rf ${DESTDIR}/usr/include/pthread.h + ln -s /usr/include/pthread/pthread.h ${DESTDIR}/usr/include/pthread.h + @chown -R ${BINOWN}:${BINGRP} ${DESTDIR}/usr/include/pthread + @chmod -R a-w ${DESTDIR}/usr/include/pthread + diff --git a/lib/libpthread/include/cond.h b/lib/libpthread/include/cond.h new file mode 100644 index 0000000000..f267b91444 --- /dev/null +++ b/lib/libpthread/include/cond.h @@ -0,0 +1,61 @@ +/* ==== cond.h ============================================================ + * Copyright (c) 1993 by Chris Provenzano, proven@athena.mit.edu + * + * Description : mutex header. + * + * 1.00 93/10/30 proven + * -Started coding this file. 
+ */ + +/* + * New cond structures + */ +enum pthread_cond_type { + COND_TYPE_FAST, + COND_TYPE_STATIC_FAST, + COND_TYPE_METERED, + COND_TYPE_DEBUG, /* Debug conds will have lots of options */ + COND_TYPE_MAX +}; + +typedef struct pthread_cond { + enum pthread_cond_type c_type; + struct pthread_queue c_queue; + semaphore c_lock; + void * c_data; + long c_flags; +} pthread_cond_t; + +typedef struct pthread_cond_attr { + enum pthread_cond_type c_type; + long c_flags; +} pthread_condattr_t; + +/* + * Flags for conds. + */ +#define COND_FLAGS_PRIVATE 0x01 +#define COND_FLAGS_INITED 0x02 +#define COND_FLAGS_BUSY 0x04 + +/* + * Static cond initialization values. + */ +#define PTHREAD_COND_INITIALIZER \ +{ COND_TYPE_STATIC_FAST, PTHREAD_QUEUE_INITIALIZER, \ + NULL, SEMAPHORE_CLEAR, COND_FLAGS_INITED } + +/* + * New functions + */ + +__BEGIN_DECLS + +int pthread_cond_init __P((pthread_cond_t *, pthread_condattr_t *)); +int pthread_cond_wait __P((pthread_cond_t *, pthread_mutex_t *)); +int pthread_cond_signal __P((pthread_cond_t *)); +int pthread_cond_broadcast __P((pthread_cond_t *)); +int pthread_cond_destroy __P((pthread_cond_t *)); + +__END_DECLS + diff --git a/lib/libpthread/include/copyright.h b/lib/libpthread/include/copyright.h new file mode 100644 index 0000000000..887676b888 --- /dev/null +++ b/lib/libpthread/include/copyright.h @@ -0,0 +1,32 @@ +/* ==== copyright.h ========================================================== + * Copyright (c) 1993 by Chris Provenzano and contributors, proven@mit.edu + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by Chris Provenzano, + * the University of California, Berkeley, and contributors. + * 4. Neither the name of Chris Provenzano, the University, nor the names of + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY CHRIS PROVENZANO AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL CHRIS PROVENZANO, THE REGENTS OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; + * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR + * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF + * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ diff --git a/lib/libpthread/include/engine.h b/lib/libpthread/include/engine.h new file mode 100644 index 0000000000..a32a4a60e6 --- /dev/null +++ b/lib/libpthread/include/engine.h @@ -0,0 +1,59 @@ +/* ==== engine.h ============================================================ + * Copyright (c) 1993 Chris Provenzano, proven@athena.mit.edu + * + */ + +#include +#include +#include +#include + +#if defined(PTHREAD_KERNEL) +#include "machdep.h" +#endif + +/* + * New types + */ +typedef long semaphore; + +#define SIGMAX 31 + +/* + * New Strutures + */ +struct machdep_pthread { + void *(*start_routine)(void *); + void *start_argument; + void *machdep_stack; + struct itimerval machdep_timer; + jmp_buf machdep_state; +}; + +/* + * Static machdep_pthread initialization values. + * For initial thread only. + */ +#define MACHDEP_PTHREAD_INIT \ +{ NULL, NULL, NULL, { { 0, 0 }, { 0, 0 } }, 0 } + +/* + * Some fd flag defines that are necessary to distinguish between posix + * behavior and bsd4.3 behavior. + */ +#define __FD_NONBLOCK O_NONBLOCK + +/* + * New functions + */ + +__BEGIN_DECLS + +#if defined(PTHREAD_KERNEL) + +int semaphore_text_and_set __P((semaphore *)); +int machdep_save_state __P((void)); + +#endif + +__END_DECLS diff --git a/lib/libpthread/include/fd.h b/lib/libpthread/include/fd.h new file mode 100644 index 0000000000..e4dd63be57 --- /dev/null +++ b/lib/libpthread/include/fd.h @@ -0,0 +1,76 @@ +/* ==== fd.h ============================================================ + * Copyright (c) 1993 by Chris Provenzano, proven@athena.mit.edu + * + * Description : Basic fd header. + * + * 1.00 93/08/14 proven + * -Started coding this file. + * + * 1.01 93/11/13 proven + * -The functions readv() and writev() added + */ + +/* + * New pthread types. + */ +enum fd_type { + FD_NT, /* Not tested */ + FD_NIU, /* Known to be not in use */ + FD_HALF_DUPLEX, /* Files, and seeking devices */ + FD_FULL_DUPLEX /* pipes, sockets, drivers, ... */ +}; + + +#define FD_READ 0x1 +#define FD_WRITE 0x2 +#define FD_RDWR (FD_READ | FD_WRITE) + +struct fd_ops { + int (*write)(); + int (*read)(); + int (*close)(); + int (*fcntl)(); + + int (*writev)(); + int (*readv)(); +}; + +union fd_data { + void *ptr; + int i; +}; + +struct fd_table_entry { + struct pthread_queue r_queue; + struct pthread_queue w_queue; + struct pthread *r_owner; + struct pthread *w_owner; + semaphore lock; + struct fd_table_entry *next; + struct fd_ops *ops; + enum fd_type type; + int lockcount; /* Count for FILE locks */ + int count; + + /* data that needs to be passed to the type dependent fd */ + int flags; + union fd_data fd; +}; + +/* + * Important data structure + */ +extern struct fd_table_entry *fd_table[]; +extern int dtablesize; + +/* + * New functions + */ + +__BEGIN_DECLS + +#if defined(PTHREAD_KERNEL) + +#endif + +__END_DECLS diff --git a/lib/libpthread/include/fd_pipe.h b/lib/libpthread/include/fd_pipe.h new file mode 100644 index 0000000000..1ca1453640 --- /dev/null +++ b/lib/libpthread/include/fd_pipe.h @@ -0,0 +1,24 @@ +/* ==== fd_pipe.h ============================================================ + * Copyright (c) 1993 by Chris Provenzano, proven@athena.mit.edu + * + * Description : The new fast ITC pipe header. + * + * 1.00 93/08/14 proven + * -Started coding this file. 
+ */ + +struct __pipe { + semaphore lock; + char * buf; + int size; + int flags; + int count; + int offset; + struct pthread * wait; + char * wait_buf; + size_t wait_size; +}; + +#define RD_CLOSED 0x01 +#define WR_CLOSED 0x02 + diff --git a/lib/libpthread/include/kernel.h b/lib/libpthread/include/kernel.h new file mode 100644 index 0000000000..5659b29543 --- /dev/null +++ b/lib/libpthread/include/kernel.h @@ -0,0 +1,20 @@ +/* ==== kernel.h ============================================================ + * Copyright (c) 1993 by Chris Provenzano, proven@athena.mit.edu + * + * Description : mutex header. + * + * 1.00 93/07/22 proven + * -Started coding this file. + */ + +/* + * Defines only for the pthread user kernel. + */ +#if defined(PTHREAD_KERNEL) + +#define PANIC() abort() + +/* Time each rr thread gets */ +#define PTHREAD_RR_TIMEOUT 100000000 + +#endif diff --git a/lib/libpthread/include/mutex.h b/lib/libpthread/include/mutex.h new file mode 100644 index 0000000000..a7c729b533 --- /dev/null +++ b/lib/libpthread/include/mutex.h @@ -0,0 +1,64 @@ +/* ==== mutex.h ============================================================ + * Copyright (c) 1993 by Chris Provenzano, proven@athena.mit.edu + * + * Description : mutex header. + * + * 1.00 93/07/20 proven + * -Started coding this file. + */ + +#include +/* + * New mutex structures + */ +enum pthread_mutex_type { + MUTEX_TYPE_FAST, + MUTEX_TYPE_STATIC_FAST, + MUTEX_TYPE_RECURSIVE, + MUTEX_TYPE_METERED, + MUTEX_TYPE_DEBUG, /* Debug mutexes will have lots of options */ + MUTEX_TYPE_MAX +}; + +typedef struct pthread_mutex { + enum pthread_mutex_type m_type; + struct pthread_queue m_queue; + struct pthread *m_owner; + semaphore m_lock; + void *m_data; + long m_flags; +} pthread_mutex_t; + +typedef struct pthread_mutex_attr { + enum pthread_mutex_type m_type; + long m_flags; +} pthread_mutexattr_t; + +/* + * Flags for mutexes. + */ +#define MUTEX_FLAGS_PRIVATE 0x01 +#define MUTEX_FLAGS_INITED 0x02 +#define MUTEX_FLAGS_BUSY 0x04 + +/* + * Static mutex initialization values. + */ +#define PTHREAD_MUTEX_INITIALIZER \ +{ MUTEX_TYPE_STATIC_FAST, PTHREAD_QUEUE_INITIALIZER, \ + NULL, SEMAPHORE_CLEAR, NULL, MUTEX_FLAGS_INITED } + +/* + * New functions + */ + +__BEGIN_DECLS + +int pthread_mutex_init __P((pthread_mutex_t *, pthread_mutexattr_t *)); +int pthread_mutex_lock __P((pthread_mutex_t *)); +int pthread_mutex_unlock __P((pthread_mutex_t *)); +int pthread_mutex_trylock __P((pthread_mutex_t *)); +int pthread_mutex_destroy __P((pthread_mutex_t *)); + +__END_DECLS + diff --git a/lib/libpthread/include/posix.h b/lib/libpthread/include/posix.h new file mode 100644 index 0000000000..9c791be890 --- /dev/null +++ b/lib/libpthread/include/posix.h @@ -0,0 +1,28 @@ +/* ==== posix.h ============================================================ + * Copyright (c) 1993 by Chris Provenzano, proven@athena.mit.edu + * + * Description : Convert a system to a more or less POSIX system. + * + * 1.00 93/07/20 proven + * -Started coding this file. 
+ */ + +#include + +#ifndef O_NONBLOCK +#ifdef FNDELAY +#define O_NONBLOCK FNDELAY +#endif +#endif + +#ifndef O_ACCMODE +#define O_ACCMODE (O_RDONLY|O_RDWR|O_WRONLY) +#endif + +#ifndef S_ISREG +#define S_ISREG(x) ((x & S_IFMT) == S_IFREG) +#endif + +#ifndef __ibm032__ +#include +#endif diff --git a/lib/libpthread/include/pthread.h b/lib/libpthread/include/pthread.h new file mode 100644 index 0000000000..a6b5e9107b --- /dev/null +++ b/lib/libpthread/include/pthread.h @@ -0,0 +1,93 @@ +/* ==== pthread.h ============================================================ + * Copyright (c) 1993 by Chris Provenzano, proven@athena.mit.edu + * + * Description : Basic pthread header. + * + * 1.00 93/07/20 proven + * -Started coding this file. + */ + +#include +#include +#include +#include +#include +#include +#include + +#include +/* #include Because I'm a moron -- proven */ +#include + +/* More includes, that need size_t or NULL */ +#include + +enum pthread_state { + PS_RUNNING, + PS_MUTEX_WAIT, + PS_COND_WAIT, + PS_FDLR_WAIT, + PS_FDLW_WAIT, + PS_FDR_WAIT, + PS_FDW_WAIT, + PS_DEAD +}; + +struct pthread { + struct machdep_pthread machdep_data; + struct pthread_queue *queue; + enum pthread_state state; + pthread_attr_t attr; + + /* + * Thread implementations are just multiple queue type implemenations, + * Below are the various link lists currently necessary + * It is possible for a thread to be on multiple, or even all the + * queues at once, much care must be taken during queue manipulation. + */ + struct pthread *pll; /* ALL threads, in any state */ + /* struct pthread *rll; /* Current run queue, before resced */ + struct pthread *next; /* Standard for mutexes, etc ... */ + struct pthread *s_next; /* For sleeping threads */ + /* struct pthread *fd_next; /* For kernel fd operations */ + + int fd; /* Used when thread waiting on fd */ + + semaphore lock; + int error; +}; + +typedef struct pthread* pthread_t; + +/* + * Globals + */ +extern struct pthread *pthread_run; +extern struct pthread *pthread_initial; +extern struct pthread *pthread_link_list; +extern pthread_attr_t pthread_default_attr; +extern struct pthread_queue pthread_current_queue; +extern struct fd_table_entry *fd_table[]; + +/* + * New functions + */ + +__BEGIN_DECLS + +int pthread_create __P((pthread_t *, const pthread_attr_t *, + void * (*start_routine)(void *), void *)); +void pthread_exit __P((void *)); +pthread_t pthread_self __P((void)); +int pthread_equal __P((pthread_t, pthread_t)); + +#if defined(PTHREAD_KERNEL) + +void pthread_yield __P((void)); + +/* Not valid, but I can't spell so this will be caught at compile time */ +#define pthread_yeild(notvalid) + +#endif + +__END_DECLS diff --git a/lib/libpthread/include/pthread_attr.h b/lib/libpthread/include/pthread_attr.h new file mode 100644 index 0000000000..7d58c52422 --- /dev/null +++ b/lib/libpthread/include/pthread_attr.h @@ -0,0 +1,45 @@ +/* ==== pthread_attr.h ======================================================== + * Copyright (c) 1993 by Chris Provenzano, proven@athena.mit.edu + * + * Description : Basic pthread attributes header. + * + * 1.00 93/11/03 proven + * -Started coding this file. + */ + +#include + +#define _POSIX_THREAD_ATTR_STACKSIZE + +#define PTHREAD_STACK_DEFAULT 65536 + +/* + * New pthread attribute types. 
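+ *
+ * An attribute object is normally set up with pthread_attr_init() and,
+ * optionally, pthread_attr_setstacksize()/pthread_attr_setstackaddr(),
+ * before being passed to pthread_create().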
+ */ +enum pthread_sched_attr { + SCHED_RR, + SCHED_IO, + SCHED_FIFO, + SCHED_OTHER, +}; + +typedef struct pthread_attr { + enum pthread_sched_attr sched_attr; + void * stackaddr_attr; + size_t stacksize_attr; +} pthread_attr_t; + +/* + * New functions + */ + +__BEGIN_DECLS + +int pthread_attr_init __P((pthread_attr_t *)); +int pthread_attr_destroy __P((pthread_attr_t *)); +int pthread_attr_setstacksize __P((pthread_attr_t *, size_t)); +int pthread_attr_getstacksize __P((pthread_attr_t *, size_t *)); +int pthread_attr_setstackaddr __P((pthread_attr_t *, void *)); +int pthread_attr_getstackaddr __P((pthread_attr_t *, void **)); + +__END_DECLS diff --git a/lib/libpthread/include/queue.h b/lib/libpthread/include/queue.h new file mode 100644 index 0000000000..0a06d79842 --- /dev/null +++ b/lib/libpthread/include/queue.h @@ -0,0 +1,39 @@ +/* ==== queue.h ============================================================ + * Copyright (c) 1993 by Chris Provenzano, proven@athena.mit.edu + * + * Description : mutex header. + * + * 1.00 93/07/20 proven + * -Started coding this file. + */ + +#include + +/* + * New queue structures + */ +struct pthread_queue { + struct pthread *q_next; + struct pthread *q_last; + void *q_data; +}; + +/* + * Static queue initialization values. + */ +#define PTHREAD_QUEUE_INITIALIZER { NULL, NULL, NULL } + +/* + * New functions + * Should make pthread_queue_get a macro + */ + +__BEGIN_DECLS + +void pthread_queue_init __P((struct pthread_queue *)); +void pthread_queue_enq __P((struct pthread_queue *, struct pthread *)); +void pthread_queue_remove __P((struct pthread_queue *, struct pthread *)); +struct pthread *pthread_queue_get __P((struct pthread_queue *)); +struct pthread *pthread_queue_deq __P((struct pthread_queue *)); + +__END_DECLS diff --git a/lib/libpthread/include/util.h b/lib/libpthread/include/util.h new file mode 100644 index 0000000000..0da50e9df2 --- /dev/null +++ b/lib/libpthread/include/util.h @@ -0,0 +1,51 @@ +/* ==== util.h ============================================================ + * Copyright (c) 1991, 1992, 1993 by Chris Provenzano, proven@athena.mit.edu + * + * Description : Header file for generic utility functions. + * + * 91/08/31 proven - Added exchange. + * Exchange any two objects of any size in any table. + * + * 91/10/06 proven - Cleaned out all the old junk. + * + * 91/03/06 proven - Added getint. + */ + +#include + +#ifndef NULL +#define NULL 0 +#endif + +#undef FALSE +#undef TRUE + +typedef enum Boolean { + FALSE, + TRUE, +} Boolean; + +#define OK 0 +#define NUL '\0' +#define NOTOK -1 + +#if ! 
defined(min) +#define min(a,b) (((a)<(b))?(a):(b)) +#define max(a,b) (((a)>(b))?(a):(b)) +#endif + +/* Alingn the size to the next multiple of 4 bytes */ +#define ALIGN4(size) ((size + 3) & ~3) +#define ALIGN8(size) ((size + 7) & ~7) + +#ifdef DEBUG +#define DEBUG0(s) printf(s) +#define DEBUG1(s,a) printf(s,a) +#define DEBUG2(s,a,b) printf(s,a,b) +#define DEBUG3(s,a,b,c) printf(s,a,b,c) +#else +#define DEBUG0(s) +#define DEBUG1(s) +#define DEBUG2(s) +#define DEBUG3(s) +#endif diff --git a/lib/libpthread/pthreads/Makefile.inc b/lib/libpthread/pthreads/Makefile.inc new file mode 100644 index 0000000000..d5e43ecc75 --- /dev/null +++ b/lib/libpthread/pthreads/Makefile.inc @@ -0,0 +1,12 @@ +# from: @(#)Makefile.inc 5.6 (Berkeley) 6/4/91 + +# pthread sources +.PATH: ${.CURDIR}/pthreads + +SRCS+= cond.c fd.c fd_kern.c fd_pipe.c file.c globals.c malloc.c mutex.c \ + pthread.c pthread_attr.c queue.c signal.c + +MAN2+= + +MAN3+= + diff --git a/lib/libpthread/pthreads/cond.c b/lib/libpthread/pthreads/cond.c new file mode 100644 index 0000000000..bb1e87410a --- /dev/null +++ b/lib/libpthread/pthreads/cond.c @@ -0,0 +1,177 @@ +/* ==== cond.c ============================================================ + * Copyright (c) 1993 by Chris Provenzano, proven@athena.mit.edu + * + * Description : Condition cariable functions. + * + * 1.00 93/10/28 proven + * -Started coding this file. + */ + +#include +#include "pthread.h" +#include + +/* ========================================================================== + * pthread_cond_init() + * + * In this implementation I don't need to allocate memory. + * ENOMEM, EAGAIN should never be returned. Arch that have + * weird constraints may need special coding. + */ +int pthread_cond_init(pthread_cond_t *cond, pthread_condattr_t *cond_attr) +{ + /* Only check if attr specifies some mutex type other than fast */ + if ((cond_attr) && (cond_attr->c_type != COND_TYPE_FAST)) { + if (cond_attr->c_type >= COND_TYPE_MAX) { + return(EINVAL); + } + if (cond->c_flags & COND_FLAGS_INITED) { + return(EBUSY); + } + cond->c_type = cond_attr->c_type; + } else { + cond->c_type = COND_TYPE_FAST; + } + /* Set all other paramaters */ + pthread_queue_init(&cond->c_queue); + cond->c_flags |= COND_FLAGS_INITED; + cond->c_lock = SEMAPHORE_CLEAR; + return(OK); +} + +/* ========================================================================== + * pthread_cond_destroy() + */ +int pthread_cond_destroy(pthread_cond_t *cond) +{ + /* Only check if cond is of type other than fast */ + switch(cond->c_type) { + case COND_TYPE_FAST: + break; + case COND_TYPE_STATIC_FAST: + default: + return(EINVAL); + break; + } + + /* Cleanup cond, others might want to use it. */ + pthread_queue_init(&cond->c_queue); + cond->c_flags |= COND_FLAGS_INITED; + cond->c_lock = SEMAPHORE_CLEAR; + cond->c_flags = 0; + return(OK); +} + +/* ========================================================================== + * pthread_cond_wait() + */ +int pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex) +{ + semaphore *lock, *plock; + int rval; + + lock = &(cond->c_lock); + while (SEMAPHORE_TEST_AND_SET(lock)) { + pthread_yield(); + } + + switch (cond->c_type) { + /* + * Fast condition variables do not check for any error conditions. 
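+	 * The wait protocol below: lock the running thread, enqueue it on the
+	 * condition's queue, release the mutex and the condition's lock, then
+	 * reschedule(); the mutex is re-acquired once the thread is signalled
+	 * and runs again.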
+ */ + case COND_TYPE_FAST: + case COND_TYPE_STATIC_FAST: + plock = &(pthread_run->lock); + while (SEMAPHORE_TEST_AND_SET(plock)) { + pthread_yield(); + } + pthread_queue_enq(&cond->c_queue, pthread_run); + pthread_mutex_unlock(mutex); + SEMAPHORE_RESET(lock); + + /* Reschedule will unlock pthread_run */ + reschedule(PS_COND_WAIT); + + return(pthread_mutex_lock(mutex)); + break; + default: + rval = EINVAL; + break; + } + SEMAPHORE_RESET(lock); + return(rval); +} + +/* ========================================================================== + * pthread_cond_signal() + */ +int pthread_cond_signal(pthread_cond_t *cond) +{ + struct pthread *pthread; + semaphore *lock, *plock; + int rval; + + lock = &(cond->c_lock); + while (SEMAPHORE_TEST_AND_SET(lock)) { + pthread_yield(); + } + + switch (cond->c_type) { + case COND_TYPE_FAST: + case COND_TYPE_STATIC_FAST: + if (pthread = pthread_queue_get(&cond->c_queue)) { + plock = &(pthread->lock); + while (SEMAPHORE_TEST_AND_SET(plock)) { + pthread_yield(); + } + pthread_queue_deq(&cond->c_queue); + pthread->state = PS_RUNNING; + SEMAPHORE_RESET(plock); + } + rval = OK; + break; + default: + rval = EINVAL; + break; + } + SEMAPHORE_RESET(lock); + return(rval); +} + +/* ========================================================================== + * pthread_cond_broadcast() + * + * Not much different then the above routine. + */ +int pthread_cond_broadcast(pthread_cond_t *cond) +{ + struct pthread *pthread; + semaphore *lock, *plock; + int rval; + + lock = &(cond->c_lock); + while (SEMAPHORE_TEST_AND_SET(lock)) { + pthread_yield(); + } + + switch (cond->c_type) { + case COND_TYPE_FAST: + case COND_TYPE_STATIC_FAST: + while (pthread = pthread_queue_get(&cond->c_queue)) { + plock = &(pthread->lock); + while (SEMAPHORE_TEST_AND_SET(plock)) { + pthread_yield(); + } + pthread_queue_deq(&cond->c_queue); + pthread->state = PS_RUNNING; + SEMAPHORE_RESET(plock); + } + rval = OK; + break; + default: + rval = EINVAL; + break; + } + SEMAPHORE_RESET(lock); + return(rval); +} diff --git a/lib/libpthread/pthreads/fd.c b/lib/libpthread/pthreads/fd.c new file mode 100644 index 0000000000..bff477a7f2 --- /dev/null +++ b/lib/libpthread/pthreads/fd.c @@ -0,0 +1,482 @@ +/* ==== fd.c ============================================================ + * Copyright (c) 1993 by Chris Provenzano, proven@athena.mit.edu + * + * Description : All the syscalls dealing with fds. + * + * 1.00 93/08/14 proven + * -Started coding this file. + * + * 1.01 93/11/13 proven + * -The functions readv() and writev() added. + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +/* + * These first functions really should not be called by the user. + * + * I really should dynamically figure out what the table size is. + */ +int dtablesize = 64; +static struct fd_table_entry fd_entry[64]; + +/* ========================================================================== + * fd_init() + */ +void fd_init(void) +{ + int i; + + for (i = 0; i < dtablesize; i++) { + fd_table[i] = &fd_entry[i]; + + fd_table[i]->ops = NULL; + fd_table[i]->type = FD_NT; + fd_table[i]->fd.i = NOTOK; + fd_table[i]->flags = 0; + fd_table[i]->count = 0; + + pthread_queue_init(&(fd_table[i]->r_queue)); + pthread_queue_init(&(fd_table[i]->w_queue)); + + fd_table[i]->r_owner = NULL; + fd_table[i]->w_owner = NULL; + fd_table[i]->lock = SEMAPHORE_CLEAR; + fd_table[i]->next = NULL; + fd_table[i]->lockcount = 0; + } + + /* Currently only initialize first 3 fds. 
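+	 * (stdin, stdout and stderr).  Any other descriptor inherited from the
+	 * parent is left marked FD_NT and is probed by fd_kern_init() from
+	 * fd_basic_lock() the first time it is used.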
*/ + fd_kern_init(0); + fd_kern_init(1); + fd_kern_init(2); +} + +/* ========================================================================== + * fd_allocate() + */ +int fd_allocate() +{ + semaphore *lock; + int i; + + for (i = 0; i < dtablesize; i++) { + lock = &(fd_table[i]->lock); + while (SEMAPHORE_TEST_AND_SET(lock)) { + continue; + } + if (fd_table[i]->count || fd_table[i]->r_owner + || fd_table[i]->w_owner) { + SEMAPHORE_RESET(lock); + continue; + } + if (fd_table[i]->type == FD_NT) { + /* Test to see if the kernel version is in use */ + /* If so continue; */ + } + fd_table[i]->count++; + SEMAPHORE_RESET(lock); + return(i); + } + pthread_run->error = ENFILE; + return(NOTOK); +} + +/* ========================================================================== + * fd_free() + * + * Assumes fd is locked and owner by pthread_run + * Don't clear the queues, fd_unlock will do that. + */ +int fd_free(int fd) +{ + struct fd_table_entry *fd_valid; + int ret; + + if (ret = --fd_table[fd]->count) { + /* Separate pthread queue into two distinct queues. */ + fd_valid = fd_table[fd]; + fd_table[fd] = fd_table[fd]->next; + fd_valid->next = fd_table[fd]->next; + } + + fd_table[fd]->type = FD_NIU; + fd_table[fd]->fd.i = NOTOK; + fd_table[fd]->next = NULL; + fd_table[fd]->flags = 0; + fd_table[fd]->count = 0; + return(ret); +} + +/* ========================================================================== + * fd_basic_unlock() + * + * The real work of unlock without the locking of fd_table[fd].lock. + */ +void fd_basic_unlock(int fd, int lock_type) +{ + struct pthread *pthread; + semaphore *plock; + + if (fd_table[fd]->r_owner == pthread_run) { + if (pthread = pthread_queue_get(&fd_table[fd]->r_queue)) { + + plock = &(pthread->lock); + while (SEMAPHORE_TEST_AND_SET(plock)) { + pthread_yield(); + } + pthread_queue_deq(&fd_table[fd]->r_queue); + fd_table[fd]->r_owner = pthread; + pthread->state = PS_RUNNING; + SEMAPHORE_RESET(plock); + } else { + fd_table[fd]->r_owner = NULL; + } + } + + if (fd_table[fd]->w_owner == pthread_run) { + if (pthread = pthread_queue_get(&fd_table[fd]->w_queue)) { + plock = &(pthread->lock); + while (SEMAPHORE_TEST_AND_SET(plock)) { + pthread_yield(); + } + pthread_queue_deq(&fd_table[fd]->r_queue); + fd_table[fd]->w_owner = pthread; + pthread->state = PS_RUNNING; + SEMAPHORE_RESET(plock); + } else { + fd_table[fd]->w_owner = NULL; + } + } +} + +/* ========================================================================== + * fd_unlock() + * If there is a lock count then the function fileunlock will do + * the unlocking, just return. + */ +void fd_unlock(int fd, int lock_type) +{ + semaphore *lock; + + if (!(fd_table[fd]->lockcount)) { + lock = &(fd_table[fd]->lock); + while (SEMAPHORE_TEST_AND_SET(lock)) { + pthread_yield(); + } + fd_basic_unlock(fd, lock_type); + SEMAPHORE_RESET(lock); + } +} + +/* ========================================================================== + * fd_basic_lock() + * + * The real work of lock without the locking of fd_table[fd].lock. + * Be sure to leave the lock the same way you found it. i.e. locked. 
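+ *
+ * If another thread already owns the side being locked, the caller is queued
+ * on the entry's r_queue/w_queue and rescheduled (PS_FDLR_WAIT/PS_FDLW_WAIT);
+ * the entry lock is re-taken once the thread is woken.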
+ */ +int fd_basic_lock(unsigned int fd, int lock_type, semaphore * lock) +{ + semaphore *plock; + + /* If not in use return EBADF error */ + if (fd_table[fd]->type == FD_NIU) { + return(NOTOK); + } + + /* If not tested, test it and see if it is valid */ + if (fd_table[fd]->type == FD_NT) { + /* If not ok return EBADF error */ + if (fd_kern_init(fd) != OK) { + return(NOTOK); + } + } + if ((fd_table[fd]->type == FD_HALF_DUPLEX) || + (lock_type & FD_READ)) { + if (fd_table[fd]->r_owner) { + if (fd_table[fd]->r_owner != pthread_run) { + plock = &(pthread_run->lock); + while (SEMAPHORE_TEST_AND_SET(plock)) { + pthread_yield(); + } + pthread_queue_enq(&fd_table[fd]->r_queue, pthread_run); + SEMAPHORE_RESET(lock); + + /* Reschedule will unlock pthread_run */ + reschedule(PS_FDLR_WAIT); + + while(SEMAPHORE_TEST_AND_SET(lock)) { + pthread_yield(); + } + } else { + if (!fd_table[fd]->lockcount) { + PANIC(); + } + } + } + fd_table[fd]->r_owner = pthread_run; + } + if ((fd_table[fd]->type != FD_HALF_DUPLEX) && + (lock_type & FD_WRITE)) { + if (fd_table[fd]->w_owner) { + if (fd_table[fd]->w_owner != pthread_run) { + plock = &(pthread_run->lock); + while (SEMAPHORE_TEST_AND_SET(plock)) { + pthread_yield(); + } + pthread_queue_enq(&fd_table[fd]->w_queue, pthread_run); + SEMAPHORE_RESET(lock); + + /* Reschedule will unlock pthread_run */ + reschedule(PS_FDLW_WAIT); + + while(SEMAPHORE_TEST_AND_SET(lock)) { + pthread_yield(); + } + } + } + fd_table[fd]->w_owner = pthread_run; + } + if (!fd_table[fd]->count) { + fd_basic_unlock(fd, lock_type); + return(NOTOK); + } + return(OK); +} + +/* ========================================================================== + * fd_lock() + */ +int fd_lock(unsigned int fd, int lock_type) +{ + semaphore *lock; + int error; + + if (fd < dtablesize) { + lock = &(fd_table[fd]->lock); + while (SEMAPHORE_TEST_AND_SET(lock)) { + pthread_yield(); + } + error = fd_basic_lock(fd, lock_type, lock); + SEMAPHORE_RESET(lock); + return(error); + } + return(NOTOK); +} + + +/* ========================================================================== + * ======================================================================= */ + +/* ========================================================================== + * read() + */ +ssize_t read(int fd, void *buf, size_t nbytes) +{ + int ret; + + if ((ret = fd_lock(fd, FD_READ)) == OK) { + ret = fd_table[fd]->ops->read(fd_table[fd]->fd, + fd_table[fd]->flags, buf, nbytes); + fd_unlock(fd, FD_READ); + } + return(ret); +} + +/* ========================================================================== + * readv() + */ +int readv(int fd, const struct iovec *iov, int iovcnt) +{ + int ret; + + if ((ret = fd_lock(fd, FD_READ)) == OK) { + ret = fd_table[fd]->ops->readv(fd_table[fd]->fd, + fd_table[fd]->flags, iov, iovcnt); + fd_unlock(fd, FD_READ); + } + return(ret); +} + +/* ========================================================================== + * write() + */ +ssize_t write(int fd, const void *buf, size_t nbytes) +{ + int ret; + + if ((ret = fd_lock(fd, FD_WRITE)) == OK) { + ret = fd_table[fd]->ops->write(fd_table[fd]->fd, + fd_table[fd]->flags, buf, nbytes); + fd_unlock(fd, FD_WRITE); + } + return(ret); +} + +/* ========================================================================== + * writev() + */ +int writev(int fd, const struct iovec *iov, int iovcnt) +{ + int ret; + + if ((ret = fd_lock(fd, FD_WRITE)) == OK) { + ret = fd_table[fd]->ops->writev(fd_table[fd]->fd, + fd_table[fd]->flags, iov, iovcnt); + fd_unlock(fd, FD_WRITE); + } + 
return(ret); +} + +/* ========================================================================== + * close() + * + * The whole close procedure is a bit odd and needs a bit of a rethink. + * For now close() locks the fd, calls fd_free() which checks to see if + * there are any other fd values poinging to the same real fd. If so + * It breaks the wait queue into two sections those that are waiting on fd + * and those waiting on other fd's. Those that are waiting on fd are connected + * to the fd_table[fd] queue, and the count is set to zero, (BUT THE LOCK IS NOT + * RELEASED). close() then calls fd_unlock which give the fd to the next queued + * element which determins that the fd is closed and then calls fd_unlock etc... + */ +int close(int fd) +{ + union fd_data realfd; + int ret, flags; + + if ((ret = fd_lock(fd, FD_RDWR)) == OK) { + flags = fd_table[fd]->flags; + realfd = fd_table[fd]->fd; + if (fd_free(fd) == OK) { + ret = fd_table[fd]->ops->close(realfd, flags); + } + fd_unlock(fd, FD_RDWR); + } + return(ret); +} + +/* ========================================================================== + * fd_basic_dup() + * + * Might need to do more than just what's below. + */ +static inline void fd_basic_dup(int fd, int newfd) +{ + fd_table[newfd]->next = fd_table[fd]->next; + fd_table[fd]->next = fd_table[newfd]; + fd_table[fd]->count++; +} + +/* ========================================================================== + * dup2() + */ +int dup2(fd, newfd) +{ + union fd_data realfd; + semaphore *lock; + int ret, flags; + + if ((ret = fd_lock(fd, FD_RDWR)) == OK) { + /* Need to lock the newfd by hand */ + if (newfd < dtablesize) { + lock = &(fd_table[newfd]->lock); + while(SEMAPHORE_TEST_AND_SET(lock)) { + pthread_yield(); + } + + /* Is it inuse */ + if (fd_basic_lock(newfd, FD_RDWR, lock) == OK) { + /* free it and check close status */ + flags = fd_table[fd]->flags; + realfd = fd_table[fd]->fd; + if (fd_free(fd) == OK) { + ret = fd_table[fd]->ops->close(realfd, flags); + } + } + fd_basic_dup(fd, newfd); + + } + fd_unlock(fd, FD_RDWR); + } + return(ret); +} + +/* ========================================================================== + * dup() + */ +int dup(int fd) +{ + int ret; + + if ((ret = fd_lock(fd, FD_RDWR)) == OK) { + ret = fd_allocate(); + fd_basic_dup(fd, ret); + fd_unlock(fd, FD_RDWR); + } + return(ret); +} + +/* ========================================================================== + * fcntl() + */ +int fcntl(int fd, int cmd, ...) 
+{ + int ret, realfd, flags; + struct flock *flock; + semaphore *plock; + va_list ap; + + flags = 0; + if ((ret = fd_lock(fd, FD_RDWR)) == OK) { + va_start(ap, cmd); + switch(cmd) { + case F_DUPFD: + ret = fd_allocate(); + fd_basic_dup(va_arg(ap, int), ret); + break; + case F_SETFD: + flags = va_arg(ap, int); + case F_GETFD: + ret = fd_table[fd]->ops->fcntl(fd_table[fd]->fd, + fd_table[fd]->flags, cmd, flags | __FD_NONBLOCK); + break; + case F_GETFL: + ret = fd_table[fd]->flags; + break; + case F_SETFL: + flags = va_arg(ap, int); + if ((ret = fd_table[fd]->ops->fcntl(fd_table[fd]->fd, + fd_table[fd]->flags, cmd, flags | __FD_NONBLOCK)) == OK) { + fd_table[fd]->flags = flags; + } + break; +/* case F_SETLKW: */ + /* + * Do the same as SETLK but if it fails with EACCES or EAGAIN + * block the thread and try again later, not implemented yet + */ +/* case F_SETLK: */ +/* case F_GETLK: + flock = va_arg(ap, struct flock*); + ret = fd_table[fd]->ops->fcntl(fd_table[fd]->fd, + fd_table[fd]->flags, cmd, flock); + break; */ + default: + /* Might want to make va_arg use a union */ + ret = fd_table[fd]->ops->fcntl(fd_table[fd]->fd, + fd_table[fd]->flags, cmd, va_arg(ap, void*)); + break; + } + va_end(ap); + fd_unlock(fd, FD_RDWR); + } + return(ret); +} diff --git a/lib/libpthread/pthreads/fd_kern.c b/lib/libpthread/pthreads/fd_kern.c new file mode 100644 index 0000000000..a67b229eba --- /dev/null +++ b/lib/libpthread/pthreads/fd_kern.c @@ -0,0 +1,510 @@ +/* ==== fd_kern.c ============================================================ + * Copyright (c) 1993 by Chris Provenzano, proven@athena.mit.edu + * + * Description : Deals with the valid kernel fds. + * + * 1.00 93/09/27 proven + * -Started coding this file. + * + * 1.01 93/11/13 proven + * -The functions readv() and writev() added. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* ========================================================================== + * fd_kern_poll() + * + * Called only from context_switch(). The kernel must be locked + * and interrupts must be turned of. + * + * This function uses a linked list of waiting pthreads, NOT a queue. 
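+ *
+ * It builds fd_set_read/fd_set_write from those lists, polls them with a
+ * zero-timeout select(), and moves every thread whose descriptor is ready
+ * back to the PS_RUNNING state.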
+ */ +static struct pthread *fd_wait_read, *fd_wait_write; +static semaphore fd_wait_lock = SEMAPHORE_CLEAR; +static fd_set fd_set_read, fd_set_write; +static struct timeval zerotime = { 0, 0 }; + +void fd_kern_poll(void) +{ + struct pthread **pthread; + semaphore *lock; + int count; + + /* If someone has the lock then they are in RUNNING state, just return */ + lock = &fd_wait_lock; + if (SEMAPHORE_TEST_AND_SET(lock)) { + return; + } + if (fd_wait_read || fd_wait_write) { + for (pthread = &fd_wait_read; *pthread; pthread = &((*pthread)->next)) { + FD_SET((*pthread)->fd, &fd_set_read); + } + for (pthread = &fd_wait_write; *pthread; pthread = &((*pthread)->next)) { + FD_SET((*pthread)->fd, &fd_set_write); + } + + while ((count = select(dtablesize, &fd_set_read, &fd_set_write, + NULL, &zerotime)) < OK) { + if (count = -EINTR) { + continue; + } + PANIC(); + } + + for (pthread = &fd_wait_read; count && *pthread; ) { + if (FD_ISSET((*pthread)->fd, &fd_set_read)) { + /* Get lock on thread */ + + (*pthread)->state = PS_RUNNING; + *pthread = (*pthread)->next; + count--; + continue; + } + pthread = &((*pthread)->next); + } + + for (pthread = &fd_wait_write; count && *pthread; ) { + if (FD_ISSET((*pthread)->fd, &fd_set_write)) { + semaphore *plock; + + /* Get lock on thread */ + plock = &(*pthread)->lock; + if (!(SEMAPHORE_TEST_AND_SET(plock))) { + /* Thread locked, skip it. */ + (*pthread)->state = PS_RUNNING; + *pthread = (*pthread)->next; + SEMAPHORE_RESET(plock); + } + count--; + continue; + } + pthread = &((*pthread)->next); + } + } + SEMAPHORE_RESET(lock); +} + +/* ========================================================================== + * Special Note: All operations return the errno as a negative of the errno + * listed in errno.h + * ======================================================================= */ + +/* ========================================================================== + * read() + */ +ssize_t __fd_kern_read(int fd, int flags, void *buf, size_t nbytes) +{ + semaphore *lock, *plock; + int ret; + + while ((ret = machdep_sys_read(fd, buf, nbytes)) < OK) { + if (ret == -EWOULDBLOCK) { + /* Lock queue */ + lock = &fd_wait_lock; + while (SEMAPHORE_TEST_AND_SET(lock)) { + pthread_yield(); + } + + /* Lock pthread */ + plock = &(pthread_run->lock); + while (SEMAPHORE_TEST_AND_SET(plock)) { + pthread_yield(); + } + + /* queue pthread for a FDR_WAIT */ + pthread_run->next = fd_wait_read; + fd_wait_read = pthread_run; + pthread_run->fd = fd; + SEMAPHORE_RESET(lock); + reschedule(PS_FDR_WAIT); + } else { + pthread_run->error = -ret; + ret = NOTOK; + break; + } + } + return(ret); +} + +/* ========================================================================== + * readv() + */ +int __fd_kern_readv(int fd, int flags, struct iovec *iov, int iovcnt) +{ + semaphore *lock, *plock; + int ret; + + while ((ret = machdep_sys_readv(fd, iov, iovcnt)) < OK) { + if (ret == -EWOULDBLOCK) { + /* Lock queue */ + lock = &fd_wait_lock; + while (SEMAPHORE_TEST_AND_SET(lock)) { + pthread_yield(); + } + + /* Lock pthread */ + plock = &(pthread_run->lock); + while (SEMAPHORE_TEST_AND_SET(plock)) { + pthread_yield(); + } + + /* queue pthread for a FDR_WAIT */ + pthread_run->next = fd_wait_read; + fd_wait_read = pthread_run; + pthread_run->fd = fd; + SEMAPHORE_RESET(lock); + reschedule(PS_FDR_WAIT); + } else { + pthread_run->error = -ret; + ret = NOTOK; + break; + } + } + return(ret); +} + +/* ========================================================================== + * write() + */ +ssize_t 
__fd_kern_write(int fd, int flags, const void *buf, size_t nbytes) +{ + semaphore *lock, *plock; + int ret; + + while ((ret = machdep_sys_write(fd, buf, nbytes)) < OK) { + if (pthread_run->error == -EWOULDBLOCK) { + /* Lock queue */ + lock = &fd_wait_lock; + while (SEMAPHORE_TEST_AND_SET(lock)) { + pthread_yield(); + } + + /* Lock pthread */ + plock = &(pthread_run->lock); + while (SEMAPHORE_TEST_AND_SET(plock)) { + pthread_yield(); + } + + /* queue pthread for a FDW_WAIT */ + pthread_run->next = fd_wait_write; + fd_wait_write = pthread_run; + pthread_run->fd = fd; + SEMAPHORE_RESET(lock); + reschedule(PS_FDW_WAIT); + } else { + pthread_run->error = ret; + break; + } + } + return(ret); +} + +/* ========================================================================== + * writev() + */ +int __fd_kern_writev(int fd, int flags, struct iovec *iov, int iovcnt) +{ + semaphore *lock, *plock; + int ret; + + while ((ret = machdep_sys_writev(fd, iov, iovcnt)) < OK) { + if (pthread_run->error == -EWOULDBLOCK) { + /* Lock queue */ + lock = &fd_wait_lock; + while (SEMAPHORE_TEST_AND_SET(lock)) { + pthread_yield(); + } + + /* Lock pthread */ + plock = &(pthread_run->lock); + while (SEMAPHORE_TEST_AND_SET(plock)) { + pthread_yield(); + } + + /* queue pthread for a FDW_WAIT */ + pthread_run->next = fd_wait_write; + fd_wait_write = pthread_run; + pthread_run->fd = fd; + SEMAPHORE_RESET(lock); + reschedule(PS_FDW_WAIT); + } else { + pthread_run->error = ret; + break; + } + } + return(ret); +} + +/* ========================================================================== + * For blocking version we really should set an interrupt + * fcntl() + */ +int __fd_kern_fcntl(int fd, int flags, int cmd, int arg) +{ + machdep_sys_fcntl(fd, cmd, arg); +} + +/* ========================================================================== + * close() + */ +int __fd_kern_close(int fd, int flags) +{ + machdep_sys_close(fd); +} + +/* + * File descriptor operations + */ +extern machdep_sys_close(); + +/* Normal file operations */ +static struct fd_ops __fd_kern_ops = { + __fd_kern_write, __fd_kern_read, __fd_kern_close, __fd_kern_fcntl, + __fd_kern_readv, __fd_kern_writev +}; + +/* NFS file opperations */ + +/* FIFO file opperations */ + +/* Device operations */ + +/* ========================================================================== + * open() + * + * Because open could potentially block opening a file from a remote + * system, we want to make sure the call will timeout. We then try and open + * the file, and stat the file to determine what operations we should + * associate with the fd. + * + * This is not done yet + * + * A reqular file on the local system needs no special treatment. + */ +int open(const char *path, int flags, ...) 
+{ + int fd, mode, fd_kern; + struct stat stat_buf; + va_list ap; + + /* If pthread scheduling == FIFO set a virtual timer */ + if (flags & O_CREAT) { + va_start(ap, flags); + mode = va_arg(ap, int); + va_end(ap); + } else { + mode = 0; + } + + if (!((fd = fd_allocate()) < OK)) { + fd_table[fd]->flags = flags; + flags |= __FD_NONBLOCK; + + if (!((fd_kern = machdep_sys_open(path, flags, mode)) < OK)) { + + /* fstat the file to determine what type it is */ + if (fstat(fd_kern, &stat_buf)) { +printf("error %d stating new fd %d\n", errno, fd); + } + if (S_ISREG(stat_buf.st_mode)) { + fd_table[fd]->ops = &(__fd_kern_ops); + fd_table[fd]->type = FD_HALF_DUPLEX; + } else { + fd_table[fd]->ops = &(__fd_kern_ops); + fd_table[fd]->type = FD_FULL_DUPLEX; + } + fd_table[fd]->fd.i = fd_kern; + return(fd); + } + + pthread_run->error = - fd_kern; + fd_table[fd]->count = 0; + } + return(NOTOK); +} + +/* ========================================================================== + * fd_kern_init() + * + * Assume the entry is locked before routine is invoked + * + * This may change. The problem is setting the fd to nonblocking changes + * the parents fd too, which may not be the desired result. + */ +static fd_kern_init_called = 0; +void fd_kern_init(int fd) +{ + if ((fd_table[fd]->flags = machdep_sys_fcntl(fd, F_GETFL, NULL)) >= OK) { + machdep_sys_fcntl(fd, F_SETFL, fd_table[fd]->flags | __FD_NONBLOCK); + fd_table[fd]->ops = &(__fd_kern_ops); + fd_table[fd]->type = FD_HALF_DUPLEX; + fd_table[fd]->fd.i = fd; + fd_table[fd]->count = 1; + + /* Only give one warning */ + if (!(fd_kern_init_called++)) { + printf("Warning: threaded process may have changed open file "); + printf("descriptors of parent\n"); + } + } +} + +/* ========================================================================== + * Here are the berkeley socket functions. These are not POSIX. 
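The open() wrapper above classifies the new descriptor by fstat()ing it: regular files become FD_HALF_DUPLEX, everything else FD_FULL_DUPLEX. A small standalone sketch of that check, not part of the patch, using the standard O_NONBLOCK flag as a stand-in for the library's internal __FD_NONBLOCK and an arbitrary default path:

#include <fcntl.h>
#include <stdio.h>
#include <sys/stat.h>
#include <unistd.h>

int main(int argc, char *argv[])
{
    const char *path = argc > 1 ? argv[1] : "/etc/hosts";  /* arbitrary default */
    struct stat st;
    int fd;

    /* O_NONBLOCK here plays the role of the library's __FD_NONBLOCK. */
    fd = open(path, O_RDONLY | O_NONBLOCK);
    if (fd < 0) {
        perror("open");
        return 1;
    }
    if (fstat(fd, &st) < 0) {
        perror("fstat");
        close(fd);
        return 1;
    }
    printf("%s: %s\n", path,
           S_ISREG(st.st_mode) ? "regular file -> FD_HALF_DUPLEX"
                               : "special file -> FD_FULL_DUPLEX");
    close(fd);
    return 0;
}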
+ * ======================================================================= */ + +/* ========================================================================== + * socket() + */ +int socket(int af, int type, int protocol) +{ + int fd, fd_kern; + + if (!((fd = fd_allocate()) < OK)) { + + if (!((fd_kern = machdep_sys_socket(af, type, protocol)) < OK)) { + machdep_sys_fcntl(fd_kern, F_SETFL, __FD_NONBLOCK); + + /* Should fstat the file to determine what type it is */ + fd_table[fd]->ops = & __fd_kern_ops; + fd_table[fd]->type = FD_FULL_DUPLEX; + fd_table[fd]->fd.i = fd_kern; + fd_table[fd]->flags = 0; + return(fd); + } + + pthread_run->error = - fd_kern; + fd_table[fd]->count = 0; + } + return(NOTOK); +} + +/* ========================================================================== + * bind() + */ +int bind(int fd, const struct sockaddr *name, int namelen) +{ + /* Not much to do in bind */ + semaphore *plock; + int ret; + + if ((ret = fd_lock(fd, FD_RDWR)) == OK) { + if ((ret = machdep_sys_bind(fd_table[fd]->fd, name, namelen)) < OK) { + pthread_run->error = - ret; + } + fd_unlock(fd, FD_RDWR); + } + return(ret); +} + +/* ========================================================================== + * connect() + */ +int connect(int fd, const struct sockaddr *name, int namelen) +{ + semaphore *lock, *plock; + struct sockaddr tmpname; + int ret, tmpnamelen; + + if ((ret = fd_lock(fd, FD_RDWR)) == OK) { + if ((ret = machdep_sys_connect(fd_table[fd]->fd, name, namelen)) < OK) { + if ((ret == -EWOULDBLOCK) || (ret == -EINPROGRESS) || + (ret == -EALREADY)) { + /* Lock queue */ + lock = &fd_wait_lock; + while (SEMAPHORE_TEST_AND_SET(lock)) { + pthread_yield(); + } + + /* Lock pthread */ + plock = &(pthread_run->lock); + while (SEMAPHORE_TEST_AND_SET(plock)) { + pthread_yield(); + } + + /* queue pthread for a FDW_WAIT */ + pthread_run->next = fd_wait_write; + fd_wait_write = pthread_run; + pthread_run->fd = fd; + SEMAPHORE_RESET(lock); + reschedule(PS_FDW_WAIT); + + /* OK now lets see if it really worked */ + if (((ret = machdep_sys_getpeername(fd_table[fd]->fd, + &tmpname, &tmpnamelen)) < OK) && (ret == -ENOTCONN)) { + + /* Get the error, this function should not fail */ + machdep_sys_getsockopt(fd_table[fd]->fd, SOL_SOCKET, + SO_ERROR, &pthread_run->error, &tmpnamelen); + } + } else { + pthread_run->error = -ret; + } + } + fd_unlock(fd, FD_RDWR); + } + return(ret); +} + +/* ========================================================================== + * accept() + */ +int accept(int fd, struct sockaddr *name, int *namelen) +{ + semaphore *lock, *plock; + int ret; + + if ((ret = fd_lock(fd, FD_RDWR)) == OK) { + while ((ret = machdep_sys_accept(fd_table[fd]->fd, name, namelen)) < OK) { + if (pthread_run->error == -EWOULDBLOCK) { + /* Lock queue */ + lock = &fd_wait_lock; + while (SEMAPHORE_TEST_AND_SET(lock)) { + pthread_yield(); + } + + /* Lock pthread */ + plock = &(pthread_run->lock); + while (SEMAPHORE_TEST_AND_SET(plock)) { + pthread_yield(); + } + + /* queue pthread for a FDR_WAIT */ + pthread_run->next = fd_wait_read; + fd_wait_read = pthread_run; + pthread_run->fd = fd; + SEMAPHORE_RESET(lock); + reschedule(PS_FDR_WAIT); + } else { + break; + } + } + fd_unlock(fd, FD_RDWR); + } + return(ret); +} + +/* ========================================================================== + * listen() + */ +int listen(int fd, int backlog) +{ + int ret; + + if ((ret = fd_lock(fd, FD_RDWR)) == OK) { + ret = machdep_sys_listen(fd_table[fd]->fd, backlog); + fd_unlock(fd, FD_RDWR); + } + return(ret); +} diff 
--git a/lib/libpthread/pthreads/fd_pipe.c b/lib/libpthread/pthreads/fd_pipe.c new file mode 100644 index 0000000000..a5524d025c --- /dev/null +++ b/lib/libpthread/pthreads/fd_pipe.c @@ -0,0 +1,237 @@ +/* ==== fd_pipe.c ============================================================ + * Copyright (c) 1993 by Chris Provenzano, proven@athena.mit.edu + * + * Description : The new fast ITC pipe routines. + * + * 1.00 93/08/14 proven + * -Started coding this file. + * + * 1.01 93/11/13 proven + * -The functions readv() and writev() added. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* ========================================================================== + * The pipe lock is never unlocked until all pthreads waiting are done with it + * read() + */ +ssize_t __pipe_read(struct __pipe *fd, int flags, void *buf, size_t nbytes) +{ + semaphore *lock, *plock; + int ret = 0; + + if (flags & O_ACCMODE) { return(NOTOK); } + + lock = &(fd->lock); + while (SEMAPHORE_TEST_AND_SET(lock)) { + pthread_yield(); + } + /* If there is nothing to read, go to sleep */ + if (fd->count == 0) { + if (flags == WR_CLOSED) { + SEMAPHORE_RESET(lock); + return(0); + } /* Lock pthread */ + plock = &(pthread_run->lock); + while (SEMAPHORE_TEST_AND_SET(plock)) { + pthread_yield(); + } + + /* queue pthread for a FDR_WAIT */ + pthread_run->next = NULL; + fd->wait = pthread_run; + SEMAPHORE_RESET(lock); + reschedule(PS_FDR_WAIT); + ret = fd->size; + } else { + ret = MIN(nbytes, fd->count); + memcpy(buf, fd->buf + fd->offset, ret); + if (!(fd->count -= ret)) { + fd->offset = 0; + } + + /* Should try to read more from the waiting writer */ + + if (fd->wait) { + plock = &(fd->wait->lock); + while (SEMAPHORE_TEST_AND_SET(plock)) { + pthread_yield(); + } + fd->wait->state = PS_RUNNING; + SEMAPHORE_RESET(plock); + } else { + SEMAPHORE_RESET(lock); + } + } + return(ret); +} + +/* ========================================================================== + * __pipe_write() + * + * First check to see if the read side is still open, then + * check to see if there is a thread in a read wait for this pipe, if so + * copy as much data as possible directly into the read waiting threads + * buffer. The write thread(whether or not there was a read thread) + * copies as much data as it can into the pipe buffer and it there + * is still data it goes to sleep. 
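A simplified standalone model of the buffer bookkeeping described above may help: unread data occupies buf[offset .. offset+count), a write appends as much as fits (the real writer sleeps on the remainder), and a read consumes from the front. struct minipipe and its sizes are hypothetical, and the direct copy between a waiting thread's buffer and the caller is omitted:

#include <stdio.h>
#include <string.h>

#define PIPE_BUF_SIZE 16            /* tiny on purpose, for the demo */

/* Hypothetical miniature of the pipe bookkeeping: unread data lives at
 * buf[offset .. offset+count). */
struct minipipe {
    char   buf[PIPE_BUF_SIZE];
    size_t offset;                  /* start of unread data   */
    size_t count;                   /* bytes currently queued */
};

/* Append as much as fits; a real writer would sleep on the remainder. */
static size_t mini_write(struct minipipe *p, const char *data, size_t n)
{
    size_t space = PIPE_BUF_SIZE - (p->offset + p->count);
    size_t chunk = n < space ? n : space;       /* MIN(nbytes, space) */

    memcpy(p->buf + p->offset + p->count, data, chunk);
    p->count += chunk;
    return chunk;
}

/* Consume from the front; rewind the offset once the buffer drains. */
static size_t mini_read(struct minipipe *p, char *out, size_t n)
{
    size_t chunk = n < p->count ? n : p->count; /* MIN(nbytes, count) */

    memcpy(out, p->buf + p->offset, chunk);
    if ((p->count -= chunk) == 0)
        p->offset = 0;
    else
        p->offset += chunk;
    return chunk;
}

int main(void)
{
    struct minipipe p = { { 0 }, 0, 0 };
    char out[32];

    size_t w = mini_write(&p, "hello, pipe world!", 18);
    size_t r = mini_read(&p, out, sizeof(out) - 1);
    out[r] = '\0';
    printf("wrote %zu byte(s), read %zu: \"%s\"\n", w, r, out);
    return 0;
}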
+ */ +ssize_t __pipe_write(struct __pipe *fd, int flags, const void *buf, size_t nbytes) { + semaphore *lock, *plock; + int ret, count; + + if (!(flags & O_ACCMODE)) { return(NOTOK); } + + lock = &(fd->lock); + while (SEMAPHORE_TEST_AND_SET(lock)) { + pthread_yield(); + } + while (fd->flags != RD_CLOSED) { + if (fd->wait) { + /* Lock pthread */ + plock = &(fd->wait->lock); + while (SEMAPHORE_TEST_AND_SET(plock)) { + pthread_yield(); + } + + /* Copy data directly into waiting pthreads buf */ + fd->wait_size = MIN(nbytes, fd->wait_size); + memcpy(fd->wait_buf, buf, fd->wait_size); + buf = (const char *)buf + fd->wait_size; + nbytes -= fd->wait_size; + ret = fd->wait_size; + + /* Wake up waiting pthread */ + fd->wait->state = PS_RUNNING; + SEMAPHORE_RESET(plock); + fd->wait = NULL; + } + + if (count = MIN(nbytes, fd->size - (fd->offset + fd->count))) { + memcpy(fd->buf + (fd->offset + fd->count), buf, count); + buf = (const char *)buf + count; + nbytes -= count; + ret += count; + } + if (nbytes) { + /* Lock pthread */ + plock = &(fd->wait->lock); + while (SEMAPHORE_TEST_AND_SET(plock)) { + pthread_yield(); + } + + fd->wait = pthread_run; + SEMAPHORE_RESET(lock); + reschedule(PS_FDW_WAIT); + } else { + return(ret); + } + } + return(NOTOK); +} + +/* ========================================================================== + * __pipe_close() + * + * The whole close procedure is a bit odd and needs a bit of a rethink. + * For now close() locks the fd, calls fd_free() which checks to see if + * there are any other fd values poinging to the same real fd. If so + * It breaks the wait queue into two sections those that are waiting on fd + * and those waiting on other fd's. Those that are waiting on fd are connected + * to the fd_table[fd] queue, and the count is set to zero, (BUT THE LOCK IS NOT + * RELEASED). close() then calls fd_unlock which give the fd to the next queued + * element which determins that the fd is closed and then calls fd_unlock etc... + */ +int __pipe_close(struct __pipe *fd, int flags) +{ + semaphore *lock, *plock; + + lock = &(fd->lock); + while (SEMAPHORE_TEST_AND_SET(lock)) { + pthread_yield(); + } + if (!(fd->flags)) { + if (fd->wait) { + if (flags & O_ACCMODE) { + fd->flags |= WR_CLOSED; + /* Lock pthread */ + /* Write side closed, wake read side and return EOF */ + plock = &((fd->wait)->lock); + while (SEMAPHORE_TEST_AND_SET(plock)) { + pthread_yield(); + } + + fd->count = 0; + + /* Wake up waiting pthread */ + fd->wait->state = PS_RUNNING; + SEMAPHORE_RESET(plock); + fd->wait = NULL; + } else { + /* Should send a signal */ + fd->flags |= RD_CLOSED; + } + } + } else { + free(fd); + return(OK); + } + SEMAPHORE_RESET(lock); +} + +/* ========================================================================== + * For those function that aren't implemented yet + * __pipe_enosys() + */ +static int __pipe_enosys() +{ + pthread_run->error = ENOSYS; + return(NOTOK); +} + +/* + * File descriptor operations + */ +struct fd_ops fd_ops[] = { +{ NULL, NULL, }, /* Non operations */ +{ __pipe_write, __pipe_read, __pipe_close, __pipe_enosys, __pipe_enosys, + __pipe_enosys }, +}; + +/* ========================================================================== + * open() + */ +/* int __pipe_open(const char *path, int flags, ...) 
*/ +int newpipe(int fd[2]) +{ + struct __pipe *fd_data; + + if ((!((fd[0] = fd_allocate()) < OK)) && (!((fd[1] = fd_allocate()) < OK))) { + fd_data = malloc(sizeof(struct __pipe)); + fd_data->buf = malloc(4096); + fd_data->size = 4096; + fd_data->count = 0; + fd_data->offset = 0; + + fd_data->wait = NULL; + fd_data->flags = 0; + + fd_table[fd[0]]->fd.ptr = fd_data; + fd_table[fd[0]]->flags = O_RDONLY; + fd_table[fd[1]]->fd.ptr = fd_data; + fd_table[fd[1]]->flags = O_WRONLY; + + return(OK); + } + return(NOTOK); +} + diff --git a/lib/libpthread/pthreads/file.c b/lib/libpthread/pthreads/file.c new file mode 100644 index 0000000000..2221a851a4 --- /dev/null +++ b/lib/libpthread/pthreads/file.c @@ -0,0 +1,56 @@ +/* ==== file.c ============================================================ + * Copyright (c) 1993 by Chris Provenzano, proven@athena.mit.edu + * + * Description : All the new stdio functions. + * + * 1.00 93/09/04 proven + * -Started coding this file. + */ + +#include +#include "pthread.h" +#include /* Remove this when the stdio library is done. */ + +/* ========================================================================== + * flockfile() + */ +void flockfile(FILE *fp) +{ + semaphore *lock; + int fd; + + fd = fileno(fp); + lock = &(fd_table[fd]->lock); + while (SEMAPHORE_TEST_AND_SET(lock)) { + pthread_yield(); + } + + if (fd_table[fd]->r_owner != pthread_run) { + fd_basic_lock(fd, FD_RDWR, lock); + } + fd_table[fd]->lockcount++; + SEMAPHORE_RESET(lock); +} + +/* ========================================================================== + * funlockfile() + */ +void funlockfile(FILE *fp) +{ + semaphore *lock; + int fd; + + fd = fileno(fp); + lock = &(fd_table[fd]->lock); + while (SEMAPHORE_TEST_AND_SET(lock)) { + pthread_yield(); + } + + if (fd_table[fd]->r_owner == pthread_run) { + if (--fd_table[fd]->lockcount == 0) { + fd_basic_unlock(fd, FD_RDWR); + } + } + SEMAPHORE_RESET(lock); +} + diff --git a/lib/libpthread/pthreads/globals.c b/lib/libpthread/pthreads/globals.c new file mode 100644 index 0000000000..c8a9e9df33 --- /dev/null +++ b/lib/libpthread/pthreads/globals.c @@ -0,0 +1,36 @@ +/* ==== globals.c ============================================================ + * Copyright (c) 1993 by Chris Provenzano, proven@athena.mit.edu + * + * Description : Global variables. + * + * 1.00 93/07/26 proven + * -Started coding this file. + */ + +#include +#include "pthread.h" + +/* + * Initial thread, running thread, and top of link list + * of all threads. + */ +struct pthread *pthread_run; +struct pthread *pthread_initial; +struct pthread *pthread_link_list; + +/* + * default thread attributes + */ +pthread_attr_t pthread_default_attr = { SCHED_RR, NULL, PTHREAD_STACK_DEFAULT }; + +/* + * Queue for all threads elidgeable to run this scheduling round. + */ +struct pthread_queue pthread_current_queue = PTHREAD_QUEUE_INITIALIZER; + +/* + * File table information + */ +struct fd_table_entry *fd_table[64]; + + diff --git a/lib/libpthread/pthreads/malloc.c b/lib/libpthread/pthreads/malloc.c new file mode 100644 index 0000000000..7dc09afa52 --- /dev/null +++ b/lib/libpthread/pthreads/malloc.c @@ -0,0 +1,363 @@ +/* ==== malloc.c ============================================================ + * Copyright (c) 1983 Regents of the University of California. + * Copyright (c) 1993 by Chris Provenzano, proven@mit.edu + * All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * Description : Malloc functions. + * This is a very fast storage allocator. It allocates blocks of a small + * number of different sizes, and keeps free lists of each size. Blocks that + * don't exactly fit are passed up to the next larger size. In this + * implementation, the available sizes are 2^n-4 (or 2^n-10) bytes long. + * This is designed for use in a virtual memory environment. + * + * 0.00 82/02/21 Chris Kingsley kingsley@cit-20 + * + * 1.00 93/11/06 proven + * -Modified BSD libc malloc to be threadsafe. + * + */ + +#if defined(LIBC_SCCS) && !defined(lint) +/*static char *sccsid = "from: @(#)malloc.c 5.11 (Berkeley) 2/23/91";*/ +static char *rcsid = "$Id: malloc.c,v 1.2 1993/11/15 10:06:09 proven Exp $"; +#endif /* LIBC_SCCS and not lint */ + +#include +#include +#include +#include +#include + +/* + * The overhead on a block is at least 4 bytes. When free, this space + * contains a pointer to the next free block, and the bottom two bits must + * be zero. When in use, the first byte is set to MAGIC, and the second + * byte is the size index. The remaining bytes are for alignment. + * If range checking is enabled then a second word holds the size of the + * requested block, less 1, rounded up to a multiple of sizeof(RMAGIC). + * The order of elements is critical: ov_magic must overlay the low order + * bits of ov_next, and ov_magic can not be a valid ov_next bit pattern. 
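A rough standalone illustration of the sizing rule just described, covering the small-request path with range checking disabled; it is not part of the patch, and OVERHEAD is an assumed 4-byte stand-in for sizeof(union overhead):

#include <stddef.h>
#include <stdio.h>

#define OVERHEAD 4  /* assumed accounting bytes per block (no RCHECK) */

/* Map a request onto the bucket scheme described above: bucket i holds
 * blocks of 2^(i+3) bytes, and the usable part is the block minus the
 * per-block accounting overhead. */
static int size_to_bucket(size_t nbytes)
{
    size_t blk = 8;             /* smallest block: 2^3 bytes */
    int bucket = 0;

    while (nbytes + OVERHEAD > blk) {
        blk <<= 1;
        bucket++;
    }
    return bucket;
}

int main(void)
{
    size_t sizes[] = { 1, 4, 12, 100, 1000, 4096 };

    for (int i = 0; i < 6; i++) {
        int b = size_to_bucket(sizes[i]);
        printf("request %4zu -> bucket %d (block size %d)\n",
               sizes[i], b, 8 << b);
    }
    return 0;
}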
+ */ +union overhead { + union overhead *ov_next; /* when free */ + struct { + u_char ovu_magic; /* magic number */ + u_char ovu_index; /* bucket # */ +#ifdef RCHECK + u_short ovu_rmagic; /* range magic number */ + u_int ovu_size; /* actual block size */ +#endif + } ovu; +#define ov_magic ovu.ovu_magic +#define ov_index ovu.ovu_index +#define ov_rmagic ovu.ovu_rmagic +#define ov_size ovu.ovu_size +}; + +#define MAGIC 0xef /* magic # on accounting info */ +#define RMAGIC 0x5555 /* magic # on range info */ + +#ifdef RCHECK +#define RSLOP sizeof (u_short) +#else +#define RSLOP 0 +#endif + +/* + * nextf[i] is the pointer to the next free block of size 2^(i+3). The + * smallest allocatable block is 8 bytes. The overhead information + * precedes the data area returned to the user. + */ +#define NBUCKETS 30 +static union overhead *nextf[NBUCKETS]; +extern char *sbrk(); + +static int pagesz; /* page size */ +static int pagebucket; /* page size bucket */ +static semaphore malloc_lock = SEMAPHORE_CLEAR; + +#if defined(DEBUG) || defined(RCHECK) +#define ASSERT(p) if (!(p)) botch("p") +#include +static +botch(s) + char *s; +{ + fprintf(stderr, "\r\nassertion botched: %s\r\n", s); + (void) fflush(stderr); /* just in case user buffered it */ + abort(); +} +#else +#define ASSERT(p) +#endif + +/* ========================================================================== + * morecore() + * + * Allocate more memory to the indicated bucket + */ +static inline void morecore(int bucket) +{ + register union overhead *op; + register int sz; /* size of desired block */ + int amt; /* amount to allocate */ + int nblks; /* how many blocks we get */ + + /* + * sbrk_size <= 0 only for big, FLUFFY, requests (about + * 2^30 bytes on a VAX, I think) or for a negative arg. + */ + sz = 1 << (bucket + 3); +#ifdef DEBUG + ASSERT(sz > 0); +#else + if (sz <= 0) + return; +#endif + if (sz < pagesz) { + amt = pagesz; + nblks = amt / sz; + } else { + amt = sz + pagesz; + nblks = 1; + } + op = (union overhead *)sbrk(amt); + /* no more room! */ + if ((int)op == -1) + return; + /* + * Add new memory allocated to that on + * free list for this hash bucket. + */ + nextf[bucket] = op; + while (--nblks > 0) { + op->ov_next = (union overhead *)((caddr_t)op + sz); + op = (union overhead *)((caddr_t)op + sz); + } +} + +/* ========================================================================== + * malloc() + */ +void *malloc(size_t nbytes) +{ + union overhead *op; + unsigned int amt; + int bucket, n; + semaphore *lock; + + lock = &malloc_lock; + while(SEMAPHORE_TEST_AND_SET(lock)) { + pthread_yield(); + } + /* + * First time malloc is called, setup page size and + * align break pointer so all data will be page aligned. + */ + if (pagesz == 0) { + pagesz = n = getpagesize(); + op = (union overhead *)sbrk(0); + n = n - sizeof (*op) - ((int)op & (n - 1)); + if (n < 0) + n += pagesz; + if (n) { + if (sbrk(n) == (char *)-1) + return (NULL); + } + bucket = 0; + amt = 8; + while (pagesz > amt) { + amt <<= 1; + bucket++; + } + pagebucket = bucket; + } + /* + * Convert amount of memory requested into closest block size + * stored in hash buckets which satisfies request. + * Account for space used per block for accounting. 
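The bucket free lists that malloc() pops from below are plain LIFO chains threaded through the free blocks themselves, the way morecore() above builds them. A minimal standalone model of that mechanism, with a hypothetical union block and a fixed region[] standing in for the sbrk()ed memory:

#include <stdio.h>

/* Hypothetical miniature of one bucket's free list: blocks are chained
 * through their own first word while free, the same trick morecore()
 * uses after carving a region into equal-sized blocks. */
union block {
    union block *next;          /* valid only while the block is free */
    char space[32];             /* payload for this demo bucket       */
};

static union block region[4];   /* stands in for the sbrk()ed memory  */
static union block *freelist;

static void carve_region(void)  /* morecore(): thread blocks together */
{
    for (int i = 0; i < 4; i++) {
        region[i].next = freelist;
        freelist = &region[i];
    }
}

static void *bucket_alloc(void) /* malloc(): pop the head of the list */
{
    union block *b = freelist;
    if (b != NULL)
        freelist = b->next;
    return b;
}

static void bucket_free(void *p)/* free(): push the block back        */
{
    union block *b = p;
    b->next = freelist;
    freelist = b;
}

int main(void)
{
    carve_region();
    void *a = bucket_alloc();
    void *b = bucket_alloc();
    printf("got %p and %p\n", a, b);
    bucket_free(a);
    printf("after free, next alloc reuses it: %p\n", bucket_alloc());
    return 0;
}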
+ */ + if (nbytes <= (n = pagesz - sizeof (*op) - RSLOP)) { +#ifndef RCHECK + amt = 8; /* size of first bucket */ + bucket = 0; +#else + amt = 16; /* size of first bucket */ + bucket = 1; +#endif + n = -(sizeof (*op) + RSLOP); + } else { + amt = pagesz; + bucket = pagebucket; + } + while (nbytes > amt + n) { + amt <<= 1; + if (amt == 0) { + SEMAPHORE_RESET(lock); + return (NULL); + } + bucket++; + } + /* + * If nothing in hash bucket right now, + * request more memory from the system. + */ + if ((op = nextf[bucket]) == NULL) { + morecore(bucket); + if ((op = nextf[bucket]) == NULL) { + SEMAPHORE_RESET(lock); + return (NULL); + } + } + /* remove from linked list */ + nextf[bucket] = op->ov_next; + op->ov_magic = MAGIC; + op->ov_index = bucket; +#ifdef RCHECK + /* + * Record allocated size of block and + * bound space with magic numbers. + */ + op->ov_size = (nbytes + RSLOP - 1) & ~(RSLOP - 1); + op->ov_rmagic = RMAGIC; + *(u_short *)((caddr_t)(op + 1) + op->ov_size) = RMAGIC; +#endif + SEMAPHORE_RESET(lock); + return ((char *)(op + 1)); +} + +/* ========================================================================== + * free() + */ +void free(void *cp) +{ + union overhead *op; + semaphore *lock; + int size; + + lock = &malloc_lock; + while(SEMAPHORE_TEST_AND_SET(lock)) { + pthread_yield(); + } + if (cp == NULL) { + SEMAPHORE_RESET(lock); + return; + } + op = (union overhead *)((caddr_t)cp - sizeof (union overhead)); +#ifdef DEBUG + ASSERT(op->ov_magic == MAGIC); /* make sure it was in use */ +#else + if (op->ov_magic != MAGIC) { + SEMAPHORE_RESET(lock); + return; /* sanity */ + } +#endif +#ifdef RCHECK + ASSERT(op->ov_rmagic == RMAGIC); + ASSERT(*(u_short *)((caddr_t)(op + 1) + op->ov_size) == RMAGIC); +#endif + size = op->ov_index; + ASSERT(size < NBUCKETS); + op->ov_next = nextf[size]; /* also clobbers ov_magic */ + nextf[size] = op; + + SEMAPHORE_RESET(lock); +} + +/* ========================================================================== + * realloc() + * + * Storage compaction is no longer supported, fix program and try again. + */ +void *realloc(void *cp, size_t nbytes) +{ + u_int onb; + int i; + semaphore *lock; + union overhead *op; + char *res; + + if (cp == NULL) + return (malloc(nbytes)); + op = (union overhead *)((caddr_t)cp - sizeof (union overhead)); + + if (op->ov_magic == MAGIC) { + i = op->ov_index; + } else { + /* + * This will cause old programs using storage compaction feature of + * realloc to break in a pseudo resonable way that is easy to debug. + * Returning a malloced buffer without the copy may cause + * indeterministic behavior. + */ + return(NULL); + } + + lock = &malloc_lock; + while(SEMAPHORE_TEST_AND_SET(lock)) { + pthread_yield(); + } + onb = 1 << (i + 3); + if (onb < pagesz) + onb -= sizeof (*op) + RSLOP; + else + onb += pagesz - sizeof (*op) - RSLOP; + + /* avoid the copy if same size block */ + if (i) { + i = 1 << (i + 2); + if (i < pagesz) + i -= sizeof (*op) + RSLOP; + else + i += pagesz - sizeof (*op) - RSLOP; + } + + if (nbytes <= onb && nbytes > i) { +#ifdef RCHECK + op->ov_size = (nbytes + RSLOP - 1) & ~(RSLOP - 1); + *(u_short *)((caddr_t)(op + 1) + op->ov_size) = RMAGIC; +#endif + SEMAPHORE_RESET(lock); + return(cp); + } + SEMAPHORE_RESET(lock); + + if ((res = malloc(nbytes)) == NULL) { + free(cp); + return (NULL); + } + + bcopy(cp, res, (nbytes < onb) ? 
nbytes : onb); + free(cp); + + return (res); +} + diff --git a/lib/libpthread/pthreads/mutex.c b/lib/libpthread/pthreads/mutex.c new file mode 100644 index 0000000000..2d3274b5fb --- /dev/null +++ b/lib/libpthread/pthreads/mutex.c @@ -0,0 +1,196 @@ +/* ==== mutex.c ============================================================ + * Copyright (c) 1993 by Chris Provenzano, proven@athena.mit.edu + * + * Description : Queue functions. + * + * 1.00 93/07/19 proven + * -Started coding this file. + */ + +#include +#include "pthread.h" +#include + +/* + * Basic mutex functionality + + * This is the basic lock order + * queue + * pthread + * global + * + * semaphore functionality is defined in machdep.h + */ + +/* ========================================================================== + * pthread_mutex_init() + * + * In this implementation I don't need to allocate memory. + * ENOMEM, EAGAIN should never be returned. Arch that have + * weird constraints may need special coding. + */ +int pthread_mutex_init(pthread_mutex_t *mutex, pthread_mutexattr_t *mutex_attr) +{ + /* Only check if attr specifies some mutex type other than fast */ + if ((mutex_attr) && (mutex_attr->m_type != MUTEX_TYPE_FAST)) { + if (mutex_attr->m_type >= MUTEX_TYPE_MAX) { + return(EINVAL); + } + if (mutex->m_flags & MUTEX_FLAGS_INITED) { + return(EBUSY); + } + mutex->m_type = mutex_attr->m_type; + } else { + mutex->m_type = MUTEX_TYPE_FAST; + } + /* Set all other paramaters */ + pthread_queue_init(&mutex->m_queue); + mutex->m_flags |= MUTEX_FLAGS_INITED; + mutex->m_lock = SEMAPHORE_CLEAR; + mutex->m_owner = NULL; + return(OK); +} + +/* ========================================================================== + * pthread_mutex_destroy() + */ +int pthread_mutex_destroy(pthread_mutex_t *mutex) +{ + /* Only check if mutex is of type other than fast */ + switch(mutex->m_type) { + case MUTEX_TYPE_FAST: + break; + case MUTEX_TYPE_STATIC_FAST: + default: + return(EINVAL); + break; + } + + /* Cleanup mutex, others might want to use it. */ + pthread_queue_init(&mutex->m_queue); + mutex->m_flags |= MUTEX_FLAGS_INITED; + mutex->m_lock = SEMAPHORE_CLEAR; + mutex->m_owner = NULL; + mutex->m_flags = 0; + return(OK); +} + +/* ========================================================================== + * pthread_mutex_trylock() + */ +int pthread_mutex_trylock(pthread_mutex_t *mutex) +{ + semaphore *lock; + int rval; + + lock = &(mutex->m_lock); + while (SEMAPHORE_TEST_AND_SET(lock)) { + pthread_yield(); + } + + switch (mutex->m_type) { + /* + * Fast mutexes do not check for any error conditions. + */ + case MUTEX_TYPE_FAST: + case MUTEX_TYPE_STATIC_FAST: + if (!mutex->m_owner) { + mutex->m_owner = pthread_run; + rval = OK; + } else { + rval = EBUSY; + } + break; + default: + rval = EINVAL; + break; + } + SEMAPHORE_RESET(lock); + return(rval); +} + +/* ========================================================================== + * pthread_mutex_lock() + */ +int pthread_mutex_lock(pthread_mutex_t *mutex) +{ + semaphore *lock, *plock; + int rval; + + lock = &(mutex->m_lock); + while (SEMAPHORE_TEST_AND_SET(lock)) { + pthread_yield(); + } + + switch (mutex->m_type) { + /* + * Fast mutexes do not check for any error conditions. 
+ */ + case MUTEX_TYPE_FAST: + case MUTEX_TYPE_STATIC_FAST: + if (mutex->m_owner) { + plock = &(pthread_run->lock); + while (SEMAPHORE_TEST_AND_SET(plock)) { + pthread_yield(); + } + pthread_queue_enq(&mutex->m_queue, pthread_run); + SEMAPHORE_RESET(lock); + + /* Reschedule will unlock pthread_run */ + reschedule(PS_MUTEX_WAIT); + return(OK); + } + mutex->m_owner = pthread_run; + rval = OK; + break; + default: + rval = EINVAL; + break; + } + SEMAPHORE_RESET(lock); + return(rval); +} + +/* ========================================================================== + * pthread_mutex_unlock() + */ +int pthread_mutex_unlock(pthread_mutex_t *mutex) +{ + struct pthread *pthread; + semaphore *lock, *plock; + int rval; + + lock = &(mutex->m_lock); + while (SEMAPHORE_TEST_AND_SET(lock)) { + pthread_yield(); + } + + switch (mutex->m_type) { + /* + * Fast mutexes do not check for any error conditions. + */ + case MUTEX_TYPE_FAST: + case MUTEX_TYPE_STATIC_FAST: + if (pthread = pthread_queue_get(&mutex->m_queue)) { + plock = &(pthread->lock); + while (SEMAPHORE_TEST_AND_SET(plock)) { + pthread_yield(); + } + mutex->m_owner = pthread; + + /* Reset pthread state */ + pthread_queue_deq(&mutex->m_queue); + pthread->state = PS_RUNNING; + SEMAPHORE_RESET(plock); + } else { + mutex->m_owner = NULL; + } + rval = OK; + break; + default: + rval = EINVAL; + break; + } + SEMAPHORE_RESET(lock); + return(rval); +} diff --git a/lib/libpthread/pthreads/pthread.c b/lib/libpthread/pthreads/pthread.c new file mode 100644 index 0000000000..ab0d962679 --- /dev/null +++ b/lib/libpthread/pthreads/pthread.c @@ -0,0 +1,168 @@ +/* ==== pthread.c ============================================================ + * Copyright (c) 1993 by Chris Provenzano, proven@athena.mit.edu + * + * Description : Pthread functions. + * + * 1.00 93/07/26 proven + * -Started coding this file. + */ + +#include +#include "pthread.h" +#include +#include + +/* + * These first functions really should not be called by the user. + */ + +/* ========================================================================== + * pthread_init() + * + * This function should be called in crt0.o before main() is called. + * But on some systems It may not be possible to change crt0.o so currently + * I'm requiring this function to be called first thing after main. + * Actually I'm assuming it is, because I do no locking here. + */ +void pthread_init(void) +{ + struct machdep_pthread machdep_data = MACHDEP_PTHREAD_INIT; + + /* Initialize the signal handler. */ + sig_init(); + + /* Initialize the fd table. */ + fd_init(); + + /* Initialize the first thread */ + if (pthread_initial = (pthread_t)malloc(sizeof(struct pthread))) { + bcopy(machdep_data, &(pthread_initial->machdep_data), sizeof(machdep_data)); + pthread_initial->state = PS_RUNNING; + pthread_initial->queue = NULL; + pthread_initial->next = NULL; + pthread_initial->pll = NULL; + + pthread_initial->lock = SEMAPHORE_CLEAR; + pthread_initial->error = 0; + + pthread_link_list = pthread_initial; + pthread_run = pthread_initial; + return; + } + PANIC(); +} + +/* ========================================================================== + * pthread_cleanup() + */ +void pthread_cleanup(pthread_t *thread) +{ + void *stack; + + /* Check attr to see what needs cleanup. 
*/ + if (stack = (void *)machdep_pthread_cleanup(&((*thread)->machdep_data))) { + free(stack); + } + free(*thread); +} + +/* ========================================================================== + * pthread_yield() + */ +void pthread_yield() +{ + sig_handler_fake(SIGVTALRM); +} + +/* ======================================================================= */ +/* ========================================================================== + * pthread_self() + */ +pthread_t pthread_self() +{ + return(pthread_run); +} + +/* ========================================================================== + * pthread_equal() + */ +int pthread_equal(pthread_t t1, pthread_t t2) +{ + return(t1 == t2); +} + +/* ========================================================================== + * pthread_exit() + * + * Once this routine gets the lock it never gives it up. + * Joining with a thread that has exited is not valid anymore, so + * there now is no valid opperation that can be done to a thread once it + * has done the pthread_exit(). + * It doesn't matter if a context switch occurs before yield is called + * but after the state is set. + */ +void pthread_exit(void *status) +{ + semaphore *lock; + + lock = &pthread_run->lock; + if (SEMAPHORE_TEST_AND_SET(lock)) { + pthread_yield(); + } + pthread_run->state = PS_DEAD; + pthread_yield(); +} + +/* ========================================================================== + * pthread_create() + * + * After the new thread structure is allocated and set up, it is added to + * pthread_run_next_queue, which requires a sig_prevent(), + * sig_check_and_resume() + */ +int pthread_create(pthread_t *thread, const pthread_attr_t *attr, + void * (*start_routine)(void *), void *arg) +{ + long nsec = 100000000; + void *stack; + + if ((*thread) = (pthread_t)malloc(sizeof(struct pthread))) { + + if (! attr) { attr = &pthread_default_attr; } + + /* Get a stack, if necessary */ + if ((stack = attr->stackaddr_attr) || + (stack = (void *)malloc(attr->stacksize_attr))) { + + machdep_pthread_create(&((*thread)->machdep_data), + start_routine, arg, 65536, stack, nsec); + + memcpy(&(*thread)->attr, attr, sizeof(pthread_attr_t)); + + (*thread)->queue = NULL; + (*thread)->next = NULL; + + (*thread)->lock = SEMAPHORE_CLEAR; + (*thread)->error = 0; + + sig_prevent(); + + /* Add to the link list of all threads. */ + (*thread)->pll = pthread_link_list; + pthread_link_list = (*thread); + + (*thread)->state = PS_RUNNING; + sig_check_and_resume(); + + return(OK); + } + free((*thread)); + } + return(ENOMEM); +} + +/* ========================================================================== + * pthread_cancel() + * + * This routine will also require a sig_prevent/sig_check_and_resume() + */ diff --git a/lib/libpthread/pthreads/pthread_attr.c b/lib/libpthread/pthreads/pthread_attr.c new file mode 100644 index 0000000000..2c7bb5bee3 --- /dev/null +++ b/lib/libpthread/pthreads/pthread_attr.c @@ -0,0 +1,69 @@ +/* ==== pthread_attr.c ======================================================= + * Copyright (c) 1993 by Chris Provenzano, proven@athena.mit.edu + * + * Description : Pthread attribute functions. + * + * 1.00 93/11/04 proven + * -Started coding this file. + */ + +#include +#include "pthread.h" +#include + +/* Currently we do no locking, should we just to be safe? 
CAP */ +/* ========================================================================== + * pthread_attr_init() + */ +int pthread_attr_init(pthread_attr_t *attr) +{ + memcpy(attr, &pthread_default_attr, sizeof(pthread_attr_t)); + return(OK); +} + +/* ========================================================================== + * pthread_attr_destroy() + */ +int pthread_attr_destroy(pthread_attr_t *attr) +{ + return(OK); +} + +/* ========================================================================== + * pthread_attr_getstacksize() + */ +int pthread_attr_getstacksize(pthread_attr_t *attr, size_t * stacksize) +{ + *stacksize = attr->stacksize_attr; + return(OK); +} + +/* ========================================================================== + * pthread_attr_setstacksize() + */ +int pthread_attr_setstacksize(pthread_attr_t *attr, size_t stacksize) +{ + if (stacksize >= PTHREAD_STACK_MIN) { + attr->stacksize_attr = stacksize; + return(OK); + } + return(EINVAL); +} + +/* ========================================================================== + * pthread_attr_getstackaddr() + */ +int pthread_attr_getstackaddr(pthread_attr_t *attr, void ** stackaddr) +{ + *stackaddr = attr->stackaddr_attr; + return(OK); +} + +/* ========================================================================== + * pthread_attr_setstackaddr() + */ +int pthread_attr_setstackaddr(pthread_attr_t *attr, void * stackaddr) +{ + attr->stackaddr_attr = stackaddr; + return(OK); +} diff --git a/lib/libpthread/pthreads/queue.c b/lib/libpthread/pthreads/queue.c new file mode 100644 index 0000000000..984760b5a8 --- /dev/null +++ b/lib/libpthread/pthreads/queue.c @@ -0,0 +1,92 @@ +/* ==== queue.c ============================================================ + * Copyright (c) 1993 by Chris Provenzano, proven@athena.mit.edu + * + * Description : Queue functions. + * + * 1.00 93/07/15 proven + * -Started coding this file. + */ + +#include +#include + +/* + * All routines in this file assume that the queue has been appropriatly + * locked. 
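A standalone sketch of the calling convention this implies: take the owning lock, touch the queue, release. It is not part of the patch; the volatile flag and test_and_set() below are single-threaded stand-ins for the machdep SEMAPHORE_TEST_AND_SET()/SEMAPHORE_RESET() primitives, which must be atomic on a real port:

#include <stdio.h>

/* Single-threaded stand-ins for the machdep semaphore primitives; a
 * real port needs an atomic test-and-set instruction here. */
static volatile int queue_lock;                 /* 0 = clear, 1 = held */

static int test_and_set(volatile int *lock)
{
    int old = *lock;                            /* NOT atomic: demo only */
    *lock = 1;
    return old;
}

struct node { int id; struct node *next; };
static struct node *q_head, *q_last;

static void queue_enq(struct node *n)           /* assumes lock is held */
{
    n->next = NULL;
    if (q_last)
        q_last->next = n;
    else
        q_head = n;
    q_last = n;
}

static struct node *queue_deq(void)             /* assumes lock is held */
{
    struct node *n = q_head;
    if (n && (q_head = n->next) == NULL)
        q_last = NULL;
    return n;
}

int main(void)
{
    struct node a = { 1, NULL }, b = { 2, NULL };
    struct node *first, *second;

    while (test_and_set(&queue_lock))           /* a thread would yield */
        ;
    queue_enq(&a);
    queue_enq(&b);
    queue_lock = 0;                             /* SEMAPHORE_RESET()    */

    while (test_and_set(&queue_lock))
        ;
    first  = queue_deq();
    second = queue_deq();
    queue_lock = 0;

    printf("dequeued %d then %d\n", first->id, second->id);
    return 0;
}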
+ */ + +/* ========================================================================== + * pthread_queue_init() + */ +void pthread_queue_init(struct pthread_queue *queue) +{ + queue->q_next = NULL; + queue->q_last = NULL; + queue->q_data = NULL; +} + +/* ========================================================================== + * pthread_queue_enq() + */ +void pthread_queue_enq(struct pthread_queue *queue, struct pthread *thread) +{ + if (queue->q_last) { + queue->q_last->next = thread; + } else { + queue->q_next = thread; + } + queue->q_last = thread; + thread->queue = queue; + thread->next = NULL; + +} + +/* ========================================================================== + * pthread_queue_get() + */ +struct pthread *pthread_queue_get(struct pthread_queue *queue) +{ + return(queue->q_next); +} + +/* ========================================================================== + * pthread_queue_deq() + */ +struct pthread *pthread_queue_deq(struct pthread_queue *queue) +{ + struct pthread *thread = NULL; + + if (queue->q_next) { + thread = queue->q_next; + if (!(queue->q_next = queue->q_next->next)) { + queue->q_last = NULL; + } + thread->queue = NULL; + thread->next = NULL; + } + return(thread); +} + +/* ========================================================================== + * pthread_queue_remove() + */ +void pthread_queue_remove(struct pthread_queue *queue, struct pthread *thread) +{ + struct pthread **current = &(queue->q_next); + struct pthread *prev = NULL; + + while (*current) { + if (*current == thread) { + if ((*current)->next) { + *current = (*current)->next; + } else { + queue->q_last = prev; + *current = NULL; + } + } + prev = *current; + current = &((*current)->next); + } + thread->queue = NULL; + thread->next = NULL; +} diff --git a/lib/libpthread/pthreads/signal.c b/lib/libpthread/pthreads/signal.c new file mode 100644 index 0000000000..12d0b3e372 --- /dev/null +++ b/lib/libpthread/pthreads/signal.c @@ -0,0 +1,308 @@ +/* ==== signal.c ============================================================ + * Copyright (c) 1993 by Chris Provenzano, proven@athena.mit.edu + * + * Description : Queue functions. + * + * 1.00 93/07/21 proven + * -Started coding this file. + */ + +#include +#include "pthread.h" +#include + +/* + * Global for user-kernel lock, and blocked signals + */ +static volatile sigset_t sig_to_process; +static volatile int kernel_lock = 0; +static volatile int sig_count = 0; + +static void set_thread_timer(); +void sig_prevent(void); +void sig_resume(void); + +/* ========================================================================== + * context_switch() + * + * This routine saves the current state of the running thread gets + * the next thread to run and restores it's state. To allow different + * processors to work with this routine, I allow the machdep_restore_state() + * to either return or have it return from machdep_save_state with a value + * other than 0, this is for implementations which use setjmp/longjmp. 
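That save/restore convention maps directly onto setjmp()/longjmp(); a minimal standalone illustration of the "save returns twice" idea (this is not the machdep code itself):

#include <setjmp.h>
#include <stdio.h>

static jmp_buf saved_state;

/* Plays the part of machdep_restore_state(): jump back into the saved
 * context, making the earlier save return a second time, nonzero. */
static void restore_state(void)
{
    longjmp(saved_state, 1);
}

int main(void)
{
    /* setjmp() plays the part of machdep_save_state(). */
    if (setjmp(saved_state) == 0) {
        printf("state saved, pretending to switch away...\n");
        restore_state();
        /* not reached */
    }
    printf("resumed: the save returned nonzero this time\n");
    return 0;
}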
+ */ +void fd_kern_wait() { + fd_kern_poll(); +} + +static void context_switch() +{ + struct pthread **current, *next; + + /* save state of current thread */ + if (machdep_save_state()) { + return; + } + + if (pthread_run = pthread_queue_deq(&pthread_current_queue)) { + /* restore state of new current thread */ + machdep_restore_state(); + return; + } + /* Poll all the kernel fds */ + fd_kern_poll(); + +context_switch_reschedule:; + /* + * Go through the reschedule list once, this is the only place + * that goes through the queue without using the queue routines. + * + * But first delete the current queue. + */ + pthread_queue_init(&pthread_current_queue); + current = &(pthread_link_list); + while (*current) { + switch((*current)->state) { + case PS_RUNNING: + pthread_queue_enq(&pthread_current_queue, *current); + current = &((*current)->pll); + break; + case PS_DEAD: + /* Cleanup thread */ + next = (*current)->pll; + pthread_cleanup(current); + *current = next; + break; + default: + /* Should be on a different queue. Ignore. */ + current = &((*current)->pll); + break; + } + } + + /* Are there any threads at all */ + if (!pthread_link_list) { + exit(0); + } + + if (pthread_run = pthread_queue_deq(&pthread_current_queue)) { + /* restore state of new current thread */ + machdep_restore_state(); + return; + } + + /* + * Okay, make sure the context switch timer is off, so we don't get any + * SIG_VTALRM signals while waiting for a fd to unblock. + */ + /* machdep_unset_thread_timer(); + sigdelset(&sig_to_process, SIGVTALRM); */ + + /* Well have to unlock the kernel/then relock it but that should be ok */ + fd_kern_wait(); + goto context_switch_reschedule; +} + +/* ========================================================================== + * context_switch_done() + * + * This routine does all the things that are necessary after a context_switch() + * calls the machdep_restore_state(). DO NOT put this in the context_switch() + * routine because sometimes the machdep_restore_state() doesn't return + * to context_switch() but instead ends up in machdep_thread_start() or + * some such routine, which will need to call this routine and + * sig_check_and_resume(). + */ +void context_switch_done() +{ + sigdelset(&sig_to_process, SIGVTALRM); + set_thread_timer(); +} + +/* ========================================================================== + * set_thread_timer() + * + * Assums kernel is locked. + */ +static void set_thread_timer() +{ + static int last_sched_attr = SCHED_RR; + + switch (pthread_run->attr.sched_attr) { + case SCHED_RR: + machdep_set_thread_timer(&(pthread_run->machdep_data)); + break; + case SCHED_FIFO: + if (last_sched_attr != SCHED_FIFO) { + machdep_unset_thread_timer(); + } + break; + case SCHED_IO: + if (last_sched_attr != SCHED_IO) { + machdep_set_thread_timer(&(pthread_run->machdep_data)); + } + break; + default: + machdep_set_thread_timer(&(pthread_run->machdep_data)); + break; + } +} + +/* ========================================================================== + * sig_handler() + * + * Assumes the kernel is locked. 
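A standalone, single-threaded model of the deferral implemented by sig_handler_real() and sig_check_and_resume() below: while kernel_lock is held a signal is only recorded, and whoever drops the lock replays it. The pending bit mask stands in for sig_to_process; nothing here is async-signal-safe, it only shows the bookkeeping:

#include <stdio.h>

static int      kernel_lock;    /* nesting count, like sig_prevent()       */
static unsigned pending;        /* bit mask standing in for sig_to_process */

static void handle(int sig)
{
    printf("handling signal %d\n", sig);
}

/* Like sig_handler_real(): defer if kernel code holds the lock. */
static void deliver(int sig)
{
    if (kernel_lock) {
        pending |= 1u << sig;   /* just remember it for later */
        return;
    }
    kernel_lock++;              /* sig_prevent() */
    handle(sig);
    kernel_lock--;              /* sig_resume()  */
}

/* Like sig_check_and_resume(): drop the lock, replaying pending work. */
static void unlock_and_replay(void)
{
    while (--kernel_lock == 0) {
        if (!pending)
            break;
        kernel_lock++;          /* relock while draining the mask */
        for (int sig = 1; sig < 32; sig++) {
            if (pending & (1u << sig)) {
                pending &= ~(1u << sig);
                handle(sig);
            }
        }
    }
}

int main(void)
{
    kernel_lock++;              /* enter "kernel" code            */
    deliver(7);                 /* arrives while locked: deferred */
    printf("locked, signal 7 pending: %s\n", pending ? "yes" : "no");
    unlock_and_replay();        /* handler runs here              */
    deliver(7);                 /* unlocked: handled immediately  */
    return 0;
}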
+ */ +static void sig_handler(int sig) +{ + sig_handler_top:; + + switch(sig) { + case 0: + break; + case SIGVTALRM: + if (sig_count) { + sigset_t sigall; + + sig_count = 0; + + /* Unblock all signals */ + sigemptyset(&sigall); + sigprocmask(SIG_SETMASK, &sigall, NULL); + } + context_switch(); + context_switch_done(); + break; + case SIGALRM: + /* if (sleep_wakeup()) { + break; + } */ + /* Do the defaul action no threads were sleeping */ + default: + PANIC(); + } + + /* Determine if there are any other signals */ + if (sig_to_process) { + for (sig = 1; sig <= SIGMAX; sig++) { + if (sigismember(&sig_to_process, sig)) { + + /* goto sig_handler_top */ + goto sig_handler_top; + } + } + } +} + +/* ========================================================================== + * sig_handler_real() + * + * On a multi-processor this would need to use the test and set instruction + * otherwise the following will work. + */ +void sig_handler_real(int sig) +{ + if (kernel_lock) { + sigaddset(&sig_to_process, sig); + return; + } + sig_prevent(); + sig_count++; + sig_handler(sig); + sig_resume(); +} + +/* ========================================================================== + * sig_handler_fake() + */ +void sig_handler_fake(int sig) +{ + if (kernel_lock) { + /* Currently this should be impossible */ + PANIC(); + } + sig_prevent(); + sig_handler(sig); + sig_resume(); +} + +/* ========================================================================== + * reschedule() + * + * This routine assumes that the caller is the current pthread, pthread_run + * and that it has a lock on itself and that it wants to reschedule itself. + */ +void reschedule(enum pthread_state state) +{ + semaphore *plock; + + if (kernel_lock) { + /* Currently this should be impossible */ + PANIC(); + } + sig_prevent(); + pthread_run->state = state; + SEMAPHORE_RESET((plock = &(pthread_run->lock))); + sig_handler(SIGVTALRM); + sig_resume(); +} + +/* ========================================================================== + * sig_prevent() + */ +void sig_prevent(void) +{ + kernel_lock++; +} + +/* ========================================================================== + * sig_resume() + */ +void sig_resume() +{ + kernel_lock--; +} + +/* ========================================================================== + * sig_check_and_resume() + */ +void sig_check_and_resume() +{ + /* Some routine name that is yet to be determined. */ + + /* Only bother if we are truely unlocking the kernel */ + while (!(--kernel_lock)) { + + /* Assume sigset_t is not a struct or union */ + if (sig_to_process) { + kernel_lock++; + sig_handler(0); + } else { + break; + } + } +} + +/* ========================================================================== + * sig_init() + * + * SIGVTALRM (NOT POSIX) needed for thread timeslice timeouts. + * Since it's not POSIX I will replace it with a + * virtual timer for threads. + * SIGALRM (IS POSIX) so some special handling will be + * necessary to fake SIGALRM signals + */ +void sig_init(void) +{ + int sig_to_init[] = { SIGVTALRM, SIGALRM, 0 }; + int i; + + /* Initialize only the necessary signals */ + + for (i = 0; sig_to_init[i]; i++) { + if (signal(sig_to_init[i], sig_handler_real)) { + PANIC(); + } + } +} + -- 2.20.1