| author | Ulrich Drepper <drepper@redhat.com> | 2000-04-13 05:57:21 +0000 |
|---|---|---|
| committer | Ulrich Drepper <drepper@redhat.com> | 2000-04-13 05:57:21 +0000 |
| commit | d8d914df6806c6057b20c7311cad0bc2ac201c03 (patch) | |
| tree | 6d2512373ef92b0abbebd4e0d0761cdd9715ea0b /linuxthreads | |
| parent | b3ae0650bcff54f12d87f878000d4c488b365bf7 (diff) | |
| download | glibc-d8d914df6806c6057b20c7311cad0bc2ac201c03.tar.xz glibc-d8d914df6806c6057b20c7311cad0bc2ac201c03.zip | |
Update.
* sysdeps/pthread/pthread.h: Add prototypes for pthread_spin_init,
pthread_spin_destroy, pthread_spin_lock, pthread_spin_trylock,
and pthread_spin_unlock.
* sysdeps/pthread/bits/pthreadtypes.h: Change struct _pthread_fastlock
into pthread_spinlock_t. Change all uses.
* spinlock.c: Implement pthread_spin_lock.
Rename __pthread_unlock to __pthread_spin_unlock and define weak
alias for real name.
Define pthread_spin_trylock, pthread_spin_init, and
pthread_spin_destroy.
Change all uses of _pthread_fastlock to pthread_spinlock_t.
* spinlock.h: Rename __pthread_unlock to __pthread_spin_unlock.
Change all uses of _pthread_fastlock to pthread_spinlock_t.
* Versions [libpthread] (GLIBC_2.2): Add pthread_spin_init,
pthread_spin_destroy, pthread_spin_lock, pthread_spin_trylock,
and pthread_spin_unlock.
* cancel.c: Use __pthread_spin_unlock instead of __pthread_unlock.
Change all uses of _pthread_fastlock to pthread_spinlock_t.
* condvar.c: Likewise.
* internals.h: Likewise.
* join.c: Likewise.
* manager.c: Likewise.
* mutex.c: Likewise.
* pthread.c: Likewise.
* rwlock.c: Likewise.
* semaphore.c: Likewise.
* signals.c: Likewise.
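
The five functions exported here form the POSIX spin-lock interface that becomes official API in glibc 2.2. The sketch below is purely illustrative (the counter, the `worker` function, and the thread count are invented for this example, not taken from the commit); it assumes the standard POSIX signatures, in particular that `pthread_spin_init` takes a process-shared flag as its second argument.

```c
#include <pthread.h>
#include <stdio.h>

/* Illustrative only: two threads bump a shared counter under the new
   spin-lock interface added by this commit. */
static pthread_spinlock_t lock;
static long counter;

static void *
worker (void *arg)
{
  int i;
  (void) arg;
  for (i = 0; i < 100000; ++i)
    {
      pthread_spin_lock (&lock);        /* busy-waits until the lock is free */
      ++counter;
      pthread_spin_unlock (&lock);
    }
  return NULL;
}

int
main (void)
{
  pthread_t t1, t2;

  /* The second argument is the pshared flag; here the lock is private
     to this process. */
  pthread_spin_init (&lock, PTHREAD_PROCESS_PRIVATE);

  pthread_create (&t1, NULL, worker, NULL);
  pthread_create (&t2, NULL, worker, NULL);
  pthread_join (t1, NULL);
  pthread_join (t2, NULL);

  pthread_spin_destroy (&lock);
  printf ("counter = %ld\n", counter);
  return 0;
}
```

Spin locks busy-wait instead of suspending the caller, so they only pay off for very short critical sections; `pthread_spin_trylock` provides the non-blocking variant when spinning is not acceptable.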
Diffstat (limited to 'linuxthreads')
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | linuxthreads/ChangeLog | 28 |
| -rw-r--r-- | linuxthreads/Versions | 2 |
| -rw-r--r-- | linuxthreads/cancel.c | 10 |
| -rw-r--r-- | linuxthreads/condvar.c | 16 |
| -rw-r--r-- | linuxthreads/internals.h | 4 |
| -rw-r--r-- | linuxthreads/join.c | 28 |
| -rw-r--r-- | linuxthreads/manager.c | 16 |
| -rw-r--r-- | linuxthreads/mutex.c | 6 |
| -rw-r--r-- | linuxthreads/pthread.c | 14 |
| -rw-r--r-- | linuxthreads/rwlock.c | 24 |
| -rw-r--r-- | linuxthreads/semaphore.c | 36 |
| -rw-r--r-- | linuxthreads/signals.c | 4 |
| -rw-r--r-- | linuxthreads/spinlock.c | 51 |
| -rw-r--r-- | linuxthreads/spinlock.h | 9 |
| -rw-r--r-- | linuxthreads/sysdeps/pthread/bits/pthreadtypes.h | 10 |
| -rw-r--r-- | linuxthreads/sysdeps/pthread/pthread.h | 21 |
16 files changed, 182 insertions, 97 deletions
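
Most of the new code lands in spinlock.c (51 lines changed per the diffstat), which defines the pthread_spin_* entry points next to the existing fast-lock code and, as the commit message notes for __pthread_spin_unlock, exports the public names as weak aliases of the internal double-underscore symbols. The following is only a simplified stand-in, not the linuxthreads implementation: the `my_*` names are invented, and the `__atomic` builtins are a modern shorthand for the per-architecture test-and-set primitives the 2000 code relied on.

```c
#include <errno.h>

/* Hypothetical sketch of a test-and-set spin lock plus glibc's
   strong-name/weak-alias export pattern.  Not the code from this commit. */
typedef volatile int my_spinlock_t;     /* 0 = free, 1 = taken */

int
__my_spin_lock (my_spinlock_t *lock)
{
  /* Keep exchanging 1 in until the previous value was 0, i.e. until we
     are the thread that took the lock. */
  while (__atomic_exchange_n (lock, 1, __ATOMIC_ACQUIRE) != 0)
    ;                                   /* busy wait */
  return 0;
}

int
__my_spin_trylock (my_spinlock_t *lock)
{
  /* Single attempt: succeed only if the lock was free. */
  return __atomic_exchange_n (lock, 1, __ATOMIC_ACQUIRE) == 0 ? 0 : EBUSY;
}

int
__my_spin_unlock (my_spinlock_t *lock)
{
  __atomic_store_n (lock, 0, __ATOMIC_RELEASE);
  return 0;
}

/* glibc's weak_alias macro (include/libc-symbols.h) expands roughly to a
   weak alias attribute, so the public POSIX name resolves to the
   internal double-underscore definition. */
#define weak_alias(name, aliasname) \
  extern __typeof (name) aliasname __attribute__ ((weak, alias (#name)));

weak_alias (__my_spin_unlock, my_spin_unlock)
```

Keeping the implementation under the reserved `__` name and aliasing the public name to it lets other parts of libpthread call the lock routines directly without going through a user-interposable symbol.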
diff --git a/linuxthreads/ChangeLog b/linuxthreads/ChangeLog
index 9e30ebb308..0bd8a4a5cd 100644
--- a/linuxthreads/ChangeLog
+++ b/linuxthreads/ChangeLog
@@ -1,5 +1,33 @@ 2000-04-12 Ulrich Drepper <drepper@redhat.com> + * sysdeps/pthread/pthread.h: Add prototypes for pthread_spin_init, + pthread_spin_destroy, pthread_spin_lock, pthread_spin_trylock, + and pthread_spin_unlock. + * sysdeps/pthread/bits/pthreadtypes.h: Change struct _pthread_fastlock + into pthread_spinlock_t. Change all uses. + * spinlock.c: Implement pthread_spin_lock. + Rename __pthread_unlock to __pthread_spin_unlock and define weak + alias for real name. + Define pthread_spin_trylock, pthread_spin_init, and + pthread_spin_destroy. + Change all uses of _pthread_fastlock to pthread_spinlock_t. + * spinlock.h: Rename __pthread_unlock to __pthread_spin_unlock. + Change all uses of _pthread_fastlock to pthread_spinlock_t. + * Versions [libpthread] (GLIBC_2.2): Add pthread_spin_init, + pthread_spin_destroy, pthread_spin_lock, pthread_spin_trylock, + and pthread_spin_unlock. + * cancel.c: Use __pthread_spin_unlock instead of __pthread_unlock. + Change all uses of _pthread_fastlock to pthread_spinlock_t. + * condvar.c: Likewise. + * internals.h: Likewise. + * join.c: Likewise. + * manager.c: Likewise. + * mutex.c: Likewise. + * pthread.c: Likewise. + * rwlock.c: Likewise. + * semaphore.c: Likewise. + * signals.c: Likewise. + * sysdeps/unix/sysv/linux/bits/posix_opt.h: Add various new POSIX macros. * sysdeps/unix/sysv/linux/i386/bits/posix_opt.h: New file.

diff --git a/linuxthreads/Versions b/linuxthreads/Versions
index 2e3673e1a1..e5348fc59e 100644
--- a/linuxthreads/Versions
+++ b/linuxthreads/Versions
@@ -132,5 +132,7 @@ libpthread { # New functions from IEEE Std. 10003.1-200x. sem_timedwait; + pthread_spin_destroy; pthread_spin_init; pthread_spin_lock; + pthread_spin_trylock; pthread_spin_unlock; } }

diff --git a/linuxthreads/cancel.c b/linuxthreads/cancel.c
index e1e887e0f9..067e3f65ed 100644
--- a/linuxthreads/cancel.c
+++ b/linuxthreads/cancel.c
@@ -58,14 +58,14 @@ int pthread_cancel(pthread_t thread) __pthread_lock(&handle->h_lock, NULL); if (invalid_handle(handle, thread)) { - __pthread_unlock(&handle->h_lock); + __pthread_spin_unlock(&handle->h_lock); return ESRCH; } th = handle->h_descr; if (th->p_canceled) { - __pthread_unlock(&handle->h_lock); + __pthread_spin_unlock(&handle->h_lock); return 0; }
@@ -76,7 +76,7 @@ int pthread_cancel(pthread_t thread) /* If the thread has registered an extrication interface, then invoke the interface. If it returns 1, then we succeeded in dequeuing the thread from whatever waiting object it was enqueued - with. In that case, it is our responsibility to wake it up. + with. In that case, it is our responsibility to wake it up. And also to set the p_woken_by_cancel flag so the woken thread can tell that it was woken by cancellation. */
@@ -85,7 +85,7 @@ int pthread_cancel(pthread_t thread) th->p_woken_by_cancel = dorestart; } - __pthread_unlock(&handle->h_lock); + __pthread_spin_unlock(&handle->h_lock); /* If the thread has suspended or is about to, then we unblock it by issuing a restart, instead of a cancel signal. Otherwise we send
@@ -97,7 +97,7 @@ int pthread_cancel(pthread_t thread) if (dorestart) restart(th); - else + else kill(pid, __pthread_sig_cancel); return 0;

diff --git a/linuxthreads/condvar.c b/linuxthreads/condvar.c
index 1c1b736f40..e1c8119231 100644
--- a/linuxthreads/condvar.c
+++ b/linuxthreads/condvar.c
@@ -66,7 +66,7 @@ static int cond_extricate_func(void *obj, pthread_descr th) __pthread_lock(&cond->__c_lock, self); did_remove = remove_from_queue(&cond->__c_waiting, th); - __pthread_unlock(&cond->__c_lock); + __pthread_spin_unlock(&cond->__c_lock); return did_remove; }
@@ -101,7 +101,7 @@ int pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex) enqueue(&cond->__c_waiting, self); else already_canceled = 1; - __pthread_unlock(&cond->__c_lock); + __pthread_spin_unlock(&cond->__c_lock); if (already_canceled) { __pthread_set_own_extricate_if(self, 0);
@@ -161,7 +161,7 @@ pthread_cond_timedwait_relative_old(pthread_cond_t *cond, enqueue(&cond->__c_waiting, self); else already_canceled = 1; - __pthread_unlock(&cond->__c_lock); + __pthread_spin_unlock(&cond->__c_lock); if (already_canceled) { __pthread_set_own_extricate_if(self, 0);
@@ -231,7 +231,7 @@ pthread_cond_timedwait_relative_old(pthread_cond_t *cond, int was_on_queue; __pthread_lock(&cond->__c_lock, self); was_on_queue = remove_from_queue(&cond->__c_waiting, self); - __pthread_unlock(&cond->__c_lock); + __pthread_spin_unlock(&cond->__c_lock); if (was_on_queue) { __pthread_set_own_extricate_if(self, 0);
@@ -295,7 +295,7 @@ pthread_cond_timedwait_relative_new(pthread_cond_t *cond, enqueue(&cond->__c_waiting, self); else already_canceled = 1; - __pthread_unlock(&cond->__c_lock); + __pthread_spin_unlock(&cond->__c_lock); if (already_canceled) { __pthread_set_own_extricate_if(self, 0);
@@ -361,7 +361,7 @@ pthread_cond_timedwait_relative_new(pthread_cond_t *cond, __pthread_lock(&cond->__c_lock, self); was_on_queue = remove_from_queue(&cond->__c_waiting, self); - __pthread_unlock(&cond->__c_lock); + __pthread_spin_unlock(&cond->__c_lock); if (was_on_queue) { __pthread_set_own_extricate_if(self, 0);
@@ -402,7 +402,7 @@ int pthread_cond_signal(pthread_cond_t *cond) __pthread_lock(&cond->__c_lock, NULL); th = dequeue(&cond->__c_waiting); - __pthread_unlock(&cond->__c_lock); + __pthread_spin_unlock(&cond->__c_lock); if (th != NULL) restart(th); return 0; }
@@ -415,7 +415,7 @@ int pthread_cond_broadcast(pthread_cond_t *cond) /* Copy the current state of the waiting queue and empty it */ tosignal = cond->__c_waiting; cond->__c_waiting = NULL; - __pthread_unlock(&cond->__c_lock); + __pthread_spin_unlock(&cond->__c_lock); /* Now signal each process in the queue */ while ((th = dequeue(&tosignal)) != NULL) restart(th); return 0;

diff --git a/linuxthreads/internals.h b/linuxthreads/internals.h
index 3984512e4d..6b115beaa2 100644
--- a/linuxthreads/internals.h
+++ b/linuxthreads/internals.h
@@ -131,7 +131,7 @@ struct _pthread_descr_struct { pthread_t p_tid; /* Thread identifier */ int p_pid; /* PID of Unix process */ int p_priority; /* Thread priority (== 0 if not realtime) */ - struct _pthread_fastlock * p_lock; /* Spinlock for synchronized accesses */ + pthread_spinlock_t * p_lock; /* Spinlock for synchronized accesses */ int p_signal; /* last signal received */ sigjmp_buf * p_signal_jmp; /* where to siglongjmp on a signal or NULL */ sigjmp_buf * p_cancel_jmp; /* where to siglongjmp on a cancel or NULL */
@@ -183,7 +183,7 @@ struct _pthread_descr_struct { typedef struct pthread_handle_struct * pthread_handle; struct pthread_handle_struct { - struct _pthread_fastlock h_lock; /* Fast lock for sychronized access */ + pthread_spinlock_t h_lock; /* Fast lock for sychronized access */ pthread_descr h_descr; /* Thread descriptor or NULL if invalid */ char * h_bottom; /* Lowest address in the stack thread */ };

diff --git a/linuxthreads/join.c b/linuxthreads/join.c
index 5e6b78ab3b..b703c0d34c 100644
--- a/linuxthreads/join.c
+++ b/linuxthreads/join.c
@@ -62,7 +62,7 @@ void pthread_exit(void * retval) } /* See if someone is joining on us */ joining = THREAD_GETMEM(self, p_joining); - __pthread_unlock(THREAD_GETMEM(self, p_lock)); + __pthread_spin_unlock(THREAD_GETMEM(self, p_lock)); /* Restart joining thread if any */ if (joining != NULL) restart(joining); /* If this is the initial thread, block until all threads have terminated.
@@ -93,7 +93,7 @@ static int join_extricate_func(void *obj, pthread_descr th) jo = handle->h_descr; did_remove = jo->p_joining != NULL; jo->p_joining = NULL; - __pthread_unlock(&handle->h_lock); + __pthread_spin_unlock(&handle->h_lock); return did_remove; }
@@ -113,38 +113,38 @@ int pthread_join(pthread_t thread_id, void ** thread_return) __pthread_lock(&handle->h_lock, self); if (invalid_handle(handle, thread_id)) { - __pthread_unlock(&handle->h_lock); + __pthread_spin_unlock(&handle->h_lock); return ESRCH; } th = handle->h_descr; if (th == self) { - __pthread_unlock(&handle->h_lock); + __pthread_spin_unlock(&handle->h_lock); return EDEADLK; } /* If detached or already joined, error */ if (th->p_detached || th->p_joining != NULL) { - __pthread_unlock(&handle->h_lock); + __pthread_spin_unlock(&handle->h_lock); return EINVAL; } /* If not terminated yet, suspend ourselves. */ if (! th->p_terminated) { /* Register extrication interface */ - __pthread_set_own_extricate_if(self, &extr); + __pthread_set_own_extricate_if(self, &extr); if (!(THREAD_GETMEM(self, p_canceled) && THREAD_GETMEM(self, p_cancelstate) == PTHREAD_CANCEL_ENABLE)) th->p_joining = self; else already_canceled = 1; - __pthread_unlock(&handle->h_lock); + __pthread_spin_unlock(&handle->h_lock); if (already_canceled) { - __pthread_set_own_extricate_if(self, 0); + __pthread_set_own_extricate_if(self, 0); pthread_exit(PTHREAD_CANCELED); } suspend(self); /* Deregister extrication interface */ - __pthread_set_own_extricate_if(self, 0); + __pthread_set_own_extricate_if(self, 0); /* This is a cancellation point */ if (THREAD_GETMEM(self, p_woken_by_cancel)
@@ -156,7 +156,7 @@ int pthread_join(pthread_t thread_id, void ** thread_return) } /* Get return value */ if (thread_return != NULL) *thread_return = th->p_retval; - __pthread_unlock(&handle->h_lock); + __pthread_spin_unlock(&handle->h_lock); /* Send notification to thread manager */ if (__pthread_manager_request >= 0) { request.req_thread = self;
@@ -177,24 +177,24 @@ int pthread_detach(pthread_t thread_id) __pthread_lock(&handle->h_lock, NULL); if (invalid_handle(handle, thread_id)) { - __pthread_unlock(&handle->h_lock); + __pthread_spin_unlock(&handle->h_lock); return ESRCH; } th = handle->h_descr; /* If already detached, error */ if (th->p_detached) { - __pthread_unlock(&handle->h_lock); + __pthread_spin_unlock(&handle->h_lock); return EINVAL; } /* If already joining, don't do anything. */ if (th->p_joining != NULL) { - __pthread_unlock(&handle->h_lock); + __pthread_spin_unlock(&handle->h_lock); return 0; } /* Mark as detached */ th->p_detached = 1; terminated = th->p_terminated; - __pthread_unlock(&handle->h_lock); + __pthread_spin_unlock(&handle->h_lock); /* If already terminated, notify thread manager to reclaim resources */ if (terminated && __pthread_manager_request >= 0) { request.req_thread = thread_self();

diff --git a/linuxthreads/manager.c b/linuxthreads/manager.c
index 6e585be79c..5d7014ab72 100644
--- a/linuxthreads/manager.c
+++ b/linuxthreads/manager.c
@@ -194,7 +194,7 @@ int __pthread_manager_event(void *arg) /* Get the lock the manager will free once all is correctly set up. */ __pthread_lock (THREAD_GETMEM((&__pthread_manager_thread), p_lock), NULL); /* Free it immediately. */ - __pthread_unlock (THREAD_GETMEM((&__pthread_manager_thread), p_lock)); + __pthread_spin_unlock (THREAD_GETMEM((&__pthread_manager_thread), p_lock)); return __pthread_manager(arg); }
@@ -260,7 +260,7 @@ static int pthread_start_thread_event(void *arg) /* Get the lock the manager will free once all is correctly set up. */ __pthread_lock (THREAD_GETMEM(self, p_lock), NULL); /* Free it immediately. */ - __pthread_unlock (THREAD_GETMEM(self, p_lock)); + __pthread_spin_unlock (THREAD_GETMEM(self, p_lock)); /* Continue with the real function. */ return pthread_start_thread (arg);
@@ -460,7 +460,7 @@ static int pthread_handle_create(pthread_t *thread, const pthread_attr_t *attr, __linuxthreads_create_event (); /* Now restart the thread. */ - __pthread_unlock(new_thread->p_lock); + __pthread_spin_unlock(new_thread->p_lock); } } }
@@ -509,7 +509,7 @@ static void pthread_free(pthread_descr th) __pthread_lock(&handle->h_lock, NULL); handle->h_descr = NULL; handle->h_bottom = (char *)(-1L); - __pthread_unlock(&handle->h_lock); + __pthread_spin_unlock(&handle->h_lock); #ifdef FREE_THREAD_SELF FREE_THREAD_SELF(th, th->p_nr); #endif
@@ -580,7 +580,7 @@ static void pthread_exited(pid_t pid) } } detached = th->p_detached; - __pthread_unlock(th->p_lock); + __pthread_spin_unlock(th->p_lock); if (detached) pthread_free(th); break;
@@ -623,19 +623,19 @@ static void pthread_handle_free(pthread_t th_id) if (invalid_handle(handle, th_id)) { /* pthread_reap_children has deallocated the thread already, nothing needs to be done */ - __pthread_unlock(&handle->h_lock); + __pthread_spin_unlock(&handle->h_lock); return; } th = handle->h_descr; if (th->p_exited) { - __pthread_unlock(&handle->h_lock); + __pthread_spin_unlock(&handle->h_lock); pthread_free(th); } else { /* The Unix process of the thread is still running. Mark the thread as detached so that the thread manager will deallocate its resources when the Unix process exits. */ th->p_detached = 1; - __pthread_unlock(&handle->h_lock); + __pthread_spin_unlock(&handle->h_lock); } }

diff --git a/linuxthreads/mutex.c b/linuxthreads/mutex.c
index 97b5a4fb84..4d9019ec4e 100644
--- a/linuxthreads/mutex.c
+++ b/linuxthreads/mutex.c
@@ -109,7 +109,7 @@ int __pthread_mutex_unlock(pthread_mutex_t * mutex) { switch (mutex->__m_kind) { case PTHREAD_MUTEX_FAST_NP: - __pthread_unlock(&mutex->__m_lock); + __pthread_spin_unlock(&mutex->__m_lock); return 0; case PTHREAD_MUTEX_RECURSIVE_NP: if (mutex->__m_count > 0) {
@@ -117,13 +117,13 @@ int __pthread_mutex_unlock(pthread_mutex_t * mutex) return 0; } mutex->__m_owner = NULL; - __pthread_unlock(&mutex->__m_lock); + __pthread_spin_unlock(&mutex->__m_lock); return 0; case PTHREAD_MUTEX_ERRORCHECK_NP: if (mutex->__m_owner != thread_self() || mutex->__m_lock.__status == 0) return EPERM; mutex->__m_owner = NULL; - __pthread_unlock(&mutex->__m_lock); + __pthread_spin_unlock(&mutex->__m_lock); return 0; default: return EINVAL;

diff --git a/linuxthreads/pthread.c b/linuxthreads/pthread.c
index c0658cd3c2..d3b851e5a2 100644
--- a/linuxthreads/pthread.c
+++ b/linuxthreads/pthread.c
@@ -458,7 +458,7 @@ int __pthread_initialize_manager(void) __linuxthreads_create_event (); /* Now restart the thread. */ - __pthread_unlock(__pthread_manager_thread.p_lock); + __pthread_spin_unlock(__pthread_manager_thread.p_lock); } } }
@@ -585,16 +585,16 @@ int pthread_setschedparam(pthread_t thread, int policy, __pthread_lock(&handle->h_lock, NULL); if (invalid_handle(handle, thread)) { - __pthread_unlock(&handle->h_lock); + __pthread_spin_unlock(&handle->h_lock); return ESRCH; } th = handle->h_descr; if (__sched_setscheduler(th->p_pid, policy, param) == -1) { - __pthread_unlock(&handle->h_lock); + __pthread_spin_unlock(&handle->h_lock); return errno; } th->p_priority = policy == SCHED_OTHER ? 0 : param->sched_priority; - __pthread_unlock(&handle->h_lock); + __pthread_spin_unlock(&handle->h_lock); if (__pthread_manager_request >= 0) __pthread_manager_adjust_prio(th->p_priority); return 0;
@@ -608,11 +608,11 @@ int pthread_getschedparam(pthread_t thread, int *policy, __pthread_lock(&handle->h_lock, NULL); if (invalid_handle(handle, thread)) { - __pthread_unlock(&handle->h_lock); + __pthread_spin_unlock(&handle->h_lock); return ESRCH; } pid = handle->h_descr->p_pid; - __pthread_unlock(&handle->h_lock); + __pthread_spin_unlock(&handle->h_lock); pol = __sched_getscheduler(pid); if (pol == -1) return errno; if (__sched_getparam(pid, param) == -1) return errno;
@@ -809,7 +809,7 @@ void __pthread_set_own_extricate_if(pthread_descr self, pthread_extricate_if *pe { __pthread_lock(self->p_lock, self); THREAD_SETMEM(self, p_extricate, peif); - __pthread_unlock(self->p_lock); + __pthread_spin_unlock(self->p_lock); } /* Primitives for controlling thread execution */

diff --git a/linuxthreads/rwlock.c b/linuxthreads/rwlock.c
index 9da87d25d1..e4a4c81f8c 100644
--- a/linuxthreads/rwlock.c
+++ b/linuxthreads/rwlock.c
@@ -217,7 +217,7 @@ __pthread_rwlock_destroy (pthread_rwlock_t *rwlock) __pthread_lock (&rwlock->__rw_lock, NULL); readers = rwlock->__rw_readers; writer = rwlock->__rw_writer; - __pthread_unlock (&rwlock->__rw_lock); + __pthread_spin_unlock (&rwlock->__rw_lock); if (readers > 0 || writer != NULL) return EBUSY;
@@ -247,12 +247,12 @@ __pthread_rwlock_rdlock (pthread_rwlock_t *rwlock) break; enqueue (&rwlock->__rw_read_waiting, self); - __pthread_unlock (&rwlock->__rw_lock); + __pthread_spin_unlock (&rwlock->__rw_lock); suspend (self); /* This is not a cancellation point */ } ++rwlock->__rw_readers; - __pthread_unlock (&rwlock->__rw_lock); + __pthread_spin_unlock (&rwlock->__rw_lock); if (have_lock_already || out_of_mem) {
@@ -291,7 +291,7 @@ __pthread_rwlock_tryrdlock (pthread_rwlock_t *rwlock) retval = 0; } - __pthread_unlock (&rwlock->__rw_lock); + __pthread_spin_unlock (&rwlock->__rw_lock); if (retval == 0) {
@@ -320,13 +320,13 @@ __pthread_rwlock_wrlock (pthread_rwlock_t *rwlock) if (rwlock->__rw_readers == 0 && rwlock->__rw_writer == NULL) { rwlock->__rw_writer = self; - __pthread_unlock (&rwlock->__rw_lock); + __pthread_spin_unlock (&rwlock->__rw_lock); return 0; } /* Suspend ourselves, then try again
