diff options
| author | Ulrich Drepper <drepper@redhat.com> | 1998-10-27 14:19:07 +0000 |
|---|---|---|
| committer | Ulrich Drepper <drepper@redhat.com> | 1998-10-27 14:19:07 +0000 |
| commit | c70ca1fa69c9a95108664b4132b7188a686cc9e4 (patch) | |
| tree | 19fa3a22f0ab1ce323bb2bfd787ffc86627196be /linuxthreads | |
| parent | 91cc83ff9745491a0d6673f36df9cdabd397d748 (diff) | |
| download | glibc-c70ca1fa69c9a95108664b4132b7188a686cc9e4.tar.xz glibc-c70ca1fa69c9a95108664b4132b7188a686cc9e4.zip | |
Update.
* sysdeps/unix/sysv/linux/powerpc/pread.c: Do not use the i386 version.
Instead call the system call wrapper function using a 64-bit argument.
* sysdeps/unix/sysv/linux/powerpc/pread64.c: Likewise.
* sysdeps/unix/sysv/linux/powerpc/pwrite.c: Likewise.
* sysdeps/unix/sysv/linux/powerpc/pwrite64.c: Likewise.
Diffstat (limited to 'linuxthreads')
| -rw-r--r-- | linuxthreads/ChangeLog | 23 | ||||
| -rw-r--r-- | linuxthreads/attr.c | 68 | ||||
| -rw-r--r-- | linuxthreads/cancel.c | 28 | ||||
| -rw-r--r-- | linuxthreads/condvar.c | 50 | ||||
| -rw-r--r-- | linuxthreads/manager.c | 30 | ||||
| -rw-r--r-- | linuxthreads/mutex.c | 74 | ||||
| -rw-r--r-- | linuxthreads/pthread.c | 11 | ||||
| -rw-r--r-- | linuxthreads/ptlongjmp.c | 4 | ||||
| -rw-r--r-- | linuxthreads/rwlock.c | 120 | ||||
| -rw-r--r-- | linuxthreads/spinlock.c | 23 | ||||
| -rw-r--r-- | linuxthreads/spinlock.h | 4 | ||||
| -rw-r--r-- | linuxthreads/sysdeps/pthread/bits/pthreadtypes.h | 58 | ||||
| -rw-r--r-- | linuxthreads/sysdeps/pthread/pthread.h | 8 |
13 files changed, 263 insertions, 238 deletions
diff --git a/linuxthreads/ChangeLog b/linuxthreads/ChangeLog index c0acddad0e..635bc634dd 100644 --- a/linuxthreads/ChangeLog +++ b/linuxthreads/ChangeLog @@ -1,3 +1,26 @@ +1998-10-27 13:46 Ulrich Drepper <drepper@cygnus.com> + + * sysdeps/pthread/pthread.h (struct _pthread_cleanup_buffer): Prepend + __ to field names of the struct. + * sysdeps/pthread/bits/pthreadtypes.h (struct _pthread_fastlock): + Likewise. + (pthread_attr_t): Likewise. + (pthread_cond_t): Likewise. + (pthread_condattr_t): Likewise. + (pthread_mutex_t): Likewise. + (pthread_mutexattr_t): Likewise. + (pthread_rwlock_t): Likewise. + (pthread_rwlockattr_t): Likewise. + * attr.c: Adjust for pthread.h and pthreadtypes.h change. + * cancel.c: Likewise. + * condvar.c: Likewise. + * manager.c: Likewise. + * mutex.c: Likewise. + * pthread.c: Likewise. + * ptlongjmp.c: Likewise. + * rwlock.c: Likewise. + * spinlock.c: Likewise. + 1998-10-09 Ulrich Drepper <drepper@cygnus.com> * sysdeps/i386/pt-machine.h (get_eflags, set_eflags): Mark these diff --git a/linuxthreads/attr.c b/linuxthreads/attr.c index 3a19ddc49b..2a70ebe674 100644 --- a/linuxthreads/attr.c +++ b/linuxthreads/attr.c @@ -25,15 +25,15 @@ int __pthread_attr_init_2_1(pthread_attr_t *attr) { size_t ps = __getpagesize (); - attr->detachstate = PTHREAD_CREATE_JOINABLE; - attr->schedpolicy = SCHED_OTHER; - attr->schedparam.sched_priority = 0; - attr->inheritsched = PTHREAD_EXPLICIT_SCHED; - attr->scope = PTHREAD_SCOPE_SYSTEM; - attr->guardsize = ps; - attr->stackaddr = NULL; - attr->stackaddr_set = 0; - attr->stacksize = STACK_SIZE - ps; + attr->__detachstate = PTHREAD_CREATE_JOINABLE; + attr->__schedpolicy = SCHED_OTHER; + attr->__schedparam.sched_priority = 0; + attr->__inheritsched = PTHREAD_EXPLICIT_SCHED; + attr->__scope = PTHREAD_SCOPE_SYSTEM; + attr->__guardsize = ps; + attr->__stackaddr = NULL; + attr->__stackaddr_set = 0; + attr->__stacksize = STACK_SIZE - ps; return 0; } #if defined HAVE_ELF && defined PIC && defined DO_VERSIONING @@ 
-41,11 +41,11 @@ default_symbol_version (__pthread_attr_init_2_1, pthread_attr_init, GLIBC_2.1); int __pthread_attr_init_2_0(pthread_attr_t *attr) { - attr->detachstate = PTHREAD_CREATE_JOINABLE; - attr->schedpolicy = SCHED_OTHER; - attr->schedparam.sched_priority = 0; - attr->inheritsched = PTHREAD_EXPLICIT_SCHED; - attr->scope = PTHREAD_SCOPE_SYSTEM; + attr->__detachstate = PTHREAD_CREATE_JOINABLE; + attr->__schedpolicy = SCHED_OTHER; + attr->__schedparam.sched_priority = 0; + attr->__inheritsched = PTHREAD_EXPLICIT_SCHED; + attr->__scope = PTHREAD_SCOPE_SYSTEM; return 0; } symbol_version (__pthread_attr_init_2_0, pthread_attr_init, GLIBC_2.0); @@ -63,32 +63,32 @@ int pthread_attr_setdetachstate(pthread_attr_t *attr, int detachstate) if (detachstate < PTHREAD_CREATE_JOINABLE || detachstate > PTHREAD_CREATE_DETACHED) return EINVAL; - attr->detachstate = detachstate; + attr->__detachstate = detachstate; return 0; } int pthread_attr_getdetachstate(const pthread_attr_t *attr, int *detachstate) { - *detachstate = attr->detachstate; + *detachstate = attr->__detachstate; return 0; } int pthread_attr_setschedparam(pthread_attr_t *attr, const struct sched_param *param) { - int max_prio = __sched_get_priority_max(attr->schedpolicy); - int min_prio = __sched_get_priority_min(attr->schedpolicy); + int max_prio = __sched_get_priority_max(attr->__schedpolicy); + int min_prio = __sched_get_priority_min(attr->__schedpolicy); if (param->sched_priority < min_prio || param->sched_priority > max_prio) return EINVAL; - memcpy (&attr->schedparam, param, sizeof (struct sched_param)); + memcpy (&attr->__schedparam, param, sizeof (struct sched_param)); return 0; } int pthread_attr_getschedparam(const pthread_attr_t *attr, struct sched_param *param) { - memcpy (param, &attr->schedparam, sizeof (struct sched_param)); + memcpy (param, &attr->__schedparam, sizeof (struct sched_param)); return 0; } @@ -96,13 +96,13 @@ int pthread_attr_setschedpolicy(pthread_attr_t *attr, int policy) { if 
(policy != SCHED_OTHER && policy != SCHED_FIFO && policy != SCHED_RR) return EINVAL; - attr->schedpolicy = policy; + attr->__schedpolicy = policy; return 0; } int pthread_attr_getschedpolicy(const pthread_attr_t *attr, int *policy) { - *policy = attr->schedpolicy; + *policy = attr->__schedpolicy; return 0; } @@ -110,13 +110,13 @@ int pthread_attr_setinheritsched(pthread_attr_t *attr, int inherit) { if (inherit != PTHREAD_INHERIT_SCHED && inherit != PTHREAD_EXPLICIT_SCHED) return EINVAL; - attr->inheritsched = inherit; + attr->__inheritsched = inherit; return 0; } int pthread_attr_getinheritsched(const pthread_attr_t *attr, int *inherit) { - *inherit = attr->inheritsched; + *inherit = attr->__inheritsched; return 0; } @@ -124,7 +124,7 @@ int pthread_attr_setscope(pthread_attr_t *attr, int scope) { switch (scope) { case PTHREAD_SCOPE_SYSTEM: - attr->scope = scope; + attr->__scope = scope; return 0; case PTHREAD_SCOPE_PROCESS: return ENOTSUP; @@ -135,7 +135,7 @@ int pthread_attr_setscope(pthread_attr_t *attr, int scope) int pthread_attr_getscope(const pthread_attr_t *attr, int *scope) { - *scope = attr->scope; + *scope = attr->__scope; return 0; } @@ -147,9 +147,9 @@ int __pthread_attr_setguardsize(pthread_attr_t *attr, size_t guardsize) guardsize = roundup (guardsize, ps); /* The guard size must not be larger than the stack itself */ - if (guardsize >= attr->stacksize) return EINVAL; + if (guardsize >= attr->__stacksize) return EINVAL; - attr->guardsize = guardsize; + attr->__guardsize = guardsize; return 0; } @@ -157,15 +157,15 @@ weak_alias (__pthread_attr_setguardsize, pthread_attr_setguardsize) int __pthread_attr_getguardsize(const pthread_attr_t *attr, size_t *guardsize) { - *guardsize = attr->guardsize; + *guardsize = attr->__guardsize; return 0; } weak_alias (__pthread_attr_getguardsize, pthread_attr_getguardsize) int __pthread_attr_setstackaddr(pthread_attr_t *attr, void *stackaddr) { - attr->stackaddr = stackaddr; - attr->stackaddr_set = 1; + 
attr->__stackaddr = stackaddr; + attr->__stackaddr_set = 1; return 0; } weak_alias (__pthread_attr_setstackaddr, pthread_attr_setstackaddr) @@ -175,7 +175,7 @@ int __pthread_attr_getstackaddr(const pthread_attr_t *attr, void **stackaddr) /* XXX This function has a stupid definition. The standard specifies no error value but what is if no stack address was set? We simply return the value we have in the member. */ - *stackaddr = attr->stackaddr; + *stackaddr = attr->__stackaddr; return 0; } weak_alias (__pthread_attr_getstackaddr, pthread_attr_getstackaddr) @@ -186,14 +186,14 @@ int __pthread_attr_setstacksize(pthread_attr_t *attr, size_t stacksize) if (stacksize < PTHREAD_STACK_MIN) return EINVAL; - attr->stacksize = stacksize; + attr->__stacksize = stacksize; return 0; } weak_alias (__pthread_attr_setstacksize, pthread_attr_setstacksize) int __pthread_attr_getstacksize(const pthread_attr_t *attr, size_t *stacksize) { - *stacksize = attr->stacksize; + *stacksize = attr->__stacksize; return 0; } weak_alias (__pthread_attr_getstacksize, pthread_attr_getstacksize) diff --git a/linuxthreads/cancel.c b/linuxthreads/cancel.c index e9cb10cc7c..3ff595418b 100644 --- a/linuxthreads/cancel.c +++ b/linuxthreads/cancel.c @@ -77,9 +77,9 @@ void _pthread_cleanup_push(struct _pthread_cleanup_buffer * buffer, void (*routine)(void *), void * arg) { pthread_descr self = thread_self(); - buffer->routine = routine; - buffer->arg = arg; - buffer->prev = THREAD_GETMEM(self, p_cleanup); + buffer->__routine = routine; + buffer->__arg = arg; + buffer->__prev = THREAD_GETMEM(self, p_cleanup); THREAD_SETMEM(self, p_cleanup, buffer); } @@ -87,18 +87,18 @@ void _pthread_cleanup_pop(struct _pthread_cleanup_buffer * buffer, int execute) { pthread_descr self = thread_self(); - if (execute) buffer->routine(buffer->arg); - THREAD_SETMEM(self, p_cleanup, buffer->prev); + if (execute) buffer->__routine(buffer->__arg); + THREAD_SETMEM(self, p_cleanup, buffer->__prev); } void 
_pthread_cleanup_push_defer(struct _pthread_cleanup_buffer * buffer, void (*routine)(void *), void * arg) { pthread_descr self = thread_self(); - buffer->routine = routine; - buffer->arg = arg; - buffer->canceltype = THREAD_GETMEM(self, p_canceltype); - buffer->prev = THREAD_GETMEM(self, p_cleanup); + buffer->__routine = routine; + buffer->__arg = arg; + buffer->__canceltype = THREAD_GETMEM(self, p_canceltype); + buffer->__prev = THREAD_GETMEM(self, p_cleanup); THREAD_SETMEM(self, p_canceltype, PTHREAD_CANCEL_DEFERRED); THREAD_SETMEM(self, p_cleanup, buffer); } @@ -107,9 +107,9 @@ void _pthread_cleanup_pop_restore(struct _pthread_cleanup_buffer * buffer, int execute) { pthread_descr self = thread_self(); - if (execute) buffer->routine(buffer->arg); - THREAD_SETMEM(self, p_cleanup, buffer->prev); - THREAD_SETMEM(self, p_canceltype, buffer->canceltype); + if (execute) buffer->__routine(buffer->__arg); + THREAD_SETMEM(self, p_cleanup, buffer->__prev); + THREAD_SETMEM(self, p_canceltype, buffer->__canceltype); if (THREAD_GETMEM(self, p_canceled) && THREAD_GETMEM(self, p_cancelstate) == PTHREAD_CANCEL_ENABLE && THREAD_GETMEM(self, p_canceltype) == PTHREAD_CANCEL_ASYNCHRONOUS) @@ -120,8 +120,8 @@ void __pthread_perform_cleanup(void) { pthread_descr self = thread_self(); struct _pthread_cleanup_buffer * c; - for (c = THREAD_GETMEM(self, p_cleanup); c != NULL; c = c->prev) - c->routine(c->arg); + for (c = THREAD_GETMEM(self, p_cleanup); c != NULL; c = c->__prev) + c->__routine(c->__arg); } #ifndef PIC diff --git a/linuxthreads/condvar.c b/linuxthreads/condvar.c index 773d7144f0..b880a38a73 100644 --- a/linuxthreads/condvar.c +++ b/linuxthreads/condvar.c @@ -28,14 +28,14 @@ int pthread_cond_init(pthread_cond_t *cond, const pthread_condattr_t *cond_attr) { - __pthread_init_lock(&cond->c_lock); - cond->c_waiting = NULL; + __pthread_init_lock(&cond->__c_lock); + cond->__c_waiting = NULL; return 0; } int pthread_cond_destroy(pthread_cond_t *cond) { - if (cond->c_waiting != 
NULL) return EBUSY; + if (cond->__c_waiting != NULL) return EBUSY; return 0; } @@ -43,9 +43,9 @@ int pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex) { volatile pthread_descr self = thread_self(); - __pthread_lock(&cond->c_lock); - enqueue(&cond->c_waiting, self); - __pthread_unlock(&cond->c_lock); + __pthread_lock(&cond->__c_lock); + enqueue(&cond->__c_waiting, self); + __pthread_unlock(&cond->__c_lock); pthread_mutex_unlock(mutex); suspend_with_cancellation(self); pthread_mutex_lock(mutex); @@ -53,9 +53,9 @@ int pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex) if (THREAD_GETMEM(self, p_canceled) && THREAD_GETMEM(self, p_cancelstate) == PTHREAD_CANCEL_ENABLE) { /* Remove ourselves from the waiting queue if we're still on it */ - __pthread_lock(&cond->c_lock); - remove_from_queue(&cond->c_waiting, self); - __pthread_unlock(&cond->c_lock); + __pthread_lock(&cond->__c_lock); + remove_from_queue(&cond->__c_waiting, self); + __pthread_unlock(&cond->__c_lock); pthread_exit(PTHREAD_CANCELED); } return 0; @@ -72,9 +72,9 @@ pthread_cond_timedwait_relative(pthread_cond_t *cond, sigjmp_buf jmpbuf; /* Wait on the condition */ - __pthread_lock(&cond->c_lock); - enqueue(&cond->c_waiting, self); - __pthread_unlock(&cond->c_lock); + __pthread_lock(&cond->__c_lock); + enqueue(&cond->__c_waiting, self); + __pthread_unlock(&cond->__c_lock); pthread_mutex_unlock(mutex); /* Set up a longjmp handler for the restart and cancel signals */ if (sigsetjmp(jmpbuf, 1) == 0) { @@ -107,17 +107,17 @@ pthread_cond_timedwait_relative(pthread_cond_t *cond, /* This is a cancellation point */ if (THREAD_GETMEM(self, p_canceled) && THREAD_GETMEM(self, p_cancelstate) == PTHREAD_CANCEL_ENABLE) { - __pthread_lock(&cond->c_lock); - remove_from_queue(&cond->c_waiting, self); - __pthread_unlock(&cond->c_lock); + __pthread_lock(&cond->__c_lock); + remove_from_queue(&cond->__c_waiting, self); + __pthread_unlock(&cond->__c_lock); pthread_mutex_lock(mutex); 
pthread_exit(PTHREAD_CANCELED); } /* If not signaled: also remove ourselves and return an error code */ if (THREAD_GETMEM(self, p_signal) == 0) { - __pthread_lock(&cond->c_lock); - remove_from_queue(&cond->c_waiting, self); - __pthread_unlock(&cond->c_lock); + __pthread_lock(&cond->__c_lock); + remove_from_queue(&cond->__c_waiting, self); + __pthread_unlock(&cond->__c_lock); pthread_mutex_lock(mutex); return retsleep == 0 ? ETIMEDOUT : EINTR; } @@ -147,9 +147,9 @@ int pthread_cond_signal(pthread_cond_t *cond) { pthread_descr th; - __pthread_lock(&cond->c_lock); - th = dequeue(&cond->c_waiting); - __pthread_unlock(&cond->c_lock); + __pthread_lock(&cond->__c_lock); + th = dequeue(&cond->__c_waiting); + __pthread_unlock(&cond->__c_lock); if (th != NULL) restart(th); return 0; } @@ -158,11 +158,11 @@ int pthread_cond_broadcast(pthread_cond_t *cond) { pthread_descr tosignal, th; - __pthread_lock(&cond->c_lock); + __pthread_lock(&cond->__c_lock); /* Copy the current state of the waiting queue and empty it */ - tosignal = cond->c_waiting; - cond->c_waiting = NULL; - __pthread_unlock(&cond->c_lock); + tosignal = cond->__c_waiting; + cond->__c_waiting = NULL; + __pthread_unlock(&cond->__c_lock); /* Now signal each process in the queue */ while ((th = dequeue(&tosignal)) != NULL) restart(th); return 0; diff --git a/linuxthreads/manager.c b/linuxthreads/manager.c index d145395407..b6107da1fb 100644 --- a/linuxthreads/manager.c +++ b/linuxthreads/manager.c @@ -222,12 +222,12 @@ static int pthread_allocate_stack(const pthread_attr_t *attr, char * guardaddr; size_t stacksize, guardsize; - if (attr != NULL && attr->stackaddr_set) + if (attr != NULL && attr->__stackaddr_set) { /* The user provided a stack. 
*/ new_thread = - (pthread_descr) ((long)(attr->stackaddr) & -sizeof(void *)) - 1; - new_thread_bottom = (char *) attr->stackaddr - attr->stacksize; + (pthread_descr) ((long)(attr->__stackaddr) & -sizeof(void *)) - 1; + new_thread_bottom = (char *) attr->__stackaddr - attr->__stacksize; guardaddr = NULL; guardsize = 0; } @@ -249,9 +249,9 @@ static int pthread_allocate_stack(const pthread_attr_t *attr, the RLIMIT_STACK soft limit prevents stacks from running into one another. */ if (attr == NULL || - attr->guardsize == 0 || - (attr->guardsize == pagesize && - attr->stacksize == STACK_SIZE - pagesize)) + attr->__guardsize == 0 || + (attr->__guardsize == pagesize && + attr->__stacksize == STACK_SIZE - pagesize)) { /* We don't need a guard page. */ guardaddr = NULL; @@ -260,11 +260,11 @@ static int pthread_allocate_stack(const pthread_attr_t *attr, else { /* Put a bad page at the bottom of the stack */ - stacksize = roundup(attr->stacksize, pagesize); + stacksize = roundup(attr->__stacksize, pagesize); if (stacksize >= STACK_SIZE - pagesize) stacksize = STACK_SIZE - pagesize; guardaddr = (void *)new_thread - stacksize; - guardsize = attr->guardsize; + guardsize = attr->__guardsize; if (mmap ((caddr_t) guardaddr, guardsize, 0, MAP_FIXED, -1, 0) == MAP_FAILED) { @@ -298,7 +298,7 @@ static int pthread_handle_create(pthread_t *thread, const pthread_attr_t *attr, we can do this. Normally this should be done by examining the return value of the __sched_setscheduler call in pthread_start_thread but this is hard to implement. 
FIXME */ - if (attr != NULL && attr->schedpolicy != SCHED_OTHER && geteuid () != 0) + if (attr != NULL && attr->__schedpolicy != SCHED_OTHER && geteuid () != 0) return EPERM; /* Find a free segment for the thread, and allocate a stack if needed */ for (sseg = 2; ; sseg++) @@ -324,7 +324,7 @@ static int pthread_handle_create(pthread_t *thread, const pthread_attr_t *attr, new_thread->p_signal_jmp = NULL; new_thread->p_cancel_jmp = NULL; new_thread->p_terminated = 0; - new_thread->p_detached = attr == NULL ? 0 : attr->detachstate; + new_thread->p_detached = attr == NULL ? 0 : attr->__detachstate; new_thread->p_exited = 0; new_thread->p_retval = NULL; new_thread->p_joining = NULL; @@ -340,7 +340,7 @@ static int pthread_handle_create(pthread_t *thread, const pthread_attr_t *attr, new_thread->p_sigwaiting = 0; new_thread->p_guardaddr = guardaddr; new_thread->p_guardsize = guardsize; - new_thread->p_userstack = attr != NULL && attr->stackaddr_set; + new_thread->p_userstack = attr != NULL && attr->__stackaddr_set; memset (new_thread->p_specific, '\0', PTHREAD_KEY_1STLEVEL_SIZE * sizeof (new_thread->p_specific[0])); new_thread->p_self = new_thread; @@ -352,10 +352,10 @@ static int pthread_handle_create(pthread_t *thread, const pthread_attr_t *attr, /* Determine scheduling parameters for the thread */ new_thread->p_start_args.schedpolicy = -1; if (attr != NULL) { - switch(attr->inheritsched) { + switch(attr->__inheritsched) { case PTHREAD_EXPLICIT_SCHED: - new_thread->p_start_args.schedpolicy = attr->schedpolicy; - memcpy (&new_thread->p_start_args.schedparam, &attr->schedparam, + new_thread->p_start_args.schedpolicy = attr->__schedpolicy; + memcpy (&new_thread->p_start_args.schedparam, &attr->__schedparam, sizeof (struct sched_param)); break; case PTHREAD_INHERIT_SCHED: @@ -382,7 +382,7 @@ static int pthread_handle_create(pthread_t *thread, const pthread_attr_t *attr, /* Check if cloning succeeded */ if (pid == -1) { /* Free the stack if we allocated it */ - if (attr == 
NULL || !attr->stackaddr_set) + if (attr == NULL || !attr->__stackaddr_set) { munmap((caddr_t)((char *)(new_thread+1) - INITIAL_STACK_SIZE), INITIAL_STACK_SIZE); diff --git a/linuxthreads/mutex.c b/linuxthreads/mutex.c index 74b0922210..d3ef78cfbe 100644 --- a/linuxthreads/mutex.c +++ b/linuxthreads/mutex.c @@ -26,18 +26,18 @@ int __pthread_mutex_init(pthread_mutex_t * mutex, const pthread_mutexattr_t * mutex_attr) { - __pthread_init_lock(&mutex->m_lock); - mutex->m_kind = - mutex_attr == NULL ? PTHREAD_MUTEX_FAST_NP : mutex_attr->mutexkind; - mutex->m_count = 0; - mutex->m_owner = NULL; + __pthread_init_lock(&mutex->__m_lock); + mutex->__m_kind = + mutex_attr == NULL ? PTHREAD_MUTEX_FAST_NP : mutex_attr->__mutexkind; + mutex->__m_count = 0; + mutex->__m_owner = NULL; return 0; } weak_alias (__pthread_mutex_init, pthread_mutex_init) int __pthread_mutex_destroy(pthread_mutex_t * mutex) { - if (mutex->m_lock.status != 0) return EBUSY; + if (mutex->__m_lock.__status != 0) return EBUSY; return 0; } weak_alias (__pthread_mutex_destroy, pthread_mutex_destroy) @@ -47,26 +47,26 @@ int __pthread_mutex_trylock(pthread_mutex_t * mutex) pthread_descr self; int retcode; - switch(mutex->m_kind) { + switch(mutex->__m_kind) { case PTHREAD_MUTEX_FAST_NP: - retcode = __pthread_trylock(&mutex->m_lock); + retcode = __pthread_trylock(&mutex->__m_lock); return retcode; case PTHREAD_MUTEX_RECURSIVE_NP: self = thread_self(); - if (mutex->m_owner == self) { - |
