-rw-r--r--   ChangeLog                                      |  15
-rw-r--r--   elf/dl-close.c                                 |   4
-rw-r--r--   elf/dl-fptr.c                                  |   4
-rw-r--r--   elf/dl-open.c                                  |   4
-rw-r--r--   elf/dl-profile.c                               |  14
-rw-r--r--   elf/dl-runtime.c                               |   8
-rw-r--r--   elf/dl-sym.c                                   |   4
-rw-r--r--   gmon/mcount.c                                  |   4
-rw-r--r--   include/atomic.h                               | 139
-rw-r--r--   malloc/memusage.c                              | 116
-rw-r--r--   nptl/ChangeLog                                 |   5
-rw-r--r--   nptl/sysdeps/unix/sysv/linux/rtld-lowlevel.h   |  18
-rw-r--r--   resolv/res_libc.c                              |   2
-rw-r--r--   stdlib/cxa_finalize.c                          |   4
-rw-r--r--   sysdeps/x86_64/bits/atomic.h                   | 222
15 files changed, 415 insertions, 148 deletions
@@ -1,3 +1,18 @@
+2006-10-11  Ulrich Drepper  <drepper@redhat.com>
+
+	* include/atomic.h: Define catomic_* operations.
+	* sysdeps/x86_64/bits/atomic.h: Likewise.  Fix a few minor problems.
+	* stdlib/cxa_finalize.c: Use catomic_* operations instead of atomic_*.
+	* malloc/memusage.c: Likewise.
+	* gmon/mcount.c: Likewise.
+	* elf/dl-close.c: Likewise.
+	* elf/dl-open.c: Likewise.
+	* elf/dl-profile.c: Likewise.
+	* elf/dl-sym.c: Likewise.
+	* elf/dl-runtime.c: Likewise.
+	* elf/dl-fptr.c: Likewise.
+	* resolv/res_libc.c: Likewise.
+
 2006-10-10  Ulrich Drepper  <drepper@redhat.com>
 
 	* nis/nis_subr.c (nis_getnames): Add trailing dot to NIS_PATH
diff --git a/elf/dl-close.c b/elf/dl-close.c
index 2e7c506a3d..84e57e09d0 100644
--- a/elf/dl-close.c
+++ b/elf/dl-close.c
@@ -423,11 +423,11 @@ _dl_close (void *_map)
 	      imap->l_scoperec = newp;
 	      __rtld_mrlock_done (imap->l_scoperec_lock);
 
-	      if (atomic_increment_val (&old->nusers) != 1)
+	      if (catomic_increment_val (&old->nusers) != 1)
 		{
 		  old->remove_after_use = true;
 		  old->notify = true;
-		  if (atomic_decrement_val (&old->nusers) != 0)
+		  if (catomic_decrement_val (&old->nusers) != 0)
 		    __rtld_waitzero (old->nusers);
 		}
 
diff --git a/elf/dl-fptr.c b/elf/dl-fptr.c
index 78beecfdcb..e068124d6f 100644
--- a/elf/dl-fptr.c
+++ b/elf/dl-fptr.c
@@ -1,5 +1,5 @@
 /* Manage function descriptors.  Generic version.
-   Copyright (C) 1999,2000,2001,2002,2003,2004 Free Software Foundation, Inc.
+   Copyright (C) 1999-2004, 2006 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
 
    The GNU C Library is free software; you can redistribute it and/or
@@ -40,7 +40,7 @@
 
 #ifndef COMPARE_AND_SWAP
 # define COMPARE_AND_SWAP(ptr, old, new) \
-  (atomic_compare_and_exchange_bool_acq (ptr, new, old) == 0)
+  (catomic_compare_and_exchange_bool_acq (ptr, new, old) == 0)
 #endif
 
 ElfW(Addr) _dl_boot_fptr_table [ELF_MACHINE_BOOT_FPTR_TABLE_LEN];
diff --git a/elf/dl-open.c b/elf/dl-open.c
index 5c90e06708..35712b5ac0 100644
--- a/elf/dl-open.c
+++ b/elf/dl-open.c
@@ -429,9 +429,9 @@ dl_open_worker (void *a)
 	      imap->l_scoperec = newp;
 	      __rtld_mrlock_done (imap->l_scoperec_lock);
 
-	      atomic_increment (&old->nusers);
+	      catomic_increment (&old->nusers);
 	      old->remove_after_use = true;
-	      if (atomic_decrement_val (&old->nusers) == 0)
+	      if (catomic_decrement_val (&old->nusers) == 0)
 		/* No user, we can free it here and now.  */
 		free (old);
 	    }
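The loader hunks above all make the same substitution: the nusers reference
counts on scope records are updated with the new conditionally atomic
catomic_* operations, which may skip the expensive atomic instruction when
the process is known to be single-threaded.  A minimal sketch of the idea in
portable C11 follows; the single_threaded flag and the function name are
illustrative assumptions, not glibc's actual internals:

#include <stdatomic.h>
#include <stdbool.h>

/* Hypothetical runtime flag; glibc tracks this differently (via the
   thread descriptor), this is only a stand-in for the sketch.  */
static bool single_threaded = true;

/* Sketch of what catomic_increment_val buys: a plain read-modify-write
   when only one thread can touch *MEM, a real atomic otherwise.  */
static inline int
catomic_increment_val_sketch (atomic_int *mem)
{
  if (single_threaded)
    {
      int newval = atomic_load_explicit (mem, memory_order_relaxed) + 1;
      atomic_store_explicit (mem, newval, memory_order_relaxed);
      return newval;
    }
  return atomic_fetch_add (mem, 1) + 1;
}

The multi-threaded path is slightly slower than a bare atomic_fetch_add
because of the extra branch, which is why the header comment further down
reserves catomic_* for memory that is usually accessed by one thread.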
diff --git a/elf/dl-profile.c b/elf/dl-profile.c
index 41214c1b08..47033f32ef 100644
--- a/elf/dl-profile.c
+++ b/elf/dl-profile.c
@@ -1,5 +1,5 @@
 /* Profiling of shared libraries.
-   Copyright (C) 1997-2002, 2003, 2004 Free Software Foundation, Inc.
+   Copyright (C) 1997-2002, 2003, 2004, 2006 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
    Contributed by Ulrich Drepper <drepper@cygnus.com>, 1997.
    Based on the BSD mcount implementation.
@@ -509,24 +509,24 @@ _dl_mcount (ElfW(Addr) frompc, ElfW(Addr) selfpc)
 	      size_t newfromidx;
 	      to_index = (data[narcs].self_pc
 			  / (HASHFRACTION * sizeof (*tos)));
-	      newfromidx = atomic_exchange_and_add (&fromidx, 1) + 1;
+	      newfromidx = catomic_exchange_and_add (&fromidx, 1) + 1;
 	      froms[newfromidx].here = &data[narcs];
 	      froms[newfromidx].link = tos[to_index];
 	      tos[to_index] = newfromidx;
-	      atomic_increment (&narcs);
+	      catomic_increment (&narcs);
 	    }
 
 	  /* If we still have no entry stop searching and insert.  */
 	  if (*topcindex == 0)
 	    {
-	      uint_fast32_t newarc = atomic_exchange_and_add (narcsp, 1);
+	      uint_fast32_t newarc = catomic_exchange_and_add (narcsp, 1);
 
 	      /* In rare cases it could happen that all entries in FROMS are
 		 occupied.  So we cannot count this anymore.  */
 	      if (newarc >= fromlimit)
 		goto done;
 
-	      *topcindex = atomic_exchange_and_add (&fromidx, 1) + 1;
+	      *topcindex = catomic_exchange_and_add (&fromidx, 1) + 1;
 	      fromp = &froms[*topcindex];
 
 	      fromp->here = &data[newarc];
@@ -534,7 +534,7 @@ _dl_mcount (ElfW(Addr) frompc, ElfW(Addr) selfpc)
 	      data[newarc].self_pc = selfpc;
 	      data[newarc].count = 0;
 	      fromp->link = 0;
-	      atomic_increment (&narcs);
+	      catomic_increment (&narcs);
 
 	      break;
 	    }
@@ -547,7 +547,7 @@ _dl_mcount (ElfW(Addr) frompc, ElfW(Addr) selfpc)
     }
 
   /* Increment the counter.  */
-  atomic_increment (&fromp->here->count);
+  catomic_increment (&fromp->here->count);
 
  done:
   ;
diff --git a/elf/dl-runtime.c b/elf/dl-runtime.c
index 83d565ac71..05fd974bf5 100644
--- a/elf/dl-runtime.c
+++ b/elf/dl-runtime.c
@@ -97,7 +97,7 @@ _dl_fixup (
     {
       __rtld_mrlock_lock (l->l_scoperec_lock);
       scoperec = l->l_scoperec;
-      atomic_increment (&scoperec->nusers);
+      catomic_increment (&scoperec->nusers);
       __rtld_mrlock_unlock (l->l_scoperec_lock);
     }
 
@@ -107,7 +107,7 @@ _dl_fixup (
 				    DL_LOOKUP_ADD_DEPENDENCY, NULL);
 
   if (l->l_type == lt_loaded
-      && atomic_decrement_val (&scoperec->nusers) == 0
+      && catomic_decrement_val (&scoperec->nusers) == 0
       && __builtin_expect (scoperec->remove_after_use, 0))
     {
       if (scoperec->notify)
@@ -199,7 +199,7 @@ _dl_profile_fixup (
     {
       __rtld_mrlock_lock (l->l_scoperec_lock);
       scoperec = l->l_scoperec;
-      atomic_increment (&scoperec->nusers);
+      catomic_increment (&scoperec->nusers);
       __rtld_mrlock_unlock (l->l_scoperec_lock);
     }
 
@@ -209,7 +209,7 @@ _dl_profile_fixup (
 				    DL_LOOKUP_ADD_DEPENDENCY, NULL);
 
   if (l->l_type == lt_loaded
-      && atomic_decrement_val (&scoperec->nusers) == 0
+      && catomic_decrement_val (&scoperec->nusers) == 0
      && __builtin_expect (scoperec->remove_after_use, 0))
    {
      if (scoperec->notify)
diff --git a/elf/dl-sym.c b/elf/dl-sym.c
index 1c66310d7c..43933466b4 100644
--- a/elf/dl-sym.c
+++ b/elf/dl-sym.c
@@ -124,7 +124,7 @@ do_sym (void *handle, const char *name, void *who,
 	{
 	  __rtld_mrlock_lock (match->l_scoperec_lock);
 	  struct r_scoperec *scoperec = match->l_scoperec;
-	  atomic_increment (&scoperec->nusers);
+	  catomic_increment (&scoperec->nusers);
 	  __rtld_mrlock_unlock (match->l_scoperec_lock);
 
 	  struct call_dl_lookup_args args;
@@ -141,7 +141,7 @@ do_sym (void *handle, const char *name, void *who,
 	  int err = GLRO(dl_catch_error) (&objname, &errstring, &malloced,
 					  call_dl_lookup, &args);
 
-	  if (atomic_decrement_val (&scoperec->nusers) == 0
+	  if (catomic_decrement_val (&scoperec->nusers) == 0
 	      && __builtin_expect (scoperec->remove_after_use, 0))
 	    {
 	      if (scoperec->notify)
diff --git a/gmon/mcount.c b/gmon/mcount.c
index 32a5f1ea0f..5a4a2499d4 100644
--- a/gmon/mcount.c
+++ b/gmon/mcount.c
@@ -69,8 +69,8 @@ _MCOUNT_DECL(frompc, selfpc)	/* _mcount; may be static, inline, etc */
 	 * check that we are profiling
 	 * and that we aren't recursively invoked.
 	 */
-	if (atomic_compare_and_exchange_bool_acq (&p->state, GMON_PROF_BUSY,
-						  GMON_PROF_ON))
+	if (catomic_compare_and_exchange_bool_acq (&p->state, GMON_PROF_BUSY,
+						   GMON_PROF_ON))
 	  return;
 
 	/*
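In the mcount hunk just above, the compare-and-exchange doubles as a
reentrancy guard: the profiler only proceeds when it can flip the state from
GMON_PROF_ON to GMON_PROF_BUSY, and a failed exchange (profiling off, or
already inside the profiler) simply returns.  A stand-alone C11 sketch of
that guard pattern, with illustrative names rather than glibc's:

#include <stdatomic.h>

enum { PROF_ON, PROF_BUSY, PROF_OFF };

static atomic_int prof_state = PROF_ON;

void
record_arc (void *frompc, void *selfpc)
{
  int expected = PROF_ON;
  /* Enter only if profiling is on and we are not already inside:
     a failed CAS leaves prof_state untouched and we back out.  */
  if (!atomic_compare_exchange_strong (&prof_state, &expected, PROF_BUSY))
    return;

  /* ... update the call-graph tables here ...  */

  atomic_store (&prof_state, PROF_ON);	/* leave the critical region */
}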
These + always behave identical but can be faster when atomicity + is not really needed since only one thread has access to + the memory location. In that case the code is slower in + the multi-thread case. The interfaces have the prefix + "catomic_". + + - support functions like barriers. They also have the preifx + "atomic_". + + Architectures must provide a few lowlevel macros (the compare + and exchange definitions). All others are optional. They + should only be provided if the architecture has specific + support for the operation. */ + #include <stdlib.h> #include <bits/atomic.h> @@ -70,12 +90,29 @@ #endif +#if !defined catomic_compare_and_exchange_val_acq \ + && defined __arch_c_compare_and_exchange_val_32_acq +# define catomic_compare_and_exchange_val_acq(mem, newval, oldval) \ + __atomic_val_bysize (__arch_c_compare_and_exchange_val,acq, \ + mem, newval, oldval) +#else +# define catomic_compare_and_exchange_val_acq(mem, newval, oldval) \ + atomic_compare_and_exchange_val_acq (mem, newval, oldval) +#endif + + #ifndef atomic_compare_and_exchange_val_rel # define atomic_compare_and_exchange_val_rel(mem, newval, oldval) \ atomic_compare_and_exchange_val_acq (mem, newval, oldval) #endif +#ifndef catomic_compare_and_exchange_val_rel +# define catomic_compare_and_exchange_val_rel(mem, newval, oldval) \ + atomic_compare_and_exchange_val_acq (mem, newval, oldval) +#endif + + /* Atomically store NEWVAL in *MEM if *MEM is equal to OLDVAL. Return zero if *MEM was changed or non-zero if no exchange happened. */ #ifndef atomic_compare_and_exchange_bool_acq @@ -94,12 +131,34 @@ #endif +#ifndef catomic_compare_and_exchange_bool_acq +# ifdef __arch_c_compare_and_exchange_bool_32_acq +# define catomic_compare_and_exchange_bool_acq(mem, newval, oldval) \ + __atomic_bool_bysize (__arch_c_compare_and_exchange_bool,acq, \ + mem, newval, oldval) +# else +# define catomic_compare_and_exchange_bool_acq(mem, newval, oldval) \ + ({ /* Cannot use __oldval here, because macros later in this file might \ + call this macro with __oldval argument. */ \ + __typeof (oldval) __old = (oldval); \ + catomic_compare_and_exchange_val_acq (mem, newval, __old) != __old; \ + }) +# endif +#endif + + #ifndef atomic_compare_and_exchange_bool_rel # define atomic_compare_and_exchange_bool_rel(mem, newval, oldval) \ atomic_compare_and_exchange_bool_acq (mem, newval, oldval) #endif +#ifndef catomic_compare_and_exchange_bool_rel +# define catomic_compare_and_exchange_bool_rel(mem, newval, oldval) \ + catomic_compare_and_exchange_bool_acq (mem, newval, oldval) +#endif + + /* Store NEWVALUE in *MEM and return the old value. 
diff --git a/malloc/memusage.c b/malloc/memusage.c
index 8b37c43a8a..9003d8094a 100644
--- a/malloc/memusage.c
+++ b/malloc/memusage.c
@@ -1,5 +1,5 @@
 /* Profile heap and stack memory usage of running program.
-   Copyright (C) 1998-2002, 2004, 2005 Free Software Foundation, Inc.
+   Copyright (C) 1998-2002, 2004, 2005, 2006 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
    Contributed by Ulrich Drepper <drepper@cygnus.com>, 1998.
 
@@ -128,8 +128,8 @@ update_data (struct header *result, size_t len, size_t old_len)
   /* Compute current heap usage and compare it with the maximum value.  */
   memusage_size_t heap
-    = atomic_exchange_and_add (&current_heap, len - old_len) + len - old_len;
-  atomic_max (&peak_heap, heap);
+    = catomic_exchange_and_add (&current_heap, len - old_len) + len - old_len;
+  catomic_max (&peak_heap, heap);
 
   /* Compute current stack usage and compare it with the maximum
      value.  The base stack pointer might not be set if this is not
@@ -152,15 +152,15 @@ update_data (struct header *result, size_t len, size_t old_len)
     start_sp = sp;
   size_t current_stack = start_sp - sp;
 #endif
-  atomic_max (&peak_stack, current_stack);
+  catomic_max (&peak_stack, current_stack);
 
   /* Add up heap and stack usage and compare it with the maximum value.  */
-  atomic_max (&peak_total, heap + current_stack);
+  catomic_max (&peak_total, heap + current_stack);
 
   /* Store the value only if we are writing to a file.  */
   if (fd != -1)
     {
-      uatomic32_t idx = atomic_exchange_and_add (&buffer_cnt, 1);
+      uatomic32_t idx = catomic_exchange_and_add (&buffer_cnt, 1);
       if (idx >= 2 * buffer_size)
 	{
 	  /* We try to reset the counter to the correct range.  If
@@ -168,7 +168,7 @@ update_data (struct header *result, size_t len, size_t old_len)
 	     counter it does not matter since that thread will take
 	     care of the correction.  */
 	  unsigned int reset = idx - 2 * buffer_size;
-	  atomic_compare_and_exchange_val_acq (&buffer_size, reset, idx);
+	  catomic_compare_and_exchange_val_acq (&buffer_size, reset, idx);
 	  idx = reset;
 	}
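The update_data hunk relies on a wrapping counter: buffer_cnt indexes a
buffer holding 2 * buffer_size entries, and once the fetch-and-add runs past
the end, one thread CASes the counter back into range while any thread that
loses the race leaves the correction to the winner, as the comment in the
hunk explains.  A self-contained sketch of that scheme — the size and the
function name here are illustrative, and the glibc code keeps the variable
names shown above:

#include <stdatomic.h>

#define BUFFER_SIZE 1024		/* illustrative size */

static atomic_uint buffer_cnt;

/* Claim a slot in a 2*BUFFER_SIZE ring the way update_data does:
   fetch-and-add, then CAS the counter back into range on overflow.
   A failed CAS means another increment happened; that thread will
   apply the correction instead.  */
static unsigned int
claim_slot (void)
{
  unsigned int idx = atomic_fetch_add (&buffer_cnt, 1);
  if (idx >= 2 * BUFFER_SIZE)
    {
      unsigned int expected = idx;
      unsigned int reset = idx - 2 * BUFFER_SIZE;
      atomic_compare_exchange_strong (&buffer_cnt, &expected, reset);
      idx = reset;
    }
  return idx % (2 * BUFFER_SIZE);
}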
@@ -337,24 +337,24 @@ malloc (size_t len)
     return (*mallocp) (len);
 
   /* Keep track of number of calls.  */
-  atomic_increment (&calls[idx_malloc]);
+  catomic_increment (&calls[idx_malloc]);
   /* Keep track of total memory consumption for `malloc'.  */
-  atomic_add (&total[idx_malloc], len);
+  catomic_add (&total[idx_malloc], len);
   /* Keep track of total memory requirement.  */
-  atomic_add (&grand_total, len);
+  catomic_add (&grand_total, len);
   /* Remember the size of the request.  */
   if (len < 65536)
-    atomic_increment (&histogram[len / 16]);
+    catomic_increment (&histogram[len / 16]);
   else
-    atomic_increment (&large);
+    catomic_increment (&large);
   /* Total number of calls of any of the functions.  */
-  atomic_increment (&calls_total);
+  catomic_increment (&calls_total);
 
   /* Do the real work.  */
   result = (struct header *) (*mallocp) (len + sizeof (struct header));
   if (result == NULL)
     {
-      atomic_increment (&failed[idx_malloc]);
+      catomic_increment (&failed[idx_malloc]);
       return NULL;
     }
 
@@ -403,36 +403,36 @@ realloc (void *old, size_t len)
     }
 
   /* Keep track of number of calls.  */
-  atomic_increment (&calls[idx_realloc]);
+  catomic_increment (&calls[idx_realloc]);
   if (len > old_len)
     {
       /* Keep track of total memory consumption for `realloc'.  */
-      atomic_add (&total[idx_realloc], len - old_len);
+      catomic_add (&total[idx_realloc], len - old_len);
       /* Keep track of total memory requirement.  */
-      atomic_add (&grand_total, len - old_len);
+      catomic_add (&grand_total, len - old_len);
     }
   /* Remember the size of the request.  */
   if (len < 65536)
-    atomic_increment (&histogram[len / 16]);
+    catomic_increment (&histogram[len / 16]);
   else
-    atomic_increment (&large);
+    catomic_increment (&large);
   /* Total number of calls of any of the functions.  */
-  atomic_increment (&calls_total);
+  catomic_increment (&calls_total);
 
   /* Do the real work.  */
   result = (struct header *) (*reallocp) (real, len + sizeof (struct header));
   if (result == NULL)
     {
-      atomic_increment (&failed[idx_realloc]);
+      catomic_increment (&failed[idx_realloc]);
       return NULL;
     }
 
   /* Record whether the reduction/increase happened in place.  */
   if (real == result)
-    atomic_increment (&inplace);
+    catomic_increment (&inplace);
   /* Was the buffer increased?  */
   if (old_len > len)
-    atomic_increment (&decreasing);
+    catomic_increment (&decreasing);
 
   /* Update the allocation data and write out the records if necessary.  */
   update_data (result, len, old_len);
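The malloc and realloc hunks share one size-accounting pattern: requests
under 65536 bytes land in 16-byte-wide histogram buckets (65536 / 16 = 4096
of them), and anything larger is lumped into a single large counter.
Stripped of the atomics, the bookkeeping reduces to this sketch with
illustrative declarations:

#include <stddef.h>

#define HISTOGRAM_SLOTS (65536 / 16)	/* 4096 buckets, 16 bytes wide */

static unsigned long histogram[HISTOGRAM_SLOTS];
static unsigned long large;

/* Bucket a request size the way the malloc/realloc/calloc hooks do.  */
static void
count_size (size_t len)
{
  if (len < 65536)
    ++histogram[len / 16];
  else
    ++large;
}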
@@ -463,16 +463,16 @@ calloc (size_t n, size_t len)
     return (*callocp) (n, len);
 
   /* Keep track of number of calls.  */
-  atomic_increment (&calls[idx_calloc]);
+  catomic_increment (&calls[idx_calloc]);
   /* Keep track of total memory consumption for `calloc'.  */
-  atomic_add (&total[idx_calloc], size);
+  catomic_add (&total[idx_calloc], size);
   /* Keep track of total memory requirement.  */
-  atomic_add (&grand_total, size);
+  catomic_add (&grand_total, size);
   /* Remember the size of the request.  */
   if (size < 65536)
-    atomic_increment (&histogram[size / 16]);
+    catomic_increment (&histogram[size / 16]);
   else
-    atomic_increment (&large);
+    catomic_increment (&large);
   /* Total number of calls of any of the functions.  */
   ++calls_total;
 
@@ -480,7 +480,7 @@ calloc (size_t n, size_t len)
   result = (struct header *) (*mallocp) (size + sizeof (struct header));
   if (result == NULL)
     {
-      atomic_increment (&failed[idx_calloc]);
+      catomic_increment (&failed[idx_calloc]);
       return NULL;
     }
 
@@ -517,7 +517,7 @@ free (void *ptr)
   /* `free (NULL)' has no effect.  */
   if (ptr == NULL)
     {
-      atomic_increment (&calls[idx_free]);
+      catomic_increment (&calls[idx_free]);
      return;
    }
 
@@ -531,9 +531,9 @@
