Mirror of https://github.com/tobast/libunwind-eh_elf.git, synced 2024-12-23 03:53:43 +01:00
Simplify `sos_alloc()' implementation
Instead of maintaining a pointer into the `sos_memory' array, maintain an index that indicates the next free position. When atomic operations are available, the allocation boils down to a single fetch-and-add operation.
This commit is contained in:
parent 26fc1563fb
commit 9a3565ddc1
3 changed files with 30 additions and 28 deletions
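In essence, the new scheme is a bump allocator over a static buffer: a single atomic fetch-and-add reserves `size' bytes, and the previous value of the counter is the offset of the reserved block. The standalone sketch below illustrates the idea with C11 atomics; the names and constants (pool, pool_freepos, pool_alloc, POOL_SIZE, POOL_ALIGN) are illustrative only and are not libunwind's, which uses its own fetch_and_add macro, SOS_MEMORY_SIZE, and MAX_ALIGN as shown in the diff that follows.

#include <assert.h>
#include <stdatomic.h>
#include <stddef.h>

#define POOL_SIZE  4096   /* illustrative size, stands in for SOS_MEMORY_SIZE */
#define POOL_ALIGN 16     /* illustrative alignment, stands in for MAX_ALIGN */

static _Alignas(POOL_ALIGN) char pool[POOL_SIZE];
static atomic_size_t pool_freepos;   /* index of the next free byte */

/* Round the request up to the allocation granularity, then atomically
   bump the index; the old index is the start of the reserved block.  */
static void *
pool_alloc (size_t size)
{
  size = (size + POOL_ALIGN - 1) & ~(size_t) (POOL_ALIGN - 1);
  size_t pos = atomic_fetch_add (&pool_freepos, size);
  assert (pos + size <= POOL_SIZE);   /* the pool only grows; nothing is freed */
  return &pool[pos];
}

As in `sos_alloc()', nothing is ever freed, so a monotonically increasing index is the only shared state; the `#else' branch in the diff below performs the same bump under `sos_lock' when no atomic fetch-and-add is available.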
@@ -110,11 +110,12 @@ cmpxchg_ptr (void *addr, void *old, void *new)
   return AO_compare_and_swap(u.aop, (AO_t) old, (AO_t) new);
 }
 # define fetch_and_add1(_ptr)       AO_fetch_and_add1(_ptr)
+# define fetch_and_add(_ptr, value) AO_fetch_and_add(_ptr, value)
    /* GCC 3.2.0 on HP-UX crashes on cmpxchg_ptr() */
 # if !(defined(__hpux) && __GNUC__ == 3 && __GNUC_MINOR__ == 2)
 #  define HAVE_CMPXCHG
 # endif
-# define HAVE_FETCH_AND_ADD1
+# define HAVE_FETCH_AND_ADD
 #else
 # ifdef HAVE_IA64INTRIN_H
 #  include <ia64intrin.h>
@@ -132,8 +133,9 @@ cmpxchg_ptr (void *addr, void *old, void *new)
   return __sync_bool_compare_and_swap(u.vlp, (long) old, (long) new);
 }
 # define fetch_and_add1(_ptr)       __sync_fetch_and_add(_ptr, 1)
+# define fetch_and_add(_ptr, value) __sync_fetch_and_add(_ptr, value)
 #  define HAVE_CMPXCHG
-#  define HAVE_FETCH_AND_ADD1
+#  define HAVE_FETCH_AND_ADD
 # endif
 #endif
 #define atomic_read(ptr) (*(ptr))
@@ -50,7 +50,7 @@ unw_flush_cache (unw_addr_space_t as, unw_word_t lo, unw_word_t hi)
      unw_flush_cache() is allowed to flush more than the requested
      range. */
 
-#ifdef HAVE_FETCH_AND_ADD1
+#ifdef HAVE_FETCH_AND_ADD
   fetch_and_add1 (&as->cache_generation);
 #else
 # warning unw_flush_cache(): need a way to atomically increment an integer.
@@ -40,48 +40,48 @@ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
 #endif
 
 static char sos_memory[SOS_MEMORY_SIZE] ALIGNED(MAX_ALIGN);
-static char *sos_memp;
+static size_t sos_memory_freepos;
 static size_t pg_size;
 
 HIDDEN void *
 sos_alloc (size_t size)
 {
-  char *mem;
+  size_t pos;
 
-#ifdef HAVE_CMPXCHG
-  char *old_mem;
-
   size = UNW_ALIGN(size, MAX_ALIGN);
-  if (!sos_memp)
-    cmpxchg_ptr (&sos_memp, 0, sos_memory);
-  do
-    {
-      old_mem = sos_memp;
 
-      mem = (char *) UNW_ALIGN((unsigned long) old_mem, MAX_ALIGN);
-      mem += size;
-      assert (mem < sos_memory + sizeof (sos_memory));
-    }
-  while (!cmpxchg_ptr (&sos_memp, old_mem, mem));
+#if defined(__GNUC__)
+  /* Assume `sos_memory' is suitably aligned. */
+  assert(((uintptr_t) &sos_memory[0] & (MAX_ALIGN-1)) == 0);
+#endif
+
+#if defined(__GNUC__) && defined(HAVE_FETCH_AND_ADD)
+  pos = fetch_and_add (&sos_memory_freepos, size);
 #else
   static define_lock (sos_lock);
   intrmask_t saved_mask;
 
-  size = UNW_ALIGN(size, MAX_ALIGN);
-
   lock_acquire (&sos_lock, saved_mask);
   {
-    if (!sos_memp)
-      sos_memp = sos_memory;
-
-    mem = (char *) UNW_ALIGN((unsigned long) sos_memp, MAX_ALIGN);
-    mem += size;
-    assert (mem < sos_memory + sizeof (sos_memory));
-    sos_memp = mem;
+# ifndef __GNUC__
+    /* No assumptions about `sos_memory' alignment. */
+    if (sos_memory_freepos == 0)
+      {
+        unsigned align = UNW_ALIGN((uintptr_t) &sos_memory[0], MAX_ALIGN)
+                          - (uintptr_t) &sos_memory[0];
+        sos_memory_freepos = align;
+      }
+# endif
+    pos = sos_memory_freepos;
+    sos_memory_freepos += size;
   }
   lock_release (&sos_lock, saved_mask);
 #endif
-  return mem;
+
+  assert (((uintptr_t) &sos_memory[pos] & (MAX_ALIGN-1)) == 0);
+  assert ((pos+size) <= SOS_MEMORY_SIZE);
+
+  return &sos_memory[pos];
 }
 
 /* Must be called while holding the mempool lock. */