author     David Chisnall <theraven@FreeBSD.org>  2013-07-10 10:48:22 +0000
committer  David Chisnall <theraven@FreeBSD.org>  2013-07-10 10:48:22 +0000
commit     c7b3fb9657cbe3a2b40054e260891cf69adc6ac1 (patch)
tree       a8e592b91fa2acd8d065da027129fd4b86742219
parent     af04c9e2c0dfdb2172241883c7b9130e63fec0be (diff)
download   src-c7b3fb9657cbe3a2b40054e260891cf69adc6ac1.tar.gz
           src-c7b3fb9657cbe3a2b40054e260891cf69adc6ac1.zip
Notes:     svn path=/vendor/libcxxrt/dist/; revision=253145
           svn path=/vendor/libcxxrt/2013-07-08-c61efa043b14378efbd69c9a2686d44ed46ae179/; revision=253148; tag=vendor/libcxxrt/2013-07-08-c61efa043b14378efbd69c9a2686d44ed46ae179
-rw-r--r--  atomic.h      |   1
-rw-r--r--  auxhelper.cc  |  10
-rw-r--r--  cxxabi.h      |   2
-rw-r--r--  dwarf_eh.h    |   4
-rw-r--r--  exception.cc  |  20
-rw-r--r--  guard.cc      | 164
-rw-r--r--  memory.cc     |  58
7 files changed, 138 insertions, 121 deletions
diff --git a/atomic.h b/atomic.h
index f68faf325826..cfaff3f7d6c1 100644
--- a/atomic.h
+++ b/atomic.h
@@ -27,3 +27,4 @@
#define ATOMIC_LOAD(addr)\
(__sync_synchronize(), *addr)
#endif
+
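
The ATOMIC_LOAD fallback visible in the context above pairs a full memory barrier with a plain load through the comma operator, for toolchains lacking the __atomic builtins. A minimal usage sketch (editorial; the flag variable is illustrative):

#include <stdint.h>

#define ATOMIC_LOAD(addr)\
    (__sync_synchronize(), *addr)

static volatile uint32_t flag;

uint32_t read_flag(void)
{
    // Barrier first, then load: the value seen is no older than the barrier.
    return ATOMIC_LOAD(&flag);
}
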
diff --git a/auxhelper.cc b/auxhelper.cc
index dd34d3de5534..213f8a0890f7 100644
--- a/auxhelper.cc
+++ b/auxhelper.cc
@@ -65,3 +65,13 @@ extern "C" void __cxa_pure_virtual()
abort();
}
+/**
+ * Compilers may (but are not required to) set any deleted-virtual function's
+ * vtable entry to this function. This makes debugging slightly easier, as
+ * users can add a breakpoint on this function to tell if they've accidentally
+ * called a deleted-virtual function.
+ */
+extern "C" void __cxa_deleted_virtual()
+{
+ abort();
+}
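
The new __cxa_deleted_virtual hook parallels the existing __cxa_pure_virtual one. A minimal sketch (editorial; the Interface type is illustrative) of how such vtable slots arise and how the symbol helps when debugging:

// A deleted virtual function is legal to declare; as the comment above
// notes, compilers may fill its vtable slot with __cxa_deleted_virtual,
// so a breakpoint on that symbol catches any stray dispatch through it.
struct Interface
{
    virtual void removed() = delete;  // slot may point at __cxa_deleted_virtual
    virtual ~Interface() {}
};

int main()
{
    Interface i;   // emits a vtable that can reference __cxa_deleted_virtual
    (void)i;
    return 0;
}
// When debugging: break on the symbol, e.g. in gdb: break __cxa_deleted_virtual
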
diff --git a/cxxabi.h b/cxxabi.h
index e1f5054b9016..4b661b640c14 100644
--- a/cxxabi.h
+++ b/cxxabi.h
@@ -193,6 +193,8 @@ __cxa_eh_globals *__cxa_get_globals(void);
*/
__cxa_eh_globals *__cxa_get_globals_fast(void);
+std::type_info * __cxa_current_exception_type();
+
/**
* Throws an exception returned by __cxa_current_primary_exception(). This
* exception may have been caught in another thread.
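
The newly exported __cxa_current_exception_type() reports the dynamic type of the exception currently being handled, or NULL when none is in flight. A short usage sketch, declaring the extern "C" symbol directly (as exception.cc previously did) rather than assuming a particular namespace alias:

#include <cstdio>
#include <stdexcept>
#include <typeinfo>

extern "C" std::type_info *__cxa_current_exception_type();

int main()
{
    try { throw std::runtime_error("boom"); }
    catch (...)
    {
        // Inside a handler this returns the type_info of the live exception.
        std::type_info *t = __cxa_current_exception_type();
        std::printf("caught: %s\n", t ? t->name() : "(none)");
    }
    return 0;
}
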
diff --git a/dwarf_eh.h b/dwarf_eh.h
index 21557fc1a831..d5e1e284709a 100644
--- a/dwarf_eh.h
+++ b/dwarf_eh.h
@@ -57,6 +57,8 @@ typedef unsigned char *dw_eh_ptr_t;
/// DWARF data encoding types.
enum dwarf_data_encoding
{
+ /// Absolute pointer value
+ DW_EH_PE_absptr = 0x00,
/// Unsigned, little-endian, base 128-encoded (variable length).
DW_EH_PE_uleb128 = 0x01,
/// Unsigned 16-bit integer.
@@ -95,8 +97,6 @@ enum dwarf_data_relative
{
/// Value is omitted
DW_EH_PE_omit = 0xff,
- /// Absolute pointer value
- DW_EH_PE_absptr = 0x00,
/// Value relative to program counter
DW_EH_PE_pcrel = 0x10,
/// Value relative to the text segment
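
The move above is a correction of categories: a DWARF EH encoding byte packs a data format in its low nibble and a relative-to base in its high nibble, and DW_EH_PE_absptr (0x00) is a format, not a base. A small sketch of splitting an encoding byte (the constants are copied from the header; the nibble masks are the standard DWARF EH convention):

#include <cstdio>

enum { DW_EH_PE_udata4 = 0x03, DW_EH_PE_pcrel = 0x10 };

int main()
{
    unsigned char enc = DW_EH_PE_pcrel | DW_EH_PE_udata4;   // 0x13
    // Low nibble selects the data format, high nibble the base it is
    // relative to; DW_EH_PE_omit (0xff) is the only whole-byte special case.
    std::printf("format=%#x base=%#x\n", enc & 0x0f, enc & 0x70);
    return 0;
}
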
diff --git a/exception.cc b/exception.cc
index 50ee582ae6f5..c1cb243b105a 100644
--- a/exception.cc
+++ b/exception.cc
@@ -39,6 +39,24 @@
#pragma weak pthread_setspecific
#pragma weak pthread_getspecific
#pragma weak pthread_once
+#ifdef LIBCXXRT_WEAK_LOCKS
+#pragma weak pthread_mutex_lock
+#define pthread_mutex_lock(mtx) do {\
+ if (pthread_mutex_lock) pthread_mutex_lock(mtx);\
+ } while(0)
+#pragma weak pthread_mutex_unlock
+#define pthread_mutex_unlock(mtx) do {\
+ if (pthread_mutex_unlock) pthread_mutex_unlock(mtx);\
+ } while(0)
+#pragma weak pthread_cond_signal
+#define pthread_cond_signal(cv) do {\
+ if (pthread_cond_signal) pthread_cond_signal(cv);\
+ } while(0)
+#pragma weak pthread_cond_wait
+#define pthread_cond_wait(cv, mtx) do {\
+ if (pthread_cond_wait) pthread_cond_wait(cv, mtx);\
+ } while(0)
+#endif
using namespace ABI_NAMESPACE;
@@ -214,8 +232,6 @@ namespace std
}
-extern "C" std::type_info *__cxa_current_exception_type();
-
/**
* Class of exceptions to distinguish between this and other exception types.
*
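
The LIBCXXRT_WEAK_LOCKS block above makes every lock operation conditional on the pthread symbol actually being present, so a program that never links libpthread pays nothing for thread safety. A standalone sketch of the same weak-symbol pattern (editorial; not part of the patch):

#include <pthread.h>
#include <stdio.h>

#pragma weak pthread_mutex_lock
#pragma weak pthread_mutex_unlock

static pthread_mutex_t mtx = PTHREAD_MUTEX_INITIALIZER;

int main(void)
{
    // Weak references resolve to null when libpthread is absent, so these
    // tests turn locking into a no-op in single-threaded builds.
    if (pthread_mutex_lock) pthread_mutex_lock(&mtx);
    puts(pthread_mutex_lock ? "locking enabled" : "single-threaded no-op");
    if (pthread_mutex_unlock) pthread_mutex_unlock(&mtx);
    return 0;
}
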
diff --git a/guard.cc b/guard.cc
index b14a13b84291..f0c26ab9139f 100644
--- a/guard.cc
+++ b/guard.cc
@@ -41,124 +41,112 @@
* initialised.
*/
#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
#include <pthread.h>
#include <assert.h>
+#include "atomic.h"
-#ifdef __arm__
-// ARM ABI - 32-bit guards.
+// Older GCC doesn't define __LITTLE_ENDIAN__
+#ifndef __LITTLE_ENDIAN__
+ // If __BYTE_ORDER__ is defined, use that instead
+# ifdef __BYTE_ORDER__
+# if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+# define __LITTLE_ENDIAN__
+# endif
+ // x86 and ARM are the most common little-endian CPUs, so let's have a
+ // special case for them (ARM is already special cased). Assume everything
+ // else is big endian.
+# elif defined(__x86_64) || defined(__i386)
+# define __LITTLE_ENDIAN__
+# endif
+#endif
-/**
- * Acquires a lock on a guard, returning 0 if the object has already been
- * initialised, and 1 if it has not. If the object is already constructed then
- * this function just needs to read a byte from memory and return.
- */
-extern "C" int __cxa_guard_acquire(volatile int32_t *guard_object)
-{
- if ((1<<31) == *guard_object) { return 0; }
- // If we can atomically move the value from 0 -> 1, then this is
- // uninitialised.
- if (__sync_bool_compare_and_swap(guard_object, 0, 1))
- {
- return 1;
- }
- // If the value is not 0, some other thread was initialising this. Spin
- // until it's finished.
- while (__sync_bool_compare_and_swap(guard_object, (1<<31), (1<<31)))
- {
- // If the other thread aborted, then we grab the lock
- if (__sync_bool_compare_and_swap(guard_object, 0, 1))
- {
- return 1;
- }
- sched_yield();
- }
- return 0;
-}
-/**
- * Releases the lock without marking the object as initialised. This function
- * is called if initialising a static causes an exception to be thrown.
+/*
+ * The least significant bit of the guard variable indicates that the object
+ * has been initialised, the most significant bit is used for a spinlock.
*/
-extern "C" void __cxa_guard_abort(int32_t *guard_object)
-{
- assert(__sync_bool_compare_and_swap(guard_object, 1, 0));
-}
-/**
- * Releases the guard and marks the object as initialised. This function is
- * called after successful initialisation of a static.
- */
-extern "C" void __cxa_guard_release(int32_t *guard_object)
-{
- assert(__sync_bool_compare_and_swap(guard_object, 1, (1<<31)));
-}
-
-
+#ifdef __arm__
+// ARM ABI - 32-bit guards.
+typedef uint32_t guard_t;
+static const uint32_t LOCKED = ((guard_t)1) << 31;
+static const uint32_t INITIALISED = 1;
#else
-// Itanium ABI: 64-bit guards
-
-/**
- * Returns a pointer to the low 32 bits in a 64-bit value, respecting the
- * platform's byte order.
- */
-static int32_t *low_32_bits(volatile int64_t *ptr)
-{
- int32_t *low= (int32_t*)ptr;
- // Test if the machine is big endian - constant propagation at compile time
- // should eliminate this completely.
- int one = 1;
- if (*(char*)&one != 1)
- {
- low++;
- }
- return low;
-}
+typedef uint64_t guard_t;
+# if defined(__LITTLE_ENDIAN__)
+static const guard_t LOCKED = ((guard_t)1) << 63;
+static const guard_t INITIALISED = 1;
+# else
+static const guard_t LOCKED = 1;
+static const guard_t INITIALISED = ((guard_t)1) << 56;
+# endif
+#endif
/**
* Acquires a lock on a guard, returning 0 if the object has already been
* initialised, and 1 if it has not. If the object is already constructed then
* this function just needs to read a byte from memory and return.
*/
-extern "C" int __cxa_guard_acquire(volatile int64_t *guard_object)
+extern "C" int __cxa_guard_acquire(volatile guard_t *guard_object)
{
- char first_byte = (*guard_object) >> 56;
- if (1 == first_byte) { return 0; }
- int32_t *lock = low_32_bits(guard_object);
- // Simple spin lock using the low 32 bits. We assume that concurrent
- // attempts to initialize statics are very rare, so we don't need to
- // optimise for the case where we have lots of threads trying to acquire
- // the lock at the same time.
- while (!__sync_bool_compare_and_swap_4(lock, 0, 1))
+ // Not an atomic read, doesn't establish a happens-before relationship, but
+ // if one is already established and we end up seeing an initialised state
+ // then it's a fast path, otherwise we'll do something more expensive than
+ // this test anyway...
+ if ((INITIALISED == *guard_object)) { return 0; }
+ // Spin trying to do the initialisation
+ while (1)
{
- if (1 == ((*guard_object) >> 56))
+ // Loop trying to move the value of the guard from 0 (not
+ // locked, not initialised) to the locked-uninitialised
+ // position.
+ switch (__sync_val_compare_and_swap(guard_object, 0, LOCKED))
{
- break;
+ // If the old value was 0, we succeeded, so continue
+ // initialising
+ case 0:
+ return 1;
+ // If this was already initialised, return and let the caller skip
+ // initialising it again.
+ case INITIALISED:
+ return 0;
+ // If it is locked by another thread, relinquish the CPU and try
+ // again later.
+ case LOCKED:
+ case LOCKED | INITIALISED:
+ sched_yield();
+ break;
+ // If it is some other value, then something has gone badly wrong.
+ // Give up.
+ default:
+ fprintf(stderr, "Invalid state detected attempting to lock static initialiser.\n");
+ abort();
}
- sched_yield();
}
- // We have to test the guard again, in case another thread has performed
- // the initialisation while we were trying to acquire the lock.
- first_byte = (*guard_object) >> 56;
- return (1 != first_byte);
+ //__builtin_unreachable();
+ return 0;
}
/**
* Releases the lock without marking the object as initialised. This function
* is called if initialising a static causes an exception to be thrown.
*/
-extern "C" void __cxa_guard_abort(int64_t *guard_object)
+extern "C" void __cxa_guard_abort(volatile guard_t *guard_object)
{
- int32_t *lock = low_32_bits(guard_object);
- *lock = 0;
+ __attribute__((unused))
+ bool reset = __sync_bool_compare_and_swap(guard_object, LOCKED, 0);
+ assert(reset);
}
/**
* Releases the guard and marks the object as initialised. This function is
* called after successful initialisation of a static.
*/
-extern "C" void __cxa_guard_release(int64_t *guard_object)
+extern "C" void __cxa_guard_release(volatile guard_t *guard_object)
{
- // Set the first byte to 1
- *guard_object |= ((int64_t)1) << 56;
- __cxa_guard_abort(guard_object);
+ __attribute__((unused))
+ bool reset = __sync_bool_compare_and_swap(guard_object, LOCKED, INITIALISED);
+ assert(reset);
}
-#endif
+
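
For orientation, these three functions implement the Itanium ABI contract the compiler relies on for block-scope statics. A sketch of roughly what gets emitted on a non-ARM, 64-bit-guard target (editorial; Widget and the local names are illustrative, and real compilers additionally inline a fast-path test of the initialised bit before calling into the runtime):

#include <cstdint>
#include <new>

extern "C" int  __cxa_guard_acquire(volatile uint64_t *);
extern "C" void __cxa_guard_release(volatile uint64_t *);
extern "C" void __cxa_guard_abort(volatile uint64_t *);

struct Widget { int value; Widget() : value(42) {} };

Widget &get_widget()
{
    static volatile uint64_t guard;   // zero: unlocked and uninitialised
    alignas(Widget) static unsigned char storage[sizeof(Widget)];
    if (__cxa_guard_acquire(&guard))  // returns 1 if this thread must initialise
    {
        try
        {
            new (storage) Widget();        // run the constructor exactly once
            __cxa_guard_release(&guard);   // set INITIALISED, drop the lock
        }
        catch (...)
        {
            __cxa_guard_abort(&guard);     // drop the lock, stay uninitialised
            throw;                         // a later caller will retry
        }
    }
    return *reinterpret_cast<Widget *>(storage);
}
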
diff --git a/memory.cc b/memory.cc
index cc879e0d0d06..c8d28fc87e5a 100644
--- a/memory.cc
+++ b/memory.cc
@@ -99,40 +99,21 @@ void* operator new(size_t size)
__attribute__((weak))
void* operator new(size_t size, const std::nothrow_t &) throw()
{
- if (0 == size)
- {
- size = 1;
- }
- void *mem = malloc(size);
- while (0 == mem)
- {
- new_handler h = std::get_new_handler();
- if (0 != h)
- {
- try
- {
- h();
- }
- catch (...)
- {
- // nothrow operator new should return NULL in case of
- // std::bad_alloc exception in new handler
- return NULL;
- }
- }
- else
- {
- return NULL;
- }
- mem = malloc(size);
+ try {
+ return :: operator new(size);
+ } catch (...) {
+ // nothrow operator new should return NULL in case of
+ // std::bad_alloc exception in new handler
+ return NULL;
}
-
- return mem;
}
__attribute__((weak))
void operator delete(void * ptr)
+#if __cplusplus < 201000L
+throw()
+#endif
{
free(ptr);
}
@@ -140,13 +121,32 @@ void operator delete(void * ptr)
__attribute__((weak))
void * operator new[](size_t size)
+#if __cplusplus < 201000L
+throw(std::bad_alloc)
+#endif
{
return ::operator new(size);
}
__attribute__((weak))
-void operator delete[](void * ptr) throw()
+void * operator new[](size_t size, const std::nothrow_t &) throw()
+{
+ try {
+ return ::operator new[](size);
+ } catch (...) {
+ // nothrow operator new should return NULL in case of
+ // std::bad_alloc exception in new handler
+ return NULL;
+ }
+}
+
+
+__attribute__((weak))
+void operator delete[](void * ptr)
+#if __cplusplus < 201000L
+throw()
+#endif
{
::operator delete(ptr);
}
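
With this change the nothrow overloads delegate to the throwing operators and translate std::bad_alloc into a null return, so the new_handler retry loop lives in one place. Callers see the usual semantics:

#include <cstdio>
#include <new>

int main()
{
    // new (std::nothrow) returns NULL on failure instead of throwing.
    int *p = new (std::nothrow) int[64];
    if (p == NULL)
        return 1;
    delete[] p;
    std::puts("ok");
    return 0;
}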