
libcwd/private_threading.h

00001 // $Header: /cvsroot/libcwd/libcwd/include/libcwd/private_threading.h,v 1.15 2004/07/14 00:29:35 libcw Exp $
00002 //
00003 // Copyright (C) 2001 - 2004, by
00004 //
00005 // Carlo Wood, Run on IRC <carlo@alinoe.com>
00006 // RSA-1024 0x624ACAD5 1997-01-26                    Sign & Encrypt
00007 // Fingerprint16 = 32 EC A7 B6 AC DB 65 A6  F6 F6 55 DD 1C DC FF 61
00008 //
00009 // This file may be distributed under the terms of the Q Public License
00010 // version 1.0 as appearing in the file LICENSE.QPL included in the
00011 // packaging of this file.
00012 // 
00013 
00018 #ifndef LIBCWD_PRIVATE_THREADING_H
00019 #define LIBCWD_PRIVATE_THREADING_H
00020 
00021 #define LIBCWD_DEBUGDEBUGRWLOCK 0
00022 
00023 #if LIBCWD_DEBUGDEBUGRWLOCK
00024 #define LIBCWD_NO_INTERNAL_STRING
00025 #include <raw_write.h>
00026 #undef LIBCWD_NO_INTERNAL_STRING
00027 extern pthread_mutex_t LIBCWD_DEBUGDEBUGLOCK_CERR_mutex;
00028 extern unsigned int LIBCWD_DEBUGDEBUGLOCK_CERR_count;
00029 #define LIBCWD_DEBUGDEBUGRWLOCK_CERR(x) \
00030         do { \
00031           pthread_mutex_lock(&LIBCWD_DEBUGDEBUGLOCK_CERR_mutex); \
00032           FATALDEBUGDEBUG_CERR(x); \
00033           pthread_mutex_unlock(&LIBCWD_DEBUGDEBUGLOCK_CERR_mutex); \
00034         } while(0)
00035 #define LIBCWD_DEBUGDEBUGLOCK_CERR(x) \
00036         do { \
00037           if (instance != static_tsd_instance) \
00038           { \
00039             pthread_mutex_lock(&LIBCWD_DEBUGDEBUGLOCK_CERR_mutex); \
00040             ++LIBCWD_DEBUGDEBUGLOCK_CERR_count; \
00041             FATALDEBUGDEBUG_CERR("[" << LIBCWD_DEBUGDEBUGLOCK_CERR_count << "] " << pthread_self() << ": " << x); \
00042             pthread_mutex_unlock(&LIBCWD_DEBUGDEBUGLOCK_CERR_mutex); \
00043           } \
00044         } while(0)
00045 #else // !LIBCWD_DEBUGDEBUGRWLOCK
00046 #define LIBCWD_DEBUGDEBUGRWLOCK_CERR(x) do { } while(0)
00047 #define LIBCWD_DEBUGDEBUGLOCK_CERR(x) do { } while(0)
00048 #endif // !LIBCWD_DEBUGDEBUGRWLOCK
00049 
00050 #ifndef LIBCWD_PRIVATE_SET_ALLOC_CHECKING_H
00051 #include <libcwd/private_set_alloc_checking.h>
00052 #endif
00053 #ifndef LIBCWD_PRIVATE_STRUCT_TSD_H
00054 #include <libcwd/private_struct_TSD.h>
00055 #endif
00056 #ifndef LIBCWD_PRIVATE_MUTEX_INSTANCES_H
00057 #include <libcwd/private_mutex_instances.h>
00058 #endif
00059 #ifndef LIBCWD_CORE_DUMP_H
00060 #include <libcwd/core_dump.h>
00061 #endif
00062 #ifndef LIBCW_CSTRING
00063 #define LIBCW_CSTRING
00064 #include <cstring>                      // Needed for std::memset and std::memcpy.
00065 #endif
00066 
00067 #ifdef LIBCWD_HAVE_PTHREAD
00068 #ifdef __linux
00069 #ifndef _GNU_SOURCE
00070 #error "You need to define _GNU_SOURCE in order to make use of the extensions of Linux Threads."
00071 #endif
00072 #endif
00073 #ifndef LIBCW_PTHREAD_H
00074 #define LIBCW_PTHREAD_H
00075 #include <pthread.h>
00076 #endif
00077 #if defined(PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP) && defined(PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP)
00078 #define LIBCWD_USE_LINUXTHREADS 1
00079 #else
00080 #define LIBCWD_USE_POSIX_THREADS 1
00081 #endif
00082 #else
00083 #if LIBCWD_THREAD_SAFE
00084 #error Fatal error: thread support was not detected during configuration of libcwd (did you use --disable-threading?)! How come you are trying to compile a threaded program now?
00085 #endif
00086 #endif // LIBCWD_HAVE_PTHREAD
00087 
00088 #ifndef LIBCWD_USE_LINUXTHREADS
00089 #define LIBCWD_USE_LINUXTHREADS 0
00090 #endif
00091 #ifndef LIBCWD_USE_POSIX_THREADS
00092 #define LIBCWD_USE_POSIX_THREADS 0
00093 #endif
00094 
00095 #if CWDEBUG_DEBUGT
00096 #define LibcwDebugThreads(x) do { x; } while(0)
00097 #else
00098 #define LibcwDebugThreads(x) do { } while(0)
00099 #endif
00100 
00101 #if CWDEBUG_DEBUGT || CWDEBUG_DEBUG
00102 #ifndef LIBCWD_PRIVATE_ASSERT_H
00103 #include <libcwd/private_assert.h>
00104 #endif
00105 #endif
00106 
00107 #if LIBCWD_THREAD_SAFE
00108 
00109 namespace libcwd {
00110 
00111 #if LIBCWD_DEBUGDEBUGRWLOCK
00112 inline
00113 _private_::raw_write_nt const&
00114 operator<<(_private_::raw_write_nt const& raw_write, pthread_mutex_t const& mutex)
00115 {
00116   raw_write << "(pthread_mutex_t&)" << (void*)&mutex <<
00117     " = { __m_reserved = " << mutex.__m_reserved <<
00118     ", __m_count = " << mutex.__m_count <<
00119     ", __m_owner = " << (void*)mutex.__m_owner <<
00120     ", __m_kind = " << mutex.__m_kind <<
00121     ", __m_lock = { __status = " << mutex.__m_lock.__status <<
00122                  ", __spinlock = " << mutex.__m_lock.__spinlock << " } }";
00123   return raw_write;
00124 }
00125 #endif
00126 
00127   namespace _private_ {
00128 
00129 extern void initialize_global_mutexes(void);
00130 extern bool WST_multi_threaded;
00131 
00132 #if CWDEBUG_DEBUGT
00133 extern void test_for_deadlock(int, struct TSD_st&, void const*);
00134 inline void test_for_deadlock(void const* ptr, struct TSD_st& __libcwd_tsd, void const* from)
00135 {
00136   test_for_deadlock(reinterpret_cast<int>(ptr), __libcwd_tsd, from);
00137 }
00138 #endif
00139 
00140 //===================================================================================================
00141 //
00142 // Mutex locking.
00143 //
00144 // template <int instance>       This class may not use system calls (it may not call malloc(3)).
00145 //   class mutex_tct;
00146 //
00147 // Usage.
00148 //
00149 // Global mutexes need to be initialized only once, before they are first used.
00150 // mutex_tct<instance_id_const>::initialize();
00151 //
00152 // Static mutexes in functions (or templates) that cannot be initialized
00153 // globally need to call `initialize()' prior to *each* use (with -O2 this
00154 // is at most a single test, and nothing at all when Linuxthreads are
00155 // being used).
00156 //
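// A minimal sketch (`my_instance' is a hypothetical instance id; when CWDEBUG_DEBUGT
// is set, a `__libcwd_tsd' -- see LIBCWD_TSD_DECLARATION -- is assumed to be in scope):
//
//   LIBCWD_DEFER_CANCEL;                          // Defer cancellation around the critical area.
//   mutex_tct<my_instance>::initialize();         // Returns immediately once initialized.
//   mutex_tct<my_instance>::lock();
//   // ... critical area (no cancellation points) ...
//   mutex_tct<my_instance>::unlock();
//   LIBCWD_RESTORE_CANCEL;
//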
00157 
00158 //========================================================================================================================================17"
00159 // class mutex_tct
00160 
00161 #if LIBCWD_USE_POSIX_THREADS || LIBCWD_USE_LINUXTHREADS
00162 // We have to use macros because pthread_cleanup_push and pthread_cleanup_pop
00163 // are macros with an unmatched '{' and '}' respectively.
00164 #define LIBCWD_DISABLE_CANCEL \
00165     { \
00166       LIBCWD_DISABLE_CANCEL_NO_BRACE
00167 #define LIBCWD_DISABLE_CANCEL_NO_BRACE \
00168       int __libcwd_oldstate; \
00169       pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, &__libcwd_oldstate); \
00170       LibcwDebugThreads( ++__libcwd_tsd.cancel_explicitely_disabled )
00171 #if CWDEBUG_ALLOC
00172 #define LIBCWD_ASSERT_USERSPACE_OR_DEFERED_BEFORE_SETCANCELSTATE \
00173       /* pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, NULL) will call, */ \
00174       /* and pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, NULL) can call,   */ \
00175       /* __pthread_do_exit() when the thread is cancelled in the meantime.   */ \
00176       /* This might free allocations that are allocated in userspace.        */ \
00177       LIBCWD_ASSERT( !__libcwd_tsd.internal || __libcwd_tsd.cancel_explicitely_disabled || __libcwd_tsd.cancel_explicitely_deferred )
00178 #else
00179 #define LIBCWD_ASSERT_USERSPACE_OR_DEFERED_BEFORE_SETCANCELSTATE
00180 #endif
00181 #define LIBCWD_ENABLE_CANCEL_NO_BRACE \
00182       LibcwDebugThreads(\
00183         LIBCWD_ASSERT( __libcwd_tsd.cancel_explicitely_disabled > 0 ); \
00184         --__libcwd_tsd.cancel_explicitely_disabled; \
00185         LIBCWD_ASSERT_USERSPACE_OR_DEFERED_BEFORE_SETCANCELSTATE; \
00186       ); \
00187       pthread_setcancelstate(__libcwd_oldstate, NULL)
00188 #define LIBCWD_ENABLE_CANCEL \
00189       LIBCWD_ENABLE_CANCEL_NO_BRACE; \
00190     }
00191 
00192 #define LIBCWD_DEFER_CANCEL \
00193     { \
00194       LIBCWD_DEFER_CANCEL_NO_BRACE
00195 #define LIBCWD_DEFER_CANCEL_NO_BRACE \
00196       int __libcwd_oldtype; \
00197       pthread_setcanceltype(PTHREAD_CANCEL_DEFERRED, &__libcwd_oldtype); \
00198       LibcwDebugThreads( ++__libcwd_tsd.cancel_explicitely_deferred )
00199 #define LIBCWD_RESTORE_CANCEL_NO_BRACE \
00200       LibcwDebugThreads(\
00201         LIBCWD_ASSERT( __libcwd_tsd.cancel_explicitely_deferred > 0 ); \
00202         --__libcwd_tsd.cancel_explicitely_deferred; \
00203         LIBCWD_ASSERT_USERSPACE_OR_DEFERED_BEFORE_SETCANCELSTATE; \
00204       ); \
00205       pthread_setcanceltype(__libcwd_oldtype, NULL)
00206 #define LIBCWD_RESTORE_CANCEL \
00207       LIBCWD_RESTORE_CANCEL_NO_BRACE; \
00208     }
00209 
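// The cancel-state macros above must be used in matching pairs inside one function,
// because LIBCWD_DISABLE_CANCEL and LIBCWD_DEFER_CANCEL open a brace that the matching
// LIBCWD_ENABLE_CANCEL respectively LIBCWD_RESTORE_CANCEL close again.
// A minimal sketch (hypothetical function, same `__libcwd_tsd' assumption as above):
//
//   void f(void)
//   {
//     LIBCWD_DISABLE_CANCEL;          // Opens a scope and disables cancellation.
//     // ... code that may not be cancelled ...
//     LIBCWD_ENABLE_CANCEL;           // Restores the previous cancel state and closes the scope.
//   }
//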
00210 #if LIBCWD_USE_LINUXTHREADS
00211 #define LIBCWD_DEFER_CLEANUP_PUSH(routine, arg) \
00212     pthread_cleanup_push_defer_np(reinterpret_cast<void(*)(void*)>(routine), reinterpret_cast<void*>(arg)); \
00213       LibcwDebugThreads( ++__libcwd_tsd.cancel_explicitely_deferred; ++__libcwd_tsd.cleanup_handler_installed )
00214 #if CWDEBUG_ALLOC
00215 #define LIBCWD_ASSERT_NONINTERNAL LIBCWD_ASSERT( !__libcwd_tsd.internal )
00216 #else
00217 #define LIBCWD_ASSERT_NONINTERNAL
00218 #endif
00219 #define LIBCWD_CLEANUP_POP_RESTORE(execute) \
00220       LibcwDebugThreads( --__libcwd_tsd.cleanup_handler_installed; \
00221             LIBCWD_ASSERT( __libcwd_tsd.cancel_explicitely_deferred > 0 ); \
00222             LIBCWD_ASSERT_NONINTERNAL; ); \
00223       pthread_cleanup_pop_restore_np(static_cast<int>(execute)); \
00224       LibcwDebugThreads( --__libcwd_tsd.cancel_explicitely_deferred; )
00225 #else // !LIBCWD_USE_LINUXTHREADS
00226 #define LIBCWD_DEFER_CLEANUP_PUSH(routine, arg) \
00227       LIBCWD_DEFER_CANCEL; \
00228       LibcwDebugThreads( ++__libcwd_tsd.cleanup_handler_installed ); \
00229       pthread_cleanup_push(reinterpret_cast<void(*)(void*)>(routine), reinterpret_cast<void*>(arg))
00230 #define LIBCWD_CLEANUP_POP_RESTORE(execute) \
00231       LibcwDebugThreads( --__libcwd_tsd.cleanup_handler_installed ); \
00232       pthread_cleanup_pop(static_cast<int>(execute)); \
00233       LIBCWD_RESTORE_CANCEL
00234 #endif // !LIBCWD_USE_LINUXTHREADS
00235 
00236 #define LIBCWD_PUSH_DEFER_TRYLOCK_MUTEX(instance, unlock_routine) \
00237       LIBCWD_DEFER_CLEANUP_PUSH(static_cast<void (*)(void)>(unlock_routine), &::libcwd::_private_::mutex_tct<(instance)>::S_mutex); \
00238       bool __libcwd_lock_successful = ::libcwd::_private_::mutex_tct<(instance)>::trylock()
00239 #define LIBCWD_DEFER_PUSH_LOCKMUTEX(instance, unlock_routine) \
00240       LIBCWD_DEFER_CLEANUP_PUSH(static_cast<void (*)(void)>(unlock_routine), &::libcwd::_private_::mutex_tct<(instance)>::S_mutex); \
00241       ::libcwd::_private_::mutex_tct<(instance)>::lock(); \
00242       bool const __libcwd_lock_successful = true
00243 #define LIBCWD_UNLOCKMUTEX_POP_RESTORE(instance) \
00244       LIBCWD_CLEANUP_POP_RESTORE(__libcwd_lock_successful)
00245 
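// The three macros above are likewise used as a pair around a critical area; the pushed
// cleanup handler releases the mutex again when the thread is cancelled while holding it,
// and executing the handler at pop time performs the normal unlock as well.
// A minimal sketch (`my_instance' is a hypothetical instance id):
//
//   LIBCWD_DEFER_PUSH_LOCKMUTEX(my_instance, mutex_tct<my_instance>::unlock);
//   // ... critical area ...
//   LIBCWD_UNLOCKMUTEX_POP_RESTORE(my_instance);  // Executes the handler: unlocks the mutex.
//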
00246 #define LIBCWD_DEBUGDEBUG_ASSERT_CANCEL_DEFERRED \
00247     LibcwDebugThreads( \
00248         if (instance != static_tsd_instance) \
00249         { \
00250           /* When entering a critical area, make sure that we have explicitly deferred cancellation  */ \
00251           /* of this thread (or disabled it), because if cancellation happened in the middle of the   */ \
00252           /* critical area then the lock would stay locked.                                            */ \
00253           LIBCWD_ASSERT( __libcwd_tsd.cancel_explicitely_deferred || __libcwd_tsd.cancel_explicitely_disabled ); \
00254         } )
00255 
00256 template <int instance>
00257   class mutex_tct {
00258   public:
00259     static pthread_mutex_t S_mutex;
00260 #if !LIBCWD_USE_LINUXTHREADS || CWDEBUG_DEBUGT
00261   protected:
00262     static bool volatile S_initialized;
00263     static void S_initialize(void);
00264 #endif
00265   public:
00266     static void initialize(void)
00267 #if LIBCWD_USE_LINUXTHREADS && !CWDEBUG_DEBUGT
00268         { }
00269 #else
00270         {
00271           if (S_initialized)    // Check if the static `S_mutex' already has been initialized.
00272             return;             //   No need to lock: `S_initialized' is only set after it is
00273                                 //   really initialized.
00274           S_initialize();
00275         }
00276 #endif
00277   public:
00278     static bool trylock(void)
00279     {
00280       LibcwDebugThreads( LIBCWD_ASSERT( S_initialized ) );
00281 #if CWDEBUG_DEBUGT
00282       LIBCWD_TSD_DECLARATION;
00283 #endif
00284       LIBCWD_DEBUGDEBUG_ASSERT_CANCEL_DEFERRED;
00285       LIBCWD_DEBUGDEBUGLOCK_CERR("Trying to lock mutex " << instance << " (" << (void*)&S_mutex << ") from " << __builtin_return_address(0) << " from " << __builtin_return_address(1));
00286       LIBCWD_DEBUGDEBUGLOCK_CERR("pthread_mutex_trylock(" << S_mutex << ").");
00287       bool success = (pthread_mutex_trylock(&S_mutex) == 0);
00288       LIBCWD_DEBUGDEBUGLOCK_CERR("Result = " << success << ". Mutex now " << S_mutex << ".");
00289 #if CWDEBUG_DEBUG || CWDEBUG_DEBUGT
00290       if (success)
00291       {
00292 #if CWDEBUG_DEBUGT
00293         _private_::test_for_deadlock(instance, __libcwd_tsd, __builtin_return_address(0));
00294 #endif
00295         LIBCWD_DEBUGDEBUGLOCK_CERR("mutex_tct::trylock(): instance_locked[" << instance << "] == " << instance_locked[instance] << "; incrementing it.");
00296         instance_locked[instance] += 1;
00297 #if CWDEBUG_DEBUGT
00298         locked_by[instance] = pthread_self();
00299         locked_from[instance] = __builtin_return_address(0);
00300 #endif
00301       }
00302 #endif
00303       LibcwDebugThreads( if (success) { ++__libcwd_tsd.inside_critical_area; } );
00304       return success;
00305     }
00306     static void lock(void)
00307     {
00308       LibcwDebugThreads( LIBCWD_ASSERT( S_initialized ) );
00309 #if CWDEBUG_DEBUGT
00310       TSD_st* tsd_ptr = 0;
00311       if (instance != static_tsd_instance)
00312       {
00313         LIBCWD_TSD_DECLARATION;
00314         tsd_ptr = &__libcwd_tsd;
00315       }
00316       TSD_st& __libcwd_tsd(*tsd_ptr);
00317 #endif
00318       LIBCWD_DEBUGDEBUG_ASSERT_CANCEL_DEFERRED;
00319       LibcwDebugThreads( if (instance != static_tsd_instance) { ++__libcwd_tsd.inside_critical_area; } );
00320       LIBCWD_DEBUGDEBUGLOCK_CERR("locking mutex " << instance << " (" << (void*)&S_mutex << ") from " << __builtin_return_address(0) << " from " << __builtin_return_address(1));
00321 #if CWDEBUG_DEBUGT
00322       if (instance != static_tsd_instance && !(instance >= 2 * reserved_instance_low && instance < 3 * reserved_instance_low))
00323       {
00324         __libcwd_tsd.waiting_for_lock = instance;
00325         LIBCWD_DEBUGDEBUGLOCK_CERR("pthread_mutex_lock(" << S_mutex << ").");
00326         int res = pthread_mutex_lock(&S_mutex);
00327         LIBCWD_DEBUGDEBUGLOCK_CERR("Result = " << res << ". Mutex now " << S_mutex << ".");
00328         LIBCWD_ASSERT( res == 0 );
00329         __libcwd_tsd.waiting_for_lock = 0;
00330         _private_::test_for_deadlock(instance, __libcwd_tsd, __builtin_return_address(0));
00331       }
00332       else
00333       {
00334         LIBCWD_DEBUGDEBUGLOCK_CERR("pthread_mutex_lock(" << S_mutex << ").");
00335         int res = pthread_mutex_lock(&S_mutex);
00336         LIBCWD_DEBUGDEBUGLOCK_CERR("Result = " << res << ". Mutex now " << S_mutex << ".");
00337         LIBCWD_ASSERT( res == 0 );
00338       }
00339 #else // !CWDEBUG_DEBUGT
00340       pthread_mutex_lock(&S_mutex);
00341 #endif // !CWDEBUG_DEBUGT
00342       LIBCWD_DEBUGDEBUGLOCK_CERR("Lock " << instance << " obtained (" << (void*)&S_mutex << ").");
00343 #if CWDEBUG_DEBUG || CWDEBUG_DEBUGT
00344       LIBCWD_DEBUGDEBUGLOCK_CERR("mutex_tct::lock(): instance_locked[" << instance << "] == " << instance_locked[instance] << "; incrementing it.");
00345       instance_locked[instance] += 1;
00346 #if CWDEBUG_DEBUGT
00347       if (locked_by[instance] != 0 && locked_by[instance] != pthread_self())
00348       {
00349         LIBCWD_DEBUGDEBUGLOCK_CERR("mutex " << instance << " (" << (void*)&S_mutex << ") is already set by another thread (" << locked_by[instance] << ")!");
00350         core_dump();
00351       }
00352       locked_by[instance] = pthread_self();
00353       locked_from[instance] = __builtin_return_address(0);
00354 #endif
00355 #endif
00356     }
00357     static void unlock(void)
00358     {
00359 #if CWDEBUG_DEBUGT
00360       TSD_st* tsd_ptr = 0;
00361       if (instance != static_tsd_instance)
00362       {
00363         LIBCWD_TSD_DECLARATION;
00364         tsd_ptr = &__libcwd_tsd;
00365       }
00366       TSD_st& __libcwd_tsd(*tsd_ptr);
00367 #endif
00368       LIBCWD_DEBUGDEBUG_ASSERT_CANCEL_DEFERRED;
00369 #if CWDEBUG_DEBUG || CWDEBUG_DEBUGT
00370       LIBCWD_DEBUGDEBUGLOCK_CERR("mutex_tct::unlock(): instance_locked[" << instance << "] == " << instance_locked[instance] << "; decrementing it.");
00371       LIBCWD_ASSERT( instance_locked[instance] > 0 );
00372 #if CWDEBUG_DEBUGT
00373       if (locked_by[instance] != pthread_self())
00374       {
00375         LIBCWD_DEBUGDEBUGLOCK_CERR("unlocking instance " << instance << " (" << (void*)&S_mutex << ") failed: locked_by[" << instance << "] == " << locked_by[instance] << ".");
00376         core_dump();
00377       }
00378 #endif
00379       instance_locked[instance] -= 1;
00380 #if CWDEBUG_DEBUGT
00381       if (instance_locked[instance] == 0)
00382       {
00383         locked_by[instance] = 0;
00384         LIBCWD_DEBUGDEBUGLOCK_CERR("mutex_tct::unlock(): locked_by[" << instance << "] was reset.");
00385       }
00386       else LIBCWD_DEBUGDEBUGLOCK_CERR("mutex_tct::unlock(): locked_by[" << instance << "] was not reset, it still is " << locked_by[instance] << ".");
00387 #endif
00388 #endif
00389       LIBCWD_DEBUGDEBUGLOCK_CERR("unlocking mutex " << instance << " (" << (void*)&S_mutex << ").");
00390       LIBCWD_DEBUGDEBUGLOCK_CERR("pthread_mutex_unlock(" << S_mutex << ").");
00391 #if CWDEBUG_DEBUGT
00392       int res =
00393 #endif
00394       pthread_mutex_unlock(&S_mutex);
00395 #if CWDEBUG_DEBUGT
00396       LIBCWD_DEBUGDEBUGLOCK_CERR("Result = " << res << ". Mutex now " << S_mutex << ".");
00397       LIBCWD_ASSERT(res == 0);
00398 #endif
00399       LIBCWD_DEBUGDEBUGLOCK_CERR("Lock " << instance << " released (" << (void*)&S_mutex << ").");
00400       LibcwDebugThreads( if (instance != static_tsd_instance) { --__libcwd_tsd.inside_critical_area; } );
00401     }
00402     // This is used as cleanup handler with LIBCWD_DEFER_CLEANUP_PUSH.
00403     static void cleanup(void*);
00404   };
00405 
00406 #if !LIBCWD_USE_LINUXTHREADS || CWDEBUG_DEBUGT
00407 template <int instance>
00408   bool volatile mutex_tct<instance>::S_initialized = false;
00409 
00410 template <int instance>
00411   void mutex_tct<instance>::S_initialize(void)
00412   {
00413     if (instance == mutex_initialization_instance)      // Specialization.
00414     {
00415 #if !LIBCWD_USE_LINUXTHREADS
00416       pthread_mutexattr_t mutex_attr;
      pthread_mutexattr_init(&mutex_attr);              // The attribute object must be initialized before pthread_mutexattr_settype is used.
00417 #if CWDEBUG_DEBUGT
00418       pthread_mutexattr_settype(&mutex_attr, PTHREAD_MUTEX_ERRORCHECK);
00419 #else
00420       pthread_mutexattr_settype(&mutex_attr, PTHREAD_MUTEX_NORMAL);
00421 #endif
00422       pthread_mutex_init(&S_mutex, &mutex_attr);
00423 #endif // !LIBCWD_USE_LINUXTHREADS
00424       S_initialized = true;
00425     }
00426     else                                                // General case.
00427     {
00428       mutex_tct<mutex_initialization_instance>::initialize();
00429       /* LIBCWD_DEFER_PUSH_LOCKMUTEX(mutex_initialization_instance, mutex_tct<mutex_initialization_instance>::unlock); */
00430       if (!S_initialized)                                       // Check again now that we are locked.
00431       {
00432 #if !LIBCWD_USE_LINUXTHREADS
00433         pthread_mutexattr_t mutex_attr;
        pthread_mutexattr_init(&mutex_attr);            // The attribute object must be initialized before pthread_mutexattr_settype is used.
00434         if (instance < end_recursive_types)
00435           pthread_mutexattr_settype(&mutex_attr, PTHREAD_MUTEX_RECURSIVE);
00436         else
00437         {
00438 #if CWDEBUG_DEBUGT
00439           pthread_mutexattr_settype(&mutex_attr, PTHREAD_MUTEX_ERRORCHECK);
00440 #else
00441           pthread_mutexattr_settype(&mutex_attr, PTHREAD_MUTEX_NORMAL);
00442 #endif
00443         }
00444         pthread_mutex_init(&S_mutex, &mutex_attr);
00445 #endif // !LIBCWD_USE_LINUXTHREADS
00446         S_initialized = true;
00447       }
00448       /* LIBCWD_UNLOCKMUTEX_POP_RESTORE(mutex_initialization_instance); */
00449     }
00450   }
00451 #endif // !LIBCWD_USE_LINUXTHREADS || CWDEBUG_DEBUGT
00452 
00453 template <int instance>
00454   pthread_mutex_t mutex_tct<instance>::S_mutex
00455 #if LIBCWD_USE_LINUXTHREADS
00456       =
00457 #if CWDEBUG_DEBUGT
00458         PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP;
00459 #else
00460         PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP;
00461 #endif
00462 
00463 // Specialization.
00464 template <>
00465   extern pthread_mutex_t mutex_tct<static_tsd_instance>::S_mutex;
00466       // = PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP;
00467 
00468 #else // !LIBCWD_USE_LINUXTHREADS
00469       ;
00470 #endif // !LIBCWD_USE_LINUXTHREADS
00471 
00472 template <int instance>
00473   void mutex_tct<instance>::cleanup(void*)
00474   {
00475     unlock();
00476   }
00477 
00478 //========================================================================================================================================17"
00479 // class cond_tct
00480 
00481 template <int instance>
00482   class cond_tct : public mutex_tct<instance> {
00483   private:
00484     static pthread_cond_t S_condition;
00485 #if CWDEBUG_DEBUGT || !LIBCWD_USE_LINUXTHREADS
00486     static bool volatile S_initialized;
00487   private:
00488     static void S_initialize(void);
00489 #endif
00490   public:
00491     static void initialize(void)
00492 #if CWDEBUG_DEBUGT || !LIBCWD_USE_LINUXTHREADS
00493         {
00494           if (S_initialized)
00495             return;
00496           S_initialize();
00497         }
00498 #else
00499         { }
00500 #endif
00501   public:
00502     void wait(void) {
00503 #if CWDEBUG_DEBUG || CWDEBUG_DEBUGT
00504       LIBCWD_DEBUGDEBUGLOCK_CERR("cond_tct::wait(): instance_locked[" << instance << "] == " << instance_locked[instance] << "; decrementing it.");
00505       LIBCWD_ASSERT( instance_locked[instance] > 0 );
00506 #if CWDEBUG_DEBUGT
00507       if (locked_by[instance] != pthread_self())
00508       {
00509         LIBCWD_DEBUGDEBUGLOCK_CERR("unlocking instance " << instance << " (" << (void*)&S_mutex << ") failed: locked_by[" << instance << "] == " << locked_by[instance] << ".");
00510         core_dump();
00511       }
00512 #endif
00513       instance_locked[instance] -= 1;
00514 #if CWDEBUG_DEBUGT
00515       if (instance_locked[instance] == 0)
00516       {
00517         locked_by[instance] = 0;
00518         LIBCWD_DEBUGDEBUGLOCK_CERR("cond_tct::wait(): locked_by[" << instance << "] was reset.");
00519       }
00520       else LIBCWD_DEBUGDEBUGLOCK_CERR("cond_tct::wait(): locked_by[" << instance << "] was not reset, it still is " << locked_by[instance] << ".");
00521 #endif
00522 #endif
00523       LIBCWD_DEBUGDEBUGLOCK_CERR("unlocking mutex " << instance << " (" << (void*)&S_mutex << ").");
00524       LIBCWD_DEBUGDEBUGLOCK_CERR("pthread_cond_wait(" << (void*)&S_condition << ", " << this->S_mutex << ").");
00525 #if CWDEBUG_DEBUGT
00526       int res =
00527 #endif
00528       pthread_cond_wait(&S_condition, &this->S_mutex);
00529 #if CWDEBUG_DEBUGT
00530       LIBCWD_DEBUGDEBUGLOCK_CERR("Result = " << res << ". Mutex now " << S_mutex << ".");
00531       LIBCWD_ASSERT(res == 0);
00532 #endif
00533       LIBCWD_DEBUGDEBUGLOCK_CERR("Lock " << instance << " obtained (" << (void*)&S_mutex << ").");
00534 #if CWDEBUG_DEBUG || CWDEBUG_DEBUGT
00535       LIBCWD_DEBUGDEBUGLOCK_CERR("cond_tct::wait(): instance_locked[" << instance << "] == " << instance_locked[instance] << "; incrementing it.");
00536       instance_locked[instance] += 1;
00537 #if CWDEBUG_DEBUGT
00538       if (locked_by[instance] != 0 && locked_by[instance] != pthread_self())
00539       {
00540         LIBCWD_DEBUGDEBUGLOCK_CERR("mutex " << instance << " (" << (void*)&S_mutex << ") is already set by another thread (" << locked_by[instance] << ")!");
00541         core_dump();
00542       }
00543       locked_by[instance] = pthread_self();
00544       locked_from[instance] = __builtin_return_address(0);
00545 #endif
00546 #endif
00547     }
00548     void signal(void) { pthread_cond_signal(&S_condition); }
00549     void broadcast(void) { pthread_cond_broadcast(&S_condition); }
00550   };
00551 
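// cond_tct<instance> couples a condition variable to the mutex it inherits from
// mutex_tct<instance>; wait() atomically releases that mutex and re-acquires it
// before returning.  A minimal sketch of the usual pattern (`my_instance' and the
// predicate `ready' are hypothetical; cancellation is assumed to be deferred or
// disabled around the locked region):
//
//   cond_tct<my_instance> cv;
//   cv.initialize();
//   cv.lock();                        // Static lock inherited from mutex_tct<my_instance>.
//   while (!ready)                    // Re-test the predicate: spurious wake-ups are possible.
//     cv.wait();
//   // ... use the state protected by the lock ...
//   cv.unlock();
//
//   // In the thread that makes `ready' true (while holding the same lock):
//   cv.signal();
//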
00552 #if CWDEBUG_DEBUGT || !LIBCWD_USE_LINUXTHREADS
00553 template <int instance>
00554   void cond_tct<instance>::S_initialize(void)
00555   {
00556 #if !LIBCWD_USE_LINUXTHREADS
00557     mutex_tct<mutex_initialization_instance>::initialize();
00558     LIBCWD_DEFER_PUSH_LOCKMUTEX(mutex_initialization_instance, mutex_tct<mutex_initialization_instance>::unlock);
00559     if (!S_initialized)                                 // Check again now that we are locked.
00560     {
00561       pthread_cond_init(&S_condition, NULL);
      S_initialized = true;                             // Mark initialized, so the condition variable is not initialized again.
00562     }
00563     LIBCWD_UNLOCKMUTEX_POP_RESTORE(mutex_initialization_instance);
00564 #endif
00565     mutex_tct<instance>::S_initialize();
00566   }
00567 #endif // CWDEBUG_DEBUGT || !LIBCWD_USE_LINUXTHREADS
00568 
00569 #if CWDEBUG_DEBUGT || !LIBCWD_USE_LINUXTHREADS
00570 template <int instance>
00571   bool volatile cond_tct<instance>::S_initialized = false;
00572 #endif
00573 
00574 template <int instance>
00575   pthread_cond_t cond_tct<instance>::S_condition
00576 #if LIBCWD_USE_LINUXTHREADS
00577       = PTHREAD_COND_INITIALIZER;
00578 #else // !LIBCWD_USE_LINUXTHREADS
00579       ;
00580 #endif // !LIBCWD_USE_LINUXTHREADS
00581 
00582 #endif // LIBCWD_USE_POSIX_THREADS || LIBCWD_USE_LINUXTHREADS
00583 
00584 //========================================================================================================================================17"
00585 // class rwlock_tct
00586 
00587 //
00588 // template <int instance>      This class may not use system calls (it may not call malloc(3)).
00589 //   class rwlock_tct;
00590 //
00591 // Read/write mutex lock implementation.  Readers can hold an arbitrary number of (read) locks,
00592 // blocking only writers.  A writer blocks both readers and other writers.
00593 //
00594 // Examples.
00595 //
00596 // rwlock_tct<instance_id_const>::initialize();
00597 // if (rwlock_tct<instance_id_const>::tryrdlock()) ...
00598 // if (rwlock_tct<instance_id_const>::trywrlock()) ...
00599 // rwlock_tct<instance_id_const>::rdlock();             // Readers lock.
00600 // rwlock_tct<instance_id_const>::rdunlock();
00601 // rwlock_tct<instance_id_const>::wrlock();             // Writers lock.
00602 // rwlock_tct<instance_id_const>::wrunlock();
00603 // rwlock_tct<instance_id_const>::rd2wrlock();          // Convert read lock into write lock.
00604 // rwlock_tct<instance_id_const>::wr2rdlock();          // Convert write lock into read lock.
00605 //
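// A minimal sketch of a matched reader and writer (`my_instance' is a hypothetical
// instance id; cancellation is assumed to be deferred or disabled while a lock is held):
//
//   // Reader side: any number of threads may hold this simultaneously.
//   rwlock_tct<my_instance>::rdlock();
//   // ... read the shared state ...
//   rwlock_tct<my_instance>::rdunlock();
//
//   // Writer side: excludes all readers and other writers.
//   rwlock_tct<my_instance>::wrlock();
//   // ... modify the shared state ...
//   rwlock_tct<my_instance>::wrunlock();
//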
00606 
00607 template <int instance>
00608   class rwlock_tct {
00609   private:
00610     static int const readers_instance = instance + reserved_instance_low;
00611     static int const holders_instance = instance + 2 * reserved_instance_low;
00612     typedef cond_tct<holders_instance> cond_t;
00613     static cond_t S_no_holders_condition;
00614     static int S_holders_count;                         // Number of readers or -1 if a writer locked this object.
00615     static bool volatile S_writer_is_waiting;
00616     static pthread_t S_writer_id;
00617 #if CWDEBUG_DEBUGT || !LIBCWD_USE_LINUXTHREADS
00618     static bool S_initialized;                          // Set when initialized.
00619 #endif
00620   public:
00621     static void initialize(void)
00622     {
00623 #if CWDEBUG_DEBUGT || !LIBCWD_USE_LINUXTHREADS
00624       if (S_initialized)
00625         return;
00626       LIBCWD_DEBUGDEBUGRWLOCK_CERR(pthread_self() << ": Calling initialize() instance " << instance);
00627       mutex_tct<readers_instance>::initialize();
00628       S_no_holders_condition.initialize();
00629       LIBCWD_DEBUGDEBUGRWLOCK_CERR(pthread_self() << ": Leaving initialize() instance " << instance);
00630       S_initialized = true;
00631 #endif
00632     }
00633     static bool tryrdlock(void)
00634     {
00635 #if CWDEBUG_DEBUGT
00636       LIBCWD_TSD_DECLARATION;
00637 #endif
00638       LibcwDebugThreads( LIBCWD_ASSERT( S_initialized ) );
00639       LIBCWD_DEBUGDEBUG_ASSERT_CANCEL_DEFERRED;
00640       LIBCWD_DEBUGDEBUGRWLOCK_CERR(pthread_self() << ": Calling rwlock_tct<" << instance << ">::tryrdlock()");
00641       if (instance < end_recursive_types && pthread_equal(S_writer_id, pthread_self()))
00642       {
00643         LIBCWD_DEBUGDEBUGRWLOCK_CERR(pthread_self() << ": Leaving rwlock_tct<" << instance << ">::tryrdlock() (skipped: thread has write lock)");
00644         return true;                                            // No error checking is done.
00645       }
00646       // Give a writer a higher priority (kinda fuzzy).
00647       if (S_writer_is_waiting || !S_no_holders_condition.trylock())
00648         return false;
00649       bool success = (S_holders_count != -1);
00650       if (success)
00651         ++S_holders_count;                              // Add one reader.
00652       S_no_holders_condition.unlock();
00653       LibcwDebugThreads(
00654           if (success)
00655           {
00656             ++__libcwd_tsd.inside_critical_area;
00657             _private_::test_for_deadlock(instance, __libcwd_tsd, __builtin_return_address(0));
00658             __libcwd_tsd.instance_rdlocked[instance] += 1;
00659             if (__libcwd_tsd.instance_rdlocked[instance] == 1)
00660             {
00661               __libcwd_tsd.rdlocked_by1[instance] = pthread_self();
00662               __libcwd_tsd.rdlocked_from1[instance] = __builtin_return_address(0);
00663             }
00664             else if (__libcwd_tsd.instance_rdlocked[instance] == 2)
00665             {
00666               __libcwd_tsd.rdlocked_by2[instance] = pthread_self();
00667               __libcwd_tsd.rdlocked_from2[instance] = __builtin_return_address(0);
00668             }
00669             else
00670               core_dump();
00671           }
00672       );
00673       LIBCWD_DEBUGDEBUGRWLOCK_CERR(pthread_self() << ": Leaving rwlock_tct<" << instance << ">::tryrdlock()");
00674       return success;
00675     }
00676     static bool trywrlock(void)
00677     {
00678       LibcwDebugThreads( LIBCWD_ASSERT( S_initialized ) );
00679 #if CWDEBUG_DEBUGT
00680       LIBCWD_TSD_DECLARATION;
00681 #endif
00682       LIBCWD_DEBUGDEBUG_ASSERT_CANCEL_DEFERRED;
00683       LIBCWD_DEBUGDEBUGRWLOCK_CERR(pthread_self() << ": Calling rwlock_tct<" << instance << ">::trywrlock()");
00684       bool success;
00685       if ((success = mutex_tct<readers_instance>::trylock()))
00686       {
00687         S_writer_is_waiting = true;
00688         if ((success = S_no_holders_condition.trylock()))
00689         {
00690           if ((success = (S_holders_count == 0)))
00691           {
00692             S_holders_count = -1;                                               // Mark that we have a writer.
00693             if (instance < end_recursive_types)
00694               S_writer_id = pthread_self();
00695 #if CWDEBUG_DEBUG || CWDEBUG_DEBUGT
00696 #if CWDEBUG_DEBUGT
00697             _private_::test_for_deadlock(instance, __libcwd_tsd, __builtin_return_address(0));
00698 #endif
00699             LIBCWD_DEBUGDEBUGRWLOCK_CERR(pthread_self() << ": rwlock_tct::trywrlock(): instance_locked[" << instance << "] == " << instance_locked[instance] << "; incrementing it.");
00700             instance_locked[instance] += 1;
00701 #if CWDEBUG_DEBUGT
00702             locked_by[instance] = pthread_self();
00703             locked_from[instance] = __builtin_return_address(0);
00704 #endif
00705 #endif
00706           }
00707           S_no_holders_condition.unlock();
00708         }
00709         S_writer_is_waiting = false;
00710         mutex_tct<readers_instance>::unlock();
00711       }
00712       LibcwDebugThreads( if (success) { ++__libcwd_tsd.inside_critical_area; } );
00713       LIBCWD_DEBUGDEBUGRWLOCK_CERR(pthread_self() << ": Leaving rwlock_tct<" << instance << ">::trywrlock()");
00714       return success;
00715     }
00716     static void rdlock(bool high_priority = false)
00717     {
00718       LibcwDebugThreads( LIBCWD_ASSERT( S_initialized ) );
00719 #if CWDEBUG_DEBUGT
00720       LIBCWD_TSD_DECLARATION;
00721 #endif
00722       LIBCWD_DEBUGDEBUG_ASSERT_CANCEL_DEFERRED;
00723       LIBCWD_DEBUGDEBUGRWLOCK_CERR(pthread_self() << ": Calling rwlock_tct<" << instance << ">::rdlock()");
00724       if (instance < end_recursive_types && pthread_equal(S_writer_id, pthread_self()))
00725       {
00726         LIBCWD_DEBUGDEBUGRWLOCK_CERR(pthread_self() << ": Leaving rwlock_tct<" << instance << ">::rdlock() (skipped: thread has write lock)");
00727         return;                                         // No error checking is done.
00728       }
00729       // Give a writer a higher priority (kinda fuzzy).
00730       if (S_writer_is_waiting)                                          // If there is a writer interested,
00731       {
00732         if (!high_priority)
00733         {
00734           mutex_tct<readers_instance>::lock();                          // then give it precedence and wait here.
00735           mutex_tct<readers_instance>::unlock();
00736         }
00737       }
00738 #if CWDEBUG_DEBUGT
00739       __libcwd_tsd.waiting_for_rdlock = instance;
00740 #endif
00741       S_no_holders_condition.lock();
00742       while (S_holders_count == -1)                     // Writer locked it?
00743         S_no_holders_condition.wait();                  // Wait for writer to finish.
00744 #if CWDEBUG_DEBUGT
00745       __libcwd_tsd.waiting_for_rdlock = 0;
00746 #endif
00747       ++S_holders_count;                                // Add one reader.
00748       S_no_holders_condition.unlock();
00749       LibcwDebugThreads(
00750           ++__libcwd_tsd.inside_critical_area;
00751           // Thread A: rdlock<1> ... mutex<2>
00752           // Thread B: mutex<2>  ... rdlock<1>
00753           //                      ^--- current program counter.
00754           // can still lead to a deadlock when a third thread is trying to get the write lock
00755           // because trying to acquire a write lock immediately blocks new read locks.
00756           // However, trying to acquire a write lock does not block high priority read locks,
00757           // therefore the following is allowed:
00758           // Thread A: rdlock<1> ... mutex<2>
00759           // Thread B: mutex<2>  ... high priority rdlock<1>
00760           // provided that the write lock wrlock<1> is never used in combination with mutex<2>.
00761           // In order to take this into account, we need to pass the information that this is
00762           // a read lock to the test function.
00763           _private_::test_for_deadlock(instance + (high_priority ? high_priority_read_lock_offset : read_lock_offset), __libcwd_tsd, __builtin_return_address(0));
00764           __libcwd_tsd.instance_rdlocked[instance] += 1;
00765           if (__libcwd_tsd.instance_rdlocked[instance] == 1)
00766           {
00767             __libcwd_tsd.rdlocked_by1[instance] = pthread_self();
00768             __libcwd_tsd.rdlocked_from1[instance] = __builtin_return_address(0);
00769           }
00770           else if (__libcwd_tsd.instance_rdlocked[instance] == 2)
00771           {
00772             __libcwd_tsd.rdlocked_by2[instance] = pthread_self();
00773             __libcwd_tsd.rdlocked_from2[instance] = __builtin_return_address(0);
00774           }
00775           else
00776             core_dump();
00777       );
00778       LIBCWD_DEBUGDEBUGRWLOCK_CERR(pthread_self() << ": Leaving rwlock_tct<" << instance << ">::rdlock()");
00779     }
00780     static void rdunlock(void)
00781     {
00782 #if CWDEBUG_DEBUGT
00783       LIBCWD_TSD_DECLARATION;
00784 #endif
00785       LIBCWD_DEBUGDEBUG_ASSERT_CANCEL_DEFERRED;
00786       LIBCWD_DEBUGDEBUGRWLOCK_CERR(pthread_self() << ": Calling rwlock_tct<" << instance << ">::rdunlock()");
00787       if (instance < end_recursive_types && pthread_equal(S_writer_id, pthread_self()))
00788       {
00789         LIBCWD_DEBUGDEBUGRWLOCK_CERR(pthread_self() << ": Leaving rwlock_tct<" << instance << ">::rdunlock() (skipped: thread has write lock)");
00790         return;                                         // No error checking is done.
00791       }
00792       LibcwDebugThreads( --__libcwd_tsd.inside_critical_area );
00793       S_no_holders_condition.lock();
00794       if (--S_holders_count == 0)                       // Was this the last reader?
00795         S_no_holders_condition.signal();                // Tell waiting threads.
00796       S_no_holders_condition.unlock();
00797       LibcwDebugThreads(
00798           if (__libcwd_tsd.instance_rdlocked[instance] == 2)
00799             __libcwd_tsd.rdlocked_by2[instance] = 0;
00800           else
00801             __libcwd_tsd.rdlocked_by1[instance] = 0;
00802           __libcwd_tsd.instance_rdlocked[instance] -= 1;
00803       );
00804       LIBCWD_DEBUGDEBUGRWLOCK_CERR(pthread_self() << ": Leaving rwlock_tct<" << instance << ">::rdunlock()");
00805     }
00806     static void wrlock(void)
00807     {
00808       LibcwDebugThreads( LIBCWD_ASSERT( S_initialized ) );
00809 #if CWDEBUG_DEBUGT
00810       LIBCWD_TSD_DECLARATION;
00811 #endif
00812       LIBCWD_DEBUGDEBUG_ASSERT_CANCEL_DEFERRED;
00813       LIBCWD_DEBUGDEBUGRWLOCK_CERR(pthread_self() << ": Calling rwlock_tct<" << instance << ">::wrlock()");
00814       mutex_tct<readers_instance>::lock();              // Block new readers,
00815       S_writer_is_waiting = true;                       // from this moment on.
00816 #if CWDEBUG_DEBUGT
00817       __libcwd_tsd.waiting_for_lock = instance;
00818 #endif
00819       S_no_holders_condition.lock();
00820       while (S_holders_count != 0)                      // Other readers or writers have this lock?
00821         S_no_holders_condition.wait();                  // Wait until all current holders are done.
00822 #if CWDEBUG_DEBUGT
00823       __libcwd_tsd.waiting_for_lock = 0;
00824 #endif
00825       S_writer_is_waiting = false;                      // Stop checking the lock for new readers.
00826       mutex_tct<readers_instance>::unlock();            // Release blocked readers.
00827       S_holders_count = -1;                             // Mark that we have a writer.
00828       S_no_holders_condition.unlock();
00829       if (instance < end_recursive_types)
00830         S_writer_id = pthread_self();
00831       LibcwDebugThreads( ++__libcwd_tsd.inside_critical_area );
00832 #if CWDEBUG_DEBUG || CWDEBUG_DEBUGT
00833 #if CWDEBUG_DEBUGT
00834       _private_::test_for_deadlock(instance, __libcwd_tsd, __builtin_return_address(0));
00835 #endif
00836       LIBCWD_DEBUGDEBUGRWLOCK_CERR(pthread_self() << ": rwlock_tct::wrlock(): instance_locked[" << instance << "] == " << instance_locked[instance] << "; incrementing it.");
00837       instance_locked[instance] += 1;
00838 #if CWDEBUG_DEBUGT
00839       locked_by[instance] = pthread_self();
00840       locked_from[instance] = __builtin_return_address(0);
00841 #endif
00842 #endif
00843       LIBCWD_DEBUGDEBUGRWLOCK_CERR(pthread_self() << ": Leaving rwlock_tct<" << instance << ">::wrlock()");
00844     }
00845     static void wrunlock(void)
00846     {
00847 #if CWDEBUG_DEBUGT
00848       LIBCWD_TSD_DECLARATION;
00849 #endif
00850       LIBCWD_DEBUGDEBUG_ASSERT_CANCEL_DEFERRED;
00851 #if CWDEBUG_DEBUG || CWDEBUG_DEBUGT
00852       LIBCWD_DEBUGDEBUGRWLOCK_CERR(pthread_self() << ": rwlock_tct::wrunlock(): instance_locked[" << instance << "] == " << instance_locked[instance] << "; decrementing it.");
00853 #if CWDEBUG_DEBUGT
00854       LIBCWD_ASSERT( instance_locked[instance] > 0 && locked_by[instance] == pthread_self() );
00855 #endif
00856       instance_locked[instance] -= 1;
00857 #endif
00858 #if CWDEBUG_DEBUGT
00859       if (instance > end_recursive_types || instance_locked[instance] == 0)
00860       {
00861         locked_by[instance] = 0;
00862         LIBCWD_DEBUGDEBUGRWLOCK_CERR(pthread_self() << ": rwlock_tct::wrunlock(): locked_by[" << instance << "] was reset.");
00863       }
00864       else
00865       {
00866         LIBCWD_DEBUGDEBUGRWLOCK_CERR(pthread_self() << ": rwlock_tct::wrunlock(): locked_by[" << instance << "] was not reset, it still is " << locked_by[instance] << ".");
00867       }
00868 #endif
00869       LIBCWD_DEBUGDEBUGRWLOCK_CERR(pthread_self() << ": Calling rwlock_tct<" << instance << ">::wrunlock()");
00870       LibcwDebugThreads( --__libcwd_tsd.inside_critical_area );
00871       if (instance < end_recursive_types)
00872         S_writer_id = 0;
00873       S_no_holders_condition.lock();
00874       S_holders_count = 0;                              // We have no writer anymore.
00875       S_no_holders_condition.signal();                  // No readers and no writers left.
00876       S_no_holders_condition.unlock();
00877       LIBCWD_DEBUGDEBUGRWLOCK_CERR(pthread_self() << ": Leaving rwlock_tct<" << instance << ">::wrunlock()");
00878     }
00879     static void rd2wrlock(void)
00880     {
00881 #if CWDEBUG_DEBUGT
00882       LIBCWD_TSD_DECLARATION;
00883 #endif
00884       LIBCWD_DEBUGDEBUG_ASSERT_CANCEL_DEFERRED;
00885       LIBCWD_DEBUGDEBUGRWLOCK_CERR(pthread_self() << ": Calling rwlock_tct<" << instance << ">::rd2wrlock()");
00886 #if CWDEBUG_DEBUGT
00887       __libcwd_tsd.waiting_for_lock = instance;
00888 #endif
00889       S_no_holders_condition.lock();
00890       if (--S_holders_count > 0)
00891       {
00892         mutex_tct<readers_instance>::lock();    // Block new readers.
00893         S_writer_is_waiting = true;
00894         while (S_holders_count != 0)
00895           S_no_holders_condition.wait();
00896         S_writer_is_waiting = false;
00897         mutex_tct<readers_instance>::unlock();  // Release blocked readers.
00898       }
00899 #if CWDEBUG_DEBUGT
00900       __libcwd_tsd.waiting_for_lock = 0;
00901 #endif
00902       S_holders_count = -1;                     // We are a writer now.
00903       S_no_holders_condition.unlock();
00904       if (instance < end_recursive_types)
00905         S_writer_id = pthread_self();
00906 #if CWDEBUG_DEBUG || CWDEBUG_DEBUGT
00907 #if CWDEBUG_DEBUGT
00908       _private_::test_for_deadlock(instance, __libcwd_tsd, __builtin_return_address(0));
00909 #endif
00910       LIBCWD_DEBUGDEBUGRWLOCK_CERR(pthread_self() << ": rwlock_tct::rd2wrlock(): instance_locked[" << instance << "] == " << instance_locked[instance] << "; incrementing it.");
00911       instance_locked[instance] += 1;
00912 #if CWDEBUG_DEBUGT
00913       locked_by[instance] = pthread_self();
00914       locked_from[instance] = __builtin_return_address(0);
00915 #endif
00916 #endif
00917       LibcwDebugThreads(
00918           if (__libcwd_tsd.instance_rdlocked[instance] == 2)
00919             __libcwd_tsd.rdlocked_by2[instance] = 0;
00920           else
00921             __libcwd_tsd.rdlocked_by1[instance] = 0;
00922           __libcwd_tsd.instance_rdlocked[instance] -= 1;
00923       );
00924       LIBCWD_DEBUGDEBUGRWLOCK_CERR(pthread_self() << ": Leaving rwlock_tct<" << instance << ">::rd2wrlock()");
00925     }
00926     static void wr2rdlock(void)
00927     {
00928 #if CWDEBUG_DEBUGT
00929       LIBCWD_TSD_DECLARATION;
00930 #endif
00931       LIBCWD_DEBUGDEBUG_ASSERT_CANCEL_DEFERRED;
00932 #if CWDEBUG_DEBUG || CWDEBUG_DEBUGT
00933       LIBCWD_DEBUGDEBUGRWLOCK_CERR(pthread_self() << ": rwlock_tct::wr2rdlock(): instance_locked[" << instance << "] == " << instance_locked[instance] << "; decrementing it.");
00934 #if CWDEBUG_DEBUGT
00935       LIBCWD_ASSERT( instance_locked[instance] > 0 && locked_by[instance] == pthread_self() );
00936 #endif
00937       instance_locked[instance] -= 1;
00938 #if CWDEBUG_DEBUGT
00939       if (instance > end_recursive_types || instance_locked[instance] == 0)
00940       {
00941         locked_by[instance] = 0;
00942         LIBCWD_DEBUGDEBUGRWLOCK_CERR(pthread_self() << ": rwlock_tct::wr2rdlock(): locked_by[" << instance << "] was reset.");
00943       }
00944       else
00945       {
00946         LIBCWD_DEBUGDEBUGRWLOCK_CERR(pthread_self() << ": rwlock_tct::wr2rdlock(): locked_by[" << instance << "] was not reset, it still is " << locked_by[instance] << ".");
00947       }
00948 #endif
00949 #endif
00950       LIBCWD_DEBUGDEBUGRWLOCK_CERR(pthread_self() << ": Calling rwlock_tct<" << instance << ">::wr2rdlock()");
00951       if (instance < end_recursive_types)
00952         S_writer_id = 0;
00953       S_holders_count = 1;                              // Turn writer into a reader (atomic operation).
00954       LibcwDebugThreads(
00955           _private_::test_for_deadlock(instance, __libcwd_tsd, __builtin_return_address(0));
00956           if (instance >= instance_rdlocked_size)
00957             core_dump();
00958           __libcwd_tsd.instance_rdlocked[instance] += 1;
00959           if (__libcwd_tsd.instance_rdlocked[instance] == 1)
00960           {
00961             __libcwd_tsd.rdlocked_by1[instance] = pthread_self();
00962             __libcwd_tsd.rdlocked_from1[instance] = __builtin_return_address(0);
00963           }
00964           else if (__libcwd_tsd.instance_rdlocked[instance] == 2)
00965           {
00966             __libcwd_tsd.rdlocked_by2[instance] = pthread_self();
00967             __libcwd_tsd.rdlocked_from2[instance] = __builtin_return_address(0);
00968           }
00969           else
00970             core_dump();
00971       );
00972       LIBCWD_DEBUGDEBUGRWLOCK_CERR(pthread_self() << ": Leaving rwlock_tct<" << instance << ">::wr2rdlock()");
00973     }
00974     // This is used as cleanup handler with LIBCWD_DEFER_CLEANUP_PUSH.
00975     static void cleanup(void*);
00976   };
00977 
00978 template <int instance>
00979   int rwlock_tct<instance>::S_holders_count = 0;
00980 
00981 template <int instance>
00982   bool volatile rwlock_tct<instance>::S_writer_is_waiting = 0;
00983 
00984 template <int instance>
00985   pthread_t rwlock_tct<instance>::S_writer_id = 0;
00986 
00987 #if CWDEBUG_DEBUGT || !LIBCWD_USE_LINUXTHREADS
00988 template <int instance>
00989   bool rwlock_tct<instance>::S_initialized = 0;
00990 #endif
00991 
00992 template <int instance>
00993   typename  rwlock_tct<instance>::cond_t rwlock_tct<instance>::S_no_holders_condition;
00994 
00995 template <int instance>
00996   void rwlock_tct<instance>::cleanup(void*)
00997   {
00998     if (S_holders_count == -1)
00999       wrunlock();
01000     else
01001       rdunlock();
01002   }
01003 
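// The cleanup() member above chooses between wrunlock() and rdunlock() based on
// S_holders_count, so it can be pushed as the cancellation handler for either kind
// of lock.  A minimal sketch (`my_instance' is a hypothetical instance id):
//
//   LIBCWD_DEFER_CLEANUP_PUSH(rwlock_tct<my_instance>::cleanup, 0);
//   rwlock_tct<my_instance>::rdlock();
//   // ... on cancellation the pushed handler releases the lock ...
//   rwlock_tct<my_instance>::rdunlock();
//   LIBCWD_CLEANUP_POP_RESTORE(false);              // Normal path: handler is not executed.
//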
01004 extern void fatal_cancellation(void*);
01005 
01006   } // namespace _private_
01007 } // namespace libcwd
01008 
01009 #else // !LIBCWD_THREAD_SAFE
01010 #define LIBCWD_DISABLE_CANCEL
01011 #define LIBCWD_DISABLE_CANCEL_NO_BRACE
01012 #define LIBCWD_ENABLE_CANCEL_NO_BRACE
01013 #define LIBCWD_ENABLE_CANCEL
01014 #define LIBCWD_DEFER_CANCEL
01015 #define LIBCWD_DEFER_CANCEL_NO_BRACE
01016 #define LIBCWD_RESTORE_CANCEL_NO_BRACE
01017 #define LIBCWD_RESTORE_CANCEL
01018 #define LIBCWD_DEFER_CLEANUP_PUSH(routine, arg)
01019 #define LIBCWD_CLEANUP_POP_RESTORE(execute)
01020 #define LIBCWD_PUSH_DEFER_TRYLOCK_MUTEX(instance, unlock_routine)
01021 #define LIBCWD_DEFER_PUSH_LOCKMUTEX(instance, unlock_routine)
01022 #define LIBCWD_UNLOCKMUTEX_POP_RESTORE(instance)
01023 #define LIBCWD_DEBUGDEBUG_ASSERT_CANCEL_DEFERRED
01024 #endif // LIBCWD_THREAD_SAFE
01025 #endif // LIBCWD_PRIVATE_THREADING_H
01026 