/* Private libc-internal interface for mutex locks.  NPTL version.
   Copyright (C) 1996-2018 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public License as
   published by the Free Software Foundation; either version 2.1 of the
   License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; see the file COPYING.LIB.  If
   not, see <http://www.gnu.org/licenses/>.  */

#ifndef _LIBC_LOCKP_H
#define _LIBC_LOCKP_H 1

#include <pthread.h>
#define __need_NULL
#include <stddef.h>


/* Fortunately Linux now has a means to do locking which is realtime
   safe without the aid of the thread library.  We also need no fancy
   options like error-checking mutexes etc.; we only need simple
   locks, possibly recursive.  This can be implemented easily and
   cheaply using futexes.  We will use them everywhere except in
   ld.so, since ld.so might be used on old kernels with a different
   libc.so.  */
#include <lowlevellock.h>
#include <tls.h>
#include <pthread-functions.h>

#if IS_IN (libpthread)
/* This gets us the declarations of the __pthread_* internal names,
   and hidden_proto for them.  */
# include <nptl/pthreadP.h>
#endif

/* Mutex type.  */
#if !IS_IN (libc) && !IS_IN (libpthread)
typedef pthread_mutex_t __libc_lock_t;
#else
typedef int __libc_lock_t;
#endif
typedef struct { pthread_mutex_t mutex; } __rtld_lock_recursive_t;
typedef pthread_rwlock_t __libc_rwlock_t;

/* Type for key to thread-specific data.  */
typedef pthread_key_t __libc_key_t;

/* Define a lock variable NAME with storage class CLASS.  The lock must be
   initialized with __libc_lock_init before it can be used (or define it
   with __libc_lock_define_initialized, below).  Use `extern' for CLASS to
   declare a lock defined in another module.  In public structure
   definitions you must use a pointer to the lock structure (i.e., NAME
   begins with a `*'), because its storage size will not be known outside
   of libc.  */
#define __libc_lock_define(CLASS,NAME) \
  CLASS __libc_lock_t NAME;
#define __libc_rwlock_define(CLASS,NAME) \
  CLASS __libc_rwlock_t NAME;
#define __rtld_lock_define_recursive(CLASS,NAME) \
  CLASS __rtld_lock_recursive_t NAME;
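
/* A minimal usage sketch (hypothetical lock names, not part of this
   header): the defining translation unit expands the macro with a
   storage class, and other modules use `extern'.  The macros above
   already supply the trailing semicolon.

     __libc_lock_define (static, _example_list_lock)
     __libc_rwlock_define (extern, _example_table_rwlock)

   Both locks still have to be initialized (with __libc_lock_init or
   __libc_rwlock_init, or by using the *_define_initialized variants
   below) before first use.  */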

/* Define an initialized lock variable NAME with storage class CLASS.

   For the C library we take a deeper look at the initializer.  For
   this implementation all fields are initialized to zero.  Therefore
   we don't initialize the variable which allows putting it into the
   BSS section.  (Except on PA-RISC and other odd architectures, where
   initialized locks must be set to one due to the lack of normal
   atomic operations.) */

#define _LIBC_LOCK_INITIALIZER LLL_LOCK_INITIALIZER
#if IS_IN (libc) || IS_IN (libpthread)
# if LLL_LOCK_INITIALIZER == 0
#  define __libc_lock_define_initialized(CLASS,NAME) \
  CLASS __libc_lock_t NAME;
# else
#  define __libc_lock_define_initialized(CLASS,NAME) \
  CLASS __libc_lock_t NAME = LLL_LOCK_INITIALIZER;
# endif
#else
# define __libc_lock_define_initialized(CLASS,NAME) \
  CLASS __libc_lock_t NAME;
#endif

#define __libc_rwlock_define_initialized(CLASS,NAME) \
  CLASS __libc_rwlock_t NAME = PTHREAD_RWLOCK_INITIALIZER;

#define __rtld_lock_define_initialized_recursive(CLASS,NAME) \
  CLASS __rtld_lock_recursive_t NAME = _RTLD_LOCK_RECURSIVE_INITIALIZER;
#define _RTLD_LOCK_RECURSIVE_INITIALIZER \
  {PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP}

#define __rtld_lock_initialize(NAME) \
  (void) ((NAME) = (__rtld_lock_recursive_t) _RTLD_LOCK_RECURSIVE_INITIALIZER)
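
/* A usage sketch (hypothetical names, for illustration only): locks that
   are live from program start are usually defined with a static
   initializer, while an rtld recursive lock embedded in a structure can
   be (re-)initialized at run time:

     __libc_lock_define_initialized (static, _example_registry_lock)
     __rtld_lock_define_initialized_recursive (static, _example_dl_lock)

     struct _example_namespace { __rtld_lock_recursive_t lock; } _example_ns;
     __rtld_lock_initialize (_example_ns.lock);
*/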

/* If we check for a weakly referenced symbol and then perform a
   normal jump to it, the code generated for some platforms in case of
   PIC is unnecessarily slow.  What would happen is that the function
   is first referenced as data and then it is called indirectly
   through the PLT.  We can make this a direct jump.  */
#ifdef __PIC__
# define __libc_maybe_call(FUNC, ARGS, ELSE) \
  (__extension__ ({ __typeof (FUNC) *_fn = (FUNC); \
		    _fn != NULL ? (*_fn) ARGS : ELSE; }))
#else
# define __libc_maybe_call(FUNC, ARGS, ELSE) \
  (FUNC != NULL ? FUNC ARGS : ELSE)
#endif
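
/* An illustrative sketch (hypothetical mutex variable; the pthread symbol
   is one of the weak references declared further below): the call is made
   only when libpthread actually provides the symbol, otherwise ELSE is
   the result.

     pthread_mutex_t _example_mutex;
     ...
     int _example_rc
       = __libc_maybe_call (__pthread_mutex_destroy, (&_example_mutex), 0);

   In a single-threaded program without libpthread the weak symbol is NULL
   and _example_rc is simply 0.  */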

/* Call thread functions through the function pointer table.  */
#if defined SHARED && IS_IN (libc)
# define PTFAVAIL(NAME) __libc_pthread_functions_init
# define __libc_ptf_call(FUNC, ARGS, ELSE) \
  (__libc_pthread_functions_init ? PTHFCT_CALL (ptr_##FUNC, ARGS) : ELSE)
# define __libc_ptf_call_always(FUNC, ARGS) \
  PTHFCT_CALL (ptr_##FUNC, ARGS)
#elif IS_IN (libpthread)
# define PTFAVAIL(NAME) 1
# define __libc_ptf_call(FUNC, ARGS, ELSE) \
  FUNC ARGS
# define __libc_ptf_call_always(FUNC, ARGS) \
  FUNC ARGS
#else
# define PTFAVAIL(NAME) (NAME != NULL)
# define __libc_ptf_call(FUNC, ARGS, ELSE) \
  __libc_maybe_call (FUNC, ARGS, ELSE)
# define __libc_ptf_call_always(FUNC, ARGS) \
  FUNC ARGS
#endif
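
/* An illustrative sketch (hypothetical key and value, for exposition): in
   a shared libc the call is routed through the __libc_pthread_functions
   table, and ELSE is returned when libpthread has not registered itself;
   inside libpthread the function is called directly.

     __libc_key_t _example_key;
     void *_example_value;
     ...
     __libc_ptf_call (__pthread_setspecific, (_example_key, _example_value), 0);

   __libc_ptf_call_always is the variant used once PTFAVAIL has already
   confirmed that the function is available.  */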


/* Initialize the named lock variable, leaving it in a consistent, unlocked
   state.  */
#if IS_IN (libc) || IS_IN (libpthread)
# define __libc_lock_init(NAME) \
  ((void) ((NAME) = LLL_LOCK_INITIALIZER))
#else
# define __libc_lock_init(NAME) \
  __libc_maybe_call (__pthread_mutex_init, (&(NAME), NULL), 0)
#endif
#if defined SHARED && IS_IN (libc)
/* ((NAME) = (__libc_rwlock_t) PTHREAD_RWLOCK_INITIALIZER) is inefficient.  */
# define __libc_rwlock_init(NAME) \
  ((void) __builtin_memset (&(NAME), '\0', sizeof (NAME)))
#else
# define __libc_rwlock_init(NAME) \
  __libc_maybe_call (__pthread_rwlock_init, (&(NAME), NULL), 0)
#endif

/* Finalize the named lock variable, which must be locked.  It cannot be
   used again until __libc_lock_init is called again on it.  This must be
   called on a lock variable before the containing storage is reused.  */
#if IS_IN (libc) || IS_IN (libpthread)
# define __libc_lock_fini(NAME) ((void) 0)
#else
# define __libc_lock_fini(NAME) \
  __libc_maybe_call (__pthread_mutex_destroy, (&(NAME)), 0)
#endif
#if defined SHARED && IS_IN (libc)
# define __libc_rwlock_fini(NAME) ((void) 0)
#else
# define __libc_rwlock_fini(NAME) \
  __libc_maybe_call (__pthread_rwlock_destroy, (&(NAME)), 0)
#endif

/* Lock the named lock variable.  */
#if IS_IN (libc) || IS_IN (libpthread)
# ifndef __libc_lock_lock
#  define __libc_lock_lock(NAME) \
  ({ lll_lock (NAME, LLL_PRIVATE); 0; })
# endif
#else
# undef __libc_lock_lock
# define __libc_lock_lock(NAME) \
  __libc_maybe_call (__pthread_mutex_lock, (&(NAME)), 0)
#endif
#define __libc_rwlock_rdlock(NAME) \
  __libc_ptf_call (__pthread_rwlock_rdlock, (&(NAME)), 0)
#define __libc_rwlock_wrlock(NAME) \
  __libc_ptf_call (__pthread_rwlock_wrlock, (&(NAME)), 0)

/* Try to lock the named lock variable.  */
#if IS_IN (libc) || IS_IN (libpthread)
# ifndef __libc_lock_trylock
#  define __libc_lock_trylock(NAME) \
  lll_trylock (NAME)
# endif
#else
# undef __libc_lock_trylock
# define __libc_lock_trylock(NAME) \
  __libc_maybe_call (__pthread_mutex_trylock, (&(NAME)), 0)
#endif
#define __libc_rwlock_tryrdlock(NAME) \
  __libc_maybe_call (__pthread_rwlock_tryrdlock, (&(NAME)), 0)
#define __libc_rwlock_trywrlock(NAME) \
  __libc_maybe_call (__pthread_rwlock_trywrlock, (&(NAME)), 0)

#define __rtld_lock_trylock_recursive(NAME) \
  __libc_maybe_call (__pthread_mutex_trylock, (&(NAME).mutex), 0)

/* Unlock the named lock variable.  */
#if IS_IN (libc) || IS_IN (libpthread)
# define __libc_lock_unlock(NAME) \
  lll_unlock (NAME, LLL_PRIVATE)
#else
# define __libc_lock_unlock(NAME) \
  __libc_maybe_call (__pthread_mutex_unlock, (&(NAME)), 0)
#endif
#define __libc_rwlock_unlock(NAME) \
  __libc_ptf_call (__pthread_rwlock_unlock, (&(NAME)), 0)
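
/* Putting the pieces together, a sketch of the typical life cycle of a
   libc-internal lock (hypothetical names; _example_do_flush stands for
   whatever work the lock protects):

     __libc_lock_define_initialized (static, _example_cache_lock)

     void
     _example_cache_flush (void)
     {
       __libc_lock_lock (_example_cache_lock);
       _example_do_flush ();
       __libc_lock_unlock (_example_cache_lock);
     }

   A lock defined with plain __libc_lock_define would need __libc_lock_init
   first, and __libc_lock_fini must run before its storage is reused.  */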

#ifdef SHARED
# define __rtld_lock_default_lock_recursive(lock) \
  ++((pthread_mutex_t *)(lock))->__data.__count;

# define __rtld_lock_default_unlock_recursive(lock) \
  --((pthread_mutex_t *)(lock))->__data.__count;

# define __rtld_lock_lock_recursive(NAME) \
  GL(dl_rtld_lock_recursive) (&(NAME).mutex)

# define __rtld_lock_unlock_recursive(NAME) \
  GL(dl_rtld_unlock_recursive) (&(NAME).mutex)
#else
# define __rtld_lock_lock_recursive(NAME) \
  __libc_maybe_call (__pthread_mutex_lock, (&(NAME).mutex), 0)

# define __rtld_lock_unlock_recursive(NAME) \
  __libc_maybe_call (__pthread_mutex_unlock, (&(NAME).mutex), 0)
#endif

/* Define once control variable.  */
#if PTHREAD_ONCE_INIT == 0
/* Special case for static variables where we can avoid the initialization
   if it is zero.  */
# define __libc_once_define(CLASS, NAME) \
  CLASS pthread_once_t NAME
#else
# define __libc_once_define(CLASS, NAME) \
  CLASS pthread_once_t NAME = PTHREAD_ONCE_INIT
#endif
/* Call the handler only on the first call.  */
#define __libc_once(ONCE_CONTROL, INIT_FUNCTION) \
  do {									      \
    if (PTFAVAIL (__pthread_once))					      \
      __libc_ptf_call_always (__pthread_once, (&(ONCE_CONTROL),	      \
					       INIT_FUNCTION));		      \
    else if ((ONCE_CONTROL) == PTHREAD_ONCE_INIT) {			      \
      INIT_FUNCTION ();							      \
      (ONCE_CONTROL) |= 2;						      \
    }									      \
  } while (0)

/* Get once control variable.  */
#define __libc_once_get(ONCE_CONTROL)	((ONCE_CONTROL) != PTHREAD_ONCE_INIT)
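
/* A usage sketch (hypothetical names): the control variable is defined
   with __libc_once_define (which, unlike the lock macros, does not supply
   the semicolon), and the initialization routine runs at most once no
   matter how many threads reach the call.

     __libc_once_define (static, _example_once);

     static void
     _example_init (void)
     {
       ...one-time setup...
     }

     ...
     __libc_once (_example_once, _example_init);

   Afterwards __libc_once_get (_example_once) reports whether the
   initialization has happened.  */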

/* Note that for I/O cleanup handling we are using the old-style
   cancel handling.  It does not have to be integrated with C++ since
   no C++ code is called in the middle.  The old-style handling is
   faster and the support is not going away.  */
extern void _pthread_cleanup_push (struct _pthread_cleanup_buffer *buffer,
				   void (*routine) (void *), void *arg);
extern void _pthread_cleanup_pop (struct _pthread_cleanup_buffer *buffer,
				  int execute);
extern void _pthread_cleanup_push_defer (struct _pthread_cleanup_buffer *buffer,
					 void (*routine) (void *), void *arg);
extern void _pthread_cleanup_pop_restore (struct _pthread_cleanup_buffer *buffer,
					  int execute);

/* Sometimes we have to exit the block in the middle.  */
#define __libc_cleanup_end(DOIT) \
    if (_avail) {							      \
      __libc_ptf_call_always (_pthread_cleanup_pop_restore, (&_buffer, DOIT));\
    } else if (DOIT)							      \
      _buffer.__routine (_buffer.__arg)


/* Normal cleanup handling, based on C cleanup attribute.  */
__extern_inline void
__libc_cleanup_routine (struct __pthread_cleanup_frame *f)
{
  if (f->__do_it)
    f->__cancel_routine (f->__cancel_arg);
}

#define __libc_cleanup_push(fct, arg) \
  do {									      \
    struct __pthread_cleanup_frame __clframe				      \
      __attribute__ ((__cleanup__ (__libc_cleanup_routine)))		      \
      = { .__cancel_routine = (fct), .__cancel_arg = (arg),		      \
	  .__do_it = 1 };

#define __libc_cleanup_pop(execute) \
    __clframe.__do_it = (execute);					      \
  } while (0)
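
/* A usage sketch (hypothetical function names): __libc_cleanup_push opens
   a block and __libc_cleanup_pop closes it, so the two must be paired in
   the same lexical scope; the cleanup attribute invokes the routine when
   the frame goes out of scope with __do_it still set.

     static void _example_unlock (void *arg);

     __libc_cleanup_push (_example_unlock, &_example_cache_lock);
     _example_do_flush ();
     __libc_cleanup_pop (0);

   Passing 0 to __libc_cleanup_pop disarms the handler on the normal exit
   path; passing 1 runs it when the block is left.  */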


/* Create thread-specific key.  */
#define __libc_key_create(KEY, DESTRUCTOR) \
  __libc_ptf_call (__pthread_key_create, (KEY, DESTRUCTOR), 1)

/* Get thread-specific data.  */
#define __libc_getspecific(KEY) \
  __libc_ptf_call (__pthread_getspecific, (KEY), NULL)

/* Set thread-specific data.  */
#define __libc_setspecific(KEY, VALUE) \
  __libc_ptf_call (__pthread_setspecific, (KEY, VALUE), 0)
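
/* A usage sketch (hypothetical key and data): note that the ELSE value of
   __libc_key_create is 1, i.e. key creation reports failure when no
   threading support is available, while __libc_getspecific then simply
   yields NULL.

     static __libc_key_t _example_key;

     if (__libc_key_create (&_example_key, free) == 0)
       __libc_setspecific (_example_key, _example_buffer);
     ...
     char *_example_buf = __libc_getspecific (_example_key);
*/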


/* Register handlers to execute before and after `fork'.  Note that the
   last parameter is NULL.  The handlers registered by the libc are
   never removed so this is OK.  */
#define __libc_atfork(PREPARE, PARENT, CHILD) \
  __register_atfork (PREPARE, PARENT, CHILD, NULL)
extern int __register_atfork (void (*__prepare) (void),
			      void (*__parent) (void),
			      void (*__child) (void),
			      void *__dso_handle);
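
/* A usage sketch (hypothetical handler names): a subsystem that owns a
   lock typically takes it in the prepare handler and releases it again in
   both the parent and the child handler, so the lock is in a consistent
   state on both sides of fork.

     static void _example_prepare (void);
     static void _example_parent (void);
     static void _example_child (void);

     __libc_atfork (_example_prepare, _example_parent, _example_child);
*/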

/* Functions that are used by this file and are internal to the GNU C
   library.  */

extern int __pthread_mutex_init (pthread_mutex_t *__mutex,
				 const pthread_mutexattr_t *__mutex_attr);

extern int __pthread_mutex_destroy (pthread_mutex_t *__mutex);

extern int __pthread_mutex_trylock (pthread_mutex_t *__mutex);

extern int __pthread_mutex_lock (pthread_mutex_t *__mutex);

extern int __pthread_mutex_unlock (pthread_mutex_t *__mutex);

extern int __pthread_mutexattr_init (pthread_mutexattr_t *__attr);

extern int __pthread_mutexattr_destroy (pthread_mutexattr_t *__attr);

extern int __pthread_mutexattr_settype (pthread_mutexattr_t *__attr,
					int __kind);

extern int __pthread_rwlock_init (pthread_rwlock_t *__rwlock,
				  const pthread_rwlockattr_t *__attr);

extern int __pthread_rwlock_destroy (pthread_rwlock_t *__rwlock);

extern int __pthread_rwlock_rdlock (pthread_rwlock_t *__rwlock);

extern int __pthread_rwlock_tryrdlock (pthread_rwlock_t *__rwlock);

extern int __pthread_rwlock_wrlock (pthread_rwlock_t *__rwlock);

extern int __pthread_rwlock_trywrlock (pthread_rwlock_t *__rwlock);

extern int __pthread_rwlock_unlock (pthread_rwlock_t *__rwlock);

extern int __pthread_key_create (pthread_key_t *__key,
				 void (*__destr_function) (void *));

extern int __pthread_setspecific (pthread_key_t __key,
				  const void *__pointer);

extern void *__pthread_getspecific (pthread_key_t __key);

extern int __pthread_once (pthread_once_t *__once_control,
			   void (*__init_routine) (void));

extern int __pthread_atfork (void (*__prepare) (void),
			     void (*__parent) (void),
			     void (*__child) (void));

extern int __pthread_setcancelstate (int state, int *oldstate);


/* Make the pthread functions weak so that we can elide them from
   single-threaded processes.  */
#ifndef __NO_WEAK_PTHREAD_ALIASES
# ifdef weak_extern
weak_extern (__pthread_mutex_init)
weak_extern (__pthread_mutex_destroy)
weak_extern (__pthread_mutex_lock)
weak_extern (__pthread_mutex_trylock)
weak_extern (__pthread_mutex_unlock)
weak_extern (__pthread_mutexattr_init)
weak_extern (__pthread_mutexattr_destroy)
weak_extern (__pthread_mutexattr_settype)
weak_extern (__pthread_rwlock_init)
weak_extern (__pthread_rwlock_destroy)
weak_extern (__pthread_rwlock_rdlock)
weak_extern (__pthread_rwlock_tryrdlock)
weak_extern (__pthread_rwlock_wrlock)
weak_extern (__pthread_rwlock_trywrlock)
weak_extern (__pthread_rwlock_unlock)
weak_extern (__pthread_key_create)
weak_extern (__pthread_setspecific)
weak_extern (__pthread_getspecific)
weak_extern (__pthread_once)
weak_extern (__pthread_initialize)
weak_extern (__pthread_atfork)
weak_extern (__pthread_setcancelstate)
weak_extern (_pthread_cleanup_push_defer)
weak_extern (_pthread_cleanup_pop_restore)
# else
#  pragma weak __pthread_mutex_init
#  pragma weak __pthread_mutex_destroy
#  pragma weak __pthread_mutex_lock
#  pragma weak __pthread_mutex_trylock
#  pragma weak __pthread_mutex_unlock
#  pragma weak __pthread_mutexattr_init
#  pragma weak __pthread_mutexattr_destroy
#  pragma weak __pthread_mutexattr_settype
#  pragma weak __pthread_rwlock_destroy
#  pragma weak __pthread_rwlock_rdlock
#  pragma weak __pthread_rwlock_tryrdlock
#  pragma weak __pthread_rwlock_wrlock
#  pragma weak __pthread_rwlock_trywrlock
#  pragma weak __pthread_rwlock_unlock
#  pragma weak __pthread_key_create
#  pragma weak __pthread_setspecific
#  pragma weak __pthread_getspecific
#  pragma weak __pthread_once
#  pragma weak __pthread_initialize
#  pragma weak __pthread_atfork
#  pragma weak __pthread_setcancelstate
#  pragma weak _pthread_cleanup_push_defer
#  pragma weak _pthread_cleanup_pop_restore
# endif
#endif

#endif	/* libc-lockP.h */