/* Copyright (C) 2003-2016 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Jakub Jelinek <jakub@redhat.com>, 2003.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */

#include "pthreadP.h"
#include <futex-internal.h>
#include <atomic.h>


unsigned long int __fork_generation attribute_hidden;


static void
clear_once_control (void *arg)
{
  pthread_once_t *once_control = (pthread_once_t *) arg;

  /* Reset to the uninitialized state here.  We don't need a stronger memory
     order because we do not need to make any of our other writes visible to
     other threads that see this value: This function will be called if we
     get interrupted (see __pthread_once), so all we need to relay to other
     threads is the state being reset again.  */
  atomic_store_relaxed (once_control, 0);
  futex_wake ((unsigned int *) once_control, INT_MAX, FUTEX_PRIVATE);
}


/* This is similar to a lock implementation, but we distinguish between three
   states: not yet initialized (0), initialization in progress
   (__fork_generation | __PTHREAD_ONCE_INPROGRESS), and initialization
   finished (__PTHREAD_ONCE_DONE); __fork_generation does not use the bits
   that are used for __PTHREAD_ONCE_INPROGRESS and __PTHREAD_ONCE_DONE (which
   is what __PTHREAD_ONCE_FORK_GEN_INCR is used for).  If in the first state,
   threads will try to run the initialization by moving to the second state;
   the first thread to do so via a CAS on once_control runs init_routine,
   and the other threads block until it is done.
   When forking the process, some threads can be interrupted during the second
   state; they won't be present in the forked child, so we need to restart
   initialization in the child.  To distinguish an in-progress initialization
   from an interrupted initialization (in which case we need to reclaim the
   lock), we look at the fork generation that's part of the second state: we
   can reclaim iff it differs from the current fork generation.
   XXX: This algorithm has an ABA issue on the fork generation: if an
   initialization is interrupted, the process then forks 2^30 times (30 bits
   of once_control are used for the fork generation), and initialization is
   attempted again, then we can no longer distinguish the in-progress and
   interrupted cases and can deadlock.
   XXX: We split out this slow path because current compilers do not generate
   as efficient code when the fast path in __pthread_once below is not in a
   separate function.  */
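/* For illustration, a sketch of the once_control encoding described above,
   assuming the usual flag values from pthreadP.h (__PTHREAD_ONCE_INPROGRESS
   == 0x1, __PTHREAD_ONCE_DONE == 0x2, __PTHREAD_ONCE_FORK_GEN_INCR == 0x4;
   the exact constants are defined there, so treat these as assumptions):

     0                           not yet initialized
     __fork_generation | 0x1     initialization in progress, tagged with the
                                 fork generation current when it started
     0x2                         initialization finished

   Since __fork_generation only ever advances in steps of
   __PTHREAD_ONCE_FORK_GEN_INCR, the low two bits remain free for the state
   flags and the upper 30 bits hold the fork generation, which is where the
   2^30 limit mentioned above comes from.  */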
static int
__attribute__ ((noinline))
__pthread_once_slow (pthread_once_t *once_control, void (*init_routine) (void))
{
  while (1)
    {
      int val, newval;

      /* We need acquire memory order for this load because if the value
         signals that initialization has finished, we need to see any
         data modifications done during initialization.  */
      val = atomic_load_acquire (once_control);
      do
        {
          /* Check if the initialization has already been done.  */
          if (__glibc_likely ((val & __PTHREAD_ONCE_DONE) != 0))
            return 0;

          /* We try to set the state to in-progress, tagged with the current
             fork generation.  We don't need atomic accesses for the fork
             generation because it's immutable in a particular process, and
             forked child processes start with a single thread that modified
             the generation.  */
          newval = __fork_generation | __PTHREAD_ONCE_INPROGRESS;
          /* We need acquire memory order here for the same reason as for the
             load from once_control above.  */
        }
      while (__glibc_unlikely (!atomic_compare_exchange_weak_acquire (
          once_control, &val, newval)));

      /* Check whether another thread is already running the initializer.  */
      if ((val & __PTHREAD_ONCE_INPROGRESS) != 0)
        {
          /* Check whether the initializer execution was interrupted by a
             fork.  We know that for both values, __PTHREAD_ONCE_INPROGRESS
             is set and __PTHREAD_ONCE_DONE is not.  */
          if (val == newval)
            {
              /* Same generation, some other thread was faster.  Wait and
                 retry.  */
              futex_wait_simple ((unsigned int *) once_control,
                                 (unsigned int) newval, FUTEX_PRIVATE);
              continue;
            }
        }

      /* This thread is the first here.  Do the initialization.
         Register a cleanup handler so that the initialization can be
         restarted if the thread gets interrupted.  */
      pthread_cleanup_push (clear_once_control, once_control);

      init_routine ();

      pthread_cleanup_pop (0);

      /* Mark *once_control as having finished the initialization.  We need
         release memory order here because we need to synchronize with other
         threads that want to use the initialized data.  */
      atomic_store_release (once_control, __PTHREAD_ONCE_DONE);

      /* Wake up all other threads.  */
      futex_wake ((unsigned int *) once_control, INT_MAX, FUTEX_PRIVATE);
      break;
    }

  return 0;
}

int
__pthread_once (pthread_once_t *once_control, void (*init_routine) (void))
{
  /* Fast path.  See __pthread_once_slow.  */
  int val;
  val = atomic_load_acquire (once_control);
  if (__glibc_likely ((val & __PTHREAD_ONCE_DONE) != 0))
    return 0;
  else
    return __pthread_once_slow (once_control, init_routine);
}
weak_alias (__pthread_once, pthread_once)
hidden_def (__pthread_once)
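
#if 0
/* Illustrative usage sketch only (excluded from the build): a minimal
   example, under stated assumptions, of how callers use the pthread_once
   interface implemented above.  The names token_once, token, init_token,
   and get_token are hypothetical.  init_token runs at most once per
   process; concurrent callers block in __pthread_once_slow until the
   store-release of __PTHREAD_ONCE_DONE and the futex_wake, and later
   callers take the fast path in __pthread_once.  */
#include <pthread.h>
#include <stdlib.h>

static pthread_once_t token_once = PTHREAD_ONCE_INIT;
static char *token;

static void
init_token (void)
{
  /* Executed exactly once, by the first thread to win the CAS on
     once_control.  */
  token = malloc (64);
}

static char *
get_token (void)
{
  pthread_once (&token_once, init_token);
  return token;
}
#endif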