/* Close a shared object opened by `_dl_open'.
   Copyright (C) 1996-2018 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */

#include <assert.h>
#include <dlfcn.h>
#include <errno.h>
#include <libintl.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <libc-lock.h>
#include <ldsodefs.h>
#include <sys/types.h>
#include <sys/mman.h>
#include <sysdep-cancel.h>
#include <tls.h>
#include <stap-probe.h>

#include <dl-unmap-segments.h>


/* Type of the destructor functions.  */
typedef void (*fini_t) (void);


/* Special l_idx value used to indicate which objects remain loaded.  */
#define IDX_STILL_USED -1


/* Returns true if a non-empty entry was found.  */
static bool
remove_slotinfo (size_t idx, struct dtv_slotinfo_list *listp, size_t disp,
                 bool should_be_there)
{
  if (idx - disp >= listp->len)
    {
      if (listp->next == NULL)
        {
          /* The index is not actually valid in the slotinfo list,
             because this object was closed before it was fully set
             up due to some error.  */
          assert (! should_be_there);
        }
      else
        {
          if (remove_slotinfo (idx, listp->next, disp + listp->len,
                               should_be_there))
            return true;

          /* No non-empty entry.  Search from the end of this element's
             slotinfo array.  */
          idx = disp + listp->len;
        }
    }
  else
    {
      struct link_map *old_map = listp->slotinfo[idx - disp].map;

      /* The entry might still be in its unused state if we are closing an
         object that wasn't fully set up.  */
      if (__glibc_likely (old_map != NULL))
        {
          assert (old_map->l_tls_modid == idx);

          /* Mark the entry as unused.  */
          listp->slotinfo[idx - disp].gen = GL(dl_tls_generation) + 1;
          listp->slotinfo[idx - disp].map = NULL;
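          /* The generation stamp just written marks the entry as
             stale: threads that still have a DTV entry for this module
             ID compare their DTV generation against the slot's stamp
             and reinitialize the entry before the ID can be reused.  */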
        }

      /* If this is not the last currently used entry no need to look
         further.  */
      if (idx != GL(dl_tls_max_dtv_idx))
        return true;
    }

  while (idx - disp > (disp == 0 ? 1 + GL(dl_tls_static_nelem) : 0))
    {
      --idx;

      if (listp->slotinfo[idx - disp].map != NULL)
        {
          /* Found a new last used index.  */
          GL(dl_tls_max_dtv_idx) = idx;
          return true;
        }
    }

  /* No non-empty entry in this list element.  */
  return false;
}


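/* Core of dlclose: garbage-collect the namespace of MAP.  Every map in
   the namespace is assigned an index into a local array, reachable maps
   get their l_idx set to IDX_STILL_USED, and the unmarked remainder is
   finalized, unmapped and freed.  */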
void
_dl_close_worker (struct link_map *map, bool force)
{
  /* One less direct use.  */
  --map->l_direct_opencount;

  /* If _dl_close is called recursively (some destructor calls dlclose),
     just record that the parent _dl_close will need to do garbage collection
     again and return.  */
  static enum { not_pending, pending, rerun } dl_close_state;

  if (map->l_direct_opencount > 0 || map->l_type != lt_loaded
      || dl_close_state != not_pending)
    {
      if (map->l_direct_opencount == 0 && map->l_type == lt_loaded)
        dl_close_state = rerun;

      /* There are still references to this object.  Do nothing more.  */
      if (__glibc_unlikely (GLRO(dl_debug_mask) & DL_DEBUG_FILES))
        _dl_debug_printf ("\nclosing file=%s; direct_opencount=%u\n",
                          map->l_name, map->l_direct_opencount);

      return;
    }

  Lmid_t nsid = map->l_ns;
  struct link_namespaces *ns = &GL(dl_ns)[nsid];

 retry:
  dl_close_state = pending;

  bool any_tls = false;
  const unsigned int nloaded = ns->_ns_nloaded;
  char used[nloaded];
  char done[nloaded];
  struct link_map *maps[nloaded];
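  /* USED marks maps that must stay loaded; DONE marks maps whose
     dependency lists have already been walked.  Both, like MAPS, are
     indexed by the l_idx values assigned just below.  */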

  /* Clear DF_1_NODELETE to force object deletion.  We don't need to touch
     l_tls_dtor_count because forced object deletion only happens when an
     error occurs during object load.  Destructor registration for TLS
     non-POD objects should not have happened till then for this
     object.  */
  if (force)
    map->l_flags_1 &= ~DF_1_NODELETE;

  /* Run over the list and assign indexes to the link maps and enter
     them into the MAPS array.  */
  int idx = 0;
  for (struct link_map *l = ns->_ns_loaded; l != NULL; l = l->l_next)
    {
      l->l_idx = idx;
      maps[idx] = l;
      ++idx;
    }
  assert (idx == nloaded);

  /* Prepare the bitmaps.  */
  memset (used, '\0', sizeof (used));
  memset (done, '\0', sizeof (done));

  /* Keep track of the lowest index link map we have covered already.  */
  int done_index = -1;
  while (++done_index < nloaded)
    {
      struct link_map *l = maps[done_index];

      if (done[done_index])
        /* Already handled.  */
        continue;

      /* Check whether this object is still used.  */
      if (l->l_type == lt_loaded
          && l->l_direct_opencount == 0
          && (l->l_flags_1 & DF_1_NODELETE) == 0
          /* See CONCURRENCY NOTES in cxa_thread_atexit_impl.c to know why
             acquire is sufficient and correct.  */
          && atomic_load_acquire (&l->l_tls_dtor_count) == 0
          && !used[done_index])
        continue;

      /* We need this object and we handle it now.  */
      done[done_index] = 1;
      used[done_index] = 1;
      /* Signal the object is still needed.  */
      l->l_idx = IDX_STILL_USED;

      /* Mark all dependencies as used.  */
      if (l->l_initfini != NULL)
        {
          /* We are always the zeroth entry, and since we don't include
             ourselves in the dependency analysis start at 1.  */
          struct link_map **lp = &l->l_initfini[1];
          while (*lp != NULL)
            {
              if ((*lp)->l_idx != IDX_STILL_USED)
                {
                  assert ((*lp)->l_idx >= 0 && (*lp)->l_idx < nloaded);

                  if (!used[(*lp)->l_idx])
                    {
                      used[(*lp)->l_idx] = 1;
                      /* If we marked a new object as used, and we've
                         already processed it, then we need to go back
                         and process again from that point forward to
                         ensure we keep all of its dependencies also.  */
                      if ((*lp)->l_idx - 1 < done_index)
                        done_index = (*lp)->l_idx - 1;
                    }
                }

              ++lp;
            }
        }
      /* And the same for relocation dependencies.  */
      if (l->l_reldeps != NULL)
        for (unsigned int j = 0; j < l->l_reldeps->act; ++j)
          {
            struct link_map *jmap = l->l_reldeps->list[j];

            if (jmap->l_idx != IDX_STILL_USED)
              {
                assert (jmap->l_idx >= 0 && jmap->l_idx < nloaded);

                if (!used[jmap->l_idx])
                  {
                    used[jmap->l_idx] = 1;
                    if (jmap->l_idx - 1 < done_index)
                      done_index = jmap->l_idx - 1;
                  }
              }
          }
    }
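  /* From here on, used[i] is zero exactly for the maps that nothing
     reachable refers to anymore; only those will be unloaded.  */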

  /* Sort the entries.  We can skip looking for the binary itself which is
     at the front of the search list for the main namespace.  */
  _dl_sort_maps (maps + (nsid == LM_ID_BASE), nloaded - (nsid == LM_ID_BASE),
                 used + (nsid == LM_ID_BASE), true);
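  /* The for-fini sort order places each object to be unloaded before
     the objects it depends on, so the loop below runs destructors in
     the reverse of constructor order.  */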

  /* Call all termination functions at once.  */
#ifdef SHARED
  bool do_audit = GLRO(dl_naudit) > 0 && !ns->_ns_loaded->l_auditing;
#endif
  bool unload_any = false;
  bool scope_mem_left = false;
  unsigned int unload_global = 0;
  unsigned int first_loaded = ~0;
  for (unsigned int i = 0; i < nloaded; ++i)
    {
      struct link_map *imap = maps[i];

      /* All elements must be in the same namespace.  */
      assert (imap->l_ns == nsid);

      if (!used[i])
        {
          assert (imap->l_type == lt_loaded
                  && (imap->l_flags_1 & DF_1_NODELETE) == 0);

          /* Call its termination function.  Do not do it for
             half-cooked objects.  */
          if (imap->l_init_called)
            {
              /* When debugging print a message first.  */
              if (__builtin_expect (GLRO(dl_debug_mask) & DL_DEBUG_IMPCALLS,
                                    0))
                _dl_debug_printf ("\ncalling fini: %s [%lu]\n\n",
                                  imap->l_name, nsid);

              if (imap->l_info[DT_FINI_ARRAY] != NULL)
                {
                  ElfW(Addr) *array =
                    (ElfW(Addr) *) (imap->l_addr
                                    + imap->l_info[DT_FINI_ARRAY]->d_un.d_ptr);
                  unsigned int sz = (imap->l_info[DT_FINI_ARRAYSZ]->d_un.d_val
                                     / sizeof (ElfW(Addr)));

                  while (sz-- > 0)
                    ((fini_t) array[sz]) ();
                }

              /* Next try the old-style destructor.  */
              if (imap->l_info[DT_FINI] != NULL)
                DL_CALL_DT_FINI (imap, ((void *) imap->l_addr
                                        + imap->l_info[DT_FINI]->d_un.d_ptr));
            }

#ifdef SHARED
          /* Auditing checkpoint: we remove an object.  */
          if (__glibc_unlikely (do_audit))
            {
              struct audit_ifaces *afct = GLRO(dl_audit);
              for (unsigned int cnt = 0; cnt < GLRO(dl_naudit); ++cnt)
                {
                  if (afct->objclose != NULL)
                    /* Return value is ignored.  */
                    (void) afct->objclose (&imap->l_audit[cnt].cookie);

                  afct = afct->next;
                }
            }
#endif

          /* This object must not be used anymore.  */
          imap->l_removed = 1;

          /* We indeed have an object to remove.  */
          unload_any = true;

          if (imap->l_global)
            ++unload_global;

          /* Remember where the first dynamically loaded object is.  */
          if (i < first_loaded)
            first_loaded = i;
        }
      /* Else used[i].  */
      else if (imap->l_type == lt_loaded)
        {
          struct r_scope_elem *new_list = NULL;

          if (imap->l_searchlist.r_list == NULL && imap->l_initfini != NULL)
            {
              /* The object is still used.  But one of the objects we are
                 unloading right now is responsible for loading it.  If
                 the current object does not have its own scope yet we
                 have to create one.  This has to be done before running
                 the finalizers.

                 To do this count the number of dependencies.  */
              unsigned int cnt;
              for (cnt = 1; imap->l_initfini[cnt] != NULL; ++cnt)
                ;

              /* We simply reuse the l_initfini list.  */
              imap->l_searchlist.r_list = &imap->l_initfini[cnt + 1];
              imap->l_searchlist.r_nlist = cnt;

              new_list = &imap->l_searchlist;
            }
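          /* Reusing l_initfini this way relies on its layout as set up
             by _dl_map_object_deps: the list used for init/fini order
             in elements [0..cnt-1], a terminating NULL at [cnt], and a
             second copy in the original dependency order starting at
             [cnt + 1], which is what serves as the search list here.  */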

          /* Count the number of scopes which remain after the unload.
             When we add the local search list count it.  Always add
             one for the terminating NULL pointer.  */
          size_t remain = (new_list != NULL) + 1;
          bool removed_any = false;
          for (size_t cnt = 0; imap->l_scope[cnt] != NULL; ++cnt)
            /* This relies on l_scope[] entries being always set either
               to its own l_symbolic_searchlist address, or some map's
               l_searchlist address.  */
            if (imap->l_scope[cnt] != &imap->l_symbolic_searchlist)
              {
                struct link_map *tmap = (struct link_map *)
                  ((char *) imap->l_scope[cnt]
                   - offsetof (struct link_map, l_searchlist));
                assert (tmap->l_ns == nsid);
                if (tmap->l_idx == IDX_STILL_USED)
                  ++remain;
                else
                  removed_any = true;
              }
            else
              ++remain;

          if (removed_any)
            {
              /* Always allocate a new array for the scope.  This is
                 necessary since we must be able to determine the last
                 user of the current array.  If possible use the link map's
                 memory.  */
              size_t new_size;
              struct r_scope_elem **newp;

#define SCOPE_ELEMS(imap) \
  (sizeof (imap->l_scope_mem) / sizeof (imap->l_scope_mem[0]))

              if (imap->l_scope != imap->l_scope_mem
                  && remain < SCOPE_ELEMS (imap))
                {
                  new_size = SCOPE_ELEMS (imap);
                  newp = imap->l_scope_mem;
                }
              else
                {
                  new_size = imap->l_scope_max;
                  newp = (struct r_scope_elem **)
                    malloc (new_size * sizeof (struct r_scope_elem *));
                  if (newp == NULL)
                    _dl_signal_error (ENOMEM, "dlclose", NULL,
                                      N_("cannot create scope list"));
                }

              /* Copy over the remaining scope elements.  */
              remain = 0;
              for (size_t cnt = 0; imap->l_scope[cnt] != NULL; ++cnt)
                {
                  if (imap->l_scope[cnt] != &imap->l_symbolic_searchlist)
                    {
                      struct link_map *tmap = (struct link_map *)
                        ((char *) imap->l_scope[cnt]
                         - offsetof (struct link_map, l_searchlist));
                      if (tmap->l_idx != IDX_STILL_USED)
                        {
                          /* Remove the scope.  Or replace with own map's
                             scope.  */
                          if (new_list != NULL)
                            {
                              newp[remain++] = new_list;
                              new_list = NULL;
                            }
                          continue;
                        }
                    }

                  newp[remain++] = imap->l_scope[cnt];
                }
              newp[remain] = NULL;

              struct r_scope_elem **old = imap->l_scope;

              imap->l_scope = newp;

              /* No user anymore, we can free it now.  */
              if (old != imap->l_scope_mem)
                {
                  if (_dl_scope_free (old))
                    /* If _dl_scope_free used THREAD_GSCOPE_WAIT (),
                       no need to repeat it.  */
                    scope_mem_left = false;
                }
              else
                scope_mem_left = true;

              imap->l_scope_max = new_size;
            }
          else if (new_list != NULL)
            {
              /* We didn't change the scope array, so reset the search
                 list.  */
              imap->l_searchlist.r_list = NULL;
              imap->l_searchlist.r_nlist = 0;
            }

          /* The loader is gone, so mark the object as not having one.
             Note: l_idx != IDX_STILL_USED -> object will be removed.  */
          if (imap->l_loader != NULL
              && imap->l_loader->l_idx != IDX_STILL_USED)
            imap->l_loader = NULL;

          /* Remember where the first dynamically loaded object is.  */
          if (i < first_loaded)
            first_loaded = i;
        }
    }

  /* If there are no objects to unload, do nothing further.  */
  if (!unload_any)
    goto out;

#ifdef SHARED
  /* Auditing checkpoint: we will start deleting objects.  */
  if (__glibc_unlikely (do_audit))
    {
      struct link_map *head = ns->_ns_loaded;
      struct audit_ifaces *afct = GLRO(dl_audit);
      /* Do not call the functions for any auditing object.  */
      if (head->l_auditing == 0)
        {
          for (unsigned int cnt = 0; cnt < GLRO(dl_naudit); ++cnt)
            {
              if (afct->activity != NULL)
                afct->activity (&head->l_audit[cnt].cookie, LA_ACT_DELETE);

              afct = afct->next;
            }
        }
    }
#endif

  /* Notify the debugger we are about to remove some loaded objects.  */
  struct r_debug *r = _dl_debug_initialize (0, nsid);
  r->r_state = RT_DELETE;
  _dl_debug_state ();
  LIBC_PROBE (unmap_start, 2, nsid, r);

  if (unload_global)
    {
      /* Some objects are in the global scope list.  Remove them.  */
      struct r_scope_elem *ns_msl = ns->_ns_main_searchlist;
      unsigned int i;
      unsigned int j = 0;
      unsigned int cnt = ns_msl->r_nlist;

      while (cnt > 0 && ns_msl->r_list[cnt - 1]->l_removed)
        --cnt;

      if (cnt + unload_global == ns_msl->r_nlist)
        /* Speed up removing most recently added objects.  */
        j = cnt;
      else
        for (i = 0; i < cnt; i++)
          if (ns_msl->r_list[i]->l_removed == 0)
            {
              if (i != j)
                ns_msl->r_list[j] = ns_msl->r_list[i];
              j++;
            }
      ns_msl->r_nlist = j;
    }

  if (!RTLD_SINGLE_THREAD_P
      && (unload_global
          || scope_mem_left
          || (GL(dl_scope_free_list) != NULL
              && GL(dl_scope_free_list)->count)))
    {
      THREAD_GSCOPE_WAIT ();

      /* Now we can free any queued old scopes.  */
      struct dl_scope_free_list *fsl = GL(dl_scope_free_list);
      if (fsl != NULL)
        while (fsl->count > 0)
          free (fsl->list[--fsl->count]);
    }
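  /* THREAD_GSCOPE_WAIT synchronizes with lookups running under the
     global scope protection: once it returns, no thread can still be
     walking a search list or scope array that was shrunk or replaced
     above, so the old arrays queued for freeing are safe to release.  */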

  size_t tls_free_start;
  size_t tls_free_end;
  tls_free_start = tls_free_end = NO_TLS_OFFSET;
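  /* These two delimit a single contiguous chunk of static TLS belonging
     to unloaded modules.  Only a chunk ending at the high-water mark
     GL(dl_tls_static_used) can be given back; freed blocks that are not
     adjacent to it remain unused until the process exits.  */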

  /* We modify the list of loaded objects.  */
  __rtld_lock_lock_recursive (GL(dl_load_write_lock));

  /* Check each element of the search list to see if all references to
     it are gone.  */
  for (unsigned int i = first_loaded; i < nloaded; ++i)
    {
      struct link_map *imap = maps[i];
      if (!used[i])
        {
          assert (imap->l_type == lt_loaded);

          /* That was the last reference, and this was a dlopen-loaded
             object.  We can unmap it.  */

          /* Remove the object from the dtv slotinfo array if it uses TLS.  */
          if (__glibc_unlikely (imap->l_tls_blocksize > 0))
            {
              any_tls = true;

              if (GL(dl_tls_dtv_slotinfo_list) != NULL
                  && ! remove_slotinfo (imap->l_tls_modid,
                                        GL(dl_tls_dtv_slotinfo_list), 0,
                                        imap->l_init_called))
                /* All dynamically loaded modules with TLS are unloaded.  */
                GL(dl_tls_max_dtv_idx) = GL(dl_tls_static_nelem);

              if (imap->l_tls_offset != NO_TLS_OFFSET
                  && imap->l_tls_offset != FORCED_DYNAMIC_TLS_OFFSET)
                {
                  /* Collect a contiguous chunk built from the objects in
                     this search list, going in either direction.  When the
                     whole chunk is at the end of the used area then we can
                     reclaim it.  */
#if TLS_TCB_AT_TP
                  if (tls_free_start == NO_TLS_OFFSET
                      || (size_t) imap->l_tls_offset == tls_free_start)
                    {
                      /* Extend the contiguous chunk being reclaimed.  */
                      tls_free_start
                        = imap->l_tls_offset - imap->l_tls_blocksize;

                      if (tls_free_end == NO_TLS_OFFSET)
                        tls_free_end = imap->l_tls_offset;
                    }
                  else if (imap->l_tls_offset - imap->l_tls_blocksize
                           == tls_free_end)
                    /* Extend the chunk backwards.  */
                    tls_free_end = imap->l_tls_offset;
                  else
                    {
                      /* This isn't contiguous with the last chunk freed.
                         One of them will be leaked unless we can free
                         one block right away.  */
                      if (tls_free_end == GL(dl_tls_static_used))
                        {
                          GL(dl_tls_static_used) = tls_free_start;
                          tls_free_end = imap->l_tls_offset;
                          tls_free_start
                            = tls_free_end - imap->l_tls_blocksize;
                        }
                      else if ((size_t) imap->l_tls_offset
                               == GL(dl_tls_static_used))
                        GL(dl_tls_static_used)
                          = imap->l_tls_offset - imap->l_tls_blocksize;
                      else if (tls_free_end < (size_t) imap->l_tls_offset)
                        {
                          /* We pick the later block.  It has a chance to
                             be freed.  */
                          tls_free_end = imap->l_tls_offset;
                          tls_free_start
                            = tls_free_end - imap->l_tls_blocksize;
                        }
                    }
#elif TLS_DTV_AT_TP
                  if (tls_free_start == NO_TLS_OFFSET)
                    {
                      tls_free_start = imap->l_tls_firstbyte_offset;
                      tls_free_end = (imap->l_tls_offset
                                      + imap->l_tls_blocksize);
                    }
                  else if (imap->l_tls_firstbyte_offset == tls_free_end)
                    /* Extend the contiguous chunk being reclaimed.  */
                    tls_free_end = imap->l_tls_offset + imap->l_tls_blocksize;
                  else if (imap->l_tls_offset + imap->l_tls_blocksize
                           == tls_free_start)
                    /* Extend the chunk backwards.  */
                    tls_free_start = imap->l_tls_firstbyte_offset;
                  /* This isn't contiguous with the last chunk freed.
                     One of them will be leaked unless we can free
                     one block right away.  */
                  else if (imap->l_tls_offset + imap->l_tls_blocksize
                           == GL(dl_tls_static_used))
                    GL(dl_tls_static_used) = imap->l_tls_firstbyte_offset;
                  else if (tls_free_end == GL(dl_tls_static_used))
                    {
                      GL(dl_tls_static_used) = tls_free_start;
                      tls_free_start = imap->l_tls_firstbyte_offset;
                      tls_free_end = imap->l_tls_offset + imap->l_tls_blocksize;
                    }
                  else if (tls_free_end < imap->l_tls_firstbyte_offset)
                    {
                      /* We pick the later block.  It has a chance to
                         be freed.  */
                      tls_free_start = imap->l_tls_firstbyte_offset;
                      tls_free_end = imap->l_tls_offset + imap->l_tls_blocksize;
                    }
#else
# error "Either TLS_TCB_AT_TP or TLS_DTV_AT_TP must be defined"
#endif
                }
            }

          /* Reset unique symbols if forced.  */
          if (force)
            {
              struct unique_sym_table *tab = &ns->_ns_unique_sym_table;
              __rtld_lock_lock_recursive (tab->lock);
              struct unique_sym *entries = tab->entries;
              if (entries != NULL)
                {
                  size_t idx, size = tab->size;
                  for (idx = 0; idx < size; ++idx)
                    {
                      /* Clear unique symbol entries that belong to this
                         object.  */
                      if (entries[idx].name != NULL
                          && entries[idx].map == imap)
                        {
                          entries[idx].name = NULL;
                          entries[idx].hashval = 0;
                          tab->n_elements--;
                        }
                    }
                }
              __rtld_lock_unlock_recursive (tab->lock);
            }

          /* We can unmap all the maps at once.  We determined the
             start address and length when we loaded the object and
             the `munmap' call does the rest.  */
          DL_UNMAP (imap);
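          /* DL_UNMAP normally expands to _dl_unmap_segments (from
             <dl-unmap-segments.h>), a single munmap of the range from
             l_map_start to l_map_end; architectures with special
             requirements can define their own DL_UNMAP.  */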

          /* Finally, unlink the data structure and free it.  */
#if DL_NNS == 1
          /* The assert in the (imap->l_prev == NULL) case gives
             the compiler license to warn that NS points outside
             the dl_ns array bounds in that case (as nsid != LM_ID_BASE
             is tantamount to nsid >= DL_NNS).  That should be impossible
             in this configuration, so just assert about it instead.  */
          assert (nsid == LM_ID_BASE);
          assert (imap->l_prev != NULL);
#else
          if (imap->l_prev == NULL)
            {
              assert (nsid != LM_ID_BASE);
              ns->_ns_loaded = imap->l_next;

              /* Update the pointer to the head of the list
                 we leave for debuggers to examine.  */
              r->r_map = (void *) ns->_ns_loaded;
            }
          else
#endif
            imap->l_prev->l_next = imap->l_next;

          --ns->_ns_nloaded;
          if (imap->l_next != NULL)
            imap->l_next->l_prev = imap->l_prev;

          free (imap->l_versions);
          if (imap->l_origin != (char *) -1)
            free ((char *) imap->l_origin);

          free (imap->l_reldeps);

          /* Print debugging message.  */
          if (__glibc_unlikely (GLRO(dl_debug_mask) & DL_DEBUG_FILES))
            _dl_debug_printf ("\nfile=%s [%lu]; destroying link map\n",
                              imap->l_name, imap->l_ns);

          /* This name is always allocated.  */
          free (imap->l_name);

          /* Remove the list with all the names of the shared object.  */
          struct libname_list *lnp = imap->l_libname;
          do
            {
              struct libname_list *this = lnp;
              lnp = lnp->next;
              if (!this->dont_free)
                free (this);
            }
          while (lnp != NULL);

          /* Remove the searchlists.  */
          free (imap->l_initfini);

          /* Remove the scope array if we allocated it.  */
          if (imap->l_scope != imap->l_scope_mem)
            free (imap->l_scope);

          if (imap->l_phdr_allocated)
            free ((void *) imap->l_phdr);

          if (imap->l_rpath_dirs.dirs != (void *) -1)
            free (imap->l_rpath_dirs.dirs);
          if (imap->l_runpath_dirs.dirs != (void *) -1)
            free (imap->l_runpath_dirs.dirs);

          free (imap);
        }
    }

  __rtld_lock_unlock_recursive (GL(dl_load_write_lock));

  /* If we removed any object which uses TLS, bump the generation
     counter.  */
  if (any_tls)
    {
      if (__glibc_unlikely (++GL(dl_tls_generation) == 0))
        _dl_fatal_printf ("TLS generation counter wrapped!  Please report as described in "REPORT_BUGS_TO".\n");

      if (tls_free_end == GL(dl_tls_static_used))
        GL(dl_tls_static_used) = tls_free_start;
    }

#ifdef SHARED
  /* Auditing checkpoint: we have deleted all objects.  */
  if (__glibc_unlikely (do_audit))
    {
      struct link_map *head = ns->_ns_loaded;
      /* Do not call the functions for any auditing object.  */
      if (head->l_auditing == 0)
        {
          struct audit_ifaces *afct = GLRO(dl_audit);
          for (unsigned int cnt = 0; cnt < GLRO(dl_naudit); ++cnt)
            {
              if (afct->activity != NULL)
                afct->activity (&head->l_audit[cnt].cookie, LA_ACT_CONSISTENT);

              afct = afct->next;
            }
        }
    }
#endif

  if (__builtin_expect (ns->_ns_loaded == NULL, 0)
      && nsid == GL(dl_nns) - 1)
    do
      --GL(dl_nns);
    while (GL(dl_ns)[GL(dl_nns) - 1]._ns_loaded == NULL);
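  /* The loop above lowers GL(dl_nns) past any namespaces at the end of
     the dl_ns array that are now empty.  */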

  /* Notify the debugger those objects are finalized and gone.  */
  r->r_state = RT_CONSISTENT;
  _dl_debug_state ();
  LIBC_PROBE (unmap_complete, 2, nsid, r);

  /* Recheck whether we need to retry; the load lock itself is held and
     released by our caller.  */
 out:
  if (dl_close_state == rerun)
    goto retry;

  dl_close_state = not_pending;
}


void
_dl_close (void *_map)
{
  struct link_map *map = _map;

  /* We must take the lock to examine the contents of map and avoid
     concurrent dlopens.  */
  __rtld_lock_lock_recursive (GL(dl_load_lock));

  /* At this point we are guaranteed nobody else is touching the list of
     loaded maps, but a concurrent dlclose might have freed our map
     before we took the lock.  There is no way to detect this (see below)
     so we proceed assuming this isn't the case.  First see whether we
     can remove the object at all.  */
  if (__glibc_unlikely (map->l_flags_1 & DF_1_NODELETE))
    {
      /* Nope.  Do nothing.  */
      __rtld_lock_unlock_recursive (GL(dl_load_lock));
      return;
    }

  /* At present this is an unreliable check except in the case where the
     caller has recursively called dlclose and we are sure the link map
     has not been freed.  In a non-recursive dlclose the map itself
     might have been freed and this access is potentially a data race
     with whatever other use this memory might have now, or worse we
     might silently corrupt memory if it looks enough like a link map.
     POSIX has language in dlclose that appears to guarantee that this
     should be a detectable case and given that dlclose should be threadsafe
     we need this to be a reliable detection.
     This is bug 20990.  */
  if (__builtin_expect (map->l_direct_opencount, 1) == 0)
    {
      __rtld_lock_unlock_recursive (GL(dl_load_lock));
      _dl_signal_error (0, map->l_name, NULL, N_("shared object not open"));
    }

  _dl_close_worker (map, false);

  __rtld_lock_unlock_recursive (GL(dl_load_lock));
}