/* Close a shared object opened by `_dl_open'.
   Copyright (C) 1996-2017 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */

#include <assert.h>
#include <dlfcn.h>
#include <errno.h>
#include <libintl.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <libc-lock.h>
#include <ldsodefs.h>
#include <sys/types.h>
#include <sys/mman.h>
#include <sysdep-cancel.h>
#include <tls.h>
#include <stap-probe.h>

#include <dl-unmap-segments.h>

/* Type of the destructor ("fini") functions.  */
typedef void (*fini_t) (void);


/* Special l_idx value used to indicate which objects remain loaded.  */
#define IDX_STILL_USED -1
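
/* While _dl_close_worker runs, each link map's l_idx field is temporarily
   repurposed: it first holds the map's index into the local MAPS/USED/DONE
   arrays and is then set to IDX_STILL_USED for every object that must stay
   loaded.  */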


/* Returns true if a non-empty entry was found in the slotinfo list.  */
static bool
remove_slotinfo (size_t idx, struct dtv_slotinfo_list *listp, size_t disp,
                 bool should_be_there)
{
  if (idx - disp >= listp->len)
    {
      if (listp->next == NULL)
        {
          /* The index is not actually valid in the slotinfo list,
             because this object was closed before it was fully set
             up due to some error.  */
          assert (! should_be_there);
        }
      else
        {
          if (remove_slotinfo (idx, listp->next, disp + listp->len,
                               should_be_there))
            return true;

          /* No non-empty entry.  Search from the end of this element's
             slotinfo array.  */
          idx = disp + listp->len;
        }
    }
  else
    {
      struct link_map *old_map = listp->slotinfo[idx - disp].map;

      /* The entry might still be in its unused state if we are closing an
         object that wasn't fully set up.  */
      if (__glibc_likely (old_map != NULL))
        {
          assert (old_map->l_tls_modid == idx);

          /* Mark the entry as unused.  */
          listp->slotinfo[idx - disp].gen = GL(dl_tls_generation) + 1;
          listp->slotinfo[idx - disp].map = NULL;
        }

      /* If this is not the last currently used entry no need to look
         further.  */
      if (idx != GL(dl_tls_max_dtv_idx))
        return true;
    }

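  /* The closed object was the last used entry.  Scan backwards for the
     highest remaining slot that still has a map installed and make that
     the new GL(dl_tls_max_dtv_idx).  */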
  while (idx - disp > (disp == 0 ? 1 + GL(dl_tls_static_nelem) : 0))
    {
      --idx;

      if (listp->slotinfo[idx - disp].map != NULL)
        {
          /* Found a new last used index.  */
          GL(dl_tls_max_dtv_idx) = idx;
          return true;
        }
    }

  /* No non-empty entry in this list element.  */
  return false;
}


void
_dl_close_worker (struct link_map *map, bool force)
{
  /* One less direct use.  */
  --map->l_direct_opencount;

  /* If _dl_close is called recursively (some destructor calls dlclose),
     just record that the parent _dl_close will need to do garbage collection
     again and return.  */
  static enum { not_pending, pending, rerun } dl_close_state;

  if (map->l_direct_opencount > 0 || map->l_type != lt_loaded
      || dl_close_state != not_pending)
    {
      if (map->l_direct_opencount == 0 && map->l_type == lt_loaded)
        dl_close_state = rerun;

      /* There are still references to this object.  Do nothing more.  */
      if (__glibc_unlikely (GLRO(dl_debug_mask) & DL_DEBUG_FILES))
        _dl_debug_printf ("\nclosing file=%s; direct_opencount=%u\n",
                          map->l_name, map->l_direct_opencount);

      return;
    }

  Lmid_t nsid = map->l_ns;
  struct link_namespaces *ns = &GL(dl_ns)[nsid];

 retry:
  dl_close_state = pending;

  bool any_tls = false;
  const unsigned int nloaded = ns->_ns_nloaded;
  char used[nloaded];
  char done[nloaded];
  struct link_map *maps[nloaded];
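  /* USED[i] records that MAPS[i] must stay loaded; DONE[i] records that the
     dependencies of MAPS[i] have already been marked.  */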

  /* Clear DF_1_NODELETE to force object deletion.  We don't need to touch
     l_tls_dtor_count because forced object deletion only happens when an
     error occurs during object load.  Destructor registration for TLS
     non-POD objects should not have happened till then for this
     object.  */
  if (force)
    map->l_flags_1 &= ~DF_1_NODELETE;

  /* Run over the list and assign indexes to the link maps and enter
     them into the MAPS array.  */
  int idx = 0;
  for (struct link_map *l = ns->_ns_loaded; l != NULL; l = l->l_next)
    {
      l->l_idx = idx;
      maps[idx] = l;
      ++idx;
    }
  assert (idx == nloaded);

  /* Prepare the bitmaps.  */
  memset (used, '\0', sizeof (used));
  memset (done, '\0', sizeof (done));

  /* Keep track of the lowest index link map we have covered already.  */
  int done_index = -1;
  while (++done_index < nloaded)
    {
      struct link_map *l = maps[done_index];

      if (done[done_index])
        /* Already handled.  */
        continue;

      /* Check whether this object is still used.  */
      if (l->l_type == lt_loaded
          && l->l_direct_opencount == 0
          && (l->l_flags_1 & DF_1_NODELETE) == 0
          /* See CONCURRENCY NOTES in cxa_thread_atexit_impl.c to know why
             acquire is sufficient and correct.  */
          && atomic_load_acquire (&l->l_tls_dtor_count) == 0
          && !used[done_index])
        continue;

      /* We need this object and we handle it now.  */
      done[done_index] = 1;
      used[done_index] = 1;
      /* Signal the object is still needed.  */
      l->l_idx = IDX_STILL_USED;

      /* Mark all dependencies as used.  */
      if (l->l_initfini != NULL)
        {
          /* We are always the zeroth entry, and since we don't include
             ourselves in the dependency analysis start at 1.  */
          struct link_map **lp = &l->l_initfini[1];
          while (*lp != NULL)
            {
              if ((*lp)->l_idx != IDX_STILL_USED)
                {
                  assert ((*lp)->l_idx >= 0 && (*lp)->l_idx < nloaded);

                  if (!used[(*lp)->l_idx])
                    {
                      used[(*lp)->l_idx] = 1;
                      /* If we marked a new object as used, and we've
                         already processed it, then we need to go back
                         and process again from that point forward to
                         ensure we keep all of its dependencies also.  */
                      if ((*lp)->l_idx - 1 < done_index)
                        done_index = (*lp)->l_idx - 1;
                    }
                }

              ++lp;
            }
        }
      /* And the same for relocation dependencies.  */
      if (l->l_reldeps != NULL)
        for (unsigned int j = 0; j < l->l_reldeps->act; ++j)
          {
            struct link_map *jmap = l->l_reldeps->list[j];

            if (jmap->l_idx != IDX_STILL_USED)
              {
                assert (jmap->l_idx >= 0 && jmap->l_idx < nloaded);

                if (!used[jmap->l_idx])
                  {
                    used[jmap->l_idx] = 1;
                    if (jmap->l_idx - 1 < done_index)
                      done_index = jmap->l_idx - 1;
                  }
              }
          }
    }

  /* Sort the entries.  */
  _dl_sort_fini (maps, nloaded, used, nsid);

  /* Call all termination functions at once.  */
#ifdef SHARED
  bool do_audit = GLRO(dl_naudit) > 0 && !ns->_ns_loaded->l_auditing;
#endif
  bool unload_any = false;
  bool scope_mem_left = false;
  unsigned int unload_global = 0;
  unsigned int first_loaded = ~0;
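  /* Walk over all maps in destructor order.  For objects that are going
     away, run their destructors and the objclose audit hook; for objects
     that stay, rebuild any scope that still references a map we are about
     to unload.  */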
  for (unsigned int i = 0; i < nloaded; ++i)
    {
      struct link_map *imap = maps[i];

      /* All elements must be in the same namespace.  */
      assert (imap->l_ns == nsid);

      if (!used[i])
        {
          assert (imap->l_type == lt_loaded
                  && (imap->l_flags_1 & DF_1_NODELETE) == 0);

          /* Call its termination function.  Do not do it for
             half-cooked objects.  */
          if (imap->l_init_called)
            {
              /* When debugging print a message first.  */
              if (__builtin_expect (GLRO(dl_debug_mask) & DL_DEBUG_IMPCALLS,
                                    0))
                _dl_debug_printf ("\ncalling fini: %s [%lu]\n\n",
                                  imap->l_name, nsid);

              if (imap->l_info[DT_FINI_ARRAY] != NULL)
                {
                  ElfW(Addr) *array =
                    (ElfW(Addr) *) (imap->l_addr
                                    + imap->l_info[DT_FINI_ARRAY]->d_un.d_ptr);
                  unsigned int sz = (imap->l_info[DT_FINI_ARRAYSZ]->d_un.d_val
                                     / sizeof (ElfW(Addr)));

                  while (sz-- > 0)
                    ((fini_t) array[sz]) ();
                }

              /* Next try the old-style destructor.  */
              if (imap->l_info[DT_FINI] != NULL)
                DL_CALL_DT_FINI (imap, ((void *) imap->l_addr
                                        + imap->l_info[DT_FINI]->d_un.d_ptr));
            }

#ifdef SHARED
          /* Auditing checkpoint: we remove an object.  */
          if (__glibc_unlikely (do_audit))
            {
              struct audit_ifaces *afct = GLRO(dl_audit);
              for (unsigned int cnt = 0; cnt < GLRO(dl_naudit); ++cnt)
                {
                  if (afct->objclose != NULL)
                    /* Return value is ignored.  */
                    (void) afct->objclose (&imap->l_audit[cnt].cookie);

                  afct = afct->next;
                }
            }
#endif

          /* This object must not be used anymore.  */
          imap->l_removed = 1;

          /* We indeed have an object to remove.  */
          unload_any = true;

          if (imap->l_global)
            ++unload_global;

          /* Remember where the first dynamically loaded object is.  */
          if (i < first_loaded)
            first_loaded = i;
        }
      /* Else used[i].  */
      else if (imap->l_type == lt_loaded)
        {
          struct r_scope_elem *new_list = NULL;

          if (imap->l_searchlist.r_list == NULL && imap->l_initfini != NULL)
            {
              /* The object is still used.  But one of the objects we are
                 unloading right now is responsible for loading it.  If
                 the current object does not have its own scope yet we
                 have to create one.  This has to be done before running
                 the finalizers.

                 To do this count the number of dependencies.  */
              unsigned int cnt;
              for (cnt = 1; imap->l_initfini[cnt] != NULL; ++cnt)
                ;

              /* We simply reuse the l_initfini list.  */
              imap->l_searchlist.r_list = &imap->l_initfini[cnt + 1];
              imap->l_searchlist.r_nlist = cnt;

              new_list = &imap->l_searchlist;
            }

          /* Count the number of scopes which remain after the unload.
             When we add the local search list count it.  Always add
             one for the terminating NULL pointer.  */
          size_t remain = (new_list != NULL) + 1;
          bool removed_any = false;
          for (size_t cnt = 0; imap->l_scope[cnt] != NULL; ++cnt)
            /* This relies on l_scope[] entries being always set either
               to its own l_symbolic_searchlist address, or some map's
               l_searchlist address.  */
            if (imap->l_scope[cnt] != &imap->l_symbolic_searchlist)
              {
                struct link_map *tmap = (struct link_map *)
                  ((char *) imap->l_scope[cnt]
                   - offsetof (struct link_map, l_searchlist));
                assert (tmap->l_ns == nsid);
                if (tmap->l_idx == IDX_STILL_USED)
                  ++remain;
                else
                  removed_any = true;
              }
            else
              ++remain;

          if (removed_any)
            {
              /* Always allocate a new array for the scope.  This is
                 necessary since we must be able to determine the last
                 user of the current array.  If possible use the link map's
                 memory.  */
              size_t new_size;
              struct r_scope_elem **newp;

#define SCOPE_ELEMS(imap) \
  (sizeof (imap->l_scope_mem) / sizeof (imap->l_scope_mem[0]))

              if (imap->l_scope != imap->l_scope_mem
                  && remain < SCOPE_ELEMS (imap))
                {
                  new_size = SCOPE_ELEMS (imap);
                  newp = imap->l_scope_mem;
                }
              else
                {
                  new_size = imap->l_scope_max;
                  newp = (struct r_scope_elem **)
                    malloc (new_size * sizeof (struct r_scope_elem *));
                  if (newp == NULL)
                    _dl_signal_error (ENOMEM, "dlclose", NULL,
                                      N_("cannot create scope list"));
                }

              /* Copy over the remaining scope elements.  */
              remain = 0;
              for (size_t cnt = 0; imap->l_scope[cnt] != NULL; ++cnt)
                {
                  if (imap->l_scope[cnt] != &imap->l_symbolic_searchlist)
                    {
                      struct link_map *tmap = (struct link_map *)
                        ((char *) imap->l_scope[cnt]
                         - offsetof (struct link_map, l_searchlist));
                      if (tmap->l_idx != IDX_STILL_USED)
                        {
                          /* Remove the scope.  Or replace with own map's
                             scope.  */
                          if (new_list != NULL)
                            {
                              newp[remain++] = new_list;
                              new_list = NULL;
                            }
                          continue;
                        }
                    }

                  newp[remain++] = imap->l_scope[cnt];
                }
              newp[remain] = NULL;

              struct r_scope_elem **old = imap->l_scope;

              imap->l_scope = newp;

              /* No user anymore, we can free it now.  */
              if (old != imap->l_scope_mem)
                {
                  if (_dl_scope_free (old))
                    /* If _dl_scope_free used THREAD_GSCOPE_WAIT (),
                       no need to repeat it.  */
                    scope_mem_left = false;
                }
              else
                scope_mem_left = true;

              imap->l_scope_max = new_size;
            }
          else if (new_list != NULL)
            {
              /* We didn't change the scope array, so reset the search
                 list.  */
              imap->l_searchlist.r_list = NULL;
              imap->l_searchlist.r_nlist = 0;
            }

          /* The loader is gone, so mark the object as not having one.
             Note: l_idx != IDX_STILL_USED -> object will be removed.  */
          if (imap->l_loader != NULL
              && imap->l_loader->l_idx != IDX_STILL_USED)
            imap->l_loader = NULL;

          /* Remember where the first dynamically loaded object is.  */
          if (i < first_loaded)
            first_loaded = i;
        }
    }

  /* If there are no objects to unload, do nothing further.  */
  if (!unload_any)
    goto out;

#ifdef SHARED
  /* Auditing checkpoint: we will start deleting objects.  */
  if (__glibc_unlikely (do_audit))
    {
      struct link_map *head = ns->_ns_loaded;
      struct audit_ifaces *afct = GLRO(dl_audit);
      /* Do not call the functions for any auditing object.  */
      if (head->l_auditing == 0)
        {
          for (unsigned int cnt = 0; cnt < GLRO(dl_naudit); ++cnt)
            {
              if (afct->activity != NULL)
                afct->activity (&head->l_audit[cnt].cookie, LA_ACT_DELETE);

              afct = afct->next;
            }
        }
    }
#endif

  /* Notify the debugger we are about to remove some loaded objects.  */
  struct r_debug *r = _dl_debug_initialize (0, nsid);
  r->r_state = RT_DELETE;
  _dl_debug_state ();
  LIBC_PROBE (unmap_start, 2, nsid, r);

  if (unload_global)
    {
      /* Some objects are in the global scope list.  Remove them.  */
      struct r_scope_elem *ns_msl = ns->_ns_main_searchlist;
      unsigned int i;
      unsigned int j = 0;
      unsigned int cnt = ns_msl->r_nlist;

      while (cnt > 0 && ns_msl->r_list[cnt - 1]->l_removed)
        --cnt;

      if (cnt + unload_global == ns_msl->r_nlist)
        /* Speed up removing most recently added objects.  */
        j = cnt;
      else
        for (i = 0; i < cnt; i++)
          if (ns_msl->r_list[i]->l_removed == 0)
            {
              if (i != j)
                ns_msl->r_list[j] = ns_msl->r_list[i];
              j++;
            }
      ns_msl->r_nlist = j;
    }

  if (!RTLD_SINGLE_THREAD_P
      && (unload_global
          || scope_mem_left
          || (GL(dl_scope_free_list) != NULL
              && GL(dl_scope_free_list)->count)))
    {
      THREAD_GSCOPE_WAIT ();

      /* Now we can free any queued old scopes.  */
      struct dl_scope_free_list *fsl = GL(dl_scope_free_list);
      if (fsl != NULL)
        while (fsl->count > 0)
          free (fsl->list[--fsl->count]);
    }

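  /* Static TLS memory can be handed back only when the freed blocks form a
     contiguous range at the end of the area currently in use; the candidate
     range is collected in tls_free_start/tls_free_end below.  */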
  size_t tls_free_start;
  size_t tls_free_end;
  tls_free_start = tls_free_end = NO_TLS_OFFSET;

  /* We modify the list of loaded objects.  */
  __rtld_lock_lock_recursive (GL(dl_load_write_lock));

  /* Check each element of the search list to see if all references to
     it are gone.  */
  for (unsigned int i = first_loaded; i < nloaded; ++i)
    {
      struct link_map *imap = maps[i];
      if (!used[i])
        {
          assert (imap->l_type == lt_loaded);

          /* That was the last reference, and this was a dlopen-loaded
             object.  We can unmap it.  */

          /* Remove the object from the dtv slotinfo array if it uses TLS.  */
          if (__glibc_unlikely (imap->l_tls_blocksize > 0))
            {
              any_tls = true;

              if (GL(dl_tls_dtv_slotinfo_list) != NULL
                  && ! remove_slotinfo (imap->l_tls_modid,
                                        GL(dl_tls_dtv_slotinfo_list), 0,
                                        imap->l_init_called))
                /* All dynamically loaded modules with TLS are unloaded.  */
                GL(dl_tls_max_dtv_idx) = GL(dl_tls_static_nelem);

              if (imap->l_tls_offset != NO_TLS_OFFSET
                  && imap->l_tls_offset != FORCED_DYNAMIC_TLS_OFFSET)
                {
                  /* Collect a contiguous chunk built from the objects in
                     this search list, going in either direction.  When the
                     whole chunk is at the end of the used area then we can
                     reclaim it.  */
#if TLS_TCB_AT_TP
                  if (tls_free_start == NO_TLS_OFFSET
                      || (size_t) imap->l_tls_offset == tls_free_start)
                    {
                      /* Extend the contiguous chunk being reclaimed.  */
                      tls_free_start
                        = imap->l_tls_offset - imap->l_tls_blocksize;

                      if (tls_free_end == NO_TLS_OFFSET)
                        tls_free_end = imap->l_tls_offset;
                    }
                  else if (imap->l_tls_offset - imap->l_tls_blocksize
                           == tls_free_end)
                    /* Extend the chunk backwards.  */
                    tls_free_end = imap->l_tls_offset;
                  else
                    {
                      /* This isn't contiguous with the last chunk freed.
                         One of them will be leaked unless we can free
                         one block right away.  */
                      if (tls_free_end == GL(dl_tls_static_used))
                        {
                          GL(dl_tls_static_used) = tls_free_start;
                          tls_free_end = imap->l_tls_offset;
                          tls_free_start
                            = tls_free_end - imap->l_tls_blocksize;
                        }
                      else if ((size_t) imap->l_tls_offset
                               == GL(dl_tls_static_used))
                        GL(dl_tls_static_used)
                          = imap->l_tls_offset - imap->l_tls_blocksize;
                      else if (tls_free_end < (size_t) imap->l_tls_offset)
                        {
                          /* We pick the later block.  It has a chance to
                             be freed.  */
                          tls_free_end = imap->l_tls_offset;
                          tls_free_start
                            = tls_free_end - imap->l_tls_blocksize;
                        }
                    }
#elif TLS_DTV_AT_TP
                  if (tls_free_start == NO_TLS_OFFSET)
                    {
                      tls_free_start = imap->l_tls_firstbyte_offset;
                      tls_free_end = (imap->l_tls_offset
                                      + imap->l_tls_blocksize);
                    }
                  else if (imap->l_tls_firstbyte_offset == tls_free_end)
                    /* Extend the contiguous chunk being reclaimed.  */
                    tls_free_end = imap->l_tls_offset + imap->l_tls_blocksize;
                  else if (imap->l_tls_offset + imap->l_tls_blocksize
                           == tls_free_start)
                    /* Extend the chunk backwards.  */
                    tls_free_start = imap->l_tls_firstbyte_offset;
                  /* This isn't contiguous with the last chunk freed.
                     One of them will be leaked unless we can free
                     one block right away.  */
                  else if (imap->l_tls_offset + imap->l_tls_blocksize
                           == GL(dl_tls_static_used))
                    GL(dl_tls_static_used) = imap->l_tls_firstbyte_offset;
                  else if (tls_free_end == GL(dl_tls_static_used))
                    {
                      GL(dl_tls_static_used) = tls_free_start;
                      tls_free_start = imap->l_tls_firstbyte_offset;
                      tls_free_end = imap->l_tls_offset + imap->l_tls_blocksize;
                    }
                  else if (tls_free_end < imap->l_tls_firstbyte_offset)
                    {
                      /* We pick the later block.  It has a chance to
                         be freed.  */
                      tls_free_start = imap->l_tls_firstbyte_offset;
                      tls_free_end = imap->l_tls_offset + imap->l_tls_blocksize;
                    }
#else
# error "Either TLS_TCB_AT_TP or TLS_DTV_AT_TP must be defined"
#endif
                }
            }

          /* Reset unique symbols if forced.  */
          if (force)
            {
              struct unique_sym_table *tab = &ns->_ns_unique_sym_table;
              __rtld_lock_lock_recursive (tab->lock);
              struct unique_sym *entries = tab->entries;
              if (entries != NULL)
                {
                  size_t idx, size = tab->size;
                  for (idx = 0; idx < size; ++idx)
                    {
                      /* Clear unique symbol entries that belong to this
                         object.  */
                      if (entries[idx].name != NULL
                          && entries[idx].map == imap)
                        {
                          entries[idx].name = NULL;
                          entries[idx].hashval = 0;
                          tab->n_elements--;
                        }
                    }
                }
              __rtld_lock_unlock_recursive (tab->lock);
            }

          /* We can unmap all the maps at once.  We determined the
             start address and length when we loaded the object and
             the `munmap' call does the rest.  */
          DL_UNMAP (imap);

          /* Finally, unlink the data structure and free it.  */
#if DL_NNS == 1
          /* The assert in the (imap->l_prev == NULL) case gives
             the compiler license to warn that NS points outside
             the dl_ns array bounds in that case (as nsid != LM_ID_BASE
             is tantamount to nsid >= DL_NNS).  That should be impossible
             in this configuration, so just assert about it instead.  */
          assert (nsid == LM_ID_BASE);
          assert (imap->l_prev != NULL);
#else
          if (imap->l_prev == NULL)
            {
              assert (nsid != LM_ID_BASE);
              ns->_ns_loaded = imap->l_next;

              /* Update the pointer to the head of the list
                 we leave for debuggers to examine.  */
              r->r_map = (void *) ns->_ns_loaded;
            }
          else
#endif
            imap->l_prev->l_next = imap->l_next;

          --ns->_ns_nloaded;
          if (imap->l_next != NULL)
            imap->l_next->l_prev = imap->l_prev;

          free (imap->l_versions);
          if (imap->l_origin != (char *) -1)
            free ((char *) imap->l_origin);

          free (imap->l_reldeps);

          /* Print debugging message.  */
          if (__glibc_unlikely (GLRO(dl_debug_mask) & DL_DEBUG_FILES))
            _dl_debug_printf ("\nfile=%s [%lu]; destroying link map\n",
                              imap->l_name, imap->l_ns);

          /* This name is always allocated.  */
          free (imap->l_name);
          /* Remove the list with all the names of the shared object.  */

          struct libname_list *lnp = imap->l_libname;
          do
            {
              struct libname_list *this = lnp;
              lnp = lnp->next;
              if (!this->dont_free)
                free (this);
            }
          while (lnp != NULL);

          /* Remove the searchlists.  */
          free (imap->l_initfini);

          /* Remove the scope array if we allocated it.  */
          if (imap->l_scope != imap->l_scope_mem)
            free (imap->l_scope);

          if (imap->l_phdr_allocated)
            free ((void *) imap->l_phdr);

          if (imap->l_rpath_dirs.dirs != (void *) -1)
            free (imap->l_rpath_dirs.dirs);
          if (imap->l_runpath_dirs.dirs != (void *) -1)
            free (imap->l_runpath_dirs.dirs);

          free (imap);
        }
    }

  __rtld_lock_unlock_recursive (GL(dl_load_write_lock));

  /* If we removed any object which uses TLS bump the generation counter.  */
  if (any_tls)
    {
      if (__glibc_unlikely (++GL(dl_tls_generation) == 0))
        _dl_fatal_printf ("TLS generation counter wrapped!  Please report as described in "REPORT_BUGS_TO".\n");

      if (tls_free_end == GL(dl_tls_static_used))
        GL(dl_tls_static_used) = tls_free_start;
    }

#ifdef SHARED
  /* Auditing checkpoint: we have deleted all objects.  */
  if (__glibc_unlikely (do_audit))
    {
      struct link_map *head = ns->_ns_loaded;
      /* Do not call the functions for any auditing object.  */
      if (head->l_auditing == 0)
        {
          struct audit_ifaces *afct = GLRO(dl_audit);
          for (unsigned int cnt = 0; cnt < GLRO(dl_naudit); ++cnt)
            {
              if (afct->activity != NULL)
                afct->activity (&head->l_audit[cnt].cookie, LA_ACT_CONSISTENT);

              afct = afct->next;
            }
        }
    }
#endif

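  /* If the highest-numbered namespaces are now empty, shrink the count of
     namespaces in use.  */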
  if (__builtin_expect (ns->_ns_loaded == NULL, 0)
      && nsid == GL(dl_nns) - 1)
    do
      --GL(dl_nns);
    while (GL(dl_ns)[GL(dl_nns) - 1]._ns_loaded == NULL);

  /* Notify the debugger those objects are finalized and gone.  */
  r->r_state = RT_CONSISTENT;
  _dl_debug_state ();
  LIBC_PROBE (unmap_complete, 2, nsid, r);

  /* Recheck if we need to retry, release the lock.  */
 out:
  if (dl_close_state == rerun)
    goto retry;

  dl_close_state = not_pending;
}


void
_dl_close (void *_map)
{
  struct link_map *map = _map;

  /* We must take the lock to examine the contents of map and avoid
     concurrent dlopens.  */
  __rtld_lock_lock_recursive (GL(dl_load_lock));

  /* At this point we are guaranteed nobody else is touching the list of
     loaded maps, but a concurrent dlclose might have freed our map
     before we took the lock.  There is no way to detect this (see below)
     so we proceed assuming this isn't the case.  First see whether we
     can remove the object at all.  */
  if (__glibc_unlikely (map->l_flags_1 & DF_1_NODELETE))
    {
      /* Nope.  Do nothing.  */
      __rtld_lock_unlock_recursive (GL(dl_load_lock));
      return;
    }

  /* At present this is an unreliable check except in the case where the
     caller has recursively called dlclose and we are sure the link map
     has not been freed.  In a non-recursive dlclose the map itself
     might have been freed and this access is potentially a data race
     with whatever other use this memory might have now, or worse we
     might silently corrupt memory if it looks enough like a link map.
     POSIX has language in dlclose that appears to guarantee that this
     should be a detectable case, and given that dlclose should be
     thread-safe, we need this to be a reliable detection.
     This is bug 20990.  */
  if (__builtin_expect (map->l_direct_opencount, 1) == 0)
    {
      __rtld_lock_unlock_recursive (GL(dl_load_lock));
      _dl_signal_error (0, map->l_name, NULL, N_("shared object not open"));
    }

  _dl_close_worker (map, false);

  __rtld_lock_unlock_recursive (GL(dl_load_lock));
}