1/* Copyright (C) 1993-2018 Free Software Foundation, Inc.
2 This file is part of the GNU C Library.
3
4 The GNU C Library is free software; you can redistribute it and/or
5 modify it under the terms of the GNU Lesser General Public
6 License as published by the Free Software Foundation; either
7 version 2.1 of the License, or (at your option) any later version.
8
9 The GNU C Library is distributed in the hope that it will be useful,
10 but WITHOUT ANY WARRANTY; without even the implied warranty of
11 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12 Lesser General Public License for more details.
13
14 You should have received a copy of the GNU Lesser General Public
15 License along with the GNU C Library; if not, see
16 <http://www.gnu.org/licenses/>.
17
18 As a special exception, if you link the code in this file with
19 files compiled with a GNU compiler to produce an executable,
20 that does not cause the resulting executable to be covered by
21 the GNU Lesser General Public License. This exception does not
22 however invalidate any other reasons why the executable file
23 might be covered by the GNU Lesser General Public License.
24 This exception applies to code released by its copyright holders
25 in files containing the exception. */
26
27/* Generic or default I/O operations. */
28
29#include "libioP.h"
30#include <stdlib.h>
31#include <string.h>
32#include <stdbool.h>
33#include <sched.h>
34
35#ifdef _IO_MTSAFE_IO
36static _IO_lock_t list_all_lock = _IO_lock_initializer;
37#endif
38
39static FILE *run_fp;
40
#ifdef _IO_MTSAFE_IO
/* Cancellation/cleanup handler used around list traversals: release the
   per-stream lock that was held when the region was interrupted (if any)
   and then the global list lock.  */
static void
flush_cleanup (void *not_used)
{
  if (run_fp != NULL)
    _IO_funlockfile (run_fp);
  _IO_lock_unlock (list_all_lock);
}
#endif
50
/* Remove FP from the global chain of open streams (_IO_list_all) if it
   is linked in, and clear its _IO_LINKED flag.  Under _IO_MTSAFE_IO the
   list lock and the stream's own lock are held for the duration.  */
void
_IO_un_link (struct _IO_FILE_plus *fp)
{
  if (fp->file._flags & _IO_LINKED)
    {
      FILE **f;
#ifdef _IO_MTSAFE_IO
      _IO_cleanup_region_start_noarg (flush_cleanup);
      _IO_lock_lock (list_all_lock);
      run_fp = (FILE *) fp;
      _IO_flockfile ((FILE *) fp);
#endif
      if (_IO_list_all == NULL)
        ;
      else if (fp == _IO_list_all)
        /* FP is the list head; pop it.  */
        _IO_list_all = (struct _IO_FILE_plus *) _IO_list_all->file._chain;
      else
        /* Walk the chain looking for the link that points at FP.  */
        for (f = &_IO_list_all->file._chain; *f; f = &(*f)->_chain)
          if (*f == (FILE *) fp)
            {
              *f = fp->file._chain;
              break;
            }
      fp->file._flags &= ~_IO_LINKED;
#ifdef _IO_MTSAFE_IO
      _IO_funlockfile ((FILE *) fp);
      run_fp = NULL;
      _IO_lock_unlock (list_all_lock);
      _IO_cleanup_region_end (0);
#endif
    }
}
libc_hidden_def (_IO_un_link)
84
/* Insert FP at the head of the global chain of open streams unless it
   is already linked; sets _IO_LINKED.  Under _IO_MTSAFE_IO the list
   lock and the stream's own lock are held for the duration.  */
void
_IO_link_in (struct _IO_FILE_plus *fp)
{
  if ((fp->file._flags & _IO_LINKED) == 0)
    {
      fp->file._flags |= _IO_LINKED;
#ifdef _IO_MTSAFE_IO
      _IO_cleanup_region_start_noarg (flush_cleanup);
      _IO_lock_lock (list_all_lock);
      run_fp = (FILE *) fp;
      _IO_flockfile ((FILE *) fp);
#endif
      fp->file._chain = (FILE *) _IO_list_all;
      _IO_list_all = fp;
#ifdef _IO_MTSAFE_IO
      _IO_funlockfile ((FILE *) fp);
      run_fp = NULL;
      _IO_lock_unlock (list_all_lock);
      _IO_cleanup_region_end (0);
#endif
    }
}
libc_hidden_def (_IO_link_in)
108
109/* Return minimum _pos markers
110 Assumes the current get area is the main get area. */
111ssize_t _IO_least_marker (FILE *fp, char *end_p);
112
113ssize_t
114_IO_least_marker (FILE *fp, char *end_p)
115{
116 ssize_t least_so_far = end_p - fp->_IO_read_base;
117 struct _IO_marker *mark;
118 for (mark = fp->_markers; mark != NULL; mark = mark->_next)
119 if (mark->_pos < least_so_far)
120 least_so_far = mark->_pos;
121 return least_so_far;
122}
123
124/* Switch current get area from backup buffer to (start of) main get area. */
125
126void
127_IO_switch_to_main_get_area (FILE *fp)
128{
129 char *tmp;
130 fp->_flags &= ~_IO_IN_BACKUP;
131 /* Swap _IO_read_end and _IO_save_end. */
132 tmp = fp->_IO_read_end;
133 fp->_IO_read_end = fp->_IO_save_end;
134 fp->_IO_save_end= tmp;
135 /* Swap _IO_read_base and _IO_save_base. */
136 tmp = fp->_IO_read_base;
137 fp->_IO_read_base = fp->_IO_save_base;
138 fp->_IO_save_base = tmp;
139 /* Set _IO_read_ptr. */
140 fp->_IO_read_ptr = fp->_IO_read_base;
141}
142
143/* Switch current get area from main get area to (end of) backup area. */
144
145void
146_IO_switch_to_backup_area (FILE *fp)
147{
148 char *tmp;
149 fp->_flags |= _IO_IN_BACKUP;
150 /* Swap _IO_read_end and _IO_save_end. */
151 tmp = fp->_IO_read_end;
152 fp->_IO_read_end = fp->_IO_save_end;
153 fp->_IO_save_end = tmp;
154 /* Swap _IO_read_base and _IO_save_base. */
155 tmp = fp->_IO_read_base;
156 fp->_IO_read_base = fp->_IO_save_base;
157 fp->_IO_save_base = tmp;
158 /* Set _IO_read_ptr. */
159 fp->_IO_read_ptr = fp->_IO_read_end;
160}
161
/* Leave put mode: flush any pending output through the overflow method,
   then aim the get area at what has been buffered so far.  Clears
   _IO_CURRENTLY_PUTTING.  Returns 0, or EOF if the flush failed.  */
int
_IO_switch_to_get_mode (FILE *fp)
{
  if (fp->_IO_write_ptr > fp->_IO_write_base)
    if (_IO_OVERFLOW (fp, EOF) == EOF)
      return EOF;
  if (_IO_in_backup (fp))
    fp->_IO_read_base = fp->_IO_backup_base;
  else
    {
      fp->_IO_read_base = fp->_IO_buf_base;
      if (fp->_IO_write_ptr > fp->_IO_read_end)
        fp->_IO_read_end = fp->_IO_write_ptr;
    }
  fp->_IO_read_ptr = fp->_IO_write_ptr;

  /* Collapse the put area to an empty region at the read pointer.  */
  fp->_IO_write_base = fp->_IO_write_ptr = fp->_IO_write_end = fp->_IO_read_ptr;

  fp->_flags &= ~_IO_CURRENTLY_PUTTING;
  return 0;
}
libc_hidden_def (_IO_switch_to_get_mode)
184
/* Release FP's backup (putback) buffer, first switching back to the
   main get area if the backup area is currently active.  */
void
_IO_free_backup_area (FILE *fp)
{
  if (_IO_in_backup (fp))
    _IO_switch_to_main_get_area (fp);	/* Just in case. */
  free (fp->_IO_save_base);
  fp->_IO_save_base = NULL;
  fp->_IO_save_end = NULL;
  fp->_IO_backup_base = NULL;
}
libc_hidden_def (_IO_free_backup_area)
196
/* Slow path of putc: commit F to byte orientation if still undecided,
   then dispatch CH to the stream's overflow method.  */
int
__overflow (FILE *f, int ch)
{
  /* This is a single-byte stream. */
  if (f->_mode == 0)
    _IO_fwide (f, -1);
  return _IO_OVERFLOW (f, ch);
}
libc_hidden_def (__overflow)
206
/* Copy the bytes [_IO_read_base, END_P) of the main get area into the
   backup area (growing it if necessary) and shift all marker positions
   so they remain valid.  Returns 0 on success, EOF if allocation
   failed.  Assumes the current get area is the main get area.  */
static int
save_for_backup (FILE *fp, char *end_p)
{
  /* Append [_IO_read_base..end_p] to backup area. */
  ssize_t least_mark = _IO_least_marker (fp, end_p);
  /* needed_size is how much space we need in the backup area. */
  size_t needed_size = (end_p - fp->_IO_read_base) - least_mark;
  /* FIXME: Dubious arithmetic if pointers are NULL */
  size_t current_Bsize = fp->_IO_save_end - fp->_IO_save_base;
  size_t avail; /* Extra space available for future expansion. */
  ssize_t delta;
  struct _IO_marker *mark;
  if (needed_size > current_Bsize)
    {
      char *new_buffer;
      /* 100 bytes of slack so the next call may avoid reallocating.  */
      avail = 100;
      new_buffer = (char *) malloc (avail + needed_size);
      if (new_buffer == NULL)
        return EOF;		/* FIXME */
      if (least_mark < 0)
        {
          /* A negative least_mark means some marker still refers into
             the old backup area: copy that surviving tail first, then
             append the new bytes from the main get area.  */
          __mempcpy (__mempcpy (new_buffer + avail,
                                fp->_IO_save_end + least_mark,
                                -least_mark),
                     fp->_IO_read_base,
                     end_p - fp->_IO_read_base);
        }
      else
        memcpy (new_buffer + avail,
                fp->_IO_read_base + least_mark,
                needed_size);
      free (fp->_IO_save_base);
      fp->_IO_save_base = new_buffer;
      fp->_IO_save_end = new_buffer + avail + needed_size;
    }
  else
    {
      /* The existing backup buffer is large enough: shift the surviving
         old data toward the end, then append the new bytes.  */
      avail = current_Bsize - needed_size;
      if (least_mark < 0)
        {
          /* memmove: source and destination may overlap.  */
          memmove (fp->_IO_save_base + avail,
                   fp->_IO_save_end + least_mark,
                   -least_mark);
          memcpy (fp->_IO_save_base + avail - least_mark,
                  fp->_IO_read_base,
                  end_p - fp->_IO_read_base);
        }
      else if (needed_size > 0)
        memcpy (fp->_IO_save_base + avail,
                fp->_IO_read_base + least_mark,
                needed_size);
    }
  fp->_IO_backup_base = fp->_IO_save_base + avail;
  /* Adjust all the streammarkers. */
  delta = end_p - fp->_IO_read_base;
  for (mark = fp->_markers; mark != NULL; mark = mark->_next)
    mark->_pos -= delta;
  return 0;
}
266
/* Slow path of getc: make sure FP is byte-oriented and in get mode,
   then return the next available character WITHOUT consuming it, or
   EOF.  */
int
__underflow (FILE *fp)
{
  /* A wide-oriented stream cannot be read with the byte functions.
     Legacy (pre-2.1) vtables have no orientation, so skip the check
     for them.  */
  if (_IO_vtable_offset (fp) == 0 && _IO_fwide (fp, -1) != -1)
    return EOF;

  if (fp->_mode == 0)
    _IO_fwide (fp, -1);		/* Commit to byte orientation.  */
  if (_IO_in_put_mode (fp))
    if (_IO_switch_to_get_mode (fp) == EOF)
      return EOF;
  /* Buffered data available: peek at it.  */
  if (fp->_IO_read_ptr < fp->_IO_read_end)
    return *(unsigned char *) fp->_IO_read_ptr;
  if (_IO_in_backup (fp))
    {
      _IO_switch_to_main_get_area (fp);
      if (fp->_IO_read_ptr < fp->_IO_read_end)
        return *(unsigned char *) fp->_IO_read_ptr;
    }
  if (_IO_have_markers (fp))
    {
      /* Active markers must be able to seek back into the data we are
         about to discard, so save it to the backup area first.  */
      if (save_for_backup (fp, fp->_IO_read_end))
        return EOF;
    }
  else if (_IO_have_backup (fp))
    _IO_free_backup_area (fp);
  return _IO_UNDERFLOW (fp);
}
libc_hidden_def (__underflow)
296
/* Like __underflow, but CONSUMES and returns the next character (note
   the read-pointer post-increments), or EOF.  */
int
__uflow (FILE *fp)
{
  /* A wide-oriented stream cannot be read with the byte functions.
     Legacy (pre-2.1) vtables have no orientation, so skip the check
     for them.  */
  if (_IO_vtable_offset (fp) == 0 && _IO_fwide (fp, -1) != -1)
    return EOF;

  if (fp->_mode == 0)
    _IO_fwide (fp, -1);		/* Commit to byte orientation.  */
  if (_IO_in_put_mode (fp))
    if (_IO_switch_to_get_mode (fp) == EOF)
      return EOF;
  if (fp->_IO_read_ptr < fp->_IO_read_end)
    return *(unsigned char *) fp->_IO_read_ptr++;
  if (_IO_in_backup (fp))
    {
      _IO_switch_to_main_get_area (fp);
      if (fp->_IO_read_ptr < fp->_IO_read_end)
        return *(unsigned char *) fp->_IO_read_ptr++;
    }
  if (_IO_have_markers (fp))
    {
      /* Preserve the current get area for the markers before the
         underlying uflow method replaces it.  */
      if (save_for_backup (fp, fp->_IO_read_end))
        return EOF;
    }
  else if (_IO_have_backup (fp))
    _IO_free_backup_area (fp);
  return _IO_UFLOW (fp);
}
libc_hidden_def (__uflow)
326
327void
328_IO_setb (FILE *f, char *b, char *eb, int a)
329{
330 if (f->_IO_buf_base && !(f->_flags & _IO_USER_BUF))
331 free (f->_IO_buf_base);
332 f->_IO_buf_base = b;
333 f->_IO_buf_end = eb;
334 if (a)
335 f->_flags &= ~_IO_USER_BUF;
336 else
337 f->_flags |= _IO_USER_BUF;
338}
339libc_hidden_def (_IO_setb)
340
/* Ensure FP has a buffer: ask the stream's doallocate method unless the
   stream is unbuffered and byte-oriented, falling back to the one-byte
   _shortbuf on failure or for unbuffered streams.  */
void
_IO_doallocbuf (FILE *fp)
{
  if (fp->_IO_buf_base)
    return;
  if (!(fp->_flags & _IO_UNBUFFERED) || fp->_mode > 0)
    if (_IO_DOALLOCATE (fp) != EOF)
      return;
  _IO_setb (fp, fp->_shortbuf, fp->_shortbuf+1, 0);
}
libc_hidden_def (_IO_doallocbuf)
352
/* Default underflow method: no way to produce input; always EOF.  */
int
_IO_default_underflow (FILE *fp)
{
  return EOF;
}
358
/* Default uflow method: delegate to underflow and, if it produced
   data, consume and return the first newly available byte.  */
int
_IO_default_uflow (FILE *fp)
{
  int ch = _IO_UNDERFLOW (fp);
  if (ch == EOF)
    return EOF;
  return *(unsigned char *) fp->_IO_read_ptr++;
}
libc_hidden_def (_IO_default_uflow)
368
/* Default bulk write: copy N bytes from DATA into F's put area in
   chunks, calling the overflow method whenever the buffer fills.
   Returns the number of bytes actually consumed (may be < N if
   overflow fails).  */
size_t
_IO_default_xsputn (FILE *f, const void *data, size_t n)
{
  const char *s = (char *) data;
  size_t more = n;		/* Bytes still to be written.  */
  if (more <= 0)
    return 0;
  for (;;)
    {
      /* Space available. */
      if (f->_IO_write_ptr < f->_IO_write_end)
        {
          size_t count = f->_IO_write_end - f->_IO_write_ptr;
          if (count > more)
            count = more;
          /* For larger chunks mempcpy beats the byte loop.  */
          if (count > 20)
            {
              f->_IO_write_ptr = __mempcpy (f->_IO_write_ptr, s, count);
              s += count;
            }
          else if (count)
            {
              char *p = f->_IO_write_ptr;
              ssize_t i;
              for (i = count; --i >= 0; )
                *p++ = *s++;
              f->_IO_write_ptr = p;
            }
          more -= count;
        }
      /* Buffer full: push one byte through overflow (which flushes).
         Note *s++ consumes the byte handed to overflow.  */
      if (more == 0 || _IO_OVERFLOW (f, (unsigned char) *s++) == EOF)
        break;
      more--;
    }
  return n - more;
}
libc_hidden_def (_IO_default_xsputn)
406
/* Read up to N bytes into DATA by dispatching to the stream's xsgetn
   method; returns the number of bytes obtained.  */
size_t
_IO_sgetn (FILE *fp, void *data, size_t n)
{
  /* FIXME handle putback buffer here! */
  return _IO_XSGETN (fp, data, n);
}
libc_hidden_def (_IO_sgetn)
414
/* Default bulk read: drain FP's get area into DATA in chunks, calling
   __underflow to refill.  Returns the number of bytes actually
   copied (may be < N at end of input).  */
size_t
_IO_default_xsgetn (FILE *fp, void *data, size_t n)
{
  size_t more = n;		/* Bytes still wanted.  */
  char *s = (char*) data;
  for (;;)
    {
      /* Data available. */
      if (fp->_IO_read_ptr < fp->_IO_read_end)
        {
          size_t count = fp->_IO_read_end - fp->_IO_read_ptr;
          if (count > more)
            count = more;
          /* For larger chunks mempcpy beats the byte loop.  */
          if (count > 20)
            {
              s = __mempcpy (s, fp->_IO_read_ptr, count);
              fp->_IO_read_ptr += count;
            }
          else if (count)
            {
              char *p = fp->_IO_read_ptr;
              int i = (int) count;
              while (--i >= 0)
                *s++ = *p++;
              fp->_IO_read_ptr = p;
            }
          more -= count;
        }
      if (more == 0 || __underflow (fp) == EOF)
        break;
    }
  return n - more;
}
libc_hidden_def (_IO_default_xsgetn)
449
/* Default setbuf: flush FP, then install P..P+LEN as its buffer, or
   switch to the one-byte _shortbuf (unbuffered mode) when P is NULL or
   LEN is 0.  Resets the get and put areas.  Returns FP, or NULL if the
   flush failed.  */
FILE *
_IO_default_setbuf (FILE *fp, char *p, ssize_t len)
{
  if (_IO_SYNC (fp) == EOF)
    return NULL;
  if (p == NULL || len == 0)
    {
      fp->_flags |= _IO_UNBUFFERED;
      _IO_setb (fp, fp->_shortbuf, fp->_shortbuf+1, 0);
    }
  else
    {
      fp->_flags &= ~_IO_UNBUFFERED;
      _IO_setb (fp, p, p+len, 0);
    }
  fp->_IO_write_base = fp->_IO_write_ptr = fp->_IO_write_end = 0;
  fp->_IO_read_base = fp->_IO_read_ptr = fp->_IO_read_end = 0;
  return fp;
}
469
/* Default seekpos: implemented as an absolute seekoff (DIR 0).  */
off64_t
_IO_default_seekpos (FILE *fp, off64_t pos, int mode)
{
  return _IO_SEEKOFF (fp, pos, 0, mode);
}
475
476int
477_IO_default_doallocate (FILE *fp)
478{
479 char *buf;
480
481 buf = malloc(BUFSIZ);
482 if (__glibc_unlikely (buf == NULL))
483 return EOF;
484
485 _IO_setb (fp, buf, buf+BUFSIZ, 1);
486 return 1;
487}
488libc_hidden_def (_IO_default_doallocate)
489
/* Internal stream initialization: byte-only stream (orientation -1),
   no wide data, no vtable-check arming.  */
void
_IO_init_internal (FILE *fp, int flags)
{
  _IO_no_init (fp, flags, -1, NULL, NULL);
}
495
/* Public stream initialization: record that foreign (non-libc)
   vtables must from now on pass _IO_vtable_check, then initialize
   FP.  */
void
_IO_init (FILE *fp, int flags)
{
  IO_set_accept_foreign_vtables (&_IO_vtable_check);
  _IO_init_internal (fp, flags);
}
502
503static int stdio_needs_locking;
504
505/* In a single-threaded process most stdio locks can be omitted. After
506 _IO_enable_locks is called, locks are not optimized away any more.
507 It must be first called while the process is still single-threaded.
508
509 This lock optimization can be disabled on a per-file basis by setting
510 _IO_FLAGS2_NEED_LOCK, because a file can have user-defined callbacks
511 or can be locked with flockfile and then a thread may be created
512 between a lock and unlock, so omitting the lock is not valid.
513
514 Here we have to make sure that the flag is set on all existing files
515 and files created later. */
void
_IO_enable_locks (void)
{
  _IO_ITER i;

  if (stdio_needs_locking)
    return;
  stdio_needs_locking = 1;
  /* Flag every existing stream; _IO_old_init flags streams created
     from now on.  */
  for (i = _IO_iter_begin (); i != _IO_iter_end (); i = _IO_iter_next (i))
    _IO_iter_file (i)->_flags2 |= _IO_FLAGS2_NEED_LOCK;
}
libc_hidden_def (_IO_enable_locks)
528
/* Core field initialization shared by all stream constructors: set the
   magic and FLAGS, clear every buffer pointer and the marker chain,
   and initialize the per-stream lock if one is attached.  */
void
_IO_old_init (FILE *fp, int flags)
{
  fp->_flags = _IO_MAGIC|flags;
  fp->_flags2 = 0;
  if (stdio_needs_locking)
    fp->_flags2 |= _IO_FLAGS2_NEED_LOCK;
  fp->_IO_buf_base = NULL;
  fp->_IO_buf_end = NULL;
  fp->_IO_read_base = NULL;
  fp->_IO_read_ptr = NULL;
  fp->_IO_read_end = NULL;
  fp->_IO_write_base = NULL;
  fp->_IO_write_ptr = NULL;
  fp->_IO_write_end = NULL;
  fp->_chain = NULL; /* Not necessary. */

  fp->_IO_save_base = NULL;
  fp->_IO_backup_base = NULL;
  fp->_IO_save_end = NULL;
  fp->_markers = NULL;
  fp->_cur_column = 0;
#if _IO_JUMPS_OFFSET
  fp->_vtable_offset = 0;
#endif
#ifdef _IO_MTSAFE_IO
  if (fp->_lock != NULL)
    _IO_lock_init (*fp->_lock);
#endif
}
559
/* Initialize FP with the given ORIENTATION (-1 byte, 0 undecided,
   >= 0 potentially wide).  For possibly-wide streams, WD supplies the
   wide-data block (whose pointers are cleared here) and JMP the wide
   vtable.  */
void
_IO_no_init (FILE *fp, int flags, int orientation,
	     struct _IO_wide_data *wd, const struct _IO_jump_t *jmp)
{
  _IO_old_init (fp, flags);
  fp->_mode = orientation;
  if (orientation >= 0)
    {
      fp->_wide_data = wd;
      fp->_wide_data->_IO_buf_base = NULL;
      fp->_wide_data->_IO_buf_end = NULL;
      fp->_wide_data->_IO_read_base = NULL;
      fp->_wide_data->_IO_read_ptr = NULL;
      fp->_wide_data->_IO_read_end = NULL;
      fp->_wide_data->_IO_write_base = NULL;
      fp->_wide_data->_IO_write_ptr = NULL;
      fp->_wide_data->_IO_write_end = NULL;
      fp->_wide_data->_IO_save_base = NULL;
      fp->_wide_data->_IO_backup_base = NULL;
      fp->_wide_data->_IO_save_end = NULL;

      fp->_wide_data->_wide_vtable = jmp;
    }
  else
    /* Cause predictable crash when a wide function is called on a byte
       stream.  */
    fp->_wide_data = (struct _IO_wide_data *) -1L;
  fp->_freeres_list = NULL;
}
589
/* Default sync method: nothing to flush; report success.  */
int
_IO_default_sync (FILE *fp)
{
  return 0;
}
595
596/* The way the C++ classes are mapped into the C functions in the
597 current implementation, this function can get called twice! */
598
/* Default finish (destructor) method: free library-owned buffers and
   the backup area, orphan any remaining markers, unlink FP from the
   global list, and destroy its lock.  Safe against being called
   twice (all freed pointers are cleared).  */
void
_IO_default_finish (FILE *fp, int dummy)
{
  struct _IO_marker *mark;
  if (fp->_IO_buf_base && !(fp->_flags & _IO_USER_BUF))
    {
      free (fp->_IO_buf_base);
      fp->_IO_buf_base = fp->_IO_buf_end = NULL;
    }

  /* Detach markers so they no longer reference a dead stream.  */
  for (mark = fp->_markers; mark != NULL; mark = mark->_next)
    mark->_sbuf = NULL;

  if (fp->_IO_save_base)
    {
      free (fp->_IO_save_base);
      fp->_IO_save_base = NULL;
    }

  _IO_un_link ((struct _IO_FILE_plus *) fp);

#ifdef _IO_MTSAFE_IO
  if (fp->_lock != NULL)
    _IO_lock_fini (*fp->_lock);
#endif
}
libc_hidden_def (_IO_default_finish)
626
/* Default seekoff method: seeking unsupported; report failure.  */
off64_t
_IO_default_seekoff (FILE *fp, off64_t offset, int dir, int mode)
{
  return _IO_pos_BAD;
}
632
633int
634_IO_sputbackc (FILE *fp, int c)
635{
636 int result;
637
638 if (fp->_IO_read_ptr > fp->_IO_read_base
639 && (unsigned char)fp->_IO_read_ptr[-1] == (unsigned char)c)
640 {
641 fp->_IO_read_ptr--;
642 result = (unsigned char) c;
643 }
644 else
645 result = _IO_PBACKFAIL (fp, c);
646
647 if (result != EOF)
648 fp->_flags &= ~_IO_EOF_SEEN;
649
650 return result;
651}
652libc_hidden_def (_IO_sputbackc)
653
/* Back up FP by one character and return it.  Falls back to the
   pbackfail method (with EOF) when the read pointer is already at the
   start of the get area.  A successful ungetc clears the EOF
   indicator.  */
int
_IO_sungetc (FILE *fp)
{
  int result;

  if (fp->_IO_read_ptr <= fp->_IO_read_base)
    result = _IO_PBACKFAIL (fp, EOF);
  else
    {
      fp->_IO_read_ptr -= 1;
      result = (unsigned char) *fp->_IO_read_ptr;
    }

  if (result != EOF)
    fp->_flags &= ~_IO_EOF_SEEN;

  return result;
}
672
/* Given that the output column was START before the COUNT characters
   of LINE were emitted, return the resulting column: the number of
   characters after the last newline in LINE, or START + COUNT if LINE
   contains no newline.  */
unsigned
_IO_adjust_column (unsigned start, const char *line, int count)
{
  int i = count;

  while (i > 0)
    {
      --i;
      if (line[i] == '\n')
        return count - i - 1;
    }
  return start + count;
}
682libc_hidden_def (_IO_adjust_column)
683
/* Flush every stream on the global list that has pending narrow or
   wide output.  DO_LOCK selects whether each stream's own lock is
   taken (exit-time cleanup passes 0).  Returns EOF if any flush
   failed, otherwise 0.  */
int
_IO_flush_all_lockp (int do_lock)
{
  int result = 0;
  FILE *fp;

#ifdef _IO_MTSAFE_IO
  _IO_cleanup_region_start_noarg (flush_cleanup);
  _IO_lock_lock (list_all_lock);
#endif

  for (fp = (FILE *) _IO_list_all; fp != NULL; fp = fp->_chain)
    {
      /* Lets flush_cleanup unlock FP if we are cancelled here.  */
      run_fp = fp;
      if (do_lock)
	_IO_flockfile (fp);

      /* Pending output exists if the narrow put area is non-empty, or
	 (for wide-oriented, non-legacy streams) the wide put area is.  */
      if (((fp->_mode <= 0 && fp->_IO_write_ptr > fp->_IO_write_base)
	   || (_IO_vtable_offset (fp) == 0
	       && fp->_mode > 0 && (fp->_wide_data->_IO_write_ptr
				    > fp->_wide_data->_IO_write_base))
	   )
	  && _IO_OVERFLOW (fp, EOF) == EOF)
	result = EOF;

      if (do_lock)
	_IO_funlockfile (fp);
      run_fp = NULL;
    }

#ifdef _IO_MTSAFE_IO
  _IO_lock_unlock (list_all_lock);
  _IO_cleanup_region_end (0);
#endif

  return result;
}
721
722
/* Flush all open streams, taking each stream's lock (fflush (NULL)
   semantics).  Returns EOF if any flush failed.  */
int
_IO_flush_all (void)
{
  /* We want locking. */
  return _IO_flush_all_lockp (1);
}
libc_hidden_def (_IO_flush_all)
730
/* Flush every line-buffered stream that is open for writing.  Errors
   from the overflow method are ignored.  */
void
_IO_flush_all_linebuffered (void)
{
  FILE *fp;

#ifdef _IO_MTSAFE_IO
  _IO_cleanup_region_start_noarg (flush_cleanup);
  _IO_lock_lock (list_all_lock);
#endif

  for (fp = (FILE *) _IO_list_all; fp != NULL; fp = fp->_chain)
    {
      /* Lets flush_cleanup unlock FP if we are cancelled here.  */
      run_fp = fp;
      _IO_flockfile (fp);

      if ((fp->_flags & _IO_NO_WRITES) == 0 && fp->_flags & _IO_LINE_BUF)
	_IO_OVERFLOW (fp, EOF);

      _IO_funlockfile (fp);
      run_fp = NULL;
    }

#ifdef _IO_MTSAFE_IO
  _IO_lock_unlock (list_all_lock);
  _IO_cleanup_region_end (0);
#endif
}
libc_hidden_def (_IO_flush_all_linebuffered)
weak_alias (_IO_flush_all_linebuffered, _flushlbf)
760
761
762/* The following is a bit tricky. In general, we want to unbuffer the
763 streams so that all output which follows is seen. If we are not
764 looking for memory leaks it does not make much sense to free the
765 actual buffer because this will happen anyway once the program
766 terminated. If we do want to look for memory leaks we have to free
767 the buffers. Whether something is freed is determined by the
   functions in the libc_freeres section.  Those are called as part of
769 the atexit routine, just like _IO_cleanup. The problem is we do
770 not know whether the freeres code is called first or _IO_cleanup.
   If the former is the case, we set the dealloc_buffers variable to
772 true and _IO_unbuffer_all will take care of the rest. If
773 _IO_unbuffer_all is called first we add the streams to a list
774 which the freeres function later can walk through. */
775static void _IO_unbuffer_all (void);
776
777static bool dealloc_buffers;
778static FILE *freeres_list;
779
/* Switch every used stream to unbuffered mode at exit so that output
   written afterwards (e.g. by C++ static destructors) still reaches
   its destination.  Buffers are freed by the freeres machinery: if
   buffer_free has not run yet, queue each buffer on freeres_list
   instead of letting _IO_SETBUF free it.  */
static void
_IO_unbuffer_all (void)
{
  FILE *fp;

#ifdef _IO_MTSAFE_IO
  _IO_cleanup_region_start_noarg (flush_cleanup);
  _IO_lock_lock (list_all_lock);
#endif

  for (fp = (FILE *) _IO_list_all; fp; fp = fp->_chain)
    {
      int legacy = 0;

#if SHLIB_COMPAT (libc, GLIBC_2_0, GLIBC_2_1)
      /* A nonzero vtable offset marks a pre-2.1 stream layout.  */
      if (__glibc_unlikely (_IO_vtable_offset (fp) != 0))
	legacy = 1;
#endif

      if (! (fp->_flags & _IO_UNBUFFERED)
	  /* Iff stream is un-orientated, it wasn't used. */
	  && (legacy || fp->_mode != 0))
	{
#ifdef _IO_MTSAFE_IO
	  /* Try (briefly) to get the stream's lock; give up after
	     MAXTRIES attempts rather than deadlocking at exit.  */
	  int cnt;
#define MAXTRIES 2
	  for (cnt = 0; cnt < MAXTRIES; ++cnt)
	    if (fp->_lock == NULL || _IO_lock_trylock (*fp->_lock) == 0)
	      break;
	    else
	      /* Give the other thread time to finish up its use of the
		 stream. */
	      __sched_yield ();
#endif

	  if (! legacy && ! dealloc_buffers && !(fp->_flags & _IO_USER_BUF))
	    {
	      /* Pretend the buffer is user-supplied so _IO_SETBUF
		 leaves it alone, and queue it for buffer_free.  */
	      fp->_flags |= _IO_USER_BUF;

	      fp->_freeres_list = freeres_list;
	      freeres_list = fp;
	      fp->_freeres_buf = fp->_IO_buf_base;
	    }

	  _IO_SETBUF (fp, NULL, 0);

	  if (! legacy && fp->_mode > 0)
	    _IO_wsetb (fp, NULL, NULL, 0);

#ifdef _IO_MTSAFE_IO
	  if (cnt < MAXTRIES && fp->_lock != NULL)
	    _IO_lock_unlock (*fp->_lock);
#endif
	}

      /* Make sure that never again the wide char functions can be
	 used. */
      if (! legacy)
	fp->_mode = -1;
    }

#ifdef _IO_MTSAFE_IO
  _IO_lock_unlock (list_all_lock);
  _IO_cleanup_region_end (0);
#endif
}
846
847
/* freeres hook: release the buffers that _IO_unbuffer_all queued, and
   record (via dealloc_buffers) that a later _IO_unbuffer_all run may
   free buffers directly instead of queuing them.  */
libc_freeres_fn (buffer_free)
{
  dealloc_buffers = true;

  while (freeres_list != NULL)
    {
      free (freeres_list->_freeres_buf);

      freeres_list = freeres_list->_freeres_list;
    }
}
859
860
/* Exit-time hook (registered via __libc_atexit below): flush all
   streams without taking their locks, then make them unbuffered.
   Returns the result of the flush pass.  */
int
_IO_cleanup (void)
{
  /* We do *not* want locking.  Some threads might use streams but
     that is their problem, we flush them underneath them. */
  int result = _IO_flush_all_lockp (0);

  /* We currently don't have a reliable mechanism for making sure that
     C++ static destructors are executed in the correct order.
     So it is possible that other static destructors might want to
     write to cout - and they're supposed to be able to do so.

     The following will make the standard streambufs be unbuffered,
     which forces any output from late destructors to be written out. */
  _IO_unbuffer_all ();

  return result;
}
879
880
/* Register MARKER on FP at the current read position.  The position is
   stored relative to the read end while in backup mode (so it is
   non-positive) and relative to the read base otherwise.  */
void
_IO_init_marker (struct _IO_marker *marker, FILE *fp)
{
  marker->_sbuf = fp;
  if (_IO_in_put_mode (fp))
    _IO_switch_to_get_mode (fp);
  if (_IO_in_backup (fp))
    marker->_pos = fp->_IO_read_ptr - fp->_IO_read_end;
  else
    marker->_pos = fp->_IO_read_ptr - fp->_IO_read_base;

  /* Should perhaps sort the chain? */
  marker->_next = fp->_markers;
  fp->_markers = marker;
}
896
897void
898_IO_remove_marker (struct _IO_marker *marker)
899{
900 /* Unlink from sb's chain. */
901 struct _IO_marker **ptr = &marker->_sbuf->_markers;
902 for (; ; ptr = &(*ptr)->_next)
903 {
904 if (*ptr == NULL)
905 break;
906 else if (*ptr == marker)
907 {
908 *ptr = marker->_next;
909 return;
910 }
911 }
912 /* FIXME: if _sbuf has a backup area that is no longer needed,
913 should we delete it now, or wait until the next underflow? */
914}
915
916#define BAD_DELTA EOF
917
/* Return the signed distance (in characters) between two markers on
   the same stream.  */
int
_IO_marker_difference (struct _IO_marker *mark1, struct _IO_marker *mark2)
{
  return mark1->_pos - mark2->_pos;
}
923
924/* Return difference between MARK and current position of MARK's stream. */
/* Return difference between MARK and current position of MARK's stream. */
int
_IO_marker_delta (struct _IO_marker *mark)
{
  int cur_pos;
  if (mark->_sbuf == NULL)
    /* The stream was destroyed (its finish method detached us).  */
    return BAD_DELTA;
  /* Compute the current position with the same convention used by
     _IO_init_marker: relative to the read end in backup mode, to the
     read base otherwise.  */
  if (_IO_in_backup (mark->_sbuf))
    cur_pos = mark->_sbuf->_IO_read_ptr - mark->_sbuf->_IO_read_end;
  else
    cur_pos = mark->_sbuf->_IO_read_ptr - mark->_sbuf->_IO_read_base;
  return mark->_pos - cur_pos;
}
937
/* Reposition FP's read pointer to MARK.  A non-negative _pos indexes
   into the main get area; a negative one indexes back from the end of
   the backup area.  DELTA is unused.  Returns 0, or EOF if MARK does
   not belong to FP.  */
int
_IO_seekmark (FILE *fp, struct _IO_marker *mark, int delta)
{
  if (mark->_sbuf != fp)
    return EOF;
  if (mark->_pos >= 0)
    {
      if (_IO_in_backup (fp))
	_IO_switch_to_main_get_area (fp);
      fp->_IO_read_ptr = fp->_IO_read_base + mark->_pos;
    }
  else
    {
      if (!_IO_in_backup (fp))
	_IO_switch_to_backup_area (fp);
      fp->_IO_read_ptr = fp->_IO_read_end + mark->_pos;
    }
  return 0;
}
957
958void
959_IO_unsave_markers (FILE *fp)
960{
961 struct _IO_marker *mark = fp->_markers;
962 if (mark)
963 {
964 fp->_markers = 0;
965 }
966
967 if (_IO_have_backup (fp))
968 _IO_free_backup_area (fp);
969}
970libc_hidden_def (_IO_unsave_markers)
971
/* Default pbackfail method: handle an ungetc of C that cannot simply
   reuse the get area.  Switches to (and, if necessary, allocates or
   doubles) the backup area, then stores C before the read pointer.
   Returns (unsigned char) C, or EOF on allocation failure.  */
int
_IO_default_pbackfail (FILE *fp, int c)
{
  if (fp->_IO_read_ptr > fp->_IO_read_base && !_IO_in_backup (fp)
      && (unsigned char) fp->_IO_read_ptr[-1] == c)
    /* C matches the previous character; just step back over it.  */
    --fp->_IO_read_ptr;
  else
    {
      /* Need to handle a filebuf in write mode (switch to read mode). FIXME!*/
      if (!_IO_in_backup (fp))
	{
	  /* We need to keep the invariant that the main get area
	     logically follows the backup area. */
	  if (fp->_IO_read_ptr > fp->_IO_read_base && _IO_have_backup (fp))
	    {
	      if (save_for_backup (fp, fp->_IO_read_ptr))
		return EOF;
	    }
	  else if (!_IO_have_backup (fp))
	    {
	      /* No backup buffer: allocate one. */
	      /* Use nshort buffer, if unused? (probably not)  FIXME */
	      int backup_size = 128;
	      char *bbuf = (char *) malloc (backup_size);
	      if (bbuf == NULL)
		return EOF;
	      fp->_IO_save_base = bbuf;
	      fp->_IO_save_end = fp->_IO_save_base + backup_size;
	      /* Buffer is empty: data grows downward from the end.  */
	      fp->_IO_backup_base = fp->_IO_save_end;
	    }
	  fp->_IO_read_base = fp->_IO_read_ptr;
	  _IO_switch_to_backup_area (fp);
	}
      else if (fp->_IO_read_ptr <= fp->_IO_read_base)
	{
	  /* Increase size of existing backup buffer. */
	  size_t new_size;
	  size_t old_size = fp->_IO_read_end - fp->_IO_read_base;
	  char *new_buf;
	  new_size = 2 * old_size;
	  new_buf = (char *) malloc (new_size);
	  if (new_buf == NULL)
	    return EOF;
	  /* Old contents keep their position relative to the end.  */
	  memcpy (new_buf + (new_size - old_size), fp->_IO_read_base,
		  old_size);
	  free (fp->_IO_read_base);
	  _IO_setg (fp, new_buf, new_buf + (new_size - old_size),
		    new_buf + new_size);
	  fp->_IO_backup_base = fp->_IO_read_ptr;
	}

      *--fp->_IO_read_ptr = c;
    }
  return (unsigned char) c;
}
libc_hidden_def (_IO_default_pbackfail)
1028
/* Default low-level seek method: not supported.  */
off64_t
_IO_default_seek (FILE *fp, off64_t offset, int dir)
{
  return _IO_pos_BAD;
}
1034
/* Default stat method: not supported.  */
int
_IO_default_stat (FILE *fp, void *st)
{
  return EOF;
}
1040
/* Default low-level read method: always fails.  */
ssize_t
_IO_default_read (FILE *fp, void *data, ssize_t n)
{
  return -1;
}
1046
/* Default low-level write method: consumes nothing.  */
ssize_t
_IO_default_write (FILE *fp, const void *data, ssize_t n)
{
  return 0;
}
1052
/* Default showmanyc method: amount of pending input is unknown.  */
int
_IO_default_showmanyc (FILE *fp)
{
  return -1;
}
1058
/* Default imbue method: attaching a locale is a no-op.  */
void
_IO_default_imbue (FILE *fp, void *locale)
{
}
1063
/* Iterator over the global stream list: begin at the list head.  */
_IO_ITER
_IO_iter_begin (void)
{
  return (_IO_ITER) _IO_list_all;
}
libc_hidden_def (_IO_iter_begin)
1070
/* Iterator over the global stream list: the end sentinel is NULL.  */
_IO_ITER
_IO_iter_end (void)
{
  return NULL;
}
libc_hidden_def (_IO_iter_end)
1077
/* Advance ITER to the next stream via the _chain link.  */
_IO_ITER
_IO_iter_next (_IO_ITER iter)
{
  return iter->_chain;
}
libc_hidden_def (_IO_iter_next)
1084
/* Return the FILE an iterator refers to (iterators are the FILE
   pointers themselves).  */
FILE *
_IO_iter_file (_IO_ITER iter)
{
  return iter;
}
libc_hidden_def (_IO_iter_file)
1091
/* Acquire the global stream-list lock (no-op without _IO_MTSAFE_IO).  */
void
_IO_list_lock (void)
{
#ifdef _IO_MTSAFE_IO
  _IO_lock_lock (list_all_lock);
#endif
}
libc_hidden_def (_IO_list_lock)
1100
/* Release the global stream-list lock (no-op without _IO_MTSAFE_IO).  */
void
_IO_list_unlock (void)
{
#ifdef _IO_MTSAFE_IO
  _IO_lock_unlock (list_all_lock);
#endif
}
libc_hidden_def (_IO_list_unlock)
1109
/* Reinitialize the global stream-list lock to a fresh, unlocked state
   (no-op without _IO_MTSAFE_IO).  */
void
_IO_list_resetlock (void)
{
#ifdef _IO_MTSAFE_IO
  _IO_lock_init (list_all_lock);
#endif
}
libc_hidden_def (_IO_list_resetlock)
1118
1119text_set_element(__libc_atexit, _IO_cleanup);
1120