| 1 | /* Copyright (C) 1993-2018 Free Software Foundation, Inc. | 
| 2 |    This file is part of the GNU C Library. | 
| 3 |    Written by Per Bothner <bothner@cygnus.com>. | 
| 4 |  | 
| 5 |    The GNU C Library is free software; you can redistribute it and/or | 
| 6 |    modify it under the terms of the GNU Lesser General Public | 
| 7 |    License as published by the Free Software Foundation; either | 
| 8 |    version 2.1 of the License, or (at your option) any later version. | 
| 9 |  | 
| 10 |    The GNU C Library is distributed in the hope that it will be useful, | 
| 11 |    but WITHOUT ANY WARRANTY; without even the implied warranty of | 
| 12 |    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU | 
| 13 |    Lesser General Public License for more details. | 
| 14 |  | 
| 15 |    You should have received a copy of the GNU Lesser General Public | 
| 16 |    License along with the GNU C Library; if not, see | 
| 17 |    <http://www.gnu.org/licenses/>. | 
| 18 |  | 
| 19 |    As a special exception, if you link the code in this file with | 
| 20 |    files compiled with a GNU compiler to produce an executable, | 
| 21 |    that does not cause the resulting executable to be covered by | 
| 22 |    the GNU Lesser General Public License.  This exception does not | 
| 23 |    however invalidate any other reasons why the executable file | 
| 24 |    might be covered by the GNU Lesser General Public License. | 
| 25 |    This exception applies to code released by its copyright holders | 
| 26 |    in files containing the exception.  */ | 
| 27 |  | 
| 28 |  | 
| 29 | #include "libioP.h" | 
| 30 | #include <assert.h> | 
| 31 | #include <fcntl.h> | 
| 32 | #include <sys/mman.h> | 
| 33 | #include <sys/param.h> | 
| 34 | #include <sys/types.h> | 
| 35 | #include <sys/stat.h> | 
| 36 | #include <string.h> | 
| 37 | #include <errno.h> | 
| 38 | #include <unistd.h> | 
| 39 | #include <stdlib.h> | 
| 40 | #include "../wcsmbs/wcsmbsload.h" | 
| 41 | #include "../iconv/gconv_charset.h" | 
| 42 | #include "../iconv/gconv_int.h" | 
| 43 | #include <shlib-compat.h> | 
| 44 | #include <not-cancel.h> | 
| 45 | #include <kernel-features.h> | 
| 46 |  | 
| 47 | extern struct __gconv_trans_data __libio_translit attribute_hidden; | 
| 48 |  | 
| 49 | /* An fstream can be in at most one of put mode, get mode, or putback mode. | 
| 50 |    Putback mode is a variant of get mode. | 
| 51 |  | 
| 52 |    In a filebuf, there is only one current position, instead of two | 
| 53 |    separate get and put pointers.  In get mode, the current position | 
| 54 |    is that of gptr(); in put mode that of pptr(). | 
| 55 |  | 
   The position in the buffer that corresponds to the position
   in the external file is normally _IO_read_end, except in putback
   mode, when it is _IO_save_end, and when the file is in append mode,
   since switching from read to write mode automatically sends the
   external file position to the end of the file.
| 61 |    If the field _fb._offset is >= 0, it gives the offset in | 
| 62 |    the file as a whole corresponding to eGptr(). (?) | 
| 63 |  | 
| 64 |    PUT MODE: | 
| 65 |    If a filebuf is in put mode, then all of _IO_read_ptr, _IO_read_end, | 
| 66 |    and _IO_read_base are equal to each other.  These are usually equal | 
| 67 |    to _IO_buf_base, though not necessarily if we have switched from | 
| 68 |    get mode to put mode.  (The reason is to maintain the invariant | 
| 69 |    that _IO_read_end corresponds to the external file position.) | 
| 70 |    _IO_write_base is non-NULL and usually equal to _IO_buf_base. | 
| 71 |    We also have _IO_write_end == _IO_buf_end, but only in fully buffered mode. | 
   The un-flushed characters are those between _IO_write_base and _IO_write_ptr.
| 73 |  | 
| 74 |    GET MODE: | 
| 75 |    If a filebuf is in get or putback mode, eback() != egptr(). | 
| 76 |    In get mode, the unread characters are between gptr() and egptr(). | 
| 77 |    The OS file position corresponds to that of egptr(). | 
| 78 |  | 
| 79 |    PUTBACK MODE: | 
| 80 |    Putback mode is used to remember "excess" characters that have | 
| 81 |    been sputbackc'd in a separate putback buffer. | 
| 82 |    In putback mode, the get buffer points to the special putback buffer. | 
| 83 |    The unread characters are the characters between gptr() and egptr() | 
| 84 |    in the putback buffer, as well as the area between save_gptr() | 
| 85 |    and save_egptr(), which point into the original reserve buffer. | 
| 86 |    (The pointers save_gptr() and save_egptr() are the values | 
| 87 |    of gptr() and egptr() at the time putback mode was entered.) | 
| 88 |    The OS position corresponds to that of save_egptr(). | 
| 89 |  | 
| 90 |    LINE BUFFERED OUTPUT: | 
| 91 |    During line buffered output, _IO_write_base==base() && epptr()==base(). | 
   However, pptr() may be anywhere between base() and ebuf().
| 93 |    This forces a call to filebuf::overflow(int C) on every put. | 
| 94 |    If there is more space in the buffer, and C is not a '\n', | 
| 95 |    then C is inserted, and pptr() incremented. | 
| 96 |  | 
| 97 |    UNBUFFERED STREAMS: | 
   If a filebuf is unbuffered(), then _shortbuf[1] is used as the buffer.
| 99 | */ | 
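/* Illustrative sketch (not part of the build): the put-mode invariants
   described above can be observed from a user program, assuming the
   pre-2.28 layout in which the _IO_* members of FILE are visible through
   <stdio.h>.  The helper name and file name are hypothetical.

     #include <assert.h>
     #include <stdio.h>

     static void
     show_put_mode (void)
     {
       FILE *fp = fopen ("/tmp/demo.txt", "w");
       if (fp == NULL)
         return;
       fputc ('x', fp);   // switches the stream into put mode
       // In put mode the three read pointers coincide ...
       assert (fp->_IO_read_ptr == fp->_IO_read_base
               && fp->_IO_read_ptr == fp->_IO_read_end);
       // ... and the un-flushed bytes live between _IO_write_base
       // and _IO_write_ptr.
       assert (fp->_IO_write_ptr - fp->_IO_write_base == 1);
       fclose (fp);
     }
*/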
| 100 |  | 
| 101 | #define CLOSED_FILEBUF_FLAGS \ | 
| 102 |   (_IO_IS_FILEBUF+_IO_NO_READS+_IO_NO_WRITES+_IO_TIED_PUT_GET) | 
| 103 |  | 
| 104 |  | 
| 105 | void | 
| 106 | _IO_new_file_init_internal (struct _IO_FILE_plus *fp) | 
| 107 | { | 
  /* POSIX.1 allows another file handle to be used to change the position
     of our file descriptor.  Hence we don't know the actual position
     until we do the first fseek (and until a following fflush). */
| 111 |   fp->file._offset = _IO_pos_BAD; | 
| 112 |   fp->file._IO_file_flags |= CLOSED_FILEBUF_FLAGS; | 
| 113 |  | 
| 114 |   _IO_link_in (fp); | 
| 115 |   fp->file._fileno = -1; | 
| 116 | } | 
| 117 |  | 
| 118 | /* External version of _IO_new_file_init_internal which switches off | 
| 119 |    vtable validation.  */ | 
| 120 | void | 
| 121 | _IO_new_file_init (struct _IO_FILE_plus *fp) | 
| 122 | { | 
| 123 |   IO_set_accept_foreign_vtables (&_IO_vtable_check); | 
| 124 |   _IO_new_file_init_internal (fp); | 
| 125 | } | 
| 126 |  | 
| 127 | int | 
| 128 | _IO_new_file_close_it (_IO_FILE *fp) | 
| 129 | { | 
| 130 |   int write_status; | 
| 131 |   if (!_IO_file_is_open (fp)) | 
| 132 |     return EOF; | 
| 133 |  | 
| 134 |   if ((fp->_flags & _IO_NO_WRITES) == 0 | 
| 135 |       && (fp->_flags & _IO_CURRENTLY_PUTTING) != 0) | 
| 136 |     write_status = _IO_do_flush (fp); | 
| 137 |   else | 
| 138 |     write_status = 0; | 
| 139 |  | 
| 140 |   _IO_unsave_markers (fp); | 
| 141 |  | 
| 142 |   int close_status = ((fp->_flags2 & _IO_FLAGS2_NOCLOSE) == 0 | 
| 143 | 		      ? _IO_SYSCLOSE (fp) : 0); | 
| 144 |  | 
| 145 |   /* Free buffer. */ | 
| 146 |   if (fp->_mode > 0) | 
| 147 |     { | 
| 148 |       if (_IO_have_wbackup (fp)) | 
| 149 | 	_IO_free_wbackup_area (fp); | 
| 150 |       _IO_wsetb (fp, NULL, NULL, 0); | 
| 151 |       _IO_wsetg (fp, NULL, NULL, NULL); | 
| 152 |       _IO_wsetp (fp, NULL, NULL); | 
| 153 |     } | 
| 154 |   _IO_setb (fp, NULL, NULL, 0); | 
| 155 |   _IO_setg (fp, NULL, NULL, NULL); | 
| 156 |   _IO_setp (fp, NULL, NULL); | 
| 157 |  | 
| 158 |   _IO_un_link ((struct _IO_FILE_plus *) fp); | 
| 159 |   fp->_flags = _IO_MAGIC|CLOSED_FILEBUF_FLAGS; | 
| 160 |   fp->_fileno = -1; | 
| 161 |   fp->_offset = _IO_pos_BAD; | 
| 162 |  | 
| 163 |   return close_status ? close_status : write_status; | 
| 164 | } | 
| 165 | libc_hidden_ver (_IO_new_file_close_it, _IO_file_close_it) | 
| 166 |  | 
| 167 | void | 
| 168 | _IO_new_file_finish (_IO_FILE *fp, int dummy) | 
| 169 | { | 
| 170 |   if (_IO_file_is_open (fp)) | 
| 171 |     { | 
| 172 |       _IO_do_flush (fp); | 
| 173 |       if (!(fp->_flags & _IO_DELETE_DONT_CLOSE)) | 
| 174 | 	_IO_SYSCLOSE (fp); | 
| 175 |     } | 
| 176 |   _IO_default_finish (fp, 0); | 
| 177 | } | 
| 178 | libc_hidden_ver (_IO_new_file_finish, _IO_file_finish) | 
| 179 |  | 
| 180 | _IO_FILE * | 
| 181 | _IO_file_open (_IO_FILE *fp, const char *filename, int posix_mode, int prot, | 
| 182 | 	       int read_write, int is32not64) | 
| 183 | { | 
| 184 |   int fdesc; | 
| 185 |   if (__glibc_unlikely (fp->_flags2 & _IO_FLAGS2_NOTCANCEL)) | 
| 186 |     fdesc = __open_nocancel (filename, | 
| 187 | 			     posix_mode | (is32not64 ? 0 : O_LARGEFILE), prot); | 
| 188 |   else | 
| 189 |     fdesc = __open (filename, posix_mode | (is32not64 ? 0 : O_LARGEFILE), prot); | 
| 190 |   if (fdesc < 0) | 
| 191 |     return NULL; | 
| 192 |   fp->_fileno = fdesc; | 
| 193 |   _IO_mask_flags (fp, read_write,_IO_NO_READS+_IO_NO_WRITES+_IO_IS_APPENDING); | 
| 194 |   /* For append mode, send the file offset to the end of the file.  Don't | 
| 195 |      update the offset cache though, since the file handle is not active.  */ | 
| 196 |   if ((read_write & (_IO_IS_APPENDING | _IO_NO_READS)) | 
| 197 |       == (_IO_IS_APPENDING | _IO_NO_READS)) | 
| 198 |     { | 
| 199 |       _IO_off64_t new_pos = _IO_SYSSEEK (fp, 0, _IO_seek_end); | 
| 200 |       if (new_pos == _IO_pos_BAD && errno != ESPIPE) | 
| 201 | 	{ | 
| 202 | 	  __close_nocancel (fdesc); | 
| 203 | 	  return NULL; | 
| 204 | 	} | 
| 205 |     } | 
| 206 |   _IO_link_in ((struct _IO_FILE_plus *) fp); | 
| 207 |   return fp; | 
| 208 | } | 
| 209 | libc_hidden_def (_IO_file_open) | 
| 210 |  | 
| 211 | _IO_FILE * | 
| 212 | _IO_new_file_fopen (_IO_FILE *fp, const char *filename, const char *mode, | 
| 213 | 		    int is32not64) | 
| 214 | { | 
| 215 |   int oflags = 0, omode; | 
| 216 |   int read_write; | 
| 217 |   int oprot = 0666; | 
| 218 |   int i; | 
| 219 |   _IO_FILE *result; | 
| 220 |   const char *cs; | 
| 221 |   const char *last_recognized; | 
| 222 |  | 
| 223 |   if (_IO_file_is_open (fp)) | 
| 224 |     return 0; | 
| 225 |   switch (*mode) | 
| 226 |     { | 
| 227 |     case 'r': | 
| 228 |       omode = O_RDONLY; | 
| 229 |       read_write = _IO_NO_WRITES; | 
| 230 |       break; | 
| 231 |     case 'w': | 
| 232 |       omode = O_WRONLY; | 
| 233 |       oflags = O_CREAT|O_TRUNC; | 
| 234 |       read_write = _IO_NO_READS; | 
| 235 |       break; | 
| 236 |     case 'a': | 
| 237 |       omode = O_WRONLY; | 
| 238 |       oflags = O_CREAT|O_APPEND; | 
| 239 |       read_write = _IO_NO_READS|_IO_IS_APPENDING; | 
| 240 |       break; | 
| 241 |     default: | 
| 242 |       __set_errno (EINVAL); | 
| 243 |       return NULL; | 
| 244 |     } | 
| 245 |   last_recognized = mode; | 
| 246 |   for (i = 1; i < 7; ++i) | 
| 247 |     { | 
| 248 |       switch (*++mode) | 
| 249 | 	{ | 
| 250 | 	case '\0': | 
| 251 | 	  break; | 
| 252 | 	case '+': | 
| 253 | 	  omode = O_RDWR; | 
| 254 | 	  read_write &= _IO_IS_APPENDING; | 
| 255 | 	  last_recognized = mode; | 
| 256 | 	  continue; | 
| 257 | 	case 'x': | 
| 258 | 	  oflags |= O_EXCL; | 
| 259 | 	  last_recognized = mode; | 
| 260 | 	  continue; | 
| 261 | 	case 'b': | 
| 262 | 	  last_recognized = mode; | 
| 263 | 	  continue; | 
| 264 | 	case 'm': | 
| 265 | 	  fp->_flags2 |= _IO_FLAGS2_MMAP; | 
| 266 | 	  continue; | 
| 267 | 	case 'c': | 
| 268 | 	  fp->_flags2 |= _IO_FLAGS2_NOTCANCEL; | 
| 269 | 	  continue; | 
| 270 | 	case 'e': | 
| 271 | 	  oflags |= O_CLOEXEC; | 
| 272 | 	  fp->_flags2 |= _IO_FLAGS2_CLOEXEC; | 
| 273 | 	  continue; | 
| 274 | 	default: | 
| 275 | 	  /* Ignore.  */ | 
| 276 | 	  continue; | 
| 277 | 	} | 
| 278 |       break; | 
| 279 |     } | 
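  /* Worked example (illustrative): a mode string such as "a+bce" is parsed
     above into omode = O_RDWR, oflags = O_CREAT|O_APPEND|O_CLOEXEC and
     read_write = _IO_IS_APPENDING, with _IO_FLAGS2_NOTCANCEL and
     _IO_FLAGS2_CLOEXEC set in _flags2.  Unknown letters are ignored, and
     only the first character plus at most six following flag characters
     are examined.  */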
| 280 |  | 
| 281 |   result = _IO_file_open (fp, filename, omode|oflags, oprot, read_write, | 
| 282 | 			  is32not64); | 
| 283 |  | 
| 284 |   if (result != NULL) | 
| 285 |     { | 
| 286 |       /* Test whether the mode string specifies the conversion.  */ | 
| 287 |       cs = strstr (last_recognized + 1, ",ccs=" ); | 
| 288 |       if (cs != NULL) | 
| 289 | 	{ | 
| 290 | 	  /* Yep.  Load the appropriate conversions and set the orientation | 
| 291 | 	     to wide.  */ | 
| 292 | 	  struct gconv_fcts fcts; | 
| 293 | 	  struct _IO_codecvt *cc; | 
| 294 | 	  char *endp = __strchrnul (cs + 5, ','); | 
| 295 | 	  char *ccs = malloc (endp - (cs + 5) + 3); | 
| 296 |  | 
| 297 | 	  if (ccs == NULL) | 
| 298 | 	    { | 
| 299 | 	      int malloc_err = errno;  /* Whatever malloc failed with.  */ | 
| 300 | 	      (void) _IO_file_close_it (fp); | 
| 301 | 	      __set_errno (malloc_err); | 
| 302 | 	      return NULL; | 
| 303 | 	    } | 
| 304 |  | 
| 305 | 	  *((char *) __mempcpy (ccs, cs + 5, endp - (cs + 5))) = '\0'; | 
| 306 | 	  strip (ccs, ccs); | 
| 307 |  | 
| 308 | 	  if (__wcsmbs_named_conv (&fcts, ccs[2] == '\0' | 
| 309 | 				   ? upstr (ccs, cs + 5) : ccs) != 0) | 
| 310 | 	    { | 
| 311 | 	      /* Something went wrong, we cannot load the conversion modules. | 
| 312 | 		 This means we cannot proceed since the user explicitly asked | 
| 313 | 		 for these.  */ | 
| 314 | 	      (void) _IO_file_close_it (fp); | 
| 315 | 	      free (ccs); | 
| 316 | 	      __set_errno (EINVAL); | 
| 317 | 	      return NULL; | 
| 318 | 	    } | 
| 319 |  | 
| 320 | 	  free (ccs); | 
| 321 |  | 
| 322 | 	  assert (fcts.towc_nsteps == 1); | 
| 323 | 	  assert (fcts.tomb_nsteps == 1); | 
| 324 |  | 
| 325 | 	  fp->_wide_data->_IO_read_ptr = fp->_wide_data->_IO_read_end; | 
| 326 | 	  fp->_wide_data->_IO_write_ptr = fp->_wide_data->_IO_write_base; | 
| 327 |  | 
| 328 | 	  /* Clear the state.  We start all over again.  */ | 
| 329 | 	  memset (&fp->_wide_data->_IO_state, '\0', sizeof (__mbstate_t)); | 
| 330 | 	  memset (&fp->_wide_data->_IO_last_state, '\0', sizeof (__mbstate_t)); | 
| 331 |  | 
| 332 | 	  cc = fp->_codecvt = &fp->_wide_data->_codecvt; | 
| 333 |  | 
| 334 | 	  /* The functions are always the same.  */ | 
| 335 | 	  *cc = __libio_codecvt; | 
| 336 |  | 
| 337 | 	  cc->__cd_in.__cd.__nsteps = fcts.towc_nsteps; | 
| 338 | 	  cc->__cd_in.__cd.__steps = fcts.towc; | 
| 339 |  | 
| 340 | 	  cc->__cd_in.__cd.__data[0].__invocation_counter = 0; | 
| 341 | 	  cc->__cd_in.__cd.__data[0].__internal_use = 1; | 
| 342 | 	  cc->__cd_in.__cd.__data[0].__flags = __GCONV_IS_LAST; | 
| 343 | 	  cc->__cd_in.__cd.__data[0].__statep = &result->_wide_data->_IO_state; | 
| 344 |  | 
| 345 | 	  cc->__cd_out.__cd.__nsteps = fcts.tomb_nsteps; | 
| 346 | 	  cc->__cd_out.__cd.__steps = fcts.tomb; | 
| 347 |  | 
| 348 | 	  cc->__cd_out.__cd.__data[0].__invocation_counter = 0; | 
| 349 | 	  cc->__cd_out.__cd.__data[0].__internal_use = 1; | 
| 350 | 	  cc->__cd_out.__cd.__data[0].__flags | 
| 351 | 	    = __GCONV_IS_LAST | __GCONV_TRANSLIT; | 
| 352 | 	  cc->__cd_out.__cd.__data[0].__statep = | 
| 353 | 	    &result->_wide_data->_IO_state; | 
| 354 |  | 
| 355 | 	  /* From now on use the wide character callback functions.  */ | 
| 356 | 	  _IO_JUMPS_FILE_plus (fp) = fp->_wide_data->_wide_vtable; | 
| 357 |  | 
| 358 | 	  /* Set the mode now.  */ | 
| 359 | 	  result->_mode = 1; | 
| 360 | 	} | 
| 361 |     } | 
| 362 |  | 
| 363 |   return result; | 
| 364 | } | 
| 365 | libc_hidden_ver (_IO_new_file_fopen, _IO_file_fopen) | 
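/* Illustrative sketch (not part of the build): requesting a coded character
   set in the fopen mode string takes the ",ccs=" branch above and leaves the
   stream wide-oriented.  The file name is hypothetical.

     #include <stdio.h>
     #include <wchar.h>

     static void
     open_utf8_stream (void)
     {
       FILE *fp = fopen ("/tmp/demo.txt", "r,ccs=UTF-8");
       if (fp == NULL)
         return;
       // _IO_new_file_fopen already set _mode to 1, so the stream is
       // wide-oriented before any I/O happens.
       if (fwide (fp, 0) > 0)
         {
           wint_t wc = fgetwc (fp);
           (void) wc;
         }
       fclose (fp);
     }
*/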
| 366 |  | 
| 367 | _IO_FILE * | 
| 368 | _IO_new_file_attach (_IO_FILE *fp, int fd) | 
| 369 | { | 
| 370 |   if (_IO_file_is_open (fp)) | 
| 371 |     return NULL; | 
| 372 |   fp->_fileno = fd; | 
| 373 |   fp->_flags &= ~(_IO_NO_READS+_IO_NO_WRITES); | 
| 374 |   fp->_flags |= _IO_DELETE_DONT_CLOSE; | 
  /* Get the current position of the file.  We have to query the OS
     because whatever offset we may have cached could be junk. */
| 377 |   fp->_offset = _IO_pos_BAD; | 
| 378 |   int save_errno = errno; | 
| 379 |   if (_IO_SEEKOFF (fp, (_IO_off64_t)0, _IO_seek_cur, _IOS_INPUT|_IOS_OUTPUT) | 
| 380 |       == _IO_pos_BAD && errno != ESPIPE) | 
| 381 |     return NULL; | 
| 382 |   __set_errno (save_errno); | 
| 383 |   return fp; | 
| 384 | } | 
| 385 | libc_hidden_ver (_IO_new_file_attach, _IO_file_attach) | 
| 386 |  | 
| 387 | _IO_FILE * | 
| 388 | _IO_new_file_setbuf (_IO_FILE *fp, char *p, _IO_ssize_t len) | 
| 389 | { | 
| 390 |   if (_IO_default_setbuf (fp, p, len) == NULL) | 
| 391 |     return NULL; | 
| 392 |  | 
| 393 |   fp->_IO_write_base = fp->_IO_write_ptr = fp->_IO_write_end | 
| 394 |     = fp->_IO_buf_base; | 
| 395 |   _IO_setg (fp, fp->_IO_buf_base, fp->_IO_buf_base, fp->_IO_buf_base); | 
| 396 |  | 
| 397 |   return fp; | 
| 398 | } | 
| 399 | libc_hidden_ver (_IO_new_file_setbuf, _IO_file_setbuf) | 
| 400 |  | 
| 401 |  | 
| 402 | _IO_FILE * | 
| 403 | _IO_file_setbuf_mmap (_IO_FILE *fp, char *p, _IO_ssize_t len) | 
| 404 | { | 
| 405 |   _IO_FILE *result; | 
| 406 |  | 
| 407 |   /* Change the function table.  */ | 
| 408 |   _IO_JUMPS_FILE_plus (fp) = &_IO_file_jumps; | 
| 409 |   fp->_wide_data->_wide_vtable = &_IO_wfile_jumps; | 
| 410 |  | 
| 411 |   /* And perform the normal operation.  */ | 
| 412 |   result = _IO_new_file_setbuf (fp, p, len); | 
| 413 |  | 
| 414 |   /* If the call failed, restore to using mmap.  */ | 
| 415 |   if (result == NULL) | 
| 416 |     { | 
| 417 |       _IO_JUMPS_FILE_plus (fp) = &_IO_file_jumps_mmap; | 
| 418 |       fp->_wide_data->_wide_vtable = &_IO_wfile_jumps_mmap; | 
| 419 |     } | 
| 420 |  | 
| 421 |   return result; | 
| 422 | } | 
| 423 |  | 
| 424 | static _IO_size_t new_do_write (_IO_FILE *, const char *, _IO_size_t); | 
| 425 |  | 
| 426 | /* Write TO_DO bytes from DATA to FP. | 
| 427 |    Then mark FP as having empty buffers. */ | 
| 428 |  | 
| 429 | int | 
| 430 | _IO_new_do_write (_IO_FILE *fp, const char *data, _IO_size_t to_do) | 
| 431 | { | 
| 432 |   return (to_do == 0 | 
| 433 | 	  || (_IO_size_t) new_do_write (fp, data, to_do) == to_do) ? 0 : EOF; | 
| 434 | } | 
| 435 | libc_hidden_ver (_IO_new_do_write, _IO_do_write) | 
| 436 |  | 
| 437 | static | 
| 438 | _IO_size_t | 
| 439 | new_do_write (_IO_FILE *fp, const char *data, _IO_size_t to_do) | 
| 440 | { | 
| 441 |   _IO_size_t count; | 
| 442 |   if (fp->_flags & _IO_IS_APPENDING) | 
    /* On a system without a proper O_APPEND implementation,
       you would need to sys_seek(0, SEEK_END) here, but that is
       neither needed nor desirable on Unix- or POSIX-like systems.
       Instead, just indicate that the offset (before and after) is
       unpredictable. */
| 448 |     fp->_offset = _IO_pos_BAD; | 
| 449 |   else if (fp->_IO_read_end != fp->_IO_write_base) | 
| 450 |     { | 
| 451 |       _IO_off64_t new_pos | 
| 452 | 	= _IO_SYSSEEK (fp, fp->_IO_write_base - fp->_IO_read_end, 1); | 
| 453 |       if (new_pos == _IO_pos_BAD) | 
| 454 | 	return 0; | 
| 455 |       fp->_offset = new_pos; | 
| 456 |     } | 
| 457 |   count = _IO_SYSWRITE (fp, data, to_do); | 
| 458 |   if (fp->_cur_column && count) | 
| 459 |     fp->_cur_column = _IO_adjust_column (fp->_cur_column - 1, data, count) + 1; | 
| 460 |   _IO_setg (fp, fp->_IO_buf_base, fp->_IO_buf_base, fp->_IO_buf_base); | 
| 461 |   fp->_IO_write_base = fp->_IO_write_ptr = fp->_IO_buf_base; | 
| 462 |   fp->_IO_write_end = (fp->_mode <= 0 | 
| 463 | 		       && (fp->_flags & (_IO_LINE_BUF | _IO_UNBUFFERED)) | 
| 464 | 		       ? fp->_IO_buf_base : fp->_IO_buf_end); | 
| 465 |   return count; | 
| 466 | } | 
| 467 |  | 
| 468 | int | 
| 469 | _IO_new_file_underflow (_IO_FILE *fp) | 
| 470 | { | 
| 471 |   _IO_ssize_t count; | 
| 472 | #if 0 | 
| 473 |   /* SysV does not make this test; take it out for compatibility */ | 
| 474 |   if (fp->_flags & _IO_EOF_SEEN) | 
| 475 |     return (EOF); | 
| 476 | #endif | 
| 477 |  | 
| 478 |   if (fp->_flags & _IO_NO_READS) | 
| 479 |     { | 
| 480 |       fp->_flags |= _IO_ERR_SEEN; | 
| 481 |       __set_errno (EBADF); | 
| 482 |       return EOF; | 
| 483 |     } | 
| 484 |   if (fp->_IO_read_ptr < fp->_IO_read_end) | 
| 485 |     return *(unsigned char *) fp->_IO_read_ptr; | 
| 486 |  | 
| 487 |   if (fp->_IO_buf_base == NULL) | 
| 488 |     { | 
| 489 |       /* Maybe we already have a push back pointer.  */ | 
| 490 |       if (fp->_IO_save_base != NULL) | 
| 491 | 	{ | 
| 492 | 	  free (fp->_IO_save_base); | 
| 493 | 	  fp->_flags &= ~_IO_IN_BACKUP; | 
| 494 | 	} | 
| 495 |       _IO_doallocbuf (fp); | 
| 496 |     } | 
| 497 |  | 
| 498 |   /* Flush all line buffered files before reading. */ | 
| 499 |   /* FIXME This can/should be moved to genops ?? */ | 
| 500 |   if (fp->_flags & (_IO_LINE_BUF|_IO_UNBUFFERED)) | 
| 501 |     { | 
| 502 | #if 0 | 
| 503 |       _IO_flush_all_linebuffered (); | 
| 504 | #else | 
      /* We used to flush all line-buffered streams.  This really isn't
	 required by any standard.  My recollection is that
	 traditional Unix systems did this for stdout.  stderr had
	 better not be line buffered.  So we flush only stdout here,
	 explicitly.  --drepper */
| 510 |       _IO_acquire_lock (_IO_stdout); | 
| 511 |  | 
| 512 |       if ((_IO_stdout->_flags & (_IO_LINKED | _IO_NO_WRITES | _IO_LINE_BUF)) | 
| 513 | 	  == (_IO_LINKED | _IO_LINE_BUF)) | 
| 514 | 	_IO_OVERFLOW (_IO_stdout, EOF); | 
| 515 |  | 
| 516 |       _IO_release_lock (_IO_stdout); | 
| 517 | #endif | 
| 518 |     } | 
| 519 |  | 
| 520 |   _IO_switch_to_get_mode (fp); | 
| 521 |  | 
  /* This is very tricky.  We have to adjust those
     pointers before we call _IO_SYSREAD () since
     we may longjmp () out while waiting for
     input; otherwise those pointers could be left
     inconsistent.  H.J. */
| 526 |   fp->_IO_read_base = fp->_IO_read_ptr = fp->_IO_buf_base; | 
| 527 |   fp->_IO_read_end = fp->_IO_buf_base; | 
| 528 |   fp->_IO_write_base = fp->_IO_write_ptr = fp->_IO_write_end | 
| 529 |     = fp->_IO_buf_base; | 
| 530 |  | 
| 531 |   count = _IO_SYSREAD (fp, fp->_IO_buf_base, | 
| 532 | 		       fp->_IO_buf_end - fp->_IO_buf_base); | 
| 533 |   if (count <= 0) | 
| 534 |     { | 
| 535 |       if (count == 0) | 
| 536 | 	fp->_flags |= _IO_EOF_SEEN; | 
| 537 |       else | 
| 538 | 	fp->_flags |= _IO_ERR_SEEN, count = 0; | 
    }
| 540 |   fp->_IO_read_end += count; | 
| 541 |   if (count == 0) | 
| 542 |     { | 
| 543 |       /* If a stream is read to EOF, the calling application may switch active | 
| 544 | 	 handles.  As a result, our offset cache would no longer be valid, so | 
| 545 | 	 unset it.  */ | 
| 546 |       fp->_offset = _IO_pos_BAD; | 
| 547 |       return EOF; | 
| 548 |     } | 
| 549 |   if (fp->_offset != _IO_pos_BAD) | 
| 550 |     _IO_pos_adjust (fp->_offset, count); | 
| 551 |   return *(unsigned char *) fp->_IO_read_ptr; | 
| 552 | } | 
| 553 | libc_hidden_ver (_IO_new_file_underflow, _IO_file_underflow) | 
| 554 |  | 
| 555 | /* Guts of underflow callback if we mmap the file.  This stats the file and | 
| 556 |    updates the stream state to match.  In the normal case we return zero. | 
| 557 |    If the file is no longer eligible for mmap, its jump tables are reset to | 
| 558 |    the vanilla ones and we return nonzero.  */ | 
| 559 | static int | 
| 560 | mmap_remap_check (_IO_FILE *fp) | 
| 561 | { | 
| 562 |   struct stat64 st; | 
| 563 |  | 
| 564 |   if (_IO_SYSSTAT (fp, &st) == 0 | 
| 565 |       && S_ISREG (st.st_mode) && st.st_size != 0 | 
| 566 |       /* Limit the file size to 1MB for 32-bit machines.  */ | 
| 567 |       && (sizeof (ptrdiff_t) > 4 || st.st_size < 1*1024*1024)) | 
| 568 |     { | 
| 569 |       const size_t pagesize = __getpagesize (); | 
| 570 | # define ROUNDED(x)	(((x) + pagesize - 1) & ~(pagesize - 1)) | 
| 571 |       if (ROUNDED (st.st_size) < ROUNDED (fp->_IO_buf_end | 
| 572 | 					  - fp->_IO_buf_base)) | 
| 573 | 	{ | 
| 574 | 	  /* We can trim off some pages past the end of the file.  */ | 
| 575 | 	  (void) __munmap (fp->_IO_buf_base + ROUNDED (st.st_size), | 
| 576 | 			   ROUNDED (fp->_IO_buf_end - fp->_IO_buf_base) | 
| 577 | 			   - ROUNDED (st.st_size)); | 
| 578 | 	  fp->_IO_buf_end = fp->_IO_buf_base + st.st_size; | 
| 579 | 	} | 
| 580 |       else if (ROUNDED (st.st_size) > ROUNDED (fp->_IO_buf_end | 
| 581 | 					       - fp->_IO_buf_base)) | 
| 582 | 	{ | 
| 583 | 	  /* The file added some pages.  We need to remap it.  */ | 
| 584 | 	  void *p; | 
| 585 | #ifdef _G_HAVE_MREMAP | 
| 586 | 	  p = __mremap (fp->_IO_buf_base, ROUNDED (fp->_IO_buf_end | 
| 587 | 						   - fp->_IO_buf_base), | 
| 588 | 			ROUNDED (st.st_size), MREMAP_MAYMOVE); | 
| 589 | 	  if (p == MAP_FAILED) | 
| 590 | 	    { | 
| 591 | 	      (void) __munmap (fp->_IO_buf_base, | 
| 592 | 			       fp->_IO_buf_end - fp->_IO_buf_base); | 
| 593 | 	      goto punt; | 
| 594 | 	    } | 
| 595 | #else | 
| 596 | 	  (void) __munmap (fp->_IO_buf_base, | 
| 597 | 			   fp->_IO_buf_end - fp->_IO_buf_base); | 
| 598 | 	  p = __mmap64 (NULL, st.st_size, PROT_READ, MAP_SHARED, | 
| 599 | 			fp->_fileno, 0); | 
| 600 | 	  if (p == MAP_FAILED) | 
| 601 | 	    goto punt; | 
| 602 | #endif | 
| 603 | 	  fp->_IO_buf_base = p; | 
| 604 | 	  fp->_IO_buf_end = fp->_IO_buf_base + st.st_size; | 
| 605 | 	} | 
| 606 |       else | 
| 607 | 	{ | 
| 608 | 	  /* The number of pages didn't change.  */ | 
| 609 | 	  fp->_IO_buf_end = fp->_IO_buf_base + st.st_size; | 
| 610 | 	} | 
| 611 | # undef ROUNDED | 
| 612 |  | 
| 613 |       fp->_offset -= fp->_IO_read_end - fp->_IO_read_ptr; | 
| 614 |       _IO_setg (fp, fp->_IO_buf_base, | 
| 615 | 		fp->_offset < fp->_IO_buf_end - fp->_IO_buf_base | 
| 616 | 		? fp->_IO_buf_base + fp->_offset : fp->_IO_buf_end, | 
| 617 | 		fp->_IO_buf_end); | 
| 618 |  | 
| 619 |       /* If we are already positioned at or past the end of the file, don't | 
| 620 | 	 change the current offset.  If not, seek past what we have mapped, | 
| 621 | 	 mimicking the position left by a normal underflow reading into its | 
| 622 | 	 buffer until EOF.  */ | 
| 623 |  | 
| 624 |       if (fp->_offset < fp->_IO_buf_end - fp->_IO_buf_base) | 
| 625 | 	{ | 
| 626 | 	  if (__lseek64 (fp->_fileno, fp->_IO_buf_end - fp->_IO_buf_base, | 
| 627 | 			 SEEK_SET) | 
| 628 | 	      != fp->_IO_buf_end - fp->_IO_buf_base) | 
| 629 | 	    fp->_flags |= _IO_ERR_SEEN; | 
| 630 | 	  else | 
| 631 | 	    fp->_offset = fp->_IO_buf_end - fp->_IO_buf_base; | 
| 632 | 	} | 
| 633 |  | 
| 634 |       return 0; | 
| 635 |     } | 
| 636 |   else | 
| 637 |     { | 
| 638 |       /* Life is no longer good for mmap.  Punt it.  */ | 
| 639 |       (void) __munmap (fp->_IO_buf_base, | 
| 640 | 		       fp->_IO_buf_end - fp->_IO_buf_base); | 
| 641 |     punt: | 
| 642 |       fp->_IO_buf_base = fp->_IO_buf_end = NULL; | 
| 643 |       _IO_setg (fp, NULL, NULL, NULL); | 
| 644 |       if (fp->_mode <= 0) | 
| 645 | 	_IO_JUMPS_FILE_plus (fp) = &_IO_file_jumps; | 
| 646 |       else | 
| 647 | 	_IO_JUMPS_FILE_plus (fp) = &_IO_wfile_jumps; | 
| 648 |       fp->_wide_data->_wide_vtable = &_IO_wfile_jumps; | 
| 649 |  | 
| 650 |       return 1; | 
| 651 |     } | 
| 652 | } | 
| 653 |  | 
| 654 | /* Special callback replacing the underflow callbacks if we mmap the file.  */ | 
| 655 | int | 
| 656 | _IO_file_underflow_mmap (_IO_FILE *fp) | 
| 657 | { | 
| 658 |   if (fp->_IO_read_ptr < fp->_IO_read_end) | 
| 659 |     return *(unsigned char *) fp->_IO_read_ptr; | 
| 660 |  | 
| 661 |   if (__glibc_unlikely (mmap_remap_check (fp))) | 
| 662 |     /* We punted to the regular file functions.  */ | 
| 663 |     return _IO_UNDERFLOW (fp); | 
| 664 |  | 
| 665 |   if (fp->_IO_read_ptr < fp->_IO_read_end) | 
| 666 |     return *(unsigned char *) fp->_IO_read_ptr; | 
| 667 |  | 
| 668 |   fp->_flags |= _IO_EOF_SEEN; | 
| 669 |   return EOF; | 
| 670 | } | 
| 671 |  | 
| 672 | static void | 
| 673 | decide_maybe_mmap (_IO_FILE *fp) | 
| 674 | { | 
| 675 |   /* We use the file in read-only mode.  This could mean we can | 
| 676 |      mmap the file and use it without any copying.  But not all | 
| 677 |      file descriptors are for mmap-able objects and on 32-bit | 
| 678 |      machines we don't want to map files which are too large since | 
| 679 |      this would require too much virtual memory.  */ | 
| 680 |   struct stat64 st; | 
| 681 |  | 
| 682 |   if (_IO_SYSSTAT (fp, &st) == 0 | 
| 683 |       && S_ISREG (st.st_mode) && st.st_size != 0 | 
| 684 |       /* Limit the file size to 1MB for 32-bit machines.  */ | 
| 685 |       && (sizeof (ptrdiff_t) > 4 || st.st_size < 1*1024*1024) | 
| 686 |       /* Sanity check.  */ | 
| 687 |       && (fp->_offset == _IO_pos_BAD || fp->_offset <= st.st_size)) | 
| 688 |     { | 
| 689 |       /* Try to map the file.  */ | 
| 690 |       void *p; | 
| 691 |  | 
| 692 |       p = __mmap64 (NULL, st.st_size, PROT_READ, MAP_SHARED, fp->_fileno, 0); | 
| 693 |       if (p != MAP_FAILED) | 
| 694 | 	{ | 
| 695 | 	  /* OK, we managed to map the file.  Set the buffer up and use a | 
| 696 | 	     special jump table with simplified underflow functions which | 
| 697 | 	     never tries to read anything from the file.  */ | 
| 698 |  | 
| 699 | 	  if (__lseek64 (fp->_fileno, st.st_size, SEEK_SET) != st.st_size) | 
| 700 | 	    { | 
| 701 | 	      (void) __munmap (p, st.st_size); | 
| 702 | 	      fp->_offset = _IO_pos_BAD; | 
| 703 | 	    } | 
| 704 | 	  else | 
| 705 | 	    { | 
| 706 | 	      _IO_setb (fp, p, (char *) p + st.st_size, 0); | 
| 707 |  | 
| 708 | 	      if (fp->_offset == _IO_pos_BAD) | 
| 709 | 		fp->_offset = 0; | 
| 710 |  | 
| 711 | 	      _IO_setg (fp, p, p + fp->_offset, p + st.st_size); | 
| 712 | 	      fp->_offset = st.st_size; | 
| 713 |  | 
| 714 | 	      if (fp->_mode <= 0) | 
| 715 | 		_IO_JUMPS_FILE_plus (fp) = &_IO_file_jumps_mmap; | 
| 716 | 	      else | 
| 717 | 		_IO_JUMPS_FILE_plus (fp) = &_IO_wfile_jumps_mmap; | 
| 718 | 	      fp->_wide_data->_wide_vtable = &_IO_wfile_jumps_mmap; | 
| 719 |  | 
| 720 | 	      return; | 
| 721 | 	    } | 
| 722 | 	} | 
| 723 |     } | 
| 724 |  | 
| 725 |   /* We couldn't use mmap, so revert to the vanilla file operations.  */ | 
| 726 |  | 
| 727 |   if (fp->_mode <= 0) | 
| 728 |     _IO_JUMPS_FILE_plus (fp) = &_IO_file_jumps; | 
| 729 |   else | 
| 730 |     _IO_JUMPS_FILE_plus (fp) = &_IO_wfile_jumps; | 
| 731 |   fp->_wide_data->_wide_vtable = &_IO_wfile_jumps; | 
| 732 | } | 
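/* Illustrative sketch (not part of the build): decide_maybe_mmap is only
   reached for read-only streams opened with the glibc-specific "m" mode
   flag, and only on the first read attempt.  The file name is hypothetical.

     #include <stdio.h>

     static int
     first_byte_via_mmap (void)
     {
       FILE *fp = fopen ("/etc/hostname", "rm");
       if (fp == NULL)
         return EOF;
       // The first fgetc goes through _IO_file_underflow_maybe_mmap,
       // which calls decide_maybe_mmap; if fstat/mmap succeed, the whole
       // file becomes the stream buffer and no read(2) is ever issued.
       int c = fgetc (fp);
       fclose (fp);
       return c;
     }
*/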
| 733 |  | 
| 734 | int | 
| 735 | _IO_file_underflow_maybe_mmap (_IO_FILE *fp) | 
| 736 | { | 
| 737 |   /* This is the first read attempt.  Choose mmap or vanilla operations | 
| 738 |      and then punt to the chosen underflow routine.  */ | 
| 739 |   decide_maybe_mmap (fp); | 
| 740 |   return _IO_UNDERFLOW (fp); | 
| 741 | } | 
| 742 |  | 
| 743 |  | 
| 744 | int | 
| 745 | _IO_new_file_overflow (_IO_FILE *f, int ch) | 
| 746 | { | 
| 747 |   if (f->_flags & _IO_NO_WRITES) /* SET ERROR */ | 
| 748 |     { | 
| 749 |       f->_flags |= _IO_ERR_SEEN; | 
| 750 |       __set_errno (EBADF); | 
| 751 |       return EOF; | 
| 752 |     } | 
| 753 |   /* If currently reading or no buffer allocated. */ | 
| 754 |   if ((f->_flags & _IO_CURRENTLY_PUTTING) == 0 || f->_IO_write_base == NULL) | 
| 755 |     { | 
| 756 |       /* Allocate a buffer if needed. */ | 
| 757 |       if (f->_IO_write_base == NULL) | 
| 758 | 	{ | 
| 759 | 	  _IO_doallocbuf (f); | 
| 760 | 	  _IO_setg (f, f->_IO_buf_base, f->_IO_buf_base, f->_IO_buf_base); | 
| 761 | 	} | 
| 762 |       /* Otherwise must be currently reading. | 
| 763 | 	 If _IO_read_ptr (and hence also _IO_read_end) is at the buffer end, | 
| 764 | 	 logically slide the buffer forwards one block (by setting the | 
| 765 | 	 read pointers to all point at the beginning of the block).  This | 
| 766 | 	 makes room for subsequent output. | 
| 767 | 	 Otherwise, set the read pointers to _IO_read_end (leaving that | 
| 768 | 	 alone, so it can continue to correspond to the external position). */ | 
| 769 |       if (__glibc_unlikely (_IO_in_backup (f))) | 
| 770 | 	{ | 
| 771 | 	  size_t nbackup = f->_IO_read_end - f->_IO_read_ptr; | 
| 772 | 	  _IO_free_backup_area (f); | 
| 773 | 	  f->_IO_read_base -= MIN (nbackup, | 
| 774 | 				   f->_IO_read_base - f->_IO_buf_base); | 
| 775 | 	  f->_IO_read_ptr = f->_IO_read_base; | 
| 776 | 	} | 
| 777 |  | 
| 778 |       if (f->_IO_read_ptr == f->_IO_buf_end) | 
| 779 | 	f->_IO_read_end = f->_IO_read_ptr = f->_IO_buf_base; | 
| 780 |       f->_IO_write_ptr = f->_IO_read_ptr; | 
| 781 |       f->_IO_write_base = f->_IO_write_ptr; | 
| 782 |       f->_IO_write_end = f->_IO_buf_end; | 
| 783 |       f->_IO_read_base = f->_IO_read_ptr = f->_IO_read_end; | 
| 784 |  | 
| 785 |       f->_flags |= _IO_CURRENTLY_PUTTING; | 
| 786 |       if (f->_mode <= 0 && f->_flags & (_IO_LINE_BUF | _IO_UNBUFFERED)) | 
| 787 | 	f->_IO_write_end = f->_IO_write_ptr; | 
| 788 |     } | 
| 789 |   if (ch == EOF) | 
| 790 |     return _IO_do_write (f, f->_IO_write_base, | 
| 791 | 			 f->_IO_write_ptr - f->_IO_write_base); | 
| 792 |   if (f->_IO_write_ptr == f->_IO_buf_end ) /* Buffer is really full */ | 
| 793 |     if (_IO_do_flush (f) == EOF) | 
| 794 |       return EOF; | 
| 795 |   *f->_IO_write_ptr++ = ch; | 
| 796 |   if ((f->_flags & _IO_UNBUFFERED) | 
| 797 |       || ((f->_flags & _IO_LINE_BUF) && ch == '\n')) | 
| 798 |     if (_IO_do_write (f, f->_IO_write_base, | 
| 799 | 		      f->_IO_write_ptr - f->_IO_write_base) == EOF) | 
| 800 |       return EOF; | 
| 801 |   return (unsigned char) ch; | 
| 802 | } | 
| 803 | libc_hidden_ver (_IO_new_file_overflow, _IO_file_overflow) | 
| 804 |  | 
| 805 | int | 
| 806 | _IO_new_file_sync (_IO_FILE *fp) | 
| 807 | { | 
| 808 |   _IO_ssize_t delta; | 
| 809 |   int retval = 0; | 
| 810 |  | 
| 811 |   /*    char* ptr = cur_ptr(); */ | 
| 812 |   if (fp->_IO_write_ptr > fp->_IO_write_base) | 
| 813 |     if (_IO_do_flush(fp)) return EOF; | 
| 814 |   delta = fp->_IO_read_ptr - fp->_IO_read_end; | 
| 815 |   if (delta != 0) | 
| 816 |     { | 
| 817 | #ifdef TODO | 
| 818 |       if (_IO_in_backup (fp)) | 
| 819 | 	delta -= eGptr () - Gbase (); | 
| 820 | #endif | 
| 821 |       _IO_off64_t new_pos = _IO_SYSSEEK (fp, delta, 1); | 
| 822 |       if (new_pos != (_IO_off64_t) EOF) | 
| 823 | 	fp->_IO_read_end = fp->_IO_read_ptr; | 
| 824 |       else if (errno == ESPIPE) | 
| 825 | 	; /* Ignore error from unseekable devices. */ | 
| 826 |       else | 
| 827 | 	retval = EOF; | 
| 828 |     } | 
| 829 |   if (retval != EOF) | 
| 830 |     fp->_offset = _IO_pos_BAD; | 
| 831 |   /* FIXME: Cleanup - can this be shared? */ | 
| 832 |   /*    setg(base(), ptr, ptr); */ | 
| 833 |   return retval; | 
| 834 | } | 
| 835 | libc_hidden_ver (_IO_new_file_sync, _IO_file_sync) | 
| 836 |  | 
| 837 | static int | 
| 838 | _IO_file_sync_mmap (_IO_FILE *fp) | 
| 839 | { | 
| 840 |   if (fp->_IO_read_ptr != fp->_IO_read_end) | 
| 841 |     { | 
| 842 | #ifdef TODO | 
| 843 |       if (_IO_in_backup (fp)) | 
| 844 | 	delta -= eGptr () - Gbase (); | 
| 845 | #endif | 
| 846 |       if (__lseek64 (fp->_fileno, fp->_IO_read_ptr - fp->_IO_buf_base, | 
| 847 | 		     SEEK_SET) | 
| 848 | 	  != fp->_IO_read_ptr - fp->_IO_buf_base) | 
| 849 | 	{ | 
| 850 | 	  fp->_flags |= _IO_ERR_SEEN; | 
| 851 | 	  return EOF; | 
| 852 | 	} | 
| 853 |     } | 
| 854 |   fp->_offset = fp->_IO_read_ptr - fp->_IO_buf_base; | 
| 855 |   fp->_IO_read_end = fp->_IO_read_ptr = fp->_IO_read_base; | 
| 856 |   return 0; | 
| 857 | } | 
| 858 |  | 
| 859 | /* ftell{,o} implementation.  The only time we modify the state of the stream | 
| 860 |    is when we have unflushed writes.  In that case we seek to the end and | 
| 861 |    record that offset in the stream object.  */ | 
| 862 | static _IO_off64_t | 
| 863 | do_ftell (_IO_FILE *fp) | 
| 864 | { | 
| 865 |   _IO_off64_t result, offset = 0; | 
| 866 |  | 
| 867 |   /* No point looking at unflushed data if we haven't allocated buffers | 
| 868 |      yet.  */ | 
| 869 |   if (fp->_IO_buf_base != NULL) | 
| 870 |     { | 
| 871 |       bool unflushed_writes = fp->_IO_write_ptr > fp->_IO_write_base; | 
| 872 |  | 
| 873 |       bool append_mode = (fp->_flags & _IO_IS_APPENDING) == _IO_IS_APPENDING; | 
| 874 |  | 
| 875 |       /* When we have unflushed writes in append mode, seek to the end of the | 
| 876 | 	 file and record that offset.  This is the only time we change the file | 
| 877 | 	 stream state and it is safe since the file handle is active.  */ | 
| 878 |       if (unflushed_writes && append_mode) | 
| 879 | 	{ | 
| 880 | 	  result = _IO_SYSSEEK (fp, 0, _IO_seek_end); | 
| 881 | 	  if (result == _IO_pos_BAD) | 
| 882 | 	    return EOF; | 
| 883 | 	  else | 
| 884 | 	    fp->_offset = result; | 
| 885 | 	} | 
| 886 |  | 
| 887 |       /* Adjust for unflushed data.  */ | 
| 888 |       if (!unflushed_writes) | 
| 889 | 	offset -= fp->_IO_read_end - fp->_IO_read_ptr; | 
| 890 |       /* We don't trust _IO_read_end to represent the current file offset when | 
| 891 | 	 writing in append mode because the value would have to be shifted to | 
| 892 | 	 the end of the file during a flush.  Use the write base instead, along | 
| 893 | 	 with the new offset we got above when we did a seek to the end of the | 
| 894 | 	 file.  */ | 
| 895 |       else if (append_mode) | 
| 896 | 	offset += fp->_IO_write_ptr - fp->_IO_write_base; | 
| 897 |       /* For all other modes, _IO_read_end represents the file offset.  */ | 
| 898 |       else | 
| 899 | 	offset += fp->_IO_write_ptr - fp->_IO_read_end; | 
| 900 |     } | 
| 901 |  | 
| 902 |   if (fp->_offset != _IO_pos_BAD) | 
| 903 |     result = fp->_offset; | 
| 904 |   else | 
| 905 |     result = _IO_SYSSEEK (fp, 0, _IO_seek_cur); | 
| 906 |  | 
| 907 |   if (result == EOF) | 
| 908 |     return result; | 
| 909 |  | 
| 910 |   result += offset; | 
| 911 |  | 
| 912 |   if (result < 0) | 
| 913 |     { | 
| 914 |       __set_errno (EINVAL); | 
| 915 |       return EOF; | 
| 916 |     } | 
| 917 |  | 
| 918 |   return result; | 
| 919 | } | 
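/* Worked example (illustrative): assume "log.txt" already contains 100
   bytes.  After

     FILE *fp = fopen ("log.txt", "a");
     fputs ("abc", fp);   // three bytes buffered, nothing written yet
     long pos = ftell (fp);

   do_ftell sees unflushed writes on an append-mode stream, seeks to the
   end (offset 100, cached in _offset) and adds
   _IO_write_ptr - _IO_write_base = 3, so pos is 103.  */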
| 920 |  | 
| 921 | _IO_off64_t | 
| 922 | _IO_new_file_seekoff (_IO_FILE *fp, _IO_off64_t offset, int dir, int mode) | 
| 923 | { | 
| 924 |   _IO_off64_t result; | 
| 925 |   _IO_off64_t delta, new_offset; | 
| 926 |   long count; | 
| 927 |  | 
| 928 |   /* Short-circuit into a separate function.  We don't want to mix any | 
| 929 |      functionality and we don't want to touch anything inside the FILE | 
| 930 |      object. */ | 
| 931 |   if (mode == 0) | 
| 932 |     return do_ftell (fp); | 
| 933 |  | 
  /* POSIX.1 8.2.3.7 says that after a call to fflush() the file
     offset of the underlying file must be exact.  */
| 936 |   int must_be_exact = (fp->_IO_read_base == fp->_IO_read_end | 
| 937 | 		       && fp->_IO_write_base == fp->_IO_write_ptr); | 
| 938 |  | 
| 939 |   bool was_writing = (fp->_IO_write_ptr > fp->_IO_write_base | 
| 940 | 		      || _IO_in_put_mode (fp)); | 
| 941 |  | 
| 942 |   /* Flush unwritten characters. | 
| 943 |      (This may do an unneeded write if we seek within the buffer. | 
| 944 |      But to be able to switch to reading, we would need to set | 
| 945 |      egptr to pptr.  That can't be done in the current design, | 
| 946 |      which assumes file_ptr() is eGptr.  Anyway, since we probably | 
| 947 |      end up flushing when we close(), it doesn't make much difference.) | 
| 948 |      FIXME: simulate mem-mapped files. */ | 
| 949 |   if (was_writing && _IO_switch_to_get_mode (fp)) | 
| 950 |     return EOF; | 
| 951 |  | 
| 952 |   if (fp->_IO_buf_base == NULL) | 
| 953 |     { | 
| 954 |       /* It could be that we already have a pushback buffer.  */ | 
| 955 |       if (fp->_IO_read_base != NULL) | 
| 956 | 	{ | 
| 957 | 	  free (fp->_IO_read_base); | 
| 958 | 	  fp->_flags &= ~_IO_IN_BACKUP; | 
| 959 | 	} | 
| 960 |       _IO_doallocbuf (fp); | 
| 961 |       _IO_setp (fp, fp->_IO_buf_base, fp->_IO_buf_base); | 
| 962 |       _IO_setg (fp, fp->_IO_buf_base, fp->_IO_buf_base, fp->_IO_buf_base); | 
| 963 |     } | 
| 964 |  | 
| 965 |   switch (dir) | 
| 966 |     { | 
| 967 |     case _IO_seek_cur: | 
      /* Adjust for read-ahead (bytes in buffer). */
| 969 |       offset -= fp->_IO_read_end - fp->_IO_read_ptr; | 
| 970 |  | 
| 971 |       if (fp->_offset == _IO_pos_BAD) | 
| 972 | 	goto dumb; | 
| 973 |       /* Make offset absolute, assuming current pointer is file_ptr(). */ | 
| 974 |       offset += fp->_offset; | 
| 975 |       if (offset < 0) | 
| 976 | 	{ | 
| 977 | 	  __set_errno (EINVAL); | 
| 978 | 	  return EOF; | 
| 979 | 	} | 
| 980 |  | 
| 981 |       dir = _IO_seek_set; | 
| 982 |       break; | 
| 983 |     case _IO_seek_set: | 
| 984 |       break; | 
| 985 |     case _IO_seek_end: | 
| 986 |       { | 
| 987 | 	struct stat64 st; | 
| 988 | 	if (_IO_SYSSTAT (fp, &st) == 0 && S_ISREG (st.st_mode)) | 
| 989 | 	  { | 
| 990 | 	    offset += st.st_size; | 
| 991 | 	    dir = _IO_seek_set; | 
| 992 | 	  } | 
| 993 | 	else | 
| 994 | 	  goto dumb; | 
| 995 |       } | 
| 996 |     } | 
| 997 |  | 
| 998 |   _IO_free_backup_area (fp); | 
| 999 |  | 
| 1000 |   /* At this point, dir==_IO_seek_set. */ | 
| 1001 |  | 
| 1002 |   /* If destination is within current buffer, optimize: */ | 
| 1003 |   if (fp->_offset != _IO_pos_BAD && fp->_IO_read_base != NULL | 
| 1004 |       && !_IO_in_backup (fp)) | 
| 1005 |     { | 
| 1006 |       _IO_off64_t start_offset = (fp->_offset | 
| 1007 | 				  - (fp->_IO_read_end - fp->_IO_buf_base)); | 
| 1008 |       if (offset >= start_offset && offset < fp->_offset) | 
| 1009 | 	{ | 
| 1010 | 	  _IO_setg (fp, fp->_IO_buf_base, | 
| 1011 | 		    fp->_IO_buf_base + (offset - start_offset), | 
| 1012 | 		    fp->_IO_read_end); | 
| 1013 | 	  _IO_setp (fp, fp->_IO_buf_base, fp->_IO_buf_base); | 
| 1014 |  | 
| 1015 | 	  _IO_mask_flags (fp, 0, _IO_EOF_SEEN); | 
| 1016 | 	  goto resync; | 
| 1017 | 	} | 
| 1018 |     } | 
| 1019 |  | 
| 1020 |   if (fp->_flags & _IO_NO_READS) | 
| 1021 |     goto dumb; | 
| 1022 |  | 
| 1023 |   /* Try to seek to a block boundary, to improve kernel page management. */ | 
| 1024 |   new_offset = offset & ~(fp->_IO_buf_end - fp->_IO_buf_base - 1); | 
| 1025 |   delta = offset - new_offset; | 
| 1026 |   if (delta > fp->_IO_buf_end - fp->_IO_buf_base) | 
| 1027 |     { | 
| 1028 |       new_offset = offset; | 
| 1029 |       delta = 0; | 
| 1030 |     } | 
| 1031 |   result = _IO_SYSSEEK (fp, new_offset, 0); | 
| 1032 |   if (result < 0) | 
| 1033 |     return EOF; | 
| 1034 |   if (delta == 0) | 
| 1035 |     count = 0; | 
| 1036 |   else | 
| 1037 |     { | 
| 1038 |       count = _IO_SYSREAD (fp, fp->_IO_buf_base, | 
| 1039 | 			   (must_be_exact | 
| 1040 | 			    ? delta : fp->_IO_buf_end - fp->_IO_buf_base)); | 
| 1041 |       if (count < delta) | 
| 1042 | 	{ | 
| 1043 | 	  /* We weren't allowed to read, but try to seek the remainder. */ | 
| 1044 | 	  offset = count == EOF ? delta : delta-count; | 
| 1045 | 	  dir = _IO_seek_cur; | 
| 1046 | 	  goto dumb; | 
| 1047 | 	} | 
| 1048 |     } | 
| 1049 |   _IO_setg (fp, fp->_IO_buf_base, fp->_IO_buf_base + delta, | 
| 1050 | 	    fp->_IO_buf_base + count); | 
| 1051 |   _IO_setp (fp, fp->_IO_buf_base, fp->_IO_buf_base); | 
| 1052 |   fp->_offset = result + count; | 
| 1053 |   _IO_mask_flags (fp, 0, _IO_EOF_SEEN); | 
| 1054 |   return offset; | 
| 1055 |  dumb: | 
| 1056 |  | 
| 1057 |   _IO_unsave_markers (fp); | 
| 1058 |   result = _IO_SYSSEEK (fp, offset, dir); | 
| 1059 |   if (result != EOF) | 
| 1060 |     { | 
| 1061 |       _IO_mask_flags (fp, 0, _IO_EOF_SEEN); | 
| 1062 |       fp->_offset = result; | 
| 1063 |       _IO_setg (fp, fp->_IO_buf_base, fp->_IO_buf_base, fp->_IO_buf_base); | 
| 1064 |       _IO_setp (fp, fp->_IO_buf_base, fp->_IO_buf_base); | 
| 1065 |     } | 
| 1066 |   return result; | 
| 1067 |  | 
| 1068 | resync: | 
| 1069 |   /* We need to do it since it is possible that the file offset in | 
| 1070 |      the kernel may be changed behind our back. It may happen when | 
| 1071 |      we fopen a file and then do a fork. One process may access the | 
| 1072 |      file and the kernel file offset will be changed. */ | 
| 1073 |   if (fp->_offset >= 0) | 
| 1074 |     _IO_SYSSEEK (fp, fp->_offset, 0); | 
| 1075 |  | 
| 1076 |   return offset; | 
| 1077 | } | 
| 1078 | libc_hidden_ver (_IO_new_file_seekoff, _IO_file_seekoff) | 
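/* Illustrative sketch (not part of the build): the "destination is within
   current buffer" branch above avoids re-reading data on short seeks.  The
   file name is hypothetical.

     #include <stdio.h>

     static void
     seek_within_buffer (void)
     {
       FILE *fp = fopen ("data.bin", "r");
       if (fp == NULL)
         return;
       fgetc (fp);   // underflow fills the buffer (e.g. one 4096-byte block)
       // If byte 100 is still inside that buffer, the seek only moves the
       // get pointers with _IO_setg and then resyncs the kernel offset;
       // the buffered data is not read again.
       fseek (fp, 100, SEEK_SET);
       fclose (fp);
     }
*/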
| 1079 |  | 
| 1080 | _IO_off64_t | 
| 1081 | _IO_file_seekoff_mmap (_IO_FILE *fp, _IO_off64_t offset, int dir, int mode) | 
| 1082 | { | 
| 1083 |   _IO_off64_t result; | 
| 1084 |  | 
| 1085 |   /* If we are only interested in the current position, calculate it and | 
| 1086 |      return right now.  This calculation does the right thing when we are | 
| 1087 |      using a pushback buffer, but in the usual case has the same value as | 
| 1088 |      (fp->_IO_read_ptr - fp->_IO_buf_base).  */ | 
| 1089 |   if (mode == 0) | 
| 1090 |     return fp->_offset - (fp->_IO_read_end - fp->_IO_read_ptr); | 
| 1091 |  | 
| 1092 |   switch (dir) | 
| 1093 |     { | 
| 1094 |     case _IO_seek_cur: | 
      /* Adjust for read-ahead (bytes in buffer). */
| 1096 |       offset += fp->_IO_read_ptr - fp->_IO_read_base; | 
| 1097 |       break; | 
| 1098 |     case _IO_seek_set: | 
| 1099 |       break; | 
| 1100 |     case _IO_seek_end: | 
| 1101 |       offset += fp->_IO_buf_end - fp->_IO_buf_base; | 
| 1102 |       break; | 
| 1103 |     } | 
  /* At this point, OFFSET is an absolute position; the seek below
     always uses _IO_seek_set.  */
| 1105 |  | 
| 1106 |   if (offset < 0) | 
| 1107 |     { | 
| 1108 |       /* No negative offsets are valid.  */ | 
| 1109 |       __set_errno (EINVAL); | 
| 1110 |       return EOF; | 
| 1111 |     } | 
| 1112 |  | 
| 1113 |   result = _IO_SYSSEEK (fp, offset, 0); | 
| 1114 |   if (result < 0) | 
| 1115 |     return EOF; | 
| 1116 |  | 
| 1117 |   if (offset > fp->_IO_buf_end - fp->_IO_buf_base) | 
| 1118 |     /* One can fseek arbitrarily past the end of the file | 
| 1119 |        and it is meaningless until one attempts to read. | 
| 1120 |        Leave the buffer pointers in EOF state until underflow.  */ | 
| 1121 |     _IO_setg (fp, fp->_IO_buf_base, fp->_IO_buf_end, fp->_IO_buf_end); | 
| 1122 |   else | 
| 1123 |     /* Adjust the read pointers to match the file position, | 
| 1124 |        but so the next read attempt will call underflow.  */ | 
| 1125 |     _IO_setg (fp, fp->_IO_buf_base, fp->_IO_buf_base + offset, | 
| 1126 | 	      fp->_IO_buf_base + offset); | 
| 1127 |  | 
| 1128 |   fp->_offset = result; | 
| 1129 |  | 
| 1130 |   _IO_mask_flags (fp, 0, _IO_EOF_SEEN); | 
| 1131 |  | 
| 1132 |   return offset; | 
| 1133 | } | 
| 1134 |  | 
| 1135 | static _IO_off64_t | 
| 1136 | _IO_file_seekoff_maybe_mmap (_IO_FILE *fp, _IO_off64_t offset, int dir, | 
| 1137 | 			     int mode) | 
| 1138 | { | 
| 1139 |   /* We only get here when we haven't tried to read anything yet. | 
| 1140 |      So there is nothing more useful for us to do here than just | 
| 1141 |      the underlying lseek call.  */ | 
| 1142 |  | 
| 1143 |   _IO_off64_t result = _IO_SYSSEEK (fp, offset, dir); | 
| 1144 |   if (result < 0) | 
| 1145 |     return EOF; | 
| 1146 |  | 
| 1147 |   fp->_offset = result; | 
| 1148 |   return result; | 
| 1149 | } | 
| 1150 |  | 
| 1151 | _IO_ssize_t | 
| 1152 | _IO_file_read (_IO_FILE *fp, void *buf, _IO_ssize_t size) | 
| 1153 | { | 
| 1154 |   return (__builtin_expect (fp->_flags2 & _IO_FLAGS2_NOTCANCEL, 0) | 
| 1155 | 	  ? __read_nocancel (fp->_fileno, buf, size) | 
| 1156 | 	  : __read (fp->_fileno, buf, size)); | 
| 1157 | } | 
| 1158 | libc_hidden_def (_IO_file_read) | 
| 1159 |  | 
| 1160 | _IO_off64_t | 
| 1161 | _IO_file_seek (_IO_FILE *fp, _IO_off64_t offset, int dir) | 
| 1162 | { | 
| 1163 |   return __lseek64 (fp->_fileno, offset, dir); | 
| 1164 | } | 
| 1165 | libc_hidden_def (_IO_file_seek) | 
| 1166 |  | 
| 1167 | int | 
| 1168 | _IO_file_stat (_IO_FILE *fp, void *st) | 
| 1169 | { | 
| 1170 |   return __fxstat64 (_STAT_VER, fp->_fileno, (struct stat64 *) st); | 
| 1171 | } | 
| 1172 | libc_hidden_def (_IO_file_stat) | 
| 1173 |  | 
| 1174 | int | 
| 1175 | _IO_file_close_mmap (_IO_FILE *fp) | 
| 1176 | { | 
| 1177 |   /* In addition to closing the file descriptor we have to unmap the file.  */ | 
| 1178 |   (void) __munmap (fp->_IO_buf_base, fp->_IO_buf_end - fp->_IO_buf_base); | 
| 1179 |   fp->_IO_buf_base = fp->_IO_buf_end = NULL; | 
| 1180 |   /* Cancelling close should be avoided if possible since it leaves an | 
| 1181 |      unrecoverable state behind.  */ | 
| 1182 |   return __close_nocancel (fp->_fileno); | 
| 1183 | } | 
| 1184 |  | 
| 1185 | int | 
| 1186 | _IO_file_close (_IO_FILE *fp) | 
| 1187 | { | 
| 1188 |   /* Cancelling close should be avoided if possible since it leaves an | 
| 1189 |      unrecoverable state behind.  */ | 
| 1190 |   return __close_nocancel (fp->_fileno); | 
| 1191 | } | 
| 1192 | libc_hidden_def (_IO_file_close) | 
| 1193 |  | 
| 1194 | _IO_ssize_t | 
| 1195 | _IO_new_file_write (_IO_FILE *f, const void *data, _IO_ssize_t n) | 
| 1196 | { | 
| 1197 |   _IO_ssize_t to_do = n; | 
| 1198 |   while (to_do > 0) | 
| 1199 |     { | 
| 1200 |       _IO_ssize_t count = (__builtin_expect (f->_flags2 | 
| 1201 | 					     & _IO_FLAGS2_NOTCANCEL, 0) | 
| 1202 | 			   ? __write_nocancel (f->_fileno, data, to_do) | 
| 1203 | 			   : __write (f->_fileno, data, to_do)); | 
| 1204 |       if (count < 0) | 
| 1205 | 	{ | 
| 1206 | 	  f->_flags |= _IO_ERR_SEEN; | 
| 1207 | 	  break; | 
| 1208 | 	} | 
| 1209 |       to_do -= count; | 
| 1210 |       data = (void *) ((char *) data + count); | 
| 1211 |     } | 
| 1212 |   n -= to_do; | 
| 1213 |   if (f->_offset >= 0) | 
| 1214 |     f->_offset += n; | 
| 1215 |   return n; | 
| 1216 | } | 
| 1217 |  | 
| 1218 | _IO_size_t | 
| 1219 | _IO_new_file_xsputn (_IO_FILE *f, const void *data, _IO_size_t n) | 
| 1220 | { | 
| 1221 |   const char *s = (const char *) data; | 
| 1222 |   _IO_size_t to_do = n; | 
| 1223 |   int must_flush = 0; | 
| 1224 |   _IO_size_t count = 0; | 
| 1225 |  | 
| 1226 |   if (n <= 0) | 
| 1227 |     return 0; | 
| 1228 |   /* This is an optimized implementation. | 
| 1229 |      If the amount to be written straddles a block boundary | 
| 1230 |      (or the filebuf is unbuffered), use sys_write directly. */ | 
| 1231 |  | 
| 1232 |   /* First figure out how much space is available in the buffer. */ | 
| 1233 |   if ((f->_flags & _IO_LINE_BUF) && (f->_flags & _IO_CURRENTLY_PUTTING)) | 
| 1234 |     { | 
| 1235 |       count = f->_IO_buf_end - f->_IO_write_ptr; | 
| 1236 |       if (count >= n) | 
| 1237 | 	{ | 
| 1238 | 	  const char *p; | 
| 1239 | 	  for (p = s + n; p > s; ) | 
| 1240 | 	    { | 
| 1241 | 	      if (*--p == '\n') | 
| 1242 | 		{ | 
| 1243 | 		  count = p - s + 1; | 
| 1244 | 		  must_flush = 1; | 
| 1245 | 		  break; | 
| 1246 | 		} | 
| 1247 | 	    } | 
| 1248 | 	} | 
| 1249 |     } | 
| 1250 |   else if (f->_IO_write_end > f->_IO_write_ptr) | 
| 1251 |     count = f->_IO_write_end - f->_IO_write_ptr; /* Space available. */ | 
| 1252 |  | 
| 1253 |   /* Then fill the buffer. */ | 
| 1254 |   if (count > 0) | 
| 1255 |     { | 
| 1256 |       if (count > to_do) | 
| 1257 | 	count = to_do; | 
| 1258 |       f->_IO_write_ptr = __mempcpy (f->_IO_write_ptr, s, count); | 
| 1259 |       s += count; | 
| 1260 |       to_do -= count; | 
| 1261 |     } | 
| 1262 |   if (to_do + must_flush > 0) | 
| 1263 |     { | 
| 1264 |       _IO_size_t block_size, do_write; | 
| 1265 |       /* Next flush the (full) buffer. */ | 
| 1266 |       if (_IO_OVERFLOW (f, EOF) == EOF) | 
	/* If nothing else has to be written, returning N - TO_DO would
	   wrongly tell the caller that everything has been written, so
	   return EOF to signal the error instead.  */
| 1269 | 	return to_do == 0 ? EOF : n - to_do; | 
| 1270 |  | 
| 1271 |       /* Try to maintain alignment: write a whole number of blocks.  */ | 
| 1272 |       block_size = f->_IO_buf_end - f->_IO_buf_base; | 
| 1273 |       do_write = to_do - (block_size >= 128 ? to_do % block_size : 0); | 
| 1274 |  | 
| 1275 |       if (do_write) | 
| 1276 | 	{ | 
| 1277 | 	  count = new_do_write (f, s, do_write); | 
| 1278 | 	  to_do -= count; | 
| 1279 | 	  if (count < do_write) | 
| 1280 | 	    return n - to_do; | 
| 1281 | 	} | 
| 1282 |  | 
| 1283 |       /* Now write out the remainder.  Normally, this will fit in the | 
| 1284 | 	 buffer, but it's somewhat messier for line-buffered files, | 
| 1285 | 	 so we let _IO_default_xsputn handle the general case. */ | 
| 1286 |       if (to_do) | 
| 1287 | 	to_do -= _IO_default_xsputn (f, s+do_write, to_do); | 
| 1288 |     } | 
| 1289 |   return n - to_do; | 
| 1290 | } | 
| 1291 | libc_hidden_ver (_IO_new_file_xsputn, _IO_file_xsputn) | 
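/* Worked example (illustrative): with a 4096-byte buffer already holding
   1000 pending bytes, an fwrite of 10000 bytes first copies 3096 bytes into
   the buffer, flushes the full buffer via _IO_OVERFLOW, then writes
   do_write = 6904 - 6904 % 4096 = 4096 bytes directly from the caller's
   buffer with new_do_write, and finally lets _IO_default_xsputn buffer the
   remaining 2808 bytes.  */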
| 1292 |  | 
| 1293 | _IO_size_t | 
| 1294 | _IO_file_xsgetn (_IO_FILE *fp, void *data, _IO_size_t n) | 
| 1295 | { | 
| 1296 |   _IO_size_t want, have; | 
| 1297 |   _IO_ssize_t count; | 
| 1298 |   char *s = data; | 
| 1299 |  | 
| 1300 |   want = n; | 
| 1301 |  | 
| 1302 |   if (fp->_IO_buf_base == NULL) | 
| 1303 |     { | 
| 1304 |       /* Maybe we already have a push back pointer.  */ | 
| 1305 |       if (fp->_IO_save_base != NULL) | 
| 1306 | 	{ | 
| 1307 | 	  free (fp->_IO_save_base); | 
| 1308 | 	  fp->_flags &= ~_IO_IN_BACKUP; | 
| 1309 | 	} | 
| 1310 |       _IO_doallocbuf (fp); | 
| 1311 |     } | 
| 1312 |  | 
| 1313 |   while (want > 0) | 
| 1314 |     { | 
| 1315 |       have = fp->_IO_read_end - fp->_IO_read_ptr; | 
| 1316 |       if (want <= have) | 
| 1317 | 	{ | 
| 1318 | 	  memcpy (s, fp->_IO_read_ptr, want); | 
| 1319 | 	  fp->_IO_read_ptr += want; | 
| 1320 | 	  want = 0; | 
| 1321 | 	} | 
| 1322 |       else | 
| 1323 | 	{ | 
| 1324 | 	  if (have > 0) | 
| 1325 | 	    { | 
| 1326 | 	      s = __mempcpy (s, fp->_IO_read_ptr, have); | 
| 1327 | 	      want -= have; | 
| 1328 | 	      fp->_IO_read_ptr += have; | 
| 1329 | 	    } | 
| 1330 |  | 
| 1331 | 	  /* Check for backup and repeat */ | 
| 1332 | 	  if (_IO_in_backup (fp)) | 
| 1333 | 	    { | 
| 1334 | 	      _IO_switch_to_main_get_area (fp); | 
| 1335 | 	      continue; | 
| 1336 | 	    } | 
| 1337 |  | 
| 1338 | 	  /* If we now want less than a buffer, underflow and repeat | 
| 1339 | 	     the copy.  Otherwise, _IO_SYSREAD directly to | 
| 1340 | 	     the user buffer. */ | 
| 1341 | 	  if (fp->_IO_buf_base | 
| 1342 | 	      && want < (size_t) (fp->_IO_buf_end - fp->_IO_buf_base)) | 
| 1343 | 	    { | 
| 1344 | 	      if (__underflow (fp) == EOF) | 
| 1345 | 		break; | 
| 1346 |  | 
| 1347 | 	      continue; | 
| 1348 | 	    } | 
| 1349 |  | 
| 1350 | 	  /* These must be set before the sysread as we might longjmp out | 
| 1351 | 	     waiting for input. */ | 
| 1352 | 	  _IO_setg (fp, fp->_IO_buf_base, fp->_IO_buf_base, fp->_IO_buf_base); | 
| 1353 | 	  _IO_setp (fp, fp->_IO_buf_base, fp->_IO_buf_base); | 
| 1354 |  | 
| 1355 | 	  /* Try to maintain alignment: read a whole number of blocks.  */ | 
| 1356 | 	  count = want; | 
| 1357 | 	  if (fp->_IO_buf_base) | 
| 1358 | 	    { | 
| 1359 | 	      _IO_size_t block_size = fp->_IO_buf_end - fp->_IO_buf_base; | 
| 1360 | 	      if (block_size >= 128) | 
| 1361 | 		count -= want % block_size; | 
| 1362 | 	    } | 
| 1363 |  | 
| 1364 | 	  count = _IO_SYSREAD (fp, s, count); | 
| 1365 | 	  if (count <= 0) | 
| 1366 | 	    { | 
| 1367 | 	      if (count == 0) | 
| 1368 | 		fp->_flags |= _IO_EOF_SEEN; | 
| 1369 | 	      else | 
| 1370 | 		fp->_flags |= _IO_ERR_SEEN; | 
| 1371 |  | 
| 1372 | 	      break; | 
| 1373 | 	    } | 
| 1374 |  | 
| 1375 | 	  s += count; | 
| 1376 | 	  want -= count; | 
| 1377 | 	  if (fp->_offset != _IO_pos_BAD) | 
| 1378 | 	    _IO_pos_adjust (fp->_offset, count); | 
| 1379 | 	} | 
| 1380 |     } | 
| 1381 |  | 
| 1382 |   return n - want; | 
| 1383 | } | 
| 1384 | libc_hidden_def (_IO_file_xsgetn) | 
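/* Worked example (illustrative): with a 4096-byte buffer and no buffered
   input, an fread of 10000 bytes copies nothing from the buffer, reads
   count = 10000 - 10000 % 4096 = 8192 bytes straight into the caller's
   buffer with _IO_SYSREAD, and then satisfies the remaining 1808 bytes via
   __underflow followed by a copy from the refilled stream buffer.  */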
| 1385 |  | 
| 1386 | static _IO_size_t | 
| 1387 | _IO_file_xsgetn_mmap (_IO_FILE *fp, void *data, _IO_size_t n) | 
| 1388 | { | 
| 1389 |   _IO_size_t have; | 
| 1390 |   char *read_ptr = fp->_IO_read_ptr; | 
| 1391 |   char *s = (char *) data; | 
| 1392 |  | 
| 1393 |   have = fp->_IO_read_end - fp->_IO_read_ptr; | 
| 1394 |  | 
| 1395 |   if (have < n) | 
| 1396 |     { | 
| 1397 |       if (__glibc_unlikely (_IO_in_backup (fp))) | 
| 1398 | 	{ | 
| 1399 | 	  s = __mempcpy (s, read_ptr, have); | 
| 1400 | 	  n -= have; | 
| 1401 | 	  _IO_switch_to_main_get_area (fp); | 
| 1402 | 	  read_ptr = fp->_IO_read_ptr; | 
| 1403 | 	  have = fp->_IO_read_end - fp->_IO_read_ptr; | 
| 1404 | 	} | 
| 1405 |  | 
| 1406 |       if (have < n) | 
| 1407 | 	{ | 
| 1408 | 	  /* Check that we are mapping all of the file, in case it grew.  */ | 
| 1409 | 	  if (__glibc_unlikely (mmap_remap_check (fp))) | 
| 1410 | 	    /* We punted mmap, so complete with the vanilla code.  */ | 
| 1411 | 	    return s - (char *) data + _IO_XSGETN (fp, data, n); | 
| 1412 |  | 
| 1413 | 	  read_ptr = fp->_IO_read_ptr; | 
| 1414 | 	  have = fp->_IO_read_end - read_ptr; | 
| 1415 | 	} | 
| 1416 |     } | 
| 1417 |  | 
| 1418 |   if (have < n) | 
| 1419 |     fp->_flags |= _IO_EOF_SEEN; | 
| 1420 |  | 
| 1421 |   if (have != 0) | 
| 1422 |     { | 
| 1423 |       have = MIN (have, n); | 
| 1424 |       s = __mempcpy (s, read_ptr, have); | 
| 1425 |       fp->_IO_read_ptr = read_ptr + have; | 
| 1426 |     } | 
| 1427 |  | 
| 1428 |   return s - (char *) data; | 
| 1429 | } | 
| 1430 |  | 
| 1431 | static _IO_size_t | 
| 1432 | _IO_file_xsgetn_maybe_mmap (_IO_FILE *fp, void *data, _IO_size_t n) | 
| 1433 | { | 
| 1434 |   /* We only get here if this is the first attempt to read something. | 
| 1435 |      Decide which operations to use and then punt to the chosen one.  */ | 
| 1436 |  | 
| 1437 |   decide_maybe_mmap (fp); | 
| 1438 |   return _IO_XSGETN (fp, data, n); | 
| 1439 | } | 
| 1440 |  | 
| 1441 | versioned_symbol (libc, _IO_new_do_write, _IO_do_write, GLIBC_2_1); | 
| 1442 | versioned_symbol (libc, _IO_new_file_attach, _IO_file_attach, GLIBC_2_1); | 
| 1443 | versioned_symbol (libc, _IO_new_file_close_it, _IO_file_close_it, GLIBC_2_1); | 
| 1444 | versioned_symbol (libc, _IO_new_file_finish, _IO_file_finish, GLIBC_2_1); | 
| 1445 | versioned_symbol (libc, _IO_new_file_fopen, _IO_file_fopen, GLIBC_2_1); | 
| 1446 | versioned_symbol (libc, _IO_new_file_init, _IO_file_init, GLIBC_2_1); | 
| 1447 | versioned_symbol (libc, _IO_new_file_setbuf, _IO_file_setbuf, GLIBC_2_1); | 
| 1448 | versioned_symbol (libc, _IO_new_file_sync, _IO_file_sync, GLIBC_2_1); | 
| 1449 | versioned_symbol (libc, _IO_new_file_overflow, _IO_file_overflow, GLIBC_2_1); | 
| 1450 | versioned_symbol (libc, _IO_new_file_seekoff, _IO_file_seekoff, GLIBC_2_1); | 
| 1451 | versioned_symbol (libc, _IO_new_file_underflow, _IO_file_underflow, GLIBC_2_1); | 
| 1452 | versioned_symbol (libc, _IO_new_file_write, _IO_file_write, GLIBC_2_1); | 
| 1453 | versioned_symbol (libc, _IO_new_file_xsputn, _IO_file_xsputn, GLIBC_2_1); | 
| 1454 |  | 
| 1455 | const struct _IO_jump_t _IO_file_jumps libio_vtable = | 
| 1456 | { | 
| 1457 |   JUMP_INIT_DUMMY, | 
| 1458 |   JUMP_INIT(finish, _IO_file_finish), | 
| 1459 |   JUMP_INIT(overflow, _IO_file_overflow), | 
| 1460 |   JUMP_INIT(underflow, _IO_file_underflow), | 
| 1461 |   JUMP_INIT(uflow, _IO_default_uflow), | 
| 1462 |   JUMP_INIT(pbackfail, _IO_default_pbackfail), | 
| 1463 |   JUMP_INIT(xsputn, _IO_file_xsputn), | 
| 1464 |   JUMP_INIT(xsgetn, _IO_file_xsgetn), | 
| 1465 |   JUMP_INIT(seekoff, _IO_new_file_seekoff), | 
| 1466 |   JUMP_INIT(seekpos, _IO_default_seekpos), | 
| 1467 |   JUMP_INIT(setbuf, _IO_new_file_setbuf), | 
| 1468 |   JUMP_INIT(sync, _IO_new_file_sync), | 
| 1469 |   JUMP_INIT(doallocate, _IO_file_doallocate), | 
| 1470 |   JUMP_INIT(read, _IO_file_read), | 
| 1471 |   JUMP_INIT(write, _IO_new_file_write), | 
| 1472 |   JUMP_INIT(seek, _IO_file_seek), | 
| 1473 |   JUMP_INIT(close, _IO_file_close), | 
| 1474 |   JUMP_INIT(stat, _IO_file_stat), | 
| 1475 |   JUMP_INIT(showmanyc, _IO_default_showmanyc), | 
| 1476 |   JUMP_INIT(imbue, _IO_default_imbue) | 
| 1477 | }; | 
| 1478 | libc_hidden_data_def (_IO_file_jumps) | 
| 1479 |  | 
| 1480 | const struct _IO_jump_t _IO_file_jumps_mmap libio_vtable = | 
| 1481 | { | 
| 1482 |   JUMP_INIT_DUMMY, | 
| 1483 |   JUMP_INIT(finish, _IO_file_finish), | 
| 1484 |   JUMP_INIT(overflow, _IO_file_overflow), | 
| 1485 |   JUMP_INIT(underflow, _IO_file_underflow_mmap), | 
| 1486 |   JUMP_INIT(uflow, _IO_default_uflow), | 
| 1487 |   JUMP_INIT(pbackfail, _IO_default_pbackfail), | 
| 1488 |   JUMP_INIT(xsputn, _IO_new_file_xsputn), | 
| 1489 |   JUMP_INIT(xsgetn, _IO_file_xsgetn_mmap), | 
| 1490 |   JUMP_INIT(seekoff, _IO_file_seekoff_mmap), | 
| 1491 |   JUMP_INIT(seekpos, _IO_default_seekpos), | 
| 1492 |   JUMP_INIT(setbuf, (_IO_setbuf_t) _IO_file_setbuf_mmap), | 
| 1493 |   JUMP_INIT(sync, _IO_file_sync_mmap), | 
| 1494 |   JUMP_INIT(doallocate, _IO_file_doallocate), | 
| 1495 |   JUMP_INIT(read, _IO_file_read), | 
| 1496 |   JUMP_INIT(write, _IO_new_file_write), | 
| 1497 |   JUMP_INIT(seek, _IO_file_seek), | 
| 1498 |   JUMP_INIT(close, _IO_file_close_mmap), | 
| 1499 |   JUMP_INIT(stat, _IO_file_stat), | 
| 1500 |   JUMP_INIT(showmanyc, _IO_default_showmanyc), | 
| 1501 |   JUMP_INIT(imbue, _IO_default_imbue) | 
| 1502 | }; | 
| 1503 |  | 
| 1504 | const struct _IO_jump_t _IO_file_jumps_maybe_mmap libio_vtable = | 
| 1505 | { | 
| 1506 |   JUMP_INIT_DUMMY, | 
| 1507 |   JUMP_INIT(finish, _IO_file_finish), | 
| 1508 |   JUMP_INIT(overflow, _IO_file_overflow), | 
| 1509 |   JUMP_INIT(underflow, _IO_file_underflow_maybe_mmap), | 
| 1510 |   JUMP_INIT(uflow, _IO_default_uflow), | 
| 1511 |   JUMP_INIT(pbackfail, _IO_default_pbackfail), | 
| 1512 |   JUMP_INIT(xsputn, _IO_new_file_xsputn), | 
| 1513 |   JUMP_INIT(xsgetn, _IO_file_xsgetn_maybe_mmap), | 
| 1514 |   JUMP_INIT(seekoff, _IO_file_seekoff_maybe_mmap), | 
| 1515 |   JUMP_INIT(seekpos, _IO_default_seekpos), | 
| 1516 |   JUMP_INIT(setbuf, (_IO_setbuf_t) _IO_file_setbuf_mmap), | 
| 1517 |   JUMP_INIT(sync, _IO_new_file_sync), | 
| 1518 |   JUMP_INIT(doallocate, _IO_file_doallocate), | 
| 1519 |   JUMP_INIT(read, _IO_file_read), | 
| 1520 |   JUMP_INIT(write, _IO_new_file_write), | 
| 1521 |   JUMP_INIT(seek, _IO_file_seek), | 
| 1522 |   JUMP_INIT(close, _IO_file_close), | 
| 1523 |   JUMP_INIT(stat, _IO_file_stat), | 
| 1524 |   JUMP_INIT(showmanyc, _IO_default_showmanyc), | 
| 1525 |   JUMP_INIT(imbue, _IO_default_imbue) | 
| 1526 | }; | 
| 1527 |  |