5 use Fcntl 'O_CREAT', 'O_RDWR', 'LOCK_EX';
9 my $DEFAULT_MEMORY_SIZE = 1<<21; # 2 megabytes
11 my %good_opt = map {$_ => 1, "-$_" => 1}
12 qw(memory dw_size mode recsep discipline autochomp);
16 croak "usage: tie \@array, $_[0], filename, [option => value]...";
18 my ($pack, $file, %opts) = @_;
20 # transform '-foo' keys into 'foo' keys
21 for my $key (keys %opts) {
22 unless ($good_opt{$key}) {
23 croak("$pack: Unrecognized option '$key'\n");
26 if ($key =~ s/^-+//) {
27 $opts{$key} = delete $opts{$okey};
31 unless (defined $opts{memory}) {
32 # default is the larger of the default cache size and the
33 # deferred-write buffer size (if specified)
34 $opts{memory} = $DEFAULT_MEMORY_SIZE;
35 $opts{memory} = $opts{dw_size}
36 if defined $opts{dw_size} && $opts{dw_size} > $DEFAULT_MEMORY_SIZE;
39 $opts{dw_size} = $opts{memory} unless defined $opts{dw_size};
40 if ($opts{dw_size} > $opts{memory}) {
41 croak("$pack: dw_size may not be larger than total memory allocation\n");
43 # are we in deferred-write mode?
44 $opts{defer} = 0 unless defined $opts{defer};
45 $opts{deferred} = {}; # no records are presently deferred
46 $opts{deferred_s} = 0; # count of total bytes in ->{deferred}
48 # the cache is a hash instead of an array because it is likely to be
51 $opts{cached} = 0; # total size of cached data
52 $opts{lru} = []; # replace with heap in later version
55 $opts{filename} = $file;
56 unless (defined $opts{recsep}) {
57 $opts{recsep} = _default_recsep();
59 $opts{recseplen} = length($opts{recsep});
60 if ($opts{recseplen} == 0) {
61 croak "Empty record separator not supported by $pack";
64 $opts{autochomp} = 1 unless defined $opts{autochomp};
66 my $mode = defined($opts{mode}) ? $opts{mode} : O_CREAT|O_RDWR;
69 if (UNIVERSAL::isa($file, 'GLOB')) {
70 # We use 1 here on the theory that some systems
71 # may not indicate failure if we use 0.
72 # MSWin32 does not indicate failure with 0, but I don't know if
73 # it will indicate failure with 1 or not.
74 unless (seek $file, 1, SEEK_SET) {
75 croak "$pack: your filehandle does not appear to be seekable";
77 seek $file, 0, SEEK_SET; # put it back
78 $fh = $file; # setting binmode is the user's problem
80 croak "usage: tie \@array, $pack, filename, [option => value]...";
82 $fh = \do { local *FH }; # only works in 5.005 and later
83 sysopen $fh, $file, $mode, 0666 or return;
86 { my $ofh = select $fh; $| = 1; select $ofh } # autoflush on write
87 if (defined $opts{discipline} && $] >= 5.006) {
88 # This avoids a compile-time warning under 5.005
89 eval 'binmode($fh, $opts{discipline})';
90 croak $@ if $@ =~ /unknown discipline/i;
95 bless \%opts => $pack;
100 my $rec = exists $self->{deferred}{$n}
101 ? $self->{deferred}{$n} : $self->_fetch($n);
102 $self->_chomp1($rec);
105 # Chomp many records in-place; return nothing useful
108 return unless $self->{autochomp};
109 if ($self->{autochomp}) {
112 substr($_, - $self->{recseplen}) = "";
117 # Chomp one record in-place; return modified record
119 my ($self, $rec) = @_;
120 return $rec unless $self->{autochomp};
121 return unless defined $rec;
122 substr($rec, - $self->{recseplen}) = "";
129 # check the record cache
130 { my $cached = $self->_check_cache($n);
131 return $cached if defined $cached;
134 unless ($#{$self->{offsets}} >= $n) {
135 my $o = $self->_fill_offsets_to($n);
136 # If it's still undefined, there is no such record, so return 'undef'
137 return unless defined $o;
140 my $fh = $self->{fh};
141 $self->_seek($n); # we can do this now that offsets is populated
142 my $rec = $self->_read_record;
144 # If we happen to have just read the first record, check to see if
145 # the length of the record matches what 'tell' says. If not, Tie::File
146 # won't work, and should drop dead.
148 # if ($n == 0 && defined($rec) && tell($self->{fh}) != length($rec)) {
149 # if (defined $self->{discipline}) {
150 # croak "I/O discipline $self->{discipline} not supported";
152 # croak "File encoding not supported";
156 $self->_cache_insert($n, $rec) if defined $rec;
161 my ($self, $n, $rec) = @_;
163 $self->_fixrecs($rec);
165 return $self->_store_deferred($n, $rec) if $self->{defer};
167 # We need this to decide whether the new record will fit
168 # It incidentally populates the offsets table
169 # Note we have to do this before we alter the cache
170 my $oldrec = $self->_fetch($n);
172 if (my $cached = $self->_check_cache($n)) {
173 my $len_diff = length($rec) - length($cached);
174 $self->{cache}{$n} = $rec;
175 $self->{cached} += $len_diff;
176 $self->_cache_flush if $len_diff > 0 && $self->_cache_too_full;
179 if (not defined $oldrec) {
180 # We're storing a record beyond the end of the file
181 $self->_extend_file_to($n+1);
182 $oldrec = $self->{recsep};
184 my $len_diff = length($rec) - length($oldrec);
186 # length($oldrec) here is not consistent with text mode TODO XXX BUG
187 $self->_twrite($rec, $self->{offsets}[$n], length($oldrec));
189 # now update the offsets
190 # array slice goes from element $n+1 (the first one to move)
192 for (@{$self->{offsets}}[$n+1 .. $#{$self->{offsets}}]) {
197 sub _store_deferred {
198 my ($self, $n, $rec) = @_;
200 my $old_deferred = $self->{deferred}{$n};
201 $self->{deferred}{$n} = $rec;
202 $self->{deferred_s} += length($rec);
203 $self->{deferred_s} -= length($old_deferred) if defined $old_deferred;
204 if ($self->{deferred_s} > $self->{dw_size}) {
206 } elsif ($self->_cache_too_full) {
211 # Remove a single record from the deferred-write buffer without writing it
212 # The record need not be present
213 sub _delete_deferred {
215 my $rec = delete $self->{deferred}{$n};
216 return unless defined $rec;
217 $self->{deferred_s} -= length $rec;
222 my $n = $#{$self->{offsets}};
223 # 20020317 Change this to binary search
224 while (defined ($self->_fill_offsets_to($n+1))) {
227 for my $k (keys %{$self->{deferred}}) {
228 $n = $k+1 if $n < $k+1;
234 my ($self, $len) = @_;
235 my $olen = $self->FETCHSIZE;
236 return if $len == $olen; # Woo-hoo!
240 if ($self->{defer}) {
241 for ($olen .. $len-1) {
242 $self->_store_deferred($_, $self->{recsep});
245 $self->_extend_file_to($len);
251 if ($self->{defer}) {
252 for (grep $_ >= $len, keys %{$self->{deferred}}) {
253 $self->_delete_deferred($_);
259 $#{$self->{offsets}} = $len;
260 # $self->{offsets}[0] = 0; # in case we just chopped this
261 my @cached = grep $_ >= $len, keys %{$self->{cache}};
262 $self->_uncache(@cached);
267 $self->SPLICE($self->FETCHSIZE, scalar(@_), @_);
268 # $self->FETCHSIZE; # av.c takes care of this for me
273 my $size = $self->FETCHSIZE;
274 return if $size == 0;
275 # print STDERR "# POPPITY POP POP POP\n";
276 scalar $self->SPLICE($size-1, 1);
281 scalar $self->SPLICE(0, 1);
286 $self->SPLICE(0, 0, @_);
287 # $self->FETCHSIZE; # av.c takes care of this for me
291 # And enable auto-defer mode, since it's likely that they just
295 # Maybe that's too much dwimmery. But stuffing a fake '-1' into the
296 # autodefer history might not be too much. If you did that, you
297 # could also special-case [ -1, 0 ], which might not be too much.
301 %{$self->{cache}} = ();
303 @{$self->{lru}} = ();
304 @{$self->{offsets}} = (0);
305 %{$self->{deferred}}= ();
306 $self->{deferred_s} = 0;
312 # No need to pre-extend anything in this case
313 return if $self->{defer};
315 $self->_fill_offsets_to($n);
316 $self->_extend_file_to($n);
321 $self->_delete_deferred($n) if $self->{defer};
322 my $lastrec = $self->FETCHSIZE-1;
323 my $rec = $self->FETCH($n);
324 if ($n == $lastrec) {
327 $#{$self->{offsets}}--;
329 # perhaps in this case I should also remove trailing null records?
331 # Note that delete @a[-3..-1] deletes the records in the wrong order,
332 # so we only chop the very last one out of the file. We could repair this
333 # by tracking deleted records inside the object.
334 } elsif ($n < $lastrec) {
335 $self->STORE($n, "");
342 return 1 if exists $self->{deferred}{$n};
343 $self->_fill_offsets_to($n); # I think this is unnecessary
344 $n < $self->FETCHSIZE;
349 $self->_flush if $self->{defer};
351 $self->_chomp(my @a = $self->_splice(@_));
354 $self->_chomp1(scalar $self->_splice(@_));
360 $self->flush if $self->{defer};
364 my ($self, $pos, $nrecs, @data) = @_;
367 $pos = 0 unless defined $pos;
369 # Deal with negative and other out-of-range positions
370 # Also set default for $nrecs
372 my $oldsize = $self->FETCHSIZE;
373 $nrecs = $oldsize unless defined $nrecs;
379 croak "Modification of non-creatable array value attempted, subscript $oldpos";
383 if ($pos > $oldsize) {
385 $pos = $oldsize; # This is what perl does for normal arrays
389 $self->_fixrecs(@data);
390 my $data = join '', @data;
391 my $datalen = length $data;
394 # compute length of data being removed
395 # Incidentally fills offsets table
396 for ($pos .. $pos+$nrecs-1) {
397 my $rec = $self->_fetch($_);
398 last unless defined $rec;
400 $oldlen += length($rec);
404 $self->_twrite($data, $self->{offsets}[$pos], $oldlen);
406 # update the offsets table part 1
407 # compute the offsets of the new records:
410 push @new_offsets, $self->{offsets}[$pos];
411 for (0 .. $#data-1) {
412 push @new_offsets, $new_offsets[-1] + length($data[$_]);
415 splice(@{$self->{offsets}}, $pos, $nrecs, @new_offsets);
417 # update the offsets table part 2
418 # adjust the offsets of the following old records
419 for ($pos+@data .. $#{$self->{offsets}}) {
420 $self->{offsets}[$_] += $datalen - $oldlen;
422 # If we scrubbed out all known offsets, regenerate the trivial table
423 # that knows that the file does indeed start at 0.
424 $self->{offsets}[0] = 0 unless @{$self->{offsets}};
426 # Perhaps the following cache foolery could be factored out
427 # into a bunch of more opaque cache functions. For example,
428 # it's odd to delete a record from the cache and then remove
429 # it from the LRU queue later on; there should be a function to
432 # update the read cache, part 1
434 # Consider this carefully for correctness
435 for ($pos .. $pos+$nrecs-1) {
436 my $cached = $self->{cache}{$_};
437 next unless defined $cached;
438 my $new = $data[$_-$pos];
440 $self->{cached} += length($new) - length($cached);
441 $self->{cache}{$_} = $new;
446 # update the read cache, part 2
447 # moved records - records past the site of the change
448 # need to be renumbered
449 # Maybe merge this with the previous block?
452 for (keys %{$self->{cache}}) {
453 next unless $_ >= $pos + $nrecs;
454 $adjusted{$_-$nrecs+@data} = delete $self->{cache}{$_};
456 @{$self->{cache}}{keys %adjusted} = values %adjusted;
457 # for (keys %{$self->{cache}}) {
458 # next unless $_ >= $pos + $nrecs;
459 # $self->{cache}{$_-$nrecs+@data} = delete $self->{cache}{$_};
465 for (@{$self->{lru}}) {
466 if ($_ >= $pos + $nrecs) {
467 push @new, $_ + @data - $nrecs;
468 } elsif ($_ >= $pos) {
469 push @changed, $_ if $_ < $pos + @data;
474 @{$self->{lru}} = (@new, @changed);
476 # Now there might be too much data in the cache, if we spliced out
477 # some short records and spliced in some long ones. If so, flush
481 # Yes, the return value of 'splice' *is* actually this complicated
482 wantarray ? @result : @result ? $result[-1] : undef;
485 # write data into the file
486 # $data is the data to be written.
487 # it should be written at position $pos, and should overwrite
488 # exactly $len of the following bytes.
489 # Note that if length($data) > $len, the subsequent bytes will have to
490 # be moved up, and if length($data) < $len, they will have to
493 my ($self, $data, $pos, $len) = @_;
495 unless (defined $pos) {
496 die "\$pos was undefined in _twrite";
499 my $len_diff = length($data) - $len;
501 if ($len_diff == 0) { # Woo-hoo!
502 my $fh = $self->{fh};
504 $self->_write_record($data);
505 return; # well, that was easy.
508 # the two records are of different lengths
509 # our strategy here: rewrite the tail of the file,
510 # reading ahead one buffer at a time
511 # $bufsize is required to be at least as large as the data we're overwriting
512 my $bufsize = _bufsize($len_diff);
513 my ($writepos, $readpos) = ($pos, $pos+$len);
516 # Seems like there ought to be a way to avoid the repeated code
517 # and the special case here. The read(1) is also a little weird.
520 $self->_seekb($readpos);
521 my $br = read $self->{fh}, $next_block, $bufsize;
522 my $more_data = read $self->{fh}, my($dummy), 1;
523 $self->_seekb($writepos);
524 $self->_write_record($data);
526 $writepos += length $data;
529 $self->_seekb($writepos);
530 $self->_write_record($next_block);
532 # There might be leftover data at the end of the file
533 $self->_chop_file if $len_diff < 0;
536 # If a record does not already end with the appropriate terminator
537 # string, append one.
541 $_ .= $self->{recsep}
542 unless substr($_, - $self->{recseplen}) eq $self->{recsep};
547 ################################################################
549 # Basic read, write, and seek
552 # seek to the beginning of record #$n
553 # Assumes that the offsets table is already correctly populated
555 # Note that $n=-1 has a special meaning here: It means the start of
556 # the last known record; this may or may not be the very last record
557 # in the file, depending on whether the offsets table is fully populated.
561 my $o = $self->{offsets}[$n];
563 or confess("logic error: undefined offset for record $n");
564 seek $self->{fh}, $o, SEEK_SET
565 or die "Couldn't seek filehandle: $!"; # "Should never happen."
570 seek $self->{fh}, $b, SEEK_SET
571 or die "Couldn't seek filehandle: $!"; # "Should never happen."
574 # populate the offsets table up to the beginning of record $n
575 # return the offset of record $n
576 sub _fill_offsets_to {
578 my $fh = $self->{fh};
579 local *OFF = $self->{offsets};
582 until ($#OFF >= $n) {
584 $self->_seek(-1); # tricky -- see comment at _seek
585 $rec = $self->_read_record;
589 return; # It turns out there is no such record
593 # we have now read all the records up to record n-1,
594 # so we can return the offset of record n
598 # assumes that $rec is already suitably terminated
600 my ($self, $rec) = @_;
601 my $fh = $self->{fh};
603 or die "Couldn't write record: $!"; # "Should never happen."
610 { local $/ = $self->{recsep};
611 my $fh = $self->{fh};
617 ################################################################
619 # Read cache management
621 # Insert a record into the cache at position $n
622 # Only appropriate when no data is cached for $n already
624 my ($self, $n, $rec) = @_;
626 # Do not cache records that are too big to fit in the cache.
627 return unless length $rec <= $self->{memory};
629 $self->{cache}{$n} = $rec;
630 $self->{cached} += length $rec;
631 push @{$self->{lru}}, $n; # most-recently-used is at the END
633 $self->_cache_flush if $self->_cache_too_full;
636 # Remove cached data for record $n, if there is any
637 # (It is OK if $n is not in the cache at all)
641 my $cached = delete $self->{cache}{$n};
642 next unless defined $cached;
643 @{$self->{lru}} = grep $_ != $n, @{$self->{lru}};
644 $self->{cached} -= length($cached);
648 # _check_cache promotes record $n to MRU. Is this correct behavior?
652 return unless defined($rec = $self->{cache}{$n});
654 # cache hit; update LRU queue and return $rec
655 # replace this with a heap in a later version
656 # 20020317 This should be a separate method
657 @{$self->{lru}} = ((grep $_ ne $n, @{$self->{lru}}), $n);
661 sub _cache_too_full {
663 $self->{cached} + $self->{deferred_s} > $self->{memory};
668 while ($self->_cache_too_full) {
669 my $lru = shift @{$self->{lru}};
670 my $rec = delete $self->{cache}{$lru};
671 $self->{cached} -= length $rec;
675 ################################################################
677 # File custodial services
681 # We have read to the end of the file and have the offsets table
682 # entirely populated. Now we need to write a new record beyond
683 # the end of the file. We prepare for this by writing
684 # empty records into the file up to the position we want
686 # assumes that the offsets table already contains the offset of record $n,
687 # if it exists, and extends to the end of the file if not.
688 sub _extend_file_to {
690 $self->_seek(-1); # position after the end of the last record
691 my $pos = $self->{offsets}[-1];
693 # the offsets table has one entry more than the total number of records
694 $extras = $n - $#{$self->{offsets}};
696 # Todo : just use $self->{recsep} x $extras here?
697 while ($extras-- > 0) {
698 $self->_write_record($self->{recsep});
699 push @{$self->{offsets}}, tell $self->{fh};
703 # Truncate the file at the current position
706 truncate $self->{fh}, tell($self->{fh});
710 # compute the size of a buffer suitable for moving
711 # all the data in a file forward $n bytes
712 # ($n may be negative)
713 # The result should be at least $n.
716 return 8192 if $n < 0;
718 $b += 8192 if $n & 8191;
722 ################################################################
724 # Miscellaneous public methods
729 my ($self, $op) = @_;
731 my $pack = ref $self;
732 croak "Usage: $pack\->flock([OPERATION])";
734 my $fh = $self->{fh};
735 $op = LOCK_EX unless defined $op;
739 # Get/set autochomp option
743 my $old = $self->{autochomp};
744 $self->{autochomp} = shift;
751 ################################################################
753 # Matters related to deferred writing
762 # Flush deferred writes
764 # This could be better optimized to write the file in one pass, instead
765 # of one pass per block of records. But that will require modifications
766 # to _twrite, so I should have a good _twrite test suite first.
776 my @writable = sort {$a<=>$b} (keys %{$self->{deferred}});
779 # gather all consecutive records from the front of @writable
780 my $first_rec = shift @writable;
781 my $last_rec = $first_rec+1;
782 ++$last_rec, shift @writable while @writable && $last_rec == $writable[0];
784 $self->_fill_offsets_to($last_rec);
785 $self->_extend_file_to($last_rec);
786 $self->_splice($first_rec, $last_rec-$first_rec+1,
787 @{$self->{deferred}}{$first_rec .. $last_rec});
790 $self->_discard; # clear out deferred-write cache
793 # Discard deferred writes and disable future deferred writes
800 # Discard deferred writes, but retain old deferred writing mode
803 $self->{deferred} = {};
804 $self->{deferred_s} = 0;
807 # Not yet implemented
810 # This is NOT a method. It is here for two reasons:
811 # 1. To factor a fairly complicated block out of the constructor
812 # 2. To provide access for the test suite, which needs to be sure
813 # files are being written properly.
814 sub _default_recsep {
816 if ($^O eq 'MSWin32') { # Dos too?
817 # Windows users expect files to be terminated with \r\n
818 # But $/ is set to \n instead
819 # Note that this also transforms \n\n into \r\n\r\n.
821 $recsep =~ s/\n/\r\n/g;
826 # Utility function for _check_integrity
834 # Given a file, make sure the cache is consistent with the
835 # file contents and the internal data structures are consistent with
836 # each other. Returns true if everything checks out, false if not
838 # The $file argument is no longer used. It is retained for compatibility
839 # with the existing test suite.
840 sub _check_integrity {
841 my ($self, $file, $warn) = @_;
844 if (not defined $self->{offsets}[0]) {
845 _ci_warn("offset 0 is missing!");
847 } elsif ($self->{offsets}[0] != 0) {
848 _ci_warn("rec 0: offset <$self->{offsets}[0]> s/b 0!");
852 local *F = $self->{fh};
854 local $/ = $self->{recsep};
855 my $rsl = $self->{recseplen};
860 my $cached = $self->{cache}{$n};
861 my $offset = $self->{offsets}[$.];
863 if (defined $offset && $offset != $ao) {
864 _ci_warn("rec $n: offset <$offset> actual <$ao>");
867 if (defined $cached && $_ ne $cached) {
871 _ci_warn("rec $n: cached <$cached> actual <$_>");
873 if (defined $cached && substr($cached, -$rsl) ne $/) {
874 _ci_warn("rec $n in the cache is missing the record separator");
879 while (my ($n, $r) = each %{$self->{cache}}) {
880 $cached += length($r);
881 next if $n+1 <= $.; # checked this already
882 _ci_warn("spurious caching of record $n");
885 if ($cached != $self->{cached}) {
886 _ci_warn("cache size is $self->{cached}, should be $cached");
890 my (%seen, @duplicate);
891 for (@{$self->{lru}}) {
893 if (not exists $self->{cache}{$_}) {
894 _ci_warn("$_ is mentioned in the LRU queue, but not in the cache");
898 @duplicate = grep $seen{$_}>1, keys %seen;
900 my $records = @duplicate == 1 ? 'Record' : 'Records';
901 my $appear = @duplicate == 1 ? 'appears' : 'appear';
902 _ci_warn("$records @duplicate $appear multiple times in LRU queue: @{$self->{lru}}");
905 for (keys %{$self->{cache}}) {
906 unless (exists $seen{$_}) {
907 _ci_warn("record $_ is in the cache but not the LRU queue");
912 # Now let's check the deferbuffer
913 # Unless deferred writing is enabled, it should be empty
914 if (! $self->{defer} && %{$self->{deferred}}) {
915 _ci_warn("deferred writing disabled, but deferbuffer nonempty");
919 # Any record in the deferbuffer should *not* be present in the readcache
921 while (my ($n, $r) = each %{$self->{deferred}}) {
922 $deferred_s += length($r);
923 if (exists $self->{cache}{$n}) {
924 _ci_warn("record $n is in the deferbuffer *and* the readcache");
927 if (substr($r, -$rsl) ne $/) {
928 _ci_warn("rec $n in the deferbuffer is missing the record separator");
933 # Total size of deferbuffer should match internal total
934 if ($deferred_s != $self->{deferred_s}) {
935 _ci_warn("buffer size is $self->{deferred_s}, should be $deferred_s");
939 # Total size of deferbuffer should not exceed the specified limit
940 if ($deferred_s > $self->{dw_size}) {
941 _ci_warn("buffer size is $self->{deferred_s} which exceeds the limit of $self->{dw_size}");
945 # Total size of cached data should not exceed the specified limit
946 if ($deferred_s + $cached > $self->{memory}) {
947 my $total = $deferred_s + $cached;
948 _ci_warn("total stored data size is $total which exceeds the limit of $self->{memory}");
955 "Cogito, ergo sum."; # don't forget to return a true value from the file
959 Tie::File - Access the lines of a disk file via a Perl array
963 # This file documents Tie::File version 0.21
965 tie @array, 'Tie::File', filename or die ...;
967 $array[13] = 'blah'; # line 13 of the file is now 'blah'
968 print $array[42]; # display line 42 of the file
970 $n_recs = @array; # how many records are in the file?
971 $#array -= 2; # chop two records off the end
975 s/PERL/Perl/g; # Replace PERL with Perl everywhere in the file
978 # These are just like regular push, pop, unshift, shift, and splice
979 # Except that they modify the file in the way you would expect
981 push @array, new recs...;
983 unshift @array, new recs...;
984 my $r1 = shift @array;
985 @old_recs = splice @array, 3, 7, new recs...;
987 untie @array; # all finished
992 C<Tie::File> represents a regular text file as a Perl array. Each
993 element in the array corresponds to a record in the file. The first
994 line of the file is element 0 of the array; the second line is element
997 The file is I<not> loaded into memory, so this will work even for
1000 Changes to the array are reflected in the file immediately.
1002 Lazy people and beginners may now stop reading the manual.
1006 What is a 'record'? By default, the meaning is the same as for the
1007 C<E<lt>...E<gt>> operator: It's a string terminated by C<$/>, which is
1008 probably C<"\n">. (Minor exception: on dos and Win32 systems, a
1009 'record' is a string terminated by C<"\r\n">.) You may change the
1010 definition of "record" by supplying the C<recsep> option in the C<tie>
1013 tie @array, 'Tie::File', $file, recsep => 'es';
1015 This says that records are delimited by the string C<es>. If the file
1016 contained the following data:
1018 Curse these pesky flies!\n
1020 then the C<@array> would appear to have four elements:
1027 An undefined value is not permitted as a record separator. Perl's
1028 special "paragraph mode" semantics (E<agrave> la C<$/ = "">) are not
1031 Records read from the tied array do not have the record separator
1032 string on the end; this is to allow
1034 $array[17] .= "extra";
1036 to work as expected.
1038 (See L<"autochomp">, below.) Records stored into the array will have
1039 the record separator string appended before they are written to the
1040 file, if they don't have one already. For example, if the record
1041 separator string is C<"\n">, then the following two lines do exactly
1044 $array[17] = "Cherry pie";
1045 $array[17] = "Cherry pie\n";
1047 The result is that the contents of line 17 of the file will be
1048 replaced with "Cherry pie"; a newline character will separate line 17
1049 from line 18. This means that in particular, this will do nothing:
1053 Because the C<chomp>ed value will have the separator reattached when
1054 it is written back to the file. There is no way to create a file
1055 whose trailing record separator string is missing.
1057 Inserting records that I<contain> the record separator string will
1058 produce a reasonable result, but if you can't foresee what this result
1059 will be, you'd better avoid doing this.
1063 Normally, array elements have the record separator removed, so that if
1064 the file contains the text
1070 the tied array will appear to contain C<("Gold", "Frankincense",
1071 "Myrrh")>. If you set C<autochomp> to a false value, the record
1072 separator will not be removed. If the file above was tied with
1074 tie @gifts, "Tie::File", $gifts, autochomp => 0;
1076 then the array C<@gifts> would appear to contain C<("Gold\n",
1077 "Frankincense\n", "Myrrh\n")>, or (on Win32 systems) C<("Gold\r\n",
1078 "Frankincense\r\n", "Myrrh\r\n")>.
1082 Normally, the specified file will be opened for read and write access,
1083 and will be created if it does not exist. (That is, the flags
1084 C<O_RDWR | O_CREAT> are supplied in the C<open> call.) If you want to
1085 change this, you may supply alternative flags in the C<mode> option.
1086 See L<Fcntl> for a listing of available flags.
1089 # open the file if it exists, but fail if it does not exist
1091 tie @array, 'Tie::File', $file, mode => O_RDWR;
1093 # create the file if it does not exist
1094 use Fcntl 'O_RDWR', 'O_CREAT';
1095 tie @array, 'Tie::File', $file, mode => O_RDWR | O_CREAT;
1097 # open an existing file in read-only mode
1098 use Fcntl 'O_RDONLY';
1099 tie @array, 'Tie::File', $file, mode => O_RDONLY;
1101 Opening the data file in write-only or append mode is not supported.
1105 This is an upper limit on the amount of memory that C<Tie::File> will
1106 consume at any time while managing the file. This is used for two
1107 things: managing the I<read cache> and managing the I<deferred write
1110 Records read in from the file are cached, to avoid having to re-read
1111 them repeatedly. If you read the same record twice, the first time it
1112 will be stored in memory, and the second time it will be fetched from
1113 the I<read cache>. The amount of data in the read cache will not
1114 exceed the value you specified for C<memory>. If C<Tie::File> wants
1115 to cache a new record, but the read cache is full, it will make room
1116 by expiring the least-recently visited records from the read cache.
1118 The default memory limit is 2MiB. You can adjust the maximum read
1119 cache size by supplying the C<memory> option. The argument is the
1120 desired cache size, in bytes.
1122 # I have a lot of memory, so use a large cache to speed up access
1123 tie @array, 'Tie::File', $file, memory => 20_000_000;
1125 Setting the memory limit to 0 will inhibit caching; records will be
1126 fetched from disk every time you examine them.
1130 (This is an advanced feature. Skip this section on first reading.)
1132 If you use deferred writing (See L<"Deferred Writing">, below) then
1133 data you write into the array will not be written directly to the
1134 file; instead, it will be saved in the I<deferred write buffer> to be
1135 written out later. Data in the deferred write buffer is also charged
1136 against the memory limit you set with the C<memory> option.
1138 You may set the C<dw_size> option to limit the amount of data that can
1139 be saved in the deferred write buffer. This limit may not exceed the
1140 total memory limit. For example, if you set C<dw_size> to 1000 and
1141 C<memory> to 2500, that means that no more than 1000 bytes of deferred
1142 writes will be saved up. The space available for the read cache will
1143 vary, but it will always be at least 1500 bytes (if the deferred write
1144 buffer is full) and it could grow as large as 2500 bytes (if the
1145 deferred write buffer is empty.)
1147 If you don't specify a C<dw_size>, it defaults to the entire memory
1150 =head2 Option Format
1152 C<-mode> is a synonym for C<mode>. C<-recsep> is a synonym for
1153 C<recsep>. C<-memory> is a synonym for C<memory>. You get the
1156 =head1 Public Methods
1158 The C<tie> call returns an object, say C<$o>. You may call
1160 $rec = $o->FETCH($n);
1161 $o->STORE($n, $rec);
1163 to fetch or store the record at line C<$n>, respectively; similarly
1164 the other tied array methods. (See L<perltie> for details.) You may
1165 also call the following methods on this object:
1171 will lock the tied file. C<MODE> has the same meaning as the second
1172 argument to the Perl built-in C<flock> function; for example
1173 C<LOCK_SH> or C<LOCK_EX | LOCK_NB>. (These constants are provided by
1174 the C<use Fcntl ':flock'> declaration.)
1176 C<MODE> is optional; the default is C<LOCK_EX>.
1178 C<Tie::File> promises that the following sequence of operations will
1181 my $o = tie @array, "Tie::File", $filename;
1184 In particular, C<Tie::File> will I<not> read or write the file during
1185 the C<tie> call. (Exception: Using C<mode =E<gt> O_TRUNC> will, of
1186 course, erase the file during the C<tie> call. If you want to do this
1187 safely, then open the file without C<O_TRUNC>, lock the file, and use
1190 The best way to unlock a file is to discard the object and untie the
1191 array. It is probably unsafe to unlock the file without also untying
1192 it, because if you do, changes may remain unwritten inside the object.
1193 That is why there is no shortcut for unlocking. If you really want to
1194 unlock the file prematurely, you know what to do; if you don't know
1195 what to do, then don't do it.
1197 All the usual warnings about file locking apply here. In particular,
1198 note that file locking in Perl is B<advisory>, which means that
1199 holding a lock will not prevent anyone else from reading, writing, or
1200 erasing the file; it only prevents them from getting another lock at
1201 the same time. Locks are analogous to green traffic lights: If you
1202 have a green light, that does not prevent the idiot coming the other
1203 way from plowing into you sideways; it merely guarantees to you that
1204 the idiot does not also have a green light at the same time.
1208 my $old_value = $o->autochomp(0); # disable autochomp option
1209 my $old_value = $o->autochomp(1); # enable autochomp option
1211 my $ac = $o->autochomp(); # recover current value
1213 See L<"autochomp">, above.
1215 =head2 C<defer>, C<flush>, and C<discard>
1217 See L<"Deferred Writing">, below.
1219 =head1 Tying to an already-opened filehandle
1221 If C<$fh> is a filehandle, such as is returned by C<IO::File> or one
1222 of the other C<IO> modules, you may use:
1224 tie @array, 'Tie::File', $fh, ...;
1226 Similarly if you opened that handle C<FH> with regular C<open> or
1227 C<sysopen>, you may use:
1229 tie @array, 'Tie::File', \*FH, ...;
1231 Handles that were opened write-only won't work. Handles that were
1232 opened read-only will work as long as you don't try to modify the
1233 array. Handles must be attached to seekable sources of data---that
1234 means no pipes or sockets. If you supply a non-seekable handle, the
1235 C<tie> call will try to throw an exception. (On Unix systems, it
1236 B<will> throw an exception.)
1238 =head1 Deferred Writing
1240 (This is an advanced feature. Skip this section on first reading.)
1242 Normally, modifying a C<Tie::File> array writes to the underlying file
1243 immediately. Every assignment like C<$a[3] = ...> rewrites as much of
1244 the file as is necessary; typically, everything from line 3 through
1245 the end will need to be rewritten. This is the simplest and most
1246 transparent behavior. Performance even for large files is reasonably
1249 However, under some circumstances, this behavior may be excessively
1250 slow. For example, suppose you have a million-record file, and you
1257 The first time through the loop, you will rewrite the entire file,
1258 from line 0 through the end. The second time through the loop, you
1259 will rewrite the entire file from line 1 through the end. The third
1260 time through the loop, you will rewrite the entire file from line 2 to
1263 If the performance in such cases is unacceptable, you may defer the
1264 actual writing, and then have it done all at once. The following loop
1265 will perform much better for large files:
1273 If C<Tie::File>'s memory limit is large enough, all the writing will
1274 be done in memory. Then, when you call C<-E<gt>flush>, the entire file
1275 will be rewritten in a single pass.
1277 Calling C<-E<gt>flush> returns the array to immediate-write mode. If
1278 you wish to discard the deferred writes, you may call C<-E<gt>discard>
1279 instead of C<-E<gt>flush>. Note that in some cases, some of the data
1280 will have been written already, and it will be too late for
1281 C<-E<gt>discard> to discard all the changes.
1283 Deferred writes are cached in memory up to the limit specified by the
1284 C<dw_size> option (see above). If the deferred-write buffer is full
1285 and you try to write still more deferred data, the buffer will be
1286 flushed. All buffered data will be written immediately, the buffer
1287 will be emptied, and the now-empty space will be used for future
1290 If the deferred-write buffer isn't yet full, but the total size of the
1291 buffer and the read cache would exceed the C<memory> limit, the oldest
1292 records will be flushed out of the read cache until total usage is
1295 C<push>, C<pop>, C<shift>, C<unshift>, and C<splice> cannot be
1296 deferred. When you perform one of these operations, any deferred data
1297 is written to the file and the operation is performed immediately.
1298 This may change in a future version.
1300 A soon-to-be-released version of this module may enable deferred
1301 write mode automagically if it guesses that you are about to write
1302 many consecutive records. To disable this feature, use
1304 (tied @o)->autodefer(0);
1306 (At present, this call does nothing.)
1310 (That's Latin for 'warnings'.)
1316 This is BETA RELEASE SOFTWARE. It may have bugs. See the discussion
1317 below about the (lack of any) warranty.
1321 Every effort was made to make this module efficient. Nevertheless,
1322 changing the size of a record in the middle of a large file will
1323 always be fairly slow, because everything after the new record must be
1328 The behavior of tied arrays is not precisely the same as for regular
1329 arrays. For example:
1331 # This DOES print "How unusual!"
1332 undef $a[10]; print "How unusual!\n" if defined $a[10];
1334 C<undef>-ing a C<Tie::File> array element just blanks out the
1335 corresponding record in the file. When you read it back again, you'll
1336 get the empty string, so the supposedly-C<undef>'ed value will be
1337 defined. Similarly, if you have C<autochomp> disabled, then
1339 # This DOES print "How unusual!" if 'autochomp' is disabled
1341 print "How unusual!\n" if $a[10];
1343 Because when C<autochomp> is disabled, C<$a[10]> will read back as
1344 C<"\n"> (or whatever the record separator string is.)
1346 There are other minor differences, but in general, the correspondence
1351 Not quite every effort was made to make this module as efficient as
1352 possible. C<FETCHSIZE> should use binary search instead of linear
1353 search. The cache's LRU queue should be a heap instead of a list.
1355 The performance of the C<flush> method could be improved. At present,
1356 it still rewrites the tail of the file once for each block of
1357 contiguous lines to be changed. In the typical case, this will result
1358 in only one rewrite, but in peculiar cases it might be bad. It should
1359 be possible to perform I<all> deferred writing with a single rewrite.
1361 These defects are probably minor; in any event, they will be fixed in
1362 a future version of the module.
1366 The author has supposed that since this module is concerned with file
1367 I/O, almost all normal use of it will be heavily I/O bound, and that
1368 the time to maintain complicated data structures inside the module
1369 will be dominated by the time to actually perform the I/O. This
1370 suggests, for example, that an LRU read-cache is a good tradeoff,
1371 even if it requires substantial adjustment following a C<splice>
1375 You might be tempted to think that deferred writing is like
1376 transactions, with C<flush> as C<commit> and C<discard> as
1377 C<rollback>, but it isn't, so don't.
1383 This version promises absolutely nothing about the internals, which
1384 may change without notice. A future version of the module will have a
1385 well-defined and stable subclassing API.
1387 =head1 WHAT ABOUT C<DB_File>?
1389 C<DB_File>'s C<DB_RECNO> feature does something similar to
1390 C<Tie::File>, but there are a number of reasons that you might prefer
1391 C<Tie::File>. C<DB_File> is a great piece of software, but the
1392 C<DB_RECNO> part is less great than the rest of it.
1398 C<DB_File> reads your entire file into memory, modifies it in memory,
1399 and then writes out the entire file again when you untie the file.
1400 This is completely impractical for large files.
1402 C<Tie::File> does not do any of those things. It doesn't try to read
1403 the entire file into memory; instead it uses a lazy approach and
1404 caches recently-used records. The cache size is strictly bounded by
1405 the C<memory> option. DB_File's C<-E<gt>{cachesize}> doesn't prevent
1406 your process from blowing up when reading a big file.
1410 C<DB_File> has an extremely poor writing strategy. If you have a
1411 ten-megabyte file and tie it with C<DB_File>, and then use
1413 $a[0] =~ s/PERL/Perl/;
1415 C<DB_File> will then read the entire ten-megabyte file into memory, do
1416 the change, and write the entire file back to disk, reading ten
1417 megabytes and writing ten megabytes. C<Tie::File> will read and write
1418 only the first record.
1420 If you have a million-record file and tie it with C<DB_File>, and then
1423 $a[999998] =~ s/Larry/Larry Wall/;
1425 C<DB_File> will read the entire million-record file into memory, do
1426 the change, and write the entire file back to disk. C<Tie::File> will
1427 only rewrite records 999998 and 999999. During the writing process,
1428 it will never have more than a few kilobytes of data in memory at any
1429 time, even if the two records are very large.
1433 Since changes to C<DB_File> files only appear when you do C<untie>, it
1434 can be inconvenient to arrange for concurrent access to the same file
1435 by two or more processes. Each process needs to call C<$db-E<gt>sync>
1436 after every write. When you change a C<Tie::File> array, the changes
1437 are reflected in the file immediately; no explicit C<-E<gt>sync> call
1438 is required. (Or you can enable deferred writing mode to require that
1439 changes be explicitly sync'ed.)
1443 C<DB_File> is only installed by default if you already have the C<db>
1444 library on your system; C<Tie::File> is pure Perl and is installed by
1445 default no matter what. Starting with Perl 5.7.3 you can be
1446 absolutely sure it will be everywhere. You will never have that
1447 surety with C<DB_File>. If you don't have C<DB_File> yet, it requires
1448 a C compiler. You can install C<Tie::File> from CPAN in five minutes
1453 C<DB_File> is written in C, so if you aren't allowed to install
1454 modules on your system, it is useless. C<Tie::File> is written in Perl,
1455 so even if you aren't allowed to install modules, you can look into
1456 the source code, see how it works, and copy the subroutines or the
1457 ideas from the subroutines directly into your own Perl program.
1461 Except in very old, unsupported versions, C<DB_File>'s free license
1462 requires that you distribute the source code for your entire
1463 application. If you are not able to distribute the source code for
1464 your application, you must negotiate an alternative license from
1465 Sleepycat, possibly for a fee. Tie::File is under the Perl Artistic
1466 license and can be distributed free under the same terms as Perl
1475 To contact the author, send email to: C<mjd-perl-tiefile+@plover.com>
1477 To receive an announcement whenever a new version of this module is
1478 released, send a blank email message to
1479 C<mjd-perl-tiefile-subscribe@plover.com>.
1481 The most recent version of this module, including documentation and
1482 any news of importance, will be available at
1484 http://perl.plover.com/TieFile/
1489 C<Tie::File> version 0.21 is copyright (C) 2002 Mark Jason Dominus.
1491 This library is free software; you may redistribute it and/or modify
1492 it under the same terms as Perl itself.
1494 These terms are your choice of any of (1) the Perl Artistic Licence,
1495 or (2) version 2 of the GNU General Public License as published by the
1496 Free Software Foundation, or (3) any later version of the GNU General
1499 This library is distributed in the hope that it will be useful,
1500 but WITHOUT ANY WARRANTY; without even the implied warranty of
1501 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
1502 GNU General Public License for more details.
1504 You should have received a copy of the GNU General Public License
1505 along with this library program; it should be in the file C<COPYING>.
1506 If not, write to the Free Software Foundation, Inc., 59 Temple Place,
1507 Suite 330, Boston, MA 02111 USA
1509 For licensing inquiries, contact the author at:
1513 Philadelphia, PA 19107
1517 C<Tie::File> version 0.21 comes with ABSOLUTELY NO WARRANTY.
1518 For details, see the license.
1522 Gigantic thanks to Jarkko Hietaniemi, for agreeing to put this in the
1523 core when I hadn't written it yet, and for generally being helpful,
1524 supportive, and competent. (Usually the rule is "choose any one.")
1525 Also big thanks to Abhijit Menon-Sen for all of the same things.
1527 Special thanks to Craig Berry and Peter Prymmer (for VMS portability
1528 help), Randy Kobes (for Win32 portability help), Clinton Pierce and
1529 Autrijus Tang (for heroic eleventh-hour Win32 testing above and beyond
1530 the call of duty), and the rest of the CPAN testers (for testing
1533 Additional thanks to:
1538 Tassilo von Parseval /
1542 Autrijus Tang (again) /
1547 Test DELETE machinery more carefully.
1549 More tests. (C<mode> option. _twrite should be tested separately,
1550 because there are a lot of weird special cases lurking in there.)
1552 More tests. (Stuff I didn't think of yet.)
1560 Maybe an autolocking mode?
1564 Record locking with fcntl()? Then you might support an undo log and
1565 get real transactions. What a coup that would be. All would bow