=cut
-my $SHORT = undef; # Debug var - set this to limit the number of lines parsed
+my $SHORTEND = ''; # Debug var - set this to limit the number of lines parsed
my %base_text_index;
-my $edits_required;
+my $edits_required = {};
# edits_required -> wit -> [ { start_idx, end_idx, items } ]
my @base_line_starts = read_base( $base_file, $collation );
my %all_witnesses;
+ my @unwitnessed_lemma_nodes;
foreach my $app ( @app_entries ) {
my( $line, $num ) = split( /\./, $app->{_id} );
# DEBUG with a short graph
- last if $SHORT && $line > $SHORT;
+ last if $SHORTEND && $line > $SHORTEND;
# DEBUG for problematic entries
my $scrutinize = '';
my $first_line_reading = $base_line_starts[ $line ];
# terms of a splice operation, for each subsequent reading.
# We also note which witnesses take the given edit.
- my @lemma_set = $collation->reading_sequence( $lemma_start, $lemma_end );
+ my @lemma_set = $collation->reading_sequence( $lemma_start,
+ $lemma_end );
my @reading_sets = [ @lemma_set ];
# For each reading that is not rdg_0, we create the variant
# reading nodes, and store the range as an edit operation on
# the base text.
my $variant_objects;
- my %pc_lemma; # Keep track of mss that have been corrected back to lemma
- my %pc_variant; # Keep track of mss with other corrections
+ my %pc_seen; # Keep track of mss with explicit post-corr data
foreach my $k ( grep { /^rdg/ } keys( %$app ) ) {
my @mss = grep { $app->{$_} eq $k } keys( %$app );
+
+ # Keep track of lemma nodes that don't actually appear in
+ # any MSS; we will want to remove them from the collation.
+ push( @unwitnessed_lemma_nodes, @lemma_set )
+ if !@mss && $k eq 'rdg_0';
+
# Keep track of what witnesses we have seen.
@all_witnesses{ @mss } = ( 1 ) x scalar( @mss );
- my $pc_hash = $k eq 'rdg_0' ? \%pc_lemma : \%pc_variant;
-
# Keep track of which witnesses bear corrected readings here.
foreach my $m ( @mss ) {
my $base = _is_post_corr( $m );
next unless $base;
- $pc_hash->{$base} = 1;
+ $pc_seen{$base} = 1;
}
next if $k eq 'rdg_0';
+ # Parse the variant into reading tokens.
# TODO don't hardcode the reading split operation
my @variant = split( /\s+/, $app->{$k} );
@variant = () if $app->{$k} eq '/'; # This is an omission.
- # Make the variant into a set of readings.
my @variant_readings;
my $ctr = 0;
foreach my $vw ( @variant ) {
# collated sets. Modifies the reading sets that were passed.
collate_variants( $collation, @reading_sets );
+ # Record any stated relationships between the nodes and the lemma.
+ set_relationships( $collation, $app, \@lemma_set, $variant_objects );
+
# Now create the splice-edit objects that will be used
# to reconstruct each witness.
foreach my $rkey ( keys %$variant_objects ) {
# Object is argument list for splice, so:
# offset, length, replacements
- my $edit_object = [ $base_text_index{$lemma_start->name},
+ my $edit_object = [ $lemma_start->name,
scalar( @lemma_set ),
$variant_objects->{$rkey}->{reading} ];
foreach my $ms ( @{$variant_objects->{$rkey}->{mss}} ) {
_add_hash_entry( $edits_required, $pc_key, $edit_object );
} else { # this is an ante-corr witness
my $pc_key = $ms . "_post";
- _add_hash_entry( $edits_required, $_, $edit_object );
- unless( !$pc_lemma{$ms} && !$pc_variant{$ms} ) {
- # If this witness carries no correction, add this same object
- # to its post-corrected state.
- # TODO combine these hashes?
- _add_hash_entry( $edits_required, $pc_key, $edit_object );
+ _add_hash_entry( $edits_required, $ms, $edit_object );
+ unless( $pc_seen{$ms} ) {
+ # If this witness carries no correction, add this
+ # same object to its post-corrected state.
+ _add_hash_entry( $edits_required, $pc_key,
+ $edit_object );
}
}
}
} # Finished going through the apparatus entries
# Now make the witness objects, and create their text sequences
- foreach my $w ( grep { $_ !~ /_base$/ } keys %$edits_required ) {
+ foreach my $w ( grep { $_ !~ /_post$/ } keys %$edits_required ) {
+ print STDERR "Creating witness $w\n";
my $witness_obj = $collation->tradition->add_witness( sigil => $w );
- my @ante_corr_seq = apply_edits( $edits_required->{$w} );
- my @post_corr_seq = apply_edits( $edits_required->{$w."_post"} )
+ my $debug; # = $w eq 'Vb11';
+ my @ante_corr_seq = apply_edits( $collation, $edits_required->{$w}, $debug );
+ my @post_corr_seq = apply_edits( $collation, $edits_required->{$w."_post"}, $debug )
if exists( $edits_required->{$w."_post"} );
- # Now how to save these paths in my witness object?
+ my @repeated = _check_for_repeated( @ante_corr_seq );
+ warn "Repeated elements @repeated in $w a.c."
+ if @repeated;
+ @repeated = _check_for_repeated( @post_corr_seq );
+ warn "Repeated elements @repeated in $w p.c."
+ if @repeated;
+
+ # Now save these paths in my witness object
if( @post_corr_seq ) {
- $witness_obj->add_path( @post_corr_seq );
- $witness_obj->add_uncorrected_path( @ante_corr_seq );
+ $witness_obj->path( \@post_corr_seq );
+ $witness_obj->uncorrected_path( \@ante_corr_seq );
} else {
- $witness_obj->add_path( @ante_corr_seq );
+ $witness_obj->path( \@ante_corr_seq );
}
}
- # TODO Now remove all the 'base text' links.
+ # Now remove our 'base text' edges, which is to say, the only
+ # ones we have created so far. Also remove any unwitnessed
+ # lemma nodes (TODO unless we are treating base as witness)
+ foreach ( $collation->paths() ) {
+ $collation->del_path( $_ );
+ }
+ foreach( @unwitnessed_lemma_nodes ) {
+ $collation->del_reading( $_ );
+ }
+
+ ### HACKY HACKY Do some one-off path corrections here.
+ if( $collation->linear ) {
+ my $c = $collation;
+ my $end = $SHORTEND ? $SHORTEND : 155;
+ my $path = $c->tradition->witness('Vb11')->path;
+ if( $end > 16 ) {
+ $c->merge_readings( $c->reading('rdg_1/16.3.0'), $c->reading('rdg_1/16.2.1') );
+ splice( @$path, 209, 2, $c->reading( 'rdg_1/16.3.0' ), $c->reading( 'rdg_1/16.2.2' ) );
+ }
+ # What else?
+ } else {
+ my $c = $collation;
+ my $end = $SHORTEND ? $SHORTEND : 155;
+ # Vb5:
+ my $path = $c->tradition->witness('Vb5')->path;
+ splice( @$path, 1436, 0, $c->reading('106,14') ) if $end > 106;
+ # Vb11:
+ $path = $c->tradition->witness('Vb11')->path;
+ if( $end > 16 ) {
+ $c->merge_readings( $c->reading('rdg_1/16.3.0'), $c->reading('rdg_1/16.2.1') );
+ splice( @$path, 209, 2, $c->reading( 'rdg_1/16.3.0' ), $c->reading( '16,1' ) );
+ }
+ # Vb12 a.c.:
+ $path = $c->tradition->witness('Vb12')->uncorrected_path;
+ splice( @$path, 1828, 1, $c->reading('rdg_2/137.5.0') ) if $end > 137;
+ # Vb13:
+ $path = $c->tradition->witness('Vb13')->path;
+ splice( @$path, 782, 0, $c->reading( '58,5' ) ) if $end > 58;
+ # Vb20 a.c.:
+ $path = $c->tradition->witness('Vb20')->uncorrected_path;
+ splice( @$path, 1251, 1, $c->reading( '94,6' ) ) if $end > 94;
+ # Vb26:
+ $path = $c->tradition->witness('Vb26')->path;
+ splice( @$path, 618, 0, $c->reading('46,2') ) if $end > 46;
+ }
# Now walk paths and calculate positions.
my @common_readings =
- $collation->walk_and_expand_base( $collation->reading( '#END#' ) );
+ $collation->make_witness_paths();
$collation->calculate_positions( @common_readings );
}
+sub _check_for_repeated {
+ my @seq = @_;
+ my %unique;
+ my @repeated;
+ foreach ( @seq ) {
+ if( exists $unique{$_->name} ) {
+ push( @repeated, $_->name );
+ } else {
+ $unique{$_->name} = 1;
+ }
+ }
+ return @repeated;
+}
+
=item B<read_base>
my @line_beginnings = read_base( 'reference.txt', $collation );
# This array gives the first reading for each line. We put the
# common starting point in line zero.
my $last_reading = $collation->start();
+ $base_text_index{$last_reading->name} = 0;
my $lineref_array = [ $last_reading ]; # There is no line zero.
open( BASE, $base_file ) or die "Could not open file $base_file: $!";
- my $i = 0;
+ my $i = 1;
while(<BASE>) {
# Make the readings, and connect them up for the base, but
# also save the first reading of each line in an array for the
my $started = 0;
my $wordref = 0;
my $lineref = scalar @$lineref_array;
- last if $SHORT && $lineref > $SHORT;
+ last if $SHORTEND && $lineref > $SHORTEND;
foreach my $w ( @words ) {
my $readingref = join( ',', $lineref, ++$wordref );
my $reading = $collation->add_reading( $readingref );
my $endpoint = $collation->add_reading( '#END#' );
$collation->add_path( $last_reading, $endpoint, $collation->baselabel );
push( @$lineref_array, $endpoint );
+ $base_text_index{$endpoint->name} = $i;
return( @$lineref_array );
}
sub collate_variants {
my( $collation, @reading_sets ) = @_;
- # my $detranspose = 1; # TODO handle merging transposed nodes
- # Merge the nodes across the sets so that there is only one node
- # for any given reading. Use diff to identify the 'same' nodes.
+ # Two different ways to do this, depending on whether we want
+ # transposed reading nodes to be merged into one (producing a
+ # nonlinear, bidirectional graph) or not (producing a relatively
+ # linear, unidirectional graph).
+ return $collation->linear ? collate_linearly( @_ )
+ : collate_nonlinearly( @_ );
+}
- my $lemma_set = shift @reading_sets;
+sub collate_linearly {
+ my( $collation, $lemma_set, @variant_sets ) = @_;
my @unique;
push( @unique, @$lemma_set );
-
- while( @reading_sets ) {
- my $variant_set = shift @reading_sets;
- my $diff = Algorithm::Diff->new( \@unique, $variant_set, \&_collation_hash );
+ while( @variant_sets ) {
+ my $variant_set = shift @variant_sets;
+ # Use diff to do this job
+ my $diff = Algorithm::Diff->new( \@unique, $variant_set,
+ {'keyGen' => \&_collation_hash} );
my @new_unique;
- push( @new_unique, @unique );
+ my %merged;
while( $diff->Next ) {
if( $diff->Same ) {
# merge the nodes
my @l = $diff->Items( 1 );
my @v = $diff->Items( 2 );
foreach my $i ( 0 .. $#l ) {
- $collation->merge_readings( $l[$i], $v[$i] );
+ if( !$merged{$l[$i]->name} ) {
+ $collation->merge_readings( $l[$i], $v[$i] );
+ $merged{$l[$i]->name} = 1;
+ } else {
+ print STDERR "Would have double merged " . $l[$i]->name . "\n";
+ }
}
# splice the lemma nodes into the variant set
- splice( @$variant_set, $diff->Get( 'min2' ), scalar( @l ), @l );
+ my( $offset ) = $diff->Get( 'min2' );
+ splice( @$variant_set, $offset, scalar( @l ), @l );
push( @new_unique, @l );
} else {
# Keep the old unique readings
}
@unique = @new_unique;
}
+}
- return;
+sub collate_nonlinearly {
+ my( $collation, $lemma_set, @variant_sets ) = @_;
+
+ my @unique;
+ push( @unique, @$lemma_set );
+ while( @variant_sets ) {
+ my $variant_set = shift @variant_sets;
+ # Simply match the first reading that carries the same word, so
+ # long as that reading has not yet been used to match another
+ # word in this variant. That way lies loopy madness.
+ my @distinct;
+ my %merged;
+ foreach my $idx ( 0 .. $#{$variant_set} ) {
+ my $vw = $variant_set->[$idx];
+ my @same = grep { cmp_str( $_ ) eq $vw->label } @unique;
+ my $matched;
+ if( @same ) {
+ foreach my $i ( 0 .. $#same ) {
+ unless( $merged{$same[$i]->name} ) {
+ #print STDERR sprintf( "Merging %s into %s\n",
+ # $vw->name,
+ # $same[$i]->name );
+ $collation->merge_readings( $same[$i], $vw );
+ $merged{$same[$i]->name} = 1;
+ $matched = $i;
+ $variant_set->[$idx] = $same[$i];
+ }
+ }
+ }
+ unless( @same && defined($matched) ) {
+ push( @distinct, $vw );
+ }
+ }
+ push( @unique, @distinct );
+ }
}
+
sub _collation_hash {
my $node = shift;
- return _cmp_str( $node->label );
+ return cmp_str( $node );
}
-sub apply_edits {
- my $edit_sequence = shift;
- my @lemma_text = map { $base_text_index{$_} } sort( keys %base_text_index );
+sub set_relationships {
+ my( $collation, $app, $lemma, $variants ) = @_;
+ foreach my $rkey ( keys %$variants ) {
+ my $var = $variants->{$rkey}->{'reading'};
+ my $type = $app->{sprintf( "_%s_type", $rkey )};
+ my $noncorr = $app->{sprintf( "_%s_non_corr", $rkey )};
+ my $nonindep = $app->{sprintf( "_%s_non_indep", $rkey )};
+
+ my %rel_options = ();
+ $rel_options{'non_correctable'} = $noncorr if $noncorr && $noncorr =~ /^\d$/;
+ $rel_options{'non_indep'} = $nonindep if $nonindep && $nonindep =~ /^\d$/;
+
+ if( $type =~ /^(inv|tr|rep)$/i ) {
+ # Transposition or repetition: look for nodes with the
+ # same label but different IDs and mark them.
+ $type = 'repetition' if $type =~ /^rep/i;
+ $rel_options{'type'} = $type;
+ my %labels;
+ foreach my $r ( @$lemma ) {
+ $labels{cmp_str( $r )} = $r;
+ }
+ foreach my $r( @$var ) {
+ if( exists $labels{$r->label} &&
+ $r->name ne $labels{$r->label}->name ) {
+ if( $type eq 'repetition' ) {
+ # Repetition
+ $collation->add_relationship( $r, $labels{$r->label}, \%rel_options );
+ } else {
+ # Transposition
+ $r->set_identical( $labels{$r->label} );
+ }
+ }
+ }
+ } elsif( $type =~ /^(gr|lex|sp(el)?)$/i ) {
+
+ # Grammar/spelling/lexical: this can be a one-to-one or
+ # one-to-many mapping. We should think about merging
+ # readings if it is one-to-many.
+
+ $type = 'grammatical' if $type =~ /gr/i;
+ $type = 'spelling' if $type =~ /sp/i;
+ $type = 'repetition' if $type =~ /rep/i;
+ $type = 'lexical' if $type =~ /lex/i;
+ $rel_options{'type'} = $type;
+ if( @$lemma == @$var ) {
+ foreach my $i ( 0 .. $#{$lemma} ) {
+ $collation->add_relationship( $var->[$i], $lemma->[$i],
+ \%rel_options );
+ }
+ } else {
+ # An uneven many-to-many mapping. Make a segment out of
+ # whatever we have.
+ my $lemseg = @$lemma > 1 ? $collation->add_segment( @$lemma ) : $lemma->[0];
+ my $varseg = @$var > 1 ? $collation->add_segment( @$var ) : $var->[0];
+ $collation->add_relationship( $varseg, $lemseg, \%rel_options );
+ }
+ } elsif( $type !~ /^(add|om)$/i ) {
+ warn "Unrecognized type $type";
+ }
+ }
+}
+
+
+sub apply_edits {
+ my( $collation, $edit_sequence, $debug ) = @_;
+ my @lemma_text = $collation->reading_sequence( $collation->start,
+ $collation->reading( '#END#' ) );
my $drift = 0;
foreach my $correction ( @$edit_sequence ) {
- my( $offset, $length, $items ) = @$correction;
+ my( $lemma_start, $length, $items ) = @$correction;
+ my $offset = $base_text_index{$lemma_start};
my $realoffset = $offset + $drift;
+ if( $debug ||
+ $lemma_text[$realoffset]->name ne $lemma_start ) {
+ my @this_phrase = @lemma_text[$realoffset..$realoffset+$length-1];
+ my @base_phrase;
+ my $i = $realoffset;
+ my $l = $collation->reading( $lemma_start );
+ while( $i < $realoffset+$length ) {
+ push( @base_phrase, $l );
+ $l = $collation->next_reading( $l );
+ $i++;
+ }
+
+ print STDERR sprintf( "Trying to replace %s (%s) starting at %d " .
+ "with %s (%s) with drift %d\n",
+ join( ' ', map {$_->label} @base_phrase ),
+ join( ' ', map {$_->name} @base_phrase ),
+ $realoffset,
+ join( ' ', map {$_->label} @$items ),
+ join( ' ', map {$_->name} @$items ),
+ $drift,
+ ) if $debug;
+
+ if( $lemma_text[$realoffset]->name ne $lemma_start ) {
+ warn( sprintf( "Should be replacing %s (%s) with %s (%s) " .
+ "but %s (%s) is there instead",
+ join( ' ', map {$_->label} @base_phrase ),
+ join( ' ', map {$_->name} @base_phrase ),
+ join( ' ', map {$_->label} @$items ),
+ join( ' ', map {$_->name} @$items ),
+ join( ' ', map {$_->label} @this_phrase ),
+ join( ' ', map {$_->name} @this_phrase ),
+ ) );
+ # next;
+ }
+ }
splice( @lemma_text, $realoffset, $length, @$items );
$drift += @$items - $length;
}
- return \@lemma_text;
+ return @lemma_text;
}
-
+
# Helper function. Given a witness sigil, if it is a post-correctione
# sigil,return the base witness. If not, return a false value.
return undef;
}
+sub _add_hash_entry {
+ my( $hash, $key, $entry ) = @_;
+ if( exists $hash->{$key} ) {
+ push( @{$hash->{$key}}, $entry );
+ } else {
+ $hash->{$key} = [ $entry ];
+ }
+}
+
+
=item B<cmp_str>
Pretend you never saw this method. Really it needs to not be hardcoded.