package Text::Tradition::Parser::Util;

use strict;
use warnings;
use Algorithm::Diff;
use Exporter 'import';
use vars qw/ @EXPORT_OK /;
@EXPORT_OK = qw/ add_hash_entry check_for_repeated cmp_str collate_variants is_monotonic /;

=head1 NAME

Text::Tradition::Parser::Util

=head1 DESCRIPTION

A collection of utilities used by multiple Text::Tradition parsers.
Probably not of external interest.

=head1 METHODS

=head2 B<collate_variants>( $collation, @reading_sets )

Given a list of reading sets, the first being the lemma readings and the
rest being sets of variant readings, walks through them to identify the
readings that are identical. The collation is a Text::Tradition::Collation
object; each reading set is an arrayref of
Text::Tradition::Collation::Reading objects that appear on the collation
graph. Returns a hashref mapping the names of readings that were merged
away to the reading objects into which they were merged.

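For example, a parser that has gathered a lemma sequence and two variant
sequences for the same stretch of text might call it as follows (an
illustrative sketch; the variable names are hypothetical):

  my $substitutions = collate_variants( $collation,
      \@lemma_readings, \@variant_a, \@variant_b );
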
=cut

sub collate_variants {
    my( $collation, @reading_sets ) = @_;

    # Two different ways to do this, depending on whether we want
    # transposed reading nodes to be merged into one (producing a
    # nonlinear, bidirectional graph) or not (producing a relatively
    # linear, unidirectional graph.)
    return $collation->linear ? collate_linearly( @_ )
        : collate_nonlinearly( @_ );
}

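# Merge each variant set into the lemma readings using Algorithm::Diff, so
# that matching words are identified in sequence order and the graph stays
# (relatively) linear. Returns a hashref mapping the names of merged-away
# readings to the readings they were merged into.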
sub collate_linearly {
    my( $collation, $lemma_set, @variant_sets ) = @_;

    my @unique;
    my $substitutions = {};
    push( @unique, @$lemma_set );
    while( @variant_sets ) {
        my $variant_set = shift @variant_sets;
        # Use diff to do this job
        my $diff = Algorithm::Diff->new( \@unique, $variant_set,
                                         {'keyGen' => \&_collation_hash} );
        my @new_unique;
        my %merged;
        while( $diff->Next ) {
            if( $diff->Same ) {
                # merge the nodes
                my @l = $diff->Items( 1 );
                my @v = $diff->Items( 2 );
                foreach my $i ( 0 .. $#l ) {
                    if( !$merged{$l[$i]->name} ) {
                        print STDERR sprintf( "Merging %s into %s\n",
                                              $v[$i]->name,
                                              $l[$i]->name );
                        $collation->merge_readings( $l[$i], $v[$i] );
                        $merged{$l[$i]->name} = 1;
                        $substitutions->{$v[$i]->name} = $l[$i];
                    } else {
                        print STDERR "Would have double merged " . $l[$i]->name . "\n";
                    }
                }
                # splice the lemma nodes into the variant set
                my( $offset ) = $diff->Get( 'min2' );
                splice( @$variant_set, $offset, scalar( @l ), @l );
                push( @new_unique, @l );
            } else {
                # Keep the old unique readings
                push( @new_unique, $diff->Items( 1 ) ) if $diff->Items( 1 );
                # Add the new readings to the 'unique' list
                push( @new_unique, $diff->Items( 2 ) ) if $diff->Items( 2 );
            }
        }
        @unique = @new_unique;
    }
    return $substitutions;
}

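# As collate_linearly, but matches words without regard to their sequence,
# so transposed readings may be merged into a single node. Returns the same
# sort of substitution hashref as collate_linearly.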
sub collate_nonlinearly {
    my( $collation, $lemma_set, @variant_sets ) = @_;

    my @unique;
    my $substitutions = {};
    push( @unique, @$lemma_set );
    while( @variant_sets ) {
        my $variant_set = shift @variant_sets;
        # Simply match the first reading that carries the same word, so
        # long as that reading has not yet been used to match another
        # word in this variant. That way lies loopy madness.
        my @distinct;
        my %merged;
        foreach my $idx ( 0 .. $#{$variant_set} ) {
            my $vw = $variant_set->[$idx];
            my @same = grep { cmp_str( $_ ) eq $vw->label } @unique;
            my $matched;
            if( @same ) {
                foreach my $i ( 0 .. $#same ) {
                    unless( $merged{$same[$i]->name} ) {
                        #print STDERR sprintf( "Merging %s into %s\n",
                        #    $vw->name,
                        #    $same[$i]->name );
                        $collation->merge_readings( $same[$i], $vw );
                        $merged{$same[$i]->name} = 1;
                        $matched = $i;
                        $variant_set->[$idx] = $same[$i];
                        $substitutions->{$vw->name} = $same[$i];
                    }
                }
            }
            unless( @same && defined($matched) ) {
                push( @distinct, $vw );
            }
        }
        push( @unique, @distinct );
    }
    return $substitutions;
}

sub _collation_hash {
    my $node = shift;
    return cmp_str( $node );
}

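=head2 B<cmp_str>( $reading )

Returns a normalized form of the reading's label for comparison: the word
is lowercased, stripped of non-word characters, and a handful of Latin
orthographic variants (v/u, j/i, cha/ca, quatuor/quattuor,
ioannes/iohannes) are regularized.

=cut
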
sub cmp_str {
    my( $reading ) = @_;
    my $word = $reading->label();
    $word = lc( $word );
    $word =~ s/\W//g;
    $word =~ s/v/u/g;
    $word =~ s/j/i/g;
    $word =~ s/cha/ca/g;
    $word =~ s/quatuor/quattuor/g;
    $word =~ s/ioannes/iohannes/g;
    return $word;
}

=head2 B<check_for_repeated>( @readings )

Given an array of readings (or any objects with a C<name> method), returns
the names of any that appear in the array more than once.

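A typical use during parsing might look like this (an illustrative sketch;
the variable name is hypothetical):

  my @repeated = check_for_repeated( @witness_readings );
  warn "Repeated readings: @repeated" if @repeated;
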
=cut

sub check_for_repeated {
    my @seq = @_;
    my %unique;
    my @repeated;
    foreach ( @seq ) {
        if( exists $unique{$_->name} ) {
            push( @repeated, $_->name );
        } else {
            $unique{$_->name} = 1;
        }
    }
    return @repeated;
}

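=head2 B<add_hash_entry>( $hash, $key, $entry )

Pushes $entry onto the array stored under $key in the hashref $hash,
creating the array if the key does not yet exist. For illustration (the
variable names are hypothetical):

  my $index = {};
  add_hash_entry( $index, 'wit_A', $reading1 );  # $index->{wit_A} = [ $reading1 ]
  add_hash_entry( $index, 'wit_A', $reading2 );  # now [ $reading1, $reading2 ]

=cut
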
sub add_hash_entry {
    my( $hash, $key, $entry ) = @_;
    if( exists $hash->{$key} ) {
        push( @{$hash->{$key}}, $entry );
    } else {
        $hash->{$key} = [ $entry ];
    }
}

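=head2 B<is_monotonic>( @readings )

Returns true if each reading's position advances on that of the reading
before it: the common position may not decrease, and where it stays the
same, both the min and the max positions must strictly increase. Returns
false otherwise.

=cut
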
sub is_monotonic {
    my( @readings ) = @_;
    my( $common, $min, $max ) = ( -1, -1, -1 );
    foreach my $rdg ( @readings ) {
        # print STDERR "Checking reading " . $rdg->name . "/" . $rdg->text . " - "
        #     . $rdg->position->reference . "\n";
        return 0 if $rdg->position->common < $common;
        if( $rdg->position->common == $common ) {
            return 0 if $rdg->position->min <= $min;
            return 0 if $rdg->position->max <= $max;
        }
        $common = $rdg->position->common;
        $min = $rdg->position->min;
        $max = $rdg->position->max;
    }
    return 1;
}

1;

=head1 BUGS / TODO

=over

=item * Get rid of the abomination that is cmp_str.

=back

=head1 LICENSE

This package is free software and is provided "as is" without express
or implied warranty. You can redistribute it and/or modify it under
the same terms as Perl itself.

=head1 AUTHOR

Tara L Andrews E<lt>aurum@cpan.orgE<gt>