# -*- Mode: cperl; cperl-indent-level: 4 -*-
-# $Id: Harness.pm,v 1.14.2.18 2002/04/25 05:04:35 schwern Exp $
+# $Id: Harness.pm,v 1.38 2002/06/19 21:01:01 schwern Exp $
package Test::Harness;
use strict;
use vars qw($VERSION $Verbose $Switches $Have_Devel_Corestack $Curtest
- $Columns $verbose $switches
+ $Columns $verbose $switches $ML $Strap
@ISA @EXPORT @EXPORT_OK
);
$Have_Devel_Corestack = 0;
-$VERSION = '2.03';
+$VERSION = '2.26';
$ENV{HARNESS_ACTIVE} = 1;
my $Files_In_Dir = $ENV{HARNESS_FILELEAK_IN_DIR};
-my $Strap = Test::Harness::Straps->new;
+$Strap = Test::Harness::Straps->new;
@ISA = ('Exporter');
@EXPORT = qw(&runtests);
out each individual test which failed along with a summary report and
a how long it all took.
-It returns true if everything was ok, false otherwise.
+It returns true if everything was ok. Otherwise it will die() with
+one of the messages in the DIAGNOSTICS section.
=for _private
This is just _run_all_tests() plus _show_results()
=cut
+#'#
sub _run_all_tests {
my(@tests) = @_;
local($|) = 1;
bench => 0,
);
- local($ENV{'PERL5LIB'}) = $Strap->_INC2PERL5LIB;
-
my @dir_files = _globdir $Files_In_Dir if defined $Files_In_Dir;
my $t_start = new Benchmark;
my $width = _leader_width(@tests);
foreach my $tfile (@tests) {
- $Strap->_reset_file_state;
my($leader, $ml) = _mk_leader($tfile, $width);
+ local $ML = $ml;
print $leader;
- my $fh = _open_test($tfile);
-
$tot{files}++;
+ $Strap->{_seen_header} = 0;
+ my %results = $Strap->analyze_file($tfile) or
+ do { warn "$Strap->{error}\n"; next };
+
# state of the current test.
+ my @failed = grep { !$results{details}[$_-1]{ok} }
+ 1..@{$results{details}};
my %test = (
- ok => 0,
- 'next' => 0,
- max => 0,
- failed => [],
- todo => {},
- bonus => 0,
- skipped => 0,
- skip_reason => undef,
+ ok => $results{ok},
+ 'next' => $Strap->{'next'},
+ max => $results{max},
+ failed => \@failed,
+ bonus => $results{bonus},
+ skipped => $results{skip},
+ skip_reason => $Strap->{_skip_reason},
+ skip_all => $Strap->{skip_all},
ml => $ml,
);
- my($seen_header, $tests_seen) = (0,0);
- while (<$fh>) {
- print if $Verbose;
-
- $Strap->{line}++;
- if( _parse_header($_, \%test, \%tot) ) {
- warn "Test header seen twice!\n" if $seen_header;
+ $tot{bonus} += $results{bonus};
+ $tot{max} += $results{max};
+ $tot{ok} += $results{ok};
+ $tot{todo} += $results{todo};
+ $tot{sub_skipped} += $results{skip};
- $seen_header = 1;
-
- warn "1..M can only appear at the beginning or end of tests\n"
- if $tests_seen && $test{max} < $tests_seen;
- }
- elsif( _parse_test_line($_, \%test, \%tot) ) {
- $tests_seen++;
- }
- # else, ignore it.
- }
-
- my($estatus, $wstatus) = _close_fh($fh);
-
- my $allok = $test{ok} == $test{max} && $test{'next'} == $test{max}+1;
+ my($estatus, $wstatus) = @results{qw(exit wait)};
if ($wstatus) {
$failedtests{$tfile} = _dubious_return(\%test, \%tot,
$estatus, $wstatus);
$failedtests{$tfile}{name} = $tfile;
}
- elsif ($allok) {
+ elsif ($results{passing}) {
if ($test{max} and $test{skipped} + $test{bonus}) {
my @msg;
push(@msg, "$test{skipped}/$test{max} skipped: $test{skip_reason}")
if $test{skipped};
push(@msg, "$test{bonus}/$test{max} unexpectedly succeeded")
if $test{bonus};
- print "$test{ml}ok, ".join(', ', @msg)."\n";
+ print "$test{ml}ok\n        ".join(', ', @msg)."\n";
} elsif ($test{max}) {
print "$test{ml}ok\n";
- } elsif (defined $test{skip_reason}) {
- print "skipped: $test{skip_reason}\n";
+ } elsif (defined $test{skip_all} and length $test{skip_all}) {
+ print "skipped\n        all skipped: $test{skip_all}\n";
$tot{skipped}++;
} else {
- print "skipped test on this platform\n";
+ print "skipped\n        all skipped: no reason given\n";
$tot{skipped}++;
}
$tot{good}++;
}
}
- $tot{sub_skipped} += $test{skipped};
-
if (defined $Files_In_Dir) {
my @new_dir_files = _globdir $Files_In_Dir;
if (@new_dir_files != @dir_files) {
@dir_files = @new_dir_files;
}
}
-
- close $fh;
}
$tot{bench} = timediff(new Benchmark, $t_start);
}
-sub _parse_header {
- my($line, $test, $tot) = @_;
+my %Handlers = ();
+$Strap->{callback} = sub {
+ my($self, $line, $type, $totals) = @_;
+ print $line if $Verbose;
- my $is_header = 0;
+ my $meth = $Handlers{$type};
+ $meth->($self, $line, $type, $totals) if $meth;
+};
- if( $Strap->_is_header($line) ) {
- $is_header = 1;
- $test->{max} = $Strap->{max};
- for ( keys %{$Strap->{todo}} ) { $test->{todo}{$_} = 1; }
+$Handlers{header} = sub {
+ my($self, $line, $type, $totals) = @_;
- $test->{skip_reason} = $Strap->{skip_all}
- if not $test->{max} and defined $Strap->{skip_all};
+ warn "Test header seen more than once!\n" if $self->{_seen_header};
- $test->{'next'} = 1 unless $test->{'next'};
+ $self->{_seen_header}++;
+ warn "1..M can only appear at the beginning or end of tests\n"
+ if $totals->{seen} &&
+ $totals->{max} < $totals->{seen};
+};
- $tot->{max} += $test->{max};
- }
- else {
- $is_header = 0;
- }
+$Handlers{test} = sub {
+ my($self, $line, $type, $totals) = @_;
- return $is_header;
-}
-
-
-sub _open_test {
- my($test) = shift;
-
- my $s = _set_switches($test);
+ my $curr = $totals->{seen};
+ my $next = $self->{'next'};
+ my $max = $totals->{max};
+ my $detail = $totals->{details}[-1];
- my $perl = -x $^X ? $^X : $Config{perlpath};
+ if( $detail->{ok} ) {
+ _print_ml("ok $curr/$max");
- # XXX This is WAY too core specific!
- my $cmd = ($ENV{'HARNESS_COMPILE_TEST'})
- ? "./perl -I../lib ../utils/perlcc $test "
- . "-r 2>> ./compilelog |"
- : "$perl $s $test|";
- $cmd = "MCR $cmd" if $^O eq 'VMS';
-
- if( open(PERL, $cmd) ) {
- return \*PERL;
+ if( $detail->{type} eq 'skip' ) {
+ $self->{_skip_reason} = $detail->{reason}
+ unless defined $self->{_skip_reason};
+ $self->{_skip_reason} = 'various reasons'
+ if $self->{_skip_reason} ne $detail->{reason};
+ }
}
else {
- print "can't run $test. $!\n";
- return;
+ _print_ml("NOK $curr");
}
-}
+ if( $curr > $next ) {
+ print "Test output counter mismatch [test $curr]\n";
+ }
+ elsif( $curr < $next ) {
+ print "Confused test output: test $curr answered after ".
+ "test ", $next - 1, "\n";
+ }
-sub _parse_test_line {
- my($line, $test, $tot) = @_;
-
- my %result;
- if ( $Strap->_is_test($line, \%result) ) {
- $test->{'next'} ||= 1;
- my $this = $test->{'next'};
-
- my($not, $tnum) = (!$result{ok}, $result{number});
-
- $this = $tnum if $tnum;
-
- my($type, $reason) = ($result{type}, $result{reason});
-
- my($istodo, $isskip);
- if( defined $type ) {
- $istodo = 1 if $type eq 'todo';
- $isskip = 1 if $type eq 'skip';
- }
-
- $test->{todo}{$this} = 1 if $istodo;
- if( $test->{todo}{$this} ) {
- $tot->{todo}++;
- $test->{bonus}++, $tot->{bonus}++ unless $not;
- }
+};
- if( $not && !$test->{todo}{$this} ) {
- print "$test->{ml}NOK $this" if $test->{ml};
- push @{$test->{failed}}, $this;
- }
- else {
- print "$test->{ml}ok $this/$test->{max}" if $test->{ml};
- $test->{ok}++;
- $tot->{ok}++;
- $test->{skipped}++ if $isskip;
-
- $reason = '[no reason given]'
- if $isskip and not defined $reason;
- if (defined $reason and defined $test->{skip_reason}) {
- # print "was: '$skip_reason' new '$reason'\n";
- $test->{skip_reason} = 'various reasons'
- if $test->{skip_reason} ne $reason;
- } elsif (defined $reason) {
- $test->{skip_reason} = $reason;
- }
- }
+$Handlers{bailout} = sub {
+ my($self, $line, $type, $totals) = @_;
- if ($this > $test->{'next'}) {
- print "Test output counter mismatch [test $this]\n";
+ die "FAILED--Further testing stopped" .
+ ($self->{bailout_reason} ? ": $self->{bailout_reason}\n" : ".\n");
+};
- # Guard against resource starvation.
- if( $this > 100000 ) {
- print "Enourmous test number seen [test $this]\n";
- }
- else {
- push @{$test->{failed}}, $test->{'next'}..$this-1;
- }
- }
- elsif ($this < $test->{'next'}) {
- #we have seen more "ok" lines than the number suggests
- print "Confused test output: test $this answered after ".
- "test ", $test->{'next'}-1, "\n";
- $test->{'next'} = $this;
- }
- $test->{'next'} = $this + 1;
- }
- else {
- my $bail_reason;
- if( $Strap->_is_bail_out($line, \$bail_reason) ) { # bail out!
- die "FAILED--Further testing stopped" .
- ($bail_reason ? ": $bail_reason\n" : ".\n");
- }
- }
+sub _print_ml {
+ print join '', $ML, @_ if $ML;
}
return $bonusmsg;
}
-# VMS has some subtle nastiness with closing the test files.
-sub _close_fh {
- my($fh) = shift;
-
- close($fh); # must close to reap child resource values
-
- my $wstatus = $Ignore_Exitcode ? 0 : $?; # Can trust $? ?
- my $estatus;
- $estatus = ($^O eq 'VMS'
- ? eval 'use vmsish "status"; $estatus = $?'
- : $wstatus >> 8);
-
- return($estatus, $wstatus);
-}
-
-
-# Set up the command-line switches to run perl as.
-sub _set_switches {
- my($test) = shift;
-
- my $s = $Switches;
- $s .= $Strap->_switches($test);
-
- return $s;
-}
-
-
# Test program go boom.
sub _dubious_return {
my($test, $tot, $estatus, $wstatus) = @_;
}
-sub _garbled_output {
- my($gibberish) = shift;
- warn "Confusing test output: '$gibberish'\n";
-}
-
-
sub _create_fmts {
my($failedtests) = @_;
my $ender = 's' x ($skipped > 1);
my $good = $max - $failed - $skipped;
my $goodper = sprintf("%.2f",100*($good/$max));
- push @result, " (-$skipped skipped test$ender: $good okay, ".
+ push @result, " (less $skipped skipped test$ender: $good okay, ".
"$goodper%)"
if $skipped;
push @result, "\n";
If not all tests were successful, the script dies with one of the
above messages.
-=item C<FAILED--Further testing stopped%s>
+=item C<FAILED--Further testing stopped: %s>
If a single subtest decides that further testing will not make sense,
the script dies with this message.
Figure a way to report test names in the failure summary.
Rework the test summary so long test names are not truncated as badly.
+(Partially done with new skip test styles)
Deal with VMS's "not \nok 4\n" mistake.