use Config;
use strict;
-our($VERSION, $verbose, $switches, $have_devel_corestack, $curtest,
- $columns, @ISA, @EXPORT, @EXPORT_OK);
-$have_devel_corestack = 0;
+our($VERSION, $Verbose, $Switches, $Have_Devel_Corestack, $Curtest,
+ $Columns, $verbose, $switches,
+ @ISA, @EXPORT, @EXPORT_OK
+ );
-$VERSION = "1.1607";
+# Backwards compatibility for exportable variable names.
+*verbose = \$Verbose;
+*switches = \$Switches;
+
+$Have_Devel_Corestack = 0;
+
+$VERSION = "1.1702";
$ENV{HARNESS_ACTIVE} = 1;
# Some experimental versions of OS/2 build have broken $?
-my $ignore_exitcode = $ENV{HARNESS_IGNORE_EXITCODE};
+my $Ignore_Exitcode = $ENV{HARNESS_IGNORE_EXITCODE};
+
+my $Files_In_Dir = $ENV{HARNESS_FILELEAK_IN_DIR};
-my $files_in_dir = $ENV{HARNESS_FILELEAK_IN_DIR};
-my $tests_skipped = 0;
-my $subtests_skipped = 0;
+@ISA = ('Exporter');
+@EXPORT = qw(&runtests);
+@EXPORT_OK = qw($verbose $switches);
-@ISA=('Exporter');
-@EXPORT= qw(&runtests);
-@EXPORT_OK= qw($verbose $switches);
+$Verbose = 0;
+$Switches = "-w";
+$Columns = $ENV{HARNESS_COLUMNS} || $ENV{COLUMNS} || 80;
-$verbose = 0;
-$switches = "-w";
-$columns = $ENV{HARNESS_COLUMNS} || $ENV{COLUMNS} || 80;
sub globdir { opendir DIRH, shift; my @f = readdir DIRH; closedir DIRH; @f }
sub runtests {
my(@tests) = @_;
+
+ my($tot, $failedtests) = _runtests(@tests);
+ _show_results($tot, $failedtests);
+
+ return ($tot->{bad} == 0 && $tot->{max}) ;
+}
+
+
+sub _runtests {
+ my(@tests) = @_;
local($|) = 1;
- my($test,$te,$ok,$next,$max,$pct,$totbonus,@failed,%failedtests);
- my $totmax = 0;
- my $totok = 0;
- my $files = 0;
- my $bad = 0;
- my $good = 0;
- my $total = @tests;
+ my(%failedtests);
+
+ # Test-wide totals.
+ my(%tot) = (
+ bonus => 0,
+ max => 0,
+ ok => 0,
+ files => 0,
+ bad => 0,
+ good => 0,
+ tests => scalar @tests,
+ sub_skipped => 0,
+ skipped => 0,
+ bench => 0
+ );
# pass -I flags to children
my $old5lib = $ENV{PERL5LIB};
my $new5lib;
if ($^O eq 'VMS') {
$new5lib = join($Config{path_sep}, grep {!/perl_root/i;} @INC);
- $switches =~ s/-(\S*[A-Z]\S*)/"-$1"/g;
+ $Switches =~ s/-(\S*[A-Z]\S*)/"-$1"/g;
}
else {
$new5lib = join($Config{path_sep}, @INC);
local($ENV{'PERL5LIB'}) = $new5lib;
- my @dir_files = globdir $files_in_dir if defined $files_in_dir;
+ my @dir_files = globdir $Files_In_Dir if defined $Files_In_Dir;
my $t_start = new Benchmark;
- while ($test = shift(@tests)) {
- $te = $test;
- chop($te);
+
+ foreach my $test (@tests) {
+ my $te = $test;
+ chop($te); # XXX chomp?
+
if ($^O eq 'VMS') { $te =~ s/^.*\.t\./[.t./s; }
my $blank = (' ' x 77);
my $leader = "$te" . '.' x (20 - length($te));
my $ml = "";
$ml = "\r$blank\r$leader"
- if -t STDOUT and not $ENV{HARNESS_NOTTY} and not $verbose;
+ if -t STDOUT and not $ENV{HARNESS_NOTTY} and not $Verbose;
print $leader;
- open(my $fh, $test) or print "can't open $test. $!\n";
- my $first = <$fh>;
- my $s = $switches;
- $s .= " $ENV{'HARNESS_PERL_SWITCHES'}"
- if exists $ENV{'HARNESS_PERL_SWITCHES'};
- $s .= join " ", q[ "-T"], map {qq["-I$_"]} @INC
- if $first =~ /^#!.*\bperl.*-\w*T/;
- close($fh) or print "can't close $test. $!\n";
+
+ my $s = _set_switches($test);
+
my $cmd = ($ENV{'HARNESS_COMPILE_TEST'})
? "./perl -I../lib ../utils/perlcc $test "
. "-run 2>> ./compilelog |"
: "$^X $s $test|";
$cmd = "MCR $cmd" if $^O eq 'VMS';
- open($fh, $cmd) or print "can't run $test. $!\n";
- $ok = $next = $max = 0;
- @failed = ();
- my %todo = ();
- my $bonus = 0;
- my $skipped = 0;
- my $skip_reason;
+ open(my $fh, $cmd) or print "can't run $test. $!\n";
+
+ # state of the current test.
+ my %test = (
+ ok => 0,
+ next => 0,
+ max => 0,
+ failed => [],
+ todo => {},
+ bonus => 0,
+ skipped => 0,
+ skip_reason => undef,
+ ml => $ml,
+ );
+
+ my($seen_header, $tests_seen) = (0,0);
while (<$fh>) {
- if( $verbose ){
- print $_;
- }
- if (/^1\.\.([0-9]+) todo([\d\s]+)\;/) {
- $max = $1;
- for (split(/\s+/, $2)) { $todo{$_} = 1; }
- $totmax += $max;
- $files++;
- $next = 1;
- } elsif (/^1\.\.([0-9]+)(\s*\#\s*[Ss]kip\S*(?>\s+)(.+))?/) {
- $max = $1;
- $totmax += $max;
- $files++;
- $next = 1;
- $skip_reason = $3 if not $max and defined $3;
- } elsif ($max && /^(not\s+)?ok\b/) {
- my $this = $next;
- if (/^not ok\s*(\d*)/){
- $this = $1 if $1 > 0;
- print "${ml}NOK $this" if $ml;
- if (!$todo{$this}) {
- push @failed, $this;
- } else {
- $ok++;
- $totok++;
- }
- } elsif (/^ok\s*(\d*) *(\s*\#\s*[Ss]kip\S*(?:(?>\s+)(.+))?)?$/) {
- $this = $1 if $1 > 0;
- print "${ml}ok $this/$max" if $ml;
- $ok++;
- $totok++;
- $skipped++ if defined $2;
- my $reason;
- $reason = 'unknown reason' if defined $2;
- $reason = $3 if defined $3;
- if (defined $reason and defined $skip_reason) {
- # print "was: '$skip_reason' new '$reason'\n";
- $skip_reason = 'various reasons'
- if $skip_reason ne $reason;
- } elsif (defined $reason) {
- $skip_reason = $reason;
- }
- $bonus++, $totbonus++ if $todo{$this};
- } elsif (/^ok\s*(\d*)\s*\#([^\r]*)$/) {
- $this = $1 if $1 > 0;
- print "${ml}ok $this/$max" if $ml;
- $ok++;
- $totok++;
- } else {
- # an ok or not ok not matching the 3 cases above...
- # just ignore it for compatibility with TEST
- next;
- }
- if ($this > $next) {
- # print "Test output counter mismatch [test $this]\n";
- # no need to warn probably
- push @failed, $next..$this-1;
- } elsif ($this < $next) {
- #we have seen more "ok" lines than the number suggests
- print "Confused test output: test $this answered after test ", $next-1, "\n";
- $next = $this;
- }
- $next = $this + 1;
- } elsif (/^Bail out!\s*(.*)/i) { # magic words
- die "FAILED--Further testing stopped" . ($1 ? ": $1\n" : ".\n");
+ if( _parse_header($_, \%test, \%tot) ) {
+ warn "Test header seen twice!\n" if $seen_header;
+
+ $seen_header = 1;
+
+ warn "1..M can only appear at the beginning or end of tests\n"
+ if $tests_seen && $test{max} < $tests_seen;
+ }
+ elsif( _parse_test_line($_, \%test, \%tot) ) {
+ $tests_seen++;
}
+ # else, ignore it.
}
- close($fh); # must close to reap child resource values
- my $wstatus = $ignore_exitcode ? 0 : $?; # Can trust $? ?
- my $estatus;
- $estatus = ($^O eq 'VMS'
- ? eval 'use vmsish "status"; $estatus = $?'
- : $wstatus >> 8);
+
+ my($estatus, $wstatus) = _close_fh($fh);
+
if ($wstatus) {
- my ($failed, $canon, $percent) = ('??', '??');
- printf "${ml}dubious\n\tTest returned status $estatus (wstat %d, 0x%x)\n",
- $wstatus,$wstatus;
- print "\t\t(VMS status is $estatus)\n" if $^O eq 'VMS';
- if (corestatus($wstatus)) { # until we have a wait module
- if ($have_devel_corestack) {
- Devel::CoreStack::stack($^X);
- } else {
- print "\ttest program seems to have generated a core\n";
- }
- }
- $bad++;
- if ($max) {
- if ($next == $max + 1 and not @failed) {
- print "\tafter all the subtests completed successfully\n";
- $percent = 0;
- $failed = 0; # But we do not set $canon!
- } else {
- push @failed, $next..$max;
- $failed = @failed;
- (my $txt, $canon) = canonfailed($max,$skipped,@failed);
- $percent = 100*(scalar @failed)/$max;
- print "DIED. ",$txt;
- }
- }
- $failedtests{$test} = { canon => $canon, max => $max || '??',
- failed => $failed,
- name => $test, percent => $percent,
- estat => $estatus, wstat => $wstatus,
- };
- } elsif ($ok == $max && $next == $max+1) {
- if ($max and $skipped + $bonus) {
+ $failedtests{$test} = _dubious_return(\%test, \%tot,
+ $estatus, $wstatus);
+ # _dubious_return() only sees the state hash, not the script's
+ # name, so record the name here instead of the hash reference.
+ $failedtests{$test}{name} = $test;
+ }
+ elsif ($test{ok} == $test{max} && $test{next} == $test{max}+1) {
+ if ($test{max} and $test{skipped} + $test{bonus}) {
my @msg;
- push(@msg, "$skipped/$max skipped: $skip_reason")
- if $skipped;
- push(@msg, "$bonus/$max unexpectedly succeeded")
- if $bonus;
- print "${ml}ok, ".join(', ', @msg)."\n";
- } elsif ($max) {
- print "${ml}ok\n";
- } elsif (defined $skip_reason) {
- print "skipped: $skip_reason\n";
- $tests_skipped++;
+ push(@msg, "$test{skipped}/$test{max} skipped: $test{skip_reason}")
+ if $test{skipped};
+ push(@msg, "$test{bonus}/$test{max} unexpectedly succeeded")
+ if $test{bonus};
+ print "$test{ml}ok, ".join(', ', @msg)."\n";
+ } elsif ($test{max}) {
+ print "$test{ml}ok\n";
+ } elsif (defined $test{skip_reason}) {
+ print "skipped: $test{skip_reason}\n";
+ $tot{skipped}++;
} else {
print "skipped test on this platform\n";
- $tests_skipped++;
+ $tot{skipped}++;
}
- $good++;
- } elsif ($max) {
- if ($next <= $max) {
- push @failed, $next..$max;
+ $tot{good}++;
+ } elsif ($test{max}) {
+ if ($test{next} <= $test{max}) {
+ push @{$test{failed}}, $test{next}..$test{max};
}
- if (@failed) {
- my ($txt, $canon) = canonfailed($max,$skipped,@failed);
- print "${ml}$txt";
- $failedtests{$test} = { canon => $canon, max => $max,
- failed => scalar @failed,
- name => $test, percent => 100*(scalar @failed)/$max,
- estat => '', wstat => '',
+ if (@{$test{failed}}) {
+ my ($txt, $canon) = canonfailed($test{max},$test{skipped},
+ @{$test{failed}});
+ print "$test{ml}$txt";
+ $failedtests{$test} = { canon => $canon,
+ max => $test{max},
+ failed => scalar @{$test{failed}},
+ name => $test,
+ percent => 100*(scalar @{$test{failed}})/$test{max},
+ estat => '',
+ wstat => '',
};
} else {
- print "Don't know which tests failed: got $ok ok, expected $max\n";
- $failedtests{$test} = { canon => '??', max => $max,
- failed => '??',
- name => $test, percent => undef,
- estat => '', wstat => '',
+ print "Don't know which tests failed: got $test{ok} ok, ".
+ "expected $test{max}\n";
+ $failedtests{$test} = { canon => '??',
+ max => $test{max},
+ failed => '??',
+ name => $test,
+ percent => undef,
+ estat => '',
+ wstat => '',
};
}
- $bad++;
- } elsif ($next == 0) {
+ $tot{bad}++;
+ } elsif ($test{next} == 0) {
print "FAILED before any test output arrived\n";
- $bad++;
- $failedtests{$test} = { canon => '??', max => '??',
- failed => '??',
- name => $test, percent => undef,
- estat => '', wstat => '',
+ $tot{bad}++;
+ $failedtests{$test} = { canon => '??',
+ max => '??',
+ failed => '??',
+ name => $test,
+ percent => undef,
+ estat => '',
+ wstat => '',
};
}
- $subtests_skipped += $skipped;
- if (defined $files_in_dir) {
- my @new_dir_files = globdir $files_in_dir;
+ $tot{sub_skipped} += $test{skipped};
+
+ if (defined $Files_In_Dir) {
+ my @new_dir_files = globdir $Files_In_Dir;
if (@new_dir_files != @dir_files) {
my %f;
@f{@new_dir_files} = (1) x @new_dir_files;
}
}
}
- my $t_total = timediff(new Benchmark, $t_start);
+ $tot{bench} = timediff(new Benchmark, $t_start);
if ($^O eq 'VMS') {
if (defined $old5lib) {
delete $ENV{PERL5LIB};
}
}
- my $bonusmsg = '';
- $bonusmsg = (" ($totbonus subtest".($totbonus>1?'s':'').
- " UNEXPECTEDLY SUCCEEDED)")
- if $totbonus;
- if ($tests_skipped) {
- $bonusmsg .= ", $tests_skipped test" . ($tests_skipped != 1 ? 's' : '');
- if ($subtests_skipped) {
- $bonusmsg .= " and $subtests_skipped subtest"
- . ($subtests_skipped != 1 ? 's' : '');
- }
- $bonusmsg .= ' skipped';
- }
- elsif ($subtests_skipped) {
- $bonusmsg .= ", $subtests_skipped subtest"
- . ($subtests_skipped != 1 ? 's' : '')
- . " skipped";
- }
- if ($bad == 0 && $totmax) {
+
+ return(\%tot, \%failedtests);
+}
+
+
+sub _show_results {
+ my($tot, $failedtests) = @_;
+
+ my $pct;
+ my $bonusmsg = _bonusmsg($tot);
+
+ if ($tot->{bad} == 0 && $tot->{max}) {
print "All tests successful$bonusmsg.\n";
- } elsif ($total==0){
+ } elsif ($tot->{tests}==0){
die "FAILED--no tests were run for some reason.\n";
- } elsif ($totmax==0) {
- my $blurb = $total==1 ? "script" : "scripts";
- die "FAILED--$total test $blurb could be run, alas--no output ever seen\n";
+ } elsif ($tot->{max} == 0) {
+ my $blurb = $tot->{tests}==1 ? "script" : "scripts";
+ die "FAILED--$tot->{tests} test $blurb could be run, ".
+ "alas--no output ever seen\n";
} else {
- $pct = sprintf("%.2f", $good / $total * 100);
+ $pct = sprintf("%.2f", $tot->{good} / $tot->{tests} * 100);
my $subpct = sprintf " %d/%d subtests failed, %.2f%% okay.",
- $totmax - $totok, $totmax, 100*$totok/$totmax;
- # Create formats
- # First, figure out max length of test names
- my $failed_str = "Failed Test";
- my $middle_str = " Status Wstat Total Fail Failed ";
- my $list_str = "List of Failed";
- my $max_namelen = length($failed_str);
- my $script;
- foreach $script (keys %failedtests) {
- $max_namelen =
- (length $failedtests{$script}->{name} > $max_namelen) ?
- length $failedtests{$script}->{name} : $max_namelen;
- }
- my $list_len = $columns - length($middle_str) - $max_namelen;
- if ($list_len < length($list_str)) {
- $list_len = length($list_str);
- $max_namelen = $columns - length($middle_str) - $list_len;
- if ($max_namelen < length($failed_str)) {
- $max_namelen = length($failed_str);
- $columns = $max_namelen + length($middle_str) + $list_len;
- }
- }
-
- my $fmt_top = "format STDOUT_TOP =\n"
- . sprintf("%-${max_namelen}s", $failed_str)
- . $middle_str
- . $list_str . "\n"
- . "-" x $columns
- . "\n.\n";
- my $fmt = "format STDOUT =\n"
- . "@" . "<" x ($max_namelen - 1)
- . " @>> @>>>> @>>>> @>>> ^##.##% "
- . "^" . "<" x ($list_len - 1) . "\n"
- . '{ $curtest->{name}, $curtest->{estat},'
- . ' $curtest->{wstat}, $curtest->{max},'
- . ' $curtest->{failed}, $curtest->{percent},'
- . ' $curtest->{canon}'
- . "\n}\n"
- . "~~" . " " x ($columns - $list_len - 2) . "^"
- . "<" x ($list_len - 1) . "\n"
- . '$curtest->{canon}'
- . "\n.\n";
+ $tot->{max} - $tot->{ok}, $tot->{max},
+ 100*$tot->{ok}/$tot->{max};
- eval $fmt_top;
- die $@ if $@;
- eval $fmt;
- die $@ if $@;
+ my($fmt_top, $fmt) = _create_fmts($failedtests);
# Now write to formats
- for $script (sort keys %failedtests) {
- $curtest = $failedtests{$script};
+ for my $script (sort keys %$failedtests) {
+ $Curtest = $failedtests->{$script};
write;
}
- if ($bad) {
+ if ($tot->{bad}) {
$bonusmsg =~ s/^,\s*//;
print "$bonusmsg.\n" if $bonusmsg;
- die "Failed $bad/$total test scripts, $pct% okay.$subpct\n";
+ die "Failed $tot->{bad}/$tot->{tests} test scripts, $pct% okay.".
+ "$subpct\n";
}
}
- printf("Files=%d, Tests=%d, %s\n", $files, $totmax, timestr($t_total, 'nop'));
- return ($bad == 0 && $totmax) ;
+ printf("Files=%d, Tests=%d, %s\n",
+ $tot->{files}, $tot->{max}, timestr($tot->{bench}, 'nop'));
+}
+
+
+sub _parse_header {
+ my($line, $test, $tot) = @_;
+
+ my $is_header = 0;
+
+ print $line if $Verbose;
+
+ # 1..10 todo 4 7 10;
+ if ($line =~ /^1\.\.([0-9]+) todo([\d\s]+);?/i) {
+ $test->{max} = $1;
+ for (split(/\s+/, $2)) { $test->{todo}{$_} = 1; }
+
+ $tot->{max} += $test->{max};
+ $tot->{files}++;
+
+ $is_header = 1;
+ }
+ # 1..10
+ # 1..0 # skip Why? Because I said so!
+ elsif ($line =~ /^1\.\.([0-9]+)
+ (\s*\#\s*[Ss]kip\S*(?>\s+) (.+))?
+ /x
+ )
+ {
+ $test->{max} = $1;
+ $tot->{max} += $test->{max};
+ $tot->{files}++;
+ $test->{next} = 1 unless $test->{next};
+ $test->{skip_reason} = $3 if not $test->{max} and defined $3;
+
+ $is_header = 1;
+ }
+ else {
+ $is_header = 0;
+ }
+
+ return $is_header;
}
+
+sub _parse_test_line {
+ my($line, $test, $tot) = @_;
+
+ if ($line =~ /^(not\s+)?ok\b/i) {
+ my $this = $test->{next} || 1;
+ # "not ok 23"
+ if ($line =~ /^not ok\s*(\d*)/){ # test failed
+ $this = $1 if length $1 and $1 > 0;
+ print "$test->{ml}NOK $this" if $test->{ml};
+ if (!$test->{todo}{$this}) {
+ push @{$test->{failed}}, $this;
+ } else {
+ $test->{ok}++;
+ $tot->{ok}++;
+ }
+ }
+ # "ok 23 # skip (you're not cleared for that)"
+ elsif ($line =~ /^ok\s*(\d*)\ *
+ (\s*\#\s*[Ss]kip\S*(?:(?>\s+)(.+))?)?
+ /x) # test skipped
+ {
+ $this = $1 if length $1 and $1 > 0;
+ print "$test->{ml}ok $this/$test->{max}" if $test->{ml};
+ $test->{ok}++;
+ $tot->{ok}++;
+ $test->{skipped}++ if defined $2;
+ my $reason;
+ $reason = 'unknown reason' if defined $2;
+ $reason = $3 if defined $3;
+ if (defined $reason and defined $test->{skip_reason}) {
+ # print "was: '$skip_reason' new '$reason'\n";
+ $test->{skip_reason} = 'various reasons'
+ if $test->{skip_reason} ne $reason;
+ } elsif (defined $reason) {
+ $test->{skip_reason} = $reason;
+ }
+ $test->{bonus}++, $tot->{bonus}++ if $test->{todo}{$this};
+ }
+ # XXX ummm... dunno
+ elsif ($line =~ /^ok\s*(\d*)\s*\#([^\r]*)$/) { # XXX multiline ok?
+ $this = $1 if $1 > 0;
+ print "$test->{ml}ok $this/$test->{max}" if $test->{ml};
+ $test->{ok}++;
+ $tot->{ok}++;
+ }
+ else {
+ # an ok or not ok not matching the 3 cases above...
+ # just ignore it for compatibility with TEST
+ return;
+ }
+
+ if ($this > $test->{next}) {
+ # print "Test output counter mismatch [test $this]\n";
+ # no need to warn probably
+ push @{$test->{failed}}, $test->{next}..$this-1;
+ }
+ elsif ($this < $test->{next}) {
+ #we have seen more "ok" lines than the number suggests
+ print "Confused test output: test $this answered after ".
+ "test ", $test->{next}-1, "\n";
+ $test->{next} = $this;
+ }
+ $test->{next} = $this + 1;
+
+ }
+ elsif ($line =~ /^Bail out!\s*(.*)/i) { # magic words
+ die "FAILED--Further testing stopped" .
+ ($1 ? ": $1\n" : ".\n");
+ }
+}
+
+
+sub _bonusmsg {
+ my($tot) = @_;
+
+ my $bonusmsg = '';
+ $bonusmsg = (" ($tot->{bonus} subtest".($tot->{bonus} > 1 ? 's' : '').
+ " UNEXPECTEDLY SUCCEEDED)")
+ if $tot->{bonus};
+
+ if ($tot->{skipped}) {
+ $bonusmsg .= ", $tot->{skipped} test"
+ . ($tot->{skipped} != 1 ? 's' : '');
+ if ($tot->{sub_skipped}) {
+ $bonusmsg .= " and $tot->{sub_skipped} subtest"
+ . ($tot->{sub_skipped} != 1 ? 's' : '');
+ }
+ $bonusmsg .= ' skipped';
+ }
+ elsif ($tot->{sub_skipped}) {
+ $bonusmsg .= ", $tot->{sub_skipped} subtest"
+ . ($tot->{sub_skipped} != 1 ? 's' : '')
+ . " skipped";
+ }
+
+ return $bonusmsg;
+}
+
+# VMS has some subtle nastiness with closing the test files.
+sub _close_fh {
+ my($fh) = shift;
+
+ close($fh); # must close to reap child resource values
+
+ my $wstatus = $Ignore_Exitcode ? 0 : $?; # Can trust $? ?
+ my $estatus;
+ $estatus = ($^O eq 'VMS'
+ ? eval 'use vmsish "status"; $estatus = $?'
+ : $wstatus >> 8);
+
+ return($estatus, $wstatus);
+}
+
+
+# Set up the command-line switches to run perl as.
+sub _set_switches {
+ my($test) = shift;
+
+ open(my $fh, $test) or print "can't open $test. $!\n";
+ my $first = <$fh>;
+ my $s = $Switches;
+ $s .= " $ENV{'HARNESS_PERL_SWITCHES'}"
+ if exists $ENV{'HARNESS_PERL_SWITCHES'};
+ $s .= join " ", q[ "-T"], map {qq["-I$_"]} @INC
+ if $first =~ /^#!.*\bperl.*-\w*T/;
+
+ close($fh) or print "can't close $test. $!\n";
+
+ return $s;
+}
+
+
+# Test program go boom.
+sub _dubious_return {
+ my($test, $tot, $estatus, $wstatus) = @_;
+ my ($failed, $canon, $percent) = ('??', '??');
+
+ printf "$test->{ml}dubious\n\tTest returned status $estatus ".
+ "(wstat %d, 0x%x)\n",
+ $wstatus,$wstatus;
+ print "\t\t(VMS status is $estatus)\n" if $^O eq 'VMS';
+
+ if (corestatus($wstatus)) { # until we have a wait module
+ if ($Have_Devel_Corestack) {
+ Devel::CoreStack::stack($^X);
+ } else {
+ print "\ttest program seems to have generated a core\n";
+ }
+ }
+
+ $tot->{bad}++;
+
+ if ($test->{max}) {
+ if ($test->{next} == $test->{max} + 1 and not @{$test->{failed}}) {
+ print "\tafter all the subtests completed successfully\n";
+ $percent = 0;
+ $failed = 0; # But we do not set $canon!
+ }
+ else {
+ push @{$test->{failed}}, $test->{next}..$test->{max};
+ $failed = @{$test->{failed}};
+ (my $txt, $canon) = canonfailed($test->{max},$test->{skipped},@{$test->{failed}});
+ $percent = 100*(scalar @{$test->{failed}})/$test->{max};
+ print "DIED. ",$txt;
+ }
+ }
+
+ return { canon => $canon, max => $test->{max} || '??',
+ failed => $failed,
+ name => $test, percent => $percent,
+ estat => $estatus, wstat => $wstatus,
+ };
+}
+
+
+sub _garbled_output {
+ my($gibberish) = shift;
+ warn "Confusing test output: '$gibberish'\n";
+}
+
+
+sub _create_fmts {
+ my($failedtests) = @_;
+
+ my $failed_str = "Failed Test";
+ my $middle_str = " Status Wstat Total Fail Failed ";
+ my $list_str = "List of Failed";
+
+ # Figure out our longest name string for formatting purposes.
+ my $max_namelen = length($failed_str);
+ foreach my $script (keys %$failedtests) {
+ my $namelen = length $failedtests->{$script}->{name};
+ $max_namelen = $namelen if $namelen > $max_namelen;
+ }
+
+ my $list_len = $Columns - length($middle_str) - $max_namelen;
+ if ($list_len < length($list_str)) {
+ $list_len = length($list_str);
+ $max_namelen = $Columns - length($middle_str) - $list_len;
+ if ($max_namelen < length($failed_str)) {
+ $max_namelen = length($failed_str);
+ $Columns = $max_namelen + length($middle_str) + $list_len;
+ }
+ }
+
+ my $fmt_top = "format STDOUT_TOP =\n"
+ . sprintf("%-${max_namelen}s", $failed_str)
+ . $middle_str
+ . $list_str . "\n"
+ . "-" x $Columns
+ . "\n.\n";
+
+ my $fmt = "format STDOUT =\n"
+ . "@" . "<" x ($max_namelen - 1)
+ . " @>> @>>>> @>>>> @>>> ^##.##% "
+ . "^" . "<" x ($list_len - 1) . "\n"
+ . '{ $Curtest->{name}, $Curtest->{estat},'
+ . ' $Curtest->{wstat}, $Curtest->{max},'
+ . ' $Curtest->{failed}, $Curtest->{percent},'
+ . ' $Curtest->{canon}'
+ . "\n}\n"
+ . "~~" . " " x ($Columns - $list_len - 2) . "^"
+ . "<" x ($list_len - 1) . "\n"
+ . '$Curtest->{canon}'
+ . "\n.\n";
+
+ eval $fmt_top;
+ die $@ if $@;
+ eval $fmt;
+ die $@ if $@;
+
+ return($fmt_top, $fmt);
+}
+
+
my $tried_devel_corestack;
sub corestatus {
my($st) = @_;
eval {require 'wait.ph'};
my $ret = defined &WCOREDUMP ? WCOREDUMP($st) : $st & 0200;
- eval { require Devel::CoreStack; $have_devel_corestack++ }
+ eval { require Devel::CoreStack; $Have_Devel_Corestack++ }
unless $tried_devel_corestack++;
$ret;
my $ender = 's' x ($skipped > 1);
my $good = $max - $failed - $skipped;
my $goodper = sprintf("%.2f",100*($good/$max));
- push @result, " (-$skipped skipped test$ender: $good okay, $goodper%)" if $skipped;
+ push @result, " (-$skipped skipped test$ender: $good okay, ".
+ "$goodper%)"
+ if $skipped;
push @result, "\n";
my $txt = join "", @result;
($txt, $canon);
=head1 DESCRIPTION
-(By using the L<Test> module, you can write test scripts without
+(By using the Test module, you can write test scripts without
knowing the exact output this module expects. However, if you need to
know the specifics, read on!)
=head2 The test script output
+=over 4
+
+=item B<1..M>
+
+This header tells how many tests there will be. It should be the
+first line output by your test program (but it's okay if it's preceded
+by comments).
+
+In certain instances, you may not know how many tests you will
+ultimately be running. In this case, it is permitted (but not
+encouraged) for the 1..M header to appear as the B<last> line output
+by your test (again, it can be followed by further comments). But we
+strongly encourage you to put it first.
+
+Under B<no> circumstances should 1..M appear in the middle of your
+output or more than once.
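+
+For example (a schematic sketch with illustrative numbers), a typical
+run begins with the header and is followed by the test lines:
+
+    1..3
+    ok 1
+    ok 2
+    ok 3
+
+A script that cannot predict its plan in advance may instead print
+C<1..3> as its very last line.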
+
+
+=item B<'ok', 'not ok'. Ok?>
+
Any output from the testscript to standard error is ignored and
bypassed, and thus will be seen by the user. Lines written to standard
output containing C</^(not\s+)?ok\b/> are interpreted as feedback for
runtests(). All other lines are discarded.
-It is tolerated if the test numbers after C<ok> are omitted. In this
-case Test::Harness maintains temporarily its own counter until the
-script supplies test numbers again. So the following test script
+C</^not ok/> indicates a failed test. C</^ok/> is a successful test.
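+
+For instance, a test script does not have to use any particular
+module; a minimal hand-rolled script (purely illustrative) can print
+these lines itself:
+
+    print "1..2\n";
+    print 1 + 1 == 2      ? "ok 1\n" : "not ok 1\n";
+    print "perl" =~ /\w+/ ? "ok 2\n" : "not ok 2\n";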
+
+
+=item B<test numbers>
+
+Perl normally expects the 'ok' or 'not ok' to be followed by a test
+number. It is tolerated if the test numbers after 'ok' are
+omitted. In this case Test::Harness temporarily maintains its own
+counter until the script supplies test numbers again. So the following
+test script
print <<END;
1..6
FAILED tests 1, 3, 6
Failed 3/6 tests, 50.00% okay
+
+=item B<$Test::Harness::verbose>
+
The global variable $Test::Harness::verbose is exportable and can be
used to let runtests() display the standard output of the script
without altering the behavior otherwise.
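+
+A minimal usage sketch (the test file names below are placeholders):
+
+    use Test::Harness qw(&runtests $verbose);
+
+    $verbose = 1;
+    runtests('t/basic.t', 't/regression.t');
+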
+=item B<$Test::Harness::switches>
+
The global variable $Test::Harness::switches is exportable and can be
used to set perl command line options used for running the test
script(s). The default value is C<-w>.
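+
+For example, to run every script with warnings enabled and an extra
+include path (a sketch; the switch values are illustrative):
+
+    use Test::Harness qw(&runtests $switches);
+
+    $switches = "-w -Ilib";
+    runtests(glob('t/*.t'));
+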
+=item B<Skipping tests>
+
If the standard output line contains substring C< # Skip> (with
variations in spacing and case) after C<ok> or C<ok NUMBER>, it is
-counted as a skipped test. In no other circumstance is anything
-allowed to follow C<ok> or C<ok NUMBER>. If the whole testscript
-succeeds, the count of skipped tests is included in the generated
-output.
+counted as a skipped test. If the whole testscript succeeds, the
+count of skipped tests is included in the generated output.
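+
+A skipped subtest therefore looks something like this (the reason
+text is illustrative):
+
+    ok 13 # Skip no AF_INET6 support on this platform
+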
C<Test::Harness> reports the text after C< # Skip\S*\s+> as a reason
for skipping. Similarly, one can include an explanation in a
-C<1..0> line emitted if the test is skipped completely:
+C<1..0> line emitted if the test script is skipped completely:
1..0 # Skipped: no leverage found
+=item B<Bail out!>
+
As an emergency measure, a test script can decide that further tests
are useless (e.g. missing dependencies) and testing should stop
immediately. In that case the test script prints the magic words

  Bail out!

to standard output. Any message after these words will be displayed by
C<Test::Harness> as the reason why testing is stopped.
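+
+For example, such a run might produce (the message is illustrative):
+
+    1..42
+    ok 1
+    Bail out! Couldn't connect to the test database.
+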
+=item B<Comments>
+
+Additional comments may be put into the testing output on their own
+lines. Comment lines should begin with a '#'; Test::Harness will
+ignore them.
+
+ ok 1
+ # Life is good, the sun is shining, RAM is cheap.
+ not ok 2
+ # got 'Bush' expected 'Gore'
+
+
=head1 EXPORT
C<&runtests> is exported by Test::Harness by default.
+C<$verbose> and C<$switches> are exported upon request.
+
+
=head1 DIAGNOSTICS
=over 4
=item C<Test returned status %d (wstat %d)>
-Scripts that return a non-zero exit status, both C<$? E<gt>E<gt> 8> and C<$?> are
-printed in a message similar to the above.
+For scripts that return a non-zero exit status, both C<$? E<gt>E<gt> 8>
+and C<$?> are printed in a message similar to the above.
=item C<Failed 1 test, %.2f%% okay. %s>