+# -*- Mode: cperl; cperl-indent-level: 4 -*-
package Test::Harness;
-BEGIN {require 5.002;}
+use 5.005_64;
use Exporter;
use Benchmark;
use Config;
-use FileHandle;
use strict;
-use vars qw($VERSION $verbose $switches $have_devel_corestack $curtest
- @ISA @EXPORT @EXPORT_OK);
-$have_devel_corestack = 0;
-
-$VERSION = "1.1502";
-
-@ISA=('Exporter');
-@EXPORT= qw(&runtests);
-@EXPORT_OK= qw($verbose $switches);
-
-format STDOUT_TOP =
-Failed Test Status Wstat Total Fail Failed List of failed
-------------------------------------------------------------------------------
-.
-
-format STDOUT =
-@<<<<<<<<<<<<<< @>> @>>>> @>>>> @>>> ^##.##% @<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
-{ $curtest->{name},
- $curtest->{estat},
- $curtest->{wstat},
- $curtest->{max},
- $curtest->{failed},
- $curtest->{percent},
- $curtest->{canon}
-}
-.
+our($VERSION, $Verbose, $Switches, $Have_Devel_Corestack, $Curtest,
+ $Columns, $verbose, $switches,
+ @ISA, @EXPORT, @EXPORT_OK
+ );
+
+# Backwards compatibility for exportable variable names.
+*verbose = \$Verbose;
+*switches = \$Switches;
+
+$Have_Devel_Corestack = 0;
+
+$VERSION = "1.1702";
+
+$ENV{HARNESS_ACTIVE} = 1;
+
+# Some experimental versions of OS/2 build have broken $?
+my $Ignore_Exitcode = $ENV{HARNESS_IGNORE_EXITCODE};
+
+my $Files_In_Dir = $ENV{HARNESS_FILELEAK_IN_DIR};
+
+
+@ISA = ('Exporter');
+@EXPORT = qw(&runtests);
+@EXPORT_OK = qw($verbose $switches);
+
+$Verbose = 0;
+$Switches = "-w";
+$Columns = $ENV{HARNESS_COLUMNS} || $ENV{COLUMNS} || 80;
-$verbose = 0;
-$switches = "-w";
+sub globdir { opendir DIRH, shift; my @f = readdir DIRH; closedir DIRH; @f }
sub runtests {
my(@tests) = @_;
+
+ my($tot, $failedtests) = _runtests(@tests);
+ _show_results($tot, $failedtests);
+
+ return ($tot->{bad} == 0 && $tot->{max}) ;
+}
+
+
+sub _runtests {
+ my(@tests) = @_;
local($|) = 1;
- my($test,$te,$ok,$next,$max,$pct,$totok,@failed,%failedtests);
- my $totmax = 0;
- my $files = 0;
- my $bad = 0;
- my $good = 0;
- my $total = @tests;
+ my(%failedtests);
+
+ # Test-wide totals.
+ my(%tot) = (
+ bonus => 0,
+ max => 0,
+ ok => 0,
+ files => 0,
+ bad => 0,
+ good => 0,
+ tests => scalar @tests,
+ sub_skipped => 0,
+ skipped => 0,
+ bench => 0
+ );
# pass -I flags to children
my $old5lib = $ENV{PERL5LIB};
- local($ENV{'PERL5LIB'}) = join($Config{path_sep}, @INC);
- if ($^O eq 'VMS') { $switches =~ s/-(\S*[A-Z]\S*)/"-$1"/g }
+ # VMS has a 255-byte limit on the length of %ENV entries, so
+ # toss the ones that involve perl_root, the install location
+ # for VMS
+ my $new5lib;
+ if ($^O eq 'VMS') {
+ $new5lib = join($Config{path_sep}, grep {!/perl_root/i;} @INC);
+ $Switches =~ s/-(\S*[A-Z]\S*)/"-$1"/g;
+ }
+ else {
+ $new5lib = join($Config{path_sep}, @INC);
+ }
+
+ local($ENV{'PERL5LIB'}) = $new5lib;
+ my @dir_files = globdir $Files_In_Dir if defined $Files_In_Dir;
my $t_start = new Benchmark;
- while ($test = shift(@tests)) {
- $te = $test;
- chop($te);
- if ($^O eq 'VMS') { $te =~ s/^.*\.t\./[.t./; }
- print "$te" . '.' x (20 - length($te));
- my $fh = new FileHandle;
- $fh->open($test) or print "can't open $test. $!\n";
- my $first = <$fh>;
- my $s = $switches;
- $s .= q[ "-T"] if $first =~ /^#!.*\bperl.*-\w*T/;
- $fh->close or print "can't close $test. $!\n";
- my $cmd = "$^X $s $test|";
+
+ foreach my $test (@tests) {
+ my $te = $test;
+ chop($te); # XXX chomp?
+
+ if ($^O eq 'VMS') { $te =~ s/^.*\.t\./[.t./s; }
+ my $blank = (' ' x 77);
+ my $leader = "$te" . '.' x (20 - length($te));
+ my $ml = "";
+ $ml = "\r$blank\r$leader"
+ if -t STDOUT and not $ENV{HARNESS_NOTTY} and not $Verbose;
+ print $leader;
+
+ my $s = _set_switches($test);
+
+ my $cmd = ($ENV{'HARNESS_COMPILE_TEST'})
+ ? "./perl -I../lib ../utils/perlcc $test "
+ . "-run 2>> ./compilelog |"
+ : "$^X $s $test|";
$cmd = "MCR $cmd" if $^O eq 'VMS';
- $fh->open($cmd) or print "can't run $test. $!\n";
- $ok = $next = $max = 0;
- @failed = ();
+ open(my $fh, $cmd) or print "can't run $test. $!\n";
+
+ # state of the current test.
+ my %test = (
+ ok => 0,
+ next => 0,
+ max => 0,
+ failed => [],
+ todo => {},
+ bonus => 0,
+ skipped => 0,
+ skip_reason => undef,
+ ml => $ml,
+ );
+
+ my($seen_header, $tests_seen) = (0,0);
while (<$fh>) {
- if( $verbose ){
- print $_;
- }
- if (/^1\.\.([0-9]+)/) {
- $max = $1;
- $totmax += $max;
- $files++;
- $next = 1;
- } elsif ($max && /^(not\s+)?ok\b/) {
- my $this = $next;
- if (/^not ok\s*(\d*)/){
- $this = $1 if $1 > 0;
- push @failed, $this;
- } elsif (/^ok\s*(\d*)/) {
- $this = $1 if $1 > 0;
- $ok++;
- $totok++;
- }
- if ($this > $next) {
- # warn "Test output counter mismatch [test $this]\n";
- # no need to warn probably
- push @failed, $next..$this-1;
- } elsif ($this < $next) {
- #we have seen more "ok" lines than the number suggests
- warn "Confused test output: test $this answered after test ", $next-1, "\n";
- $next = $this;
- }
- $next = $this + 1;
- }
+ if( _parse_header($_, \%test, \%tot) ) {
+ warn "Test header seen twice!\n" if $seen_header;
+
+ $seen_header = 1;
+
+ warn "1..M can only appear at the beginning or end of tests\n"
+ if $tests_seen && $test{max} < $tests_seen;
+ }
+ elsif( _parse_test_line($_, \%test, \%tot) ) {
+ $tests_seen++;
+ }
+ # else, ignore it.
}
- $fh->close; # must close to reap child resource values
- my $wstatus = $?;
- my $estatus;
- $estatus = ($^O eq 'VMS'
- ? eval 'use vmsish "status"; $estatus = $?'
- : $wstatus >> 8);
+
+ my($estatus, $wstatus) = _close_fh($fh);
+
if ($wstatus) {
- my ($failed, $canon, $percent) = ('??', '??');
- print "dubious\n\tTest returned status $estatus (wstat $wstatus)\n";
- print "\t\t(VMS status is $estatus)\n" if $^O eq 'VMS';
- if (corestatus($wstatus)) { # until we have a wait module
- if ($have_devel_corestack) {
- Devel::CoreStack::stack($^X);
- } else {
- print "\ttest program seems to have generated a core\n";
- }
- }
- $bad++;
- if ($max) {
- if ($next == $max + 1 and not @failed) {
- print "\tafter all the subtests completed successfully\n";
- $percent = 0;
- $failed = 0; # But we do not set $canon!
- } else {
- push @failed, $next..$max;
- $failed = @failed;
- (my $txt, $canon) = canonfailed($max,@failed);
- $percent = 100*(scalar @failed)/$max;
- print "DIED. ",$txt;
- }
- }
- $failedtests{$test} = { canon => $canon, max => $max || '??',
- failed => $failed,
- name => $test, percent => $percent,
- estat => $estatus, wstat => $wstatus,
- };
- } elsif ($ok == $max && $next == $max+1) {
- if ($max) {
- print "ok\n";
+ $failedtests{$test} = _dubious_return(\%test, \%tot,
+ $estatus, $wstatus);
+ }
+ elsif ($test{ok} == $test{max} && $test{next} == $test{max}+1) {
+ if ($test{max} and $test{skipped} + $test{bonus}) {
+ my @msg;
+ push(@msg, "$test{skipped}/$test{max} skipped: $test{skip_reason}")
+ if $test{skipped};
+ push(@msg, "$test{bonus}/$test{max} unexpectedly succeeded")
+ if $test{bonus};
+ print "$test{ml}ok, ".join(', ', @msg)."\n";
+ } elsif ($test{max}) {
+ print "$test{ml}ok\n";
+ } elsif (defined $test{skip_reason}) {
+ print "skipped: $test{skip_reason}\n";
+ $tot{skipped}++;
} else {
- print "skipping test on this platform\n";
+ print "skipped test on this platform\n";
+ $tot{skipped}++;
}
- $good++;
- } elsif ($max) {
- if ($next <= $max) {
- push @failed, $next..$max;
+ $tot{good}++;
+ } elsif ($test{max}) {
+ if ($test{next} <= $test{max}) {
+ push @{$test{failed}}, $test{next}..$test{max};
}
- if (@failed) {
- my ($txt, $canon) = canonfailed($max,@failed);
- print $txt;
- $failedtests{$test} = { canon => $canon, max => $max,
- failed => scalar @failed,
- name => $test, percent => 100*(scalar @failed)/$max,
- estat => '', wstat => '',
+ if (@{$test{failed}}) {
+ my ($txt, $canon) = canonfailed($test{max},$test{skipped},
+ @{$test{failed}});
+ print "$test{ml}$txt";
+ $failedtests{$test} = { canon => $canon,
+ max => $test{max},
+ failed => scalar @{$test{failed}},
+ name => $test,
+ percent => 100*(scalar @{$test{failed}})/$test{max},
+ estat => '',
+ wstat => '',
};
} else {
- print "Don't know which tests failed: got $ok ok, expected $max\n";
- $failedtests{$test} = { canon => '??', max => $max,
- failed => '??',
- name => $test, percent => undef,
- estat => '', wstat => '',
+ print "Don't know which tests failed: got $test{ok} ok, ".
+ "expected $test{max}\n";
+ $failedtests{$test} = { canon => '??',
+ max => $test{max},
+ failed => '??',
+ name => $test,
+ percent => undef,
+ estat => '',
+ wstat => '',
};
}
- $bad++;
- } elsif ($next == 0) {
+ $tot{bad}++;
+ } elsif ($test{next} == 0) {
print "FAILED before any test output arrived\n";
- $bad++;
- $failedtests{$test} = { canon => '??', max => '??',
- failed => '??',
- name => $test, percent => undef,
- estat => '', wstat => '',
+ $tot{bad}++;
+ $failedtests{$test} = { canon => '??',
+ max => '??',
+ failed => '??',
+ name => $test,
+ percent => undef,
+ estat => '',
+ wstat => '',
};
}
+ $tot{sub_skipped} += $test{skipped};
+
+ if (defined $Files_In_Dir) {
+ my @new_dir_files = globdir $Files_In_Dir;
+ if (@new_dir_files != @dir_files) {
+ my %f;
+ @f{@new_dir_files} = (1) x @new_dir_files;
+ delete @f{@dir_files};
+ my @f = sort keys %f;
+ print "LEAKED FILES: @f\n";
+ @dir_files = @new_dir_files;
+ }
+ }
}
- my $t_total = timediff(new Benchmark, $t_start);
-
+ $tot{bench} = timediff(new Benchmark, $t_start);
+
if ($^O eq 'VMS') {
if (defined $old5lib) {
$ENV{PERL5LIB} = $old5lib;
delete $ENV{PERL5LIB};
}
}
- if ($bad == 0 && $totmax) {
- print "All tests successful.\n";
- } elsif ($total==0){
+
+ return(\%tot, \%failedtests);
+}
+
+
+sub _show_results {
+ my($tot, $failedtests) = @_;
+
+ my $pct;
+ my $bonusmsg = _bonusmsg($tot);
+
+ if ($tot->{bad} == 0 && $tot->{max}) {
+ print "All tests successful$bonusmsg.\n";
+ } elsif ($tot->{tests}==0){
die "FAILED--no tests were run for some reason.\n";
- } elsif ($totmax==0) {
- my $blurb = $total==1 ? "script" : "scripts";
- die "FAILED--$total test $blurb could be run, alas--no output ever seen\n";
+ } elsif ($tot->{max} == 0) {
+ my $blurb = $tot->{tests}==1 ? "script" : "scripts";
+ die "FAILED--$tot->{tests} test $blurb could be run, ".
+ "alas--no output ever seen\n";
} else {
- $pct = sprintf("%.2f", $good / $total * 100);
+ $pct = sprintf("%.2f", $tot->{good} / $tot->{tests} * 100);
my $subpct = sprintf " %d/%d subtests failed, %.2f%% okay.",
- $totmax - $totok, $totmax, 100*$totok/$totmax;
- my $script;
- for $script (sort keys %failedtests) {
- $curtest = $failedtests{$script};
+ $tot->{max} - $tot->{ok}, $tot->{max},
+ 100*$tot->{ok}/$tot->{max};
+
+ my($fmt_top, $fmt) = _create_fmts($failedtests);
+
+ # Now write to formats
+ for my $script (sort keys %$failedtests) {
+ $Curtest = $failedtests->{$script};
write;
}
- if ($bad) {
- die "Failed $bad/$total test scripts, $pct% okay.$subpct\n";
+ if ($tot->{bad}) {
+ $bonusmsg =~ s/^,\s*//;
+ print "$bonusmsg.\n" if $bonusmsg;
+ die "Failed $tot->{bad}/$tot->{tests} test scripts, $pct% okay.".
+ "$subpct\n";
+ }
+ }
+
+ printf("Files=%d, Tests=%d, %s\n",
+ $tot->{files}, $tot->{max}, timestr($tot->{bench}, 'nop'));
+}
+
+
+sub _parse_header {
+ my($line, $test, $tot) = @_;
+
+ my $is_header = 0;
+
+ print $line if $Verbose;
+
+ # 1..10 todo 4 7 10;
+ if ($line =~ /^1\.\.([0-9]+) todo([\d\s]+);?/i) {
+ $test->{max} = $1;
+ for (split(/\s+/, $2)) { $test->{todo}{$_} = 1; }
+
+ $tot->{max} += $test->{max};
+ $tot->{files}++;
+
+ $is_header = 1;
+ }
+ # 1..10
+ # 1..0 # skip Why? Because I said so!
+ elsif ($line =~ /^1\.\.([0-9]+)
+ (\s*\#\s*[Ss]kip\S*(?>\s+) (.+))?
+ /x
+ )
+ {
+ $test->{max} = $1;
+ $tot->{max} += $test->{max};
+ $tot->{files}++;
+ $test->{next} = 1 unless $test->{next};
+ $test->{skip_reason} = $3 if not $test->{max} and defined $3;
+
+ $is_header = 1;
+ }
+ else {
+ $is_header = 0;
+ }
+
+ return $is_header;
+}
+
+
+sub _parse_test_line {
+ my($line, $test, $tot) = @_;
+
+ if ($line =~ /^(not\s+)?ok\b/i) {
+ my $this = $test->{next} || 1;
+ # "not ok 23"
+ if ($line =~ /^not ok\s*(\d*)/){ # test failed
+ $this = $1 if length $1 and $1 > 0;
+ print "$test->{ml}NOK $this" if $test->{ml};
+ if (!$test->{todo}{$this}) {
+ push @{$test->{failed}}, $this;
+ } else {
+ $test->{ok}++;
+ $tot->{ok}++;
+ }
+ }
+ # "ok 23 # skip (you're not cleared for that)"
+ elsif ($line =~ /^ok\s*(\d*)\ *
+ (\s*\#\s*[Ss]kip\S*(?:(?>\s+)(.+))?)?
+ /x) # test skipped
+ {
+ $this = $1 if length $1 and $1 > 0;
+ print "$test->{ml}ok $this/$test->{max}" if $test->{ml};
+ $test->{ok}++;
+ $tot->{ok}++;
+ $test->{skipped}++ if defined $2;
+ my $reason;
+ $reason = 'unknown reason' if defined $2;
+ $reason = $3 if defined $3;
+ if (defined $reason and defined $test->{skip_reason}) {
+ # print "was: '$skip_reason' new '$reason'\n";
+ $test->{skip_reason} = 'various reasons'
+ if $test->{skip_reason} ne $reason;
+ } elsif (defined $reason) {
+ $test->{skip_reason} = $reason;
+ }
+ $test->{bonus}++, $tot->{bonus}++ if $test->{todo}{$this};
+ }
+ # XXX ummm... dunno
+ elsif ($line =~ /^ok\s*(\d*)\s*\#([^\r]*)$/) { # XXX multiline ok?
+ $this = $1 if $1 > 0;
+ print "$test->{ml}ok $this/$test->{max}" if $test->{ml};
+ $test->{ok}++;
+ $tot->{ok}++;
+ }
+ else {
+ # an ok or not ok not matching the 3 cases above...
+ # just ignore it for compatibility with TEST
+ next;
+ }
+
+ if ($this > $test->{next}) {
+ # print "Test output counter mismatch [test $this]\n";
+ # no need to warn probably
+ push @{$test->{failed}}, $test->{next}..$this-1;
+ }
+ elsif ($this < $test->{next}) {
+ #we have seen more "ok" lines than the number suggests
+ print "Confused test output: test $this answered after ".
+ "test ", $test->{next}-1, "\n";
+ $test->{next} = $this;
+ }
+ $test->{next} = $this + 1;
+
+ }
+ elsif ($line =~ /^Bail out!\s*(.*)/i) { # magic words
+ die "FAILED--Further testing stopped" .
+ ($1 ? ": $1\n" : ".\n");
+ }
+}
+
+
+sub _bonusmsg {
+ my($tot) = @_;
+
+ my $bonusmsg = '';
+ $bonusmsg = (" ($tot->{bonus} subtest".($tot->{bonus} > 1 ? 's' : '').
+ " UNEXPECTEDLY SUCCEEDED)")
+ if $tot->{bonus};
+
+ if ($tot->{skipped}) {
+ $bonusmsg .= ", $tot->{skipped} test"
+ . ($tot->{skipped} != 1 ? 's' : '');
+ if ($tot->{sub_skipped}) {
+ $bonusmsg .= " and $tot->{sub_skipped} subtest"
+ . ($tot->{sub_skipped} != 1 ? 's' : '');
}
+ $bonusmsg .= ' skipped';
+ }
+ elsif ($tot->{sub_skipped}) {
+ $bonusmsg .= ", $tot->{sub_skipped} subtest"
+ . ($tot->{sub_skipped} != 1 ? 's' : '')
+ . " skipped";
+ }
+
+ return $bonusmsg;
+}
+
+# VMS has some subtle nastiness with closing the test files.
+sub _close_fh {
+ my($fh) = shift;
+
+ close($fh); # must close to reap child resource values
+
+ my $wstatus = $Ignore_Exitcode ? 0 : $?; # Can trust $? ?
+ my $estatus;
+ $estatus = ($^O eq 'VMS'
+ ? eval 'use vmsish "status"; $estatus = $?'
+ : $wstatus >> 8);
+
+ return($estatus, $wstatus);
+}
+
+
+# Set up the command-line switches to run perl as.
+sub _set_switches {
+ my($test) = shift;
+
+ open(my $fh, $test) or print "can't open $test. $!\n";
+ my $first = <$fh>;
+ my $s = $Switches;
+ $s .= " $ENV{'HARNESS_PERL_SWITCHES'}"
+ if exists $ENV{'HARNESS_PERL_SWITCHES'};
+ $s .= join " ", q[ "-T"], map {qq["-I$_"]} @INC
+ if $first =~ /^#!.*\bperl.*-\w*T/;
+
+ close($fh) or print "can't close $test. $!\n";
+
+ return $s;
+}
+
+
+# Test program go boom.
+sub _dubious_return {
+ my($test, $tot, $estatus, $wstatus) = @_;
+ my ($failed, $canon, $percent) = ('??', '??');
+
+ printf "$test->{ml}dubious\n\tTest returned status $estatus ".
+ "(wstat %d, 0x%x)\n",
+ $wstatus,$wstatus;
+ print "\t\t(VMS status is $estatus)\n" if $^O eq 'VMS';
+
+ if (corestatus($wstatus)) { # until we have a wait module
+ if ($Have_Devel_Corestack) {
+ Devel::CoreStack::stack($^X);
+ } else {
+ print "\ttest program seems to have generated a core\n";
+ }
}
- printf("Files=%d, Tests=%d, %s\n", $files, $totmax, timestr($t_total, 'nop'));
- return ($bad == 0 && $totmax) ;
+ $tot->{bad}++;
+
+ if ($test->{max}) {
+ if ($test->{next} == $test->{max} + 1 and not @{$test->{failed}}) {
+ print "\tafter all the subtests completed successfully\n";
+ $percent = 0;
+ $failed = 0; # But we do not set $canon!
+ }
+ else {
+ push @{$test->{failed}}, $test->{next}..$test->{max};
+ $failed = @{$test->{failed}};
+ (my $txt, $canon) = canonfailed($test->{max},$test->{skipped},@{$test->{failed}});
+ $percent = 100*(scalar @{$test->{failed}})/$test->{max};
+ print "DIED. ",$txt;
+ }
+ }
+
+ return { canon => $canon, max => $test->{max} || '??',
+ failed => $failed,
+ name => $test, percent => $percent,
+ estat => $estatus, wstat => $wstatus,
+ };
}
+
+sub _garbled_output {
+ my($gibberish) = shift;
+ warn "Confusing test output: '$gibberish'\n";
+}
+
+
+sub _create_fmts {
+ my($failedtests) = @_;
+
+ my $failed_str = "Failed Test";
+ my $middle_str = " Status Wstat Total Fail Failed ";
+ my $list_str = "List of Failed";
+
+ # Figure out our longest name string for formatting purposes.
+ my $max_namelen = length($failed_str);
+ foreach my $script (keys %$failedtests) {
+ my $namelen = length $failedtests->{$script}->{name};
+ $max_namelen = $namelen if $namelen > $max_namelen;
+ }
+
+ my $list_len = $Columns - length($middle_str) - $max_namelen;
+ if ($list_len < length($list_str)) {
+ $list_len = length($list_str);
+ $max_namelen = $Columns - length($middle_str) - $list_len;
+ if ($max_namelen < length($failed_str)) {
+ $max_namelen = length($failed_str);
+ $Columns = $max_namelen + length($middle_str) + $list_len;
+ }
+ }
+
+ my $fmt_top = "format STDOUT_TOP =\n"
+ . sprintf("%-${max_namelen}s", $failed_str)
+ . $middle_str
+ . $list_str . "\n"
+ . "-" x $Columns
+ . "\n.\n";
+
+ my $fmt = "format STDOUT =\n"
+ . "@" . "<" x ($max_namelen - 1)
+ . " @>> @>>>> @>>>> @>>> ^##.##% "
+ . "^" . "<" x ($list_len - 1) . "\n"
+ . '{ $Curtest->{name}, $Curtest->{estat},'
+ . ' $Curtest->{wstat}, $Curtest->{max},'
+ . ' $Curtest->{failed}, $Curtest->{percent},'
+ . ' $Curtest->{canon}'
+ . "\n}\n"
+ . "~~" . " " x ($Columns - $list_len - 2) . "^"
+ . "<" x ($list_len - 1) . "\n"
+ . '$Curtest->{canon}'
+ . "\n.\n";
+
+ eval $fmt_top;
+ die $@ if $@;
+ eval $fmt;
+ die $@ if $@;
+
+ return($fmt_top, $fmt);
+}
+
+
my $tried_devel_corestack;
sub corestatus {
my($st) = @_;
- my($ret);
eval {require 'wait.ph'};
- if ($@) {
- SWITCH: {
- $ret = ($st & 0200); # Tim says, this is for 90%
- }
- } else {
- $ret = WCOREDUMP($st);
- }
+ my $ret = defined &WCOREDUMP ? WCOREDUMP($st) : $st & 0200;
- eval { require Devel::CoreStack; $have_devel_corestack++ }
+ eval { require Devel::CoreStack; $Have_Devel_Corestack++ }
unless $tried_devel_corestack++;
$ret;
}
sub canonfailed ($@) {
- my($max,@failed) = @_;
+ my($max,$skipped,@failed) = @_;
my %seen;
@failed = sort {$a <=> $b} grep !$seen{$_}++, @failed;
my $failed = @failed;
}
push @result, "\tFailed $failed/$max tests, ";
- push @result, sprintf("%.2f",100*(1-$failed/$max)), "% okay\n";
+ push @result, sprintf("%.2f",100*(1-$failed/$max)), "% okay";
+ my $ender = 's' x ($skipped > 1);
+ my $good = $max - $failed - $skipped;
+ my $goodper = sprintf("%.2f",100*($good/$max));
+ push @result, " (-$skipped skipped test$ender: $good okay, ".
+ "$goodper%)"
+ if $skipped;
+ push @result, "\n";
my $txt = join "", @result;
($txt, $canon);
}
=head1 DESCRIPTION
+(By using the Test module, you can write test scripts without
+knowing the exact output this module expects. However, if you need to
+know the specifics, read on!)
+
Perl test scripts print to standard output C<"ok N"> for each single
test, where C<N> is an increasing sequence of integers. The first line
output by a standard test script is C<"1..M"> with C<M> being the
=head2 The test script output
+=over 4
+
+=item B<1..M>
+
+This header tells how many tests there will be. It should be the
+first line output by your test program (but it's okay if it's preceded
+by comments).
+
+In certain instances, you may not know how many tests you will
+ultimately be running. In this case, it is permitted (but not
+encouraged) for the 1..M header to appear as the B<last> line output
+by your test (again, it can be followed by further comments). But we
+strongly encourage you to put it first.
+
+Under B<no> circumstances should 1..M appear in the middle of your
+output or more than once.
+
+
+=item B<'ok', 'not ok'. Ok?>
+
Any output from the testscript to standard error is ignored and
bypassed, thus will be seen by the user. Lines written to standard
output containing C</^(not\s+)?ok\b/> are interpreted as feedback for
runtests(). All other lines are discarded.
-It is tolerated if the test numbers after C<ok> are omitted. In this
-case Test::Harness maintains temporarily its own counter until the
-script supplies test numbers again. So the following test script
+C</^not ok/> indicates a failed test. C</^ok/> is a successful test.
+
+
+=item B<test numbers>
+
+Perl normally expects the 'ok' or 'not ok' to be followed by a test
+number. It is tolerated if the test numbers after 'ok' are
+omitted. In this case Test::Harness maintains temporarily its own
+counter until the script supplies test numbers again. So the following
+test script
print <<END;
1..6
ok
END
-will generate
+will generate
FAILED tests 1, 3, 6
Failed 3/6 tests, 50.00% okay
+
+=item B<$Test::Harness::verbose>
+
The global variable $Test::Harness::verbose is exportable and can be
used to let runtests() display the standard output of the script
without altering the behavior otherwise.
+=item B<$Test::Harness::switches>
+
+The global variable $Test::Harness::switches is exportable and can be
+used to set perl command line options used for running the test
+script(s). The default value is C<-w>.
+
+=item B<Skipping tests>
+
+If the standard output line contains substring C< # Skip> (with
+variations in spacing and case) after C<ok> or C<ok NUMBER>, it is
+counted as a skipped test. If the whole testscript succeeds, the
+count of skipped tests is included in the generated output.
+
+C<Test::Harness> reports the text after C< # Skip\S*\s+> as a reason
+for skipping. Similarly, one can include a similar explanation in a
+C<1..0> line emitted if the test script is skipped completely:
+
+ 1..0 # Skipped: no leverage found
+
+=item B<Bail out!>
+
+As an emergency measure, a test script can decide that further tests
+are useless (e.g. missing dependencies) and testing should stop
+immediately. In that case the test script prints the magic words
+
+ Bail out!
+
+to standard output. Any message after these words will be displayed by
+C<Test::Harness> as the reason why testing is stopped.
+
+=item B<Comments>
+
+Additional comments may be put into the testing output on their own
+lines. Comment lines should begin with a '#', Test::Harness will
+ignore them.
+
+ ok 1
+ # Life is good, the sun is shining, RAM is cheap.
+ not ok 2
+ # got 'Bush' expected 'Gore'
+
+
=head1 EXPORT
C<&runtests> is exported by Test::Harness per default.
+C<$verbose> and C<$switches> are exported upon request.
+
+
=head1 DIAGNOSTICS
=over 4
=item C<Test returned status %d (wstat %d)>
-Scripts that return a non-zero exit status, both C<$? E<gt>E<gt> 8> and C<$?> are
-printed in a message similar to the above.
+Scripts that return a non-zero exit status, both C<$? E<gt>E<gt> 8>
+and C<$?> are printed in a message similar to the above.
=item C<Failed 1 test, %.2f%% okay. %s>
If not all tests were successful, the script dies with one of the
above messages.
+=item C<FAILED--Further testing stopped%s>
+
+If a single subtest decides that further testing will not make sense,
+the script dies with this message.
+
=back
+=head1 ENVIRONMENT
+
+Setting C<HARNESS_IGNORE_EXITCODE> makes harness ignore the exit status
+of child processes.
+
+Setting C<HARNESS_NOTTY> to a true value forces it to behave as though
+STDOUT were not a console. You may need to set this if you don't want
+harness to output more frequent progress messages using carriage returns.
+Some consoles may not handle carriage returns properly (which results
+in a somewhat messy output).
+
+Setting C<HARNESS_COMPILE_TEST> to a true value will make harness attempt
+to compile the test using C<perlcc> before running it.
+
+If C<HARNESS_FILELEAK_IN_DIR> is set to the name of a directory, harness
+will check after each test whether new files appeared in that directory,
+and report them as
+
+ LEAKED FILES: scr.tmp 0 my.db
+
+If relative, directory name is with respect to the current directory at
+the moment runtests() was called. Putting absolute path into
+C<HARNESS_FILELEAK_IN_DIR> may give more predictable results.
+
+The value of C<HARNESS_PERL_SWITCHES> will be prepended to the
+switches used to invoke perl on each test. For example, setting
+C<HARNESS_PERL_SWITCHES> to "-W" will run all tests with all
+warnings enabled.
+
+If C<HARNESS_COLUMNS> is set, then this value will be used for the
+width of the terminal. If it is not set then it will default to
+C<COLUMNS>. If this is not set, it will default to 80. Note that users
+of Bourne-sh based shells will need to C<export COLUMNS> for this
+module to use that variable.
+
+Harness sets C<HARNESS_ACTIVE> before executing the individual tests.
+This allows the tests to determine if they are being executed through the
+harness or by any other means.
+
=head1 SEE ALSO
-See L<Benchmark> for the underlying timing routines.
+L<Test> for writing test scripts and also L<Benchmark> for the
+underlying timing routines.
=head1 AUTHORS