# Package globals and Exporter wiring (fragmentary excerpt of Test::Harness).
10 use vars qw($VERSION $verbose $switches $have_devel_corestack $curtest
11 @ISA @EXPORT @EXPORT_OK);
# Flag recording whether Devel::CoreStack could be loaded; set later in
# corestatus() and consulted after a test child appears to dump core.
12 $have_devel_corestack = 0;
# &runtests is exported by default; $verbose and $switches only on request.
17 @EXPORT= qw(&runtests);
18 @EXPORT_OK= qw($verbose $switches);
21 Failed Test Status Wstat Total Fail Failed List of failed
22 ------------------------------------------------------------------------------
26 @<<<<<<<<<<<<<< @>> @>>>> @>>>> @>>> ^##.##% @<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
# --- Fragment of the runtests() body.  The enclosing "sub runtests" line and
# --- many interior lines are not visible in this excerpt; the comments below
# --- annotate only what the visible lines themselves demonstrate.
44 my($test,$te,$ok,$next,$max,$pct,$totok,@failed,%failedtests);
# Propagate our @INC to child test scripts via PERL5LIB (dynamically scoped,
# restored when this block exits).
50 local($ENV{'PERL5LIB'}) = join($Config{path_sep}, @INC); # pass -I flags to children
# Time the whole run with the Benchmark module.
52 my $t_start = new Benchmark;
53 while ($test = shift(@tests)) {
56 print "$te" . '.' x (20 - length($te));
# Run each test script with the same perl binary ($^X) through a read pipe.
# NOTE(review): this is a 2-arg piped open interpolating $switches/$test into
# a shell command; modern code would use list-form open.  Left as-is here.
57 my $fh = new FileHandle;
58 $fh->open("$^X $switches $test|") || (print "can't run. $!\n");
59 $ok = $next = $max = 0;
# The "1..M" header fixes the expected number of tests ($max).
65 if (/^1\.\.([0-9]+)/) {
# Any "ok"/"not ok" line after the header is a per-test result.
70 } elsif ($max && /^(not\s+)?ok\b/) {
72 if (/^not ok\s*(\d*)/){
75 } elsif (/^ok\s*(\d*)/) {
# Gap in the numbering: tests $next .. $this-1 never reported, count as failed.
81 # warn "Test output counter mismatch [test $this]\n";
82 # no need to warn probably
83 push @failed, $next..$this-1;
84 } elsif ($this < $next) {
85 #we have seen more "ok" lines than the number suggests
86 warn "Confused test output: test $this answered after test ", $next-1, "\n";
# Close the pipe so the child is reaped and its wait status is collected.
92 $fh->close; # must close to reap child resource values
# On VMS the wait status is the exit status; elsewhere it is in the high byte.
94 my $estatus = $^O eq 'VMS' ? $wstatus : $wstatus >> 8;
95 if ($^O eq 'VMS' ? !($wstatus & 1) : $wstatus) {
96 print "dubious\n\tTest returned status $estatus (wstat $wstatus)\n";
# If the child seems to have dumped core, print a stack trace when
# Devel::CoreStack is available (see $have_devel_corestack above).
97 if (corestatus($wstatus)) { # until we have a wait module
98 if ($have_devel_corestack) {
99 Devel::CoreStack::stack($^X);
101 print "\ttest program seems to have generated a core\n";
# Record the failure; '??' marks values that could not be determined.
105 $failedtests{$test} = { canon => '??', max => $max || '??',
107 name => $test, percent => undef,
108 estat => $estatus, wstat => $wstatus,
# Clean exit and every promised test accounted for: success (or platform skip).
110 } elsif ($ok == $max && $next == $max+1) {
114 print "skipping test on this platform\n";
# Fewer results than the "1..$max" header promised: the remainder failed.
119 push @failed, $next..$max;
# canonfailed() collapses the failed-test list into a readable summary.
122 my ($txt, $canon) = canonfailed($max,@failed);
124 $failedtests{$test} = { canon => $canon, max => $max,
125 failed => scalar @failed,
126 name => $test, percent => 100*(scalar @failed)/$max,
127 estat => '', wstat => '',
130 print "Don't know which tests failed: got $ok ok, expected $max\n";
131 $failedtests{$test} = { canon => '??', max => $max,
133 name => $test, percent => undef,
134 estat => '', wstat => '',
# No test output at all arrived from the child.
138 } elsif ($next == 0) {
139 print "FAILED before any test output arrived\n";
141 $failedtests{$test} = { canon => '??', max => '??',
143 name => $test, percent => undef,
144 estat => '', wstat => '',
148 my $t_total = timediff(new Benchmark, $t_start);
# Overall verdict: all successful, nothing ran, no output seen, or a summary
# of the failing scripts followed by die().
150 if ($bad == 0 && $totmax) {
151 print "All tests successful.\n";
153 die "FAILED--no tests were run for some reason.\n";
154 } elsif ($totmax==0) {
155 my $blurb = $total==1 ? "script" : "scripts";
156 die "FAILED--$total test $blurb could be run, alas--no output ever seen\n";
158 $pct = sprintf("%.2f", $good / $total * 100);
159 my $subpct = sprintf " %d/%d subtests failed, %.2f%% okay.",
160 $totmax - $totok, $totmax, 100*$totok/$totmax;
# Emit the per-script failure table ($curtest feeds the format at top of file).
162 for $script (sort keys %failedtests) {
163 $curtest = $failedtests{$script};
167 die "Failed $bad/$total test scripts, $pct% okay.$subpct\n";
# Final performance line computed by Benchmark::timestr.
170 printf("Files=%d, Tests=%d, %s\n", $files, $totmax, timestr($t_total, 'nop'))
# --- Fragment of corestatus($st): decide whether wait status $st indicates a
# --- core dump.  Prefers wait.ph's WCOREDUMP when loadable, otherwise falls
# --- back to testing the conventional core-dump bit (0200).
177 eval {require 'wait.ph'};
180 $ret = ($st & 0200); # Tim says, this is for 90%
183 $ret = WCOREDUMP($st);
# Side effect: note once whether Devel::CoreStack is available, so runtests()
# can print a stack trace for core-dumping children.
186 eval {require Devel::CoreStack};
187 $have_devel_corestack++ unless $@;
# Build a human-readable summary of failed test numbers.
# Arguments: expected test count $max, then the failed test numbers.
# Duplicates are dropped, the list is sorted numerically, and consecutive
# runs are collapsed into "min-max" ranges.  Returns ($txt, $canon) per the
# call site in runtests().  (The closing lines of this sub are not visible
# in this excerpt.)
192 sub canonfailed ($@) {
193 my($max,@failed) = @_;
# De-duplicate via %seen, then numeric sort.
195 @failed = sort {$a <=> $b} grep !$seen{$_}++, @failed;
196 my $failed = @failed;
200 my $last = $min = shift @failed;
# Re-append the final element as a sentinel so the last range gets flushed.
203 for (@failed, $failed[-1]) { # don't forget the last one
204 if ($_ > $last+1 || $_ == $last) {
# A gap (or the sentinel) ends the current run: emit it as "min-last".
208 push @canon, "$min-$last";
215 push @result, "FAILED tests @canon\n";
# Single failure: name the one test instead of a range list.
218 push @result, "FAILED test $last\n";
# Trailing statistics line: count and percentage of passing tests.
222 push @result, "\tFailed $failed/$max tests, ";
223 push @result, sprintf("%.2f",100*(1-$failed/$max)), "% okay\n";
224 my $txt = join "", @result;
233 Test::Harness - run perl standard test scripts with statistics
243 Perl test scripts print to standard output C<"ok N"> for each single
244 test, where C<N> is an increasing sequence of integers. The first line
245 output by a standard test script is C<"1..M"> with C<M> being the
246 number of tests that should be run within the test
247 script. Test::Harness::runtests(@tests) runs all the test scripts
248 named as arguments and checks standard output for the expected
251 After all tests have been performed, runtests() prints some
252 performance statistics that are computed by the Benchmark module.
254 =head2 The test script output
256 Any output from the test script to standard error is not captured by
257 Test::Harness; it is passed straight through and will be seen by the user. Lines written to standard
258 output containing C</^(not\s+)?ok\b/> are interpreted as feedback for
259 runtests(). All other lines are discarded.
261 The test numbers after C<ok> may be omitted. In that case
262 Test::Harness temporarily maintains its own counter until the
263 script supplies test numbers again. So the following test script
277 Failed 3/6 tests, 50.00% okay
279 The global variable $Test::Harness::verbose is exportable and can be
280 used to let runtests() display the standard output of the script
281 without altering the behavior otherwise.
285 C<&runtests> is exported by Test::Harness by default.
291 =item C<All tests successful.\nFiles=%d, Tests=%d, %s>
293 If all tests are successful some statistics about the performance are
296 =item C<FAILED tests %s\n\tFailed %d/%d tests, %.2f%% okay.>
298 For any single script that has failing subtests statistics like the
301 =item C<Test returned status %d (wstat %d)>
303 For scripts that return a non-zero exit status, both $?>>8 and $? are
304 printed in a message similar to the above.
306 =item C<Failed 1 test, %.2f%% okay. %s>
308 =item C<Failed %d/%d tests, %.2f%% okay. %s>
310 If not all tests were successful, the script dies with one of the
317 See L<Benchmark> for the underlying timing routines.
321 Either Tim Bunce or Andreas Koenig, we don't know. What we know for
322 sure is that it was inspired by Larry Wall's TEST script that came
323 with perl distributions for ages. Current maintainer is Andreas
328 Test::Harness uses $^X to determine the perl binary to run the tests
329 with. Test scripts running via the shebang (C<#!>) line may not be
330 portable because $^X is not consistent for shebang scripts across
331 platforms. This is no problem when Test::Harness is run with an
332 absolute path to the perl binary or when $^X can be found in the path.