10 use vars qw($VERSION $verbose $switches $have_devel_corestack $curtest
11 @ISA @EXPORT @EXPORT_OK);
12 $have_devel_corestack = 0;
17 @EXPORT= qw(&runtests);
18 @EXPORT_OK= qw($verbose $switches);
21 Failed Test Status Wstat Total Fail Failed List of failed
22 ------------------------------------------------------------------------------
26 @<<<<<<<<<<<<<< @>> @>>>> @>>>> @>>> ^##.##% @<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
# NOTE(review): fragment of sub runtests(@tests) — the surrounding `sub` header,
# several branches and closing braces lie outside this excerpt. Code left
# byte-identical; comments only.
44 my($test,$te,$ok,$next,$max,$pct,$totok,@failed,%failedtests);
# Remember caller's PERL5LIB so it can be restored later (see the VMS branch below).
50 my $old5lib = $ENV{PERL5LIB};
51 local($ENV{'PERL5LIB'}) = join($Config{path_sep}, @INC); # pass -I flags to children
# Wall-clock/CPU baseline for the final statistics line (Benchmark module).
53 my $t_start = new Benchmark;
54 while ($test = shift(@tests)) {
# Print the test name padded with dots to a fixed width of 20 columns.
57 print "$te" . '.' x (20 - length($te));
# Run each script as a child via a piped open so its stdout can be parsed;
# $^X is the perl binary running the harness, $switches are extra perl flags.
58 my $fh = new FileHandle;
59 if ($^O eq 'VMS') { $fh->open("MCR $^X $switches $test|") || (print "can't run. $!\n"); }
60 else { $fh->open("$^X $switches $test|") || (print "can't run. $!\n"); }
# Per-script counters: $ok = tests that passed, $next = next expected test
# number, $max = declared test count from the 1..M header.
61 $ok = $next = $max = 0;
# The 1..M plan line: capture M as the expected number of tests.
67 if (/^1\.\.([0-9]+)/) {
# Any "ok"/"not ok" result line (only counted once a plan has been seen).
72 } elsif ($max && /^(not\s+)?ok\b/) {
74 if (/^not ok\s*(\d*)/){
77 } elsif (/^ok\s*(\d*)/) {
83 # warn "Test output counter mismatch [test $this]\n";
84 # no need to warn probably
# Tests between the last seen number and this one never reported: mark failed.
85 push @failed, $next..$this-1;
86 } elsif ($this < $next) {
87 #we have seen more "ok" lines than the number suggests
88 warn "Confused test output: test $this answered after test ", $next-1, "\n";
94 $fh->close; # must close to reap child resource values
# On VMS the raw status is already meaningful; elsewhere the exit code
# lives in the high byte of $? (hence >> 8).
96 my $estatus = $^O eq 'VMS' ? $wstatus : $wstatus >> 8;
# VMS success is an odd status value; elsewhere any non-zero wait status is failure.
97 if ($^O eq 'VMS' ? !($wstatus & 1) : $wstatus) {
98 print "dubious\n\tTest returned status $estatus (wstat $wstatus)\n";
99 if (corestatus($wstatus)) { # until we have a wait module
# If Devel::CoreStack loaded (see corestatus), dump a stack from the core file.
100 if ($have_devel_corestack) {
101 Devel::CoreStack::stack($^X);
103 print "\ttest program seems to have generated a core\n";
# Record the failure; '??' means the value could not be determined.
107 $failedtests{$test} = { canon => '??', max => $max || '??',
109 name => $test, percent => undef,
110 estat => $estatus, wstat => $wstatus,
# All declared tests answered and all passed.
112 } elsif ($ok == $max && $next == $max+1) {
116 print "skipping test on this platform\n";
# Tests that never reported at all (script stopped early) count as failed.
121 push @failed, $next..$max;
# canonfailed() builds both the human-readable text and the canonical
# "3 5-7" style list of failed test numbers.
124 my ($txt, $canon) = canonfailed($max,@failed);
126 $failedtests{$test} = { canon => $canon, max => $max,
127 failed => scalar @failed,
128 name => $test, percent => 100*(scalar @failed)/$max,
129 estat => '', wstat => '',
# Counters disagree but we cannot reconstruct which tests failed.
132 print "Don't know which tests failed: got $ok ok, expected $max\n";
133 $failedtests{$test} = { canon => '??', max => $max,
135 name => $test, percent => undef,
136 estat => '', wstat => '',
# No plan line and no results at all: the script died before producing output.
140 } elsif ($next == 0) {
141 print "FAILED before any test output arrived\n";
143 $failedtests{$test} = { canon => '??', max => '??',
145 name => $test, percent => undef,
146 estat => '', wstat => '',
# Elapsed time for the whole run, for the closing statistics line.
150 my $t_total = timediff(new Benchmark, $t_start);
# local() does not restore %ENV reliably on VMS, so put PERL5LIB back by hand.
152 if ($^O eq 'VMS' and defined($old5lib)) { $ENV{PERL5LIB} = $old5lib; }
153 if ($bad == 0 && $totmax) {
154 print "All tests successful.\n";
156 die "FAILED--no tests were run for some reason.\n";
157 } elsif ($totmax==0) {
# Scripts ran but none produced any test output.
158 my $blurb = $total==1 ? "script" : "scripts";
159 die "FAILED--$total test $blurb could be run, alas--no output ever seen\n";
# Summarize: percentage of scripts that passed, plus per-subtest totals.
161 $pct = sprintf("%.2f", $good / $total * 100);
162 my $subpct = sprintf " %d/%d subtests failed, %.2f%% okay.",
163 $totmax - $totok, $totmax, 100*$totok/$totmax;
# Emit one formatted report line (via the format declared above) per failed script.
165 for $script (sort keys %failedtests) {
166 $curtest = $failedtests{$script};
170 die "Failed $bad/$total test scripts, $pct% okay.$subpct\n";
173 printf("Files=%d, Tests=%d, %s\n", $files, $totmax, timestr($t_total, 'nop'));
# True only when every script passed and at least one subtest was run.
175 return ($bad == 0 && $totmax) ;
# NOTE(review): fragment of sub corestatus($st) — the sub header and some
# branches fall outside this excerpt. Decides whether a child's wait status
# indicates a core dump. Code left byte-identical; comments only.
# Try the system's wait.ph for a real WCOREDUMP(); fall back to bit 0200 below.
182 eval {require 'wait.ph'};
185 $ret = ($st & 0200); # Tim says, this is for 90%
188 $ret = WCOREDUMP($st);
# Opportunistically load Devel::CoreStack; the flag gates the stack dump
# attempted in runtests() when a core file is suspected.
191 eval {require Devel::CoreStack};
192 $have_devel_corestack++ unless $@;
# NOTE(review): fragment of canonfailed — interior lines and the closing
# return/brace fall outside this excerpt. Given the declared test count and
# the list of failed test numbers, builds (a) a human-readable report and
# (b) a canonical compressed list such as "1 3-5 9". Code byte-identical.
197 sub canonfailed ($@) {
198 my($max,@failed) = @_;
# Dedupe (via %seen, declared in an elided line) and sort numerically.
200 @failed = sort {$a <=> $b} grep !$seen{$_}++, @failed;
201 my $failed = @failed;
# Walk the sorted numbers, collapsing consecutive runs into "min-max" ranges.
205 my $last = $min = shift @failed;
208 for (@failed, $failed[-1]) { # don't forget the last one
# A gap (or the duplicated sentinel element) closes the current run.
209 if ($_ > $last+1 || $_ == $last) {
213 push @canon, "$min-$last";
220 push @result, "FAILED tests @canon\n";
# Singular wording when exactly one test failed.
223 push @result, "FAILED test $last\n";
227 push @result, "\tFailed $failed/$max tests, ";
228 push @result, sprintf("%.2f",100*(1-$failed/$max)), "% okay\n";
229 my $txt = join "", @result;
238 Test::Harness - run perl standard test scripts with statistics
248 Perl test scripts print to standard output C<"ok N"> for each single
249 test, where C<N> is an increasing sequence of integers. The first line
250 output by a standard test script is C<"1..M"> with C<M> being the
251 number of tests that should be run within the test
252 script. Test::Harness::runtests(@tests) runs all the test scripts
253 named as arguments and checks standard output for the expected
256 After all tests have been performed, runtests() prints some
257 performance statistics that are computed by the Benchmark module.
259 =head2 The test script output
261 Any output from the test script to standard error is not captured by
262 runtests() and passes straight through, so the user will see it. Lines written to standard
263 output containing C</^(not\s+)?ok\b/> are interpreted as feedback for
264 runtests(). All other lines are discarded.
266 The test numbers after C<ok> may be omitted. In that
267 case Test::Harness temporarily maintains its own counter until the
268 script supplies test numbers again. So the following test script
282 Failed 3/6 tests, 50.00% okay
284 The global variable $Test::Harness::verbose is exportable and can be
285 used to let runtests() display the standard output of the script
286 without altering the behavior otherwise.
290 C<&runtests> is exported by Test::Harness by default.
296 =item C<All tests successful.\nFiles=%d, Tests=%d, %s>
298 If all tests are successful some statistics about the performance are
301 =item C<FAILED tests %s\n\tFailed %d/%d tests, %.2f%% okay.>
303 For any single script that has failing subtests statistics like the
306 =item C<Test returned status %d (wstat %d)>
308 For scripts that return a non-zero exit status, both C<$? E<gt>E<gt> 8> and C<$?>
309 are printed in a message similar to the above.
311 =item C<Failed 1 test, %.2f%% okay. %s>
313 =item C<Failed %d/%d tests, %.2f%% okay. %s>
315 If not all tests were successful, the script dies with one of the
322 See L<Benchmark> for the underlying timing routines.
326 Either Tim Bunce or Andreas Koenig, we don't know. What we know for
327 sure is, that it was inspired by Larry Wall's TEST script that came
328 with perl distributions for ages. Current maintainer is Andreas
333 Test::Harness uses $^X to determine the perl binary to run the tests
334 with. Test scripts running via the shebang (C<#!>) line may not be
335 portable because $^X is not consistent for shebang scripts across
336 platforms. This is no problem when Test::Harness is run with an
337 absolute path to the perl binary or when $^X can be found in the path.