10 use vars qw($VERSION $verbose $switches $have_devel_corestack $curtest
11 @ISA @EXPORT @EXPORT_OK);
12 $have_devel_corestack = 0;
17 @EXPORT= qw(&runtests);
18 @EXPORT_OK= qw($verbose $switches);
21 Failed Test Status Wstat Total Fail Failed List of failed
22 ------------------------------------------------------------------------------
26 @<<<<<<<<<<<<<< @>> @>>>> @>>>> @>>> ^##.##% @<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
# Interior of runtests(): iterate over @tests, run each script under $^X,
# parse its "1..M" plan and "ok"/"not ok" lines, and collect per-script
# failure data in %failedtests for the final report.
# NOTE(review): this excerpt elides several original lines; comments below
# describe only what is visible here.
44 my($test,$te,$ok,$next,$max,$pct,$totok,@failed,%failedtests);
51 # pass -I flags to children
# Save the caller's PERL5LIB, then expose our @INC to child processes for
# the duration of this sub (local); restored or deleted after the loop.
52 my $old5lib = $ENV{PERL5LIB};
53 local($ENV{'PERL5LIB'}) = join($Config{path_sep}, @INC);
# On VMS, quote any switch containing an uppercase letter to preserve case.
55 if ($^O eq 'VMS') { $switches =~ s/-(\S*[A-Z]\S*)/"-$1"/g }
# Benchmark the whole run; paired with timediff() near the end.
57 my $t_start = new Benchmark;
58 while ($test = shift(@tests)) {
# Print the script name padded with dots to a fixed-width column.
61 print "$te" . '.' x (20 - length($te));
62 my $fh = new FileHandle;
63 $fh->open($test) or print "can't open $test. $!\n";
# Propagate taint mode if the script's shebang line requests -T.
66 $s .= " -T" if $first =~ /^#!.*\bperl.*-\w*T/;
67 $fh->close or print "can't close $test. $!\n";
# Run the script with the same perl binary ($^X) and read its stdout.
# NOTE(review): 2-arg piped open interpolates $s and $test into a shell
# command line; list-form open('-|', ...) would be safer if modernizing.
68 my $cmd = "$^X $s $test|";
69 $cmd = "MCR $cmd" if $^O eq 'VMS';
70 $fh->open($cmd) or print "can't run $test. $!\n";
# $ok = subtests passed, $next = next expected test number, $max = plan.
71 $ok = $next = $max = 0;
# The "1..M" plan line establishes the expected number of subtests.
77 if (/^1\.\.([0-9]+)/) {
82 } elsif ($max && /^(not\s+)?ok\b/) {
84 if (/^not ok\s*(\d*)/){
87 } elsif (/^ok\s*(\d*)/) {
# Gap in the numbering: tests $next .. $this-1 never reported, so they
# are recorded as failures without a warning.
93 # warn "Test output counter mismatch [test $this]\n";
94 # no need to warn probably
95 push @failed, $next..$this-1;
96 } elsif ($this < $next) {
97 #we have seen more "ok" lines than the number suggests
98 warn "Confused test output: test $this answered after test ", $next-1, "\n";
104 $fh->close; # must close to reap child resource values
# Decode the child's wait status; on VMS, success is bit 0 of $wstatus.
106 my $estatus = $^O eq 'VMS' ? $wstatus : $wstatus >> 8;
107 if ($^O eq 'VMS' ? !($wstatus & 1) : $wstatus) {
# NOTE(review): only $failed and $canon get the '??' placeholder here;
# $percent is deliberately left undef until computed below.
108 my ($failed, $canon, $percent) = ('??', '??');
109 print "dubious\n\tTest returned status $estatus (wstat $wstatus)\n";
110 if (corestatus($wstatus)) { # until we have a wait module
111 if ($have_devel_corestack) {
# Dump a stack trace from the core file when Devel::CoreStack loaded.
112 Devel::CoreStack::stack($^X);
114 print "\ttest program seems to have generated a core\n";
# Non-zero exit even though every planned subtest passed.
119 if ($next == $max + 1 and not @failed) {
120 print "\tafter all the subtests completed successfully\n";
122 $failed = 0; # But we do not set $canon!
# Subtests that never ran ($next..$max) count as failed.
124 push @failed, $next..$max;
126 (my $txt, $canon) = canonfailed($max,@failed);
127 $percent = 100*(scalar @failed)/$max;
# Record the per-script failure summary consumed by the report loop below.
131 $failedtests{$test} = { canon => $canon, max => $max || '??',
133 name => $test, percent => $percent,
134 estat => $estatus, wstat => $wstatus,
# Clean exit: all planned subtests answered in order.
136 } elsif ($ok == $max && $next == $max+1) {
# A "1..0" plan means the script opted out on this platform.
140 print "skipping test on this platform\n";
145 push @failed, $next..$max;
148 my ($txt, $canon) = canonfailed($max,@failed);
150 $failedtests{$test} = { canon => $canon, max => $max,
151 failed => scalar @failed,
152 name => $test, percent => 100*(scalar @failed)/$max,
153 estat => '', wstat => '',
# Counters disagree in a way we cannot reconstruct per-test.
156 print "Don't know which tests failed: got $ok ok, expected $max\n";
157 $failedtests{$test} = { canon => '??', max => $max,
159 name => $test, percent => undef,
160 estat => '', wstat => '',
# The script produced no recognizable test output at all.
164 } elsif ($next == 0) {
165 print "FAILED before any test output arrived\n";
167 $failedtests{$test} = { canon => '??', max => '??',
169 name => $test, percent => undef,
170 estat => '', wstat => '',
174 my $t_total = timediff(new Benchmark, $t_start);
# Restore the caller's PERL5LIB exactly as we found it (or remove it).
177 if (defined $old5lib) {
178 $ENV{PERL5LIB} = $old5lib;
180 delete $ENV{PERL5LIB};
# Final report: all-good, nothing run, no output seen, or a failure table.
183 if ($bad == 0 && $totmax) {
184 print "All tests successful.\n";
186 die "FAILED--no tests were run for some reason.\n";
187 } elsif ($totmax==0) {
188 my $blurb = $total==1 ? "script" : "scripts";
189 die "FAILED--$total test $blurb could be run, alas--no output ever seen\n";
191 $pct = sprintf("%.2f", $good / $total * 100);
192 my $subpct = sprintf " %d/%d subtests failed, %.2f%% okay.",
193 $totmax - $totok, $totmax, 100*$totok/$totmax;
# Write one row per failed script via the report format ($curtest feeds
# the format's field list), then die with the overall tally.
195 for $script (sort keys %failedtests) {
196 $curtest = $failedtests{$script};
200 die "Failed $bad/$total test scripts, $pct% okay.$subpct\n";
203 printf("Files=%d, Tests=%d, %s\n", $files, $totmax, timestr($t_total, 'nop'));
# True iff no script failed and at least one subtest was planned.
205 return ($bad == 0 && $totmax) ;
# Fragments of corestatus($st): decide whether wait status $st indicates a
# core dump, preferring the system's WCOREDUMP() from wait.ph when it can
# be loaded; also lazily loads Devel::CoreStack exactly once.
208 my $tried_devel_corestack;
# Try the real definition from the system's wait.ph (eval guards failure).
213 eval {require 'wait.ph'};
# Fallback heuristic when wait.ph is unavailable -- presumably bit 0200 is
# the core-dump flag on most systems (see original comment); TODO confirm.
216 $ret = ($st & 0200); # Tim says, this is for 90%
219 $ret = WCOREDUMP($st);
# Attempt Devel::CoreStack once per process ($tried_devel_corestack guard);
# $have_devel_corestack gates the stack dump in runtests().
222 eval { require Devel::CoreStack; $have_devel_corestack++ }
223 unless $tried_devel_corestack++;
228 sub canonfailed ($@) {
# Turn $max (planned subtest count) and @failed (failing subtest numbers)
# into human-readable "FAILED ..." text; runs of consecutive failures are
# collapsed into "min-max" ranges collected in @canon.  The visible
# fragment builds the message in @result and joins it into $txt.
229 my($max,@failed) = @_;
# Numeric sort with duplicates removed (%seen guards the grep).
231 @failed = sort {$a <=> $b} grep !$seen{$_}++, @failed;
# Distinct failing subtests: array evaluated in scalar context.
232 my $failed = @failed;
236 my $last = $min = shift @failed;
# Appending $failed[-1] repeats the final element so the last open run is
# flushed inside the loop body.
239 for (@failed, $failed[-1]) { # don't forget the last one
240 if ($_ > $last+1 || $_ == $last) {
# Close the current run of consecutive failures as a "min-last" range.
244 push @canon, "$min-$last";
# Plural vs. singular wording for the summary line.
251 push @result, "FAILED tests @canon\n";
254 push @result, "FAILED test $last\n";
258 push @result, "\tFailed $failed/$max tests, ";
259 push @result, sprintf("%.2f",100*(1-$failed/$max)), "% okay\n";
260 my $txt = join "", @result;
269 Test::Harness - run perl standard test scripts with statistics
279 Perl test scripts print to standard output C<"ok N"> for each single
280 test, where C<N> is an increasing sequence of integers. The first line
281 output by a standard test script is C<"1..M"> with C<M> being the
282 number of tests that should be run within the test
283 script. Test::Harness::runtests(@tests) runs all the test scripts
284 named as arguments and checks standard output for the expected
287 After all tests have been performed, runtests() prints some
288 performance statistics that are computed by the Benchmark module.
290 =head2 The test script output
292 Any output from the test script to standard error is ignored and
293 bypassed, and will thus be seen by the user. Lines written to standard
294 output containing C</^(not\s+)?ok\b/> are interpreted as feedback for
295 runtests(). All other lines are discarded.
297 The test numbers after C<ok> may be omitted. In this
298 case Test::Harness temporarily maintains its own counter until the
299 script supplies test numbers again. So the following test script
313 Failed 3/6 tests, 50.00% okay
315 The global variable $Test::Harness::verbose is exportable and can be
316 used to let runtests() display the standard output of the script
317 without altering the behavior otherwise.
321 C<&runtests> is exported by Test::Harness per default.
327 =item C<All tests successful.\nFiles=%d, Tests=%d, %s>
329 If all tests are successful some statistics about the performance are
332 =item C<FAILED tests %s\n\tFailed %d/%d tests, %.2f%% okay.>
334 For any single script that has failing subtests statistics like the
337 =item C<Test returned status %d (wstat %d)>
339 For scripts that return a non-zero exit status, both C<$? E<gt>E<gt> 8> and C<$?>
340 are printed in a message similar to the above.
342 =item C<Failed 1 test, %.2f%% okay. %s>
344 =item C<Failed %d/%d tests, %.2f%% okay. %s>
346 If not all tests were successful, the script dies with one of the
353 See L<Benchmark> for the underlying timing routines.
357 Either Tim Bunce or Andreas Koenig, we don't know. What we know for
358 sure is that it was inspired by Larry Wall's TEST script that came
359 with perl distributions for ages. Numerous anonymous contributors
360 exist. Current maintainer is Andreas Koenig.
364 Test::Harness uses $^X to determine the perl binary to run the tests
365 with. Test scripts running via the shebang (C<#!>) line may not be
366 portable because $^X is not consistent for shebang scripts across
367 platforms. This is no problem when Test::Harness is run with an
368 absolute path to the perl binary or when $^X can be found in the path.