10 use vars qw($VERSION $verbose $switches $have_devel_corestack $curtest
11 @ISA @EXPORT @EXPORT_OK);
12 $have_devel_corestack = 0;
17 @EXPORT= qw(&runtests);
18 @EXPORT_OK= qw($verbose $switches);
21 Failed Test Status Wstat Total Fail Failed List of failed
22 ------------------------------------------------------------------------------
26 @<<<<<<<<<<<<<< @>> @>>>> @>>>> @>>> ^##.##% @<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
44 my($test,$te,$ok,$next,$max,$pct,$totok,@failed,%failedtests);
51 # pass -I flags to children
52 my $old5lib = $ENV{PERL5LIB};
53 local($ENV{'PERL5LIB'}) = join($Config{path_sep}, @INC);
55 if ($^O eq 'VMS') { $switches =~ s/-(\S*[A-Z]\S*)/"-$1"/g }
57 my $t_start = new Benchmark;
58 while ($test = shift(@tests)) {
61 if ($^O eq 'VMS') { $te =~ s/^.*\.t\./[.t./; }
62 print "$te" . '.' x (20 - length($te));
63 my $fh = new FileHandle;
64 $fh->open($test) or print "can't open $test. $!\n";
67 $s .= q[ "-T"] if $first =~ /^#!.*\bperl.*-\w*T/;
68 $fh->close or print "can't close $test. $!\n";
69 my $cmd = "$^X $s $test|";
70 $cmd = "MCR $cmd" if $^O eq 'VMS';
71 $fh->open($cmd) or print "can't run $test. $!\n";
72 $ok = $next = $max = 0;
78 if (/^1\.\.([0-9]+)/) {
83 } elsif ($max && /^(not\s+)?ok\b/) {
85 if (/^not ok\s*(\d*)/){
88 } elsif (/^ok\s*(\d*)/) {
94 # warn "Test output counter mismatch [test $this]\n";
95 # no need to warn probably
96 push @failed, $next..$this-1;
97 } elsif ($this < $next) {
98 #we have seen more "ok" lines than the number suggests
99 warn "Confused test output: test $this answered after test ", $next-1, "\n";
105 $fh->close; # must close to reap child resource values
108 $estatus = ($^O eq 'VMS'
109 ? eval 'use vmsish "status"; $estatus = $?'
112 my ($failed, $canon, $percent) = ('??', '??');
113 print "dubious\n\tTest returned status $estatus (wstat $wstatus)\n";
114 print "\t\t(VMS status is $estatus)\n" if $^O eq 'VMS';
115 if (corestatus($wstatus)) { # until we have a wait module
116 if ($have_devel_corestack) {
117 Devel::CoreStack::stack($^X);
119 print "\ttest program seems to have generated a core\n";
124 if ($next == $max + 1 and not @failed) {
125 print "\tafter all the subtests completed successfully\n";
127 $failed = 0; # But we do not set $canon!
129 push @failed, $next..$max;
131 (my $txt, $canon) = canonfailed($max,@failed);
132 $percent = 100*(scalar @failed)/$max;
136 $failedtests{$test} = { canon => $canon, max => $max || '??',
138 name => $test, percent => $percent,
139 estat => $estatus, wstat => $wstatus,
141 } elsif ($ok == $max && $next == $max+1) {
145 print "skipping test on this platform\n";
150 push @failed, $next..$max;
153 my ($txt, $canon) = canonfailed($max,@failed);
155 $failedtests{$test} = { canon => $canon, max => $max,
156 failed => scalar @failed,
157 name => $test, percent => 100*(scalar @failed)/$max,
158 estat => '', wstat => '',
161 print "Don't know which tests failed: got $ok ok, expected $max\n";
162 $failedtests{$test} = { canon => '??', max => $max,
164 name => $test, percent => undef,
165 estat => '', wstat => '',
169 } elsif ($next == 0) {
170 print "FAILED before any test output arrived\n";
172 $failedtests{$test} = { canon => '??', max => '??',
174 name => $test, percent => undef,
175 estat => '', wstat => '',
179 my $t_total = timediff(new Benchmark, $t_start);
182 if (defined $old5lib) {
183 $ENV{PERL5LIB} = $old5lib;
185 delete $ENV{PERL5LIB};
188 if ($bad == 0 && $totmax) {
189 print "All tests successful.\n";
191 die "FAILED--no tests were run for some reason.\n";
192 } elsif ($totmax==0) {
193 my $blurb = $total==1 ? "script" : "scripts";
194 die "FAILED--$total test $blurb could be run, alas--no output ever seen\n";
196 $pct = sprintf("%.2f", $good / $total * 100);
197 my $subpct = sprintf " %d/%d subtests failed, %.2f%% okay.",
198 $totmax - $totok, $totmax, 100*$totok/$totmax;
200 for $script (sort keys %failedtests) {
201 $curtest = $failedtests{$script};
205 die "Failed $bad/$total test scripts, $pct% okay.$subpct\n";
208 printf("Files=%d, Tests=%d, %s\n", $files, $totmax, timestr($t_total, 'nop'));
210 return ($bad == 0 && $totmax) ;
213 my $tried_devel_corestack;
218 eval {require 'wait.ph'};
221 $ret = ($st & 0200); # Tim says, this is for 90%
224 $ret = WCOREDUMP($st);
227 eval { require Devel::CoreStack; $have_devel_corestack++ }
228 unless $tried_devel_corestack++;
233 sub canonfailed ($@) {
234 my($max,@failed) = @_;
236 @failed = sort {$a <=> $b} grep !$seen{$_}++, @failed;
237 my $failed = @failed;
241 my $last = $min = shift @failed;
244 for (@failed, $failed[-1]) { # don't forget the last one
245 if ($_ > $last+1 || $_ == $last) {
249 push @canon, "$min-$last";
256 push @result, "FAILED tests @canon\n";
259 push @result, "FAILED test $last\n";
263 push @result, "\tFailed $failed/$max tests, ";
264 push @result, sprintf("%.2f",100*(1-$failed/$max)), "% okay\n";
265 my $txt = join "", @result;
274 Test::Harness - run perl standard test scripts with statistics
284 Perl test scripts print to standard output C<"ok N"> for each single
285 test, where C<N> is an increasing sequence of integers. The first line
286 output by a standard test script is C<"1..M"> with C<M> being the
287 number of tests that should be run within the test
288 script. Test::Harness::runtests(@tests) runs all the test scripts
289 named as arguments and checks standard output for the expected
292 After all tests have been performed, runtests() prints some
293 performance statistics that are computed by the Benchmark module.
295 =head2 The test script output
297 Any output from the test script to standard error is not captured,
298 and thus will be seen by the user. Lines written to standard
299 output containing C</^(not\s+)?ok\b/> are interpreted as feedback for
300 runtests(). All other lines are discarded.
302 The test numbers after C<ok> may be omitted. In this
303 case Test::Harness temporarily maintains its own counter until the
304 script supplies test numbers again. So the following test script
318 Failed 3/6 tests, 50.00% okay
320 The global variable $Test::Harness::verbose is exportable and can be
321 used to let runtests() display the standard output of the script
322 without otherwise altering the behavior.
326 C<&runtests> is exported by Test::Harness per default.
332 =item C<All tests successful.\nFiles=%d, Tests=%d, %s>
334 If all tests are successful some statistics about the performance are
337 =item C<FAILED tests %s\n\tFailed %d/%d tests, %.2f%% okay.>
339 For any single script that has failing subtests statistics like the
342 =item C<Test returned status %d (wstat %d)>
344 For scripts that return a non-zero exit status, both C<$? E<gt>E<gt> 8> and C<$?> are
345 printed in a message similar to the above.
347 =item C<Failed 1 test, %.2f%% okay. %s>
349 =item C<Failed %d/%d tests, %.2f%% okay. %s>
351 If not all tests were successful, the script dies with one of the
358 See L<Benchmark> for the underlying timing routines.
362 Either Tim Bunce or Andreas Koenig, we don't know. What we know for
363 sure is that it was inspired by Larry Wall's TEST script that came
364 with perl distributions for ages. Numerous anonymous contributors
365 exist. Current maintainer is Andreas Koenig.
369 Test::Harness uses $^X to determine the perl binary to run the tests
370 with. Test scripts running via the shebang (C<#!>) line may not be
371 portable because $^X is not consistent for shebang scripts across
372 platforms. This is no problem when Test::Harness is run with an
373 absolute path to the perl binary or when $^X can be found in the path.