# Test::Harness package globals and Exporter wiring (fragmentary listing;
# the `package`/`use Exporter`/@ISA-assignment lines are not visible here).
# NOTE(review): `use vars` is obsolete in modern Perl — `our` is preferred;
# kept as-is.
11 use vars qw($VERSION $verbose $switches $have_devel_corestack $curtest
12 @ISA @EXPORT @EXPORT_OK);
# Assume Devel::CoreStack is unavailable until corestatus() probes for it.
13 $have_devel_corestack = 0;
# runtests() is exported by default; the flag variables only on request.
18 @EXPORT= qw(&runtests);
19 @EXPORT_OK= qw($verbose $switches);
22 Failed Test Status Wstat Total Fail Failed List of failed
23 ------------------------------------------------------------------------------
27 @<<<<<<<<<<<<<< @>> @>>>> @>>>> @>>> ^##.##% @<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
# --- Interior of runtests(@tests) (the `sub runtests {` line and several
# --- interior lines are not visible in this fragmentary listing). ---
# Per-test state: ok/expected counters plus failure bookkeeping.
45 my($test,$te,$ok,$next,$max,$pct,$totok,@failed,%failedtests);
# Propagate our @INC to child test processes via PERL5LIB, saving the
# caller's value so it can be restored after the run.
52 # pass -I flags to children
53 my $old5lib = $ENV{PERL5LIB};
54 local($ENV{'PERL5LIB'}) = join($Config{path_sep}, @INC);
# On VMS, switches containing uppercase letters must be quoted to
# preserve their case.
56 if ($Is_VMS) { $switches =~ s/-(\S*[A-Z]\S*)/"-$1"/g }
# Time the whole run; reported at the end via Benchmark::timediff.
58 my $t_start = new Benchmark;
59 while ($test = shift(@tests)) {
# Print the test name padded with dots to a fixed column.
62 print "$te" . '.' x (20 - length($te));
# Run each script in a child perl ($^X) and read its stdout via a pipe.
# NOTE(review): 2-arg piped open with interpolated strings — modern code
# would use list-form '-|' open; kept as-is.
63 my $fh = new FileHandle;
64 if ($^O eq 'VMS') { $fh->open("MCR $^X $switches $test|") || (print "can't run. $!\n"); }
65 else { $fh->open("$^X $switches $test|") || (print "can't run. $!\n"); }
66 $ok = $next = $max = 0;
# Parse child output: "1..M" declares the expected number of tests...
72 if (/^1\.\.([0-9]+)/) {
# ...and subsequent "ok N" / "not ok N" lines are tallied.
77 } elsif ($max && /^(not\s+)?ok\b/) {
79 if (/^not ok\s*(\d*)/){
82 } elsif (/^ok\s*(\d*)/) {
# Script skipped ahead of our counter: record the gap as failures.
88 # warn "Test output counter mismatch [test $this]\n";
89 # no need to warn probably
90 push @failed, $next..$this-1;
91 } elsif ($this < $next) {
92 #we have seen more "ok" lines than the number suggests
93 warn "Confused test output: test $this answered after test ", $next-1, "\n";
# Close the pipe so the child is reaped and its wait status is available.
99 $fh->close; # must close to reap child resource values
# Exit status: VMS uses the raw status; elsewhere shift out the
# signal/core bits. NOTE(review): $wstatus is assigned on a line not
# visible in this listing — presumably from $? after the close.
101 my $estatus = $^O eq 'VMS' ? $wstatus : $wstatus >> 8;
102 if ($^O eq 'VMS' ? !($wstatus & 1) : $wstatus) {
103 print "dubious\n\tTest returned status $estatus (wstat $wstatus)\n";
# If the child appears to have dumped core, show a stack trace when
# Devel::CoreStack is available.
104 if (corestatus($wstatus)) { # until we have a wait module
105 if ($have_devel_corestack) {
106 Devel::CoreStack::stack($^X);
108 print "\ttest program seems to have generated a core\n";
# Record the failure; '??' marks values that could not be determined.
112 $failedtests{$test} = { canon => '??', max => $max || '??',
114 name => $test, percent => undef,
115 estat => $estatus, wstat => $wstatus,
# Success branch: every declared test passed and the counter ran
# exactly one past $max.
117 } elsif ($ok == $max && $next == $max+1) {
121 print "skipping test on this platform\n";
# Output stopped early: everything from $next up to $max failed.
126 push @failed, $next..$max;
# Collapse the failed test numbers into canonical "a-b, c" form.
129 my ($txt, $canon) = canonfailed($max,@failed);
131 $failedtests{$test} = { canon => $canon, max => $max,
132 failed => scalar @failed,
133 name => $test, percent => 100*(scalar @failed)/$max,
134 estat => '', wstat => '',
# Counts disagree but no individual failures were recorded.
137 print "Don't know which tests failed: got $ok ok, expected $max\n";
138 $failedtests{$test} = { canon => '??', max => $max,
140 name => $test, percent => undef,
141 estat => '', wstat => '',
# The child produced no recognizable test output at all.
145 } elsif ($next == 0) {
146 print "FAILED before any test output arrived\n";
148 $failedtests{$test} = { canon => '??', max => '??',
150 name => $test, percent => undef,
151 estat => '', wstat => '',
# Total elapsed time for the whole run.
155 my $t_total = timediff(new Benchmark, $t_start);
# Restore the caller's PERL5LIB (or remove ours if it was unset).
158 if (defined $old5lib) {
159 $ENV{PERL5LIB} = $old5lib;
162 delete $ENV{PERL5LIB};
# Final verdict. NOTE(review): the $bad/$total/$totmax/$good/$files
# tallies are computed on lines not visible in this listing.
165 if ($bad == 0 && $totmax) {
166 print "All tests successful.\n";
168 die "FAILED--no tests were run for some reason.\n";
169 } elsif ($totmax==0) {
170 my $blurb = $total==1 ? "script" : "scripts";
171 die "FAILED--$total test $blurb could be run, alas--no output ever seen\n";
173 $pct = sprintf("%.2f", $good / $total * 100);
174 my $subpct = sprintf " %d/%d subtests failed, %.2f%% okay.",
175 $totmax - $totok, $totmax, 100*$totok/$totmax;
# One formatted line per failed script (driven by the format picture near
# the top of the file, which reads $curtest), then die with the tally.
177 for $script (sort keys %failedtests) {
178 $curtest = $failedtests{$script};
182 die "Failed $bad/$total test scripts, $pct% okay.$subpct\n";
185 printf("Files=%d, Tests=%d, %s\n", $files, $totmax, timestr($t_total, 'nop'));
# True iff no script failed and at least one subtest was declared.
187 return ($bad == 0 && $totmax) ;
# --- Interior of corestatus($wstatus) (the `sub corestatus` line is not
# --- visible in this listing). Determines whether the wait status
# --- indicates the child dumped core.
# Try to load wait.ph so the real WCOREDUMP macro is available.
194 eval {require 'wait.ph'};
# Fallback when wait.ph is unavailable: test the core-dump bit directly.
197 $ret = ($st & 0200); # Tim says, this is for 90%
# Preferred path: use WCOREDUMP from wait.ph.
200 $ret = WCOREDUMP($st);
# Also note whether Devel::CoreStack can be loaded, so runtests() can
# print a stack trace for core dumps later.
203 eval {require Devel::CoreStack};
204 $have_devel_corestack++ unless $@;
# canonfailed($max, @failed): build a human-readable failure message and
# a canonical "a-b, c" range string from the failed test numbers.
# NOTE(review): the ($@) prototype alters parsing, not validation; kept
# for compatibility. Several interior lines are missing from this listing.
209 sub canonfailed ($@) {
210 my($max,@failed) = @_;
# De-duplicate, then sort the failed test numbers numerically.
212 @failed = sort {$a <=> $b} grep !$seen{$_}++, @failed;
213 my $failed = @failed;
# Walk the sorted list, collapsing consecutive runs into "min-max" ranges.
217 my $last = $min = shift @failed;
220 for (@failed, $failed[-1]) { # don't forget the last one
221 if ($_ > $last+1 || $_ == $last) {
225 push @canon, "$min-$last";
# Multiple failures: list all the canonical ranges...
232 push @result, "FAILED tests @canon\n";
# ...single failure: name the one test.
235 push @result, "FAILED test $last\n";
# Append the failed/total ratio and percentage-okay summary.
239 push @result, "\tFailed $failed/$max tests, ";
240 push @result, sprintf("%.2f",100*(1-$failed/$max)), "% okay\n";
241 my $txt = join "", @result;
250 Test::Harness - run perl standard test scripts with statistics
260 Perl test scripts print to standard output C<"ok N"> for each single
261 test, where C<N> is an increasing sequence of integers. The first line
262 output by a standard test script is C<"1..M"> with C<M> being the
263 number of tests that should be run within the test
264 script. Test::Harness::runtests(@tests) runs all the test scripts
265 named as arguments and checks standard output for the expected
268 After all tests have been performed, runtests() prints some
269 performance statistics that are computed by the Benchmark module.
271 =head2 The test script output
273 Any output from the test script to standard error is not captured by
274 runtests() but passed straight through, so it will be seen by the user. Lines written to standard
275 output containing C</^(not\s+)?ok\b/> are interpreted as feedback for
276 runtests(). All other lines are discarded.
278 It is tolerated if the test numbers after C<ok> are omitted. In this
279 case Test::Harness temporarily maintains its own counter until the
280 script supplies test numbers again. So the following test script
294 Failed 3/6 tests, 50.00% okay
296 The global variable $Test::Harness::verbose is exportable and can be
297 used to let runtests() display the standard output of the script
298 without altering the behavior otherwise.
302 C<&runtests> is exported by Test::Harness per default.
308 =item C<All tests successful.\nFiles=%d, Tests=%d, %s>
310 If all tests are successful some statistics about the performance are
313 =item C<FAILED tests %s\n\tFailed %d/%d tests, %.2f%% okay.>
315 For any single script that has failing subtests statistics like the
318 =item C<Test returned status %d (wstat %d)>
320 For scripts that return a non-zero exit status, both C<$? E<gt>E<gt> 8> and C<$?>
321 are printed in a message similar to the one above.
323 =item C<Failed 1 test, %.2f%% okay. %s>
325 =item C<Failed %d/%d tests, %.2f%% okay. %s>
327 If not all tests were successful, the script dies with one of the
334 See L<Benchmark> for the underlying timing routines.
338 Either Tim Bunce or Andreas Koenig, we don't know. What we know for
339 sure is that it was inspired by Larry Wall's TEST script that came
340 with perl distributions for ages. Current maintainer is Andreas
345 Test::Harness uses $^X to determine the perl binary to run the tests
346 with. Test scripts running via the shebang (C<#!>) line may not be
347 portable because $^X is not consistent for shebang scripts across
348 platforms. This is no problem when Test::Harness is run with an
349 absolute path to the perl binary or when $^X can be found in the path.