# Package globals (pre-`our` idiom): $verbose and $switches are user-tunable
# and exportable; $have_devel_corestack caches whether Devel::CoreStack is
# loadable (probed in corestatus()); $curtest feeds the failure-report format.
11 use vars qw($VERSION $verbose $switches $have_devel_corestack $curtest
12 @ISA @EXPORT @EXPORT_OK);
# Assume Devel::CoreStack is unavailable until corestatus() probes for it.
13 $have_devel_corestack = 0;
# Exporter interface: runtests() is exported by default;
# $verbose and $switches only on request.
18 @EXPORT= qw(&runtests);
19 @EXPORT_OK= qw($verbose $switches);
22 Failed Test Status Wstat Total Fail Failed List of failed
23 ------------------------------------------------------------------------------
27 @<<<<<<<<<<<<<< @>> @>>>> @>>>> @>>> ^##.##% @<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
# NOTE(review): this is an elided listing -- the enclosing `sub runtests {`
# (original line 44) and many interior lines are not shown here.
# Per-test working state: counters for expected/seen tests, the list of
# failed test numbers, and a per-script record of failures.
45 my($test,$te,$ok,$next,$max,$pct,$totok,@failed,%failedtests);
# Save the caller's PERL5LIB, then hand our @INC (i.e. any -I flags) down to
# the child test scripts via a localized PERL5LIB.
51 my $old5lib = $ENV{PERL5LIB};
52 local($ENV{'PERL5LIB'}) = join($Config{path_sep}, @INC); # pass -I flags to children
# Start the wall-clock/CPU timer for the whole run (Benchmark module).
54 my $t_start = new Benchmark;
# Main loop: run each test script in turn.
55 while ($test = shift(@tests)) {
# Print the script name padded with dots to a fixed width of 20 columns.
58 print "$te" . '.' x (20 - length($te));
# Run the script under the same perl ($^X) with the configured $switches,
# reading its stdout through a pipe.  VMS needs the MCR run-command prefix.
# NOTE(review): 2-arg piped open of an interpolated command -- legacy style.
59 my $fh = new FileHandle;
60 if ($^O eq 'VMS') { $fh->open("MCR $^X $switches $test|") || (print "can't run. $!\n"); }
61 else { $fh->open("$^X $switches $test|") || (print "can't run. $!\n"); }
# Reset per-script counters: $ok = tests passed, $next = next expected test
# number, $max = tests promised by the 1..M header.
62 $ok = $next = $max = 0;
# The "1..M" plan line: capture M (the number of tests the script will run).
68 if (/^1\.\.([0-9]+)/) {
# After the plan, every /^(not\s+)?ok\b/ line is a test result.
73 } elsif ($max && /^(not\s+)?ok\b/) {
75 if (/^not ok\s*(\d*)/){
78 } elsif (/^ok\s*(\d*)/) {
84 # warn "Test output counter mismatch [test $this]\n";
85 # no need to warn probably
# Test numbers jumped forward: treat the skipped numbers as failures.
86 push @failed, $next..$this-1;
87 } elsif ($this < $next) {
88 #we have seen more "ok" lines than the number suggests
89 warn "Confused test output: test $this answered after test ", $next-1, "\n";
95 $fh->close; # must close to reap child resource values
# Derive the exit status from the wait status; VMS reports it directly,
# Unix packs it in the high byte.  VMS success is the low bit of $wstatus.
97 my $estatus = $^O eq 'VMS' ? $wstatus : $wstatus >> 8;
98 if ($^O eq 'VMS' ? !($wstatus & 1) : $wstatus) {
# Non-zero (or VMS-failure) exit: report it, and check for a core dump,
# printing a stack trace if Devel::CoreStack is available.
99 print "dubious\n\tTest returned status $estatus (wstat $wstatus)\n";
100 if (corestatus($wstatus)) { # until we have a wait module
101 if ($have_devel_corestack) {
102 Devel::CoreStack::stack($^X);
104 print "\ttest program seems to have generated a core\n";
# Record the dubious script; canon/max are unknown ('??') when the script
# died before completing its plan.
108 $failedtests{$test} = { canon => '??', max => $max || '??',
110 name => $test, percent => undef,
111 estat => $estatus, wstat => $wstatus,
# Clean exit: all $max tests passed and the counter ran exactly one past.
113 } elsif ($ok == $max && $next == $max+1) {
# A "1..0" plan means the script chose to skip itself on this platform.
117 print "skipping test on this platform\n";
# Tests that never reported at all count as failed.
122 push @failed, $next..$max;
# Summarize the failed test numbers into text and a canonical range list.
125 my ($txt, $canon) = canonfailed($max,@failed);
127 $failedtests{$test} = { canon => $canon, max => $max,
128 failed => scalar @failed,
129 name => $test, percent => 100*(scalar @failed)/$max,
130 estat => '', wstat => '',
# Counters disagree but we cannot tell which individual tests failed.
133 print "Don't know which tests failed: got $ok ok, expected $max\n";
134 $failedtests{$test} = { canon => '??', max => $max,
136 name => $test, percent => undef,
137 estat => '', wstat => '',
# No plan line and no results at all: the script produced no test output.
141 } elsif ($next == 0) {
142 print "FAILED before any test output arrived\n";
144 $failedtests{$test} = { canon => '??', max => '??',
146 name => $test, percent => undef,
147 estat => '', wstat => '',
# Stop the timer now that every script has run.
151 my $t_total = timediff(new Benchmark, $t_start);
# VMS: the localized %ENV change leaks, so restore PERL5LIB by hand.
153 if ($^O eq 'VMS' and defined($old5lib)) { $ENV{PERL5LIB} = $old5lib; }
# Overall verdict: success, nothing ran, no output ever, or some failures.
154 if ($bad == 0 && $totmax) {
155 print "All tests successful.\n";
157 die "FAILED--no tests were run for some reason.\n";
158 } elsif ($totmax==0) {
159 my $blurb = $total==1 ? "script" : "scripts";
160 die "FAILED--$total test $blurb could be run, alas--no output ever seen\n";
# Failure summary: percentages plus a formatted per-script failure table
# written via $curtest and the report format.
162 $pct = sprintf("%.2f", $good / $total * 100);
163 my $subpct = sprintf " %d/%d subtests failed, %.2f%% okay.",
164 $totmax - $totok, $totmax, 100*$totok/$totmax;
166 for $script (sort keys %failedtests) {
167 $curtest = $failedtests{$script};
171 die "Failed $bad/$total test scripts, $pct% okay.$subpct\n";
# Benchmark statistics for the whole run ('nop' = no parent/child split).
174 printf("Files=%d, Tests=%d, %s\n", $files, $totmax, timestr($t_total, 'nop'));
# True iff no script failed and at least one subtest was expected.
176 return ($bad == 0 && $totmax) ;
# NOTE(review): elided interior of sub corestatus($wstat) -- the sub header
# and some lines are not shown in this listing.
# Try to load wait.ph so we can use the real WCOREDUMP macro.
183 eval {require 'wait.ph'};
# Fallback when wait.ph is unavailable: on most Unices bit 0200 of the wait
# status is the "dumped core" flag.
186 $ret = ($st & 0200); # Tim says, this is for 90%
# wait.ph loaded: ask the real macro.
189 $ret = WCOREDUMP($st);
# While we are here, probe once for Devel::CoreStack and remember whether it
# loaded, so runtests() can print a stack trace for core dumps.
192 eval {require Devel::CoreStack};
193 $have_devel_corestack++ unless $@;
# Build a human-readable summary of which tests failed.  Takes the expected
# test count and the list of failed test numbers; consecutive numbers are
# compressed into "min-max" ranges.  (This listing elides some lines,
# including the final return -- the caller at original line 125 receives
# ($txt, $canon): the multi-line text and the canonical range string.)
198 sub canonfailed ($@) {
199 my($max,@failed) = @_;
# Deduplicate (via %seen) and sort the failed test numbers numerically.
201 @failed = sort {$a <=> $b} grep !$seen{$_}++, @failed;
202 my $failed = @failed;
# Walk the sorted list, tracking the start ($min) and end ($last) of the
# current consecutive run.
206 my $last = $min = shift @failed;
209 for (@failed, $failed[-1]) { # don't forget the last one
# A gap (or the duplicated sentinel element) closes the current run.
210 if ($_ > $last+1 || $_ == $last) {
214 push @canon, "$min-$last";
# Several failures: list the canonical ranges; a single failure gets the
# singular message instead.
221 push @result, "FAILED tests @canon\n";
224 push @result, "FAILED test $last\n";
# Append the failed/total ratio and the percentage that still passed.
228 push @result, "\tFailed $failed/$max tests, ";
229 push @result, sprintf("%.2f",100*(1-$failed/$max)), "% okay\n";
230 my $txt = join "", @result;
239 Test::Harness - run perl standard test scripts with statistics
249 Perl test scripts print to standard output C<"ok N"> for each single
250 test, where C<N> is an increasing sequence of integers. The first line
251 output by a standard test script is C<"1..M"> with C<M> being the
252 number of tests that should be run within the test
253 script. Test::Harness::runtests(@tests) runs all the testscripts
254 named as arguments and checks standard output for the expected
257 After all tests have been performed, runtests() prints some
258 performance statistics that are computed by the Benchmark module.
260 =head2 The test script output
262 Any output from the testscript to standard error is ignored and
263 bypassed, and will thus be seen by the user. Lines written to standard
264 output containing C</^(not\s+)?ok\b/> are interpreted as feedback for
265 runtests(). All other lines are discarded.
267 The test numbers after C<ok> may be omitted. In that
268 case Test::Harness temporarily maintains its own counter until the
269 script supplies test numbers again. So the following test script
283 Failed 3/6 tests, 50.00% okay
285 The global variable $Test::Harness::verbose is exportable and can be
286 used to let runtests() display the standard output of the script
287 without altering the behavior otherwise.
291 C<&runtests> is exported by Test::Harness per default.
297 =item C<All tests successful.\nFiles=%d, Tests=%d, %s>
299 If all tests are successful some statistics about the performance are
302 =item C<FAILED tests %s\n\tFailed %d/%d tests, %.2f%% okay.>
304 For any single script that has failing subtests statistics like the
307 =item C<Test returned status %d (wstat %d)>
309 Scripts that return a non-zero exit status, both C<$? E<gt>E<gt> 8> and C<$?> are
310 printed in a message similar to the above.
312 =item C<Failed 1 test, %.2f%% okay. %s>
314 =item C<Failed %d/%d tests, %.2f%% okay. %s>
316 If not all tests were successful, the script dies with one of the
323 See L<Benchmark> for the underlying timing routines.
327 Either Tim Bunce or Andreas Koenig, we don't know. What we know for
328 sure is, that it was inspired by Larry Wall's TEST script that came
329 with perl distributions for ages. Current maintainer is Andreas
334 Test::Harness uses $^X to determine the perl binary to run the tests
335 with. Test scripts running via the shebang (C<#!>) line may not be
336 portable because $^X is not consistent for shebang scripts across
337 platforms. This is no problem when Test::Harness is run with an
338 absolute path to the perl binary or when $^X can be found in the path.