10 use vars qw($VERSION $verbose $switches $have_devel_corestack $curtest
11 @ISA @EXPORT @EXPORT_OK);
12 $have_devel_corestack = 0;
17 @EXPORT= qw(&runtests);
18 @EXPORT_OK= qw($verbose $switches);
21 Failed Test Status Wstat Total Fail Failed List of failed
22 ------------------------------------------------------------------------------
26 @<<<<<<<<<<<<<< @>> @>>>> @>>>> @>>> ^##.##% @<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
# NOTE(review): this is the interior of sub runtests() from Test::Harness;
# the sub header and many intermediate lines are elided from this excerpt,
# so comments below describe only what the visible lines establish.
# Per-script state: $ok = count of passing subtests, $next = next expected
# test number, $max = declared test count from the "1..M" plan line.
44 my($test,$te,$ok,$next,$max,$pct,$totok,@failed,%failedtests);
51 # pass -I flags to children
# Save the caller's PERL5LIB and temporarily replace it with our own @INC
# so child test scripts see the same library search path.
52 my $old5lib = $ENV{PERL5LIB};
53 local($ENV{'PERL5LIB'}) = join($Config{path_sep}, @INC);
# VMS command line munging: quote switches containing uppercase letters
# to preserve their case.
55 if ($^O eq 'VMS') { $switches =~ s/-(\S*[A-Z]\S*)/"-$1"/g }
# Wall-clock/CPU timing for the whole run, reported at the end.
57 my $t_start = new Benchmark;
58 while ($test = shift(@tests)) {
61 if ($^O eq 'VMS') { $te =~ s/^.*\.t\./[.t./; }
# Print the test name padded with dots to a fixed width for alignment.
62 print "$te" . '.' x (20 - length($te));
63 my $fh = new FileHandle;
# First open: peek at the script's shebang line (read into $first in an
# elided line) to decide whether to propagate taint mode.
64 $fh->open($test) or print "can't open $test. $!\n";
67 $s .= q[ "-T"] if $first =~ /^#!.*\bperl.*-\w*T/;
68 $fh->close or print "can't close $test. $!\n";
# Second open: run the script via a pipe ($^X = the perl that runs us)
# and read its standard output line by line.
69 my $cmd = "$^X $s $test|";
70 $cmd = "MCR $cmd" if $^O eq 'VMS';
71 $fh->open($cmd) or print "can't run $test. $!\n";
72 $ok = $next = $max = 0;
# Plan line "1..M" sets $max; subsequent "ok"/"not ok" lines are counted
# only once a plan has been seen ($max true).
78 if (/^1\.\.([0-9]+)/) {
83 } elsif ($max && /^(not\s+)?ok\b/) {
85 if (/^not ok\s*(\d*)/){
88 } elsif (/^ok\s*(\d*)/) {
94 # warn "Test output counter mismatch [test $this]\n";
95 # no need to warn probably
# Tests $next..$this-1 were skipped over by the script: record as failed.
96 push @failed, $next..$this-1;
97 } elsif ($this < $next) {
98 #we have seen more "ok" lines than the number suggests
99 warn "Confused test output: test $this answered after test ", $next-1, "\n";
105 $fh->close; # must close to reap child resource values
# $wstatus is the raw wait status (set in an elided line, presumably from
# $? -- TODO confirm); $estatus is the conventional exit code.
107 my $estatus = $wstatus >> 8;
108 if ($^O eq 'VMS') { use vmsish 'status'; $estatus = $?; }
# Placeholders for the failure report; note $percent is deliberately
# left undef here and only filled in when it can be computed.
110 my ($failed, $canon, $percent) = ('??', '??');
111 print "dubious\n\tTest returned status $estatus (wstat $wstatus)\n";
112 print "\t\t(VMS status is $estatus)\n" if $^O eq 'VMS';
113 if (corestatus($wstatus)) { # until we have a wait module
# If Devel::CoreStack loaded successfully, dump a stack trace from
# the core file; otherwise just report that a core was generated.
114 if ($have_devel_corestack) {
115 Devel::CoreStack::stack($^X);
117 print "\ttest program seems to have generated a core\n";
# All subtests passed but the script still exited non-zero.
122 if ($next == $max + 1 and not @failed) {
123 print "\tafter all the subtests completed successfully\n";
125 $failed = 0; # But we do not set $canon!
# Script died partway: everything from $next to $max never ran.
127 push @failed, $next..$max;
129 (my $txt, $canon) = canonfailed($max,@failed);
130 $percent = 100*(scalar @failed)/$max;
# Record the failure details keyed by script name for the summary table.
134 $failedtests{$test} = { canon => $canon, max => $max || '??',
136 name => $test, percent => $percent,
137 estat => $estatus, wstat => $wstatus,
# Clean exit and every expected subtest answered.
139 } elsif ($ok == $max && $next == $max+1) {
# A plan of "1..0" means the script chose to skip itself.
143 print "skipping test on this platform\n";
# Clean exit but not all planned subtests were seen.
148 push @failed, $next..$max;
151 my ($txt, $canon) = canonfailed($max,@failed);
153 $failedtests{$test} = { canon => $canon, max => $max,
154 failed => scalar @failed,
155 name => $test, percent => 100*(scalar @failed)/$max,
156 estat => '', wstat => '',
# Counters disagree in a way we cannot attribute to specific tests.
159 print "Don't know which tests failed: got $ok ok, expected $max\n";
160 $failedtests{$test} = { canon => '??', max => $max,
162 name => $test, percent => undef,
163 estat => '', wstat => '',
# No "1..M" plan and no "ok" lines ever arrived.
167 } elsif ($next == 0) {
168 print "FAILED before any test output arrived\n";
170 $failedtests{$test} = { canon => '??', max => '??',
172 name => $test, percent => undef,
173 estat => '', wstat => '',
177 my $t_total = timediff(new Benchmark, $t_start);
# Restore the caller's PERL5LIB (it may legitimately have been unset).
180 if (defined $old5lib) {
181 $ENV{PERL5LIB} = $old5lib;
183 delete $ENV{PERL5LIB};
# Final verdict: success, nothing run, output-free scripts, or a
# formatted failure summary followed by die().
186 if ($bad == 0 && $totmax) {
187 print "All tests successful.\n";
189 die "FAILED--no tests were run for some reason.\n";
190 } elsif ($totmax==0) {
191 my $blurb = $total==1 ? "script" : "scripts";
192 die "FAILED--$total test $blurb could be run, alas--no output ever seen\n";
194 $pct = sprintf("%.2f", $good / $total * 100);
195 my $subpct = sprintf " %d/%d subtests failed, %.2f%% okay.",
196 $totmax - $totok, $totmax, 100*$totok/$totmax;
# Emit one formatted line per failed script via the package formats
# (written in an elided line with $curtest as the format's data source).
198 for $script (sort keys %failedtests) {
199 $curtest = $failedtests{$script};
203 die "Failed $bad/$total test scripts, $pct% okay.$subpct\n";
206 printf("Files=%d, Tests=%d, %s\n", $files, $totmax, timestr($t_total, 'nop'));
# True exactly when every script passed and at least one subtest ran.
208 return ($bad == 0 && $totmax) ;
# NOTE(review): interior fragment of sub corestatus($wstatus) -- the sub
# header and several lines are elided. Determines whether the child's wait
# status indicates a core dump.
211 my $tried_devel_corestack;
# Try the converted system header for a real WCOREDUMP(); fall back to
# testing the conventional core-dump bit (0200) by hand if unavailable.
216 eval {require 'wait.ph'};
219 $ret = ($st & 0200); # Tim says, this is for 90%
222 $ret = WCOREDUMP($st);
# Lazily probe for Devel::CoreStack exactly once per process; sets the
# package flag consulted by runtests() when a core is detected.
225 eval { require Devel::CoreStack; $have_devel_corestack++ }
226 unless $tried_devel_corestack++;
# NOTE(review): body shown with elided lines; closing brace outside this
# excerpt. Given the plan size $max and the list of failed test numbers,
# returns a human-readable failure report ($txt) and a canonical
# comma-separated list with runs collapsed to ranges ($canon),
# e.g. "2-5, 9".
231 sub canonfailed ($@) {
232 my($max,@failed) = @_;
# Sort numerically and drop duplicates (%seen declared in an elided line).
234 @failed = sort {$a <=> $b} grep !$seen{$_}++, @failed;
235 my $failed = @failed;
239 my $last = $min = shift @failed;
# Walk the sorted numbers, emitting a range "$min-$last" whenever a gap
# closes; the last element is appended again so the final run is flushed.
242 for (@failed, $failed[-1]) { # don't forget the last one
243 if ($_ > $last+1 || $_ == $last) {
247 push @canon, "$min-$last";
# Plural vs singular wording depending on how many tests failed.
254 push @result, "FAILED tests @canon\n";
257 push @result, "FAILED test $last\n";
261 push @result, "\tFailed $failed/$max tests, ";
262 push @result, sprintf("%.2f",100*(1-$failed/$max)), "% okay\n";
263 my $txt = join "", @result;
272 Test::Harness - run perl standard test scripts with statistics
282 Perl test scripts print to standard output C<"ok N"> for each single
283 test, where C<N> is an increasing sequence of integers. The first line
284 output by a standard test script is C<"1..M"> with C<M> being the
285 number of tests that should be run within the test
286 script. Test::Harness::runtests(@tests) runs all the test scripts
287 named as arguments and checks standard output for the expected
290 After all tests have been performed, runtests() prints some
291 performance statistics that are computed by the Benchmark module.
293 =head2 The test script output
295 Any output from the test script to standard error is not examined by
296 the harness but passed through, and thus will be seen by the user. Lines written to standard
297 output containing C</^(not\s+)?ok\b/> are interpreted as feedback for
298 runtests(). All other lines are discarded.
300 The test numbers after C<ok> may be omitted. In this
301 case Test::Harness temporarily maintains its own counter until the
302 script supplies test numbers again. So the following test script
316 Failed 3/6 tests, 50.00% okay
318 The global variable $Test::Harness::verbose is exportable and can be
319 used to let runtests() display the standard output of the script
320 without altering the behavior otherwise.
324 C<&runtests> is exported by Test::Harness per default.
330 =item C<All tests successful.\nFiles=%d, Tests=%d, %s>
332 If all tests are successful some statistics about the performance are
335 =item C<FAILED tests %s\n\tFailed %d/%d tests, %.2f%% okay.>
337 For any single script that has failing subtests statistics like the
340 =item C<Test returned status %d (wstat %d)>
342 For scripts that return a non-zero exit status, both C<$? E<gt>E<gt> 8> and C<$?> are
343 printed in a message similar to the above.
345 =item C<Failed 1 test, %.2f%% okay. %s>
347 =item C<Failed %d/%d tests, %.2f%% okay. %s>
349 If not all tests were successful, the script dies with one of the
356 See L<Benchmark> for the underlying timing routines.
360 Either Tim Bunce or Andreas Koenig, we don't know. What we know for
361 sure is, that it was inspired by Larry Wall's TEST script that came
362 with perl distributions for ages. Numerous anonymous contributors
363 exist. Current maintainer is Andreas Koenig.
367 Test::Harness uses $^X to determine the perl binary to run the tests
368 with. Test scripts running via the shebang (C<#!>) line may not be
369 portable because $^X is not consistent for shebang scripts across
370 platforms. This is no problem when Test::Harness is run with an
371 absolute path to the perl binary or when $^X can be found in the path.