8 use vars qw($VERSION $verbose $switches $have_devel_corestack);
9 $have_devel_corestack = 0;
14 @EXPORT= qw(&runtests);
15 @EXPORT_OK= qw($verbose $switches);
24 my($test,$te,$ok,$next,$max,$pct);
30 local($ENV{'PERL5LIB'}) = join($Config{path_sep}, @INC); # pass -I flags to children
32 my $t_start = new Benchmark;
33 while ($test = shift(@tests)) {
36 print "$te" . '.' x (20 - length($te));
37 my $fh = new FileHandle;
38 $fh->open("$^X $switches $test|") || (print "can't run. $!\n");
39 $ok = $next = $max = 0;
45 if (/^1\.\.([0-9]+)/) {
50 } elsif ($max && /^(not\s+)?ok\b/) {
52 if (/^not ok\s*(\d*)/){
55 } elsif (/^ok\s*(\d*)/) {
61 # warn "Test output counter mismatch [test $this]\n";
62 # no need to warn probably
63 push @failed, $next..$this-1;
64 } elsif ($this < $next) {
65 #we have seen more "ok" lines than the number suggests
66 warn "Confused test output: test $this answered after test ", $next-1, "\n";
72 $fh->close; # must close to reap child resource values
74 my $estatus = $^O eq 'VMS' ? $wstatus : $wstatus >> 8;
75 if ($^O eq 'VMS' ? !($wstatus & 1) : $wstatus) {
76 print "dubious\n\tTest returned status $estatus (wstat $wstatus)\n";
77 if (corestatus($wstatus)) { # until we have a wait module
78 if ($have_devel_corestack) {
79 Devel::CoreStack::stack($^X);
81 print "\ttest program seems to have generated a core\n";
85 } elsif ($ok == $max && $next == $max+1) {
89 print "skipping test on this platform\n";
94 push @failed, $next..$max;
97 print canonfailed($max,@failed);
99 print "Don't know which tests failed: got $ok ok, expected $max\n";
102 } elsif ($next == 0) {
103 print "FAILED before any test output arrived\n";
107 my $t_total = timediff(new Benchmark, $t_start);
109 if ($bad == 0 && $totmax) {
110 print "All tests successful.\n";
112 die "FAILED--no tests were run for some reason.\n";
113 } elsif ($totmax==0) {
114 my $blurb = $total==1 ? "script" : "scripts";
115 die "FAILED--$total test $blurb could be run, alas--no output ever seen\n";
117 $pct = sprintf("%.2f", $good / $total * 100);
118 my $subpct = sprintf " %d/%d subtests failed, %.2f%% okay.",
119 $totmax - $totok, $totmax, 100*$totok/$totmax;
121 die "Failed 1 test script, $pct% okay.$subpct\n";
123 die "Failed $bad/$total test scripts, $pct% okay.$subpct\n";
126 printf("Files=%d, Tests=%d, %s\n", $files, $totmax, timestr($t_total, 'nop'));
133 eval {require 'wait.ph'};
136 $ret = ($st & 0200); # Tim says, this is for 90%
139 $ret = WCOREDUMP($st);
142 eval {require Devel::CoreStack};
143 $have_devel_corestack++ unless $@;
148 sub canonfailed ($@) {
149 my($max,@failed) = @_;
151 @failed = sort {$a <=> $b} grep !$seen{$_}++, @failed;
152 my $failed = @failed;
156 my $last = $min = shift @failed;
158 for (@failed, $failed[-1]) { # don't forget the last one
159 if ($_ > $last+1 || $_ == $last) {
163 push @canon, "$min-$last";
170 push @result, "FAILED tests @canon\n";
172 push @result, "FAILED test $last\n";
175 push @result, "\tFailed $failed/$max tests, ";
176 push @result, sprintf("%.2f",100*(1-$failed/$max)), "% okay\n";
185 Test::Harness - run perl standard test scripts with statistics
195 Perl test scripts print to standard output C<"ok N"> for each single
196 test, where C<N> is an increasing sequence of integers. The first line
197 output by a standard test script is C<"1..M"> with C<M> being the
198 number of tests that should be run within the test
199 script. Test::Harness::runtests(@tests) runs all the test scripts
200 named as arguments and checks standard output for the expected
203 After all tests have been performed, runtests() prints some
204 performance statistics that are computed by the Benchmark module.
206 =head2 The test script output
208 Any output from the test script to standard error is ignored and
209 bypassed, and thus will be seen by the user. Lines written to standard
210 output containing C</^(not\s+)?ok\b/> are interpreted as feedback for
211 runtests(). All other lines are discarded.
213 The test numbers after C<ok> may be omitted. In this
214 case Test::Harness temporarily maintains its own counter until the
215 script supplies test numbers again. So the following test script
229 Failed 3/6 tests, 50.00% okay
231 The global variable $Test::Harness::verbose is exportable and can be
232 used to let runtests() display the standard output of the script
233 without otherwise altering the behavior.
237 C<&runtests> is exported by Test::Harness by default.
243 =item C<All tests successful.\nFiles=%d, Tests=%d, %s>
245 If all tests are successful, some statistics about the performance are
248 =item C<FAILED tests %s\n\tFailed %d/%d tests, %.2f%% okay.>
250 For any single script that has failing subtests, statistics like the
253 =item C<Test returned status %d (wstat %d)>
255 For scripts that return a non-zero exit status, both $?>>8 and $? are
256 printed in a message similar to the above.
258 =item C<Failed 1 test, %.2f%% okay. %s>
260 =item C<Failed %d/%d tests, %.2f%% okay. %s>
262 If not all tests were successful, the script dies with one of the
269 See L<Benchmark> for the underlying timing routines.
273 Either Tim Bunce or Andreas Koenig, we don't know. What we know for
274 sure is that it was inspired by Larry Wall's TEST script that came
275 with perl distributions for ages. Current maintainer is Andreas
280 Test::Harness uses $^X to determine the perl binary to run the tests
281 with. Test scripts running via the shebang (C<#!>) line may not be
282 portable because $^X is not consistent for shebang scripts across
283 platforms. This is no problem when Test::Harness is run with an
284 absolute path to the perl binary or when $^X can be found in the path.