7 use vars qw($VERSION $verbose $switches);
13 @EXPORT= qw(&runtests);
14 @EXPORT_OK= qw($verbose $switches);
# NOTE(review): elided excerpt of the body of Test::Harness::runtests().
# The numeric prefixes are residual source-listing line numbers and many
# intervening lines are missing from this view, so only comments are added.
23 my($test,$te,$ok,$next,$max,$pct);
# Propagate the current @INC (-I flags) to child test scripts via PERL5LIB.
29 local($ENV{'PERL5LIB'}) = join($Config{path_sep}, @INC); # pass -I flags to children
# Time the entire run with the Benchmark module.
31 my $t_start = new Benchmark;
32 while ($test = shift(@tests)) {
# Progress header: script name padded with dots to a fixed width.
35 print "$te" . '.' x (20 - length($te));
# Run each script under the same perl binary ($^X) and read its stdout
# through a pipe.  NOTE(review): 2-arg pipe-open interpolates $test into
# a shell command — acceptable for trusted test names only.
36 my $fh = new FileHandle;
37 $fh->open("$^X $switches $test|") || (print "can't run. $!\n");
# Per-script counters: tests seen ok, next expected number, declared max.
38 $ok = $next = $max = 0;
# The "1..M" header line declares how many tests the script will run.
45 if (/^1\.\.([0-9]+)/) {
# "ok N" / "not ok N" lines are the individual test results.
50 } elsif ($max && /^(not\s+)?ok\b/) {
52 if (/^not ok\s*(\d*)/){
55 } elsif (/^ok\s*(\d*)/) {
# A forward gap in the numbering marks the skipped numbers as failed.
61 # warn "Test output counter mismatch [test $this]\n";
62 # no need to warn probably
63 push @failed, $next..$this-1;
64 } elsif ($this < $next) {
65 #we have seen more "ok" lines than the number suggests
66 warn "Aborting test: output counter mismatch [test $this answered when test $next expected]\n";
# Closing the pipe reaps the child and makes its wait status available.
73 $fh->close; # must close to reap child resource values
75 my $estatus = $wstatus >> 8;
# Success requires: all tests ok, the counter ran exactly to max+1,
# and the child exited with status 0.
76 if ($ok == $max && $next == $max+1 && ! $estatus) {
# Otherwise any unseen trailing tests are recorded as failures and
# reported via canonfailed().
81 push @failed, $next..$max;
84 print canonfailed($max,@failed);
86 print "Don't know which tests failed for some reason\n";
89 } elsif ($next == 0) {
90 print "FAILED before any test output arrived\n";
94 print "\tTest returned status $estatus (wstat $wstatus)\n";
97 my $t_total = timediff(new Benchmark, $t_start);
# Whole-run summary: print stats on success, die with a summary line
# when anything failed or no output was ever produced.
99 if ($bad == 0 && $totmax) {
100 print "All tests successful.\n";
102 die "FAILED--no tests were run for some reason.\n";
103 } elsif ($totmax==0) {
104 my $blurb = $total==1 ? "script" : "scripts";
105 die "FAILED--$total test $blurb could be run, alas -- no output ever seen\n";
107 $pct = sprintf("%.2f", $good / $total * 100);
108 my $subpct = sprintf " %d/%d subtests failed, %.2f%% okay.",
109 $totmax - $totok, $totmax, 100*$totok/$totmax;
111 die "Failed 1 test script, $pct% okay.$subpct\n";
113 die "Failed $bad/$total test scripts, $pct% okay.$subpct\n";
116 printf("Files=%d, Tests=%d, %s\n", $files, $totmax, timestr($t_total, 'nop'));
# Format a list of failed test numbers for display, collapsing runs of
# consecutive numbers into "min-max" ranges (e.g. "FAILED tests 3-5, 9").
# Returns the formatted report lines.
# NOTE(review): interior lines are elided in this view; code left untouched.
119 sub canonfailed ($@) {
120 my($max,@failed) = @_;
# Sort numerically and drop duplicates before grouping into ranges.
122 @failed = sort {$a <=> $b} grep !$seen{$_}++, @failed;
123 my $failed = @failed;
127 my $last = $min = shift @failed;
# The last element is appended once more so the final range gets flushed.
129 for (@failed, $failed[-1]) { # don't forget the last one
130 if ($_ > $last+1 || $_ == $last) {
134 push @canon, "$min-$last";
141 push @result, "FAILED tests @canon\n";
143 push @result, "FAILED test $last\n";
# Append the failure count and the percentage of tests that passed.
146 push @result, "\tFailed $failed/$max tests, ";
147 push @result, sprintf("%.2f",100*(1-$failed/$max)), "% okay\n";
156 Test::Harness - run perl standard test scripts with statistics
166 Perl test scripts print to standard output C<"ok N"> for each single
167 test, where C<N> is an increasing sequence of integers. The first line
168 output by a standard test script is C<"1..M"> with C<M> being the
169 number of tests that should be run within the test
170 script. Test::Harness::runtests(@tests) runs all the test scripts
171 named as arguments and checks standard output for the expected
174 After all tests have been performed, runtests() prints some
175 performance statistics that are computed by the Benchmark module.
177 =head2 The test script output
179 Any output from the test script to standard error is ignored and
180 bypassed, and thus will be seen by the user. Lines written to standard
181 output that look like perl comments (start with C</^\s*\#/>) are
182 discarded. Lines containing C</^(not\s+)?ok\b/> are interpreted as
183 feedback for runtests().
185 It is tolerated if the test numbers after C<ok> are omitted. In this
186 case Test::Harness maintains temporarily its own counter until the
187 script supplies test numbers again. So the following test script
201 Failed 3/6 tests, 50.00% okay
203 The global variable $Test::Harness::verbose is exportable and can be
204 used to let runtests() display the standard output of the script
205 without altering the behavior otherwise.
209 C<&runtests> is exported by Test::Harness by default.
215 =item C<All tests successful.\nFiles=%d, Tests=%d, %s>
217 If all tests are successful some statistics about the performance are
220 =item C<FAILED tests %s\n\tFailed %d/%d tests, %.2f%% okay.>
222 For any single script that has failing subtests statistics like the
225 =item C<Test returned status %d (wstat %d)>
227 For scripts that return a non-zero exit status, both $?>>8 and $? are
228 printed in a message similar to the above.
230 =item C<Failed 1 test, %.2f%% okay. %s>
232 =item C<Failed %d/%d tests, %.2f%% okay. %s>
234 If not all tests were successful, the script dies with one of the
241 See L<Benchmark> for the underlying timing routines.
245 Either Tim Bunce or Andreas Koenig, we don't know. What we know for
246 sure is, that it was inspired by Larry Wall's TEST script that came
247 with perl distributions for ages. Current maintainer is Andreas
252 Test::Harness uses $^X to determine the perl binary to run the tests
253 with. Test scripts running via the shebang (C<#!>) line may not be
254 portable because $^X is not consistent for shebang scripts across
255 platforms. This is no problem when Test::Harness is run with an
256 absolute path to the perl binary or when $^X can be found in the path.