10 use vars qw($VERSION $verbose $switches $have_devel_corestack $curtest
11 @ISA @EXPORT @EXPORT_OK);
12 $have_devel_corestack = 0;
17 @EXPORT= qw(&runtests);
18 @EXPORT_OK= qw($verbose $switches);
21 Failed Test Status Wstat Total Fail Failed List of failed
22 ------------------------------------------------------------------------------
26 @<<<<<<<<<<<<<< @>> @>>>> @>>>> @>>> ^##.##% @<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
# --- body of Test::Harness::runtests (excerpted listing; numbers at left are
# --- original file line numbers, gaps mark elided lines -- do not assume
# --- contiguity of the statements below).
44 my($test,$te,$ok,$next,$max,$pct,$totok,@failed,%failedtests);
# Propagate the harness's @INC to child test scripts via PERL5LIB so they
# find the same modules (stands in for passing -I flags); local() restores
# the caller's environment afterwards.
50 local($ENV{'PERL5LIB'}) = join($Config{path_sep}, @INC); # pass -I flags to children
# Start timing the whole run with the Benchmark module.
52 my $t_start = new Benchmark;
53 while ($test = shift(@tests)) {
# Progress display: test name padded with dots to a fixed 20-column width.
56 print "$te" . '.' x (20 - length($te));
# Run the test script under the same perl binary ($^X) plus user-supplied
# $switches, reading its standard output through a pipe.
57 my $fh = new FileHandle;
58 $fh->open("$^X $switches $test|") || (print "can't run. $!\n");
59 $ok = $next = $max = 0;
# "1..M" header line sets $max, the planned number of subtests.
65 if (/^1\.\.([0-9]+)/) {
# Result lines (/^(not\s+)?ok\b/) are only honored once the plan ($max)
# has been seen.
70 } elsif ($max && /^(not\s+)?ok\b/) {
72 if (/^not ok\s*(\d*)/){
75 } elsif (/^ok\s*(\d*)/) {
# A reported number beyond the expected $next means intervening tests
# emitted nothing; record them as failed.
81 # warn "Test output counter mismatch [test $this]\n";
82 # no need to warn probably
83 push @failed, $next..$this-1;
84 } elsif ($this < $next) {
85 #we have seen more "ok" lines than the number suggests
86 warn "Confused test output: test $this answered after test ", $next-1, "\n";
92 $fh->close; # must close to reap child resource values
# On VMS $? already holds the exit status; elsewhere it is in the high byte.
94 my $estatus = $^O eq 'VMS' ? $wstatus : $wstatus >> 8;
# A failing wait status (VMS: even status value) means the script itself
# died or exited non-zero.
95 if ($^O eq 'VMS' ? !($wstatus & 1) : $wstatus) {
96 print "dubious\n\tTest returned status $estatus (wstat $wstatus)\n";
97 if (corestatus($wstatus)) { # until we have a wait module
98 if ($have_devel_corestack) {
# Devel::CoreStack (when installed) dumps a stack trace from the core file.
99 Devel::CoreStack::stack($^X);
101 print "\ttest program seems to have generated a core\n";
# '??' markers: we cannot canonicalize failures for a script that crashed.
105 $failedtests{$test} = { canon => '??', max => $max || '??',
107 name => $test, percent => undef,
108 estat => $estatus, wstat => $wstatus,
# Success case: every planned subtest ran and passed.
110 } elsif ($ok == $max && $next == $max+1) {
114 print "skipping test on this platform\n";
# Subtests that never reported in are counted as failures.
119 push @failed, $next..$max;
# canonfailed() yields a human-readable report ($txt) and a compressed
# canonical range string ($canon, e.g. "3-5 9").
122 my ($txt, $canon) = canonfailed($max,@failed);
124 $failedtests{$test} = { canon => $canon, max => $max,
125 failed => scalar @failed,
126 name => $test, percent => 100*(scalar @failed)/$max,
127 estat => '', wstat => '',
# ok-count disagrees with the plan but no failures were recorded: we
# cannot tell which subtests failed.
130 print "Don't know which tests failed: got $ok ok, expected $max\n";
131 $failedtests{$test} = { canon => '??', max => $max,
133 name => $test, percent => undef,
134 estat => '', wstat => '',
# $next still 0: the script produced no plan and no results at all.
138 } elsif ($next == 0) {
139 print "FAILED before any test output arrived\n";
141 $failedtests{$test} = { canon => '??', max => '??',
143 name => $test, percent => undef,
144 estat => '', wstat => '',
148 my $t_total = timediff(new Benchmark, $t_start);
# Run summary: all passed / nothing ran / per-script failure report.
150 if ($bad == 0 && $totmax) {
151 print "All tests successful.\n";
153 die "FAILED--no tests were run for some reason.\n";
154 } elsif ($totmax==0) {
155 my $blurb = $total==1 ? "script" : "scripts";
156 die "FAILED--$total test $blurb could be run, alas--no output ever seen\n";
158 $pct = sprintf("%.2f", $good / $total * 100);
159 my $subpct = sprintf " %d/%d subtests failed, %.2f%% okay.",
160 $totmax - $totok, $totmax, 100*$totok/$totmax;
# One formatted report line per failed script; the write() format reads
# the package global $curtest.
162 for $script (sort keys %failedtests) {
163 $curtest = $failedtests{$script};
167 die "Failed $bad/$total test scripts, $pct% okay.$subpct\n";
170 printf("Files=%d, Tests=%d, %s\n", $files, $totmax, timestr($t_total, 'nop'));
# True iff no script failed and at least one subtest was planned.
172 return ($bad == 0 && $totmax) ;
# --- interior of corestatus($st): heuristically decide whether a wait
# --- status indicates that the child dumped core (excerpted listing with
# --- elided lines).
179 eval {require 'wait.ph'};
# Fallback when wait.ph is unavailable: bit 0200 of the wait status is the
# traditional "dumped core" flag on most Unix systems.
182 $ret = ($st & 0200); # Tim says, this is for 90%
# Preferred path: the real WCOREDUMP macro supplied by wait.ph.
185 $ret = WCOREDUMP($st);
# Also note whether Devel::CoreStack is installed, for later stack dumps.
188 eval {require Devel::CoreStack};
189 $have_devel_corestack++ unless $@;
# canonfailed($max, @failed): build a human-readable failure report and a
# canonical compressed list of failed test numbers (consecutive runs
# collapsed to "min-max").  Used by runtests() to fill %failedtests.
# NOTE(review): the ($@) prototype is historical Perl style; prototypes do
# not validate arguments and would not be used in new code.
194 sub canonfailed ($@) {
195 my($max,@failed) = @_;
# Sort numerically and drop duplicate test numbers.
197 @failed = sort {$a <=> $b} grep !$seen{$_}++, @failed;
198 my $failed = @failed;
202 my $last = $min = shift @failed;
# Append a sentinel copy of the final element so the last run of
# consecutive numbers is flushed inside the loop.
205 for (@failed, $failed[-1]) { # don't forget the last one
206 if ($_ > $last+1 || $_ == $last) {
# A gap ends the current run; emit it as a "min-last" range.
210 push @canon, "$min-$last";
217 push @result, "FAILED tests @canon\n";
220 push @result, "FAILED test $last\n";
# Trailing summary line with the pass percentage.
224 push @result, "\tFailed $failed/$max tests, ";
225 push @result, sprintf("%.2f",100*(1-$failed/$max)), "% okay\n";
226 my $txt = join "", @result;
235 Test::Harness - run perl standard test scripts with statistics
245 Perl test scripts print to standard output C<"ok N"> for each single
246 test, where C<N> is an increasing sequence of integers. The first line
247 output by a standard test script is C<"1..M"> with C<M> being the
248 number of tests that should be run within the test
249 script. Test::Harness::runtests(@tests) runs all the testscripts
250 named as arguments and checks standard output for the expected
253 After all tests have been performed, runtests() prints some
254 performance statistics that are computed by the Benchmark module.
256 =head2 The test script output
258 Any output from the test script to standard error is not captured but
259 passed straight through, so the user will see it. Lines written to standard
260 output containing C</^(not\s+)?ok\b/> are interpreted as feedback for
261 runtests(). All other lines are discarded.
263 The test numbers after C<ok> may be omitted. In this
264 case Test::Harness temporarily maintains its own counter until the
265 script supplies test numbers again. So the following test script
279 Failed 3/6 tests, 50.00% okay
281 The global variable $Test::Harness::verbose is exportable and can be
282 used to let runtests() display the standard output of the script
283 without altering the behavior otherwise.
287 C<&runtests> is exported by Test::Harness per default.
293 =item C<All tests successful.\nFiles=%d, Tests=%d, %s>
295 If all tests are successful some statistics about the performance are
298 =item C<FAILED tests %s\n\tFailed %d/%d tests, %.2f%% okay.>
300 For any single script that has failing subtests statistics like the
303 =item C<Test returned status %d (wstat %d)>
305 For scripts that return a non-zero exit status, both $?>>8 and $? are
306 printed in a message similar to the above.
308 =item C<Failed 1 test, %.2f%% okay. %s>
310 =item C<Failed %d/%d tests, %.2f%% okay. %s>
312 If not all tests were successful, the script dies with one of the
319 See L<Benchmark> for the underlying timing routines.
323 Either Tim Bunce or Andreas Koenig, we don't know. What we know for
324 sure is that it was inspired by Larry Wall's TEST script that came
325 with perl distributions for ages. Current maintainer is Andreas
330 Test::Harness uses $^X to determine the perl binary to run the tests
331 with. Test scripts running via the shebang (C<#!>) line may not be
332 portable because $^X is not consistent for shebang scripts across
333 platforms. This is no problem when Test::Harness is run with an
334 absolute path to the perl binary or when $^X can be found in the path.