# Package globals: $verbose and $switches are user-settable knobs,
# $have_devel_corestack caches whether Devel::CoreStack is loadable.
# NOTE(review): `use vars` is obsolete; a modern rewrite would use `our`.
10 use vars qw($VERSION $verbose $switches $have_devel_corestack $curtest
11 @ISA @EXPORT @EXPORT_OK);
12 $have_devel_corestack = 0;
# Exporter configuration: runtests() is exported by default,
# $verbose and $switches only on request.
17 @EXPORT= qw(&runtests);
18 @EXPORT_OK= qw($verbose $switches);
# Report-format picture lines for the failure-summary table printed at
# the end of a run (header rows plus the per-test detail row; see perlform).
21 Failed Test Status Wstat Total Fail Failed List of failed
22 ------------------------------------------------------------------------------
26 @<<<<<<<<<<<<<< @>> @>>>> @>>>> @>>> ^##.##% @<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
# runtests(@tests) body (sampled excerpt — lines are missing between the
# numbered statements below): runs every named test script with $^X,
# parses its "1..M" / "(not )ok N" output, records failures in
# %failedtests, prints per-script progress, and dies with a summary
# when any script failed.
44 my($test,$te,$ok,$next,$max,$pct,$totok,@failed,%failedtests);
51 # pass -I flags to children
52 my $old5lib = $ENV{PERL5LIB};
# local() restores the caller's PERL5LIB automatically on scope exit;
# $old5lib is also kept for the explicit restore further down.
53 local($ENV{'PERL5LIB'}) = join($Config{path_sep}, @INC);
# VMS: quote switches containing upper-case letters to preserve case.
55 if ($^O eq 'VMS') { $switches =~ s/-(\S*[A-Z]\S*)/"-$1"/g }
57 my $t_start = new Benchmark;
58 while ($test = shift(@tests)) {
61 if ($^O eq 'VMS') { $te =~ s/^.*\.t\./[.t./; }
# Print the test name padded with dots to a fixed-width column.
62 print "$te" . '.' x (20 - length($te));
63 my $fh = new FileHandle;
64 $fh->open($test) or print "can't open $test. $!\n";
# Re-run the script under taint mode if its shebang line requests -T.
67 $s .= q[ "-T"] if $first =~ /^#!.*\bperl.*-\w*T/;
68 $fh->close or print "can't close $test. $!\n";
# Run the test script as a child process and read its stdout via $fh.
# NOTE(review): 2-arg piped open with an interpolated file name — a
# modern rewrite would use the list form of open to avoid the shell.
69 my $cmd = "$^X $s $test|";
70 $cmd = "MCR $cmd" if $^O eq 'VMS';
71 $fh->open($cmd) or print "can't run $test. $!\n";
72 $ok = $next = $max = 0;
# "1..M" plan line: establishes the expected number of subtests.
78 if (/^1\.\.([0-9]+)/) {
83 } elsif ($max && /^(not\s+)?ok\b/) {
85 if (/^not ok\s*(\d*)/){
88 } elsif (/^ok\s*(\d*)/) {
# Numbers jumped ahead: every skipped number counts as a failure.
94 # warn "Test output counter mismatch [test $this]\n";
95 # no need to warn probably
96 push @failed, $next..$this-1;
97 } elsif ($this < $next) {
98 #we have seen more "ok" lines than the number suggests
99 warn "Confused test output: test $this answered after test ", $next-1, "\n";
105 $fh->close; # must close to reap child resource values
# Decode the child's exit status; VMS reports it differently.
107 my $estatus = ($^O eq 'VMS'
108 ? eval 'use vmsish "status"; $estatus = $?'
# NOTE(review): three variables but only two initializers — $percent
# starts out undef here, not '??'; confirm that is intended.
111 my ($failed, $canon, $percent) = ('??', '??');
112 print "dubious\n\tTest returned status $estatus (wstat $wstatus)\n";
113 print "\t\t(VMS status is $estatus)\n" if $^O eq 'VMS';
114 if (corestatus($wstatus)) { # until we have a wait module
115 if ($have_devel_corestack) {
116 Devel::CoreStack::stack($^X);
118 print "\ttest program seems to have generated a core\n";
# Every subtest passed, yet the script still exited non-zero.
123 if ($next == $max + 1 and not @failed) {
124 print "\tafter all the subtests completed successfully\n";
126 $failed = 0; # But we do not set $canon!
# Subtests that never ran are counted as failures.
128 push @failed, $next..$max;
130 (my $txt, $canon) = canonfailed($max,@failed);
131 $percent = 100*(scalar @failed)/$max;
135 $failedtests{$test} = { canon => $canon, max => $max || '??',
137 name => $test, percent => $percent,
138 estat => $estatus, wstat => $wstatus,
# Clean exit and every expected subtest was seen.
140 } elsif ($ok == $max && $next == $max+1) {
144 print "skipping test on this platform\n";
149 push @failed, $next..$max;
152 my ($txt, $canon) = canonfailed($max,@failed);
154 $failedtests{$test} = { canon => $canon, max => $max,
155 failed => scalar @failed,
156 name => $test, percent => 100*(scalar @failed)/$max,
157 estat => '', wstat => '',
160 print "Don't know which tests failed: got $ok ok, expected $max\n";
161 $failedtests{$test} = { canon => '??', max => $max,
163 name => $test, percent => undef,
164 estat => '', wstat => '',
# No "1..M" plan ever arrived — the script died before any output.
168 } elsif ($next == 0) {
169 print "FAILED before any test output arrived\n";
171 $failedtests{$test} = { canon => '??', max => '??',
173 name => $test, percent => undef,
174 estat => '', wstat => '',
178 my $t_total = timediff(new Benchmark, $t_start);
# Restore the caller's PERL5LIB (or remove it if it was never set).
181 if (defined $old5lib) {
182 $ENV{PERL5LIB} = $old5lib;
184 delete $ENV{PERL5LIB};
# Final verdict: all good, nothing ran, no output, or a failure summary.
187 if ($bad == 0 && $totmax) {
188 print "All tests successful.\n";
190 die "FAILED--no tests were run for some reason.\n";
191 } elsif ($totmax==0) {
192 my $blurb = $total==1 ? "script" : "scripts";
193 die "FAILED--$total test $blurb could be run, alas--no output ever seen\n";
195 $pct = sprintf("%.2f", $good / $total * 100);
196 my $subpct = sprintf " %d/%d subtests failed, %.2f%% okay.",
197 $totmax - $totok, $totmax, 100*$totok/$totmax;
# Emit one formatted report row (see the format picture above) per
# failed script; $curtest is the global the format reads from.
199 for $script (sort keys %failedtests) {
200 $curtest = $failedtests{$script};
204 die "Failed $bad/$total test scripts, $pct% okay.$subpct\n";
207 printf("Files=%d, Tests=%d, %s\n", $files, $totmax, timestr($t_total, 'nop'));
# True only when no script failed and at least one subtest was planned.
209 return ($bad == 0 && $totmax) ;
# File-scoped state for corestatus(): remember whether we already tried
# to load Devel::CoreStack so the require is attempted only once.
212 my $tried_devel_corestack;
# corestatus($wstat) excerpt: decide whether the child dumped core.
# Prefers the real WCOREDUMP() from wait.ph; falls back to testing the
# traditional 0200 core-dump bit when wait.ph is unavailable.
217 eval {require 'wait.ph'};
220 $ret = ($st & 0200); # Tim says, this is for 90%
223 $ret = WCOREDUMP($st);
# Lazily load Devel::CoreStack (once) so a stack trace can be printed.
226 eval { require Devel::CoreStack; $have_devel_corestack++ }
227 unless $tried_devel_corestack++;
# canonfailed($max, @failed) excerpt (lines missing between statements):
# builds the human-readable "FAILED tests ..." message plus a canonical,
# compressed list of failing test numbers (consecutive runs collapsed to
# "min-max").  Returns the message text and the canonical list.
232 sub canonfailed ($@) {
233 my($max,@failed) = @_;
# Sort numerically and drop duplicates before compressing runs.
235 @failed = sort {$a <=> $b} grep !$seen{$_}++, @failed;
236 my $failed = @failed;
240 my $last = $min = shift @failed;
# Walk the sorted list; repeating the final element as a sentinel
# forces the last pending range to be flushed.
243 for (@failed, $failed[-1]) { # don't forget the last one
244 if ($_ > $last+1 || $_ == $last) {
248 push @canon, "$min-$last";
255 push @result, "FAILED tests @canon\n";
258 push @result, "FAILED test $last\n";
# Append the summary line: failure count and percentage okay.
262 push @result, "\tFailed $failed/$max tests, ";
263 push @result, sprintf("%.2f",100*(1-$failed/$max)), "% okay\n";
264 my $txt = join "", @result;
273 Test::Harness - run perl standard test scripts with statistics
283 Perl test scripts print to standard output C<"ok N"> for each single
284 test, where C<N> is an increasing sequence of integers. The first line
285 output by a standard test script is C<"1..M"> with C<M> being the
286 number of tests that should be run within the test
287 script. Test::Harness::runtests(@tests) runs all the test scripts
288 named as arguments and checks standard output for the expected
291 After all tests have been performed, runtests() prints some
292 performance statistics that are computed by the Benchmark module.
294 =head2 The test script output
296 Any output from the test script to standard error is not captured
297 but passed through, and thus will be seen by the user. Lines written to standard
298 output containing C</^(not\s+)?ok\b/> are interpreted as feedback for
299 runtests(). All other lines are discarded.
301 The test numbers after C<ok> may be omitted. In this
302 case Test::Harness temporarily maintains its own counter until the
303 script supplies test numbers again. So the following test script
317 Failed 3/6 tests, 50.00% okay
319 The global variable $Test::Harness::verbose is exportable and can be
320 used to let runtests() display the standard output of the script
321 without altering the behavior otherwise.
325 C<&runtests> is exported by Test::Harness by default.
331 =item C<All tests successful.\nFiles=%d, Tests=%d, %s>
333 If all tests are successful some statistics about the performance are
336 =item C<FAILED tests %s\n\tFailed %d/%d tests, %.2f%% okay.>
338 For any single script that has failing subtests statistics like the
341 =item C<Test returned status %d (wstat %d)>
343 For scripts that return a non-zero exit status, both C<$? E<gt>E<gt> 8> and C<$?> are
344 printed in a message similar to the above.
346 =item C<Failed 1 test, %.2f%% okay. %s>
348 =item C<Failed %d/%d tests, %.2f%% okay. %s>
350 If not all tests were successful, the script dies with one of the
357 See L<Benchmark> for the underlying timing routines.
361 Either Tim Bunce or Andreas Koenig, we don't know. What we know for
362 sure is, that it was inspired by Larry Wall's TEST script that came
363 with perl distributions for ages. Numerous anonymous contributors
364 exist. Current maintainer is Andreas Koenig.
368 Test::Harness uses $^X to determine the perl binary to run the tests
369 with. Test scripts running via the shebang (C<#!>) line may not be
370 portable because $^X is not consistent for shebang scripts across
371 platforms. This is no problem when Test::Harness is run with an
372 absolute path to the perl binary or when $^X can be found in the path.