# Package globals: runner configuration ($verbose, $switches), the flag
# recording whether Devel::CoreStack could be loaded, the current test
# record used by the failure-report format, and the Exporter lists.
# NOTE(review): `use vars` is obsolete in modern Perl (`our` is preferred),
# kept byte-identical here.
10 use vars qw($VERSION $verbose $switches $have_devel_corestack $curtest
11 @ISA @EXPORT @EXPORT_OK);
# Assume Devel::CoreStack is unavailable until a later require succeeds.
12 $have_devel_corestack = 0;
# runtests() is exported by default; the config scalars only on request.
17 @EXPORT= qw(&runtests);
18 @EXPORT_OK= qw($verbose $switches);
21 Failed Test Status Wstat Total Fail Failed List of failed
22 -------------------------------------------------------------------------------
26 @<<<<<<<<<<<<<< @>> @>>>> @>>>> @>>> ^##.##% ^<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
35 ~~ ^<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
# Per-run working variables: current script, its display name, the
# ok/next/max counters, run totals, and per-script failure records.
46 my($test,$te,$ok,$next,$max,$pct,$totok,$totbonus,@failed,%failedtests);
53 # pass -I flags to children
# Remember the caller's PERL5LIB so it can be restored after the loop,
# then point child perls at our own @INC via a localized PERL5LIB.
54 my $old5lib = $ENV{PERL5LIB};
55 local($ENV{'PERL5LIB'}) = join($Config{path_sep}, @INC);
# VMS: quote switches containing an uppercase letter to preserve case.
57 if ($^O eq 'VMS') { $switches =~ s/-(\S*[A-Z]\S*)/"-$1"/g }
# Start timing the whole run (reported via timediff/timestr at the end).
59 my $t_start = new Benchmark;
# Main loop: run each named test script in turn.
60 while ($test = shift(@tests)) {
# Cosmetic cleanup of the displayed name on VMS.
63 if ($^O eq 'VMS') { $te =~ s/^.*\.t\./[.t./; }
# Print the test name padded with dots to a fixed width.
64 print "$te" . '.' x (20 - length($te));
65 my $fh = new FileHandle;
66 $fh->open($test) or print "can't open $test. $!\n";
# Propagate a -T taint flag found on the script's shebang line to the
# child's switch string (assumes $first holds the first line, read in a
# statement outside this excerpt — TODO confirm).
69 $s .= q[ "-T"] if $first =~ /^#!.*\bperl.*-\w*T/;
70 $fh->close or print "can't close $test. $!\n";
# Run the script with the same perl binary ($^X) through a piped open.
71 my $cmd = "$^X $s $test|";
72 $cmd = "MCR $cmd" if $^O eq 'VMS';
73 $fh->open($cmd) or print "can't run $test. $!\n";
# Reset per-script counters: passed, next expected number, declared max.
74 $ok = $next = $max = 0;
# Header "1..M todo N N ...;" declares the expected test count plus a
# whitespace-separated list of TODO test numbers.
83 if (/^1\.\.([0-9]+) todo([\d\s]+)\;/) {
85 for (split(/\s+/, $2)) { $todo{$_} = 1; }
# Plain "1..M" header: just the expected count.
89 } elsif (/^1\.\.([0-9]+)/) {
# Only lines matching /^(not\s+)?ok\b/ (after a header) are feedback.
94 } elsif ($max && /^(not\s+)?ok\b/) {
96 if (/^not ok\s*(\d*)/){
104 } elsif (/^ok\s*(\d*)(\s*\#\s*[Ss]kip)?/) {
# Use the explicit test number when given, else keep the internal counter.
105 $this = $1 if $1 > 0;
# "# Skip" after ok marks a skipped test; a passing TODO test is a bonus.
108 $skipped++ if defined $2;
109 $bonus++, $totbonus++ if $todo{$this};
# Forward jump in numbering: the intervening tests are recorded as failed.
112 # warn "Test output counter mismatch [test $this]\n";
113 # no need to warn probably
114 push @failed, $next..$this-1;
# A number lower than expected means duplicated or out-of-order output.
115 } elsif ($this < $next) {
116 #we have seen more "ok" lines than the number suggests
117 warn "Confused test output: test $this answered after test ", $next-1, "\n";
123 $fh->close; # must close to reap child resource values
# Child exit status: on VMS take the native status via vmsish; the other
# arm of this conditional lies outside this excerpt.
126 $estatus = ($^O eq 'VMS'
127 ? eval 'use vmsish "status"; $estatus = $?'
# NOTE(review): only two initializers for a three-element list — $percent
# starts undef here and is assigned below when it can be computed.
130 my ($failed, $canon, $percent) = ('??', '??');
131 printf "dubious\n\tTest returned status $estatus (wstat %d, 0x%x)\n",
133 print "\t\t(VMS status is $estatus)\n" if $^O eq 'VMS';
# Core-dump check; print a stack trace via Devel::CoreStack if loadable.
134 if (corestatus($wstatus)) { # until we have a wait module
135 if ($have_devel_corestack) {
136 Devel::CoreStack::stack($^X);
138 print "\ttest program seems to have generated a core\n";
# All subtests passed yet the script still exited non-zero.
143 if ($next == $max + 1 and not @failed) {
144 print "\tafter all the subtests completed successfully\n";
146 $failed = 0; # But we do not set $canon!
# Tests that never ran are counted as failures.
148 push @failed, $next..$max;
150 (my $txt, $canon) = canonfailed($max,@failed);
151 $percent = 100*(scalar @failed)/$max;
# Record the failure details for the end-of-run summary table.
155 $failedtests{$test} = { canon => $canon, max => $max || '??',
157 name => $test, percent => $percent,
158 estat => $estatus, wstat => $wstatus,
# Clean success: every declared test ran and passed.
160 } elsif ($ok == $max && $next == $max+1) {
# Report skipped and unexpectedly-succeeding (TODO) subtests, if any.
161 if ($max and $skipped + $bonus) {
163 push(@msg, "$skipped subtest".($skipped>1?'s':'')." skipped")
165 push(@msg, "$bonus subtest".($bonus>1?'s':'').
166 " unexpectedly succeeded")
168 print "ok, ".join(', ', @msg)."\n";
# A script may declare no tests to skip itself on this platform.
172 print "skipping test on this platform\n";
# Script exited cleanly but some tests failed or never ran.
177 push @failed, $next..$max;
180 my ($txt, $canon) = canonfailed($max,@failed);
182 $failedtests{$test} = { canon => $canon, max => $max,
183 failed => scalar @failed,
184 name => $test, percent => 100*(scalar @failed)/$max,
185 estat => '', wstat => '',
# Counts disagree in a way that cannot be attributed to specific tests.
188 print "Don't know which tests failed: got $ok ok, expected $max\n";
189 $failedtests{$test} = { canon => '??', max => $max,
191 name => $test, percent => undef,
192 estat => '', wstat => '',
# No "1..M" header or "ok" line was ever seen from the script.
196 } elsif ($next == 0) {
197 print "FAILED before any test output arrived\n";
199 $failedtests{$test} = { canon => '??', max => '??',
201 name => $test, percent => undef,
202 estat => '', wstat => '',
# Total elapsed time for the whole run.
206 my $t_total = timediff(new Benchmark, $t_start);
# Restore the caller's PERL5LIB (delete it if it was not set before).
209 if (defined $old5lib) {
210 $ENV{PERL5LIB} = $old5lib;
212 delete $ENV{PERL5LIB};
# Mention unexpectedly-succeeding TODO subtests in the summary line.
216 $bonusmsg = (" ($totbonus subtest".($totbonus>1?'s':'').
217 " UNEXPECTEDLY SUCCEEDED)")
219 if ($bad == 0 && $totmax) {
220 print "All tests successful$bonusmsg.\n";
# Die when nothing ran, or when scripts ran but produced no test output.
222 die "FAILED--no tests were run for some reason.\n";
223 } elsif ($totmax==0) {
224 my $blurb = $total==1 ? "script" : "scripts";
225 die "FAILED--$total test $blurb could be run, alas--no output ever seen\n";
# Otherwise print the per-script failure table and die with the totals.
227 $pct = sprintf("%.2f", $good / $total * 100);
228 my $subpct = sprintf " %d/%d subtests failed, %.2f%% okay.",
229 $totmax - $totok, $totmax, 100*$totok/$totmax;
# $curtest feeds the report `format` declared near the top of the file.
231 for $script (sort keys %failedtests) {
232 $curtest = $failedtests{$script};
236 die "Failed $bad/$total test scripts, $pct% okay.$subpct\n";
239 printf("Files=%d, Tests=%d, %s\n", $files, $totmax, timestr($t_total, 'nop'));
# True exactly when every script passed and at least one subtest ran.
241 return ($bad == 0 && $totmax) ;
# One-shot flag: attempt to load Devel::CoreStack at most once per run.
244 my $tried_devel_corestack;
# Prefer the system's wait.ph macros (WCOREDUMP) to decide whether the
# wait status indicates a core dump; fall back to testing bit 0200 when
# wait.ph cannot be loaded.
249 eval {require 'wait.ph'};
252 $ret = ($st & 0200); # Tim says, this is for 90%
255 $ret = WCOREDUMP($st);
# Lazily probe for Devel::CoreStack so a stack trace can be printed by
# the caller; sets the package-global $have_devel_corestack on success.
258 eval { require Devel::CoreStack; $have_devel_corestack++ }
259 unless $tried_devel_corestack++;
# Build the human-readable "FAILED tests ..." summary for one script.
# Takes the declared test count and the failed test numbers; produces
# (message text, canonical compressed list). The sub's closing lines
# (its return) lie outside this excerpt.
# NOTE(review): the ($@) prototype alters parsing, not validation.
264 sub canonfailed ($@) {
265 my($max,@failed) = @_;
# De-duplicate and numerically sort the failed test numbers.
267 @failed = sort {$a <=> $b} grep !$seen{$_}++, @failed;
268 my $failed = @failed;
272 my $last = $min = shift @failed;
# Collapse consecutive runs of numbers into "min-max" ranges.
275 for (@failed, $failed[-1]) { # don't forget the last one
276 if ($_ > $last+1 || $_ == $last) {
280 push @canon, "$min-$last";
287 push @result, "FAILED tests @canon\n";
290 push @result, "FAILED test $last\n";
# Append the failed/total ratio and the okay percentage.
294 push @result, "\tFailed $failed/$max tests, ";
295 push @result, sprintf("%.2f",100*(1-$failed/$max)), "% okay\n";
296 my $txt = join "", @result;
305 Test::Harness - run perl standard test scripts with statistics
315 (By using the L<Test> module, you can write test scripts without
316 knowing the exact output this module expects. However, if you need to
317 know the specifics, read on!)
319 Perl test scripts print to standard output C<"ok N"> for each single
320 test, where C<N> is an increasing sequence of integers. The first line
321 output by a standard test script is C<"1..M"> with C<M> being the
322 number of tests that should be run within the test
323 script. Test::Harness::runtests(@tests) runs all the testscripts
324 named as arguments and checks standard output for the expected
327 After all tests have been performed, runtests() prints some
328 performance statistics that are computed by the Benchmark module.
330 =head2 The test script output
332 Any output from the test script to standard error is not intercepted
333 and passes straight through, so it will be seen by the user. Lines
334 written to standard output matching C</^(not\s+)?ok\b/> are interpreted
335 as feedback for runtests(). All other lines are discarded.
337 The test numbers after C<ok> may be omitted. In that case
338 Test::Harness temporarily maintains its own counter until the
339 script supplies test numbers again. So the following test script
353 Failed 3/6 tests, 50.00% okay
355 The global variable $Test::Harness::verbose is exportable and can be
356 used to let runtests() display the standard output of the script
357 without altering the behavior otherwise.
359 The global variable $Test::Harness::switches is exportable and can be
360 used to set perl command line options used for running the test
361 script(s). The default value is C<-w>.
363 If the standard output line contains substring C< # Skip> (with
364 variations in spacing and case) after C<ok> or C<ok NUMBER>, it is
365 counted as a skipped test. If the whole testscript succeeds, the
366 count of skipped tests is included in the generated output.
370 C<&runtests> is exported by Test::Harness by default.
376 =item C<All tests successful.\nFiles=%d, Tests=%d, %s>
378 If all tests are successful some statistics about the performance are
381 =item C<FAILED tests %s\n\tFailed %d/%d tests, %.2f%% okay.>
383 For any single script that has failing subtests statistics like the
386 =item C<Test returned status %d (wstat %d)>
388 For scripts that return a non-zero exit status, both C<$? E<gt>E<gt> 8>
389 and C<$?> are printed in a message similar to the above.
391 =item C<Failed 1 test, %.2f%% okay. %s>
393 =item C<Failed %d/%d tests, %.2f%% okay. %s>
395 If not all tests were successful, the script dies with one of the
402 L<Test> for writing test scripts and also L<Benchmark> for the
403 underlying timing routines.
407 Either Tim Bunce or Andreas Koenig; we don't know. What we know for
408 sure is that it was inspired by Larry Wall's TEST script that came
409 with perl distributions for ages. Numerous anonymous contributors
410 exist. The current maintainer is Andreas Koenig.
414 Test::Harness uses $^X to determine the perl binary to run the tests
415 with. Test scripts running via the shebang (C<#!>) line may not be
416 portable because $^X is not consistent for shebang scripts across
417 platforms. This is no problem when Test::Harness is run with an
418 absolute path to the perl binary or when $^X can be found in the path.