# Package globals and exports for Test::Harness.
# NOTE(review): `use vars` is obsolete in modern Perl (prefer `our`), but the
# surrounding file is only partially visible here, so it is left unchanged.
10 use vars qw($VERSION $verbose $switches $have_devel_corestack $curtest
11 @ISA @EXPORT @EXPORT_OK);
# Set once Devel::CoreStack has been successfully loaded (see corestatus()).
12 $have_devel_corestack = 0;
16 # Some experimental versions of OS/2 build have broken $?
17 my $ignore_exitcode = $ENV{HARNESS_IGNORE_EXITCODE};
# Running totals: whole scripts skipped / individual subtests skipped.
19 my $tests_skipped = 0;
20 my $subtests_skipped = 0;
# runtests() is exported by default; $verbose and $switches on request.
23 @EXPORT= qw(&runtests);
24 @EXPORT_OK= qw($verbose $switches);
27 Failed Test Status Wstat Total Fail Failed List of failed
28 -------------------------------------------------------------------------------
32 @<<<<<<<<<<<<<< @>> @>>>> @>>>> @>>> ^##.##% ^<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
41 ~~ ^<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
# --- Interior of runtests(@tests); the sub header and a number of body
# --- lines are not visible in this excerpt (original file line numbers
# --- precede each statement).
52 my($test,$te,$ok,$next,$max,$pct,$totok,$totbonus,@failed,%failedtests);
# Propagate our @INC to child test processes; remember the previous value so
# it can be restored after the loop.
59 # pass -I flags to children
60 my $old5lib = $ENV{PERL5LIB};
61 local($ENV{'PERL5LIB'}) = join($Config{path_sep}, @INC);
# VMS: quote switches containing capitals so their case survives.
63 if ($^O eq 'VMS') { $switches =~ s/-(\S*[A-Z]\S*)/"-$1"/g }
65 my $t_start = new Benchmark;
# Main loop: run each test script in turn and parse its "ok"/"not ok" output.
66 while ($test = shift(@tests)) {
69 if ($^O eq 'VMS') { $te =~ s/^.*\.t\./[.t./; }
70 print "$te" . '.' x (20 - length($te));
# Peek at the script's shebang line so taint mode (-T) can be passed on to
# the child interpreter when the script requests it.
71 my $fh = new FileHandle;
72 $fh->open($test) or print "can't open $test. $!\n";
75 $s .= q[ "-T"] if $first =~ /^#!.*\bperl.*-\w*T/;
76 $fh->close or print "can't close $test. $!\n";
# Build the child command; COMPILE_TEST switches to running the script
# through the perlcc compiler instead of the plain interpreter.
77 my $cmd = ($ENV{'COMPILE_TEST'})?
78 "./perl -I../lib ../utils/perlcc $test -run -verbose dcf -log ./compilelog |"
80 $cmd = "MCR $cmd" if $^O eq 'VMS';
# NOTE(review): piped open interpolates $test into a shell command; the
# list form of open would be safer, but behavior is kept as-is here.
81 $fh->open($cmd) or print "can't run $test. $!\n";
82 $ok = $next = $max = 0;
# Header line "1..M", optionally followed by a "todo" list of test numbers
# that are expected to fail.
91 if (/^1\.\.([0-9]+) todo([\d\s]+)\;/) {
93 for (split(/\s+/, $2)) { $todo{$_} = 1; }
97 } elsif (/^1\.\.([0-9]+)/) {
# Result lines: "ok N" / "not ok N" (N may be omitted, in which case the
# harness keeps its own counter in $next).
102 } elsif ($max && /^(not\s+)?ok\b/) {
104 if (/^not ok\s*(\d*)/){
105 $this = $1 if $1 > 0;
112 } elsif (/^ok\s*(\d*)(\s*\#\s*[Ss]kip)?/) {
113 $this = $1 if $1 > 0;
# "# Skip" marks a skipped subtest; an "ok" for a todo test counts as an
# unexpected success (bonus).
116 $skipped++ if defined $2;
117 $bonus++, $totbonus++ if $todo{$this};
# A forward jump in the numbering means the intervening tests never
# reported: record them as failed.
120 # warn "Test output counter mismatch [test $this]\n";
121 # no need to warn probably
122 push @failed, $next..$this-1;
123 } elsif ($this < $next) {
124 #we have seen more "ok" lines than the number suggests
125 warn "Confused test output: test $this answered after test ", $next-1, "\n";
131 $fh->close; # must close to reap child resource values
132 my $wstatus = $ignore_exitcode ? 0 : $?; # Can trust $? ?
# VMS must fetch the native exit status under "use vmsish 'status'".
134 $estatus = ($^O eq 'VMS'
135 ? eval 'use vmsish "status"; $estatus = $?'
# Non-zero exit status: report the script as "dubious" even when all
# subtests passed.
# NOTE(review): three variables but only two initializers — $percent
# deliberately starts out undef and is only set in the failing branch.
138 my ($failed, $canon, $percent) = ('??', '??');
139 printf "dubious\n\tTest returned status $estatus (wstat %d, 0x%x)\n",
141 print "\t\t(VMS status is $estatus)\n" if $^O eq 'VMS';
142 if (corestatus($wstatus)) { # until we have a wait module
143 if ($have_devel_corestack) {
144 Devel::CoreStack::stack($^X);
146 print "\ttest program seems to have generated a core\n";
151 if ($next == $max + 1 and not @failed) {
152 print "\tafter all the subtests completed successfully\n";
154 $failed = 0; # But we do not set $canon!
156 push @failed, $next..$max;
158 (my $txt, $canon) = canonfailed($max,@failed);
159 $percent = 100*(scalar @failed)/$max;
# Record failure details for the summary table printed at the end.
163 $failedtests{$test} = { canon => $canon, max => $max || '??',
165 name => $test, percent => $percent,
166 estat => $estatus, wstat => $wstatus,
# Clean exit and every expected subtest seen: success, possibly with
# skipped or unexpectedly-succeeding subtests to mention.
168 } elsif ($ok == $max && $next == $max+1) {
169 if ($max and $skipped + $bonus) {
171 push(@msg, "$skipped subtest".($skipped>1?'s':'')." skipped")
173 push(@msg, "$bonus subtest".($bonus>1?'s':'').
174 " unexpectedly succeeded")
176 print "ok, ".join(', ', @msg)."\n";
180 print "skipping test on this platform\n";
# Clean exit but some subtests missing or failed.
186 push @failed, $next..$max;
189 my ($txt, $canon) = canonfailed($max,@failed);
191 $failedtests{$test} = { canon => $canon, max => $max,
192 failed => scalar @failed,
193 name => $test, percent => 100*(scalar @failed)/$max,
194 estat => '', wstat => '',
# Inconsistent counts: we cannot tell which tests failed.
197 print "Don't know which tests failed: got $ok ok, expected $max\n";
198 $failedtests{$test} = { canon => '??', max => $max,
200 name => $test, percent => undef,
201 estat => '', wstat => '',
# No output at all from the script.
205 } elsif ($next == 0) {
206 print "FAILED before any test output arrived\n";
208 $failedtests{$test} = { canon => '??', max => '??',
210 name => $test, percent => undef,
211 estat => '', wstat => '',
214 $subtests_skipped += $skipped;
# End of per-script loop: compute elapsed time and restore PERL5LIB.
216 my $t_total = timediff(new Benchmark, $t_start);
219 if (defined $old5lib) {
220 $ENV{PERL5LIB} = $old5lib;
222 delete $ENV{PERL5LIB};
# Assemble the "bonus" message: unexpected successes plus skip counts.
226 $bonusmsg = (" ($totbonus subtest".($totbonus>1?'s':'').
227 " UNEXPECTEDLY SUCCEEDED)")
229 if ($tests_skipped) {
230 $bonusmsg .= ", $tests_skipped test" . ($tests_skipped != 1 ? 's' : '') .
233 if ($subtests_skipped) {
234 $bonusmsg .= ($tests_skipped ? ', plus ' : ', ').
235 "$subtests_skipped subtest"
236 . ($subtests_skipped != 1 ? 's' : '') .
# Final verdict: all good / nothing ran / no output / failures (the failure
# branch dies with a summary after printing the per-script table).
239 if ($bad == 0 && $totmax) {
240 print "All tests successful$bonusmsg.\n";
242 die "FAILED--no tests were run for some reason.\n";
243 } elsif ($totmax==0) {
244 my $blurb = $total==1 ? "script" : "scripts";
245 die "FAILED--$total test $blurb could be run, alas--no output ever seen\n";
247 $pct = sprintf("%.2f", $good / $total * 100);
248 my $subpct = sprintf " %d/%d subtests failed, %.2f%% okay.",
249 $totmax - $totok, $totmax, 100*$totok/$totmax;
# Emit the formatted failure table ($curtest feeds the format declarations
# near the top of the file).
251 for $script (sort keys %failedtests) {
252 $curtest = $failedtests{$script};
256 $bonusmsg =~ s/^,\s*//;
257 print "$bonusmsg.\n" if $bonusmsg;
258 die "Failed $bad/$total test scripts, $pct% okay.$subpct\n";
261 printf("Files=%d, Tests=%d, %s\n", $files, $totmax, timestr($t_total, 'nop'));
# Truthy only when nothing failed and at least one subtest was expected.
263 return ($bad == 0 && $totmax) ;
# State shared with corestatus(): ensures Devel::CoreStack is only probed
# for once per run.
266 my $tried_devel_corestack;
# --- corestatus($wstatus) fragments (sub header not visible in this
# --- excerpt): decide whether the child's wait status indicates a core dump.
271 eval {require 'wait.ph'};
# Fallback when wait.ph is unavailable: test the traditional core-dump bit.
274 $ret = ($st & 0200); # Tim says, this is for 90%
277 $ret = WCOREDUMP($st);
# Lazily load Devel::CoreStack so the caller can print a stack trace;
# $have_devel_corestack records whether the load succeeded.
280 eval { require Devel::CoreStack; $have_devel_corestack++ }
281 unless $tried_devel_corestack++;
# canonfailed($max, @failed): build a human-readable summary of the failed
# subtest numbers, collapsing consecutive runs into "a-b" ranges.
# Returns the printable text plus the canonical list; several lines of this
# sub are not visible in this excerpt.
286 sub canonfailed ($@) {
# NOTE(review): the ($@) prototype affects parsing only (scalar context for
# the first argument); it performs no argument validation.
287 my($max,@failed) = @_;
# Numeric sort with duplicates removed.
289 @failed = sort {$a <=> $b} grep !$seen{$_}++, @failed;
290 my $failed = @failed;
294 my $last = $min = shift @failed;
297 for (@failed, $failed[-1]) { # don't forget the last one
298 if ($_ > $last+1 || $_ == $last) {
# Close the current run of consecutive numbers as a "min-last" range.
302 push @canon, "$min-$last";
309 push @result, "FAILED tests @canon\n";
# Single failure: no range notation needed.
312 push @result, "FAILED test $last\n";
# Append the failure ratio and the percentage of passing subtests.
316 push @result, "\tFailed $failed/$max tests, ";
317 push @result, sprintf("%.2f",100*(1-$failed/$max)), "% okay\n";
318 my $txt = join "", @result;
327 Test::Harness - run perl standard test scripts with statistics
337 (By using the L<Test> module, you can write test scripts without
338 knowing the exact output this module expects. However, if you need to
339 know the specifics, read on!)
341 Perl test scripts print to standard output C<"ok N"> for each single
342 test, where C<N> is an increasing sequence of integers. The first line
343 output by a standard test script is C<"1..M"> with C<M> being the
344 number of tests that should be run within the test
345 script. Test::Harness::runtests(@tests) runs all the test scripts
346 named as arguments and checks standard output for the expected
349 After all tests have been performed, runtests() prints some
350 performance statistics that are computed by the Benchmark module.
352 =head2 The test script output
354 Any output from the test script to standard error is not captured by
355 the harness and thus will be seen by the user. Lines written to standard
356 output containing C</^(not\s+)?ok\b/> are interpreted as feedback for
357 runtests(). All other lines are discarded.
359 The test numbers after C<ok> may be omitted. In this
360 case Test::Harness temporarily maintains its own counter until the
361 script supplies test numbers again. So the following test script
375 Failed 3/6 tests, 50.00% okay
377 The global variable $Test::Harness::verbose is exportable and can be
378 used to let runtests() display the standard output of the script
379 without altering the behavior otherwise.
381 The global variable $Test::Harness::switches is exportable and can be
382 used to set perl command line options used for running the test
383 script(s). The default value is C<-w>.
385 If the standard output line contains substring C< # Skip> (with
386 variations in spacing and case) after C<ok> or C<ok NUMBER>, it is
387 counted as a skipped test. If the whole test script succeeds, the
388 count of skipped tests is included in the generated output.
392 C<&runtests> is exported by Test::Harness per default.
398 =item C<All tests successful.\nFiles=%d, Tests=%d, %s>
400 If all tests are successful some statistics about the performance are
403 =item C<FAILED tests %s\n\tFailed %d/%d tests, %.2f%% okay.>
405 For any single script that has failing subtests statistics like the
408 =item C<Test returned status %d (wstat %d)>
410 For scripts that return a non-zero exit status, both C<$? E<gt>E<gt> 8> and C<$?> are
411 printed in a message similar to the above.
413 =item C<Failed 1 test, %.2f%% okay. %s>
415 =item C<Failed %d/%d tests, %.2f%% okay. %s>
417 If not all tests were successful, the script dies with one of the
424 Setting C<HARNESS_IGNORE_EXITCODE> makes it ignore the exit status
429 L<Test> for writing test scripts and also L<Benchmark> for the
430 underlying timing routines.
434 Either Tim Bunce or Andreas Koenig, we don't know. What we know for
435 sure is that it was inspired by Larry Wall's TEST script that came
436 with perl distributions for ages. Numerous anonymous contributors
437 exist. Current maintainer is Andreas Koenig.
441 Test::Harness uses $^X to determine the perl binary to run the tests
442 with. Test scripts running via the shebang (C<#!>) line may not be
443 portable because $^X is not consistent for shebang scripts across
444 platforms. This is no problem when Test::Harness is run with an
445 absolute path to the perl binary or when $^X can be found in the path.