10 use vars qw($VERSION $verbose $switches $have_devel_corestack $curtest
11 @ISA @EXPORT @EXPORT_OK);
12 $have_devel_corestack = 0;
16 $ENV{HARNESS_ACTIVE} = 1;
18 # Some experimental versions of OS/2 build have broken $?
19 my $ignore_exitcode = $ENV{HARNESS_IGNORE_EXITCODE};
21 my $files_in_dir = $ENV{HARNESS_FILELEAK_IN_DIR};
23 my $tests_skipped = 0;
24 my $subtests_skipped = 0;
27 @EXPORT= qw(&runtests);
28 @EXPORT_OK= qw($verbose $switches);
31 Failed Test Status Wstat Total Fail Failed List of failed
32 -------------------------------------------------------------------------------
36 @<<<<<<<<<<<<<< @>> @>>>> @>>>> @>>> ^##.##% ^<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
45 ~~ ^<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
# Return every directory entry (including '.' and '..') of the given
# directory — a raw readdir() snapshot, used to detect leaked files.
# Returns the empty list if the directory cannot be opened (the old
# bareword-handle version silently read from an unopened handle).
sub globdir {
    my $dir = shift;
    opendir my $dh, $dir or return;   # lexical handle; fail soft like before
    my @entries = readdir $dh;
    closedir $dh;
    return @entries;
}
58 my($test,$te,$ok,$next,$max,$pct,$totok,$totbonus,@failed,%failedtests);
65 # pass -I flags to children
66 my $old5lib = $ENV{PERL5LIB};
67 local($ENV{'PERL5LIB'}) = join($Config{path_sep}, @INC);
69 if ($^O eq 'VMS') { $switches =~ s/-(\S*[A-Z]\S*)/"-$1"/g }
71 my @dir_files = globdir $files_in_dir if defined $files_in_dir;
72 my $t_start = new Benchmark;
73 while ($test = shift(@tests)) {
76 if ($^O eq 'VMS') { $te =~ s/^.*\.t\./[.t./; }
77 print "$te" . '.' x (20 - length($te));
78 my $fh = new FileHandle;
79 $fh->open($test) or print "can't open $test. $!\n";
82 $s .= q[ "-T"] if $first =~ /^#!.*\bperl.*-\w*T/;
83 $fh->close or print "can't close $test. $!\n";
84 my $cmd = ($ENV{'COMPILE_TEST'})?
85 "./perl -I../lib ../utils/perlcc $test -run 2>> ./compilelog |"
87 $cmd = "MCR $cmd" if $^O eq 'VMS';
88 $fh->open($cmd) or print "can't run $test. $!\n";
89 $ok = $next = $max = 0;
98 if (/^1\.\.([0-9]+) todo([\d\s]+)\;/) {
100 for (split(/\s+/, $2)) { $todo{$_} = 1; }
104 } elsif (/^1\.\.([0-9]+)/) {
109 } elsif ($max && /^(not\s+)?ok\b/) {
111 if (/^not ok\s*(\d*)/){
112 $this = $1 if $1 > 0;
119 } elsif (/^ok\s*(\d*)(\s*\#\s*[Ss]kip)?/) {
120 $this = $1 if $1 > 0;
123 $skipped++ if defined $2;
124 $bonus++, $totbonus++ if $todo{$this};
127 # warn "Test output counter mismatch [test $this]\n";
128 # no need to warn probably
129 push @failed, $next..$this-1;
130 } elsif ($this < $next) {
131 #we have seen more "ok" lines than the number suggests
132 warn "Confused test output: test $this answered after test ", $next-1, "\n";
138 $fh->close; # must close to reap child resource values
139 my $wstatus = $ignore_exitcode ? 0 : $?; # Can trust $? ?
141 $estatus = ($^O eq 'VMS'
142 ? eval 'use vmsish "status"; $estatus = $?'
145 my ($failed, $canon, $percent) = ('??', '??');
146 printf "dubious\n\tTest returned status $estatus (wstat %d, 0x%x)\n",
148 print "\t\t(VMS status is $estatus)\n" if $^O eq 'VMS';
149 if (corestatus($wstatus)) { # until we have a wait module
150 if ($have_devel_corestack) {
151 Devel::CoreStack::stack($^X);
153 print "\ttest program seems to have generated a core\n";
158 if ($next == $max + 1 and not @failed) {
159 print "\tafter all the subtests completed successfully\n";
161 $failed = 0; # But we do not set $canon!
163 push @failed, $next..$max;
165 (my $txt, $canon) = canonfailed($max,$skipped,@failed);
166 $percent = 100*(scalar @failed)/$max;
170 $failedtests{$test} = { canon => $canon, max => $max || '??',
172 name => $test, percent => $percent,
173 estat => $estatus, wstat => $wstatus,
175 } elsif ($ok == $max && $next == $max+1) {
176 if ($max and $skipped + $bonus) {
178 push(@msg, "$skipped/$max subtest".($skipped>1?'s':'')." skipped")
180 push(@msg, "$bonus subtest".($bonus>1?'s':'').
181 " unexpectedly succeeded")
183 print "ok, ".join(', ', @msg)."\n";
187 print "skipping test on this platform\n";
193 push @failed, $next..$max;
196 my ($txt, $canon) = canonfailed($max,$skipped,@failed);
198 $failedtests{$test} = { canon => $canon, max => $max,
199 failed => scalar @failed,
200 name => $test, percent => 100*(scalar @failed)/$max,
201 estat => '', wstat => '',
204 print "Don't know which tests failed: got $ok ok, expected $max\n";
205 $failedtests{$test} = { canon => '??', max => $max,
207 name => $test, percent => undef,
208 estat => '', wstat => '',
212 } elsif ($next == 0) {
213 print "FAILED before any test output arrived\n";
215 $failedtests{$test} = { canon => '??', max => '??',
217 name => $test, percent => undef,
218 estat => '', wstat => '',
221 $subtests_skipped += $skipped;
222 if (defined $files_in_dir) {
223 my @new_dir_files = globdir $files_in_dir;
224 if (@new_dir_files != @dir_files) {
226 @f{@new_dir_files} = (1) x @new_dir_files;
227 delete @f{@dir_files};
228 my @f = sort keys %f;
229 print "LEAKED FILES: @f\n";
230 @dir_files = @new_dir_files;
234 my $t_total = timediff(new Benchmark, $t_start);
237 if (defined $old5lib) {
238 $ENV{PERL5LIB} = $old5lib;
240 delete $ENV{PERL5LIB};
244 $bonusmsg = (" ($totbonus subtest".($totbonus>1?'s':'').
245 " UNEXPECTEDLY SUCCEEDED)")
247 if ($tests_skipped) {
248 $bonusmsg .= ", $tests_skipped test" . ($tests_skipped != 1 ? 's' : '');
249 if ($subtests_skipped) {
250 $bonusmsg .= " and $subtests_skipped subtest"
251 . ($subtests_skipped != 1 ? 's' : '');
253 $bonusmsg .= ' skipped';
255 elsif ($subtests_skipped) {
256 $bonusmsg .= ", $subtests_skipped subtest"
257 . ($subtests_skipped != 1 ? 's' : '')
260 if ($bad == 0 && $totmax) {
261 print "All tests successful$bonusmsg.\n";
263 die "FAILED--no tests were run for some reason.\n";
264 } elsif ($totmax==0) {
265 my $blurb = $total==1 ? "script" : "scripts";
266 die "FAILED--$total test $blurb could be run, alas--no output ever seen\n";
268 $pct = sprintf("%.2f", $good / $total * 100);
269 my $subpct = sprintf " %d/%d subtests failed, %.2f%% okay.",
270 $totmax - $totok, $totmax, 100*$totok/$totmax;
272 for $script (sort keys %failedtests) {
273 $curtest = $failedtests{$script};
277 $bonusmsg =~ s/^,\s*//;
278 print "$bonusmsg.\n" if $bonusmsg;
279 die "Failed $bad/$total test scripts, $pct% okay.$subpct\n";
282 printf("Files=%d, Tests=%d, %s\n", $files, $totmax, timestr($t_total, 'nop'));
284 return ($bad == 0 && $totmax) ;
287 my $tried_devel_corestack;
292 eval {require 'wait.ph'};
295 $ret = ($st & 0200); # Tim says, this is for 90%
298 $ret = WCOREDUMP($st);
301 eval { require Devel::CoreStack; $have_devel_corestack++ }
302 unless $tried_devel_corestack++;
307 sub canonfailed ($@) {
308 my($max,$skipped,@failed) = @_;
310 @failed = sort {$a <=> $b} grep !$seen{$_}++, @failed;
311 my $failed = @failed;
315 my $last = $min = shift @failed;
318 for (@failed, $failed[-1]) { # don't forget the last one
319 if ($_ > $last+1 || $_ == $last) {
323 push @canon, "$min-$last";
330 push @result, "FAILED tests @canon\n";
333 push @result, "FAILED test $last\n";
337 push @result, "\tFailed $failed/$max tests, ";
338 push @result, sprintf("%.2f",100*(1-$failed/$max)), "% okay";
339 my $ender = 's' x ($skipped > 1);
340 my $good = $max - $failed - $skipped;
341 my $goodper = sprintf("%.2f",100*($good/$max));
342 push @result, " (-$skipped skipped test$ender: $good okay, $goodper%)" if $skipped;
344 my $txt = join "", @result;
353 Test::Harness - run perl standard test scripts with statistics
363 (By using the L<Test> module, you can write test scripts without
364 knowing the exact output this module expects. However, if you need to
365 know the specifics, read on!)
367 Perl test scripts print to standard output C<"ok N"> for each single
368 test, where C<N> is an increasing sequence of integers. The first line
369 output by a standard test script is C<"1..M"> with C<M> being the
370 number of tests that should be run within the test
371 script. Test::Harness::runtests(@tests) runs all the testscripts
372 named as arguments and checks standard output for the expected
375 After all tests have been performed, runtests() prints some
376 performance statistics that are computed by the Benchmark module.
378 =head2 The test script output
380 Any output from the testscript to standard error is not intercepted
381 by the harness, and thus will be seen by the user. Lines written to standard
382 output containing C</^(not\s+)?ok\b/> are interpreted as feedback for
383 runtests(). All other lines are discarded.
385 It is tolerated if the test numbers after C<ok> are omitted. In this
386 case Test::Harness maintains temporarily its own counter until the
387 script supplies test numbers again. So the following test script
401 Failed 3/6 tests, 50.00% okay
403 The global variable $Test::Harness::verbose is exportable and can be
404 used to let runtests() display the standard output of the script
405 without altering the behavior otherwise.
407 The global variable $Test::Harness::switches is exportable and can be
408 used to set perl command line options used for running the test
409 script(s). The default value is C<-w>.
411 If the standard output line contains substring C< # Skip> (with
412 variations in spacing and case) after C<ok> or C<ok NUMBER>, it is
413 counted as a skipped test. If the whole testscript succeeds, the
414 count of skipped tests is included in the generated output.
418 C<&runtests> is exported by Test::Harness per default.
424 =item C<All tests successful.\nFiles=%d, Tests=%d, %s>
426 If all tests are successful some statistics about the performance are
429 =item C<FAILED tests %s\n\tFailed %d/%d tests, %.2f%% okay.>
431 For any single script that has failing subtests statistics like the
434 =item C<Test returned status %d (wstat %d)>
436 Scripts that return a non-zero exit status, both C<$? E<gt>E<gt> 8> and C<$?> are
437 printed in a message similar to the above.
439 =item C<Failed 1 test, %.2f%% okay. %s>
441 =item C<Failed %d/%d tests, %.2f%% okay. %s>
443 If not all tests were successful, the script dies with one of the
450 Setting C<HARNESS_IGNORE_EXITCODE> makes harness ignore the exit status
453 If C<HARNESS_FILELEAK_IN_DIR> is set to the name of a directory, harness
454 will check after each test whether new files appeared in that directory,
457 LEAKED FILES: scr.tmp 0 my.db
459 If relative, directory name is with respect to the current directory at
460 the moment runtests() was called. Putting absolute path into
461 C<HARNESS_FILELEAK_IN_DIR> may give more predictable results.
463 Harness sets C<HARNESS_ACTIVE> before executing the individual tests.
464 This allows the tests to determine if they are being executed through the
465 harness or by any other means.
469 L<Test> for writing test scripts and also L<Benchmark> for the
470 underlying timing routines.
474 Either Tim Bunce or Andreas Koenig, we don't know. What we know for
475 sure is, that it was inspired by Larry Wall's TEST script that came
476 with perl distributions for ages. Numerous anonymous contributors
477 exist. Current maintainer is Andreas Koenig.
481 Test::Harness uses $^X to determine the perl binary to run the tests
482 with. Test scripts running via the shebang (C<#!>) line may not be
483 portable because $^X is not consistent for shebang scripts across
484 platforms. This is no problem when Test::Harness is run with an
485 absolute path to the perl binary or when $^X can be found in the path.