# Package-level state for Test::Harness.
# NOTE(review): this is a gapped listing — the number at the start of each
# line is the original file's line number, and lines between the visible
# ones are missing from this view.
10 use vars qw($VERSION $verbose $switches $have_devel_corestack $curtest
11 @ISA @EXPORT @EXPORT_OK);
# Set once Devel::CoreStack has been successfully require'd (see below).
12 $have_devel_corestack = 0;
# Advertise to child test scripts that they are running under the harness.
16 $ENV{HARNESS_ACTIVE} = 1;
18 # Some experimental versions of OS/2 build have broken $?
19 my $ignore_exitcode = $ENV{HARNESS_IGNORE_EXITCODE};
# Optional directory to watch for files leaked by test scripts.
21 my $files_in_dir = $ENV{HARNESS_FILELEAK_IN_DIR};
# Running totals: whole scripts skipped, and individual subtests skipped.
23 my $tests_skipped = 0;
24 my $subtests_skipped = 0;
# runtests() is exported by default; $verbose/$switches only on request.
27 @EXPORT= qw(&runtests);
28 @EXPORT_OK= qw($verbose $switches);
31 Failed Test Status Wstat Total Fail Failed List of failed
32 -------------------------------------------------------------------------------
36 @<<<<<<<<<<<<<< @>> @>>>> @>>>> @>>> ^##.##% ^<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
45 ~~ ^<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
# globdir($dir) — return the list of all entries in directory $dir
# (including "." and ".."), in readdir order.  Returns the empty list if
# the directory cannot be opened, matching the original's observable
# behavior (readdir on a failed handle yielded an empty list) but without
# the "readdir on unopened dirhandle" warning.
sub globdir {
    my $dir = shift;
    # Lexical directory handle instead of the global bareword DIRH (which
    # could clobber/collide with any other use of DIRH), plus an explicit
    # opendir failure check.
    opendir my $dirh, $dir or return ();
    my @entries = readdir $dirh;
    closedir $dirh;
    return @entries;
}
# Body of runtests(@tests): run each named test script as a child perl
# process, parse its "1..M" plan and "ok N"/"not ok N" output, and print
# per-script and aggregate results.
# NOTE(review): gapped listing — the "sub runtests" opening line and many
# statements are not visible here; comments below describe only the
# visible lines.
58 my($test,$te,$ok,$next,$max,$pct,$totok,$totbonus,@failed,%failedtests);
65 # pass -I flags to children
66 my $old5lib = $ENV{PERL5LIB};
# local() restores the caller's PERL5LIB automatically on scope exit.
67 local($ENV{'PERL5LIB'}) = join($Config{path_sep}, @INC);
# VMS command parsing downcases unquoted arguments; quote switches that
# contain uppercase letters so they survive.
69 if ($^O eq 'VMS') { $switches =~ s/-(\S*[A-Z]\S*)/"-$1"/g }
# Snapshot the watched directory for post-test file-leak detection.
71 my @dir_files = globdir $files_in_dir if defined $files_in_dir;
72 my $t_start = new Benchmark;
73 while ($test = shift(@tests)) {
# Trim the displayed name on VMS, then pad with dots for column alignment.
76 if ($^O eq 'VMS') { $te =~ s/^.*\.t\./[.t./; }
77 print "$te" . '.' x (20 - length($te));
78 my $fh = new FileHandle;
79 $fh->open($test) or print "can't open $test. $!\n";
# Propagate taint mode to the child if the script's shebang requests -T.
82 $s .= q[ "-T"] if $first =~ /^#!.*\bperl.*-\w*T/;
83 $fh->close or print "can't close $test. $!\n";
# COMPILE_TEST routes the script through the perlcc compiler instead of
# running it directly under perl.
84 my $cmd = ($ENV{'COMPILE_TEST'})?
85 "./perl -I../lib ../utils/perlcc $test -run 2>> ./compilelog |"
87 $cmd = "MCR $cmd" if $^O eq 'VMS';
# Open a pipe from the child; its stdout is parsed line by line below.
88 $fh->open($cmd) or print "can't run $test. $!\n";
89 $ok = $next = $max = 0;
# Plan "1..M todo A B;" declares M tests, some expected to fail (todo).
99 if (/^1\.\.([0-9]+) todo([\d\s]+)\;/) {
101 for (split(/\s+/, $2)) { $todo{$_} = 1; }
# Plain "1..M" plan: just the expected test count.
105 } elsif (/^1\.\.([0-9]+)/) {
# A result line — only honored once a plan ($max) has been seen.
110 } elsif ($max && /^(not\s+)?ok\b/) {
112 if (/^not ok\s*(\d*)/){
# Explicit test number overrides the internal counter.
113 $this = $1 if $1 > 0;
# "ok N # Skip[ reason]" — count the skip and remember the reason.
120 } elsif (/^ok\s*(\d*)(\s*\#\s*[Ss]kip\S*(?:(?>\s+)(.+))?)?/) {
121 $this = $1 if $1 > 0;
124 $skipped++ if defined $2;
126 $reason = 'unknown reason' if defined $2;
127 $reason = $3 if defined $3;
128 if (defined $reason and defined $skip_reason) {
129 # print "was: '$skip_reason' new '$reason'\n";
# Differing skip reasons collapse into one generic summary string.
130 $skip_reason = 'various reasons'
131 if $skip_reason ne $reason;
132 } elsif (defined $reason) {
133 $skip_reason = $reason;
# A passing test that was declared todo counts as an unexpected bonus.
135 $bonus++, $totbonus++ if $todo{$this};
# Numbers jumped ahead: every test in the gap is recorded as failed.
138 # warn "Test output counter mismatch [test $this]\n"
139 # no need to warn probably
140 push @failed, $next..$this-1;
141 } elsif ($this < $next) {
142 #we have seen more "ok" lines than the number suggests
143 warn "Confused test output: test $this answered after test ", $next-1, "\n";
149 $fh->close; # must close to reap child resource values
150 my $wstatus = $ignore_exitcode ? 0 : $?; # Can trust $? ?
# On VMS the native status must be interpreted via the vmsish pragma.
152 $estatus = ($^O eq 'VMS'
153 ? eval 'use vmsish "status"; $estatus = $?'
# Non-zero exit status: the script is reported "dubious" even if its
# visible subtests all passed.
156 my ($failed, $canon, $percent) = ('??', '??');
157 printf "dubious\n\tTest returned status $estatus (wstat %d, 0x%x)\n",
159 print "\t\t(VMS status is $estatus)\n" if $^O eq 'VMS';
160 if (corestatus($wstatus)) { # until we have a wait module
161 if ($have_devel_corestack) {
# Dump the core file's stack via Devel::CoreStack when available.
162 Devel::CoreStack::stack($^X);
164 print "\ttest program seems to have generated a core\n";
169 if ($next == $max + 1 and not @failed) {
170 print "\tafter all the subtests completed successfully\n";
172 $failed = 0; # But we do not set $canon!
# Tests that never produced output count as failed.
174 push @failed, $next..$max;
176 (my $txt, $canon) = canonfailed($max,$skipped,@failed);
177 $percent = 100*(scalar @failed)/$max;
# Record the failure details for the summary table printed at the end.
181 $failedtests{$test} = { canon => $canon, max => $max || '??',
183 name => $test, percent => $percent,
184 estat => $estatus, wstat => $wstatus,
# Clean exit and every expected test answered: success.
186 } elsif ($ok == $max && $next == $max+1) {
187 if ($max and $skipped + $bonus) {
# Build the "ok, N subtests skipped / unexpectedly succeeded" message.
189 push(@msg, "$skipped/$max subtest".($skipped>1?'s':'')." skipped: $skip_reason")
191 push(@msg, "$bonus subtest".($bonus>1?'s':'').
192 " unexpectedly succeeded")
194 print "ok, ".join(', ', @msg)."\n";
# An empty plan ("1..0") means the whole script was skipped.
198 print "skipping test on this platform\n";
# Clean exit, but some expected tests missing or failed.
204 push @failed, $next..$max;
207 my ($txt, $canon) = canonfailed($max,$skipped,@failed);
209 $failedtests{$test} = { canon => $canon, max => $max,
210 failed => scalar @failed,
211 name => $test, percent => 100*(scalar @failed)/$max,
212 estat => '', wstat => '',
# Counters disagree in a way that cannot name the failing tests.
215 print "Don't know which tests failed: got $ok ok, expected $max\n";
216 $failedtests{$test} = { canon => '??', max => $max,
218 name => $test, percent => undef,
219 estat => '', wstat => '',
# No plan and no result lines at all.
223 } elsif ($next == 0) {
224 print "FAILED before any test output arrived\n";
226 $failedtests{$test} = { canon => '??', max => '??',
228 name => $test, percent => undef,
229 estat => '', wstat => '',
232 $subtests_skipped += $skipped;
# File-leak check: report any names that newly appeared in the watched
# directory since the last snapshot (hash-slice set difference).
233 if (defined $files_in_dir) {
234 my @new_dir_files = globdir $files_in_dir;
235 if (@new_dir_files != @dir_files) {
237 @f{@new_dir_files} = (1) x @new_dir_files;
238 delete @f{@dir_files};
239 my @f = sort keys %f;
240 print "LEAKED FILES: @f\n";
241 @dir_files = @new_dir_files;
245 my $t_total = timediff(new Benchmark, $t_start);
# Restore the caller's PERL5LIB (or remove ours if none existed).
248 if (defined $old5lib) {
249 $ENV{PERL5LIB} = $old5lib;
251 delete $ENV{PERL5LIB};
# Assemble the trailing bonus/skip message shown on overall success.
255 $bonusmsg = (" ($totbonus subtest".($totbonus>1?'s':'').
256 " UNEXPECTEDLY SUCCEEDED)")
258 if ($tests_skipped) {
259 $bonusmsg .= ", $tests_skipped test" . ($tests_skipped != 1 ? 's' : '');
260 if ($subtests_skipped) {
261 $bonusmsg .= " and $subtests_skipped subtest"
262 . ($subtests_skipped != 1 ? 's' : '');
264 $bonusmsg .= ' skipped';
266 elsif ($subtests_skipped) {
267 $bonusmsg .= ", $subtests_skipped subtest"
268 . ($subtests_skipped != 1 ? 's' : '')
# Final verdict: all good, nothing ran, no output seen, or failures.
271 if ($bad == 0 && $totmax) {
272 print "All tests successful$bonusmsg.\n";
274 die "FAILED--no tests were run for some reason.\n";
275 } elsif ($totmax==0) {
276 my $blurb = $total==1 ? "script" : "scripts";
277 die "FAILED--$total test $blurb could be run, alas--no output ever seen\n";
279 $pct = sprintf("%.2f", $good / $total * 100);
280 my $subpct = sprintf " %d/%d subtests failed, %.2f%% okay.",
281 $totmax - $totok, $totmax, 100*$totok/$totmax;
# One formatted summary line per failed script (via the format/write
# machinery declared near the top of the file; $curtest feeds it).
283 for $script (sort keys %failedtests) {
284 $curtest = $failedtests{$script};
288 $bonusmsg =~ s/^,\s*//;
289 print "$bonusmsg.\n" if $bonusmsg;
290 die "Failed $bad/$total test scripts, $pct% okay.$subpct\n";
293 printf("Files=%d, Tests=%d, %s\n", $files, $totmax, timestr($t_total, 'nop'));
# True exactly when every script passed and at least one subtest ran.
295 return ($bad == 0 && $totmax) ;
# State and body fragment of corestatus($wstatus): decide whether a child
# wait-status indicates a core dump.
# NOTE(review): gapped listing — the "sub corestatus" opening line and
# surrounding control flow are not visible here.
298 my $tried_devel_corestack;
# Prefer the system's wait.ph definition of WCOREDUMP when it loads;
# presumably the 0200-mask branch below is the fallback when it does not.
303 eval {require 'wait.ph'};
306 $ret = ($st & 0200); # Tim says, this is for 90%
309 $ret = WCOREDUMP($st);
# Lazily attempt to load Devel::CoreStack exactly once per run; sets the
# package flag consulted by runtests() when a core is detected.
312 eval { require Devel::CoreStack; $have_devel_corestack++ }
313 unless $tried_devel_corestack++;
# canonfailed($max, $skipped, @failed) — build a human-readable failure
# summary, collapsing consecutive failed test numbers into "a-b" ranges,
# and a canonical list string for the final report table.
# NOTE(review): gapped listing — several body lines (else-branches,
# single-failure path, return) are not visible here.
318 sub canonfailed ($@) {
319 my($max,$skipped,@failed) = @_;
# De-duplicate, then sort the failed test numbers numerically.
321 @failed = sort {$a <=> $b} grep !$seen{$_}++, @failed;
322 my $failed = @failed;
326 my $last = $min = shift @failed;
# Walk the sorted list emitting "min-last" ranges; the list's own last
# element is appended as a sentinel so the final range gets flushed.
329 for (@failed, $failed[-1]) { # don't forget the last one
330 if ($_ > $last+1 || $_ == $last) {
334 push @canon, "$min-$last";
341 push @result, "FAILED tests @canon\n";
344 push @result, "FAILED test $last\n";
348 push @result, "\tFailed $failed/$max tests, ";
349 push @result, sprintf("%.2f",100*(1-$failed/$max)), "% okay";
# 's' x (cond) pluralizes "test" only when more than one was skipped.
350 my $ender = 's' x ($skipped > 1);
351 my $good = $max - $failed - $skipped;
352 my $goodper = sprintf("%.2f",100*($good/$max));
353 push @result, " (-$skipped skipped test$ender: $good okay, $goodper%)" if $skipped;
355 my $txt = join "", @result;
364 Test::Harness - run perl standard test scripts with statistics
374 (By using the L<Test> module, you can write test scripts without
375 knowing the exact output this module expects. However, if you need to
376 know the specifics, read on!)
378 Perl test scripts print to standard output C<"ok N"> for each single
379 test, where C<N> is an increasing sequence of integers. The first line
380 output by a standard test script is C<"1..M"> with C<M> being the
381 number of tests that should be run within the test
382 script. Test::Harness::runtests(@tests) runs all the test scripts
383 named as arguments and checks standard output for the expected
386 After all tests have been performed, runtests() prints some
387 performance statistics that are computed by the Benchmark module.
389 =head2 The test script output
391 Any output from the test script to standard error is ignored and
392 bypassed, and thus will be seen by the user. Lines written to standard
393 output containing C</^(not\s+)?ok\b/> are interpreted as feedback for
394 runtests(). All other lines are discarded.
396 It is tolerated if the test numbers after C<ok> are omitted. In this
397 case Test::Harness maintains temporarily its own counter until the
398 script supplies test numbers again. So the following test script
412 Failed 3/6 tests, 50.00% okay
414 The global variable $Test::Harness::verbose is exportable and can be
415 used to let runtests() display the standard output of the script
416 without altering the behavior otherwise.
418 The global variable $Test::Harness::switches is exportable and can be
419 used to set perl command line options used for running the test
420 script(s). The default value is C<-w>.
422 If the standard output line contains substring C< # Skip> (with
423 variations in spacing and case) after C<ok> or C<ok NUMBER>, it is
424 counted as a skipped test. If the whole test script succeeds, the
425 count of skipped tests is included in the generated output.
429 C<&runtests> is exported by Test::Harness per default.
435 =item C<All tests successful.\nFiles=%d, Tests=%d, %s>
437 If all tests are successful some statistics about the performance are
440 =item C<FAILED tests %s\n\tFailed %d/%d tests, %.2f%% okay.>
442 For any single script that has failing subtests statistics like the
445 =item C<Test returned status %d (wstat %d)>
447 Scripts that return a non-zero exit status, both C<$? E<gt>E<gt> 8> and C<$?> are
448 printed in a message similar to the above.
450 =item C<Failed 1 test, %.2f%% okay. %s>
452 =item C<Failed %d/%d tests, %.2f%% okay. %s>
454 If not all tests were successful, the script dies with one of the
461 Setting C<HARNESS_IGNORE_EXITCODE> makes harness ignore the exit status
464 If C<HARNESS_FILELEAK_IN_DIR> is set to the name of a directory, harness
465 will check after each test whether new files appeared in that directory,
468 LEAKED FILES: scr.tmp 0 my.db
470 If relative, directory name is with respect to the current directory at
471 the moment runtests() was called. Putting absolute path into
472 C<HARNESS_FILELEAK_IN_DIR> may give more predictable results.
474 Harness sets C<HARNESS_ACTIVE> before executing the individual tests.
475 This allows the tests to determine if they are being executed through the
476 harness or by any other means.
480 L<Test> for writing test scripts and also L<Benchmark> for the
481 underlying timing routines.
485 Either Tim Bunce or Andreas Koenig, we don't know. What we know for
486 sure is, that it was inspired by Larry Wall's TEST script that came
487 with perl distributions for ages. Numerous anonymous contributors
488 exist. Current maintainer is Andreas Koenig.
492 Test::Harness uses $^X to determine the perl binary to run the tests
493 with. Test scripts running via the shebang (C<#!>) line may not be
494 portable because $^X is not consistent for shebang scripts across
495 platforms. This is no problem when Test::Harness is run with an
496 absolute path to the perl binary or when $^X can be found in the path.