our($VERSION, $verbose, $switches, $have_devel_corestack, $curtest,
    @ISA, @EXPORT, @EXPORT_OK);
$have_devel_corestack = 0;
$ENV{HARNESS_ACTIVE} = 1;
# Some experimental versions of OS/2 build have broken $?
my $ignore_exitcode = $ENV{HARNESS_IGNORE_EXITCODE};
my $files_in_dir = $ENV{HARNESS_FILELEAK_IN_DIR};
my $tests_skipped = 0;
my $subtests_skipped = 0;
@EXPORT    = qw(&runtests);
@EXPORT_OK = qw($verbose $switches);
Failed Test  Status Wstat Total Fail  Failed  List of failed
-------------------------------------------------------------------------------
@<<<<<<<<<<<<<< @>> @>>>> @>>>> @>>> ^##.##%  ^<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
~~                                            ^<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
sub globdir { opendir DIRH, shift; my @f = readdir DIRH; closedir DIRH; @f }
my($test,$te,$ok,$next,$max,$pct,$totok,$totbonus,@failed,%failedtests);
# pass -I flags to children
my $old5lib = $ENV{PERL5LIB};
# VMS has a 255-byte limit on the length of %ENV entries, so
# toss the ones that involve perl_root, the install location
$new5lib = join($Config{path_sep}, grep {!/perl_root/i;} @INC);
$switches =~ s/-(\S*[A-Z]\S*)/"-$1"/g;
$new5lib = join($Config{path_sep}, @INC);
local($ENV{'PERL5LIB'}) = $new5lib;
my @dir_files = globdir $files_in_dir if defined $files_in_dir;
my $t_start = new Benchmark;
while ($test = shift(@tests)) {
if ($^O eq 'VMS') { $te =~ s/^.*\.t\./[.t./s; }
my $blank = (' ' x 77);
my $leader = "$te" . '.' x (20 - length($te));
$ml = "\r$blank\r$leader"
    if -t STDOUT and not $ENV{HARNESS_NOTTY} and not $verbose;
my $fh = new FileHandle;
$fh->open($test) or print "can't open $test. $!\n";
$s .= " $ENV{'HARNESS_PERL_SWITCHES'}"
    if exists $ENV{'HARNESS_PERL_SWITCHES'};
$s .= join " ", q[ "-T"], map {qq["-I$_"]} @INC
    if $first =~ /^#!.*\bperl.*-\w*T/;
$fh->close or print "can't close $test. $!\n";
my $cmd = ($ENV{'HARNESS_COMPILE_TEST'})
    ? "./perl -I../lib ../utils/perlcc $test "
      . "-run 2>> ./compilelog |"
    : "$^X $s $test|";  # default: run the test with the current perl and switches
$cmd = "MCR $cmd" if $^O eq 'VMS';
$fh->open($cmd) or print "can't run $test. $!\n";
$ok = $next = $max = 0;
if (/^1\.\.([0-9]+) todo([\d\s]+)\;/) {
for (split(/\s+/, $2)) { $todo{$_} = 1; }
} elsif (/^1\.\.([0-9]+)(\s*\#\s*[Ss]kip\S*(?>\s+)(.+))?/) {
$skip_reason = $3 if not $max and defined $3;
} elsif ($max && /^(not\s+)?ok\b/) {
if (/^not ok\s*(\d*)/){
$this = $1 if $1 > 0;
print "${ml}NOK $this" if $ml;
} elsif (/^ok\s*(\d*)(\s*\#\s*[Ss]kip\S*(?:(?>\s+)(.+))?)?/) {
$this = $1 if $1 > 0;
print "${ml}ok $this/$max" if $ml;
$skipped++ if defined $2;
$reason = 'unknown reason' if defined $2;
$reason = $3 if defined $3;
if (defined $reason and defined $skip_reason) {
# print "was: '$skip_reason' new '$reason'\n";
$skip_reason = 'various reasons'
    if $skip_reason ne $reason;
} elsif (defined $reason) {
$skip_reason = $reason;
$bonus++, $totbonus++ if $todo{$this};
# warn "Test output counter mismatch [test $this]\n";
# no need to warn probably
push @failed, $next..$this-1;
} elsif ($this < $next) {
# we have seen more "ok" lines than the number suggests
warn "Confused test output: test $this answered after test ", $next-1, "\n";
$fh->close; # must close to reap child resource values
my $wstatus = $ignore_exitcode ? 0 : $?; # Can trust $? ?
$estatus = ($^O eq 'VMS'
    ? eval 'use vmsish "status"; $estatus = $?'
    : $wstatus >> 8);
my ($failed, $canon, $percent) = ('??', '??');
printf "${ml}dubious\n\tTest returned status $estatus (wstat %d, 0x%x)\n",
    $wstatus, $wstatus;
print "\t\t(VMS status is $estatus)\n" if $^O eq 'VMS';
if (corestatus($wstatus)) { # until we have a wait module
if ($have_devel_corestack) {
Devel::CoreStack::stack($^X);
print "\ttest program seems to have generated a core\n";
if ($next == $max + 1 and not @failed) {
print "\tafter all the subtests completed successfully\n";
$failed = 0; # But we do not set $canon!
push @failed, $next..$max;
(my $txt, $canon) = canonfailed($max,$skipped,@failed);
$percent = 100*(scalar @failed)/$max;
$failedtests{$test} = { canon => $canon, max => $max || '??',
                        name => $test, percent => $percent,
                        estat => $estatus, wstat => $wstatus,
} elsif ($ok == $max && $next == $max+1) {
if ($max and $skipped + $bonus) {
push(@msg, "$skipped/$max skipped: $skip_reason")
    if $skipped;
push(@msg, "$bonus/$max unexpectedly succeeded")
    if $bonus;
print "${ml}ok, ".join(', ', @msg)."\n";
} elsif (defined $skip_reason) {
print "skipped: $skip_reason\n";
print "skipped test on this platform\n";
push @failed, $next..$max;
my ($txt, $canon) = canonfailed($max,$skipped,@failed);
$failedtests{$test} = { canon => $canon, max => $max,
                        failed => scalar @failed,
                        name => $test, percent => 100*(scalar @failed)/$max,
                        estat => '', wstat => '',
print "Don't know which tests failed: got $ok ok, expected $max\n";
$failedtests{$test} = { canon => '??', max => $max,
                        name => $test, percent => undef,
                        estat => '', wstat => '',
} elsif ($next == 0) {
print "FAILED before any test output arrived\n";
$failedtests{$test} = { canon => '??', max => '??',
                        name => $test, percent => undef,
                        estat => '', wstat => '',
$subtests_skipped += $skipped;
if (defined $files_in_dir) {
my @new_dir_files = globdir $files_in_dir;
if (@new_dir_files != @dir_files) {
@f{@new_dir_files} = (1) x @new_dir_files;
delete @f{@dir_files};
my @f = sort keys %f;
print "LEAKED FILES: @f\n";
@dir_files = @new_dir_files;
my $t_total = timediff(new Benchmark, $t_start);
if (defined $old5lib) {
$ENV{PERL5LIB} = $old5lib;
delete $ENV{PERL5LIB};
$bonusmsg = (" ($totbonus subtest".($totbonus>1?'s':'').
             " UNEXPECTEDLY SUCCEEDED)")
    if $totbonus;
if ($tests_skipped) {
$bonusmsg .= ", $tests_skipped test" . ($tests_skipped != 1 ? 's' : '');
if ($subtests_skipped) {
$bonusmsg .= " and $subtests_skipped subtest"
             . ($subtests_skipped != 1 ? 's' : '');
$bonusmsg .= ' skipped';
elsif ($subtests_skipped) {
$bonusmsg .= ", $subtests_skipped subtest"
             . ($subtests_skipped != 1 ? 's' : '')
             . " skipped";
if ($bad == 0 && $totmax) {
print "All tests successful$bonusmsg.\n";
die "FAILED--no tests were run for some reason.\n";
} elsif ($totmax==0) {
my $blurb = $total==1 ? "script" : "scripts";
die "FAILED--$total test $blurb could be run, alas--no output ever seen\n";
$pct = sprintf("%.2f", $good / $total * 100);
my $subpct = sprintf " %d/%d subtests failed, %.2f%% okay.",
    $totmax - $totok, $totmax, 100*$totok/$totmax;
for $script (sort keys %failedtests) {
$curtest = $failedtests{$script};
$bonusmsg =~ s/^,\s*//;
print "$bonusmsg.\n" if $bonusmsg;
die "Failed $bad/$total test scripts, $pct% okay.$subpct\n";
printf("Files=%d, Tests=%d, %s\n", $files, $totmax, timestr($t_total, 'nop'));
return ($bad == 0 && $totmax);
my $tried_devel_corestack;
eval {require 'wait.ph'};
$ret = ($st & 0200); # Tim says, this is for 90%
$ret = WCOREDUMP($st);
eval { require Devel::CoreStack; $have_devel_corestack++ }
    unless $tried_devel_corestack++;
sub canonfailed ($@) {
my($max,$skipped,@failed) = @_;
@failed = sort {$a <=> $b} grep !$seen{$_}++, @failed;
my $failed = @failed;
my $last = $min = shift @failed;
for (@failed, $failed[-1]) { # don't forget the last one
if ($_ > $last+1 || $_ == $last) {
push @canon, "$min-$last";
push @result, "FAILED tests @canon\n";
push @result, "FAILED test $last\n";
push @result, "\tFailed $failed/$max tests, ";
push @result, sprintf("%.2f",100*(1-$failed/$max)), "% okay";
my $ender = 's' x ($skipped > 1);
my $good = $max - $failed - $skipped;
my $goodper = sprintf("%.2f",100*($good/$max));
push @result, " (-$skipped skipped test$ender: $good okay, $goodper%)" if $skipped;
my $txt = join "", @result;
Test::Harness - run perl standard test scripts with statistics

(By using the L<Test> module, you can write test scripts without
knowing the exact output this module expects. However, if you need to
know the specifics, read on!)

Perl test scripts print to standard output C<"ok N"> for each single
test, where C<N> is an increasing sequence of integers. The first line
output by a standard test script is C<"1..M">, with C<M> being the
number of tests that should be run within the test script.
Test::Harness::runtests(@tests) runs all the test scripts named as
arguments and checks standard output for the expected C<"ok N">
strings.

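For example, a caller might invoke the harness directly like this (the
test file names are illustrative):

    use Test::Harness;

    runtests("t/base.t", "t/strings.t");

runtests() prints a summary of the results and dies if any of the test
scripts failed.
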
After all tests have been performed, runtests() prints some
performance statistics that are computed by the Benchmark module.

=head2 The test script output

Any output from the test script to standard error is ignored by the
harness and passed straight through, so it will be seen by the user.
Lines written to standard output that match C</^(not\s+)?ok\b/> are
interpreted as feedback for runtests(). All other lines are discarded.

The test numbers after C<ok> may be omitted. In that case Test::Harness
temporarily maintains its own counter until the script supplies test
numbers again. So the following test script

    print <<END;
    1..6
    not ok
    ok
    not ok
    ok
    ok
    END

will generate

    FAILED tests 1, 3, 6
    Failed 3/6 tests, 50.00% okay

The global variable $Test::Harness::verbose is exportable and can be
used to let runtests() display the standard output of the script
without otherwise altering its behavior.

The global variable $Test::Harness::switches is exportable and can be
used to set perl command line options used for running the test
script(s). The default value is C<-w>.

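Both variables can be imported and set before calling runtests(); for
example (the switch string here is only illustrative):

    use Test::Harness qw(&runtests $verbose $switches);

    $verbose  = 1;            # echo each test script's output
    $switches = "-w -Ilib";   # illustrative; the default is just "-w"
    runtests(@test_files);
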
If a standard output line contains the substring C< # Skip> (with
variations in spacing and case) after C<ok> or C<ok NUMBER>, it is
counted as a skipped test. If the whole test script succeeds, the
count of skipped tests is included in the generated output.

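For example, a line such as the following (the test number and reason
are illustrative) is counted as a skipped test:

    ok 23 # Skip: no database handle available
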
C<Test::Harness> reports the text after C< # Skip(whatever)> as the
reason for skipping. A similar explanation can be included in a
C<1..0> line emitted when the test script is skipped completely:

    1..0 # Skipped: no leverage found

C<&runtests> is exported by Test::Harness by default.

=item C<All tests successful.\nFiles=%d, Tests=%d, %s>

If all tests are successful, some statistics about the performance are
printed.

473 For any single script that has failing subtests statistics like the
476 =item C<Test returned status %d (wstat %d)>
478 Scripts that return a non-zero exit status, both C<$? E<gt>E<gt> 8> and C<$?> are
479 printed in a message similar to the above.
=item C<Failed 1 test, %.2f%% okay. %s>

=item C<Failed %d/%d tests, %.2f%% okay. %s>

If not all tests were successful, the script dies with one of the
above messages.

Setting C<HARNESS_IGNORE_EXITCODE> makes harness ignore the exit status
of child processes.

Setting C<HARNESS_NOTTY> to a true value forces the harness to behave
as though STDOUT were not a console. You may need to set this if you
don't want harness to output more frequent progress messages using
carriage returns. Some consoles may not handle carriage returns
properly (which results in somewhat messy output).

Setting C<HARNESS_COMPILE_TEST> to a true value will make harness attempt
to compile the test using C<perlcc> before running it.

If C<HARNESS_FILELEAK_IN_DIR> is set to the name of a directory, harness
will check after each test whether new files appeared in that directory,
and report them as

    LEAKED FILES: scr.tmp 0 my.db

If relative, the directory name is interpreted with respect to the
current directory at the moment runtests() was called. Putting an
absolute path into C<HARNESS_FILELEAK_IN_DIR> may give more predictable
results.

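A minimal sketch of how this might be used from a driver script (the
directory name and test file list are only examples):

    use Test::Harness;

    $ENV{HARNESS_FILELEAK_IN_DIR} = "/tmp/test-scratch";  # example directory
    runtests(@test_files);   # prints "LEAKED FILES: ..." if new files appear
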
The value of C<HARNESS_PERL_SWITCHES> will be prepended to the
switches used to invoke perl on each test. For example, setting
C<HARNESS_PERL_SWITCHES> to "-W" will run all tests with all
warnings enabled.

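From a driver script this could be set before calling runtests(), for
example (the test file list is illustrative):

    $ENV{HARNESS_PERL_SWITCHES} = "-W";   # passed through to perl for every test
    runtests(@test_files);
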
Harness sets C<HARNESS_ACTIVE> before executing the individual tests.
This allows the tests to determine if they are being executed through the
harness or by any other means.

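A test script could check for it like this:

    # inside a test script: detect whether the harness is running us
    if ($ENV{HARNESS_ACTIVE}) {
        # e.g. skip interactive prompts when run under the harness
    }
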
L<Test> for writing test scripts and also L<Benchmark> for the
underlying timing routines.

Either Tim Bunce or Andreas Koenig, we don't know. What we know for
sure is that it was inspired by Larry Wall's TEST script that came
with perl distributions for ages. Numerous anonymous contributors
exist. The current maintainer is Andreas Koenig.

Test::Harness uses $^X to determine the perl binary to run the tests
with. Test scripts running via the shebang (C<#!>) line may not be
portable because $^X is not consistent for shebang scripts across
platforms. This is no problem when Test::Harness is run with an
absolute path to the perl binary or when $^X can be found in the path.