# Package globals: verbosity/switch configuration, export lists, and
# state shared with the report formats ($curtest, $columns).
10 our($VERSION, $verbose, $switches, $have_devel_corestack, $curtest,
11 $columns, @ISA, @EXPORT, @EXPORT_OK);
# Set once Devel::CoreStack has been loaded (the load is attempted lazily).
12 $have_devel_corestack = 0;
# Lets test scripts detect that they are being run under the harness.
16 $ENV{HARNESS_ACTIVE} = 1;
18 # Some experimental OS/2 builds have a broken $?; this env var lets users ignore exit codes.
19 my $ignore_exitcode = $ENV{HARNESS_IGNORE_EXITCODE};
# Optional directory to monitor for files leaked by test scripts.
21 my $files_in_dir = $ENV{HARNESS_FILELEAK_IN_DIR};
# Running totals of skipped whole test scripts and individual subtests.
23 my $tests_skipped = 0;
24 my $subtests_skipped = 0;
# runtests() is exported by default; $verbose and $switches on request.
27 @EXPORT= qw(&runtests);
28 @EXPORT_OK= qw($verbose $switches);
# Terminal width: HARNESS_COLUMNS overrides COLUMNS, defaulting to 80.
32 $columns = $ENV{HARNESS_COLUMNS} || $ENV{COLUMNS} || 80;
# Return every entry (including '.' and '..') of the given directory.
# Fix: the original used the bareword package-global handle DIRH (which
# clobbers any other code using that handle) and never checked opendir.
# A lexical handle is used instead; on open failure we return an empty
# list explicitly, matching the original's observable silent-failure
# behavior (readdir on a failed bareword handle also yielded nothing).
sub globdir {
    my ($dir) = @_;
    opendir my $dh, $dir or return ();
    my @entries = readdir $dh;
    closedir $dh;
    return @entries;
}
39 my($test,$te,$ok,$next,$max,$pct,$totbonus,@failed,%failedtests);
47 # pass -I flags to children
48 my $old5lib = $ENV{PERL5LIB};
50 # VMS has a 255-byte limit on the length of %ENV entries, so
51 # toss the ones that involve perl_root, the install location
55 $new5lib = join($Config{path_sep}, grep {!/perl_root/i;} @INC);
56 $switches =~ s/-(\S*[A-Z]\S*)/"-$1"/g;
59 $new5lib = join($Config{path_sep}, @INC);
62 local($ENV{'PERL5LIB'}) = $new5lib;
64 my @dir_files = globdir $files_in_dir if defined $files_in_dir;
65 my $t_start = new Benchmark;
66 while ($test = shift(@tests)) {
69 if ($^O eq 'VMS') { $te =~ s/^.*\.t\./[.t./s; }
70 my $blank = (' ' x 77);
71 my $leader = "$te" . '.' x (20 - length($te));
73 $ml = "\r$blank\r$leader"
74 if -t STDOUT and not $ENV{HARNESS_NOTTY} and not $verbose;
76 my $fh = new FileHandle;
77 $fh->open($test) or print "can't open $test. $!\n";
80 $s .= " $ENV{'HARNESS_PERL_SWITCHES'}"
81 if exists $ENV{'HARNESS_PERL_SWITCHES'};
82 $s .= join " ", q[ "-T"], map {qq["-I$_"]} @INC
83 if $first =~ /^#!.*\bperl.*-\w*T/;
84 $fh->close or print "can't close $test. $!\n";
85 my $cmd = ($ENV{'HARNESS_COMPILE_TEST'})
86 ? "./perl -I../lib ../utils/perlcc $test "
87 . "-run 2>> ./compilelog |"
89 $cmd = "MCR $cmd" if $^O eq 'VMS';
90 $fh->open($cmd) or print "can't run $test. $!\n";
91 $ok = $next = $max = 0;
101 if (/^1\.\.([0-9]+) todo([\d\s]+)\;/) {
103 for (split(/\s+/, $2)) { $todo{$_} = 1; }
107 } elsif (/^1\.\.([0-9]+)(\s*\#\s*[Ss]kip\S*(?>\s+)(.+))?/) {
112 $skip_reason = $3 if not $max and defined $3;
113 } elsif ($max && /^(not\s+)?ok\b/) {
115 if (/^not ok\s*(\d*)/){
116 $this = $1 if $1 > 0;
117 print "${ml}NOK $this" if $ml;
124 } elsif (/^ok\s*(\d*)(\s*\#\s*[Ss]kip\S*(?:(?>\s+)(.+))?)?$/) {
125 $this = $1 if $1 > 0;
126 print "${ml}ok $this/$max" if $ml;
129 $skipped++ if defined $2;
131 $reason = 'unknown reason' if defined $2;
132 $reason = $3 if defined $3;
133 if (defined $reason and defined $skip_reason) {
134 # print "was: '$skip_reason' new '$reason'\n";
135 $skip_reason = 'various reasons'
136 if $skip_reason ne $reason;
137 } elsif (defined $reason) {
138 $skip_reason = $reason;
140 $bonus++, $totbonus++ if $todo{$this};
142 # an ok or not ok not matching the 2 cases above...
143 # just ignore it for compatibility with TEST
147 # print "Test output counter mismatch [test $this]\n";
148 # no need to warn probably
149 push @failed, $next..$this-1;
150 } elsif ($this < $next) {
151 #we have seen more "ok" lines than the number suggests
152 print "Confused test output: test $this answered after test ", $next-1, "\n";
158 $fh->close; # must close to reap child resource values
159 my $wstatus = $ignore_exitcode ? 0 : $?; # Can trust $? ?
161 $estatus = ($^O eq 'VMS'
162 ? eval 'use vmsish "status"; $estatus = $?'
165 my ($failed, $canon, $percent) = ('??', '??');
166 printf "${ml}dubious\n\tTest returned status $estatus (wstat %d, 0x%x)\n",
168 print "\t\t(VMS status is $estatus)\n" if $^O eq 'VMS';
169 if (corestatus($wstatus)) { # until we have a wait module
170 if ($have_devel_corestack) {
171 Devel::CoreStack::stack($^X);
173 print "\ttest program seems to have generated a core\n";
178 if ($next == $max + 1 and not @failed) {
179 print "\tafter all the subtests completed successfully\n";
181 $failed = 0; # But we do not set $canon!
183 push @failed, $next..$max;
185 (my $txt, $canon) = canonfailed($max,$skipped,@failed);
186 $percent = 100*(scalar @failed)/$max;
190 $failedtests{$test} = { canon => $canon, max => $max || '??',
192 name => $test, percent => $percent,
193 estat => $estatus, wstat => $wstatus,
195 } elsif ($ok == $max && $next == $max+1) {
196 if ($max and $skipped + $bonus) {
198 push(@msg, "$skipped/$max skipped: $skip_reason")
200 push(@msg, "$bonus/$max unexpectedly succeeded")
202 print "${ml}ok, ".join(', ', @msg)."\n";
205 } elsif (defined $skip_reason) {
206 print "skipped: $skip_reason\n";
209 print "skipped test on this platform\n";
215 push @failed, $next..$max;
218 my ($txt, $canon) = canonfailed($max,$skipped,@failed);
220 $failedtests{$test} = { canon => $canon, max => $max,
221 failed => scalar @failed,
222 name => $test, percent => 100*(scalar @failed)/$max,
223 estat => '', wstat => '',
226 print "Don't know which tests failed: got $ok ok, expected $max\n";
227 $failedtests{$test} = { canon => '??', max => $max,
229 name => $test, percent => undef,
230 estat => '', wstat => '',
234 } elsif ($next == 0) {
235 print "FAILED before any test output arrived\n";
237 $failedtests{$test} = { canon => '??', max => '??',
239 name => $test, percent => undef,
240 estat => '', wstat => '',
243 $subtests_skipped += $skipped;
244 if (defined $files_in_dir) {
245 my @new_dir_files = globdir $files_in_dir;
246 if (@new_dir_files != @dir_files) {
248 @f{@new_dir_files} = (1) x @new_dir_files;
249 delete @f{@dir_files};
250 my @f = sort keys %f;
251 print "LEAKED FILES: @f\n";
252 @dir_files = @new_dir_files;
256 my $t_total = timediff(new Benchmark, $t_start);
259 if (defined $old5lib) {
260 $ENV{PERL5LIB} = $old5lib;
262 delete $ENV{PERL5LIB};
266 $bonusmsg = (" ($totbonus subtest".($totbonus>1?'s':'').
267 " UNEXPECTEDLY SUCCEEDED)")
269 if ($tests_skipped) {
270 $bonusmsg .= ", $tests_skipped test" . ($tests_skipped != 1 ? 's' : '');
271 if ($subtests_skipped) {
272 $bonusmsg .= " and $subtests_skipped subtest"
273 . ($subtests_skipped != 1 ? 's' : '');
275 $bonusmsg .= ' skipped';
277 elsif ($subtests_skipped) {
278 $bonusmsg .= ", $subtests_skipped subtest"
279 . ($subtests_skipped != 1 ? 's' : '')
282 if ($bad == 0 && $totmax) {
283 print "All tests successful$bonusmsg.\n";
285 die "FAILED--no tests were run for some reason.\n";
286 } elsif ($totmax==0) {
287 my $blurb = $total==1 ? "script" : "scripts";
288 die "FAILED--$total test $blurb could be run, alas--no output ever seen\n";
290 $pct = sprintf("%.2f", $good / $total * 100);
291 my $subpct = sprintf " %d/%d subtests failed, %.2f%% okay.",
292 $totmax - $totok, $totmax, 100*$totok/$totmax;
294 # First, figure out max length of test names
295 my $failed_str = "Failed Test";
296 my $middle_str = " Status Wstat Total Fail Failed ";
297 my $list_str = "List of Failed";
298 my $max_namelen = length($failed_str);
300 foreach $script (keys %failedtests) {
302 (length $failedtests{$script}->{name} > $max_namelen) ?
303 length $failedtests{$script}->{name} : $max_namelen;
305 my $list_len = $columns - length($middle_str) - $max_namelen;
306 if ($list_len < length($list_str)) {
307 $list_len = length($list_str);
308 $max_namelen = $columns - length($middle_str) - $list_len;
309 if ($max_namelen < length($failed_str)) {
310 $max_namelen = length($failed_str);
311 $columns = $max_namelen + length($middle_str) + $list_len;
315 my $fmt_top = "format STDOUT_TOP =\n"
316 . sprintf("%-${max_namelen}s", $failed_str)
321 my $fmt = "format STDOUT =\n"
322 . "@" . "<" x ($max_namelen - 1)
323 . " @>> @>>>> @>>>> @>>> ^##.##% "
324 . "^" . "<" x ($list_len - 1) . "\n"
325 . '{ $curtest->{name}, $curtest->{estat},'
326 . ' $curtest->{wstat}, $curtest->{max},'
327 . ' $curtest->{failed}, $curtest->{percent},'
328 . ' $curtest->{canon}'
330 . "~~" . " " x ($columns - $list_len - 2) . "^"
331 . "<" x ($list_len - 1) . "\n"
332 . '$curtest->{canon}'
340 # Now write to formats
341 for $script (sort keys %failedtests) {
342 $curtest = $failedtests{$script};
346 $bonusmsg =~ s/^,\s*//;
347 print "$bonusmsg.\n" if $bonusmsg;
348 die "Failed $bad/$total test scripts, $pct% okay.$subpct\n";
351 printf("Files=%d, Tests=%d, %s\n", $files, $totmax, timestr($t_total, 'nop'));
353 return ($bad == 0 && $totmax) ;
356 my $tried_devel_corestack;
360 eval {require 'wait.ph'};
361 my $ret = defined &WCOREDUMP ? WCOREDUMP($st) : $st & 0200;
363 eval { require Devel::CoreStack; $have_devel_corestack++ }
364 unless $tried_devel_corestack++;
# canonfailed($max, $skipped, @failed): format the failure summary for a
# single test script. Builds a "FAILED tests ..." message plus percentage
# figures; consecutive failed test numbers are compressed into ranges
# (pushed as "$min-$last" onto @canon).
# NOTE(review): this numbered listing elides several lines of the body
# (e.g. the declarations of %seen, @canon and @result) — comments below
# describe only the visible statements.
369 sub canonfailed ($@) {
370 my($max,$skipped,@failed) = @_;
# De-duplicate and numerically sort the failed subtest numbers.
372 @failed = sort {$a <=> $b} grep !$seen{$_}++, @failed;
373 my $failed = @failed;
377 my $last = $min = shift @failed;
# Walk the sorted numbers collecting consecutive runs; the final element
# is appended once more so the trailing run is flushed as well.
380 for (@failed, $failed[-1]) { # don't forget the last one
381 if ($_ > $last+1 || $_ == $last) {
# A run ended: record it in canonical "min-last" range form.
385 push @canon, "$min-$last";
# Multiple failures: report the canonical range list ...
392 push @result, "FAILED tests @canon\n";
# ... or, in the single-failure branch, just the one test number.
395 push @result, "FAILED test $last\n";
399 push @result, "\tFailed $failed/$max tests, ";
400 push @result, sprintf("%.2f",100*(1-$failed/$max)), "% okay";
# 's' x (cond) yields "s" when more than one subtest was skipped, else "".
401 my $ender = 's' x ($skipped > 1);
402 my $good = $max - $failed - $skipped;
403 my $goodper = sprintf("%.2f",100*($good/$max));
404 push @result, " (-$skipped skipped test$ender: $good okay, $goodper%)" if $skipped;
406 my $txt = join "", @result;
415 Test::Harness - run perl standard test scripts with statistics
425 (By using the L<Test> module, you can write test scripts without
426 knowing the exact output this module expects. However, if you need to
427 know the specifics, read on!)
429 Perl test scripts print to standard output C<"ok N"> for each single
430 test, where C<N> is an increasing sequence of integers. The first line
431 output by a standard test script is C<"1..M"> with C<M> being the
432 number of tests that should be run within the test
433 script. Test::Harness::runtests(@tests) runs all the testscripts
434 named as arguments and checks standard output for the expected
437 After all tests have been performed, runtests() prints some
438 performance statistics that are computed by the Benchmark module.
440 =head2 The test script output
442 Any output from the testscript to standard error is ignored by the
443 harness and passed through, so it will be seen by the user. Lines written to standard
444 output containing C</^(not\s+)?ok\b/> are interpreted as feedback for
445 runtests(). All other lines are discarded.
447 The test numbers after C<ok> may be omitted. In that
448 case Test::Harness temporarily maintains its own counter until the
449 script supplies test numbers again. So the following test script
463 Failed 3/6 tests, 50.00% okay
465 The global variable $Test::Harness::verbose is exportable and can be
466 used to let runtests() display the standard output of the script
467 without altering the behavior otherwise.
469 The global variable $Test::Harness::switches is exportable and can be
470 used to set perl command line options used for running the test
471 script(s). The default value is C<-w>.
473 If the standard output line contains substring C< # Skip> (with
474 variations in spacing and case) after C<ok> or C<ok NUMBER>, it is
475 counted as a skipped test. In no other circumstance is anything
476 allowed to follow C<ok> or C<ok NUMBER>. If the whole testscript
477 succeeds, the count of skipped tests is included in the generated
480 C<Test::Harness> reports the text after C< # Skip\S*\s+> as a reason
481 for skipping. Similarly, one can include a similar explanation in a
482 C<1..0> line emitted if the test is skipped completely:
484 1..0 # Skipped: no leverage found
488 C<&runtests> is exported by Test::Harness per default.
494 =item C<All tests successful.\nFiles=%d, Tests=%d, %s>
496 If all tests are successful some statistics about the performance are
499 =item C<FAILED tests %s\n\tFailed %d/%d tests, %.2f%% okay.>
501 For any single script that has failing subtests statistics like the
504 =item C<Test returned status %d (wstat %d)>
506 For scripts that return a non-zero exit status, both C<$? E<gt>E<gt> 8> and C<$?>
507 are printed in a message similar to the above.
509 =item C<Failed 1 test, %.2f%% okay. %s>
511 =item C<Failed %d/%d tests, %.2f%% okay. %s>
513 If not all tests were successful, the script dies with one of the
520 Setting C<HARNESS_IGNORE_EXITCODE> makes harness ignore the exit status
523 Setting C<HARNESS_NOTTY> to a true value forces it to behave as though
524 STDOUT were not a console. You may need to set this if you don't want
525 harness to output more frequent progress messages using carriage returns.
526 Some consoles may not handle carriage returns properly (which results
527 in a somewhat messy output).
529 Setting C<HARNESS_COMPILE_TEST> to a true value will make harness attempt
530 to compile the test using C<perlcc> before running it.
532 If C<HARNESS_FILELEAK_IN_DIR> is set to the name of a directory, harness
533 will check after each test whether new files appeared in that directory,
536 LEAKED FILES: scr.tmp 0 my.db
538 If relative, the directory name is interpreted with respect to the current
539 directory at the moment runtests() was called. Putting an absolute path into
540 C<HARNESS_FILELEAK_IN_DIR> may give more predictable results.
542 The value of C<HARNESS_PERL_SWITCHES> will be prepended to the
543 switches used to invoke perl on each test. For example, setting
544 C<HARNESS_PERL_SWITCHES> to "-W" will run all tests with all
547 If C<HARNESS_COLUMNS> is set, then this value will be used for the
548 width of the terminal. If it is not set then it will default to
549 C<COLUMNS>. If this is not set, it will default to 80. Note that users
550 of Bourne-sh based shells will need to C<export COLUMNS> for this
551 module to use that variable.
553 Harness sets C<HARNESS_ACTIVE> before executing the individual tests.
554 This allows the tests to determine if they are being executed through the
555 harness or by any other means.
559 L<Test> for writing test scripts and also L<Benchmark> for the
560 underlying timing routines.
564 Either Tim Bunce or Andreas Koenig, we don't know. What we know for
565 sure is that it was inspired by Larry Wall's TEST script that came
566 with perl distributions for ages. Numerous anonymous contributors
567 exist. Current maintainer is Andreas Koenig.
571 Test::Harness uses $^X to determine the perl binary to run the tests
572 with. Test scripts running via the shebang (C<#!>) line may not be
573 portable because $^X is not consistent for shebang scripts across
574 platforms. This is no problem when Test::Harness is run with an
575 absolute path to the perl binary or when $^X can be found in the path.