10 our($VERSION, $verbose, $switches, $have_devel_corestack, $curtest,
11 $columns, @ISA, @EXPORT, @EXPORT_OK);
12 $have_devel_corestack = 0;
16 $ENV{HARNESS_ACTIVE} = 1;
18 # Some experimental versions of OS/2 build have broken $?
19 my $ignore_exitcode = $ENV{HARNESS_IGNORE_EXITCODE};
21 my $files_in_dir = $ENV{HARNESS_FILELEAK_IN_DIR};
23 my $tests_skipped = 0;
24 my $subtests_skipped = 0;
27 @EXPORT= qw(&runtests);
28 @EXPORT_OK= qw($verbose $switches);
32 $columns = $ENV{HARNESS_COLUMNS} || $ENV{COLUMNS} || 80;
34 sub globdir { opendir DIRH, shift; my @f = readdir DIRH; closedir DIRH; @f } # return every entry (incl. '.' and '..') of the named dir; NOTE(review): bareword handle, opendir result unchecked -- a failed open yields an empty list plus warnings
39 my($test,$te,$ok,$next,$max,$pct,$totbonus,@failed,%failedtests);
47 # pass -I flags to children
48 my $old5lib = $ENV{PERL5LIB};
50 # VMS has a 255-byte limit on the length of %ENV entries, so
51 # toss the ones that involve perl_root, the install location
55 $new5lib = join($Config{path_sep}, grep {!/perl_root/i;} @INC);
56 $switches =~ s/-(\S*[A-Z]\S*)/"-$1"/g;
59 $new5lib = join($Config{path_sep}, @INC);
62 local($ENV{'PERL5LIB'}) = $new5lib;
64 my @dir_files = globdir $files_in_dir if defined $files_in_dir;
65 my $t_start = new Benchmark;
66 while ($test = shift(@tests)) {
69 if ($^O eq 'VMS') { $te =~ s/^.*\.t\./[.t./s; }
70 my $blank = (' ' x 77);
71 my $leader = "$te" . '.' x (20 - length($te));
73 $ml = "\r$blank\r$leader"
74 if -t STDOUT and not $ENV{HARNESS_NOTTY} and not $verbose;
76 my $fh = new FileHandle;
77 $fh->open($test) or print "can't open $test. $!\n";
80 $s .= " $ENV{'HARNESS_PERL_SWITCHES'}"
81 if exists $ENV{'HARNESS_PERL_SWITCHES'};
82 $s .= join " ", q[ "-T"], map {qq["-I$_"]} @INC
83 if $first =~ /^#!.*\bperl.*-\w*T/;
84 $fh->close or print "can't close $test. $!\n";
85 my $cmd = ($ENV{'HARNESS_COMPILE_TEST'})
86 ? "./perl -I../lib ../utils/perlcc $test "
87 . "-run 2>> ./compilelog |"
89 $cmd = "MCR $cmd" if $^O eq 'VMS';
90 $fh->open($cmd) or print "can't run $test. $!\n";
91 $ok = $next = $max = 0;
101 if (/^1\.\.([0-9]+) todo([\d\s]+)\;/) {
103 for (split(/\s+/, $2)) { $todo{$_} = 1; }
107 } elsif (/^1\.\.([0-9]+)(\s*\#\s*[Ss]kip\S*(?>\s+)(.+))?/) {
112 $skip_reason = $3 if not $max and defined $3;
113 } elsif ($max && /^(not\s+)?ok\b/) {
115 if (/^not ok\s*(\d*)/){
116 $this = $1 if $1 > 0;
117 print "${ml}NOK $this" if $ml;
124 } elsif (/^ok\s*(\d*) *(\s*\#\s*[Ss]kip\S*(?:(?>\s+)(.+))?)?$/) {
125 $this = $1 if $1 > 0;
126 print "${ml}ok $this/$max" if $ml;
129 $skipped++ if defined $2;
131 $reason = 'unknown reason' if defined $2;
132 $reason = $3 if defined $3;
133 if (defined $reason and defined $skip_reason) {
134 # print "was: '$skip_reason' new '$reason'\n";
135 $skip_reason = 'various reasons'
136 if $skip_reason ne $reason;
137 } elsif (defined $reason) {
138 $skip_reason = $reason;
140 $bonus++, $totbonus++ if $todo{$this};
141 } elsif (/^ok\s*(\d*)\s*\#([^\r]*)$/) {
142 $this = $1 if $1 > 0;
143 print "${ml}ok $this/$max" if $ml;
147 # an ok or not ok not matching the 3 cases above...
148 # just ignore it for compatibility with TEST
152 # print "Test output counter mismatch [test $this]\n";
153 # no need to warn probably
154 push @failed, $next..$this-1;
155 } elsif ($this < $next) {
156 #we have seen more "ok" lines than the number suggests
157 print "Confused test output: test $this answered after test ", $next-1, "\n";
163 $fh->close; # must close to reap child resource values
164 my $wstatus = $ignore_exitcode ? 0 : $?; # Can trust $? ?
166 $estatus = ($^O eq 'VMS'
167 ? eval 'use vmsish "status"; $estatus = $?'
170 my ($failed, $canon, $percent) = ('??', '??');
171 printf "${ml}dubious\n\tTest returned status $estatus (wstat %d, 0x%x)\n",
173 print "\t\t(VMS status is $estatus)\n" if $^O eq 'VMS';
174 if (corestatus($wstatus)) { # until we have a wait module
175 if ($have_devel_corestack) {
176 Devel::CoreStack::stack($^X);
178 print "\ttest program seems to have generated a core\n";
183 if ($next == $max + 1 and not @failed) {
184 print "\tafter all the subtests completed successfully\n";
186 $failed = 0; # But we do not set $canon!
188 push @failed, $next..$max;
190 (my $txt, $canon) = canonfailed($max,$skipped,@failed);
191 $percent = 100*(scalar @failed)/$max;
195 $failedtests{$test} = { canon => $canon, max => $max || '??',
197 name => $test, percent => $percent,
198 estat => $estatus, wstat => $wstatus,
200 } elsif ($ok == $max && $next == $max+1) {
201 if ($max and $skipped + $bonus) {
203 push(@msg, "$skipped/$max skipped: $skip_reason")
205 push(@msg, "$bonus/$max unexpectedly succeeded")
207 print "${ml}ok, ".join(', ', @msg)."\n";
210 } elsif (defined $skip_reason) {
211 print "skipped: $skip_reason\n";
214 print "skipped test on this platform\n";
220 push @failed, $next..$max;
223 my ($txt, $canon) = canonfailed($max,$skipped,@failed);
225 $failedtests{$test} = { canon => $canon, max => $max,
226 failed => scalar @failed,
227 name => $test, percent => 100*(scalar @failed)/$max,
228 estat => '', wstat => '',
231 print "Don't know which tests failed: got $ok ok, expected $max\n";
232 $failedtests{$test} = { canon => '??', max => $max,
234 name => $test, percent => undef,
235 estat => '', wstat => '',
239 } elsif ($next == 0) {
240 print "FAILED before any test output arrived\n";
242 $failedtests{$test} = { canon => '??', max => '??',
244 name => $test, percent => undef,
245 estat => '', wstat => '',
248 $subtests_skipped += $skipped;
249 if (defined $files_in_dir) {
250 my @new_dir_files = globdir $files_in_dir;
251 if (@new_dir_files != @dir_files) {
253 @f{@new_dir_files} = (1) x @new_dir_files;
254 delete @f{@dir_files};
255 my @f = sort keys %f;
256 print "LEAKED FILES: @f\n";
257 @dir_files = @new_dir_files;
261 my $t_total = timediff(new Benchmark, $t_start);
264 if (defined $old5lib) {
265 $ENV{PERL5LIB} = $old5lib;
267 delete $ENV{PERL5LIB};
271 $bonusmsg = (" ($totbonus subtest".($totbonus>1?'s':'').
272 " UNEXPECTEDLY SUCCEEDED)")
274 if ($tests_skipped) {
275 $bonusmsg .= ", $tests_skipped test" . ($tests_skipped != 1 ? 's' : '');
276 if ($subtests_skipped) {
277 $bonusmsg .= " and $subtests_skipped subtest"
278 . ($subtests_skipped != 1 ? 's' : '');
280 $bonusmsg .= ' skipped';
282 elsif ($subtests_skipped) {
283 $bonusmsg .= ", $subtests_skipped subtest"
284 . ($subtests_skipped != 1 ? 's' : '')
287 if ($bad == 0 && $totmax) {
288 print "All tests successful$bonusmsg.\n";
290 die "FAILED--no tests were run for some reason.\n";
291 } elsif ($totmax==0) {
292 my $blurb = $total==1 ? "script" : "scripts";
293 die "FAILED--$total test $blurb could be run, alas--no output ever seen\n";
295 $pct = sprintf("%.2f", $good / $total * 100);
296 my $subpct = sprintf " %d/%d subtests failed, %.2f%% okay.",
297 $totmax - $totok, $totmax, 100*$totok/$totmax;
299 # First, figure out max length of test names
300 my $failed_str = "Failed Test";
301 my $middle_str = " Status Wstat Total Fail Failed ";
302 my $list_str = "List of Failed";
303 my $max_namelen = length($failed_str);
305 foreach $script (keys %failedtests) {
307 (length $failedtests{$script}->{name} > $max_namelen) ?
308 length $failedtests{$script}->{name} : $max_namelen;
310 my $list_len = $columns - length($middle_str) - $max_namelen;
311 if ($list_len < length($list_str)) {
312 $list_len = length($list_str);
313 $max_namelen = $columns - length($middle_str) - $list_len;
314 if ($max_namelen < length($failed_str)) {
315 $max_namelen = length($failed_str);
316 $columns = $max_namelen + length($middle_str) + $list_len;
320 my $fmt_top = "format STDOUT_TOP =\n"
321 . sprintf("%-${max_namelen}s", $failed_str)
326 my $fmt = "format STDOUT =\n"
327 . "@" . "<" x ($max_namelen - 1)
328 . " @>> @>>>> @>>>> @>>> ^##.##% "
329 . "^" . "<" x ($list_len - 1) . "\n"
330 . '{ $curtest->{name}, $curtest->{estat},'
331 . ' $curtest->{wstat}, $curtest->{max},'
332 . ' $curtest->{failed}, $curtest->{percent},'
333 . ' $curtest->{canon}'
335 . "~~" . " " x ($columns - $list_len - 2) . "^"
336 . "<" x ($list_len - 1) . "\n"
337 . '$curtest->{canon}'
345 # Now write to formats
346 for $script (sort keys %failedtests) {
347 $curtest = $failedtests{$script};
351 $bonusmsg =~ s/^,\s*//;
352 print "$bonusmsg.\n" if $bonusmsg;
353 die "Failed $bad/$total test scripts, $pct% okay.$subpct\n";
356 printf("Files=%d, Tests=%d, %s\n", $files, $totmax, timestr($t_total, 'nop'));
358 return ($bad == 0 && $totmax) ;
361 my $tried_devel_corestack;
365 eval {require 'wait.ph'};
366 my $ret = defined &WCOREDUMP ? WCOREDUMP($st) : $st & 0200;
368 eval { require Devel::CoreStack; $have_devel_corestack++ }
369 unless $tried_devel_corestack++;
374 sub canonfailed ($@) {
# Compose the "FAILED tests ..." summary text for one test script.
# Takes the subtest count ($max), the number of skipped subtests, and the
# list of failed subtest numbers.  NOTE(review): this is a numbered listing
# with gaps -- several original source lines are absent between the lines
# below (including, it appears, the declarations of %seen/@canon/@result
# and the sub's return) -- so comments here cover only what is visible.
375 my($max,$skipped,@failed) = @_;
# Numerically sort the failed subtest numbers, dropping duplicates.
377 @failed = sort {$a <=> $b} grep !$seen{$_}++, @failed;
378 my $failed = @failed;
382 my $last = $min = shift @failed;
# Collapse consecutive failure numbers into compact "min-last" ranges.
385 for (@failed, $failed[-1]) { # don't forget the last one
386 if ($_ > $last+1 || $_ == $last) {
390 push @canon, "$min-$last";
# Multiple failures report the range list; a single failure names one test.
397 push @result, "FAILED tests @canon\n";
400 push @result, "FAILED test $last\n";
# Append pass-rate statistics, percentages formatted to two decimals.
404 push @result, "\tFailed $failed/$max tests, ";
405 push @result, sprintf("%.2f",100*(1-$failed/$max)), "% okay";
# Pluralize "test" only when more than one subtest was skipped.
406 my $ender = 's' x ($skipped > 1);
407 my $good = $max - $failed - $skipped;
408 my $goodper = sprintf("%.2f",100*($good/$max));
409 push @result, " (-$skipped skipped test$ender: $good okay, $goodper%)" if $skipped;
411 my $txt = join "", @result;
420 Test::Harness - run perl standard test scripts with statistics
430 (By using the L<Test> module, you can write test scripts without
431 knowing the exact output this module expects. However, if you need to
432 know the specifics, read on!)
434 Perl test scripts print to standard output C<"ok N"> for each single
435 test, where C<N> is an increasing sequence of integers. The first line
436 output by a standard test script is C<"1..M"> with C<M> being the
437 number of tests that should be run within the test
438 script. Test::Harness::runtests(@tests) runs all the testscripts
439 named as arguments and checks standard output for the expected
442 After all tests have been performed, runtests() prints some
443 performance statistics that are computed by the Benchmark module.
445 =head2 The test script output
447 Any output from the testscript to standard error is ignored and
448 bypassed, thus will be seen by the user. Lines written to standard
449 output containing C</^(not\s+)?ok\b/> are interpreted as feedback for
450 runtests(). All other lines are discarded.
452 The test numbers after C<ok> may be omitted. In this
453 case Test::Harness temporarily maintains its own counter until the
454 script supplies test numbers again. So the following test script
468 Failed 3/6 tests, 50.00% okay
470 The global variable $Test::Harness::verbose is exportable and can be
471 used to let runtests() display the standard output of the script
472 without altering the behavior otherwise.
474 The global variable $Test::Harness::switches is exportable and can be
475 used to set perl command line options used for running the test
476 script(s). The default value is C<-w>.
478 If the standard output line contains substring C< # Skip> (with
479 variations in spacing and case) after C<ok> or C<ok NUMBER>, it is
480 counted as a skipped test. In no other circumstance is anything
481 allowed to follow C<ok> or C<ok NUMBER>. If the whole testscript
482 succeeds, the count of skipped tests is included in the generated
485 C<Test::Harness> reports the text after C< # Skip\S*\s+> as a reason
486 for skipping. Similarly, one can include a similar explanation in a
487 C<1..0> line emitted if the test is skipped completely:
489 1..0 # Skipped: no leverage found
493 C<&runtests> is exported by Test::Harness per default.
499 =item C<All tests successful.\nFiles=%d, Tests=%d, %s>
501 If all tests are successful some statistics about the performance are
504 =item C<FAILED tests %s\n\tFailed %d/%d tests, %.2f%% okay.>
506 For any single script that has failing subtests statistics like the
509 =item C<Test returned status %d (wstat %d)>
511 For scripts that return a non-zero exit status, both C<$? E<gt>E<gt> 8> and C<$?> are
512 printed in a message similar to the above.
514 =item C<Failed 1 test, %.2f%% okay. %s>
516 =item C<Failed %d/%d tests, %.2f%% okay. %s>
518 If not all tests were successful, the script dies with one of the
525 Setting C<HARNESS_IGNORE_EXITCODE> makes harness ignore the exit status
528 Setting C<HARNESS_NOTTY> to a true value forces it to behave as though
529 STDOUT were not a console. You may need to set this if you don't want
530 harness to output more frequent progress messages using carriage returns.
531 Some consoles may not handle carriage returns properly (which results
532 in a somewhat messy output).
534 Setting C<HARNESS_COMPILE_TEST> to a true value will make harness attempt
535 to compile the test using C<perlcc> before running it.
537 If C<HARNESS_FILELEAK_IN_DIR> is set to the name of a directory, harness
538 will check after each test whether new files appeared in that directory,
541 LEAKED FILES: scr.tmp 0 my.db
543 If relative, the directory name is interpreted relative to the current
544 directory at the moment runtests() was called. Putting an absolute path into
545 C<HARNESS_FILELEAK_IN_DIR> may give more predictable results.
547 The value of C<HARNESS_PERL_SWITCHES> will be prepended to the
548 switches used to invoke perl on each test. For example, setting
549 C<HARNESS_PERL_SWITCHES> to "-W" will run all tests with all
552 If C<HARNESS_COLUMNS> is set, then this value will be used for the
553 width of the terminal. If it is not set then it will default to
554 C<COLUMNS>. If this is not set, it will default to 80. Note that users
555 of Bourne-sh based shells will need to C<export COLUMNS> for this
556 module to use that variable.
558 Harness sets C<HARNESS_ACTIVE> before executing the individual tests.
559 This allows the tests to determine if they are being executed through the
560 harness or by any other means.
564 L<Test> for writing test scripts and also L<Benchmark> for the
565 underlying timing routines.
569 Either Tim Bunce or Andreas Koenig, we don't know. What we know for
570 sure is, that it was inspired by Larry Wall's TEST script that came
571 with perl distributions for ages. Numerous anonymous contributors
572 exist. Current maintainer is Andreas Koenig.
576 Test::Harness uses $^X to determine the perl binary to run the tests
577 with. Test scripts running via the shebang (C<#!>) line may not be
578 portable because $^X is not consistent for shebang scripts across
579 platforms. This is no problem when Test::Harness is run with an
580 absolute path to the perl binary or when $^X can be found in the path.