1 # -*- Mode: cperl; cperl-indent-level: 4 -*-
10 our($VERSION, $Verbose, $Switches, $Have_Devel_Corestack, $Curtest,
11 $Columns, $verbose, $switches,
12 @ISA, @EXPORT, @EXPORT_OK
15 # Backwards compatibility for exportable variable names.
17 *switches = \$Switches;
19 $Have_Devel_Corestack = 0;
23 $ENV{HARNESS_ACTIVE} = 1;
25 # Some experimental versions of the OS/2 build have broken $?
26 my $Ignore_Exitcode = $ENV{HARNESS_IGNORE_EXITCODE};
28 my $Files_In_Dir = $ENV{HARNESS_FILELEAK_IN_DIR};
32 @EXPORT = qw(&runtests);
33 @EXPORT_OK = qw($verbose $switches);
37 $Columns = $ENV{HARNESS_COLUMNS} || $ENV{COLUMNS} || 80;
38 $Columns--; # Do not write into the last column
40 sub globdir { opendir DIRH, shift; my @f = readdir DIRH; closedir DIRH; @f }
45 my($tot, $failedtests) = _runtests(@tests);
46 _show_results($tot, $failedtests);
48 return ($tot->{bad} == 0 && $tot->{max}) ;
65 tests => scalar @tests,
71 # pass -I flags to children
72 my $old5lib = $ENV{PERL5LIB};
74 # VMS has a 255-byte limit on the length of %ENV entries, so
75 # toss the ones that involve perl_root, the install location
79 $new5lib = join($Config{path_sep}, grep {!/perl_root/i;} @INC);
80 $Switches =~ s/-(\S*[A-Z]\S*)/"-$1"/g;
83 $new5lib = join($Config{path_sep}, @INC);
86 local($ENV{'PERL5LIB'}) = $new5lib;
88 my @dir_files = globdir $Files_In_Dir if defined $Files_In_Dir;
89 my $t_start = new Benchmark;
91 foreach my $test (@tests) {
93 chop($te); # XXX chomp?
95 if ($^O eq 'VMS') { $te =~ s/^.*\.t\./[.t./s; }
96 my $blank = (' ' x 77);
97 my $leader = "$te" . '.' x (20 - length($te));
99 $ml = "\r$blank\r$leader"
100 if -t STDOUT and not $ENV{HARNESS_NOTTY} and not $Verbose;
103 my $s = _set_switches($test);
105 my $cmd = ($ENV{'HARNESS_COMPILE_TEST'})
106 ? "./perl -I../lib ../utils/perlcc $test "
107 . "-r 2>> ./compilelog |"
109 $cmd = "MCR $cmd" if $^O eq 'VMS';
110 open(my $fh, $cmd) or print "can't run $test. $!\n";
112 # state of the current test.
121 skip_reason => undef,
125 my($seen_header, $tests_seen) = (0,0);
127 if( _parse_header($_, \%test, \%tot) ) {
128 warn "Test header seen twice!\n" if $seen_header;
132 warn "1..M can only appear at the beginning or end of tests\n"
133 if $tests_seen && $test{max} < $tests_seen;
135 elsif( _parse_test_line($_, \%test, \%tot) ) {
141 my($estatus, $wstatus) = _close_fh($fh);
144 $failedtests{$test} = _dubious_return(\%test, \%tot,
146 $failedtests{$test}{name} = $test;
148 elsif ($test{ok} == $test{max} && $test{next} == $test{max}+1) {
149 if ($test{max} and $test{skipped} + $test{bonus}) {
151 push(@msg, "$test{skipped}/$test{max} skipped: $test{skip_reason}")
153 push(@msg, "$test{bonus}/$test{max} unexpectedly succeeded")
155 print "$test{ml}ok, ".join(', ', @msg)."\n";
156 } elsif ($test{max}) {
157 print "$test{ml}ok\n";
158 } elsif (defined $test{skip_reason}) {
159 print "skipped: $test{skip_reason}\n";
162 print "skipped test on this platform\n";
166 } elsif ($test{max}) {
167 if ($test{next} <= $test{max}) {
168 push @{$test{failed}}, $test{next}..$test{max};
170 if (@{$test{failed}}) {
171 my ($txt, $canon) = canonfailed($test{max},$test{skipped},
173 print "$test{ml}$txt";
174 $failedtests{$test} = { canon => $canon,
176 failed => scalar @{$test{failed}},
178 percent => 100*(scalar @{$test{failed}})/$test{max},
183 print "Don't know which tests failed: got $test{ok} ok, ".
184 "expected $test{max}\n";
185 $failedtests{$test} = { canon => '??',
195 } elsif ($test{next} == 0) {
196 print "FAILED before any test output arrived\n";
198 $failedtests{$test} = { canon => '??',
207 $tot{sub_skipped} += $test{skipped};
209 if (defined $Files_In_Dir) {
210 my @new_dir_files = globdir $Files_In_Dir;
211 if (@new_dir_files != @dir_files) {
213 @f{@new_dir_files} = (1) x @new_dir_files;
214 delete @f{@dir_files};
215 my @f = sort keys %f;
216 print "LEAKED FILES: @f\n";
217 @dir_files = @new_dir_files;
221 $tot{bench} = timediff(new Benchmark, $t_start);
224 if (defined $old5lib) {
225 $ENV{PERL5LIB} = $old5lib;
227 delete $ENV{PERL5LIB};
231 return(\%tot, \%failedtests);
236 my($tot, $failedtests) = @_;
239 my $bonusmsg = _bonusmsg($tot);
241 if ($tot->{bad} == 0 && $tot->{max}) {
242 print "All tests successful$bonusmsg.\n";
243 } elsif ($tot->{tests}==0){
244 die "FAILED--no tests were run for some reason.\n";
245 } elsif ($tot->{max} == 0) {
246 my $blurb = $tot->{tests}==1 ? "script" : "scripts";
247 die "FAILED--$tot->{tests} test $blurb could be run, ".
248 "alas--no output ever seen\n";
250 $pct = sprintf("%.2f", $tot->{good} / $tot->{tests} * 100);
251 my $subpct = sprintf " %d/%d subtests failed, %.2f%% okay.",
252 $tot->{max} - $tot->{ok}, $tot->{max},
253 100*$tot->{ok}/$tot->{max};
255 my($fmt_top, $fmt) = _create_fmts($failedtests);
257 # Now write to formats
258 for my $script (sort keys %$failedtests) {
259 $Curtest = $failedtests->{$script};
263 $bonusmsg =~ s/^,\s*//;
264 print "$bonusmsg.\n" if $bonusmsg;
265 die "Failed $tot->{bad}/$tot->{tests} test scripts, $pct% okay.".
270 printf("Files=%d, Tests=%d, %s\n",
271 $tot->{files}, $tot->{max}, timestr($tot->{bench}, 'nop'));
276 my($line, $test, $tot) = @_;
280 print $line if $Verbose;
283 if ($line =~ /^1\.\.([0-9]+) todo([\d\s]+);?/i) {
285 for (split(/\s+/, $2)) { $test->{todo}{$_} = 1; }
287 $tot->{max} += $test->{max};
293 # 1..0 # skip Why? Because I said so!
294 elsif ($line =~ /^1\.\.([0-9]+)
295 (\s*\#\s*[Ss]kip\S*(?>\s+) (.+))?
300 $tot->{max} += $test->{max};
302 $test->{next} = 1 unless $test->{next};
303 $test->{skip_reason} = $3 if not $test->{max} and defined $3;
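# Parse one line of test output ("ok", "not ok", or "Bail out!") and
# update the per-test and overall counters accordingly.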
315 sub _parse_test_line {
316 my($line, $test, $tot) = @_;
318 if ($line =~ /^(not\s+)?ok\b/i) {
319 my $this = $test->{next} || 1;
321 if ($line =~ /^(not )?ok\s*(\d*)(\s*#.*)?/) {
322 my($not, $tnum, $extra) = ($1, $2, $3);
324 $this = $tnum if $tnum;
326 my($type, $reason) = $extra =~ /^\s*#\s*([Ss]kip\S*|TODO)(\s+.+)?/
329 my($istodo, $isskip);
330 if( defined $type ) {
331 $istodo = $type =~ /TODO/;
332 $isskip = $type =~ /skip/i;
335 $test->{todo}{$tnum} = 1 if $istodo;
338 print "$test->{ml}NOK $this" if $test->{ml};
339 if (!$test->{todo}{$this}) {
340 push @{$test->{failed}}, $this;
347 print "$test->{ml}ok $this/$test->{max}" if $test->{ml};
350 $test->{skipped}++ if $isskip;
352 if (defined $reason and defined $test->{skip_reason}) {
353 # print "was: '$skip_reason' new '$reason'\n";
354 $test->{skip_reason} = 'various reasons'
355 if $test->{skip_reason} ne $reason;
356 } elsif (defined $reason) {
357 $test->{skip_reason} = $reason;
360 $test->{bonus}++, $tot->{bonus}++ if $test->{todo}{$this};
364 elsif ($line =~ /^ok\s*(\d*)\s*\#([^\r]*)$/) { # XXX multiline ok?
365 $this = $1 if $1 > 0;
366 print "$test->{ml}ok $this/$test->{max}" if $test->{ml};
371 # an ok or not ok not matching the 3 cases above...
372 # just ignore it for compatibility with TEST
376 if ($this > $test->{next}) {
377 # print "Test output counter mismatch [test $this]\n";
378 # no need to warn probably
379 push @{$test->{failed}}, $test->{next}..$this-1;
381 elsif ($this < $test->{next}) {
382 # we have seen more "ok" lines than the number suggests
383 print "Confused test output: test $this answered after ".
384 "test ", $test->{next}-1, "\n";
385 $test->{next} = $this;
387 $test->{next} = $this + 1;
390 elsif ($line =~ /^Bail out!\s*(.*)/i) { # magic words
391 die "FAILED--Further testing stopped" .
392 ($1 ? ": $1\n" : ".\n");
401 $bonusmsg = (" ($tot->{bonus} subtest".($tot->{bonus} > 1 ? 's' : '').
402 " UNEXPECTEDLY SUCCEEDED)")
405 if ($tot->{skipped}) {
406 $bonusmsg .= ", $tot->{skipped} test"
407 . ($tot->{skipped} != 1 ? 's' : '');
408 if ($tot->{sub_skipped}) {
409 $bonusmsg .= " and $tot->{sub_skipped} subtest"
410 . ($tot->{sub_skipped} != 1 ? 's' : '');
412 $bonusmsg .= ' skipped';
414 elsif ($tot->{sub_skipped}) {
415 $bonusmsg .= ", $tot->{sub_skipped} subtest"
416 . ($tot->{sub_skipped} != 1 ? 's' : '')
423 # VMS has some subtle nastiness with closing the test files.
427 close($fh); # must close to reap child resource values
429 my $wstatus = $Ignore_Exitcode ? 0 : $?; # Can trust $? ?
431 $estatus = ($^O eq 'VMS'
432 ? eval 'use vmsish "status"; $estatus = $?'
435 return($estatus, $wstatus);
439 # Set up the command-line switches to run perl as.
443 open(my $fh, $test) or print "can't open $test. $!\n";
446 $s .= " $ENV{'HARNESS_PERL_SWITCHES'}"
447 if exists $ENV{'HARNESS_PERL_SWITCHES'};
448 $s .= join " ", q[ "-T"], map {qq["-I$_"]} @INC
449 if $first =~ /^#!.*\bperl.*-\w*T/;
451 close($fh) or print "can't close $test. $!\n";
457 # Test program go boom.
458 sub _dubious_return {
459 my($test, $tot, $estatus, $wstatus) = @_;
460 my ($failed, $canon, $percent) = ('??', '??');
462 printf "$test->{ml}dubious\n\tTest returned status $estatus ".
463 "(wstat %d, 0x%x)\n",
465 print "\t\t(VMS status is $estatus)\n" if $^O eq 'VMS';
467 if (corestatus($wstatus)) { # until we have a wait module
468 if ($Have_Devel_Corestack) {
469 Devel::CoreStack::stack($^X);
471 print "\ttest program seems to have generated a core\n";
478 if ($test->{next} == $test->{max} + 1 and not @{$test->{failed}}) {
479 print "\tafter all the subtests completed successfully\n";
481 $failed = 0; # But we do not set $canon!
484 push @{$test->{failed}}, $test->{next}..$test->{max};
485 $failed = @{$test->{failed}};
486 (my $txt, $canon) = canonfailed($test->{max},$test->{skipped},@{$test->{failed}});
487 $percent = 100*(scalar @{$test->{failed}})/$test->{max};
492 return { canon => $canon, max => $test->{max} || '??',
495 estat => $estatus, wstat => $wstatus,
500 sub _garbled_output {
501 my($gibberish) = shift;
502 warn "Confusing test output: '$gibberish'\n";
507 my($failedtests) = @_;
509 my $failed_str = "Failed Test "; # Borrow up to 3 chars from Status
510 my $middle_str = " Status Wstat Total Fail Failed ";
511 my $list_str = "List of Failed";
513 # Figure out our longest name string for formatting purposes.
514 my $max_namelen = length($failed_str);
515 foreach my $script (keys %$failedtests) {
516 my $namelen = length $failedtests->{$script}->{name};
517 $max_namelen = $namelen if $namelen > $max_namelen;
520 my $list_len = $Columns - length($middle_str) - $max_namelen;
521 if ($list_len < length($list_str)) {
522 $list_len = length($list_str);
523 $max_namelen = $Columns - length($middle_str) - $list_len;
524 if ($max_namelen < length($failed_str)) {
525 $max_namelen = length($failed_str);
526 $Columns = $max_namelen + length($middle_str) + $list_len;
530 my $failed_len = $max_namelen - 3;
531 my $fmt_top = "format STDOUT_TOP =\n"
532 . sprintf("%-${failed_len}s", "Failed Test")
538 my $fmt = "format STDOUT =\n"
539 . "@" . "<" x ($max_namelen - 1)
540 . " @>> @>>>> @>>>> @>>> ^##.##% "
541 . "^" . "<" x ($list_len - 1) . "\n"
542 . '{ $Curtest->{name}, $Curtest->{estat},'
543 . ' $Curtest->{wstat}, $Curtest->{max},'
544 . ' $Curtest->{failed}, $Curtest->{percent},'
545 . ' $Curtest->{canon}'
547 . "~~" . " " x ($Columns - $list_len - 2) . "^"
548 . "<" x ($list_len - 1) . "\n"
549 . '$Curtest->{canon}'
557 return($fmt_top, $fmt);
561 my $tried_devel_corestack;
565 eval {require 'wait.ph'};
566 my $ret = defined &WCOREDUMP ? WCOREDUMP($st) : $st & 0200;
568 eval { require Devel::CoreStack; $Have_Devel_Corestack++ }
569 unless $tried_devel_corestack++;
574 sub canonfailed ($@) {
575 my($max,$skipped,@failed) = @_;
577 @failed = sort {$a <=> $b} grep !$seen{$_}++, @failed;
578 my $failed = @failed;
582 my $last = $min = shift @failed;
585 for (@failed, $failed[-1]) { # don't forget the last one
586 if ($_ > $last+1 || $_ == $last) {
590 push @canon, "$min-$last";
597 push @result, "FAILED tests @canon\n";
600 push @result, "FAILED test $last\n";
604 push @result, "\tFailed $failed/$max tests, ";
605 push @result, sprintf("%.2f",100*(1-$failed/$max)), "% okay";
606 my $ender = 's' x ($skipped > 1);
607 my $good = $max - $failed - $skipped;
608 my $goodper = sprintf("%.2f",100*($good/$max));
609 push @result, " (-$skipped skipped test$ender: $good okay, ".
613 my $txt = join "", @result;
622 Test::Harness - run perl standard test scripts with statistics
628 runtests(@test_files);
632 (By using the Test module, you can write test scripts without
633 knowing the exact output this module expects. However, if you need to
634 know the specifics, read on!)
636 Perl test scripts print to standard output C<"ok N"> for each single
637 test, where C<N> is an increasing sequence of integers. The first line
638 output by a standard test script is C<"1..M"> with C<M> being the
639 number of tests that should be run within the test
640 script. Test::Harness::runtests(@tests) runs all the test scripts
641 named as arguments and checks standard output for the expected C<"ok N"> lines.
644 After all tests have been performed, runtests() prints some
645 performance statistics that are computed by the Benchmark module.
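
As a concrete illustration (the file name F<t/example.t> is purely
illustrative and not part of this distribution), a minimal test script
could look like this:

    # t/example.t -- print a header, then one "ok"/"not ok" line per test
    print "1..2\n";
    print( (1 + 1 == 2) ? "ok 1\n" : "not ok 1\n" );
    print( (2 * 2 == 4) ? "ok 2\n" : "not ok 2\n" );

and a small driver could run it through the harness:

    use Test::Harness;
    runtests('t/example.t');
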
647 =head2 The test script output
649 The following explains how Test::Harness interprets the output of your
656 This header tells how many tests there will be. It should be the
657 first line output by your test program (but it's okay if it's preceded by comments).
660 In certain instances, you may not know how many tests you will
661 ultimately be running. In this case, it is permitted (but not
662 encouraged) for the 1..M header to appear as the B<last> line output
663 by your test (again, it can be followed by further comments). But we
664 strongly encourage you to put it first.
666 Under B<no> circumstances should 1..M appear in the middle of your
667 output or more than once.
670 =item B<'ok', 'not ok'. Ok?>
672 Any output from the test script to standard error is ignored and
673 bypassed, and will thus be seen by the user. Lines written to standard
674 output containing C</^(not\s+)?ok\b/> are interpreted as feedback for
675 runtests(). All other lines are discarded.
677 C</^not ok/> indicates a failed test. C</^ok/> is a successful test.
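
For instance, the following (invented) output lines would be counted as
one passing test, one failing test, and one line that runtests() simply
ignores:

    ok 1
    not ok 2
    # a comment; neither ok nor not ok, so it is ignored
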
680 =item B<test numbers>
682 Perl normally expects the 'ok' or 'not ok' to be followed by a test
683 number. It is tolerated if the test numbers after 'ok' are
684 omitted. In this case Test::Harness temporarily maintains its own
685 counter until the script supplies test numbers again. So the following
700 Failed 3/6 tests, 50.00% okay
703 =item B<$Test::Harness::verbose>
705 The global variable $Test::Harness::verbose is exportable and can be
706 used to let runtests() display the standard output of the script
707 without otherwise altering the behavior.
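
For example (a sketch of typical use, not code from this distribution):

    use Test::Harness qw(&runtests $verbose);

    $verbose = 1;          # echo each test script's output while running
    runtests(@test_files);
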
709 =item B<$Test::Harness::switches>
711 The global variable $Test::Harness::switches is exportable and can be
712 used to set perl command line options used for running the test
713 script(s). The default value is C<-w>.
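
For example, to run every test script with warnings and taint checks
enabled (again only a sketch):

    use Test::Harness qw(&runtests $switches);

    $switches = '-w -T';   # replaces the default '-w'
    runtests(@test_files);
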
715 =item B<Skipping tests>
717 If the standard output line contains the substring C< # Skip> (with
718 variations in spacing and case) after C<ok> or C<ok NUMBER>, it is
719 counted as a skipped test. If the whole test script succeeds, the
720 count of skipped tests is included in the generated output.
721 C<Test::Harness> reports the text after C< # Skip\S*\s+> as a reason for skipping.
724 ok 23 # skip Insufficient flogiston pressure.
726 Similarly, an explanation can be included in a C<1..0> line
727 emitted if the test script is skipped completely:
729 1..0 # Skipped: no leverage found
733 If the standard output line contains the substring C< # TODO> after
734 C<not ok> or C<not ok NUMBER>, it is counted as a todo test. The text
735 afterwards is the thing that has to be done before this test will succeed.
738 not ok 13 # TODO harness the power of the atom
740 These tests represent a feature to be implemented or a bug to be fixed
741 and act as something of an executable "thing to do" list. They are
742 B<not> expected to succeed. Should a todo test begin succeeding,
743 Test::Harness will report it as a bonus. This indicates that whatever
744 you were supposed to do has been done and you should promote this to a normal test.
749 As an emergency measure, a test script can decide that further tests
750 are useless (e.g. missing dependencies) and testing should stop
751 immediately. In that case the test script prints the magic words C<Bail out!>
755 to standard output. Any message after these words will be displayed by
756 C<Test::Harness> as the reason why testing is stopped.
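
For example, a test script might bail out when a required module is
missing (the module name here is only illustrative):

    unless (eval { require DBI }) {
        print "Bail out! DBI is not installed\n";
        exit;
    }
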
760 Additional comments may be put into the testing output on their own
761 lines. Comment lines should begin with a '#'; Test::Harness will ignore them.
765 # Life is good, the sun is shining, RAM is cheap.
767 # got 'Bush' expected 'Gore'
773 C<&runtests> is exported by Test::Harness by default.
775 C<$verbose> and C<$switches> are exported upon request.
782 =item C<All tests successful.\nFiles=%d, Tests=%d, %s>
784 If all tests are successful, some statistics about the performance are printed.
787 =item C<FAILED tests %s\n\tFailed %d/%d tests, %.2f%% okay.>
789 For any single script that has failing subtests, statistics like the above are printed.
792 =item C<Test returned status %d (wstat %d)>
794 For scripts that return a non-zero exit status, both C<$? E<gt>E<gt> 8>
795 and C<$?> are printed in a message similar to the above.
797 =item C<Failed 1 test, %.2f%% okay. %s>
799 =item C<Failed %d/%d tests, %.2f%% okay. %s>
801 If not all tests were successful, the script dies with one of the above messages.
804 =item C<FAILED--Further testing stopped%s>
806 If a single subtest decides that further testing will not make sense,
807 the script dies with this message.
815 =item C<HARNESS_IGNORE_EXITCODE>
817 When defined, makes harness ignore the exit status of child processes.
819 =item C<HARNESS_NOTTY>
821 When set to a true value, forces harness to behave as though STDOUT were
822 not a console. You may need to set this if you don't want harness to
823 output more frequent progress messages using carriage returns. Some
824 consoles may not handle carriage returns properly (which results in a
825 somewhat messy output).
827 =item C<HARNESS_COMPILE_TEST>
829 When true, it will make harness attempt to compile the test using
830 C<perlcc> before running it.
832 =item C<HARNESS_FILELEAK_IN_DIR>
834 When set to the name of a directory, harness will check after each
835 test whether new files appeared in that directory, and report them as
837 LEAKED FILES: scr.tmp 0 my.db
839 If relative, the directory name is interpreted relative to the current directory
840 at the moment runtests() was called. Putting an absolute path into
841 C<HARNESS_FILELEAK_IN_DIR> may give more predictable results.
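
For example (the directory name is purely illustrative):

    use Test::Harness;

    $ENV{HARNESS_FILELEAK_IN_DIR} = '/tmp/test-scratch';
    runtests(@test_files);   # files left behind in that directory are reported
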
843 =item C<HARNESS_PERL_SWITCHES>
845 Its value will be prepended to the switches used to invoke perl on
846 each test. For example, setting C<HARNESS_PERL_SWITCHES> to "-W" will
847 run all tests with all warnings enabled.
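
For example, a driver script could enable all warnings in every test
like this (a sketch):

    use Test::Harness;

    $ENV{HARNESS_PERL_SWITCHES} = '-W';   # prepended to the perl switches
    runtests(@test_files);
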
849 =item C<HARNESS_COLUMNS>
851 This value will be used for the width of the terminal. If it is not
852 set then it will default to C<COLUMNS>. If this is not set, it will
853 default to 80. Note that users of Bourne-sh based shells will need to
854 C<export COLUMNS> for this module to use that variable.
856 =item C<HARNESS_ACTIVE>
858 Harness sets this before executing the individual tests. This allows
859 the tests to determine if they are being executed through the harness
860 or by any other means.
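
A test script can use this, for instance, to stay non-interactive under
the harness (sketch only):

    if ($ENV{HARNESS_ACTIVE}) {
        # running under Test::Harness; skip interactive prompts
    }
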
867 L<Test> for writing test scripts, L<Benchmark> for the underlying
868 timing routines and L<Devel::Coverage> for test coverage analysis.
872 Either Tim Bunce or Andreas Koenig, we don't know. What we know for
873 sure is that it was inspired by Larry Wall's TEST script that came
874 with Perl distributions for ages. Numerous anonymous contributors exist.
877 Current maintainers are Andreas Koenig <andreas.koenig@anima.de> and
878 Michael G Schwern <schwern@pobox.com>
882 Test::Harness uses $^X to determine the perl binary to run the tests
883 with. Test scripts running via the shebang (C<#!>) line may not be
884 portable because $^X is not consistent for shebang scripts across
885 platforms. This is not a problem when Test::Harness is run with an
886 absolute path to the perl binary or when $^X can be found in the path.