# -*- Mode: cperl; cperl-indent-level: 4 -*-

our($VERSION, $Verbose, $Switches, $Have_Devel_Corestack, $Curtest,
    $Columns, $verbose, $switches,
    @ISA, @EXPORT, @EXPORT_OK

# Backwards compatibility for exportable variable names.
*switches = \$Switches;

$Have_Devel_Corestack = 0;

$ENV{HARNESS_ACTIVE} = 1;
# Some experimental OS/2 builds have a broken $?.
my $Ignore_Exitcode = $ENV{HARNESS_IGNORE_EXITCODE};

my $Files_In_Dir = $ENV{HARNESS_FILELEAK_IN_DIR};

@EXPORT    = qw(&runtests);
@EXPORT_OK = qw($verbose $switches);

$Columns = $ENV{HARNESS_COLUMNS} || $ENV{COLUMNS} || 80;
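
# Snapshot the entries of a directory; used by the HARNESS_FILELEAK_IN_DIR
# check below to spot files that a test leaves behind.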
sub globdir { opendir DIRH, shift; my @f = readdir DIRH; closedir DIRH; @f }
    my($tot, $failedtests) = _runtests(@tests);
    _show_results($tot, $failedtests);

    return ($tot->{bad} == 0 && $tot->{max});
        tests => scalar @tests,
    # Pass -I flags to children.
    my $old5lib = $ENV{PERL5LIB};

    # VMS has a 255-byte limit on the length of %ENV entries, so
    # toss the ones that involve perl_root, the install location.
        $new5lib = join($Config{path_sep}, grep {!/perl_root/i} @INC);
        $Switches =~ s/-(\S*[A-Z]\S*)/"-$1"/g;

        $new5lib = join($Config{path_sep}, @INC);

    local($ENV{'PERL5LIB'}) = $new5lib;

    # "my VAR = ... if COND" has undefined behavior in Perl;
    # declare first, assign conditionally.
    my @dir_files;
    @dir_files = globdir $Files_In_Dir if defined $Files_In_Dir;
    my $t_start = Benchmark->new;
    foreach my $test (@tests) {
        chop($te);      # XXX chomp? (trims the trailing "t" of "foo.t")

        if ($^O eq 'VMS') { $te =~ s/^.*\.t\./[.t./s; }
        my $blank  = (' ' x 77);
        my $leader = $te . '.' x (20 - length($te));
        $ml = "\r$blank\r$leader"
            if -t STDOUT and not $ENV{HARNESS_NOTTY} and not $Verbose;
        my $s = _set_switches($test);

        my $cmd = ($ENV{'HARNESS_COMPILE_TEST'})
                      ? "./perl -I../lib ../utils/perlcc $test "
                        . "-r 2>> ./compilelog |"
        $cmd = "MCR $cmd" if $^O eq 'VMS';
        open(my $fh, $cmd) or print "can't run $test. $!\n";
        # State of the current test.
            skip_reason => undef,

        my($seen_header, $tests_seen) = (0, 0);
            if (_parse_header($_, \%test, \%tot)) {
                warn "Test header seen twice!\n" if $seen_header;

                warn "1..M can only appear at the beginning or end of tests\n"
                    if $tests_seen && $test{max} < $tests_seen;
            elsif (_parse_test_line($_, \%test, \%tot)) {
        my($estatus, $wstatus) = _close_fh($fh);

            $failedtests{$test} = _dubious_return(\%test, \%tot,
            $failedtests{$test}{name} = $test;
        elsif ($test{ok} == $test{max} && $test{next} == $test{max} + 1) {
            if ($test{max} and $test{skipped} + $test{bonus}) {
                push(@msg, "$test{skipped}/$test{max} skipped: $test{skip_reason}")
                push(@msg, "$test{bonus}/$test{max} unexpectedly succeeded")
                print "$test{ml}ok, " . join(', ', @msg) . "\n";
            } elsif ($test{max}) {
                print "$test{ml}ok\n";
            } elsif (defined $test{skip_reason}) {
                print "skipped: $test{skip_reason}\n";
                print "skipped test on this platform\n";
        } elsif ($test{max}) {
            if ($test{next} <= $test{max}) {
                push @{$test{failed}}, $test{next} .. $test{max};
            if (@{$test{failed}}) {
                my($txt, $canon) = canonfailed($test{max}, $test{skipped},
                print "$test{ml}$txt";
                $failedtests{$test} = { canon   => $canon,
                                        failed  => scalar @{$test{failed}},
                                        percent => 100 * (scalar @{$test{failed}}) / $test{max},
                print "Don't know which tests failed: got $test{ok} ok, " .
                      "expected $test{max}\n";
                $failedtests{$test} = { canon => '??',
        } elsif ($test{next} == 0) {
            print "FAILED before any test output arrived\n";
            $failedtests{$test} = { canon => '??',
        $tot{sub_skipped} += $test{skipped};

        if (defined $Files_In_Dir) {
            my @new_dir_files = globdir $Files_In_Dir;
            if (@new_dir_files != @dir_files) {
                @f{@new_dir_files} = (1) x @new_dir_files;
                delete @f{@dir_files};
                my @f = sort keys %f;
                print "LEAKED FILES: @f\n";
                @dir_files = @new_dir_files;
    $tot{bench} = timediff(Benchmark->new, $t_start);

    if (defined $old5lib) {
        $ENV{PERL5LIB} = $old5lib;
        delete $ENV{PERL5LIB};

    return(\%tot, \%failedtests);
    my($tot, $failedtests) = @_;

    my $bonusmsg = _bonusmsg($tot);

    if ($tot->{bad} == 0 && $tot->{max}) {
        print "All tests successful$bonusmsg.\n";
    } elsif ($tot->{tests} == 0) {
        die "FAILED--no tests were run for some reason.\n";
    } elsif ($tot->{max} == 0) {
        my $blurb = $tot->{tests} == 1 ? "script" : "scripts";
        die "FAILED--$tot->{tests} test $blurb could be run, " .
            "alas--no output ever seen\n";
        $pct = sprintf("%.2f", $tot->{good} / $tot->{tests} * 100);
        my $subpct = sprintf " %d/%d subtests failed, %.2f%% okay.",
                             $tot->{max} - $tot->{ok}, $tot->{max},
                             100 * $tot->{ok} / $tot->{max};
        my($fmt_top, $fmt) = _create_fmts($failedtests);

        # Now write to the formats.
        for my $script (sort keys %$failedtests) {
            $Curtest = $failedtests->{$script};

        $bonusmsg =~ s/^,\s*//;
        print "$bonusmsg.\n" if $bonusmsg;
        die "Failed $tot->{bad}/$tot->{tests} test scripts, $pct% okay." .

    printf("Files=%d, Tests=%d, %s\n",
           $tot->{files}, $tot->{max}, timestr($tot->{bench}, 'nop'));
    my($line, $test, $tot) = @_;

    print $line if $Verbose;

    # 1..10 todo 4 7 10;
    if ($line =~ /^1\.\.([0-9]+) todo([\d\s]+);?/i) {
        for (split(/\s+/, $2)) { $test->{todo}{$_} = 1; }
        $tot->{max} += $test->{max};
    # 1..0 # skip Why? Because I said so!
    elsif ($line =~ /^1\.\.([0-9]+)
                     (\s*\#\s*[Ss]kip\S*(?>\s+) (.+))?
        $tot->{max} += $test->{max};

        $test->{next} = 1 unless $test->{next};
        $test->{skip_reason} = $3 if not $test->{max} and defined $3;
sub _parse_test_line {
    my($line, $test, $tot) = @_;

    if ($line =~ /^(not\s+)?ok\b/i) {
        my $this = $test->{next} || 1;

        if ($line =~ /^(not )?ok\s*(\d*)(\s*#.*)?/) {
            my($not, $tnum, $extra) = ($1, $2, $3);

            $this = $tnum if $tnum;

            my($type, $reason) = $extra =~ /^\s*#\s*([Ss]kip\S*|TODO)(\s+.+)?/
            my($istodo, $isskip);
            if (defined $type) {
                $istodo = $type =~ /TODO/;
                $isskip = $type =~ /skip/i;

            $test->{todo}{$tnum} = 1 if $istodo;

                print "$test->{ml}NOK $this" if $test->{ml};
                if (!$test->{todo}{$this}) {
                    push @{$test->{failed}}, $this;

                print "$test->{ml}ok $this/$test->{max}" if $test->{ml};
                $test->{skipped}++ if $isskip;

                if (defined $reason and defined $test->{skip_reason}) {
                    # Two different skip reasons; collapse to a generic one.
                    $test->{skip_reason} = 'various reasons'
                        if $test->{skip_reason} ne $reason;
                } elsif (defined $reason) {
                    $test->{skip_reason} = $reason;

                $test->{bonus}++, $tot->{bonus}++ if $test->{todo}{$this};
        elsif ($line =~ /^ok\s*(\d*)\s*\#([^\r]*)$/) { # XXX multiline ok?
            $this = $1 if $1 > 0;
            print "$test->{ml}ok $this/$test->{max}" if $test->{ml};

            # an ok or not ok not matching the 3 cases above...
            # just ignore it for compatibility with TEST

        if ($this > $test->{next}) {
            # Tests $test->{next} .. $this-1 never reported in;
            # count the gap as failures (no need to warn).
            push @{$test->{failed}}, $test->{next} .. $this - 1;
        elsif ($this < $test->{next}) {
            # We have seen more "ok" lines than the test numbers suggest.
            print "Confused test output: test $this answered after " .
                  "test ", $test->{next} - 1, "\n";
            $test->{next} = $this;
            $test->{next} = $this + 1;
        elsif ($line =~ /^Bail out!\s*(.*)/i) { # magic words
            die "FAILED--Further testing stopped" .
                ($1 ? ": $1\n" : ".\n");
    $bonusmsg = (" ($tot->{bonus} subtest" . ($tot->{bonus} > 1 ? 's' : '') .
                 " UNEXPECTEDLY SUCCEEDED)")

    if ($tot->{skipped}) {
        $bonusmsg .= ", $tot->{skipped} test"
                     . ($tot->{skipped} != 1 ? 's' : '');
        if ($tot->{sub_skipped}) {
            $bonusmsg .= " and $tot->{sub_skipped} subtest"
                         . ($tot->{sub_skipped} != 1 ? 's' : '');
        $bonusmsg .= ' skipped';
    elsif ($tot->{sub_skipped}) {
        $bonusmsg .= ", $tot->{sub_skipped} subtest"
                     . ($tot->{sub_skipped} != 1 ? 's' : '')
# VMS has some subtle nastiness with closing the test files.
    close($fh);  # Must close to reap child resource values.

    my $wstatus = $Ignore_Exitcode ? 0 : $?;  # Can we trust $??

    $estatus = ($^O eq 'VMS'
                ? eval 'use vmsish "status"; $estatus = $?'

    return($estatus, $wstatus);
# Set up the command-line switches to run perl with.
    open(my $fh, $test) or print "can't open $test. $!\n";

    $s .= " $ENV{'HARNESS_PERL_SWITCHES'}"
        if exists $ENV{'HARNESS_PERL_SWITCHES'};

    # Run in taint mode (propagating @INC) if the shebang line asks for -T.
    $s .= join " ", q[ "-T"], map {qq["-I$_"]} @INC
        if $first =~ /^#!.*\bperl.*-\w*T/;

    close($fh) or print "can't close $test. $!\n";
# Test program go boom.
sub _dubious_return {
    my($test, $tot, $estatus, $wstatus) = @_;
    my($failed, $canon, $percent) = ('??', '??');

    printf "$test->{ml}dubious\n\tTest returned status $estatus " .
           "(wstat %d, 0x%x)\n",
    print "\t\t(VMS status is $estatus)\n" if $^O eq 'VMS';

    if (corestatus($wstatus)) { # until we have a wait module
        if ($Have_Devel_Corestack) {
            Devel::CoreStack::stack($^X);
            print "\ttest program seems to have generated a core\n";

    if ($test->{next} == $test->{max} + 1 and not @{$test->{failed}}) {
        print "\tafter all the subtests completed successfully\n";
        $failed = 0;  # But we do not set $canon!

        push @{$test->{failed}}, $test->{next} .. $test->{max};
        $failed = @{$test->{failed}};
        (my $txt, $canon) = canonfailed($test->{max}, $test->{skipped},
                                        @{$test->{failed}});
        $percent = 100 * (scalar @{$test->{failed}}) / $test->{max};

    return { canon => $canon, max => $test->{max} || '??',
             estat => $estatus, wstat => $wstatus,
sub _garbled_output {
    my $gibberish = shift;
    warn "Confusing test output: '$gibberish'\n";
    my($failedtests) = @_;

    my $failed_str = "Failed Test";
    my $middle_str = " Status Wstat Total Fail Failed ";
    my $list_str   = "List of Failed";

    # Figure out our longest name string for formatting purposes.
    my $max_namelen = length($failed_str);
    foreach my $script (keys %$failedtests) {
        my $namelen = length $failedtests->{$script}->{name};
        $max_namelen = $namelen if $namelen > $max_namelen;

    my $list_len = $Columns - length($middle_str) - $max_namelen;
    if ($list_len < length($list_str)) {
        $list_len = length($list_str);
        $max_namelen = $Columns - length($middle_str) - $list_len;
        if ($max_namelen < length($failed_str)) {
            $max_namelen = length($failed_str);
            $Columns = $max_namelen + length($middle_str) + $list_len;
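
    # The formats are built as strings and eval'd into place so that
    # the column widths can adapt to the longest test name and the
    # current $Columns setting.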
    my $fmt_top = "format STDOUT_TOP =\n"
                . sprintf("%-${max_namelen}s", $failed_str)

    my $fmt = "format STDOUT =\n"
            . "@" . "<" x ($max_namelen - 1)
            . " @>> @>>>> @>>>> @>>> ^##.##% "
            . "^" . "<" x ($list_len - 1) . "\n"
            . '{ $Curtest->{name}, $Curtest->{estat},'
            . ' $Curtest->{wstat}, $Curtest->{max},'
            . ' $Curtest->{failed}, $Curtest->{percent},'
            . ' $Curtest->{canon}'
            . "~~" . " " x ($Columns - $list_len - 2) . "^"
            . "<" x ($list_len - 1) . "\n"
            . '$Curtest->{canon}'

    return($fmt_top, $fmt);
my $tried_devel_corestack;

    eval { require 'wait.ph' };
    my $ret = defined &WCOREDUMP ? WCOREDUMP($st) : $st & 0200;

    eval { require Devel::CoreStack; $Have_Devel_Corestack++ }
        unless $tried_devel_corestack++;
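
# Collapse a sorted list of failed test numbers into a compact summary
# such as "FAILED tests 2-5, 7", plus pass/skip percentages.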
sub canonfailed ($@) {
    my($max, $skipped, @failed) = @_;

    @failed = sort { $a <=> $b } grep !$seen{$_}++, @failed;
    my $failed = @failed;

        my $last = $min = shift @failed;
        for (@failed, $failed[-1]) { # don't forget the last one
            if ($_ > $last + 1 || $_ == $last) {
                    push @canon, "$min-$last";

        push @result, "FAILED tests @canon\n";
        push @result, "FAILED test $last\n";

    push @result, "\tFailed $failed/$max tests, ";
    push @result, sprintf("%.2f", 100 * (1 - $failed/$max)), "% okay";

    my $ender   = 's' x ($skipped > 1);
    my $good    = $max - $failed - $skipped;
    my $goodper = sprintf("%.2f", 100 * ($good/$max));
    push @result, " (-$skipped skipped test$ender: $good okay, " .

    my $txt = join "", @result;
Test::Harness - run Perl standard test scripts with statistics

  runtests(@test_files);
(By using the Test module, you can write test scripts without
knowing the exact output this module expects. However, if you need to
know the specifics, read on!)
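
For illustration, a minimal test script built on the Test module might
look like this (a sketch; plan() and ok() come from Test.pm):

    use Test;
    BEGIN { plan tests => 2 }

    ok(1);          # passes:  prints "ok 1"
    ok(2 + 2, 4);   # compares the two values:  prints "ok 2"

Run under the harness, this emits the C<1..2> header and the C<ok>
lines described below.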
Perl test scripts print to standard output C<"ok N"> for each single
test, where C<N> is an increasing sequence of integers. The first line
output by a standard test script is C<"1..M">, with C<M> being the
number of tests that should be run within the test script.
Test::Harness::runtests(@tests) runs all the test scripts named as
arguments and checks standard output for the expected C<"ok N">
strings.

After all tests have been performed, runtests() prints some
performance statistics that are computed by the Benchmark module.
=head2 The test script output

The following explains how Test::Harness interprets the output of your
This header tells how many tests there will be. It should be the
first line output by your test program (but it's okay if it's preceded
by comments).

In certain instances, you may not know how many tests you will
ultimately be running. In this case, it is permitted (but not
encouraged) for the 1..M header to appear as the B<last> line output
by your test (again, it can be followed by further comments). But we
strongly encourage you to put it first.

Under B<no> circumstances should 1..M appear in the middle of your
output or more than once.
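
For example, a trailing header like this is accepted, though
discouraged:

    ok 1
    ok 2
    1..2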
=item B<'ok', 'not ok'. Ok?>

Any output from the test script to standard error is ignored and
bypassed, and thus will be seen by the user. Lines written to standard
output containing C</^(not\s+)?ok\b/> are interpreted as feedback for
runtests(). All other lines are discarded.

C</^not ok/> indicates a failed test. C</^ok/> is a successful test.
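
For instance, given this standard output

    Loading fixtures...
    ok 1
    not ok 2

the first line is discarded, and runtests() records one passing and
one failing test.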
=item B<test numbers>

Perl normally expects the 'ok' or 'not ok' to be followed by a test
number. It is tolerated if the test numbers after 'ok' are omitted. In
this case Test::Harness temporarily maintains its own counter until
the script supplies test numbers again. So the following test script

    print <<END;
    1..6
    not ok
    ok
    not ok
    ok
    ok
    END

will generate

    FAILED tests 1, 3, 6
    Failed 3/6 tests, 50.00% okay
=item B<$Test::Harness::verbose>

The global variable $Test::Harness::verbose is exportable and can be
used to let runtests() display the standard output of the script
without otherwise altering its behavior.

=item B<$Test::Harness::switches>

The global variable $Test::Harness::switches is exportable and can be
used to set the perl command-line options used for running the test
script(s). The default value is C<-w>.
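
For example, a small driver script might set both before calling
runtests() (a sketch; the C<t/*.t> layout is just an assumption):

    use Test::Harness qw(runtests $verbose $switches);

    $verbose  = 1;        # echo each script's standard output
    $switches = '-w -T';  # warnings plus taint checks for every script
    runtests(glob 't/*.t');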
=item B<Skipping tests>

If the standard output line contains the substring C< # Skip> (with
variations in spacing and case) after C<ok> or C<ok NUMBER>, it is
counted as a skipped test. If the whole test script succeeds, the
count of skipped tests is included in the generated output.
C<Test::Harness> reports the text after C< # Skip\S*\s+> as a reason
for skipping.

    ok 23 # skip Insufficient flogiston pressure.

Similarly, one can include an explanation in a C<1..0> line emitted if
the test script is skipped completely:

    1..0 # Skipped: no leverage found
=item B<Todo tests>

If the standard output line contains the substring C< # TODO> after
C<not ok> or C<not ok NUMBER>, it is counted as a todo test. The text
afterwards is the thing that has to be done before this test will
succeed.

    not ok 13 # TODO harness the power of the atom

These tests represent a feature to be implemented or a bug to be fixed
and act as something of an executable "thing to do" list. They are
B<not> expected to succeed. Should a todo test begin succeeding,
Test::Harness will report it as a bonus. This indicates that whatever
you were supposed to do has been done and you should promote this to a
normal test.
=item B<Bail out!>

As an emergency measure, a test script can decide that further tests
are useless (e.g. missing dependencies) and testing should stop
immediately. In that case the test script prints the magic words

    Bail out!

to standard output. Any message after these words will be displayed by
C<Test::Harness> as the reason why testing is stopped.
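
For example, this line stops the entire run and reports everything
after the magic words as the reason:

    Bail out! MySQL daemon is not running.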
=item B<Comments>

Additional comments may be put into the testing output on their own
lines. Comment lines should begin with a '#'; Test::Harness will
ignore them.

    # Life is good, the sun is shining, RAM is cheap.
    # got 'Bush' expected 'Gore'
C<&runtests> is exported by Test::Harness by default.

C<$verbose> and C<$switches> are exported upon request.
=item C<All tests successful.\nFiles=%d, Tests=%d, %s>

If all tests are successful, some statistics about the performance are
printed.

=item C<FAILED tests %s\n\tFailed %d/%d tests, %.2f%% okay.>

For any single script that has failing subtests, statistics like the
above are printed.

=item C<Test returned status %d (wstat %d)>

For scripts that return a non-zero exit status, both C<$? E<gt>E<gt> 8>
and C<$?> are printed in a message similar to the above.

=item C<Failed 1 test, %.2f%% okay. %s>

=item C<Failed %d/%d tests, %.2f%% okay. %s>

If not all tests were successful, the script dies with one of the
above messages.

=item C<FAILED--Further testing stopped%s>

If a single subtest decides that further testing will not make sense,
the script dies with this message.
=item C<HARNESS_IGNORE_EXITCODE>

Makes harness ignore the exit status of child processes when defined.

=item C<HARNESS_NOTTY>

When set to a true value, forces the harness to behave as though STDOUT
were not a console. You may need to set this if you don't want harness
to output more frequent progress messages using carriage returns. Some
consoles may not handle carriage returns properly (which results in
somewhat messy output).
=item C<HARNESS_COMPILE_TEST>

When true, harness will attempt to compile the test using C<perlcc>
before running it.
=item C<HARNESS_FILELEAK_IN_DIR>

When set to the name of a directory, harness will check after each
test whether new files appeared in that directory, and report them as

    LEAKED FILES: scr.tmp 0 my.db

If relative, the directory name is interpreted with respect to the
current directory at the moment runtests() was called. Putting an
absolute path into C<HARNESS_FILELEAK_IN_DIR> may give more
predictable results.
=item C<HARNESS_PERL_SWITCHES>

Its value will be prepended to the switches used to invoke perl on
each test. For example, setting C<HARNESS_PERL_SWITCHES> to C<-W> will
run all tests with all warnings enabled.
=item C<HARNESS_COLUMNS>

This value will be used as the width of the terminal. If it is not
set then it will default to C<COLUMNS>. If this is not set, it will
default to 80. Note that users of Bourne-sh based shells will need to
C<export COLUMNS> for this module to use that variable.
=item C<HARNESS_ACTIVE>

Harness sets this before executing the individual tests. This allows
the tests to determine if they are being executed through the harness
or by any other means.
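
A test script can use this, for example, to suppress diagnostics that
would clutter the harness display (a sketch):

    # Inside a test script: chatter only when run outside the harness.
    print STDERR "debugging details...\n" unless $ENV{HARNESS_ACTIVE};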
L<Test> for writing test scripts, L<Benchmark> for the underlying
timing routines, and L<Devel::Coverage> for test coverage analysis.
Either Tim Bunce or Andreas Koenig, we don't know. What we know for
sure is that it was inspired by Larry Wall's TEST script, which has
come with perl distributions for ages. Numerous anonymous contributors
exist.

Current maintainers are Andreas Koenig <andreas.koenig@anima.de> and
Michael G Schwern <schwern@pobox.com>.
Test::Harness uses $^X to determine the perl binary with which to run
the tests. Test scripts running via the shebang (C<#!>) line may not
be portable because $^X is not consistent for shebang scripts across
platforms. This is not a problem when Test::Harness is run with an
absolute path to the perl binary or when $^X can be found in the path.