1 # -*- Mode: cperl; cperl-indent-level: 4 -*-
# Package-level configuration and Exporter plumbing (fragmentary
# excerpt; intervening source lines are not visible here).
10 our($VERSION, $Verbose, $Switches, $Have_Devel_Corestack, $Curtest,
11 $Columns, $verbose, $switches,
12 @ISA, @EXPORT, @EXPORT_OK
15 # Backwards compatibility for exportable variable names.
# Alias the historical lowercase export name to the current global.
17 *switches = \$Switches;
# Assume Devel::CoreStack is absent until corestatus() probes for it.
19 $Have_Devel_Corestack = 0;
# Let test scripts detect that they are running under the harness.
23 $ENV{HARNESS_ACTIVE} = 1;
25 # Some experimental versions of OS/2 build have broken $?
26 my $Ignore_Exitcode = $ENV{HARNESS_IGNORE_EXITCODE};
# Optional directory to watch for files leaked by test scripts.
28 my $Files_In_Dir = $ENV{HARNESS_FILELEAK_IN_DIR};
32 @EXPORT = qw(&runtests);
33 @EXPORT_OK = qw($verbose $switches);
# Terminal width: HARNESS_COLUMNS overrides COLUMNS; default is 80.
37 $Columns = $ENV{HARNESS_COLUMNS} || $ENV{COLUMNS} || 80;
# Return every entry (including '.' and '..') of the directory whose
# path is given as the single argument.  Uses a lexical directory
# handle instead of the package-global bareword DIRH, so concurrent or
# nested use can no longer clobber an open handle; an unopenable
# directory yields an empty list, as before (but without warnings from
# reading an unopened handle).
sub globdir {
    opendir(my $dirh, shift) or return;
    my @entries = readdir $dirh;
    closedir $dirh;
    return @entries;
}
# Interior of runtests() (fragmentary excerpt): run all scripts, print
# the summary, and return true only on complete success.
45 my($tot, $failedtests) = _runtests(@tests);
46 _show_results($tot, $failedtests);
# Success requires zero bad scripts AND at least one subtest seen.
48 return ($tot->{bad} == 0 && $tot->{max}) ;
# Interior of _runtests() (fragmentary excerpt).  Runs each test
# script, parses its output line by line, and accumulates per-script
# and total statistics.
65 tests => scalar @tests,
71 # pass -I flags to children
72 my $old5lib = $ENV{PERL5LIB};
74 # VMS has a 255-byte limit on the length of %ENV entries, so
75 # toss the ones that involve perl_root, the install location
79 $new5lib = join($Config{path_sep}, grep {!/perl_root/i;} @INC);
# NOTE(review): quotes switches containing uppercase letters --
# presumably for VMS's benefit; the enclosing branch is not visible.
80 $Switches =~ s/-(\S*[A-Z]\S*)/"-$1"/g;
83 $new5lib = join($Config{path_sep}, @INC);
# Localized so children see the augmented path but the caller's
# environment is restored automatically when this scope exits.
86 local($ENV{'PERL5LIB'}) = $new5lib;
# Snapshot the watched directory so leaked files can be diffed later.
88 my @dir_files = globdir $Files_In_Dir if defined $Files_In_Dir;
89 my $t_start = new Benchmark;
91 foreach my $test (@tests) {
93 chop($te); # XXX chomp?
95 if ($^O eq 'VMS') { $te =~ s/^.*\.t\./[.t./s; }
# $ml carries a carriage-return prefix used to redraw the status line
# in place -- only on a tty, when not verbose, and not suppressed.
96 my $blank = (' ' x 77);
97 my $leader = "$te" . '.' x (20 - length($te));
99 $ml = "\r$blank\r$leader"
100 if -t STDOUT and not $ENV{HARNESS_NOTTY} and not $Verbose;
103 my $s = _set_switches($test);
# HARNESS_COMPILE_TEST routes the script through perlcc first.
105 my $cmd = ($ENV{'HARNESS_COMPILE_TEST'})
106 ? "./perl -I../lib ../utils/perlcc $test "
107 . "-run 2>> ./compilelog |"
109 $cmd = "MCR $cmd" if $^O eq 'VMS';
# NOTE(review): 2-arg piped open with an interpolated command string;
# failure is only printed, not fatal, so the loop continues.
110 open(my $fh, $cmd) or print "can't run $test. $!\n";
112 # state of the current test.
121 skip_reason => undef,
125 my($seen_header, $tests_seen) = (0,0);
# Dispatch each output line: plan header ("1..M") vs. test lines.
127 if( _parse_header($_, \%test, \%tot) ) {
128 warn "Test header seen twice!\n" if $seen_header;
132 warn "1..M can only appear at the beginning or end of tests\n"
133 if $tests_seen && $test{max} < $tests_seen;
135 elsif( _parse_test_line($_, \%test, \%tot) ) {
# Reap the child and classify the outcome of this script.
141 my($estatus, $wstatus) = _close_fh($fh);
144 $failedtests{$test} = _dubious_return(\%test, \%tot,
# Happy path: every planned test ran and passed.
147 elsif ($test{ok} == $test{max} && $test{next} == $test{max}+1) {
148 if ($test{max} and $test{skipped} + $test{bonus}) {
150 push(@msg, "$test{skipped}/$test{max} skipped: $test{skip_reason}")
152 push(@msg, "$test{bonus}/$test{max} unexpectedly succeeded")
154 print "$test{ml}ok, ".join(', ', @msg)."\n";
155 } elsif ($test{max}) {
156 print "$test{ml}ok\n";
157 } elsif (defined $test{skip_reason}) {
158 print "skipped: $test{skip_reason}\n";
161 print "skipped test on this platform\n";
# Some planned tests never ran: pad the failure list up to the plan.
165 } elsif ($test{max}) {
166 if ($test{next} <= $test{max}) {
167 push @{$test{failed}}, $test{next}..$test{max};
169 if (@{$test{failed}}) {
170 my ($txt, $canon) = canonfailed($test{max},$test{skipped},
172 print "$test{ml}$txt";
173 $failedtests{$test} = { canon => $canon,
175 failed => scalar @{$test{failed}},
177 percent => 100*(scalar @{$test{failed}})/$test{max},
# ok-count disagrees with the plan but no individual failures recorded.
182 print "Don't know which tests failed: got $test{ok} ok, ".
183 "expected $test{max}\n";
184 $failedtests{$test} = { canon => '??',
# No plan and no test output at all.
194 } elsif ($test{next} == 0) {
195 print "FAILED before any test output arrived\n";
197 $failedtests{$test} = { canon => '??',
206 $tot{sub_skipped} += $test{skipped};
# File-leak detection: report entries that appeared since the snapshot.
208 if (defined $Files_In_Dir) {
209 my @new_dir_files = globdir $Files_In_Dir;
210 if (@new_dir_files != @dir_files) {
212 @f{@new_dir_files} = (1) x @new_dir_files;
213 delete @f{@dir_files};
214 my @f = sort keys %f;
215 print "LEAKED FILES: @f\n";
216 @dir_files = @new_dir_files;
220 $tot{bench} = timediff(new Benchmark, $t_start);
# Restore the caller's PERL5LIB exactly (delete it if it was unset).
223 if (defined $old5lib) {
224 $ENV{PERL5LIB} = $old5lib;
226 delete $ENV{PERL5LIB};
230 return(\%tot, \%failedtests);
# Interior of _show_results() (fragmentary excerpt): print the overall
# verdict, dying with a summary when any script failed.
235 my($tot, $failedtests) = @_;
238 my $bonusmsg = _bonusmsg($tot);
240 if ($tot->{bad} == 0 && $tot->{max}) {
241 print "All tests successful$bonusmsg.\n";
242 } elsif ($tot->{tests}==0){
243 die "FAILED--no tests were run for some reason.\n";
244 } elsif ($tot->{max} == 0) {
# Scripts ran but produced no test output at all.
245 my $blurb = $tot->{tests}==1 ? "script" : "scripts";
246 die "FAILED--$tot->{tests} test $blurb could be run, ".
247 "alas--no output ever seen\n";
249 $pct = sprintf("%.2f", $tot->{good} / $tot->{tests} * 100);
250 my $subpct = sprintf " %d/%d subtests failed, %.2f%% okay.",
251 $tot->{max} - $tot->{ok}, $tot->{max},
252 100*$tot->{ok}/$tot->{max};
# Emit one formatted row per failing script via Perl's format system;
# the row data travels through the package global $Curtest.
254 my($fmt_top, $fmt) = _create_fmts($failedtests);
256 # Now write to formats
257 for my $script (sort keys %$failedtests) {
258 $Curtest = $failedtests->{$script};
262 $bonusmsg =~ s/^,\s*//;
263 print "$bonusmsg.\n" if $bonusmsg;
264 die "Failed $tot->{bad}/$tot->{tests} test scripts, $pct% okay.".
# Final timing line computed by Benchmark.
269 printf("Files=%d, Tests=%d, %s\n",
270 $tot->{files}, $tot->{max}, timestr($tot->{bench}, 'nop'));
# Interior of _parse_header() (fragmentary excerpt): recognize the
# "1..M" plan line, including "todo" and "# skip" annotations.
275 my($line, $test, $tot) = @_;
279 print $line if $Verbose;
# "1..M todo 4 7;" -- mark the listed numbers as expected failures.
282 if ($line =~ /^1\.\.([0-9]+) todo([\d\s]+);?/i) {
284 for (split(/\s+/, $2)) { $test->{todo}{$_} = 1; }
286 $tot->{max} += $test->{max};
292 # 1..0 # skip Why? Because I said so!
293 elsif ($line =~ /^1\.\.([0-9]+)
294 (\s*\#\s*[Ss]kip\S*(?>\s+) (.+))?
299 $tot->{max} += $test->{max};
301 $test->{next} = 1 unless $test->{next};
# A "1..0 # skip <reason>" plan records why the whole script skipped.
302 $test->{skip_reason} = $3 if not $test->{max} and defined $3;
314 sub _parse_test_line {
# Parse one "ok"/"not ok" output line (fragmentary excerpt); updates
# per-test and total counters, and dies on "Bail out!".
315 my($line, $test, $tot) = @_;
317 if ($line =~ /^(not\s+)?ok\b/i) {
# Fall back to our own counter when the script omits test numbers.
318 my $this = $test->{next} || 1;
320 if ($line =~ /^not ok\s*(\d*)/){ # test failed
321 $this = $1 if length $1 and $1 > 0;
322 print "$test->{ml}NOK $this" if $test->{ml};
# A failing TODO test is not counted as a real failure.
323 if (!$test->{todo}{$this}) {
324 push @{$test->{failed}}, $this;
330 # "ok 23 # skip (you're not cleared for that)"
331 elsif ($line =~ /^ok\s*(\d*)\ *
332 (\s*\#\s*[Ss]kip\S*(?:(?>\s+)(.+))?)?
335 $this = $1 if length $1 and $1 > 0;
336 print "$test->{ml}ok $this/$test->{max}" if $test->{ml};
339 $test->{skipped}++ if defined $2;
# Track the skip reason; differing reasons collapse to a generic one.
341 $reason = 'unknown reason' if defined $2;
342 $reason = $3 if defined $3;
343 if (defined $reason and defined $test->{skip_reason}) {
344 # print "was: '$skip_reason' new '$reason'\n";
345 $test->{skip_reason} = 'various reasons'
346 if $test->{skip_reason} ne $reason;
347 } elsif (defined $reason) {
348 $test->{skip_reason} = $reason;
# A passing TODO test is an unexpected success ("bonus").
350 $test->{bonus}++, $tot->{bonus}++ if $test->{todo}{$this};
353 elsif ($line =~ /^ok\s*(\d*)\s*\#([^\r]*)$/) { # XXX multiline ok?
354 $this = $1 if $1 > 0;
355 print "$test->{ml}ok $this/$test->{max}" if $test->{ml};
360 # an ok or not ok not matching the 3 cases above...
361 # just ignore it for compatibility with TEST
# Reconcile the script's numbering with our own expectation.
365 if ($this > $test->{next}) {
366 # print "Test output counter mismatch [test $this]\n";
367 # no need to warn probably
# Numbers were skipped over; count the gap as failures.
368 push @{$test->{failed}}, $test->{next}..$this-1;
370 elsif ($this < $test->{next}) {
371 #we have seen more "ok" lines than the number suggests
372 print "Confused test output: test $this answered after ".
373 "test ", $test->{next}-1, "\n";
374 $test->{next} = $this;
376 $test->{next} = $this + 1;
# "Bail out!" aborts the entire harness run immediately.
379 elsif ($line =~ /^Bail out!\s*(.*)/i) { # magic words
380 die "FAILED--Further testing stopped" .
381 ($1 ? ": $1\n" : ".\n");
# Interior of _bonusmsg() (fragmentary excerpt): build the trailing
# summary of bonus (unexpectedly passing TODO) and skipped tests.
390 $bonusmsg = (" ($tot->{bonus} subtest".($tot->{bonus} > 1 ? 's' : '').
391 " UNEXPECTEDLY SUCCEEDED)")
394 if ($tot->{skipped}) {
395 $bonusmsg .= ", $tot->{skipped} test"
396 . ($tot->{skipped} != 1 ? 's' : '');
397 if ($tot->{sub_skipped}) {
398 $bonusmsg .= " and $tot->{sub_skipped} subtest"
399 . ($tot->{sub_skipped} != 1 ? 's' : '');
401 $bonusmsg .= ' skipped';
# Subtests were skipped even though no whole script was.
403 elsif ($tot->{sub_skipped}) {
404 $bonusmsg .= ", $tot->{sub_skipped} subtest"
405 . ($tot->{sub_skipped} != 1 ? 's' : '')
412 # VMS has some subtle nastiness with closing the test files.
# Interior of _close_fh() (fragmentary excerpt): close the child pipe
# and derive (exit status, wait status), honoring HARNESS_IGNORE_EXITCODE.
416 close($fh); # must close to reap child resource values
418 my $wstatus = $Ignore_Exitcode ? 0 : $?; # Can trust $? ?
# VMS encodes status differently; use vmsish semantics there.
420 $estatus = ($^O eq 'VMS'
421 ? eval 'use vmsish "status"; $estatus = $?'
424 return($estatus, $wstatus);
428 # Set up the command-line switches to run perl as.
# Interior of _set_switches() (fragmentary excerpt): peeks at the
# script's shebang line and augments the switches accordingly.
432 open(my $fh, $test) or print "can't open $test. $!\n";
435 $s .= " $ENV{'HARNESS_PERL_SWITCHES'}"
436 if exists $ENV{'HARNESS_PERL_SWITCHES'};
# Propagate -T plus explicit -I paths (taint mode ignores PERL5LIB)
# when the shebang requests taint checking.
437 $s .= join " ", q[ "-T"], map {qq["-I$_"]} @INC
438 if $first =~ /^#!.*\bperl.*-\w*T/;
440 close($fh) or print "can't close $test. $!\n";
446 # Test program go boom.
447 sub _dubious_return {
# Fragmentary excerpt: a script exited with a non-zero status; report
# it and build the failure record used by the summary table.
448 my($test, $tot, $estatus, $wstatus) = @_;
449 my ($failed, $canon, $percent) = ('??', '??');
451 printf "$test->{ml}dubious\n\tTest returned status $estatus ".
452 "(wstat %d, 0x%x)\n",
454 print "\t\t(VMS status is $estatus)\n" if $^O eq 'VMS';
# If the child dumped core, try for a stack via Devel::CoreStack.
456 if (corestatus($wstatus)) { # until we have a wait module
457 if ($Have_Devel_Corestack) {
458 Devel::CoreStack::stack($^X);
460 print "\ttest program seems to have generated a core\n";
# All subtests passed before the bad exit: note it, leave canon '??'.
467 if ($test->{next} == $test->{max} + 1 and not @{$test->{failed}}) {
468 print "\tafter all the subtests completed successfully\n";
470 $failed = 0; # But we do not set $canon!
# Otherwise treat every unreached test number as failed.
473 push @{$test->{failed}}, $test->{next}..$test->{max};
474 $failed = @{$test->{failed}};
475 (my $txt, $canon) = canonfailed($test->{max},$test->{skipped},@{$test->{failed}});
476 $percent = 100*(scalar @{$test->{failed}})/$test->{max};
481 return { canon => $canon, max => $test->{max} || '??',
483 name => $test, percent => $percent,
484 estat => $estatus, wstat => $wstatus,
489 sub _garbled_output {
# Warn about an output line that none of the parsers recognized.
490 my($gibberish) = shift;
491 warn "Confusing test output: '$gibberish'\n";
# Interior of _create_fmts() (fragmentary excerpt): build the STDOUT
# format strings for the failed-tests summary table, sized to $Columns.
496 my($failedtests) = @_;
498 my $failed_str = "Failed Test";
499 my $middle_str = " Status Wstat Total Fail Failed ";
500 my $list_str = "List of Failed";
502 # Figure out our longest name string for formatting purposes.
503 my $max_namelen = length($failed_str);
504 foreach my $script (keys %$failedtests) {
505 my $namelen = length $failedtests->{$script}->{name};
506 $max_namelen = $namelen if $namelen > $max_namelen;
# Give remaining width to the failed-test list column; widen $Columns
# itself if both columns would fall below their minimum widths.
509 my $list_len = $Columns - length($middle_str) - $max_namelen;
510 if ($list_len < length($list_str)) {
511 $list_len = length($list_str);
512 $max_namelen = $Columns - length($middle_str) - $list_len;
513 if ($max_namelen < length($failed_str)) {
514 $max_namelen = length($failed_str);
515 $Columns = $max_namelen + length($middle_str) + $list_len;
519 my $fmt_top = "format STDOUT_TOP =\n"
520 . sprintf("%-${max_namelen}s", $failed_str)
# The data format reads its values from the package global $Curtest.
526 my $fmt = "format STDOUT =\n"
527 . "@" . "<" x ($max_namelen - 1)
528 . " @>> @>>>> @>>>> @>>> ^##.##% "
529 . "^" . "<" x ($list_len - 1) . "\n"
530 . '{ $Curtest->{name}, $Curtest->{estat},'
531 . ' $Curtest->{wstat}, $Curtest->{max},'
532 . ' $Curtest->{failed}, $Curtest->{percent},'
533 . ' $Curtest->{canon}'
535 . "~~" . " " x ($Columns - $list_len - 2) . "^"
536 . "<" x ($list_len - 1) . "\n"
537 . '$Curtest->{canon}'
545 return($fmt_top, $fmt);
# Interior of corestatus() (fragmentary excerpt): decide whether the
# wait status indicates a core dump, preferring WCOREDUMP from wait.ph.
549 my $tried_devel_corestack;
553 eval {require 'wait.ph'};
# Fall back to the traditional core-dump bit (0200) without wait.ph.
554 my $ret = defined &WCOREDUMP ? WCOREDUMP($st) : $st & 0200;
# Probe for Devel::CoreStack at most once per run.
556 eval { require Devel::CoreStack; $Have_Devel_Corestack++ }
557 unless $tried_devel_corestack++;
562 sub canonfailed ($@) {
# Fragmentary excerpt: condense the failed test numbers into canonical
# ranges (e.g. "3-5 12") and build the human-readable failure summary.
563 my($max,$skipped,@failed) = @_;
# De-duplicate and sort the failure list numerically.
565 @failed = sort {$a <=> $b} grep !$seen{$_}++, @failed;
566 my $failed = @failed;
570 my $last = $min = shift @failed;
573 for (@failed, $failed[-1]) { # don't forget the last one
574 if ($_ > $last+1 || $_ == $last) {
# Close out the current run of consecutive numbers as "min-max".
578 push @canon, "$min-$last";
585 push @result, "FAILED tests @canon\n";
588 push @result, "FAILED test $last\n";
592 push @result, "\tFailed $failed/$max tests, ";
593 push @result, sprintf("%.2f",100*(1-$failed/$max)), "% okay";
# Adjust the reported percentages when tests were skipped.
594 my $ender = 's' x ($skipped > 1);
595 my $good = $max - $failed - $skipped;
596 my $goodper = sprintf("%.2f",100*($good/$max));
597 push @result, " (-$skipped skipped test$ender: $good okay, ".
601 my $txt = join "", @result;
610 Test::Harness - run perl standard test scripts with statistics
620 (By using the Test module, you can write test scripts without
621 knowing the exact output this module expects. However, if you need to
622 know the specifics, read on!)
624 Perl test scripts print to standard output C<"ok N"> for each single
625 test, where C<N> is an increasing sequence of integers. The first line
626 output by a standard test script is C<"1..M"> with C<M> being the
627 number of tests that should be run within the test
628 script. Test::Harness::runtests(@tests) runs all the testscripts
629 named as arguments and checks standard output for the expected
632 After all tests have been performed, runtests() prints some
633 performance statistics that are computed by the Benchmark module.
635 =head2 The test script output
641 This header tells how many tests there will be. It should be the
642 first line output by a standard test script is C<"1..M"> with C<M> being the
645 In certain instances, you may not know how many tests you will
646 ultimately be running. In this case, it is permitted (but not
647 encouraged) for the 1..M header to appear as the B<last> line output
648 by your test (again, it can be followed by further comments). But we
649 strongly encourage you to put it first.
651 Under B<no> circumstances should 1..M appear in the middle of your
652 output or more than once.
655 =item B<'ok', 'not ok'. Ok?>
657 Any output from the testscript to standard error is ignored and
658 bypassed, thus will be seen by the user. Lines written to standard
659 output containing C</^(not\s+)?ok\b/> are interpreted as feedback for
660 runtests(). All other lines are discarded.
662 C</^not ok/> indicates a failed test. C</^ok/> is a successful test.
665 =item B<test numbers>
667 Perl normally expects the 'ok' or 'not ok' to be followed by a test
668 number. It is tolerated if the test numbers after 'ok' are
669 omitted. In this case Test::Harness maintains temporarily its own
670 counter until the script supplies test numbers again. So the following
685 Failed 3/6 tests, 50.00% okay
688 =item B<$Test::Harness::verbose>
690 The global variable $Test::Harness::verbose is exportable and can be
691 used to let runtests() display the standard output of the script
692 without altering the behavior otherwise.
694 =item B<$Test::Harness::switches>
696 The global variable $Test::Harness::switches is exportable and can be
697 used to set perl command line options used for running the test
698 script(s). The default value is C<-w>.
700 =item B<Skipping tests>
702 If the standard output line contains substring C< # Skip> (with
703 variations in spacing and case) after C<ok> or C<ok NUMBER>, it is
704 counted as a skipped test. If the whole testscript succeeds, the
705 count of skipped tests is included in the generated output.
707 C<Test::Harness> reports the text after C< # Skip\S*\s+> as a reason
708 for skipping. Similarly, one can include a similar explanation in a
709 C<1..0> line emitted if the test script is skipped completely:
711 1..0 # Skipped: no leverage found
715 As an emergency measure, a test script can decide that further tests
716 are useless (e.g. missing dependencies) and testing should stop
717 immediately. In that case the test script prints the magic words
721 to standard output. Any message after these words will be displayed by
722 C<Test::Harness> as the reason why testing is stopped.
726 Additional comments may be put into the testing output on their own
727 lines. Comment lines should begin with a '#', Test::Harness will
731 # Life is good, the sun is shining, RAM is cheap.
733 # got 'Bush' expected 'Gore'
738 C<&runtests> is exported by Test::Harness per default.
740 C<$verbose> and C<$switches> are exported upon request.
747 =item C<All tests successful.\nFiles=%d, Tests=%d, %s>
749 If all tests are successful some statistics about the performance are
752 =item C<FAILED tests %s\n\tFailed %d/%d tests, %.2f%% okay.>
754 For any single script that has failing subtests statistics like the
757 =item C<Test returned status %d (wstat %d)>
759 Scripts that return a non-zero exit status, both C<$? E<gt>E<gt> 8>
760 and C<$?> are printed in a message similar to the above.
762 =item C<Failed 1 test, %.2f%% okay. %s>
764 =item C<Failed %d/%d tests, %.2f%% okay. %s>
766 If not all tests were successful, the script dies with one of the
769 =item C<FAILED--Further testing stopped%s>
771 If a single subtest decides that further testing will not make sense,
772 the script dies with this message.
778 Setting C<HARNESS_IGNORE_EXITCODE> makes harness ignore the exit status
781 Setting C<HARNESS_NOTTY> to a true value forces it to behave as though
782 STDOUT were not a console. You may need to set this if you don't want
783 harness to output more frequent progress messages using carriage returns.
784 Some consoles may not handle carriage returns properly (which results
785 in a somewhat messy output).
787 Setting C<HARNESS_COMPILE_TEST> to a true value will make harness attempt
788 to compile the test using C<perlcc> before running it.
790 If C<HARNESS_FILELEAK_IN_DIR> is set to the name of a directory, harness
791 will check after each test whether new files appeared in that directory,
794 LEAKED FILES: scr.tmp 0 my.db
796 If relative, directory name is with respect to the current directory at
797 the moment runtests() was called. Putting absolute path into
798 C<HARNESS_FILELEAK_IN_DIR> may give more predictable results.
800 The value of C<HARNESS_PERL_SWITCHES> will be prepended to the
801 switches used to invoke perl on each test. For example, setting
802 C<HARNESS_PERL_SWITCHES> to "-W" will run all tests with all
805 If C<HARNESS_COLUMNS> is set, then this value will be used for the
806 width of the terminal. If it is not set then it will default to
807 C<COLUMNS>. If this is not set, it will default to 80. Note that users
808 of Bourne-sh based shells will need to C<export COLUMNS> for this
809 module to use that variable.
811 Harness sets C<HARNESS_ACTIVE> before executing the individual tests.
812 This allows the tests to determine if they are being executed through the
813 harness or by any other means.
817 L<Test> for writing test scripts and also L<Benchmark> for the
818 underlying timing routines.
822 Either Tim Bunce or Andreas Koenig, we don't know. What we know for
823 sure is, that it was inspired by Larry Wall's TEST script that came
824 with perl distributions for ages. Numerous anonymous contributors
825 exist. Current maintainer is Andreas Koenig.
829 Test::Harness uses $^X to determine the perl binary to run the tests
830 with. Test scripts running via the shebang (C<#!>) line may not be
831 portable because $^X is not consistent for shebang scripts across
832 platforms. This is no problem when Test::Harness is run with an
833 absolute path to the perl binary or when $^X can be found in the path.