1 # -*- Mode: cperl; cperl-indent-level: 4 -*-
# Package globals and exporter configuration for Test::Harness.
# NOTE(review): this listing is a fragment; the embedded original line
# numbers are non-contiguous, so intervening declarations are not visible.
10 our($VERSION, $Verbose, $Switches, $Have_Devel_Corestack, $Curtest,
11 $Columns, $verbose, $switches,
12 @ISA, @EXPORT, @EXPORT_OK
15 # Backwards compatibility for exportable variable names.
# Alias the old lowercase export name to the capitalized global.
17 *switches = \$Switches;
19 $Have_Devel_Corestack = 0;
# Advertise to child test scripts that they are run under the harness.
23 $ENV{HARNESS_ACTIVE} = 1;
25 # Some experimental versions of OS/2 build have broken $?
26 my $Ignore_Exitcode = $ENV{HARNESS_IGNORE_EXITCODE};
# Optional directory to watch for files leaked by test scripts.
28 my $Files_In_Dir = $ENV{HARNESS_FILELEAK_IN_DIR};
32 @EXPORT = qw(&runtests);
33 @EXPORT_OK = qw($verbose $switches);
# Terminal width: HARNESS_COLUMNS overrides COLUMNS; defaults to 80.
37 $Columns = $ENV{HARNESS_COLUMNS} || $ENV{COLUMNS} || 80;
# globdir($dir) - return every entry (including '.' and '..') of $dir.
# Fixes in this version: uses a lexical directory handle instead of the
# original's bareword DIRH (a package-global glob that other code could
# clobber), and checks opendir for failure, returning an empty list when
# the directory cannot be read (same observable result as the original,
# minus a "readdir on closed dirhandle" warning).
sub globdir {
    my $dir = shift;
    opendir my $dirh, $dir or return;
    my @entries = readdir $dirh;
    closedir $dirh;
    return @entries;
}
# runtests(@tests) -- fragment. Runs the given scripts via _runtests(),
# prints the summary via _show_results(), and returns true only when no
# script failed AND at least one subtest was planned.
45 my($tot, $failedtests) = _runtests(@tests);
46 _show_results($tot, $failedtests);
# Truthy only if nothing was bad and some tests actually existed.
48 return ($tot->{bad} == 0 && $tot->{max}) ;
# _runtests(@tests) -- fragment of the main driver loop. For each test
# script: build a perl command line, run it through a pipe, parse its
# output with _parse_header()/_parse_test_line(), classify the outcome,
# and accumulate totals in %tot and failures in %failedtests.
# NOTE(review): the embedded original line numbers are non-contiguous;
# comments below annotate only the visible lines.
65 tests => scalar @tests,
71 # pass -I flags to children
72 my $old5lib = $ENV{PERL5LIB};
74 # VMS has a 255-byte limit on the length of %ENV entries, so
75 # toss the ones that involve perl_root, the install location
79 $new5lib = join($Config{path_sep}, grep {!/perl_root/i;} @INC);
# Quote any switch containing an uppercase letter -- presumably for the
# VMS command line; TODO confirm intent against full source.
80 $Switches =~ s/-(\S*[A-Z]\S*)/"-$1"/g;
83 $new5lib = join($Config{path_sep}, @INC);
# Propagate @INC to child processes for the duration of the run only.
85 local($ENV{'PERL5LIB'}) = $new5lib;
# Snapshot of the watched directory for file-leak detection (below).
87 my @dir_files = globdir $Files_In_Dir if defined $Files_In_Dir;
88 my $t_start = new Benchmark;
90 foreach my $test (@tests) {
92 chop($te); # XXX chomp?
94 if ($^O eq 'VMS') { $te =~ s/^.*\.t\./[.t./s; }
95 my $blank = (' ' x 77);
96 my $leader = "$te" . '.' x (20 - length($te));
# $ml carries the carriage-return prefix used for in-place progress
# updates; only set when STDOUT is a tty and not suppressed.
98 $ml = "\r$blank\r$leader"
99 if -t STDOUT and not $ENV{HARNESS_NOTTY} and not $Verbose;
102 my $s = _set_switches($test);
# Either compile the test with perlcc first, or run it directly.
104 my $cmd = ($ENV{'HARNESS_COMPILE_TEST'})
105 ? "./perl -I../lib ../utils/perlcc $test "
106 . "-run 2>> ./compilelog |"
108 $cmd = "MCR $cmd" if $^O eq 'VMS';
# NOTE(review): two-arg open of a command pipe built from $test above.
109 open(my $fh, $cmd) or print "can't run $test. $!\n";
111 # state of the current test.
120 skip_reason => undef,
124 my($seen_header, $tests_seen) = (0,0);
126 if( _parse_header($_, \%test, \%tot) ) {
127 warn "Test header seen twice!\n" if $seen_header;
131 warn "1..M can only appear at the beginning or end of tests\n"
132 if $tests_seen && $test{max} < $tests_seen;
134 elsif( _parse_test_line($_, \%test, \%tot) ) {
140 my($estatus, $wstatus) = _close_fh($fh);
# Non-zero exit from the child: record a dubious result.
143 $failedtests{$test} = _dubious_return(\%test, \%tot,
# All planned tests were seen and the counter ended where expected.
146 elsif ($test{ok} == $test{max} && $test{next} == $test{max}+1) {
147 if ($test{max} and $test{skipped} + $test{bonus}) {
149 push(@msg, "$test{skipped}/$test{max} skipped: $test{skip_reason}")
151 push(@msg, "$test{bonus}/$test{max} unexpectedly succeeded")
153 print "$test{ml}ok, ".join(', ', @msg)."\n";
154 } elsif ($test{max}) {
155 print "$test{ml}ok\n";
156 } elsif (defined $test{skip_reason}) {
157 print "skipped: $test{skip_reason}\n";
160 print "skipped test on this platform\n";
# Some planned tests never reported: mark the remainder failed.
164 } elsif ($test{max}) {
165 if ($test{next} <= $test{max}) {
166 push @{$test{failed}}, $test{next}..$test{max};
168 if (@{$test{failed}}) {
169 my ($txt, $canon) = canonfailed($test{max},$test{skipped},
171 print "$test{ml}$txt";
172 $failedtests{$test} = { canon => $canon,
174 failed => scalar @{$test{failed}},
176 percent => 100*(scalar @{$test{failed}})/$test{max},
181 print "Don't know which tests failed: got $test{ok} ok, ".
182 "expected $test{max}\n";
183 $failedtests{$test} = { canon => '??',
193 } elsif ($test{next} == 0) {
194 print "FAILED before any test output arrived\n";
196 $failedtests{$test} = { canon => '??',
205 $tot{sub_skipped} += $test{skipped};
# File-leak check: report any files that appeared in the watched dir.
207 if (defined $Files_In_Dir) {
208 my @new_dir_files = globdir $Files_In_Dir;
209 if (@new_dir_files != @dir_files) {
211 @f{@new_dir_files} = (1) x @new_dir_files;
212 delete @f{@dir_files};
213 my @f = sort keys %f;
214 print "LEAKED FILES: @f\n";
215 @dir_files = @new_dir_files;
219 $tot{bench} = timediff(new Benchmark, $t_start);
# Restore the caller's PERL5LIB exactly as it was.
222 if (defined $old5lib) {
223 $ENV{PERL5LIB} = $old5lib;
225 delete $ENV{PERL5LIB};
229 return(\%tot, \%failedtests);
# _show_results($tot, $failedtests) -- fragment. Prints the overall
# summary: a success banner, or dies with a FAILED message; on partial
# failure, writes one row per failed script through the formats built
# by _create_fmts().
234 my($tot, $failedtests) = @_;
237 my $bonusmsg = _bonusmsg($tot);
239 if ($tot->{bad} == 0 && $tot->{max}) {
240 print "All tests successful$bonusmsg.\n";
241 } elsif ($tot->{tests}==0){
242 die "FAILED--no tests were run for some reason.\n";
243 } elsif ($tot->{max} == 0) {
244 my $blurb = $tot->{tests}==1 ? "script" : "scripts";
245 die "FAILED--$tot->{tests} test $blurb could be run, ".
246 "alas--no output ever seen\n";
# Percentage of scripts (not subtests) that passed.
248 $pct = sprintf("%.2f", $tot->{good} / $tot->{tests} * 100);
249 my $subpct = sprintf " %d/%d subtests failed, %.2f%% okay.",
250 $tot->{max} - $tot->{ok}, $tot->{max},
251 100*$tot->{ok}/$tot->{max};
253 my($fmt_top, $fmt) = _create_fmts($failedtests);
255 # Now write to formats
256 for my $script (sort keys %$failedtests) {
# $Curtest is the package global the STDOUT format reads from.
257 $Curtest = $failedtests->{$script};
261 $bonusmsg =~ s/^,\s*//;
262 print "$bonusmsg.\n" if $bonusmsg;
263 die "Failed $tot->{bad}/$tot->{tests} test scripts, $pct% okay.".
268 printf("Files=%d, Tests=%d, %s\n",
269 $tot->{files}, $tot->{max}, timestr($tot->{bench}, 'nop'));
# _parse_header($line, \%test, \%tot) -- fragment. Recognizes the
# "1..M" plan line, with either a "todo N N ..." list or an optional
# "# skip REASON" tail, and records max/todo/skip_reason in the
# per-test state while bumping the grand total.
274 my($line, $test, $tot) = @_;
278 print $line if $Verbose;
# Old-style plan with an inline todo list: "1..M todo 4 12;"
281 if ($line =~ /^1\.\.([0-9]+) todo([\d\s]+);?/i) {
283 for (split(/\s+/, $2)) { $test->{todo}{$_} = 1; }
285 $tot->{max} += $test->{max};
291 # 1..0 # skip Why? Because I said so!
292 elsif ($line =~ /^1\.\.([0-9]+)
293 (\s*\#\s*[Ss]kip\S*(?>\s+) (.+))?
298 $tot->{max} += $test->{max};
300 $test->{next} = 1 unless $test->{next};
# A skip reason only applies when the whole script is skipped (1..0).
301 $test->{skip_reason} = $3 if not $test->{max} and defined $3;
# _parse_test_line($line, \%test, \%tot) -- fragment. Parses a single
# "ok N" / "not ok N" line (with optional "# skip" or comment tail) or
# a "Bail out!" line, updating pass/fail/skip/todo counters and the
# expected next test number.
313 sub _parse_test_line {
314 my($line, $test, $tot) = @_;
316 if ($line =~ /^(not\s+)?ok\b/i) {
# Default to the expected counter when the script omits test numbers.
317 my $this = $test->{next} || 1;
319 if ($line =~ /^not ok\s*(\d*)/){ # test failed
320 $this = $1 if length $1 and $1 > 0;
321 print "$test->{ml}NOK $this" if $test->{ml};
# A failing TODO test is expected to fail, so don't record it.
322 if (!$test->{todo}{$this}) {
323 push @{$test->{failed}}, $this;
329 # "ok 23 # skip (you're not cleared for that)"
330 elsif ($line =~ /^ok\s*(\d*)\ *
331 (\s*\#\s*[Ss]kip\S*(?:(?>\s+)(.+))?)?
334 $this = $1 if length $1 and $1 > 0;
335 print "$test->{ml}ok $this/$test->{max}" if $test->{ml};
338 $test->{skipped}++ if defined $2;
340 $reason = 'unknown reason' if defined $2;
341 $reason = $3 if defined $3;
# Multiple distinct skip reasons collapse to 'various reasons'.
342 if (defined $reason and defined $test->{skip_reason}) {
343 # print "was: '$skip_reason' new '$reason'\n";
344 $test->{skip_reason} = 'various reasons'
345 if $test->{skip_reason} ne $reason;
346 } elsif (defined $reason) {
347 $test->{skip_reason} = $reason;
# A passing TODO test is an unexpected success ("bonus").
349 $test->{bonus}++, $tot->{bonus}++ if $test->{todo}{$this};
352 elsif ($line =~ /^ok\s*(\d*)\s*\#([^\r]*)$/) { # XXX multiline ok?
353 $this = $1 if $1 > 0;
354 print "$test->{ml}ok $this/$test->{max}" if $test->{ml};
359 # an ok or not ok not matching the 3 cases above...
360 # just ignore it for compatibility with TEST
# Counter jumped forward: the tests in the gap never reported -> failed.
364 if ($this > $test->{next}) {
365 # print "Test output counter mismatch [test $this]\n";
366 # no need to warn probably
367 push @{$test->{failed}}, $test->{next}..$this-1;
369 elsif ($this < $test->{next}) {
370 #we have seen more "ok" lines than the number suggests
371 print "Confused test output: test $this answered after ".
372 "test ", $test->{next}-1, "\n";
373 $test->{next} = $this;
375 $test->{next} = $this + 1;
# "Bail out!" aborts the whole harness run immediately.
378 elsif ($line =~ /^Bail out!\s*(.*)/i) { # magic words
379 die "FAILED--Further testing stopped" .
380 ($1 ? ": $1\n" : ".\n");
# _bonusmsg($tot) -- fragment. Builds the parenthetical summary of
# unexpectedly-succeeding TODO subtests plus the count of skipped
# tests and subtests, with singular/plural handled inline.
389 $bonusmsg = (" ($tot->{bonus} subtest".($tot->{bonus} > 1 ? 's' : '').
390 " UNEXPECTEDLY SUCCEEDED)")
393 if ($tot->{skipped}) {
394 $bonusmsg .= ", $tot->{skipped} test"
395 . ($tot->{skipped} != 1 ? 's' : '');
396 if ($tot->{sub_skipped}) {
397 $bonusmsg .= " and $tot->{sub_skipped} subtest"
398 . ($tot->{sub_skipped} != 1 ? 's' : '');
400 $bonusmsg .= ' skipped';
# Subtests were skipped even though no whole script was skipped.
402 elsif ($tot->{sub_skipped}) {
403 $bonusmsg .= ", $tot->{sub_skipped} subtest"
404 . ($tot->{sub_skipped} != 1 ? 's' : '')
# _close_fh($fh) -- fragment. Closes the pipe to the child test
# process and derives its exit status ($estatus) and wait status
# ($wstatus). HARNESS_IGNORE_EXITCODE zeroes the wait status.
411 # VMS has some subtle nastiness with closing the test files.
415 close($fh); # must close to reap child resource values
417 my $wstatus = $Ignore_Exitcode ? 0 : $?; # Can trust $? ?
# On VMS, interpret $? under vmsish status semantics.
419 $estatus = ($^O eq 'VMS'
420 ? eval 'use vmsish "status"; $estatus = $?'
423 return($estatus, $wstatus);
# _set_switches($test) -- fragment. Assembles the perl switches for a
# test script: appends HARNESS_PERL_SWITCHES, and adds "-T" plus
# quoted -I paths when the script's shebang line requests taint mode.
427 # Set up the command-line switches to run perl as.
# NOTE(review): two-arg open; a hostile filename could inject a mode.
431 open(my $fh, $test) or print "can't open $test. $!\n";
434 $s .= " $ENV{'HARNESS_PERL_SWITCHES'}"
435 if exists $ENV{'HARNESS_PERL_SWITCHES'};
# Shebang requested taint mode: re-add -T and pass @INC explicitly,
# since tainted runs ignore PERL5LIB.
436 $s .= join " ", q[ "-T"], map {qq["-I$_"]} @INC
437 if $first =~ /^#!.*\bperl.*-\w*T/;
439 close($fh) or print "can't close $test. $!\n";
# _dubious_return(\%test, \%tot, $estatus, $wstatus) -- fragment.
# Called when a test script exits with a non-zero status: reports the
# status, checks for a core dump, marks unreported tests as failed,
# and returns the failure-description hashref for the summary table.
445 # Test program go boom.
446 sub _dubious_return {
447 my($test, $tot, $estatus, $wstatus) = @_;
# Only two initializers for three variables: $percent starts undef
# and is assigned below only when it can be computed.
448 my ($failed, $canon, $percent) = ('??', '??');
450 printf "$test->{ml}dubious\n\tTest returned status $estatus ".
451 "(wstat %d, 0x%x)\n",
453 print "\t\t(VMS status is $estatus)\n" if $^O eq 'VMS';
455 if (corestatus($wstatus)) { # until we have a wait module
456 if ($Have_Devel_Corestack) {
457 Devel::CoreStack::stack($^X);
459 print "\ttest program seems to have generated a core\n";
# Every subtest passed, yet the script still exited non-zero.
466 if ($test->{next} == $test->{max} + 1 and not @{$test->{failed}}) {
467 print "\tafter all the subtests completed successfully\n";
469 $failed = 0; # But we do not set $canon!
# Otherwise: everything from the next expected test onward is failed.
472 push @{$test->{failed}}, $test->{next}..$test->{max};
473 $failed = @{$test->{failed}};
474 (my $txt, $canon) = canonfailed($test->{max},$test->{skipped},@{$test->{failed}});
475 $percent = 100*(scalar @{$test->{failed}})/$test->{max};
480 return { canon => $canon, max => $test->{max} || '??',
482 name => $test, percent => $percent,
483 estat => $estatus, wstat => $wstatus,
# _garbled_output($gibberish) -- warn about an unparseable output line.
# (Closing brace is not visible in this fragment.)
488 sub _garbled_output {
489 my($gibberish) = shift;
490 warn "Confusing test output: '$gibberish'\n";
# _create_fmts($failedtests) -- fragment. Computes column widths from
# $Columns and the longest failed-script name, then builds the source
# text of the STDOUT_TOP / STDOUT formats used to print the failure
# table (the formats read from the package global $Curtest).
495 my($failedtests) = @_;
497 my $failed_str = "Failed Test";
498 my $middle_str = " Status Wstat Total Fail Failed ";
499 my $list_str = "List of Failed";
501 # Figure out our longest name string for formatting purposes.
502 my $max_namelen = length($failed_str);
503 foreach my $script (keys %$failedtests) {
504 my $namelen = length $failedtests->{$script}->{name};
505 $max_namelen = $namelen if $namelen > $max_namelen;
# Shrink the name column if needed; widen $Columns as a last resort.
508 my $list_len = $Columns - length($middle_str) - $max_namelen;
509 if ($list_len < length($list_str)) {
510 $list_len = length($list_str);
511 $max_namelen = $Columns - length($middle_str) - $list_len;
512 if ($max_namelen < length($failed_str)) {
513 $max_namelen = length($failed_str);
514 $Columns = $max_namelen + length($middle_str) + $list_len;
518 my $fmt_top = "format STDOUT_TOP =\n"
519 . sprintf("%-${max_namelen}s", $failed_str)
# Picture line plus the value list the format evaluates per row.
525 my $fmt = "format STDOUT =\n"
526 . "@" . "<" x ($max_namelen - 1)
527 . " @>> @>>>> @>>>> @>>> ^##.##% "
528 . "^" . "<" x ($list_len - 1) . "\n"
529 . '{ $Curtest->{name}, $Curtest->{estat},'
530 . ' $Curtest->{wstat}, $Curtest->{max},'
531 . ' $Curtest->{failed}, $Curtest->{percent},'
532 . ' $Curtest->{canon}'
# Continuation line ("~~") wraps long canon lists across rows.
534 . "~~" . " " x ($Columns - $list_len - 2) . "^"
535 . "<" x ($list_len - 1) . "\n"
536 . '$Curtest->{canon}'
544 return($fmt_top, $fmt);
# corestatus($st) -- fragment. Guesses whether wait status $st
# indicates a core dump: uses WCOREDUMP from wait.ph when available,
# otherwise falls back to testing bit 0200. Also probes once for
# Devel::CoreStack, recording availability in $Have_Devel_Corestack.
547 my $tried_devel_corestack;
552 eval {require 'wait.ph'};
553 my $ret = defined &WCOREDUMP ? WCOREDUMP($st) : $st & 0200;
# Probe only on the first call; the closure flag remembers the attempt.
555 eval { require Devel::CoreStack; $Have_Devel_Corestack++ }
556 unless $tried_devel_corestack++;
# canonfailed($max, $skipped, @failed) -- fragment. Deduplicates and
# numerically sorts the failed test numbers, collapses consecutive
# runs into "a-b" ranges, and builds the human-readable FAILED /
# percentage summary text returned to the caller.
561 sub canonfailed ($@) {
562 my($max,$skipped,@failed) = @_;
# Numeric sort with duplicate suppression via %seen.
564 @failed = sort {$a <=> $b} grep !$seen{$_}++, @failed;
565 my $failed = @failed;
569 my $last = $min = shift @failed;
572 for (@failed, $failed[-1]) { # don't forget the last one
573 if ($_ > $last+1 || $_ == $last) {
577 push @canon, "$min-$last";
584 push @result, "FAILED tests @canon\n";
587 push @result, "FAILED test $last\n";
591 push @result, "\tFailed $failed/$max tests, ";
592 push @result, sprintf("%.2f",100*(1-$failed/$max)), "% okay";
# 's' x (cond) yields 's' or '' -- cheap pluralization.
593 my $ender = 's' x ($skipped > 1);
594 my $good = $max - $failed - $skipped;
595 my $goodper = sprintf("%.2f",100*($good/$max));
596 push @result, " (-$skipped skipped test$ender: $good okay, ".
600 my $txt = join "", @result;
609 Test::Harness - run perl standard test scripts with statistics
619 (By using the Test module, you can write test scripts without
620 knowing the exact output this module expects. However, if you need to
621 know the specifics, read on!)
623 Perl test scripts print to standard output C<"ok N"> for each single
624 test, where C<N> is an increasing sequence of integers. The first line
625 output by a standard test script is C<"1..M"> with C<M> being the
626 number of tests that should be run within the test
627 script. Test::Harness::runtests(@tests) runs all the testscripts
628 named as arguments and checks standard output for the expected
631 After all tests have been performed, runtests() prints some
632 performance statistics that are computed by the Benchmark module.
634 =head2 The test script output
640 This header tells how many tests there will be. It should be the
641 first line output by your test program (but it's okay if it's preceded
644 In certain instances, you may not know how many tests you will
645 ultimately be running. In this case, it is permitted (but not
646 encouraged) for the 1..M header to appear as the B<last> line output
647 by your test (again, it can be followed by further comments). But we
648 strongly encourage you to put it first.
650 Under B<no> circumstances should 1..M appear in the middle of your
651 output or more than once.
654 =item B<'ok', 'not ok'. Ok?>
656 Any output from the testscript to standard error is ignored and
657 bypassed, thus will be seen by the user. Lines written to standard
658 output containing C</^(not\s+)?ok\b/> are interpreted as feedback for
659 runtests(). All other lines are discarded.
661 C</^not ok/> indicates a failed test. C</^ok/> is a successful test.
664 =item B<test numbers>
666 Perl normally expects the 'ok' or 'not ok' to be followed by a test
667 number. It is tolerated if the test numbers after 'ok' are
668 omitted. In this case Test::Harness maintains temporarily its own
669 counter until the script supplies test numbers again. So the following
684 Failed 3/6 tests, 50.00% okay
687 =item B<$Test::Harness::verbose>
689 The global variable $Test::Harness::verbose is exportable and can be
690 used to let runtests() display the standard output of the script
691 without altering the behavior otherwise.
693 =item B<$Test::Harness::switches>
695 The global variable $Test::Harness::switches is exportable and can be
696 used to set perl command line options used for running the test
697 script(s). The default value is C<-w>.
699 =item B<Skipping tests>
701 If the standard output line contains substring C< # Skip> (with
702 variations in spacing and case) after C<ok> or C<ok NUMBER>, it is
703 counted as a skipped test. If the whole testscript succeeds, the
704 count of skipped tests is included in the generated output.
706 C<Test::Harness> reports the text after C< # Skip\S*\s+> as a reason
707 for skipping. Similarly, one can include a similar explanation in a
708 C<1..0> line emitted if the test script is skipped completely:
710 1..0 # Skipped: no leverage found
714 As an emergency measure, a test script can decide that further tests
715 are useless (e.g. missing dependencies) and testing should stop
716 immediately. In that case the test script prints the magic words
720 to standard output. Any message after these words will be displayed by
721 C<Test::Harness> as the reason why testing is stopped.
725 Additional comments may be put into the testing output on their own
726 lines. Comment lines should begin with a '#', Test::Harness will
730 # Life is good, the sun is shining, RAM is cheap.
732 # got 'Bush' expected 'Gore'
737 C<&runtests> is exported by Test::Harness per default.
739 C<$verbose> and C<$switches> are exported upon request.
746 =item C<All tests successful.\nFiles=%d, Tests=%d, %s>
748 If all tests are successful some statistics about the performance are
751 =item C<FAILED tests %s\n\tFailed %d/%d tests, %.2f%% okay.>
753 For any single script that has failing subtests statistics like the
756 =item C<Test returned status %d (wstat %d)>
758 Scripts that return a non-zero exit status, both C<$? E<gt>E<gt> 8>
759 and C<$?> are printed in a message similar to the above.
761 =item C<Failed 1 test, %.2f%% okay. %s>
763 =item C<Failed %d/%d tests, %.2f%% okay. %s>
765 If not all tests were successful, the script dies with one of the
768 =item C<FAILED--Further testing stopped%s>
770 If a single subtest decides that further testing will not make sense,
771 the script dies with this message.
777 Setting C<HARNESS_IGNORE_EXITCODE> makes harness ignore the exit status
780 Setting C<HARNESS_NOTTY> to a true value forces it to behave as though
781 STDOUT were not a console. You may need to set this if you don't want
782 harness to output more frequent progress messages using carriage returns.
783 Some consoles may not handle carriage returns properly (which results
784 in a somewhat messy output).
786 Setting C<HARNESS_COMPILE_TEST> to a true value will make harness attempt
787 to compile the test using C<perlcc> before running it.
789 If C<HARNESS_FILELEAK_IN_DIR> is set to the name of a directory, harness
790 will check after each test whether new files appeared in that directory,
793 LEAKED FILES: scr.tmp 0 my.db
795 If relative, directory name is with respect to the current directory at
796 the moment runtests() was called. Putting absolute path into
797 C<HARNESS_FILELEAK_IN_DIR> may give more predictable results.
799 The value of C<HARNESS_PERL_SWITCHES> will be prepended to the
800 switches used to invoke perl on each test. For example, setting
801 C<HARNESS_PERL_SWITCHES> to "-W" will run all tests with all
804 If C<HARNESS_COLUMNS> is set, then this value will be used for the
805 width of the terminal. If it is not set then it will default to
806 C<COLUMNS>. If this is not set, it will default to 80. Note that users
807 of Bourne-sh based shells will need to C<export COLUMNS> for this
808 module to use that variable.
810 Harness sets C<HARNESS_ACTIVE> before executing the individual tests.
811 This allows the tests to determine if they are being executed through the
812 harness or by any other means.
816 L<Test> for writing test scripts and also L<Benchmark> for the
817 underlying timing routines.
821 Either Tim Bunce or Andreas Koenig, we don't know. What we know for
822 sure is, that it was inspired by Larry Wall's TEST script that came
823 with perl distributions for ages. Numerous anonymous contributors
824 exist. Current maintainer is Andreas Koenig.
828 Test::Harness uses $^X to determine the perl binary to run the tests
829 with. Test scripts running via the shebang (C<#!>) line may not be
830 portable because $^X is not consistent for shebang scripts across
831 platforms. This is no problem when Test::Harness is run with an
832 absolute path to the perl binary or when $^X can be found in the path.