1 # -*- Mode: cperl; cperl-indent-level: 4 -*-
# --- Package globals and environment-driven configuration ---
# NOTE(review): sampled excerpt; comments describe only the visible lines.
10 our($VERSION, $Verbose, $Switches, $Have_Devel_Corestack, $Curtest,
11 $Columns, $verbose, $switches,
12 @ISA, @EXPORT, @EXPORT_OK
15 # Backwards compatibility for exportable variable names.
# Alias the old lowercase name to the capitalized variable so writes
# through either name are seen by both.
17 *switches = \$Switches;
19 $Have_Devel_Corestack = 0;
# Let child test scripts detect that they run under the harness.
23 $ENV{HARNESS_ACTIVE} = 1;
25 # Some experimental versions of OS/2 build have broken $?
26 my $Ignore_Exitcode = $ENV{HARNESS_IGNORE_EXITCODE};
# Optional directory to watch for files leaked by test scripts.
28 my $Files_In_Dir = $ENV{HARNESS_FILELEAK_IN_DIR};
32 @EXPORT = qw(&runtests);
33 @EXPORT_OK = qw($verbose $switches);
# Terminal width: HARNESS_COLUMNS overrides COLUMNS; default is 80.
37 $Columns = $ENV{HARNESS_COLUMNS} || $ENV{COLUMNS} || 80;
# Return every entry (including '.' and '..') of the given directory,
# in readdir() order.  Returns the empty list if the directory cannot
# be opened.
sub globdir {
    my ($dir) = @_;
    # Lexical handle instead of the old bareword DIRH (which was a
    # package-wide global), and check the opendir result instead of
    # silently reading from an unopened handle.
    opendir my $dh, $dir or return;
    my @entries = readdir $dh;
    closedir $dh;
    return @entries;
}
# runtests(@tests) [interior excerpt]: run every test script, print the
# summary, and return true iff no script failed and at least one
# subtest was actually seen.
45 my($tot, $failedtests) = _runtests(@tests);
46 _show_results($tot, $failedtests);
# True only when nothing failed AND some subtests ran ({max} > 0).
48 return ($tot->{bad} == 0 && $tot->{max}) ;
# _runtests(@tests) [interior excerpt, sampled]: run each script via a
# piped open, parse its ok/not-ok output, and accumulate per-test and
# total statistics.  Comments below cover only the visible lines.
65 tests => scalar @tests,
71 # pass -I flags to children
72 my $old5lib = $ENV{PERL5LIB};
74 # VMS has a 255-byte limit on the length of %ENV entries, so
75 # toss the ones that involve perl_root, the install location
79 $new5lib = join($Config{path_sep}, grep {!/perl_root/i;} @INC);
# Quote switches containing uppercase letters (NOTE(review): appears to
# guard against VMS case folding -- confirm against full source).
80 $Switches =~ s/-(\S*[A-Z]\S*)/"-$1"/g;
83 $new5lib = join($Config{path_sep}, @INC);
# Localized so children see our @INC; restored automatically on exit.
86 local($ENV{'PERL5LIB'}) = $new5lib;
# Snapshot of the watched directory for leak detection (see below).
88 my @dir_files = globdir $Files_In_Dir if defined $Files_In_Dir;
89 my $t_start = new Benchmark;
91 foreach my $test (@tests) {
93 chop($te); # XXX chomp?
95 if ($^O eq 'VMS') { $te =~ s/^.*\.t\./[.t./s; }
96 my $blank = (' ' x 77);
# Leader like "t/foo........" so the status column lines up.
97 my $leader = "$te" . '.' x (20 - length($te));
# $ml carries a carriage-return prefix so progress overwrites in place
# on a tty; disabled when not a tty, under HARNESS_NOTTY, or verbose.
99 $ml = "\r$blank\r$leader"
100 if -t STDOUT and not $ENV{HARNESS_NOTTY} and not $Verbose;
103 my $s = _set_switches($test);
# Optionally compile the test with perlcc first (HARNESS_COMPILE_TEST).
105 my $cmd = ($ENV{'HARNESS_COMPILE_TEST'})
106 ? "./perl -I../lib ../utils/perlcc $test "
107 . "-run 2>> ./compilelog |"
109 $cmd = "MCR $cmd" if $^O eq 'VMS';
# NOTE(review): 2-arg piped open with an interpolated filename is a
# shell/mode-injection risk by modern standards; left as-is here.
110 open(my $fh, $cmd) or print "can't run $test. $!\n";
112 # state of the current test.
121 skip_reason => undef,
125 my($seen_header, $tests_seen) = (0,0);
127 if( _parse_header($_, \%test, \%tot) ) {
128 warn "Test header seen twice!\n" if $seen_header;
# A trailing 1..M plan is legal only if it covers what we already saw.
132 warn "1..M can only appear at the beginning or end of tests\n"
133 if $tests_seen && $test{max} < $tests_seen;
135 elsif( _parse_test_line($_, \%test, \%tot) ) {
# Reap the child and collect its exit/wait status.
141 my($estatus, $wstatus) = _close_fh($fh);
144 $failedtests{$test} = _dubious_return(\%test, \%tot,
146 $failedtests{$test}{name} = $test;
# All planned tests ran and passed.
148 elsif ($test{ok} == $test{max} && $test{next} == $test{max}+1) {
149 if ($test{max} and $test{skipped} + $test{bonus}) {
151 push(@msg, "$test{skipped}/$test{max} skipped: $test{skip_reason}")
153 push(@msg, "$test{bonus}/$test{max} unexpectedly succeeded")
155 print "$test{ml}ok, ".join(', ', @msg)."\n";
156 } elsif ($test{max}) {
157 print "$test{ml}ok\n";
158 } elsif (defined $test{skip_reason}) {
159 print "skipped: $test{skip_reason}\n";
162 print "skipped test on this platform\n";
# A plan existed but not every test passed: record the failures.
166 } elsif ($test{max}) {
167 if ($test{next} <= $test{max}) {
# Tests that never produced any output count as failed.
168 push @{$test{failed}}, $test{next}..$test{max};
170 if (@{$test{failed}}) {
171 my ($txt, $canon) = canonfailed($test{max},$test{skipped},
173 print "$test{ml}$txt";
174 $failedtests{$test} = { canon => $canon,
176 failed => scalar @{$test{failed}},
178 percent => 100*(scalar @{$test{failed}})/$test{max},
183 print "Don't know which tests failed: got $test{ok} ok, ".
184 "expected $test{max}\n";
185 $failedtests{$test} = { canon => '??',
195 } elsif ($test{next} == 0) {
196 print "FAILED before any test output arrived\n";
198 $failedtests{$test} = { canon => '??',
207 $tot{sub_skipped} += $test{skipped};
# File-leak detection: diff directory contents before/after each test.
209 if (defined $Files_In_Dir) {
210 my @new_dir_files = globdir $Files_In_Dir;
211 if (@new_dir_files != @dir_files) {
213 @f{@new_dir_files} = (1) x @new_dir_files;
214 delete @f{@dir_files};
215 my @f = sort keys %f;
216 print "LEAKED FILES: @f\n";
217 @dir_files = @new_dir_files;
221 $tot{bench} = timediff(new Benchmark, $t_start);
# Restore the caller's PERL5LIB exactly (delete it if it was unset).
224 if (defined $old5lib) {
225 $ENV{PERL5LIB} = $old5lib;
227 delete $ENV{PERL5LIB};
231 return(\%tot, \%failedtests);
# _show_results($tot, $failedtests) [interior excerpt]: print the final
# run summary, dying with a FAILED message when anything went wrong.
236 my($tot, $failedtests) = @_;
239 my $bonusmsg = _bonusmsg($tot);
241 if ($tot->{bad} == 0 && $tot->{max}) {
242 print "All tests successful$bonusmsg.\n";
243 } elsif ($tot->{tests}==0){
244 die "FAILED--no tests were run for some reason.\n";
245 } elsif ($tot->{max} == 0) {
# Scripts ran but produced no recognizable test output at all.
246 my $blurb = $tot->{tests}==1 ? "script" : "scripts";
247 die "FAILED--$tot->{tests} test $blurb could be run, ".
248 "alas--no output ever seen\n";
250 $pct = sprintf("%.2f", $tot->{good} / $tot->{tests} * 100);
251 my $subpct = sprintf " %d/%d subtests failed, %.2f%% okay.",
252 $tot->{max} - $tot->{ok}, $tot->{max},
253 100*$tot->{ok}/$tot->{max};
# The failure table is rendered through dynamically built formats;
# $Curtest is the package global the format body reads its fields from.
255 my($fmt_top, $fmt) = _create_fmts($failedtests);
257 # Now write to formats
258 for my $script (sort keys %$failedtests) {
259 $Curtest = $failedtests->{$script};
263 $bonusmsg =~ s/^,\s*//;
264 print "$bonusmsg.\n" if $bonusmsg;
265 die "Failed $tot->{bad}/$tot->{tests} test scripts, $pct% okay.".
# Final statistics line, timed via the Benchmark module.
270 printf("Files=%d, Tests=%d, %s\n",
271 $tot->{files}, $tot->{max}, timestr($tot->{bench}, 'nop'));
# _parse_header($line, \%test, \%tot) [interior excerpt]: recognize the
# "1..M" plan line (with optional "todo" list or "# skip" reason) and
# record the expected number of tests.
276 my($line, $test, $tot) = @_;
280 print $line if $Verbose;
# Old-style inline TODO list, e.g. "1..5 todo 2 4;".
283 if ($line =~ /^1\.\.([0-9]+) todo([\d\s]+);?/i) {
285 for (split(/\s+/, $2)) { $test->{todo}{$_} = 1; }
287 $tot->{max} += $test->{max};
293 # 1..0 # skip Why? Because I said so!
294 elsif ($line =~ /^1\.\.([0-9]+)
295 (\s*\#\s*[Ss]kip\S*(?>\s+) (.+))?
300 $tot->{max} += $test->{max};
302 $test->{next} = 1 unless $test->{next};
# A plan of 0 with a reason marks the whole script as skipped.
303 $test->{skip_reason} = $3 if not $test->{max} and defined $3;
315 sub _parse_test_line {
316 my($line, $test, $tot) = @_;
# [interior excerpt] Parse a single "ok"/"not ok" line, updating the
# per-test counters; also honors the "Bail out!" directive.
318 if ($line =~ /^(not\s+)?ok\b/i) {
# Scripts may omit test numbers; fall back to our running counter.
319 my $this = $test->{next} || 1;
321 if ($line =~ /^not ok\s*(\d*)/){ # test failed
322 $this = $1 if length $1 and $1 > 0;
323 print "$test->{ml}NOK $this" if $test->{ml};
# A failing TODO test is expected to fail, so it is not recorded.
324 if (!$test->{todo}{$this}) {
325 push @{$test->{failed}}, $this;
331 # "ok 23 # skip (you're not cleared for that)"
332 elsif ($line =~ /^ok\s*(\d*)\ *
333 (\s*\#\s*[Ss]kip\S*(?:(?>\s+)(.+))?)?
336 $this = $1 if length $1 and $1 > 0;
337 print "$test->{ml}ok $this/$test->{max}" if $test->{ml};
340 $test->{skipped}++ if defined $2;
# Track a single skip reason; collapse conflicting ones to a generic note.
342 $reason = 'unknown reason' if defined $2;
343 $reason = $3 if defined $3;
344 if (defined $reason and defined $test->{skip_reason}) {
345 # print "was: '$skip_reason' new '$reason'\n";
346 $test->{skip_reason} = 'various reasons'
347 if $test->{skip_reason} ne $reason;
348 } elsif (defined $reason) {
349 $test->{skip_reason} = $reason;
# A passing TODO test is an unexpected success ("bonus").
351 $test->{bonus}++, $tot->{bonus}++ if $test->{todo}{$this};
354 elsif ($line =~ /^ok\s*(\d*)\s*\#([^\r]*)$/) { # XXX multiline ok?
355 $this = $1 if $1 > 0;
356 print "$test->{ml}ok $this/$test->{max}" if $test->{ml};
361 # an ok or not ok not matching the 3 cases above...
362 # just ignore it for compatibility with TEST
# Reconcile the script's own numbering with our counter.
366 if ($this > $test->{next}) {
367 # print "Test output counter mismatch [test $this]\n";
368 # no need to warn probably
# Numbers were skipped over: treat the gap as failures.
369 push @{$test->{failed}}, $test->{next}..$this-1;
371 elsif ($this < $test->{next}) {
372 #we have seen more "ok" lines than the number suggests
373 print "Confused test output: test $this answered after ".
374 "test ", $test->{next}-1, "\n";
375 $test->{next} = $this;
377 $test->{next} = $this + 1;
380 elsif ($line =~ /^Bail out!\s*(.*)/i) { # magic words
# "Bail out!" aborts the entire harness run immediately via die.
381 die "FAILED--Further testing stopped" .
382 ($1 ? ": $1\n" : ".\n");
# _bonusmsg($tot) [interior excerpt]: build the parenthetical summary
# of unexpected successes plus skipped tests/subtests.
391 $bonusmsg = (" ($tot->{bonus} subtest".($tot->{bonus} > 1 ? 's' : '').
392 " UNEXPECTEDLY SUCCEEDED)")
395 if ($tot->{skipped}) {
# Whole test scripts that were skipped.
396 $bonusmsg .= ", $tot->{skipped} test"
397 . ($tot->{skipped} != 1 ? 's' : '');
398 if ($tot->{sub_skipped}) {
# ...and individual skipped subtests inside scripts that ran.
399 $bonusmsg .= " and $tot->{sub_skipped} subtest"
400 . ($tot->{sub_skipped} != 1 ? 's' : '');
402 $bonusmsg .= ' skipped';
404 elsif ($tot->{sub_skipped}) {
405 $bonusmsg .= ", $tot->{sub_skipped} subtest"
406 . ($tot->{sub_skipped} != 1 ? 's' : '')
413 # VMS has some subtle nastiness with closing the test files.
# _close_fh($fh) [interior excerpt]: close the child pipe and return
# (exit status, wait status) for the finished test script.
417 close($fh); # must close to reap child resource values
# $? may be garbage on some builds; HARNESS_IGNORE_EXITCODE zeroes it.
419 my $wstatus = $Ignore_Exitcode ? 0 : $?; # Can trust $? ?
# VMS encodes exit status differently; "use vmsish" decodes it natively.
421 $estatus = ($^O eq 'VMS'
422 ? eval 'use vmsish "status"; $estatus = $?'
425 return($estatus, $wstatus);
429 # Set up the command-line switches to run perl as.
# [interior excerpt] Peeks at the script's shebang line: a -T there
# forces taint mode plus explicit -I paths for the child process.
# NOTE(review): 2-arg open with unchecked failure falls through.
433 open(my $fh, $test) or print "can't open $test. $!\n";
436 $s .= " $ENV{'HARNESS_PERL_SWITCHES'}"
437 if exists $ENV{'HARNESS_PERL_SWITCHES'};
# Taint mode ignores PERL5LIB, so pass @INC via quoted -I switches.
438 $s .= join " ", q[ "-T"], map {qq["-I$_"]} @INC
439 if $first =~ /^#!.*\bperl.*-\w*T/;
441 close($fh) or print "can't close $test. $!\n";
447 # Test program go boom.
448 sub _dubious_return {
# [interior excerpt] Report a test script that exited with non-zero
# status and build its entry for the failed-tests table.
449 my($test, $tot, $estatus, $wstatus) = @_;
450 my ($failed, $canon, $percent) = ('??', '??');
452 printf "$test->{ml}dubious\n\tTest returned status $estatus ".
453 "(wstat %d, 0x%x)\n",
455 print "\t\t(VMS status is $estatus)\n" if $^O eq 'VMS';
# Offer a stack trace if the child appears to have dumped core.
457 if (corestatus($wstatus)) { # until we have a wait module
458 if ($Have_Devel_Corestack) {
459 Devel::CoreStack::stack($^X);
461 print "\ttest program seems to have generated a core\n";
# Every subtest passed before the bad exit: nothing to canonicalize.
468 if ($test->{next} == $test->{max} + 1 and not @{$test->{failed}}) {
469 print "\tafter all the subtests completed successfully\n";
471 $failed = 0; # But we do not set $canon!
# Otherwise count every unreached subtest as a failure.
474 push @{$test->{failed}}, $test->{next}..$test->{max};
475 $failed = @{$test->{failed}};
476 (my $txt, $canon) = canonfailed($test->{max},$test->{skipped},@{$test->{failed}});
477 $percent = 100*(scalar @{$test->{failed}})/$test->{max};
482 return { canon => $canon, max => $test->{max} || '??',
485 estat => $estatus, wstat => $wstatus,
# Warn about a line of test output that none of the parsers understood.
490 sub _garbled_output {
491 my($gibberish) = shift;
492 warn "Confusing test output: '$gibberish'\n";
# _create_fmts($failedtests) [interior excerpt]: build the source text
# of the STDOUT/STDOUT_TOP formats that render the failure table,
# sized to fit within $Columns.
497 my($failedtests) = @_;
499 my $failed_str = "Failed Test";
500 my $middle_str = " Status Wstat Total Fail Failed ";
501 my $list_str = "List of Failed";
503 # Figure out our longest name string for formatting purposes.
504 my $max_namelen = length($failed_str);
505 foreach my $script (keys %$failedtests) {
506 my $namelen = length $failedtests->{$script}->{name};
507 $max_namelen = $namelen if $namelen > $max_namelen;
# Give the leftover width to the failure list, but never squeeze either
# column below its header text; widen $Columns as a last resort.
510 my $list_len = $Columns - length($middle_str) - $max_namelen;
511 if ($list_len < length($list_str)) {
512 $list_len = length($list_str);
513 $max_namelen = $Columns - length($middle_str) - $list_len;
514 if ($max_namelen < length($failed_str)) {
515 $max_namelen = length($failed_str);
516 $Columns = $max_namelen + length($middle_str) + $list_len;
520 my $fmt_top = "format STDOUT_TOP =\n"
521 . sprintf("%-${max_namelen}s", $failed_str)
# The format body reads its values from the package global $Curtest.
527 my $fmt = "format STDOUT =\n"
528 . "@" . "<" x ($max_namelen - 1)
529 . " @>> @>>>> @>>>> @>>> ^##.##% "
530 . "^" . "<" x ($list_len - 1) . "\n"
531 . '{ $Curtest->{name}, $Curtest->{estat},'
532 . ' $Curtest->{wstat}, $Curtest->{max},'
533 . ' $Curtest->{failed}, $Curtest->{percent},'
534 . ' $Curtest->{canon}'
# Continuation line(s) for a failure list longer than one table row.
536 . "~~" . " " x ($Columns - $list_len - 2) . "^"
537 . "<" x ($list_len - 1) . "\n"
538 . '$Curtest->{canon}'
546 return($fmt_top, $fmt);
# corestatus($wstatus) [interior excerpt]: best-effort check whether a
# wait status indicates a core dump; also tries loading
# Devel::CoreStack once per run for later stack traces.
550 my $tried_devel_corestack;
# Prefer the real WCOREDUMP macro from wait.ph; fall back to the
# conventional 0200 core-dump bit if the macro is unavailable.
554 eval {require 'wait.ph'};
555 my $ret = defined &WCOREDUMP ? WCOREDUMP($st) : $st & 0200;
557 eval { require Devel::CoreStack; $Have_Devel_Corestack++ }
558 unless $tried_devel_corestack++;
563 sub canonfailed ($@) {
# [interior excerpt] Condense the list of failed test numbers into
# ranges (e.g. "3-5") and compose the human-readable failure summary.
564 my($max,$skipped,@failed) = @_;
# De-duplicate and sort numerically before range detection.
566 @failed = sort {$a <=> $b} grep !$seen{$_}++, @failed;
567 my $failed = @failed;
571 my $last = $min = shift @failed;
574 for (@failed, $failed[-1]) { # don't forget the last one
575 if ($_ > $last+1 || $_ == $last) {
579 push @canon, "$min-$last";
586 push @result, "FAILED tests @canon\n";
589 push @result, "FAILED test $last\n";
593 push @result, "\tFailed $failed/$max tests, ";
594 push @result, sprintf("%.2f",100*(1-$failed/$max)), "% okay";
# Pluralize "test" only when more than one was skipped.
595 my $ender = 's' x ($skipped > 1);
596 my $good = $max - $failed - $skipped;
597 my $goodper = sprintf("%.2f",100*($good/$max));
598 push @result, " (-$skipped skipped test$ender: $good okay, ".
602 my $txt = join "", @result;
611 Test::Harness - run perl standard test scripts with statistics
621 (By using the Test module, you can write test scripts without
622 knowing the exact output this module expects. However, if you need to
623 know the specifics, read on!)
625 Perl test scripts print to standard output C<"ok N"> for each single
626 test, where C<N> is an increasing sequence of integers. The first line
627 output by a standard test script is C<"1..M"> with C<M> being the
628 number of tests that should be run within the test
629 script. Test::Harness::runtests(@tests) runs all the testscripts
630 named as arguments and checks standard output for the expected
633 After all tests have been performed, runtests() prints some
634 performance statistics that are computed by the Benchmark module.
636 =head2 The test script output
642 This header tells how many tests there will be. It should be the
643 first line output by your test program (but it's okay if it's preceded
646 In certain instances, you may not know how many tests you will
647 ultimately be running. In this case, it is permitted (but not
648 encouraged) for the 1..M header to appear as the B<last> line output
649 by your test (again, it can be followed by further comments). But we
650 strongly encourage you to put it first.
652 Under B<no> circumstances should 1..M appear in the middle of your
653 output or more than once.
656 =item B<'ok', 'not ok'. Ok?>
658 Any output from the testscript to standard error is ignored and
659 bypassed, thus will be seen by the user. Lines written to standard
660 output containing C</^(not\s+)?ok\b/> are interpreted as feedback for
661 runtests(). All other lines are discarded.
663 C</^not ok/> indicates a failed test. C</^ok/> is a successful test.
666 =item B<test numbers>
668 Perl normally expects the 'ok' or 'not ok' to be followed by a test
669 number. It is tolerated if the test numbers after 'ok' are
670 omitted. In this case Test::Harness maintains temporarily its own
671 counter until the script supplies test numbers again. So the following
686 Failed 3/6 tests, 50.00% okay
689 =item B<$Test::Harness::verbose>
691 The global variable $Test::Harness::verbose is exportable and can be
692 used to let runtests() display the standard output of the script
693 without altering the behavior otherwise.
695 =item B<$Test::Harness::switches>
697 The global variable $Test::Harness::switches is exportable and can be
698 used to set perl command line options used for running the test
699 script(s). The default value is C<-w>.
701 =item B<Skipping tests>
703 If the standard output line contains substring C< # Skip> (with
704 variations in spacing and case) after C<ok> or C<ok NUMBER>, it is
705 counted as a skipped test. If the whole testscript succeeds, the
706 count of skipped tests is included in the generated output.
708 C<Test::Harness> reports the text after C< # Skip\S*\s+> as a reason
709 for skipping. Similarly, one can include a similar explanation in a
710 C<1..0> line emitted if the test script is skipped completely:
712 1..0 # Skipped: no leverage found
716 As an emergency measure, a test script can decide that further tests
717 are useless (e.g. missing dependencies) and testing should stop
718 immediately. In that case the test script prints the magic words
722 to standard output. Any message after these words will be displayed by
723 C<Test::Harness> as the reason why testing is stopped.
727 Additional comments may be put into the testing output on their own
728 lines. Comment lines should begin with a '#', Test::Harness will
732 # Life is good, the sun is shining, RAM is cheap.
734 # got 'Bush' expected 'Gore'
739 C<&runtests> is exported by Test::Harness per default.
741 C<$verbose> and C<$switches> are exported upon request.
748 =item C<All tests successful.\nFiles=%d, Tests=%d, %s>
750 If all tests are successful some statistics about the performance are
753 =item C<FAILED tests %s\n\tFailed %d/%d tests, %.2f%% okay.>
755 For any single script that has failing subtests statistics like the
758 =item C<Test returned status %d (wstat %d)>
760 Scripts that return a non-zero exit status, both C<$? E<gt>E<gt> 8>
761 and C<$?> are printed in a message similar to the above.
763 =item C<Failed 1 test, %.2f%% okay. %s>
765 =item C<Failed %d/%d tests, %.2f%% okay. %s>
767 If not all tests were successful, the script dies with one of the
770 =item C<FAILED--Further testing stopped%s>
772 If a single subtest decides that further testing will not make sense,
773 the script dies with this message.
779 Setting C<HARNESS_IGNORE_EXITCODE> makes harness ignore the exit status
782 Setting C<HARNESS_NOTTY> to a true value forces it to behave as though
783 STDOUT were not a console. You may need to set this if you don't want
784 harness to output more frequent progress messages using carriage returns.
785 Some consoles may not handle carriage returns properly (which results
786 in a somewhat messy output).
788 Setting C<HARNESS_COMPILE_TEST> to a true value will make harness attempt
789 to compile the test using C<perlcc> before running it.
791 If C<HARNESS_FILELEAK_IN_DIR> is set to the name of a directory, harness
792 will check after each test whether new files appeared in that directory,
795 LEAKED FILES: scr.tmp 0 my.db
797 If relative, directory name is with respect to the current directory at
798 the moment runtests() was called. Putting absolute path into
799 C<HARNESS_FILELEAK_IN_DIR> may give more predictable results.
801 The value of C<HARNESS_PERL_SWITCHES> will be prepended to the
802 switches used to invoke perl on each test. For example, setting
803 C<HARNESS_PERL_SWITCHES> to "-W" will run all tests with all
806 If C<HARNESS_COLUMNS> is set, then this value will be used for the
807 width of the terminal. If it is not set then it will default to
808 C<COLUMNS>. If this is not set, it will default to 80. Note that users
809 of Bourne-sh based shells will need to C<export COLUMNS> for this
810 module to use that variable.
812 Harness sets C<HARNESS_ACTIVE> before executing the individual tests.
813 This allows the tests to determine if they are being executed through the
814 harness or by any other means.
818 L<Test> for writing test scripts and also L<Benchmark> for the
819 underlying timing routines.
823 Either Tim Bunce or Andreas Koenig, we don't know. What we know for
824 sure is, that it was inspired by Larry Wall's TEST script that came
825 with perl distributions for ages. Numerous anonymous contributors
826 exist. Current maintainer is Andreas Koenig.
830 Test::Harness uses $^X to determine the perl binary to run the tests
831 with. Test scripts running via the shebang (C<#!>) line may not be
832 portable because $^X is not consistent for shebang scripts across
833 platforms. This is no problem when Test::Harness is run with an
834 absolute path to the perl binary or when $^X can be found in the path.