# -*- Mode: cperl; cperl-indent-level: 4 -*-

our($VERSION, $verbose, $switches, $have_devel_corestack, $curtest,
    $columns, @ISA, @EXPORT, @EXPORT_OK);
$have_devel_corestack = 0;

$ENV{HARNESS_ACTIVE} = 1;

# Some experimental versions of OS/2 build have broken $?
my $ignore_exitcode = $ENV{HARNESS_IGNORE_EXITCODE};

my $files_in_dir = $ENV{HARNESS_FILELEAK_IN_DIR};

my $tests_skipped = 0;
my $subtests_skipped = 0;

@EXPORT= qw(&runtests);
@EXPORT_OK= qw($verbose $switches);

$columns = $ENV{HARNESS_COLUMNS} || $ENV{COLUMNS} || 80;

sub globdir { opendir DIRH, shift; my @f = readdir DIRH; closedir DIRH; @f }
    my($test,$te,$ok,$next,$max,$pct,$totbonus,@failed,%failedtests);

    # pass -I flags to children
    my $old5lib = $ENV{PERL5LIB};

    # VMS has a 255-byte limit on the length of %ENV entries, so
    # toss the ones that involve perl_root, the install location
        $new5lib = join($Config{path_sep}, grep {!/perl_root/i;} @INC);
        $switches =~ s/-(\S*[A-Z]\S*)/"-$1"/g;
        $new5lib = join($Config{path_sep}, @INC);

    local($ENV{'PERL5LIB'}) = $new5lib;

    my @dir_files = globdir $files_in_dir if defined $files_in_dir;
    my $t_start = new Benchmark;
    while ($test = shift(@tests)) {
        if ($^O eq 'VMS') { $te =~ s/^.*\.t\./[.t./s; }
        my $blank = (' ' x 77);
        my $leader = "$te" . '.' x (20 - length($te));
        $ml = "\r$blank\r$leader"
            if -t STDOUT and not $ENV{HARNESS_NOTTY} and not $verbose;

        my $fh = new FileHandle;
        $fh->open($test) or print "can't open $test. $!\n";
        $s .= " $ENV{'HARNESS_PERL_SWITCHES'}"
            if exists $ENV{'HARNESS_PERL_SWITCHES'};
        $s .= join " ", q[ "-T"], map {qq["-I$_"]} @INC
            if $first =~ /^#!.*\bperl.*-\w*T/;
        $fh->close or print "can't close $test. $!\n";
        my $cmd = ($ENV{'HARNESS_COMPILE_TEST'})
            ? "./perl -I../lib ../utils/perlcc $test "
              . "-run 2>> ./compilelog |"
            : "$^X $s $test|";
        $cmd = "MCR $cmd" if $^O eq 'VMS';
        $fh->open($cmd) or print "can't run $test. $!\n";
        $ok = $next = $max = 0;
            if (/^1\.\.([0-9]+) todo([\d\s]+)\;/) {
                for (split(/\s+/, $2)) { $todo{$_} = 1; }
            } elsif (/^1\.\.([0-9]+)(\s*\#\s*[Ss]kip\S*(?>\s+)(.+))?/) {
                $skip_reason = $3 if not $max and defined $3;
            } elsif ($max && /^(not\s+)?ok\b/) {
                if (/^not ok\s*(\d*)/){
                    $this = $1 if $1 > 0;
                    print "${ml}NOK $this" if $ml;
                } elsif (/^ok\s*(\d*) *(\s*\#\s*[Ss]kip\S*(?:(?>\s+)(.+))?)?$/) {
                    $this = $1 if $1 > 0;
                    print "${ml}ok $this/$max" if $ml;
                    $skipped++ if defined $2;
                    $reason = 'unknown reason' if defined $2;
                    $reason = $3 if defined $3;
                    if (defined $reason and defined $skip_reason) {
                        # print "was: '$skip_reason' new '$reason'\n";
                        $skip_reason = 'various reasons'
                            if $skip_reason ne $reason;
                    } elsif (defined $reason) {
                        $skip_reason = $reason;
                    $bonus++, $totbonus++ if $todo{$this};
                } elsif (/^ok\s*(\d*)\s*\#([^\r]*)$/) {
                    $this = $1 if $1 > 0;
                    print "${ml}ok $this/$max" if $ml;
                # an ok or not ok not matching the 3 cases above...
                # just ignore it for compatibility with TEST
                    # print "Test output counter mismatch [test $this]\n";
                    # no need to warn probably
                    push @failed, $next..$this-1;
                } elsif ($this < $next) {
                    #we have seen more "ok" lines than the number suggests
                    print "Confused test output: test $this answered after test ", $next-1, "\n";
            } elsif (/^Bail out!\s*(.*)/i) { # magic words
                die "FAILED--Further testing stopped" . ($1 ? ": $1\n" : ".\n");
        $fh->close; # must close to reap child resource values
        my $wstatus = $ignore_exitcode ? 0 : $?; # Can trust $? ?
        $estatus = ($^O eq 'VMS'
                    ? eval 'use vmsish "status"; $estatus = $?'
            my ($failed, $canon, $percent) = ('??', '??');
            printf "${ml}dubious\n\tTest returned status $estatus (wstat %d, 0x%x)\n",
            print "\t\t(VMS status is $estatus)\n" if $^O eq 'VMS';
            if (corestatus($wstatus)) { # until we have a wait module
                if ($have_devel_corestack) {
                    Devel::CoreStack::stack($^X);
                    print "\ttest program seems to have generated a core\n";
            if ($next == $max + 1 and not @failed) {
                print "\tafter all the subtests completed successfully\n";
                $failed = 0; # But we do not set $canon!
                push @failed, $next..$max;
                (my $txt, $canon) = canonfailed($max,$skipped,@failed);
                $percent = 100*(scalar @failed)/$max;
            $failedtests{$test} = { canon => $canon, max => $max || '??',
                                    name => $test, percent => $percent,
                                    estat => $estatus, wstat => $wstatus,
        } elsif ($ok == $max && $next == $max+1) {
            if ($max and $skipped + $bonus) {
                push(@msg, "$skipped/$max skipped: $skip_reason")
                push(@msg, "$bonus/$max unexpectedly succeeded")
                print "${ml}ok, ".join(', ', @msg)."\n";
            } elsif (defined $skip_reason) {
                print "skipped: $skip_reason\n";
                print "skipped test on this platform\n";
            push @failed, $next..$max;
            my ($txt, $canon) = canonfailed($max,$skipped,@failed);
            $failedtests{$test} = { canon => $canon, max => $max,
                                    failed => scalar @failed,
                                    name => $test, percent => 100*(scalar @failed)/$max,
                                    estat => '', wstat => '',
            print "Don't know which tests failed: got $ok ok, expected $max\n";
            $failedtests{$test} = { canon => '??', max => $max,
                                    name => $test, percent => undef,
                                    estat => '', wstat => '',
        } elsif ($next == 0) {
            print "FAILED before any test output arrived\n";
            $failedtests{$test} = { canon => '??', max => '??',
                                    name => $test, percent => undef,
                                    estat => '', wstat => '',
        $subtests_skipped += $skipped;
        # detect files the test left behind (see HARNESS_FILELEAK_IN_DIR)
        if (defined $files_in_dir) {
            my @new_dir_files = globdir $files_in_dir;
            if (@new_dir_files != @dir_files) {
                @f{@new_dir_files} = (1) x @new_dir_files;
                delete @f{@dir_files};
                my @f = sort keys %f;
                print "LEAKED FILES: @f\n";
                @dir_files = @new_dir_files;
    my $t_total = timediff(new Benchmark, $t_start);

    if (defined $old5lib) {
        $ENV{PERL5LIB} = $old5lib;
        delete $ENV{PERL5LIB};

    $bonusmsg = (" ($totbonus subtest".($totbonus>1?'s':'').
                 " UNEXPECTEDLY SUCCEEDED)")
    if ($tests_skipped) {
        $bonusmsg .= ", $tests_skipped test" . ($tests_skipped != 1 ? 's' : '');
        if ($subtests_skipped) {
            $bonusmsg .= " and $subtests_skipped subtest"
                . ($subtests_skipped != 1 ? 's' : '');
        $bonusmsg .= ' skipped';
    elsif ($subtests_skipped) {
        $bonusmsg .= ", $subtests_skipped subtest"
            . ($subtests_skipped != 1 ? 's' : '')
    if ($bad == 0 && $totmax) {
        print "All tests successful$bonusmsg.\n";
        die "FAILED--no tests were run for some reason.\n";
    } elsif ($totmax==0) {
        my $blurb = $total==1 ? "script" : "scripts";
        die "FAILED--$total test $blurb could be run, alas--no output ever seen\n";
        $pct = sprintf("%.2f", $good / $total * 100);
        my $subpct = sprintf " %d/%d subtests failed, %.2f%% okay.",
            $totmax - $totok, $totmax, 100*$totok/$totmax;

        # First, figure out max length of test names
        my $failed_str = "Failed Test";
        my $middle_str = " Status Wstat Total Fail Failed ";
        my $list_str = "List of Failed";
        my $max_namelen = length($failed_str);
        foreach $script (keys %failedtests) {
                (length $failedtests{$script}->{name} > $max_namelen) ?
                 length $failedtests{$script}->{name} : $max_namelen;

        my $list_len = $columns - length($middle_str) - $max_namelen;
        if ($list_len < length($list_str)) {
            $list_len = length($list_str);
            $max_namelen = $columns - length($middle_str) - $list_len;
            if ($max_namelen < length($failed_str)) {
                $max_namelen = length($failed_str);
                $columns = $max_namelen + length($middle_str) + $list_len;

        my $fmt_top = "format STDOUT_TOP =\n"
                      . sprintf("%-${max_namelen}s", $failed_str)

        my $fmt = "format STDOUT =\n"
                  . "@" . "<" x ($max_namelen - 1)
                  . " @>> @>>>> @>>>> @>>> ^##.##% "
                  . "^" . "<" x ($list_len - 1) . "\n"
                  . '{ $curtest->{name}, $curtest->{estat},'
                  . ' $curtest->{wstat}, $curtest->{max},'
                  . ' $curtest->{failed}, $curtest->{percent},'
                  . ' $curtest->{canon}'
                  . "~~" . " " x ($columns - $list_len - 2) . "^"
                  . "<" x ($list_len - 1) . "\n"
                  . '$curtest->{canon}'
        # Now write to formats
        for $script (sort keys %failedtests) {
            $curtest = $failedtests{$script};

        $bonusmsg =~ s/^,\s*//;
        print "$bonusmsg.\n" if $bonusmsg;
        die "Failed $bad/$total test scripts, $pct% okay.$subpct\n";

    printf("Files=%d, Tests=%d, %s\n", $files, $totmax, timestr($t_total, 'nop'));

    return ($bad == 0 && $totmax) ;
my $tried_devel_corestack;

    eval {require 'wait.ph'};
    my $ret = defined &WCOREDUMP ? WCOREDUMP($st) : $st & 0200;

    eval { require Devel::CoreStack; $have_devel_corestack++ }
        unless $tried_devel_corestack++;
sub canonfailed ($@) {
    my($max,$skipped,@failed) = @_;
    @failed = sort {$a <=> $b} grep !$seen{$_}++, @failed;
    my $failed = @failed;

    my $last = $min = shift @failed;
        for (@failed, $failed[-1]) { # don't forget the last one
            if ($_ > $last+1 || $_ == $last) {
                    push @canon, "$min-$last";
        push @result, "FAILED tests @canon\n";
        push @result, "FAILED test $last\n";

    push @result, "\tFailed $failed/$max tests, ";
    push @result, sprintf("%.2f",100*(1-$failed/$max)), "% okay";
    my $ender = 's' x ($skipped > 1);
    my $good = $max - $failed - $skipped;
    my $goodper = sprintf("%.2f",100*($good/$max));
    push @result, " (-$skipped skipped test$ender: $good okay, $goodper%)" if $skipped;

    my $txt = join "", @result;
Test::Harness - run perl standard test scripts with statistics

(By using the L<Test> module, you can write test scripts without
knowing the exact output this module expects. However, if you need to
know the specifics, read on!)
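
For instance, a minimal script written with the Test module might look
like this (the file name and the tested values are purely illustrative):

    # t/basic.t -- two trivial subtests
    use Test;
    BEGIN { plan tests => 2 }

    ok(1);           # subtest 1: always passes
    ok(1 + 1, 2);    # subtest 2: compares got and expected

The module takes care of printing the C<1..2> header and the numbered
C<ok>/C<not ok> lines in the form described below.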
Perl test scripts print to standard output C<"ok N"> for each single
test, where C<N> is an increasing sequence of integers. The first line
output by a standard test script is C<"1..M"> with C<M> being the
number of tests that should be run within the test script.
Test::Harness::runtests(@tests) runs all the test scripts named as
arguments and checks standard output for the expected C<"ok N">
strings.
After all tests have been performed, runtests() prints some
performance statistics that are computed by the Benchmark module.
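
A driver script that uses the harness can therefore be as small as the
following sketch (the C<t/*.t> glob is an assumption about where the
test scripts live):

    use Test::Harness;

    # run every test script under t/ and print the summary;
    # runtests() dies with a diagnostic if any script fails
    runtests(glob("t/*.t"));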
=head2 The test script output
Any output from the test script to standard error is not captured by
the harness and is passed through untouched, so it will be seen by the
user. Lines written to standard output that match
C</^(not\s+)?ok\b/> are interpreted as feedback for runtests(). All
other lines are discarded.
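
As an illustration, in the following (made-up) stream only the C<1..3>
header and the three result lines are significant to the harness; the
comment line is simply discarded:

    1..3
    ok 1
    # this diagnostic line is ignored by the harness
    not ok 2
    ok 3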
It is tolerated if the test numbers after C<ok> are omitted. In this
case Test::Harness temporarily maintains its own counter until the
script supplies test numbers again. So a test script like the
following sketch
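(an illustrative reconstruction: six tests are planned, the numbers are
omitted, and only five results are printed)

    print <<END;
    1..6
    not ok
    ok
    not ok
    ok
    ok
    END

will have the unnumbered results counted as tests 1 through 5, the
missing sixth test reported as failed as well, and will generate

    FAILED tests 1, 3, 6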
    Failed 3/6 tests, 50.00% okay
The global variable $Test::Harness::verbose is exportable and can be
used to let runtests() display the standard output of the script
without altering the behavior otherwise.
The global variable $Test::Harness::switches is exportable and can be
used to set perl command line options used for running the test
script(s). The default value is C<-w>.
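
Both variables are in C<@EXPORT_OK>, so a driver can import and set
them before calling runtests(); the switch values below are only an
example:

    use Test::Harness qw(runtests $verbose $switches);

    $verbose  = 1;             # echo each script's standard output
    $switches = "-w -Ilib";    # perl switches used for every test script
    runtests(@ARGV);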
If the standard output line contains the substring C< # Skip> (with
variations in spacing and case) after C<ok> or C<ok NUMBER>, it is
counted as a skipped test. In no other circumstance is anything
allowed to follow C<ok> or C<ok NUMBER>. If the whole test script
succeeds, the count of skipped tests is included in the generated
output.
C<Test::Harness> reports the text after C< # Skip\S*\s+> as the reason
for skipping. The same kind of explanation can be given in a C<1..0>
line emitted when the whole test script is skipped:
    1..0 # Skipped: no leverage found
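
An individual subtest is skipped in the same way; whatever follows the
skip marker is collected as the reason (the reason text here is only an
example):

    ok 13 # Skip no suitable network interface found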
As an emergency measure, a test script can decide that further tests
are useless (e.g. missing dependencies) and that testing should stop
immediately. In that case the test script prints the magic words

    Bail out!

to standard output. Any message after these words will be displayed by
C<Test::Harness> as the reason why testing is stopped.
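
For example, a script might bail out like this (the message text is
illustrative):

    print "Bail out! Cannot connect to the test database\n";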
C<&runtests> is exported by Test::Harness by default.
=item C<All tests successful.\nFiles=%d, Tests=%d, %s>

If all tests are successful, some statistics about the performance are
printed.
=item C<FAILED tests %s\n\tFailed %d/%d tests, %.2f%% okay.>

For any single script that has failing subtests, statistics like the
above are printed.
=item C<Test returned status %d (wstat %d)>

For scripts that return a non-zero exit status, both C<$? E<gt>E<gt> 8>
and C<$?> are printed in a message similar to the above.
=item C<Failed 1 test, %.2f%% okay. %s>

=item C<Failed %d/%d tests, %.2f%% okay. %s>

If not all tests were successful, the script dies with one of the
above messages.
=item C<FAILED--Further testing stopped%s>

If a single subtest decides that further testing will not make sense,
the script dies with this message.
Setting C<HARNESS_IGNORE_EXITCODE> makes harness ignore the exit status
of the test scripts.
Setting C<HARNESS_NOTTY> to a true value forces it to behave as though
STDOUT were not a console. You may need to set this if you don't want
harness to output more frequent progress messages using carriage returns.
Some consoles may not handle carriage returns properly (which results
in somewhat messy output).
Setting C<HARNESS_COMPILE_TEST> to a true value will make harness attempt
to compile the test using C<perlcc> before running it.
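
These switches are ordinary environment variables, so a driver script
can set them before calling runtests(); the combination below is just
an example:

    $ENV{HARNESS_IGNORE_EXITCODE} = 1;   # trust only the ok/not ok lines
    $ENV{HARNESS_NOTTY}           = 1;   # no carriage-return progress output
    runtests(glob("t/*.t"));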
If C<HARNESS_FILELEAK_IN_DIR> is set to the name of a directory, harness
will check after each test whether new files appeared in that directory,
and report them as

    LEAKED FILES: scr.tmp 0 my.db
If the directory name is relative, it is taken relative to the current
directory at the moment runtests() was called. Putting an absolute path
into C<HARNESS_FILELEAK_IN_DIR> may give more predictable results.
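
For instance, a driver that wants to catch tests leaving droppings in
C</tmp> (the directory is only an example) could do:

    $ENV{HARNESS_FILELEAK_IN_DIR} = "/tmp";
    runtests(glob("t/*.t"));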
The value of C<HARNESS_PERL_SWITCHES> will be prepended to the
switches used to invoke perl on each test. For example, setting
C<HARNESS_PERL_SWITCHES> to "-W" will run all tests with all
warnings enabled.
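
A sketch of how this might be used from a driver script:

    $ENV{HARNESS_PERL_SWITCHES} = "-W";   # adds -W to the default -w
    runtests(glob("t/*.t"));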
If C<HARNESS_COLUMNS> is set, then this value will be used for the
width of the terminal. If it is not set then it will default to
C<COLUMNS>. If this is not set, it will default to 80. Note that users
of Bourne-sh based shells will need to C<export COLUMNS> for this
module to use that variable.
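
Because the width is looked up when Test::Harness is loaded, the
variable has to be set before the module is used; 120 below is an
arbitrary value:

    BEGIN { $ENV{HARNESS_COLUMNS} = 120 }
    use Test::Harness;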
Harness sets C<HARNESS_ACTIVE> before executing the individual tests.
This allows the tests to determine if they are being executed through the
harness or by any other means.
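
Inside a test script this typically looks like the following sketch:

    # print extra diagnostics only when run outside the harness
    print "# running standalone\n" unless $ENV{HARNESS_ACTIVE};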
L<Test> for writing test scripts and also L<Benchmark> for the
underlying timing routines.
Either Tim Bunce or Andreas Koenig, we don't know. What we know for
sure is that it was inspired by Larry Wall's TEST script that came
with perl distributions for ages. Numerous anonymous contributors
exist. The current maintainer is Andreas Koenig.
Test::Harness uses $^X to determine the perl binary to run the tests
with. Test scripts running via the shebang (C<#!>) line may not be
portable because $^X is not consistent for shebang scripts across
platforms. This is no problem when Test::Harness is run with an
absolute path to the perl binary or when $^X can be found in the path.