# -*- Mode: cperl; cperl-indent-level: 4 -*-

package Test::Harness;

use Exporter;
use Benchmark;
use Config;

our($VERSION, $verbose, $switches, $have_devel_corestack, $curtest,
    $columns, @ISA, @EXPORT, @EXPORT_OK);
$have_devel_corestack = 0;

$ENV{HARNESS_ACTIVE} = 1;

# Some experimental versions of OS/2 build have broken $?
my $ignore_exitcode = $ENV{HARNESS_IGNORE_EXITCODE};

my $files_in_dir = $ENV{HARNESS_FILELEAK_IN_DIR};

my $tests_skipped = 0;
my $subtests_skipped = 0;

@ISA = ('Exporter');
@EXPORT = qw(&runtests);
@EXPORT_OK = qw($verbose $switches);

$verbose = 0;
$switches = "-w";
$columns = $ENV{HARNESS_COLUMNS} || $ENV{COLUMNS} || 80;

sub globdir { opendir DIRH, shift; my @f = readdir DIRH; closedir DIRH; @f }

sub runtests {
    my(@tests) = @_;
    local($|) = 1;
    my($test,$te,$ok,$next,$max,$pct,$totbonus,@failed,%failedtests);
    my $totmax = 0;
    my $totok = 0;
    my $files = 0;
    my $bad = 0;
    my $good = 0;
    my $total = @tests;

    # pass -I flags to children
    my $old5lib = $ENV{PERL5LIB};

    # VMS has a 255-byte limit on the length of %ENV entries, so
    # toss the ones that involve perl_root, the install location
    # for VMS
    my $new5lib;
    if ($^O eq 'VMS') {
        $new5lib = join($Config{path_sep}, grep {!/perl_root/i;} @INC);
        $switches =~ s/-(\S*[A-Z]\S*)/"-$1"/g;
    }
    else {
        $new5lib = join($Config{path_sep}, @INC);
    }

    local($ENV{'PERL5LIB'}) = $new5lib;

    my @dir_files = globdir $files_in_dir if defined $files_in_dir;
    my $t_start = new Benchmark;
    while ($test = shift(@tests)) {
        $te = $test;
        chop($te);
        if ($^O eq 'VMS') { $te =~ s/^.*\.t\./[.t./s; }
        my $blank = (' ' x 77);
        my $leader = "$te" . '.' x (20 - length($te));
        my $ml = "";
        $ml = "\r$blank\r$leader"
            if -t STDOUT and not $ENV{HARNESS_NOTTY} and not $verbose;
        print $leader;
        # Peek at the script's first line so taint-mode (-T) scripts are
        # rerun with "-T" and explicit -I flags.
        open(my $fh, $test) or print "can't open $test. $!\n";
        my $first = <$fh>;
        my $s = $switches;
        $s .= " $ENV{'HARNESS_PERL_SWITCHES'}"
            if exists $ENV{'HARNESS_PERL_SWITCHES'};
        $s .= join " ", q[ "-T"], map {qq["-I$_"]} @INC
            if $first =~ /^#!.*\bperl.*-\w*T/;
        close($fh) or print "can't close $test. $!\n";
        my $cmd = ($ENV{'HARNESS_COMPILE_TEST'})
            ? "./perl -I../lib ../utils/perlcc $test "
              . "-run 2>> ./compilelog |"
            : "$^X $s $test|";
        $cmd = "MCR $cmd" if $^O eq 'VMS';
        open($fh, $cmd) or print "can't run $test. $!\n";
        $ok = $next = $max = 0;
        my %todo = ();
        my $bonus = 0;
        my $skipped = 0;
        my $skip_reason;

        while (<$fh>) {
            if ($verbose) {
                print $_;
            }
            if (/^1\.\.([0-9]+) todo([\d\s]+)\;/) {
                $max = $1;
                for (split(/\s+/, $2)) { $todo{$_} = 1; }
                $totmax += $max;
                $files++;
                $next = 1;
            } elsif (/^1\.\.([0-9]+)(\s*\#\s*[Ss]kip\S*(?>\s+)(.+))?/) {
                $max = $1;
                $totmax += $max;
                $files++;
                $next = 1;
                $skip_reason = $3 if not $max and defined $3;
            } elsif ($max && /^(not\s+)?ok\b/) {
                my $this = $next;
                if (/^not ok\s*(\d*)/){
                    $this = $1 if $1 > 0;
                    print "${ml}NOK $this" if $ml;
                    if (!$todo{$this}) {
                        push @failed, $this;
                    } else {
                        $ok++;
                        $totok++;
                    }
                } elsif (/^ok\s*(\d*) *(\s*\#\s*[Ss]kip\S*(?:(?>\s+)(.+))?)?$/) {
                    $this = $1 if $1 > 0;
                    print "${ml}ok $this/$max" if $ml;
                    $ok++;
                    $totok++;
                    $skipped++ if defined $2;
                    my $reason;
                    $reason = 'unknown reason' if defined $2;
                    $reason = $3 if defined $3;
                    if (defined $reason and defined $skip_reason) {
                        # print "was: '$skip_reason' new '$reason'\n";
                        $skip_reason = 'various reasons'
                            if $skip_reason ne $reason;
                    } elsif (defined $reason) {
                        $skip_reason = $reason;
                    }
                    $bonus++, $totbonus++ if $todo{$this};
                } elsif (/^ok\s*(\d*)\s*\#([^\r]*)$/) {
                    $this = $1 if $1 > 0;
                    print "${ml}ok $this/$max" if $ml;
                    $ok++;
                    $totok++;
                } else {
                    # an ok or not ok not matching the 3 cases above...
                    # just ignore it for compatibility with TEST
                    next;
                }
                if ($this > $next) {
                    # print "Test output counter mismatch [test $this]\n";
                    # no need to warn probably
                    push @failed, $next..$this-1;
                } elsif ($this < $next) {
                    #we have seen more "ok" lines than the number suggests
                    print "Confused test output: test $this answered after test ", $next-1, "\n";
                    $next = $this;
                }
                $next = $this + 1;
            } elsif (/^Bail out!\s*(.*)/i) { # magic words
                die "FAILED--Further testing stopped" . ($1 ? ": $1\n" : ".\n");
            }
        }
        close($fh); # must close to reap child resource values
        my $wstatus = $ignore_exitcode ? 0 : $?;    # Can trust $? ?
        my $estatus;
        $estatus = ($^O eq 'VMS'
                    ? eval 'use vmsish "status"; $estatus = $?'
                    : $wstatus >> 8);
        if ($wstatus) {
            my ($failed, $canon, $percent) = ('??', '??');
            printf "${ml}dubious\n\tTest returned status $estatus (wstat %d, 0x%x)\n",
                   $wstatus, $wstatus;
            print "\t\t(VMS status is $estatus)\n" if $^O eq 'VMS';
            if (corestatus($wstatus)) { # until we have a wait module
                if ($have_devel_corestack) {
                    Devel::CoreStack::stack($^X);
                } else {
                    print "\ttest program seems to have generated a core\n";
                }
            }
            $bad++;
            if ($max) {
                if ($next == $max + 1 and not @failed) {
                    print "\tafter all the subtests completed successfully\n";
                    $percent = 0;
                    $failed = 0; # But we do not set $canon!
                } else {
                    push @failed, $next..$max;
                    $failed = @failed;
                    (my $txt, $canon) = canonfailed($max,$skipped,@failed);
                    $percent = 100*(scalar @failed)/$max;
                    print "DIED. ",$txt;
                }
            }
            $failedtests{$test} = { canon => $canon, max => $max || '??',
                                    failed => $failed,
                                    name => $test, percent => $percent,
                                    estat => $estatus, wstat => $wstatus,
                                  };
        } elsif ($ok == $max && $next == $max+1) {
            if ($max and $skipped + $bonus) {
                my @msg;
                push(@msg, "$skipped/$max skipped: $skip_reason")
                    if $skipped;
                push(@msg, "$bonus/$max unexpectedly succeeded")
                    if $bonus;
                print "${ml}ok, ".join(', ', @msg)."\n";
            } elsif ($max) {
                print "${ml}ok\n";
            } elsif (defined $skip_reason) {
                print "skipped: $skip_reason\n";
                $tests_skipped++;
            } else {
                print "skipped test on this platform\n";
                $tests_skipped++;
            }
            $good++;
        } elsif ($max) {
            if ($next <= $max) {
                push @failed, $next..$max;
            }
            if (@failed) {
                my ($txt, $canon) = canonfailed($max,$skipped,@failed);
                print "${ml}$txt";
                $failedtests{$test} = { canon => $canon, max => $max,
                                        failed => scalar @failed,
                                        name => $test, percent => 100*(scalar @failed)/$max,
                                        estat => '', wstat => '',
                                      };
            } else {
                print "Don't know which tests failed: got $ok ok, expected $max\n";
                $failedtests{$test} = { canon => '??', max => $max,
                                        failed => '??',
                                        name => $test, percent => undef,
                                        estat => '', wstat => '',
                                      };
            }
            $bad++;
        } elsif ($next == 0) {
            print "FAILED before any test output arrived\n";
            $bad++;
            $failedtests{$test} = { canon => '??', max => '??',
                                    failed => '??',
                                    name => $test, percent => undef,
                                    estat => '', wstat => '',
                                  };
        }
        $subtests_skipped += $skipped;
        if (defined $files_in_dir) {
            my @new_dir_files = globdir $files_in_dir;
            if (@new_dir_files != @dir_files) {
                my %f;
                @f{@new_dir_files} = (1) x @new_dir_files;
                delete @f{@dir_files};
                my @f = sort keys %f;
                print "LEAKED FILES: @f\n";
                @dir_files = @new_dir_files;
            }
        }
    }
    my $t_total = timediff(new Benchmark, $t_start);

    if ($^O eq 'VMS') {
        if (defined $old5lib) {
            $ENV{PERL5LIB} = $old5lib;
        } else {
            delete $ENV{PERL5LIB};
        }
    }
    my $bonusmsg = '';
    $bonusmsg = (" ($totbonus subtest".($totbonus>1?'s':'').
                 " UNEXPECTEDLY SUCCEEDED)")
        if $totbonus;
    if ($tests_skipped) {
        $bonusmsg .= ", $tests_skipped test" . ($tests_skipped != 1 ? 's' : '');
        if ($subtests_skipped) {
            $bonusmsg .= " and $subtests_skipped subtest"
                         . ($subtests_skipped != 1 ? 's' : '');
        }
        $bonusmsg .= ' skipped';
    }
    elsif ($subtests_skipped) {
        $bonusmsg .= ", $subtests_skipped subtest"
                     . ($subtests_skipped != 1 ? 's' : '')
                     . " skipped";
    }
    if ($bad == 0 && $totmax) {
        print "All tests successful$bonusmsg.\n";
    } elsif ($total==0) {
        die "FAILED--no tests were run for some reason.\n";
    } elsif ($totmax==0) {
        my $blurb = $total==1 ? "script" : "scripts";
        die "FAILED--$total test $blurb could be run, alas--no output ever seen\n";
    } else {
        $pct = sprintf("%.2f", $good / $total * 100);
        my $subpct = sprintf " %d/%d subtests failed, %.2f%% okay.",
            $totmax - $totok, $totmax, 100*$totok/$totmax;
        my $script;
        # Create the output formats for the summary of failed tests.
        # First, figure out max length of test names
        my $failed_str = "Failed Test";
        my $middle_str = " Status Wstat Total Fail Failed ";
        my $list_str = "List of Failed";
        my $max_namelen = length($failed_str);

        foreach $script (keys %failedtests) {
            $max_namelen =
                (length $failedtests{$script}->{name} > $max_namelen) ?
                 length $failedtests{$script}->{name} : $max_namelen;
        }

        my $list_len = $columns - length($middle_str) - $max_namelen;
        if ($list_len < length($list_str)) {
            $list_len = length($list_str);
            $max_namelen = $columns - length($middle_str) - $list_len;
            if ($max_namelen < length($failed_str)) {
                $max_namelen = length($failed_str);
                $columns = $max_namelen + length($middle_str) + $list_len;
            }
        }

        my $fmt_top = "format STDOUT_TOP =\n"
                      . sprintf("%-${max_namelen}s", $failed_str)
                      . $middle_str
                      . $list_str . "\n"
                      . "-" x $columns
                      . "\n.\n";
        my $fmt = "format STDOUT =\n"
                  . "@" . "<" x ($max_namelen - 1)
                  . " @>> @>>>> @>>>> @>>> ^##.##% "
                  . "^" . "<" x ($list_len - 1) . "\n"
                  . '{ $curtest->{name}, $curtest->{estat},'
                  . ' $curtest->{wstat}, $curtest->{max},'
                  . ' $curtest->{failed}, $curtest->{percent},'
                  . ' $curtest->{canon}'
                  . "\n}\n"
                  . "~~" . " " x ($columns - $list_len - 2) . "^"
                  . "<" x ($list_len - 1) . "\n"
                  . '$curtest->{canon}'
                  . "\n.\n";

        eval $fmt_top;
        die $@ if $@;
        eval $fmt;
        die $@ if $@;

        # Now write to formats
        for $script (sort keys %failedtests) {
            $curtest = $failedtests{$script};
            write;
        }
        if ($bad) {
            $bonusmsg =~ s/^,\s*//;
            print "$bonusmsg.\n" if $bonusmsg;
            die "Failed $bad/$total test scripts, $pct% okay.$subpct\n";
        }
    }
    printf("Files=%d, Tests=%d, %s\n", $files, $totmax, timestr($t_total, 'nop'));

    return ($bad == 0 && $totmax) ;
}

my $tried_devel_corestack;

sub corestatus {
    my($st) = @_;

    eval {require 'wait.ph'};
    my $ret = defined &WCOREDUMP ? WCOREDUMP($st) : $st & 0200;

    eval { require Devel::CoreStack; $have_devel_corestack++ }
        unless $tried_devel_corestack++;

    $ret;
}

sub canonfailed ($@) {
    my($max,$skipped,@failed) = @_;
    my %seen;
    @failed = sort {$a <=> $b} grep !$seen{$_}++, @failed;
    my $failed = @failed;
    my @result = ();
    my @canon = ();
    my $min;
    my $last = $min = shift @failed;
    my $canon;
    if (@failed) {
        for (@failed, $failed[-1]) { # don't forget the last one
            if ($_ > $last+1 || $_ == $last) {
                if ($min == $last) {
                    push @canon, $last;
                } else {
                    push @canon, "$min-$last";
                }
                $min = $_;
            }
            $last = $_;
        }
        local $" = ", ";
        push @result, "FAILED tests @canon\n";
        $canon = "@canon";
    } else {
        push @result, "FAILED test $last\n";
        $canon = $last;
    }

    push @result, "\tFailed $failed/$max tests, ";
    push @result, sprintf("%.2f",100*(1-$failed/$max)), "% okay";
    my $ender = 's' x ($skipped > 1);
    my $good = $max - $failed - $skipped;
    my $goodper = sprintf("%.2f",100*($good/$max));
    push @result, " (-$skipped skipped test$ender: $good okay, $goodper%)" if $skipped;
    push @result, "\n";
    my $txt = join "", @result;
    ($txt, $canon);
}

1;
__END__

=head1 NAME

Test::Harness - run perl standard test scripts with statistics

=head1 SYNOPSIS

use Test::Harness;

runtests(@test_files);

=head1 DESCRIPTION

(By using the L<Test> module, you can write test scripts without
knowing the exact output this module expects. However, if you need to
know the specifics, read on!)

Perl test scripts print to standard output C<"ok N"> for each single
test, where C<N> is an increasing sequence of integers. The first line
output by a standard test script is C<"1..M"> with C<M> being the
number of tests that should be run within the test
script. Test::Harness::runtests(@tests) runs all the test scripts
named as arguments and checks standard output for the expected
C<"ok N"> strings.

After all tests have been performed, runtests() prints some
performance statistics that are computed by the Benchmark module.

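For example, a minimal driver script might look like the following
sketch (the F<t/*.t> file names are only placeholders):

    use Test::Harness;

    # Dies with a diagnostic if any script fails; otherwise prints
    # "All tests successful." followed by timing statistics.
    runtests('t/basic.t', 't/more.t');
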
=head2 The test script output

Any output from the test script to standard error is ignored and
bypassed, and will thus be seen by the user. Lines written to standard
output containing C</^(not\s+)?ok\b/> are interpreted as feedback for
runtests(). All other lines are discarded.

It is tolerated if the test numbers after C<ok> are omitted. In this
case Test::Harness temporarily maintains its own counter until the
script supplies test numbers again. So the following test script

    print <<END;
    1..6
    not ok
    ok
    not ok
    ok
    ok
    END

will generate

    FAILED tests 1, 3, 6
    Failed 3/6 tests, 50.00% okay

The global variable $Test::Harness::verbose is exportable and can be
used to let runtests() display the standard output of the script
without altering the behavior otherwise.

The global variable $Test::Harness::switches is exportable and can be
used to set perl command line options used for running the test
script(s). The default value is C<-w>.

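For instance, a driver can import and set both variables before calling
runtests(); the particular values below are only illustrative:

    use Test::Harness qw(runtests $verbose $switches);

    $verbose  = 1;            # echo each test script's output
    $switches = "-w -Ilib";   # switches passed to perl for every script
    runtests(@ARGV);
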
If the standard output line contains the substring C< # Skip> (with
variations in spacing and case) after C<ok> or C<ok NUMBER>, it is
counted as a skipped test. In no other circumstance is anything
allowed to follow C<ok> or C<ok NUMBER>. If the whole test script
succeeds, the count of skipped tests is included in the generated
output.

C<Test::Harness> reports the text after C< # Skip\S*\s+> as a reason
for skipping. The same kind of explanation can be given in a C<1..0>
line emitted if the test script is skipped completely:

    1..0 # Skipped: no leverage found

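A test script can likewise mark an individual test as skipped by
appending C<# Skip> and a reason to its C<ok> line, for example (the
reason text is only illustrative):

    print "1..2\n";
    print "ok 1\n";
    print "ok 2 # Skip no database available\n";
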
As an emergency measure, a test script can decide that further tests
are useless (e.g. missing dependencies) and that testing should stop
immediately. In that case the test script prints the magic words

    Bail out!

to standard output. Any message after these words will be displayed by
C<Test::Harness> as the reason why testing is stopped.

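For example, a script might print (the particular reason is, of course,
up to the script):

    print "1..4\n";
    print "ok 1\n";
    print "Bail out! Cannot connect to the test database.\n";
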
=head1 EXPORT

C<&runtests> is exported by Test::Harness by default.

=head1 DIAGNOSTICS

=over 4

=item C<All tests successful.\nFiles=%d, Tests=%d, %s>

If all tests are successful some statistics about the performance are
printed.

=item C<FAILED tests %s\n\tFailed %d/%d tests, %.2f%% okay.>

For any single script that has failing subtests, statistics like the
above are printed for that script.

=item C<Test returned status %d (wstat %d)>

For scripts that return a non-zero exit status, both C<$? E<gt>E<gt> 8>
and C<$?> are printed in a message similar to the above.

=item C<Failed 1 test, %.2f%% okay. %s>

=item C<Failed %d/%d tests, %.2f%% okay. %s>

If not all tests were successful, the script dies with one of the
above messages.

=item C<FAILED--Further testing stopped%s>

If a single subtest decides that further testing will not make sense,
the script dies with this message.

=back

=head1 ENVIRONMENT

Setting C<HARNESS_IGNORE_EXITCODE> makes harness ignore the exit status
of the test scripts.

Setting C<HARNESS_NOTTY> to a true value forces harness to behave as though
STDOUT were not a console. You may need to set this if you don't want
harness to output more frequent progress messages using carriage returns.
Some consoles may not handle carriage returns properly (which results
in a somewhat messy output).

Setting C<HARNESS_COMPILE_TEST> to a true value will make harness attempt
to compile the test using C<perlcc> before running it.

If C<HARNESS_FILELEAK_IN_DIR> is set to the name of a directory, harness
will check after each test whether new files appeared in that directory,
and report them as

    LEAKED FILES: scr.tmp 0 my.db

If relative, the directory name is interpreted with respect to the current
directory at the moment runtests() was called. Putting an absolute path
into C<HARNESS_FILELEAK_IN_DIR> may give more predictable results.

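Note that C<HARNESS_FILELEAK_IN_DIR> is read when Test::Harness is
loaded, so it must be set before the module is loaded; a sketch (the
directory name is only a placeholder):

    BEGIN { $ENV{HARNESS_FILELEAK_IN_DIR} = "t/tmp" }
    use Test::Harness;

    runtests(@ARGV);
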
The value of C<HARNESS_PERL_SWITCHES> will be prepended to the
switches used to invoke perl on each test. For example, setting
C<HARNESS_PERL_SWITCHES> to "-W" will run all tests with all
warnings enabled.

If C<HARNESS_COLUMNS> is set, then this value will be used for the
width of the terminal. If it is not set then it will default to
C<COLUMNS>. If this is not set, it will default to 80. Note that users
of Bourne-sh based shells will need to C<export COLUMNS> for this
module to use that variable.

Harness sets C<HARNESS_ACTIVE> before executing the individual tests.
This allows the tests to determine if they are being executed through the
harness or by any other means.

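For example, a test script might emit extra diagnostics only when it is
run by hand rather than under the harness (the diagnostic text is just
an illustration):

    unless ($ENV{HARNESS_ACTIVE}) {
        print "# not running under Test::Harness\n";
    }
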
=head1 SEE ALSO

L<Test> for writing test scripts and also L<Benchmark> for the
underlying timing routines.

=head1 AUTHORS

Either Tim Bunce or Andreas Koenig, we don't know. What we know for
sure is that it was inspired by Larry Wall's TEST script that came
with perl distributions for ages. Numerous anonymous contributors
exist. The current maintainer is Andreas Koenig.

=head1 BUGS

Test::Harness uses $^X to determine the perl binary to run the tests
with. Test scripts running via the shebang (C<#!>) line may not be
portable because $^X is not consistent for shebang scripts across
platforms. This is no problem when Test::Harness is run with an
absolute path to the perl binary or when $^X can be found in the path.