# -*- Mode: cperl; cperl-indent-level: 4 -*-

our($VERSION, $verbose, $switches, $have_devel_corestack, $curtest,
    $columns, @ISA, @EXPORT, @EXPORT_OK);
$have_devel_corestack = 0;

$ENV{HARNESS_ACTIVE} = 1;
# Some experimental versions of OS/2 build have broken $?
my $ignore_exitcode = $ENV{HARNESS_IGNORE_EXITCODE};

my $files_in_dir = $ENV{HARNESS_FILELEAK_IN_DIR};

my $tests_skipped = 0;
my $subtests_skipped = 0;

@EXPORT    = qw(&runtests);
@EXPORT_OK = qw($verbose $switches);

$columns = $ENV{HARNESS_COLUMNS} || $ENV{COLUMNS} || 80;

sub globdir { opendir DIRH, shift; my @f = readdir DIRH; closedir DIRH; @f }
    my($test,$te,$ok,$next,$max,$pct,$totbonus,@failed,%failedtests);

    # pass -I flags to children
    my $old5lib = $ENV{PERL5LIB};

    # VMS has a 255-byte limit on the length of %ENV entries, so
    # toss the ones that involve perl_root, the install location
        $new5lib = join($Config{path_sep}, grep {!/perl_root/i;} @INC);
        $switches =~ s/-(\S*[A-Z]\S*)/"-$1"/g;
        $new5lib = join($Config{path_sep}, @INC);

    local($ENV{'PERL5LIB'}) = $new5lib;

    my @dir_files = globdir $files_in_dir if defined $files_in_dir;
    my $t_start = new Benchmark;
    while ($test = shift(@tests)) {
        if ($^O eq 'VMS') { $te =~ s/^.*\.t\./[.t./s; }
        my $blank = (' ' x 77);
        my $leader = "$te" . '.' x (20 - length($te));
        $ml = "\r$blank\r$leader"
            if -t STDOUT and not $ENV{HARNESS_NOTTY} and not $verbose;

        open(my $fh, $test) or print "can't open $test. $!\n";
        $s .= " $ENV{'HARNESS_PERL_SWITCHES'}"
            if exists $ENV{'HARNESS_PERL_SWITCHES'};
        $s .= join " ", q[ "-T"], map {qq["-I$_"]} @INC
            if $first =~ /^#!.*\bperl.*-\w*T/;
        close($fh) or print "can't close $test. $!\n";
        my $cmd = ($ENV{'HARNESS_COMPILE_TEST'})
            ? "./perl -I../lib ../utils/perlcc $test "
              . "-run 2>> ./compilelog |"
        $cmd = "MCR $cmd" if $^O eq 'VMS';
        open($fh, $cmd) or print "can't run $test. $!\n";
        $ok = $next = $max = 0;
            if (/^1\.\.([0-9]+) todo([\d\s]+)\;/) {
                for (split(/\s+/, $2)) { $todo{$_} = 1; }
            } elsif (/^1\.\.([0-9]+)(\s*\#\s*[Ss]kip\S*(?>\s+)(.+))?/) {
                $skip_reason = $3 if not $max and defined $3;
            } elsif ($max && /^(not\s+)?ok\b/) {
                if (/^not ok\s*(\d*)/){
                    $this = $1 if $1 > 0;
                    print "${ml}NOK $this" if $ml;
                } elsif (/^ok\s*(\d*) *(\s*\#\s*[Ss]kip\S*(?:(?>\s+)(.+))?)?$/) {
                    $this = $1 if $1 > 0;
                    print "${ml}ok $this/$max" if $ml;
                    $skipped++ if defined $2;
                    $reason = 'unknown reason' if defined $2;
                    $reason = $3 if defined $3;
                    if (defined $reason and defined $skip_reason) {
                        # print "was: '$skip_reason' new '$reason'\n";
                        $skip_reason = 'various reasons'
                            if $skip_reason ne $reason;
                    } elsif (defined $reason) {
                        $skip_reason = $reason;
                    $bonus++, $totbonus++ if $todo{$this};
                } elsif (/^ok\s*(\d*)\s*\#([^\r]*)$/) {
                    $this = $1 if $1 > 0;
                    print "${ml}ok $this/$max" if $ml;
                    # an ok or not ok not matching the 3 cases above...
                    # just ignore it for compatibility with TEST
                    # print "Test output counter mismatch [test $this]\n";
                    # no need to warn probably
                    push @failed, $next..$this-1;
                } elsif ($this < $next) {
                    # we have seen more "ok" lines than the number suggests
                    print "Confused test output: test $this answered after test ", $next-1, "\n";
            } elsif (/^Bail out!\s*(.*)/i) { # magic words
                die "FAILED--Further testing stopped" . ($1 ? ": $1\n" : ".\n");
        close($fh); # must close to reap child resource values
        my $wstatus = $ignore_exitcode ? 0 : $?; # Can trust $? ?
        $estatus = ($^O eq 'VMS'
                    ? eval 'use vmsish "status"; $estatus = $?'
            my ($failed, $canon, $percent) = ('??', '??');
            printf "${ml}dubious\n\tTest returned status $estatus (wstat %d, 0x%x)\n",
            print "\t\t(VMS status is $estatus)\n" if $^O eq 'VMS';
            if (corestatus($wstatus)) { # until we have a wait module
                if ($have_devel_corestack) {
                    Devel::CoreStack::stack($^X);
                    print "\ttest program seems to have generated a core\n";
            if ($next == $max + 1 and not @failed) {
                print "\tafter all the subtests completed successfully\n";
                $failed = 0; # But we do not set $canon!
                push @failed, $next..$max;
                (my $txt, $canon) = canonfailed($max,$skipped,@failed);
                $percent = 100*(scalar @failed)/$max;
            $failedtests{$test} = { canon => $canon, max => $max || '??',
                                    name => $test, percent => $percent,
                                    estat => $estatus, wstat => $wstatus,
        } elsif ($ok == $max && $next == $max+1) {
            if ($max and $skipped + $bonus) {
                push(@msg, "$skipped/$max skipped: $skip_reason")
                push(@msg, "$bonus/$max unexpectedly succeeded")
                print "${ml}ok, ".join(', ', @msg)."\n";
            } elsif (defined $skip_reason) {
                print "skipped: $skip_reason\n";
                print "skipped test on this platform\n";
            push @failed, $next..$max;
            my ($txt, $canon) = canonfailed($max,$skipped,@failed);
            $failedtests{$test} = { canon => $canon, max => $max,
                                    failed => scalar @failed,
                                    name => $test, percent => 100*(scalar @failed)/$max,
                                    estat => '', wstat => '',
            print "Don't know which tests failed: got $ok ok, expected $max\n";
            $failedtests{$test} = { canon => '??', max => $max,
                                    name => $test, percent => undef,
                                    estat => '', wstat => '',
        } elsif ($next == 0) {
            print "FAILED before any test output arrived\n";
            $failedtests{$test} = { canon => '??', max => '??',
                                    name => $test, percent => undef,
                                    estat => '', wstat => '',
        $subtests_skipped += $skipped;
        if (defined $files_in_dir) {
            my @new_dir_files = globdir $files_in_dir;
            if (@new_dir_files != @dir_files) {
                @f{@new_dir_files} = (1) x @new_dir_files;
                delete @f{@dir_files};
                my @f = sort keys %f;
                print "LEAKED FILES: @f\n";
                @dir_files = @new_dir_files;
    my $t_total = timediff(new Benchmark, $t_start);

    if (defined $old5lib) {
        $ENV{PERL5LIB} = $old5lib;
        delete $ENV{PERL5LIB};

    $bonusmsg = (" ($totbonus subtest".($totbonus>1?'s':'').
                 " UNEXPECTEDLY SUCCEEDED)")
    if ($tests_skipped) {
        $bonusmsg .= ", $tests_skipped test" . ($tests_skipped != 1 ? 's' : '');
        if ($subtests_skipped) {
            $bonusmsg .= " and $subtests_skipped subtest"
                . ($subtests_skipped != 1 ? 's' : '');
        $bonusmsg .= ' skipped';
    elsif ($subtests_skipped) {
        $bonusmsg .= ", $subtests_skipped subtest"
            . ($subtests_skipped != 1 ? 's' : '')
    if ($bad == 0 && $totmax) {
        print "All tests successful$bonusmsg.\n";
        die "FAILED--no tests were run for some reason.\n";
    } elsif ($totmax==0) {
        my $blurb = $total==1 ? "script" : "scripts";
        die "FAILED--$total test $blurb could be run, alas--no output ever seen\n";
        $pct = sprintf("%.2f", $good / $total * 100);
        my $subpct = sprintf " %d/%d subtests failed, %.2f%% okay.",
            $totmax - $totok, $totmax, 100*$totok/$totmax;
        # First, figure out max length of test names
        my $failed_str = "Failed Test";
        my $middle_str = " Status Wstat Total Fail Failed ";
        my $list_str = "List of Failed";

        my $max_namelen = length($failed_str);
        foreach $script (keys %failedtests) {
                (length $failedtests{$script}->{name} > $max_namelen) ?
                length $failedtests{$script}->{name} : $max_namelen;

        my $list_len = $columns - length($middle_str) - $max_namelen;
        if ($list_len < length($list_str)) {
            $list_len = length($list_str);
            $max_namelen = $columns - length($middle_str) - $list_len;
            if ($max_namelen < length($failed_str)) {
                $max_namelen = length($failed_str);
                $columns = $max_namelen + length($middle_str) + $list_len;
        my $fmt_top = "format STDOUT_TOP =\n"
                      . sprintf("%-${max_namelen}s", $failed_str)

        my $fmt = "format STDOUT =\n"
                  . "@" . "<" x ($max_namelen - 1)
                  . " @>> @>>>> @>>>> @>>> ^##.##% "
                  . "^" . "<" x ($list_len - 1) . "\n"
                  . '{ $curtest->{name}, $curtest->{estat},'
                  . ' $curtest->{wstat}, $curtest->{max},'
                  . ' $curtest->{failed}, $curtest->{percent},'
                  . ' $curtest->{canon}'
                  . "~~" . " " x ($columns - $list_len - 2) . "^"
                  . "<" x ($list_len - 1) . "\n"
                  . '$curtest->{canon}'

        # Now write to formats
        for $script (sort keys %failedtests) {
            $curtest = $failedtests{$script};

        $bonusmsg =~ s/^,\s*//;
        print "$bonusmsg.\n" if $bonusmsg;
        die "Failed $bad/$total test scripts, $pct% okay.$subpct\n";

    printf("Files=%d, Tests=%d, %s\n", $files, $totmax, timestr($t_total, 'nop'));

    return ($bad == 0 && $totmax);
my $tried_devel_corestack;

    eval {require 'wait.ph'};
    my $ret = defined &WCOREDUMP ? WCOREDUMP($st) : $st & 0200;

    eval { require Devel::CoreStack; $have_devel_corestack++ }
        unless $tried_devel_corestack++;
sub canonfailed ($@) {
    my($max,$skipped,@failed) = @_;
    @failed = sort {$a <=> $b} grep !$seen{$_}++, @failed;
    my $failed = @failed;

    my $last = $min = shift @failed;
        for (@failed, $failed[-1]) { # don't forget the last one
            if ($_ > $last+1 || $_ == $last) {
                    push @canon, "$min-$last";
        push @result, "FAILED tests @canon\n";
        push @result, "FAILED test $last\n";

    push @result, "\tFailed $failed/$max tests, ";
    push @result, sprintf("%.2f",100*(1-$failed/$max)), "% okay";
    my $ender = 's' x ($skipped > 1);
    my $good = $max - $failed - $skipped;
    my $goodper = sprintf("%.2f",100*($good/$max));
    push @result, " (-$skipped skipped test$ender: $good okay, $goodper%)" if $skipped;

    my $txt = join "", @result;
Test::Harness - run perl standard test scripts with statistics

(By using the L<Test> module, you can write test scripts without
knowing the exact output this module expects. However, if you need to
know the specifics, read on!)
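
For instance, a minimal test script written with the L<Test> module
might look like this (an illustrative sketch; the two subtests are
only an example):

    use Test;
    BEGIN { plan tests => 2 }

    ok(1);            # first subtest passes unconditionally
    ok(1 + 1, 2);     # second subtest compares got and expected
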
Perl test scripts print to standard output C<"ok N"> for each single
test, where C<N> is an increasing sequence of integers. The first line
output by a standard test script is C<"1..M"> with C<M> being the
number of tests that should be run within the test script.
Test::Harness::runtests(@tests) runs all the test scripts named as
arguments and checks standard output for the expected C<"ok N">
strings.
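
For example, a script that plans two tests and passes both would print
something like this (a minimal sketch):

    1..2
    ok 1
    ok 2
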
After all tests have been performed, runtests() prints some
performance statistics that are computed by the Benchmark module.
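
A typical way to drive the harness from a small script might be the
following (an illustrative sketch; the F<t/*.t> file layout is only an
assumption):

    use Test::Harness;

    runtests(glob 't/*.t');   # runtests() is exported by default
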

=head2 The test script output

Any output from the test script to standard error is ignored and
bypassed, and thus will be seen by the user. Lines written to standard
output containing C</^(not\s+)?ok\b/> are interpreted as feedback for
runtests(). All other lines are discarded.

It is tolerated if the test numbers after C<ok> are omitted. In this
case Test::Harness temporarily maintains its own counter until the
script supplies test numbers again. So the following test script
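
    print <<END;
    1..6
    not ok
    ok
    not ok
    ok
    ok
    END

will generate

    FAILED tests 1, 3, 6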
    Failed 3/6 tests, 50.00% okay

The global variable $Test::Harness::verbose is exportable and can be
used to let runtests() display the standard output of the script
without otherwise altering its behavior.

The global variable $Test::Harness::switches is exportable and can be
used to set perl command line options used for running the test
script(s). The default value is C<-w>.
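
Both variables can be imported and set before calling runtests(), for
example like this (an illustrative sketch):

    use Test::Harness qw(runtests $verbose $switches);

    $verbose  = 1;        # echo each test script's output
    $switches = '-w -T';  # add taint checking to the default warnings
    runtests(@ARGV);
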
If the standard output line contains the substring C< # Skip> (with
variations in spacing and case) after C<ok> or C<ok NUMBER>, it is
counted as a skipped test. In no other circumstance is anything
allowed to follow C<ok> or C<ok NUMBER>. If the whole test script
succeeds, the count of skipped tests is included in the generated
output.
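
For example, a single skipped subtest might be reported on a line like
this one (an illustrative sketch):

    ok 23 # Skip no database available
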
C<Test::Harness> reports the text after C< # Skip\S*\s+> as the reason
for skipping. One can also include a similar explanation in a C<1..0>
line emitted if the test script is skipped completely:

    1..0 # Skipped: no leverage found

As an emergency measure, a test script can decide that further tests
are useless (e.g. missing dependencies) and testing should stop
immediately. In that case the test script prints the magic words

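    Bail out!
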
to standard output. Any message after these words will be displayed by
C<Test::Harness> as the reason why testing is stopped.
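
For example, a test script might abort the whole run like this (an
illustrative sketch):

    print "Bail out! Couldn't connect to the test database.\n";
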
C<&runtests> is exported by Test::Harness by default.

=item C<All tests successful.\nFiles=%d, Tests=%d, %s>

If all tests are successful, some statistics about the performance are
printed.

=item C<FAILED tests %s\n\tFailed %d/%d tests, %.2f%% okay.>

For any single script that has failing subtests, statistics like the
above are printed.

=item C<Test returned status %d (wstat %d)>

For scripts that return a non-zero exit status, both C<$? E<gt>E<gt> 8>
and C<$?> are printed in a message similar to the above.

=item C<Failed 1 test, %.2f%% okay. %s>

=item C<Failed %d/%d tests, %.2f%% okay. %s>

If not all tests were successful, the script dies with one of the
above messages.

=item C<FAILED--Further testing stopped%s>

If a single subtest decides that further testing will not make sense,
the script dies with this message.

Setting C<HARNESS_IGNORE_EXITCODE> makes harness ignore the exit status
of child processes.

Setting C<HARNESS_NOTTY> to a true value forces it to behave as though
STDOUT were not a console. You may need to set this if you don't want
harness to output more frequent progress messages using carriage returns.
Some consoles may not handle carriage returns properly (which results
in somewhat messy output).

Setting C<HARNESS_COMPILE_TEST> to a true value will make harness attempt
to compile the test using C<perlcc> before running it.

If C<HARNESS_FILELEAK_IN_DIR> is set to the name of a directory, harness
will check after each test whether new files appeared in that directory,
and report them as

    LEAKED FILES: scr.tmp 0 my.db

If relative, the directory name is interpreted with respect to the
current directory at the moment runtests() was called. Putting an
absolute path into C<HARNESS_FILELEAK_IN_DIR> may give more
predictable results.

The value of C<HARNESS_PERL_SWITCHES> will be prepended to the
switches used to invoke perl on each test. For example, setting
C<HARNESS_PERL_SWITCHES> to "-W" will run all tests with all
warnings enabled.

If C<HARNESS_COLUMNS> is set, then this value will be used for the
width of the terminal. If it is not set then it will default to
C<COLUMNS>. If this is not set, it will default to 80. Note that users
of Bourne-sh based shells will need to C<export COLUMNS> for this
module to use that variable.

Harness sets C<HARNESS_ACTIVE> before executing the individual tests.
This allows the tests to determine if they are being executed through the
harness or by any other means.
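
For example, a test script could check for the harness like this (an
illustrative sketch):

    if ($ENV{HARNESS_ACTIVE}) {
        # running under Test::Harness; keep diagnostics terse
    } else {
        # running stand-alone; feel free to be chatty
    }
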
L<Test> for writing test scripts and also L<Benchmark> for the
underlying timing routines.

Either Tim Bunce or Andreas Koenig, we don't know. What we know for
sure is that it was inspired by Larry Wall's TEST script that came
with perl distributions for ages. Numerous anonymous contributors
exist. The current maintainer is Andreas Koenig.

Test::Harness uses $^X to determine the perl binary to run the tests
with. Test scripts running via the shebang (C<#!>) line may not be
portable because $^X is not consistent for shebang scripts across
platforms. This is no problem when Test::Harness is run with an
absolute path to the perl binary or when $^X can be found in the path.