our($VERSION, $verbose, $switches, $have_devel_corestack, $curtest,
    @ISA, @EXPORT, @EXPORT_OK);
$have_devel_corestack = 0;

$ENV{HARNESS_ACTIVE} = 1;

# Some experimental versions of the OS/2 build have a broken $?
my $ignore_exitcode = $ENV{HARNESS_IGNORE_EXITCODE};

my $files_in_dir = $ENV{HARNESS_FILELEAK_IN_DIR};

my $tests_skipped = 0;
my $subtests_skipped = 0;

@ISA = ('Exporter');
@EXPORT = qw(&runtests);
@EXPORT_OK = qw($verbose $switches);
format STDOUT_TOP =
Failed Test  Status Wstat Total Fail  Failed  List of failed
-------------------------------------------------------------------------------
.

format STDOUT =
@<<<<<<<<<<<<<< @>> @>>>> @>>>> @>>> ^##.##%  ^<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
{ $curtest->{name},
  $curtest->{estat},
  $curtest->{wstat},
  $curtest->{max},
  $curtest->{failed},
  $curtest->{percent},
  $curtest->{canon}
}
~~                                            ^<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
                                              $curtest->{canon}
.
# Return all entries of the directory named by the argument.
sub globdir { opendir DIRH, shift; my @f = readdir DIRH; closedir DIRH; @f }

sub runtests {
    my(@tests) = @_;

    my($test,$te,$ok,$next,$max,$pct,$totok,$totbonus,@failed,%failedtests);
    # pass -I flags to children
    my $old5lib = $ENV{PERL5LIB};

    # VMS has a 255-byte limit on the length of %ENV entries, so
    # toss the ones that involve perl_root, the install location
    my $new5lib;
    if ($^O eq 'VMS') {
        $new5lib = join($Config{path_sep}, grep {!/perl_root/i;} @INC);
        # quote switches containing uppercase letters so DCL
        # does not downcase them
        $switches =~ s/-(\S*[A-Z]\S*)/"-$1"/g;
    }
    else {
        $new5lib = join($Config{path_sep}, @INC);
    }

    local($ENV{'PERL5LIB'}) = $new5lib;

    my @dir_files;
    @dir_files = globdir $files_in_dir if defined $files_in_dir;
    my $t_start = new Benchmark;
    while ($test = shift(@tests)) {
        $te = $test;
        chop($te);    # drop the trailing 't' of '.t' for display
        if ($^O eq 'VMS') { $te =~ s/^.*\.t\./[.t./; }
        my $blank = (' ' x 77);
        my $leader = "$te" . '.' x (20 - length($te));
        my $ml = "";
        $ml = "\r$blank\r$leader"
            if -t STDOUT and not $ENV{HARNESS_NOTTY} and not $verbose;
        print $leader;
        my $fh = new FileHandle;
        $fh->open($test) or print "can't open $test. $!\n";
        my $first = <$fh>;
        my $s = $switches;
        # propagate taint mode (-T) from the script's shebang line,
        # passing the -I paths explicitly since taint ignores PERL5LIB
        $s .= join " ", q[ "-T"], map {qq["-I$_"]} @INC
            if $first =~ /^#!.*\bperl.*-\w*T/;
        $fh->close or print "can't close $test. $!\n";
        my $cmd = ($ENV{'HARNESS_COMPILE_TEST'})
            ? "./perl -I../lib ../utils/perlcc $test "
              . "-run 2>> ./compilelog |"
            : "$^X $s $test|";
        $cmd = "MCR $cmd" if $^O eq 'VMS';
        $fh->open($cmd) or print "can't run $test. $!\n";
        $ok = $next = $max = 0;
        my %todo = ();
        my $bonus = 0;
        my $skipped = 0;
        my $skip_reason;
        while (<$fh>) {
            if ($verbose) {
                print $_;
            }
            if (/^1\.\.([0-9]+) todo([\d\s]+)\;/) {
                $max = $1;
                for (split(/\s+/, $2)) { $todo{$_} = 1; }
                $totmax += $max;
                $files++;
                $next = 1;
            } elsif (/^1\.\.([0-9]+)(\s*\#\s*[Ss]kip\S*(?>\s+)(.+))?/) {
                $max = $1;
                $totmax += $max;
                $files++;
                $next = 1;
                $skip_reason = $3 if not $max and defined $3;
            } elsif ($max && /^(not\s+)?ok\b/) {
                my $this = $next;
                if (/^not ok\s*(\d*)/) {
                    $this = $1 if $1 > 0;
                    print "${ml}NOK $this" if $ml;
                    if (!$todo{$this}) {
                        push @failed, $this;
                    } else {
                        $ok++;
                        $totok++;
                    }
                } elsif (/^ok\s*(\d*)(\s*\#\s*[Ss]kip\S*(?:(?>\s+)(.+))?)?/) {
                    $this = $1 if $1 > 0;
                    print "${ml}ok $this/$max" if $ml;
                    $ok++;
                    $totok++;
                    $skipped++ if defined $2;
                    my $reason;
                    $reason = 'unknown reason' if defined $2;
                    $reason = $3 if defined $3;
                    if (defined $reason and defined $skip_reason) {
                        # print "was: '$skip_reason' new '$reason'\n";
                        $skip_reason = 'various reasons'
                            if $skip_reason ne $reason;
                    } elsif (defined $reason) {
                        $skip_reason = $reason;
                    }
                    $bonus++, $totbonus++ if $todo{$this};
                }
                if ($this > $next) {
                    # warn "Test output counter mismatch [test $this]\n";
                    # no need to warn probably
                    push @failed, $next..$this-1;
                } elsif ($this < $next) {
                    # we have seen more "ok" lines than the number suggests
                    warn "Confused test output: test $this answered after test ", $next-1, "\n";
                    $next = $this;
                }
                $next = $this + 1;
            }
        }
        $fh->close; # must close to reap child resource values
        my $wstatus = $ignore_exitcode ? 0 : $?; # Can we trust $? ?
        my $estatus;
        $estatus = ($^O eq 'VMS'
                    ? eval 'use vmsish "status"; $estatus = $?'
                    : $wstatus >> 8);
        if ($wstatus) {
            my ($failed, $canon, $percent) = ('??', '??');
            printf "${ml}dubious\n\tTest returned status $estatus (wstat %d, 0x%x)\n",
                    $wstatus,$wstatus;
            print "\t\t(VMS status is $estatus)\n" if $^O eq 'VMS';
            if (corestatus($wstatus)) { # until we have a wait module
                if ($have_devel_corestack) {
                    Devel::CoreStack::stack($^X);
                } else {
                    print "\ttest program seems to have generated a core\n";
                }
            }
            $bad++;
            if ($max) {
                if ($next == $max + 1 and not @failed) {
                    print "\tafter all the subtests completed successfully\n";
                    $percent = 0;
                    $failed = 0; # But we do not set $canon!
                } else {
                    push @failed, $next..$max;
                    $failed = @failed;
                    (my $txt, $canon) = canonfailed($max,$skipped,@failed);
                    $percent = 100*(scalar @failed)/$max;
                    print "DIED. ",$txt;
                }
            }
            $failedtests{$test} = { canon => $canon, max => $max || '??',
                                    failed => $failed,
                                    name => $test, percent => $percent,
                                    estat => $estatus, wstat => $wstatus,
                                  };
        } elsif ($ok == $max && $next == $max+1) {
            if ($max and $skipped + $bonus) {
                my @msg;
                push(@msg, "$skipped/$max skipped: $skip_reason")
                    if $skipped;
                push(@msg, "$bonus/$max unexpectedly succeeded")
                    if $bonus;
                print "${ml}ok, ".join(', ', @msg)."\n";
            } elsif ($max) {
                print "${ml}ok\n";
            } elsif (defined $skip_reason) {
                print "skipped: $skip_reason\n";
                $tests_skipped++;
            } else {
                print "skipped test on this platform\n";
                $tests_skipped++;
            }
            $good++;
        } elsif ($max) {
            if ($next <= $max) {
                push @failed, $next..$max;
            }
            if (@failed) {
                my ($txt, $canon) = canonfailed($max,$skipped,@failed);
                print "${ml}$txt";
                $failedtests{$test} = { canon => $canon, max => $max,
                                        failed => scalar @failed,
                                        name => $test, percent => 100*(scalar @failed)/$max,
                                        estat => '', wstat => '',
                                      };
            } else {
                print "Don't know which tests failed: got $ok ok, expected $max\n";
                $failedtests{$test} = { canon => '??', max => $max,
                                        failed => '??',
                                        name => $test, percent => undef,
                                        estat => '', wstat => '',
                                      };
            }
            $bad++;
        } elsif ($next == 0) {
            print "FAILED before any test output arrived\n";
            $bad++;
            $failedtests{$test} = { canon => '??', max => '??',
                                    failed => '??',
                                    name => $test, percent => undef,
                                    estat => '', wstat => '',
                                  };
        }
        $subtests_skipped += $skipped;
        if (defined $files_in_dir) {
            my @new_dir_files = globdir $files_in_dir;
            if (@new_dir_files != @dir_files) {
                my %f;
                @f{@new_dir_files} = (1) x @new_dir_files;
                delete @f{@dir_files};
                my @f = sort keys %f;
                print "LEAKED FILES: @f\n";
                @dir_files = @new_dir_files;
            }
        }
    }
    my $t_total = timediff(new Benchmark, $t_start);

    if (defined $old5lib) {
        $ENV{PERL5LIB} = $old5lib;
    } else {
        delete $ENV{PERL5LIB};
    }

    my $bonusmsg = '';
    $bonusmsg = (" ($totbonus subtest".($totbonus>1?'s':'').
                 " UNEXPECTEDLY SUCCEEDED)")
        if $totbonus;
    if ($tests_skipped) {
        $bonusmsg .= ", $tests_skipped test" . ($tests_skipped != 1 ? 's' : '');
        if ($subtests_skipped) {
            $bonusmsg .= " and $subtests_skipped subtest"
                         . ($subtests_skipped != 1 ? 's' : '');
        }
        $bonusmsg .= ' skipped';
    }
    elsif ($subtests_skipped) {
        $bonusmsg .= ", $subtests_skipped subtest"
                     . ($subtests_skipped != 1 ? 's' : '')
                     . " skipped";
    }
    if ($bad == 0 && $totmax) {
        print "All tests successful$bonusmsg.\n";
    } elsif ($total==0) {
        die "FAILED--no tests were run for some reason.\n";
    } elsif ($totmax==0) {
        my $blurb = $total==1 ? "script" : "scripts";
        die "FAILED--$total test $blurb could be run, alas--no output ever seen\n";
    } else {
        $pct = sprintf("%.2f", $good / $total * 100);
        my $subpct = sprintf " %d/%d subtests failed, %.2f%% okay.",
            $totmax - $totok, $totmax, 100*$totok/$totmax;
        my $script;
        for $script (sort keys %failedtests) {
            $curtest = $failedtests{$script};
            write;
        }
        if ($bad) {
            $bonusmsg =~ s/^,\s*//;
            print "$bonusmsg.\n" if $bonusmsg;
            die "Failed $bad/$total test scripts, $pct% okay.$subpct\n";
        }
    }
    printf("Files=%d, Tests=%d, %s\n", $files, $totmax, timestr($t_total, 'nop'));

    return ($bad == 0 && $totmax);
}
my $tried_devel_corestack;

sub corestatus {
    my($st) = @_;
    my $ret;

    eval {require 'wait.ph'};
    if ($@) {
        $ret = ($st & 0200); # Tim says, this is for 90%
    } else {
        $ret = WCOREDUMP($st);
    }

    eval { require Devel::CoreStack; $have_devel_corestack++ }
        unless $tried_devel_corestack++;

    $ret;
}
sub canonfailed ($@) {
    my($max,$skipped,@failed) = @_;
    my %seen;
    @failed = sort {$a <=> $b} grep !$seen{$_}++, @failed;
    my $failed = @failed;
    my(@result, @canon, $canon);
    my $min;
    my $last = $min = shift @failed;
    if (@failed) {
        for (@failed, $failed[-1]) { # don't forget the last one
            if ($_ > $last+1 || $_ == $last) {
                push @canon, ($min == $last) ? $last : "$min-$last";
                $min = $_;
            }
            $last = $_;
        }
        local $" = ", ";
        push @result, "FAILED tests @canon\n";
        $canon = "@canon";
    } else {
        push @result, "FAILED test $last\n";
        $canon = $last;
    }

    push @result, "\tFailed $failed/$max tests, ";
    push @result, sprintf("%.2f",100*(1-$failed/$max)), "% okay";
    my $ender = 's' x ($skipped > 1);
    my $good = $max - $failed - $skipped;
    my $goodper = sprintf("%.2f",100*($good/$max));
    push @result, " (-$skipped skipped test$ender: $good okay, $goodper%)" if $skipped;
    push @result, "\n";
    my $txt = join "", @result;
    ($txt, $canon);
}
1;
__END__

=head1 NAME

Test::Harness - run perl standard test scripts with statistics

=head1 SYNOPSIS

    use Test::Harness;

    runtests(@tests);

=head1 DESCRIPTION
(By using the L<Test> module, you can write test scripts without
knowing the exact output this module expects. However, if you need to
know the specifics, read on!)
Perl test scripts print to standard output C<"ok N"> for each single
test, where C<N> is an increasing sequence of integers. The first line
output by a standard test script is C<"1..M"> with C<M> being the
number of tests that should be run within the test
script. Test::Harness::runtests(@tests) runs all the test scripts
named as arguments and checks standard output for the expected
C<"ok N"> strings.
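For example, a minimal conforming test script might look like this (a
sketch; the two-test plan and the tested condition are arbitrary):

    print "1..2\n";    # the plan: two tests follow
    print "ok 1\n";    # test 1 passed
    my $line = (2 > 1) ? "ok 2" : "not ok 2";
    print "$line\n";   # test 2 result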
After all tests have been performed, runtests() prints some
performance statistics that are computed by the Benchmark module.
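A trivial driver script might therefore look like this (a sketch; the
F<t/*.t> file layout is an assumption, not a requirement):

    use Test::Harness;

    # Prints per-script progress, then the Benchmark-based summary;
    # dies with an overall failure message if any script fails.
    runtests(glob("t/*.t"));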
=head2 The test script output
Any output from the test script to standard error is ignored and
bypassed, and will thus be seen by the user. Lines written to standard
output containing C</^(not\s+)?ok\b/> are interpreted as feedback for
runtests(). All other lines are discarded.
The test numbers after C<ok> may be omitted. In that case Test::Harness
temporarily maintains its own counter until the script supplies test
numbers again. So the following test script

    print <<END;
    1..6
    not ok
    ok
    not ok
    ok
    ok
    END

will generate

    FAILED tests 1, 3, 6
    Failed 3/6 tests, 50.00% okay
The global variable $Test::Harness::verbose is exportable and can be
used to let runtests() display the standard output of the script
without altering the behavior otherwise.
The global variable $Test::Harness::switches is exportable and can be
used to set perl command line options used for running the test
script(s). The default value is C<-w>.
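For example, a driver could import and set both variables before calling
runtests() (a sketch; the C<-Ilib> switch is just an illustration):

    use Test::Harness qw(&runtests $verbose $switches);

    $verbose  = 1;           # echo each script's standard output
    $switches = '-w -Ilib';  # perl options for every test script
    runtests(@ARGV);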
If the standard output line contains the substring C< # Skip> (with
variations in spacing and case) after C<ok> or C<ok NUMBER>, it is
counted as a skipped test. If the whole test script succeeds, the
count of skipped tests is included in the generated output.
C<Test::Harness> reports the text after C< # Skip(whatever)> as a
reason for skipping. Similarly, one can include an explanation in a
C<1..0> line emitted if the test script is skipped completely:

    1..0 # Skipped: no leverage found
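A test script might emit such skip lines as follows (a sketch; the
platform check is purely illustrative):

    print "1..2\n";
    print "ok 1\n";
    if ($^O eq 'MSWin32') {
        print "ok 2 # Skip: not relevant on this platform\n";
    } else {
        print "ok 2\n";
    }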
=head1 EXPORT

C<&runtests> is exported by Test::Harness by default.
=head1 DIAGNOSTICS

=over 4

=item C<All tests successful.\nFiles=%d, Tests=%d, %s>

If all tests are successful, some statistics about the performance are
printed.
=item C<FAILED tests %s\n\tFailed %d/%d tests, %.2f%% okay.>

For any single script that has failing subtests, statistics like the
above are printed.
=item C<Test returned status %d (wstat %d)>

For scripts that return a non-zero exit status, both C<$? E<gt>E<gt> 8>
and C<$?> are printed in a message similar to the above.
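The two numbers are the usual Perl decomposition of the child's wait
status, roughly:

    my $wstatus = $?;            # raw wait status of the child
    my $estatus = $wstatus >> 8; # exit status portion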
=item C<Failed 1 test, %.2f%% okay. %s>

=item C<Failed %d/%d tests, %.2f%% okay. %s>

If not all tests were successful, the script dies with one of the
above messages.

=back
=head1 ENVIRONMENT

Setting C<HARNESS_IGNORE_EXITCODE> makes harness ignore the exit status
of child processes.
Setting C<HARNESS_NOTTY> to a true value forces the harness to behave as
though STDOUT were not a console. You may need to set this if you don't
want harness to output more frequent progress messages using carriage
returns. Some consoles may not handle carriage returns properly (which
results in somewhat messy output).
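Note that some of these variables are read when Test::Harness is first
loaded, so a driver is safest setting them before the C<use>; for
example:

    BEGIN { $ENV{HARNESS_NOTTY} = 1 }  # before Test::Harness loads

    use Test::Harness;

    runtests(@tests);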
Setting C<HARNESS_COMPILE_TEST> to a true value will make harness attempt
to compile the test using C<perlcc> before running it.
If C<HARNESS_FILELEAK_IN_DIR> is set to the name of a directory, harness
will check after each test whether new files appeared in that directory,
and report them as

    LEAKED FILES: scr.tmp 0 my.db
If relative, the directory name is interpreted with respect to the
current directory at the moment runtests() was called. Putting an
absolute path into C<HARNESS_FILELEAK_IN_DIR> may give more predictable
results.
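For example (a sketch; the scratch directory is hypothetical):

    BEGIN { $ENV{HARNESS_FILELEAK_IN_DIR} = "t/scratch" }

    use Test::Harness;

    runtests(@tests);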
Harness sets C<HARNESS_ACTIVE> before executing the individual tests.
This allows the tests to determine if they are being executed through the
harness or by any other means.
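For example, a test script could use it to stay non-interactive under
the harness (a sketch of one common use):

    unless ($ENV{HARNESS_ACTIVE}) {
        # not running under Test::Harness: safe to prompt
        print "Press return to continue...\n";
    }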
=head1 SEE ALSO

L<Test> for writing test scripts, and also L<Benchmark> for the
underlying timing routines.
=head1 AUTHORS

Either Tim Bunce or Andreas Koenig, we don't know. What we know for
sure is that it was inspired by Larry Wall's TEST script that came
with perl distributions for ages. Numerous anonymous contributors
exist. The current maintainer is Andreas Koenig.
=head1 BUGS

Test::Harness uses $^X to determine the perl binary with which to run
the tests. Test scripts running via the shebang (C<#!>) line may not be
portable because $^X is not consistent for shebang scripts across
platforms. This is no problem when Test::Harness is run with an
absolute path to the perl binary or when $^X can be found in the path.

=cut