# Package-level state for Test::Harness.  These are package globals
# (some exported below) rather than lexicals because callers may set
# $verbose / $switches directly from outside.
our($VERSION, $verbose, $switches, $have_devel_corestack, $curtest,
@ISA, @EXPORT, @EXPORT_OK);
# True once Devel::CoreStack has been successfully loaded (bumped in
# the corestatus() machinery further below).
$have_devel_corestack = 0;
# Advertise to child test scripts that they are running under the harness.
$ENV{HARNESS_ACTIVE} = 1;
# Some experimental versions of OS/2 build have broken $?
my $ignore_exitcode = $ENV{HARNESS_IGNORE_EXITCODE};
# Directory to watch for files leaked by tests (see the LEAKED FILES
# report inside runtests()); undef disables the check.
my $files_in_dir = $ENV{HARNESS_FILELEAK_IN_DIR};
# Running totals: whole scripts skipped, and individual subtests skipped.
my $tests_skipped = 0;
my $subtests_skipped = 0;
# runtests() is the public entry point; $verbose and $switches are
# importable on request.
@EXPORT= qw(&runtests);
@EXPORT_OK= qw($verbose $switches);
31 Failed Test Status Wstat Total Fail Failed List of failed
32 -------------------------------------------------------------------------------
36 @<<<<<<<<<<<<<< @>> @>>>> @>>>> @>>> ^##.##% ^<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
45 ~~ ^<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
# globdir($dir)
#
# Return every directory entry (including '.' and '..') of $dir, in
# readdir() order.  Used to snapshot and re-scan the directory named by
# HARNESS_FILELEAK_IN_DIR so leaked files can be reported.
#
# Fixes over the original: uses a lexical directory handle instead of
# the global bareword DIRH (which clobbered any other user of that
# glob), and checks opendir for failure -- on error it warns and
# returns an empty list instead of silently reading from an invalid
# handle.
sub globdir {
    my $dir = shift;
    opendir my $dh, $dir
        or do { warn "can't opendir $dir: $!\n"; return };
    my @entries = readdir $dh;
    closedir $dh;
    return @entries;
}
# Per-run state: current script name, its abbreviated display name,
# counters for seen/expected subtests, and the accumulated failure records
# keyed by script name (rendered later via the report formats).
my($test,$te,$ok,$next,$max,$pct,$totok,$totbonus,@failed,%failedtests);
# pass -I flags to children
my $old5lib = $ENV{PERL5LIB};
# VMS has a 255-byte limit on the length of %ENV entries, so
# toss the ones that involve perl_root, the install location
$new5lib = join($Config{path_sep}, grep {!/perl_root/i;} @INC);
# Quote any switch containing an uppercase letter so case survives the
# command line -- NOTE(review): appears to be in the VMS-only branch; confirm.
$switches =~ s/-(\S*[A-Z]\S*)/"-$1"/g;
# Non-VMS: propagate the full @INC to child processes via PERL5LIB.
$new5lib = join($Config{path_sep}, @INC);
# local() restores the caller's PERL5LIB automatically at scope exit;
# $old5lib is also restored explicitly after the loop below.
local($ENV{'PERL5LIB'}) = $new5lib;
# Snapshot the watched directory before any test runs, for leak detection.
my @dir_files = globdir $files_in_dir if defined $files_in_dir;
my $t_start = new Benchmark;
# Run each named test script in turn.
while ($test = shift(@tests)) {
# On VMS, trim the displayed name down to its [.t. directory form.
if ($^O eq 'VMS') { $te =~ s/^.*\.t\./[.t./s; }
my $blank = (' ' x 77);
# "name........" leader printed before each script's status.
my $leader = "$te" . '.' x (20 - length($te));
# $ml carries \r prefixes so progress output overwrites itself in
# place -- only on a tty, and not when HARNESS_NOTTY or $verbose is set.
$ml = "\r$blank\r$leader"
if -t STDOUT and not $ENV{HARNESS_NOTTY} and not $verbose;
# First open: read the script itself to inspect its #! line.
my $fh = new FileHandle;
$fh->open($test) or print "can't open $test. $!\n";
# Extra perl switches supplied via the environment are appended verbatim.
$s .= " $ENV{'HARNESS_PERL_SWITCHES'}"
if exists $ENV{'HARNESS_PERL_SWITCHES'};
# If the shebang requests taint mode (-T), re-enable it for the child
# and pass @INC via explicit -I, since taint mode ignores PERL5LIB.
$s .= join " ", q[ "-T"], map {qq["-I$_"]} @INC
if $first =~ /^#!.*\bperl.*-\w*T/;
$fh->close or print "can't close $test. $!\n";
# Build the pipeline: HARNESS_COMPILE_TEST compiles the script with
# perlcc and runs the binary; otherwise run it with perl directly
# (the else-branch of this ternary is elided here).
my $cmd = ($ENV{'HARNESS_COMPILE_TEST'})
? "./perl -I../lib ../utils/perlcc $test "
. "-run 2>> ./compilelog |"
$cmd = "MCR $cmd" if $^O eq 'VMS';
# Second open: run the test and read its standard output via the pipe.
$fh->open($cmd) or print "can't run $test. $!\n";
$ok = $next = $max = 0;
# --- classify one line of the child's output ---
# Header "1..M todo N N;": declares expected-to-fail test numbers.
if (/^1\.\.([0-9]+) todo([\d\s]+)\;/) {
for (split(/\s+/, $2)) { $todo{$_} = 1; }
# Plain "1..M" header; "1..0 # Skip reason" marks a wholly-skipped script.
} elsif (/^1\.\.([0-9]+)(\s*\#\s*[Ss]kip\S*(?>\s+)(.+))?/) {
$skip_reason = $3 if not $max and defined $3;
# A result line -- only meaningful once the 1..M header set $max.
} elsif ($max && /^(not\s+)?ok\b/) {
if (/^not ok\s*(\d*)/){
# Trust an explicit test number when the script supplies one.
$this = $1 if $1 > 0;
print "${ml}NOK $this" if $ml;
# "ok N" or "ok N # Skip reason" (a passing-but-skipped subtest).
} elsif (/^ok\s*(\d*)(\s*\#\s*[Ss]kip\S*(?:(?>\s+)(.+))?)?/) {
$this = $1 if $1 > 0;
print "${ml}ok $this/$max" if $ml;
$skipped++ if defined $2;
# Record the skip reason; fall back to a placeholder when none given.
$reason = 'unknown reason' if defined $2;
$reason = $3 if defined $3;
if (defined $reason and defined $skip_reason) {
# print "was: '$skip_reason' new '$reason'\n";
# Differing reasons collapse into one generic label.
$skip_reason = 'various reasons'
if $skip_reason ne $reason;
} elsif (defined $reason) {
$skip_reason = $reason;
# A passing test that was declared "todo" is an unexpected success.
$bonus++, $totbonus++ if $todo{$this};
# warn "Test output counter mismatch [test $this]\n";
# no need to warn probably
# Numbers jumped forward: the tests in between are counted as failed.
push @failed, $next..$this-1;
} elsif ($this < $next) {
#we have seen more "ok" lines than the number suggests
warn "Confused test output: test $this answered after test ", $next-1, "\n";
# --- child output exhausted ---
$fh->close; # must close to reap child resource values
my $wstatus = $ignore_exitcode ? 0 : $?; # Can trust $? ?
# On VMS the native status is only visible in $? under "use vmsish".
$estatus = ($^O eq 'VMS'
? eval 'use vmsish "status"; $estatus = $?'
# Non-zero exit: report "dubious" with both exit and wait status.
my ($failed, $canon, $percent) = ('??', '??');
printf "${ml}dubious\n\tTest returned status $estatus (wstat %d, 0x%x)\n",
print "\t\t(VMS status is $estatus)\n" if $^O eq 'VMS';
if (corestatus($wstatus)) { # until we have a wait module
if ($have_devel_corestack) {
# Dump a stack trace from the core file when Devel::CoreStack loaded.
Devel::CoreStack::stack($^X);
print "\ttest program seems to have generated a core\n";
# Every subtest passed, yet the script still exited non-zero.
if ($next == $max + 1 and not @failed) {
print "\tafter all the subtests completed successfully\n";
$failed = 0; # But we do not set $canon!
# Script died early: subtests $next..$max never ran at all.
push @failed, $next..$max;
(my $txt, $canon) = canonfailed($max,$skipped,@failed);
$percent = 100*(scalar @failed)/$max;
$failedtests{$test} = { canon => $canon, max => $max || '??',
name => $test, percent => $percent,
estat => $estatus, wstat => $wstatus,
# Clean exit and every expected subtest answered: success.
} elsif ($ok == $max && $next == $max+1) {
if ($max and $skipped + $bonus) {
# Mention skips / unexpected successes on the "ok" line.
push(@msg, "$skipped/$max skipped: $skip_reason")
push(@msg, "$bonus/$max unexpectedly succeeded")
print "${ml}ok, ".join(', ', @msg)."\n";
# "1..0" header: the whole script opted out.
} elsif (defined $skip_reason) {
print "skipped: $skip_reason\n";
print "skipped test on this platform\n";
# Clean exit but some subtests failed or never answered.
push @failed, $next..$max;
my ($txt, $canon) = canonfailed($max,$skipped,@failed);
$failedtests{$test} = { canon => $canon, max => $max,
failed => scalar @failed,
name => $test, percent => 100*(scalar @failed)/$max,
estat => '', wstat => '',
# Counter mismatch that cannot be pinned on specific test numbers.
print "Don't know which tests failed: got $ok ok, expected $max\n";
$failedtests{$test} = { canon => '??', max => $max,
name => $test, percent => undef,
estat => '', wstat => '',
# No header and no results at all before the script ended.
} elsif ($next == 0) {
print "FAILED before any test output arrived\n";
$failedtests{$test} = { canon => '??', max => '??',
name => $test, percent => undef,
estat => '', wstat => '',
$subtests_skipped += $skipped;
# File-leak check: diff the directory listing against the snapshot.
if (defined $files_in_dir) {
my @new_dir_files = globdir $files_in_dir;
if (@new_dir_files != @dir_files) {
# Set difference via hash slices: new entries minus old entries.
@f{@new_dir_files} = (1) x @new_dir_files;
delete @f{@dir_files};
my @f = sort keys %f;
print "LEAKED FILES: @f\n";
# Re-baseline so the same leak is reported only once.
@dir_files = @new_dir_files;
my $t_total = timediff(new Benchmark, $t_start);
# Restore the caller's PERL5LIB exactly as we found it.
if (defined $old5lib) {
$ENV{PERL5LIB} = $old5lib;
delete $ENV{PERL5LIB};
# Build the "(N subtests UNEXPECTEDLY SUCCEEDED)" / skip summary suffix.
$bonusmsg = (" ($totbonus subtest".($totbonus>1?'s':'').
" UNEXPECTEDLY SUCCEEDED)")
if ($tests_skipped) {
$bonusmsg .= ", $tests_skipped test" . ($tests_skipped != 1 ? 's' : '');
if ($subtests_skipped) {
$bonusmsg .= " and $subtests_skipped subtest"
. ($subtests_skipped != 1 ? 's' : '');
$bonusmsg .= ' skipped';
elsif ($subtests_skipped) {
$bonusmsg .= ", $subtests_skipped subtest"
. ($subtests_skipped != 1 ? 's' : '')
# Final verdict: all good / nothing ran / no output / failures.
if ($bad == 0 && $totmax) {
print "All tests successful$bonusmsg.\n";
die "FAILED--no tests were run for some reason.\n";
} elsif ($totmax==0) {
my $blurb = $total==1 ? "script" : "scripts";
die "FAILED--$total test $blurb could be run, alas--no output ever seen\n";
$pct = sprintf("%.2f", $good / $total * 100);
my $subpct = sprintf " %d/%d subtests failed, %.2f%% okay.",
$totmax - $totok, $totmax, 100*$totok/$totmax;
# Emit the per-script failure table via the report formats; the
# formats read their data from the package global $curtest.
for $script (sort keys %failedtests) {
$curtest = $failedtests{$script};
# Drop the leading ", " the summary accumulated above.
$bonusmsg =~ s/^,\s*//;
print "$bonusmsg.\n" if $bonusmsg;
die "Failed $bad/$total test scripts, $pct% okay.$subpct\n";
printf("Files=%d, Tests=%d, %s\n", $files, $totmax, timestr($t_total, 'nop'));
# True iff every script passed and at least one subtest actually ran.
return ($bad == 0 && $totmax) ;
# Shared by corestatus(): ensures wait.ph / Devel::CoreStack are probed
# at most once per process.
my $tried_devel_corestack;
# wait.ph provides WCOREDUMP when the system headers have been h2ph'd;
# failure is tolerated (eval) and the bitmask fallback below is used.
eval {require 'wait.ph'};
$ret = ($st & 0200); # Tim says, this is for 90%
$ret = WCOREDUMP($st);
# Try loading Devel::CoreStack exactly once; $have_devel_corestack
# records success so runtests() can dump a stack from core files.
eval { require Devel::CoreStack; $have_devel_corestack++ }
unless $tried_devel_corestack++;
# canonfailed($max, $skipped, @failed)
#
# Build the human-readable failure report for one script and the
# canonical list of failed test numbers, with consecutive runs
# collapsed to "a-b" ranges.  Call sites in runtests() unpack it as
# ($txt, $canon); the return statement itself is below this fragment.
sub canonfailed ($@) {
my($max,$skipped,@failed) = @_;
# De-duplicate, then sort numerically (not lexically).
@failed = sort {$a <=> $b} grep !$seen{$_}++, @failed;
my $failed = @failed;
my $last = $min = shift @failed;
# Walk the sorted numbers emitting ranges; appending the list's own
# final element once more forces the last open range to be flushed.
for (@failed, $failed[-1]) { # don't forget the last one
if ($_ > $last+1 || $_ == $last) {
push @canon, "$min-$last";
# Several failures: list the canonical ranges.
push @result, "FAILED tests @canon\n";
# Exactly one failure: name it directly.
push @result, "FAILED test $last\n";
push @result, "\tFailed $failed/$max tests, ";
push @result, sprintf("%.2f",100*(1-$failed/$max)), "% okay";
# Pluralize "test" only when more than one subtest was skipped.
my $ender = 's' x ($skipped > 1);
my $good = $max - $failed - $skipped;
my $goodper = sprintf("%.2f",100*($good/$max));
push @result, " (-$skipped skipped test$ender: $good okay, $goodper%)" if $skipped;
my $txt = join "", @result;
388 Test::Harness - run perl standard test scripts with statistics
398 (By using the L<Test> module, you can write test scripts without
399 knowing the exact output this module expects. However, if you need to
400 know the specifics, read on!)
402 Perl test scripts print to standard output C<"ok N"> for each single
403 test, where C<N> is an increasing sequence of integers. The first line
404 output by a standard test script is C<"1..M"> with C<M> being the
405 number of tests that should be run within the test
406 script. Test::Harness::runtests(@tests) runs all the testscripts
407 named as arguments and checks standard output for the expected
410 After all tests have been performed, runtests() prints some
411 performance statistics that are computed by the Benchmark module.
413 =head2 The test script output
Any output from the test script to standard error is not captured by
runtests() but passed straight through, so the user will see it. Lines written to standard
417 output containing C</^(not\s+)?ok\b/> are interpreted as feedback for
418 runtests(). All other lines are discarded.
420 It is tolerated if the test numbers after C<ok> are omitted. In this
421 case Test::Harness maintains temporarily its own counter until the
422 script supplies test numbers again. So the following test script
436 Failed 3/6 tests, 50.00% okay
438 The global variable $Test::Harness::verbose is exportable and can be
439 used to let runtests() display the standard output of the script
440 without altering the behavior otherwise.
442 The global variable $Test::Harness::switches is exportable and can be
443 used to set perl command line options used for running the test
444 script(s). The default value is C<-w>.
446 If the standard output line contains substring C< # Skip> (with
447 variations in spacing and case) after C<ok> or C<ok NUMBER>, it is
448 counted as a skipped test. If the whole testscript succeeds, the
449 count of skipped tests is included in the generated output.
451 C<Test::Harness> reports the text after C< # Skip(whatever)> as a
reason for skipping. Likewise, one can include such an explanation
453 in a C<1..0> line emitted if the test is skipped completely:
455 1..0 # Skipped: no leverage found
459 C<&runtests> is exported by Test::Harness per default.
465 =item C<All tests successful.\nFiles=%d, Tests=%d, %s>
467 If all tests are successful some statistics about the performance are
470 =item C<FAILED tests %s\n\tFailed %d/%d tests, %.2f%% okay.>
472 For any single script that has failing subtests statistics like the
475 =item C<Test returned status %d (wstat %d)>
477 Scripts that return a non-zero exit status, both C<$? E<gt>E<gt> 8> and C<$?> are
478 printed in a message similar to the above.
480 =item C<Failed 1 test, %.2f%% okay. %s>
482 =item C<Failed %d/%d tests, %.2f%% okay. %s>
484 If not all tests were successful, the script dies with one of the
491 Setting C<HARNESS_IGNORE_EXITCODE> makes harness ignore the exit status
494 Setting C<HARNESS_NOTTY> to a true value forces it to behave as though
495 STDOUT were not a console. You may need to set this if you don't want
496 harness to output more frequent progress messages using carriage returns.
497 Some consoles may not handle carriage returns properly (which results
498 in a somewhat messy output).
500 Setting C<HARNESS_COMPILE_TEST> to a true value will make harness attempt
501 to compile the test using C<perlcc> before running it.
503 If C<HARNESS_FILELEAK_IN_DIR> is set to the name of a directory, harness
504 will check after each test whether new files appeared in that directory,
507 LEAKED FILES: scr.tmp 0 my.db
509 If relative, directory name is with respect to the current directory at
510 the moment runtests() was called. Putting absolute path into
C<HARNESS_FILELEAK_IN_DIR> may give more predictable results.
513 The value of C<HARNESS_PERL_SWITCHES> will be prepended to the
514 switches used to invoke perl on each test. For example, setting
515 C<HARNESS_PERL_SWITCHES> to "-W" will run all tests with all
518 Harness sets C<HARNESS_ACTIVE> before executing the individual tests.
519 This allows the tests to determine if they are being executed through the
520 harness or by any other means.
524 L<Test> for writing test scripts and also L<Benchmark> for the
525 underlying timing routines.
529 Either Tim Bunce or Andreas Koenig, we don't know. What we know for
530 sure is, that it was inspired by Larry Wall's TEST script that came
531 with perl distributions for ages. Numerous anonymous contributors
532 exist. Current maintainer is Andreas Koenig.
536 Test::Harness uses $^X to determine the perl binary to run the tests
537 with. Test scripts running via the shebang (C<#!>) line may not be
538 portable because $^X is not consistent for shebang scripts across
539 platforms. This is no problem when Test::Harness is run with an
540 absolute path to the perl binary or when $^X can be found in the path.