# Package globals: version, user-tunable flags, Exporter wiring, and
# bookkeeping shared by the harness routines below.
10 our($VERSION, $verbose, $switches, $have_devel_corestack, $curtest,
11 @ISA, @EXPORT, @EXPORT_OK);
# Set true once Devel::CoreStack has been loaded successfully (see the
# corestatus() logic later in this file).
12 $have_devel_corestack = 0;
# Let child test scripts detect that they run under the harness.
16 $ENV{HARNESS_ACTIVE} = 1;
18 # Some experimental versions of OS/2 build have broken $?
19 my $ignore_exitcode = $ENV{HARNESS_IGNORE_EXITCODE};
# When set, names a directory to watch for files leaked by test scripts.
21 my $files_in_dir = $ENV{HARNESS_FILELEAK_IN_DIR};
# Running totals: whole scripts skipped, and individual subtests skipped.
23 my $tests_skipped = 0;
24 my $subtests_skipped = 0;
# Exporter lists: runtests() by default; $verbose/$switches on request.
27 @EXPORT= qw(&runtests);
28 @EXPORT_OK= qw($verbose $switches);
31 Failed Test Status Wstat Total Fail Failed List of failed
32 -------------------------------------------------------------------------------
36 @<<<<<<<<<<<<<< @>> @>>>> @>>>> @>>> ^##.##% ^<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
45 ~~ ^<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
# Return every entry (including '.', '..' and dotfiles) of the directory
# named by the first argument.  Used to detect files leaked by test
# scripts when HARNESS_FILELEAK_IN_DIR is set.
#
# Fixes over the original one-liner: a lexical dirhandle instead of the
# bareword DIRH (which is a package global and can collide), and an
# explicit opendir check instead of silently reading from an unopened
# handle.  On failure we warn and return the empty list, which is what
# the callers effectively saw before (an empty listing).
sub globdir {
    my ($dir) = @_;
    opendir(my $dh, $dir) or do {
        warn "can't opendir $dir: $!\n";
        return ();
    };
    my @entries = readdir $dh;
    closedir $dh;
    return @entries;
}
58 my($test,$te,$ok,$next,$max,$pct,$totok,$totbonus,@failed,%failedtests);
65 # pass -I flags to children
66 my $old5lib = $ENV{PERL5LIB};
68 # VMS has a 255-byte limit on the length of %ENV entries, so
69 # toss the ones that involve perl_root, the install location
73 $new5lib = join($Config{path_sep}, grep {!/perl_root/i;} @INC);
74 $switches =~ s/-(\S*[A-Z]\S*)/"-$1"/g;
77 $new5lib = join($Config{path_sep}, @INC);
80 local($ENV{'PERL5LIB'}) = $new5lib;
82 my @dir_files = globdir $files_in_dir if defined $files_in_dir;
83 my $t_start = new Benchmark;
84 while ($test = shift(@tests)) {
87 if ($^O eq 'VMS') { $te =~ s/^.*\.t\./[.t./; }
88 my $blank = (' ' x 77);
89 my $leader = "$te" . '.' x (20 - length($te));
91 $ml = "\r$blank\r$leader"
92 if -t STDOUT and not $ENV{HARNESS_NOTTY} and not $verbose;
94 my $fh = new FileHandle;
95 $fh->open($test) or print "can't open $test. $!\n";
98 $s .= join " ", q[ "-T"], map {qq["-I$_"]} @INC
99 if $first =~ /^#!.*\bperl.*-\w*T/;
100 $fh->close or print "can't close $test. $!\n";
101 my $cmd = ($ENV{'HARNESS_COMPILE_TEST'})
102 ? "./perl -I../lib ../utils/perlcc $test "
103 . "-run 2>> ./compilelog |"
105 $cmd = "MCR $cmd" if $^O eq 'VMS';
106 $fh->open($cmd) or print "can't run $test. $!\n";
107 $ok = $next = $max = 0;
117 if (/^1\.\.([0-9]+) todo([\d\s]+)\;/) {
119 for (split(/\s+/, $2)) { $todo{$_} = 1; }
123 } elsif (/^1\.\.([0-9]+)(\s*\#\s*[Ss]kip\S*(?>\s+)(.+))?/) {
128 $skip_reason = $3 if not $max and defined $3;
129 } elsif ($max && /^(not\s+)?ok\b/) {
131 if (/^not ok\s*(\d*)/){
132 $this = $1 if $1 > 0;
133 print "${ml}NOK $this" if $ml;
140 } elsif (/^ok\s*(\d*)(\s*\#\s*[Ss]kip\S*(?:(?>\s+)(.+))?)?/) {
141 $this = $1 if $1 > 0;
142 print "${ml}ok $this/$max" if $ml;
145 $skipped++ if defined $2;
147 $reason = 'unknown reason' if defined $2;
148 $reason = $3 if defined $3;
149 if (defined $reason and defined $skip_reason) {
150 # print "was: '$skip_reason' new '$reason'\n";
151 $skip_reason = 'various reasons'
152 if $skip_reason ne $reason;
153 } elsif (defined $reason) {
154 $skip_reason = $reason;
156 $bonus++, $totbonus++ if $todo{$this};
159 # warn "Test output counter mismatch [test $this]\n";
160 # no need to warn probably
161 push @failed, $next..$this-1;
162 } elsif ($this < $next) {
163 #we have seen more "ok" lines than the number suggests
164 warn "Confused test output: test $this answered after test ", $next-1, "\n";
170 $fh->close; # must close to reap child resource values
171 my $wstatus = $ignore_exitcode ? 0 : $?; # Can trust $? ?
173 $estatus = ($^O eq 'VMS'
174 ? eval 'use vmsish "status"; $estatus = $?'
177 my ($failed, $canon, $percent) = ('??', '??');
178 printf "${ml}dubious\n\tTest returned status $estatus (wstat %d, 0x%x)\n",
180 print "\t\t(VMS status is $estatus)\n" if $^O eq 'VMS';
181 if (corestatus($wstatus)) { # until we have a wait module
182 if ($have_devel_corestack) {
183 Devel::CoreStack::stack($^X);
185 print "\ttest program seems to have generated a core\n";
190 if ($next == $max + 1 and not @failed) {
191 print "\tafter all the subtests completed successfully\n";
193 $failed = 0; # But we do not set $canon!
195 push @failed, $next..$max;
197 (my $txt, $canon) = canonfailed($max,$skipped,@failed);
198 $percent = 100*(scalar @failed)/$max;
202 $failedtests{$test} = { canon => $canon, max => $max || '??',
204 name => $test, percent => $percent,
205 estat => $estatus, wstat => $wstatus,
207 } elsif ($ok == $max && $next == $max+1) {
208 if ($max and $skipped + $bonus) {
210 push(@msg, "$skipped/$max skipped: $skip_reason")
212 push(@msg, "$bonus/$max unexpectedly succeeded")
214 print "${ml}ok, ".join(', ', @msg)."\n";
217 } elsif (defined $skip_reason) {
218 print "skipped: $skip_reason\n";
221 print "skipped test on this platform\n";
227 push @failed, $next..$max;
230 my ($txt, $canon) = canonfailed($max,$skipped,@failed);
232 $failedtests{$test} = { canon => $canon, max => $max,
233 failed => scalar @failed,
234 name => $test, percent => 100*(scalar @failed)/$max,
235 estat => '', wstat => '',
238 print "Don't know which tests failed: got $ok ok, expected $max\n";
239 $failedtests{$test} = { canon => '??', max => $max,
241 name => $test, percent => undef,
242 estat => '', wstat => '',
246 } elsif ($next == 0) {
247 print "FAILED before any test output arrived\n";
249 $failedtests{$test} = { canon => '??', max => '??',
251 name => $test, percent => undef,
252 estat => '', wstat => '',
255 $subtests_skipped += $skipped;
256 if (defined $files_in_dir) {
257 my @new_dir_files = globdir $files_in_dir;
258 if (@new_dir_files != @dir_files) {
260 @f{@new_dir_files} = (1) x @new_dir_files;
261 delete @f{@dir_files};
262 my @f = sort keys %f;
263 print "LEAKED FILES: @f\n";
264 @dir_files = @new_dir_files;
268 my $t_total = timediff(new Benchmark, $t_start);
271 if (defined $old5lib) {
272 $ENV{PERL5LIB} = $old5lib;
274 delete $ENV{PERL5LIB};
278 $bonusmsg = (" ($totbonus subtest".($totbonus>1?'s':'').
279 " UNEXPECTEDLY SUCCEEDED)")
281 if ($tests_skipped) {
282 $bonusmsg .= ", $tests_skipped test" . ($tests_skipped != 1 ? 's' : '');
283 if ($subtests_skipped) {
284 $bonusmsg .= " and $subtests_skipped subtest"
285 . ($subtests_skipped != 1 ? 's' : '');
287 $bonusmsg .= ' skipped';
289 elsif ($subtests_skipped) {
290 $bonusmsg .= ", $subtests_skipped subtest"
291 . ($subtests_skipped != 1 ? 's' : '')
294 if ($bad == 0 && $totmax) {
295 print "All tests successful$bonusmsg.\n";
297 die "FAILED--no tests were run for some reason.\n";
298 } elsif ($totmax==0) {
299 my $blurb = $total==1 ? "script" : "scripts";
300 die "FAILED--$total test $blurb could be run, alas--no output ever seen\n";
302 $pct = sprintf("%.2f", $good / $total * 100);
303 my $subpct = sprintf " %d/%d subtests failed, %.2f%% okay.",
304 $totmax - $totok, $totmax, 100*$totok/$totmax;
306 for $script (sort keys %failedtests) {
307 $curtest = $failedtests{$script};
311 $bonusmsg =~ s/^,\s*//;
312 print "$bonusmsg.\n" if $bonusmsg;
313 die "Failed $bad/$total test scripts, $pct% okay.$subpct\n";
316 printf("Files=%d, Tests=%d, %s\n", $files, $totmax, timestr($t_total, 'nop'));
318 return ($bad == 0 && $totmax) ;
321 my $tried_devel_corestack;
326 eval {require 'wait.ph'};
329 $ret = ($st & 0200); # Tim says, this is for 90%
332 $ret = WCOREDUMP($st);
335 eval { require Devel::CoreStack; $have_devel_corestack++ }
336 unless $tried_devel_corestack++;
# Collapse a raw list of failed-test numbers into a canonical summary:
# consecutive numbers become "min-max" ranges, and a "Failed N/M tests,
# XX.XX% okay" line (plus skip accounting) is appended.
# NOTE(review): the ($@) prototype is legacy style; it is preserved here
# untouched because callers in this file rely on the current parse.
341 sub canonfailed ($@) {
342 my($max,$skipped,@failed) = @_;
# De-duplicate via %seen, then sort numerically (not lexically).
344 @failed = sort {$a <=> $b} grep !$seen{$_}++, @failed;
# Scalar context: count of distinct failed tests.
345 my $failed = @failed;
# $min/$last track the current run of consecutive failure numbers.
349 my $last = $min = shift @failed;
352 for (@failed, $failed[-1]) { # don't forget the last one
353 if ($_ > $last+1 || $_ == $last) {
# The run just ended: emit it as a collapsed "min-last" range.
357 push @canon, "$min-$last";
364 push @result, "FAILED tests @canon\n";
367 push @result, "FAILED test $last\n";
371 push @result, "\tFailed $failed/$max tests, ";
372 push @result, sprintf("%.2f",100*(1-$failed/$max)), "% okay";
# 's' x (cond) pluralizes "test" only when more than one was skipped.
373 my $ender = 's' x ($skipped > 1);
374 my $good = $max - $failed - $skipped;
375 my $goodper = sprintf("%.2f",100*($good/$max));
376 push @result, " (-$skipped skipped test$ender: $good okay, $goodper%)" if $skipped;
378 my $txt = join "", @result;
387 Test::Harness - run perl standard test scripts with statistics
397 (By using the L<Test> module, you can write test scripts without
398 knowing the exact output this module expects. However, if you need to
399 know the specifics, read on!)
401 Perl test scripts print to standard output C<"ok N"> for each single
402 test, where C<N> is an increasing sequence of integers. The first line
403 output by a standard test script is C<"1..M"> with C<M> being the
404 number of tests that should be run within the test
405 script. Test::Harness::runtests(@tests) runs all the test scripts
406 named as arguments and checks standard output for the expected
409 After all tests have been performed, runtests() prints some
410 performance statistics that are computed by the Benchmark module.
412 =head2 The test script output
414 Any output from the test script to standard error is ignored and
415 bypassed, and will thus be seen by the user. Lines written to standard
416 output containing C</^(not\s+)?ok\b/> are interpreted as feedback for
417 runtests(). All other lines are discarded.
419 It is tolerated if the test numbers after C<ok> are omitted. In this
420 case Test::Harness maintains temporarily its own counter until the
421 script supplies test numbers again. So the following test script
435 Failed 3/6 tests, 50.00% okay
437 The global variable $Test::Harness::verbose is exportable and can be
438 used to let runtests() display the standard output of the script
439 without altering the behavior otherwise.
441 The global variable $Test::Harness::switches is exportable and can be
442 used to set perl command line options used for running the test
443 script(s). The default value is C<-w>.
445 If the standard output line contains substring C< # Skip> (with
446 variations in spacing and case) after C<ok> or C<ok NUMBER>, it is
447 counted as a skipped test. If the whole test script succeeds, the
448 count of skipped tests is included in the generated output.
450 C<Test::Harness> reports the text after C< # Skip(whatever)> as a
451 reason for skipping. Similarly, one can include a similar explanation
452 in a C<1..0> line emitted if the test is skipped completely:
454 1..0 # Skipped: no leverage found
458 C<&runtests> is exported by Test::Harness by default.
464 =item C<All tests successful.\nFiles=%d, Tests=%d, %s>
466 If all tests are successful some statistics about the performance are
469 =item C<FAILED tests %s\n\tFailed %d/%d tests, %.2f%% okay.>
471 For any single script that has failing subtests statistics like the
474 =item C<Test returned status %d (wstat %d)>
476 Scripts that return a non-zero exit status, both C<$? E<gt>E<gt> 8> and C<$?> are
477 printed in a message similar to the above.
479 =item C<Failed 1 test, %.2f%% okay. %s>
481 =item C<Failed %d/%d tests, %.2f%% okay. %s>
483 If not all tests were successful, the script dies with one of the
490 Setting C<HARNESS_IGNORE_EXITCODE> makes harness ignore the exit status
493 Setting C<HARNESS_NOTTY> to a true value forces it to behave as though
494 STDOUT were not a console. You may need to set this if you don't want
495 harness to output more frequent progress messages using carriage returns.
496 Some consoles may not handle carriage returns properly (which results
497 in a somewhat messy output).
499 Setting C<HARNESS_COMPILE_TEST> to a true value will make harness attempt
500 to compile the test using C<perlcc> before running it.
502 If C<HARNESS_FILELEAK_IN_DIR> is set to the name of a directory, harness
503 will check after each test whether new files appeared in that directory,
506 LEAKED FILES: scr.tmp 0 my.db
508 If relative, the directory name is interpreted with respect to the current
509 directory at the moment runtests() was called. Putting an absolute path into
510 C<HARNESS_FILELEAK_IN_DIR> may give more predictable results.
512 Harness sets C<HARNESS_ACTIVE> before executing the individual tests.
513 This allows the tests to determine if they are being executed through the
514 harness or by any other means.
518 L<Test> for writing test scripts and also L<Benchmark> for the
519 underlying timing routines.
523 Either Tim Bunce or Andreas Koenig, we don't know. What we know for
524 sure is, that it was inspired by Larry Wall's TEST script that came
525 with perl distributions for ages. Numerous anonymous contributors
526 exist. Current maintainer is Andreas Koenig.
530 Test::Harness uses $^X to determine the perl binary to run the tests
531 with. Test scripts running via the shebang (C<#!>) line may not be
532 portable because $^X is not consistent for shebang scripts across
533 platforms. This is no problem when Test::Harness is run with an
534 absolute path to the perl binary or when $^X can be found in the path.