# Old-style package globals (`use vars` predates `our`).  $verbose and
# $switches are user-tunable and exportable; $have_devel_corestack caches
# whether Devel::CoreStack could be loaded; $curtest feeds the formats.
10 use vars qw($VERSION $verbose $switches $have_devel_corestack $curtest
11 @ISA @EXPORT @EXPORT_OK);
12 $have_devel_corestack = 0;
# Marker so test scripts can detect they are running under the harness.
16 $ENV{HARNESS_ACTIVE} = 1;
18 # Some experimental versions of OS/2 build have broken $?
19 my $ignore_exitcode = $ENV{HARNESS_IGNORE_EXITCODE};
# If set, the harness watches this directory for files leaked by each test.
21 my $files_in_dir = $ENV{HARNESS_FILELEAK_IN_DIR};
# Running totals for the end-of-run summary message.
23 my $tests_skipped = 0;
24 my $subtests_skipped = 0;
# runtests() is the only default export; the two knobs are on request.
27 @EXPORT= qw(&runtests);
28 @EXPORT_OK= qw($verbose $switches);
31 Failed Test Status Wstat Total Fail Failed List of failed
32 -------------------------------------------------------------------------------
36 @<<<<<<<<<<<<<< @>> @>>>> @>>>> @>>> ^##.##% ^<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
45 ~~ ^<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
# Return every entry (including '.' and '..') of the given directory.
# Uses a lexical dirhandle instead of the package-global bareword DIRH, so
# nested or repeated calls cannot clobber each other's handle.  If the
# directory cannot be opened, returns the empty list — the same net result
# as the original (which read from a failed handle), without the warnings.
sub globdir {
    my ($dir) = @_;
    opendir my $dh, $dir or return ();
    my @entries = readdir $dh;
    closedir $dh;
    return @entries;
}
# --- runtests() interior: per-test setup (sampled listing; intermediate
# lines are missing, so braces/else-branches here are incomplete) ---
58 my($test,$te,$ok,$next,$max,$pct,$totok,$totbonus,@failed,%failedtests);
65 # pass -I flags to children
66 my $old5lib = $ENV{PERL5LIB};
# Localized so child test processes inherit the harness's @INC; the saved
# $old5lib is restored near the end of runtests().
67 local($ENV{'PERL5LIB'}) = join($Config{path_sep}, @INC);
# VMS: quote switches containing uppercase letters to preserve their case.
69 if ($^O eq 'VMS') { $switches =~ s/-(\S*[A-Z]\S*)/"-$1"/g }
# Snapshot the watched directory for the per-test file-leak check.
71 my @dir_files = globdir $files_in_dir if defined $files_in_dir;
72 my $t_start = new Benchmark;
73 while ($test = shift(@tests)) {
76 if ($^O eq 'VMS') { $te =~ s/^.*\.t\./[.t./; }
77 my $blank = (' ' x 77);
78 my $leader = "$te" . '.' x (20 - length($te));
# $ml is the carriage-return prefix for in-place progress updates; only
# set when STDOUT is a tty, HARNESS_NOTTY is unset, and not in verbose mode.
80 $ml = "\r$blank\r$leader"
81 if -t STDOUT and not $ENV{HARNESS_NOTTY} and not $verbose;
# First open: peek at the script itself (shebang line) before running it.
83 my $fh = new FileHandle;
84 $fh->open($test) or print "can't open $test. $!\n";
# Propagate taint mode to the child when the shebang line carries -T.
87 $s .= join " ", q[ "-T"], map {qq["-I$_"]} @INC
88 if $first =~ /^#!.*\bperl.*-\w*T/;
89 $fh->close or print "can't close $test. $!\n";
# HARNESS_COMPILE_TEST routes the script through perlcc before running it.
90 my $cmd = ($ENV{'HARNESS_COMPILE_TEST'})
91 ? "./perl -I../lib ../utils/perlcc $test "
92 . "-run 2>> ./compilelog |"
94 $cmd = "MCR $cmd" if $^O eq 'VMS';
# Second open: pipe-open the command and read the test's output below.
95 $fh->open($cmd) or print "can't run $test. $!\n";
96 $ok = $next = $max = 0;
# --- runtests() interior: parse one line of the test's output (sampled
# listing; surrounding loop and closing braces are missing) ---
# Header "1..N todo 3 5;" lists subtests that are expected to fail.
106 if (/^1\.\.([0-9]+) todo([\d\s]+)\;/) {
108 for (split(/\s+/, $2)) { $todo{$_} = 1; }
# Plain plan line, optionally "1..0 # Skip <reason>" for a whole-script skip.
112 } elsif (/^1\.\.([0-9]+)(\s*\#\s*[Ss]kip\S*(?>\s+)(.+))?/) {
117 $skip_reason = $3 if not $max and defined $3;
118 } elsif ($max && /^(not\s+)?ok\b/) {
120 if (/^not ok\s*(\d*)/){
# The number after "not ok" is optional; keep our own counter otherwise.
121 $this = $1 if $1 > 0;
122 print "${ml}NOK $this" if $ml;
# "ok N # Skip <reason>" counts as a skipped subtest.
129 } elsif (/^ok\s*(\d*)(\s*\#\s*[Ss]kip\S*(?:(?>\s+)(.+))?)?/) {
130 $this = $1 if $1 > 0;
131 print "${ml}ok $this/$max" if $ml;
134 $skipped++ if defined $2;
135 # TODO(review): presumably a `my $reason;` declaration sits in a
136 # missing line nearby — confirm against the full source.
136 $reason = 'unknown reason' if defined $2;
137 $reason = $3 if defined $3;
# When scripts give differing skip reasons, collapse to a generic summary.
138 if (defined $reason and defined $skip_reason) {
139 # print "was: '$skip_reason' new '$reason'\n";
140 $skip_reason = 'various reasons'
141 if $skip_reason ne $reason;
142 } elsif (defined $reason) {
143 $skip_reason = $reason;
# A passing subtest that was on the todo list unexpectedly succeeded.
145 $bonus++, $totbonus++ if $todo{$this};
# Gap in numbering: subtests $next..$this-1 never reported — treat as failed.
148 # warn "Test output counter mismatch [test $this]\n";
149 # no need to warn probably
150 push @failed, $next..$this-1;
151 } elsif ($this < $next) {
152 #we have seen more "ok" lines than the number suggests
153 warn "Confused test output: test $this answered after test ", $next-1, "\n";
# --- runtests() interior: after the child exits, classify the result
# (sampled listing; several branch/brace lines are missing) ---
159 $fh->close; # must close to reap child resource values
160 my $wstatus = $ignore_exitcode ? 0 : $?; # Can trust $? ?
# On VMS the native status must be decoded under vmsish semantics.
162 $estatus = ($^O eq 'VMS'
163 ? eval 'use vmsish "status"; $estatus = $?'
# Non-zero wait status: the run is "dubious" regardless of its output.
166 my ($failed, $canon, $percent) = ('??', '??');
167 printf "${ml}dubious\n\tTest returned status $estatus (wstat %d, 0x%x)\n",
169 print "\t\t(VMS status is $estatus)\n" if $^O eq 'VMS';
170 if (corestatus($wstatus)) { # until we have a wait module
171 if ($have_devel_corestack) {
172 Devel::CoreStack::stack($^X);
174 print "\ttest program seems to have generated a core\n";
# All subtests passed but the exit status was bad: report that explicitly.
179 if ($next == $max + 1 and not @failed) {
180 print "\tafter all the subtests completed successfully\n";
182 $failed = 0; # But we do not set $canon!
# Otherwise count every unreported subtest as failed and canonicalize.
184 push @failed, $next..$max;
186 (my $txt, $canon) = canonfailed($max,$skipped,@failed);
187 $percent = 100*(scalar @failed)/$max;
191 $failedtests{$test} = { canon => $canon, max => $max || '??',
193 name => $test, percent => $percent,
194 estat => $estatus, wstat => $wstatus,
# Clean exit and every subtest accounted for: success (with skips/bonus).
196 } elsif ($ok == $max && $next == $max+1) {
197 if ($max and $skipped + $bonus) {
199 push(@msg, "$skipped/$max skipped: $skip_reason")
201 push(@msg, "$bonus/$max unexpectedly succeeded")
203 print "${ml}ok, ".join(', ', @msg)."\n";
# The whole script skipped itself via a "1..0 # Skip" plan.
206 } elsif (defined $skip_reason) {
207 print "skipped: $skip_reason\n";
210 print "skipped test on this platform\n";
# Clean exit but some subtests missing: record them as failures.
216 push @failed, $next..$max;
219 my ($txt, $canon) = canonfailed($max,$skipped,@failed);
221 $failedtests{$test} = { canon => $canon, max => $max,
222 failed => scalar @failed,
223 name => $test, percent => 100*(scalar @failed)/$max,
224 estat => '', wstat => '',
# Counter mismatch we cannot reconstruct: record an unknown ('??') failure.
227 print "Don't know which tests failed: got $ok ok, expected $max\n";
228 $failedtests{$test} = { canon => '??', max => $max,
230 name => $test, percent => undef,
231 estat => '', wstat => '',
# No output at all before the child exited.
235 } elsif ($next == 0) {
236 print "FAILED before any test output arrived\n";
238 $failedtests{$test} = { canon => '??', max => '??',
240 name => $test, percent => undef,
241 estat => '', wstat => '',
244 $subtests_skipped += $skipped;
# File-leak check: diff the watched directory against the pre-test snapshot.
245 if (defined $files_in_dir) {
246 my @new_dir_files = globdir $files_in_dir;
247 if (@new_dir_files != @dir_files) {
# Hash-slice set difference: entries present now minus the old snapshot.
249 @f{@new_dir_files} = (1) x @new_dir_files;
250 delete @f{@dir_files};
251 my @f = sort keys %f;
252 print "LEAKED FILES: @f\n";
# Re-baseline so later tests report only their own leaked files.
253 @dir_files = @new_dir_files;
# --- runtests() interior: end-of-run summary and exit status (sampled
# listing; some branch/brace lines are missing) ---
257 my $t_total = timediff(new Benchmark, $t_start);
# Restore the caller's PERL5LIB (it was localized at the top of runtests).
260 if (defined $old5lib) {
261 $ENV{PERL5LIB} = $old5lib;
263 delete $ENV{PERL5LIB};
# Build the "(N subtests UNEXPECTEDLY SUCCEEDED), N tests ... skipped" note.
267 $bonusmsg = (" ($totbonus subtest".($totbonus>1?'s':'').
268 " UNEXPECTEDLY SUCCEEDED)")
270 if ($tests_skipped) {
271 $bonusmsg .= ", $tests_skipped test" . ($tests_skipped != 1 ? 's' : '');
272 if ($subtests_skipped) {
273 $bonusmsg .= " and $subtests_skipped subtest"
274 . ($subtests_skipped != 1 ? 's' : '');
276 $bonusmsg .= ' skipped';
278 elsif ($subtests_skipped) {
279 $bonusmsg .= ", $subtests_skipped subtest"
280 . ($subtests_skipped != 1 ? 's' : '')
# Three terminal outcomes: all good, nothing ran, or some scripts failed.
283 if ($bad == 0 && $totmax) {
284 print "All tests successful$bonusmsg.\n";
286 die "FAILED--no tests were run for some reason.\n";
287 } elsif ($totmax==0) {
288 my $blurb = $total==1 ? "script" : "scripts";
289 die "FAILED--$total test $blurb could be run, alas--no output ever seen\n";
# Failures: print the per-script table (via the formats/$curtest), then die.
291 $pct = sprintf("%.2f", $good / $total * 100);
292 my $subpct = sprintf " %d/%d subtests failed, %.2f%% okay.",
293 $totmax - $totok, $totmax, 100*$totok/$totmax;
295 for $script (sort keys %failedtests) {
296 $curtest = $failedtests{$script};
# Trim the leading ", " left over when there was no bonus clause.
300 $bonusmsg =~ s/^,\s*//;
301 print "$bonusmsg.\n" if $bonusmsg;
302 die "Failed $bad/$total test scripts, $pct% okay.$subpct\n";
305 printf("Files=%d, Tests=%d, %s\n", $files, $totmax, timestr($t_total, 'nop'));
# True (success) only when nothing failed and at least one subtest ran.
307 return ($bad == 0 && $totmax) ;
# --- corestatus() fragment: did the wait status indicate a core dump?
# (sampled listing; the sub header and branch structure are missing) ---
310 my $tried_devel_corestack;
# Try the real WCOREDUMP macro from wait.ph first.
315 eval {require 'wait.ph'};
# Fallback when wait.ph is unavailable: test the conventional 0200 bit.
318 $ret = ($st & 0200); # Tim says, this is for 90%
321 $ret = WCOREDUMP($st);
# Probe for Devel::CoreStack exactly once; availability is cached in the
# package global $have_devel_corestack for the stack-dump path above.
324 eval { require Devel::CoreStack; $have_devel_corestack++ }
325 unless $tried_devel_corestack++;
# Build the human-readable "FAILED tests ..." text: collapse the failed
# subtest numbers into "a-b" ranges and compute pass percentages.
# NOTE(review): the ($@) prototype is historical Perl; it shapes parsing,
# not argument validation.  (Sampled listing; some lines are missing.)
330 sub canonfailed ($@) {
331 my($max,$skipped,@failed) = @_;
# Numerically sort and de-duplicate the failed subtest numbers.
333 @failed = sort {$a <=> $b} grep !$seen{$_}++, @failed;
334 my $failed = @failed;
338 my $last = $min = shift @failed;
# Walk the sorted list emitting ranges; repeating the final element once
# more guarantees the last open range gets flushed.
341 for (@failed, $failed[-1]) { # don't forget the last one
342 if ($_ > $last+1 || $_ == $last) {
346 push @canon, "$min-$last";
353 push @result, "FAILED tests @canon\n";
# Singular message form when exactly one subtest failed.
356 push @result, "FAILED test $last\n";
360 push @result, "\tFailed $failed/$max tests, ";
361 push @result, sprintf("%.2f",100*(1-$failed/$max)), "% okay";
# 's' x (cond) pluralizes "test" only when more than one was skipped.
362 my $ender = 's' x ($skipped > 1);
363 my $good = $max - $failed - $skipped;
364 my $goodper = sprintf("%.2f",100*($good/$max));
365 push @result, " (-$skipped skipped test$ender: $good okay, $goodper%)" if $skipped;
367 my $txt = join "", @result;
376 Test::Harness - run perl standard test scripts with statistics
386 (By using the L<Test> module, you can write test scripts without
387 knowing the exact output this module expects. However, if you need to
388 know the specifics, read on!)
390 Perl test scripts print to standard output C<"ok N"> for each single
391 test, where C<N> is an increasing sequence of integers. The first line
392 output by a standard test script is C<"1..M"> with C<M> being the
393 number of tests that should be run within the test
394 script. Test::Harness::runtests(@tests) runs all the testscripts
395 named as arguments and checks standard output for the expected
398 After all tests have been performed, runtests() prints some
399 performance statistics that are computed by the Benchmark module.
401 =head2 The test script output
403 Any output from the test script to standard error is not intercepted
404 by the harness, and thus will be seen by the user. Lines written to standard
405 output containing C</^(not\s+)?ok\b/> are interpreted as feedback for
406 runtests(). All other lines are discarded.
408 It is tolerated if the test numbers after C<ok> are omitted. In this
409 case Test::Harness maintains temporarily its own counter until the
410 script supplies test numbers again. So the following test script
424 Failed 3/6 tests, 50.00% okay
426 The global variable $Test::Harness::verbose is exportable and can be
427 used to let runtests() display the standard output of the script
428 without altering the behavior otherwise.
430 The global variable $Test::Harness::switches is exportable and can be
431 used to set perl command line options used for running the test
432 script(s). The default value is C<-w>.
434 If the standard output line contains substring C< # Skip> (with
435 variations in spacing and case) after C<ok> or C<ok NUMBER>, it is
436 counted as a skipped test. If the whole testscript succeeds, the
437 count of skipped tests is included in the generated output.
439 C<Test::Harness> reports the text after C< # Skip(whatever)> as a
440 reason for skipping. Similarly, one can include a similar explanation
441 in a C<1..0> line emitted if the test is skipped completely:
443 1..0 # Skipped: no leverage found
447 C<&runtests> is exported by Test::Harness per default.
453 =item C<All tests successful.\nFiles=%d, Tests=%d, %s>
455 If all tests are successful some statistics about the performance are
458 =item C<FAILED tests %s\n\tFailed %d/%d tests, %.2f%% okay.>
460 For any single script that has failing subtests statistics like the
463 =item C<Test returned status %d (wstat %d)>
465 Scripts that return a non-zero exit status, both C<$? E<gt>E<gt> 8> and C<$?> are
466 printed in a message similar to the above.
468 =item C<Failed 1 test, %.2f%% okay. %s>
470 =item C<Failed %d/%d tests, %.2f%% okay. %s>
472 If not all tests were successful, the script dies with one of the
479 Setting C<HARNESS_IGNORE_EXITCODE> makes harness ignore the exit status
482 Setting C<HARNESS_NOTTY> to a true value forces it to behave as though
483 STDOUT were not a console. You may need to set this if you don't want
484 harness to output more frequent progress messages using carriage returns.
485 Some consoles may not handle carriage returns properly (which results
486 in a somewhat messy output).
488 Setting C<HARNESS_COMPILE_TEST> to a true value will make harness attempt
489 to compile the test using C<perlcc> before running it.
491 If C<HARNESS_FILELEAK_IN_DIR> is set to the name of a directory, harness
492 will check after each test whether new files appeared in that directory,
495 LEAKED FILES: scr.tmp 0 my.db
497 If relative, directory name is with respect to the current directory at
498 the moment runtests() was called. Putting absolute path into
499 C<HARNESS_FILELEAK_IN_DIR> may give more predictable results.
501 Harness sets C<HARNESS_ACTIVE> before executing the individual tests.
502 This allows the tests to determine if they are being executed through the
503 harness or by any other means.
507 L<Test> for writing test scripts and also L<Benchmark> for the
508 underlying timing routines.
512 Either Tim Bunce or Andreas Koenig, we don't know. What we know for
513 sure is, that it was inspired by Larry Wall's TEST script that came
514 with perl distributions for ages. Numerous anonymous contributors
515 exist. Current maintainer is Andreas Koenig.
519 Test::Harness uses $^X to determine the perl binary to run the tests
520 with. Test scripts running via the shebang (C<#!>) line may not be
521 portable because $^X is not consistent for shebang scripts across
522 platforms. This is no problem when Test::Harness is run with an
523 absolute path to the perl binary or when $^X can be found in the path.