package Test::Harness;

use Exporter;
use Benchmark;
use Config;
use FileHandle;
use vars qw($VERSION $verbose $switches $have_devel_corestack $curtest
            @ISA @EXPORT @EXPORT_OK);

$have_devel_corestack = 0;

$ENV{HARNESS_ACTIVE} = 1;

# Some experimental versions of the OS/2 build have a broken $?
my $ignore_exitcode = $ENV{HARNESS_IGNORE_EXITCODE};
my $files_in_dir = $ENV{HARNESS_FILELEAK_IN_DIR};

my $tests_skipped = 0;
my $subtests_skipped = 0;

@ISA       = ('Exporter');
@EXPORT    = qw(&runtests);
@EXPORT_OK = qw($verbose $switches);

format STDOUT_TOP =
Failed Test  Status Wstat Total Fail  Failed  List of failed
-------------------------------------------------------------------------------
.

format STDOUT =
@<<<<<<<<<<<<<< @>> @>>>> @>>>> @>>> ^##.##%  ^<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
{ $curtest->{name}, $curtest->{estat}, $curtest->{wstat}, $curtest->{max},
  $curtest->{failed}, $curtest->{percent}, $curtest->{canon} }
~~                                            ^<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
                                              $curtest->{canon}
.

$verbose = 0;
$switches = "-w";

# Return all names in a directory (used for the HARNESS_FILELEAK_IN_DIR check)
sub globdir { opendir DIRH, shift; my @f = readdir DIRH; closedir DIRH; @f }

sub runtests {
    my(@tests) = @_;
    my($test,$te,$ok,$next,$max,$pct,$totok,$totbonus,@failed,%failedtests);
    my($totmax, $files, $bad, $good, $total) = (0, 0, 0, 0, scalar @tests);

    # pass -I flags to children
    my $old5lib = $ENV{PERL5LIB};
    local($ENV{'PERL5LIB'}) = join($Config{path_sep}, @INC);

    if ($^O eq 'VMS') { $switches =~ s/-(\S*[A-Z]\S*)/"-$1"/g }

    my @dir_files;
    @dir_files = globdir $files_in_dir if defined $files_in_dir;
    my $t_start = new Benchmark;
    while ($test = shift(@tests)) {
        $te = $test;
        if ($^O eq 'VMS') { $te =~ s/^.*\.t\./[.t./; }
        my $leader = "$te" . '.' x (20 - length($te));
        my $ml = "";
        $ml = "\r$leader" if -t STDOUT and not $ENV{HARNESS_NOTTY};
        print $leader;
        my $fh = new FileHandle;
        $fh->open($test) or print "can't open $test. $!\n";
        my $first = <$fh>;
        my $s = $switches;
        $s .= q[ "-T"] if $first =~ /^#!.*\bperl.*-\w*T/;
        $fh->close or print "can't close $test. $!\n";
        my $cmd = ($ENV{'COMPILE_TEST'})
            ? "./perl -I../lib ../utils/perlcc $test -run 2>> ./compilelog |"
            : "$^X $s $test|";
        $cmd = "MCR $cmd" if $^O eq 'VMS';
        $fh->open($cmd) or print "can't run $test. $!\n";
        $ok = $next = $max = 0;
        my %todo = ();
        my($bonus, $skipped) = (0, 0);
        my $skip_reason;
        while (<$fh>) {
            if (/^1\.\.([0-9]+) todo([\d\s]+)\;/) {
                $max = $1;
                for (split(/\s+/, $2)) { $todo{$_} = 1; }
                $totmax += $max; $files++;
                $next = 1;
            } elsif (/^1\.\.([0-9]+)/) {
                $max = $1;
                $totmax += $max; $files++;
                $next = 1;
            } elsif ($max && /^(not\s+)?ok\b/) {
                my $this = $next;
                if (/^not ok\s*(\d*)/) {
                    $this = $1 if $1 > 0;
                    print "${ml}NOK $this \n" if $ml;
                    if (!$todo{$this}) {
                        push @failed, $this;
                    } else {
                        $ok++; $totok++;
                    }
                } elsif (/^ok\s*(\d*)(\s*\#\s*[Ss]kip\S*(?:(?>\s+)(.+))?)?/) {
                    $this = $1 if $1 > 0;
                    print "${ml}ok $this " if $ml;
                    $ok++; $totok++;
                    $skipped++ if defined $2;
                    my $reason;
                    $reason = 'unknown reason' if defined $2;
                    $reason = $3 if defined $3;
                    if (defined $reason and defined $skip_reason) {
                        # print "was: '$skip_reason' new '$reason'\n";
                        $skip_reason = 'various reasons'
                            if $skip_reason ne $reason;
                    } elsif (defined $reason) {
                        $skip_reason = $reason;
                    }
                    $bonus++, $totbonus++ if $todo{$this};
                }
                if ($this > $next) {
                    # tests $next .. $this-1 were never answered; record them
                    push @failed, $next..$this-1;
                } elsif ($this < $next) {
                    # we have seen more "ok" lines than the 1..M header suggests
                    warn "Confused test output: test $this answered after test ", $next-1, "\n";
                    $next = $this;
                }
                $next = $this + 1;
            }
        }
        $fh->close; # must close to reap child resource values
        my $wstatus = $ignore_exitcode ? 0 : $?;   # can we trust $? ?
        my $estatus;
        $estatus = ($^O eq 'VMS'
                    ? eval 'use vmsish "status"; $estatus = $?'
                    : $wstatus >> 8);
        if ($wstatus) {
            # only the first two get defaults; $percent is computed below
            my ($failed, $canon, $percent) = ('??', '??');
            printf "${ml}dubious\n\tTest returned status $estatus (wstat %d, 0x%x)\n",
                $wstatus, $wstatus;
            print "\t\t(VMS status is $estatus)\n" if $^O eq 'VMS';
            if (corestatus($wstatus)) { # until we have a wait module
                if ($have_devel_corestack) {
                    Devel::CoreStack::stack($^X);
                } else {
                    print "\ttest program seems to have generated a core\n";
                }
            }
            $bad++;
            if ($max) {
                if ($next == $max + 1 and not @failed) {
                    print "\tafter all the subtests completed successfully\n";
                    $percent = 0;
                    $failed = 0; # But we do not set $canon!
                } else {
                    push @failed, $next..$max;
                    $failed = @failed;
                    (my $txt, $canon) = canonfailed($max,$skipped,@failed);
                    $percent = 100*(scalar @failed)/$max;
                    print "DIED. ", $txt;
                }
            }
            $failedtests{$test} = { canon => $canon, max => $max || '??',
                                    failed => $failed, name => $test,
                                    percent => $percent,
                                    estat => $estatus, wstat => $wstatus };
        } elsif ($ok == $max && $next == $max+1) {
            if ($max and $skipped + $bonus) {
                my @msg;
                push(@msg, "$skipped/$max subtest".($skipped>1?'s':'')." skipped: $skip_reason")
                    if $skipped;
                push(@msg, "$bonus subtest".($bonus>1?'s':'').
                     " unexpectedly succeeded")
                    if $bonus;
                print "${ml}ok, ".join(', ', @msg)." \n";
            } elsif ($max) {
                print "${ml}ok\n";
            } else {
                print "skipping test on this platform\n";
                $tests_skipped++;
            }
            $good++;
        } elsif ($max) {
            if ($next <= $max) {
                push @failed, $next..$max;
            }
            if (@failed) {
                my ($txt, $canon) = canonfailed($max,$skipped,@failed);
                print $txt;
                $failedtests{$test} = { canon => $canon, max => $max,
                                        failed => scalar @failed, name => $test,
                                        percent => 100*(scalar @failed)/$max,
                                        estat => '', wstat => '' };
            } else {
                print "Don't know which tests failed: got $ok ok, expected $max\n";
                $failedtests{$test} = { canon => '??', max => $max, failed => '??',
                                        name => $test, percent => undef,
                                        estat => '', wstat => '' };
            }
            $bad++;
        } elsif ($next == 0) {
            print "FAILED before any test output arrived\n";
            $bad++;
            $failedtests{$test} = { canon => '??', max => '??', failed => '??',
                                    name => $test, percent => undef,
                                    estat => '', wstat => '' };
        }
        $subtests_skipped += $skipped;
        if (defined $files_in_dir) {
            my @new_dir_files = globdir $files_in_dir;
            if (@new_dir_files != @dir_files) {
                my %f;
                @f{@new_dir_files} = (1) x @new_dir_files;
                delete @f{@dir_files};
                my @f = sort keys %f;
                print "LEAKED FILES: @f\n";
                @dir_files = @new_dir_files;
            }
        }
    }

    my $t_total = timediff(new Benchmark, $t_start);

    if ($^O eq 'VMS') {
        if (defined $old5lib) {
            $ENV{PERL5LIB} = $old5lib;
        } else {
            delete $ENV{PERL5LIB};
        }
    }

    my $bonusmsg = '';
    $bonusmsg = (" ($totbonus subtest".($totbonus>1?'s':'').
                 " UNEXPECTEDLY SUCCEEDED)")
        if $totbonus;

    if ($tests_skipped) {
        $bonusmsg .= ", $tests_skipped test" . ($tests_skipped != 1 ? 's' : '');
        if ($subtests_skipped) {
            $bonusmsg .= " and $subtests_skipped subtest"
                         . ($subtests_skipped != 1 ? 's' : '');
        }
        $bonusmsg .= ' skipped';
    }
    elsif ($subtests_skipped) {
        $bonusmsg .= ", $subtests_skipped subtest"
                     . ($subtests_skipped != 1 ? 's' : '')
                     . " skipped";
    }

    if ($bad == 0 && $totmax) {
        print "All tests successful$bonusmsg.\n";
    } elsif ($total == 0) {
        die "FAILED--no tests were run for some reason.\n";
    } elsif ($totmax == 0) {
        my $blurb = $total==1 ? "script" : "scripts";
        die "FAILED--$total test $blurb could be run, alas--no output ever seen\n";
    } else {
        $pct = sprintf("%.2f", $good / $total * 100);
        my $subpct = sprintf " %d/%d subtests failed, %.2f%% okay.",
            $totmax - $totok, $totmax, 100*$totok/$totmax;
        my $script;
        for $script (sort keys %failedtests) {
            $curtest = $failedtests{$script};
            write;   # one line of the failure report, using the formats above
        }
        if ($bad) {
            $bonusmsg =~ s/^,\s*//;
            print "$bonusmsg.\n" if $bonusmsg;
            die "Failed $bad/$total test scripts, $pct% okay.$subpct\n";
        }
    }
    printf("Files=%d, Tests=%d, %s\n", $files, $totmax, timestr($t_total, 'nop'));

    return ($bad == 0 && $totmax);
}

my $tried_devel_corestack;

sub corestatus {
    my($st) = @_;
    my $ret;
    eval { require 'wait.ph' };
    if ($@) { $ret = ($st & 0200) }    # Tim says, this is for 90%
    else    { $ret = WCOREDUMP($st) }
    eval { require Devel::CoreStack; $have_devel_corestack++ }
        unless $tried_devel_corestack++;
    $ret;
}

sub canonfailed ($@) {
    my($max,$skipped,@failed) = @_;
    my %seen;
    @failed = sort {$a <=> $b} grep !$seen{$_}++, @failed;
    my $failed = @failed;
    my(@result, @canon, $canon, $min);
    my $last = $min = shift @failed;
    if (@failed) {
        for (@failed, $failed[-1]) { # don't forget the last one
            if ($_ > $last+1 || $_ == $last) {
                # close the current run of consecutive test numbers
                push @canon, ($min == $last) ? $min : "$min-$last";
                $min = $_;
            }
            $last = $_;
        }
        local $" = ", ";
        push @result, "FAILED tests @canon\n";
        $canon = "@canon";
    } else {
        push @result, "FAILED test $last\n";
        $canon = $last;
    }

    push @result, "\tFailed $failed/$max tests, ";
    push @result, sprintf("%.2f",100*(1-$failed/$max)), "% okay";
    my $ender = 's' x ($skipped > 1);
    my $good = $max - $failed - $skipped;
    my $goodper = sprintf("%.2f",100*($good/$max));
    push @result, " (-$skipped skipped test$ender: $good okay, $goodper%)" if $skipped;

    my $txt = join "", @result;
    ($txt, $canon);
}

1;

=head1 NAME

Test::Harness - run perl standard test scripts with statistics

=head1 SYNOPSIS

    use Test::Harness;

    runtests(@tests);

=head1 DESCRIPTION

(By using the L<Test> module, you can write test scripts without
knowing the exact output this module expects. However, if you need to
know the specifics, read on!)

Perl test scripts print to standard output C<"ok N"> for each single
test, where C<N> is an increasing sequence of integers. The first line
output by a standard test script is C<"1..M">, with C<M> being the
number of tests that should be run within the test script.
Test::Harness::runtests(@tests) runs all the test scripts named as
arguments and checks standard output for the expected C<"ok N">
strings.

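For illustration, here is a minimal test script that produces exactly
this kind of output (the file name is hypothetical):

    #!/usr/bin/perl
    # t/example.t (hypothetical): header first, then one line per test
    print "1..2\n";        # announce that two tests will follow
    print "ok 1\n";        # test 1 passed
    print "not ok 2\n";    # test 2 failed
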
After all tests have been performed, runtests() prints some
performance statistics that are computed by the Benchmark module.

=head2 The test script output

Any output from the test script to standard error is ignored and
bypassed, and thus will be seen by the user. Lines written to standard
output containing C</^(not\s+)?ok\b/> are interpreted as feedback for
runtests(). All other lines are discarded.

The test numbers after C<ok> may be omitted. In this case
Test::Harness temporarily maintains its own counter until the script
supplies test numbers again. So the following test script

    print <<END;
    1..6
    not ok
    ok
    not ok
    ok
    ok
    END

will generate

    FAILED tests 1, 3, 6
    Failed 3/6 tests, 50.00% okay

The global variable $Test::Harness::verbose is exportable and can be
used to let runtests() display the standard output of the script
without otherwise altering its behavior.

The global variable $Test::Harness::switches is exportable and can be
used to set perl command line options used for running the test
script(s). The default value is C<-w>.

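A sketch of how the two variables might be used together from a driver
script (the test file name is made up):

    use Test::Harness qw(&runtests $verbose $switches);

    $verbose  = 1;            # echo each test script's standard output
    $switches = '-w -Ilib';   # perl options passed to the child processes

    runtests('t/example.t');  # hypothetical test script
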
If the standard output line contains the substring C< # Skip> (with
variations in spacing and case) after C<ok> or C<ok NUMBER>, it is
counted as a skipped test. If the whole test script succeeds, the
count of skipped tests is included in the generated output.

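For example, a test script might emit the following (a hypothetical
transcript; the text after C<# Skip> is reported as the skip reason):

    1..3
    ok 1
    ok 2 # Skip no network connection available
    ok 3
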
=head1 EXPORT

C<&runtests> is exported by Test::Harness by default.

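So the typical minimal driver needs no import list at all:

    use Test::Harness;    # &runtests is now available

    runtests(@ARGV);      # run whatever test scripts were named
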
=head1 DIAGNOSTICS

=over 4

=item C<All tests successful.\nFiles=%d, Tests=%d, %s>

If all tests are successful, some statistics about the performance are
printed.

=item C<FAILED tests %s\n\tFailed %d/%d tests, %.2f%% okay.>

For any single script that has failing subtests, statistics like the
above are printed.

=item C<Test returned status %d (wstat %d)>

For scripts that return a non-zero exit status, both C<$? E<gt>E<gt> 8>
and C<$?> are printed in a message similar to the above.

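For reference, the two numbers are related as follows: C<$?> is the raw
wait(2) status of the child process, and the script's own exit code is
its high byte. A small illustration:

    my $wstat  = $?;           # raw wait status, e.g. 1536
    my $estat  = $wstat >> 8;  # exit status of the script, e.g. 6
    my $signal = $wstat & 127; # signal that killed the script, if any
    my $core   = $wstat & 128; # true if a core dump was produced
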
=item C<Failed 1 test, %.2f%% okay. %s>

=item C<Failed %d/%d tests, %.2f%% okay. %s>

If not all tests were successful, the script dies with one of the
above messages.

=back

=head1 ENVIRONMENT

Setting C<HARNESS_IGNORE_EXITCODE> makes harness ignore the exit status
of child processes.

Setting C<HARNESS_NOTTY> to a true value forces it to behave as though
STDOUT were not a console. You may need to set this if you don't want
harness to output more frequent progress messages using carriage
returns. Some consoles may not handle carriage returns properly (which
results in somewhat messy output).

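A sketch of a driver that sets both variables before running the tests
(values are examples only):

    use Test::Harness;

    $ENV{HARNESS_IGNORE_EXITCODE} = 1;  # trust only the "ok" counts
    $ENV{HARNESS_NOTTY}           = 1;  # plain output, no carriage returns

    runtests(@ARGV);
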
If C<HARNESS_FILELEAK_IN_DIR> is set to the name of a directory, harness
will check after each test whether new files appeared in that directory,
and report them as

    LEAKED FILES: scr.tmp 0 my.db

If relative, the directory name is interpreted with respect to the
current directory at the moment runtests() was called. Putting an
absolute path into C<HARNESS_FILELEAK_IN_DIR> may give more predictable
results.

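For example, a driver could watch the system temporary directory (the
directory name here is just an example):

    use Test::Harness;

    $ENV{HARNESS_FILELEAK_IN_DIR} = '/tmp';  # report files left behind
    runtests(@ARGV);
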
Harness sets C<HARNESS_ACTIVE> before executing the individual tests.
This allows the tests to determine if they are being executed through the
harness or by any other means.

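A test script could use this, for instance, to stay quiet under the
harness but be chatty when run by hand (a sketch):

    # inside a test script
    unless ($ENV{HARNESS_ACTIVE}) {
        print STDERR "running standalone, printing extra diagnostics\n";
    }
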
=head1 SEE ALSO

L<Test> for writing test scripts, and also L<Benchmark> for the
underlying timing routines.

=head1 AUTHORS

Either Tim Bunce or Andreas Koenig, we don't know. What we know for
sure is that it was inspired by Larry Wall's TEST script that came
with perl distributions for ages. Numerous anonymous contributors
exist. The current maintainer is Andreas Koenig.

=head1 BUGS

Test::Harness uses $^X to determine the perl binary to run the tests
with. Test scripts running via the shebang (C<#!>) line may not be
portable because $^X is not consistent for shebang scripts across
platforms. This is no problem when Test::Harness is run with an
absolute path to the perl binary or when $^X can be found in the path.