our($VERSION, $verbose, $switches, $have_devel_corestack, $curtest,
@ISA, @EXPORT, @EXPORT_OK);
$have_devel_corestack = 0;
$ENV{HARNESS_ACTIVE} = 1;
# Some experimental versions of OS/2 build have broken $?
my $ignore_exitcode = $ENV{HARNESS_IGNORE_EXITCODE};
my $files_in_dir = $ENV{HARNESS_FILELEAK_IN_DIR};
my $tests_skipped = 0;
my $subtests_skipped = 0;
@EXPORT= qw(&runtests);
@EXPORT_OK= qw($verbose $switches);
Failed Test Status Wstat Total Fail Failed List of failed
-------------------------------------------------------------------------------
@<<<<<<<<<<<<<< @>> @>>>> @>>>> @>>> ^##.##% ^<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
~~ ^<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
sub globdir { opendir DIRH, shift; my @f = readdir DIRH; closedir DIRH; @f }
my($test,$te,$ok,$next,$max,$pct,$totok,$totbonus,@failed,%failedtests);
# pass -I flags to children
my $old5lib = $ENV{PERL5LIB};
# VMS has a 255-byte limit on the length of %ENV entries, so
# toss the ones that involve perl_root, the install location
local($ENV{'PERL5LIB'}) = join($Config{path_sep}, grep {!/perl_root/i;} @INC) if $^O eq 'VMS';
local($ENV{'PERL5LIB'}) = join($Config{path_sep}, @INC) unless $^O eq 'VMS';
if ($^O eq 'VMS') { $switches =~ s/-(\S*[A-Z]\S*)/"-$1"/g;}
my @dir_files = globdir $files_in_dir if defined $files_in_dir;
my $t_start = new Benchmark;
while ($test = shift(@tests)) {
if ($^O eq 'VMS') { $te =~ s/^.*\.t\./[.t./; }
my $blank = (' ' x 77);
my $leader = "$te" . '.' x (20 - length($te));
$ml = "\r$blank\r$leader"
if -t STDOUT and not $ENV{HARNESS_NOTTY} and not $verbose;
my $fh = new FileHandle;
$fh->open($test) or print "can't open $test. $!\n";
$s .= join " ", q[ "-T"], map {qq["-I$_"]} @INC
if $first =~ /^#!.*\bperl.*-\w*T/;
$fh->close or print "can't close $test. $!\n";
my $cmd = ($ENV{'HARNESS_COMPILE_TEST'})
? "./perl -I../lib ../utils/perlcc $test "
. "-run 2>> ./compilelog |"
$cmd = "MCR $cmd" if $^O eq 'VMS';
$fh->open($cmd) or print "can't run $test. $!\n";
$ok = $next = $max = 0;
if (/^1\.\.([0-9]+) todo([\d\s]+)\;/) {
for (split(/\s+/, $2)) { $todo{$_} = 1; }
} elsif (/^1\.\.([0-9]+)(\s*\#\s*[Ss]kip\S*(?>\s+)(.+))?/) {
$skip_reason = $3 if not $max and defined $3;
} elsif ($max && /^(not\s+)?ok\b/) {
if (/^not ok\s*(\d*)/){
$this = $1 if $1 > 0;
print "${ml}NOK $this" if $ml;
} elsif (/^ok\s*(\d*)(\s*\#\s*[Ss]kip\S*(?:(?>\s+)(.+))?)?/) {
$this = $1 if $1 > 0;
print "${ml}ok $this/$max" if $ml;
$skipped++ if defined $2;
$reason = 'unknown reason' if defined $2;
$reason = $3 if defined $3;
if (defined $reason and defined $skip_reason) {
# print "was: '$skip_reason' new '$reason'\n";
$skip_reason = 'various reasons'
if $skip_reason ne $reason;
} elsif (defined $reason) {
$skip_reason = $reason;
$bonus++, $totbonus++ if $todo{$this};
# warn "Test output counter mismatch [test $this]\n";
# no need to warn probably
push @failed, $next..$this-1;
} elsif ($this < $next) {
#we have seen more "ok" lines than the number suggests
warn "Confused test output: test $this answered after test ", $next-1, "\n";
$fh->close; # must close to reap child resource values
my $wstatus = $ignore_exitcode ? 0 : $?; # Can trust $? ?
$estatus = ($^O eq 'VMS'
? eval 'use vmsish "status"; $estatus = $?'
my ($failed, $canon, $percent) = ('??', '??');
printf "${ml}dubious\n\tTest returned status $estatus (wstat %d, 0x%x)\n",
print "\t\t(VMS status is $estatus)\n" if $^O eq 'VMS';
if (corestatus($wstatus)) { # until we have a wait module
if ($have_devel_corestack) {
Devel::CoreStack::stack($^X);
print "\ttest program seems to have generated a core\n";
if ($next == $max + 1 and not @failed) {
print "\tafter all the subtests completed successfully\n";
$failed = 0; # But we do not set $canon!
push @failed, $next..$max;
(my $txt, $canon) = canonfailed($max,$skipped,@failed);
$percent = 100*(scalar @failed)/$max;
$failedtests{$test} = { canon => $canon, max => $max || '??',
name => $test, percent => $percent,
estat => $estatus, wstat => $wstatus,
} elsif ($ok == $max && $next == $max+1) {
if ($max and $skipped + $bonus) {
push(@msg, "$skipped/$max skipped: $skip_reason")
push(@msg, "$bonus/$max unexpectedly succeeded")
print "${ml}ok, ".join(', ', @msg)."\n";
} elsif (defined $skip_reason) {
print "skipped: $skip_reason\n";
print "skipped test on this platform\n";
push @failed, $next..$max;
my ($txt, $canon) = canonfailed($max,$skipped,@failed);
$failedtests{$test} = { canon => $canon, max => $max,
failed => scalar @failed,
name => $test, percent => 100*(scalar @failed)/$max,
estat => '', wstat => '',
print "Don't know which tests failed: got $ok ok, expected $max\n";
$failedtests{$test} = { canon => '??', max => $max,
name => $test, percent => undef,
estat => '', wstat => '',
} elsif ($next == 0) {
print "FAILED before any test output arrived\n";
$failedtests{$test} = { canon => '??', max => '??',
name => $test, percent => undef,
estat => '', wstat => '',
$subtests_skipped += $skipped;
if (defined $files_in_dir) {
my @new_dir_files = globdir $files_in_dir;
if (@new_dir_files != @dir_files) {
@f{@new_dir_files} = (1) x @new_dir_files;
delete @f{@dir_files};
my @f = sort keys %f;
print "LEAKED FILES: @f\n";
@dir_files = @new_dir_files;
my $t_total = timediff(new Benchmark, $t_start);
if (defined $old5lib) {
$ENV{PERL5LIB} = $old5lib;
delete $ENV{PERL5LIB};
$bonusmsg = (" ($totbonus subtest".($totbonus>1?'s':'').
" UNEXPECTEDLY SUCCEEDED)")
if ($tests_skipped) {
$bonusmsg .= ", $tests_skipped test" . ($tests_skipped != 1 ? 's' : '');
if ($subtests_skipped) {
$bonusmsg .= " and $subtests_skipped subtest"
. ($subtests_skipped != 1 ? 's' : '');
$bonusmsg .= ' skipped';
elsif ($subtests_skipped) {
$bonusmsg .= ", $subtests_skipped subtest"
. ($subtests_skipped != 1 ? 's' : '')
if ($bad == 0 && $totmax) {
print "All tests successful$bonusmsg.\n";
die "FAILED--no tests were run for some reason.\n";
} elsif ($totmax==0) {
my $blurb = $total==1 ? "script" : "scripts";
die "FAILED--$total test $blurb could be run, alas--no output ever seen\n";
$pct = sprintf("%.2f", $good / $total * 100);
my $subpct = sprintf " %d/%d subtests failed, %.2f%% okay.",
$totmax - $totok, $totmax, 100*$totok/$totmax;
for $script (sort keys %failedtests) {
$curtest = $failedtests{$script};
$bonusmsg =~ s/^,\s*//;
print "$bonusmsg.\n" if $bonusmsg;
die "Failed $bad/$total test scripts, $pct% okay.$subpct\n";
printf("Files=%d, Tests=%d, %s\n", $files, $totmax, timestr($t_total, 'nop'));
return ($bad == 0 && $totmax) ;
my $tried_devel_corestack;
eval {require 'wait.ph'};
$ret = ($st & 0200); # Tim says, this is for 90%
$ret = WCOREDUMP($st);
eval { require Devel::CoreStack; $have_devel_corestack++ }
unless $tried_devel_corestack++;
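
# canonfailed($max, $skipped, @failed): collapse the list of failed subtest
# numbers into ranges (e.g. "3, 7-9") and return the formatted failure
# summary text together with the canonical list used in the final report.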
sub canonfailed ($@) {
my($max,$skipped,@failed) = @_;
@failed = sort {$a <=> $b} grep !$seen{$_}++, @failed;
my $failed = @failed;
my $last = $min = shift @failed;
for (@failed, $failed[-1]) { # don't forget the last one
if ($_ > $last+1 || $_ == $last) {
push @canon, "$min-$last";
push @result, "FAILED tests @canon\n";
push @result, "FAILED test $last\n";
push @result, "\tFailed $failed/$max tests, ";
push @result, sprintf("%.2f",100*(1-$failed/$max)), "% okay";
my $ender = 's' x ($skipped > 1);
my $good = $max - $failed - $skipped;
my $goodper = sprintf("%.2f",100*($good/$max));
push @result, " (-$skipped skipped test$ender: $good okay, $goodper%)" if $skipped;
my $txt = join "", @result;

Test::Harness - run perl standard test scripts with statistics

(By using the L<Test> module, you can write test scripts without
knowing the exact output this module expects. However, if you need to
know the specifics, read on!)

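For example, a hypothetical test script written with the L<Test> module
might look like this; L<Test> takes care of emitting the C<1..N> plan
and the C<ok>/C<not ok> lines in the format described below:

    use strict;
    use Test;
    BEGIN { plan tests => 2 }   # declare how many subtests will follow

    ok(1 + 1 == 2);             # subtest 1: a passing check
    ok(defined $ENV{PATH});     # subtest 2: another simple check
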
Perl test scripts print to standard output C<"ok N"> for each single
test, where C<N> is an increasing sequence of integers. The first line
output by a standard test script is C<"1..M"> with C<M> being the
number of tests that should be run within the test script.
Test::Harness::runtests(@tests) runs all the test scripts named as
arguments and checks standard output for the expected C<"ok N"> strings.

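A minimal driver script might therefore look like this (the test file
names are only placeholders):

    use Test::Harness;

    # runtests() is exported by default and dies if any script fails.
    runtests('t/basic.t', 't/io.t');
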
After all tests have been performed, runtests() prints some
performance statistics that are computed by the Benchmark module.

=head2 The test script output

Any output from the test script to standard error is ignored and
bypassed, and will thus be seen by the user. Lines written to standard
output containing C</^(not\s+)?ok\b/> are interpreted as feedback for
runtests(). All other lines are discarded.

It is tolerated if the test numbers after C<ok> are omitted. In this
case Test::Harness temporarily maintains its own counter until the
script supplies test numbers again. So the following test script

    print <<END;
    1..6
    not ok
    ok
    not ok
    ok
    ok
    END

will generate

    FAILED tests 1, 3, 6
    Failed 3/6 tests, 50.00% okay

The global variable $Test::Harness::verbose is exportable and can be
used to let runtests() display the standard output of the script
without altering the behavior otherwise.

The global variable $Test::Harness::switches is exportable and can be
used to set perl command line options used for running the test
script(s). The default value is C<-w>.

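A sketch of how these variables can be imported and set (the switch
values and file names are only illustrative):

    use Test::Harness qw(&runtests $verbose $switches);

    $verbose  = 1;            # echo each test script's output
    $switches = '-w -Ilib';   # perl switches used to run every test script
    runtests(glob 't/*.t');
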
If the standard output line contains the substring C< # Skip> (with
variations in spacing and case) after C<ok> or C<ok NUMBER>, it is
counted as a skipped test. If the whole test script succeeds, the
count of skipped tests is included in the generated output.

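For example, a test script might emit a line like the following (the
reason text is made up); the harness then counts subtest 13 as skipped:

    ok 13 # Skip: no network connection available
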
C<Test::Harness> reports the text after C< # Skip(whatever)> as the
reason for skipping. Similarly, an explanation can be included in a
C<1..0> line emitted if the test script is skipped completely:

    1..0 # Skipped: no leverage found

C<&runtests> is exported by Test::Harness by default.

=item C<All tests successful.\nFiles=%d, Tests=%d, %s>

If all tests are successful, some statistics about the performance
are printed.

=item C<FAILED tests %s\n\tFailed %d/%d tests, %.2f%% okay.>

For any single script that has failing subtests, statistics like the
above are printed.

=item C<Test returned status %d (wstat %d)>

For scripts that return a non-zero exit status, both C<$? E<gt>E<gt> 8>
and C<$?> are printed in a message similar to the above.

=item C<Failed 1 test, %.2f%% okay. %s>

=item C<Failed %d/%d tests, %.2f%% okay. %s>

If not all tests were successful, the script dies with one of the
above messages.

Setting C<HARNESS_IGNORE_EXITCODE> makes harness ignore the exit status
of child processes.

Setting C<HARNESS_NOTTY> to a true value forces harness to behave as
though STDOUT were not a console. You may need to set this if you don't
want harness to output more frequent progress messages using carriage
returns. Some consoles may not handle carriage returns properly (which
results in somewhat messy output).

Setting C<HARNESS_COMPILE_TEST> to a true value will make harness attempt
to compile the test using C<perlcc> before running it.

If C<HARNESS_FILELEAK_IN_DIR> is set to the name of a directory, harness
will check after each test whether new files appeared in that directory,
and report them as

    LEAKED FILES: scr.tmp 0 my.db

If relative, the directory name is interpreted with respect to the
current directory at the moment runtests() was called. Putting an
absolute path into C<HARNESS_FILELEAK_IN_DIR> may give more predictable
results.

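A sketch of how this could be used from a driver script (the directory
name is only an example):

    use Test::Harness;

    # Watch a scratch directory for files the test scripts leave behind.
    $ENV{HARNESS_FILELEAK_IN_DIR} = 't/tmp';
    runtests(glob 't/*.t');
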
Harness sets C<HARNESS_ACTIVE> before executing the individual tests.
This allows the tests to determine if they are being executed through the
harness or by any other means.

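For example, a test script could use it to suppress output that is only
useful when the script is run by hand:

    # Inside a test script: extra diagnostics only when not under the harness.
    print STDERR "verbose debugging output\n" unless $ENV{HARNESS_ACTIVE};
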
L<Test> for writing test scripts and also L<Benchmark> for the
underlying timing routines.

Either Tim Bunce or Andreas Koenig, we don't know. What we know for
sure is that it was inspired by Larry Wall's TEST script that came
with perl distributions for ages. Numerous anonymous contributors
exist. The current maintainer is Andreas Koenig.

Test::Harness uses $^X to determine the perl binary to run the tests
with. Test scripts running via the shebang (C<#!>) line may not be
portable because $^X is not consistent for shebang scripts across
platforms. This is no problem when Test::Harness is run with an
absolute path to the perl binary or when $^X can be found in the path.