# Package-scope globals (harness configuration, Devel::CoreStack state,
# the per-test record used by the failure-report format) plus Exporter
# bookkeeping.  NOTE(review): `use vars` is obsolete in modern Perl
# (`our` is preferred), kept byte-for-byte here.
10 use vars qw($VERSION $verbose $switches $have_devel_corestack $curtest
11 @ISA @EXPORT @EXPORT_OK);
# Whether Devel::CoreStack loaded successfully; attempted lazily later.
12 $have_devel_corestack = 0;
# Advertise to child test scripts that they are running under the harness.
16 $ENV{HARNESS_ACTIVE} = 1;
18 # Some experimental versions of OS/2 build have broken $?
19 my $ignore_exitcode = $ENV{HARNESS_IGNORE_EXITCODE};
# Optional directory to watch for files leaked by tests (see POD below).
21 my $files_in_dir = $ENV{HARNESS_FILELEAK_IN_DIR};
# Running totals: whole scripts skipped vs. individual subtests skipped.
23 my $tests_skipped = 0;
24 my $subtests_skipped = 0;
# runtests() is exported by default; $verbose/$switches only on request.
27 @EXPORT= qw(&runtests);
28 @EXPORT_OK= qw($verbose $switches);
31 Failed Test Status Wstat Total Fail Failed List of failed
32 -------------------------------------------------------------------------------
36 @<<<<<<<<<<<<<< @>> @>>>> @>>>> @>>> ^##.##% ^<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
45 ~~ ^<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
# Return every entry (including '.' and '..') of the given directory, in
# readdir() order.  Returns an empty list if the directory cannot be
# opened — the same net result as the old code, which read from a global
# bareword handle (DIRH) without ever checking opendir's return value.
sub globdir {
    my $dir = shift;
    opendir my $dh, $dir or return;   # empty list on failure, as before
    my @entries = readdir $dh;
    closedir $dh;
    return @entries;
}
58 my($test,$te,$ok,$next,$max,$pct,$totok,$totbonus,@failed,%failedtests);
65 # pass -I flags to children
66 my $old5lib = $ENV{PERL5LIB};
67 local($ENV{'PERL5LIB'}) = join($Config{path_sep}, @INC);
69 if ($^O eq 'VMS') { $switches =~ s/-(\S*[A-Z]\S*)/"-$1"/g }
71 my @dir_files = globdir $files_in_dir if defined $files_in_dir;
72 my $t_start = new Benchmark;
73 while ($test = shift(@tests)) {
76 if ($^O eq 'VMS') { $te =~ s/^.*\.t\./[.t./; }
77 my $blank = (' ' x 77);
78 my $leader = "$te" . '.' x (20 - length($te));
80 $ml = "\r$blank\r$leader" if -t STDOUT and not $ENV{HARNESS_NOTTY};
82 my $fh = new FileHandle;
83 $fh->open($test) or print "can't open $test. $!\n";
86 $s .= q[ "-T"] if $first =~ /^#!.*\bperl.*-\w*T/;
87 $fh->close or print "can't close $test. $!\n";
88 my $cmd = ($ENV{'COMPILE_TEST'})?
89 "./perl -I../lib ../utils/perlcc $test -run 2>> ./compilelog |"
91 $cmd = "MCR $cmd" if $^O eq 'VMS';
92 $fh->open($cmd) or print "can't run $test. $!\n";
93 $ok = $next = $max = 0;
103 if (/^1\.\.([0-9]+) todo([\d\s]+)\;/) {
105 for (split(/\s+/, $2)) { $todo{$_} = 1; }
109 } elsif (/^1\.\.([0-9]+)(\s*\#\s*[Ss]kip\S*(?>\s+)(.+))?/) {
114 $skip_reason = $3 if not $max and defined $3;
115 } elsif ($max && /^(not\s+)?ok\b/) {
117 if (/^not ok\s*(\d*)/){
118 $this = $1 if $1 > 0;
119 print "${ml}NOK $this\n" if $ml;
126 } elsif (/^ok\s*(\d*)(\s*\#\s*[Ss]kip\S*(?:(?>\s+)(.+))?)?/) {
127 $this = $1 if $1 > 0;
128 print "${ml}ok $this/$max" if $ml;
131 $skipped++ if defined $2;
133 $reason = 'unknown reason' if defined $2;
134 $reason = $3 if defined $3;
135 if (defined $reason and defined $skip_reason) {
136 # print "was: '$skip_reason' new '$reason'\n";
137 $skip_reason = 'various reasons'
138 if $skip_reason ne $reason;
139 } elsif (defined $reason) {
140 $skip_reason = $reason;
142 $bonus++, $totbonus++ if $todo{$this};
145 # warn "Test output counter mismatch [test $this]\n";
146 # no need to warn probably
147 push @failed, $next..$this-1;
148 } elsif ($this < $next) {
149 #we have seen more "ok" lines than the number suggests
150 warn "Confused test output: test $this answered after test ", $next-1, "\n";
156 $fh->close; # must close to reap child resource values
157 my $wstatus = $ignore_exitcode ? 0 : $?; # Can trust $? ?
159 $estatus = ($^O eq 'VMS'
160 ? eval 'use vmsish "status"; $estatus = $?'
163 my ($failed, $canon, $percent) = ('??', '??');
164 printf "${ml}dubious\n\tTest returned status $estatus (wstat %d, 0x%x)\n",
166 print "\t\t(VMS status is $estatus)\n" if $^O eq 'VMS';
167 if (corestatus($wstatus)) { # until we have a wait module
168 if ($have_devel_corestack) {
169 Devel::CoreStack::stack($^X);
171 print "\ttest program seems to have generated a core\n";
176 if ($next == $max + 1 and not @failed) {
177 print "\tafter all the subtests completed successfully\n";
179 $failed = 0; # But we do not set $canon!
181 push @failed, $next..$max;
183 (my $txt, $canon) = canonfailed($max,$skipped,@failed);
184 $percent = 100*(scalar @failed)/$max;
188 $failedtests{$test} = { canon => $canon, max => $max || '??',
190 name => $test, percent => $percent,
191 estat => $estatus, wstat => $wstatus,
193 } elsif ($ok == $max && $next == $max+1) {
194 if ($max and $skipped + $bonus) {
196 push(@msg, "$skipped/$max skipped: $skip_reason")
198 push(@msg, "$bonus/$max unexpectedly succeeded")
200 print "${ml}ok, ".join(', ', @msg)."\n";
203 } elsif (defined $skip_reason) {
204 print "skipped: $skip_reason\n";
207 print "skipped test on this platform\n";
213 push @failed, $next..$max;
216 my ($txt, $canon) = canonfailed($max,$skipped,@failed);
218 $failedtests{$test} = { canon => $canon, max => $max,
219 failed => scalar @failed,
220 name => $test, percent => 100*(scalar @failed)/$max,
221 estat => '', wstat => '',
224 print "Don't know which tests failed: got $ok ok, expected $max\n";
225 $failedtests{$test} = { canon => '??', max => $max,
227 name => $test, percent => undef,
228 estat => '', wstat => '',
232 } elsif ($next == 0) {
233 print "FAILED before any test output arrived\n";
235 $failedtests{$test} = { canon => '??', max => '??',
237 name => $test, percent => undef,
238 estat => '', wstat => '',
241 $subtests_skipped += $skipped;
242 if (defined $files_in_dir) {
243 my @new_dir_files = globdir $files_in_dir;
244 if (@new_dir_files != @dir_files) {
246 @f{@new_dir_files} = (1) x @new_dir_files;
247 delete @f{@dir_files};
248 my @f = sort keys %f;
249 print "LEAKED FILES: @f\n";
250 @dir_files = @new_dir_files;
254 my $t_total = timediff(new Benchmark, $t_start);
257 if (defined $old5lib) {
258 $ENV{PERL5LIB} = $old5lib;
260 delete $ENV{PERL5LIB};
264 $bonusmsg = (" ($totbonus subtest".($totbonus>1?'s':'').
265 " UNEXPECTEDLY SUCCEEDED)")
267 if ($tests_skipped) {
268 $bonusmsg .= ", $tests_skipped test" . ($tests_skipped != 1 ? 's' : '');
269 if ($subtests_skipped) {
270 $bonusmsg .= " and $subtests_skipped subtest"
271 . ($subtests_skipped != 1 ? 's' : '');
273 $bonusmsg .= ' skipped';
275 elsif ($subtests_skipped) {
276 $bonusmsg .= ", $subtests_skipped subtest"
277 . ($subtests_skipped != 1 ? 's' : '')
280 if ($bad == 0 && $totmax) {
281 print "All tests successful$bonusmsg.\n";
283 die "FAILED--no tests were run for some reason.\n";
284 } elsif ($totmax==0) {
285 my $blurb = $total==1 ? "script" : "scripts";
286 die "FAILED--$total test $blurb could be run, alas--no output ever seen\n";
288 $pct = sprintf("%.2f", $good / $total * 100);
289 my $subpct = sprintf " %d/%d subtests failed, %.2f%% okay.",
290 $totmax - $totok, $totmax, 100*$totok/$totmax;
292 for $script (sort keys %failedtests) {
293 $curtest = $failedtests{$script};
297 $bonusmsg =~ s/^,\s*//;
298 print "$bonusmsg.\n" if $bonusmsg;
299 die "Failed $bad/$total test scripts, $pct% okay.$subpct\n";
302 printf("Files=%d, Tests=%d, %s\n", $files, $totmax, timestr($t_total, 'nop'));
304 return ($bad == 0 && $totmax) ;
307 my $tried_devel_corestack;
312 eval {require 'wait.ph'};
315 $ret = ($st & 0200); # Tim says, this is for 90%
318 $ret = WCOREDUMP($st);
321 eval { require Devel::CoreStack; $have_devel_corestack++ }
322 unless $tried_devel_corestack++;
327 sub canonfailed ($@) {
328 my($max,$skipped,@failed) = @_;
330 @failed = sort {$a <=> $b} grep !$seen{$_}++, @failed;
331 my $failed = @failed;
335 my $last = $min = shift @failed;
338 for (@failed, $failed[-1]) { # don't forget the last one
339 if ($_ > $last+1 || $_ == $last) {
343 push @canon, "$min-$last";
350 push @result, "FAILED tests @canon\n";
353 push @result, "FAILED test $last\n";
357 push @result, "\tFailed $failed/$max tests, ";
358 push @result, sprintf("%.2f",100*(1-$failed/$max)), "% okay";
359 my $ender = 's' x ($skipped > 1);
360 my $good = $max - $failed - $skipped;
361 my $goodper = sprintf("%.2f",100*($good/$max));
362 push @result, " (-$skipped skipped test$ender: $good okay, $goodper%)" if $skipped;
364 my $txt = join "", @result;
373 Test::Harness - run perl standard test scripts with statistics
383 (By using the L<Test> module, you can write test scripts without
384 knowing the exact output this module expects. However, if you need to
385 know the specifics, read on!)
387 Perl test scripts print to standard output C<"ok N"> for each single
388 test, where C<N> is an increasing sequence of integers. The first line
389 output by a standard test script is C<"1..M"> with C<M> being the
390 number of tests that should be run within the test
391 script. Test::Harness::runtests(@tests) runs all the testscripts
392 named as arguments and checks standard output for the expected
395 After all tests have been performed, runtests() prints some
396 performance statistics that are computed by the Benchmark module.
398 =head2 The test script output
400 Output that the test script writes to standard error is not examined by
401 the harness; it passes straight through and will be seen by the user.
402 Lines written to standard output matching C</^(not\s+)?ok\b/> are
403 interpreted as feedback for runtests(). All other lines are discarded.
405 It is tolerated if the test numbers after C<ok> are omitted. In this
406 case Test::Harness maintains temporarily its own counter until the
407 script supplies test numbers again. So the following test script
421 Failed 3/6 tests, 50.00% okay
423 The global variable $Test::Harness::verbose is exportable and can be
424 used to let runtests() display the standard output of the script
425 without altering the behavior otherwise.
427 The global variable $Test::Harness::switches is exportable and can be
428 used to set perl command line options used for running the test
429 script(s). The default value is C<-w>.
431 If the standard output line contains substring C< # Skip> (with
432 variations in spacing and case) after C<ok> or C<ok NUMBER>, it is
433 counted as a skipped test. If the whole testscript succeeds, the
434 count of skipped tests is included in the generated output.
436 C<Test::Harness> reports the text after C< # Skip(whatever)> as a
437 reason for skipping. Similarly, one can include a similar explanation
438 in a C<1..0> line emitted if the test is skipped completely:
440 1..0 # Skipped: no leverage found
444 C<&runtests> is exported by Test::Harness by default.
450 =item C<All tests successful.\nFiles=%d, Tests=%d, %s>
452 If all tests are successful some statistics about the performance are
455 =item C<FAILED tests %s\n\tFailed %d/%d tests, %.2f%% okay.>
457 For any single script that has failing subtests statistics like the
460 =item C<Test returned status %d (wstat %d)>
462 Scripts that return a non-zero exit status, both C<$? E<gt>E<gt> 8> and C<$?> are
463 printed in a message similar to the above.
465 =item C<Failed 1 test, %.2f%% okay. %s>
467 =item C<Failed %d/%d tests, %.2f%% okay. %s>
469 If not all tests were successful, the script dies with one of the
476 Setting C<HARNESS_IGNORE_EXITCODE> makes harness ignore the exit status
479 Setting C<HARNESS_NOTTY> to a true value forces it to behave as though
480 STDOUT were not a console. You may need to set this if you don't want
481 harness to output more frequent progress messages using carriage returns.
482 Some consoles may not handle carriage returns properly (which results
483 in a somewhat messy output).
485 If C<HARNESS_FILELEAK_IN_DIR> is set to the name of a directory, harness
486 will check after each test whether new files appeared in that directory,
489 LEAKED FILES: scr.tmp 0 my.db
491 If relative, the directory name is interpreted with respect to the current
492 directory at the moment runtests() was called. Putting an absolute path into
493 C<HARNESS_FILELEAK_IN_DIR> may give more predictable results.
495 Harness sets C<HARNESS_ACTIVE> before executing the individual tests.
496 This allows the tests to determine if they are being executed through the
497 harness or by any other means.
501 L<Test> for writing test scripts and also L<Benchmark> for the
502 underlying timing routines.
506 Either Tim Bunce or Andreas Koenig, we don't know. What we know for
507 sure is, that it was inspired by Larry Wall's TEST script that came
508 with perl distributions for ages. Numerous anonymous contributors
509 exist. Current maintainer is Andreas Koenig.
513 Test::Harness uses $^X to determine the perl binary to run the tests
514 with. Test scripts running via the shebang (C<#!>) line may not be
515 portable because $^X is not consistent for shebang scripts across
516 platforms. This is no problem when Test::Harness is run with an
517 absolute path to the perl binary or when $^X can be found in the path.