# --- Package-level state for Test::Harness (gapped excerpt) -----------------
# NOTE(review): the leading integer on each line is the original file's line
# number; gaps in that numbering mean lines are missing from this excerpt.
10 use vars qw($VERSION $verbose $switches $have_devel_corestack $curtest
11 @ISA @EXPORT @EXPORT_OK);
# Set once Devel::CoreStack has been successfully loaded (see corestatus).
12 $have_devel_corestack = 0;
16 # Some experimental versions of OS/2 build have broken $?
17 my $ignore_exitcode = $ENV{HARNESS_IGNORE_EXITCODE};
# Optional directory to watch for files leaked by test scripts (see POD).
19 my $files_in_dir = $ENV{HARNESS_FILELEAK_IN_DIR};
# Running totals of whole scripts skipped / individual subtests skipped.
21 my $tests_skipped = 0;
22 my $subtests_skipped = 0;
# runtests() is exported by default; $verbose and $switches on request.
25 @EXPORT= qw(&runtests);
26 @EXPORT_OK= qw($verbose $switches);
# The lines below are picture lines from the failure-report "format"
# definitions; the enclosing format declarations are not visible here.
29 Failed Test Status Wstat Total Fail Failed List of failed
30 -------------------------------------------------------------------------------
34 @<<<<<<<<<<<<<< @>> @>>>> @>>>> @>>> ^##.##% ^<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
43 ~~ ^<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
# globdir($dir): return every entry (including '.' and '..') of $dir.
# Replaces the original one-liner's bareword dirhandle (DIRH) with a
# lexical handle, and returns an empty list when the directory cannot be
# opened instead of calling readdir on an invalid handle.
sub globdir {
    my $dir = shift;
    opendir my $dh, $dir or return;    # unopenable dir => empty list
    my @entries = readdir $dh;
    closedir $dh;
    return @entries;
}
# --- Interior of runtests(@tests) -------------------------------------------
# NOTE(review): this excerpt is gapped — the embedded original line numbers
# jump (56, 63, 64, ...), so many statements are missing between the visible
# lines. Code is left byte-identical below; only comments are added.
# Working state for the whole run plus the per-script failure table.
56 my($test,$te,$ok,$next,$max,$pct,$totok,$totbonus,@failed,%failedtests);
63 # pass -I flags to children
64 my $old5lib = $ENV{PERL5LIB};
# local() restores the caller's PERL5LIB automatically on scope exit.
65 local($ENV{'PERL5LIB'}) = join($Config{path_sep}, @INC);
# VMS: quote switches containing uppercase letters to preserve their case.
67 if ($^O eq 'VMS') { $switches =~ s/-(\S*[A-Z]\S*)/"-$1"/g }
# Snapshot the watched directory for the file-leak check further down.
69 my @dir_files = globdir $files_in_dir if defined $files_in_dir;
70 my $t_start = new Benchmark;
# Main loop: run each test script and parse its "1..M" / "ok N" output.
71 while ($test = shift(@tests)) {
74 if ($^O eq 'VMS') { $te =~ s/^.*\.t\./[.t./; }
# Progress label, padded with dots to a fixed column.
75 print "$te" . '.' x (20 - length($te));
76 my $fh = new FileHandle;
77 $fh->open($test) or print "can't open $test. $!\n";
# Propagate taint mode when the script's shebang line requests -T.
80 $s .= q[ "-T"] if $first =~ /^#!.*\bperl.*-\w*T/;
81 $fh->close or print "can't close $test. $!\n";
# COMPILE_TEST selects the perlcc-based compile-and-run variant of the command.
82 my $cmd = ($ENV{'COMPILE_TEST'})?
83 "./perl -I../lib ../utils/perlcc $test -run -verbose dcf -log ./compilelog |"
85 $cmd = "MCR $cmd" if $^O eq 'VMS';
# Re-open $fh as a pipe reading the test script's standard output.
86 $fh->open($cmd) or print "can't run $test. $!\n";
87 $ok = $next = $max = 0;
# Header "1..M todo N N ...;" marks tests expected to fail (TODO tests).
96 if (/^1\.\.([0-9]+) todo([\d\s]+)\;/) {
98 for (split(/\s+/, $2)) { $todo{$_} = 1; }
# Plain "1..M" header: remember the expected number of subtests.
102 } elsif (/^1\.\.([0-9]+)/) {
# A result line: "ok N" or "not ok N" (the number may be omitted).
107 } elsif ($max && /^(not\s+)?ok\b/) {
109 if (/^not ok\s*(\d*)/){
110 $this = $1 if $1 > 0;
# "ok N # Skip" counts as a skipped-but-passing subtest.
117 } elsif (/^ok\s*(\d*)(\s*\#\s*[Ss]kip)?/) {
118 $this = $1 if $1 > 0;
121 $skipped++ if defined $2;
# A TODO test that passed is a bonus (unexpected success).
122 $bonus++, $totbonus++ if $todo{$this};
125 # warn "Test output counter mismatch [test $this]\n";
126 # no need to warn probably
# Numbers jumped ahead: everything from $next to $this-1 never reported.
127 push @failed, $next..$this-1;
128 } elsif ($this < $next) {
129 #we have seen more "ok" lines than the number suggests
130 warn "Confused test output: test $this answered after test ", $next-1, "\n";
136 $fh->close; # must close to reap child resource values
137 my $wstatus = $ignore_exitcode ? 0 : $?; # Can trust $? ?
# On VMS, $? needs vmsish "status" semantics to be interpreted correctly.
139 $estatus = ($^O eq 'VMS'
140 ? eval 'use vmsish "status"; $estatus = $?'
# Non-zero exit: report the wait status and check for a core dump.
143 my ($failed, $canon, $percent) = ('??', '??');
144 printf "dubious\n\tTest returned status $estatus (wstat %d, 0x%x)\n",
146 print "\t\t(VMS status is $estatus)\n" if $^O eq 'VMS';
147 if (corestatus($wstatus)) { # until we have a wait module
148 if ($have_devel_corestack) {
149 Devel::CoreStack::stack($^X);
151 print "\ttest program seems to have generated a core\n";
# All subtests passed, yet the script still exited non-zero.
156 if ($next == $max + 1 and not @failed) {
157 print "\tafter all the subtests completed successfully\n";
159 $failed = 0; # But we do not set $canon!
# Script died early: every unreported test counts as a failure.
161 push @failed, $next..$max;
163 (my $txt, $canon) = canonfailed($max,$skipped,@failed);
164 $percent = 100*(scalar @failed)/$max;
# Record failure details for the summary table printed at the end.
168 $failedtests{$test} = { canon => $canon, max => $max || '??',
170 name => $test, percent => $percent,
171 estat => $estatus, wstat => $wstatus,
# Clean pass: every expected test seen, all ok.
173 } elsif ($ok == $max && $next == $max+1) {
174 if ($max and $skipped + $bonus) {
176 push(@msg, "$skipped/$max subtest".($skipped>1?'s':'')." skipped")
178 push(@msg, "$bonus subtest".($bonus>1?'s':'').
179 " unexpectedly succeeded")
181 print "ok, ".join(', ', @msg)."\n";
# "1..0"-style header: the whole script elected to skip itself.
185 print "skipping test on this platform\n";
# Some subtests failed even though the exit status was clean.
191 push @failed, $next..$max;
194 my ($txt, $canon) = canonfailed($max,$skipped,@failed);
196 $failedtests{$test} = { canon => $canon, max => $max,
197 failed => scalar @failed,
198 name => $test, percent => 100*(scalar @failed)/$max,
199 estat => '', wstat => '',
# Counts disagree and we cannot tell which individual tests failed.
202 print "Don't know which tests failed: got $ok ok, expected $max\n";
203 $failedtests{$test} = { canon => '??', max => $max,
205 name => $test, percent => undef,
206 estat => '', wstat => '',
# No test output at all arrived before the script finished.
210 } elsif ($next == 0) {
211 print "FAILED before any test output arrived\n";
213 $failedtests{$test} = { canon => '??', max => '??',
215 name => $test, percent => undef,
216 estat => '', wstat => '',
219 $subtests_skipped += $skipped;
# File-leak check: diff the directory listing against the pre-run snapshot.
220 if (defined $files_in_dir) {
221 my @new_dir_files = globdir $files_in_dir;
222 if (@new_dir_files != @dir_files) {
# Set difference via hash slices: entries present now but not before.
224 @f{@new_dir_files} = (1) x @new_dir_files;
225 delete @f{@dir_files};
226 my @f = sort keys %f;
227 print "LEAKED FILES: @f\n";
228 @dir_files = @new_dir_files;
# End of per-script loop; compute elapsed time for the whole run.
232 my $t_total = timediff(new Benchmark, $t_start);
# Restore the caller's PERL5LIB (the local() above covers normal exits).
235 if (defined $old5lib) {
236 $ENV{PERL5LIB} = $old5lib;
238 delete $ENV{PERL5LIB};
# Build the "(N subtests UNEXPECTEDLY SUCCEEDED) ..., N skipped" summary text.
242 $bonusmsg = (" ($totbonus subtest".($totbonus>1?'s':'').
243 " UNEXPECTEDLY SUCCEEDED)")
245 if ($tests_skipped) {
246 $bonusmsg .= ", $tests_skipped test" . ($tests_skipped != 1 ? 's' : '');
247 if ($subtests_skipped) {
248 $bonusmsg .= " and $subtests_skipped subtest"
249 . ($subtests_skipped != 1 ? 's' : '');
251 $bonusmsg .= ' skipped';
253 elsif ($subtests_skipped) {
254 $bonusmsg .= ", $subtests_skipped subtest"
255 . ($subtests_skipped != 1 ? 's' : '')
# Final verdict: success, nothing run, no output, or failure summary (die).
258 if ($bad == 0 && $totmax) {
259 print "All tests successful$bonusmsg.\n";
261 die "FAILED--no tests were run for some reason.\n";
262 } elsif ($totmax==0) {
263 my $blurb = $total==1 ? "script" : "scripts";
264 die "FAILED--$total test $blurb could be run, alas--no output ever seen\n";
266 $pct = sprintf("%.2f", $good / $total * 100);
267 my $subpct = sprintf " %d/%d subtests failed, %.2f%% okay.",
268 $totmax - $totok, $totmax, 100*$totok/$totmax;
# $curtest presumably feeds the report format pictures above — TODO confirm
# (the write() call is among the missing lines).
270 for $script (sort keys %failedtests) {
271 $curtest = $failedtests{$script};
275 $bonusmsg =~ s/^,\s*//;
276 print "$bonusmsg.\n" if $bonusmsg;
277 die "Failed $bad/$total test scripts, $pct% okay.$subpct\n";
# Per-run statistics line; 'nop' is a Benchmark::timestr style — presumably
# suppresses part of the timing breakdown, confirm against Benchmark docs.
280 printf("Files=%d, Tests=%d, %s\n", $files, $totmax, timestr($t_total, 'nop'));
282 return ($bad == 0 && $totmax) ;
# --- corestatus interior (gapped excerpt; sub header not visible) -----------
# Remembers whether we already attempted to load Devel::CoreStack, so the
# eval-require below runs at most once per process.
285 my $tried_devel_corestack;
# Prefer the real W* macros from wait.ph; fall back to the 0200 bit below.
290 eval {require 'wait.ph'};
293 $ret = ($st & 0200); # Tim says, this is for 90%
296 $ret = WCOREDUMP($st);
# Lazily load Devel::CoreStack on first call; $have_devel_corestack gates
# the stack dump attempted by runtests() when a core file is suspected.
299 eval { require Devel::CoreStack; $have_devel_corestack++ }
300 unless $tried_devel_corestack++;
# canonfailed($max, $skipped, @failed): build the human-readable failure
# text and a canonical, range-compressed list of failed test numbers
# (e.g. "2-4 7"); used by runtests() as ($txt, $canon).
# NOTE(review): excerpt is gapped — some lines of this sub are missing.
305 sub canonfailed ($@) {
306 my($max,$skipped,@failed) = @_;
# De-duplicate while sorting the failed test numbers numerically.
308 @failed = sort {$a <=> $b} grep !$seen{$_}++, @failed;
309 my $failed = @failed;
313 my $last = $min = shift @failed;
# Walk the sorted list, emitting single numbers or "N-M" ranges into @canon.
316 for (@failed, $failed[-1]) { # don't forget the last one
317 if ($_ > $last+1 || $_ == $last) {
321 push @canon, "$min-$last";
328 push @result, "FAILED tests @canon\n";
331 push @result, "FAILED test $last\n";
335 push @result, "\tFailed $failed/$max tests, ";
336 push @result, sprintf("%.2f",100*(1-$failed/$max)), "% okay";
# 's' x (cond) yields 's' or '' — cheap pluralization.
337 my $ender = 's' x ($skipped > 1);
338 my $good = $max - $failed - $skipped;
339 my $goodper = sprintf("%.2f",100*($good/$max));
340 push @result, " (-$skipped skipped test$ender: $good okay, $goodper%)" if $skipped;
342 my $txt = join "", @result;
351 Test::Harness - run perl standard test scripts with statistics
361 (By using the L<Test> module, you can write test scripts without
362 knowing the exact output this module expects. However, if you need to
363 know the specifics, read on!)
365 Perl test scripts print to standard output C<"ok N"> for each single
366 test, where C<N> is an increasing sequence of integers. The first line
367 output by a standard test script is C<"1..M"> with C<M> being the
368 number of tests that should be run within the test
369 script. Test::Harness::runtests(@tests) runs all the test scripts
370 named as arguments and checks standard output for the expected
373 After all tests have been performed, runtests() prints some
374 performance statistics that are computed by the Benchmark module.
376 =head2 The test script output
378 Any output from the test script to standard error is left untouched by
379 runtests() and thus will be seen by the user. Lines written to standard
380 output containing C</^(not\s+)?ok\b/> are interpreted as feedback for
381 runtests(). All other lines are discarded.
383 It is tolerated if the test numbers after C<ok> are omitted. In this
384 case Test::Harness maintains temporarily its own counter until the
385 script supplies test numbers again. So the following test script
399 Failed 3/6 tests, 50.00% okay
401 The global variable $Test::Harness::verbose is exportable and can be
402 used to let runtests() display the standard output of the script
403 without altering the behavior otherwise.
405 The global variable $Test::Harness::switches is exportable and can be
406 used to set perl command line options used for running the test
407 script(s). The default value is C<-w>.
409 If the standard output line contains substring C< # Skip> (with
410 variations in spacing and case) after C<ok> or C<ok NUMBER>, it is
411 counted as a skipped test. If the whole testscript succeeds, the
412 count of skipped tests is included in the generated output.
416 C<&runtests> is exported by Test::Harness per default.
422 =item C<All tests successful.\nFiles=%d, Tests=%d, %s>
424 If all tests are successful some statistics about the performance are
427 =item C<FAILED tests %s\n\tFailed %d/%d tests, %.2f%% okay.>
429 For any single script that has failing subtests statistics like the
432 =item C<Test returned status %d (wstat %d)>
434 Scripts that return a non-zero exit status, both C<$? E<gt>E<gt> 8> and C<$?> are
435 printed in a message similar to the above.
437 =item C<Failed 1 test, %.2f%% okay. %s>
439 =item C<Failed %d/%d tests, %.2f%% okay. %s>
441 If not all tests were successful, the script dies with one of the
448 Setting C<HARNESS_IGNORE_EXITCODE> makes harness ignore the exit status
451 If C<HARNESS_FILELEAK_IN_DIR> is set to the name of a directory, harness
452 will check after each test whether new files appeared in that directory,
455 LEAKED FILES: scr.tmp 0 my.db
457 If relative, directory name is with respect to the current directory at
458 the moment runtests() was called. Putting absolute path into
459 C<HARNESS_FILELEAK_IN_DIR> may give more predictable results.
463 L<Test> for writing test scripts and also L<Benchmark> for the
464 underlying timing routines.
468 Either Tim Bunce or Andreas Koenig, we don't know. What we know for
469 sure is, that it was inspired by Larry Wall's TEST script that came
470 with perl distributions for ages. Numerous anonymous contributors
471 exist. Current maintainer is Andreas Koenig.
475 Test::Harness uses $^X to determine the perl binary to run the tests
476 with. Test scripts running via the shebang (C<#!>) line may not be
477 portable because $^X is not consistent for shebang scripts across
478 platforms. This is no problem when Test::Harness is run with an
479 absolute path to the perl binary or when $^X can be found in the path.