package Test::Harness;

BEGIN {require 5.002;}
use Exporter;
use Benchmark;
use Config;
use FileHandle;
use strict;

use vars qw($VERSION $verbose $switches $have_devel_corestack $curtest
            @ISA @EXPORT @EXPORT_OK);
$have_devel_corestack = 0;

$VERSION = "1.1602";

$ENV{HARNESS_ACTIVE} = 1;

# Some experimental versions of OS/2 build have broken $?
my $ignore_exitcode = $ENV{HARNESS_IGNORE_EXITCODE};

my $files_in_dir = $ENV{HARNESS_FILELEAK_IN_DIR};

my $tests_skipped = 0;
my $subtests_skipped = 0;

@ISA=('Exporter');
@EXPORT= qw(&runtests);
@EXPORT_OK= qw($verbose $switches);

format STDOUT_TOP =
Failed Test  Status Wstat Total Fail  Failed  List of failed
-------------------------------------------------------------------------------
.

format STDOUT =
@<<<<<<<<<<<<<< @>> @>>>> @>>>> @>>> ^##.##% ^<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
{ $curtest->{name},
  $curtest->{estat},
  $curtest->{wstat},
  $curtest->{max},
  $curtest->{failed},
  $curtest->{percent},
  $curtest->{canon}
}
~~                                           ^<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
                                             $curtest->{canon}
.


$verbose = 0;
$switches = "-w";

# Return all entries of the given directory (used for the
# HARNESS_FILELEAK_IN_DIR check).
sub globdir { opendir DIRH, shift; my @f = readdir DIRH; closedir DIRH; @f }

sub runtests {
    my(@tests) = @_;
    local($|) = 1;
    my($test,$te,$ok,$next,$max,$pct,$totok,$totbonus,@failed,%failedtests);
    my $totmax = 0;
    my $files = 0;
    my $bad = 0;
    my $good = 0;
    my $total = @tests;

    # pass -I flags to children
    my $old5lib = $ENV{PERL5LIB};
    local($ENV{'PERL5LIB'}) = join($Config{path_sep}, @INC);

    if ($^O eq 'VMS') { $switches =~ s/-(\S*[A-Z]\S*)/"-$1"/g }

    my @dir_files = globdir $files_in_dir if defined $files_in_dir;
    my $t_start = new Benchmark;
    while ($test = shift(@tests)) {
        $te = $test;
        chop($te);
        if ($^O eq 'VMS') { $te =~ s/^.*\.t\./[.t./; }
        print "$te" . '.' x (20 - length($te));
        my $fh = new FileHandle;
        $fh->open($test) or print "can't open $test. $!\n";
        my $first = <$fh>;
        my $s = $switches;
        $s .= q[ "-T"] if $first =~ /^#!.*\bperl.*-\w*T/;
        $fh->close or print "can't close $test. $!\n";
        my $cmd = ($ENV{'COMPILE_TEST'})?
"./perl -I../lib ../utils/perlcc $test -run 2>> ./compilelog |"
            : "$^X $s $test|";
        $cmd = "MCR $cmd" if $^O eq 'VMS';
        $fh->open($cmd) or print "can't run $test. $!\n";
        $ok = $next = $max = 0;
        @failed = ();
        my %todo = ();
        my $bonus = 0;
        my $skipped = 0;
        while (<$fh>) {
            if( $verbose ){
                print $_;
            }
            if (/^1\.\.([0-9]+) todo([\d\s]+)\;/) {
                $max = $1;
                for (split(/\s+/, $2)) { $todo{$_} = 1; }
                $totmax += $max;
                $files++;
                $next = 1;
            } elsif (/^1\.\.([0-9]+)/) {
                $max = $1;
                $totmax += $max;
                $files++;
                $next = 1;
            } elsif ($max && /^(not\s+)?ok\b/) {
                my $this = $next;
                if (/^not ok\s*(\d*)/){
                    $this = $1 if $1 > 0;
                    if (!$todo{$this}) {
                        push @failed, $this;
                    } else {
                        $ok++;
                        $totok++;
                    }
                } elsif (/^ok\s*(\d*)(\s*\#\s*[Ss]kip)?/) {
                    $this = $1 if $1 > 0;
                    $ok++;
                    $totok++;
                    $skipped++ if defined $2;
                    $bonus++, $totbonus++ if $todo{$this};
                }
                if ($this > $next) {
                    # warn "Test output counter mismatch [test $this]\n";
                    # no need to warn probably
                    push @failed, $next..$this-1;
                } elsif ($this < $next) {
                    # we have seen more "ok" lines than the number suggests
                    warn "Confused test output: test $this answered after test ", $next-1, "\n";
                    $next = $this;
                }
                $next = $this + 1;
            }
        }
        $fh->close; # must close to reap child resource values
        my $wstatus = $ignore_exitcode ? 0 : $?;        # Can trust $? ?
        my $estatus;
        $estatus = ($^O eq 'VMS'
                      ? eval 'use vmsish "status"; $estatus = $?'
                      : $wstatus >> 8);
        if ($wstatus) {
            my ($failed, $canon, $percent) = ('??', '??');
            printf "dubious\n\tTest returned status $estatus (wstat %d, 0x%x)\n",
                    $wstatus,$wstatus;
            print "\t\t(VMS status is $estatus)\n" if $^O eq 'VMS';
            if (corestatus($wstatus)) { # until we have a wait module
                if ($have_devel_corestack) {
                    Devel::CoreStack::stack($^X);
                } else {
                    print "\ttest program seems to have generated a core\n";
                }
            }
            $bad++;
            if ($max) {
                if ($next == $max + 1 and not @failed) {
                    print "\tafter all the subtests completed successfully\n";
                    $percent = 0;
                    $failed = 0;        # But we do not set $canon!
                } else {
                    push @failed, $next..$max;
                    $failed = @failed;
                    (my $txt, $canon) = canonfailed($max,$skipped,@failed);
                    $percent = 100*(scalar @failed)/$max;
                    print "DIED. ",$txt;
                }
            }
            $failedtests{$test} = { canon => $canon,  max => $max || '??',
                                    failed => $failed,
                                    name => $test, percent => $percent,
                                    estat => $estatus, wstat => $wstatus,
                                  };
        } elsif ($ok == $max && $next == $max+1) {
            if ($max and $skipped + $bonus) {
                my @msg;
                push(@msg, "$skipped/$max subtest".($skipped>1?'s':'')." skipped")
                    if $skipped;
                push(@msg, "$bonus subtest".($bonus>1?'s':'').
                     " unexpectedly succeeded")
                    if $bonus;
                print "ok, ".join(', ', @msg)."\n";
            } elsif ($max) {
                print "ok\n";
            } else {
                print "skipping test on this platform\n";
                $tests_skipped++;
            }
            $good++;
        } elsif ($max) {
            if ($next <= $max) {
                push @failed, $next..$max;
            }
            if (@failed) {
                my ($txt, $canon) = canonfailed($max,$skipped,@failed);
                print $txt;
                $failedtests{$test} = { canon => $canon,  max => $max,
                                        failed => scalar @failed,
                                        name => $test, percent => 100*(scalar @failed)/$max,
                                        estat => '', wstat => '',
                                      };
            } else {
                print "Don't know which tests failed: got $ok ok, expected $max\n";
                $failedtests{$test} = { canon => '??',  max => $max,
                                        failed => '??',
                                        name => $test, percent => undef,
                                        estat => '', wstat => '',
                                      };
            }
            $bad++;
        } elsif ($next == 0) {
            print "FAILED before any test output arrived\n";
            $bad++;
            $failedtests{$test} = { canon => '??',  max => '??',
                                    failed => '??',
                                    name => $test, percent => undef,
                                    estat => '', wstat => '',
                                  };
        }
        $subtests_skipped += $skipped;
        if (defined $files_in_dir) {
            my @new_dir_files = globdir $files_in_dir;
            if (@new_dir_files != @dir_files) {
                my %f;
                @f{@new_dir_files} = (1) x @new_dir_files;
                delete @f{@dir_files};
                my @f = sort keys %f;
                print "LEAKED FILES: @f\n";
                @dir_files = @new_dir_files;
            }
        }
    }
    my $t_total = timediff(new Benchmark, $t_start);

    if ($^O eq 'VMS') {
        if (defined $old5lib) {
            $ENV{PERL5LIB} = $old5lib;
        } else {
            delete $ENV{PERL5LIB};
        }
    }
    my $bonusmsg = '';
    $bonusmsg = (" ($totbonus subtest".($totbonus>1?'s':'').
                 " UNEXPECTEDLY SUCCEEDED)")
        if $totbonus;
    if ($tests_skipped) {
        $bonusmsg .= ", $tests_skipped test" . ($tests_skipped != 1 ? 's' : '');
        if ($subtests_skipped) {
            $bonusmsg .= " and $subtests_skipped subtest"
                         . ($subtests_skipped != 1 ? 's' : '');
        }
        $bonusmsg .= ' skipped';
    }
    elsif ($subtests_skipped) {
        $bonusmsg .= ", $subtests_skipped subtest"
                     . ($subtests_skipped != 1 ? 's' : '')
                     . " skipped";
    }
    if ($bad == 0 && $totmax) {
        print "All tests successful$bonusmsg.\n";
    } elsif ($total==0){
        die "FAILED--no tests were run for some reason.\n";
    } elsif ($totmax==0) {
        my $blurb = $total==1 ? "script" : "scripts";
        die "FAILED--$total test $blurb could be run, alas--no output ever seen\n";
    } else {
        $pct = sprintf("%.2f", $good / $total * 100);
        my $subpct = sprintf " %d/%d subtests failed, %.2f%% okay.",
                              $totmax - $totok, $totmax, 100*$totok/$totmax;
        my $script;
        for $script (sort keys %failedtests) {
            $curtest = $failedtests{$script};
            write;
        }
        if ($bad) {
            $bonusmsg =~ s/^,\s*//;
            print "$bonusmsg.\n" if $bonusmsg;
            die "Failed $bad/$total test scripts, $pct% okay.$subpct\n";
        }
    }
    printf("Files=%d, Tests=%d, %s\n", $files, $totmax, timestr($t_total, 'nop'));

    return ($bad == 0 && $totmax) ;
}

my $tried_devel_corestack;

# Guess whether the child process dumped core: use wait.ph's WCOREDUMP()
# if it is available, otherwise fall back on a rough heuristic.
sub corestatus {
    my($st) = @_;
    my($ret);

    eval {require 'wait.ph'};
    if ($@) {
        SWITCH: {
            $ret = ($st & 0200); # Tim says, this is for 90%
        }
    } else {
        $ret = WCOREDUMP($st);
    }

    eval { require Devel::CoreStack; $have_devel_corestack++ }
        unless $tried_devel_corestack++;

    $ret;
}

# Condense a list of failed test numbers into a canonical string of ranges
# (e.g. "3-5, 7") and return it together with a printable summary.
sub canonfailed ($@) {
    my($max,$skipped,@failed) = @_;
    my %seen;
    @failed = sort {$a <=> $b} grep !$seen{$_}++, @failed;
    my $failed = @failed;
    my @result = ();
    my @canon = ();
    my $min;
    my $last = $min = shift @failed;
    my $canon;
    if (@failed) {
        for (@failed, $failed[-1]) { # don't forget the last one
            if ($_ > $last+1 || $_ == $last) {
                if ($min == $last) {
                    push @canon, $last;
                } else {
                    push @canon, "$min-$last";
                }
                $min = $_;
            }
            $last = $_;
        }
        local $" = ", ";
        push @result, "FAILED tests @canon\n";
        $canon = "@canon";
    } else {
        push @result, "FAILED test $last\n";
        $canon = $last;
    }

    push @result, "\tFailed $failed/$max tests, ";
    push @result, sprintf("%.2f",100*(1-$failed/$max)), "% okay";
    my $ender = 's' x ($skipped > 1);
    my $good = $max - $failed - $skipped;
    my $goodper = sprintf("%.2f",100*($good/$max));
    push @result, " (-$skipped skipped test$ender: $good okay, $goodper%)" if $skipped;
    push @result, "\n";
    my $txt = join "", @result;
    ($txt, $canon);
}

1;
__END__

=head1 NAME

Test::Harness - run perl standard test scripts with statistics

=head1 SYNOPSIS

    use Test::Harness;

    runtests(@tests);

=head1 DESCRIPTION

(By using the L<Test> module, you can write test scripts without
knowing the exact output this module expects. However, if you need to
know the specifics, read on!)

Perl test scripts print to standard output C<"ok N"> for each single
test, where C<N> is an increasing sequence of integers. The first line
output by a standard test script is C<"1..M"> with C<M> being the
number of tests that should be run within the test script.
Test::Harness::runtests(@tests) runs all the test scripts named as
arguments and checks standard output for the expected C<"ok N">
strings.

After all tests have been performed, runtests() prints some
performance statistics that are computed by the Benchmark module.

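For example, a minimal driver script along the following lines could use
runtests() to run every test below a F<t/> directory (the F<t/*.t> glob
pattern is only illustrative; use whatever matches your test layout):

    # Sketch of a driver script for Test::Harness
    use Test::Harness;

    my @tests = sort glob("t/*.t");   # collect the test scripts
    runtests(@tests);                 # dies with a summary if any script failed
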
=head2 The test script output

Any output from the test script to standard error is ignored and
bypassed, and will therefore be seen by the user. Lines written to
standard output that match C</^(not\s+)?ok\b/> are interpreted as
feedback for runtests(). All other lines are discarded.

The test numbers after C<ok> may be omitted. In that case Test::Harness
temporarily maintains its own counter until the script supplies test
numbers again. So the following test script

    print <<END;
    1..6
    not ok
    ok
    not ok
    ok
    ok
    END

will generate

    FAILED tests 1, 3, 6
    Failed 3/6 tests, 50.00% okay

The global variable $Test::Harness::verbose is exportable and can be
used to let runtests() display the standard output of the script
without otherwise altering its behavior.

The global variable $Test::Harness::switches is exportable and can be
used to set perl command line options used for running the test
script(s). The default value is C<-w>.

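Both variables can simply be assigned before runtests() is called.
A sketch (the extra C<-Ilib> switch is only an example):

    use Test::Harness qw(&runtests $verbose $switches);

    $verbose  = 1;            # echo each script's output while it runs
    $switches = "-w -Ilib";   # example: also add lib/ to @INC of the children
    runtests(@tests);
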
If a standard output line contains the substring C< # Skip> (with
variations in spacing and case) after C<ok> or C<ok NUMBER>, it is
counted as a skipped test. If the whole test script succeeds, the
count of skipped tests is included in the generated output.

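A test script might emit such a line when a test does not apply on the
current system. A hypothetical example:

    print "1..2\n";
    print "ok 1\n";
    print "ok 2 # Skip: no network available\n";

For a script like this the harness reports something like
C<ok, 1/2 subtest skipped> instead of a plain C<ok>.
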
=head1 EXPORT

C<&runtests> is exported by Test::Harness by default.

=head1 DIAGNOSTICS

=over 4

=item C<All tests successful.\nFiles=%d, Tests=%d, %s>

If all tests are successful, some statistics about the performance are
printed.

=item C<FAILED tests %s\n\tFailed %d/%d tests, %.2f%% okay.>

For any single script that has failing subtests, statistics like the
above are printed.

=item C<Test returned status %d (wstat %d)>

For scripts that return a non-zero exit status, both C<$? E<gt>E<gt> 8>
and C<$?> are printed in a message similar to the above.

=item C<Failed 1 test, %.2f%% okay. %s>

=item C<Failed %d/%d tests, %.2f%% okay. %s>

If not all tests were successful, the script dies with one of the
above messages.

=back

=head1 ENVIRONMENT

Setting C<HARNESS_IGNORE_EXITCODE> makes the harness ignore the exit
status of child processes.

If C<HARNESS_FILELEAK_IN_DIR> is set to the name of a directory, the
harness will check after each test whether new files appeared in that
directory, and report them as

    LEAKED FILES: scr.tmp 0 my.db

If the directory name is relative, it is interpreted with respect to
the current directory at the moment runtests() was called. Putting an
absolute path into C<HARNESS_FILELEAK_IN_DIR> may give more predictable
results.

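The variable is read when Test::Harness is loaded, so a driver script
that wants to set it itself has to do so before the C<use> statement
takes effect. A sketch (the directory name is only an example):

    BEGIN { $ENV{HARNESS_FILELEAK_IN_DIR} = "/tmp/testout" }
    use Test::Harness;

    runtests(@tests);   # reports files the scripts leave behind in /tmp/testout
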
Harness sets C<HARNESS_ACTIVE> before executing the individual tests.
This allows the tests to determine if they are being executed through the
harness or by any other means.

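An individual test script can therefore check the variable, for
instance to skip checks that only make sense when the script is run by
hand. A sketch of such a test:

    if ($ENV{HARNESS_ACTIVE}) {
        print "ok 3 # Skip: interactive check skipped under the harness\n";
    } else {
        # prompt the user and verify the interactive behaviour here
        print "ok 3\n";
    }
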
=head1 SEE ALSO

L<Test> for writing test scripts and also L<Benchmark> for the
underlying timing routines.

=head1 AUTHORS

Either Tim Bunce or Andreas Koenig, we don't know. What we know for
sure is that it was inspired by Larry Wall's TEST script that came
with perl distributions for ages. Numerous anonymous contributors
exist. The current maintainer is Andreas Koenig.

=head1 BUGS

Test::Harness uses $^X to determine the perl binary to run the tests
with. Test scripts running via the shebang (C<#!>) line may not be
portable because $^X is not consistent for shebang scripts across
platforms. This is no problem when Test::Harness is run with an
absolute path to the perl binary or when $^X can be found in the path.

=cut