package Test::Harness;

BEGIN {require 5.002;}
use Exporter;
use Benchmark;
use Config;
use FileHandle;
use strict;

use vars qw($VERSION $verbose $switches $have_devel_corestack $curtest
            @ISA @EXPORT @EXPORT_OK);
$have_devel_corestack = 0;

$VERSION = "1.1602";

# Some experimental versions of OS/2 build have broken $?
my $ignore_exitcode = $ENV{HARNESS_IGNORE_EXITCODE};

my $files_in_dir = $ENV{HARNESS_FILELEAK_IN_DIR};

my $tests_skipped = 0;
my $subtests_skipped = 0;

@ISA = ('Exporter');
@EXPORT = qw(&runtests);
@EXPORT_OK = qw($verbose $switches);

format STDOUT_TOP =
Failed Test  Status Wstat Total Fail  Failed  List of failed
-------------------------------------------------------------------------------
.

format STDOUT =
@<<<<<<<<<<<<<< @>> @>>>> @>>>> @>>> ^##.##%  ^<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
{ $curtest->{name},
  $curtest->{estat},
  $curtest->{wstat},
  $curtest->{max},
  $curtest->{failed},
  $curtest->{percent},
  $curtest->{canon}
}
~~                                            ^<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
                                              $curtest->{canon}
.


$verbose = 0;
$switches = "-w";

sub globdir { opendir DIRH, shift; my @f = readdir DIRH; closedir DIRH; @f }

sub runtests {
    my(@tests) = @_;
    local($|) = 1;
    my($test,$te,$ok,$next,$max,$pct,$totok,$totbonus,@failed,%failedtests);
    my $totmax = 0;
    my $files = 0;
    my $bad = 0;
    my $good = 0;
    my $total = @tests;

    # pass -I flags to children
    my $old5lib = $ENV{PERL5LIB};
    local($ENV{'PERL5LIB'}) = join($Config{path_sep}, @INC);

    if ($^O eq 'VMS') { $switches =~ s/-(\S*[A-Z]\S*)/"-$1"/g }

    my @dir_files = globdir $files_in_dir if defined $files_in_dir;
    my $t_start = new Benchmark;
    while ($test = shift(@tests)) {
        $te = $test;
        chop($te);
        if ($^O eq 'VMS') { $te =~ s/^.*\.t\./[.t./; }
        print "$te" . '.' x (20 - length($te));
        my $fh = new FileHandle;
        $fh->open($test) or print "can't open $test. $!\n";
        my $first = <$fh>;
        my $s = $switches;
        $s .= q[ "-T"] if $first =~ /^#!.*\bperl.*-\w*T/;
        $fh->close or print "can't close $test. $!\n";
        my $cmd = ($ENV{'COMPILE_TEST'}) ?
            "./perl -I../lib ../utils/perlcc $test -run -verbose dcf -log ./compilelog |"
            : "$^X $s $test|";
        $cmd = "MCR $cmd" if $^O eq 'VMS';
        $fh->open($cmd) or print "can't run $test. $!\n";
        $ok = $next = $max = 0;
        @failed = ();
        my %todo = ();
        my $bonus = 0;
        my $skipped = 0;
        while (<$fh>) {
            if ($verbose) {
                print $_;
            }
            if (/^1\.\.([0-9]+) todo([\d\s]+)\;/) {
                $max = $1;
                for (split(/\s+/, $2)) { $todo{$_} = 1; }
                $totmax += $max;
                $files++;
                $next = 1;
            } elsif (/^1\.\.([0-9]+)/) {
                $max = $1;
                $totmax += $max;
                $files++;
                $next = 1;
            } elsif ($max && /^(not\s+)?ok\b/) {
                my $this = $next;
                if (/^not ok\s*(\d*)/) {
                    $this = $1 if $1 > 0;
                    if (!$todo{$this}) {
                        push @failed, $this;
                    } else {
                        $ok++;
                        $totok++;
                    }
                } elsif (/^ok\s*(\d*)(\s*\#\s*[Ss]kip)?/) {
                    $this = $1 if $1 > 0;
                    $ok++;
                    $totok++;
                    $skipped++ if defined $2;
                    $bonus++, $totbonus++ if $todo{$this};
                }
                if ($this > $next) {
                    # warn "Test output counter mismatch [test $this]\n";
                    # no need to warn probably
                    push @failed, $next..$this-1;
                } elsif ($this < $next) {
                    # we have seen more "ok" lines than the number suggests
                    warn "Confused test output: test $this answered after test ", $next-1, "\n";
                    $next = $this;
                }
                $next = $this + 1;
            }
        }
        $fh->close; # must close to reap child resource values
        my $wstatus = $ignore_exitcode ? 0 : $?;    # Can trust $? ?
        my $estatus;
        $estatus = ($^O eq 'VMS'
                    ? eval 'use vmsish "status"; $estatus = $?'
                    : $wstatus >> 8);
        if ($wstatus) {
            my ($failed, $canon, $percent) = ('??', '??');
            printf "dubious\n\tTest returned status $estatus (wstat %d, 0x%x)\n",
                   $wstatus, $wstatus;
            print "\t\t(VMS status is $estatus)\n" if $^O eq 'VMS';
            if (corestatus($wstatus)) { # until we have a wait module
                if ($have_devel_corestack) {
                    Devel::CoreStack::stack($^X);
                } else {
                    print "\ttest program seems to have generated a core\n";
                }
            }
            $bad++;
            if ($max) {
                if ($next == $max + 1 and not @failed) {
                    print "\tafter all the subtests completed successfully\n";
                    $percent = 0;
                    $failed = 0;    # But we do not set $canon!
                } else {
                    push @failed, $next..$max;
                    $failed = @failed;
                    (my $txt, $canon) = canonfailed($max,$skipped,@failed);
                    $percent = 100*(scalar @failed)/$max;
                    print "DIED. ", $txt;
                }
            }
            $failedtests{$test} = { canon => $canon, max => $max || '??',
                                    failed => $failed,
                                    name => $test, percent => $percent,
                                    estat => $estatus, wstat => $wstatus,
                                  };
        } elsif ($ok == $max && $next == $max+1) {
            if ($max and $skipped + $bonus) {
                my @msg;
                push(@msg, "$skipped/$max subtest".($skipped>1?'s':'')." skipped")
                    if $skipped;
                push(@msg, "$bonus subtest".($bonus>1?'s':'').
                           " unexpectedly succeeded")
                    if $bonus;
                print "ok, ".join(', ', @msg)."\n";
            } elsif ($max) {
                print "ok\n";
            } else {
                print "skipping test on this platform\n";
                $tests_skipped++;
            }
            $good++;
        } elsif ($max) {
            if ($next <= $max) {
                push @failed, $next..$max;
            }
            if (@failed) {
                my ($txt, $canon) = canonfailed($max,$skipped,@failed);
                print $txt;
                $failedtests{$test} = { canon => $canon, max => $max,
                                        failed => scalar @failed,
                                        name => $test, percent => 100*(scalar @failed)/$max,
                                        estat => '', wstat => '',
                                      };
            } else {
                print "Don't know which tests failed: got $ok ok, expected $max\n";
                $failedtests{$test} = { canon => '??', max => $max,
                                        failed => '??',
                                        name => $test, percent => undef,
                                        estat => '', wstat => '',
                                      };
            }
            $bad++;
        } elsif ($next == 0) {
            print "FAILED before any test output arrived\n";
            $bad++;
            $failedtests{$test} = { canon => '??', max => '??',
                                    failed => '??',
                                    name => $test, percent => undef,
                                    estat => '', wstat => '',
                                  };
        }
        $subtests_skipped += $skipped;
        if (defined $files_in_dir) {
            my @new_dir_files = globdir $files_in_dir;
            if (@new_dir_files != @dir_files) {
                my %f;
                @f{@new_dir_files} = (1) x @new_dir_files;
                delete @f{@dir_files};
                my @f = sort keys %f;
                print "LEAKED FILES: @f\n";
                @dir_files = @new_dir_files;
            }
        }
    }
    my $t_total = timediff(new Benchmark, $t_start);

    if ($^O eq 'VMS') {
        if (defined $old5lib) {
            $ENV{PERL5LIB} = $old5lib;
        } else {
            delete $ENV{PERL5LIB};
        }
    }
    my $bonusmsg = '';
    $bonusmsg = (" ($totbonus subtest".($totbonus>1?'s':'').
                 " UNEXPECTEDLY SUCCEEDED)")
        if $totbonus;
    if ($tests_skipped) {
        $bonusmsg .= ", $tests_skipped test" . ($tests_skipped != 1 ? 's' : '');
        if ($subtests_skipped) {
            $bonusmsg .= " and $subtests_skipped subtest"
                         . ($subtests_skipped != 1 ? 's' : '');
        }
        $bonusmsg .= ' skipped';
    }
    elsif ($subtests_skipped) {
        $bonusmsg .= ", $subtests_skipped subtest"
                     . ($subtests_skipped != 1 ? 's' : '')
                     . " skipped";
    }
    if ($bad == 0 && $totmax) {
        print "All tests successful$bonusmsg.\n";
    } elsif ($total==0) {
        die "FAILED--no tests were run for some reason.\n";
    } elsif ($totmax==0) {
        my $blurb = $total==1 ? "script" : "scripts";
        die "FAILED--$total test $blurb could be run, alas--no output ever seen\n";
    } else {
        $pct = sprintf("%.2f", $good / $total * 100);
        my $subpct = sprintf " %d/%d subtests failed, %.2f%% okay.",
                             $totmax - $totok, $totmax, 100*$totok/$totmax;
        my $script;
        for $script (sort keys %failedtests) {
            $curtest = $failedtests{$script};
            write;
        }
        if ($bad) {
            $bonusmsg =~ s/^,\s*//;
            print "$bonusmsg.\n" if $bonusmsg;
            die "Failed $bad/$total test scripts, $pct% okay.$subpct\n";
        }
    }
    printf("Files=%d, Tests=%d, %s\n", $files, $totmax, timestr($t_total, 'nop'));

    return ($bad == 0 && $totmax);
}

my $tried_devel_corestack;
sub corestatus {
    my($st) = @_;
    my($ret);

    eval { require 'wait.ph' };
    if ($@) {
      SWITCH: {
            $ret = ($st & 0200); # Tim says, this is for 90%
        }
    } else {
        $ret = WCOREDUMP($st);
    }

    eval { require Devel::CoreStack; $have_devel_corestack++ }
        unless $tried_devel_corestack++;

    $ret;
}

sub canonfailed ($@) {
    my($max,$skipped,@failed) = @_;
    my %seen;
    @failed = sort {$a <=> $b} grep !$seen{$_}++, @failed;
    my $failed = @failed;
    my @result = ();
    my @canon = ();
    my $min;
    my $last = $min = shift @failed;
    my $canon;
    if (@failed) {
        for (@failed, $failed[-1]) { # don't forget the last one
            if ($_ > $last+1 || $_ == $last) {
                if ($min == $last) {
                    push @canon, $last;
                } else {
                    push @canon, "$min-$last";
                }
                $min = $_;
            }
            $last = $_;
        }
        local $" = ", ";
        push @result, "FAILED tests @canon\n";
        $canon = "@canon";
    } else {
        push @result, "FAILED test $last\n";
        $canon = $last;
    }

    push @result, "\tFailed $failed/$max tests, ";
    push @result, sprintf("%.2f",100*(1-$failed/$max)), "% okay";
    my $ender = 's' x ($skipped > 1);
    my $good = $max - $failed - $skipped;
    my $goodper = sprintf("%.2f",100*($good/$max));
    push @result, " (-$skipped skipped test$ender: $good okay, $goodper%)" if $skipped;
    push @result, "\n";
    my $txt = join "", @result;
    ($txt, $canon);
}

1;
__END__

=head1 NAME

Test::Harness - run perl standard test scripts with statistics

=head1 SYNOPSIS

    use Test::Harness;

    runtests(@tests);

=head1 DESCRIPTION

(By using the L<Test> module, you can write test scripts without
knowing the exact output this module expects. However, if you need to
know the specifics, read on!)

Perl test scripts print to standard output C<"ok N"> for each single
test, where C<N> is an increasing sequence of integers. The first line
output by a standard test script is C<"1..M">, with C<M> being the
number of tests that should be run within the test script.
Test::Harness::runtests(@tests) runs all the test scripts named as
arguments and checks standard output for the expected C<"ok N">
strings.

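For example, a conforming test script could consist of nothing more
than this (a made-up illustration, not a script taken from the perl
distribution):

    print "1..3\n";
    print "ok 1\n";
    print "ok 2\n";
    print "not ok 3\n";

For this script, runtests() would record two passing subtests and
report subtest 3 as failed.
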
After all tests have been performed, runtests() prints some
performance statistics that are computed by the Benchmark module.

=head2 The test script output

Any output from the test script to standard error is not captured by
the harness but passed through, and will thus be seen by the user.
Lines written to standard output that match C</^(not\s+)?ok\b/> are
interpreted as feedback for runtests(). All other lines are discarded.

It is tolerated if the test numbers after C<ok> are omitted. In this
case Test::Harness temporarily maintains its own counter until the
script supplies test numbers again. So the following test script

    print <<END;
    1..6
    not ok
    ok
    not ok
    ok
    ok
    END

will generate

    FAILED tests 1, 3, 6
    Failed 3/6 tests, 50.00% okay

The global variable $Test::Harness::verbose is exportable and can be
used to let runtests() display the standard output of the script
without otherwise altering its behavior.

The global variable $Test::Harness::switches is exportable and can be
used to set the perl command-line options used for running the test
script(s). The default value is C<-w>.

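For instance, a small driver script might import and set both
variables before calling runtests() (a sketch only; the switch value
and the test file names are made up):

    use Test::Harness qw(&runtests $verbose $switches);

    $verbose  = 1;                   # echo each test script's output
    $switches = "-w -I../blib/lib";  # options for the perl running each script
    runtests('t/basic.t', 't/io.t');
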
If a standard output line contains the substring C< # Skip> (with
variations in spacing and case) after C<ok> or C<ok NUMBER>, it is
counted as a skipped test. If the whole test script succeeds, the
count of skipped tests is included in the generated output.

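For example, a line such as the following is counted as a skipped,
passing subtest (the text after C<Skip> is only an illustration; this
version of the harness does not interpret it):

    ok 13 # Skip: no network connection available

If the whole script then succeeds, its summary line reads something
like C<ok, 1/13 subtest skipped>.
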
=head1 EXPORT

C<&runtests> is exported by Test::Harness by default.

=head1 DIAGNOSTICS

=over 4

=item C<All tests successful.\nFiles=%d, Tests=%d, %s>

If all tests are successful, some statistics about the performance are
printed.

=item C<FAILED tests %s\n\tFailed %d/%d tests, %.2f%% okay.>

For any single script that has failing subtests, statistics like the
above are printed.

=item C<Test returned status %d (wstat %d)>

For scripts that return a non-zero exit status, both C<$? E<gt>E<gt> 8>
and C<$?> are printed in a message similar to the above.

=item C<Failed 1 test, %.2f%% okay. %s>

=item C<Failed %d/%d tests, %.2f%% okay. %s>

If not all tests were successful, the script dies with one of the above
messages.

=back

=head1 ENVIRONMENT

Setting C<HARNESS_IGNORE_EXITCODE> makes the harness ignore the exit
status of child processes.

If C<HARNESS_FILELEAK_IN_DIR> is set to the name of a directory, the
harness will check after each test whether new files appeared in that
directory, and report them as

    LEAKED FILES: scr.tmp 0 my.db

If the directory name is relative, it is taken relative to the current
directory at the moment runtests() was called. Putting an absolute path
into C<HARNESS_FILELEAK_IN_DIR> may give more predictable results.

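Both variables are read once, when Test::Harness is loaded, so a
driver script that wants to set them from Perl must do so before the
C<use> statement takes effect, e.g. (a sketch; the directory name is
made up):

    BEGIN {
        $ENV{HARNESS_IGNORE_EXITCODE} = 1;                   # trust only the "ok" lines
        $ENV{HARNESS_FILELEAK_IN_DIR} = '/tmp/test-scratch'; # watch this directory
    }
    use Test::Harness;

    runtests(@ARGV);
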
=head1 SEE ALSO

L<Test> for writing test scripts and also L<Benchmark> for the
underlying timing routines.

=head1 AUTHORS

Either Tim Bunce or Andreas Koenig, we don't know. What we know for
sure is that it was inspired by Larry Wall's TEST script that came
with perl distributions for ages. Numerous anonymous contributors
exist. The current maintainer is Andreas Koenig.

=head1 BUGS

Test::Harness uses $^X to determine the perl binary with which to run
the tests. Test scripts running via the shebang (C<#!>) line may not
be portable because $^X is not consistent for shebang scripts across
platforms. This is not a problem when Test::Harness is run with an
absolute path to the perl binary or when $^X can be found in the path.

=cut