package Test::Harness;

BEGIN {require 5.002;}
use Exporter;
use Benchmark;
use Config;
use FileHandle;
use strict;

use vars qw($VERSION $verbose $switches $have_devel_corestack $curtest
            @ISA @EXPORT @EXPORT_OK);
$have_devel_corestack = 0;

$VERSION = "1.1604";

$ENV{HARNESS_ACTIVE} = 1;

# Some experimental versions of OS/2 build have broken $?
my $ignore_exitcode = $ENV{HARNESS_IGNORE_EXITCODE};

my $files_in_dir = $ENV{HARNESS_FILELEAK_IN_DIR};

my $tests_skipped = 0;
my $subtests_skipped = 0;

@ISA=('Exporter');
@EXPORT= qw(&runtests);
@EXPORT_OK= qw($verbose $switches);

format STDOUT_TOP =
Failed Test  Status Wstat Total Fail  Failed  List of failed
-------------------------------------------------------------------------------
.

format STDOUT =
@<<<<<<<<<<<<<< @>> @>>>> @>>>> @>>> ^##.##%  ^<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
{ $curtest->{name},
  $curtest->{estat},
  $curtest->{wstat},
  $curtest->{max},
  $curtest->{failed},
  $curtest->{percent},
  $curtest->{canon}
}
~~                                            ^<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
                                              $curtest->{canon}
.


$verbose = 0;
$switches = "-w";

sub globdir { opendir DIRH, shift; my @f = readdir DIRH; closedir DIRH; @f }

sub runtests {
    my(@tests) = @_;
    local($|) = 1;
    my($test,$te,$ok,$next,$max,$pct,$totok,$totbonus,@failed,%failedtests);
    my $totmax = 0;
    my $files = 0;
    my $bad = 0;
    my $good = 0;
    my $total = @tests;

    # pass -I flags to children
    my $old5lib = $ENV{PERL5LIB};
    local($ENV{'PERL5LIB'}) = join($Config{path_sep}, @INC);

    if ($^O eq 'VMS') { $switches =~ s/-(\S*[A-Z]\S*)/"-$1"/g }

    my @dir_files = globdir $files_in_dir if defined $files_in_dir;
    my $t_start = new Benchmark;
    while ($test = shift(@tests)) {
        $te = $test;
        chop($te);
        if ($^O eq 'VMS') { $te =~ s/^.*\.t\./[.t./; }
        my $blank = (' ' x 77);
        my $leader = "$te" . '.' x (20 - length($te));
        my $ml = "";
        $ml = "\r$blank\r$leader"
            if -t STDOUT and not $ENV{HARNESS_NOTTY} and not $verbose;
        print $leader;
        my $fh = new FileHandle;
        $fh->open($test) or print "can't open $test. $!\n";
        my $first = <$fh>;
        my $s = $switches;
        $s .= join " ", q[ "-T"], map {qq["-I$_"]} @INC
            if $first =~ /^#!.*\bperl.*-\w*T/;
        $fh->close or print "can't close $test. $!\n";
        my $cmd = ($ENV{'HARNESS_COMPILE_TEST'})
            ? "./perl -I../lib ../utils/perlcc $test "
              . "-run 2>> ./compilelog |"
            : "$^X $s $test|";
        $cmd = "MCR $cmd" if $^O eq 'VMS';
        $fh->open($cmd) or print "can't run $test. $!\n";
        $ok = $next = $max = 0;
        @failed = ();
        my %todo = ();
        my $bonus = 0;
        my $skipped = 0;
        my $skip_reason;
        while (<$fh>) {
            if( $verbose ){
                print $_;
            }
            if (/^1\.\.([0-9]+) todo([\d\s]+)\;/) {
                $max = $1;
                for (split(/\s+/, $2)) { $todo{$_} = 1; }
                $totmax += $max;
                $files++;
                $next = 1;
            } elsif (/^1\.\.([0-9]+)(\s*\#\s*[Ss]kip\S*(?>\s+)(.+))?/) {
                $max = $1;
                $totmax += $max;
                $files++;
                $next = 1;
                $skip_reason = $3 if not $max and defined $3;
            } elsif ($max && /^(not\s+)?ok\b/) {
                my $this = $next;
                if (/^not ok\s*(\d*)/){
                    $this = $1 if $1 > 0;
                    print "${ml}NOK $this" if $ml;
                    if (!$todo{$this}) {
                        push @failed, $this;
                    } else {
                        $ok++;
                        $totok++;
                    }
                } elsif (/^ok\s*(\d*)(\s*\#\s*[Ss]kip\S*(?:(?>\s+)(.+))?)?/) {
                    $this = $1 if $1 > 0;
                    print "${ml}ok $this/$max" if $ml;
                    $ok++;
                    $totok++;
                    $skipped++ if defined $2;
                    my $reason;
                    $reason = 'unknown reason' if defined $2;
                    $reason = $3 if defined $3;
                    if (defined $reason and defined $skip_reason) {
                        # print "was: '$skip_reason' new '$reason'\n";
                        $skip_reason = 'various reasons'
                            if $skip_reason ne $reason;
                    } elsif (defined $reason) {
                        $skip_reason = $reason;
                    }
                    $bonus++, $totbonus++ if $todo{$this};
                }
                if ($this > $next) {
                    # warn "Test output counter mismatch [test $this]\n";
                    # no need to warn probably
                    push @failed, $next..$this-1;
                } elsif ($this < $next) {
                    # we have seen more "ok" lines than the number suggests
                    warn "Confused test output: test $this answered after test ", $next-1, "\n";
                    $next = $this;
                }
                $next = $this + 1;
            }
        }
        $fh->close; # must close to reap child resource values
        my $wstatus = $ignore_exitcode ? 0 : $?;    # Can trust $? ?
        my $estatus;
        $estatus = ($^O eq 'VMS'
                    ? eval 'use vmsish "status"; $estatus = $?'
                    : $wstatus >> 8);
        if ($wstatus) {
            my ($failed, $canon, $percent) = ('??', '??');
            printf "${ml}dubious\n\tTest returned status $estatus (wstat %d, 0x%x)\n",
                   $wstatus,$wstatus;
            print "\t\t(VMS status is $estatus)\n" if $^O eq 'VMS';
            if (corestatus($wstatus)) { # until we have a wait module
                if ($have_devel_corestack) {
                    Devel::CoreStack::stack($^X);
                } else {
                    print "\ttest program seems to have generated a core\n";
                }
            }
            $bad++;
            if ($max) {
                if ($next == $max + 1 and not @failed) {
                    print "\tafter all the subtests completed successfully\n";
                    $percent = 0;
                    $failed = 0;    # But we do not set $canon!
                } else {
                    push @failed, $next..$max;
                    $failed = @failed;
                    (my $txt, $canon) = canonfailed($max,$skipped,@failed);
                    $percent = 100*(scalar @failed)/$max;
                    print "DIED. ",$txt;
                }
            }
            $failedtests{$test} = { canon => $canon, max => $max || '??',
                                    failed => $failed,
                                    name => $test, percent => $percent,
                                    estat => $estatus, wstat => $wstatus,
                                  };
        } elsif ($ok == $max && $next == $max+1) {
            if ($max and $skipped + $bonus) {
                my @msg;
                push(@msg, "$skipped/$max skipped: $skip_reason")
                    if $skipped;
                push(@msg, "$bonus/$max unexpectedly succeeded")
                    if $bonus;
                print "${ml}ok, ".join(', ', @msg)."\n";
            } elsif ($max) {
                print "${ml}ok\n";
            } elsif (defined $skip_reason) {
                print "skipped: $skip_reason\n";
                $tests_skipped++;
            } else {
                print "skipped test on this platform\n";
                $tests_skipped++;
            }
            $good++;
        } elsif ($max) {
            if ($next <= $max) {
                push @failed, $next..$max;
            }
            if (@failed) {
                my ($txt, $canon) = canonfailed($max,$skipped,@failed);
                print $txt;
                $failedtests{$test} = { canon => $canon, max => $max,
                                        failed => scalar @failed,
                                        name => $test, percent => 100*(scalar @failed)/$max,
                                        estat => '', wstat => '',
                                      };
            } else {
                print "Don't know which tests failed: got $ok ok, expected $max\n";
                $failedtests{$test} = { canon => '??', max => $max,
                                        failed => '??',
                                        name => $test, percent => undef,
                                        estat => '', wstat => '',
                                      };
            }
            $bad++;
        } elsif ($next == 0) {
            print "FAILED before any test output arrived\n";
            $bad++;
            $failedtests{$test} = { canon => '??', max => '??',
                                    failed => '??',
                                    name => $test, percent => undef,
                                    estat => '', wstat => '',
                                  };
        }
        $subtests_skipped += $skipped;
        if (defined $files_in_dir) {
            my @new_dir_files = globdir $files_in_dir;
            if (@new_dir_files != @dir_files) {
                my %f;
                @f{@new_dir_files} = (1) x @new_dir_files;
                delete @f{@dir_files};
                my @f = sort keys %f;
                print "LEAKED FILES: @f\n";
                @dir_files = @new_dir_files;
            }
        }
    }
    my $t_total = timediff(new Benchmark, $t_start);

    if ($^O eq 'VMS') {
        if (defined $old5lib) {
            $ENV{PERL5LIB} = $old5lib;
        } else {
            delete $ENV{PERL5LIB};
        }
    }
    my $bonusmsg = '';
    $bonusmsg = (" ($totbonus subtest".($totbonus>1?'s':'').
                 " UNEXPECTEDLY SUCCEEDED)")
        if $totbonus;
    if ($tests_skipped) {
        $bonusmsg .= ", $tests_skipped test" . ($tests_skipped != 1 ? 's' : '');
        if ($subtests_skipped) {
            $bonusmsg .= " and $subtests_skipped subtest"
                         . ($subtests_skipped != 1 ? 's' : '');
        }
        $bonusmsg .= ' skipped';
    }
    elsif ($subtests_skipped) {
        $bonusmsg .= ", $subtests_skipped subtest"
                     . ($subtests_skipped != 1 ? 's' : '')
                     . " skipped";
    }
    if ($bad == 0 && $totmax) {
        print "All tests successful$bonusmsg.\n";
    } elsif ($total==0){
        die "FAILED--no tests were run for some reason.\n";
    } elsif ($totmax==0) {
        my $blurb = $total==1 ? "script" : "scripts";
        die "FAILED--$total test $blurb could be run, alas--no output ever seen\n";
    } else {
        $pct = sprintf("%.2f", $good / $total * 100);
        my $subpct = sprintf " %d/%d subtests failed, %.2f%% okay.",
                             $totmax - $totok, $totmax, 100*$totok/$totmax;
        my $script;
        for $script (sort keys %failedtests) {
            $curtest = $failedtests{$script};
            write;
        }
        if ($bad) {
            $bonusmsg =~ s/^,\s*//;
            print "$bonusmsg.\n" if $bonusmsg;
            die "Failed $bad/$total test scripts, $pct% okay.$subpct\n";
        }
    }
    printf("Files=%d, Tests=%d, %s\n", $files, $totmax, timestr($t_total, 'nop'));

    return ($bad == 0 && $totmax) ;
}

my $tried_devel_corestack;
sub corestatus {
    my($st) = @_;
    my($ret);

    eval {require 'wait.ph'};
    if ($@) {
        SWITCH: {
            $ret = ($st & 0200); # Tim says, this is for 90%
        }
    } else {
        $ret = WCOREDUMP($st);
    }

    eval { require Devel::CoreStack; $have_devel_corestack++ }
        unless $tried_devel_corestack++;

    $ret;
}

sub canonfailed ($@) {
    my($max,$skipped,@failed) = @_;
    my %seen;
    @failed = sort {$a <=> $b} grep !$seen{$_}++, @failed;
    my $failed = @failed;
    my @result = ();
    my @canon = ();
    my $min;
    my $last = $min = shift @failed;
    my $canon;
    if (@failed) {
        for (@failed, $failed[-1]) { # don't forget the last one
            if ($_ > $last+1 || $_ == $last) {
                if ($min == $last) {
                    push @canon, $last;
                } else {
                    push @canon, "$min-$last";
                }
                $min = $_;
            }
            $last = $_;
        }
        local $" = ", ";
        push @result, "FAILED tests @canon\n";
        $canon = "@canon";
    } else {
        push @result, "FAILED test $last\n";
        $canon = $last;
    }

    push @result, "\tFailed $failed/$max tests, ";
    push @result, sprintf("%.2f",100*(1-$failed/$max)), "% okay";
    my $ender = 's' x ($skipped > 1);
    my $good = $max - $failed - $skipped;
    my $goodper = sprintf("%.2f",100*($good/$max));
    push @result, " (-$skipped skipped test$ender: $good okay, $goodper%)" if $skipped;
    push @result, "\n";
    my $txt = join "", @result;
    ($txt, $canon);
}

1;
__END__

=head1 NAME

Test::Harness - run perl standard test scripts with statistics

=head1 SYNOPSIS

    use Test::Harness;

    runtests(@tests);

=head1 DESCRIPTION

(By using the L<Test> module, you can write test scripts without
knowing the exact output this module expects. However, if you need to
know the specifics, read on!)

Perl test scripts print C<"ok N"> to standard output for each single
test, where C<N> is an increasing sequence of integers. The first line
output by a standard test script is C<"1..M">, with C<M> being the
number of tests that should be run within the test script.
Test::Harness::runtests(@tests) runs all the test scripts named as
arguments and checks standard output for the expected C<"ok N">
strings.

After all tests have been performed, runtests() prints some
performance statistics that are computed by the Benchmark module.

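In the simplest case the harness is driven by a short script that just
hands runtests() a list of test scripts. A minimal sketch, assuming
the conventional F<t/*.t> layout (the layout, not the API, is the
assumption here):

    # hypothetical driver script, e.g. run by a "make test" target
    use Test::Harness;

    my @tests = sort glob 't/*.t';   # assumed location of the test scripts
    runtests(@tests);                # prints statistics; dies if any script failed
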
=head2 The test script output

Any output from the test script to standard error is not captured by
the harness and is passed straight through, so it will be seen by the
user. Lines written to standard output that match
C</^(not\s+)?ok\b/> are interpreted as feedback for runtests(). All
other lines are discarded.

The test numbers after C<ok> may be omitted. In that case
Test::Harness temporarily maintains its own counter until the script
supplies test numbers again. So the following test script

    print <<END;
    1..6
    not ok
    ok
    not ok
    ok
    ok
    END

will generate

    FAILED tests 1, 3, 6
    Failed 3/6 tests, 50.00% okay

The global variable $Test::Harness::verbose is exportable and can be
used to let runtests() display the standard output of the script
without otherwise altering its behavior.

The global variable $Test::Harness::switches is exportable and can be
used to set the perl command line options used for running the test
script(s). The default value is C<-w>.

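For example, a driver that wants to see each script's output and to
pass extra switches could import and set both variables before calling
runtests(). A sketch; the particular switch string is only an
illustration:

    use Test::Harness qw(&runtests $verbose $switches);

    $verbose  = 1;          # echo the standard output of each test script
    $switches = "-w -I.";   # illustrative value; the default is just "-w"
    runtests(@ARGV);
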
If a standard output line contains the substring C< # Skip> (with
variations in spacing and case) after C<ok> or C<ok NUMBER>, it is
counted as a skipped test. If the whole test script succeeds, the
count of skipped tests is included in the generated output.

C<Test::Harness> reports the text after C< # Skip(whatever)> as the
reason for skipping. A similar explanation can be included in a
C<1..0> line emitted when the whole test script is skipped:

    1..0 # Skipped: no leverage found

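Inside a test script an individual skipped test therefore looks like
the following sketch (the reason text is made up):

    print "1..2\n";
    print "ok 1\n";
    # counted as ok and as skipped; the reason is reported by the harness
    print "ok 2 # Skip: no database available\n";
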
=head1 EXPORT

C<&runtests> is exported by Test::Harness by default.

=head1 DIAGNOSTICS

=over 4

=item C<All tests successful.\nFiles=%d, Tests=%d, %s>

If all tests are successful, some performance statistics are printed.

=item C<FAILED tests %s\n\tFailed %d/%d tests, %.2f%% okay.>

For any single script that has failing subtests, statistics like the
above are printed.

=item C<Test returned status %d (wstat %d)>

For scripts that return a non-zero exit status, both C<$? E<gt>E<gt> 8>
and C<$?> are printed in a message similar to the above.

=item C<Failed 1 test, %.2f%% okay. %s>

=item C<Failed %d/%d tests, %.2f%% okay. %s>

If not all tests were successful, the script dies with one of the
above messages.

=back

=head1 ENVIRONMENT

Setting C<HARNESS_IGNORE_EXITCODE> makes the harness ignore the exit
status of the child processes.

Setting C<HARNESS_NOTTY> to a true value forces the harness to behave
as though STDOUT were not a console. You may need to set this if you
don't want the harness to output more frequent progress messages using
carriage returns. Some consoles may not handle carriage returns
properly (which results in somewhat messy output).

Setting C<HARNESS_COMPILE_TEST> to a true value will make the harness
attempt to compile the test using C<perlcc> before running it.

If C<HARNESS_FILELEAK_IN_DIR> is set to the name of a directory, the
harness will check after each test whether new files appeared in that
directory, and report them as

    LEAKED FILES: scr.tmp 0 my.db

If the name is relative, it is interpreted with respect to the current
directory at the moment runtests() was called. Putting an absolute
path into C<HARNESS_FILELEAK_IN_DIR> may give more predictable results.

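Note that in this version C<HARNESS_FILELEAK_IN_DIR> is read when the
module is loaded, so a driver that sets it from Perl rather than from
the shell has to do so before C<use Test::Harness> takes effect. A
sketch, with F<t/tmp> as an assumed scratch directory:

    BEGIN { $ENV{HARNESS_FILELEAK_IN_DIR} = "t/tmp" }   # before the module loads
    use Test::Harness;

    runtests(@ARGV);   # files left behind in t/tmp are reported as LEAKED FILES
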
The harness sets C<HARNESS_ACTIVE> before executing the individual
tests. This allows the tests to determine whether they are being run
through the harness or by some other means.

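A test script can use this, for instance, to keep diagnostic chatter
out of the harness run; a minimal sketch:

    print "1..1\n";
    # extra diagnostics only when the script is run by hand
    warn "not running under Test::Harness\n" unless $ENV{HARNESS_ACTIVE};
    print "ok 1\n";
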
=head1 SEE ALSO

L<Test> for writing test scripts and also L<Benchmark> for the
underlying timing routines.

=head1 AUTHORS

Either Tim Bunce or Andreas Koenig, we don't know. What we know for
sure is that it was inspired by Larry Wall's TEST script that has come
with perl distributions for ages. Numerous anonymous contributors
exist. The current maintainer is Andreas Koenig.

=head1 BUGS

Test::Harness uses $^X to determine the perl binary with which to run
the tests. Test scripts that run via the shebang (C<#!>) line may not
be portable because $^X is not consistent for shebang scripts across
platforms. This is not a problem when Test::Harness is run with an
absolute path to the perl binary, or when $^X can be found in the path.

=cut