10 use vars qw($VERSION $verbose $switches $have_devel_corestack $curtest
11 @ISA @EXPORT @EXPORT_OK);
12 $have_devel_corestack = 0;
17 @EXPORT= qw(&runtests);
18 @EXPORT_OK= qw($verbose $switches);
21 Failed Test Status Wstat Total Fail Failed List of failed
22 -------------------------------------------------------------------------------
26 @<<<<<<<<<<<<<< @>> @>>>> @>>>> @>>> ^##.##% ^<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
35 ~~ ^<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
46 my($test,$te,$ok,$next,$max,$pct,$totok,$totbonus,@failed,%failedtests);
# NOTE(review): this is an elided listing of runtests(); original lines
# between the numbered statements are not shown, so the comments below
# describe only the visible code.
53 # pass -I flags to children
54 my $old5lib = $ENV{PERL5LIB};
# Make child test scripts see the harness's @INC via PERL5LIB; local()
# restores the caller's value automatically when runtests() returns.
55 local($ENV{'PERL5LIB'}) = join($Config{path_sep}, @INC);
# VMS keeps the case of command-line switches only when they are quoted.
57 if ($^O eq 'VMS') { $switches =~ s/-(\S*[A-Z]\S*)/"-$1"/g }
# Baseline for the Benchmark statistics printed at the end.
59 my $t_start = new Benchmark;
60 while ($test = shift(@tests)) {
63 if ($^O eq 'VMS') { $te =~ s/^.*\.t\./[.t./; }
# Print the test name padded to a fixed width with dots.
64 print "$te" . '.' x (20 - length($te));
65 my $fh = new FileHandle;
66 $fh->open($test) or print "can't open $test. $!\n";
# Honour a -T (taint) flag found on the script's shebang line.
69 $s .= q[ "-T"] if $first =~ /^#!.*\bperl.*-\w*T/;
70 $fh->close or print "can't close $test. $!\n";
# COMPILE_TEST mode runs the script through the perlcc compiler instead
# of the interpreter (the interpreter branch is elided in this listing).
71 my $cmd = ($ENV{'COMPILE_TEST'})?
72 "./perl -I../lib ../utils/perlcc $test -run -verbose dcf -log ./compilelog |"
74 $cmd = "MCR $cmd" if $^O eq 'VMS';
# Pipe-open: the loop below reads the test script's standard output.
75 $fh->open($cmd) or print "can't run $test. $!\n";
76 $ok = $next = $max = 0;
# Plan header with an explicit todo list, e.g. "1..13 todo 3 7;" --
# the listed tests are expected to fail.
85 if (/^1\.\.([0-9]+) todo([\d\s]+)\;/) {
87 for (split(/\s+/, $2)) { $todo{$_} = 1; }
# Plain plan header "1..M".
91 } elsif (/^1\.\.([0-9]+)/) {
# Result lines are only honoured once a plan ($max) has been seen.
96 } elsif ($max && /^(not\s+)?ok\b/) {
98 if (/^not ok\s*(\d*)/){
106 } elsif (/^ok\s*(\d*)(\s*\#\s*[Ss]kip)?/) {
# The test number after "ok" is optional; keep our own counter otherwise.
107 $this = $1 if $1 > 0;
110 $skipped++ if defined $2;
# A passing test that was declared todo is an unexpected success.
111 $bonus++, $totbonus++ if $todo{$this};
114 # warn "Test output counter mismatch [test $this]\n";
115 # no need to warn probably
# Test numbers that were jumped over count as failures.
116 push @failed, $next..$this-1;
117 } elsif ($this < $next) {
118 #we have seen more "ok" lines than the number suggests
119 warn "Confused test output: test $this answered after test ", $next-1, "\n";
125 $fh->close; # must close to reap child resource values
# Child exit status; VMS needs native status semantics via vmsish.
128 $estatus = ($^O eq 'VMS'
129 ? eval 'use vmsish "status"; $estatus = $?'
# NOTE(review): three names but only two initializers -- $percent
# starts undef and is assigned only on the canonfailed() path below.
132 my ($failed, $canon, $percent) = ('??', '??');
133 printf "dubious\n\tTest returned status $estatus (wstat %d, 0x%x)\n",
135 print "\t\t(VMS status is $estatus)\n" if $^O eq 'VMS';
136 if (corestatus($wstatus)) { # until we have a wait module
137 if ($have_devel_corestack) {
# Dump a stack trace from the core file when Devel::CoreStack is loaded.
138 Devel::CoreStack::stack($^X);
140 print "\ttest program seems to have generated a core\n";
# Non-zero exit status even though every subtest reported success.
145 if ($next == $max + 1 and not @failed) {
146 print "\tafter all the subtests completed successfully\n";
148 $failed = 0; # But we do not set $canon!
# Tests that never reported at all are counted as failed.
150 push @failed, $next..$max;
152 (my $txt, $canon) = canonfailed($max,@failed);
153 $percent = 100*(scalar @failed)/$max;
157 $failedtests{$test} = { canon => $canon, max => $max || '??',
159 name => $test, percent => $percent,
160 estat => $estatus, wstat => $wstatus,
# Clean exit and every expected subtest answered.
162 } elsif ($ok == $max && $next == $max+1) {
163 if ($max and $skipped + $bonus) {
165 push(@msg, "$skipped subtest".($skipped>1?'s':'')." skipped")
167 push(@msg, "$bonus subtest".($bonus>1?'s':'').
168 " unexpectedly succeeded")
170 print "ok, ".join(', ', @msg)."\n";
# A plan of "1..0" means the whole script was skipped on this platform.
174 print "skipping test on this platform\n";
179 push @failed, $next..$max;
182 my ($txt, $canon) = canonfailed($max,@failed);
184 $failedtests{$test} = { canon => $canon, max => $max,
185 failed => scalar @failed,
186 name => $test, percent => 100*(scalar @failed)/$max,
187 estat => '', wstat => '',
# Counts disagree but we cannot tell which particular tests failed.
190 print "Don't know which tests failed: got $ok ok, expected $max\n";
191 $failedtests{$test} = { canon => '??', max => $max,
193 name => $test, percent => undef,
194 estat => '', wstat => '',
# Neither a "1..M" plan nor any "ok" line was ever seen.
198 } elsif ($next == 0) {
199 print "FAILED before any test output arrived\n";
201 $failedtests{$test} = { canon => '??', max => '??',
203 name => $test, percent => undef,
204 estat => '', wstat => '',
208 my $t_total = timediff(new Benchmark, $t_start);
# Restore the caller's PERL5LIB (saved at the top of runtests).
211 if (defined $old5lib) {
212 $ENV{PERL5LIB} = $old5lib;
214 delete $ENV{PERL5LIB};
# Mention unexpected todo successes in the final summary line.
218 $bonusmsg = (" ($totbonus subtest".($totbonus>1?'s':'').
219 " UNEXPECTEDLY SUCCEEDED)")
221 if ($bad == 0 && $totmax) {
222 print "All tests successful$bonusmsg.\n";
224 die "FAILED--no tests were run for some reason.\n";
225 } elsif ($totmax==0) {
226 my $blurb = $total==1 ? "script" : "scripts";
227 die "FAILED--$total test $blurb could be run, alas--no output ever seen\n";
# At least one script failed: print the per-script failure table
# (using the formats defined near the top of the file) and die.
229 $pct = sprintf("%.2f", $good / $total * 100);
230 my $subpct = sprintf " %d/%d subtests failed, %.2f%% okay.",
231 $totmax - $totok, $totmax, 100*$totok/$totmax;
233 for $script (sort keys %failedtests) {
234 $curtest = $failedtests{$script};
238 die "Failed $bad/$total test scripts, $pct% okay.$subpct\n";
241 printf("Files=%d, Tests=%d, %s\n", $files, $totmax, timestr($t_total, 'nop'));
# Overall result: true iff no script failed and at least one subtest ran.
243 return ($bad == 0 && $totmax) ;
# NOTE(review): fragment of corestatus($wstat) -- best-effort check of
# whether the child process dumped core; the sub header is elided here.
246 my $tried_devel_corestack;
# Prefer the system's wait.ph definition of WCOREDUMP; on failure fall
# back to testing the conventional core-dump bit directly.
251 eval {require 'wait.ph'};
254 $ret = ($st & 0200); # Tim says, this is for 90%
257 $ret = WCOREDUMP($st);
# Attempt to load Devel::CoreStack exactly once per run and remember
# whether it is available for later stack dumps.
260 eval { require Devel::CoreStack; $have_devel_corestack++ }
261 unless $tried_devel_corestack++;
# canonfailed($max, @failed): collapse the failed test numbers into a
# canonical human-readable list (runs become "min-max" ranges) and build
# a multi-line summary; interior lines are elided in this listing.
266 sub canonfailed ($@) {
267 my($max,@failed) = @_;
# Numeric sort with duplicates removed.
269 @failed = sort {$a <=> $b} grep !$seen{$_}++, @failed;
270 my $failed = @failed;
274 my $last = $min = shift @failed;
# One-element lookahead: repeating the final element flushes the last run.
277 for (@failed, $failed[-1]) { # don't forget the last one
278 if ($_ > $last+1 || $_ == $last) {
# Consecutive numbers are reported as a single "min-max" range.
282 push @canon, "$min-$last";
289 push @result, "FAILED tests @canon\n";
292 push @result, "FAILED test $last\n";
296 push @result, "\tFailed $failed/$max tests, ";
297 push @result, sprintf("%.2f",100*(1-$failed/$max)), "% okay\n";
298 my $txt = join "", @result;
307 Test::Harness - run perl standard test scripts with statistics
317 (By using the L<Test> module, you can write test scripts without
318 knowing the exact output this module expects. However, if you need to
319 know the specifics, read on!)
321 Perl test scripts print to standard output C<"ok N"> for each single
322 test, where C<N> is an increasing sequence of integers. The first line
323 output by a standard test script is C<"1..M"> with C<M> being the
324 number of tests that should be run within the test
325 script. Test::Harness::runtests(@tests) runs all the test scripts
326 named as arguments and checks standard output for the expected
329 After all tests have been performed, runtests() prints some
330 performance statistics that are computed by the Benchmark module.
332 =head2 The test script output
334 Any output from the test script to standard error is not captured by
335 the harness and thus will be seen by the user. Lines written to standard
336 output containing C</^(not\s+)?ok\b/> are interpreted as feedback for
337 runtests(). All other lines are discarded.
339 The test numbers after C<ok> may be omitted. In this
340 case Test::Harness temporarily maintains its own counter until the
341 script supplies test numbers again. So the following test script
355 Failed 3/6 tests, 50.00% okay
357 The global variable $Test::Harness::verbose is exportable and can be
358 used to let runtests() display the standard output of the script
359 without altering the behavior otherwise.
361 The global variable $Test::Harness::switches is exportable and can be
362 used to set perl command line options used for running the test
363 script(s). The default value is C<-w>.
365 If the standard output line contains substring C< # Skip> (with
366 variations in spacing and case) after C<ok> or C<ok NUMBER>, it is
367 counted as a skipped test. If the whole test script succeeds, the
368 count of skipped tests is included in the generated output.
372 C<&runtests> is exported by Test::Harness per default.
378 =item C<All tests successful.\nFiles=%d, Tests=%d, %s>
380 If all tests are successful some statistics about the performance are
383 =item C<FAILED tests %s\n\tFailed %d/%d tests, %.2f%% okay.>
385 For any single script that has failing subtests statistics like the
388 =item C<Test returned status %d (wstat %d)>
390 Scripts that return a non-zero exit status, both C<$? E<gt>E<gt> 8> and C<$?> are
391 printed in a message similar to the above.
393 =item C<Failed 1 test, %.2f%% okay. %s>
395 =item C<Failed %d/%d tests, %.2f%% okay. %s>
397 If not all tests were successful, the script dies with one of the
404 L<Test> for writing test scripts and also L<Benchmark> for the
405 underlying timing routines.
409 Either Tim Bunce or Andreas Koenig, we don't know. What we know for
410 sure is, that it was inspired by Larry Wall's TEST script that came
411 with perl distributions for ages. Numerous anonymous contributors
412 exist. Current maintainer is Andreas Koenig.
416 Test::Harness uses $^X to determine the perl binary to run the tests
417 with. Test scripts running via the shebang (C<#!>) line may not be
418 portable because $^X is not consistent for shebang scripts across
419 platforms. This is no problem when Test::Harness is run with an
420 absolute path to the perl binary or when $^X can be found in the path.