package Test::Harness;

use 5.005_64;
use Exporter;
use Benchmark;
use Config;
use FileHandle;
use strict;

# Package globals: $VERSION plus the user-tunable knobs ($verbose,
# $switches, $columns) and the Exporter bookkeeping arrays.
our ($VERSION, $verbose, $switches, $have_devel_corestack, $curtest,
     $columns, @ISA, @EXPORT, @EXPORT_OK);

$have_devel_corestack = 0;

$VERSION = "1.1604";

# Let test scripts detect that they are being run under the harness.
$ENV{HARNESS_ACTIVE} = 1;

# Some experimental versions of OS/2 build have broken $?
my $ignore_exitcode = $ENV{HARNESS_IGNORE_EXITCODE};

# Optional directory to watch for files leaked by test scripts.
my $files_in_dir = $ENV{HARNESS_FILELEAK_IN_DIR};

# Running totals for the final summary.
my $tests_skipped    = 0;
my $subtests_skipped = 0;

@ISA       = ('Exporter');
@EXPORT    = qw(&runtests);
@EXPORT_OK = qw($verbose $switches);

$verbose  = 0;
$switches = "-w";
$columns  = $ENV{HARNESS_COLUMNS} || $ENV{COLUMNS} || 80;
17a79f5b |
# globdir($dir)
# Return every entry of the given directory (including '.' and '..'),
# in readdir order.  Used to detect files leaked by test scripts.
sub globdir {
    my ($dir) = @_;
    opendir my $dh, $dir;
    my @entries = readdir $dh;
    closedir $dh;
    return @entries;
}
a0d0e21e |
# runtests(@tests)
#
# Run each named test script in a child perl process, parse the standard
# "1..M" / "ok N" / "not ok N" protocol from its standard output, and
# print a per-script status line plus an overall summary (including a
# formatted failure table when any script failed).
#
# Dies with a "FAILED--..." message when scripts fail or when no test
# output was ever seen; otherwise returns true iff no script failed and
# at least one subtest was planned ($bad == 0 && $totmax).
sub runtests {
    my(@tests) = @_;
    local($|) = 1;
    my($test,$te,$ok,$next,$max,$pct,$totbonus,@failed,%failedtests);
    my $totmax = 0;
    my $totok = 0;
    my $files = 0;
    my $bad = 0;
    my $good = 0;
    my $total = @tests;

    # pass -I flags to children
    my $old5lib = $ENV{PERL5LIB};

    # VMS has a 255-byte limit on the length of %ENV entries, so
    # toss the ones that involve perl_root, the install location
    # for VMS
    my $new5lib;
    if ($^O eq 'VMS') {
        $new5lib = join($Config{path_sep}, grep {!/perl_root/i;} @INC);
        $switches =~ s/-(\S*[A-Z]\S*)/"-$1"/g;
    }
    else {
        $new5lib = join($Config{path_sep}, @INC);
    }

    local($ENV{'PERL5LIB'}) = $new5lib;

    # BUGFIX: this was previously written as
    #   my @dir_files = globdir $files_in_dir if defined $files_in_dir;
    # but a "my" declaration modified by a conditional has undefined
    # behavior (see perlsyn), so declare unconditionally and assign
    # only when the file-leak check is enabled.
    my @dir_files;
    @dir_files = globdir $files_in_dir if defined $files_in_dir;
    my $t_start = new Benchmark;
    while ($test = shift(@tests)) {
        $te = $test;
        # Drop the last character of the name (presumably the trailing
        # "t" of ".t" -- TODO confirm) before building the dotted leader.
        chop($te);
        if ($^O eq 'VMS') { $te =~ s/^.*\.t\./[.t./s; }
        my $blank = (' ' x 77);
        my $leader = "$te" . '.' x (20 - length($te));
        # $ml is a carriage-return prefix used to rewrite the status line
        # in place; only when attached to a tty and not in verbose mode.
        my $ml = "";
        $ml = "\r$blank\r$leader"
            if -t STDOUT and not $ENV{HARNESS_NOTTY} and not $verbose;
        print $leader;
        my $fh = new FileHandle;
        # Peek at the script's shebang line so taint mode (-T) can be
        # propagated; -I paths must then be passed explicitly because a
        # tainted child ignores PERL5LIB.
        $fh->open($test) or print "can't open $test. $!\n";
        my $first = <$fh>;
        my $s = $switches;
        $s .= " $ENV{'HARNESS_PERL_SWITCHES'}"
            if exists $ENV{'HARNESS_PERL_SWITCHES'};
        $s .= join " ", q[ "-T"], map {qq["-I$_"]} @INC
            if $first =~ /^#!.*\bperl.*-\w*T/;
        $fh->close or print "can't close $test. $!\n";
        my $cmd = ($ENV{'HARNESS_COMPILE_TEST'})
                ? "./perl -I../lib ../utils/perlcc $test "
                  . "-run 2>> ./compilelog |"
                : "$^X $s $test|";
        $cmd = "MCR $cmd" if $^O eq 'VMS';
        $fh->open($cmd) or print "can't run $test. $!\n";
        $ok = $next = $max = 0;
        @failed = ();
        my %todo = ();
        my $bonus = 0;
        my $skipped = 0;
        my $skip_reason;
        while (<$fh>) {
            if( $verbose ){
                print $_;
            }
            # Plan line with a todo list: "1..M todo 3 5;"
            if (/^1\.\.([0-9]+) todo([\d\s]+)\;/) {
                $max = $1;
                for (split(/\s+/, $2)) { $todo{$_} = 1; }
                $totmax += $max;
                $files++;
                $next = 1;
            # Plain plan line, optionally "1..0 # Skipped: reason"
            } elsif (/^1\.\.([0-9]+)(\s*\#\s*[Ss]kip\S*(?>\s+)(.+))?/) {
                $max = $1;
                $totmax += $max;
                $files++;
                $next = 1;
                $skip_reason = $3 if not $max and defined $3;
            # Result lines; only meaningful once a plan has been seen.
            } elsif ($max && /^(not\s+)?ok\b/) {
                my $this = $next;
                if (/^not ok\s*(\d*)/){
                    $this = $1 if $1 > 0;
                    print "${ml}NOK $this" if $ml;
                    if (!$todo{$this}) {
                        push @failed, $this;
                    } else {
                        # An expected (todo) failure still counts as ok.
                        $ok++;
                        $totok++;
                    }
                } elsif (/^ok\s*(\d*)(\s*\#\s*[Ss]kip\S*(?:(?>\s+)(.+))?)?/) {
                    $this = $1 if $1 > 0;
                    print "${ml}ok $this/$max" if $ml;
                    $ok++;
                    $totok++;
                    $skipped++ if defined $2;
                    my $reason;
                    $reason = 'unknown reason' if defined $2;
                    $reason = $3 if defined $3;
                    if (defined $reason and defined $skip_reason) {
                        # print "was: '$skip_reason' new '$reason'\n";
                        $skip_reason = 'various reasons'
                            if $skip_reason ne $reason;
                    } elsif (defined $reason) {
                        $skip_reason = $reason;
                    }
                    # A todo test that passed is an unexpected success.
                    $bonus++, $totbonus++ if $todo{$this};
                }
                if ($this > $next) {
                    # print "Test output counter mismatch [test $this]\n";
                    # no need to warn probably
                    push @failed, $next..$this-1;
                } elsif ($this < $next) {
                    #we have seen more "ok" lines than the number suggests
                    print "Confused test output: test $this answered after test ", $next-1, "\n";
                    $next = $this;
                }
                $next = $this + 1;
            }
        }
        $fh->close; # must close to reap child resource values
        my $wstatus = $ignore_exitcode ? 0 : $?;        # Can trust $? ?
        my $estatus;
        $estatus = ($^O eq 'VMS'
                       ? eval 'use vmsish "status"; $estatus = $?'
                       : $wstatus >> 8);
        if ($wstatus) {
            # Non-zero exit: the script itself is dubious regardless of
            # how many subtests passed.  $percent stays undef unless set
            # below (historical behavior).
            my ($failed, $canon, $percent) = ('??', '??');
            printf "${ml}dubious\n\tTest returned status $estatus (wstat %d, 0x%x)\n",
                    $wstatus,$wstatus;
            print "\t\t(VMS status is $estatus)\n" if $^O eq 'VMS';
            if (corestatus($wstatus)) { # until we have a wait module
                if ($have_devel_corestack) {
                    Devel::CoreStack::stack($^X);
                } else {
                    print "\ttest program seems to have generated a core\n";
                }
            }
            $bad++;
            if ($max) {
                if ($next == $max + 1 and not @failed) {
                    print "\tafter all the subtests completed successfully\n";
                    $percent = 0;
                    $failed = 0;        # But we do not set $canon!
                } else {
                    push @failed, $next..$max;
                    $failed = @failed;
                    (my $txt, $canon) = canonfailed($max,$skipped,@failed);
                    $percent = 100*(scalar @failed)/$max;
                    print "DIED. ",$txt;
                }
            }
            $failedtests{$test} = { canon => $canon,  max => $max || '??',
                                    failed => $failed,
                                    name => $test, percent => $percent,
                                    estat => $estatus, wstat => $wstatus,
                                  };
        } elsif ($ok == $max && $next == $max+1) {
            # Clean exit and every planned subtest answered.
            if ($max and $skipped + $bonus) {
                my @msg;
                push(@msg, "$skipped/$max skipped: $skip_reason")
                    if $skipped;
                push(@msg, "$bonus/$max unexpectedly succeeded")
                    if $bonus;
                print "${ml}ok, ".join(', ', @msg)."\n";
            } elsif ($max) {
                print "${ml}ok\n";
            } elsif (defined $skip_reason) {
                print "skipped: $skip_reason\n";
                $tests_skipped++;
            } else {
                print "skipped test on this platform\n";
                $tests_skipped++;
            }
            $good++;
        } elsif ($max) {
            # Clean exit but some subtests failed or never answered.
            if ($next <= $max) {
                push @failed, $next..$max;
            }
            if (@failed) {
                my ($txt, $canon) = canonfailed($max,$skipped,@failed);
                print "${ml}$txt";
                $failedtests{$test} = { canon => $canon,  max => $max,
                                        failed => scalar @failed,
                                        name => $test, percent => 100*(scalar @failed)/$max,
                                        estat => '', wstat => '',
                                      };
            } else {
                print "Don't know which tests failed: got $ok ok, expected $max\n";
                $failedtests{$test} = { canon => '??',  max => $max,
                                        failed => '??',
                                        name => $test, percent => undef,
                                        estat => '', wstat => '',
                                      };
            }
            $bad++;
        } elsif ($next == 0) {
            # No plan line and no results at all.
            print "FAILED before any test output arrived\n";
            $bad++;
            $failedtests{$test} = { canon => '??',  max => '??',
                                    failed => '??',
                                    name => $test, percent => undef,
                                    estat => '', wstat => '',
                                  };
        }
        $subtests_skipped += $skipped;
        # Report any files the script left behind in the watched directory.
        if (defined $files_in_dir) {
            my @new_dir_files = globdir $files_in_dir;
            if (@new_dir_files != @dir_files) {
                my %f;
                @f{@new_dir_files} = (1) x @new_dir_files;
                delete @f{@dir_files};
                my @f = sort keys %f;
                print "LEAKED FILES: @f\n";
                @dir_files = @new_dir_files;
            }
        }
    }
    my $t_total = timediff(new Benchmark, $t_start);

    if ($^O eq 'VMS') {
        if (defined $old5lib) {
            $ENV{PERL5LIB} = $old5lib;
        } else {
            delete $ENV{PERL5LIB};
        }
    }
    my $bonusmsg = '';
    $bonusmsg = (" ($totbonus subtest".($totbonus>1?'s':'').
               " UNEXPECTEDLY SUCCEEDED)")
        if $totbonus;
    if ($tests_skipped) {
        $bonusmsg .= ", $tests_skipped test" . ($tests_skipped != 1 ? 's' : '');
        if ($subtests_skipped) {
            $bonusmsg .= " and $subtests_skipped subtest"
                         . ($subtests_skipped != 1 ? 's' : '');
        }
        $bonusmsg .= ' skipped';
    }
    elsif ($subtests_skipped) {
        $bonusmsg .= ", $subtests_skipped subtest"
                     . ($subtests_skipped != 1 ? 's' : '')
                     . " skipped";
    }
    if ($bad == 0 && $totmax) {
        print "All tests successful$bonusmsg.\n";
    } elsif ($total==0){
        die "FAILED--no tests were run for some reason.\n";
    } elsif ($totmax==0) {
        my $blurb = $total==1 ? "script" : "scripts";
        die "FAILED--$total test $blurb could be run, alas--no output ever seen\n";
    } else {
        $pct = sprintf("%.2f", $good / $total * 100);
        my $subpct = sprintf " %d/%d subtests failed, %.2f%% okay.",
        $totmax - $totok, $totmax, 100*$totok/$totmax;
        # Create formats
        # First, figure out max length of test names
        my $failed_str = "Failed Test";
        my $middle_str = " Status Wstat Total Fail Failed ";
        my $list_str = "List of Failed";
        my $max_namelen = length($failed_str);
        my $script;
        foreach $script (keys %failedtests) {
            $max_namelen =
                (length $failedtests{$script}->{name} > $max_namelen) ?
                 length $failedtests{$script}->{name} : $max_namelen;
        }
        # Fit the table into $columns, squeezing the "List of Failed"
        # column first and the name column second.
        my $list_len = $columns - length($middle_str) - $max_namelen;
        if ($list_len < length($list_str)) {
            $list_len = length($list_str);
            $max_namelen = $columns - length($middle_str) - $list_len;
            if ($max_namelen < length($failed_str)) {
                $max_namelen = length($failed_str);
                $columns = $max_namelen + length($middle_str) + $list_len;
            }
        }

        # Build the report formats as strings and eval them into place;
        # widths are only known at runtime, so static formats won't do.
        my $fmt_top = "format STDOUT_TOP =\n"
                      . sprintf("%-${max_namelen}s", $failed_str)
                      . $middle_str
                      . $list_str . "\n"
                      . "-" x $columns
                      . "\n.\n";
        my $fmt = "format STDOUT =\n"
                  . "@" . "<" x ($max_namelen - 1)
                  . " @>> @>>>> @>>>> @>>> ^##.##% "
                  . "^" . "<" x ($list_len - 1) . "\n"
                  . '{ $curtest->{name}, $curtest->{estat},'
                  . ' $curtest->{wstat}, $curtest->{max},'
                  . ' $curtest->{failed}, $curtest->{percent},'
                  . ' $curtest->{canon}'
                  . "\n}\n"
                  . "~~" . " " x ($columns - $list_len - 2) . "^"
                  . "<" x ($list_len - 1) . "\n"
                  . '$curtest->{canon}'
                  . "\n.\n";

        eval $fmt_top;
        die $@ if $@;
        eval $fmt;
        die $@ if $@;

        # Now write to formats
        for $script (sort keys %failedtests) {
            $curtest = $failedtests{$script};
            write;
        }
        if ($bad) {
            $bonusmsg =~ s/^,\s*//;
            print "$bonusmsg.\n" if $bonusmsg;
            die "Failed $bad/$total test scripts, $pct% okay.$subpct\n";
        }
    }
    printf("Files=%d, Tests=%d, %s\n", $files, $totmax, timestr($t_total, 'nop'));

    return ($bad == 0 && $totmax) ;
}
351 | |
aa689395 |
# Set once we have attempted to load Devel::CoreStack, so the (possibly
# failing) require is only tried a single time per process.
my $tried_devel_corestack;

# corestatus($wait_status)
# True when the child's wait status indicates a core dump.  Uses
# WCOREDUMP from wait.ph when that header is available; otherwise falls
# back to testing the traditional core-dump bit (0200).  As a side
# effect, the first call probes for Devel::CoreStack and bumps the
# package flag $have_devel_corestack when it is installed.
sub corestatus {
    my ($status) = @_;

    # wait.ph may be missing on some platforms; ignore require failure.
    eval { require 'wait.ph' };
    my $dumped_core = defined &WCOREDUMP ? WCOREDUMP($status) : $status & 0200;

    eval { require Devel::CoreStack; $have_devel_corestack++ }
        unless $tried_devel_corestack++;

    return $dumped_core;
}
364 | |
c07a80fd |
# canonfailed($max, $skipped, @failed)
#
# Collapse a list of failed subtest numbers into a canonical summary.
# Duplicates are dropped, the numbers sorted numerically, and
# consecutive runs rendered as "A-B" ranges.  Returns a two-element
# list ($txt, $canon): $txt is the human-readable report ("FAILED
# tests ..." plus a "\tFailed N/M tests, P% okay" line, mentioning
# skipped subtests when $skipped is non-zero), and $canon is the
# compact range string (e.g. "3-5, 7") used in the failure table.
sub canonfailed ($@) {
    my ($max, $skipped, @fails) = @_;

    # Unique, numerically sorted failure list; count before consuming.
    my %seen;
    @fails = sort { $a <=> $b } grep { !$seen{$_}++ } @fails;
    my $fail_count = @fails;

    my @out;
    my $canon;
    my $run_start = my $prev = shift @fails;
    if (@fails) {
        my @ranges;
        for my $t (@fails) {
            if ($t > $prev + 1) {
                # Gap found: flush the run that just ended.
                push @ranges, $run_start == $prev ? $prev : "$run_start-$prev";
                $run_start = $t;
            }
            $prev = $t;
        }
        # Flush the final run.
        push @ranges, $run_start == $prev ? $prev : "$run_start-$prev";
        $canon = join ', ', @ranges;
        push @out, "FAILED tests $canon\n";
    } else {
        push @out, "FAILED test $prev\n";
        $canon = $prev;
    }

    push @out, "\tFailed $fail_count/$max tests, ";
    push @out, sprintf("%.2f", 100 * (1 - $fail_count / $max)), "% okay";
    if ($skipped) {
        my $plural  = $skipped > 1 ? 's' : '';
        my $okay    = $max - $fail_count - $skipped;
        my $okay_pc = sprintf("%.2f", 100 * ($okay / $max));
        push @out, " (-$skipped skipped test$plural: $okay okay, $okay_pc%)";
    }
    push @out, "\n";

    return (join('', @out), $canon);
}
405 | |
406 | 1; |
cb1a09d0 |
407 | __END__ |
408 | |
409 | =head1 NAME |
410 | |
411 | Test::Harness - run perl standard test scripts with statistics |
412 | |
413 | =head1 SYNOPSIS |
414 | |
415 | use Test::Harness; |
416 | |
417 | runtests(@tests); |
418 | |
419 | =head1 DESCRIPTION |
420 | |
7b13a3f5 |
421 | (By using the L<Test> module, you can write test scripts without |
422 | knowing the exact output this module expects. However, if you need to |
423 | know the specifics, read on!) |
424 | |
cb1a09d0 |
425 | Perl test scripts print to standard output C<"ok N"> for each single |
426 | test, where C<N> is an increasing sequence of integers. The first line |
c0ee6f5c |
427 | output by a standard test script is C<"1..M"> with C<M> being the |
cb1a09d0 |
428 | number of tests that should be run within the test |
c0ee6f5c |
429 | script. Test::Harness::runtests(@tests) runs all the test scripts |
cb1a09d0 |
430 | named as arguments and checks standard output for the expected |
431 | C<"ok N"> strings. |
432 | |
c0ee6f5c |
433 | After all tests have been performed, runtests() prints some |
cb1a09d0 |
434 | performance statistics that are computed by the Benchmark module. |
435 | |
6c31b336 |
436 | =head2 The test script output |
437 | |
438 | Any output from the testscript to standard error is ignored and |
439 | bypassed, thus will be seen by the user. Lines written to standard |
c0ee6f5c |
440 | output containing C</^(not\s+)?ok\b/> are interpreted as feedback for |
441 | runtests(). All other lines are discarded. |
6c31b336 |
442 | |
443 | It is tolerated if the test numbers after C<ok> are omitted. In this |
444 | case Test::Harness maintains temporarily its own counter until the |
445 | script supplies test numbers again. So the following test script |
446 | |
447 | print <<END; |
448 | 1..6 |
449 | not ok |
450 | ok |
451 | not ok |
452 | ok |
453 | ok |
454 | END |
455 | |
456 | will generate |
457 | |
458 | FAILED tests 1, 3, 6 |
459 | Failed 3/6 tests, 50.00% okay |
460 | |
461 | The global variable $Test::Harness::verbose is exportable and can be |
c0ee6f5c |
462 | used to let runtests() display the standard output of the script |
6c31b336 |
463 | without altering the behavior otherwise. |
464 | |
fb73857a |
465 | The global variable $Test::Harness::switches is exportable and can be |
466 | used to set perl command line options used for running the test |
467 | script(s). The default value is C<-w>. |
468 | |
fac76ed7 |
469 | If the standard output line contains substring C< # Skip> (with |
470 | variations in spacing and case) after C<ok> or C<ok NUMBER>, it is |
471 | counted as a skipped test. If the whole testscript succeeds, the |
472 | count of skipped tests is included in the generated output. |
473 | |
45c0de28 |
474 | C<Test::Harness> reports the text after C< # Skip(whatever)> as a |
475 | reason for skipping. Similarly, one can include a similar explanation |
476 | in a C<1..0> line emitted if the test is skipped completely: |
477 | |
478 | 1..0 # Skipped: no leverage found |
479 | |
cb1a09d0 |
480 | =head1 EXPORT |
481 | |
c0ee6f5c |
482 | C<&runtests> is exported by Test::Harness per default. |
cb1a09d0 |
483 | |
484 | =head1 DIAGNOSTICS |
485 | |
486 | =over 4 |
487 | |
488 | =item C<All tests successful.\nFiles=%d, Tests=%d, %s> |
489 | |
490 | If all tests are successful some statistics about the performance are |
491 | printed. |
492 | |
6c31b336 |
493 | =item C<FAILED tests %s\n\tFailed %d/%d tests, %.2f%% okay.> |
494 | |
495 | For any single script that has failing subtests statistics like the |
496 | above are printed. |
497 | |
498 | =item C<Test returned status %d (wstat %d)> |
499 | |
81ff29e3 |
500 | Scripts that return a non-zero exit status, both C<$? E<gt>E<gt> 8> and C<$?> are |
6c31b336 |
501 | printed in a message similar to the above. |
502 | |
503 | =item C<Failed 1 test, %.2f%% okay. %s> |
cb1a09d0 |
504 | |
6c31b336 |
505 | =item C<Failed %d/%d tests, %.2f%% okay. %s> |
cb1a09d0 |
506 | |
507 | If not all tests were successful, the script dies with one of the |
508 | above messages. |
509 | |
510 | =back |
511 | |
9b0ceca9 |
512 | =head1 ENVIRONMENT |
513 | |
17a79f5b |
514 | Setting C<HARNESS_IGNORE_EXITCODE> makes harness ignore the exit status |
9b0ceca9 |
515 | of child processes. |
516 | |
0d0c0d42 |
517 | Setting C<HARNESS_NOTTY> to a true value forces it to behave as though |
518 | STDOUT were not a console. You may need to set this if you don't want |
519 | harness to output more frequent progress messages using carriage returns. |
520 | Some consoles may not handle carriage returns properly (which results |
521 | in a somewhat messy output). |
522 | |
9636a016 |
523 | Setting C<HARNESS_COMPILE_TEST> to a true value will make harness attempt |
524 | to compile the test using C<perlcc> before running it. |
525 | |
17a79f5b |
526 | If C<HARNESS_FILELEAK_IN_DIR> is set to the name of a directory, harness |
527 | will check after each test whether new files appeared in that directory, |
528 | and report them as |
529 | |
530 | LEAKED FILES: scr.tmp 0 my.db |
531 | |
532 | If relative, directory name is with respect to the current directory at |
533 | the moment runtests() was called. Putting absolute path into |
534 | C<HARNESS_FILELEAK_IN_DIR> may give more predictable results. |
535 | |
2b32313b |
536 | The value of C<HARNESS_PERL_SWITCHES> will be prepended to the |
537 | switches used to invoke perl on each test. For example, setting |
538 | C<HARNESS_PERL_SWITCHES> to "-W" will run all tests with all |
539 | warnings enabled. |
540 | |
0a931e4a |
541 | If C<HARNESS_COLUMNS> is set, then this value will be used for the |
542 | width of the terminal. If it is not set then it will default to |
543 | C<COLUMNS>. If this is not set, it will default to 80. Note that users |
544 | of Bourne-sh based shells will need to C<export COLUMNS> for this |
545 | module to use that variable. |
546 | |
f19ae7a7 |
547 | Harness sets C<HARNESS_ACTIVE> before executing the individual tests. |
548 | This allows the tests to determine if they are being executed through the |
549 | harness or by any other means. |
550 | |
cb1a09d0 |
551 | =head1 SEE ALSO |
552 | |
7b13a3f5 |
553 | L<Test> for writing test scripts and also L<Benchmark> for the |
554 | underlying timing routines. |
c07a80fd |
555 | |
556 | =head1 AUTHORS |
557 | |
558 | Either Tim Bunce or Andreas Koenig, we don't know. What we know for |
559 | sure is that it was inspired by Larry Wall's TEST script that came |
b876d4a6 |
560 | with perl distributions for ages. Numerous anonymous contributors |
561 | exist. Current maintainer is Andreas Koenig. |
cb1a09d0 |
562 | |
563 | =head1 BUGS |
564 | |
565 | Test::Harness uses $^X to determine the perl binary to run the tests |
6c31b336 |
566 | with. Test scripts running via the shebang (C<#!>) line may not be |
567 | portable because $^X is not consistent for shebang scripts across |
cb1a09d0 |
568 | platforms. This is no problem when Test::Harness is run with an |
6c31b336 |
569 | absolute path to the perl binary or when $^X can be found in the path. |
cb1a09d0 |
570 | |
571 | =cut |