Commit | Line | Data |
d667a7e6 |
1 | # -*- Mode: cperl; cperl-indent-level: 4 -*- |
b82fa0b7 |
2 | |
a0d0e21e |
3 | package Test::Harness; |
4 | |
3c87ea76 |
5 | require 5.00405; |
13287dd5 |
6 | use Test::Harness::Straps; |
7 | use Test::Harness::Assert; |
a0d0e21e |
8 | use Exporter; |
9 | use Benchmark; |
4633a7c4 |
10 | use Config; |
760ac839 |
11 | use strict; |
12 | |
ca09b021 |
13 | |
e4fc8a1e |
14 | use vars qw( |
15 | $VERSION |
16 | @ISA @EXPORT @EXPORT_OK |
17 | $Verbose $Switches $Debug |
18 | $verbose $switches $debug |
e4fc8a1e |
19 | $Curtest |
20 | $Columns |
43ef773b |
21 | $Timer |
e4fc8a1e |
22 | $ML $Last_ML_Print |
23 | $Strap |
43ef773b |
24 | $has_time_hires |
e4fc8a1e |
25 | ); |
26 | |
43ef773b |
BEGIN {
    # Prefer a high-resolution clock for per-test timing.  The string
    # eval lets us fall back silently to the core time() when
    # Time::HiRes is not installed; $has_time_hires records which one
    # we got (used when formatting elapsed times).
    eval "use Time::HiRes 'time'";
    $has_time_hires = !$@;
}
31 | |
e4fc8a1e |
32 | =head1 NAME |
33 | |
34 | Test::Harness - Run Perl standard test scripts with statistics |
35 | |
36 | =head1 VERSION |
37 | |
73ea3450 |
38 | Version 2.56 |
e4fc8a1e |
39 | |
40 | =cut |
41 | |
73ea3450 |
42 | $VERSION = "2.56"; |
4633a7c4 |
43 | |
9c5c68c8 |
44 | # Backwards compatibility for exportable variable names. |
5c0604c3 |
45 | *verbose = *Verbose; |
46 | *switches = *Switches; |
e4fc8a1e |
47 | *debug = *Debug; |
9c5c68c8 |
48 | |
f19ae7a7 |
49 | $ENV{HARNESS_ACTIVE} = 1; |
c0c1f8c2 |
50 | $ENV{HARNESS_VERSION} = $VERSION; |
f19ae7a7 |
51 | |
13287dd5 |
END {
    # For VMS.
    # NOTE(review): presumably environment changes can persist past the
    # process on VMS, so the harness markers set above are explicitly
    # removed at exit — confirm.
    delete $ENV{HARNESS_ACTIVE};
    delete $ENV{HARNESS_VERSION};
}
57 | |
9b0ceca9 |
58 | # Some experimental versions of OS/2 build have broken $? |
9c5c68c8 |
59 | my $Ignore_Exitcode = $ENV{HARNESS_IGNORE_EXITCODE}; |
60 | |
61 | my $Files_In_Dir = $ENV{HARNESS_FILELEAK_IN_DIR}; |
9b0ceca9 |
62 | |
d5d4ec93 |
63 | $Strap = Test::Harness::Straps->new; |
17a79f5b |
64 | |
3c87ea76 |
# Accessor for the shared Test::Harness::Straps instance used to run
# and parse the individual test scripts.
sub strap {
    return $Strap;
}
66 | |
9c5c68c8 |
67 | @ISA = ('Exporter'); |
68 | @EXPORT = qw(&runtests); |
69 | @EXPORT_OK = qw($verbose $switches); |
4633a7c4 |
70 | |
356733da |
71 | $Verbose = $ENV{HARNESS_VERBOSE} || 0; |
e4fc8a1e |
72 | $Debug = $ENV{HARNESS_DEBUG} || 0; |
9c5c68c8 |
73 | $Switches = "-w"; |
74 | $Columns = $ENV{HARNESS_COLUMNS} || $ENV{COLUMNS} || 80; |
b82fa0b7 |
75 | $Columns--; # Some shells have trouble with a full line of text. |
43ef773b |
76 | $Timer = $ENV{HARNESS_TIMER} || 0; |
b82fa0b7 |
77 | |
b82fa0b7 |
78 | =head1 SYNOPSIS |
79 | |
80 | use Test::Harness; |
81 | |
82 | runtests(@test_files); |
83 | |
84 | =head1 DESCRIPTION |
a0d0e21e |
85 | |
3c87ea76 |
86 | B<STOP!> If all you want to do is write a test script, consider |
87 | using Test::Simple. Test::Harness is the module that reads the |
88 | output from Test::Simple, Test::More and other modules based on |
89 | Test::Builder. You don't need to know about Test::Harness to use |
90 | those modules. |
b82fa0b7 |
91 | |
3c87ea76 |
92 | Test::Harness runs tests and expects output from the test in a |
93 | certain format. That format is called TAP, the Test Anything |
94 | Protocol. It is defined in L<Test::Harness::TAP>. |
b82fa0b7 |
95 | |
3c87ea76 |
96 | C<Test::Harness::runtests(@tests)> runs all the test scripts named |
97 | as arguments and checks standard output for the expected strings |
98 | in TAP format. |
b82fa0b7 |
99 | |
3c87ea76 |
100 | The F<prove> utility is a thin wrapper around Test::Harness. |
b82fa0b7 |
101 | |
13287dd5 |
102 | =head2 Taint mode |
103 | |
e4fc8a1e |
104 | Test::Harness will honor the C<-T> or C<-t> in the #! line on your |
105 | test files. So if you begin a test with: |
13287dd5 |
106 | |
107 | #!perl -T |
108 | |
109 | the test will be run with taint mode on. |
110 | |
13287dd5 |
111 | =head2 Configuration variables. |
112 | |
113 | These variables can be used to configure the behavior of |
114 | Test::Harness. They are exported on request. |
115 | |
116 | =over 4 |
117 | |
3c87ea76 |
118 | =item C<$Test::Harness::Verbose> |
13287dd5 |
119 | |
3c87ea76 |
120 | The package variable C<$Test::Harness::Verbose> is exportable and can be |
e4fc8a1e |
121 | used to let C<runtests()> display the standard output of the script |
122 | without altering the behavior otherwise. The F<prove> utility's C<-v> |
123 | flag will set this. |
13287dd5 |
124 | |
3c87ea76 |
125 | =item C<$Test::Harness::switches> |
13287dd5 |
126 | |
3c87ea76 |
127 | The package variable C<$Test::Harness::switches> is exportable and can be |
13287dd5 |
128 | used to set perl command line options used for running the test |
e4fc8a1e |
129 | script(s). The default value is C<-w>. It overrides C<HARNESS_SWITCHES>. |
13287dd5 |
130 | |
43ef773b |
131 | =item C<$Test::Harness::Timer> |
132 | |
133 | If set to true, and C<Time::HiRes> is available, print elapsed seconds |
134 | after each test file. |
135 | |
13287dd5 |
136 | =back |
137 | |
138 | |
b82fa0b7 |
139 | =head2 Failure |
140 | |
3c87ea76 |
141 | When tests fail, analyze the summary report: |
b82fa0b7 |
142 | |
2fe373ce |
143 | t/base..............ok |
144 | t/nonumbers.........ok |
145 | t/ok................ok |
146 | t/test-harness......ok |
147 | t/waterloo..........dubious |
b82fa0b7 |
148 | Test returned status 3 (wstat 768, 0x300) |
149 | DIED. FAILED tests 1, 3, 5, 7, 9, 11, 13, 15, 17, 19 |
150 | Failed 10/20 tests, 50.00% okay |
151 | Failed Test Stat Wstat Total Fail Failed List of Failed |
152 | ----------------------------------------------------------------------- |
153 | t/waterloo.t 3 768 20 10 50.00% 1 3 5 7 9 11 13 15 17 19 |
154 | Failed 1/5 test scripts, 80.00% okay. 10/44 subtests failed, 77.27% okay. |
155 | |
3c87ea76 |
156 | Everything passed but F<t/waterloo.t>. It failed 10 of 20 tests and |
b82fa0b7 |
157 | exited with non-zero status indicating something dubious happened. |
158 | |
159 | The columns in the summary report mean: |
160 | |
161 | =over 4 |
162 | |
163 | =item B<Failed Test> |
164 | |
165 | The test file which failed. |
166 | |
167 | =item B<Stat> |
168 | |
169 | If the test exited with non-zero, this is its exit status. |
170 | |
171 | =item B<Wstat> |
172 | |
e4fc8a1e |
173 | The wait status of the test. |
b82fa0b7 |
174 | |
175 | =item B<Total> |
176 | |
177 | Total number of tests expected to run. |
178 | |
179 | =item B<Fail> |
180 | |
181 | Number which failed, either from "not ok" or because they never ran. |
182 | |
183 | =item B<Failed> |
184 | |
185 | Percentage of the total tests which failed. |
186 | |
187 | =item B<List of Failed> |
188 | |
189 | A list of the tests which failed. Successive failures may be |
190 | abbreviated (ie. 15-20 to indicate that tests 15, 16, 17, 18, 19 and |
191 | 20 failed). |
192 | |
193 | =back |
194 | |
195 | |
196 | =head2 Functions |
197 | |
198 | Test::Harness currently only has one function, here it is. |
199 | |
200 | =over 4 |
201 | |
202 | =item B<runtests> |
203 | |
204 | my $allok = runtests(@test_files); |
205 | |
3c87ea76 |
206 | This runs all the given I<@test_files> and divines whether they passed |
b82fa0b7 |
207 | or failed based on their output to STDOUT (details above). It prints |
208 | out each individual test which failed along with a summary report and |
209 | how long it all took. |
210 | |
3c87ea76 |
211 | It returns true if everything was ok. Otherwise it will C<die()> with |
d5d4ec93 |
212 | one of the messages in the DIAGNOSTICS section. |
b82fa0b7 |
213 | |
b82fa0b7 |
214 | =cut |
17a79f5b |
215 | |
a0d0e21e |
sub runtests {
    my @tests = @_;

    # Neutralize any global output record/field separators for the
    # duration of the run so our report formatting is predictable.
    local ($\, $,);

    my($totals, $failed) = _run_all_tests(@tests);
    _show_results($totals, $failed);

    my $all_passed = _all_ok($totals);

    # Sanity check: overall success must agree with an empty failure set.
    assert(($all_passed xor keys %$failed),
           q{ok status jives with $failedtests});

    return $all_passed;
}
231 | |
232 | =begin _private |
233 | |
2fe373ce |
234 | =item B<_all_ok> |
235 | |
236 | my $ok = _all_ok(\%tot); |
237 | |
238 | Tells you if this test run is overall successful or not. |
239 | |
240 | =cut |
241 | |
sub _all_ok {
    my $tot = shift;

    # A run is successful when no script failed AND something actually
    # happened (at least one subtest ran, or whole scripts were skipped).
    my $nothing_failed = ($tot->{bad} == 0);
    my $ran_something  = ($tot->{max} || $tot->{skipped});

    return ($nothing_failed && $ran_something) ? 1 : 0;
}
247 | |
b82fa0b7 |
248 | =item B<_globdir> |
249 | |
250 | my @files = _globdir $dir; |
251 | |
252 | Returns all the files in a directory. This is shorthand for backwards |
3c87ea76 |
253 | compatibility on systems where C<glob()> doesn't work right. |
b82fa0b7 |
254 | |
255 | =cut |
256 | |
# _globdir($dir) — returns every entry in $dir (including '.' and '..').
# Shorthand kept for backwards compatibility on systems where glob()
# doesn't work right.  Returns the empty list if the directory cannot
# be opened.
sub _globdir {
    my $dir = shift;

    # Lexical handle + checked opendir: the original used the bareword
    # global DIRH and ignored opendir failure, which clobbered any other
    # DIRH and emitted readdir/closedir warnings on a bad directory.
    opendir( my $dh, $dir ) or return ();
    my @entries = readdir $dh;
    closedir $dh;

    return @entries;
}
264 | |
b82fa0b7 |
265 | =item B<_run_all_tests> |
266 | |
267 | my($total, $failed) = _run_all_tests(@test_files); |
268 | |
e4fc8a1e |
269 | Runs all the given C<@test_files> (as C<runtests()>) but does it |
270 | quietly (no report). $total is a hash ref summary of all the tests |
271 | run. Its keys and values are this: |
b82fa0b7 |
272 | |
273 | bonus Number of individual todo tests unexpectedly passed |
274 | max Number of individual tests ran |
275 | ok Number of individual tests passed |
276 | sub_skipped Number of individual tests skipped |
2fe373ce |
277 | todo Number of individual todo tests |
b82fa0b7 |
278 | |
279 | files Number of test files ran |
280 | good Number of test files passed |
281 | bad Number of test files failed |
282 | tests Number of test files originally given |
283 | skipped Number of test files skipped |
284 | |
e4fc8a1e |
285 | If C<< $total->{bad} == 0 >> and C<< $total->{max} > 0 >>, you've |
286 | got a successful test. |
b82fa0b7 |
287 | |
288 | $failed is a hash ref of all the test scripts which failed. Each key |
289 | is the name of a test script, each value is another hash representing |
290 | how that script failed. Its keys are these: |
9c5c68c8 |
291 | |
b82fa0b7 |
292 | name Name of the test which failed |
293 | estat Script's exit value |
294 | wstat Script's wait status |
295 | max Number of individual tests |
296 | failed Number which failed |
297 | percent Percentage of tests which failed |
298 | canon List of tests which failed (as string). |
299 | |
e4fc8a1e |
300 | C<$failed> should be empty if everything passed. |
b82fa0b7 |
301 | |
302 | B<NOTE> Currently this function is still noisy. I'm working on it. |
303 | |
304 | =cut |
305 | |
3c87ea76 |
# Turn on autoflush ($| = 1) for the given filehandle, restoring the
# previously selected default output handle afterwards.
sub _autoflush {
    my $fh = shift;
    my $previous = select $fh;
    $| = 1;
    select $previous;
}
313 | |
b82fa0b7 |
sub _run_all_tests {
    my @tests = @_;

    # Unbuffer both streams so leaders and results interleave correctly.
    _autoflush(\*STDOUT);
    _autoflush(\*STDERR);

    # Per-script failure records, keyed by test file name.
    my(%failedtests);

    # Test-wide totals.
    my(%tot) = (
        bonus       => 0,
        max         => 0,
        ok          => 0,
        files       => 0,
        bad         => 0,
        good        => 0,
        tests       => scalar @tests,
        sub_skipped => 0,
        todo        => 0,
        skipped     => 0,
        bench       => 0,
    );

    # Snapshot of $Files_In_Dir's contents, used to detect leaked files.
    my @dir_files;
    @dir_files = _globdir $Files_In_Dir if defined $Files_In_Dir;
    my $run_start_time = new Benchmark;

    my $width = _leader_width(@tests);
    foreach my $tfile (@tests) {
        $Last_ML_Print = 0;  # so each test prints at least once
        my($leader, $ml) = _mk_leader($tfile, $width);
        local $ML = $ml;     # handlers use $ML for same-line progress updates

        print $leader;

        $tot{files}++;

        $Strap->{_seen_header} = 0;
        if ( $Test::Harness::Debug ) {
            print "# Running: ", $Strap->_command_line($tfile), "\n";
        }
        my $test_start_time = $Timer ? time : 0;
        # analyze_file() runs the script and parses its TAP output; if it
        # can't even run the file, warn and move on to the next one.
        my %results = $Strap->analyze_file($tfile) or
          do { warn $Strap->{error}, "\n"; next };
        my $elapsed;
        if ( $Timer ) {
            $elapsed = time - $test_start_time;
            if ( $has_time_hires ) {
                $elapsed = sprintf( " %8.3fs", $elapsed );
            }
            else {
                # Whole-second clock: a zero delta means "under 1s".
                $elapsed = sprintf( " %8ss", $elapsed ? $elapsed : "<1" );
            }
        }
        else {
            $elapsed = "";
        }

        # state of the current test.
        my @failed = grep { !$results{details}[$_-1]{ok} }
                     1..@{$results{details}};
        my %test = (
            ok          => $results{ok},
            'next'      => $Strap->{'next'},
            max         => $results{max},
            failed      => \@failed,
            bonus       => $results{bonus},
            skipped     => $results{skip},
            skip_reason => $results{skip_reason},
            skip_all    => $Strap->{skip_all},
            ml          => $ml,
        );

        # Fold this script's counts into the run-wide totals.
        $tot{bonus}       += $results{bonus};
        $tot{max}         += $results{max};
        $tot{ok}          += $results{ok};
        $tot{todo}        += $results{todo};
        $tot{sub_skipped} += $results{skip};

        my($estatus, $wstatus) = @results{qw(exit wait)};

        if ($results{passing}) {
            # XXX Combine these first two
            if ($test{max} and $test{skipped} + $test{bonus}) {
                my @msg;
                push(@msg, "$test{skipped}/$test{max} skipped: $test{skip_reason}")
                    if $test{skipped};
                push(@msg, "$test{bonus}/$test{max} unexpectedly succeeded")
                    if $test{bonus};
                print "$test{ml}ok$elapsed\n ".join(', ', @msg)."\n";
            }
            elsif ( $test{max} ) {
                print "$test{ml}ok$elapsed\n";
            }
            elsif ( defined $test{skip_all} and length $test{skip_all} ) {
                print "skipped\n all skipped: $test{skip_all}\n";
                $tot{skipped}++;
            }
            else {
                print "skipped\n all skipped: no reason given\n";
                $tot{skipped}++;
            }
            $tot{good}++;
        }
        else {
            # List unrun tests as failures.
            if ($test{'next'} <= $test{max}) {
                push @{$test{failed}}, $test{'next'}..$test{max};
            }
            # List overruns as failures.
            else {
                my $details = $results{details};
                foreach my $overrun ($test{max}+1..@$details) {
                    next unless ref $details->[$overrun-1];
                    push @{$test{failed}}, $overrun
                }
            }

            if ($wstatus) {
                # Non-zero wait status: the script itself blew up.
                $failedtests{$tfile} = _dubious_return(\%test, \%tot,
                                                       $estatus, $wstatus);
                $failedtests{$tfile}{name} = $tfile;
            }
            elsif($results{seen}) {
                # Script exited cleanly but some subtests failed.
                if (@{$test{failed}} and $test{max}) {
                    my ($txt, $canon) = _canonfailed($test{max},$test{skipped},
                                                     @{$test{failed}});
                    print "$test{ml}$txt";
                    $failedtests{$tfile} = { canon   => $canon,
                                             max     => $test{max},
                                             failed  => scalar @{$test{failed}},
                                             name    => $tfile,
                                             percent => 100*(scalar @{$test{failed}})/$test{max},
                                             estat   => '',
                                             wstat   => '',
                                           };
                }
                else {
                    print "Don't know which tests failed: got $test{ok} ok, ".
                          "expected $test{max}\n";
                    $failedtests{$tfile} = { canon   => '??',
                                             max     => $test{max},
                                             failed  => '??',
                                             name    => $tfile,
                                             percent => undef,
                                             estat   => '',
                                             wstat   => '',
                                           };
                }
                $tot{bad}++;
            }
            else {
                # No TAP output ever arrived from the script.
                print "FAILED before any test output arrived\n";
                $tot{bad}++;
                $failedtests{$tfile} = { canon   => '??',
                                         max     => '??',
                                         failed  => '??',
                                         name    => $tfile,
                                         percent => undef,
                                         estat   => '',
                                         wstat   => '',
                                       };
            }
        }

        # Report files that appeared in $Files_In_Dir during this test.
        if (defined $Files_In_Dir) {
            my @new_dir_files = _globdir $Files_In_Dir;
            if (@new_dir_files != @dir_files) {
                my %f;
                @f{@new_dir_files} = (1) x @new_dir_files;
                delete @f{@dir_files};
                my @f = sort keys %f;
                print "LEAKED FILES: @f\n";
                @dir_files = @new_dir_files;
            }
        }
    } # foreach test
    $tot{bench} = timediff(new Benchmark, $run_start_time);

    $Strap->_restore_PERL5LIB;

    return(\%tot, \%failedtests);
}
497 | |
b82fa0b7 |
498 | =item B<_mk_leader> |
499 | |
7a315204 |
500 | my($leader, $ml) = _mk_leader($test_file, $width); |
b82fa0b7 |
501 | |
3c87ea76 |
502 | Generates the 't/foo........' leader for the given C<$test_file> as well |
b82fa0b7 |
503 | as a similar version which will overwrite the current line (by use of |
e4fc8a1e |
504 | \r and such). C<$ml> may be empty if Test::Harness doesn't think you're |
2fe373ce |
505 | on TTY. |
506 | |
e4fc8a1e |
507 | The C<$width> is the width of the "yada/blah.." string. |
b82fa0b7 |
508 | |
509 | =cut |
510 | |
sub _mk_leader {
    my($file, $width) = @_;
    chomp $file;

    # Replace the file extension with a single dot: "t/foo.t" -> "t/foo."
    $file =~ s/\.\w+$/./;

    # VMS paths spell directories differently.
    $file =~ s/^.*\.t\./\[.t./s if $^O eq 'VMS';

    my $padding = '.' x ($width - length $file);
    my $leader  = $file . $padding;

    # $ml is the carriage-return prefix used to rewrite the current line
    # in place; it stays empty when not talking to a terminal, when
    # HARNESS_NOTTY is set, or in verbose mode.
    my $ml = '';
    if ( -t STDOUT and not $ENV{HARNESS_NOTTY} and not $Verbose ) {
        $ml = "\r" . (' ' x 77) . "\r" . $leader;
    }

    return($leader, $ml);
}
528 | |
13287dd5 |
529 | =item B<_leader_width> |
530 | |
531 | my($width) = _leader_width(@test_files); |
532 | |
533 | Calculates how wide the leader should be based on the length of the |
534 | longest test name. |
535 | |
536 | =cut |
537 | |
sub _leader_width {
    my @names = @_;

    my $longest_name   = 0;
    my $longest_suffix = 0;
    for my $name (@names) {
        my $suffix = ($name =~ /\.(\w+)$/) ? $1 : '';
        $longest_name   = length $name   if length($name)   > $longest_name;
        $longest_suffix = length $suffix if length($suffix) > $longest_suffix;
    }

    # + 3 : we want three dots between the test name and the "ok";
    # the suffix length is subtracted because _mk_leader strips it.
    return $longest_name + 3 - $longest_suffix;
}
551 | |
9c5c68c8 |
552 | |
# Print the end-of-run summary for runtests().  Dies (instead of
# returning) when the run failed; the exact messages are listed in the
# DIAGNOSTICS section of the POD.
sub _show_results {
    my($tot, $failedtests) = @_;

    my $pct;
    my $bonusmsg = _bonusmsg($tot);

    if (_all_ok($tot)) {
        print "All tests successful$bonusmsg.\n";
    }
    elsif (!$tot->{tests}){
        die "FAILED--no tests were run for some reason.\n";
    }
    elsif (!$tot->{max}) {
        # Scripts ran but none of them produced any test output.
        my $blurb = $tot->{tests}==1 ? "script" : "scripts";
        die "FAILED--$tot->{tests} test $blurb could be run, ".
            "alas--no output ever seen\n";
    }
    else {
        $pct = sprintf("%.2f", $tot->{good} / $tot->{tests} * 100);
        my $percent_ok = 100*$tot->{ok}/$tot->{max};
        my $subpct = sprintf " %d/%d subtests failed, %.2f%% okay.",
                             $tot->{max} - $tot->{ok}, $tot->{max},
                             $percent_ok;

        # Declares the STDOUT/STDOUT_TOP formats used by write() below.
        my($fmt_top, $fmt) = _create_fmts($failedtests);

        # Now write to formats
        for my $script (sort keys %$failedtests) {
            $Curtest = $failedtests->{$script};
            write;
        }
        if ($tot->{bad}) {
            $bonusmsg =~ s/^,\s*//;
            print "$bonusmsg.\n" if $bonusmsg;
            die "Failed $tot->{bad}/$tot->{tests} test scripts, $pct% okay.".
                "$subpct\n";
        }
    }

    printf("Files=%d, Tests=%d, %s\n",
           $tot->{files}, $tot->{max}, timestr($tot->{bench}, 'nop'));
}
595 | |
596 | |
3c87ea76 |
# Dispatch table mapping the TAP line types reported by the strap to the
# handler subs defined below.  (The \&name references resolve at call
# time, so it is fine that the subs appear further down the file.)
my %Handlers = (
    header  => \&header_handler,
    test    => \&test_handler,
    bailout => \&bailout_handler,
);

# Invoked by Test::Harness::Straps for every line of test output.
$Strap->{callback} = \&strap_callback;
sub strap_callback {
    my($self, $line, $type, $totals) = @_;
    # In verbose mode, echo the raw test output as it arrives.
    print $line if $Verbose;

    # Lines with no registered handler (e.g. comments) are ignored.
    my $meth = $Handlers{$type};
    $meth->($self, $line, $type, $totals) if $meth;
};
9c5c68c8 |
611 | |
9c5c68c8 |
612 | |
3c87ea76 |
# Handle a "1..M" plan line: track repeats and complain when the plan
# shows up in the middle of the test output.
sub header_handler {
    my($self, $line, $type, $totals) = @_;

    if ( $self->{_seen_header} ) {
        warn "Test header seen more than once!\n";
    }
    $self->{_seen_header}++;

    my $plan_out_of_place = $totals->{seen}
                         && $totals->{max} < $totals->{seen};
    warn "1..M can only appear at the beginning or end of tests\n"
        if $plan_out_of_place;
}
13287dd5 |
624 | |
3c87ea76 |
# Handle an individual "ok"/"not ok" line: update the one-line progress
# display and report out-of-sequence test numbers.
sub test_handler {
    my($self, $line, $type, $totals) = @_;

    my $curr = $totals->{seen};     # number of the test just seen
    my $next = $self->{'next'};     # number the strap expected next
    my $max  = $totals->{max};
    my $detail = $totals->{details}[-1];

    if( $detail->{ok} ) {
        # Throttled: the progress line updates at most once per second.
        _print_ml_less("ok $curr/$max");

        if( $detail->{type} eq 'skip' ) {
            # Keep one skip reason per file; if the tests give several
            # different reasons, collapse to 'various reasons'.
            $totals->{skip_reason} = $detail->{reason}
              unless defined $totals->{skip_reason};
            $totals->{skip_reason} = 'various reasons'
              if $totals->{skip_reason} ne $detail->{reason};
        }
    }
    else {
        _print_ml("NOK $curr");
    }

    if( $curr > $next ) {
        print "Test output counter mismatch [test $curr]\n";
    }
    elsif( $curr < $next ) {
        print "Confused test output: test $curr answered after ".
              "test ", $next - 1, "\n";
    }
};
2fe373ce |
656 | |
3c87ea76 |
# Handle a "Bail out!" line: abort the whole run immediately, quoting
# the test's reason when one was given.
sub bailout_handler {
    my($self, $line, $type, $totals) = @_;

    my $message = "FAILED--Further testing stopped";
    if ( $self->{bailout_reason} ) {
        $message .= ": $self->{bailout_reason}\n";
    }
    else {
        $message .= ".\n";
    }
    die $message;
}
356733da |
663 | |
9c5c68c8 |
664 | |
308957f5 |
# Rewrite the current progress line; a no-op unless $ML (the
# carriage-return prefix from _mk_leader) is non-empty.
sub _print_ml {
    return unless $ML;
    print join('', $ML, @_);
}
668 | |
669 | |
43ef773b |
# Print updates only once per second, using $Last_ML_Print to remember
# the last whole second at which we refreshed the progress line.
sub _print_ml_less {
    my $now = CORE::time;
    return if $Last_ML_Print == $now;
    _print_ml(@_);
    $Last_ML_Print = $now;
}
678 | |
9c5c68c8 |
# Build the parenthetical/suffix message about unexpected successes and
# skips for the "All tests successful" line.  Returns '' when there is
# nothing noteworthy.
sub _bonusmsg {
    my($tot) = @_;

    my $msg = '';
    if ( $tot->{bonus} ) {
        my $plural = $tot->{bonus} > 1 ? 's' : '';
        $msg = " ($tot->{bonus} subtest$plural UNEXPECTEDLY SUCCEEDED)";
    }

    if ( $tot->{skipped} ) {
        $msg .= ", $tot->{skipped} test" . ($tot->{skipped} != 1 ? 's' : '');
        if ( $tot->{sub_skipped} ) {
            $msg .= " and $tot->{sub_skipped} subtest"
                  . ($tot->{sub_skipped} != 1 ? 's' : '');
        }
        $msg .= ' skipped';
    }
    elsif ( $tot->{sub_skipped} ) {
        $msg .= ", $tot->{sub_skipped} subtest"
              . ($tot->{sub_skipped} != 1 ? 's' : '')
              . " skipped";
    }

    return $msg;
}
704 | |
9c5c68c8 |
# Test program go boom.
#
# _dubious_return(\%test, \%tot, $estatus, $wstatus)
#
# Report a test script that exited with a non-zero wait status,
# increment $tot->{bad}, and return the hashref describing the failure
# for the summary table.  Tests after the last one answered are counted
# as failures.
sub _dubious_return {
    my($test, $tot, $estatus, $wstatus) = @_;
    # $percent deliberately starts undef; it is only assigned below once
    # $test->{max} is known.
    my ($failed, $canon, $percent) = ('??', '??');

    printf "$test->{ml}dubious\n\tTest returned status $estatus ".
           "(wstat %d, 0x%x)\n",
           $wstatus,$wstatus;
    print "\t\t(VMS status is $estatus)\n" if $^O eq 'VMS';

    $tot->{bad}++;

    if ($test->{max}) {
        if ($test->{'next'} == $test->{max} + 1 and not @{$test->{failed}}) {
            # Every planned test passed before the script died.
            print "\tafter all the subtests completed successfully\n";
            $percent = 0;
            $failed = 0;        # But we do not set $canon!
        }
        else {
            # Treat every unanswered test as a failure.
            push @{$test->{failed}}, $test->{'next'}..$test->{max};
            $failed = @{$test->{failed}};
            (my $txt, $canon) = _canonfailed($test->{max},$test->{skipped},@{$test->{failed}});
            $percent = 100*(scalar @{$test->{failed}})/$test->{max};
            print "DIED. ",$txt;
        }
    }

    return { canon => $canon,  max => $test->{max} || '??',
             failed => $failed,
             percent => $percent,
             estat => $estatus, wstat => $wstatus,
           };
}
738 | |
739 | |
9c5c68c8 |
# _create_fmts(\%failedtests)
#
# Build and install (via string eval) the STDOUT_TOP/STDOUT formats that
# _show_results() uses with write() to print the failure summary table.
# Column widths adapt to the longest failing test name; as a last resort
# the global $Columns is widened so everything fits.
sub _create_fmts {
    my($failedtests) = @_;

    my $failed_str = "Failed Test";
    my $middle_str = " Stat Wstat Total Fail Failed ";
    my $list_str = "List of Failed";

    # Figure out our longest name string for formatting purposes.
    my $max_namelen = length($failed_str);
    foreach my $script (keys %$failedtests) {
        my $namelen = length $failedtests->{$script}->{name};
        $max_namelen = $namelen if $namelen > $max_namelen;
    }

    # Give whatever width remains to the "List of Failed" column; if
    # that leaves too little, shrink the name column, and if even the
    # header no longer fits, grow $Columns itself.
    my $list_len = $Columns - length($middle_str) - $max_namelen;
    if ($list_len < length($list_str)) {
        $list_len = length($list_str);
        $max_namelen = $Columns - length($middle_str) - $list_len;
        if ($max_namelen < length($failed_str)) {
            $max_namelen = length($failed_str);
            $Columns = $max_namelen + length($middle_str) + $list_len;
        }
    }

    my $fmt_top = "format STDOUT_TOP =\n"
                  . sprintf("%-${max_namelen}s", $failed_str)
                  . $middle_str
                  . $list_str . "\n"
                  . "-" x $Columns
                  . "\n.\n";

    # Body format: one row per failed script, with the failed-test list
    # continued on extra lines ("~~") when it is too long for one row.
    my $fmt = "format STDOUT =\n"
              . "@" . "<" x ($max_namelen - 1)
              . " @>> @>>>> @>>>> @>>> ^##.##% "
              . "^" . "<" x ($list_len - 1) . "\n"
              . '{ $Curtest->{name}, $Curtest->{estat},'
              . ' $Curtest->{wstat}, $Curtest->{max},'
              . ' $Curtest->{failed}, $Curtest->{percent},'
              . ' $Curtest->{canon}'
              . "\n}\n"
              . "~~" . " " x ($Columns - $list_len - 2) . "^"
              . "<" x ($list_len - 1) . "\n"
              . '$Curtest->{canon}'
              . "\n.\n";

    # Install the formats; an error here means the generated format
    # text is malformed, which is a bug in this sub.
    eval $fmt_top;
    die $@ if $@;
    eval $fmt;
    die $@ if $@;

    return($fmt_top, $fmt);
}
792 | |
60e33a80 |
# _canonfailed($max, $skipped, @failed) -> ($text, $canon)
#
# Build the human-readable "FAILED tests ..." report for one script.
# @failed holds the failing test numbers (duplicates allowed); $canon is
# the canonical list with consecutive runs collapsed to "M-N" ranges,
# and $text is the multi-line report including the percentage summary.
sub _canonfailed ($$@) {
    my($max,$skipped,@failed) = @_;
    # De-duplicate and numerically sort before building ranges.
    my %seen;
    @failed = sort {$a <=> $b} grep !$seen{$_}++, @failed;
    my $failed = @failed;       # count of distinct failures
    my @result = ();
    my @canon = ();
    my $min;
    my $last = $min = shift @failed;
    my $canon;
    if (@failed) {
        # The extra trailing copy of $failed[-1] forces the loop to
        # flush the final open range.
        for (@failed, $failed[-1]) { # don't forget the last one
            if ($_ > $last+1 || $_ == $last) {
                # Current run [$min..$last] has ended; emit it.
                push @canon, ($min == $last) ? $last : "$min-$last";
                $min = $_;
            }
            $last = $_;
        }
        local $" = ", ";
        push @result, "FAILED tests @canon\n";
        $canon = join ' ', @canon;
    }
    else {
        # Exactly one failing test.
        push @result, "FAILED test $last\n";
        $canon = $last;
    }

    push @result, "\tFailed $failed/$max tests, ";
    if ($max) {
        push @result, sprintf("%.2f",100*(1-$failed/$max)), "% okay";
    }
    else {
        # No plan seen; percentage cannot be computed.
        push @result, "?% okay";
    }
    my $ender = 's' x ($skipped > 1);
    if ($skipped) {
        my $good = $max - $failed - $skipped;
        my $skipmsg = " (less $skipped skipped test$ender: $good okay, ";
        if ($max) {
            my $goodper = sprintf("%.2f",100*($good/$max));
            $skipmsg .= "$goodper%)";
        }
        else {
            $skipmsg .= "?%)";
        }
        push @result, $skipmsg;
    }
    push @result, "\n";
    my $txt = join "", @result;
    ($txt, $canon);
}
844 | |
b82fa0b7 |
845 | =end _private |
9c5c68c8 |
846 | |
b82fa0b7 |
847 | =back |
d667a7e6 |
848 | |
b82fa0b7 |
849 | =cut |
9c5c68c8 |
850 | |
9c5c68c8 |
851 | |
b82fa0b7 |
852 | 1; |
853 | __END__ |
9c5c68c8 |
854 | |
855 | |
cb1a09d0 |
856 | =head1 EXPORT |
857 | |
e8df9912 |
858 | C<&runtests> is exported by Test::Harness by default. |
cb1a09d0 |
859 | |
e4fc8a1e |
860 | C<$verbose>, C<$switches> and C<$debug> are exported upon request. |
9c5c68c8 |
861 | |
cb1a09d0 |
862 | =head1 DIAGNOSTICS |
863 | |
864 | =over 4 |
865 | |
866 | =item C<All tests successful.\nFiles=%d, Tests=%d, %s> |
867 | |
868 | If all tests are successful some statistics about the performance are |
869 | printed. |
870 | |
6c31b336 |
871 | =item C<FAILED tests %s\n\tFailed %d/%d tests, %.2f%% okay.> |
872 | |
873 | For any single script that has failing subtests statistics like the |
874 | above are printed. |
875 | |
876 | =item C<Test returned status %d (wstat %d)> |
877 | |
9c5c68c8 |
878 | Scripts that return a non-zero exit status, both C<$? E<gt>E<gt> 8> |
879 | and C<$?> are printed in a message similar to the above. |
6c31b336 |
880 | |
881 | =item C<Failed 1 test, %.2f%% okay. %s> |
cb1a09d0 |
882 | |
6c31b336 |
883 | =item C<Failed %d/%d tests, %.2f%% okay. %s> |
cb1a09d0 |
884 | |
885 | If not all tests were successful, the script dies with one of the |
886 | above messages. |
887 | |
308957f5 |
888 | =item C<FAILED--Further testing stopped: %s> |
d667a7e6 |
889 | |
890 | If a single subtest decides that further testing will not make sense, |
891 | the script dies with this message. |
892 | |
cb1a09d0 |
893 | =back |
894 | |
c0c1f8c2 |
895 | =head1 ENVIRONMENT VARIABLES THAT TEST::HARNESS SETS |
896 | |
897 | Test::Harness sets these before executing the individual tests. |
9b0ceca9 |
898 | |
37ce32a7 |
899 | =over 4 |
900 | |
356733da |
901 | =item C<HARNESS_ACTIVE> |
37ce32a7 |
902 | |
c0c1f8c2 |
903 | This is set to a true value. It allows the tests to determine if they |
904 | are being executed through the harness or by any other means. |
905 | |
906 | =item C<HARNESS_VERSION> |
907 | |
908 | This is the version of Test::Harness. |
909 | |
910 | =back |
911 | |
912 | =head1 ENVIRONMENT VARIABLES THAT AFFECT TEST::HARNESS |
913 | |
914 | =over 4 |
37ce32a7 |
915 | |
356733da |
916 | =item C<HARNESS_COLUMNS> |
9b0ceca9 |
917 | |
356733da |
918 | This value will be used for the width of the terminal. If it is not |
919 | set then it will default to C<COLUMNS>. If this is not set, it will |
920 | default to 80. Note that users of Bourne-sh based shells will need to |
921 | C<export COLUMNS> for this module to use that variable. |
0d0c0d42 |
922 | |
b82fa0b7 |
923 | =item C<HARNESS_COMPILE_TEST> |
9636a016 |
924 | |
37ce32a7 |
925 | When true it will make harness attempt to compile the test using |
926 | C<perlcc> before running it. |
927 | |
b82fa0b7 |
928 | B<NOTE> This currently only works when sitting in the perl source |
929 | directory! |
930 | |
e4fc8a1e |
931 | =item C<HARNESS_DEBUG> |
932 | |
933 | If true, Test::Harness will print debugging information about itself as |
934 | it runs the tests. This is different from C<HARNESS_VERBOSE>, which prints |
935 | the output from the test being run. Setting C<$Test::Harness::Debug> will |
936 | override this, or you can use the C<-d> switch in the F<prove> utility. |
937 | |
b82fa0b7 |
938 | =item C<HARNESS_FILELEAK_IN_DIR> |
37ce32a7 |
939 | |
940 | When set to the name of a directory, harness will check after each |
941 | test whether new files appeared in that directory, and report them as |
17a79f5b |
942 | |
943 | LEAKED FILES: scr.tmp 0 my.db |
944 | |
945 | If relative, the directory name is interpreted with respect to the current
946 | directory at the moment runtests() was called. Putting an absolute path into
13287dd5 |
947 | C<HARNESS_FILELEAK_IN_DIR> may give more predictable results. |
17a79f5b |
948 | |
356733da |
949 | =item C<HARNESS_IGNORE_EXITCODE> |
950 | |
951 | Makes harness ignore the exit status of child processes when defined. |
952 | |
953 | =item C<HARNESS_NOTTY> |
954 | |
955 | When set to a true value, forces it to behave as though STDOUT were |
956 | not a console. You may need to set this if you don't want harness to |
957 | output more frequent progress messages using carriage returns. Some |
958 | consoles may not handle carriage returns properly (which results in a |
959 | somewhat messy output). |
960 | |
e4fc8a1e |
961 | =item C<HARNESS_PERL> |
962 | |
963 | Usually your tests will be run by C<$^X>, the currently-executing Perl. |
964 | However, you may want to have it run by a different executable, such as |
965 | a threading perl, or a different version. |
966 | |
967 | If you're using the F<prove> utility, you can use the C<--perl> switch. |
60af4b93 |
968 | |
b82fa0b7 |
969 | =item C<HARNESS_PERL_SWITCHES> |
37ce32a7 |
970 | |
971 | Its value will be prepended to the switches used to invoke perl on |
b82fa0b7 |
972 | each test. For example, setting C<HARNESS_PERL_SWITCHES> to C<-W> will |
37ce32a7 |
973 | run all tests with all warnings enabled. |
974 | |
356733da |
975 | =item C<HARNESS_VERBOSE> |
37ce32a7 |
976 | |
356733da |
977 | If true, Test::Harness will output the verbose results of running |
e4fc8a1e |
978 | its tests. Setting C<$Test::Harness::verbose> will override this, |
979 | or you can use the C<-v> switch in the F<prove> utility. |
37ce32a7 |
980 | |
981 | =back |
0a931e4a |
982 | |
b82fa0b7 |
983 | =head1 EXAMPLE |
984 | |
985 | Here's how Test::Harness tests itself |
986 | |
987 | $ cd ~/src/devel/Test-Harness |
988 | $ perl -Mblib -e 'use Test::Harness qw(&runtests $verbose); |
989 | $verbose=0; runtests @ARGV;' t/*.t |
990 | Using /home/schwern/src/devel/Test-Harness/blib |
991 | t/base..............ok |
992 | t/nonumbers.........ok |
993 | t/ok................ok |
994 | t/test-harness......ok |
995 | All tests successful. |
996 | Files=4, Tests=24, 2 wallclock secs ( 0.61 cusr + 0.41 csys = 1.02 CPU) |
f19ae7a7 |
997 | |
cb1a09d0 |
998 | =head1 SEE ALSO |
999 | |
42d29bac |
1000 | The included F<prove> utility for running test scripts from the command line, |
b82fa0b7 |
1001 | L<Test> and L<Test::Simple> for writing test scripts, L<Benchmark> for |
3c87ea76 |
1002 | the underlying timing routines, and L<Devel::Cover> for test coverage |
b82fa0b7 |
1003 | analysis. |
c07a80fd |
1004 | |
b82fa0b7 |
1005 | =head1 TODO |
1006 | |
1007 | Provide a way of running tests quietly (ie. no printing) for automated |
1008 | validation of tests. This will probably take the form of a version |
1009 | of runtests() which rather than printing its output returns raw data |
356733da |
1010 | on the state of the tests. (Partially done in Test::Harness::Straps) |
b82fa0b7 |
1011 | |
cf2ab31a |
1012 | Document the format. |
1013 | |
b82fa0b7 |
1014 | Fix HARNESS_COMPILE_TEST without breaking its core usage. |
1015 | |
1016 | Figure a way to report test names in the failure summary. |
37ce32a7 |
1017 | |
b82fa0b7 |
1018 | Rework the test summary so long test names are not truncated as badly. |
308957f5 |
1019 | (Partially done with new skip test styles) |
b82fa0b7 |
1020 | |
b82fa0b7 |
1021 | Add option for coverage analysis. |
1022 | |
cf2ab31a |
1023 | Trap STDERR. |
1024 | |
1025 | Implement Straps total_results() |
1026 | |
1027 | Remember exit code |
1028 | |
1029 | Completely redo the print summary code. |
1030 | |
1031 | Implement Straps callbacks. (experimentally implemented) |
1032 | |
1033 | Straps->analyze_file() not taint clean, don't know if it can be |
1034 | |
1035 | Fix that damned VMS nit. |
1036 | |
1037 | HARNESS_TODOFAIL to display TODO failures |
1038 | |
1039 | Add a test for verbose. |
1040 | |
1041 | Change internal list of test results to a hash. |
1042 | |
1043 | Fix stats display when there's an overrun. |
1044 | |
1045 | Fix so perls with spaces in the filename work. |
1046 | |
b82fa0b7 |
1047 | Keeping whittling away at _run_all_tests() |
1048 | |
b82fa0b7 |
1049 | Clean up how the summary is printed. Get rid of those damned formats. |
cb1a09d0 |
1050 | |
1051 | =head1 BUGS |
1052 | |
356733da |
1053 | HARNESS_COMPILE_TEST currently assumes it's run from the Perl source |
b82fa0b7 |
1054 | directory. |
1055 | |
e4fc8a1e |
1056 | Please use the CPAN bug ticketing system at L<http://rt.cpan.org/>. |
1057 | You can also mail bugs, fixes and enhancements to |
3c87ea76 |
1058 | C<< <bug-test-harness at rt.cpan.org> >>.
e4fc8a1e |
1059 | |
1060 | =head1 AUTHORS |
1061 | |
3c87ea76 |
1062 | Either Tim Bunce or Andreas Koenig, we don't know. What we know for |
1063 | sure is, that it was inspired by Larry Wall's TEST script that came |
1064 | with perl distributions for ages. Numerous anonymous contributors |
1065 | exist. Andreas Koenig held the torch for many years, and then |
1066 | Michael G Schwern. |
1067 | |
1068 | Current maintainer is Andy Lester C<< <andy at petdance.com> >>. |
e4fc8a1e |
1069 | |
1070 | =head1 COPYRIGHT |
1071 | |
3c87ea76 |
1072 | Copyright 2002-2005 |
1073 | by Michael G Schwern C<< <schwern at pobox.com> >>, |
1074 | Andy Lester C<< <andy at petdance.com> >>. |
e4fc8a1e |
1075 | |
1076 | This program is free software; you can redistribute it and/or |
1077 | modify it under the same terms as Perl itself. |
1078 | |
1079 | See L<http://www.perl.com/perl/misc/Artistic.html>. |
1080 | |
cb1a09d0 |
1081 | =cut |