# -*- Mode: cperl; cperl-indent-level: 4 -*-
package Test::Harness;

use 5.005_64;
use Exporter;
use Benchmark;
use Config;
use strict;

our($VERSION, $Verbose, $Switches, $Have_Devel_Corestack, $Curtest,
    $Columns, $verbose, $switches,
    @ISA, @EXPORT, @EXPORT_OK
   );

# Backwards compatibility for exportable variable names.
*verbose  = \$Verbose;
*switches = \$Switches;

$Have_Devel_Corestack = 0;

$VERSION = "1.1702";

$ENV{HARNESS_ACTIVE} = 1;

# Some experimental versions of OS/2 build have broken $?
my $Ignore_Exitcode = $ENV{HARNESS_IGNORE_EXITCODE};

my $Files_In_Dir = $ENV{HARNESS_FILELEAK_IN_DIR};


@ISA = ('Exporter');
@EXPORT    = qw(&runtests);
@EXPORT_OK = qw($verbose $switches);

$Verbose  = 0;
$Switches = "-w";
$Columns  = $ENV{HARNESS_COLUMNS} || $ENV{COLUMNS} || 80;


# Return all entries of a directory (used to detect leaked files).
sub globdir { opendir DIRH, shift; my @f = readdir DIRH; closedir DIRH; @f }

sub runtests {
    my(@tests) = @_;

    my($tot, $failedtests) = _runtests(@tests);
    _show_results($tot, $failedtests);

    return ($tot->{bad} == 0 && $tot->{max}) ;
}


sub _runtests {
    my(@tests) = @_;
    local($|) = 1;
    my(%failedtests);

    # Test-wide totals.
    my(%tot) = (
                bonus       => 0,
                max         => 0,
                ok          => 0,
                files       => 0,
                bad         => 0,
                good        => 0,
                tests       => scalar @tests,
                sub_skipped => 0,
                skipped     => 0,
                bench       => 0
               );

    # pass -I flags to children
    my $old5lib = $ENV{PERL5LIB};

    # VMS has a 255-byte limit on the length of %ENV entries, so
    # toss the ones that involve perl_root, the install location
    # for VMS
    my $new5lib;
    if ($^O eq 'VMS') {
        $new5lib = join($Config{path_sep}, grep {!/perl_root/i;} @INC);
        $Switches =~ s/-(\S*[A-Z]\S*)/"-$1"/g;
    }
    else {
        $new5lib = join($Config{path_sep}, @INC);
    }

    local($ENV{'PERL5LIB'}) = $new5lib;

    my @dir_files = globdir $Files_In_Dir if defined $Files_In_Dir;
    my $t_start = new Benchmark;

    foreach my $test (@tests) {
        my $te = $test;
        chop($te);      # XXX chomp?

        if ($^O eq 'VMS') { $te =~ s/^.*\.t\./[.t./s; }
        my $blank = (' ' x 77);
        my $leader = "$te" . '.' x (20 - length($te));
        my $ml = "";
        $ml = "\r$blank\r$leader"
            if -t STDOUT and not $ENV{HARNESS_NOTTY} and not $Verbose;
        print $leader;

        my $s = _set_switches($test);

        my $cmd = ($ENV{'HARNESS_COMPILE_TEST'})
                      ? "./perl -I../lib ../utils/perlcc $test "
                        . "-run 2>> ./compilelog |"
                      : "$^X $s $test|";
        $cmd = "MCR $cmd" if $^O eq 'VMS';
        open(my $fh, $cmd) or print "can't run $test. $!\n";

        # state of the current test.
        my %test = (
                    ok          => 0,
                    next        => 0,
                    max         => 0,
                    failed      => [],
                    todo        => {},
                    bonus       => 0,
                    skipped     => 0,
                    skip_reason => undef,
                    ml          => $ml,
                   );

        my($seen_header, $tests_seen) = (0,0);
        while (<$fh>) {
            if( _parse_header($_, \%test, \%tot) ) {
                warn "Test header seen twice!\n" if $seen_header;

                $seen_header = 1;

                warn "1..M can only appear at the beginning or end of tests\n"
                    if $tests_seen && $test{max} < $tests_seen;
            }
            elsif( _parse_test_line($_, \%test, \%tot) ) {
                $tests_seen++;
            }
            # else, ignore it.
        }

        my($estatus, $wstatus) = _close_fh($fh);

        if ($wstatus) {
            $failedtests{$test} = _dubious_return(\%test, \%tot,
                                                  $estatus, $wstatus);
        }
        elsif ($test{ok} == $test{max} && $test{next} == $test{max}+1) {
            if ($test{max} and $test{skipped} + $test{bonus}) {
                my @msg;
                push(@msg, "$test{skipped}/$test{max} skipped: $test{skip_reason}")
                    if $test{skipped};
                push(@msg, "$test{bonus}/$test{max} unexpectedly succeeded")
                    if $test{bonus};
                print "$test{ml}ok, ".join(', ', @msg)."\n";
            } elsif ($test{max}) {
                print "$test{ml}ok\n";
            } elsif (defined $test{skip_reason}) {
                print "skipped: $test{skip_reason}\n";
                $tot{skipped}++;
            } else {
                print "skipped test on this platform\n";
                $tot{skipped}++;
            }
            $tot{good}++;
        } elsif ($test{max}) {
            if ($test{next} <= $test{max}) {
                push @{$test{failed}}, $test{next}..$test{max};
            }
            if (@{$test{failed}}) {
                my ($txt, $canon) = canonfailed($test{max},$test{skipped},
                                                @{$test{failed}});
                print "$test{ml}$txt";
                $failedtests{$test} = { canon   => $canon,
                                        max     => $test{max},
                                        failed  => scalar @{$test{failed}},
                                        name    => $test,
                                        percent => 100*(scalar @{$test{failed}})/$test{max},
                                        estat   => '',
                                        wstat   => '',
                                      };
            } else {
                print "Don't know which tests failed: got $test{ok} ok, ".
                      "expected $test{max}\n";
                $failedtests{$test} = { canon   => '??',
                                        max     => $test{max},
                                        failed  => '??',
                                        name    => $test,
                                        percent => undef,
                                        estat   => '',
                                        wstat   => '',
                                      };
            }
            $tot{bad}++;
        } elsif ($test{next} == 0) {
            print "FAILED before any test output arrived\n";
            $tot{bad}++;
            $failedtests{$test} = { canon   => '??',
                                    max     => '??',
                                    failed  => '??',
                                    name    => $test,
                                    percent => undef,
                                    estat   => '',
                                    wstat   => '',
                                  };
        }
        $tot{sub_skipped} += $test{skipped};

        if (defined $Files_In_Dir) {
            my @new_dir_files = globdir $Files_In_Dir;
            if (@new_dir_files != @dir_files) {
                my %f;
                @f{@new_dir_files} = (1) x @new_dir_files;
                delete @f{@dir_files};
                my @f = sort keys %f;
                print "LEAKED FILES: @f\n";
                @dir_files = @new_dir_files;
            }
        }
    }
    $tot{bench} = timediff(new Benchmark, $t_start);

    if ($^O eq 'VMS') {
        if (defined $old5lib) {
            $ENV{PERL5LIB} = $old5lib;
        } else {
            delete $ENV{PERL5LIB};
        }
    }

    return(\%tot, \%failedtests);
}


sub _show_results {
    my($tot, $failedtests) = @_;

    my $pct;
    my $bonusmsg = _bonusmsg($tot);

    if ($tot->{bad} == 0 && $tot->{max}) {
        print "All tests successful$bonusmsg.\n";
    } elsif ($tot->{tests}==0){
        die "FAILED--no tests were run for some reason.\n";
    } elsif ($tot->{max} == 0) {
        my $blurb = $tot->{tests}==1 ? "script" : "scripts";
        die "FAILED--$tot->{tests} test $blurb could be run, ".
            "alas--no output ever seen\n";
    } else {
        $pct = sprintf("%.2f", $tot->{good} / $tot->{tests} * 100);
        my $subpct = sprintf " %d/%d subtests failed, %.2f%% okay.",
                             $tot->{max} - $tot->{ok}, $tot->{max},
                             100*$tot->{ok}/$tot->{max};

        my($fmt_top, $fmt) = _create_fmts($failedtests);

        # Now write to formats
        for my $script (sort keys %$failedtests) {
            $Curtest = $failedtests->{$script};
            write;
        }
        if ($tot->{bad}) {
            $bonusmsg =~ s/^,\s*//;
            print "$bonusmsg.\n" if $bonusmsg;
            die "Failed $tot->{bad}/$tot->{tests} test scripts, $pct% okay.".
                "$subpct\n";
        }
    }

    printf("Files=%d, Tests=%d, %s\n",
           $tot->{files}, $tot->{max}, timestr($tot->{bench}, 'nop'));
}


sub _parse_header {
    my($line, $test, $tot) = @_;

    my $is_header = 0;

    print $line if $Verbose;

    # 1..10 todo 4 7 10;
    if ($line =~ /^1\.\.([0-9]+) todo([\d\s]+);?/i) {
        $test->{max} = $1;
        for (split(/\s+/, $2)) { $test->{todo}{$_} = 1; }

        $tot->{max} += $test->{max};
        $tot->{files}++;

        $is_header = 1;
    }
    # 1..10
    # 1..0 # skip Why? Because I said so!
    elsif ($line =~ /^1\.\.([0-9]+)
                     (\s*\#\s*[Ss]kip\S*(?>\s+) (.+))?
                    /x
          )
    {
        $test->{max} = $1;
        $tot->{max} += $test->{max};
        $tot->{files}++;
        $test->{next} = 1 unless $test->{next};
        $test->{skip_reason} = $3 if not $test->{max} and defined $3;

        $is_header = 1;
    }
    else {
        $is_header = 0;
    }

    return $is_header;
}


sub _parse_test_line {
    my($line, $test, $tot) = @_;

    if ($line =~ /^(not\s+)?ok\b/i) {
        my $this = $test->{next} || 1;
        # "not ok 23"
        if ($line =~ /^not ok\s*(\d*)/){ # test failed
            $this = $1 if length $1 and $1 > 0;
            print "$test->{ml}NOK $this" if $test->{ml};
            if (!$test->{todo}{$this}) {
                push @{$test->{failed}}, $this;
            } else {
                $test->{ok}++;
                $tot->{ok}++;
            }
        }
        # "ok 23 # skip (you're not cleared for that)"
        elsif ($line =~ /^ok\s*(\d*)\ *
                         (\s*\#\s*[Ss]kip\S*(?:(?>\s+)(.+))?)?
                        /x) # test skipped
        {
            $this = $1 if length $1 and $1 > 0;
            print "$test->{ml}ok $this/$test->{max}" if $test->{ml};
            $test->{ok}++;
            $tot->{ok}++;
            $test->{skipped}++ if defined $2;
            my $reason;
            $reason = 'unknown reason' if defined $2;
            $reason = $3 if defined $3;
            if (defined $reason and defined $test->{skip_reason}) {
                # print "was: '$skip_reason' new '$reason'\n";
                $test->{skip_reason} = 'various reasons'
                    if $test->{skip_reason} ne $reason;
            } elsif (defined $reason) {
                $test->{skip_reason} = $reason;
            }
            $test->{bonus}++, $tot->{bonus}++ if $test->{todo}{$this};
        }
        # XXX ummm... dunno
        elsif ($line =~ /^ok\s*(\d*)\s*\#([^\r]*)$/) { # XXX multiline ok?
            $this = $1 if $1 > 0;
            print "$test->{ml}ok $this/$test->{max}" if $test->{ml};
            $test->{ok}++;
            $tot->{ok}++;
        }
        else {
            # an ok or not ok not matching the 3 cases above...
            # just ignore it for compatibility with TEST: return false
            # so the caller's read loop simply moves on to the next line.
            return;
        }

        if ($this > $test->{next}) {
            # print "Test output counter mismatch [test $this]\n";
            # no need to warn probably
            push @{$test->{failed}}, $test->{next}..$this-1;
        }
        elsif ($this < $test->{next}) {
            # we have seen more "ok" lines than the number suggests
            print "Confused test output: test $this answered after ".
                  "test ", $test->{next}-1, "\n";
            $test->{next} = $this;
        }
        $test->{next} = $this + 1;

    }
    elsif ($line =~ /^Bail out!\s*(.*)/i) { # magic words
        die "FAILED--Further testing stopped" .
            ($1 ? ": $1\n" : ".\n");
    }
}


sub _bonusmsg {
    my($tot) = @_;

    my $bonusmsg = '';
    $bonusmsg = (" ($tot->{bonus} subtest".($tot->{bonus} > 1 ? 's' : '').
                 " UNEXPECTEDLY SUCCEEDED)")
        if $tot->{bonus};

    if ($tot->{skipped}) {
        $bonusmsg .= ", $tot->{skipped} test"
                     . ($tot->{skipped} != 1 ? 's' : '');
        if ($tot->{sub_skipped}) {
            $bonusmsg .= " and $tot->{sub_skipped} subtest"
                         . ($tot->{sub_skipped} != 1 ? 's' : '');
        }
        $bonusmsg .= ' skipped';
    }
    elsif ($tot->{sub_skipped}) {
        $bonusmsg .= ", $tot->{sub_skipped} subtest"
                     . ($tot->{sub_skipped} != 1 ? 's' : '')
                     . " skipped";
    }

    return $bonusmsg;
}

# VMS has some subtle nastiness with closing the test files.
sub _close_fh {
    my($fh) = shift;

    close($fh); # must close to reap child resource values

    my $wstatus = $Ignore_Exitcode ? 0 : $?;    # Can trust $? ?
    my $estatus;
    $estatus = ($^O eq 'VMS'
                  ? eval 'use vmsish "status"; $estatus = $?'
                  : $wstatus >> 8);

    return($estatus, $wstatus);
}


# Set up the command-line switches to run perl as.
sub _set_switches {
    my($test) = shift;

    open(my $fh, $test) or print "can't open $test. $!\n";
    my $first = <$fh>;
    my $s = $Switches;
    $s .= " $ENV{'HARNESS_PERL_SWITCHES'}"
        if exists $ENV{'HARNESS_PERL_SWITCHES'};
    $s .= join " ", q[ "-T"], map {qq["-I$_"]} @INC
        if $first =~ /^#!.*\bperl.*-\w*T/;

    close($fh) or print "can't close $test. $!\n";

    return $s;
}


# Test program go boom.
sub _dubious_return {
    my($test, $tot, $estatus, $wstatus) = @_;
    my ($failed, $canon, $percent) = ('??', '??');

    printf "$test->{ml}dubious\n\tTest returned status $estatus ".
           "(wstat %d, 0x%x)\n",
           $wstatus,$wstatus;
    print "\t\t(VMS status is $estatus)\n" if $^O eq 'VMS';

    if (corestatus($wstatus)) { # until we have a wait module
        if ($Have_Devel_Corestack) {
            Devel::CoreStack::stack($^X);
        } else {
            print "\ttest program seems to have generated a core\n";
        }
    }

    $tot->{bad}++;

    if ($test->{max}) {
        if ($test->{next} == $test->{max} + 1 and not @{$test->{failed}}) {
            print "\tafter all the subtests completed successfully\n";
            $percent = 0;
            $failed = 0;        # But we do not set $canon!
        }
        else {
            push @{$test->{failed}}, $test->{next}..$test->{max};
            $failed = @{$test->{failed}};
            (my $txt, $canon) = canonfailed($test->{max},$test->{skipped},@{$test->{failed}});
            $percent = 100*(scalar @{$test->{failed}})/$test->{max};
            print "DIED. ",$txt;
        }
    }

    return { canon  => $canon,   max     => $test->{max} || '??',
             failed => $failed,
             name   => $test,    percent => $percent,
             estat  => $estatus, wstat   => $wstatus,
           };
}


sub _garbled_output {
    my($gibberish) = shift;
    warn "Confusing test output: '$gibberish'\n";
}


sub _create_fmts {
    my($failedtests) = @_;

    my $failed_str = "Failed Test";
    my $middle_str = " Status Wstat Total Fail Failed ";
    my $list_str = "List of Failed";

    # Figure out our longest name string for formatting purposes.
    my $max_namelen = length($failed_str);
    foreach my $script (keys %$failedtests) {
        my $namelen = length $failedtests->{$script}->{name};
        $max_namelen = $namelen if $namelen > $max_namelen;
    }

    my $list_len = $Columns - length($middle_str) - $max_namelen;
    if ($list_len < length($list_str)) {
        $list_len = length($list_str);
        $max_namelen = $Columns - length($middle_str) - $list_len;
        if ($max_namelen < length($failed_str)) {
            $max_namelen = length($failed_str);
            $Columns = $max_namelen + length($middle_str) + $list_len;
        }
    }

    my $fmt_top = "format STDOUT_TOP =\n"
                  . sprintf("%-${max_namelen}s", $failed_str)
                  . $middle_str
                  . $list_str . "\n"
                  . "-" x $Columns
                  . "\n.\n";

    my $fmt = "format STDOUT =\n"
              . "@" . "<" x ($max_namelen - 1)
              . " @>> @>>>> @>>>> @>>> ^##.##% "
              . "^" . "<" x ($list_len - 1) . "\n"
              . '{ $Curtest->{name}, $Curtest->{estat},'
              . ' $Curtest->{wstat}, $Curtest->{max},'
              . ' $Curtest->{failed}, $Curtest->{percent},'
              . ' $Curtest->{canon}'
              . "\n}\n"
              . "~~" . " " x ($Columns - $list_len - 2) . "^"
              . "<" x ($list_len - 1) . "\n"
              . '$Curtest->{canon}'
              . "\n.\n";

    eval $fmt_top;
    die $@ if $@;
    eval $fmt;
    die $@ if $@;

    return($fmt_top, $fmt);
}


my $tried_devel_corestack;

# Check whether the wait status indicates a core dump and, on the first
# call only, see whether Devel::CoreStack is available for a stack trace.
sub corestatus {
    my($st) = @_;

    eval {require 'wait.ph'};
    my $ret = defined &WCOREDUMP ? WCOREDUMP($st) : $st & 0200;

    eval { require Devel::CoreStack; $Have_Devel_Corestack++ }
        unless $tried_devel_corestack++;

    $ret;
}

# Build a human-readable summary of the failed tests plus a canonical
# list of failed test numbers collapsed into ranges (e.g. "3-5, 9").
sub canonfailed ($@) {
    my($max,$skipped,@failed) = @_;
    my %seen;
    @failed = sort {$a <=> $b} grep !$seen{$_}++, @failed;
    my $failed = @failed;
    my @result = ();
    my @canon = ();
    my $min;
    my $last = $min = shift @failed;
    my $canon;
    if (@failed) {
        for (@failed, $failed[-1]) { # don't forget the last one
            if ($_ > $last+1 || $_ == $last) {
                if ($min == $last) {
                    push @canon, $last;
                } else {
                    push @canon, "$min-$last";
                }
                $min = $_;
            }
            $last = $_;
        }
        local $" = ", ";
        push @result, "FAILED tests @canon\n";
        $canon = "@canon";
    } else {
        push @result, "FAILED test $last\n";
        $canon = $last;
    }

    push @result, "\tFailed $failed/$max tests, ";
    push @result, sprintf("%.2f",100*(1-$failed/$max)), "% okay";
    my $ender = 's' x ($skipped > 1);
    my $good = $max - $failed - $skipped;
    my $goodper = sprintf("%.2f",100*($good/$max));
    push @result, " (-$skipped skipped test$ender: $good okay, ".
                  "$goodper%)"
        if $skipped;
    push @result, "\n";
    my $txt = join "", @result;
    ($txt, $canon);
}

1;
__END__

=head1 NAME

Test::Harness - run perl standard test scripts with statistics

=head1 SYNOPSIS

    use Test::Harness;

    runtests(@tests);

=head1 DESCRIPTION

(By using the Test module, you can write test scripts without
knowing the exact output this module expects. However, if you need to
know the specifics, read on!)

Perl test scripts print to standard output C<"ok N"> for each single
test, where C<N> is an increasing sequence of integers. The first line
output by a standard test script is C<"1..M">, with C<M> being the
number of tests that should be run within the test script.
Test::Harness::runtests(@tests) runs all the test scripts named as
arguments and checks standard output for the expected C<"ok N">
strings.

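For example, a tiny test script and a driver program that feeds it to
the harness could look like the following sketch (the file name
F<t/simple.t> and the driver are made up purely for illustration):

    # t/simple.t -- a made-up test script
    print "1..2\n";
    print "ok 1\n";
    print "not ok 2\n";

    # driver program
    use Test::Harness;
    runtests("t/simple.t");
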
After all tests have been performed, runtests() prints some
performance statistics that are computed by the Benchmark module.

=head2 The test script output

=over 4

=item B<1..M>

This header tells how many tests there will be. It should be the
first line output by your test program (but it's okay if it's preceded
by comments).

In certain instances, you may not know how many tests you will
ultimately be running. In this case, it is permitted (but not
encouraged) for the 1..M header to appear as the B<last> line output
by your test (again, it can be followed by further comments). But we
strongly encourage you to put it first.

Under B<no> circumstances should 1..M appear in the middle of your
output or more than once.

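For instance, both of the following output sequences are accepted,
although the header-last form is only meant for scripts that do not
know their test count until the end:

    # header first (preferred)
    1..2
    ok 1
    ok 2

    # header last (tolerated)
    ok 1
    ok 2
    1..2
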
=item B<'ok', 'not ok'. Ok?>

Any output from the test script to standard error is ignored and
bypassed, and will thus be seen by the user. Lines written to standard
output containing C</^(not\s+)?ok\b/> are interpreted as feedback for
runtests(). All other lines are discarded.

C</^not ok/> indicates a failed test. C</^ok/> is a successful test.

=item B<test numbers>

Perl normally expects the 'ok' or 'not ok' to be followed by a test
number. It is tolerated if the test numbers after 'ok' are omitted.
In this case Test::Harness temporarily maintains its own counter until
the script supplies test numbers again. So the following test script

    print <<END;
    1..6
    not ok
    ok
    not ok
    ok
    ok
    END

will generate

    FAILED tests 1, 3, 6
    Failed 3/6 tests, 50.00% okay

=item B<$Test::Harness::verbose>

The global variable $Test::Harness::verbose is exportable and can be
used to let runtests() display the standard output of the script
without altering the behavior otherwise.

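A driver script might enable it like this (a minimal sketch):

    use Test::Harness qw(&runtests $verbose);
    $verbose = 1;        # echo each test script's standard output
    runtests(@ARGV);
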
=item B<$Test::Harness::switches>

The global variable $Test::Harness::switches is exportable and can be
used to set perl command line options used for running the test
script(s). The default value is C<-w>.

701
fac76ed7 702If the standard output line contains substring C< # Skip> (with
703variations in spacing and case) after C<ok> or C<ok NUMBER>, it is
9c5c68c8 704counted as a skipped test. If the whole testscript succeeds, the
705count of skipped tests is included in the generated output.
e1194749 706
707C<Test::Harness> reports the text after C< # Skip\S*\s+> as a reason
708for skipping. Similarly, one can include a similar explanation in a
9c5c68c8 709C<1..0> line emitted if the test script is skipped completely:
45c0de28 710
711 1..0 # Skipped: no leverage found
712
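An individual skipped test might thus show up in the output as
something like (the reason text is invented):

    ok 23 # Skip no database available
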
=item B<Bail out!>

As an emergency measure, a test script can decide that further tests
are useless (e.g. missing dependencies) and testing should stop
immediately. In that case the test script prints the magic words

    Bail out!

to standard output. Any message after these words will be displayed by
C<Test::Harness> as the reason why testing is stopped.

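For example, a test script might print (the message text is invented):

    Bail out! Couldn't connect to the test database.
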
=item B<Comments>

Additional comments may be put into the testing output on their own
lines. Comment lines should begin with a '#'; Test::Harness will
ignore them.

    ok 1
    # Life is good, the sun is shining, RAM is cheap.
    not ok 2
    # got 'Bush' expected 'Gore'

=back

=head1 EXPORT

C<&runtests> is exported by Test::Harness by default.

C<$verbose> and C<$switches> are exported upon request.

=head1 DIAGNOSTICS

=over 4

=item C<All tests successful.\nFiles=%d, Tests=%d, %s>

If all tests are successful, some statistics about the performance are
printed.

=item C<FAILED tests %s\n\tFailed %d/%d tests, %.2f%% okay.>

For any single script that has failing subtests, statistics like the
above are printed.

=item C<Test returned status %d (wstat %d)>

For scripts that return a non-zero exit status, both C<$? E<gt>E<gt> 8>
and C<$?> are printed in a message similar to the above.

=item C<Failed 1 test, %.2f%% okay. %s>

=item C<Failed %d/%d tests, %.2f%% okay. %s>

If not all tests were successful, the script dies with one of the
above messages.

=item C<FAILED--Further testing stopped%s>

If a single subtest decides that further testing will not make sense,
the script dies with this message.

=back

=head1 ENVIRONMENT

Setting C<HARNESS_IGNORE_EXITCODE> makes the harness ignore the exit
status of child processes.

Setting C<HARNESS_NOTTY> to a true value forces the harness to behave
as though STDOUT were not a console. You may need to set this if you
don't want the harness to print its more frequent progress updates
using carriage returns. Some consoles may not handle carriage returns
properly (which results in somewhat messy output).

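Since this variable is checked while the tests run, a driver script can
simply set it before calling runtests() (a minimal sketch):

    use Test::Harness;
    $ENV{HARNESS_NOTTY} = 1;    # plain, line-oriented progress output
    runtests(@ARGV);
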
Setting C<HARNESS_COMPILE_TEST> to a true value will make the harness
attempt to compile the test using C<perlcc> before running it.

If C<HARNESS_FILELEAK_IN_DIR> is set to the name of a directory, the
harness will check after each test whether new files appeared in that
directory, and report them as

    LEAKED FILES: scr.tmp 0 my.db

If the directory name is relative, it is taken with respect to the
current directory at the moment runtests() was called. Putting an
absolute path into C<HARNESS_FILELEAK_IN_DIR> may give more
predictable results.

The value of C<HARNESS_PERL_SWITCHES> will be prepended to the
switches used to invoke perl on each test. For example, setting
C<HARNESS_PERL_SWITCHES> to "-W" will run all tests with all
warnings enabled.

If C<HARNESS_COLUMNS> is set, this value will be used as the width of
the terminal. If it is not set, the width defaults to C<COLUMNS>, and
if that is not set either, to 80. Note that users of Bourne-sh based
shells will need to C<export COLUMNS> for this module to use that
variable.

The harness sets C<HARNESS_ACTIVE> before executing the individual
tests. This allows the tests to determine if they are being executed
through the harness or by some other means.

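A test script could check for it, for instance, to skip interactive
prompts when run under the harness (a hypothetical use, shown only as
a sketch):

    if ($ENV{HARNESS_ACTIVE}) {
        print "# running under Test::Harness, skipping prompts\n";
    }
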
=head1 SEE ALSO

L<Test> for writing test scripts and also L<Benchmark> for the
underlying timing routines.

=head1 AUTHORS

Either Tim Bunce or Andreas Koenig, we don't know. What we know for
sure is that it was inspired by Larry Wall's TEST script that came
with perl distributions for ages. Numerous anonymous contributors
exist. The current maintainer is Andreas Koenig.

=head1 BUGS

Test::Harness uses $^X to determine the perl binary to run the tests
with. Test scripts running via the shebang (C<#!>) line may not be
portable because $^X is not consistent for shebang scripts across
platforms. This is no problem when Test::Harness is run with an
absolute path to the perl binary or when $^X can be found in the path.

=cut