-# -*- Mode: cperl; cperl-indent-level: 4 -*-
-
package Test::Harness;
require 5.00405;
-use Test::Harness::Straps;
-use Test::Harness::Assert;
-use Exporter;
-use Benchmark;
-use Config;
+
use strict;
+use constant IS_WIN32 => ( $^O =~ /^(MS)?Win32$/ );
+use constant IS_VMS => ( $^O eq 'VMS' );
+
+use TAP::Harness ();
+use TAP::Parser::Aggregator ();
+use TAP::Parser::Source::Perl ();
+use TAP::Parser::Utils qw( split_shell );
+
+use Config;
+use Exporter;
+
+# TODO: Emulate at least some of these
use vars qw(
- $VERSION
- @ISA @EXPORT @EXPORT_OK
- $Verbose $Switches $Debug
- $verbose $switches $debug
- $Columns
- $Timer
- $ML $Last_ML_Print
- $Strap
- $has_time_hires
+ $VERSION
+ @ISA @EXPORT @EXPORT_OK
+ $Verbose $Switches $Debug
+ $verbose $switches $debug
+ $Columns
+ $Color
+ $Directives
+ $Timer
+ $Strap
+ $has_time_hires
);
+# $ML $Last_ML_Print
+
BEGIN {
- eval "use Time::HiRes 'time'";
+ eval q{use Time::HiRes 'time'};
$has_time_hires = !$@;
}
=head1 VERSION
-Version 2.57_05
+Version 3.09
=cut
-$VERSION = "2.57_05";
-$VERSION = eval $VERSION;
+$VERSION = '3.09';
# Backwards compatibility for exportable variable names.
*verbose = *Verbose;
*switches = *Switches;
*debug = *Debug;
-$ENV{HARNESS_ACTIVE} = 1;
+$ENV{HARNESS_ACTIVE} = 1;
$ENV{HARNESS_VERSION} = $VERSION;
END {
+
# For VMS.
delete $ENV{HARNESS_ACTIVE};
delete $ENV{HARNESS_VERSION};
}
-my $Files_In_Dir = $ENV{HARNESS_FILELEAK_IN_DIR};
-
-$Strap = Test::Harness::Straps->new;
-
-sub strap { return $Strap };
-
-@ISA = ('Exporter');
+@ISA = ('Exporter');
@EXPORT = qw(&runtests);
@EXPORT_OK = qw(&execute_tests $verbose $switches);
-$Verbose = $ENV{HARNESS_VERBOSE} || 0;
-$Debug = $ENV{HARNESS_DEBUG} || 0;
-$Switches = "-w";
-$Columns = $ENV{HARNESS_COLUMNS} || $ENV{COLUMNS} || 80;
-$Columns--; # Some shells have trouble with a full line of text.
-$Timer = $ENV{HARNESS_TIMER} || 0;
+$Verbose = $ENV{HARNESS_VERBOSE} || 0;
+$Debug = $ENV{HARNESS_DEBUG} || 0;
+$Switches = '-w';
+$Columns = $ENV{HARNESS_COLUMNS} || $ENV{COLUMNS} || 80;
+$Columns--; # Some shells have trouble with a full line of text.
+$Timer = $ENV{HARNESS_TIMER} || 0;
+$Color = $ENV{HARNESS_COLOR} || 0;
=head1 SYNOPSIS
=head1 DESCRIPTION
-B<STOP!> If all you want to do is write a test script, consider
-using Test::Simple. Test::Harness is the module that reads the
-output from Test::Simple, Test::More and other modules based on
-Test::Builder. You don't need to know about Test::Harness to use
-those modules.
+Although, for historical reasons, the L<Test::Harness> distribution
+takes its name from this module it now exists only to provide
+L<TAP::Harness> with an interface that is somewhat backwards compatible
+with L<Test::Harness> 2.xx. If you're writing new code consider using
+L<TAP::Harness> directly instead.
-Test::Harness runs tests and expects output from the test in a
-certain format. That format is called TAP, the Test Anything
-Protocol. It is defined in L<Test::Harness::TAP>.
+Emulation is provided for C<runtests> and C<execute_tests> but the
+pluggable 'Straps' interface that previous versions of L<Test::Harness>
+supported is not reproduced here. Straps is now available as a
+stand-alone module: L<Test::Harness::Straps>.
-C<Test::Harness::runtests(@tests)> runs all the testscripts named
-as arguments and checks standard output for the expected strings
-in TAP format.
+See L<TAP::Parser>, L<TAP::Harness> for the main documentation for this
+distribution.
-The F<prove> utility is a thin wrapper around Test::Harness.
+=head1 FUNCTIONS
-=head2 Taint mode
+The following functions are available.
-Test::Harness will honor the C<-T> or C<-t> in the #! line on your
-test files. So if you begin a test with:
+=head2 runtests( @test_files )
- #!perl -T
+This runs all the given I<@test_files> and divines whether they passed
+or failed based on their output to STDOUT (details above). It prints
+out each individual test which failed along with a summary report and
+how long it all took.
-the test will be run with taint mode on.
+It returns true if everything was ok. Otherwise it will C<die()> with
+one of the messages in the DIAGNOSTICS section.
-=head2 Configuration variables.
+=cut
-These variables can be used to configure the behavior of
-Test::Harness. They are exported on request.
+sub _has_taint {
+ my $test = shift;
+ return TAP::Parser::Source::Perl->get_taint(
+ TAP::Parser::Source::Perl->shebang($test) );
+}
-=over 4
+sub _aggregate {
+ my ( $harness, $aggregate, @tests ) = @_;
-=item C<$Test::Harness::Verbose>
+ # Don't propagate to our children
+ local $ENV{HARNESS_OPTIONS};
-The package variable C<$Test::Harness::Verbose> is exportable and can be
-used to let C<runtests()> display the standard output of the script
-without altering the behavior otherwise. The F<prove> utility's C<-v>
-flag will set this.
+ if (IS_VMS) {
-=item C<$Test::Harness::switches>
+ # Jiggery pokery doesn't appear to work on VMS - so disable it
+ # pending investigation.
+ _aggregate_tests( $harness, $aggregate, @tests );
+ }
+ else {
+ my $path_sep = $Config{path_sep};
+ my $path_pat = qr{$path_sep};
+ my @extra_inc = _filtered_inc();
+
+ # Supply -I switches in taint mode
+ $harness->callback(
+ parser_args => sub {
+ my ( $args, $test ) = @_;
+ if ( _has_taint( $test->[0] ) ) {
+ push @{ $args->{switches} }, map {"-I$_"} _filtered_inc();
+ }
+ }
+ );
-The package variable C<$Test::Harness::switches> is exportable and can be
-used to set perl command line options used for running the test
-script(s). The default value is C<-w>. It overrides C<HARNESS_SWITCHES>.
+ my $previous = $ENV{PERL5LIB};
+ local $ENV{PERL5LIB};
-=item C<$Test::Harness::Timer>
+ if ($previous) {
+ push @extra_inc, split( $path_pat, $previous );
+ }
-If set to true, and C<Time::HiRes> is available, print elapsed seconds
-after each test file.
+ if (@extra_inc) {
+ $ENV{PERL5LIB} = join( $path_sep, @extra_inc );
+ }
-=back
+ _aggregate_tests( $harness, $aggregate, @tests );
+ }
+}
+sub _aggregate_tests {
+ my ( $harness, $aggregate, @tests ) = @_;
+ $aggregate->start();
+ $harness->aggregate_tests( $aggregate, @tests );
+ $aggregate->stop();
-=head2 Failure
+}
-When tests fail, analyze the summary report:
+sub runtests {
+ my @tests = @_;
- t/base..............ok
- t/nonumbers.........ok
- t/ok................ok
- t/test-harness......ok
- t/waterloo..........dubious
- Test returned status 3 (wstat 768, 0x300)
- DIED. FAILED tests 1, 3, 5, 7, 9, 11, 13, 15, 17, 19
- Failed 10/20 tests, 50.00% okay
- Failed Test Stat Wstat Total Fail Failed List of Failed
- -----------------------------------------------------------------------
- t/waterloo.t 3 768 20 10 50.00% 1 3 5 7 9 11 13 15 17 19
- Failed 1/5 test scripts, 80.00% okay. 10/44 subtests failed, 77.27% okay.
+ # shield against -l
+ local ( $\, $, );
-Everything passed but F<t/waterloo.t>. It failed 10 of 20 tests and
-exited with non-zero status indicating something dubious happened.
+ my $harness = _new_harness();
+ my $aggregate = TAP::Parser::Aggregator->new();
-The columns in the summary report mean:
+ _aggregate( $harness, $aggregate, @tests );
-=over 4
+ $harness->formatter->summary($aggregate);
-=item B<Failed Test>
+ my $total = $aggregate->total;
+ my $passed = $aggregate->passed;
+ my $failed = $aggregate->failed;
-The test file which failed.
+ my @parsers = $aggregate->parsers;
-=item B<Stat>
+ my $num_bad = 0;
+ for my $parser (@parsers) {
+ $num_bad++ if $parser->has_problems;
+ }
-If the test exited with non-zero, this is its exit status.
+ die(sprintf(
+ "Failed %d/%d test programs. %d/%d subtests failed.\n",
+ $num_bad, scalar @parsers, $failed, $total
+ )
+ ) if $num_bad;
-=item B<Wstat>
+ return $total && $total == $passed;
+}
-The wait status of the test.
+sub _canon {
+ my @list = sort { $a <=> $b } @_;
+ my @ranges = ();
+ my $count = scalar @list;
+ my $pos = 0;
+
+ while ( $pos < $count ) {
+ my $end = $pos + 1;
+ $end++ while $end < $count && $list[$end] <= $list[ $end - 1 ] + 1;
+ push @ranges, ( $end == $pos + 1 )
+ ? $list[$pos]
+ : join( '-', $list[$pos], $list[ $end - 1 ] );
+ $pos = $end;
+ }
-=item B<Total>
+ return join( ' ', @ranges );
+}
-Total number of tests expected to run.
+sub _new_harness {
+ my $sub_args = shift || {};
-=item B<Fail>
+ my ( @lib, @switches );
+ for my $opt (
+ split_shell( $Switches, $ENV{HARNESS_PERL_SWITCHES} ) )
+ {
+ if ( $opt =~ /^ -I (.*) $ /x ) {
+ push @lib, $1;
+ }
+ else {
+ push @switches, $opt;
+ }
+ }
-Number which failed, either from "not ok" or because they never ran.
+ # Do things the old way on VMS...
+ push @lib, _filtered_inc() if IS_VMS;
-=item B<Failed>
+ # If $Verbose isn't numeric default to 1. This helps core.
+ my $verbosity = ( $Verbose ? ( $Verbose !~ /\d/ ) ? 1 : $Verbose : 0 );
-Percentage of the total tests which failed.
+ my $args = {
+ timer => $Timer,
+ directives => $Directives,
+ lib => \@lib,
+ switches => \@switches,
+ color => $Color,
+ verbosity => $verbosity,
+ };
-=item B<List of Failed>
+ $args->{stdout} = $sub_args->{out}
+ if exists $sub_args->{out};
-A list of the tests which failed. Successive failures may be
-abbreviated (ie. 15-20 to indicate that tests 15, 16, 17, 18, 19 and
-20 failed).
+ if ( defined( my $env_opt = $ENV{HARNESS_OPTIONS} ) ) {
+ for my $opt ( split /:/, $env_opt ) {
+ if ( $opt =~ /^j(\d*)$/ ) {
+ $args->{jobs} = $1 || 9;
+ }
+ elsif ( $opt eq 'f' ) {
+ $args->{fork} = 1;
+ }
+ elsif ( $opt eq 'c' ) {
+ $args->{color} = 1;
+ }
+ else {
+ die "Unknown HARNESS_OPTIONS item: $opt\n";
+ }
+ }
+ }
-=back
+ return TAP::Harness->new($args);
+}
+# Get the parts of @INC which are changed from the stock list AND
+# preserve reordering of stock directories.
+sub _filtered_inc {
+ my @inc = grep { !ref } @INC; #28567
-=head1 FUNCTIONS
+ if (IS_VMS) {
-The following functions are available.
+ # VMS has a 255-byte limit on the length of %ENV entries, so
+ # toss the ones that involve perl_root, the install location
+ @inc = grep !/perl_root/i, @inc;
-=head2 runtests( @test_files )
+ }
+ elsif (IS_WIN32) {
-This runs all the given I<@test_files> and divines whether they passed
-or failed based on their output to STDOUT (details above). It prints
-out each individual test which failed along with a summary report and
-a how long it all took.
+ # Lose any trailing backslashes in the Win32 paths
+ s/[\\\/+]$// foreach @inc;
+ }
-It returns true if everything was ok. Otherwise it will C<die()> with
-one of the messages in the DIAGNOSTICS section.
+ my @default_inc = _default_inc();
-=cut
+ my @new_inc;
+ my %seen;
+ for my $dir (@inc) {
+ next if $seen{$dir}++;
-sub runtests {
- my(@tests) = @_;
+ if ( $dir eq ( $default_inc[0] || '' ) ) {
+ shift @default_inc;
+ }
+ else {
+ push @new_inc, $dir;
+ }
- local ($\, $,);
+ shift @default_inc while @default_inc and $seen{ $default_inc[0] };
+ }
- my ($tot, $failedtests,$todo_passed) = execute_tests(tests => \@tests);
- print get_results($tot, $failedtests,$todo_passed);
+ return @new_inc;
+}
- my $ok = _all_ok($tot);
+{
- assert(($ok xor keys %$failedtests),
- q{ok status jives with $failedtests});
+ # Cache this to avoid repeatedly shelling out to Perl.
+ my @inc;
- return $ok;
+ sub _default_inc {
+ return @inc if @inc;
+ my $perl = $ENV{HARNESS_PERL} || $^X;
+ chomp( @inc = `$perl -le "print join qq[\\n], \@INC"` );
+ return @inc;
+ }
}
-# my $ok = _all_ok(\%tot);
-# Tells you if this test run is overall successful or not.
-
-sub _all_ok {
- my($tot) = shift;
+sub _check_sequence {
+ my @list = @_;
+ my $prev;
+ while ( my $next = shift @list ) {
+ return if defined $prev && $next <= $prev;
+ $prev = $next;
+ }
- return $tot->{bad} == 0 && ($tot->{max} || $tot->{skipped}) ? 1 : 0;
+ return 1;
}
-# Returns all the files in a directory. This is shorthand for backwards
-# compatibility on systems where C<glob()> doesn't work right.
+sub execute_tests {
+ my %args = @_;
-sub _globdir {
- local *DIRH;
+ my $harness = _new_harness( \%args );
+ my $aggregate = TAP::Parser::Aggregator->new();
+
+ my %tot = (
+ bonus => 0,
+ max => 0,
+ ok => 0,
+ bad => 0,
+ good => 0,
+ files => 0,
+ tests => 0,
+ sub_skipped => 0,
+ todo => 0,
+ skipped => 0,
+ bench => undef,
+ );
+
+ # Install a callback so we get to see any plans the
+ # harness executes.
+ $harness->callback(
+ made_parser => sub {
+ my $parser = shift;
+ $parser->callback(
+ plan => sub {
+ my $plan = shift;
+ if ( $plan->directive eq 'SKIP' ) {
+ $tot{skipped}++;
+ }
+ }
+ );
+ }
+ );
+
+ _aggregate( $harness, $aggregate, @{ $args{tests} } );
+
+ $tot{bench} = $aggregate->elapsed;
+ my @tests = $aggregate->descriptions;
+
+ # TODO: Work out the circumstances under which the files
+ # and tests totals can differ.
+ $tot{files} = $tot{tests} = scalar @tests;
+
+ my %failedtests = ();
+ my %todo_passed = ();
+
+ for my $test (@tests) {
+ my ($parser) = $aggregate->parsers($test);
+
+ my @failed = $parser->failed;
+
+ my $wstat = $parser->wait;
+ my $estat = $parser->exit;
+ my $planned = $parser->tests_planned;
+ my @errors = $parser->parse_errors;
+ my $passed = $parser->passed;
+ my $actual_passed = $parser->actual_passed;
+
+ my $ok_seq = _check_sequence( $parser->actual_passed );
+
+ # Duplicate exit, wait status semantics of old version
+ $estat ||= '' unless $wstat;
+ $wstat ||= '';
+
+ $tot{max} += ( $planned || 0 );
+ $tot{bonus} += $parser->todo_passed;
+ $tot{ok} += $passed > $actual_passed ? $passed : $actual_passed;
+ $tot{sub_skipped} += $parser->skipped;
+ $tot{todo} += $parser->todo;
+
+ if ( @failed || $estat || @errors ) {
+ $tot{bad}++;
+
+ my $huh_planned = $planned ? undef : '??';
+ my $huh_errors = $ok_seq ? undef : '??';
+
+ $failedtests{$test} = {
+ 'canon' => $huh_planned
+ || $huh_errors
+ || _canon(@failed)
+ || '??',
+ 'estat' => $estat,
+ 'failed' => $huh_planned
+ || $huh_errors
+ || scalar @failed,
+ 'max' => $huh_planned || $planned,
+ 'name' => $test,
+ 'wstat' => $wstat
+ };
+ }
+ else {
+ $tot{good}++;
+ }
- opendir DIRH, shift;
- my @f = readdir DIRH;
- closedir DIRH;
+ my @todo = $parser->todo_passed;
+ if (@todo) {
+ $todo_passed{$test} = {
+ 'canon' => _canon(@todo),
+ 'estat' => $estat,
+ 'failed' => scalar @todo,
+ 'max' => scalar $parser->todo,
+ 'name' => $test,
+ 'wstat' => $wstat
+ };
+ }
+ }
- return @f;
+ return ( \%tot, \%failedtests, \%todo_passed );
}
=head2 execute_tests( tests => \@test_files, out => \*FH )
wstat Script's wait status
max Number of individual tests
failed Number which failed
- percent Percentage of tests which failed
canon List of tests which failed (as string).
C<$failed> should be empty if everything passed.
=cut
-sub execute_tests {
- my %args = @_;
- my @tests = @{$args{tests}};
- my $out = $args{out} || select();
-
- # We allow filehandles that are symbolic refs
- no strict 'refs';
- _autoflush($out);
- _autoflush(\*STDERR);
-
- my %failedtests;
- my %todo_passed;
-
- # Test-wide totals.
- my(%tot) = (
- bonus => 0,
- max => 0,
- ok => 0,
- files => 0,
- bad => 0,
- good => 0,
- tests => scalar @tests,
- sub_skipped => 0,
- todo => 0,
- skipped => 0,
- bench => 0,
- );
-
- my @dir_files;
- @dir_files = _globdir $Files_In_Dir if defined $Files_In_Dir;
- my $run_start_time = new Benchmark;
-
- my $width = _leader_width(@tests);
- foreach my $tfile (@tests) {
- $Last_ML_Print = 0; # so each test prints at least once
- my($leader, $ml) = _mk_leader($tfile, $width);
- local $ML = $ml;
-
- print $out $leader;
-
- $tot{files}++;
-
- $Strap->{_seen_header} = 0;
- if ( $Test::Harness::Debug ) {
- print $out "# Running: ", $Strap->_command_line($tfile), "\n";
- }
- my $test_start_time = $Timer ? time : 0;
- my %results = $Strap->analyze_file($tfile) or
- do { warn $Strap->{error}, "\n"; next };
- my $elapsed;
- if ( $Timer ) {
- $elapsed = time - $test_start_time;
- if ( $has_time_hires ) {
- $elapsed = sprintf( " %8d ms", $elapsed*1000 );
- }
- else {
- $elapsed = sprintf( " %8s s", $elapsed ? $elapsed : "<1" );
- }
- }
- else {
- $elapsed = "";
- }
-
- # state of the current test.
- my @failed = grep { !$results{details}[$_-1]{ok} }
- 1..@{$results{details}};
- my @todo_pass = grep { $results{details}[$_-1]{actual_ok} &&
- $results{details}[$_-1]{type} eq 'todo' }
- 1..@{$results{details}};
-
- my %test = (
- ok => $results{ok},
- 'next' => $Strap->{'next'},
- max => $results{max},
- failed => \@failed,
- todo_pass => \@todo_pass,
- todo => $results{todo},
- bonus => $results{bonus},
- skipped => $results{skip},
- skip_reason => $results{skip_reason},
- skip_all => $Strap->{skip_all},
- ml => $ml,
- );
-
- $tot{bonus} += $results{bonus};
- $tot{max} += $results{max};
- $tot{ok} += $results{ok};
- $tot{todo} += $results{todo};
- $tot{sub_skipped} += $results{skip};
-
- my($estatus, $wstatus) = @results{qw(exit wait)};
-
- if ($results{passing}) {
- # XXX Combine these first two
- if ($test{max} and $test{skipped} + $test{bonus}) {
- my @msg;
- push(@msg, "$test{skipped}/$test{max} skipped: $test{skip_reason}")
- if $test{skipped};
- if ($test{bonus}) {
- my ($txt, $canon) = _canondetail($test{todo},0,'TODO passed',
- @{$test{todo_pass}});
- $todo_passed{$tfile} = {
- canon => $canon,
- max => $test{todo},
- failed => $test{bonus},
- name => $tfile,
- percent => 100*$test{bonus}/$test{todo},
- estat => '',
- wstat => '',
- };
-
- push(@msg, "$test{bonus}/$test{max} unexpectedly succeeded\n$txt");
- }
- print $out "$test{ml}ok$elapsed\n ".join(', ', @msg)."\n";
- }
- elsif ( $test{max} ) {
- print $out "$test{ml}ok$elapsed\n";
- }
- elsif ( defined $test{skip_all} and length $test{skip_all} ) {
- print $out "skipped\n all skipped: $test{skip_all}\n";
- $tot{skipped}++;
- }
- else {
- print $out "skipped\n all skipped: no reason given\n";
- $tot{skipped}++;
- }
- $tot{good}++;
- }
- else {
- # List unrun tests as failures.
- if ($test{'next'} <= $test{max}) {
- push @{$test{failed}}, $test{'next'}..$test{max};
- }
- # List overruns as failures.
- else {
- my $details = $results{details};
- foreach my $overrun ($test{max}+1..@$details) {
- next unless ref $details->[$overrun-1];
- push @{$test{failed}}, $overrun
- }
- }
-
- if ($wstatus) {
- $failedtests{$tfile} = _dubious_return(\%test, \%tot,
- $estatus, $wstatus);
- $failedtests{$tfile}{name} = $tfile;
- }
- elsif($results{seen}) {
- if (@{$test{failed}} and $test{max}) {
- my ($txt, $canon) = _canondetail($test{max},$test{skipped},'Failed',
- @{$test{failed}});
- print $out "$test{ml}$txt";
- $failedtests{$tfile} = { canon => $canon,
- max => $test{max},
- failed => scalar @{$test{failed}},
- name => $tfile,
- percent => 100*(scalar @{$test{failed}})/$test{max},
- estat => '',
- wstat => '',
- };
- }
- else {
- print $out "Don't know which tests failed: got $test{ok} ok, ".
- "expected $test{max}\n";
- $failedtests{$tfile} = { canon => '??',
- max => $test{max},
- failed => '??',
- name => $tfile,
- percent => undef,
- estat => '',
- wstat => '',
- };
- }
- $tot{bad}++;
- }
- else {
- print $out "FAILED before any test output arrived\n";
- $tot{bad}++;
- $failedtests{$tfile} = { canon => '??',
- max => '??',
- failed => '??',
- name => $tfile,
- percent => undef,
- estat => '',
- wstat => '',
- };
- }
- }
-
- if (defined $Files_In_Dir) {
- my @new_dir_files = _globdir $Files_In_Dir;
- if (@new_dir_files != @dir_files) {
- my %f;
- @f{@new_dir_files} = (1) x @new_dir_files;
- delete @f{@dir_files};
- my @f = sort keys %f;
- print $out "LEAKED FILES: @f\n";
- @dir_files = @new_dir_files;
- }
- }
- } # foreach test
- $tot{bench} = timediff(new Benchmark, $run_start_time);
-
- $Strap->_restore_PERL5LIB;
-
- return(\%tot, \%failedtests, \%todo_passed);
-}
-
-# Turns on autoflush for the handle passed
-sub _autoflush {
- my $flushy_fh = shift;
- my $old_fh = select $flushy_fh;
- $| = 1;
- select $old_fh;
-}
-
-=for private _mk_leader
-
- my($leader, $ml) = _mk_leader($test_file, $width);
-
-Generates the 't/foo........' leader for the given C<$test_file> as well
-as a similar version which will overwrite the current line (by use of
-\r and such). C<$ml> may be empty if Test::Harness doesn't think you're
-on TTY.
-
-The C<$width> is the width of the "yada/blah.." string.
-
-=cut
-
-sub _mk_leader {
- my($te, $width) = @_;
- chomp($te);
- $te =~ s/\.\w+$/./;
-
- if ($^O eq 'VMS') {
- $te =~ s/^.*\.t\./\[.t./s;
- }
- my $leader = "$te" . '.' x ($width - length($te));
- my $ml = "";
-
- if ( -t STDOUT and not $ENV{HARNESS_NOTTY} and not $Verbose ) {
- $ml = "\r" . (' ' x 77) . "\r$leader"
- }
-
- return($leader, $ml);
-}
-
-=for private _leader_width
-
- my($width) = _leader_width(@test_files);
-
-Calculates how wide the leader should be based on the length of the
-longest test name.
-
-=cut
-
-sub _leader_width {
- my $maxlen = 0;
- my $maxsuflen = 0;
- foreach (@_) {
- my $suf = /\.(\w+)$/ ? $1 : '';
- my $len = length;
- my $suflen = length $suf;
- $maxlen = $len if $len > $maxlen;
- $maxsuflen = $suflen if $suflen > $maxsuflen;
- }
- # + 3 : we want three dots between the test name and the "ok"
- return $maxlen + 3 - $maxsuflen;
-}
-
-sub get_results {
- my $tot = shift;
- my $failedtests = shift;
- my $todo_passed = shift;
-
- my $out = '';
-
- my $pct;
- my $bonusmsg = _bonusmsg($tot);
-
- if (_all_ok($tot)) {
- $out .= "All tests successful$bonusmsg.\n";
- if ($tot->{bonus}) {
- my($fmt_top, $fmt) = _create_fmts("Passed Todo",$todo_passed);
- # Now write to formats
- for my $script (sort keys %{$todo_passed||{}}) {
- my $Curtest = $todo_passed->{$script};
-
- $out .= swrite( $fmt_top );
- $out .= swrite( $fmt, @{ $Curtest }{qw(name estat wstat max failed percent canon)} );
- }
- }
- }
- elsif (!$tot->{tests}){
- die "FAILED--no tests were run for some reason.\n";
- }
- elsif (!$tot->{max}) {
- my $blurb = $tot->{tests}==1 ? "script" : "scripts";
- die "FAILED--$tot->{tests} test $blurb could be run, ".
- "alas--no output ever seen\n";
- }
- else {
- $pct = sprintf("%.2f", $tot->{good} / $tot->{tests} * 100);
- my $percent_ok = 100*$tot->{ok}/$tot->{max};
- my $subpct = sprintf " %d/%d subtests failed, %.2f%% okay.",
- $tot->{max} - $tot->{ok}, $tot->{max},
- $percent_ok;
-
- my($fmt_top, $fmt1, $fmt2) = _create_fmts("Failed Test",$failedtests);
-
- # Now write to formats
- for my $script (sort keys %$failedtests) {
- my $Curtest = $failedtests->{$script};
- $out .= swrite( $fmt_top );
- $out .= swrite( $fmt1, @{ $Curtest }{qw(name estat wstat max failed percent canon)} );
- $out .= swrite( $fmt2, $Curtest->{canon} );
- }
- if ($tot->{bad}) {
- $bonusmsg =~ s/^,\s*//;
- $out .= "$bonusmsg.\n" if $bonusmsg;
- $out .= "Failed $tot->{bad}/$tot->{tests} test scripts, $pct% okay.$subpct\n";
- }
- }
-
- $out .= sprintf("Files=%d, Tests=%d, %s\n",
- $tot->{files}, $tot->{max}, timestr($tot->{bench}, 'nop'));
- return $out;
-}
-
-sub swrite {
- my $format = shift;
- $^A = '';
- formline($format,@_);
- my $out = $^A;
- $^A = '';
- return $out;
-}
-
-
-my %Handlers = (
- header => \&header_handler,
- test => \&test_handler,
- bailout => \&bailout_handler,
-);
-
-$Strap->{callback} = \&strap_callback;
-sub strap_callback {
- my($self, $line, $type, $totals) = @_;
- print $line if $Verbose;
-
- my $meth = $Handlers{$type};
- $meth->($self, $line, $type, $totals) if $meth;
-};
-
-
-sub header_handler {
- my($self, $line, $type, $totals) = @_;
-
- warn "Test header seen more than once!\n" if $self->{_seen_header};
-
- $self->{_seen_header}++;
-
- warn "1..M can only appear at the beginning or end of tests\n"
- if $totals->{seen} &&
- $totals->{max} < $totals->{seen};
-};
-
-sub test_handler {
- my($self, $line, $type, $totals) = @_;
-
- my $curr = $totals->{seen};
- my $next = $self->{'next'};
- my $max = $totals->{max};
- my $detail = $totals->{details}[-1];
-
- if( $detail->{ok} ) {
- _print_ml_less("ok $curr/$max");
-
- if( $detail->{type} eq 'skip' ) {
- $totals->{skip_reason} = $detail->{reason}
- unless defined $totals->{skip_reason};
- $totals->{skip_reason} = 'various reasons'
- if $totals->{skip_reason} ne $detail->{reason};
- }
- }
- else {
- _print_ml("NOK $curr");
- }
-
- if( $curr > $next ) {
- print "Test output counter mismatch [test $curr]\n";
- }
- elsif( $curr < $next ) {
- print "Confused test output: test $curr answered after ".
- "test ", $next - 1, "\n";
- }
-
-};
-
-sub bailout_handler {
- my($self, $line, $type, $totals) = @_;
-
- die "FAILED--Further testing stopped" .
- ($self->{bailout_reason} ? ": $self->{bailout_reason}\n" : ".\n");
-};
-
-
-sub _print_ml {
- print join '', $ML, @_ if $ML;
-}
-
-
-# Print updates only once per second.
-sub _print_ml_less {
- my $now = CORE::time;
- if ( $Last_ML_Print != $now ) {
- _print_ml(@_);
- $Last_ML_Print = $now;
- }
-}
-
-sub _bonusmsg {
- my($tot) = @_;
-
- my $bonusmsg = '';
- $bonusmsg = (" ($tot->{bonus} subtest".($tot->{bonus} > 1 ? 's' : '').
- " UNEXPECTEDLY SUCCEEDED)")
- if $tot->{bonus};
-
- if ($tot->{skipped}) {
- $bonusmsg .= ", $tot->{skipped} test"
- . ($tot->{skipped} != 1 ? 's' : '');
- if ($tot->{sub_skipped}) {
- $bonusmsg .= " and $tot->{sub_skipped} subtest"
- . ($tot->{sub_skipped} != 1 ? 's' : '');
- }
- $bonusmsg .= ' skipped';
- }
- elsif ($tot->{sub_skipped}) {
- $bonusmsg .= ", $tot->{sub_skipped} subtest"
- . ($tot->{sub_skipped} != 1 ? 's' : '')
- . " skipped";
- }
- return $bonusmsg;
-}
-
-# Test program go boom.
-sub _dubious_return {
- my($test, $tot, $estatus, $wstatus) = @_;
- my ($failed, $canon, $percent) = ('??', '??');
-
- printf "$test->{ml}dubious\n\tTest returned status $estatus ".
- "(wstat %d, 0x%x)\n",
- $wstatus,$wstatus;
- print "\t\t(VMS status is $estatus)\n" if $^O eq 'VMS';
-
- $tot->{bad}++;
-
- if ($test->{max}) {
- if ($test->{'next'} == $test->{max} + 1 and not @{$test->{failed}}) {
- print "\tafter all the subtests completed successfully\n";
- $percent = 0;
- $failed = 0; # But we do not set $canon!
- }
- else {
- push @{$test->{failed}}, $test->{'next'}..$test->{max};
- $failed = @{$test->{failed}};
- (my $txt, $canon) = _canondetail($test->{max},$test->{skipped},'Failed',@{$test->{failed}});
- $percent = 100*(scalar @{$test->{failed}})/$test->{max};
- print "DIED. ",$txt;
- }
- }
-
- return { canon => $canon, max => $test->{max} || '??',
- failed => $failed,
- percent => $percent,
- estat => $estatus, wstat => $wstatus,
- };
-}
-
-
-sub _create_fmts {
- my $failed_str = shift;
- my $failedtests = shift;
-
- my ($type) = split /\s/,$failed_str;
- my $short = substr($type,0,4);
- my $total = $short eq 'Pass' ? 'Todos' : 'Total';
- my $middle_str = " Stat Wstat $total $short $type ";
- my $list_str = "List of $type";
-
- # Figure out our longest name string for formatting purposes.
- my $max_namelen = length($failed_str);
- foreach my $script (keys %$failedtests) {
- my $namelen = length $failedtests->{$script}->{name};
- $max_namelen = $namelen if $namelen > $max_namelen;
- }
-
- my $list_len = $Columns - length($middle_str) - $max_namelen;
- if ($list_len < length($list_str)) {
- $list_len = length($list_str);
- $max_namelen = $Columns - length($middle_str) - $list_len;
- if ($max_namelen < length($failed_str)) {
- $max_namelen = length($failed_str);
- $Columns = $max_namelen + length($middle_str) + $list_len;
- }
- }
-
- my $fmt_top = sprintf("%-${max_namelen}s", $failed_str)
- . $middle_str
- . $list_str . "\n"
- . "-" x $Columns
- . "\n";
-
- my $fmt1 = "@" . "<" x ($max_namelen - 1)
- . " @>> @>>>> @>>>> @>>> ^##.##% "
- . "^" . "<" x ($list_len - 1) . "\n";
- my $fmt2 = "~~" . " " x ($Columns - $list_len - 2) . "^"
- . "<" x ($list_len - 1) . "\n";
-
- return($fmt_top, $fmt1, $fmt2);
-}
-
-sub _canondetail {
- my $max = shift;
- my $skipped = shift;
- my $type = shift;
- my @detail = @_;
- my %seen;
- @detail = sort {$a <=> $b} grep !$seen{$_}++, @detail;
- my $detail = @detail;
- my @result = ();
- my @canon = ();
- my $min;
- my $last = $min = shift @detail;
- my $canon;
- my $uc_type = uc($type);
- if (@detail) {
- for (@detail, $detail[-1]) { # don't forget the last one
- if ($_ > $last+1 || $_ == $last) {
- push @canon, ($min == $last) ? $last : "$min-$last";
- $min = $_;
- }
- $last = $_;
- }
- local $" = ", ";
- push @result, "$uc_type tests @canon\n";
- $canon = join ' ', @canon;
- }
- else {
- push @result, "$uc_type test $last\n";
- $canon = $last;
- }
-
- return (join("", @result), $canon)
- if $type=~/todo/i;
- push @result, "\t$type $detail/$max tests, ";
- if ($max) {
- push @result, sprintf("%.2f",100*(1-$detail/$max)), "% okay";
- }
- else {
- push @result, "?% okay";
- }
- my $ender = 's' x ($skipped > 1);
- if ($skipped) {
- my $good = $max - $detail - $skipped;
- my $skipmsg = " (less $skipped skipped test$ender: $good okay, ";
- if ($max) {
- my $goodper = sprintf("%.2f",100*($good/$max));
- $skipmsg .= "$goodper%)";
- }
- else {
- $skipmsg .= "?%)";
- }
- push @result, $skipmsg;
- }
- push @result, "\n";
- my $txt = join "", @result;
- return ($txt, $canon);
-}
-
1;
__END__
-
=head1 EXPORT
-C<&runtests> is exported by Test::Harness by default.
+C<&runtests> is exported by C<Test::Harness> by default.
C<&execute_tests>, C<$verbose>, C<$switches> and C<$debug> are
exported upon request.
-=head1 DIAGNOSTICS
-
-=over 4
-
-=item C<All tests successful.\nFiles=%d, Tests=%d, %s>
-
-If all tests are successful some statistics about the performance are
-printed.
-
-=item C<FAILED tests %s\n\tFailed %d/%d tests, %.2f%% okay.>
-
-For any single script that has failing subtests statistics like the
-above are printed.
-
-=item C<Test returned status %d (wstat %d)>
-
-Scripts that return a non-zero exit status, both C<$? E<gt>E<gt> 8>
-and C<$?> are printed in a message similar to the above.
-
-=item C<Failed 1 test, %.2f%% okay. %s>
-
-=item C<Failed %d/%d tests, %.2f%% okay. %s>
-
-If not all tests were successful, the script dies with one of the
-above messages.
-
-=item C<FAILED--Further testing stopped: %s>
+=head1 ENVIRONMENT VARIABLES THAT TEST::HARNESS SETS
-If a single subtest decides that further testing will not make sense,
-the script dies with this message.
-
-=back
-
-=head1 ENVIRONMENT VARIABLES THAT TEST::HARNESS SETS
-
-Test::Harness sets these before executing the individual tests.
+C<Test::Harness> sets these before executing the individual tests.
=over 4
=item C<HARNESS_VERSION>
-This is the version of Test::Harness.
+This is the version of C<Test::Harness>.
=back
=over 4
-=item C<HARNESS_COLUMNS>
-
-This value will be used for the width of the terminal. If it is not
-set then it will default to C<COLUMNS>. If this is not set, it will
-default to 80. Note that users of Bourne-sh based shells will need to
-C<export COLUMNS> for this module to use that variable.
-
-=item C<HARNESS_COMPILE_TEST>
-
-When true it will make harness attempt to compile the test using
-C<perlcc> before running it.
-
-B<NOTE> This currently only works when sitting in the perl source
-directory!
-
-=item C<HARNESS_DEBUG>
-
-If true, Test::Harness will print debugging information about itself as
-it runs the tests. This is different from C<HARNESS_VERBOSE>, which prints
-the output from the test being run. Setting C<$Test::Harness::Debug> will
-override this, or you can use the C<-d> switch in the F<prove> utility.
-
-=item C<HARNESS_FILELEAK_IN_DIR>
-
-When set to the name of a directory, harness will check after each
-test whether new files appeared in that directory, and report them as
-
- LEAKED FILES: scr.tmp 0 my.db
-
-If relative, directory name is with respect to the current directory at
-the moment runtests() was called. Putting absolute path into
-C<HARNESS_FILELEAK_IN_DIR> may give more predictable results.
-
-=item C<HARNESS_NOTTY>
-
-When set to a true value, forces it to behave as though STDOUT were
-not a console. You may need to set this if you don't want harness to
-output more frequent progress messages using carriage returns. Some
-consoles may not handle carriage returns properly (which results in a
-somewhat messy output).
-
-=item C<HARNESS_PERL>
-
-Usually your tests will be run by C<$^X>, the currently-executing Perl.
-However, you may want to have it run by a different executable, such as
-a threading perl, or a different version.
-
-If you're using the F<prove> utility, you can use the C<--perl> switch.
+=item C<HARNESS_TIMER>
-=item C<HARNESS_PERL_SWITCHES>
-
-Its value will be prepended to the switches used to invoke perl on
-each test. For example, setting C<HARNESS_PERL_SWITCHES> to C<-W> will
-run all tests with all warnings enabled.
+Setting this to true will make the harness display the number of
+milliseconds each test took. You can also use F<prove>'s C<--timer>
+switch.
=item C<HARNESS_VERBOSE>
-If true, Test::Harness will output the verbose results of running
+If true, C<Test::Harness> will output the verbose results of running
its tests. Setting C<$Test::Harness::verbose> will override this,
or you can use the C<-v> switch in the F<prove> utility.
-=back
-
-=head1 EXAMPLE
-
-Here's how Test::Harness tests itself
-
- $ cd ~/src/devel/Test-Harness
- $ perl -Mblib -e 'use Test::Harness qw(&runtests $verbose);
- $verbose=0; runtests @ARGV;' t/*.t
- Using /home/schwern/src/devel/Test-Harness/blib
- t/base..............ok
- t/nonumbers.........ok
- t/ok................ok
- t/test-harness......ok
- All tests successful.
- Files=4, Tests=24, 2 wallclock secs ( 0.61 cusr + 0.41 csys = 1.02 CPU)
-
-=head1 SEE ALSO
-
-The included F<prove> utility for running test scripts from the command line,
-L<Test> and L<Test::Simple> for writing test scripts, L<Benchmark> for
-the underlying timing routines, and L<Devel::Cover> for test coverage
-analysis.
-
-=head1 TODO
-
-Provide a way of running tests quietly (ie. no printing) for automated
-validation of tests. This will probably take the form of a version
-of runtests() which rather than printing its output returns raw data
-on the state of the tests. (Partially done in Test::Harness::Straps)
+=item C<HARNESS_OPTIONS>
-Document the format.
+Provide additional options to the harness. Currently supported options are:
-Fix HARNESS_COMPILE_TEST without breaking its core usage.
+=over
-Figure a way to report test names in the failure summary.
+=item C<< j<n> >>
-Rework the test summary so long test names are not truncated as badly.
-(Partially done with new skip test styles)
+Run <n> (default 9) parallel jobs.
-Add option for coverage analysis.
+=item C<< f >>
-Trap STDERR.
+Use forked parallelism.
-Implement Straps total_results()
-
-Remember exit code
-
-Completely redo the print summary code.
-
-Implement Straps callbacks. (experimentally implemented)
-
-Straps->analyze_file() not taint clean, don't know if it can be
+=back
-Fix that damned VMS nit.
+Multiple options may be separated by colons:
-HARNESS_TODOFAIL to display TODO failures
+ HARNESS_OPTIONS=j9:f make test
-Add a test for verbose.
+=back
-Change internal list of test results to a hash.
+=head1 Taint Mode
-Fix stats display when there's an overrun.
+Normally, when a Perl program is run in taint mode, the contents of the
+C<PERL5LIB> environment variable do not appear in C<@INC>.
-Fix so perls with spaces in the filename work.
+Because C<PERL5LIB> is often used during testing to add build
+directories to C<@INC>, C<Test::Harness> (actually
+L<TAP::Parser::Source::Perl>) passes the names of any directories found
+in C<PERL5LIB> as C<-I> switches. The net effect of this is that
+C<PERL5LIB> is honoured even in taint mode.
-Keeping whittling away at _run_all_tests()
+=head1 SEE ALSO
-Clean up how the summary is printed. Get rid of those damned formats.
+L<TAP::Harness>
=head1 BUGS
Please report any bugs or feature requests to
C<bug-test-harness at rt.cpan.org>, or through the web interface at
-L<http://rt.cpan.org/NoAuth/ReportBug.html?Queue=Test-Harness>.
-I will be notified, and then you'll automatically be notified of progress on
-your bug as I make changes.
-
-=head1 SUPPORT
-
-You can find documentation for this module with the F<perldoc> command.
-
- perldoc Test::Harness
-
-You can get docs for F<prove> with
-
- prove --man
-
-You can also look for information at:
-
-=over 4
-
-=item * AnnoCPAN: Annotated CPAN documentation
-
-L<http://annocpan.org/dist/Test-Harness>
-
-=item * CPAN Ratings
-
-L<http://cpanratings.perl.org/d/Test-Harness>
-
-=item * RT: CPAN's request tracker
-
-L<http://rt.cpan.org/NoAuth/Bugs.html?Dist=Test-Harness>
-
-=item * Search CPAN
-
-L<http://search.cpan.org/dist/Test-Harness>
-
-=back
-
-=head1 SOURCE CODE
-
-The source code repository for Test::Harness is at
-L<http://svn.perl.org/modules/Test-Harness>.
+L<http://rt.cpan.org/NoAuth/ReportBug.html?Queue=Test-Harness>. I will be
+notified, and then you'll automatically be notified of progress on your bug
+as I make changes.
=head1 AUTHORS
-Either Tim Bunce or Andreas Koenig, we don't know. What we know for
-sure is, that it was inspired by Larry Wall's F<TEST> script that came
-with perl distributions for ages. Numerous anonymous contributors
-exist. Andreas Koenig held the torch for many years, and then
-Michael G Schwern.
+Andy Armstrong C<< <andy@hexten.net> >>
-Current maintainer is Andy Lester C<< <andy at petdance.com> >>.
+L<Test::Harness> 2.64 (maintained by Andy Lester and on which this
+module is based) has this attribution:
-=head1 COPYRIGHT
+ Either Tim Bunce or Andreas Koenig, we don't know. What we know for
+ sure is, that it was inspired by Larry Wall's F<TEST> script that came
+ with perl distributions for ages. Numerous anonymous contributors
+ exist. Andreas Koenig held the torch for many years, and then
+ Michael G Schwern.
-Copyright 2002-2005
-by Michael G Schwern C<< <schwern at pobox.com> >>,
-Andy Lester C<< <andy at petdance.com> >>.
+=head1 LICENCE AND COPYRIGHT
-This program is free software; you can redistribute it and/or
-modify it under the same terms as Perl itself.
+Copyright (c) 2007-2008, Andy Armstrong C<< <andy@hexten.net> >>. All rights reserved.
-See L<http://www.perl.com/perl/misc/Artistic.html>.
+This module is free software; you can redistribute it and/or
+modify it under the same terms as Perl itself. See L<perlartistic>.
-=cut