lib/Test/Harness/t/strap.t Test::Harness::Straps test
lib/Test/Harness/t/test-harness.t Test::Harness test
lib/Test/Harness/t/version.t Test::Harness test
+lib/Test/Harness/Util.pm Various utility functions for Test::Harness
lib/Test/More.pm More utilities for writing tests
lib/Test.pm A simple framework for writing test scripts
lib/Test/Simple/Changes Test::Simple changes
@ISA @EXPORT @EXPORT_OK
$Verbose $Switches $Debug
$verbose $switches $debug
- $Curtest
- $Columns
+ $Columns
$Timer
$ML $Last_ML_Print
$Strap
=head1 VERSION
-Version 2.56
+Version 2.57_05
=cut
-$VERSION = "2.56";
+$VERSION = "2.57_05";
# Backwards compatibility for exportable variable names.
*verbose = *Verbose;
delete $ENV{HARNESS_VERSION};
}
-# Some experimental versions of OS/2 build have broken $?
-my $Ignore_Exitcode = $ENV{HARNESS_IGNORE_EXITCODE};
-
my $Files_In_Dir = $ENV{HARNESS_FILELEAK_IN_DIR};
$Strap = Test::Harness::Straps->new;
@ISA = ('Exporter');
@EXPORT = qw(&runtests);
-@EXPORT_OK = qw($verbose $switches);
+@EXPORT_OK = qw(&execute_tests $verbose $switches);
$Verbose = $ENV{HARNESS_VERBOSE} || 0;
$Debug = $ENV{HARNESS_DEBUG} || 0;
=back
-=head2 Functions
-
-Test::Harness currently only has one function, here it is.
+=head1 FUNCTIONS
-=over 4
+The following functions are available.
-=item B<runtests>
-
- my $allok = runtests(@test_files);
+=head2 runtests( @test_files )
This runs all the given I<@test_files> and divines whether they passed
or failed based on their output to STDOUT (details above). It prints
local ($\, $,);
- my($tot, $failedtests) = _run_all_tests(@tests);
- _show_results($tot, $failedtests);
+ my ($tot, $failedtests,$todo_passed) = execute_tests(tests => \@tests);
+ print get_results($tot, $failedtests,$todo_passed);
my $ok = _all_ok($tot);
return $ok;
}
-=begin _private
-
-=item B<_all_ok>
-
- my $ok = _all_ok(\%tot);
-
-Tells you if this test run is overall successful or not.
-
-=cut
+# my $ok = _all_ok(\%tot);
+# Tells you if this test run is overall successful or not.
sub _all_ok {
my($tot) = shift;
return $tot->{bad} == 0 && ($tot->{max} || $tot->{skipped}) ? 1 : 0;
}
-=item B<_globdir>
+# Returns all the files in a directory. This is shorthand for backwards
+# compatibility on systems where C<glob()> doesn't work right.
- my @files = _globdir $dir;
+sub _globdir {
+ local *DIRH;
-Returns all the files in a directory. This is shorthand for backwards
-compatibility on systems where C<glob()> doesn't work right.
-
-=cut
-
-sub _globdir {
- opendir DIRH, shift;
- my @f = readdir DIRH;
- closedir DIRH;
+ opendir DIRH, shift;
+ my @f = readdir DIRH;
+ closedir DIRH;
return @f;
}
-=item B<_run_all_tests>
+=head2 execute_tests( tests => \@test_files, out => \*FH )
- my($total, $failed) = _run_all_tests(@test_files);
+Runs all the given C<@test_files> (just like C<runtests()>) but
+doesn't generate the final report. During testing, progress
+information will be written to the currently selected output
+filehandle (usually C<STDOUT>), or to the filehandle given by the
+optional C<out> parameter.
-Runs all the given C<@test_files> (as C<runtests()>) but does it
-quietly (no report). $total is a hash ref summary of all the tests
-run. Its keys and values are this:
+Returns a list of three values, C<$total>, C<$failed> and
+C<$todo_passed>, describing the results. C<$total> is a hash ref
+summary of all the tests run. Its keys and values are these:
bonus Number of individual todo tests unexpectedly passed
max Number of individual tests ran
If C<< $total->{bad} == 0 >> and C<< $total->{max} > 0 >>, you've
got a successful test.
-$failed is a hash ref of all the test scripts which failed. Each key
+C<$failed> is a hash ref of all the test scripts that failed. Each key
is the name of a test script, each value is another hash representing
how that script failed. Its keys are these:
C<$failed> should be empty if everything passed.
+
+C<$todo_passed> is a hash ref, in the same format as C<$failed>, of
+the test scripts that had TODO tests which unexpectedly passed.
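+
+As a rough sketch of a call (the test file names and log filehandle
+here are only illustrative):
+
+    use Test::Harness qw( execute_tests );
+
+    open my $log, '>', 'harness.log' or die "Can't write harness.log: $!";
+    my($total, $failed, $todo_passed) =
+        execute_tests( tests => [ 't/01-basic.t', 't/02-advanced.t' ],
+                       out   => $log );
+    print "Ran $total->{max} tests; $total->{bad} script(s) failed\n";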
-B<NOTE> Currently this function is still noisy. I'm working on it.
-
=cut
-# Turns on autoflush for the handle passed
-sub _autoflush {
- my $flushy_fh = shift;
- my $old_fh = select $flushy_fh;
- $| = 1;
- select $old_fh;
-}
-
-sub _run_all_tests {
- my @tests = @_;
+sub execute_tests {
+ my %args = @_;
+ my @tests = @{$args{tests}};
+ my $out = $args{out} || select();
- _autoflush(\*STDOUT);
+ # We allow filehandles that are symbolic refs
+ no strict 'refs';
+ _autoflush($out);
_autoflush(\*STDERR);
- my(%failedtests);
+ my %failedtests;
+ my %todo_passed;
# Test-wide totals.
my(%tot) = (
my($leader, $ml) = _mk_leader($tfile, $width);
local $ML = $ml;
- print $leader;
+ print $out $leader;
$tot{files}++;
$Strap->{_seen_header} = 0;
if ( $Test::Harness::Debug ) {
- print "# Running: ", $Strap->_command_line($tfile), "\n";
+ print $out "# Running: ", $Strap->_command_line($tfile), "\n";
}
my $test_start_time = $Timer ? time : 0;
my %results = $Strap->analyze_file($tfile) or
if ( $Timer ) {
$elapsed = time - $test_start_time;
if ( $has_time_hires ) {
- $elapsed = sprintf( " %8.3fs", $elapsed );
+ $elapsed = sprintf( " %8d ms", $elapsed*1000 );
}
else {
- $elapsed = sprintf( " %8ss", $elapsed ? $elapsed : "<1" );
+ $elapsed = sprintf( " %8s s", $elapsed ? $elapsed : "<1" );
}
}
else {
# state of the current test.
my @failed = grep { !$results{details}[$_-1]{ok} }
1..@{$results{details}};
+ my @todo_pass = grep { $results{details}[$_-1]{ok} &&
+ $results{details}[$_-1]{type} eq 'todo' }
+ 1..@{$results{details}};
+
my %test = (
ok => $results{ok},
'next' => $Strap->{'next'},
max => $results{max},
failed => \@failed,
+ todo_pass => \@todo_pass,
bonus => $results{bonus},
skipped => $results{skip},
skip_reason => $results{skip_reason},
my @msg;
push(@msg, "$test{skipped}/$test{max} skipped: $test{skip_reason}")
if $test{skipped};
- push(@msg, "$test{bonus}/$test{max} unexpectedly succeeded")
- if $test{bonus};
- print "$test{ml}ok$elapsed\n ".join(', ', @msg)."\n";
+ if ($test{bonus}) {
+ my ($txt, $canon) = _canondetail($test{max},$test{skipped},'TODO passed',
+ @{$test{todo_pass}});
+ $todo_passed{$tfile} = {
+ canon => $canon,
+ max => $test{max},
+ failed => $test{bonus},
+ name => $tfile,
+ percent => 100*$test{bonus}/$test{max},
+ estat => '',
+ wstat => '',
+ };
+
+ push(@msg, "$test{bonus}/$test{max} unexpectedly succeeded\n$txt");
+ }
+ print $out "$test{ml}ok$elapsed\n ".join(', ', @msg)."\n";
}
elsif ( $test{max} ) {
- print "$test{ml}ok$elapsed\n";
+ print $out "$test{ml}ok$elapsed\n";
}
elsif ( defined $test{skip_all} and length $test{skip_all} ) {
- print "skipped\n all skipped: $test{skip_all}\n";
+ print $out "skipped\n all skipped: $test{skip_all}\n";
$tot{skipped}++;
}
else {
- print "skipped\n all skipped: no reason given\n";
+ print $out "skipped\n all skipped: no reason given\n";
$tot{skipped}++;
}
$tot{good}++;
}
elsif($results{seen}) {
if (@{$test{failed}} and $test{max}) {
- my ($txt, $canon) = _canonfailed($test{max},$test{skipped},
+ my ($txt, $canon) = _canondetail($test{max},$test{skipped},'Failed',
@{$test{failed}});
- print "$test{ml}$txt";
+ print $out "$test{ml}$txt";
$failedtests{$tfile} = { canon => $canon,
max => $test{max},
failed => scalar @{$test{failed}},
};
}
else {
- print "Don't know which tests failed: got $test{ok} ok, ".
+ print $out "Don't know which tests failed: got $test{ok} ok, ".
"expected $test{max}\n";
$failedtests{$tfile} = { canon => '??',
max => $test{max},
$tot{bad}++;
}
else {
- print "FAILED before any test output arrived\n";
+ print $out "FAILED before any test output arrived\n";
$tot{bad}++;
$failedtests{$tfile} = { canon => '??',
max => '??',
@f{@new_dir_files} = (1) x @new_dir_files;
delete @f{@dir_files};
my @f = sort keys %f;
- print "LEAKED FILES: @f\n";
+ print $out "LEAKED FILES: @f\n";
@dir_files = @new_dir_files;
}
}
$Strap->_restore_PERL5LIB;
- return(\%tot, \%failedtests);
+ return(\%tot, \%failedtests, \%todo_passed);
+}
+
+# Turns on autoflush for the handle passed
+sub _autoflush {
+ my $flushy_fh = shift;
+ my $old_fh = select $flushy_fh;
+ $| = 1;
+ select $old_fh;
}
-=item B<_mk_leader>
+=for private _mk_leader
- my($leader, $ml) = _mk_leader($test_file, $width);
+ my($leader, $ml) = _mk_leader($test_file, $width);
Generates the 't/foo........' leader for the given C<$test_file> as well
as a similar version which will overwrite the current line (by use of
return($leader, $ml);
}
-=item B<_leader_width>
+=for private _leader_width
my($width) = _leader_width(@test_files);
return $maxlen + 3 - $maxsuflen;
}
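+# get_results($tot, $failedtests, $todo_passed) builds the end-of-run
+# report that runtests() prints, and returns it as a single string
+# instead of printing it directly.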
+sub get_results {
+ my $tot = shift;
+ my $failedtests = shift;
+ my $todo_passed = shift;
-sub _show_results {
- my($tot, $failedtests) = @_;
+ my $out = '';
my $pct;
my $bonusmsg = _bonusmsg($tot);
if (_all_ok($tot)) {
- print "All tests successful$bonusmsg.\n";
+ $out .= "All tests successful$bonusmsg.\n";
+ if ($tot->{bonus}) {
+ my($fmt_top, $fmt) = _create_fmts("Passed",$todo_passed);
+ # Now write to formats
+        $out .= swrite( $fmt_top );
+        for my $script (sort keys %{$todo_passed||{}}) {
+            my $Curtest = $todo_passed->{$script};
+
+ $out .= swrite( $fmt, @{ $Curtest }{qw(name estat wstat max failed percent canon)} );
+ }
+ }
}
elsif (!$tot->{tests}){
die "FAILED--no tests were run for some reason.\n";
$tot->{max} - $tot->{ok}, $tot->{max},
$percent_ok;
- my($fmt_top, $fmt) = _create_fmts($failedtests);
+ my($fmt_top, $fmt1, $fmt2) = _create_fmts("Failed",$failedtests);
    # Now write to formats
+    $out .= swrite( $fmt_top );
    for my $script (sort keys %$failedtests) {
-        $Curtest = $failedtests->{$script};
-        write;
+        my $Curtest = $failedtests->{$script};
+        $out .= swrite( $fmt1, @{ $Curtest }{qw(name estat wstat max failed percent canon)} );
+        $out .= swrite( $fmt2, $Curtest->{canon} );
}
if ($tot->{bad}) {
$bonusmsg =~ s/^,\s*//;
- print "$bonusmsg.\n" if $bonusmsg;
- die "Failed $tot->{bad}/$tot->{tests} test scripts, $pct% okay.".
- "$subpct\n";
+ $out .= "$bonusmsg.\n" if $bonusmsg;
+ $out .= "Failed $tot->{bad}/$tot->{tests} test scripts, $pct% okay.$subpct\n";
}
}
- printf("Files=%d, Tests=%d, %s\n",
+ $out .= sprintf("Files=%d, Tests=%d, %s\n",
$tot->{files}, $tot->{max}, timestr($tot->{bench}, 'nop'));
+ return $out;
+}
+
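+# swrite() is sprintf() for formats: it runs the given picture line(s)
+# through formline(), then returns and clears the accumulator $^A.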
+sub swrite {
+ my $format = shift;
+ $^A = '';
+ formline($format,@_);
+ my $out = $^A;
+ $^A = '';
+ return $out;
}
. ($tot->{sub_skipped} != 1 ? 's' : '')
. " skipped";
}
-
return $bonusmsg;
}
else {
push @{$test->{failed}}, $test->{'next'}..$test->{max};
$failed = @{$test->{failed}};
- (my $txt, $canon) = _canonfailed($test->{max},$test->{skipped},@{$test->{failed}});
+ (my $txt, $canon) = _canondetail($test->{max},$test->{skipped},'Failed',@{$test->{failed}});
$percent = 100*(scalar @{$test->{failed}})/$test->{max};
print "DIED. ",$txt;
}
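+# _create_fmts($type, $tests) builds the formline() picture strings for
+# the report table: the header row, the per-script detail row, and a
+# continuation row for long lists of test numbers.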
sub _create_fmts {
- my($failedtests) = @_;
+ my $type = shift;
+ my $failedtests = shift;
- my $failed_str = "Failed Test";
- my $middle_str = " Stat Wstat Total Fail Failed ";
- my $list_str = "List of Failed";
+ my $short = substr($type,0,4);
+ my $failed_str = "$type Test";
+ my $middle_str = " Stat Wstat Total $short $type ";
+ my $list_str = "List of $type";
# Figure out our longest name string for formatting purposes.
my $max_namelen = length($failed_str);
}
}
- my $fmt_top = "format STDOUT_TOP =\n"
- . sprintf("%-${max_namelen}s", $failed_str)
+ my $fmt_top = sprintf("%-${max_namelen}s", $failed_str)
. $middle_str
. $list_str . "\n"
. "-" x $Columns
- . "\n.\n";
+ . "\n";
- my $fmt = "format STDOUT =\n"
- . "@" . "<" x ($max_namelen - 1)
+ my $fmt1 = "@" . "<" x ($max_namelen - 1)
. " @>> @>>>> @>>>> @>>> ^##.##% "
- . "^" . "<" x ($list_len - 1) . "\n"
- . '{ $Curtest->{name}, $Curtest->{estat},'
- . ' $Curtest->{wstat}, $Curtest->{max},'
- . ' $Curtest->{failed}, $Curtest->{percent},'
- . ' $Curtest->{canon}'
- . "\n}\n"
- . "~~" . " " x ($Columns - $list_len - 2) . "^"
- . "<" x ($list_len - 1) . "\n"
- . '$Curtest->{canon}'
- . "\n.\n";
-
- eval $fmt_top;
- die $@ if $@;
- eval $fmt;
- die $@ if $@;
-
- return($fmt_top, $fmt);
+ . "^" . "<" x ($list_len - 1) . "\n";
+ my $fmt2 = "~~" . " " x ($Columns - $list_len - 2) . "^"
+ . "<" x ($list_len - 1) . "\n";
+
+ return($fmt_top, $fmt1, $fmt2);
}
-sub _canonfailed ($$@) {
- my($max,$skipped,@failed) = @_;
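+# _canondetail($max, $skipped, $type, @test_numbers) condenses a list
+# of test numbers into a summary such as "FAILED tests 2-5, 9" plus a
+# canonical range string; for failures it also reports the percentage
+# of tests that passed.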
+sub _canondetail {
+ my $max = shift;
+ my $skipped = shift;
+ my $type = shift;
+ my @detail = @_;
+
my %seen;
- @failed = sort {$a <=> $b} grep !$seen{$_}++, @failed;
- my $failed = @failed;
+ @detail = sort {$a <=> $b} grep !$seen{$_}++, @detail;
+ my $detail = @detail;
my @result = ();
my @canon = ();
my $min;
- my $last = $min = shift @failed;
+ my $last = $min = shift @detail;
my $canon;
- if (@failed) {
- for (@failed, $failed[-1]) { # don't forget the last one
+ my $uc_type = uc($type);
+ if (@detail) {
+ for (@detail, $detail[-1]) { # don't forget the last one
if ($_ > $last+1 || $_ == $last) {
push @canon, ($min == $last) ? $last : "$min-$last";
$min = $_;
$last = $_;
}
local $" = ", ";
- push @result, "FAILED tests @canon\n";
+ push @result, "$uc_type tests @canon\n";
$canon = join ' ', @canon;
}
else {
- push @result, "FAILED test $last\n";
+ push @result, "$uc_type test $last\n";
$canon = $last;
}
- push @result, "\tFailed $failed/$max tests, ";
+ return (join("", @result), $canon)
+ if $type=~/todo/i;
+ push @result, "\t$type $detail/$max tests, ";
if ($max) {
- push @result, sprintf("%.2f",100*(1-$failed/$max)), "% okay";
+ push @result, sprintf("%.2f",100*(1-$detail/$max)), "% okay";
}
else {
push @result, "?% okay";
}
my $ender = 's' x ($skipped > 1);
if ($skipped) {
- my $good = $max - $failed - $skipped;
+ my $good = $max - $detail - $skipped;
my $skipmsg = " (less $skipped skipped test$ender: $good okay, ";
if ($max) {
my $goodper = sprintf("%.2f",100*($good/$max));
}
push @result, "\n";
my $txt = join "", @result;
- ($txt, $canon);
+ return ($txt, $canon);
}
-=end _private
-
-=back
-
-=cut
-
-
1;
__END__
C<&runtests> is exported by Test::Harness by default.
-C<$verbose>, C<$switches> and C<$debug> are exported upon request.
+C<&execute_tests>, C<$verbose>, C<$switches> and C<$debug> are
+exported upon request.
=head1 DIAGNOSTICS
the moment runtests() was called. Putting absolute path into
C<HARNESS_FILELEAK_IN_DIR> may give more predictable results.
-=item C<HARNESS_IGNORE_EXITCODE>
-
-Makes harness ignore the exit status of child processes when defined.
-
=item C<HARNESS_NOTTY>
When set to a true value, forces it to behave as though STDOUT were
=head1 BUGS
-HARNESS_COMPILE_TEST currently assumes it's run from the Perl source
-directory.
+Please report any bugs or feature requests to
+C<bug-test-harness at rt.cpan.org>, or through the web interface at
+L<http://rt.cpan.org/NoAuth/ReportBug.html?Queue=Test-Harness>.
+I will be notified, and then you'll automatically be notified of progress on
+your bug as I make changes.
+
+=head1 SUPPORT
+
+You can find documentation for this module with the F<perldoc> command.
+
+ perldoc Test::Harness
+
+You can get docs for F<prove> with
+
+ prove --man
+
+You can also look for information at:
+
+=over 4
+
+=item * AnnoCPAN: Annotated CPAN documentation
+
+L<http://annocpan.org/dist/Test-Harness>
+
+=item * CPAN Ratings
+
+L<http://cpanratings.perl.org/d/Test-Harness>
+
+=item * RT: CPAN's request tracker
+
+L<http://rt.cpan.org/NoAuth/Bugs.html?Dist=Test-Harness>
+
+=item * Search CPAN
+
+L<http://search.cpan.org/dist/Test-Harness>
+
+=back
+
+=head1 SOURCE CODE
-Please use the CPAN bug ticketing system at L<http://rt.cpan.org/>.
-You can also mail bugs, fixes and enhancements to
-C<< <bug-test-harness >> at C<< rt.cpan.org> >>.
+The source code repository for Test::Harness is at
+L<http://svn.perl.org/modules/Test-Harness>.
=head1 AUTHORS
Either Tim Bunce or Andreas Koenig, we don't know. What we know for
-sure is, that it was inspired by Larry Wall's TEST script that came
+sure is that it was inspired by Larry Wall's F<TEST> script that came
with perl distributions for ages. Numerous anonymous contributors
exist. Andreas Koenig held the torch for many years, and then
Michael G Schwern.
-# $Id: Assert.pm 250 2003-09-11 15:57:29Z andy $
-
package Test::Harness::Assert;
use strict;
=head1 AUTHOR
-Michael G Schwern C<< <schwern@pobox.com> >>
+Michael G Schwern C<< <schwern at pobox.com> >>
=head1 SEE ALSO
Revision history for Perl extension Test::Harness
+2.57_05 Wed Apr 19 00:31:10 CDT 2006
+ [ENHANCEMENTS]
+ * Now shows details of the tests that unexpectedly pass, instead of
+ just giving a number. Thanks, demerphq!
+
+ [INTERNALS]
+ * Fixed globbing to work under Perls before 5.6.0. Before Perl 5.6.0,
+ prove just uses the internal glob() function.
+
+2.57_04 Mon Apr 17 13:35:10 CDT 2006
+ [ENHANCEMENTS]
+ * prove's globbing is now done with File::Glob::bsd_glob().
+ Otherwise, "prove c:\program files\svk\t\*" fails because glob()
+ considers it to be two patterns, splitting on whitespace. Thanks to
+ Audrey Tang.
+
+ [DOCUMENTATION]
+ * Added information about other TAP implementations in other languages.
+
+2.57_03 Dec 31 2005
+
+ [THINGS THAT MAY BREAK YOUR CODE]
+ * Internal functions _run_all_tests() and _show_results() no longer
+ exist. You shouldn't have been using them anyway since they're
+ prepended with underscores.
+
+ [INTERNALS]
+ * Added the ability to send test output to a filehandle of
+ one's choosing. Two internal functions are now exposed:
+ execute_tests() and get_results() (formerly _run_all_tests() and
+ _show_results()). This should allow CPANPLUS to work properly
+ with Module::Build. Thanks to Ken Williams.
+
+ [DOCUMENTATION]
+ * Hid the documentation for the private methods in Test::Harness::Straps.
+
+2.57_02 Fri Dec 30 23:51:17 CST 2005
+ [THINGS THAT MAY BREAK YOUR CODE]
+ * prove's --ext option has been removed. I'm betting that nobody used it.
+
+ [ENHANCEMENTS]
+ * prove can now take -w and -W switches, analogous to those in perl.
+ This means that "prove -wlb t/*.t" is exactly the same as "make test".
+ Thanks to Rob Kinyon.
+ * Started a Test::Harness::Util module for code that may be reused
+ by other Harness-using modules.
+
+ [INTERNALS]
+ * The t/prove*.t tests now use $^X to call prove. Thanks to Yves Orton.
+ * Test::Harness::Straps no longer uses Win32::GetShortPathName().
+ Thanks to Gisle Aas.
+
+2.57_01 Mon Dec 26 01:39:07 CST 2005
+ [FIXES]
+ * Removed code and docs mentioning HARNESS_IGNORE_EXITCODE, which
+ is not used anywhere.
+
+ [ENHANCEMENTS]
+ * If we have hi-res timings, then they're shown in integer
+ milliseconds, rather than fractional seconds.
+
+ * Added the --perl switch to prove.
+
+ [DOCUMENTATION]
+ * Added links to CPAN support sites.
+
2.56 Wed Sep 28 16:04:00 CDT 2005
[FIXES]
* Incorporate bleadperl patch to fix Test::Harness on VMS.
return $self;
}
-my $test_line_regex = qr/
- ^
- (not\ )? # failure?
- ok\b
- (?:\s+(\d+))? # optional test number
- \s*
- (.*) # and the rest
-/ox;
-
=head1 from_test_line( $line )
Constructor from a TAP test line, or empty return if the test line
my $line = shift or return;
# We pulverize the line down into pieces in three parts.
- my ($not, $number, $extra) = ($line =~ $test_line_regex ) or return;
+ my ($not, $number, $extra) = ($line =~ /^(not )?ok\b(?:\s+(\d+))?\s*(.*)/) or return;
my $point = $class->new;
$point->set_number( $number );
return $self;
}
-=head2 $strap->_init
+=for private $strap->_init
$strap->_init;
return $line;
}
-=head2 $strap->analyze_fh( $name, $test_filehandle )
+=for private $strap->analyze_fh( $name, $test_filehandle )
my %results = $strap->analyze_fh($name, $test_filehandle);
*_wait2exit = sub { POSIX::WEXITSTATUS($_[0]) }
}
-=head2 $strap->_command_line( $file )
+=for private $strap->_command_line( $file )
Returns the full command line that will be run to test I<$file>.
}
-=head2 $strap->_command()
+=for private $strap->_command()
Returns the command that runs the test. Combine this with C<_switches()>
to build a command line.
sub _command {
my $self = shift;
- return $ENV{HARNESS_PERL} if defined $ENV{HARNESS_PERL};
- return qq("$^X") if $self->{_is_win32} && $^X =~ /[^\w\.\/\\]/;
+ return $ENV{HARNESS_PERL} if defined $ENV{HARNESS_PERL};
+ return qq["$^X"] if $self->{_is_win32} && ($^X =~ /[^\w\.\/\\]/);
return $^X;
}
-=head2 $strap->_switches( $file )
+=for private $strap->_switches( $file )
Formats and returns the switches necessary to run the test.
return join( " ", @existing_switches, @derived_switches );
}
-=head2 $strap->_cleaned_switches( @switches_from_user )
+=for private $strap->_cleaned_switches( @switches_from_user )
Returns only defined, non-blank, trimmed switches from the parms passed.
return @switches;
}
-=head2 $strap->_INC2PERL5LIB
+=for private $strap->_INC2PERL5LIB
local $ENV{PERL5LIB} = $self->_INC2PERL5LIB;
return join $Config{path_sep}, $self->_filtered_INC;
}
-=head2 $strap->_filtered_INC()
+=for private $strap->_filtered_INC()
my @filtered_inc = $self->_filtered_INC;
}
-=head2 $strap->_restore_PERL5LIB()
+=for private $strap->_restore_PERL5LIB()
$self->_restore_PERL5LIB;
Methods for identifying what sort of line you're looking at.
-=head2 C<_is_diagnostic>
+=for private _is_diagnostic
my $is_diagnostic = $strap->_is_diagnostic($line, \$comment);
}
}
-=head2 C<_is_header>
+=for private _is_header
my $is_header = $strap->_is_header($line);
}
}
-=head2 C<_is_bail_out>
+=for private _is_bail_out
my $is_bail_out = $strap->_is_bail_out($line, \$reason);
}
}
-=head2 C<_reset_file_state>
+=for private _reset_file_state
$strap->_reset_file_state;
=head1 AUTHOR
-Michael G Schwern C<< <schwern@pobox.com> >>, currently maintained by
-Andy Lester C<< <andy@petdance.com> >>.
+Michael G Schwern C<< <schwern at pobox.com> >>, currently maintained by
+Andy Lester C<< <andy at petdance.com> >>.
=head1 SEE ALSO
ok - board has 7 tiles + starter tile
1..9
+=head1 Non-Perl TAP
+
+In Perl, we use Test::Simple and Test::More to generate TAP output.
+Other languages have solutions that generate TAP, so that they can take
+advantage of Test::Harness.
+
+The following sections are provided by their maintainers, and may not
+be up-to-date.
+
+=head2 C/C++
+
+libtap makes it easy to write test programs in C that produce
+TAP-compatible output. Modeled on the Test::More API, libtap contains
+all the functions you need to:
+
+=over 4
+
+=item * Specify a test plan
+
+=item * Run tests
+
+=item * Skip tests in certain situations
+
+=item * Have TODO tests
+
+=item * Produce TAP compatible diagnostics
+
+=back
+
+More information about libtap, including download links, checksums,
+anonymous access to the Subversion repository, and a bug tracking
+system, can be found at:
+
+ http://jc.ngo.org.uk/trac-bin/trac.cgi/wiki/LibTap
+
+(Nik Clayton, April 17, 2006)
+
+=head2 Python
+
+PyTap will, when it's done, provide a simple, assertive (Test::More-like)
+interface for writing tests in Python. It will output TAP and will
+include the functionality found in Test::Builder and Test::More. It will
+try to make it easy to add more test code (so you can write your own
+C<TAP.StringDiff>, for example).
+
+Right now, it's got a fair bit of the basics needed to emulate Test::More,
+and I think it's easy to add more stuff -- just like Test::Builder,
+there's a singleton that you can get at easily.
+
+I need to better identify and finish implementing the most basic tests.
+I am not a Python guru, I just use it from time to time, so my aim may
+not be true. I need to write tests for it, which means either relying
+on Perl for the tester tester, or writing one in Python.
+
+Here's a sample test, as found in my Subversion:
+
+ from TAP.Simple import *
+
+ plan(15)
+
+ ok(1)
+ ok(1, "everything is OK!")
+ ok(0, "always fails")
+
+ is_ok(10, 10, "is ten ten?")
+ is_ok(ok, ok, "even ok is ok!")
+ ok(id(ok), "ok is not the null pointer")
+ ok(True, "the Truth will set you ok")
+ ok(not False, "and nothing but the truth")
+ ok(False, "and we'll know if you lie to us")
+
+ isa_ok(10, int, "10")
+ isa_ok('ok', str, "some string")
+
+ ok(0, "zero is true", todo="be more like Ruby!")
+ ok(None, "none is true", skip="not possible in this universe")
+
+ eq_ok("not", "equal", "two strings are not equal");
+
+(Ricardo Signes, April 17, 2006)
+
+=head2 JavaScript
+
+Test.Simple looks and acts just like TAP, although in reality it's
+tracking test results in an object rather than scraping them from a
+print buffer.
+
+ http://openjsan.org/doc/t/th/theory/Test/Simple/
+
+(David Wheeler, April 17, 2006)
+
+=head2 PHP
+
+All the big PHP players now produce TAP:
+
+=over
+
+=item * phpt
+
+Outputs TAP by default as of the yet-to-be-released PEAR 1.5.0
+
+ http://pear.php.net/PEAR
+
+=item * PHPUnit
+
+Has a TAP logger (since 2.3.4)
+
+ http://www.phpunit.de/wiki/Main_Page
+
+=item * SimpleTest
+
+There's a third-party TAP reporting extension for SimpleTest
+
+ http://www.digitalsandwich.com/archives/51-Updated-Simpletest+Apache-Test.html
+
+=item * Apache-Test
+
+Apache-Test's PHP writes TAP by default and includes the standalone
+test-more.php
+
+ http://search.cpan.org/dist/Apache-Test/
+
+=back
+
+(Geoffrey Young, April 17, 2006)
+
=head1 AUTHORS
Andy Lester, based on the original Test::Harness documentation by Michael Schwern.
--- /dev/null
+package Test::Harness::Util;
+
+use strict;
+use vars qw($VERSION);
+$VERSION = '0.01';
+
+use Exporter;
+use File::Spec;
+use vars qw( @ISA @EXPORT @EXPORT_OK );
+
+@ISA = qw( Exporter );
+@EXPORT = ();
+@EXPORT_OK = qw( all_in shuffle blibdirs );
+
+=head1 NAME
+
+Test::Harness::Util - Utility functions for Test::Harness::*
+
+=head1 SYNOPSIS
+
+Utility functions for Test::Harness::*
+
+=head1 PUBLIC FUNCTIONS
+
+The following are all available to be imported to your module. No symbols
+are exported by default.
+
+=head2 all_in( {parm => value, parm => value} )
+
+Finds all the F<*.t> files in a directory. Knows to skip F<.svn> and
+F<CVS> directories. An example call is sketched below the parameter
+list.
+
+Valid parms are:
+
+=over
+
+=item start
+
+Starting point for the search. Defaults to ".".
+
+=item recurse
+
+Flag to say whether it should recurse. Defaults to true.
+
+=back
+
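+For example, a minimal sketch (assuming the usual F<t/> directory
+exists):
+
+    use Test::Harness::Util qw( all_in );
+
+    my @tests = all_in( { start => 't', recurse => 0 } );
+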
+=cut
+
+sub all_in {
+ my $parms = shift;
+ my %parms = (
+ start => ".",
+ recurse => 1,
+ %$parms,
+ );
+
+ my @hits = ();
+ my $start = $parms{start};
+
+ local *DH;
+ if ( opendir( DH, $start ) ) {
+ my @files = sort readdir DH;
+ closedir DH;
+ for my $file ( @files ) {
+ next if $file eq File::Spec->updir || $file eq File::Spec->curdir;
+ next if $file eq ".svn";
+ next if $file eq "CVS";
+
+ my $currfile = File::Spec->catfile( $start, $file );
+ if ( -d $currfile ) {
+ push( @hits, all_in( { %parms, start => $currfile } ) ) if $parms{recurse};
+ }
+ else {
+ push( @hits, $currfile ) if $currfile =~ /\.t$/;
+ }
+ }
+ }
+ else {
+ warn "$start: $!\n";
+ }
+
+ return @hits;
+}
+
+=head2 shuffle( @list )
+
+Shuffles I<@list> in place using a Fisher-Yates shuffle.
+
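+Because the shuffle happens in place, a minimal sketch of a call is
+(the file names are placeholders):
+
+    use Test::Harness::Util qw( shuffle );
+
+    my @tests = ( 't/a.t', 't/b.t', 't/c.t' );
+    shuffle(@tests);    # @tests is now in random order
+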
+=cut
+
+sub shuffle {
+ # Fisher-Yates shuffle
+ my $i = @_;
+ while ($i) {
+ my $j = rand $i--;
+ @_[$i, $j] = @_[$j, $i];
+ }
+}
+
+
+=head2 blibdirs()
+
+Finds all the blib directories. Stolen directly from blib.pm.
+
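+A minimal sketch of typical use (assumes the distribution has already
+been built, so F<blib/> exists):
+
+    use Test::Harness::Util qw( blibdirs );
+
+    unshift @INC, blibdirs();
+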
+=cut
+
+sub blibdirs {
+ my $dir = File::Spec->curdir;
+ if ($^O eq 'VMS') {
+ ($dir = VMS::Filespec::unixify($dir)) =~ s-/\z--;
+ }
+ my $archdir = "arch";
+ if ( $^O eq "MacOS" ) {
+ # Double up the MP::A so that it's not used only once.
+ $archdir = $MacPerl::Architecture = $MacPerl::Architecture;
+ }
+
+ my $i = 5;
+ while ($i--) {
+ my $blib = File::Spec->catdir( $dir, "blib" );
+ my $blib_lib = File::Spec->catdir( $blib, "lib" );
+ my $blib_arch = File::Spec->catdir( $blib, $archdir );
+
+ if ( -d $blib && -d $blib_arch && -d $blib_lib ) {
+ return ($blib_arch,$blib_lib);
+ }
+ $dir = File::Spec->catdir($dir, File::Spec->updir);
+ }
+ warn "$0: Cannot find blib\n";
+ return;
+}
+
+1;
use strict;
use Test::Harness;
+use Test::Harness::Util qw( all_in blibdirs shuffle );
+
use Getopt::Long;
use Pod::Usage 1.12;
use File::Spec;
use vars qw( $VERSION );
$VERSION = "1.04";
-my @ext = ();
my $shuffle = 0;
my $dry = 0;
my $blib = 0;
'H|man' => sub {pod2usage({-verbose => 2}); exit},
'I=s@' => \@includes,
'l|lib' => \$lib,
+    'perl=s'       => \$ENV{HARNESS_PERL},
'r|recurse' => \$recurse,
's|shuffle' => \$shuffle,
't' => sub { unshift @switches, "-t" }, # Always want -t up front
'T' => sub { unshift @switches, "-T" }, # Always want -T up front
+ 'w' => sub { push @switches, '-w' },
+ 'W' => sub { push @switches, '-W' },
'timer' => \$Test::Harness::Timer,
'v|verbose' => \$Test::Harness::verbose,
'V|version' => sub { print_version(); exit; },
- 'ext=s@' => \@ext,
) or exit 1;
$ENV{TEST_VERBOSE} = 1 if $Test::Harness::verbose;
-# Build up extensions regex
-@ext = map { split /,/ } @ext;
-s/^\.// foreach @ext;
-@ext = ("t") unless @ext;
-my $ext_regex = join( "|", map { quotemeta } @ext );
-$ext_regex = qr/\.($ext_regex)$/;
-
# Handle blib includes
if ( $blib ) {
my @blibdirs = blibdirs();
if ( @blibdirs ) {
unshift @includes, @blibdirs;
- } else {
+ }
+ else {
warn "No blib directories found.\n";
}
}
$Test::Harness::Switches = join( " ", @switches );
print "# \$Test::Harness::Switches: $Test::Harness::Switches\n" if $Test::Harness::debug;
-my @tests;
@ARGV = File::Spec->curdir unless @ARGV;
-push( @tests, -d $_ ? all_in( $_ ) : $_ ) for map { glob } @ARGV;
+my @argv_globbed;
+my @tests;
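+# Expand shell metacharacters ourselves.  File::Glob::bsd_glob() copes
+# with spaces in patterns (e.g. "C:\Program Files\..."), which the
+# builtin glob() would split on; fall back to glob() on perls older
+# than 5.6.0.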
+if ( $] >= 5.006 ) {
+ require File::Glob;
+ @argv_globbed = map { File::Glob::bsd_glob($_) } @ARGV;
+}
+else {
+ @argv_globbed = map { glob } @ARGV;
+}
+
+for ( @argv_globbed ) {
+ push( @tests, -d $_ ? all_in( { recurse => $recurse, start => $_ } ) : $_ )
+}
if ( @tests ) {
shuffle(@tests) if $shuffle;
if ( $dry ) {
print join( "\n", @tests, "" );
- } else {
+ }
+ else {
print "# ", scalar @tests, " tests to run\n" if $Test::Harness::debug;
runtests(@tests);
}
}
-sub all_in {
- my $start = shift;
-
- my @hits = ();
-
- local *DH;
- if ( opendir( DH, $start ) ) {
- my @files = sort readdir DH;
- closedir DH;
- for my $file ( @files ) {
- next if $file eq File::Spec->updir || $file eq File::Spec->curdir;
- next if $file eq ".svn";
- next if $file eq "CVS";
-
- my $currfile = File::Spec->catfile( $start, $file );
- if ( -d $currfile ) {
- push( @hits, all_in( $currfile ) ) if $recurse;
- } else {
- push( @hits, $currfile ) if $currfile =~ $ext_regex;
- }
- }
- } else {
- warn "$start: $!\n";
- }
-
- return @hits;
-}
-
-sub shuffle {
- # Fisher-Yates shuffle
- my $i = @_;
- while ($i) {
- my $j = rand $i--;
- @_[$i, $j] = @_[$j, $i];
- }
-}
-
sub print_version {
printf( "prove v%s, using Test::Harness v%s and Perl v%vd\n",
$VERSION, $Test::Harness::VERSION, $^V );
}
-# Stolen directly from blib.pm
-sub blibdirs {
- my $dir = File::Spec->curdir;
- if ($^O eq 'VMS') {
- ($dir = VMS::Filespec::unixify($dir)) =~ s-/\z--;
- }
- my $archdir = "arch";
- if ( $^O eq "MacOS" ) {
- # Double up the MP::A so that it's not used only once.
- $archdir = $MacPerl::Architecture = $MacPerl::Architecture;
- }
-
- my $i = 5;
- while ($i--) {
- my $blib = File::Spec->catdir( $dir, "blib" );
- my $blib_lib = File::Spec->catdir( $blib, "lib" );
- my $blib_arch = File::Spec->catdir( $blib, $archdir );
-
- if ( -d $blib && -d $blib_arch && -d $blib_lib ) {
- return ($blib_arch,$blib_lib);
- }
- $dir = File::Spec->catdir($dir, File::Spec->updir);
- }
- warn "$0: Cannot find blib\n";
- return;
-}
-
__END__
=head1 NAME
prove [options] [files/directories]
-Options:
+=head1 OPTIONS
- -b, --blib Adds blib/lib to the path for your tests, a la "use blib".
- -d, --debug Includes extra debugging information.
- -D, --dry Dry run: Show the tests to run, but don't run them.
- --ext=x Extensions (defaults to .t)
+ -b, --blib Adds blib/lib to the path for your tests, a la "use blib"
+ -d, --debug Includes extra debugging information
+ -D, --dry Dry run: Show the tests to run, but don't run them
-h, --help Display this help
-H, --man Longer manpage for prove
-I Add libraries to @INC, as Perl's -I
- -l, --lib Add lib to the path for your tests.
- -r, --recurse Recursively descend into directories.
- -s, --shuffle Run the tests in a random order.
+ -l, --lib Add lib to the path for your tests
+ --perl Sets the name of the Perl executable to use
+ -r, --recurse Recursively descend into directories
+ -s, --shuffle Run the tests in a random order
-T Enable tainting checks
-t Enable tainting warnings
--timer Print elapsed time after each test file
- -v, --verbose Display standard output of test scripts while running them.
+ -v, --verbose Display standard output of test scripts while running them
-V, --version Display version info
Single-character options may be stacked. Default options may be set by
current directory.
Shell metacharacters may be used with command line options and will be expanded
-via C<glob>.
+via C<File::Glob::bsd_glob>.
=head1 PROVE VS. "MAKE TEST"
Dry run: Show the tests to run, but don't run them.
-=head2 --ext=extension
-
-Specify extensions of the test files to run. By default, these are .t,
-but you may have other non-.t test files, most likely .sh shell scripts.
-The --ext is repeatable.
-
=head2 -I
Add libraries to @INC, as Perl's -I.
Add C<lib> to @INC. Equivalent to C<-Ilib>.
+=head2 --perl
+
+Sets the C<HARNESS_PERL> environment variable, which controls what
+Perl executable will run the tests.
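+
+For example (the interpreter path below is only illustrative):
+
+    prove --perl=/opt/perl/bin/perl t/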
+
=head2 -r, --recurse
Descends into subdirectories of any directories specified, looking for tests.
=head1 AUTHORS
-Andy Lester C<< <andy@petdance.com> >>
+Andy Lester C<< <andy at petdance.com> >>
=head1 COPYRIGHT
-Copyright 2005 by Andy Lester C<< <andy@petdance.com> >>.
+Copyright 2005 by Andy Lester C<< <andy at petdance.com> >>.
This program is free software; you can redistribute it and/or
modify it under the same terms as Perl itself.
tie *NULL, 'Dev::Null' or die $!;
select NULL;
-my($tot, $failed) = Test::Harness::_run_all_tests(
- $ENV{PERL_CORE}
- ? 'lib/sample-tests/inc_taint'
- : 't/sample-tests/inc_taint'
+my($tot, $failed) = Test::Harness::execute_tests(
+ tests => [ $ENV{PERL_CORE} ? 'lib/sample-tests/inc_taint' : 't/sample-tests/inc_taint' ]
);
select STDOUT;
plan tests => 1;
-my $prove = File::Spec->catfile( File::Spec->curdir, "blib", "script", "prove" );
my $tests = File::Spec->catfile( 't', 'prove*.t' );
+my $prove = File::Spec->catfile( File::Spec->curdir, "blib", "script", "prove" );
+$prove = "$^X $prove";
GLOBBAGE: {
my @actual = sort qx/$prove --dry $tests/;
my $blib_lib = File::Spec->catfile( $blib, "lib" );
my $blib_arch = File::Spec->catfile( $blib, "arch" );
my $prove = File::Spec->catfile( $blib, "script", "prove" );
+$prove = "$^X $prove";
CAPITAL_TAINT: {
local $ENV{PROVE_SWITCHES};
#!/usr/bin/perl -Tw
+use strict;
+
BEGIN {
if( $ENV{PERL_CORE} ) {
chdir 't';
}
}
-use strict;
-
use Test::More tests => 89;
BEGIN { use_ok('Test::Harness::Straps'); }
},
);
-plan tests => (keys(%samples) * 7);
+my $tests_per_loop = 8;
+plan tests => (keys(%samples) * $tests_per_loop);
use Test::Harness;
my @_INC = map { qq{"-I$_"} } @INC;
for my $test ( sort keys %samples ) {
SKIP: {
- skip "-t introduced in 5.8.0", 7 if $test eq 'taint_warn' and $] < 5.008;
+ skip "-t introduced in 5.8.0", $tests_per_loop
+ if ($test eq 'taint_warn') && ($] < 5.008);
my $expect = $samples{$test};
- # _run_all_tests() runs the tests but skips the formatting.
+ # execute_tests() runs the tests but skips the formatting.
my($totals, $failed);
my $warning = '';
my $test_path = File::Spec->catfile($SAMPLE_TESTS, $test);
print STDERR "# $test\n" if $ENV{TEST_VERBOSE};
eval {
- select NULL; # _run_all_tests() isn't as quiet as it should be.
local $SIG{__WARN__} = sub { $warning .= join '', @_; };
- ($totals, $failed) =
- Test::Harness::_run_all_tests($test_path);
+ ($totals, $failed) = Test::Harness::execute_tests(tests => [$test_path], out => \*NULL);
};
- select STDOUT;
# $? is unreliable in MacPerl, so we'll just fudge it.
$failed->{estat} = $die_estat if $IsMacPerl and $failed;
}
SKIP: {
- skip "don't apply to a bailout", 5 if $test eq 'bailout';
+ skip "don't apply to a bailout", 6 if $test eq 'bailout';
is( $@, '' );
is( Test::Harness::_all_ok($totals), $expect->{all_ok},
"$test - all ok" );
keys %{$expect->{failed}}},
$expect->{failed},
"$test - failed" );
+
+ skip "No tests were run", 1 unless $totals->{max};
+
+ my $output = Test::Harness::get_results($totals, $failed);
+ like( $output, '/All tests successful|List of Failed/' );
}
my $expected_warnings = "";