package Test;
use Test::Harness 1.1601 ();
use Carp;
-use vars (qw($VERSION @ISA @EXPORT $ntest $TestLevel), #public-ish
- qw($ONFAIL %todo %history $planned @FAILDETAIL)); #private-ish
-$VERSION = '1.04';
+use vars (qw($VERSION @ISA @EXPORT @EXPORT_OK $ntest $TestLevel), #public-ish
+ qw($TESTOUT $ONFAIL %todo %history $planned @FAILDETAIL)); #private-ish
+$VERSION = '1.121';
require Exporter;
@ISA=('Exporter');
-@EXPORT= qw(&plan &ok &skip $ntest);
+@EXPORT=qw(&plan &ok &skip);
+@EXPORT_OK=qw($ntest $TESTOUT);
$TestLevel = 0; # how many extra stack frames to skip
$|=1;
#$^W=1; ?
$ntest=1;
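+# all test output goes through this handle (import $TESTOUT to redirect it)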
+$TESTOUT = *STDOUT{IO};
# Use of this variable is strongly discouraged. It is set mainly to
# help test coverage analyzers know which test is running.
}
my @todo = sort { $a <=> $b } keys %todo;
if (@todo) {
- print "1..$max todo ".join(' ', @todo).";\n";
+ print $TESTOUT "1..$max todo ".join(' ', @todo).";\n";
} else {
- print "1..$max\n";
+ print $TESTOUT "1..$max\n";
}
++$planned;
}
(ref $v or '') eq 'CODE' ? $v->() : $v;
}
-# STDERR is NOT used for diagnostic output which should have been
-# fixed before release. Is this appropriate?
-
sub ok ($;$$) {
croak "ok: plan before you test!" if !$planned;
my ($pkg,$file,$line) = caller($TestLevel);
$ok = $result;
} else {
$expected = to_value(shift);
- # until regex can be manipulated like objects...
my ($regex,$ignore);
- if (($regex) = ($expected =~ m,^ / (.+) / $,sx) or
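+ # a compiled qr// pattern (ref 'Regexp') can be matched against directly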
+ if ((ref($expected)||'') eq 'Regexp') {
+ $ok = $result =~ /$expected/;
+ } elsif (($regex) = ($expected =~ m,^ / (.+) / $,sx) or
($ignore, $regex) = ($expected =~ m,^ m([^\w\s]) (.+) \1 $,sx)) {
$ok = $result =~ /$regex/;
} else {
$ok = $result eq $expected;
}
}
- if ($todo{$ntest}) {
- if ($ok) {
- print "ok $ntest # Wow! ($context)\n";
- } else {
- $diag = to_value(shift) if @_;
- if (!$diag) {
- print "not ok $ntest # (failure expected in $context)\n";
- } else {
- print "not ok $ntest # (failure expected: $diag)\n";
- }
- }
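+ # mark a passing TODO test loudly: it is ready to be promoted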
+ my $todo = $todo{$ntest};
+ if ($todo and $ok) {
+ $context .= ' TODO?!';
+ print $TESTOUT "ok $ntest # ($context)\n";
} else {
- print "not " if !$ok;
- print "ok $ntest\n";
+ print $TESTOUT "not " if !$ok;
+ print $TESTOUT "ok $ntest\n";
if (!$ok) {
my $detail = { 'repetition' => $repetition, 'package' => $pkg,
- 'result' => $result };
+ 'result' => $result, 'todo' => $todo };
$$detail{expected} = $expected if defined $expected;
$diag = $$detail{diagnostic} = to_value(shift) if @_;
+ $context .= ' *TODO*' if $todo;
if (!defined $expected) {
if (!$diag) {
- print STDERR "# Failed test $ntest in $context\n";
+ print $TESTOUT "# Failed test $ntest in $context\n";
} else {
- print STDERR "# Failed test $ntest in $context: $diag\n";
+ print $TESTOUT "# Failed test $ntest in $context: $diag\n";
}
} else {
my $prefix = "Test $ntest";
- print STDERR "# $prefix got: '$result' ($context)\n";
+ print $TESTOUT "# $prefix got: '$result' ($context)\n";
$prefix = ' ' x (length($prefix) - 5);
+ if ((ref($expected)||'') eq 'Regexp') {
+ $expected = 'qr/'.$expected.'/';
+ } else {
+ $expected = "'$expected'";
+ }
if (!$diag) {
- print STDERR "# $prefix Expected: '$expected'\n";
+ print $TESTOUT "# $prefix Expected: $expected\n";
} else {
- print STDERR "# $prefix Expected: '$expected' ($diag)\n";
+ print $TESTOUT "# $prefix Expected: $expected ($diag)\n";
}
}
push @FAILDETAIL, $detail;
}
sub skip ($$;$$) {
- if (to_value(shift)) {
- print "ok $ntest # skip\n";
+ my $whyskip = to_value(shift);
+ if ($whyskip) {
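+ # a bare numeric flag carries no reason; keep the generic "skip" marker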
+ $whyskip = 'skip' if $whyskip =~ m/^\d+$/;
+ print $TESTOUT "ok $ntest # $whyskip\n";
++ $ntest;
1;
} else {
use strict;
use Test;
- BEGIN { plan tests => 13, todo => [3,4] }
+
+ # use a BEGIN block so we print our plan before MyModule is loaded
+ BEGIN { plan tests => 14, todo => [3,4] }
+
+ # load your module...
+ use MyModule;
ok(0); # failure
ok(1); # success
ok(0,1); # failure: '0' ne '1'
ok('broke','fixed'); # failure: 'broke' ne 'fixed'
ok('fixed','fixed'); # success: 'fixed' eq 'fixed'
+ ok('fixed',qr/x/); # success: 'fixed' =~ qr/x/
ok(sub { 1+1 }, 2); # success: '2' eq '2'
ok(sub { 1+1 }, 3); # failure: '2' ne '3'
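+ # skip() handles platform specific tests; $feature_is_missing and
+ # use_feature() are hypothetical names:
+ skip($feature_is_missing, sub { use_feature() }, 1);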
=head1 DESCRIPTION
-Test::Harness expects to see particular output when it executes tests.
-This module aims to make writing proper test scripts just a little bit
-easier (and less error prone :-).
+L<Test::Harness> expects to see particular output when it executes
+tests. This module aims to make writing proper test scripts just a
+little bit easier (and less error prone :-).
=head1 TEST TYPES
=item * NORMAL TESTS
These tests are expected to succeed. If they don't, something's
screwed up!
=item * SKIPPED TESTS
-Skip tests need a platform specific feature that might or might not be
-available. The first argument should evaluate to true if the required
-feature is NOT available. After the first argument, skip tests work
+Skip is for tests that might or might not be possible to run depending
+on the availability of platform-specific features. The first argument
+should evaluate to true (think "yes, please skip") if the required
+feature is not available. After the first argument, skip works
exactly the same way as do normal tests.
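+
+For example, to exercise C<symlink> only where it is implemented (the
+C<eval> probe is the usual idiom; C<$from> and C<$to> stand for paths
+the test prepares):
+
+  my $no_symlinks = !eval { symlink('',''); 1 };
+  skip($no_symlinks, sub { symlink($from, $to) }, 1);
+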
=item * TODO TESTS
-TODO tests are designed for maintaining an executable TODO list.
-These tests are expected NOT to succeed (otherwise the feature they
-test would be on the new feature list, not the TODO list).
+TODO tests are designed for maintaining an B<executable TODO list>.
+These tests are expected NOT to succeed. If a TODO test does succeed,
+the feature in question should not be on the TODO list, now should it?
-Packages should NOT be released with successful TODO tests. As soon
+Packages should NOT be released with succeeding TODO tests. As soon
as a TODO test starts working, it should be promoted to a normal test
-and the newly minted feature should be documented in the release
-notes.
+and the newly working feature should be documented in the release
+notes or change log.
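+
+A minimal sketch (C<new_feature()> stands in for whatever is still
+unimplemented):
+
+  BEGIN { plan tests => 2, todo => [2] }
+  ok(1);                 # a normal test
+  ok(new_feature(), 1);  # expected to fail, for now
+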
=back
+=head1 RETURN VALUE
+
+In a scalar context, C<ok> and C<skip> both return true if the test
+succeeds and false otherwise.
+
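+For example, the result of one test can decide whether a dependent
+test is worth running (C<connect_ok()> and C<query()> are
+hypothetical helpers):
+
+  my $connected = ok(connect_ok());
+  skip(!$connected, sub { query() }, 1);
+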
=head1 ONFAIL
BEGIN { plan tests => 4, onfail => sub { warn "CALL 911!" } }
-The test failures can trigger extra diagnostics at the end of the test
-run. C<onfail> is passed an array ref of hash refs that describe each
-test failure. Each hash will contain at least the following fields:
-package, repetition, and result. (The file, line, and test number are
-not included because their correspondance to a particular test is
-fairly weak.) If the test had an expected value or a diagnostic
-string, these will also be included.
-
-This optional feature might be used simply to print out the version of
-your package and/or how to report problems. It might also be used to
-generate extremely sophisticated diagnostics for a particular test
-failure. It's not a panacea, however. Core dumps or other
-unrecoverable errors will prevent the C<onfail> hook from running.
-(It is run inside an END block.) Besides, C<onfail> is probably
-over-kill in the majority of cases. (Your test code should be simpler
+While test failures should be enough, extra diagnostics can be
+triggered at the end of a test run. C<onfail> is passed an array ref
+of hash refs that describe each test failure. Each hash will contain
+at least the following fields: C<package>, C<repetition>, and
+C<result>. (The file, line, and test number are not included because
+their correspondence to a particular test is tenuous.) If the test
+had an expected value or a diagnostic string, these will also be
+included.
+
+The B<optional> C<onfail> hook might be used simply to print out the
+version of your package and/or how to report problems. It might also
+be used to generate extremely sophisticated diagnostics for a
+particularly bizarre test failure. However, it's not a panacea. Core
+dumps or other unrecoverable errors prevent the C<onfail> hook from
+running. (It is run inside an C<END> block.) Besides, C<onfail> is
+probably overkill in most cases. (Your test code should be simpler
than the code it is testing, yes?)
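+
+A sketch of an C<onfail> hook that simply dumps each failure record,
+using only the fields documented above:
+
+  BEGIN {
+      plan tests => 4, onfail => sub {
+          my ($fails) = @_;          # array ref of hash refs
+          for my $f (@$fails) {
+              print STDERR "# $$f{package}: got '$$f{result}'";
+              print STDERR ", expected '$$f{expected}'"
+                  if exists $$f{expected};
+              print STDERR "\n";
+          }
+      };
+  }
+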
=head1 SEE ALSO
-L<Test::Harness> and various test coverage analysis tools.
+L<Test::Harness> and, perhaps, test coverage analysis tools.
=head1 AUTHOR