package Test;
use Test::Harness 1.1601 ();
use Carp;
-use vars qw($VERSION @ISA @EXPORT $ntest %todo %history $TestLevel);
-$VERSION = '0.08';
+use vars (qw($VERSION @ISA @EXPORT $ntest $TestLevel), #public-ish
+ qw($ONFAIL %todo %history $planned @FAILDETAIL)); #private-ish
+$VERSION = '1.04';
require Exporter;
@ISA=('Exporter');
@EXPORT= qw(&plan &ok &skip $ntest);
sub plan {
croak "Test::plan(%args): odd number of arguments" if @_ & 1;
+ croak "Test::plan(): should not be called more than once" if $planned;
my $max=0;
for (my $x=0; $x < @_; $x+=2) {
my ($k,$v) = @_[$x,$x+1];
if ($k =~ /^test(s)?$/) { $max = $v; }
elsif ($k eq 'todo' or
$k eq 'failok') { for (@$v) { $todo{$_}=1; }; }
+ elsif ($k eq 'onfail') {
+ ref $v eq 'CODE' or croak "Test::plan(onfail => $v): must be CODE";
+ $ONFAIL = $v;
+ }
else { carp "Test::plan(): skipping unrecognized directive '$k'" }
}
my @todo = sort { $a <=> $b } keys %todo;
if (@todo) {
    print "1..$max todo ".join(' ', @todo).";\n";
} else {
print "1..$max\n";
}
+ ++$planned;
}
sub to_value {
    my ($v) = @_;
    (ref $v or '') eq 'CODE' ? $v->() : $v;
}
-# prototypes are not used for maximum flexibility
-
-# STDERR is NOT used for diagnostic output that should be fixed before
-# the module is released.
+# STDERR is NOT used for diagnostic output which should have been
+# fixed before release. Is this appropriate?
-sub ok {
+sub ok ($;$$) {
+ croak "ok: plan before you test!" if !$planned;
my ($pkg,$file,$line) = caller($TestLevel);
my $repetition = ++$history{"$file:$line"};
my $context = ("$file at line $line".
- ($repetition > 1 ? " (\#$repetition)" : ''));
+ ($repetition > 1 ? " fail \#$repetition" : ''));
my $ok=0;
-
+ my $result = to_value(shift);
+ my ($expected,$diag);
if (@_ == 0) {
- print "not ok $ntest\n";
- print "# test $context: DOESN'T TEST ANYTHING!\n";
+ $ok = $result;
} else {
- my $result = to_value(shift);
- my ($expected,$diag);
- if (@_ == 0) {
- $ok = $result;
+ $expected = to_value(shift);
+ # until regex can be manipulated like objects...
+ my ($regex,$ignore);
+ if (($regex) = ($expected =~ m,^ / (.+) / $,sx) or
+ ($ignore, $regex) = ($expected =~ m,^ m([^\w\s]) (.+) \1 $,sx)) {
+ $ok = $result =~ /$regex/;
} else {
- $expected = to_value(shift);
$ok = $result eq $expected;
}
- if ($todo{$ntest}) {
- if ($ok) {
- print "ok $ntest # Wow!\n";
+ }
+ if ($todo{$ntest}) {
+ if ($ok) {
+ print "ok $ntest # Wow! ($context)\n";
+ } else {
+ $diag = to_value(shift) if @_;
+ if (!$diag) {
+ print "not ok $ntest # (failure expected in $context)\n";
} else {
- $diag = to_value(shift) if @_;
+ print "not ok $ntest # (failure expected: $diag)\n";
+ }
+ }
+ } else {
+ print "not " if !$ok;
+ print "ok $ntest\n";
+
+ if (!$ok) {
+ my $detail = { 'repetition' => $repetition, 'package' => $pkg,
+ 'result' => $result };
+ $$detail{expected} = $expected if defined $expected;
+ $diag = $$detail{diagnostic} = to_value(shift) if @_;
+ if (!defined $expected) {
if (!$diag) {
- print "not ok $ntest # (failure expected)\n";
+ print STDERR "# Failed test $ntest in $context\n";
} else {
- print "not ok $ntest # (failure expected: $diag)\n";
+ print STDERR "# Failed test $ntest in $context: $diag\n";
}
- }
- } else {
- print "not " if !$ok;
- print "ok $ntest\n";
-
- if (!$ok) {
- $diag = to_value(shift) if @_;
- if (!defined $expected) {
- if (!$diag) {
- print STDERR "# Failed $context\n";
- } else {
- print STDERR "# Failed $context: $diag\n";
- }
+ } else {
+ my $prefix = "Test $ntest";
+ print STDERR "# $prefix got: '$result' ($context)\n";
+ $prefix = ' ' x (length($prefix) - 5);
+ if (!$diag) {
+ print STDERR "# $prefix Expected: '$expected'\n";
} else {
- print STDERR "# Got: '$result' ($context)\n";
- if (!$diag) {
- print STDERR "# Expected: '$expected'\n";
- } else {
- print STDERR "# Expected: '$expected' ($diag)\n";
- }
+ print STDERR "# $prefix Expected: '$expected' ($diag)\n";
}
}
+ push @FAILDETAIL, $detail;
}
}
++ $ntest;
$ok;
}
-sub skip {
+sub skip ($$;$$) {
if (to_value(shift)) {
print "ok $ntest # skip\n";
++ $ntest;
1;
} else {
- local($TestLevel) += 1; #ignore this stack frame
- ok(@_);
+ local($TestLevel) = $TestLevel+1; #ignore this stack frame
+ &ok;
}
}
+END {
+ $ONFAIL->(\@FAILDETAIL) if @FAILDETAIL && $ONFAIL;
+}
+
1;
__END__
use strict;
use Test;
- BEGIN { plan tests => 12, todo => [3,4] }
+ BEGIN { plan tests => 13, todo => [3,4] }
ok(0); # failure
ok(1); # success
ok(0, int(rand(2))); # (just kidding! :-)
my @list = (0,0);
- ok(scalar(@list), 3, "\@list=".join(',',@list)); #extra diagnostics
+ ok @list, 3, "\@list=".join(',',@list); #extra diagnostics
+ ok 'segmentation fault', '/(?i)success/'; #regex match
skip($feature_is_missing, ...); #do platform specific test
Packages should NOT be released with successful TODO tests. As soon
as a TODO test starts working, it should be promoted to a normal test
-and the new feature should be documented in the release notes.
+and the newly minted feature should be documented in the release
+notes (see the example after this list).
=back
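+
+For example, once test 3 in the synopsis above starts passing, it can
+be promoted by dropping its number from the todo list (a sketch reusing
+the synopsis plan):
+
+    BEGIN { plan tests => 13, todo => [4] }
+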
+=head1 ONFAIL
+
+ BEGIN { plan test => 4, onfail => sub { warn "CALL 911!" } }
+
+Test failures can trigger extra diagnostics at the end of the test
+run. C<onfail> is passed an array ref of hash refs that describe each
+test failure. Each hash will contain at least the following fields:
+package, repetition, and result. (The file, line, and test number are
+not included because their correspondence to a particular test is
+fairly weak.) If the test had an expected value or a diagnostic
+string, these will also be included.
+
+This optional feature might be used simply to print out the version of
+your package and/or how to report problems. It might also be used to
+generate extremely sophisticated diagnostics for a particular test
+failure. It's not a panacea, however. Core dumps or other
+unrecoverable errors will prevent the C<onfail> hook from running.
+(It is run inside an END block.) Besides, C<onfail> is probably
+overkill in the majority of cases. (Your test code should be simpler
+than the code it is testing, yes?)
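+
+As a sketch (the handler body, its variable names, and the report
+wording are only illustrative; the field names are the ones described
+above), an C<onfail> hook might dump each failure detail before asking
+for a bug report:
+
+    BEGIN {
+        plan tests => 4, onfail => sub {
+            my ($fails) = @_;              # array ref of failure hashes
+            for my $f (@$fails) {
+                warn "package $$f{package} got '$$f{result}'",
+                     (defined $$f{expected} ?
+                         ", expected '$$f{expected}'" : ''), "\n";
+                warn "diagnostic: $$f{diagnostic}\n"
+                    if defined $$f{diagnostic};
+            }
+            warn "Please include this output in your bug report.\n";
+        };
+    }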
+
=head1 SEE ALSO
L<Test::Harness> and various test coverage analysis tools.