X-Git-Url: http://git.shadowcat.co.uk/gitweb/gitweb.cgi?a=blobdiff_plain;f=lib%2FTest.pm;h=60e9f7eee1021ac9a7267fd92a548a2c5f549807;hb=92dffb52e8549d6a194db47a2e5b989b8338a19a;hp=b10d104dedc0b416e5e779cf8c44c60c08175369;hpb=3238f5fe6293a28fd91d4f803f3a2df14d3a9498;p=p5sagit%2Fp5-mst-13.2.git

diff --git a/lib/Test.pm b/lib/Test.pm
index b10d104..60e9f7e 100644
--- a/lib/Test.pm
+++ b/lib/Test.pm
@@ -2,16 +2,19 @@ use strict;
 package Test;
 use Test::Harness 1.1601 ();
 use Carp;
-use vars qw($VERSION @ISA @EXPORT $ntest %todo %history $TestLevel);
-$VERSION = '0.08';
+our($VERSION, @ISA, @EXPORT, @EXPORT_OK, $ntest, $TestLevel); #public-ish
+our($TESTOUT, $ONFAIL, %todo, %history, $planned, @FAILDETAIL); #private-ish
+$VERSION = '1.14';
 require Exporter;
 @ISA=('Exporter');
-@EXPORT= qw(&plan &ok &skip $ntest);
+@EXPORT=qw(&plan &ok &skip);
+@EXPORT_OK=qw($ntest $TESTOUT);
 
 $TestLevel = 0;  # how many extra stack frames to skip
 $|=1;
 #$^W=1; ?
 $ntest=1;
+$TESTOUT = *STDOUT{IO};
 
 # Use of this variable is strongly discouraged. It is set mainly to
 # help test coverage analyzers know which test is running.
@@ -19,20 +22,26 @@ $ENV{REGRESSION_TEST} = $0;
 
 sub plan {
     croak "Test::plan(%args): odd number of arguments" if @_ & 1;
+    croak "Test::plan(): should not be called more than once" if $planned;
     my $max=0;
     for (my $x=0; $x < @_; $x+=2) {
         my ($k,$v) = @_[$x,$x+1];
         if ($k =~ /^test(s)?$/) { $max = $v; }
         elsif ($k eq 'todo' or
                $k eq 'failok') { for (@$v) { $todo{$_}=1; }; }
+        elsif ($k eq 'onfail') {
+            ref $v eq 'CODE' or croak "Test::plan(onfail => $v): must be CODE";
+            $ONFAIL = $v;
+        }
         else { carp "Test::plan(): skipping unrecognized directive '$k'" }
     }
     my @todo = sort { $a <=> $b } keys %todo;
     if (@todo) {
-        print "1..$max todo ".join(' ', @todo).";\n";
+        print $TESTOUT "1..$max todo ".join(' ', @todo).";\n";
     } else {
-        print "1..$max\n";
+        print $TESTOUT "1..$max\n";
     }
+    ++$planned;
 }
 
 sub to_value {
@@ -40,79 +49,93 @@ sub to_value {
     (ref $v or '') eq 'CODE' ? $v->() : $v;
 }
 
-# prototypes are not used for maximum flexibility
-
-# STDERR is NOT used for diagnostic output that should be fixed before
-# the module is released.
-
-sub ok {
+sub ok ($;$$) {
+    croak "ok: plan before you test!" if !$planned;
     my ($pkg,$file,$line) = caller($TestLevel);
     my $repetition = ++$history{"$file:$line"};
     my $context = ("$file at line $line".
-                   ($repetition > 1 ? " (\#$repetition)" : ''));
+                   ($repetition > 1 ? " fail \#$repetition" : ''));
     my $ok=0;
-
+    my $result = to_value(shift);
+    my ($expected,$diag);
     if (@_ == 0) {
-        print "not ok $ntest\n";
-        print "# test $context: DOESN'T TEST ANYTHING!\n";
+        $ok = $result;
     } else {
-        my $result = to_value(shift);
-        my ($expected,$diag);
-        if (@_ == 0) {
-            $ok = $result;
+        $expected = to_value(shift);
+        my ($regex,$ignore);
+        if (!defined $expected) {
+            $ok = !defined $result;
+        } elsif (!defined $result) {
+            $ok = 0;
+        } elsif ((ref($expected)||'') eq 'Regexp') {
+            $ok = $result =~ /$expected/;
+        } elsif (($regex) = ($expected =~ m,^ / (.+) / $,sx) or
+            ($ignore, $regex) = ($expected =~ m,^ m([^\w\s]) (.+) \1 $,sx)) {
+            $ok = $result =~ /$regex/;
         } else {
-            $expected = to_value(shift);
            $ok = $result eq $expected;
        }
-        if ($todo{$ntest}) {
-            if ($ok) {
-                print "ok $ntest # Wow!\n";
-            } else {
-                $diag = to_value(shift) if @_;
+    }
+    my $todo = $todo{$ntest};
+    if ($todo and $ok) {
+        $context .= ' TODO?!' if $todo;
+        print $TESTOUT "ok $ntest # ($context)\n";
+    } else {
+        print $TESTOUT "not " if !$ok;
+        print $TESTOUT "ok $ntest\n";
+
+        if (!$ok) {
+            my $detail = { 'repetition' => $repetition, 'package' => $pkg,
+                           'result' => $result, 'todo' => $todo };
+            $$detail{expected} = $expected if defined $expected;
+            $diag = $$detail{diagnostic} = to_value(shift) if @_;
+            $context .= ' *TODO*' if $todo;
+            if (!defined $expected) {
                if (!$diag) {
-                    print "not ok $ntest # (failure expected)\n";
+                    print $TESTOUT "# Failed test $ntest in $context\n";
                } else {
-                    print "not ok $ntest # (failure expected: $diag)\n";
+                    print $TESTOUT "# Failed test $ntest in $context: $diag\n";
                }
-            }
-        } else {
-            print "not " if !$ok;
-            print "ok $ntest\n";
-
-            if (!$ok) {
-                $diag = to_value(shift) if @_;
-                if (!defined $expected) {
-                    if (!$diag) {
-                        print STDERR "# Failed $context\n";
-                    } else {
-                        print STDERR "# Failed $context: $diag\n";
-                    }
+            } else {
+                my $prefix = "Test $ntest";
+                print $TESTOUT "# $prefix got: ".
+                    (defined $result? "'$result'":'<UNDEF>')." ($context)\n";
+                $prefix = ' ' x (length($prefix) - 5);
+                if ((ref($expected)||'') eq 'Regexp') {
+                    $expected = 'qr/'.$expected.'/'
                } else {
-                    print STDERR "# Got: '$result' ($context)\n";
-                    if (!$diag) {
-                        print STDERR "# Expected: '$expected'\n";
-                    } else {
-                        print STDERR "# Expected: '$expected' ($diag)\n";
-                    }
+                    $expected = "'$expected'";
+                }
+                if (!$diag) {
+                    print $TESTOUT "# $prefix Expected: $expected\n";
+                } else {
+                    print $TESTOUT "# $prefix Expected: $expected ($diag)\n";
                }
            }
+            push @FAILDETAIL, $detail;
        }
    }
    ++ $ntest;
    $ok;
 }
 
-sub skip {
-    if (to_value(shift)) {
-        print "ok $ntest # skip\n";
+sub skip ($$;$$) {
+    my $whyskip = to_value(shift);
+    if ($whyskip) {
+        $whyskip = 'skip' if $whyskip =~ m/^\d+$/;
+        print $TESTOUT "ok $ntest # $whyskip\n";
        ++ $ntest;
        1;
    } else {
-        local($TestLevel) += 1;  #ignore this stack frame
-        ok(@_);
+        local($TestLevel) = $TestLevel+1;  #ignore this stack frame
+        &ok;
    }
 }
 
+END {
+    $ONFAIL->(\@FAILDETAIL) if @FAILDETAIL && $ONFAIL;
+}
+
 1;
 __END__
 
@@ -124,7 +147,12 @@ __END__
 
   use strict;
   use Test;
-  BEGIN { plan tests => 12, todo => [3,4] }
+
+  # use a BEGIN block so we print our plan before MyModule is loaded
+  BEGIN { plan tests => 14, todo => [3,4] }
+
+  # load your module...
+  use MyModule;
 
   ok(0); # failure
   ok(1); # success
@@ -135,21 +163,23 @@ __END__
   ok(0,1);             # failure: '0' ne '1'
   ok('broke','fixed'); # failure: 'broke' ne 'fixed'
   ok('fixed','fixed'); # success: 'fixed' eq 'fixed'
+  ok('fixed',qr/x/);   # success: 'fixed' =~ qr/x/
 
   ok(sub { 1+1 }, 2);  # success: '2' eq '2'
   ok(sub { 1+1 }, 3);  # failure: '2' ne '3'
-  ok(0, int(rand(2));  # (just kidding! :-)
+  ok(0, int(rand(2));  # (just kidding :-)
 
   my @list = (0,0);
-  ok(scalar(@list), 3, "\@list=".join(',',@list));  #extra diagnostics
+  ok @list, 3, "\@list=".join(',',@list);   #extra diagnostics
+  ok 'segmentation fault', '/(?i)success/'; #regex match
 
   skip($feature_is_missing, ...);  #do platform specific test
 
 =head1 DESCRIPTION
 
-Test::Harness expects to see particular output when it executes tests.
-This module aims to make writing proper test scripts just a little bit
-easier (and less error prone :-).
+L<Test::Harness> expects to see particular output when it
+executes tests. This module aims to make writing proper test scripts just
+a little bit easier (and less error prone :-).
 
 =head1 TEST TYPES
 
@@ -157,35 +187,64 @@ easier (and less error prone :-).
 
 =over 4
 
 =item * NORMAL TESTS
 
-These tests are expected to succeed.  If they don't, something's
+These tests are expected to succeed.  If they don't something's
 screwed up!
 
 =item * SKIPPED TESTS
 
-Skip tests need a platform specific feature that might or might not be
-available. The first argument should evaluate to true if the required
-feature is NOT available. After the first argument, skip tests work
+Skip is for tests that might or might not be possible to run depending
+on the availability of platform specific features. The first argument
+should evaluate to true (think "yes, please skip") if the required
+feature is not available. After the first argument, skip works
 exactly the same way as do normal tests.
 
 =item * TODO TESTS
 
-TODO tests are designed for maintaining an executable TODO list.
-These tests are expected NOT to succeed (otherwise the feature they
-test would be on the new feature list, not the TODO list).
+TODO tests are designed for maintaining an B<executable TODO list>.
+These tests are expected NOT to succeed. If a TODO test does succeed,
+the feature in question should not be on the TODO list, now should it?
 
-Packages should NOT be released with successful TODO tests. As soon
+Packages should NOT be released with succeeding TODO tests. As soon
 as a TODO test starts working, it should be promoted to a normal test
-and the new feature should be documented in the release notes.
+and the newly working feature should be documented in the release
+notes or change log.
 
 =back
 
+=head1 RETURN VALUE
+
+Both C<ok> and C<skip> return true if their test succeeds and false
+otherwise in a scalar context.
+
+=head1 ONFAIL
+
+  BEGIN { plan test => 4, onfail => sub { warn "CALL 911!" } }
+
+While test failures should be enough, extra diagnostics can be
+triggered at the end of a test run. C<onfail> is passed an array ref
+of hash refs that describe each test failure. Each hash will contain
+at least the following fields: C<package>, C<repetition>, and
+C<result>. (The file, line, and test number are not included because
+their correspondence to a particular test is tenuous.) If the test
+had an expected value or a diagnostic string, these will also be
+included.
+
+The C<onfail> hook might be used simply to print out the
+version of your package and/or how to report problems. It might also
+be used to generate extremely sophisticated diagnostics for a
+particularly bizarre test failure. However it's not a panacea. Core
+dumps or other unrecoverable errors prevent the C<onfail> hook from
+running. (It is run inside an C<END> block.) Besides, C<onfail> is
+probably over-kill in most cases. (Your test code should be simpler
+than the code it is testing, yes?)
+
 =head1 SEE ALSO
 
-L<Test::Harness> and various test coverage analysis tools.
+L<Test::Harness> and, perhaps, test coverage analysis tools.
 
 =head1 AUTHOR
 
-Copyright © 1998 Joshua Nathaniel Pritikin. All rights reserved.
+Copyright (c) 1998-1999 Joshua Nathaniel Pritikin. All rights reserved.
 
 This package is free software and is provided "as is" without express
 or implied warranty. It may be used, redistributed and/or modified
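
A rough usage sketch, not part of the patch itself: the short script below exercises the interface this revision adds, namely plan()'s onfail hook, got-versus-expected ok(), and skip(). The test bodies, messages, and the planned numbers are made-up placeholders; only the Test.pm calls follow the code above, so treat this as an illustration rather than a reference.

  #!/usr/bin/perl
  use strict;
  use Test;

  # Plan before any test runs; the onfail hook receives an array ref of
  # failure-detail hashes (package, repetition, result, ...) at END time.
  BEGIN {
      plan tests  => 4,
           todo   => [4],
           onfail => sub {
               my ($fails) = @_;
               for my $f (@$fails) {
                   warn "failed in $f->{package} (try $f->{repetition}): ",
                        "got '$f->{result}'",
                        (exists $f->{expected} ? ", expected '$f->{expected}'" : ''),
                        "\n";
               }
           };
  }

  ok(1);                              # one argument: plain boolean test
  ok(2 + 2, 4);                       # got-versus-expected comparison
  skip($^O ne 'MSWin32', sub { 1 });  # skipped everywhere except Win32
  ok('not yet', 'done');              # test 4 is on the todo list, so this
                                      # expected failure also feeds onfail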