# Module preamble (fragment of a numbered listing — gaps mark omitted lines).
# Require a compatible Test::Harness; the empty list suppresses any imports.
3 use Test::Harness 1.1601 ();
# Package globals: first group is (semi-)public interface state, second is
# internal bookkeeping ($planned, per-line failure %history, failure details).
5 our($VERSION, @ISA, @EXPORT, @EXPORT_OK, $ntest, $TestLevel); #public-ish
6 our($TESTOUT, $ONFAIL, %todo, %history, $planned, @FAILDETAIL); #private-ish
# plan(), ok() and skip() are exported by default; $ntest and $TESTOUT only
# on request.
10 @EXPORT=qw(&plan &ok &skip);
11 @EXPORT_OK=qw($ntest $TESTOUT);
13 $TestLevel = 0; # how many extra stack frames to skip
# Test output defaults to the STDOUT IO handle; callers may override via
# the exported-on-request $TESTOUT.
17 $TESTOUT = *STDOUT{IO};
19 # Use of this variable is strongly discouraged. It is set mainly to
20 # help test coverage analyzers know which test is running.
21 $ENV{REGRESSION_TEST} = $0;
# plan(%args) — declare the test plan; must run once, before any ok() call.
# Recognized keys: tests/test => N, todo/failok => [test numbers expected to
# fail], onfail => CODE run at END time on failures.
# NOTE(review): this excerpt omits lines (the sub header, the onfail
# assignment, closing braces, $planned/$ntest setup) — comments below cover
# only the visible code.
24 croak "Test::plan(%args): odd number of arguments" if @_ & 1;
25 croak "Test::plan(): should not be called more than once" if $planned;
# Walk the argument list as key/value pairs.
27 for (my $x=0; $x < @_; $x+=2) {
28 my ($k,$v) = @_[$x,$x+1];
# 'test' and 'tests' both set the planned test count.
29 if ($k =~ /^test(s)?$/) { $max = $v; }
# 'todo' and 'failok' are synonyms: each listed test number is expected to fail.
30 elsif ($k eq 'todo' or
31 $k eq 'failok') { for (@$v) { $todo{$_}=1; }; }
32 elsif ($k eq 'onfail') {
33 ref $v eq 'CODE' or croak "Test::plan(onfail => $v): must be CODE";
# Unknown directives are warned about but otherwise ignored.
36 else { carp "Test::plan(): skipping unrecognized directive '$k'" }
# Emit the "1..N" plan header; when todo tests exist they are advertised
# inline, in ascending numeric order.
38 my @todo = sort { $a <=> $b } keys %todo;
40 print $TESTOUT "1..$max todo ".join(' ', @todo).";\n";
42 print $TESTOUT "1..$max\n";
# Tail of the to_value() helper (header not in view): a CODE ref is called
# and its return value used; any other value is passed through unchanged.
49 (ref $v or '') eq 'CODE' ? $v->() : $v;
# ok($result [, $expected [, $diagnostic]]) — record the outcome of one test.
# With one argument, the truth of $result decides pass/fail; with two, $result
# is compared against $expected (undef, a qr// Regexp, a '/re/' or 'm!re!'
# style string pattern, or plain string equality).  Arguments may be CODE refs,
# which to_value() evaluates first.
# NOTE(review): this excerpt omits lines (sub header, some else branches,
# closing braces, the $ntest increment) — comments cover visible code only.
53 croak "ok: plan before you test!" if !$planned;
# Locate the calling test line; $TestLevel lets wrappers such as skip()
# hide their own stack frame.
54 my ($pkg,$file,$line) = caller($TestLevel);
# Count repeated failures from the same file:line (tests inside loops).
55 my $repetition = ++$history{"$file:$line"};
56 my $context = ("$file at line $line".
57 ($repetition > 1 ? " fail \#$repetition" : ''));
59 my $result = to_value(shift);
64 $expected = to_value(shift);
# Comparison rules, in order: both undef match; defined-vs-undef fails;
# a compiled Regexp is used as a pattern, as is a '/re/' or 'm<delim>re<delim>'
# style string; anything else is compared with string eq.
66 if (!defined $expected) {
67 $ok = !defined $result;
68 } elsif (!defined $result) {
70 } elsif ((ref($expected)||'') eq 'Regexp') {
71 $ok = $result =~ /$expected/;
72 } elsif (($regex) = ($expected =~ m,^ / (.+) / $,sx) or
73 ($ignore, $regex) = ($expected =~ m,^ m([^\w\s]) (.+) \1 $,sx)) {
74 $ok = $result =~ /$regex/;
76 $ok = $result eq $expected;
# A test on the todo list that passes is flagged as a surprise success.
79 my $todo = $todo{$ntest};
81 $context .= ' TODO?!' if $todo;
82 print $TESTOUT "ok $ntest # ($context)\n";
84 # Issuing two separate print()s causes severe trouble with
85 # Test::Harness on VMS. The "not "'s for failed tests occur
86 # on a separate line and would not get counted as failures.
87 #print $TESTOUT "not " if !$ok;
88 #print $TESTOUT "ok $ntest\n";
89 # Replace with one of a pair of single print()'s as a workaround:
91 print $TESTOUT "not ok $ntest\n";
94 print $TESTOUT "ok $ntest\n";
# On failure, capture full details for the onfail END hook and print
# harness-safe '#' diagnostic lines.
98 my $detail = { 'repetition' => $repetition, 'package' => $pkg,
99 'result' => $result, 'todo' => $todo };
100 $$detail{expected} = $expected if defined $expected;
# An optional trailing argument supplies a diagnostic string (or CODE ref).
101 $diag = $$detail{diagnostic} = to_value(shift) if @_;
102 $context .= ' *TODO*' if $todo;
103 if (!defined $expected) {
105 print $TESTOUT "# Failed test $ntest in $context\n";
107 print $TESTOUT "# Failed test $ntest in $context: $diag\n";
# With an expected value, print got/expected aligned under a shared prefix.
110 my $prefix = "Test $ntest";
111 print $TESTOUT "# $prefix got: ".
112 (defined $result? "'$result'":'<UNDEF>')." ($context)\n";
# Shrink the prefix to spaces so 'Expected:' lines up under 'got:'.
113 $prefix = ' ' x (length($prefix) - 5);
# Render the expectation: Regexp refs as qr/.../, strings quoted.
114 if ((ref($expected)||'') eq 'Regexp') {
115 $expected = 'qr/'.$expected.'/'
117 $expected = "'$expected'";
120 print $TESTOUT "# $prefix Expected: $expected\n";
122 print $TESTOUT "# $prefix Expected: $expected ($diag)\n";
125 push @FAILDETAIL, $detail;
# skip($whyskip, @ok_args) — when $whyskip is true, emit "ok N # reason"
# without running the test; otherwise behave exactly like ok().
# NOTE(review): excerpt omits the sub header, branch structure and the
# delegating ok() call — comments cover visible code only.
133 my $whyskip = to_value(shift);
# A purely numeric reason (e.g. a bare true value of 1) carries no
# information, so it is replaced with the literal word 'skip'.
135 $whyskip = 'skip' if $whyskip =~ m/^\d+$/;
136 print $TESTOUT "ok $ntest # $whyskip\n";
# When not skipping: delegate to ok(), hiding this wrapper's stack frame
# from caller() so diagnostics point at the user's test line.
140 local($TestLevel) = $TestLevel+1; #ignore this stack frame
# END-time hook (fragment): hand accumulated failure details to the
# onfail callback registered via plan(), if any failures occurred.
146 $ONFAIL->(\@FAILDETAIL) if @FAILDETAIL && $ONFAIL;
154 Test - provides a simple framework for writing test scripts
161 # use a BEGIN block so we print our plan before MyModule is loaded
162 BEGIN { plan tests => 14, todo => [3,4] }
164 # load your module...
170 ok(0); # ok, expected failure (see todo list, above)
171 ok(1); # surprise success!
173 ok(0,1); # failure: '0' ne '1'
174 ok('broke','fixed'); # failure: 'broke' ne 'fixed'
175 ok('fixed','fixed'); # success: 'fixed' eq 'fixed'
176 ok('fixed',qr/x/); # success: 'fixed' =~ qr/x/
178 ok(sub { 1+1 }, 2); # success: '2' eq '2'
179 ok(sub { 1+1 }, 3); # failure: '2' ne '3'
180 ok(0, int(rand(2))); # (just kidding :-)
183 ok @list, 3, "\@list=".join(',',@list); #extra diagnostics
184 ok 'segmentation fault', '/(?i)success/'; #regex match
186 skip($feature_is_missing, ...); #do platform specific test
190 L<Test::Harness|Test::Harness> expects to see particular output when it
191 executes tests. This module aims to make writing proper test scripts just
192 a little bit easier (and less error prone :-).
200 These tests are expected to succeed. If they don't, something's
203 =item * SKIPPED TESTS
205 Skip is for tests that might or might not be possible to run depending
206 on the availability of platform specific features. The first argument
207 should evaluate to true (think "yes, please skip") if the required
208 feature is not available. After the first argument, skip works
209 exactly the same way as do normal tests.
213 TODO tests are designed for maintaining an B<executable TODO list>.
214 These tests are expected NOT to succeed. If a TODO test does succeed,
215 the feature in question should not be on the TODO list, now should it?
217 Packages should NOT be released with succeeding TODO tests. As soon
218 as a TODO test starts working, it should be promoted to a normal test
219 and the newly working feature should be documented in the release
226 Both C<ok> and C<skip> return true if their test succeeds and false
227 otherwise in a scalar context.
231 BEGIN { plan test => 4, onfail => sub { warn "CALL 911!" } }
233 While test failures should be enough, extra diagnostics can be
234 triggered at the end of a test run. C<onfail> is passed an array ref
235 of hash refs that describe each test failure. Each hash will contain
236 at least the following fields: C<package>, C<repetition>, and
237 C<result>. (The file, line, and test number are not included because
238 their correspondence to a particular test is tenuous.) If the test
239 had an expected value or a diagnostic string, these will also be
242 The B<optional> C<onfail> hook might be used simply to print out the
243 version of your package and/or how to report problems. It might also
244 be used to generate extremely sophisticated diagnostics for a
245 particularly bizarre test failure. However it's not a panacea. Core
246 dumps or other unrecoverable errors prevent the C<onfail> hook from
247 running. (It is run inside an C<END> block.) Besides, C<onfail> is
248 probably over-kill in most cases. (Your test code should be simpler
249 than the code it is testing, yes?)
253 L<Test::Harness> and, perhaps, test coverage analysis tools.
257 Copyright (c) 1998-1999 Joshua Nathaniel Pritikin. All rights reserved.
259 This package is free software and is provided "as is" without express
260 or implied warranty. It may be used, redistributed and/or modified
261 under the terms of the Perl Artistic License (see
262 http://www.perl.com/perl/misc/Artistic.html)