use Test::Harness 1.1601 ();

use vars (qw($VERSION @ISA @EXPORT $ntest $TestLevel),      # public-ish
          qw($ONFAIL %todo %history $planned @FAILDETAIL)); # private-ish

@EXPORT = qw(&plan &ok &skip $ntest);

$TestLevel = 0;   # how many extra stack frames to skip
# Use of this variable is strongly discouraged.  It is set mainly to
# help test coverage analyzers know which test is running.
$ENV{REGRESSION_TEST} = $0;
22 croak "Test::plan(%args): odd number of arguments" if @_ & 1;
23 croak "Test::plan(): should not be called more than once" if $planned;
25 for (my $x=0; $x < @_; $x+=2) {
26 my ($k,$v) = @_[$x,$x+1];
27 if ($k =~ /^test(s)?$/) { $max = $v; }
28 elsif ($k eq 'todo' or
29 $k eq 'failok') { for (@$v) { $todo{$_}=1; }; }
30 elsif ($k eq 'onfail') {
31 ref $v eq 'CODE' or croak "Test::plan(onfail => $v): must be CODE";
34 else { carp "Test::plan(): skipping unrecognized directive '$k'" }
36 my @todo = sort { $a <=> $b } keys %todo;
38 print "1..$max todo ".join(' ', @todo).";\n";
(ref $v or '') eq 'CODE' ? $v->() : $v;
# STDERR is NOT used for diagnostic output about failures which should have
# been fixed before release (i.e. expected/todo failures).  Is this appropriate?
54 croak "ok: plan before you test!" if !$planned;
55 my ($pkg,$file,$line) = caller($TestLevel);
56 my $repetition = ++$history{"$file:$line"};
57 my $context = ("$file at line $line".
58 ($repetition > 1 ? " fail \#$repetition" : ''));
my $result = to_value(shift);

$expected = to_value(shift);
# until regex can be manipulated like objects...
if (($regex) = ($expected =~ m,^ / (.+) / $,sx) or
    ($ignore, $regex) = ($expected =~ m,^ m([^\w\s]) (.+) \1 $,sx)) {
    $ok = $result =~ /$regex/;
} else {
    $ok = $result eq $expected;
}
77 print "ok $ntest # Wow! ($context)\n";
79 $diag = to_value(shift) if @_;
81 print "not ok $ntest # (failure expected in $context)\n";
83 print "not ok $ntest # (failure expected: $diag)\n";
my $detail = { 'repetition' => $repetition, 'package' => $pkg,
               'result' => $result };
$$detail{expected} = $expected if defined $expected;
$diag = $$detail{diagnostic} = to_value(shift) if @_;
if (!defined $expected) {
    if (!$diag) {
        print STDERR "# Failed test $ntest in $context\n";
    } else {
        print STDERR "# Failed test $ntest in $context: $diag\n";
    }
} else {
    my $prefix = "Test $ntest";
    print STDERR "# $prefix got: '$result' ($context)\n";
    $prefix = ' ' x (length($prefix) - 5);
    if (!$diag) {
        print STDERR "# $prefix Expected: '$expected'\n";
    } else {
        print STDERR "# $prefix Expected: '$expected' ($diag)\n";
    }
}
push @FAILDETAIL, $detail;
if (to_value(shift)) {
    print "ok $ntest # skip\n";
} else {
    local($TestLevel) = $TestLevel + 1;   # ignore this stack frame
    &ok;
}
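# Run at process exit from an END block: hand the onfail hook the collected
# failure details (see the ONFAIL section below).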
$ONFAIL->(\@FAILDETAIL) if @FAILDETAIL && $ONFAIL;
Test - provides a simple framework for writing test scripts
  BEGIN { plan tests => 13, todo => [3,4] }
  ok(0); # ok, expected failure (see todo list, above)
  ok(1); # surprise success!

  ok(0,1);             # failure: '0' ne '1'
  ok('broke','fixed'); # failure: 'broke' ne 'fixed'
  ok('fixed','fixed'); # success: 'fixed' eq 'fixed'

  ok(sub { 1+1 }, 2);  # success: '2' eq '2'
  ok(sub { 1+1 }, 3);  # failure: '2' ne '3'
  ok(0, int(rand(2))); # (just kidding! :-)

  ok @list, 3, "\@list=".join(',',@list); # extra diagnostics
  ok 'segmentation fault', '/(?i)success/'; # regex match

  skip($feature_is_missing, ...); # do platform specific test
Test::Harness expects to see particular output when it executes tests.
This module aims to make writing proper test scripts just a little bit
easier (and less error prone :-).
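For example, a two-test script along these lines (just a sketch, not part of
the module) produces the plan line and the per-test lines that Test::Harness
parses:

  use Test;
  BEGIN { plan tests => 2 }       # prints "1..2"
  ok(1);                          # prints "ok 1"
  ok('abc', 'abd');               # prints "not ok 2", plus got/Expected
                                  # diagnostics on STDERR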
=item * NORMAL TESTS

These tests are expected to succeed.  If they don't, something's
wrong!
=item * SKIPPED TESTS

Skip tests are for platform-specific features that might or might not be
available.  The first argument should evaluate to true if the required
feature is NOT available.  After the first argument, skip tests work
exactly the same way as normal tests do.
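For example (a sketch; the C<symlink> probe is only one way of checking for
a feature before testing it):

  my $no_symlinks = !eval { symlink('', ''); 1 };
  skip($no_symlinks,                       # true => report "ok N # skip"
       sub { symlink('target', 'link') },  # otherwise tested just like ok()
       1);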
=item * TODO TESTS

TODO tests are designed for maintaining an executable TODO list.
These tests are expected NOT to succeed (otherwise the feature they
test would be on the new feature list, not the TODO list).

Packages should NOT be released with successful TODO tests.  As soon
as a TODO test starts working, it should be promoted to a normal test,
and the newly minted feature should be documented in the release
notes or the change log.
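For example (a sketch; C<frobnicate()> stands in for a feature that is not
implemented yet):

  BEGIN { plan tests => 2, todo => [2] }
  ok(1);               # a normal test
  ok(frobnicate(), 1); # test 2 is on the todo list, so this failure is
                       # reported as "expected" rather than as a bug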
  BEGIN { plan tests => 4, onfail => sub { warn "CALL 911!" } }

Test failures can trigger extra diagnostics at the end of the test
run.  C<onfail> is passed an array ref of hash refs that describe each
test failure.  Each hash will contain at least the following fields:
package, repetition, and result.  (The file, line, and test number are
not included because their correspondence to a particular test is
fairly weak.)  If the test had an expected value or a diagnostic
string, these will also be included.
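For example, a handler along these lines (only a sketch) could summarize
each failure from the fields described above:

  BEGIN {
      plan tests => 4, onfail => sub {
          my ($fails) = @_;     # array ref of hash refs, one per failure
          for my $f (@$fails) {
              warn "package $$f{package}, attempt $$f{repetition}: ",
                   "got '$$f{result}'",
                   (exists $$f{expected}   ? ", expected '$$f{expected}'" : ''),
                   (exists $$f{diagnostic} ? " ($$f{diagnostic})"         : ''),
                   "\n";
          }
      };
  }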
This optional feature might be used simply to print out the version of
your package and/or how to report problems.  It might also be used to
generate extremely sophisticated diagnostics for a particular test
failure.  It's not a panacea, however.  Core dumps or other
unrecoverable errors will prevent the C<onfail> hook from running.
(It is run inside an END block.)  Besides, C<onfail> is probably
overkill in the majority of cases.  (Your test code should be simpler
than the code it is testing, yes?)
L<Test::Harness> and various test coverage analysis tools.
Copyright (C) 1998 Joshua Nathaniel Pritikin.  All rights reserved.

This package is free software and is provided "as is" without express
or implied warranty.  It may be used, redistributed and/or modified
under the terms of the Perl Artistic License (see
http://www.perl.com/perl/misc/Artistic.html).