--- /dev/null
+*.pm eol=lf
+*.t eol=lf
+*.pod eol=lf
+*.pod.proto eol=lf
+*.pl eol=lf
+*.PL eol=lf
+*.bash eol=lf
+*.json eol=lf
+*.yml eol=lf
+*.sql eol=lf
+/* eol=lf
+/script/* eol=lf
+/maint/* eol=lf
+
+* text=auto
# so if someone were to legally change their name, we could use it to fix that
# while maintaining the integrity of the repository
+# https://www.kernel.org/pub/software/scm/git/docs/git-shortlog.html#_mapping_authors
+
Alexander Hartmaier <abraxxa@cpan.org> <alexander.hartmaier@t-systems.at>
Amiri Barksdale <amiribarksdale@gmail.com> <amiri@metalabel.com>
Andrew Rodland <andrew@cleverdomain.org> <arodland@cpan.org>
Brendan Byrd <Perl@ResonatorSoft.org> <GitHub@ResonatorSoft.org>
Brendan Byrd <Perl@ResonatorSoft.org> <perl@resonatorsoft.org>
Brian Phillips <bphillips@cpan.org> <bphillips@digitalriver.com>
+Dagfinn Ilmari Mannsåker <ilmari@ilmari.org> <ilmari.mannsaker@net-a-porter.com>
David Kamholz <dkamholz@cpan.org> <davekam@pobox.com>
-David Schmidt <davewood@gmx.at> <d.schmidt@tripwolf.com>
-David Schmidt <davewood@gmx.at> <dt@univie.ac.at>
+David Schmidt <mail@davidschmidt.at> <d.schmidt@tripwolf.com>
+David Schmidt <mail@davidschmidt.at> <dt@univie.ac.at>
+David Schmidt <mail@davidschmidt.at> <davewood@gmx.at>
Devin Austin <dhoss@cpan.org> <devin.austin@gmail.com>
Felix Antonius Wilhelm Ostmann <sadrak@cpan.org> <ostmann@sadraksaemp.intern4.websuche.de>
Gerda Shank <gshank@cpan.org> <gerda.shank@gmail.com>
on_success: change
on_failure: always
+# FIXME - This stuff is not yet available for free OSS accounts, sadpanda
+# First paragraph on http://about.travis-ci.org/docs/user/caching/
+#cache:
+# apt: true
+# directories:
+# - /var/cache/apt/archives
+
language: perl
perl:
- CLEANTEST=true
matrix:
+ fast_finish: true
include:
# this particular perl is quite widespread
- perl: 5.8.8_thr_mb
###
# some permutations of tracing and envvar poisoning
- - perl: 5.18.1_thr_mb
+ - perl: 5.16.2_thr_mb
env:
- CLEANTEST=false
- POISON_ENV=true
- DBIC_TRACE=1
- DBIC_MULTICREATE_DEBUG=0
- BREWOPTS="-Duseithreads -Dusemorebits"
- - BREWVER=5.18.1
+ - BREWVER=5.16.2
- perl: 5.18
env:
- POISON_ENV=true
- DBIC_TRACE_PROFILE=console
- - perl: 5.18
+ - perl: 5.8
env:
- CLEANTEST=true
- POISON_ENV=true
###
# Start of the allow_failures block
- # recentish stable with blead CPAN
- - perl: devcpan_5.18.1_thr_mb
+ # old threaded with blead CPAN
+ - perl: devcpan_5.8.7_thr
env:
- - CLEANTEST=false
+ - CLEANTEST=true
+ - BREWOPTS="-Duseithreads"
+ - BREWVER=5.8.7
+ - DEVREL_DEPS=true
+
+ # 5.10.0 threaded with blead CPAN
+ - perl: devcpan_5.10.0_thr_mb
+ env:
+ - CLEANTEST=true
- BREWOPTS="-Duseithreads -Dusemorebits"
- - BREWVER=5.18.1
+ - BREWVER=5.10.0
- DEVREL_DEPS=true
- # bleadperl with stock CPAN
- - perl: bleadperl_thr_mb
+ # 5.12.2 with blead CPAN
+ - perl: devcpan_5.12.2_thr
+ env:
+ - CLEANTEST=true
+ - BREWOPTS="-Duseithreads"
+ - BREWVER=5.12.2
+ - DEVREL_DEPS=true
+
+ # recentish threaded stable with blead CPAN
+ - perl: devcpan_5.18.2_thr_mb
env:
- CLEANTEST=false
- BREWOPTS="-Duseithreads -Dusemorebits"
+ - BREWVER=5.18.2
+ - DEVREL_DEPS=true
+
+ # bleadperl with stock CPAN, full depchain test
+ - perl: bleadperl
+ env:
+ - CLEANTEST=true
- BREWVER=blead
# bleadperl with blead CPAN
# which ones of the above can fail
allow_failures:
- # Fails tests because of https://github.com/Perl-Toolchain-Gang/CPAN-Meta/issues/25
- # Can't be simply masked due to https://rt.cpan.org/Ticket/Display.html?id=88903
- - perl: 5.10.0_thr_dbg
-
# these run with various dev snapshots - allowed to fail
- - perl: devcpan_5.18.1_thr_mb
- - perl: bleadperl_thr_mb
+ - perl: devcpan_5.8.7_thr
+ - perl: devcpan_5.10.0_thr_mb
+ - perl: devcpan_5.12.2_thr
+ - perl: devcpan_5.18.2_thr_mb
+ - perl: bleadperl
- perl: devcpan_bleadperl_thr_mb
Revision history for DBIx::Class
-0.08901-TRIAL (EXPERIMENTAL BETA RELEASE)
+<unreleased DQ stuff, last was 0.08901-TRIAL>
* Start of experimental Data::Query-based release cycle
- Any and all newly introduced syntax features may very well change
or disappear altogether before the 0.09000 release
- * New Features / Changes
+<unreleased mainline>
+ * Fixes
+ - Fix on_connect_* not always firing in some cases - a race condition
+ existed between storage accessor setters and the determine_driver
+ routines, triggering a connection before the set-cycle is finished
+
+0.08270 2014-01-30 21:54 (PST)
+ * Fixes
+ - Fix 0.08260 regression in DBD::SQLite bound int handling. Inserted
+ data was not affected, but any function <=> integer comparison would
+ have failed (originally fixed way back in 0e773352)
+ - Fix failure to load DateTime formatter when connecting to Firebird
+ over ODBC
+
+ * Misc
+ - All drivers based on ::Storage::DBI::Firebird::Common now return the
+ same sqlt_type value (affects ::DBI::Interbase, ::DBI::Firebird and
+ ::DBI::ODBC::Firebird)
+
+0.08260 2014-01-28 18:52 (UTC)
+ * New Features
- A new zero-to-DBIC style manual: DBIx::Class::Manual::QuickStart
+ * Notable Changes and Deprecations
+ - Explicitly deprecate combination of distinct and selecting a
+ non-column via $rs->get_column()
+
* Fixes
- More robust handling of circular relationship declarations by loading
foreign classes less frequently (should resolve issues like
http://lists.scsys.co.uk/pipermail/dbix-class/2013-June/011374.html)
- - Fix multiple edge cases steming from interaction of a non-selecting
+      Note that none of this is a manifestation of a DBIC bug, but rather
+      unexpected (but correct) behavior of load-order-dependent (hence
+      logically broken) Resultclass hierarchies. In order to deal with this
+      DBIC is scaling back a large number of sanity checks, which are to be
+      reintroduced pending a better framework for source registration
+ - Fix multiple edge cases of complex prefetch combining incorrectly
+ with correlated subquery selections
+ - Fix multiple edge cases stemming from interaction of a non-selecting
order_by specification and distinct and/or complex prefetch
+    - Fix unbounded growth of a resultset during repeated execute/exhaust
+ cycles (GH#29)
+ - Work around (and be very vocal about the fact) when DBIC encounters
+ an exception object with broken string overloading
- Clarify ambiguous behavior of distinct when used with ResultSetColumn
i.e. $rs->search({}, { distinct => 1 })->get_column (...)
- Setting quote_names propagates to SQL::Translator when producing
- Back out self-cleaning from DBIx::Class::Carp for the time being
(as a side effect fixes RT#86267)
- Fix incorrect internal use of implicit list context in copy()
+ - Fix 0.08250 regression in driver determination when DBI_DSN is used
- Tests no longer fail if $ENV{DBI_DSN} is set
- Throw clearer exception on ->new_related() with a non-existent
- relationship.
+ relationship
+    - Fix incorrect parenthesis unroll with multicolumn in (never noticed
+ before fixing false positive in SQLA::Test 1.77)
- Fix t/storage/replicated.t class loading problem
- Stop using the deprecated Class::MOP::load_class()
+ - Fix warning in t/54taint.t with explicitly unset PERL5LIB (RT#91972)
+ - Fix t/54taint.t failing under a local::lib with installed earlier
+ DBIC version (RT#92486)
* Misc
+ - Massive incompatible change of ::BlockRunner internals (was never
+ documented as usable externally, this last set of changes settles
+ the design for proper documentation and opening up)
+ - Adjust exceptions in tests to accommodate changes in the upcoming
+ DBD::SQLite based on libsqlite 3.8.2
+ - More robust lock file naming scheme - allow tests to work on exotic
+ MSWin32 filesystems (habitual offender being http://is.gd/iy5XVP)
+ - Better diagnostics when File::Spec->tmpdir gives us crap in testing
- Replace $row with $result in all docs to be consistent and to
clarify various return values
lib/DBIx/Class/Manual/ResultClass.pod.proto
maint/.Generated_Pod
+
+maint/travis-ci_scripts
+.travis.yml
use 5.008001;
use inc::Module::Install 1.06;
+BEGIN { makemaker_args( NORECURS => 1 ) } # needs to happen early for old EUMM
##
## TEMPORARY (and non-portable)
# for that)
BEGIN {
$Module::Install::AUTHOR = 0 if (grep { $ENV{"PERL5_${_}_IS_RUNNING"} } (qw/CPANM CPANPLUS CPAN/) );
- makemaker_args( NORECURS => 1 );
}
homepage 'http://www.dbix-class.org/';
all_from 'lib/DBIx/Class.pm';
Meta->{values}{x_authority} = 'cpan:RIBASUSHI';
+# nothing determined at runtime, except for possibly SQLT dep, see
+# comment further down
+dynamic_config 0;
+
tests_recursive (qw|
t
|);
'namespace::clean' => '0.24',
'Path::Class' => '0.18',
'Scope::Guard' => '0.03',
- 'SQL::Abstract' => '1.73',
+ 'SQL::Abstract' => '1.77',
'Try::Tiny' => '0.07',
# Technically this is not a core dependency - it is only required
};
my $build_requires = {
+};
+
+my $test_requires = {
+ 'File::Temp' => '0.22',
+ 'Test::Deep' => '0.101',
+ 'Test::Exception' => '0.31',
+ 'Test::Warn' => '0.21',
+ 'Test::More' => '0.94',
+
# needed for testing only, not for operation
# we will move away from this dep eventually, perhaps to DBD::CSV or something
###
### often *not* easy or even possible)
###
'DBD::SQLite' => '1.29',
-};
-
-my $test_requires = {
- 'File::Temp' => '0.22',
- 'Test::Deep' => '0.101',
- 'Test::Exception' => '0.31',
- 'Test::Warn' => '0.21',
- 'Test::More' => '0.94',
- # not sure if this is necessary at all, ask schwern some day
- 'Test::Builder' => '0.94',
# this is already a dep of n::c, but just in case - used by t/55namespaces_cleaned.t
# remove and do a manual glob-collection if n::c is no longer a dep
};
# if the user has this env var set and no SQLT installed, tests will fail
-# same rationale for direct test_requires as the strictures stuff above
-# (even though no dist will be created from this)
+# Note - this is added as test_requires *directly*, so it gets properly
+# excluded on META.yml cleansing (even though no dist can be created from this)
# we force this req regardless of author_deps, worst case scenario it will
# be specified twice
+#
+# also note that we *do* set dynamic_config => 0, as this is the only thing
+# that we determine dynamically, and in all fairness if someone sets the
+# envvar *and* is not running a full Makefile/make/maketest cycle - they get
+# to keep the pieces
if ($ENV{DBICTEST_SQLT_DEPLOY}) {
local @INC = ('lib', @INC);
require DBIx::Class::Optional::Dependencies;
# only do author-includes if not part of a `make` run
if ($Module::Install::AUTHOR and ! $ENV{MAKELEVEL}) {
+ invoke_author_mode()
+}
+else {
+ # make sure this Makefile can not be used to make a dist
+ # (without the author includes there are no meta cleanup, no sanity checks, etc)
+ postamble <<EOP;
+create_distdir: nonauthor_stop_distdir_creation
+nonauthor_stop_distdir_creation:
+\t\$(NOECHO) \$(ECHO) Creation of dists in non-author mode is not allowed
+\t\$(NOECHO) \$(FALSE)
+EOP
+}
+
+# compose final req list, for alphabetical ordering
+my %final_req;
+for my $rtype (keys %$reqs) {
+ for my $mod (keys %{$reqs->{$rtype}} ) {
+
+ # sanity check req duplications
+ die "$mod specified as both a '$rtype' and a '$final_req{$mod}[0]'\n"
+ if $final_req{$mod};
+
+ $final_req{$mod} = [ $rtype, $reqs->{$rtype}{$mod}||0 ],
+ }
+}
+
+# actual require
+for my $mod (sort keys %final_req) {
+ my ($rtype, $ver) = @{$final_req{$mod}};
+ no strict 'refs';
+ $rtype->($mod, $ver);
+}
+
+# author-mode or not - this is where we show a list of missing deps
+# IFF we are running interactively
+auto_install();
+
+WriteAll();
+
+exit 0;
+
+# needs to be here to keep 5.8 string eval happy
+# (the include of Makefile.PL.inc loop)
+my $mm_proto;
+
+sub invoke_author_mode {
# get options here, make $args available to all snippets
require Getopt::Long;
my $getopt = Getopt::Long::Parser->new(
# We need the MM facilities to generate the pieces for the final MM run.
# Just instantiate a throwaway object here
- my $mm_proto = ExtUtils::MakeMaker->new({
+ #
+ # Also EUMM and MI disagree on what is the format of Meta->name, just
+ # punt here until a new M::I is shipped (if at all)
+ my $name = Meta->name || die 'The Module::Install metadata must be available at this point but is not - did you rearrange the Makefile.PL...?';
+ $name =~ s/\-/::/g;
+ $mm_proto = ExtUtils::MakeMaker->new({
NORECURS => 1,
- NAME => Meta->name || die 'The Module::Install metadata must be available at this point but is not - did you rearrange the Makefile.PL...?',
+ NAME => $name,
});
# Crutch for DISTBUILDING_IN_HELL
;
}
}
-else {
- # make sure this Makefile can not be used to make a dist
- # (without the author includes there are no meta cleanup, no sanity checks, etc)
- postamble <<EOP;
-create_distdir: nonauthor_stop_distdir_creation
-nonauthor_stop_distdir_creation:
-\t\$(NOECHO) \$(ECHO) Creation of dists in non-author mode is not allowed
-\t\$(NOECHO) \$(FALSE)
-EOP
-}
-
-# compose final req list, for alphabetical ordering
-my %final_req;
-for my $rtype (keys %$reqs) {
- for my $mod (keys %{$reqs->{$rtype}} ) {
-
- # sanity check req duplications
- if ($final_req{$mod}) {
- die "$mod specified as both a '$rtype' and a '$final_req{$mod}[0]'\n";
- }
-
- $final_req{$mod} = [ $rtype, $reqs->{$rtype}{$mod}||0 ],
- }
-}
-
-# actual require
-for my $mod (sort keys %final_req) {
- my ($rtype, $ver) = @{$final_req{$mod}};
- no strict 'refs';
- $rtype->($mod, $ver);
-}
-
-# author-mode or not - this is where we show a list of missing deps
-# IFF we are running interactively
-auto_install();
-
-WriteAll();
$VERSION = eval $VERSION if $VERSION =~ /_/; # numify for warning-free dev releases
-BEGIN {
- package # hide from pause
- DBIx::Class::_ENV_;
-
- use Config;
-
- use constant {
-
- # but of course
- BROKEN_FORK => ($^O eq 'MSWin32') ? 1 : 0,
-
- HAS_ITHREADS => $Config{useithreads} ? 1 : 0,
-
- # ::Runmode would only be loaded by DBICTest, which in turn implies t/
- DBICTEST => eval { DBICTest::RunMode->is_author } ? 1 : 0,
-
- # During 5.13 dev cycle HELEMs started to leak on copy
- PEEPEENESS =>
- # request for all tests would force "non-leaky" illusion and vice-versa
- defined $ENV{DBICTEST_ALL_LEAKS} ? !$ENV{DBICTEST_ALL_LEAKS}
- # otherwise confess that this perl is busted ONLY on smokers
- : eval { DBICTest::RunMode->is_smoker } && ($] >= 5.013005 and $] <= 5.013006) ? 1
- # otherwise we are good
- : 0
- ,
-
- ASSERT_NO_INTERNAL_WANTARRAY => $ENV{DBIC_ASSERT_NO_INTERNAL_WANTARRAY} ? 1 : 0,
-
- IV_SIZE => $Config{ivsize},
- };
-
- if ($] < 5.009_005) {
- require MRO::Compat;
- constant->import( OLD_MRO => 1 );
- }
- else {
- require mro;
- constant->import( OLD_MRO => 0 );
- }
-}
-
+use DBIx::Class::_Util;
use mro 'c3';
use DBIx::Class::Optional::Dependencies;
zamolxes: Bogdan Lucaciu <bogdan@wiz.ro>
+Zefram: Andrew Main <zefram@fysh.org>
+
=head1 COPYRIGHT
Copyright (c) 2005 - 2011 the DBIx::Class L</AUTHOR> and L</CONTRIBUTORS>
the DB schemas are named based on the environment (e.g. database1_dev).
However, one can dynamically "map" to the proper DB schema by overriding the
-L<connection|DBIx::Class::Schama/connection> method in your Schema class and
+L<connection|DBIx::Class::Schema/connection> method in your Schema class and
building a renaming facility, like so:
package MyApp::Schema;
1;
-By overriding the L<connection|DBIx::Class::Schama/connection>
+By overriding the L<connection|DBIx::Class::Schema/connection>
method and extracting a custom option from the provided \%attr hashref one can
then simply iterate over all the Schema's ResultSources, renaming them as
needed.
To use this facility, simply add or modify the \%attr hashref that is passed to
-L<connection|DBIx::Class::Schama/connect>, as follows:
+L<connection|DBIx::Class::Schema/connect>, as follows:
my $schema
= MyApp::Schema->connect(
To send the output somewhere else set debugfh:-
- $schema->storage->debugfh(IO::File->new('/tmp/trace.out', 'w');
+ $schema->storage->debugfh(IO::File->new('/tmp/trace.out', 'w'));
Alternatively you can do this with the environment variable, too:-
# POD is generated automatically by calling _gen_pod from the
# Makefile.PL in $AUTHOR mode
-my $json_any = {
+# NOTE: the rationale for 2 JSON::Any versions is that
+# we need the newer only to work around JSON::XS, which
+# itself is an optional dep
+my $min_json_any = {
'JSON::Any' => '1.23',
};
+my $test_and_dist_json_any = {
+ 'JSON::Any' => '1.31',
+};
my $moose_basic = {
'Moose' => '0.98',
my $admin_basic = {
%$moose_basic,
- %$json_any,
+ %$min_json_any,
'MooseX::Types::Path::Class' => '0.05',
'MooseX::Types::JSON' => '0.02',
'namespace::autoclean' => '0.09',
},
test_prettydebug => {
- req => $json_any,
+ req => $min_json_any,
},
test_admin_script => {
req => {
%$admin_script,
- 'JSON::Any' => '1.30',
+ %$test_and_dist_json_any,
'JSON' => 0,
'JSON::PP' => 0,
'Cpanel::JSON::XS' => 0,
}
},
- test_leaks => {
+ test_leaks_heavy => {
req => {
- 'Test::Memory::Cycle' => '0',
- 'Devel::Cycle' => '1.10',
+ 'Class::MethodCache' => '0.02',
+ 'PadWalker' => '1.06',
},
},
dist_dir => {
req => {
+ %$test_and_dist_json_any,
'ExtUtils::MakeMaker' => '6.64',
'Pod::Inherit' => '0.90',
'Pod::Tree' => '0',
# To retrieve the plain id if you used the ugly version:
$book->get_column('author_id');
-
-If the relationship is optional -- i.e. the column containing the
-foreign key can be NULL -- then the belongs_to relationship does the
-right thing. Thus, in the example above C<< $obj->author >> would
-return C<undef>. However in this case you would probably want to set
-the L<join_type|DBIx::Class::Relationship::Base/join_type> attribute so that
-a C<LEFT JOIN> is done, which makes complex resultsets involving
-C<join> or C<prefetch> operations work correctly. The modified
-declaration is shown below:
+If some of the foreign key columns are
+L<nullable|DBIx::Class::ResultSource/is_nullable> you probably want to set
+the L<join_type|DBIx::Class::Relationship::Base/join_type> attribute to
+C<left> explicitly so that SQL expressing this relation is composed with
+a C<LEFT JOIN> (as opposed to C<INNER JOIN> which is default for
+L</belongs_to> relationships). This ensures that relationship traversal
+works consistently in all situations. (i.e. resultsets involving
+L<join|DBIx::Class::ResultSet/join> or
+L<prefetch|DBIx::Class::ResultSet/prefetch>).
+The modified declaration is shown below:
# in a Book class (where Author has_many Books)
__PACKAGE__->belongs_to(
{ join_type => 'left' }
);
-
Cascading deletes are off by default on a C<belongs_to>
relationship. To turn them on, pass C<< cascade_delete => 1 >>
in the $attr hashref.
*$rs_meth_name = subname $rs_meth_name, sub {
my $self = shift;
my $attrs = @_ > 1 && ref $_[$#_] eq 'HASH' ? pop(@_) : {};
- my @args = ($f_rel, @_ > 0 ? @_ : undef, { %{$rel_attrs||{}}, %$attrs });
my $rs = $self->search_related($rel)->search_related(
$f_rel, @_ > 0 ? @_ : undef, { %{$rel_attrs||{}}, %$attrs }
);
if $source->isa('DBIx::Class::ResultSourceHandle');
$attrs = { %{$attrs||{}} };
- delete @{$attrs}{qw(_sqlmaker_select_args _related_results_construction)};
+ delete @{$attrs}{qw(_last_sqlmaker_alias_map _related_results_construction)};
if ($attrs->{page}) {
$attrs->{rows} ||= 10;
return undef unless @{$rows||[]};
# sanity check - people are too clever for their own good
- if ($attrs->{collapse} and my $aliastypes = $attrs->{_sqlmaker_select_args}[3]{_aliastypes} ) {
+ if ($attrs->{collapse} and my $aliastypes = $attrs->{_last_sqlmaker_alias_map} ) {
my $multiplied_selectors;
for my $sel_alias ( grep { $_ ne $attrs->{alias} } keys %{ $aliastypes->{selecting} } ) {
$attrs->{from}, $attrs->{select}, $attrs->{where}, $attrs
);
- $self->{_attrs}{_sqlmaker_select_args} = $attrs->{_sqlmaker_select_args};
-
$aq;
}
}
}
- # collapse the selector to a literal so that it survives a possible distinct parse
- # if it turns out to be an aggregate - at least the user will get a proper exception
- # instead of silent drop of the group_by altogether
- my $new = bless {
- _select => \ ($rsrc->storage->sql_maker->_render_sqla(select_select => $select) =~ /^\s*SELECT\s*(.+)/i)[0],
+ return bless {
+ _select => $select,
_as => $column,
- _parent_resultset => $new_parent_rs }, $class;
- return $new;
+ _parent_resultset => $new_parent_rs
+ }, $class;
}
=head2 as_query
sub _resultset {
my $self = shift;
- return $self->{_resultset} ||= $self->{_parent_resultset}->search(undef,
- {
- select => [$self->{_select}],
- as => [$self->{_as}]
+ return $self->{_resultset} ||= do {
+
+ my $select = $self->{_select};
+
+ if ($self->{_parent_resultset}{attrs}{distinct}) {
+ my $alias = $self->{_parent_resultset}->current_source_alias;
+ my $rsrc = $self->{_parent_resultset}->result_source;
+ my %cols = map { $_ => 1, "$alias.$_" => 1 } $rsrc->columns;
+
+ unless( $cols{$select} ) {
+ carp_unique(
+ 'Use of distinct => 1 while selecting anything other than a column '
+ . 'declared on the primary ResultSource is deprecated - please supply '
+ . 'an explicit group_by instead'
+ );
+
+ # collapse the selector to a literal so that it survives the distinct parse
+ # if it turns out to be an aggregate - at least the user will get a proper exception
+ # instead of silent drop of the group_by altogether
+ $select = \ ($rsrc->storage->sql_maker->_render_sqla(select_select => $select) =~ /^\s*SELECT\s*(.+)/i)[0],
+ }
}
- );
+
+ $self->{_parent_resultset}->search(undef, {
+ columns => { $self->{_as} => $select }
+ });
+ };
}
1;
use Try::Tiny;
use List::Util qw(first max);
-use B 'perlstring';
use Scalar::Util qw(blessed);
use DBIx::Class::ResultSource::RowParser::Util qw(
$class->throw_exception("No accessor type declared for prefetched relationship '$relname'")
unless $relinfo->{attrs}{accessor};
+ my $rel_rs = $new->related_resultset($relname);
+
my @rel_objects;
if (
- $prefetch->{$relname}
- and
- @{$prefetch->{$relname}}
+ @{ $prefetch->{$relname} || [] }
and
ref($prefetch->{$relname}) ne $DBIx::Class::ResultSource::RowParser::Util::null_branch_class
) {
- my $rel_rs = $new->related_resultset($relname);
-
if (ref $prefetch->{$relname}[0] eq 'ARRAY') {
my $rel_rsrc = $rel_rs->result_source;
my $rel_class = $rel_rs->result_class;
$new->{_inflated_column}{$relname} = $rel_objects[0];
}
- $new->related_resultset($relname)->set_cache(\@rel_objects);
+ $rel_rs->set_cache(\@rel_objects);
}
}
Depending on the resultset attributes one of:
SELECT * FROM (
- SELECT *, ROWNUM rownum__index FROM (
+ SELECT *, ROWNUM AS rownum__index FROM (
SELECT ...
) WHERE ROWNUM <= ($limit+$offset)
) WHERE rownum__index >= ($offset+1)
or
SELECT * FROM (
- SELECT *, ROWNUM rownum__index FROM (
+ SELECT *, ROWNUM AS rownum__index FROM (
SELECT ...
)
) WHERE rownum__index BETWEEN ($offset+1) AND ($limit+$offset)
return <<EOS;
SELECT $sq_attrs->{selection_outer} FROM (
- SELECT $sq_attrs->{selection_outer}, ROWNUM $idx_name FROM (
+ SELECT $sq_attrs->{selection_outer}, ROWNUM AS $idx_name FROM (
SELECT $sq_attrs->{selection_inner} $sq_attrs->{query_leftover}${order_group_having}
) $qalias WHERE ROWNUM <= ?
) $qalias WHERE $idx_name >= ?
return <<EOS;
SELECT $sq_attrs->{selection_outer} FROM (
- SELECT $sq_attrs->{selection_outer}, ROWNUM $idx_name FROM (
+ SELECT $sq_attrs->{selection_outer}, ROWNUM AS $idx_name FROM (
SELECT $sq_attrs->{selection_inner} $sq_attrs->{query_leftover}${order_group_having}
) $qalias
) $qalias WHERE $idx_name BETWEEN ? AND ?
use DBIx::Class::Carp;
use Try::Tiny;
use Scalar::Util qw/weaken blessed/;
+use DBIx::Class::_Util 'refcount';
use Sub::Name 'subname';
-use B 'svref_2object';
use Devel::GlobalDestruction;
use namespace::clean;
# be stripped.
sub _expand_relative_name {
my ($class, $name) = @_;
- return if !$name;
- $name = $class . '::' . $name if ! ($name =~ s/^\+//);
+ $name =~ s/^\+// or $name = "${class}::${name}";
return $name;
}
# namespace of $class. Untaints all findings as they can be assumed
# to be safe
sub _findallmod {
- my $proto = shift;
- my $ns = shift || ref $proto || $proto;
-
require Module::Find;
-
- # untaint result
- return map { $_ =~ /(.+)/ } Module::Find::findallmod($ns);
+ return map
+ { $_ =~ /(.+)/ } # untaint result
+ Module::Find::findallmod( $_[1] || ref $_[0] || $_[0] )
+ ;
}
# returns a hash of $shortname => $fullname for every package
# found in the given namespaces ($shortname is with the $fullname's
# namespace stripped off)
sub _map_namespaces {
- my ($class, @namespaces) = @_;
-
- my @results_hash;
- foreach my $namespace (@namespaces) {
- push(
- @results_hash,
- map { (substr($_, length "${namespace}::"), $_) }
- $class->_findallmod($namespace)
- );
+ my ($me, $namespaces) = @_;
+
+ my %res;
+ for my $ns (@$namespaces) {
+ $res{ substr($_, length "${ns}::") } = $_
+ for $me->_findallmod($ns);
}
- @results_hash;
+ \%res;
}
# returns the result_source_instance for the passed class/object,
my $result_namespace = delete $args{result_namespace} || 'Result';
my $resultset_namespace = delete $args{resultset_namespace} || 'ResultSet';
+
my $default_resultset_class = delete $args{default_resultset_class};
+ $default_resultset_class = $class->_expand_relative_name($default_resultset_class)
+ if $default_resultset_class;
+
$class->throw_exception('load_namespaces: unknown option(s): '
. join(q{,}, map { qq{'$_'} } keys %args))
if scalar keys %args;
- $default_resultset_class
- = $class->_expand_relative_name($default_resultset_class);
-
for my $arg ($result_namespace, $resultset_namespace) {
- $arg = [ $arg ] if !ref($arg) && $arg;
+ $arg = [ $arg ] if ( $arg and ! ref $arg );
$class->throw_exception('load_namespaces: namespace arguments must be '
. 'a simple string or an arrayref')
$_ = $class->_expand_relative_name($_) for (@$arg);
}
- my %results = $class->_map_namespaces(@$result_namespace);
- my %resultsets = $class->_map_namespaces(@$resultset_namespace);
+ my $results_by_source_name = $class->_map_namespaces($result_namespace);
+ my $resultsets_by_source_name = $class->_map_namespaces($resultset_namespace);
my @to_register;
{
use warnings qw/redefine/;
# ensure classes are loaded and attached in inheritance order
- for my $res (values %results) {
- $class->ensure_class_loaded($res);
+ for my $result_class (values %$results_by_source_name) {
+ $class->ensure_class_loaded($result_class);
}
my %inh_idx;
- my @subclass_last = sort {
+ my @source_names_by_subclass_last = sort {
($inh_idx{$a} ||=
- scalar @{mro::get_linear_isa( $results{$a} )}
+ scalar @{mro::get_linear_isa( $results_by_source_name->{$a} )}
)
<=>
($inh_idx{$b} ||=
- scalar @{mro::get_linear_isa( $results{$b} )}
+ scalar @{mro::get_linear_isa( $results_by_source_name->{$b} )}
)
- } keys(%results);
+ } keys(%$results_by_source_name);
- foreach my $result (@subclass_last) {
- my $result_class = $results{$result};
+ foreach my $source_name (@source_names_by_subclass_last) {
+ my $result_class = $results_by_source_name->{$source_name};
- my $rs_class = delete $resultsets{$result};
- my $rs_set = $class->_ns_get_rsrc_instance ($result_class)->resultset_class;
+ my $preset_resultset_class = $class->_ns_get_rsrc_instance ($result_class)->resultset_class;
+ my $found_resultset_class = delete $resultsets_by_source_name->{$source_name};
- if($rs_set && $rs_set ne 'DBIx::Class::ResultSet') {
- if($rs_class && $rs_class ne $rs_set) {
- carp "We found ResultSet class '$rs_class' for '$result', but it seems "
- . "that you had already set '$result' to use '$rs_set' instead";
+ if($preset_resultset_class && $preset_resultset_class ne 'DBIx::Class::ResultSet') {
+ if($found_resultset_class && $found_resultset_class ne $preset_resultset_class) {
+ carp "We found ResultSet class '$found_resultset_class' matching '$results_by_source_name->{$source_name}', but it seems "
+ . "that you had already set the '$results_by_source_name->{$source_name}' resultet to '$preset_resultset_class' instead";
}
}
- elsif($rs_class ||= $default_resultset_class) {
- $class->ensure_class_loaded($rs_class);
- if(!$rs_class->isa("DBIx::Class::ResultSet")) {
- carp "load_namespaces found ResultSet class $rs_class that does not subclass DBIx::Class::ResultSet";
+  # elsif - there may be *no* default_resultset_class, in which case we fall back to
+  # DBIx::Class::ResultSet and there is nothing to check
+ elsif($found_resultset_class ||= $default_resultset_class) {
+ $class->ensure_class_loaded($found_resultset_class);
+ if(!$found_resultset_class->isa("DBIx::Class::ResultSet")) {
+ carp "load_namespaces found ResultSet class '$found_resultset_class' that does not subclass DBIx::Class::ResultSet";
}
- $class->_ns_get_rsrc_instance ($result_class)->resultset_class($rs_class);
+ $class->_ns_get_rsrc_instance ($result_class)->resultset_class($found_resultset_class);
}
- my $source_name = $class->_ns_get_rsrc_instance ($result_class)->source_name || $result;
+ my $source_name = $class->_ns_get_rsrc_instance ($result_class)->source_name || $source_name;
push(@to_register, [ $source_name, $result_class ]);
}
}
- foreach (sort keys %resultsets) {
- carp "load_namespaces found ResultSet class $_ with no "
- . 'corresponding Result class';
+ foreach (sort keys %$resultsets_by_source_name) {
+ carp "load_namespaces found ResultSet class '$resultsets_by_source_name->{$_}' "
+ .'with no corresponding Result class';
}
Class::C3->reinitialize if DBIx::Class::_ENV_::OLD_MRO;
}
} else {
my @comp = map { substr $_, length "${class}::" }
- $class->_findallmod;
+ $class->_findallmod($class);
$comps_for{$class} = \@comp;
}
=cut
-sub sources { return keys %{shift->source_registrations}; }
+sub sources { keys %{shift->source_registrations} }
=head2 source
my ($self, @info) = @_;
return $self if !@info && $self->storage;
- my ($storage_class, $args) = ref $self->storage_type ?
- ($self->_normalize_storage_type($self->storage_type),{}) : ($self->storage_type, {});
+ my ($storage_class, $args) = ref $self->storage_type
+ ? $self->_normalize_storage_type($self->storage_type)
+ : $self->storage_type
+ ;
+
+ $storage_class =~ s/^::/DBIx::Class::Storage::/;
- $storage_class = 'DBIx::Class::Storage'.$storage_class
- if $storage_class =~ m/^::/;
try {
$self->ensure_class_loaded ($storage_class);
}
"Unable to load storage class ${storage_class}: $_"
);
};
- my $storage = $storage_class->new($self=>$args);
+
+ my $storage = $storage_class->new( $self => $args||{} );
$storage->connect_info(\@info);
$self->storage($storage);
return $self;
# which will serve as a signal to not try doing anything else
# however beware - on older perls the exception seems randomly untrappable
# due to some weird race condition during thread joining :(((
- if (ref $srcs->{$source_name} and svref_2object($srcs->{$source_name})->REFCNT > 1) {
+ if (length ref $srcs->{$source_name} and refcount($srcs->{$source_name}) > 1) {
local $@;
eval {
$srcs->{$source_name}->schema($self);
sub txn_do {
my $self = shift;
- my $coderef = shift;
DBIx::Class::Storage::BlockRunner->new(
storage => $self,
- run_code => $coderef,
- run_args => @_
- ? \@_ # take a ref instead of a copy, to preserve @_ aliasing
- : [] # semantics within the coderef, but only if needed
- , # (pseudoforking doesn't like this trick much)
wrap_txn => 1,
- retry_handler => sub { ! ( $_[0]->retried_count or $_[0]->storage->connected ) },
- )->run;
+ retry_handler => sub {
+ $_[0]->failed_attempt_count == 1
+ and
+ ! $_[0]->storage->connected
+ },
+ )->run(@_);
}
=head2 txn_begin
=head2 debugfh
Set or retrieve the filehandle used for trace/debug output. This should be
-an IO::Handle compatible object (only the C<print> method is used. Initially
+an IO::Handle compatible object (only the C<print> method is used). Initially
set to be STDERR - although see information on the
L<DBIC_TRACE> environment variable.
package # hide from pause until we figure it all out
DBIx::Class::Storage::BlockRunner;
-use Sub::Quote 'quote_sub';
+use strict;
+
use DBIx::Class::Exception;
use DBIx::Class::Carp;
use Context::Preserve 'preserve_context';
-use Scalar::Util qw/weaken blessed/;
+use DBIx::Class::_Util 'is_exception';
+use Scalar::Util qw(weaken blessed reftype);
use Try::Tiny;
-use Moo;
+
+# DO NOT edit away without talking to riba first, he will just put it back
+BEGIN {
+ local $ENV{PERL_STRICTURES_EXTRA} = 0;
+ require Moo; Moo->import;
+ require Sub::Quote; Sub::Quote->import('quote_sub');
+}
+use warnings NONFATAL => 'all';
use namespace::clean;
=head1 NAME
has retry_handler => (
is => 'ro',
required => 1,
- isa => quote_sub( q|
- (ref $_[0]) eq 'CODE'
+ isa => quote_sub( q{
+ (Scalar::Util::reftype($_[0])||'') eq 'CODE'
or DBIx::Class::Exception->throw('retry_handler must be a CODE reference')
- |),
-);
-
-has run_code => (
- is => 'ro',
- required => 1,
- isa => quote_sub( q|
- (ref $_[0]) eq 'CODE'
- or DBIx::Class::Exception->throw('run_code must be a CODE reference')
- |),
-);
-
-has run_args => (
- is => 'ro',
- isa => quote_sub( q|
- (ref $_[0]) eq 'ARRAY'
- or DBIx::Class::Exception->throw('run_args must be an ARRAY reference')
- |),
- default => quote_sub( '[]' ),
+ }),
);
has retry_debug => (
is => 'rw',
+ # use a sub - to be evaluated on the spot lazily
default => quote_sub( '$ENV{DBIC_STORAGE_RETRY_DEBUG}' ),
+ lazy => 1,
);
-has max_retried_count => (
+has max_attempts => (
is => 'ro',
- default => quote_sub( '20' ),
+ default => 20,
);
-has retried_count => (
+has failed_attempt_count => (
is => 'ro',
- init_arg => undef,
- writer => '_set_retried_count',
- clearer => '_reset_retried_count',
- default => quote_sub(q{ 0 }),
+ init_arg => undef, # ensures one can't pass the value in
+ writer => '_set_failed_attempt_count',
+ default => 0,
lazy => 1,
trigger => quote_sub(q{
$_[0]->throw_exception( sprintf (
- 'Exceeded max_retried_count amount of %d, latest exception: %s',
- $_[0]->max_retried_count, $_[0]->last_exception
- )) if $_[0]->max_retried_count < ($_[1]||0);
+ 'Reached max_attempts amount of %d, latest exception: %s',
+ $_[0]->max_attempts, $_[0]->last_exception
+ )) if $_[0]->max_attempts <= ($_[1]||0);
}),
);
sub run {
my $self = shift;
- $self->throw_exception('run() takes no arguments') if @_;
-
$self->_reset_exception_stack;
- $self->_reset_retried_count;
+ $self->_set_failed_attempt_count(0);
+
+ my $cref = shift;
+
+ $self->throw_exception('run() requires a coderef to execute as its first argument')
+ if ( reftype($cref)||'' ) ne 'CODE';
+
my $storage = $self->storage;
- return $self->run_code->( @{$self->run_args} )
- if (! $self->wrap_txn and $storage->{_in_do_block});
+ return $cref->( @_ ) if (
+ $storage->{_in_do_block}
+ and
+ ! $self->wrap_txn
+ );
local $storage->{_in_do_block} = 1 unless $storage->{_in_do_block};
- return $self->_run;
+ return $self->_run($cref, @_);
}
# this is the actual recursing worker
sub _run {
- # warnings here mean I did not anticipate some ueber-complex case
- # fatal warnings are not warranted
- no warnings;
- use warnings;
+ # internal method - we know that both refs are strong-held by the
+ # calling scope of run(), hence safe to weaken everything
+ weaken( my $self = shift );
+ weaken( my $cref = shift );
- my $self = shift;
+ my $args = @_ ? \@_ : [];
# from this point on (defined $txn_init_depth) is an indicator for wrap_txn
# save a bit on method calls
my $run_err = '';
- weaken (my $weakself = $self);
-
return preserve_context {
try {
if (defined $txn_init_depth) {
- $weakself->storage->txn_begin;
+ $self->storage->txn_begin;
$txn_begin_ok = 1;
}
- $weakself->run_code->( @{$weakself->run_args} );
+ $cref->( @$args );
} catch {
$run_err = $_;
(); # important, affects @_ below
} replace => sub {
my @res = @_;
- my $storage = $weakself->storage;
+ my $storage = $self->storage;
my $cur_depth = $storage->transaction_depth;
if (defined $txn_init_depth and $run_err eq '') {
'Unexpected reduction of transaction depth by %d after execution of '
. '%s, skipping txn_commit()',
$delta_txn,
- $weakself->run_code,
+ $cref,
) unless $delta_txn == 1 and $cur_depth == 0;
}
else {
}
# something above threw an error (could be the begin, the code or the commit)
- if ($run_err ne '') {
+ if ( is_exception $run_err ) {
# attempt a rollback if we did begin in the first place
if ($txn_begin_ok) {
}
}
- push @{ $weakself->exception_stack }, $run_err;
+ push @{ $self->exception_stack }, $run_err;
+
+ # this will throw if max_attempts is reached
+ $self->_set_failed_attempt_count($self->failed_attempt_count + 1);
# init depth of > 0 ( > 1 with AC) implies nesting - no retry attempt queries
$storage->throw_exception($run_err) if (
# FIXME - we assume that $storage->{_dbh_autocommit} is there if
# txn_init_depth is there, but this is a DBI-ism
$txn_init_depth > ( $storage->{_dbh_autocommit} ? 0 : 1 )
- ) or ! $weakself->retry_handler->($weakself)
+ ) or ! $self->retry_handler->($self)
);
- $weakself->_set_retried_count($weakself->retried_count + 1);
-
# we got that far - let's retry
- carp( sprintf 'Retrying %s (run %d) after caught exception: %s',
- $weakself->run_code,
- $weakself->retried_count + 1,
+ carp( sprintf 'Retrying %s (attempt %d) after caught exception: %s',
+ $cref,
+ $self->failed_attempt_count + 1,
$run_err,
- ) if $weakself->retry_debug;
+ ) if $self->retry_debug;
$storage->ensure_connected;
# if txn_depth is > 1 this means something was done to the
$storage->transaction_depth,
) if (defined $txn_init_depth and $storage->transaction_depth);
- return $weakself->_run;
+ return $self->_run($cref, @$args);
}
return wantarray ? @res : $res[0];
__PACKAGE__->sql_name_sep('.');
__PACKAGE__->mk_group_accessors('simple' => qw/
- _connect_info _dbi_connect_info _dbic_connect_attributes _driver_determined
+ _connect_info _dbic_connect_attributes _driver_determined
_dbh _dbh_details _conn_pid _sql_maker _sql_maker_opts _dbh_autocommit
_perform_autoinc_retrieval _autoinc_supplied_for_op
/);
sub _determine_supports_join_optimizer { 1 };
# Each of these methods need _determine_driver called before itself
-# in order to function reliably. This is a purely DRY optimization
+# in order to function reliably. We also need to separate accessors
+# from plain old method calls, since an accessor called as a setter
+# does *not* need the driver determination loop fired (and in fact
+# can produce hard to find bugs, like e.g. losing on_connect_*
+# semantics on fresh connections)
#
-# get_(use)_dbms_capability need to be called on the correct Storage
-# class, as _use_X may be hardcoded class-wide, and _supports_X calls
-# _determine_supports_X which obv. needs a correct driver as well
-my @rdbms_specific_methods = qw/
+# The construct below is simply a parameterized around()
+my $storage_accessor_idx = { map { $_ => 1 } qw(
sqlt_type
- deployment_statements
+ datetime_parser_type
sql_maker
cursor_class
+)};
+for my $meth (keys %$storage_accessor_idx, qw(
+ deployment_statements
build_datetime_parser
- datetime_parser_type
txn_begin
_server_info
_get_server_version
-/;
-
-for my $meth (@rdbms_specific_methods) {
+)) {
my $orig = __PACKAGE__->can ($meth)
or die "$meth is not a ::Storage::DBI method!";
- no strict qw/refs/;
- no warnings qw/redefine/;
+ no strict 'refs';
+ no warnings 'redefine';
*{__PACKAGE__ ."::$meth"} = subname $meth => sub {
if (
# only fire when invoked on an instance, a valid class-based invocation
and
! $_[0]->{_in_determine_driver}
and
- ($_[0]->_dbi_connect_info||[])->[0]
+ # if this is a known *setter* - just set it, no need to connect
+ # and determine the driver
+ ! ( $storage_accessor_idx->{$meth} and @_ > 1 )
+ and
+ # Only try to determine stuff if we have *something* that either is or can
+ # provide a DSN. Allows for bare $schema's generated with a plain ->connect()
+ # to still be marginally useful
+ $_[0]->_dbi_connect_info->[0]
) {
$_[0]->_determine_driver;
my %seek_and_destroy;
sub _arm_global_destructor {
+
+ # quick "garbage collection" pass - prevents the registry
+ # from slowly growing with a bunch of undef-valued keys
+ defined $seek_and_destroy{$_} or delete $seek_and_destroy{$_}
+ for keys %seek_and_destroy;
+
weaken (
$seek_and_destroy{ refaddr($_[0]) } = $_[0]
);
$info = $self->_normalize_connect_info($info)
if ref $info eq 'ARRAY';
- for my $storage_opt (keys %{ $info->{storage_options} }) {
- my $value = $info->{storage_options}{$storage_opt};
-
- $self->$storage_opt($value);
- }
-
- # Kill sql_maker/_sql_maker_opts, so we get a fresh one with only
- # the new set of options
- $self->_sql_maker(undef);
- $self->_sql_maker_opts({});
-
- for my $sql_maker_opt (keys %{ $info->{sql_maker_options} }) {
- my $value = $info->{sql_maker_options}{$sql_maker_opt};
-
- $self->_sql_maker_opts->{$sql_maker_opt} = $value;
- }
-
my %attrs = (
%{ $self->_default_dbi_connect_attributes || {} },
%{ $info->{attributes} || {} },
push @args, \%attrs if keys %attrs;
}
+
+ # this is the authoritative "always an arrayref" thing fed to DBI->connect
+ # OR a single-element coderef-based $dbh factory
$self->_dbi_connect_info(\@args);
+ # extract the individual storage options
+ for my $storage_opt (keys %{ $info->{storage_options} }) {
+ my $value = $info->{storage_options}{$storage_opt};
+
+ $self->$storage_opt($value);
+ }
+
+ # Extract the individual sqlmaker options
+ #
+ # Kill sql_maker/_sql_maker_opts, so we get a fresh one with only
+ # the new set of options
+ $self->_sql_maker(undef);
+ $self->_sql_maker_opts({});
+
+ for my $sql_maker_opt (keys %{ $info->{sql_maker_options} }) {
+ my $value = $info->{sql_maker_options}{$sql_maker_opt};
+
+ $self->_sql_maker_opts->{$sql_maker_opt} = $value;
+ }
+
# FIXME - dirty:
- # save attributes them in a separate accessor so they are always
+ # save attributes in a separate accessor so they are always
# introspectable, even in case of a CODE $dbhmaker
$self->_dbic_connect_attributes (\%attrs);
return $self->_connect_info;
}
+sub _dbi_connect_info {
+ my $self = shift;
+
+ return $self->{_dbi_connect_info} = $_[0]
+ if @_;
+
+ my $conninfo = $self->{_dbi_connect_info} || [];
+
+ # last ditch effort to grab a DSN
+ if ( ! defined $conninfo->[0] and $ENV{DBI_DSN} ) {
+ my @new_conninfo = @$conninfo;
+ $new_conninfo[0] = $ENV{DBI_DSN};
+ $conninfo = \@new_conninfo;
+ }
+
+ return $conninfo;
+}
+
+
sub _normalize_connect_info {
my ($self, $info_arg) = @_;
my %info;
sub dbh_do {
my $self = shift;
- my $run_target = shift;
+ my $run_target = shift; # either a coderef or a method name
# short circuit when we know there is no need for a runner
#
DBIx::Class::Storage::BlockRunner->new(
storage => $self,
- run_code => sub { $self->$run_target ($self->_get_dbh, @$args ) },
wrap_txn => 0,
- retry_handler => sub { ! ( $_[0]->retried_count or $_[0]->storage->connected ) },
- )->run;
+ retry_handler => sub {
+ $_[0]->failed_attempt_count == 1
+ and
+ ! $_[0]->storage->connected
+ },
+ )->run(sub {
+ $self->$run_target ($self->_get_dbh, @$args )
+ });
}
sub txn_do {
return $self->_dbh;
}
+# *DELIBERATELY* not a setter (for the time being)
+# Too intertwined with everything else for any kind of sanity
sub sql_maker {
- my ($self) = @_;
+ my $self = shift;
+
+ $self->throw_exception('sql_maker() is not a setter method') if @_;
+
unless ($self->_sql_maker) {
my $sql_maker_class = $self->sql_maker_class;
sub _populate_dbh {
my ($self) = @_;
- my @info = @{$self->_dbi_connect_info || []};
$self->_dbh(undef); # in case ->connected failed we might get sent here
$self->_dbh_details({}); # reset everything we know
+ $self->_sql_maker(undef); # this may also end up being different
- $self->_dbh($self->_connect(@info));
+ $self->_dbh($self->_connect);
$self->_conn_pid($$) unless DBIx::Class::_ENV_::BROKEN_FORK; # on win32 these are in fact threads
require DBI::Const::GetInfoReturn;
my $self = shift;
- $self->ensure_connected;
+
+ my $drv;
+ try {
+ $drv = $self->_extract_driver_from_connect_info;
+ $self->ensure_connected;
+ };
+
+ $drv = "DBD::$drv" if $drv;
my $res = {
DBIC_DSN => $self->_dbi_connect_info->[0],
DBI_VER => DBI->VERSION,
DBIC_VER => DBIx::Class->VERSION,
DBIC_DRIVER => ref $self,
+ $drv ? (
+ DBD => $drv,
+ DBD_VER => try { $drv->VERSION },
+ ) : (),
};
+ # try to grab data even if we never managed to connect
+ # will cover us in cases of an oddly broken half-connect
for my $inf (
#keys %DBI::Const::GetInfoType::GetInfoType,
qw/
$started_connected = 1;
}
else {
- # if connect_info is a CODEREF, we have no choice but to connect
- if (ref $self->_dbi_connect_info->[0] &&
- reftype $self->_dbi_connect_info->[0] eq 'CODE') {
- $self->_populate_dbh;
- $driver = $self->_dbh->{Driver}{Name};
- }
- else {
- # try to use dsn to not require being connected, the driver may still
- # force a connection in _rebless to determine version
- # (dsn may not be supplied at all if all we do is make a mock-schema)
- my $dsn = $self->_dbi_connect_info->[0] || $ENV{DBI_DSN} || '';
- ($driver) = $dsn =~ /dbi:([^:]+):/i;
- $driver ||= $ENV{DBI_DRIVER};
- }
+ $driver = $self->_extract_driver_from_connect_info;
}
if ($driver) {
}
}
+sub _extract_driver_from_connect_info {
+ my $self = shift;
+
+ my $drv;
+
+ # if connect_info is a CODEREF, we have no choice but to connect
+ if (
+ ref $self->_dbi_connect_info->[0]
+ and
+ reftype $self->_dbi_connect_info->[0] eq 'CODE'
+ ) {
+ $self->_populate_dbh;
+ $drv = $self->_dbh->{Driver}{Name};
+ }
+ else {
+ # try to use dsn to not require being connected, the driver may still
+ # force a connection later in _rebless to determine version
+ # (dsn may not be supplied at all if all we do is make a mock-schema)
+ ($drv) = ($self->_dbi_connect_info->[0] || '') =~ /^dbi:([^:]+):/i;
+ $drv ||= $ENV{DBI_DRIVER};
+ }
+
+ return $drv;
+}
+
sub _determine_connector_driver {
my ($self, $conn) = @_;
}
sub _connect {
- my ($self, @info) = @_;
+ my $self = shift;
+
+ my $info = $self->_dbi_connect_info;
$self->throw_exception("You did not provide any connection_info")
- if ( ! defined $info[0] and ! $ENV{DBI_DSN} and ! $ENV{DBI_DRIVER} );
+ unless defined $info->[0];
my ($old_connect_via, $dbh);
};
try {
- if(ref $info[0] eq 'CODE') {
- $dbh = $info[0]->();
+ if(ref $info->[0] eq 'CODE') {
+ $dbh = $info->[0]->();
}
else {
require DBI;
- $dbh = DBI->connect(@info);
+ $dbh = DBI->connect(@$info);
}
die $DBI::errstr unless $dbh;
die sprintf ("%s fresh DBI handle with a *false* 'Active' attribute. "
. 'This handle is disconnected as far as DBIC is concerned, and we can '
. 'not continue',
- ref $info[0] eq 'CODE'
- ? "Connection coderef $info[0] returned a"
+ ref $info->[0] eq 'CODE'
+ ? "Connection coderef $info->[0] returned a"
: 'DBI->connect($schema->storage->connect_info) resulted in a'
) unless $dbh->FETCH('Active');
# Default via _default_dbi_connect_attributes is 1, hence it was an explicit
# request, or an external handle. Complain and set anyway
unless ($dbh->{RaiseError}) {
- carp( ref $info[0] eq 'CODE'
+ carp( ref $info->[0] eq 'CODE'
? "The 'RaiseError' of the externally supplied DBI handle is set to false. "
."DBIx::Class will toggle it back to true, unless the 'unsafe' connect "
};
$self->_dbh_autocommit($dbh->{AutoCommit});
- $dbh;
+ return $dbh;
}
sub txn_begin {
# soooooo much better now. But that is also another
# battle...
#return (
- # 'select', @{$orig_attrs->{_sqlmaker_select_args}}
- #) if $orig_attrs->{_sqlmaker_select_args};
+ # 'select', $orig_attrs->{!args_as_stored_at_the_end_of_this_method!}
+ #) if $orig_attrs->{!args_as_stored_at_the_end_of_this_method!};
my $sql_maker = $self->sql_maker;
my $alias2source = $self->_resolve_ident_sources ($ident);
($attrs->{from}, $attrs->{_aliastypes}) = $self->_prune_unused_joins ($attrs);
}
+ # FIXME this is a gross, inefficient, largely incorrect and fragile hack
+  # during the result inflation stage we *need* to know what the aliastype
+  # map was, as sqla saw it, when the final pieces of SQL were being assembled
+ # Originally we simply carried around the entirety of $attrs, but this
+ # resulted in resultsets that are being reused growing continuously, as
+ # the hash in question grew deeper and deeper.
+ # Instead hand-pick what to take with us here (we actually don't need much
+ # at this point just the map itself)
+ $orig_attrs->{_last_sqlmaker_alias_map} = $attrs->{_aliastypes};
+
###
# This would be the point to deflate anything found in $attrs->{where}
# (and leave $attrs->{bind} intact). Problem is - inflators historically
# invoked, and that's just bad...
###
- return ( 'select', @{ $orig_attrs->{_sqlmaker_select_args} = [
- @{$attrs}{qw(from select where)}, $attrs, @limit_args
- ]} );
+ return ( 'select', @{$attrs}{qw(from select where)}, $attrs, @limit_args );
}
# Returns a counting SELECT for a simple count
attrs => $attrs,
}, ref $class || $class;
- weaken( $cursor_registry{ refaddr($self) } = $self )
- if DBIx::Class::_ENV_::HAS_ITHREADS;
+ if (DBIx::Class::_ENV_::HAS_ITHREADS) {
+
+ # quick "garbage collection" pass - prevents the registry
+ # from slowly growing with a bunch of undef-valued keys
+ defined $cursor_registry{$_} or delete $cursor_registry{$_}
+ for keys %cursor_registry;
+
+ weaken( $cursor_registry{ refaddr($self) } = $self )
+ }
return $self;
}
# in ::Storage::DBI::InterBase as opposed to inheriting
# directly from ::Storage::DBI::Firebird::Common
use base qw/DBIx::Class::Storage::DBI::InterBase/;
-
use mro 'c3';
+1;
+
=head1 NAME
DBIx::Class::Storage::DBI::Firebird - Driver for the Firebird RDBMS via
This is an empty subclass of L<DBIx::Class::Storage::DBI::InterBase> for use
with L<DBD::Firebird>, see that driver for details.
-=cut
-
-1;
-
=head1 AUTHOR
See L<DBIx::Class/AUTHOR> and L<DBIx::Class/CONTRIBUTORS>.
You may distribute this code under the same terms as Perl itself.
-=cut
-# vim:sts=2 sw=2:
__PACKAGE__->sql_limit_dialect ('FirstSkip');
__PACKAGE__->sql_quote_char ('"');
+__PACKAGE__->datetime_parser_type(
+ 'DBIx::Class::Storage::DBI::InterBase::DateTime::Format'
+);
+
+sub sqlt_type {
+ return 'Firebird';
+}
+
sub _sequence_fetch {
my ($self, $nextval, $sequence) = @_;
});
}
+package # hide from PAUSE
+ DBIx::Class::Storage::DBI::InterBase::DateTime::Format;
+
+my $timestamp_format = '%Y-%m-%d %H:%M:%S.%4N'; # %F %T
+my $date_format = '%Y-%m-%d';
+
+my ($timestamp_parser, $date_parser);
+
+sub parse_datetime {
+ shift;
+ require DateTime::Format::Strptime;
+ $timestamp_parser ||= DateTime::Format::Strptime->new(
+ pattern => $timestamp_format,
+ on_error => 'croak',
+ );
+ return $timestamp_parser->parse_datetime(shift);
+}
+
+sub format_datetime {
+ shift;
+ require DateTime::Format::Strptime;
+ $timestamp_parser ||= DateTime::Format::Strptime->new(
+ pattern => $timestamp_format,
+ on_error => 'croak',
+ );
+ return $timestamp_parser->format_datetime(shift);
+}
+
+sub parse_date {
+ shift;
+ require DateTime::Format::Strptime;
+ $date_parser ||= DateTime::Format::Strptime->new(
+ pattern => $date_format,
+ on_error => 'croak',
+ );
+ return $date_parser->parse_datetime(shift);
+}
+
+sub format_date {
+ shift;
+ require DateTime::Format::Strptime;
+ $date_parser ||= DateTime::Format::Strptime->new(
+ pattern => $date_format,
+ on_error => 'croak',
+ );
+ return $date_parser->format_datetime(shift);
+}
+
1;
=head1 CAVEATS
=cut
-__PACKAGE__->datetime_parser_type(
- 'DBIx::Class::Storage::DBI::InterBase::DateTime::Format'
-);
-
sub _ping {
my $self = shift;
$self->_get_dbh->{ib_time_all} = 'ISO';
}
-
-package # hide from PAUSE
- DBIx::Class::Storage::DBI::InterBase::DateTime::Format;
-
-my $timestamp_format = '%Y-%m-%d %H:%M:%S.%4N'; # %F %T
-my $date_format = '%Y-%m-%d';
-
-my ($timestamp_parser, $date_parser);
-
-sub parse_datetime {
- shift;
- require DateTime::Format::Strptime;
- $timestamp_parser ||= DateTime::Format::Strptime->new(
- pattern => $timestamp_format,
- on_error => 'croak',
- );
- return $timestamp_parser->parse_datetime(shift);
-}
-
-sub format_datetime {
- shift;
- require DateTime::Format::Strptime;
- $timestamp_parser ||= DateTime::Format::Strptime->new(
- pattern => $timestamp_format,
- on_error => 'croak',
- );
- return $timestamp_parser->format_datetime(shift);
-}
-
-sub parse_date {
- shift;
- require DateTime::Format::Strptime;
- $date_parser ||= DateTime::Format::Strptime->new(
- pattern => $date_format,
- on_error => 'croak',
- );
- return $date_parser->parse_datetime(shift);
-}
-
-sub format_date {
- shift;
- require DateTime::Format::Strptime;
- $date_parser ||= DateTime::Format::Strptime->new(
- pattern => $date_format,
- on_error => 'croak',
- );
- return $date_parser->format_datetime(shift);
-}
-
1;
=head1 CAVEATS
=cut
-__PACKAGE__->datetime_parser_type ('DBIx::Class::Storage::DBI::ODBC::Firebird::DateTime::Format');
-
# batch operations in DBD::ODBC 1.35 do not work with the official ODBC driver
sub _run_connection_actions {
my $self = shift;
};
}
-package # hide from PAUSE
- DBIx::Class::Storage::DBI::ODBC::Firebird::DateTime::Format;
-
-# inherit parse/format date
-our @ISA = 'DBIx::Class::Storage::DBI::InterBase::DateTime::Format';
-
-my $timestamp_format = '%Y-%m-%d %H:%M:%S.%4N'; # %F %T
-my $timestamp_parser;
-
-sub parse_datetime {
- shift;
- require DateTime::Format::Strptime;
- $timestamp_parser ||= DateTime::Format::Strptime->new(
- pattern => $timestamp_format,
- on_error => 'croak',
- );
- return $timestamp_parser->parse_datetime(shift);
-}
-
-sub format_datetime {
- shift;
- require DateTime::Format::Strptime;
- $timestamp_parser ||= DateTime::Format::Strptime->new(
- pattern => $timestamp_format,
- on_error => 'croak',
- );
- return $timestamp_parser->format_datetime(shift);
-}
-
-1;
-
=head1 AUTHOR
See L<DBIx::Class/AUTHOR> and L<DBIx::Class/CONTRIBUTORS>.
=cut
# vim:sts=2 sw=2:
+
+1;
sub _dbh_execute {
#my ($self, $dbh, $sql, $bind, $bind_attrs) = @_;
- my ($self, $bind) = @_[0,3];
+ my ($self, $sql, $bind) = @_[0,2,3];
# Turn off sth caching for multi-part LOBs. See _prep_for_execute below
local $self->{disable_sth_caching} = 1 if first {
return shift->$next(@_)
if $self->transaction_depth;
- # cheat the blockrunner - we do want to rerun things regardless of outer state
+ # cheat the blockrunner we are just about to create
+ # we do want to rerun things regardless of outer state
local $self->{_in_do_block};
return DBIx::Class::Storage::BlockRunner->new(
storage => $self,
- run_code => $next,
- run_args => \@_,
wrap_txn => 0,
retry_handler => sub {
# ORA-01003: no statement parsed (someone changed the table somehow,
# invalidating your cursor.)
- return 0 if ($_[0]->retried_count or $_[0]->last_exception !~ /ORA-01003/);
-
- # re-prepare towards new table data
- if (my $dbh = $_[0]->storage->_dbh) {
- delete $dbh->{CachedKids}{$_[0]->run_args->[2]};
+ if (
+ $_[0]->failed_attempt_count == 1
+ and
+ $_[0]->last_exception =~ /ORA-01003/
+ and
+ my $dbh = $_[0]->storage->_dbh
+ ) {
+ delete $dbh->{CachedKids}{$sql};
+ return 1;
+ }
+ else {
+ return 0;
}
- return 1;
},
- )->run;
+ )->run( $next, @_ );
}
sub _dbh_execute_for_fetch {
my $alias = $self->next::method(@_);
# we need to shorten here in addition to the shortening in SQLA itself,
- # since the final relnames are a crucial for the join optimizer
+ # since the final relnames are crucial for the join optimizer
return $self->sql_maker->_shorten_identifier($alias);
}
that the Pool object should get.
my $schema = Schema::Class->clone;
- $schema->storage_type( ['::DBI::Replicated', {balancer=>'::Random'}] );
+ $schema->storage_type(['::DBI::Replicated', { balancer_type => '::Random' }]);
$schema->connection(...);
Next, you need to add in the Replicants. Basically this is an array of
_dbh_get_info
_determine_connector_driver
+ _extract_driver_from_connect_info
_describe_connection
_warn_undetermined_driver
my $result = $resultset->search(undef, {force_pool=>'master'})->find($pk);
-This attribute will safely be ignore by non replicated storages, so you can use
+This attribute will safely be ignored by non replicated storages, so you can use
the same code for both types of systems.
Lastly, you can use the L</execute_reliably> method, which works very much like
# DBD::SQLite warns on binding >32 bit values with 32 bit IVs
sub _dbh_execute {
if (
- DBIx::Class::_ENV_::IV_SIZE < 8
+ (
+ DBIx::Class::_ENV_::IV_SIZE < 8
+ or
+ DBIx::Class::_ENV_::OS_NAME eq 'MSWin32'
+ )
and
! defined $DBD::SQLite::__DBIC_CHECK_dbd_mishandles_bound_BIGINT
) {
|
\d+ \s type \s @{[ DBI::SQL_BIGINT() ]} \s as \s [-+]? \d+ (?: \. 0*)?
)
- /x ) if DBIx::Class::_ENV_::IV_SIZE < 8 and $DBD::SQLite::__DBIC_CHECK_dbd_mishandles_bound_BIGINT;
+ /x ) if (
+ (
+ DBIx::Class::_ENV_::IV_SIZE < 8
+ or
+ DBIx::Class::_ENV_::OS_NAME eq 'MSWin32'
+ )
+ and
+ $DBD::SQLite::__DBIC_CHECK_dbd_mishandles_bound_BIGINT
+ );
shift->next::method(@_);
}
my $bindattrs = $self->next::method($ident, $bind);
- # somewhere between 1.33 and 1.37 things went horribly wrong
if (! defined $DBD::SQLite::__DBIC_CHECK_dbd_can_bind_bigint_values) {
- $DBD::SQLite::__DBIC_CHECK_dbd_can_bind_bigint_values = (
- modver_gt_or_eq('DBD::SQLite', '1.34')
- and
- ! modver_gt_or_eq('DBD::SQLite', '1.37')
- ) ? 0 : 1;
+ $DBD::SQLite::__DBIC_CHECK_dbd_can_bind_bigint_values
+ = modver_gt_or_eq('DBD::SQLite', '1.37') ? 1 : 0;
}
# an attempt to detect former effects of RT#79576, bug itself present between
}
elsif (
! $DBD::SQLite::__DBIC_CHECK_dbd_can_bind_bigint_values
- and
+ ) {
      # signed 32 bit ints have a range of −2,147,483,648 to 2,147,483,647
# alternatively expressed as the hexadecimal numbers below
# the comparison math will come out right regardless of ivsize, since
# we are operating within 31 bits
# P.S. 31 because one bit is lost for the sign
- ($bind->[$i][1] > 0x7fff_ffff or $bind->[$i][1] < -0x8000_0000)
- ) {
- carp_unique( sprintf (
- "An integer value occupying more than 32 bits was supplied for column '%s' "
- . 'which your version of DBD::SQLite (%s) can not bind properly so DBIC '
- . 'will treat it as a string instead, consider upgrading to at least '
- . 'DBD::SQLite version 1.37',
- $bind->[$i][0]{dbic_colname} || "# $i",
- DBD::SQLite->VERSION,
- ) );
- undef $bindattrs->[$i];
+ if ($bind->[$i][1] > 0x7fff_ffff or $bind->[$i][1] < -0x8000_0000) {
+ carp_unique( sprintf (
+ "An integer value occupying more than 32 bits was supplied for column '%s' "
+ . 'which your version of DBD::SQLite (%s) can not bind properly so DBIC '
+ . 'will treat it as a string instead, consider upgrading to at least '
+ . 'DBD::SQLite version 1.37',
+ $bind->[$i][0]{dbic_colname} || "# $i",
+ DBD::SQLite->VERSION,
+ ) );
+ undef $bindattrs->[$i];
+ }
+ else {
+ $bindattrs->[$i] = DBI::SQL_INTEGER()
+ }
}
}
}
if ($dbh->{syb_no_child_con}) {
return try {
- $self->_connect(@{$self->_dbi_connect_info || [] })
- ->do('select 1');
+ $self->_connect->do('select 1');
1;
}
catch {
sub sql_maker {
my $self = shift;
- unless ($self->_sql_maker) {
- my $maker = $self->next::method (@_);
+ # it is critical to get the version *before* calling next::method
+ # otherwise the potential connect will obliterate the sql_maker
+ # next::method will populate in the _sql_maker accessor
+ my $mysql_ver = $self->_server_info->{normalized_dbms_version};
- # mysql 3 does not understand a bare JOIN
- my $mysql_ver = $self->_dbh_get_info('SQL_DBMS_VER');
- $maker->needs_inner_join(1) if $mysql_ver =~ /^3/;
- }
+ my $sm = $self->next::method(@_);
+
+ # mysql 3 does not understand a bare JOIN
+ $sm->needs_inner_join(1) if $mysql_ver < 4;
- return $self->_sql_maker;
+ $sm;
}
sub sqlt_type {
),
],
selecting => [
- ($attrs->{select}
- ? ($sql_maker->_render_sqla(select_select => $attrs->{select}))[0]
- : ()),
+ map { $sql_maker->_render_sqla(select_select => $_) =~ /^SELECT\s+(.+)/ } @{$attrs->{select}||[]},
],
ordering => [
map { $_->[0] } $self->_extract_order_criteria ($attrs->{order_by}, $sql_maker),
],
};
- # throw away empty chunks
- $_ = [ map { $_ || () } @$_ ] for values %$to_scan;
+ # throw away empty chunks and all 2-value arrayrefs: the thinking is that these are
+ # bind value specs left in by the sloppy renderer above. It is ok to do this
+ # at this point, since we are going to end up rewriting this crap anyway
+ for my $v (values %$to_scan) {
+ my @nv;
+ for (@$v) {
+ next if (
+ ! defined $_
+ or
+ (
+ ref $_ eq 'ARRAY'
+ and
+ ( @$_ == 0 or @$_ == 2 )
+ )
+ );
+
+ if (ref $_) {
+ require Data::Dumper::Concise;
+ $self->throw_exception("Unexpected ref in scan-plan: " . Data::Dumper::Concise::Dumper($v) );
+ }
+
+ push @nv, $_;
+ }
+
+ $v = \@nv;
+ }
+
+ # kill all selectors which look like a proper subquery
+ # this is a sucky heuristic *BUT* - if we get it wrong the query will simply
+ # fail to run, so we are relatively safe
+ $to_scan->{selecting} = [ grep {
+ $_ !~ / \A \s* \( \s* SELECT \s+ .+? \s+ FROM \s+ .+? \) \s* \z /xsi
+ } @{ $to_scan->{selecting} || [] } ];
# first see if we have any exact matches (qualified or unqualified)
for my $type (keys %$to_scan) {
use Try::Tiny;
use Scalar::Util qw/weaken blessed refaddr/;
use DBIx::Class;
+use DBIx::Class::_Util 'is_exception';
use DBIx::Class::Carp;
use namespace::clean;
# FIXME FRAGILE - any eval that fails but *does not* rethrow between here
# and the unwind will trample over $@ and invalidate the entire mechanism
# There got to be a saner way of doing this...
- if (defined $@ and "$@" ne '') {
+ if (is_exception $@) {
weaken(
- $guard->{existing_exception_ref} = (ref $@ eq '') ? \$@ : $@
+ $guard->{existing_exception_ref} = (ref($@) eq '') ? \$@ : $@
);
}
return unless $self->{dbh};
my $exception = $@ if (
- defined $@
- and
- "$@" ne ''
+ is_exception $@
and
(
! defined $self->{existing_exception_ref}
or
- refaddr( ref $@ eq '' ? \$@ : $@ ) != refaddr($self->{existing_exception_ref})
+ refaddr( ref($@) eq '' ? \$@ : $@ ) != refaddr($self->{existing_exception_ref})
)
);
use constant SPURIOUS_VERSION_CHECK_WARNINGS => ($] < 5.010 ? 1 : 0);
-use Carp;
-use Scalar::Util qw(refaddr weaken);
+BEGIN {
+ package # hide from pause
+ DBIx::Class::_ENV_;
+
+ use Config;
+
+ use constant {
+
+ # but of course
+ BROKEN_FORK => ($^O eq 'MSWin32') ? 1 : 0,
+
+ HAS_ITHREADS => $Config{useithreads} ? 1 : 0,
+
+ # ::Runmode would only be loaded by DBICTest, which in turn implies t/
+ DBICTEST => eval { DBICTest::RunMode->is_author } ? 1 : 0,
+
+ # During 5.13 dev cycle HELEMs started to leak on copy
+ PEEPEENESS =>
+ # request for all tests would force "non-leaky" illusion and vice-versa
+ defined $ENV{DBICTEST_ALL_LEAKS} ? !$ENV{DBICTEST_ALL_LEAKS}
+ # otherwise confess that this perl is busted ONLY on smokers
+ : eval { DBICTest::RunMode->is_smoker } && ($] >= 5.013005 and $] <= 5.013006) ? 1
+ # otherwise we are good
+ : 0
+ ,
+
+ ASSERT_NO_INTERNAL_WANTARRAY => $ENV{DBIC_ASSERT_NO_INTERNAL_WANTARRAY} ? 1 : 0,
+
+ IV_SIZE => $Config{ivsize},
+
+ OS_NAME => $^O,
+ };
+
+ if ($] < 5.009_005) {
+ require MRO::Compat;
+ constant->import( OLD_MRO => 1 );
+ }
+ else {
+ require mro;
+ constant->import( OLD_MRO => 0 );
+ }
+}
+
+# FIXME - this is not supposed to be here
+# Carp::Skip to the rescue soon
+use DBIx::Class::Carp '^DBIx::Class|^DBICTest';
+
+use Carp 'croak';
+use Scalar::Util qw(weaken blessed reftype);
use base 'Exporter';
-our @EXPORT_OK = qw(sigwarn_silencer modver_gt_or_eq fail_on_internal_wantarray);
+our @EXPORT_OK = qw(sigwarn_silencer modver_gt_or_eq fail_on_internal_wantarray refcount hrefaddr is_exception);
-sub sigwarn_silencer {
+sub sigwarn_silencer ($) {
my $pattern = shift;
croak "Expecting a regexp" if ref $pattern ne 'Regexp';
return sub { &$orig_sig_warn unless $_[0] =~ $pattern };
}
-sub modver_gt_or_eq {
+sub hrefaddr ($) { sprintf '0x%x', &Scalar::Util::refaddr }
+
+sub refcount ($) {
+ croak "Expecting a reference" if ! length ref $_[0];
+
+ require B;
+ # No tempvars - must operate on $_[0], otherwise the pad
+ # will count as an extra ref
+ B::svref_2object($_[0])->REFCNT;
+}
+
+sub is_exception ($) {
+ my $e = $_[0];
+
+ # this is not strictly correct - an eval setting $@ to undef
+ # is *not* the same as an eval setting $@ to ''
+ # but for the sake of simplicity assume the following for
+ # the time being
+ return 0 unless defined $e;
+
+ my ($not_blank, $suberror);
+ {
+ local $@;
+ eval {
+ $not_blank = ($e ne '') ? 1 : 0;
+ 1;
+ } or $suberror = $@;
+ }
+
+ if (defined $suberror) {
+ if (length (my $class = blessed($e) )) {
+ carp_unique( sprintf(
+ 'External exception object %s=%s(%s) implements partial (broken) '
+ . 'overloading preventing it from being used in simple ($x eq $y) '
+ . 'comparisons. Given Perl\'s "globally cooperative" exception '
+ . 'handling this type of brokenness is extremely dangerous on '
+ . 'exception objects, as it may (and often does) result in silent '
+ . '"exception substitution". DBIx::Class tries to work around this '
+ . 'as much as possible, but other parts of your software stack may '
+ . 'not be even aware of this. Please submit a bugreport against the '
+ . 'distribution containing %s and in the meantime apply a fix similar '
+ . 'to the one shown at %s, in order to ensure your exception handling '
+ . 'is saner application-wide. What follows is the actual error text '
+ . "as generated by Perl itself:\n\n%s\n ",
+ $class,
+ reftype $e,
+ hrefaddr $e,
+ $class,
+ 'http://v.gd/DBIC_overload_tempfix/',
+ $suberror,
+ ));
+
+ # workaround, keeps spice flowing
+ $not_blank = ("$e" ne '') ? 1 : 0;
+ }
+ else {
+ # not blessed yet failed the 'ne'... this makes 0 sense...
+ # just throw further
+ die $suberror
+ }
+ }
+
+ return $not_blank;
+}
+
+sub modver_gt_or_eq ($$) {
my ($mod, $ver) = @_;
croak "Nonsensical module name supplied"
my $obj = shift;
DBIx::Class::Exception->throw( sprintf (
- "Improper use of %s(0x%x) instance in list context at %s line %d\n\n\tStacktrace starts",
- ref($obj), refaddr($obj), (caller($cf))[1,2]
+ "Improper use of %s(%s) instance in list context at %s line %d\n\n\tStacktrace starts",
+ ref($obj), hrefaddr($obj), (caller($cf))[1,2]
), 'with_stacktrace');
}
die "You may not specify more than one deploy path via --deploy-to\n"
if @{$args->{'deploy-to'}||[]} > 1;
+local $ENV{DBI_DSN};
my $schema = use_module( $args->{'schema-class'}[0] )->connect(
$args->{'deploy-to'}
? ( "DBI:SQLite:$args->{'deploy-to'}[0]", undef, undef, { on_connect_do => "PRAGMA synchronous = OFF" } )
# The oneliner is a tad convoluted - basically what we do is
# slurp the entire file and get the index off the last
# `processor : XX` line
-export NUMTHREADS=$(( $(perl -0777 -n -e 'print (/ (?: .+ ^ processor \s+ : \s+ (\d+) ) (?! ^ processor ) /smx)' < /proc/cpuinfo) + 1 ))
+export NUMTHREADS="$(( $(perl -0777 -n -e 'print (/ (?: .+ ^ processor \s+ : \s+ (\d+) ) (?! ^ processor ) /smx)' < /proc/cpuinfo) + 1 ))"
+
+export CACHE_DIR="/tmp/poormanscache"
+
+# install some common tools from APT, more below unless CLEANTEST
+apt_install libapp-nopaste-perl tree apt-transport-https
-run_or_err "Installing common tools from APT" "sudo apt-get install --allow-unauthenticated -y libapp-nopaste-perl tree"
# FIXME - the debian package is oddly broken - uses a bin/env based shebang
# so nothing works under a brew. Fix here until #debian-perl patches it up
sudo /usr/bin/perl -p -i -e 's|#!/usr/bin/env perl|#!/usr/bin/perl|' $(which nopaste)
sudo bash -c 'echo -e "firebird2.5-super\tshared/firebird/enabled\tboolean\ttrue" | debconf-set-selections'
sudo bash -c 'echo -e "firebird2.5-super\tshared/firebird/sysdba_password/new_password\tpassword\t123" | debconf-set-selections'
- APT_PACKAGES="memcached firebird2.5-super firebird2.5-dev expect"
- run_or_err "Installing packages ($APT_PACKAGES)" "sudo apt-get install --allow-unauthenticated -y $APT_PACKAGES"
+ # add extra APT repo for Oracle
+ # (https is critical - apt-get update can't seem to follow the 302)
+ sudo bash -c 'echo -e "\ndeb [arch=i386] https://oss.oracle.com/debian unstable main non-free" >> /etc/apt/sources.list'
+
+ run_or_err "Cloning poor man's cache from github" "git clone --depth=1 --branch=poor_mans_travis_cache https://github.com/ribasushi/travis_futzing.git $CACHE_DIR && $CACHE_DIR/reassemble"
+
+ run_or_err "Priming up the APT cache with $(echo $(ls -d $CACHE_DIR/apt_cache/*.deb))" "sudo cp $CACHE_DIR/apt_cache/*.deb /var/cache/apt/archives"
+
+ apt_install memcached firebird2.5-super firebird2.5-dev unixodbc-dev expect oracle-xe
### config memcached
+ run_or_err "Starting memcached" "sudo /etc/init.d/memcached start"
export DBICTEST_MEMCACHED=127.0.0.1:11211
### config mysql
expect "Password for SYSDBA"
send "123\r"
sleep 1
- wait
- sleep 1
+ expect eof
'
# creating testdb
# FIXME - this step still fails from time to time >:(((
if run_or_err "Creating Firebird TestDB" \
"echo \"CREATE DATABASE '/var/lib/firebird/2.5/data/dbic_test.fdb';\" | sudo isql-fb -u sysdba -p 123"
then
+
+ run_or_err "Fetching and building Firebird ODBC driver" '
+ cd "$(mktemp -d)"
+ wget -qO- http://sourceforge.net/projects/firebird/files/firebird-ODBC-driver/2.0.2-Release/OdbcFb-Source-2.0.2.153.gz/download | tar -zx
+ cd Builds/Gcc.lin
+ perl -p -i -e "s|/usr/lib64|/usr/lib/x86_64-linux-gnu|g" ../makefile.environ
+ make -f makefile.linux
+ sudo make -f makefile.linux install
+ '
+
+ sudo bash -c 'cat >> /etc/odbcinst.ini' <<< "
+[Firebird]
+Description = InterBase/Firebird ODBC Driver
+Driver = /usr/lib/x86_64-linux-gnu/libOdbcFb.so
+Setup = /usr/lib/x86_64-linux-gnu/libOdbcFb.so
+Threading = 1
+FileUsage = 1
+"
+
export DBICTEST_FIREBIRD_DSN=dbi:Firebird:dbname=/var/lib/firebird/2.5/data/dbic_test.fdb
export DBICTEST_FIREBIRD_USER=SYSDBA
export DBICTEST_FIREBIRD_PASS=123
export DBICTEST_FIREBIRD_INTERBASE_USER=SYSDBA
export DBICTEST_FIREBIRD_INTERBASE_PASS=123
+ export DBICTEST_FIREBIRD_ODBC_DSN="dbi:ODBC:Driver=Firebird;Dbname=/var/lib/firebird/2.5/data/dbic_test.fdb"
+ export DBICTEST_FIREBIRD_ODBC_USER=SYSDBA
+ export DBICTEST_FIREBIRD_ODBC_PASS=123
+
break
fi
done
-### oracle
- # FIXME: todo
- #DBICTEST_ORA_DSN=dbi:Oracle:host=localhost;sid=XE
- #DBICTEST_ORA_USER=dbic_test
- #DBICTEST_ORA_PASS=123
- #DBICTEST_ORA_EXTRAUSER_DSN=dbi:Oracle:host=localhost;sid=XE
- #DBICTEST_ORA_EXTRAUSER_USER=dbic_test_extra
- #DBICTEST_ORA_EXTRAUSER_PASS=123
- #ORACLE_HOME=/usr/lib/oracle/xe/app/oracle/product/10.2.0/client
+### config oracle
+ SRV_ORA_HOME=/usr/lib/oracle/xe/app/oracle/product/10.2.0/server
+
+  # without this some of the more zealous tests can exhaust the number
+  # of listeners and oracle is too slow to spin extras up :(
+ sudo bash -c "echo -e '\nprocesses=150' >> $SRV_ORA_HOME/config/scripts/init.ora"
+
+ EXPECT_ORA_SCRIPT='
+ spawn /etc/init.d/oracle-xe configure
+
+ sleep 1
+ set send_slow {1 .005}
+
+ expect "Specify the HTTP port that will be used for Oracle Application Express"
+ sleep 0.5
+ send -s "8021\r"
+
+ expect "Specify a port that will be used for the database listener"
+ sleep 0.5
+ send -s "1521\r"
+
+ expect "Specify a password to be used for database accounts"
+ sleep 0.5
+ send -s "adminpass\r"
+
+ expect "Confirm the password"
+ sleep 0.5
+ send -s "adminpass\r"
+
+ expect "Do you want Oracle Database 10g Express Edition to be started on boot"
+ sleep 0.5
+ send -s "n\r"
+
+ sleep 0.5
+ expect "Configuring Database"
+
+ sleep 1
+ expect eof
+ wait
+ '
+
+ # if we do not redirect to some random file, but instead try to capture
+ # into a var the way run_or_err does - everything hangs
+ # FIXME: I couldn't figure it out after 3 hours of headdesking,
+ # would be nice to know the reason eventually
+ run_or_err "Configuring OracleXE" "sudo $(which expect) -c '$EXPECT_ORA_SCRIPT' &>/tmp/ora_configure_10.2.log"
+
+ export DBICTEST_ORA_DSN=dbi:Oracle://localhost:1521/XE
+ export DBICTEST_ORA_USER=dbic_test
+ export DBICTEST_ORA_PASS=abc123456
+ export DBICTEST_ORA_EXTRAUSER_DSN="$DBICTEST_ORA_DSN"
+ export DBICTEST_ORA_EXTRAUSER_USER=dbic_test_extra
+ export DBICTEST_ORA_EXTRAUSER_PASS=abc123456
+
+ run_or_err "Create Oracle users" "ORACLE_SID=XE ORACLE_HOME=$SRV_ORA_HOME $SRV_ORA_HOME/bin/sqlplus -L -S system/adminpass @/dev/stdin <<< '
+ CREATE USER $DBICTEST_ORA_USER IDENTIFIED BY $DBICTEST_ORA_PASS;
+ GRANT connect,resource TO $DBICTEST_ORA_USER;
+ CREATE USER $DBICTEST_ORA_EXTRAUSER_USER IDENTIFIED BY $DBICTEST_ORA_EXTRAUSER_PASS;
+ GRANT connect,resource TO $DBICTEST_ORA_EXTRAUSER_USER;
+ '"
+
+ export ORACLE_HOME="$CACHE_DIR/ora_instaclient/x86-64/oracle_instaclient_10.2.0.5.0"
fi
if ! [[ "$CPAN_MIRROR" =~ "http://" ]] ; then
echo_err "Unable to extract primary cpan mirror from PERL_CPANM_OPT - something is wrong"
echo_err "PERL_CPANM_OPT: $PERL_CPANM_OPT"
- CPAN_MIRROR="http://cpan.metacpan.org/"
+ CPAN_MIRROR="https://cpan.metacpan.org/"
PERL_CPANM_OPT="$PERL_CPANM_OPT --mirror $CPAN_MIRROR"
echo_err "Using $CPAN_MIRROR for the time being"
fi
# try CPAN's latest offering if requested
if [[ "$DEVREL_DEPS" == "true" ]] ; then
- if [[ "$CLEANTEST" == "true" ]] ; then
- echo_err "DEVREL_DEPS combined with CLEANTEST makes no sense - it is only possible with cpanm"
- exit 1
- fi
-
PERL_CPANM_OPT="$PERL_CPANM_OPT --dev"
# FIXME inline-upgrade cpanm, work around https://github.com/travis-ci/travis-ci/issues/1477
source maint/travis-ci_scripts/common.bash
if [[ -n "$SHORT_CIRCUIT_SMOKE" ]] ; then return ; fi
-# poison the environment - basically look through lib, find all mentioned
-# ENVvars and set them to true and see if anything explodes
+# poison the environment
if [[ "$POISON_ENV" = "true" ]] ; then
+
+ # look through lib, find all mentioned ENVvars and set them
+ # to true and see if anything explodes
for var in $(grep -P '\$ENV\{' -r lib/ | grep -oP 'DBIC_\w+' | sort -u | grep -v DBIC_TRACE) ; do
if [[ -z "${!var}" ]] ; then
export $var=1
fi
done
+ # bogus nonexisting DBI_*
export DBI_DSN="dbi:ODBC:server=NonexistentServerAddress"
export DBI_DRIVER="ADO"
+  # make sure tests do not rely on implicit order of returned results
export DBICTEST_SQLITE_REVERSE_DEFAULT_ORDER=1
+
+ # emulate a local::lib-like env
+ # trick cpanm into executing true as shell - we just need the find+unpack
+ run_or_err "Downloading latest stable DBIC from CPAN" \
+ "SHELL=/bin/true cpanm --look DBIx::Class"
+
+ export PERL5LIB="$( ls -d ~/.cpanm/latest-build/DBIx-Class-*/lib | tail -n1 ):$PERL5LIB"
+
+ # perldoc -l <mod> searches $(pwd)/lib in addition to PERL5LIB etc, hence the cd /
+ echo_err "Latest stable DBIC (without deps) locatable via \$PERL5LIB at $(cd / && perldoc -l DBIx::Class)"
+
+ # FIXME - this is a kludge in place of proper MDV testing. For the time
+ # being simply use the minimum versions of our DBI/DBDstack, to avoid
+ # fuckups like 0.08260 (went unnoticed for 5 months)
+ #
+ # use url-spec for DBI due to https://github.com/miyagawa/cpanminus/issues/328
+ if perl -M5.013003 -e1 &>/dev/null ; then
+ # earlier DBI will not compile without PERL_POLLUTE which was gone in 5.14
+ parallel_installdeps_notest T/TI/TIMB/DBI-1.614.tar.gz
+ else
+ parallel_installdeps_notest T/TI/TIMB/DBI-1.57.tar.gz
+ fi
+
+ # Test both minimum DBD::SQLite and minimum BigInt SQLite
+ if [[ "$CLEANTEST" = "true" ]]; then
+ parallel_installdeps_notest DBD::SQLite@1.37
+ else
+ parallel_installdeps_notest DBD::SQLite@1.29
+ fi
+
fi
if [[ "$CLEANTEST" = "true" ]]; then
# effects from travis preinstalls)
# trick cpanm into executing true as shell - we just need the find+unpack
- run_or_err "Downloading DBIC inc/ from CPAN" \
+ [[ -d ~/.cpanm/latest-build/DBIx-Class-*/inc ]] || run_or_err "Downloading latest stable DBIC inc/ from CPAN" \
"SHELL=/bin/true cpanm --look DBIx::Class"
mv ~/.cpanm/latest-build/DBIx-Class-*/inc .
# So instead we still use our stock (possibly old) CPAN, and add some
# handholding
- # no configure_requires - we will need the usual suspects anyway
- # without pre-installign these in one pass things like extract_prereqs won't work
- CPAN_is_sane || installdeps ExtUtils::MakeMaker ExtUtils::CBuilder Module::Build
+ if [[ "$DEVREL_DEPS" == "true" ]] ; then
+ # Many dists still do not pass tests under tb1.5 properly (and it itself
+ # does not even install on things like 5.10). Install the *stable-dev*
+ # latest T::B here, so that it will not show up as a dependency, and
+ # hence it will not get installed a second time as an unsatisfied dep
+ # under cpanm --dev
+ #
+ # We are also not "quite ready" for SQLA 1.99, do not consider it
+ #
+ installdeps 'Test::Builder~<1.005' 'SQL::Abstract~<1.99'
+
+ elif ! CPAN_is_sane ; then
+ # no configure_requires - we will need the usual suspects anyway
+ # without pre-installing these in one pass things like extract_prereqs won't work
+ installdeps ExtUtils::MakeMaker ExtUtils::CBuilder Module::Build
+
+ fi
else
# we will be running all dbic tests - preinstall lots of stuff, run basic tests
# using SQLT and set up whatever databases necessary
export DBICTEST_SQLT_DEPLOY=1
+ # FIXME - need new TB1.5 devrel
# if we run under --dev install latest github of TB1.5 first
# (unreleased workaround for precedence warnings)
if [[ "$DEVREL_DEPS" == "true" ]] ; then
parallel_installdeps_notest ExtUtils::MakeMaker
parallel_installdeps_notest File::Path
parallel_installdeps_notest Carp
- parallel_installdeps_notest Module::Build Module::Runtime
- parallel_installdeps_notest File::Spec Data::Dumper
+ parallel_installdeps_notest Module::Build
+ parallel_installdeps_notest File::Spec Data::Dumper Module::Runtime
parallel_installdeps_notest Test::Exception Encode::Locale Test::Fatal
parallel_installdeps_notest Test::Warn B::Hooks::EndOfScope Test::Differences HTTP::Status
parallel_installdeps_notest Test::Pod::Coverage Test::EOL Devel::GlobalDestruction Sub::Name MRO::Compat Class::XSAccessor URI::Escape HTML::Entities
- parallel_installdeps_notest YAML LWP Class::Trigger JSON::XS DBI DateTime::Format::Builder Class::Accessor::Grouped Package::Variant
- parallel_installdeps_notest Moose Module::Install JSON SQL::Translator File::Which indirect multidimensional bareword::filehandles
+ parallel_installdeps_notest YAML LWP Class::Trigger JSON::XS DateTime::Format::Builder Class::Accessor::Grouped Package::Variant
+ parallel_installdeps_notest 'SQL::Abstract~<1.99' Moose Module::Install JSON SQL::Translator File::Which
- if [[ -n "DBICTEST_FIREBIRD_DSN" ]] ; then
+ if [[ -n "$DBICTEST_FIREBIRD_INTERBASE_DSN" ]] ; then
# the official version is very much outdated and does not compile on 5.14+
# use this rather updated source tree (needs to go to PAUSE):
# https://github.com/pilcrow/perl-dbd-interbase
- run_or_err "Fetching patched DBD::InterBase" \
- "git clone https://github.com/dbsrgits/perl-dbd-interbase ~/dbd-interbase"
-
- parallel_installdeps_notest ~/dbd-interbase/
+ parallel_installdeps_notest git://github.com/dbsrgits/perl-dbd-interbase.git
fi
fi
# we may need to prepend some stuff to that list
HARD_DEPS="$(echo $(make listdeps))"
-##### TEMPORARY WORKAROUNDS
- if ! CPAN_is_sane ; then
+##### TEMPORARY WORKAROUNDS needed in case we will be using CPAN.pm
+ if [[ "$DEVREL_DEPS" != "true" ]] && ! CPAN_is_sane ; then
# combat dzillirium on harness-wide level, otherwise breakage happens weekly
echo_err "$(tstamp) Ancient CPAN.pm: engaging TAP::Harness::IgnoreNonessentialDzilAutogeneratedTests during dep install"
- perl -MTAP::Harness=3.18 -e1 &>/dev/null || run_or_err "Upgrading TAP::Harness for HARNESS_SUBCLASS support" "cpan TAP::Harness"
+ perl -MTAP::Harness\ 3.18 -e1 &>/dev/null || run_or_err "Upgrading TAP::Harness for HARNESS_SUBCLASS support" "cpan TAP::Harness"
export PERL5LIB="$(pwd)/maint/travis-ci_scripts/lib:$PERL5LIB"
export HARNESS_SUBCLASS="TAP::Harness::IgnoreNonessentialDzilAutogeneratedTests"
# sanity check, T::H does not report sensible errors when the subclass fails to load
perl -MTAP::Harness::IgnoreNonessentialDzilAutogeneratedTests -e1
# DBD::SQLite reasonably wants DBI at config time
- HARD_DEPS="DBI $HARD_DEPS"
+ perl -MDBI -e1 &>/dev/null || HARD_DEPS="DBI $HARD_DEPS"
+
+ # this is a fucked CPAN - won't understand configure_requires of
+ # various pieces we may run into
+ # FIXME - need to get these off metacpan or something instead
+ HARD_DEPS="ExtUtils::Depends B::Hooks::OP::Check $HARD_DEPS"
# FIXME
# parent is temporary due to Carp https://rt.cpan.org/Ticket/Display.html?id=88494
else
# listalldeps is deliberate - will upgrade everything it can find
- parallel_installdeps_notest $(make listalldeps)
+ # we exclude SQLA specifically, since we do not want to pull
+ # in 1.99_xx on bleadcpan runs
+ deplist="$(make listalldeps | grep -vP '^(SQL::Abstract)$')"
+ # assume MDV on POISON_ENV, do not touch DBI/SQLite
+ if [[ "$POISON_ENV" = "true" ]] ; then
+ deplist="$(grep -vP '^(DBI|DBD::SQLite)$' <<< "$deplist")"
+ fi
+
+ parallel_installdeps_notest "$deplist"
fi
echo_err "$(tstamp) Dependency installation finished"
exit 1
fi
+# check that our MDV somewhat works
+if [[ "$POISON_ENV" = "true" ]] && ( perl -MDBD::SQLite\ 1.38 -e1 || perl -MDBI\ 1.615 -e1 ) &>/dev/null ; then
+ echo_err "Something went wrong - higher versions of DBI and/or DBD::SQLite than we expected"
+ exit 1
+fi
+
+
# announce what are we running
echo_err "
===================== DEPENDENCY CONFIGURATION COMPLETE =====================
= Meminfo
$(free -m -t)
+= Kernel info
+$(uname -a)
+
+= Network Configuration
+$(ip addr)
+
+= Network Sockets Status
+$(sudo netstat -an46p | grep -Pv '\s(CLOSING|(FIN|TIME|CLOSE)_WAIT.?|LAST_ACK)\s')
+
= Environment
$(env | grep -P 'TEST|HARNESS|MAKE|TRAVIS|PERL|DBIC' | LC_ALL=C sort | cat -v)
echo "============================================================="
echo "End of test run STDERR output ($STDERR_LOG_SIZE lines)"
echo
+ echo
)$POSTMORTEM"
fi
if [[ -n "$SHORT_CIRCUIT_SMOKE" ]] ; then return ; fi
if [[ "$CLEANTEST" != "true" ]] ; then
- run_or_err "Install Pod::POM separately via cpan" "cpan -f Pod::POM || /bin/true"
parallel_installdeps_notest $(perl -Ilib -MDBIx::Class -e 'print join " ", keys %{DBIx::Class::Optional::Dependencies->req_list_for("dist_dir")}')
run_or_err "Attempt to build a dist with all prereqs present" "make dist"
+ echo "Contents of the resulting dist tarball:"
+ echo "==========================================="
+ tar -vzxf DBIx-Class-*.tar.gz
+ echo "==========================================="
+ run_or_err 'Attempt to configure from re-extracted distdir' \
+ 'bash -c "cd \$(find DBIx-Class-* -maxdepth 0 -type d | head -n 1) && perl Makefile.PL"'
fi
set -e
TEST_STDERR_LOG=/tmp/dbictest.stderr
+TIMEOUT_CMD="/usr/bin/timeout --kill-after=9.5m --signal=TERM 9m"
echo_err() { echo "$@" 1>&2 ; }
run_or_err() {
echo_err -n "$(tstamp) $1 ... "
+ LASTCMD="$2"
LASTEXIT=0
START_TIME=$SECONDS
- LASTOUT=$( bash -c "$2" 2>&1 ) || LASTEXIT=$?
+
+ PRMETER_PIDFILE="$(tempfile)_$SECONDS"
+ # the double bash is to hide the job control messages
+ bash -c "bash -c 'echo \$\$ >> $PRMETER_PIDFILE; while true; do sleep 10; echo -n \"\${SECONDS}s ... \"; done' &"
+
+ LASTOUT=$( eval "$2" 2>&1 ) || LASTEXIT=$?
+
+ # stop progress meter
+ for p in $(cat "$PRMETER_PIDFILE"); do kill $p ; done
+
DELTA_TIME=$(( $SECONDS - $START_TIME ))
if [[ "$LASTEXIT" != "0" ]] ; then
- echo_err "FAILED !!! (after ${DELTA_TIME}s)"
- echo_err "Command executed:"
- echo_err "$2"
- echo_err "STDOUT+STDERR:"
- echo_err "$LASTOUT"
+ if [[ -z "$3" ]] ; then
+ echo_err "FAILED !!! (after ${DELTA_TIME}s)"
+ echo_err "Command executed:"
+ echo_err "$LASTCMD"
+ echo_err "STDOUT+STDERR:"
+ echo_err "$LASTOUT"
+ fi
return $LASTEXIT
else
fi
}
+apt_install() {
+ # flatten
+ pkgs="$@"
+
+ # Need to do this at every step, the sources list may very well have changed
+ run_or_err "Updating APT available package list" "sudo apt-get update"
+
+ run_or_err "Installing Debian APT packages: $pkgs" "sudo apt-get install --allow-unauthenticated --no-install-recommends -y $pkgs"
+}
+
extract_prereqs() {
# once --verbose is set, --no-verbose can't disable it
# do this by hand
|| LASTEXIT=$?
OUT=${COMBINED_OUT#*!!!STDERRSTDOUTSEPARATOR!!!}
- ERR=$(grep -v " is up to date." <<< "${COMBINED_OUT%!!!STDERRSTDOUTSEPARATOR!!!*}")
+ ERR=${COMBINED_OUT%!!!STDERRSTDOUTSEPARATOR!!!*}
if [[ "$LASTEXIT" != "0" ]] ; then
echo_err "Error occured (exit code $LASTEXIT) retrieving dependencies of $@:"
exit 1
fi
- # throw away ascii art, convert to modnames
- PQ=$(perl -p -e 's/^[^a-z]+//i; s/\-[^\-]+$/ /; s/\-/::/g' <<< "$OUT")
+ # throw away warnings, up-to-date diag, ascii art, convert to modnames
+ PQ=$(perl -p -e '
+ s/^.*?is up to date.*$//;
+ s/^\!.*//;
+ s/^[^a-z]+//i;
+ s/\-[^\-]+$/ /; # strip version part
+ s/\-/::/g
+ ' <<< "$OUT")
# throw away what was in $@
for m in "$@" ; do
# one module spec per line
MODLIST="$(printf '%s\n' "$@")"
- # The reason we do things so "non-interactively" is that xargs -P will have the
- # latest cpanm instance overwrite the buildlog. There seems to be no way to
- # specify a custom buildlog, hence we just collect the verbose output
- # and display it in case of "worker" failure
+ # We want to trap the output of each process and serially append them to
+ # each other as opposed to just dumping a jumbled up mass-log that would
+ # need careful unpicking by a human
+ #
+ # While cpanm does maintain individual buildlogs in more recent versions,
+ # we are not terribly interested in trying to figure out which log is which
+ # dist. The verbose-output + trap STDIO technique is vastly superior in this
+ # particular case
#
# Explanation of inline args:
#
"echo \\
\"$MODLIST\" \\
| xargs -d '\\n' -n 1 -P $NUMTHREADS bash -c \\
- 'OUT=\$(cpanm --notest --no-man-pages \"\$@\" 2>&1 ) || (LASTEXIT=\$?; echo \"\$OUT\"; exit \$LASTEXIT)' \\
+ 'OUT=\$($TIMEOUT_CMD cpanm --notest \"\$@\" 2>&1 ) || (LASTEXIT=\$?; echo \"\$OUT\"; exit \$LASTEXIT)' \\
'giant space monkey penises'
"
}
installdeps() {
if [[ -z "$@" ]] ; then return; fi
- echo_err "$(tstamp) Processing dependencies: $@"
+ MODLIST=$(printf "%q " "$@" | perl -pe 's/^\s+|\s+$//g')
local -x HARNESS_OPTIONS
HARNESS_OPTIONS="j$NUMTHREADS"
- echo_err -n "Attempting install of $# modules under parallel ($HARNESS_OPTIONS) testing ... "
+ if ! run_or_err "Attempting install of $# modules under parallel ($HARNESS_OPTIONS) testing ($MODLIST)" "_dep_inst_with_test $MODLIST" quiet_fail ; then
+ local errlog="failed after ${DELTA_TIME}s Exit:$LASTEXIT Log:$(/usr/bin/nopaste -q -s Shadowcat -d "Parallel testfail" <<< "$LASTOUT")"
+ echo "$errlog"
- LASTEXIT=0
- START_TIME=$SECONDS
- LASTOUT=$( cpan_inst "$@" ) || LASTEXIT=$?
- DELTA_TIME=$(( $SECONDS - $START_TIME ))
-
- if [[ "$LASTEXIT" = "0" ]] ; then
- echo_err "done (took ${DELTA_TIME}s)"
- else
- local errlog="after ${DELTA_TIME}s Exit:$LASTEXIT Log:$(/usr/bin/nopaste -q -s Shadowcat -d "Parallel installfail" <<< "$LASTOUT")"
- echo_err -n "failed ($errlog) retrying with sequential testing ... "
POSTMORTEM="$POSTMORTEM$(
echo
- echo "Depinstall under $HARNESS_OPTIONS parallel testing failed $errlog"
- echo "============================================================="
- echo "Attempted installation of: $@"
- echo "============================================================="
+ echo "Depinstall of $MODLIST under $HARNESS_OPTIONS parallel testing $errlog"
)"
HARNESS_OPTIONS=""
- LASTEXIT=0
- START_TIME=$SECONDS
- LASTOUT=$( cpan_inst "$@" ) || LASTEXIT=$?
- DELTA_TIME=$(( $SECONDS - $START_TIME ))
-
- if [[ "$LASTEXIT" = "0" ]] ; then
- echo_err "done (took ${DELTA_TIME}s)"
- else
- echo_err "FAILED !!! (after ${DELTA_TIME}s)"
- echo_err "STDOUT+STDERR:"
- echo_err "$LASTOUT"
- exit 1
- fi
+ run_or_err "Retrying same $# modules without parallel testing" "_dep_inst_with_test $MODLIST"
fi
INSTALLDEPS_OUT="${INSTALLDEPS_OUT}${LASTOUT}"
}
-cpan_inst() {
- /usr/bin/timeout --kill-after=9.5m --signal=TERM 9m cpan "$@" 2>&1
+_dep_inst_with_test() {
+ if [[ "$DEVREL_DEPS" == "true" ]] ; then
+ # --dev is already part of CPANM_OPT
+ LASTCMD="$TIMEOUT_CMD cpanm $@"
+ $LASTCMD 2>&1
+ else
+ LASTCMD="$TIMEOUT_CMD cpan $@"
+ $LASTCMD 2>&1
- # older perls do not have a CPAN which can exit with error on failed install
- for m in "$@"; do
- if ! perl -e '
+ # older perls do not have a CPAN which can exit with error on failed install
+ for m in "$@"; do
+ if ! perl -e '
-eval ( q{require } . (
+my $mod = (
$ARGV[0] =~ m{ \/ .*? ([^\/]+) $ }x
? do { my @p = split (/\-/, $1); pop @p; join "::", @p }
: $ARGV[0]
-) ) or ( print $@ and exit 1)' "$m" 2> /dev/null ; then
+);
- echo -e "$m installation seems to have failed"
- return 1
- fi
- done
+$mod = q{List::Util} if $mod eq q{Scalar::List::Utils};
+
+eval qq{require($mod)} or ( print $@ and exit 1)
+
+ ' "$m" 2> /dev/null ; then
+ echo -e "$m installation seems to have failed"
+ return 1
+ fi
+ done
+ fi
}
CPAN_is_sane() { perl -MCPAN\ 1.94_56 -e 1 &>/dev/null ; }
__PACKAGE__->load_namespaces;
};
ok(!$@, 'load_namespaces doesnt die') or diag $@;
-like($warnings, qr/load_namespaces found ResultSet class C with no corresponding Result class/, 'Found warning about extra ResultSet classes');
+like($warnings, qr/load_namespaces found ResultSet class 'DBICNSTest::ResultSet::C' with no corresponding Result class/, 'Found warning about extra ResultSet classes');
-like($warnings, qr/load_namespaces found ResultSet class DBICNSTest::ResultSet::D that does not subclass DBIx::Class::ResultSet/, 'Found warning about ResultSets with incorrect subclass');
+like($warnings, qr/load_namespaces found ResultSet class 'DBICNSTest::ResultSet::D' that does not subclass DBIx::Class::ResultSet/, 'Found warning about ResultSets with incorrect subclass');
my $source_a = DBICNSTest->source('A');
isa_ok($source_a, 'DBIx::Class::ResultSource::Table');
);
};
ok(!$@) or diag $@;
-like($warnings, qr/load_namespaces found ResultSet class C with no corresponding Result class/);
+like($warnings, qr/load_namespaces found ResultSet class 'DBICNSTest::RSet::C' with no corresponding Result class/);
my $source_a = DBICNSTest->source('A');
isa_ok($source_a, 'DBIx::Class::ResultSource::Table');
resultset_namespace => '+DBICNSTest::RSet',
);
},
- qr/load_namespaces found ResultSet class C with no corresponding Result class/,
+ qr/load_namespaces found ResultSet class 'DBICNSTest::RSet::C' with no corresponding Result class/,
);
});
__PACKAGE__->load_namespaces( default_resultset_class => 'RSBase' );
};
ok(!$@) or diag $@;
-like($warnings, qr/load_namespaces found ResultSet class C with no corresponding Result class/);
+like($warnings, qr/load_namespaces found ResultSet class 'DBICNSTest::ResultSet::C' with no corresponding Result class/);
my $source_a = DBICNSTest->source('A');
isa_ok($source_a, 'DBIx::Class::ResultSource::Table');
package DBICNSTest::RtBug41083;
use base 'DBIx::Class::Schema';
__PACKAGE__->load_namespaces(
- result_namespace => 'Schema_A',
+ result_namespace => 'Result_A',
resultset_namespace => 'ResultSet_A',
default_resultset_class => 'ResultSet'
);
package DBICNSTest::RtBug41083;
use base 'DBIx::Class::Schema';
__PACKAGE__->load_namespaces(
- result_namespace => 'Schema',
- resultset_namespace => 'ResultSet',
default_resultset_class => 'ResultSet'
);
};
print "1..0 # SKIP your perl does not support ithreads\n";
exit 0;
}
+
+ if ($INC{'Devel/Cover.pm'}) {
+ print "1..0 # SKIP Devel::Cover does not work with threads yet\n";
+ exit 0;
+ }
}
use threads;
print "1..0 # SKIP your perl does not support ithreads\n";
exit 0;
}
+
+ if ($INC{'Devel/Cover.pm'}) {
+ print "1..0 # SKIP Devel::Cover does not work with threads yet\n";
+ exit 0;
+ }
}
use threads;
print "1..0 # SKIP your perl does not support ithreads\n";
exit 0;
}
+
+ if ($INC{'Devel/Cover.pm'}) {
+ print "1..0 # SKIP Devel::Cover does not work with threads yet\n";
+ exit 0;
+ }
}
use threads;
use lib qw(t/lib);
use DBICTest::RunMode;
-use DBICTest::Util::LeakTracer qw/populate_weakregistry assert_empty_weakregistry/;
-use Scalar::Util 'refaddr';
+use DBICTest::Util::LeakTracer qw(populate_weakregistry assert_empty_weakregistry visit_refs);
+use Scalar::Util qw(weaken blessed reftype);
use DBIx::Class;
+use DBIx::Class::_Util qw(hrefaddr sigwarn_silencer);
BEGIN {
plan skip_all => "Your perl version $] appears to leak like a sieve - skipping test"
if DBIx::Class::_ENV_::PEEPEENESS;
# re-populate the registry while checking it, ewwww!)
return $obj if (ref $obj) =~ /^TB2::/;
- # weaken immediately to avoid weird side effects
+ # populate immediately to avoid weird side effects
return populate_weakregistry ($weak_registry, $obj );
};
my $getcol_rs = $cds_rs->get_column('me.cdid');
my $pref_getcol_rs = $cds_with_stuff->get_column('me.cdid');
- # fire the column getters
- my @throwaway = $pref_getcol_rs->all;
-
my $base_collection = {
resultset => $rs,
get_column_rs_pref => $pref_getcol_rs,
# twice so that we make sure only one H::M object spawned
- chained_resultset => $rs->search_rs ({}, { '+columns' => [ 'foo' ] } ),
- chained_resultset2 => $rs->search_rs ({}, { '+columns' => [ 'bar' ] } ),
+ chained_resultset => $rs->search_rs ({}, { '+columns' => { foo => 'artistid' } } ),
+ chained_resultset2 => $rs->search_rs ({}, { '+columns' => { bar => 'artistid' } } ),
row_object => $row_obj,
leaky_resultset_cond => $cond_rowobj,
};
- # this needs to fire, even if it can't find anything
- # see FIXME below
- # we run this only on smokers - trying to establish a pattern
- $rs_bind_circref->next
- if ( ($ENV{TRAVIS}||'') ne 'true' and DBICTest::RunMode->is_smoker);
+ # fire all resultsets multiple times, once here, more below
+ # some of these can't find anything (notably leaky_resultset)
+ my @rsets = grep {
+ blessed $_
+ and
+ (
+ $_->isa('DBIx::Class::ResultSet')
+ or
+ $_->isa('DBIx::Class::ResultSetColumn')
+ )
+ } values %$base_collection;
+
+
+ my $fire_resultsets = sub {
+ local $ENV{DBIC_COLUMNS_INCLUDE_FILTER_RELS} = 1;
+ local $SIG{__WARN__} = sigwarn_silencer(
+ qr/Unable to deflate 'filter'-type relationship 'artist'.+related object primary key not retrieved/
+ );
+
+ map
+ { $_, (blessed($_) ? { $_->get_columns } : ()) }
+ map
+ { $_->all }
+ @rsets
+ ;
+ };
+
+ push @{$base_collection->{random_results}}, $fire_resultsets->();
+
+ # FIXME - something throws a Storable for a spin if we keep
+ # the results in-collection. The same problem is seen above,
+ # swept under the rug back in 0a03206a, damned lazy ribantainer
+{
+ local $base_collection->{random_results};
require Storable;
%$base_collection = (
fresh_pager => $rs->page(5)->pager,
pager => $pager,
);
+}
+
+ # FIXME - ideally this kind of collector ought to be global, but attempts
+ # with an invasive debugger-based tracer did not quite work out... yet
+ # Manually scan the innards of everything we have in the base collection
+ # we assembled so far (skip the DT madness below) *recursively*
+ #
+ # Only do this when we do have the bits to look inside CVs properly,
+ # without it we are liable to pick up object defaults that are locked
+ # in method closures
+ if (DBICTest::Util::LeakTracer::CV_TRACING) {
+ visit_refs(
+ refs => [ $base_collection ],
+ action => sub {
+ populate_weakregistry ($weak_registry, $_[0]);
+ 1; # true means "keep descending"
+ },
+ );
+
+ # do a heavy-duty fire-and-compare loop on all resultsets
+ # this is expensive - not running on install
+ my $typecounts = {};
+ if (
+ ! DBICTest::RunMode->is_plain
+ and
+ ! $ENV{DBICTEST_IN_PERSISTENT_ENV}
+ and
+ # FIXME - investigate wtf is going on with 5.18
+ ! ( $] > 5.017 and $ENV{DBIC_TRACE_PROFILE} )
+ ) {
+
+ # FIXME - ideally we should be able to just populate an alternative
+ # registry, subtract everything from the main one, and arrive at
+ # an "empty" resulting hash
+ # However due to gross inefficiencies in the ::ResultSet code we
+ # end up recalculating a new set of aliasmaps which could have very
+ # well been cached if it wasn't for... anyhow
+ # What we do here for the time being is similar to the lazy approach
+ # of Devel::LeakTrace - we just make sure we do not end up with more
+ # reftypes than when we started. At least we are not blanket-counting
+ # SVs like D::LT does, but going by reftype... sigh...
+
+ for (values %$weak_registry) {
+ if ( my $r = reftype($_->{weakref}) ) {
+ $typecounts->{$r}--;
+ }
+ }
+
+ # For now we can only reuse the same registry, see FIXME above/below
+ #for my $interim_wr ({}, {}) {
+ for my $interim_wr ( ($weak_registry) x 4 ) {
+
+ visit_refs(
+ refs => [ $fire_resultsets->(), @rsets ],
+ action => sub {
+ populate_weakregistry ($interim_wr, $_[0]);
+ 1; # true means "keep descending"
+ },
+ );
+
+ # FIXME - this is what *should* be here
+ #
+ ## anything we have seen so far is cool
+ #delete @{$interim_wr}{keys %$weak_registry};
+ #
+ ## moment of truth - the rest ought to be gone
+ #assert_empty_weakregistry($interim_wr);
+ }
+
+ for (values %$weak_registry) {
+ if ( my $r = reftype($_->{weakref}) ) {
+ $typecounts->{$r}++;
+ }
+ }
+ }
+
+ for (keys %$typecounts) {
+ fail ("Amount of $_ refs changed by $typecounts->{$_} during resultset mass-execution")
+ if ( abs ($typecounts->{$_}) > 1 ); # there is a pad caught somewhere, the +1/-1 can be ignored
+ }
+ }
if ($has_dt) {
my $rs = $base_collection->{icdt_rs} = $schema->resultset('Event');
$base_collection->{"DBI handle $_"} = $_;
}
- SKIP: {
- if ( DBIx::Class::Optional::Dependencies->req_ok_for ('test_leaks') ) {
- my @w;
- local $SIG{__WARN__} = sub { $_[0] =~ /\QUnhandled type: REGEXP/ ? push @w, @_ : warn @_ };
-
- Test::Memory::Cycle::memory_cycle_ok ($base_collection, 'No cycles in the object collection');
-
- if ( $] > 5.011 ) {
- local $TODO = 'Silence warning due to RT56681';
- is (@w, 0, 'No Devel::Cycle emitted warnings');
- }
- }
- else {
- skip 'Circular ref test needs ' . DBIx::Class::Optional::Dependencies->req_missing_for ('test_leaks'), 1;
- }
- }
-
populate_weakregistry ($weak_registry, $base_collection->{$_}, "basic $_")
for keys %$base_collection;
}
# Naturally we have some exceptions
my $cleared;
-for my $slot (keys %$weak_registry) {
- if ($slot =~ /^Test::Builder/) {
+for my $addr (keys %$weak_registry) {
+ my $names = join "\n", keys %{$weak_registry->{$addr}{slot_names}};
+
+ if ($names =~ /^Test::Builder/m) {
# T::B 2.0 has result objects and other fancyness
- delete $weak_registry->{$slot};
+ delete $weak_registry->{$addr};
}
- elsif ($slot =~ /^Method::Generate::(?:Accessor|Constructor)/) {
- # Moo keeps globals around, this is normal
- delete $weak_registry->{$slot};
+ elsif ($names =~ /^Hash::Merge/m) {
+ # only clear one object of a specific behavior - more would indicate trouble
+ delete $weak_registry->{$addr}
+ unless $cleared->{hash_merge_singleton}{$weak_registry->{$addr}{weakref}{behavior}}++;
}
- elsif ($slot =~ /^SQL::Translator::Generator::DDL::SQLite/) {
+ elsif (
+# # if we can look at closed over pieces - we will register it as a global
+# !DBICTest::Util::LeakTracer::CV_TRACING
+# and
+ $names =~ /^SQL::Translator::Generator::DDL::SQLite/m
+ ) {
# SQLT::Producer::SQLite keeps global generators around for quoted
# and non-quoted DDL, allow one for each quoting style
- delete $weak_registry->{$slot}
- unless $cleared->{sqlt_ddl_sqlite}->{@{$weak_registry->{$slot}{weakref}->quote_chars}}++;
- }
- elsif ($slot =~ /^Hash::Merge/) {
- # only clear one object of a specific behavior - more would indicate trouble
- delete $weak_registry->{$slot}
- unless $cleared->{hash_merge_singleton}{$weak_registry->{$slot}{weakref}{behavior}}++;
- }
- elsif ($slot =~ /^DateTime::TimeZone/) {
- # DT is going through a refactor it seems - let it leak zones for now
- delete $weak_registry->{$slot};
+ delete $weak_registry->{$addr}
+ unless $cleared->{sqlt_ddl_sqlite}->{@{$weak_registry->{$addr}{weakref}->quote_chars}}++;
}
}
# FIXME !!!
# There is an actual strong circular reference taking place here, but because
-# half of it is in XS no leaktracer sees it, and Devel::FindRef is equally
-# stumped when trying to trace the origin. The problem is:
+# half of it is in XS, so it is a bit harder to track down (it stumps D::FR)
+# (our tracker does not yet do it, but it'd be nice)
+# The problem is:
#
# $cond_object --> result_source --> schema --> storage --> $dbh --> {CachedKids}
# ^ /
# \-------- bound value on prepared/cached STH <-----------/
#
{
- local $TODO = 'This fails intermittently - see RT#82942';
- if ( my $r = $weak_registry->{'basic leaky_resultset_cond'}{weakref} ) {
+ my @circreffed;
+
+ for my $r (map
+ { $_->{weakref} }
+ grep
+ { $_->{slot_names}{'basic leaky_resultset_cond'} }
+ values %$weak_registry
+ ) {
+ local $TODO = 'Needs Data::Entangled or somesuch - see RT#82942';
ok(! defined $r, 'Self-referential RS conditions no longer leak!')
- or $r->result_source(undef);
+ or push @circreffed, $r;
+ }
+
+ if (@circreffed) {
+ is (scalar @circreffed, 1, 'One resultset expected to leak');
+
+ # this is useless on its own, it is to showcase the circref-diag
+ # and eventually test it when it is operational
+ local $TODO = 'Needs Data::Entangled or somesuch - see RT#82942';
+ while (@circreffed) {
+ weaken (my $r = shift @circreffed);
+
+ populate_weakregistry( (my $mini_registry = {}), $r );
+ assert_empty_weakregistry( $mini_registry );
+
+ $r->result_source(undef);
+ }
}
}
-#!/usr/bin/env perl -T
-
-# the above line forces Test::Harness into taint-mode
-# DO NOT REMOVE
-
use strict;
use warnings;
+use Config;
+
+# there is talk of possible perl compilations where -T is fatal or just
+# doesn't work. We don't want to have the user deal with that.
+BEGIN { unless ($INC{'t/lib/DBICTest/WithTaint.pm'}) {
+
+ # it is possible the test itself is initially invoked in taint mode
+ # and with relative paths *and* with a relative $^X and some other
+ # craziness... in short: just be proactive
+ require File::Spec;
+
+ if (length $ENV{PATH}) {
+ ( $ENV{PATH} ) = join ( $Config{path_sep},
+ map { length($_) ? File::Spec->rel2abs($_) : () }
+ split /\Q$Config{path_sep}/, $ENV{PATH}
+ ) =~ /\A(.+)\z/;
+ }
+
+ my ($perl) = $^X =~ /\A(.+)\z/;
+
+ {
+ local $ENV{PATH} = "/nosuchrootbindir";
+ system( $perl => -T => -e => '
+ use warnings;
+ use strict;
+ eval { my $x = $ENV{PATH} . (kill (0)); 1 } or exit 42;
+ exit 0;
+ ');
+ }
+
+ if ( ($? >> 8) != 42 ) {
+ print "1..0 # SKIP Your perl does not seem to like/support -T...\n";
+ exit 0;
+ }
+
+ exec( $perl, qw( -I. -Mt::lib::DBICTest::WithTaint -T ), __FILE__ );
+}}
# When in taint mode, PERL5LIB is ignored (but *not* unset)
# Put it back in INC so that local-lib users can actually
-# run this test
-use Config;
-BEGIN {
- for (map { defined $ENV{$_} ? $ENV{$_} : () } (qw/PERLLIB PERL5LIB/) ) { # we unshift, so reverse precedence
- my ($envvar) = ($_ =~ /^(.+)$/); # untaint
- unshift @INC, map { length($_) ? $_ : () } (split /\Q$Config{path_sep}\E/, $envvar);
- }
-}
+# run this test. Use lib.pm instead of an @INC unshift as
+# it will correctly add any arch subdirs encountered
+
+use lib (
+ grep { length }
+ map { split /\Q$Config{path_sep}\E/, (/^(.*)$/)[0] } # untainting regex
+ grep { defined }
+ @ENV{qw(PERL5LIB PERLLIB)} # precedence preserved by lib
+);
# We need to specify 'lib' here as well because even if it was already in
# @INC, the above will have put our local::lib in front of it, so now an
# In some cases, prove will have supplied ./lib as an absolute path so it
# doesn't seem worth trying to remove the second copy since it won't hurt
# anything.
-use lib qw(lib);
-use lib qw(t/lib);
+use lib qw(t/lib lib);
use Test::More;
use Test::Exception;
sub { $ENV{PATH} . (kill (0)) },
qr/Insecure dependency in kill/,
'taint mode active'
-);
+) if length $ENV{PATH};
{
package DBICTest::Taint::Classes;
}, 'Loading classes with Module::Find/load_namespaces worked in taint mode' );
}
+# check that we can create a database and all
+{
+ my $s = DBICTest->init_schema( sqlite_use_file => 1 );
+ my $art = $s->resultset('Artist')->search({}, {
+ prefetch => 'cds', order_by => 'artistid',
+ })->next;
+ is ($art->artistid, 1, 'got artist');
+}
+
done_testing;
use Test::More;
use Test::Exception;
+use Test::Warn;
use DBI::Const::GetInfoType;
use Scalar::Util qw/weaken/;
my $cd = $rs->next;
is ($cd->artist->name, $artist->name, 'Prefetched artist');
}, 'join does not throw (mysql 3 test)';
-
- # induce a jointype override, make sure it works even if we don't have mysql3
- my $needs_inner_join = $schema->storage->sql_maker->needs_inner_join;
- $schema->storage->sql_maker->needs_inner_join(1);
- is_same_sql_bind (
- $rs->as_query,
- '(
- SELECT `me`.`cdid`, `me`.`artist`, `me`.`title`, `me`.`year`, `me`.`genreid`, `me`.`single_track`,
- `artist`.`artistid`, `artist`.`name`, `artist`.`rank`, `artist`.`charfield`
- FROM cd `me`
- INNER JOIN `artist` `artist` ON `artist`.`artistid` = `me`.`artist`
- )',
- [],
- 'overridden default join type works',
- );
- $schema->storage->sql_maker->needs_inner_join($needs_inner_join);
}
## Can we properly deal with the null search problem?
select => [ \ 'YEAR(year)' ], as => ['y'], distinct => 1,
});
- is_deeply (
- [ sort ($rs->get_column ('y')->all) ],
+ my $y_rs = $rs->get_column ('y');
+
+ warnings_exist { is_deeply (
+ [ sort ($y_rs->all) ],
[ sort keys %$cds_per_year ],
'Years group successfully',
- );
+ ) } qr/
+ \QUse of distinct => 1 while selecting anything other than a column \E
+ \Qdeclared on the primary ResultSource is deprecated\E
+ /x, 'deprecation warning';
+
$rs->create ({ artist => 1, year => '0-1-1', title => 'Jesus Rap' });
is_deeply (
- [ sort $rs->get_column ('y')->all ],
+ [ sort $y_rs->all ],
[ 0, sort keys %$cds_per_year ],
'Zero-year groups successfully',
);
year => { '!=', undef }
]});
- is_deeply (
+ warnings_exist { is_deeply (
[ $restrict_rs->get_column('y')->all ],
- [ $rs->get_column ('y')->all ],
+ [ $y_rs->all ],
'Zero year was correctly excluded from resultset',
- );
+ ) } qr/
+ \QUse of distinct => 1 while selecting anything other than a column \E
+ \Qdeclared on the primary ResultSource is deprecated\E
+ /x, 'deprecation warning';
}
# make sure find hooks determine driver
# create identically named tables/sequences in the other schema
do_creates($dbh2, $q);
- # grand select privileges to the 2nd user
+ # grant select privileges to the 2nd user
$dbh->do("GRANT INSERT ON ${q}artist${q} TO " . uc $user2);
$dbh->do("GRANT SELECT ON ${q}artist${q} TO " . uc $user2);
$dbh->do("GRANT SELECT ON ${q}artist_pk_seq${q} TO " . uc $user2);
do_clean ($dbh2);
}}
+# test driver determination issues that led to the diagnosis/fix in 37b5ab51
+# observed side-effect when count-is-first on a fresh env-based connect
+ {
+ local $ENV{DBI_DSN};
+ ($ENV{DBI_DSN}, my @user_pass_args) = @{ $schema->storage->connect_info };
+ my $s2 = DBICTest::Schema->connect( undef, @user_pass_args );
+ ok (! $s2->storage->connected, 'Not connected' );
+ is (ref $s2->storage, 'DBIx::Class::Storage::DBI', 'Undetermined driver' );
+
+ ok (
+ $s2->resultset('Artist')->search({ 'me.name' => { like => '%' } }, { prefetch => 'cds' })->count,
+ 'Some artist count'
+ );
+ ok (
+ scalar $s2->resultset('CD')->search({}, { join => 'tracks' } )->all,
+ 'Some cds returned'
+ );
+ $s2->storage->disconnect;
+ }
+
do_clean ($dbh);
}
use DBICTest;
use DBICTest::Schema;
-use DBIC::SqlMakerTest;
-
-use DBIx::Class::SQLMaker::LimitDialects;
-my $ROWS = DBIx::Class::SQLMaker::LimitDialects->__rows_bindtype,
-my $TOTAL = DBIx::Class::SQLMaker::LimitDialects->__total_bindtype,
my $schema = DBICTest::Schema->connect($dsn, $user, $pass);
connect_by => { parentid => { -prior => { -ident => 'artistid' } } },
});
- is_same_sql_bind (
- $rs->as_query,
- '(
- SELECT me.artistid, me.name, me.rank, me.charfield, me.parentid
- FROM artist me
- START WITH name = ?
- CONNECT BY parentid = PRIOR artistid
- )',
- [ [ { 'sqlt_datatype' => 'varchar', 'dbic_colname' => 'name', 'sqlt_size' => 100 }
- => 'root'] ],
- );
is_deeply (
[ $rs->get_column ('name')->all ],
[ qw/root child1 grandchild greatgrandchild child2/ ],
'got artist tree',
);
- is_same_sql_bind (
- $rs->count_rs->as_query,
- '(
- SELECT COUNT( * )
- FROM artist me
- START WITH name = ?
- CONNECT BY parentid = PRIOR artistid
- )',
- [ [ { 'sqlt_datatype' => 'varchar', 'dbic_colname' => 'name', 'sqlt_size' => 100 }
- => 'root'] ],
- );
-
is( $rs->count, 5, 'Connect By count ok' );
}
order_siblings_by => { -desc => 'name' },
});
- is_same_sql_bind (
- $rs->as_query,
- '(
- SELECT me.artistid, me.name, me.rank, me.charfield, me.parentid
- FROM artist me
- START WITH name = ?
- CONNECT BY parentid = PRIOR artistid
- ORDER SIBLINGS BY name DESC
- )',
- [ [ { 'sqlt_datatype' => 'varchar', 'dbic_colname' => 'name', 'sqlt_size' => 100 }
- => 'root'] ],
- );
-
is_deeply (
[ $rs->get_column ('name')->all ],
[ qw/root child2 child1 grandchild greatgrandchild/ ],
connect_by => { parentid => { -prior => { -ident => 'artistid' } } },
});
- is_same_sql_bind (
- $rs->as_query,
- '(
- SELECT me.artistid, me.name, me.rank, me.charfield, me.parentid
- FROM artist me
- WHERE ( parentid IS NULL )
- START WITH name = ?
- CONNECT BY parentid = PRIOR artistid
- )',
- [ [ { 'sqlt_datatype' => 'varchar', 'dbic_colname' => 'name', 'sqlt_size' => 100 }
- => 'root'] ],
- );
-
is_deeply(
[ $rs->get_column('name')->all ],
[ 'root' ],
}
);
- is_same_sql_bind (
- $rs->as_query,
- '(
- SELECT me.artistid, me.name, me.rank, me.charfield, me.parentid
- FROM artist me
- LEFT JOIN cd cds ON cds.artist = me.artistid
- WHERE ( cds.title LIKE ? )
- START WITH me.name = ?
- CONNECT BY parentid = PRIOR artistid
- )',
- [
- [ { 'sqlt_datatype' => 'varchar', 'dbic_colname' => 'cds.title', 'sqlt_size' => 100 }
- => '%cd'],
- [ { 'sqlt_datatype' => 'varchar', 'dbic_colname' => 'me.name', 'sqlt_size' => 100 }
- => 'root'],
- ],
- );
-
is_deeply(
[ $rs->get_column('name')->all ],
[ 'grandchild' ],
'Connect By with a join result name ok'
);
- is_same_sql_bind (
- $rs->count_rs->as_query,
- '(
- SELECT COUNT( * )
- FROM artist me
- LEFT JOIN cd cds ON cds.artist = me.artistid
- WHERE ( cds.title LIKE ? )
- START WITH me.name = ?
- CONNECT BY parentid = PRIOR artistid
- )',
- [
- [ { 'sqlt_datatype' => 'varchar', 'dbic_colname' => 'cds.title', 'sqlt_size' => 100 }
- => '%cd'],
- [ { 'sqlt_datatype' => 'varchar', 'dbic_colname' => 'me.name', 'sqlt_size' => 100 }
- => 'root'],
- ],
- );
-
is( $rs->count, 1, 'Connect By with a join; count ok' );
}
order_by => { -asc => [ 'LEVEL', 'name' ] },
});
- is_same_sql_bind (
- $rs->as_query,
- '(
- SELECT me.artistid, me.name, me.rank, me.charfield, me.parentid
- FROM artist me
- START WITH name = ?
- CONNECT BY parentid = PRIOR artistid
- ORDER BY LEVEL ASC, name ASC
- )',
- [
- [ { 'sqlt_datatype' => 'varchar', 'dbic_colname' => 'name', 'sqlt_size' => 100 }
- => 'root'],
- ],
- );
-
-
# Don't use "$rs->get_column ('name')->all" they build a query arround the $rs.
# If $rs has a order by, the order by is in the subquery and this doesn't work with Oracle 8i.
# TODO: write extra test and fix order by handling on Oracle 8i
rows => 2,
});
- is_same_sql_bind (
- $rs->as_query,
- '(
- SELECT me.artistid, me.name, me.rank, me.charfield, me.parentid
- FROM (
- SELECT me.artistid, me.name, me.rank, me.charfield, me.parentid
- FROM artist me
- START WITH name = ?
- CONNECT BY parentid = PRIOR artistid
- ORDER BY name ASC, artistid DESC
- ) me
- WHERE ROWNUM <= ?
- )',
- [
- [ { 'sqlt_datatype' => 'varchar', 'dbic_colname' => 'name', 'sqlt_size' => 100 }
- => 'root'], [ $ROWS => 2 ],
- ],
- );
-
is_deeply (
[ $rs->get_column ('name')->all ],
[qw/child1 child2/],
'LIMIT a Connect By query - correct names'
);
- is_same_sql_bind (
- $rs->count_rs->as_query,
- '(
- SELECT COUNT( * )
- FROM (
- SELECT me.artistid
- FROM (
- SELECT me.artistid
- FROM artist me
- START WITH name = ?
- CONNECT BY parentid = PRIOR artistid
- ) me
- WHERE ROWNUM <= ?
- ) me
- )',
- [
- [ { 'sqlt_datatype' => 'varchar', 'dbic_colname' => 'name', 'sqlt_size' => 100 }
- => 'root'],
- [ $ROWS => 2 ],
- ],
- );
-
is( $rs->count, 2, 'Connect By; LIMIT count ok' );
}
having => \[ 'count(rank) < ?', [ cnt => 2 ] ],
});
- is_same_sql_bind (
- $rs->as_query,
- '(
- SELECT COUNT(rank) + ?
- FROM artist me
- START WITH name = ?
- CONNECT BY parentid = PRIOR artistid
- GROUP BY( rank + ? ) HAVING count(rank) < ?
- )',
- [
- [ { dbic_colname => '__cbind' }
- => 3 ],
- [ { 'sqlt_datatype' => 'varchar', 'dbic_colname' => 'name', 'sqlt_size' => 100 }
- => 'root'],
- [ { dbic_colname => '__gbind' }
- => 1 ],
- [ { dbic_colname => 'cnt' }
- => 2 ],
- ],
- );
-
is_deeply (
[ $rs->get_column ('cnt')->all ],
[4, 4],
connect_by_nocycle => { parentid => { -prior => { -ident => 'artistid' } } },
});
- is_same_sql_bind (
- $rs->as_query,
- '(
- SELECT me.artistid, me.name, me.rank, me.charfield, me.parentid, CONNECT_BY_ISCYCLE
- FROM artist me
- START WITH name = ?
- CONNECT BY NOCYCLE parentid = PRIOR artistid
- )',
- [
- [ { 'sqlt_datatype' => 'varchar', 'dbic_colname' => 'name', 'sqlt_size' => 100 }
- => 'cycle-root'],
- ],
- );
is_deeply (
[ $rs->get_column ('name')->all ],
[ qw/cycle-root cycle-child1 cycle-grandchild cycle-child2/ ],
'got artist tree with nocycle (CONNECT_BY_ISCYCLE)',
);
- is_same_sql_bind (
- $rs->count_rs->as_query,
- '(
- SELECT COUNT( * )
- FROM artist me
- START WITH name = ?
- CONNECT BY NOCYCLE parentid = PRIOR artistid
- )',
- [
- [ { 'sqlt_datatype' => 'varchar', 'dbic_colname' => 'name', 'sqlt_size' => 100 }
- => 'cycle-root'],
- ],
- );
-
is( $rs->count, 4, 'Connect By Nocycle count ok' );
}
}
use lib qw(t/lib);
use DBICTest;
-use DBIC::SqlMakerTest;
-use DBIx::Class::SQLMaker::LimitDialects;
-
-my $OFFSET = DBIx::Class::SQLMaker::LimitDialects->__offset_bindtype;
-my $TOTAL = DBIx::Class::SQLMaker::LimitDialects->__total_bindtype;
my ($dsn, $user, $pass) = @ENV{map { "DBICTEST_MSSQL_ODBC_${_}" } qw/DSN USER PASS/};
},
);
- is_same_sql_bind (
- $owners->page(3)->as_query,
- $dialect eq 'Top'
- ? '(
- SELECT TOP 2147483647 [me].[id], [me].[name],
- [books].[id], [books].[source], [books].[owner], [books].[title], [books].[price]
- FROM (
- SELECT TOP 2147483647 [me].[id], [me].[name]
- FROM (
- SELECT TOP 3 [me].[id], [me].[name], [ORDER__BY__001]
- FROM (
- SELECT TOP 9 [me].[id], [me].[name], name + ? AS [ORDER__BY__001]
- FROM [owners] [me]
- LEFT JOIN [books] [books]
- ON [books].[owner] = [me].[id]
- WHERE [books].[id] IS NOT NULL AND [me].[name] != ?
- GROUP BY [me].[id], [me].[name]
- ORDER BY name + ? ASC, [me].[id]
- ) [me]
- ORDER BY [ORDER__BY__001] DESC, [me].[id] DESC
- ) [me]
- ORDER BY [ORDER__BY__001] ASC, [me].[id]
- ) [me]
- LEFT JOIN [books] [books]
- ON [books].[owner] = [me].[id]
- WHERE [books].[id] IS NOT NULL AND [me].[name] != ?
- ORDER BY name + ? ASC, [me].[id]
- )'
- : '(
- SELECT TOP 2147483647 [me].[id], [me].[name],
- [books].[id], [books].[source], [books].[owner], [books].[title], [books].[price]
- FROM (
- SELECT TOP 2147483647 [me].[id], [me].[name]
- FROM (
- SELECT [me].[id], [me].[name],
- ROW_NUMBER() OVER( ORDER BY [ORDER__BY__001] ASC, [me].[id] ) AS [rno__row__index]
- FROM (
- SELECT [me].[id], [me].[name], name + ? AS [ORDER__BY__001]
- FROM [owners] [me]
- LEFT JOIN [books] [books]
- ON [books].[owner] = [me].[id]
- WHERE [books].[id] IS NOT NULL AND [me].[name] != ?
- GROUP BY [me].[id], [me].[name]
- ) [me]
- ) [me]
- WHERE [rno__row__index] >= ? AND [rno__row__index] <= ?
- ) [me]
- LEFT JOIN [books] [books]
- ON [books].[owner] = [me].[id]
- WHERE [books].[id] IS NOT NULL AND [me].[name] != ?
- ORDER BY name + ? ASC, [me].[id]
- )'
- ,
- [
- [ { dbic_colname => 'test' }
- => 'xxx' ],
- [ { sqlt_datatype => 'varchar', sqlt_size => 100, dbic_colname => 'me.name' }
- => 'somebogusstring' ],
-
- ($dialect eq 'Top'
- ? [ { dbic_colname => 'test' } => 'xxx' ] # the extra re-order bind
- : ([ $OFFSET => 7 ], [ $TOTAL => 9 ]) # parameterised RNO
- ),
-
- [ { sqlt_datatype => 'varchar', sqlt_size => 100, dbic_colname => 'me.name' }
- => 'somebogusstring' ],
- [ { dbic_colname => 'test' }
- => 'xxx' ],
- ],
- ) if $quoted;
-
is ($owners->page(1)->all, 3, "$test_type: has_many prefetch returns correct number of rows");
is ($owners->page(1)->count, 3, "$test_type: has-many prefetch returns correct count");
},
);
- is_same_sql_bind (
- $books->page(3)->as_query,
- $dialect eq 'Top'
- ? '(
- SELECT TOP 2147483647 [me].[id], [me].[source], [me].[owner], [me].[title], [me].[price],
- [owner].[id], [owner].[name]
- FROM (
- SELECT TOP 2147483647 [me].[id], [me].[source], [me].[owner], [me].[title], [me].[price]
- FROM (
- SELECT TOP 2 [me].[id], [me].[source], [me].[owner], [me].[title], [me].[price]
- FROM (
- SELECT TOP 6 [me].[id], [me].[source], [me].[owner], [me].[title], [me].[price]
- FROM [books] [me]
- JOIN [owners] [owner]
- ON [owner].[id] = [me].[owner]
- WHERE ( [owner].[name] = ? OR [owner].[name] = ? ) AND [source] = ?
- GROUP BY [me].[id], [me].[source], [me].[owner], [me].[title], [me].[price]
- HAVING 1 = ?
- ORDER BY [me].[owner] DESC, [me].[id]
- ) [me]
- ORDER BY [me].[owner] ASC, [me].[id] DESC
- ) [me]
- ORDER BY [me].[owner] DESC, [me].[id]
- ) [me]
- JOIN [owners] [owner]
- ON [owner].[id] = [me].[owner]
- WHERE ( [owner].[name] = ? OR [owner].[name] = ? ) AND [source] = ?
- ORDER BY [me].[owner] DESC, [me].[id]
- )'
- : '(
- SELECT TOP 2147483647 [me].[id], [me].[source], [me].[owner], [me].[title], [me].[price],
- [owner].[id], [owner].[name]
- FROM (
- SELECT TOP 2147483647 [me].[id], [me].[source], [me].[owner], [me].[title], [me].[price]
- FROM (
- SELECT [me].[id], [me].[source], [me].[owner], [me].[title], [me].[price],
- ROW_NUMBER() OVER( ORDER BY [me].[owner] DESC, [me].[id] ) AS [rno__row__index]
- FROM (
- SELECT [me].[id], [me].[source], [me].[owner], [me].[title], [me].[price]
- FROM [books] [me]
- JOIN [owners] [owner]
- ON [owner].[id] = [me].[owner]
- WHERE ( [owner].[name] = ? OR [owner].[name] = ? ) AND [source] = ?
- GROUP BY [me].[id], [me].[source], [me].[owner], [me].[title], [me].[price]
- HAVING 1 = ?
- ) [me]
- ) [me]
- WHERE [rno__row__index] >= ? AND [rno__row__index] <= ?
- ) [me]
- JOIN [owners] [owner]
- ON [owner].[id] = [me].[owner]
- WHERE ( [owner].[name] = ? OR [owner].[name] = ? ) AND [source] = ?
- ORDER BY [me].[owner] DESC, [me].[id]
- )'
- ,
- [
- # inner
- [ { sqlt_datatype => 'varchar', sqlt_size => 100, dbic_colname => 'owner.name' }
- => 'wiggle' ],
- [ { sqlt_datatype => 'varchar', sqlt_size => 100, dbic_colname => 'owner.name' }
- => 'woggle' ],
- [ { sqlt_datatype => 'varchar', sqlt_size => 100, dbic_colname => 'source' }
- => 'Library' ],
- [ { dbic_colname => 'test' }
- => '1' ],
-
- # top(?)
- $dialect eq 'Top'
- ? ()
- : ( [ $OFFSET => 5 ], [ $TOTAL => 6 ] )
- ,
-
- # outer
- [ { sqlt_datatype => 'varchar', sqlt_size => 100, dbic_colname => 'owner.name' }
- => 'wiggle' ],
- [ { sqlt_datatype => 'varchar', sqlt_size => 100, dbic_colname => 'owner.name' }
- => 'woggle' ],
- [ { sqlt_datatype => 'varchar', sqlt_size => 100, dbic_colname => 'source' }
- => 'Library' ],
- ],
- ) if $quoted;
-
is ($books->page(1)->all, 2, "$test_type: Prefetched grouped search returns correct number of rows");
is ($books->page(1)->count, 2, "$test_type: Prefetched grouped search returns correct count");
use strict;
use warnings;
-# use this if you keep a copy of DBD::Sybase linked to FreeTDS somewhere else
-BEGIN {
- if (my $lib_dirs = $ENV{DBICTEST_MSSQL_PERL5LIB}) {
- unshift @INC, $_ for split /:/, $lib_dirs;
- }
-}
-
use Test::More;
use Test::Exception;
use Scalar::Util 'weaken';
s/^'//, s/'\z// for @bind;
+ # test is duplicated in t/sqlmaker/msaccess.t, keep a duplicate here anyway, just to be safe
+ # -- ribasushi
is_same_sql_bind(
$sql,
\@bind,
s/^'//, s/'\z// for @bind;
+ # test is duplicated in t/sqlmaker/msaccess.t, keep a duplicate here anyway, just to be safe
+ # -- ribasushi
is_same_sql_bind(
$sql,
\@bind,
my $key = 'tmp_dbic_84serialize_memcached_test';
$stores{memcached} = sub {
- $memcached->set( $key, $_[0], 60 );
+ $memcached->set( $key, $_[0], 60 )
+ or die "Unable to insert into $ENV{DBICTEST_MEMCACHED} - is server running?";
local $DBIx::Class::ResultSourceHandle::thaw_schema = $schema;
return $memcached->get($key);
};
# would silently drop the group_by entirely, likely ending up with nonsensival results
# With the current behavior the user will at least get a nice fat exception from the
# RDBMS (or maybe the RDBMS will even decide to handle the situation sensibly...)
- is_same_sql_bind(
+ warnings_exist { is_same_sql_bind(
$rstypes->{'implicitly grouped'}->get_column('cnt')->as_query,
'(
SELECT COUNT( me.cdid )
=> 'evancarrol'
] ],
'Expected (though nonsensical) SQL generated on rscol-with-distinct-over-function',
- );
+ ) } qr/
+ \QUse of distinct => 1 while selecting anything other than a column \E
+ \Qdeclared on the primary ResultSource is deprecated\E
+ /x, 'deprecation warning';
{
local $TODO = 'multiplying join leaks through to the count aggregate... this may never actually work';
$dsn3 && DBIx::Class::Optional::Dependencies->req_ok_for('test_rdbms_mssql_ado'))
or (not $dsn || $dsn2 || $dsn3);
-# use this if you keep a copy of DBD::Sybase linked to FreeTDS somewhere else
-BEGIN {
- if (my $lib_dirs = $ENV{DBICTEST_MSSQL_PERL5LIB}) {
- unshift @INC, $_ for split /:/, $lib_dirs;
- }
-}
-
if (not ($dsn || $dsn2 || $dsn3)) {
plan skip_all =>
'Set $ENV{DBICTEST_MSSQL_ODBC_DSN} and/or $ENV{DBICTEST_MSSQL_DSN} and/or '
use warnings;
use Test::More;
+
+use DBIx::Class::_Util 'modver_gt_or_eq';
+use base();
+BEGIN {
+ plan skip_all => 'base.pm 2.20 (only present in perl 5.19.7) is known to break this test'
+ if modver_gt_or_eq(base => '2.19_01') and ! modver_gt_or_eq(base => '2.21');
+}
+
use Test::Exception;
use lib qw(t/lib);
use DBICTest;
-package DBICNSTest::RtBug41083::Schema::Foo;
+package DBICNSTest::RtBug41083::Result::Foo;
use strict;
use warnings;
use base 'DBIx::Class::Core';
--- /dev/null
+package DBICNSTest::RtBug41083::Result::Foo::Sub;
+use strict;
+use warnings;
+use base 'DBICNSTest::RtBug41083::Result::Foo';
+1;
-package DBICNSTest::RtBug41083::Schema_A::A;
+package DBICNSTest::RtBug41083::Result_A::A;
use strict;
use warnings;
use base 'DBIx::Class::Core';
--- /dev/null
+package DBICNSTest::RtBug41083::Result_A::A::Sub;
+use strict;
+use warnings;
+use base 'DBICNSTest::RtBug41083::Result_A::A';
+1;
+++ /dev/null
-package DBICNSTest::RtBug41083::Schema::Foo::Sub;
-use strict;
-use warnings;
-use base 'DBICNSTest::RtBug41083::Schema::Foo';
-1;
+++ /dev/null
-package DBICNSTest::RtBug41083::Schema_A::A::Sub;
-use strict;
-use warnings;
-use base 'DBICNSTest::RtBug41083::Schema_A::A';
-1;
use Path::Class::File ();
use File::Spec;
use Fcntl qw/:DEFAULT :flock/;
+use Config;
=head1 NAME
sub import {
my $self = shift;
- my $tmpdir = DBICTest::RunMode->tmpdir;
- my $lockpath = $tmpdir->file('.dbictest_global.lock');
+ my $lockpath = DBICTest::RunMode->tmpdir->file('_dbictest_global.lock');
{
my $u = local_umask(0); # so that the file opens as 666, and any user can lock
- sysopen ($global_lock_fh, $lockpath, O_RDWR|O_CREAT) or do {
- my $err = $!;
-
- my @x_tests = map { (defined $_) ? ( $_ ? 1 : 0 ) : 'U' } map {(-e, -d, -f, -r, -w, -x, -o)} ($tmpdir, $lockpath);
-
- die sprintf <<"EOE", $lockpath, $err, scalar $>, scalar $), (stat($tmpdir))[4,5,2], @x_tests;
-Unable to open %s: %s
-Process EUID/EGID: %s / %s
-TmpDir UID/GID: %s / %s
-TmpDir StatMode: %o
-TmpDir X-tests: -e:%s -d:%s -f:%s -r:%s -w:%s -x:%s -o:%s
-TmpFile X-tests: -e:%s -d:%s -f:%s -r:%s -w:%s -x:%s -o:%s
-EOE
- };
+ sysopen ($global_lock_fh, $lockpath, O_RDWR|O_CREAT)
+ or die "Unable to open $lockpath: $!";
}
for (@_) {
# this is executed on every connect, and thus installs a disconnect/DESTROY
# guard for every new $dbh
on_connect_do => sub {
+
my $storage = shift;
my $dbh = $storage->_get_dbh;
# no fsync on commit
$dbh->do ('PRAGMA synchronous = OFF');
- if ($ENV{DBICTEST_SQLITE_REVERSE_DEFAULT_ORDER}) {
-
- $storage->throw_exception(
- 'PRAGMA reverse_unordered_selects does not work correctly before libsqlite 3.7.9'
- ) if $storage->_server_info->{normalized_dbms_version} < 3.007009;
-
+ if (
+ $ENV{DBICTEST_SQLITE_REVERSE_DEFAULT_ORDER}
+ and
+ # the pragma does not work correctly before libsqlite 3.7.9
+ $storage->_server_info->{normalized_dbms_version} >= 3.007009
+ ) {
$dbh->do ('PRAGMA reverse_unordered_selects = ON');
}
my $cur_inode = (stat($db_file))[1];
if ($orig_inode != $cur_inode) {
- # pack/unpack to match the unsigned longs returned by `stat`
- $fail_reason = sprintf 'was recreated (initially inode %s, now %s)', (
- map { unpack ('L', pack ('l', $_) ) } ($orig_inode, $cur_inode )
- );
+ my @inodes = ($orig_inode, $cur_inode);
+ # unless this is a fixed perl (P5RT#84590) pack/unpack before display
+ # to match the unsigned longs returned by `stat`
+ @inodes = map { unpack ('L', pack ('l', $_) ) } @inodes
+ unless $Config{st_ino_size};
+
+ $fail_reason = sprintf
+ 'was recreated (initially inode %s, now %s)',
+ @inodes
+ ;
}
}
}
use Path::Class qw/file dir/;
-use File::Spec;
+use Fcntl ':DEFAULT';
+use File::Spec ();
+use File::Temp ();
+use DBICTest::Util 'local_umask';
_check_author_makefile() unless $ENV{DBICTEST_NO_MAKEFILE_VERIFICATION};
sub tmpdir {
dir ($tmpdir ||= do {
+ # works but not always
my $dir = dir(File::Spec->tmpdir);
+ my $reason_dir_unusable;
my @parts = File::Spec->splitdir($dir);
- if (@parts == 2 and $parts[1] eq '') {
- # This means we were give the root dir (C:\ or something equally unacceptable)
+ if (@parts == 2 and $parts[1] =~ /^ [ \\ \/ ]? $/x ) {
+ $reason_dir_unusable =
+ 'File::Spec->tmpdir returned a root directory instead of a designated '
+ . 'tempdir (possibly https://rt.cpan.org/Ticket/Display.html?id=76663)';
+ }
+ else {
+ # make sure we can actually create and sysopen a file in this dir
+ local $@;
+ my $u = local_umask(0); # match the umask we use in DBICTest(::Schema)
+ my $tempfile = '<NONCREATABLE>';
+ eval {
+ $tempfile = File::Temp->new(
+ TEMPLATE => '_dbictest_writability_test_XXXXXX',
+ DIR => "$dir",
+ UNLINK => 1,
+ );
+ close $tempfile or die "closing $tempfile failed: $!\n";
+
+ sysopen (my $tempfh2, "$tempfile", O_RDWR) or die "reopening $tempfile failed: $!\n";
+ print $tempfh2 'deadbeef' x 1024 or die "printing to $tempfile failed: $!\n";
+ close $tempfh2 or die "closing $tempfile failed: $!\n";
+ 1;
+ } or do {
+ chomp( my $err = $@ );
+ my @x_tests = map { (defined $_) ? ( $_ ? 1 : 0 ) : 'U' } map {(-e, -d, -f, -r, -w, -x, -o)} ("$dir", "$tempfile");
+ $reason_dir_unusable = sprintf <<"EOE", "$tempfile"||'', $err, scalar $>, scalar $), umask(), (stat($dir))[4,5,2], @x_tests;
+File::Spec->tmpdir returned a directory which appears to be non-writeable:
+Error encountered while testing '%s': %s
+Process EUID/EGID: %s / %s
+Effective umask: %o
+TmpDir UID/GID: %s / %s
+TmpDir StatMode: %o
+TmpDir X-tests: -e:%s -d:%s -f:%s -r:%s -w:%s -x:%s -o:%s
+TmpFile X-tests: -e:%s -d:%s -f:%s -r:%s -w:%s -x:%s -o:%s
+EOE
+ };
+ }
+
+ if ($reason_dir_unusable) {
# Replace with our local project tmpdir. This will make multiple runs
# from different runs conflict with each other, but is much better than
- # polluting the root dir with random crap
- $dir = _find_co_root()->subdir('t')->subdir('var');
- $dir->mkpath;
+ # polluting the root dir with random crap or failing outright
+ my $local_dir = _find_co_root()->subdir('t')->subdir('var');
+ $local_dir->mkpath;
+
+ warn "\n\nUsing '$local_dir' as test scratch-dir instead of '$dir': $reason_dir_unusable\n";
+ $dir = $local_dir;
}
$dir->stringify;
so often results in a lot of wasted time for other contributors
trying to assist you with spurious "its broken!" problems.
-By default DBICs Makefile.PL turns all optional dependenciess into
+By default DBICs Makefile.PL turns all optional dependencies into
*HARD REQUIREMENTS*, in order to make sure that the entire test
suite is executed, and no tests are skipped due to missing modules.
If you for some reason need to disable this behavior - supply the
;
};
- # DBD::Firebird and DBD::InterBase could very well talk to the same RDBMS
- # make an educated guesstimate based on the DSN
- # (worst case scenario we are wrong and the scripts have to wait on each
- # other even without actually being able to interfere among themselves)
- if (
- ($locktype||'') eq 'InterBase'
- and
- $_[0] =~ /firebird/i
- ) {
- $locktype = 'Firebird';
- }
-
# Never hold more than one lock. This solves the "lock in order" issues
# unrelated tests may have
# Also if there is no connection - there is no lock to be had
# which is fine since the type does not match as checked above
undef $locker;
- my $lockpath = DBICTest::RunMode->tmpdir->file(".dbictest_$locktype.lock");
+ my $lockpath = DBICTest::RunMode->tmpdir->file("_dbictest_$locktype.lock");
#warn "$$ $0 $locktype GRABBING LOCK";
my $lock_fh;
);
__PACKAGE__->many_to_many('artworks', 'artwork_to_artist', 'artwork');
+__PACKAGE__->has_many(
+ cds_without_genre => 'DBICTest::Schema::CD',
+ sub {
+ my $args = shift;
+ return (
+ {
+ "$args->{foreign_alias}.artist" => { -ident => "$args->{self_alias}.artistid" },
+ "$args->{foreign_alias}.genreid" => undef,
+ }, $args->{self_rowobj} && {
+ "$args->{foreign_alias}.artist" => $args->{self_rowobj}->artistid,
+ "$args->{foreign_alias}.genreid" => undef,
+ }
+ ),
+ },
+);
sub sqlt_deploy_hook {
my ($self, $sqlt_table) = @_;
use strict;
use Carp;
-use Scalar::Util qw/isweak weaken blessed reftype refaddr/;
-use B 'svref_2object';
+use Scalar::Util qw(isweak weaken blessed reftype);
+use DBIx::Class::_Util qw(refcount hrefaddr);
+use DBIx::Class::Optional::Dependencies;
+use Data::Dumper::Concise;
use DBICTest::Util 'stacktrace';
+use constant {
+ CV_TRACING => DBIx::Class::Optional::Dependencies->req_ok_for ('test_leaks_heavy'),
+ SKIP_SCALAR_REFS => ( $] > 5.017 ) ? 1 : 0,
+};
use base 'Exporter';
-our @EXPORT_OK = qw/populate_weakregistry assert_empty_weakregistry/;
+our @EXPORT_OK = qw(populate_weakregistry assert_empty_weakregistry visit_refs);
my $refs_traced = 0;
-my $leaks_found;
+my $leaks_found = 0;
my %reg_of_regs;
+# so we don't trigger stringification
+sub _describe_ref {
+ sprintf '%s%s(%s)',
+ (defined blessed $_[0]) ? blessed($_[0]) . '=' : '',
+ reftype $_[0],
+ hrefaddr $_[0],
+ ;
+}
+
sub populate_weakregistry {
- my ($weak_registry, $target, $slot) = @_;
+ my ($weak_registry, $target, $note) = @_;
croak 'Expecting a registry hashref' unless ref $weak_registry eq 'HASH';
croak 'Target is not a reference' unless length ref $target;
- my $refaddr = refaddr $target;
+ my $refaddr = hrefaddr $target;
- $slot ||= (sprintf '%s%s(0x%x)', # so we don't trigger stringification
- (defined blessed $target) ? blessed($target) . '=' : '',
- reftype $target,
- $refaddr,
- );
+ # a registry could be fed to itself or another registry via recursive sweeps
+ return $target if $reg_of_regs{$refaddr};
- if (defined $weak_registry->{$slot}{weakref}) {
- if ( $weak_registry->{$slot}{refaddr} != $refaddr ) {
- print STDERR "Bail out! Weak Registry slot collision $slot: $weak_registry->{$slot}{weakref} / $target\n";
- exit 255;
- }
+ weaken( $reg_of_regs{ hrefaddr($weak_registry) } = $weak_registry )
+ unless( $reg_of_regs{ hrefaddr($weak_registry) } );
+
+ # an explicit "garbage collection" pass every time we store a ref
+ # if we do not do this the registry will keep growing appearing
+ # as if the traced program is continuously slowly leaking memory
+ for my $reg (values %reg_of_regs) {
+ (defined $reg->{$_}{weakref}) or delete $reg->{$_}
+ for keys %$reg;
}
- else {
- $weak_registry->{$slot} = {
+
+ # FIXME/INVESTIGATE - something fishy is going on with refs to plain
+ # strings, perhaps something to do with the CoW work etc...
+ return $target if SKIP_SCALAR_REFS and reftype($target) eq 'SCALAR';
+
+ if (! defined $weak_registry->{$refaddr}{weakref}) {
+ $weak_registry->{$refaddr} = {
stacktrace => stacktrace(1),
- refaddr => $refaddr,
- renumber => $_[2] ? 0 : 1,
+ weakref => $target,
};
- weaken( $weak_registry->{$slot}{weakref} = $target );
+ weaken( $weak_registry->{$refaddr}{weakref} );
$refs_traced++;
}
- weaken( $reg_of_regs{ refaddr($weak_registry) } = $weak_registry )
- unless( $reg_of_regs{ refaddr($weak_registry) } );
+ my $desc = _describe_ref($target);
+ $weak_registry->{$refaddr}{slot_names}{$desc} = 1;
+ if ($note) {
+ $note =~ s/\s*\Q$desc\E\s*//g;
+ $weak_registry->{$refaddr}{slot_names}{$note} = 1;
+ }
$target;
}
-# Renumber everything we auto-named on a thread spawn
+# Regenerate the slots names on a thread spawn
sub CLONE {
my @individual_regs = grep { scalar keys %{$_||{}} } values %reg_of_regs;
%reg_of_regs = ();
for my $reg (@individual_regs) {
- my @live_slots = grep { defined $reg->{$_}{weakref} } keys %$reg
+ my @live_slots = grep { defined $_->{weakref} } values %$reg
or next;
- my @live_instances = @{$reg}{@live_slots};
-
$reg = {}; # get a fresh hashref in the new thread ctx
- weaken( $reg_of_regs{refaddr($reg)} = $reg );
+ weaken( $reg_of_regs{hrefaddr($reg)} = $reg );
- while (@live_slots) {
- my $slot = shift @live_slots;
- my $inst = shift @live_instances;
+ for my $slot_info (@live_slots) {
+ my $new_addr = hrefaddr $slot_info->{weakref};
- my $refaddr = $inst->{refaddr} = refaddr($inst);
+ # replace all slot names
+ $slot_info->{slot_names} = { map {
+ my $name = $_;
+ $name =~ s/\(0x[0-9A-F]+\)/sprintf ('(%s)', $new_addr)/ieg;
+ ($name => 1);
+ } keys %{$slot_info->{slot_names}} };
- $slot =~ s/0x[0-9A-F]+/'0x' . sprintf ('0x%x', $refaddr)/ieg
- if $inst->{renumber};
-
- $reg->{$slot} = $inst;
+ $reg->{$new_addr} = $slot_info;
}
}
}
+sub visit_refs {
+ my $args = { (ref $_[0]) ? %{$_[0]} : @_ };
+
+ $args->{seen_refs} ||= {};
+
+ my $visited_cnt = '0E0';
+ for my $i (0 .. $#{$args->{refs}} ) {
+
+ next unless length ref $args->{refs}[$i]; # not-a-ref
+
+ my $addr = hrefaddr $args->{refs}[$i];
+
+ # no diving into weakregistries
+ next if $reg_of_regs{$addr};
+
+ next if $args->{seen_refs}{$addr}++;
+ $visited_cnt++;
+
+ my $r = $args->{refs}[$i];
+
+ $args->{action}->($r) or next;
+
+    # This may end up being necessary some day, but do not slow things
+ # down for now
+ #if ( defined( my $t = tied($r) ) ) {
+ # $visited_cnt += visit_refs({ %$args, refs => [ $t ] });
+ #}
+
+ my $type = reftype $r;
+
+ local $@;
+ eval {
+ if ($type eq 'HASH') {
+ $visited_cnt += visit_refs({ %$args, refs => [ map {
+ ( !isweak($r->{$_}) ) ? $r->{$_} : ()
+ } keys %$r ] });
+ }
+ elsif ($type eq 'ARRAY') {
+ $visited_cnt += visit_refs({ %$args, refs => [ map {
+ ( !isweak($r->[$_]) ) ? $r->[$_] : ()
+ } 0..$#$r ] });
+ }
+ elsif ($type eq 'REF' and !isweak($$r)) {
+ $visited_cnt += visit_refs({ %$args, refs => [ $$r ] });
+ }
+ elsif (CV_TRACING and $type eq 'CODE') {
+ $visited_cnt += visit_refs({ %$args, refs => [ map {
+ ( !isweak($_) ) ? $_ : ()
+ } scalar PadWalker::closed_over($r) ] }); # scalar due to RT#92269
+ }
+ 1;
+ } or warn "Could not descend into @{[ _describe_ref($r) ]}: $@\n";
+ }
+ $visited_cnt;
+}
+
+sub visit_namespaces {
+ my $args = { (ref $_[0]) ? %{$_[0]} : @_ };
+
+ my $visited = 1;
+
+ $args->{package} ||= '::';
+ $args->{package} = '::' if $args->{package} eq 'main';
+
+ if ( $args->{action}->($args->{package}) ) {
+
+ my $base = $args->{package};
+ $base = '' if $base eq '::';
+
+
+ $visited += visit_namespaces({ %$args, package => $_ }) for map
+ { $_ =~ /(.+?)::$/ && "${base}::$1" }
+ grep
+ { $_ =~ /(?<!^main)::$/ }
+ do { no strict 'refs'; keys %{ $base . '::'} }
+ }
+
+ return $visited;
+}
+
+# compiles a list of addresses stored as globals (possibly even catching
+# class data in the form of method closures), so we can skip them further on
+sub symtable_referenced_addresses {
+
+ my $refs_per_pkg;
+
+ my $dummy_addresslist;
+
+ my $seen_refs = {};
+ visit_namespaces(
+ action => sub {
+
+ no strict 'refs';
+
+ my $pkg = shift;
+ $pkg = '' if $pkg eq '::';
+ $pkg .= '::';
+
+ # the unless regex at the end skips some dangerous namespaces outright
+ # (but does not prevent descent)
+ $refs_per_pkg->{$pkg} += visit_refs (
+ seen_refs => $seen_refs,
+
+ # FIXME FIXME FIXME
+      # This is so damn odd - if we feed a constsub {1} (or in fact almost
+      # anything other than the actionsub below), any scalarref will show
+      # up as a leak, trapped by... something...
+ # Ideally we should be able to const this to sub{1} and just return
+ # $seen_refs (in fact it is identical to the dummy list at the end of
+ # a run here). Alas this doesn't seem to work, so punt for now...
+ action => sub { ++$dummy_addresslist->{ hrefaddr $_[0] } },
+
+ refs => [ map { my $sym = $_;
+ # *{"$pkg$sym"}{CODE} won't simply work - MRO-cached CVs are invisible there
+ ( CV_TRACING ? Class::MethodCache::get_cv("${pkg}$sym") : () ),
+
+ ( defined *{"$pkg$sym"}{SCALAR} and length ref ${"$pkg$sym"} and ! isweak( ${"$pkg$sym"} ) )
+ ? ${"$pkg$sym"} : ()
+ ,
+
+ ( map {
+ ( defined *{"$pkg$sym"}{$_} and ! isweak(defined *{"$pkg$sym"}{$_}) )
+ ? *{"$pkg$sym"}{$_}
+ : ()
+ } qw(HASH ARRAY IO GLOB) ),
+
+ } keys %$pkg ],
+ ) unless $pkg =~ /^ :: (?:
+ DB | next | B | .+? ::::ISA (?: ::CACHE ) | Class::C3
+ ) :: $/x;
+ }
+ );
+
+# use Devel::Dwarn;
+# Ddie [ map
+# { { $_ => $refs_per_pkg->{$_} } }
+# sort
+# {$refs_per_pkg->{$a} <=> $refs_per_pkg->{$b} }
+# keys %$refs_per_pkg
+# ];
+
+ $seen_refs;
+}
+
sub assert_empty_weakregistry {
my ($weak_registry, $quiet) = @_;
+ # in case we hooked bless any extra object creation will wreak
+ # havoc during the assert phase
+ local *CORE::GLOBAL::bless;
+ *CORE::GLOBAL::bless = sub { CORE::bless( $_[0], (@_ > 1) ? $_[1] : caller() ) };
+
croak 'Expecting a registry hashref' unless ref $weak_registry eq 'HASH';
+ defined $weak_registry->{$_}{weakref} or delete $weak_registry->{$_}
+ for keys %$weak_registry;
+
return unless keys %$weak_registry;
my $tb = eval { Test::Builder->new }
- or croak 'Calling test_weakregistry without a loaded Test::Builder makes no sense';
+ or croak "Calling assert_empty_weakregistry in $0 without a loaded Test::Builder makes no sense";
+
+ for my $addr (keys %$weak_registry) {
+ $weak_registry->{$addr}{display_name} = join ' | ', (
+ sort
+ { length $a <=> length $b or $a cmp $b }
+ keys %{$weak_registry->{$addr}{slot_names}}
+ );
- for my $slot (sort keys %$weak_registry) {
- next if ! defined $weak_registry->{$slot}{weakref};
- $tb->BAILOUT("!!!! WEAK REGISTRY SLOT $slot IS NOT A WEAKREF !!!!")
- unless isweak( $weak_registry->{$slot}{weakref} );
+ $tb->BAILOUT("!!!! WEAK REGISTRY SLOT $weak_registry->{$addr}{display_name} IS NOT A WEAKREF !!!!")
+ if defined $weak_registry->{$addr}{weakref} and ! isweak( $weak_registry->{$addr}{weakref} );
}
+ # the walk is very expensive - if we are $quiet (running in an END block)
+ # we do not really need to be too thorough
+ unless ($quiet) {
+ delete $weak_registry->{$_} for keys %{ symtable_referenced_addresses() };
+ }
- # compile a list of refs stored as CAG class data, so we can skip them
- # intelligently below
- my ($classdata_refcounts, $symwalker, $refwalker);
- $refwalker = sub {
- return unless length ref $_[0];
+ for my $addr (sort { $weak_registry->{$a}{display_name} cmp $weak_registry->{$b}{display_name} } keys %$weak_registry) {
- my $seen = $_[1] || {};
- return if $seen->{refaddr $_[0]}++;
+ next if ! defined $weak_registry->{$addr}{weakref};
- $classdata_refcounts->{refaddr $_[0]}++;
+ $leaks_found++ unless $tb->in_todo;
+ $tb->ok (0, "Leaked $weak_registry->{$addr}{display_name}");
- my $type = reftype $_[0];
- if ($type eq 'HASH') {
- $refwalker->($_, $seen) for values %{$_[0]};
- }
- elsif ($type eq 'ARRAY') {
- $refwalker->($_, $seen) for @{$_[0]};
- }
- elsif ($type eq 'REF') {
- $refwalker->($$_, $seen);
- }
- };
-
- $symwalker = sub {
- no strict 'refs';
- my $pkg = shift || '::';
-
- $refwalker->(${"${pkg}$_"}) for grep { $_ =~ /__cag_(?!pkg_gen__|supers__)/ } keys %$pkg;
-
- $symwalker->("${pkg}$_") for grep { $_ =~ /(?<!^main)::$/ } keys %$pkg;
- };
-
- # run things twice, some cycles will be broken, introducing new
- # candidates for pseudo-GC
- for (1,2) {
- undef $classdata_refcounts;
-
- $symwalker->();
-
- for my $slot (keys %$weak_registry) {
- if (
- defined $weak_registry->{$slot}{weakref}
- and
- my $expected_refcnt = $classdata_refcounts->{$weak_registry->{$slot}{refaddr}}
- ) {
- # need to store the SVref and examine it separately,
- # to push the weakref instance off the pad
- my $sv = svref_2object($weak_registry->{$slot}{weakref});
- delete $weak_registry->{$slot} if $sv->REFCNT == $expected_refcnt;
- }
- }
- }
+ my $diag = do {
+ local $Data::Dumper::Maxdepth = 1;
+ sprintf "\n%s (refcnt %d) => %s\n",
+ $weak_registry->{$addr}{display_name},
+ refcount($weak_registry->{$addr}{weakref}),
+ (
+ ref($weak_registry->{$addr}{weakref}) eq 'CODE'
+ and
+ B::svref_2object($weak_registry->{$addr}{weakref})->XSUB
+ ) ? '__XSUB__' : Dumper( $weak_registry->{$addr}{weakref} )
+ ;
+ };
- for my $slot (sort keys %$weak_registry) {
- ! defined $weak_registry->{$slot}{weakref} and next if $quiet;
+ # FIXME - need to add a circular reference seeker based on the visitor
+ # (will need a bunch of modifications, punting with just a stub for now)
- $tb->ok (! defined $weak_registry->{$slot}{weakref}, "No leaks of $slot") or do {
- $leaks_found = 1;
+ $diag .= Devel::FindRef::track ($weak_registry->{$addr}{weakref}, 50) . "\n"
+ if ( $ENV{TEST_VERBOSE} && eval { require Devel::FindRef });
- my $diag = '';
+ $diag =~ s/^/ /mg;
- $diag .= Devel::FindRef::track ($weak_registry->{$slot}{weakref}, 20) . "\n"
- if ( $ENV{TEST_VERBOSE} && eval { require Devel::FindRef });
+ if (my $stack = $weak_registry->{$addr}{stacktrace}) {
+ $diag .= " Reference first seen$stack";
+ }
- if (my $stack = $weak_registry->{$slot}{stacktrace}) {
- $diag .= " Reference first seen$stack";
- }
+ $tb->diag($diag);
+
+# if ($leaks_found == 1) {
+# # using the fh dumper due to intermittent buffering issues
+# # in case we decide to exit soon after (possibly via _exit)
+# require Devel::MAT::Dumper;
+# local $Devel::MAT::Dumper::MAX_STRING = -1;
+# open( my $fh, '>:raw', "leaked_${addr}_pid$$.pmat" ) or die $!;
+# Devel::MAT::Dumper::dumpfh( $fh );
+# close ($fh) or die $!;
+#
+# use POSIX;
+# POSIX::_exit(1);
+# }
+ }
- $tb->diag($diag) if $diag;
- };
+ if (! $quiet and !$leaks_found and ! $tb->in_todo) {
+ $tb->ok(1, sprintf "No leaks found at %s line %d", (caller())[1,2] );
}
}
--- /dev/null
+# keep stricture tests happy
+use strict;
+use warnings;
+1;
#$t->cd($t->new_related('cd', { artist => undef } ) );
#$t->{_rel_in_storage} = 0;
$t->insert;
-}, qr/cd.artist may not be NULL/, "Exception propogated properly");
+}, qr/DBI Exception.+(?x:
+ \QNOT NULL constraint failed: cd.artist\E
+ |
+ \Qcd.artist may not be NULL\E
+)/s, "Exception propogated properly");
lives_ok ( sub {
$schema->resultset('CD')->create ({
'Expected SQL on correlated realiased subquery'
);
+# test for subselect identifier leakage
+# NOTE - the hodge-podge mix of literal and regular identifiers is *deliberate*
+for my $quote_names (0,1) {
+ my $schema = DBICTest->init_schema( quote_names => $quote_names );
+
+ my ($ql, $qr) = $schema->storage->sql_maker->_quote_chars;
+
+ my $art_rs = $schema->resultset('Artist')->search ({}, {
+ order_by => 'me.artistid',
+ prefetch => 'cds',
+ rows => 2,
+ });
+
+ my $inner_lim_bindtype = { sqlt_datatype => 'integer' };
+
+ for my $inner_relchain (qw( cds_unordered cds ) ) {
+
+ my $stupid_latest_competition_release_query = $schema->resultset('Artist')->search(
+ { 'competition.artistid' => { '!=', { -ident => 'me.artistid' } } },
+ { alias => 'competition' },
+ )->search_related( $inner_relchain, {}, {
+ rows => 1, order_by => 'year', columns => { year => \'year' }, distinct => 1
+ })->get_column(\'year')->max_rs;
+
+ my $final_query = $art_rs->search( {}, {
+ '+columns' => { max_competition_release => \[
+ @${ $stupid_latest_competition_release_query->as_query }
+ ]},
+ });
+
+ # we are using cds_unordered explicitly above - do the sorting manually
+ my @results = sort { $a->{artistid} <=> $b->{artistid} } @{$final_query->all_hri};
+ @$_ = sort { $a->{cdid} <=> $b->{cdid} } @$_ for map { $_->{cds} } @results;
+
+ is_deeply (
+ \@results,
+ [
+ { artistid => 1, charfield => undef, max_competition_release => 1998, name => "Caterwauler McCrae", rank => 13, cds => [
+ { artist => 1, cdid => 1, genreid => 1, single_track => undef, title => "Spoonful of bees", year => 1999 },
+ { artist => 1, cdid => 2, genreid => undef, single_track => undef, title => "Forkful of bees", year => 2001 },
+ { artist => 1, cdid => 3, genreid => undef, single_track => undef, title => "Caterwaulin' Blues", year => 1997 },
+ ] },
+ { artistid => 2, charfield => undef, max_competition_release => 1997, name => "Random Boy Band", rank => 13, cds => [
+ { artist => 2, cdid => 4, genreid => undef, single_track => undef, title => "Generic Manufactured Singles", year => 2001 },
+ ] },
+ ],
+ "Expected result from weird query",
+ );
+
+ # the decomposition to sql/bind is *deliberate* in both instances
+  # we want to ensure this keeps working for literal sql, even when
+ # as_query switches to return an overloaded dq node
+ my ($sql, @bind) = @${ $final_query->as_query };
+
+ my $correlated_sql = qq{ (
+ SELECT MAX( year )
+ FROM (
+ SELECT year
+ FROM ${ql}artist${qr} ${ql}competition${qr}
+ JOIN cd ${ql}${inner_relchain}${qr}
+ ON ${ql}${inner_relchain}${qr}.${ql}artist${qr} = ${ql}competition${qr}.${ql}artistid${qr}
+ WHERE ${ql}competition${qr}.${ql}artistid${qr} != ${ql}me${qr}.${ql}artistid${qr}
+ GROUP BY year
+ ORDER BY MIN( ${ql}year${qr} )
+ LIMIT ?
+ ) ${ql}${inner_relchain}${qr}
+ )};
+
+ is_same_sql_bind(
+ $sql,
+ \@bind,
+ qq{ (
+ SELECT ${ql}me${qr}.${ql}artistid${qr}, ${ql}me${qr}.${ql}name${qr}, ${ql}me${qr}.${ql}rank${qr}, ${ql}me${qr}.${ql}charfield${qr},
+ $correlated_sql,
+ ${ql}cds${qr}.${ql}cdid${qr}, ${ql}cds${qr}.${ql}artist${qr}, ${ql}cds${qr}.${ql}title${qr}, ${ql}cds${qr}.${ql}year${qr}, ${ql}cds${qr}.${ql}genreid${qr}, ${ql}cds${qr}.${ql}single_track${qr}
+ FROM (
+ SELECT ${ql}me${qr}.${ql}artistid${qr}, ${ql}me${qr}.${ql}name${qr}, ${ql}me${qr}.${ql}rank${qr}, ${ql}me${qr}.${ql}charfield${qr},
+ $correlated_sql
+ FROM ${ql}artist${qr} ${ql}me${qr}
+ ORDER BY ${ql}me${qr}.${ql}artistid${qr}
+ LIMIT ?
+ ) ${ql}me${qr}
+ LEFT JOIN cd ${ql}cds${qr}
+ ON ${ql}cds${qr}.${ql}artist${qr} = ${ql}me${qr}.${ql}artistid${qr}
+ ORDER BY ${ql}me${qr}.${ql}artistid${qr}
+ ) },
+ [
+ [ $inner_lim_bindtype
+ => 1 ],
+ [ $inner_lim_bindtype
+ => 1 ],
+ [ { sqlt_datatype => 'integer' }
+ => 2 ],
+ ],
+ "No leakage of correlated subquery identifiers (quote_names => $quote_names, inner alias '$inner_relchain')"
+ );
+ }
+}
+
done_testing;
my $schema = DBICTest->init_schema();
-my $mo_rs = $schema->resultset('Artist')->search(
- { 'me.artistid' => 4 },
- {
- prefetch => [
- {
- cds => [
- { tracks => { cd_single => 'tracks' } },
- { cd_to_producer => 'producer' }
- ]
- },
- { artwork_to_artist => 'artwork' }
- ],
-
- result_class => 'DBIx::Class::ResultClass::HashRefInflator',
-
- order_by => [qw/tracks.position tracks.trackid producer.producerid tracks_2.trackid artwork.cd_id/],
- }
-);
-
$schema->resultset('Artist')->create(
{
name => 'mo',
}
);
-my $mo = $mo_rs->next;
-
-is( @{$mo->{cds}}, 2, 'two CDs' );
-
-cmp_deeply( $mo, {
+my $artist_with_extras = {
artistid => 4, charfield => undef, name => 'mo', rank => 1337,
artwork_to_artist => [
{ artist_id => 4, artwork_cd_id => 1, artwork => { cd_id => 1 } },
],
}
],
+};
+
+my $art_rs = $schema->resultset('Artist')->search({ 'me.artistid' => 4 });
+
+
+my $art_rs_prefetch = $art_rs->search({}, {
+ order_by => [qw/tracks.position tracks.trackid producer.producerid tracks_2.trackid artwork.cd_id/],
+ result_class => 'DBIx::Class::ResultClass::HashRefInflator',
+ prefetch => [
+ {
+ cds => [
+ { tracks => { cd_single => 'tracks' } },
+ { cd_to_producer => 'producer' }
+ ]
+ },
+ { artwork_to_artist => 'artwork' }
+ ],
});
+cmp_deeply( $art_rs_prefetch->next, $artist_with_extras );
+
+
done_testing;
--- /dev/null
+use strict;
+use warnings;
+
+use Test::More;
+
+use lib 't/lib';
+use DBICTest;
+
+my $schema = DBICTest->init_schema();
+
+my $artist_rs = $schema->resultset('Artist');
+
+for my $rel_rs(
+ $artist_rs->search_related_rs(
+ cds_without_genre => { artist => 1 }, { order_by => 'cdid' }
+ ),
+ $artist_rs->find(1)->search_related_rs(
+ cds_without_genre => {}, { order_by => 'cdid' }
+ ),
+) {
+
+ is_deeply(
+ $rel_rs->all_hri,
+ [
+ {
+ artist => 1,
+ cdid => 2,
+ genreid => undef,
+ single_track => undef,
+ title => "Forkful of bees",
+ year => 2001
+ },
+ {
+ artist => 1,
+ cdid => 3,
+ genreid => undef,
+ single_track => undef,
+ title => "Caterwaulin' Blues",
+ year => 1997
+ },
+ ]
+ );
+}
+
+done_testing;
year => 2020,
title => 'the best thing since sliced bread',
})
-} qr/\Qcd.artist may not be NULL/, 'ambiguous find + create failed';
+} qr/DBI Exception.+(?x:
+ \QNOT NULL constraint failed: cd.artist\E
+ |
+ \Qcd.artist may not be NULL\E
+)/s, 'ambiguous find + create failed'
+;
# expect a create, after a failed search using *only* the
# *current* relationship and the unique column constraints
is ($fresh->count_rs({ cdid => 1})->next, 1 );
ok (! exists $fresh->{cursor}, 'Still no cursor on fresh rs');
- ok (! exists $fresh->{_attrs}{_sqlmaker_select_args}, 'select args did not leak through' );
+ ok (! exists $fresh->{_attrs}{_last_sqlmaker_alias_map}, 'aliasmap did not leak through' );
+
+ my $n = $fresh->next;
+
+ # check that we are not testing for deprecated slotnames
+ ok ($fresh->{cursor}, 'Cursor at expected slot after fire');
+ ok (exists $fresh->{_attrs}{_last_sqlmaker_alias_map}, 'aliasmap at expected slot after fire' );
}
done_testing;
],
},
sqlbind => \[
- "( SELECT (SELECT id FROM cd me LIMIT ?) FROM artist me )",
+ "( SELECT (SELECT me.id FROM cd me LIMIT ?) FROM artist me )",
[ $ROWS => 1 ],
],
},
],
},
sqlbind => \[
- "( SELECT me.artistid, me.name, me.rank, me.charfield, (SELECT id FROM cd me LIMIT ?) FROM artist me )",
+ "( SELECT me.artistid, me.name, me.rank, me.charfield, (SELECT me.id FROM cd me LIMIT ?) FROM artist me )",
[ $ROWS => 1 ],
],
},
--- /dev/null
+use strict;
+use warnings;
+
+use Test::More;
+use lib qw(t/lib);
+
+use DBIx::Class::Optional::Dependencies;
+plan skip_all => 'Test needs ' . DBIx::Class::Optional::Dependencies->req_missing_for ('id_shortener')
+ unless DBIx::Class::Optional::Dependencies->req_ok_for ('id_shortener');
+
+use DBICTest::Schema::Artist;
+BEGIN {
+ DBICTest::Schema::Artist->add_column('parentid');
+
+ DBICTest::Schema::Artist->has_many(
+ children => 'DBICTest::Schema::Artist',
+ { 'foreign.parentid' => 'self.artistid' }
+ );
+
+ DBICTest::Schema::Artist->belongs_to(
+ parent => 'DBICTest::Schema::Artist',
+ { 'foreign.artistid' => 'self.parentid' }
+ );
+}
+
+use DBICTest;
+use DBICTest::Schema;
+use DBIC::SqlMakerTest;
+
+use DBIx::Class::SQLMaker::LimitDialects;
+my $ROWS = DBIx::Class::SQLMaker::LimitDialects->__rows_bindtype;
+my $TOTAL = DBIx::Class::SQLMaker::LimitDialects->__total_bindtype;
+
+for my $q ( '', '"' ) {
+
+ my $schema = DBICTest->init_schema(
+ storage_type => 'DBIx::Class::Storage::DBI::Oracle::Generic',
+ no_deploy => 1,
+ quote_char => $q,
+ );
+
+ # select the whole tree
+ {
+ my $rs = $schema->resultset('Artist')->search({}, {
+ start_with => { name => 'root' },
+ connect_by => { parentid => { -prior => { -ident => 'artistid' } } },
+ });
+
+ is_same_sql_bind (
+ $rs->as_query,
+ "(
+ SELECT ${q}me${q}.${q}artistid${q}, ${q}me${q}.${q}name${q}, ${q}me${q}.${q}rank${q}, ${q}me${q}.${q}charfield${q}, ${q}me${q}.${q}parentid${q}
+ FROM ${q}artist${q} ${q}me${q}
+ START WITH ${q}name${q} = ?
+ CONNECT BY ${q}parentid${q} = PRIOR ${q}artistid${q}
+ )",
+ [ [ { 'sqlt_datatype' => 'varchar', 'dbic_colname' => 'name', 'sqlt_size' => 100 }
+ => 'root'] ],
+ );
+
+ is_same_sql_bind (
+ $rs->count_rs->as_query,
+ "(
+ SELECT COUNT( * )
+ FROM ${q}artist${q} ${q}me${q}
+ START WITH ${q}name${q} = ?
+ CONNECT BY ${q}parentid${q} = PRIOR ${q}artistid${q}
+ )",
+ [ [ { 'sqlt_datatype' => 'varchar', 'dbic_colname' => 'name', 'sqlt_size' => 100 }
+ => 'root'] ],
+ );
+ }
+
+ # use order siblings by statement
+ {
+ my $rs = $schema->resultset('Artist')->search({}, {
+ start_with => { name => 'root' },
+ connect_by => { parentid => { -prior => { -ident => 'artistid' } } },
+ order_siblings_by => { -desc => 'name' },
+ });
+
+ is_same_sql_bind (
+ $rs->as_query,
+ "(
+ SELECT ${q}me${q}.${q}artistid${q}, ${q}me${q}.${q}name${q}, ${q}me${q}.${q}rank${q}, ${q}me${q}.${q}charfield${q}, ${q}me${q}.${q}parentid${q}
+ FROM ${q}artist${q} ${q}me${q}
+ START WITH ${q}name${q} = ?
+ CONNECT BY ${q}parentid${q} = PRIOR ${q}artistid${q}
+ ORDER SIBLINGS BY ${q}name${q} DESC
+ )",
+ [ [ { 'sqlt_datatype' => 'varchar', 'dbic_colname' => 'name', 'sqlt_size' => 100 }
+ => 'root'] ],
+ );
+ }
+
+ # get the root node
+ {
+ my $rs = $schema->resultset('Artist')->search({ parentid => undef }, {
+ start_with => { name => 'root' },
+ connect_by => { parentid => { -prior => { -ident => 'artistid' } } },
+ });
+
+ is_same_sql_bind (
+ $rs->as_query,
+ "(
+ SELECT ${q}me${q}.${q}artistid${q}, ${q}me${q}.${q}name${q}, ${q}me${q}.${q}rank${q}, ${q}me${q}.${q}charfield${q}, ${q}me${q}.${q}parentid${q}
+ FROM ${q}artist${q} ${q}me${q}
+ WHERE ( ${q}parentid${q} IS NULL )
+ START WITH ${q}name${q} = ?
+ CONNECT BY ${q}parentid${q} = PRIOR ${q}artistid${q}
+ )",
+ [ [ { 'sqlt_datatype' => 'varchar', 'dbic_colname' => 'name', 'sqlt_size' => 100 }
+ => 'root'] ],
+ );
+ }
+
+ # combine a connect by with a join
+ {
+ my $rs = $schema->resultset('Artist')->search(
+ {'cds.title' => { -like => '%cd'} },
+ {
+ join => 'cds',
+ start_with => { 'me.name' => 'root' },
+ connect_by => { parentid => { -prior => { -ident => 'artistid' } } },
+ }
+ );
+
+ is_same_sql_bind (
+ $rs->as_query,
+ "(
+ SELECT ${q}me${q}.${q}artistid${q}, ${q}me${q}.${q}name${q}, ${q}me${q}.${q}rank${q}, ${q}me${q}.${q}charfield${q}, ${q}me${q}.${q}parentid${q}
+ FROM ${q}artist${q} ${q}me${q}
+ LEFT JOIN cd ${q}cds${q} ON ${q}cds${q}.${q}artist${q} = ${q}me${q}.${q}artistid${q}
+ WHERE ( ${q}cds${q}.${q}title${q} LIKE ? )
+ START WITH ${q}me${q}.${q}name${q} = ?
+ CONNECT BY ${q}parentid${q} = PRIOR ${q}artistid${q}
+ )",
+ [
+ [ { 'sqlt_datatype' => 'varchar', 'dbic_colname' => 'cds.title', 'sqlt_size' => 100 }
+ => '%cd'],
+ [ { 'sqlt_datatype' => 'varchar', 'dbic_colname' => 'me.name', 'sqlt_size' => 100 }
+ => 'root'],
+ ],
+ );
+
+ is_same_sql_bind (
+ $rs->count_rs->as_query,
+ "(
+ SELECT COUNT( * )
+ FROM ${q}artist${q} ${q}me${q}
+ LEFT JOIN cd ${q}cds${q} ON ${q}cds${q}.${q}artist${q} = ${q}me${q}.${q}artistid${q}
+ WHERE ( ${q}cds${q}.${q}title${q} LIKE ? )
+ START WITH ${q}me${q}.${q}name${q} = ?
+ CONNECT BY ${q}parentid${q} = PRIOR ${q}artistid${q}
+ )",
+ [
+ [ { 'sqlt_datatype' => 'varchar', 'dbic_colname' => 'cds.title', 'sqlt_size' => 100 }
+ => '%cd'],
+ [ { 'sqlt_datatype' => 'varchar', 'dbic_colname' => 'me.name', 'sqlt_size' => 100 }
+ => 'root'],
+ ],
+ );
+ }
+
+ # combine a connect by with order_by
+ {
+ my $rs = $schema->resultset('Artist')->search({}, {
+ start_with => { name => 'root' },
+ connect_by => { parentid => { -prior => { -ident => 'artistid' } } },
+ order_by => { -asc => [ 'LEVEL', 'name' ] },
+ });
+
+ is_same_sql_bind (
+ $rs->as_query,
+ "(
+ SELECT ${q}me${q}.${q}artistid${q}, ${q}me${q}.${q}name${q}, ${q}me${q}.${q}rank${q}, ${q}me${q}.${q}charfield${q}, ${q}me${q}.${q}parentid${q}
+ FROM ${q}artist${q} ${q}me${q}
+ START WITH ${q}name${q} = ?
+ CONNECT BY ${q}parentid${q} = PRIOR ${q}artistid${q}
+ ORDER BY ${q}LEVEL${q} ASC, ${q}name${q} ASC
+ )",
+ [
+ [ { 'sqlt_datatype' => 'varchar', 'dbic_colname' => 'name', 'sqlt_size' => 100 }
+ => 'root'],
+ ],
+ );
+ }
+
+ # limit a connect by
+ {
+ my $rs = $schema->resultset('Artist')->search({}, {
+ start_with => { name => 'root' },
+ connect_by => { parentid => { -prior => { -ident => 'artistid' } } },
+ order_by => [ { -asc => 'name' }, { -desc => 'artistid' } ],
+ rows => 2,
+ });
+
+ is_same_sql_bind (
+ $rs->as_query,
+ "(
+ SELECT ${q}me${q}.${q}artistid${q}, ${q}me${q}.${q}name${q}, ${q}me${q}.${q}rank${q}, ${q}me${q}.${q}charfield${q}, ${q}me${q}.${q}parentid${q}
+ FROM (
+ SELECT ${q}me${q}.${q}artistid${q}, ${q}me${q}.${q}name${q}, ${q}me${q}.${q}rank${q}, ${q}me${q}.${q}charfield${q}, ${q}me${q}.${q}parentid${q}
+ FROM ${q}artist${q} ${q}me${q}
+ START WITH ${q}name${q} = ?
+ CONNECT BY ${q}parentid${q} = PRIOR ${q}artistid${q}
+ ORDER BY ${q}name${q} ASC, ${q}artistid${q} DESC
+ ) ${q}me${q}
+ WHERE ROWNUM <= ?
+ )",
+ [
+ [ { 'sqlt_datatype' => 'varchar', 'dbic_colname' => 'name', 'sqlt_size' => 100 }
+ => 'root'], [ $ROWS => 2 ],
+ ],
+ );
+
+ is_same_sql_bind (
+ $rs->count_rs->as_query,
+ "(
+ SELECT COUNT( * )
+ FROM (
+ SELECT ${q}me${q}.${q}artistid${q}
+ FROM (
+ SELECT ${q}me${q}.${q}artistid${q}
+ FROM ${q}artist${q} ${q}me${q}
+ START WITH ${q}name${q} = ?
+ CONNECT BY ${q}parentid${q} = PRIOR ${q}artistid${q}
+ ) ${q}me${q}
+ WHERE ROWNUM <= ?
+ ) ${q}me${q}
+ )",
+ [
+ [ { 'sqlt_datatype' => 'varchar', 'dbic_colname' => 'name', 'sqlt_size' => 100 }
+ => 'root'],
+ [ $ROWS => 2 ],
+ ],
+ );
+ }
+
+ # combine a connect_by with group_by and having
+ # add some bindvals to make sure things still work
+ {
+ my $rs = $schema->resultset('Artist')->search({}, {
+ select => \[ 'COUNT(rank) + ?', [ __cbind => 3 ] ],
+ as => 'cnt',
+ start_with => { name => 'root' },
+ connect_by => { parentid => { -prior => { -ident => 'artistid' } } },
+ group_by => \[ 'rank + ? ', [ __gbind => 1] ],
+ having => \[ 'count(rank) < ?', [ cnt => 2 ] ],
+ });
+
+ is_same_sql_bind (
+ $rs->as_query,
+ "(
+ SELECT COUNT(rank) + ?
+ FROM ${q}artist${q} ${q}me${q}
+ START WITH ${q}name${q} = ?
+ CONNECT BY ${q}parentid${q} = PRIOR ${q}artistid${q}
+ GROUP BY( rank + ? )
+ HAVING count(rank) < ?
+ )",
+ [
+ [ { dbic_colname => '__cbind' }
+ => 3 ],
+ [ { 'sqlt_datatype' => 'varchar', 'dbic_colname' => 'name', 'sqlt_size' => 100 }
+ => 'root'],
+ [ { dbic_colname => '__gbind' }
+ => 1 ],
+ [ { dbic_colname => 'cnt' }
+ => 2 ],
+ ],
+ );
+ }
+
+ # select the whole cycle tree with nocycle
+ {
+ my $rs = $schema->resultset('Artist')->search({}, {
+ start_with => { name => 'cycle-root' },
+ '+select' => \ 'CONNECT_BY_ISCYCLE',
+ '+as' => [ 'connector' ],
+ connect_by_nocycle => { parentid => { -prior => { -ident => 'artistid' } } },
+ });
+
+ is_same_sql_bind (
+ $rs->as_query,
+ "(
+ SELECT ${q}me${q}.${q}artistid${q}, ${q}me${q}.${q}name${q}, ${q}me${q}.${q}rank${q}, ${q}me${q}.${q}charfield${q}, ${q}me${q}.${q}parentid${q}, CONNECT_BY_ISCYCLE
+ FROM ${q}artist${q} ${q}me${q}
+ START WITH ${q}name${q} = ?
+ CONNECT BY NOCYCLE ${q}parentid${q} = PRIOR ${q}artistid${q}
+ )",
+ [
+ [ { 'sqlt_datatype' => 'varchar', 'dbic_colname' => 'name', 'sqlt_size' => 100 }
+ => 'cycle-root'],
+ ],
+ );
+
+ is_same_sql_bind (
+ $rs->count_rs->as_query,
+ "(
+ SELECT COUNT( * )
+ FROM ${q}artist${q} ${q}me${q}
+ START WITH ${q}name${q} = ?
+ CONNECT BY NOCYCLE ${q}parentid${q} = PRIOR ${q}artistid${q}
+ )",
+ [
+ [ { 'sqlt_datatype' => 'varchar', 'dbic_colname' => 'name', 'sqlt_size' => 100 }
+ => 'cycle-root'],
+ ],
+ );
+ }
+}
+
+done_testing;
exselect_outer => 'ORDER__BY__001, ORDER__BY__002, ORDER__BY__003',
exselect_inner => 'title AS ORDER__BY__001, bar AS ORDER__BY__002, sensors AS ORDER__BY__003',
},
+
+ {
+ order_by => [
+ 'name',
+ ],
+ order_inner => 'name',
+ order_outer => 'name DESC',
+ order_req => 'name',
+ },
) {
my $o_sel = $ord_set->{exselect_outer}
? ', ' . $ord_set->{exselect_outer}
: ''
;
+ my $rs = $books_45_and_owners->search ({}, {order_by => $ord_set->{order_by}});
+
+ # query actually works
+ ok( defined $rs->count, 'Query actually works' );
+
is_same_sql_bind(
- $books_45_and_owners->search ({}, {order_by => $ord_set->{order_by}})->as_query,
+ $rs->as_query,
"(SELECT me.id, me.source, me.owner, me.price, owner__id, owner__name
FROM (
SELECT me.id, me.source, me.owner, me.price, owner__id, owner__name$o_sel
[ [ { sqlt_datatype => 'varchar', sqlt_size => 100, dbic_colname => 'source' }
=> 'Library' ] ],
);
+
}
# with groupby
--- /dev/null
+use strict;
+use warnings;
+use Test::More;
+use lib qw(t/lib);
+use DBICTest;
+use DBIC::SqlMakerTest;
+use DBIx::Class::SQLMaker::LimitDialects;
+# bind-type descriptors attached to the parameterised offset/total
+# placeholders emitted by the RowNumberOver limit dialect
+my $OFFSET = DBIx::Class::SQLMaker::LimitDialects->__offset_bindtype;
+my $TOTAL = DBIx::Class::SQLMaker::LimitDialects->__total_bindtype;
+
+# connect with the MSSQL storage class but never touch a real database
+# (no_deploy) - these tests only compare the SQL/binds from as_query
+my $schema = DBICTest->init_schema (
+ storage_type => 'DBIx::Class::Storage::DBI::MSSQL',
+ no_deploy => 1,
+ quote_names => 1
+);
+# prime caches
+$schema->storage->sql_maker;
+
+# more involved limit dialect torture testcase migrated from the
+# live mssql tests
+# each entry pairs a paged resultset with the expected SQL + bind list
+# for every limit dialect exercised below (Top, RowNumberOver)
+my $tests = {
+ pref_hm_and_page_and_group_rs => {
+
+ rs => scalar $schema->resultset ('Owners')->search (
+ {
+ 'books.id' => { '!=', undef },
+ 'me.name' => { '!=', 'somebogusstring' },
+ },
+ {
+ prefetch => 'books',
+ order_by => [ { -asc => \['name + ?', [ test => 'xxx' ]] }, 'me.id' ], # test bindvar propagation
+ group_by => [ map { "me.$_" } $schema->source('Owners')->columns ], # the literal order_by requires an explicit group_by
+ rows => 3,
+ unsafe_subselect_ok => 1,
+ },
+ )->page(3),
+
+ result => {
+ Top => [
+ '(
+ SELECT TOP 2147483647 [me].[id], [me].[name],
+ [books].[id], [books].[source], [books].[owner], [books].[title], [books].[price]
+ FROM (
+ SELECT TOP 2147483647 [me].[id], [me].[name]
+ FROM (
+ SELECT TOP 3 [me].[id], [me].[name], [ORDER__BY__001]
+ FROM (
+ SELECT TOP 9 [me].[id], [me].[name], name + ? AS [ORDER__BY__001]
+ FROM [owners] [me]
+ LEFT JOIN [books] [books]
+ ON [books].[owner] = [me].[id]
+ WHERE [books].[id] IS NOT NULL AND [me].[name] != ?
+ GROUP BY [me].[id], [me].[name]
+ ORDER BY name + ? ASC, [me].[id]
+ ) [me]
+ ORDER BY [ORDER__BY__001] DESC, [me].[id] DESC
+ ) [me]
+ ORDER BY [ORDER__BY__001] ASC, [me].[id]
+ ) [me]
+ LEFT JOIN [books] [books]
+ ON [books].[owner] = [me].[id]
+ WHERE [books].[id] IS NOT NULL AND [me].[name] != ?
+ ORDER BY name + ? ASC, [me].[id]
+ )',
+ [
+ [ { dbic_colname => 'test' }
+ => 'xxx' ],
+
+ [ { sqlt_datatype => 'varchar', sqlt_size => 100, dbic_colname => 'me.name' }
+ => 'somebogusstring' ],
+
+ [ { dbic_colname => 'test' } => 'xxx' ], # the extra re-order bind
+
+ [ { sqlt_datatype => 'varchar', sqlt_size => 100, dbic_colname => 'me.name' }
+ => 'somebogusstring' ],
+
+ [ { dbic_colname => 'test' }
+ => 'xxx' ],
+ ],
+ ],
+
+ RowNumberOver => [
+ '(
+ SELECT TOP 2147483647 [me].[id], [me].[name],
+ [books].[id], [books].[source], [books].[owner], [books].[title], [books].[price]
+ FROM (
+ SELECT TOP 2147483647 [me].[id], [me].[name]
+ FROM (
+ SELECT [me].[id], [me].[name],
+ ROW_NUMBER() OVER( ORDER BY [ORDER__BY__001] ASC, [me].[id] ) AS [rno__row__index]
+ FROM (
+ SELECT [me].[id], [me].[name], name + ? AS [ORDER__BY__001]
+ FROM [owners] [me]
+ LEFT JOIN [books] [books]
+ ON [books].[owner] = [me].[id]
+ WHERE [books].[id] IS NOT NULL AND [me].[name] != ?
+ GROUP BY [me].[id], [me].[name]
+ ) [me]
+ ) [me]
+ WHERE [rno__row__index] >= ? AND [rno__row__index] <= ?
+ ) [me]
+ LEFT JOIN [books] [books]
+ ON [books].[owner] = [me].[id]
+ WHERE [books].[id] IS NOT NULL AND [me].[name] != ?
+ ORDER BY name + ? ASC, [me].[id]
+ )',
+ [
+ [ { dbic_colname => 'test' }
+ => 'xxx' ],
+
+ [ { sqlt_datatype => 'varchar', sqlt_size => 100, dbic_colname => 'me.name' }
+ => 'somebogusstring' ],
+
+ [ $OFFSET => 7 ], # parameterised RNO
+
+ [ $TOTAL => 9 ],
+
+ [ { sqlt_datatype => 'varchar', sqlt_size => 100, dbic_colname => 'me.name' }
+ => 'somebogusstring' ],
+
+ [ { dbic_colname => 'test' }
+ => 'xxx' ],
+ ],
+ ],
+ }
+ },
+
+ pref_bt_and_page_and_group_rs => {
+
+ rs => scalar $schema->resultset ('BooksInLibrary')->search (
+ {
+ 'owner.name' => [qw/wiggle woggle/],
+ },
+ {
+ distinct => 1,
+ having => \['1 = ?', [ test => 1 ] ], #test having propagation
+ prefetch => 'owner',
+ rows => 2, # 3 results total
+ order_by => [{ -desc => 'me.owner' }, 'me.id'],
+ unsafe_subselect_ok => 1,
+ },
+ )->page(3),
+
+ result => {
+ Top => [
+ '(
+ SELECT TOP 2147483647 [me].[id], [me].[source], [me].[owner], [me].[title], [me].[price],
+ [owner].[id], [owner].[name]
+ FROM (
+ SELECT TOP 2147483647 [me].[id], [me].[source], [me].[owner], [me].[title], [me].[price]
+ FROM (
+ SELECT TOP 2 [me].[id], [me].[source], [me].[owner], [me].[title], [me].[price]
+ FROM (
+ SELECT TOP 6 [me].[id], [me].[source], [me].[owner], [me].[title], [me].[price]
+ FROM [books] [me]
+ JOIN [owners] [owner]
+ ON [owner].[id] = [me].[owner]
+ WHERE ( [owner].[name] = ? OR [owner].[name] = ? ) AND [source] = ?
+ GROUP BY [me].[id], [me].[source], [me].[owner], [me].[title], [me].[price]
+ HAVING 1 = ?
+ ORDER BY [me].[owner] DESC, [me].[id]
+ ) [me]
+ ORDER BY [me].[owner] ASC, [me].[id] DESC
+ ) [me]
+ ORDER BY [me].[owner] DESC, [me].[id]
+ ) [me]
+ JOIN [owners] [owner]
+ ON [owner].[id] = [me].[owner]
+ WHERE ( [owner].[name] = ? OR [owner].[name] = ? ) AND [source] = ?
+ ORDER BY [me].[owner] DESC, [me].[id]
+ )',
+ [
+ # inner
+ [ { sqlt_datatype => 'varchar', sqlt_size => 100, dbic_colname => 'owner.name' }
+ => 'wiggle' ],
+ [ { sqlt_datatype => 'varchar', sqlt_size => 100, dbic_colname => 'owner.name' }
+ => 'woggle' ],
+ [ { sqlt_datatype => 'varchar', sqlt_size => 100, dbic_colname => 'source' }
+ => 'Library' ],
+ [ { dbic_colname => 'test' }
+ => '1' ],
+
+ # outer
+ [ { sqlt_datatype => 'varchar', sqlt_size => 100, dbic_colname => 'owner.name' }
+ => 'wiggle' ],
+ [ { sqlt_datatype => 'varchar', sqlt_size => 100, dbic_colname => 'owner.name' }
+ => 'woggle' ],
+ [ { sqlt_datatype => 'varchar', sqlt_size => 100, dbic_colname => 'source' }
+ => 'Library' ],
+ ],
+ ],
+ RowNumberOver => [
+ '(
+ SELECT TOP 2147483647 [me].[id], [me].[source], [me].[owner], [me].[title], [me].[price],
+ [owner].[id], [owner].[name]
+ FROM (
+ SELECT TOP 2147483647 [me].[id], [me].[source], [me].[owner], [me].[title], [me].[price]
+ FROM (
+ SELECT [me].[id], [me].[source], [me].[owner], [me].[title], [me].[price],
+ ROW_NUMBER() OVER( ORDER BY [me].[owner] DESC, [me].[id] ) AS [rno__row__index]
+ FROM (
+ SELECT [me].[id], [me].[source], [me].[owner], [me].[title], [me].[price]
+ FROM [books] [me]
+ JOIN [owners] [owner]
+ ON [owner].[id] = [me].[owner]
+ WHERE ( [owner].[name] = ? OR [owner].[name] = ? ) AND [source] = ?
+ GROUP BY [me].[id], [me].[source], [me].[owner], [me].[title], [me].[price]
+ HAVING 1 = ?
+ ) [me]
+ ) [me]
+ WHERE [rno__row__index] >= ? AND [rno__row__index] <= ?
+ ) [me]
+ JOIN [owners] [owner]
+ ON [owner].[id] = [me].[owner]
+ WHERE ( [owner].[name] = ? OR [owner].[name] = ? ) AND [source] = ?
+ ORDER BY [me].[owner] DESC, [me].[id]
+ )',
+ [
+ # inner
+ [ { sqlt_datatype => 'varchar', sqlt_size => 100, dbic_colname => 'owner.name' }
+ => 'wiggle' ],
+ [ { sqlt_datatype => 'varchar', sqlt_size => 100, dbic_colname => 'owner.name' }
+ => 'woggle' ],
+ [ { sqlt_datatype => 'varchar', sqlt_size => 100, dbic_colname => 'source' }
+ => 'Library' ],
+ [ { dbic_colname => 'test' }
+ => '1' ],
+
+ [ $OFFSET => 5 ],
+ [ $TOTAL => 6 ],
+
+ # outer
+ [ { sqlt_datatype => 'varchar', sqlt_size => 100, dbic_colname => 'owner.name' }
+ => 'wiggle' ],
+ [ { sqlt_datatype => 'varchar', sqlt_size => 100, dbic_colname => 'owner.name' }
+ => 'woggle' ],
+ [ { sqlt_datatype => 'varchar', sqlt_size => 100, dbic_colname => 'source' }
+ => 'Library' ],
+ ],
+ ],
+ },
+ },
+};
+
+# run every resultset through every limit dialect it has expectations for
+for my $tname (keys %$tests) {
+ for my $limtype (keys %{$tests->{$tname}{result}} ) {
+
+ # drop the memoised syntax so the freshly-set dialect takes effect
+ delete $schema->storage->_sql_maker->{_cached_syntax};
+ $schema->storage->_sql_maker->limit_dialect ($limtype);
+
+ is_same_sql_bind(
+ $tests->{$tname}{rs}->as_query,
+ @{ $tests->{$tname}{result}{$limtype} },
+ "Correct SQL for $limtype on $tname",
+ );
+ }
+}
+
+done_testing;
'(
SELECT me.id, owner__id, owner__name, bar, baz
FROM (
- SELECT me.id, owner__id, owner__name, bar, baz, ROWNUM rownum__index
+ SELECT me.id, owner__id, owner__name, bar, baz, ROWNUM AS rownum__index
FROM (
SELECT me.id, owner.id AS owner__id, owner.name AS owner__name, ? * ? AS bar, ? AS baz
FROM books me
'(
SELECT me.id, owner__id, owner__name, bar, baz
FROM (
- SELECT me.id, owner__id, owner__name, bar, baz, ROWNUM rownum__index
+ SELECT me.id, owner__id, owner__name, bar, baz, ROWNUM AS rownum__index
FROM (
SELECT me.id, owner.id AS owner__id, owner.name AS owner__name, ? * ? AS bar, ? AS baz
FROM books me
FROM (
SELECT me.name, me.id
FROM (
- SELECT me.name, me.id, ROWNUM rownum__index
+ SELECT me.name, me.id, ROWNUM AS rownum__index
FROM (
SELECT me.name, me.id
FROM owners me
use DBICTest;
use DBIC::SqlMakerTest;
-use DBIx::Class::SQLMaker::ACCESS ();
+# the entire point of the subclass is that parenthesis have to be
+# just right for ACCESS to be happy
+# globalize for entirety of the test
+$SQL::Abstract::Test::parenthesis_significant = 1;
-my $sa = DBIx::Class::SQLMaker::ACCESS->new;
+my $schema = DBICTest->init_schema (storage_type => 'DBIx::Class::Storage::DBI::ACCESS', no_deploy => 1, quote_names => 1);
+
+is_same_sql_bind(
+ $schema->resultset('Artist')->search(
+ {
+ artistid => 1,
+ },
+ {
+ join => [{ cds => 'tracks' }],
+ '+select' => [ 'tracks.title' ],
+ '+as' => [ 'track_title' ],
+ }
+ )->as_query,
+ '(
+ SELECT [me].[artistid], [me].[name], [me].[rank], [me].[charfield],
+ [tracks].[title]
+ FROM (
+ (
+ [artist] [me]
+ LEFT JOIN cd [cds]
+ ON [cds].[artist] = [me].[artistid]
+ )
+ LEFT JOIN [track] [tracks]
+ ON [tracks].[cd] = [cds].[cdid]
+ )
+ WHERE [artistid] = ?
+ )',
+ [
+ [{ sqlt_datatype => 'integer', dbic_colname => 'artistid' }
+ => 1 ],
+ ],
+ 'correct SQL for two-step left join'
+);
+
+is_same_sql_bind(
+ $schema->resultset('Track')->search(
+ {
+ trackid => 1,
+ },
+ {
+ join => [{ cd => 'artist' }],
+ '+select' => [ 'artist.name' ],
+ '+as' => [ 'artist_name' ],
+ }
+ )->as_query,
+ '(
+ SELECT [me].[trackid], [me].[cd], [me].[position], [me].[title], [me].[last_updated_on], [me].[last_updated_at],
+ [artist].[name]
+ FROM (
+ (
+ [track] [me]
+ INNER JOIN cd [cd]
+ ON [cd].[cdid] = [me].[cd]
+ )
+ INNER JOIN [artist] [artist]
+ ON [artist].[artistid] = [cd].[artist]
+ )
+ WHERE [trackid] = ?
+ )',
+ [
+ [{ sqlt_datatype => 'integer', dbic_colname => 'trackid' }
+ => 1 ],
+ ],
+ 'correct SQL for two-step inner join',
+);
+
+
+my $sa = $schema->storage->sql_maker;
+# the legacy tests assume no quoting - leave things as-is
+$sa->quote_char(undef);
# my ($self, $table, $fields, $where, $order, @rest) = @_;
my ($sql, @bind) = $sa->select(
my $schema = DBICTest::Schema->connect (DBICTest->_database, { quote_char => '`' });
# cheat
require DBIx::Class::Storage::DBI::mysql;
+*DBIx::Class::Storage::DBI::mysql::_get_server_version = sub { 5 };
bless ( $schema->storage, 'DBIx::Class::Storage::DBI::mysql' );
# check that double-subqueries are properly wrapped
FROM (
SELECT `artist`.`artistid`
FROM cd `me`
- INNER JOIN `artist` `artist`
+ JOIN `artist` `artist`
ON `artist`.`artistid` = `me`.`artist`
WHERE `artist`.`name` LIKE ?
) `_forced_double_subquery`
);
}
+# Test support for inner joins on mysql v3
+for (
+ [ 3 => 'INNER JOIN' ],
+ [ 4 => 'JOIN' ],
+) {
+ my ($ver, $join_op) = @$_;
+
+ no warnings 'redefine';
+ local *DBIx::Class::Storage::DBI::mysql::_get_server_version = sub { $ver };
+
+ # we do not care at this point if data is available, just do a reconnect cycle
+ # to clear all caches
+ $schema->storage->disconnect;
+ $schema->storage->ensure_connected;
+
+ is_same_sql_bind (
+ $schema->resultset('CD')->search ({}, { prefetch => 'artist' })->as_query,
+ "(
+ SELECT `me`.`cdid`, `me`.`artist`, `me`.`title`, `me`.`year`, `me`.`genreid`, `me`.`single_track`,
+ `artist`.`artistid`, `artist`.`name`, `artist`.`rank`, `artist`.`charfield`
+ FROM cd `me`
+ $join_op `artist` `artist` ON `artist`.`artistid` = `me`.`artist`
+ )",
+ [],
+ "default join type works for version $ver",
+ );
+}
+
done_testing;
}
# make sure connection-less storages do not throw on _determine_driver
-{
- local $ENV{DBI_DSN};
- local $ENV{DBI_DRIVER};
+# but work with ENV at the same time
+SKIP: for my $env_dsn (undef, (DBICTest->_database)[0] ) {
+ skip 'Subtest relies on being connected to SQLite', 1
+ if $env_dsn and $env_dsn !~ /\:SQLite\:/;
- my $s = DBICTest::Schema->connect;
+ local $ENV{DBI_DSN} = $env_dsn || '';
+
+ my $s = DBICTest::Schema->connect();
is_deeply (
$s->storage->connect_info,
[],
- 'Starting with no connection info',
+ 'Starting with no explicitly passed in connect info'
+ . ($env_dsn ? ' (with DBI_DSN)' : ''),
);
- isa_ok(
- $s->storage->sql_maker,
- 'DBIx::Class::SQLMaker',
- 'Getting back an SQLMaker succesfully',
- );
+ my $sm = $s->storage->sql_maker;
+
+ ok (! $s->storage->connected, 'Storage does not appear connected after SQLMaker instance is taken');
- ok (! $s->storage->_driver_determined, 'Driver undetermined');
+ if ($env_dsn) {
+ isa_ok($sm, 'DBIx::Class::SQLMaker');
- ok (! $s->storage->connected, 'Storage does not appear connected');
+ ok ( $s->storage->_driver_determined, 'Driver determined (with DBI_DSN)');
+ isa_ok ( $s->storage, 'DBIx::Class::Storage::DBI::SQLite' );
+ }
+ else {
+ isa_ok($sm, 'DBIx::Class::SQLMaker');
- throws_ok {
- $s->storage->ensure_connected
- } qr/You did not provide any connection_info/,
- 'sensible exception on empty conninfo connect'
+ ok (! $s->storage->_driver_determined, 'Driver undetermined');
+
+ throws_ok {
+ $s->storage->ensure_connected
+ } qr/You did not provide any connection_info/,
+ 'sensible exception on empty conninfo connect';
+ }
}
done_testing;
-
-1;
use DBICTest;
use Test::More;
use Test::Exception;
+use DBIx::Class::_Util 'sigwarn_silencer';
BEGIN { delete @ENV{qw(DBI_DSN DBI_DRIVER)} }
sub count_sheep {
my $schema = shift;
+
+ local $SIG{__WARN__} = sigwarn_silencer(
+ qr/
+ \QThis version of DBIC does not yet seem to supply a driver for your particular RDBMS\E
+ |
+ \QUnable to extract a driver name from connect info\E
+ |
+ \QYour storage class (DBIx::Class::Storage::DBI) does not set sql_limit_dialect\E
+ /x
+ );
+
scalar $schema->resultset('Artist')->search( { name => "Exploding Sheep" } )
->all;
}
lives_ok { count_sheep($schema) } 'SQLite in DBI_DRIVER (not DBI_DSN)';
isa_ok $schema->storage, 'DBIx::Class::Storage::DBI::SQLite';
+# make sure that dynamically setting DBI_DSN post-connect works
+{
+ local $ENV{DBI_DSN};
+
+ my $s = DBICTest::Schema->connect();
+
+ throws_ok {
+ $s->storage->ensure_connected
+ } qr/You did not provide any connection_info/,
+ 'sensible exception on empty conninfo connect';
+
+ $ENV{DBI_DSN} = 'dbi:SQLite::memory:';
+
+ lives_ok { $s->storage->ensure_connected } 'Second connection attempt worked';
+ isa_ok ( $s->storage, 'DBIx::Class::Storage::DBI::SQLite' );
+}
+
done_testing;
unless DBIx::Class::Optional::Dependencies->req_ok_for ('deploy')
}
+local $ENV{DBI_DSN};
+
# this is how maint/gen_schema did it (connect() to force a storage
# instance, but no conninfo)
# there ought to be more code like this in the wild
sub {
$schema->resultset('CD')->create({ title => 'vacation in antarctica' })
},
- qr/DBI Exception.+cd\.artist.+NULL/s
+ qr/DBI Exception.+(?x:
+ \QNOT NULL constraint failed: cd.artist\E
+ |
+ \Qcd.artist may not be NULL\E
+ )/s
); # as opposed to some other error
}, [], 'No warnings besides exception' );
ok $schema->connection(
sub { DBI->connect(DBICTest->_database, undef, undef, { AutoCommit => 0 }) },
{
+ # DO NOT REMOVE - this seems like an unrelated piece of info,
+ # but is in fact a test for a bug where setting an accessor-via-option
+ # would trigger an early connect *bypassing* the on_connect_* pieces
+ cursor_class => 'DBIx::Class::Storage::Cursor',
+
on_connect_do => [
'CREATE TABLE TEST_empty (id INTEGER)',
[ 'INSERT INTO TEST_empty VALUES (?)', {}, 2 ],
}
}
- ## Cleanup after ourselves. Unlink all gthe slave paths.
+ ## Cleanup after ourselves. Unlink all the slave paths.
sub cleanup {
my $self = shift @_;
+ $_->disconnect for values %{ $self->schema->storage->replicants };
foreach my $slave (@{$self->slave_paths}) {
if(-e $slave) {
unlink $slave;
is $debug{storage_type}, 'REPLICANT', "got last query from a replicant: $debug{dsn}";
}
+
## Delete the old database files
$replicated->cleanup;
(ref $schema)->txn_do(sub{});
}, qr/storage/, "can't call txn_do without storage");
- throws_ok ( sub {
+ throws_ok {
$schema->txn_do('');
- }, qr/must be a CODE reference/, '$coderef parameter check ok');
+ } qr/\Qrun() requires a coderef to execute as its first argument/,
+ '$coderef parameter check ok';
}
# Test successful txn_do() - scalar/list context
# this always fails
! $pre_poison
or
- # I do not underdtand why but on <= 5.8.8 and $pre_poison && $post_poison passes...
- $] > 5.008008
+ # I do not understand why but on <= 5.8.8 and on 5.10.0 "$pre_poison && $post_poison" passes...
+ ($] > 5.008008 and $] < 5.010000 ) or $] > 5.010000
));
is (@w, 2, "Both expected warnings found - \$\@ pre-poison: $pre_poison, post-poison: $post_poison" );
require Text::Balanced;
- my $great_success;
- {
- local $TODO = 'RT#74994 *STILL* not fixed';
-
- lives_ok {
- # this is what poisons $@
- Text::Balanced::extract_bracketed( '(foo', '()' );
-
- my $s = DBICTest->init_schema( deploy => 0 );
- my $g = $s->txn_scope_guard;
- $g->commit;
- $great_success++;
- } 'Text::Balanced is no longer screwing up $@';
- }
-
- # delete all of this when T::B dep is bumped
- unless ($great_success) {
-
-# hacky workaround for desperate folk
-# intended to be copypasted into your app
- {
- require Text::Balanced;
- require overload;
-
- local $@;
-
- # this is what poisons $@
- Text::Balanced::extract_bracketed( '(foo', '()' );
+ my @w;
+ local $SIG{__WARN__} = sub {
+ $_[0] =~ /External exception object .+? \Qimplements partial (broken) overloading/
+ ? push @w, @_
+ : warn @_
+ };
- if ($@ and overload::Overloaded($@) and ! overload::Method($@,'fallback') ) {
- my $class = ref $@;
- eval "package $class; overload->import(fallback => 1);"
- }
- }
-# end of hacky workaround
+ lives_ok {
+ # this is what poisons $@
+ Text::Balanced::extract_bracketed( '(foo', '()' );
- lives_ok {
- # this is what poisons $@
- Text::Balanced::extract_bracketed( '(foo', '()' );
+ my $s = DBICTest::Schema->connect('dbi:SQLite::memory:');
+ my $g = $s->txn_scope_guard;
+ $g->commit;
+ } 'Broken Text::Balanced is not screwing up txn_guard';
- my $s = DBICTest->init_schema( deploy => 0 );
- my $g = $s->txn_scope_guard;
- $g->commit;
- } 'Monkeypatched Text::Balanced is no longer screwing up $@';
- }
+ local $TODO = 'RT#74994 *STILL* not fixed';
+ is(scalar @w, 0, 'no warnings \o/');
}
done_testing;
use warnings;
use strict;
+use DBIx::Class::_Util 'sigwarn_silencer';
+use if DBIx::Class::_ENV_::BROKEN_FORK, 'threads';
+
use Test::More;
use File::Find;
+use Time::HiRes 'sleep';
-use DBIx::Class::_Util 'sigwarn_silencer';
use lib 't/lib';
+my $worker = sub {
+ my $fn = shift;
+
+ if (my @offenders = grep { $_ !~ m{DBIx/Class/(?:_Util|Carp)\.pm} } grep { $_ =~ /(^|\/)DBI/ } keys %INC) {
+ die "Wtf - DBI* modules present in %INC: @offenders";
+ }
+
+ local $SIG{__WARN__} = sigwarn_silencer( qr/\bdeprecated\b/i );
+ require( ( $fn =~ m| t/lib/ (.+) |x )[0] ); # untaint and strip lib-part (. is unavailable under -T)
+
+ return 42;
+};
+
+
find({
wanted => sub {
return unless ( -f $_ and $_ =~ /\.pm$/ );
- my $pid = fork();
- if (! defined $pid) {
- die "fork failed: $!"
+ if (DBIx::Class::_ENV_::BROKEN_FORK) {
+ # older perls crash if threads are spawned way too quickly, sleep for 100 msecs
+ my $t = threads->create(sub { $worker->($_) });
+ sleep 0.1;
+ is ($t->join, 42, "Thread loading $_ did not finish successfully")
+ || diag ($t->can('error') ? $t->error : 'threads.pm too old to retrieve the error :(' );
}
- elsif (!$pid) {
- if (my @offenders = grep { $_ ne 'DBIx/Class/_Util.pm' } grep { $_ =~ /(^|\/)DBI/ } keys %INC) {
- die "Wtf - DBI* modules present in %INC: @offenders";
+ else {
+ my $pid = fork();
+ if (! defined $pid) {
+ die "fork failed: $!"
+ }
+ elsif (!$pid) {
+ $worker->($_);
+ exit 0;
}
- local $SIG{__WARN__} = sigwarn_silencer( qr/\bdeprecated\b/i );
- require( ( $_ =~ m| t/lib/ (.+) |x )[0] ); # untaint and strip lib-part (. is unavailable under -T)
- exit 0;
+ is ( waitpid($pid, 0), $pid, "Fork $pid terminated sucessfully");
+ my $ex = $? >> 8;
+ is ( $ex, 0, "Loading $_ ($pid) exitted with $ex" );
}
-
- is ( waitpid($pid, 0), $pid, "Fork $pid terminated sucessfully");
- my $ex = $? >> 8;
- is ( $ex, 0, "Loading $_ ($pid) exitted with $ex" );
},
no_chdir => 1,