_build/
blib/
inc/
-lib/DBIx/Class/Optional/Dependencies.pod
DBIx-Class-*/
DBIx-Class-*.tar.*
pm_to_blib
t/var/
.*.sw?
+*#
+.#*
+*~
+maint/.Generated_Pod
# so if someone were to legally change their name, we could use it to fix that
# while maintaining the integrity of the repository
-# I've mapped the old single quote version of my name to the double quote
-# version for consistency
-Arthur Axel "fREW" Schmidt <frioux@gmail.com> Arthur Axel 'fREW' Schmidt <frioux@gmail.com>
-Andrew Rodland <andrew@cleverdomain.org> Andrew Rodland <arodland@cpan.org>
\ No newline at end of file
+Alexander Hartmaier <abraxxa@cpan.org> <alexander.hartmaier@t-systems.at>
+Amiri Barksdale <amiribarksdale@gmail.com> <amiri@metalabel.com>
+Andrew Rodland <andrew@cleverdomain.org> <arodland@cpan.org>
+Arthur Axel "fREW" Schmidt <frioux@gmail.com>
+Brendan Byrd <Perl@ResonatorSoft.org> <byrd.b@insightcom.com>
+Brendan Byrd <Perl@ResonatorSoft.org> <GitHub@ResonatorSoft.org>
+Brendan Byrd <Perl@ResonatorSoft.org> <perl@resonatorsoft.org>
+Brian Phillips <bphillips@cpan.org> <bphillips@digitalriver.com>
+David Kamholz <dkamholz@cpan.org> <davekam@pobox.com>
+David Schmidt <davewood@gmx.at> <d.schmidt@tripwolf.com>
+Devin Austin <dhoss@cpan.org> <devin.austin@gmail.com>
+Felix Antonius Wilhelm Ostmann <sadrak@cpan.org> <ostmann@sadraksaemp.intern4.websuche.de>
+Gerda Shank <gshank@cpan.org> <gerda.shank@gmail.com>
+Gianni Ceccarelli <dakkar@thenautilus.net> <gianni.ceccarelli@net-a-porter.com>
+Gordon Irving <goraxe@cpan.org> <goraxe@goraxe.me.uk>
+Hakim Cassimally <osfameron@cpan.org> <hakim@vm-participo.(none)>
+Jonathan Chu <milki@rescomp.berkeley.edu> <milki@rescomp.berkeley.edu>
+Matt Phillips <mattp@cpan.org> <mphillips@oanda.com>
+Peter Rabbitson <ribasushi@cpan.org> <rabbit@viator.rabbit.us>
+Roman Filippov <romanf@cpan.org> <moltar@moltar.net>
+Tim Bunce <Tim.Bunce@pobox.com> <Tim.Bunce@ig.co.uk>
+Toby Corkindale <tjc@cpan.org> <toby@dryft.net>
+Wallace Reis <wreis@cpan.org> <wallace@reis.org.br>
--- /dev/null
+# Some overall notes on how this works
+#
+# * We smoke using the latest system-provided perl, and custom-built "oddball perls"
+# The reason for not having a blanket matrix is to conserve travis resources
+# as a full DBIC depchain isn't cheap
+#
+# * Minimum perl officially supported by DBIC is 5.8.3. This *includes* the
+# basic depchain. On failure either attempt to fix it or bring it to the
+# attention of ribasushi. *DO NOT* disable 5.8 testing - it is here for a
+# reason
+#
+# * The matrix is built from two main modes - CLEANTEST = [true|false].
+# - In the first case we test with minimal deps available, and skip everything
+#     listed in DBIC::OptDeps. The modules are installed with classic CPAN
+# invocations and are *fully tested*. In other words we simulate what would
+# happen if a user tried to install on a just-compiled virgin perl
+# - Without CLEANTEST we bring the armada of RDBMS and install the maximum
+# possible set of deps *without testing them*. This ensures we stay within
+# a reasonable build-time and still run as many of our tests as possible
+#
+# * The perl builds and the DBIC tests run under NUMTHREADS number of threads.
+# The testing of dependencies under CLEANTEST runs single-threaded, at least
+# until we fix our entire dep-chain to safely pass under -j
+#
+# * The way .travis.yml is fed to the command controller is idiotic - it
+# makes using multiline `bash -c` statements impossible. Therefore to
+# aid readability (our travis logic is rather complex), the bulk of
+# functionality is moved to scripts. More about the problem (and the
+# WONTFIX "explanation") here: https://github.com/travis-ci/travis-ci/issues/497
+#
+
+#
+# Smoke only specific branches to a) not overload the queue and b) not
+# overspam the notification channels
+#
+# Furthermore if the branch is ^topic/ - the custom compiled smokes will
+# not run at all, again in order to conserve queue resources
+#
+# Additionally bleadperl tests do not run on master (but do run on smoke/*)
+#
+branches:
+ only:
+ - master
+ - /^smoke\//
+ - /^topic\//
+
+notifications:
+ irc:
+ channels:
+ - "irc.perl.org#dbic-smoke"
+ template:
+ - "%{branch}#%{build_number} by %{author}: %{message} (%{build_url})"
+ on_success: change
+ on_failure: always
+ use_notice: true
+
+ email:
+ recipients:
+ - ribasushi@cpan.org
+ # Temporary - if it proves to be too noisy, we'll shut it off
+ #- dbix-class-devel@lists.scsys.co.uk
+ on_success: change
+ on_failure: change
+
+language: perl
+
+perl:
+ - "5.16"
+
+env:
+ - CLEANTEST=false
+ - CLEANTEST=true
+
+matrix:
+ include:
+ # bleadperl
+ - perl: bleadperl_thr_mb
+ env:
+ - CLEANTEST=false
+ - BREWOPTS="-Duseithreads -Dusemorebits"
+ - BREWVER=blead
+
+ # minimum supported with threads
+ - perl: 5.8.5_thr
+ env:
+ - CLEANTEST=false
+ - BREWOPTS="-Duseithreads"
+ - BREWVER=5.8.5
+
+ # minimum supported without threads
+ - perl: 5.8.3_nt
+ env:
+ - CLEANTEST=false
+ - BREWOPTS=""
+ - BREWVER=5.8.3
+
+ # check CLEANTEST of minimum supported
+ - perl: 5.8.3_nt_mb
+ env:
+ - CLEANTEST=true
+ - BREWOPTS="-Dusemorebits"
+ - BREWVER=5.8.3
+
+ # this is the perl suse ships
+ - perl: 5.10.0_thr_dbg
+ env:
+ - CLEANTEST=true
+ - BREWOPTS="-DDEBUGGING -Duseithreads"
+ - BREWVER=5.10.0
+
+ # this particular perl is quite widespread
+ - perl: 5.8.8_thr_mb
+ env:
+ - CLEANTEST=true
+ - BREWOPTS="-Duseithreads -Dusemorebits"
+ - BREWVER=5.8.8
+
+# sourcing the files is *EXTREMELY* important - otherwise
+# no envvars will survive
+
+# the entire run times out after 50 minutes, or after 5 minutes without
+# console output
+
+before_install:
+ # Sets global envvars, downloads/configures debs based on CLEANTEST
+ # Sets extra DBICTEST_* envvars
+ #
+ - source maint/travis-ci_scripts/10_before_install.bash
+
+install:
+ # Build and switch to a custom perl if requested
+ # Configure the perl env, preinstall some generic toolchain parts
+ #
+ - source maint/travis-ci_scripts/20_install.bash
+
+before_script:
+ # Preinstall/install deps based on envvars/CLEANTEST
+ #
+ - source maint/travis-ci_scripts/30_before_script.bash
+
+script:
+ # Run actual tests
+ #
+ - source maint/travis-ci_scripts/40_script.bash
+
+after_success:
+ # Check if we can assemble a dist properly if not in CLEANTEST
+ #
+ - source maint/travis-ci_scripts/50_after_success.bash
+
+after_failure:
+ # No tasks yet
+ #
+ #- source maint/travis-ci_scripts/50_after_failure.bash
+
+after_script:
+ # No tasks yet
+ #
+ #- source maint/travis-ci_scripts/60_after_script.bash
+
+  # if we do not unset this before we terminate, the travis teardown
+  # will mark the entire job as failed
+ - set +e
Revision history for DBIx::Class
+ * Fixes
+    - Fix duplicated selected columns when calling 'count' when the same
+ aggregate function is used more than once in a 'having' clause
+ (RT#83305)
+
+ * Misc
+ - Fixup our distbuilding process to stop creating world-writable
+ tarball contents (implicitly fixes RT#83084)
+ - Added strict and warnings tests for all lib and test files
+
+0.08206 2013-02-08
+ * Fixes
+ - Fix dbh_do() failing to properly reconnect (regression in 0.08205)
+ - Extra sanity check of a fresh DBI handle ($dbh). Fixes
+ connection coderefs returning garbage (seen in the wild)
+
+ * Misc
+ - Only allow known globals in SQL::Translator leak allowance
+ - General cleanup of error message texts - quote names/identifiers
+ for easier reading
+ - Stop t/52leaks.t from failing when AUTOMATED_TESTING=1
+
+0.08205 2013-01-22
+ * New Features / Changes
+ - The emulate_limit() arbitrary limit dialect emulation mechanism is
+ now deprecated, and will be removed when DBIx::Class migrates to
+ Data::Query
+ - Support for the source_bind_attributes() storage method has been
+ removed after a lengthy deprecation cycle
+ * Fixes
+ - When performing resultset update/delete only strip condition
+ qualifiers - leave the source name alone (RT#80015, RT#78844)
+ - Fix incorrect behavior on resultset update/delete invoked on
+ composite resultsets (e.g. as_subselect_rs)
+ - Fix update/delete operations referencing the updated table failing
+ on MySQL, due to its refusal to modify a table being directly
+ queried. As a workaround induce in-memory temp-table creation
+ (RT#81378, RT#81897)
+ - More robust behavior under heavily threaded environments - make
+ sure we do not have refaddr reuse in the global storage registry
+ - Fix failing test on 5.8 under Win32 (RT#81114)
+ - Fix hash-randomization test issues (RT#81638)
+ - Disallow erroneous calling of connect_info on a replicated storage
+ (RT#78436)
+ * Misc
+ - Improve the populate docs in ::Schema and ::ResultSet
+ - ::Storage::DBI::source_bind_attributes() removed as announced
+ on Jan 2011 in 0e773352a
+
+0.08204 2012-11-08
+ * New Features / Changes
+ - SQLMaker now accepts \'literal' with the 'for' rs attribute as an
+ override to the builtin FOR options
+ * Fixes
+ - Fix unique constraint violations in Ordered.pm blanket movement
+ (RT#79773, rolls back short-sighted 5e6fde33e)
+ - Fix API mismatch between new_result() and new_related() (originally
+ broken by fea3d045)
+ - Fix test failure on perl 5.8
+ * Misc
+ - Much more extensive diagnostics when a new RDBMS/DSN combination is
+ encountered (RT#80431)
+
+0.08203 2012-10-18
+ * Fixes
+ - Really fix inadequate $dbh->ping SQLite implementation (what shipped
+ in 0.08201 tickled other deficiencies in DBD::SQLite itself)
+
+0.08202 2012-10-06
+ * Fixes
+ - Replace inadequate $dbh->ping SQLite implementation with our own,
+ fixes RT#78420
+
+0.08200 2012-08-24 (UTC)
+ * Fixes
+ - Change one of the new tests for the previous release to not require
+ SQL::Translator
+
+0.08199 2012-08-22 (UTC)
+ * Fixes
+ - Roll back incomplete (and broken) internal changes - restore prefetch functionality
+
+0.08198 2012-07-11 03:43 (UTC)
+ * Fixes
+ - Fix a number of Win32 Test issues
+ - Fix silent Oracle connection failures
+
+0.08197 2012-07-10 10:32 (UTC)
* New Features / Changes
- Issue a warning when DateTime objects are passed to ->search
- Fast populate() in void context is now even more efficient by
- Normalization of retrieved GUID values
* Fixes
+ - Fix complex has_many prefetch with resultsets not selecting identity
+ columns from the root result source
- Fix SkipFirst and FirstSkip limit dialects (Informix and Firebird)
- Fix "Skimming limit" dialects (Top, FetchFirst) to properly check
the order_by criteria for stability
\.orig$
\.rej$
+lib/DBIx/Class/Manual/ResultClass.pod.proto
+maint/.Generated_Pod
# for that)
BEGIN {
$Module::Install::AUTHOR = 0 if (grep { $ENV{"PERL5_${_}_IS_RUNNING"} } (qw/CPANM CPANPLUS CPAN/) );
+ makemaker_args( NORECURS => 1 );
}
+homepage 'http://www.dbix-class.org/';
+resources 'IRC' => 'irc://irc.perl.org/#dbix-class';
+resources 'license' => 'http://dev.perl.org/licenses/';
+resources 'repository' => 'https://github.com/dbsrgits/DBIx-Class';
+resources 'MailingList' => 'http://lists.scsys.co.uk/cgi-bin/mailman/listinfo/dbix-class';
+resources 'bugtracker' => 'http://rt.cpan.org/NoAuth/Bugs.html?Dist=DBIx-Class';
+
name 'DBIx-Class';
perl_version '5.008001';
all_from 'lib/DBIx/Class.pm';
+Meta->{values}{x_authority} = 'cpan:RIBASUSHI';
tests_recursive (qw|
t
script/dbicadmin
|);
-homepage 'http://www.dbix-class.org/';
-resources 'IRC' => 'irc://irc.perl.org/#dbix-class';
-resources 'license' => 'http://dev.perl.org/licenses/';
-resources 'repository' => 'git://git.shadowcat.co.uk/dbsrgits/DBIx-Class.git';
-resources 'MailingList' => 'http://lists.scsys.co.uk/cgi-bin/mailman/listinfo/dbix-class';
-resources 'bugtracker' => 'http://rt.cpan.org/NoAuth/Bugs.html?Dist=DBIx-Class';
-
###
### DO NOT ADD OPTIONAL DEPENDENCIES HERE, EVEN AS recommends()
### All of them *MUST* go to DBIx::Class::Optional::Dependencies
###
my $runtime_requires = {
+ # FIXME - temporary workaround for RT#83143 (Path::Class)
+ 'File::Spec' => '3.30',
+
# FIXME - temporary, needs throwing out for something more efficient
'Data::Compare' => '1.22',
- # Moo does not yet depend on this higher version
- 'strictures' => '1.003001',
-
# DBI itself should be capable of installation and execution in pure-perl
# mode. However it has never been tested yet, so consider XS for the time
# being
'DBI' => '1.57',
+ # on older versions first() leaks
+ # for the time being make it a hard dep - when we get
+  # rid of Sub::Name we will revisit this (possibility is
+ # to use Devel::HideXS to force the pure-perl version
+ # or something like that)
+ 'List::Util' => '1.16',
+
# XS (or XS-dependent) libs
'Sub::Name' => '0.04',
# pure-perl (FatPack-able) libs
- 'Class::Accessor::Grouped' => '0.10002',
+ 'Class::Accessor::Grouped' => '0.10009',
'Class::C3::Componentised' => '1.0009',
'Class::Inspector' => '1.24',
- 'Class::Method::Modifiers' => '1.06',
'Config::Any' => '0.20',
'Context::Preserve' => '0.01',
'Data::Dumper::Concise' => '2.020',
'Data::Page' => '2.00',
+ 'Devel::GlobalDestruction' => '0.09',
'Hash::Merge' => '0.12',
- 'Moo' => '0.009014',
- 'MRO::Compat' => '0.09',
- 'Module::Find' => '0.06',
- 'namespace::clean' => '0.20',
+ 'Moo' => '1.000006',
+ 'MRO::Compat' => '0.12',
+ 'Module::Find' => '0.07',
+ 'namespace::clean' => '0.24',
'Path::Class' => '0.18',
'Scope::Guard' => '0.03',
- 'SQL::Abstract' => '1.72',
- 'Try::Tiny' => '0.04',
+ 'SQL::Abstract' => '1.73',
+ 'Try::Tiny' => '0.07',
- # dual-life corelibs needing a specific bugfixed version
- 'File::Path' => '2.07',
+ # Technically this is not a core dependency - it is only required
+ # by the MySQL codepath. However this particular version is bundled
+ # since 5.10.0 and is a pure-perl module anyway - let it slide
+ 'Text::Balanced' => '2.00',
};
my $build_requires = {
warn "\nIgnoring unrecognized option(s): @ARGV\n\n";
}
+ # We need the MM facilities to generate the pieces for the final MM run.
+  # Just instantiate a throwaway object here
+ my $mm_proto = ExtUtils::MakeMaker->new({
+ NORECURS => 1,
+ NAME => Meta->name || die 'The Module::Install metadata must be available at this point but is not - did you rearrange the Makefile.PL...?',
+ });
+
+ # Crutch for DISTBUILDING_IN_HELL
+ # Spits back a working dos2unix snippet to be used on the supplied path(s)
+ # Ironically EUMM's dos2unix is broken on win32 itself - it does
+ # not take into account the CRLF layer present on win32
+ my $crlf_fixup = sub {
+ return '' unless ($^O eq 'MSWin32' or $^O eq 'cygwin');
+ my $targets = join ', ', map { "q($_)" } @_;
+ "\t" . $mm_proto->oneliner( qq(\$ENV{PERLIO}='unix' and system( \$^X, qw( -MExtUtils::Command -e dos2unix -- ), $targets ) ) );
+ };
+
+ # we are in the process of (re)writing the makefile - some things we
+  # call below may very well fail
+ local $ENV{DBICTEST_NO_MAKEFILE_VERIFICATION} = 1;
+
require File::Spec;
# string-eval, not do(), because we need to provide the
- # $reqs and $*_requires lexicals to the included file
+ # $mm_proto, $reqs and $*_requires lexicals to the included file
# (some includes *do* modify $reqs above)
- for (sort glob ( File::Spec->catfile('maint', 'Makefile.PL.inc', '*') ) ) {
- eval scalar do { local (@ARGV, $/) = $_; <> }
- or die ($@ || $!);
+ for my $inc (sort glob ( File::Spec->catfile('maint', 'Makefile.PL.inc', '*') ) ) {
+ my $src = do { local (@ARGV, $/) = $inc; <> } or die $!;
+ eval "use warnings; use strict; $src" or die sprintf
+ "Failed execution of %s: %s\n",
+ $inc,
+ ($@ || $! || 'Unknown error'),
+ ;
}
}
else {
# make sure this Makefile can not be used to make a dist
# (without the author includes there is no meta cleanup, no sanity checking, etc.)
postamble <<EOP;
-.PHONY: nonauthor_stop_distdir_creation
create_distdir: nonauthor_stop_distdir_creation
nonauthor_stop_distdir_creation:
\t\$(NOECHO) \$(ECHO) Creation of dists in non-author mode is not allowed
--- /dev/null
+* a48693f4 adds 5 files for a test that may even be the same as that from
+571df676 - please rewrite using the existing schema and delete the rest
package MyDatabase::Main;
+
+use warnings;
+use strict;
+
use base qw/DBIx::Class::Schema/;
__PACKAGE__->load_namespaces;
CREATE TABLE artist (
artistid INTEGER PRIMARY KEY,
- name TEXT NOT NULL
+ name TEXT NOT NULL
);
CREATE TABLE track (
trackid INTEGER PRIMARY KEY,
cd INTEGER NOT NULL REFERENCES cd(cdid),
title TEXT NOT NULL
-);
\ No newline at end of file
+);
# $VERSION declaration must stay up here, ahead of any other package
# declarations, as to not confuse various modules attempting to determine
# this ones version, whether that be s.c.o. or Module::Metadata, etc
-$VERSION = '0.08196';
+$VERSION = '0.08206';
$VERSION = eval $VERSION if $VERSION =~ /_/; # numify for warning-free dev releases
package # hide from pause
DBIx::Class::_ENV_;
- if ($] < 5.009_005) {
- require MRO::Compat;
- *OLD_MRO = sub () { 1 };
- }
- else {
- require mro;
- *OLD_MRO = sub () { 0 };
- }
-
- # ::Runmode would only be loaded by DBICTest, which in turn implies t/
- *DBICTEST = eval { DBICTest::RunMode->is_author }
- ? sub () { 1 }
- : sub () { 0 }
- ;
+ use Config;
- # There was a brief period of p5p insanity when $@ was invisible in a DESTROY
- *INVISIBLE_DOLLAR_AT = ($] >= 5.013001 and $] <= 5.013007)
- ? sub () { 1 }
- : sub () { 0 }
- ;
+ use constant {
- # During 5.13 dev cycle HELEMs started to leak on copy
- *PEEPEENESS = (defined $ENV{DBICTEST_ALL_LEAKS}
- # request for all tests would force "non-leaky" illusion and vice-versa
- ? ! $ENV{DBICTEST_ALL_LEAKS}
+ # but of course
+ BROKEN_FORK => ($^O eq 'MSWin32') ? 1 : 0,
- # otherwise confess that this perl is busted ONLY on smokers
- : do {
- if (eval { DBICTest::RunMode->is_smoker }) {
+ HAS_ITHREADS => $Config{useithreads} ? 1 : 0,
- # leaky 5.13.6 (fixed in blead/cefd5c7c)
- if ($] == '5.013006') { 1 }
+ # ::Runmode would only be loaded by DBICTest, which in turn implies t/
+ DBICTEST => eval { DBICTest::RunMode->is_author } ? 1 : 0,
- # not sure why this one leaks, but disable anyway - ANDK seems to make it weep
- elsif ($] == '5.013005') { 1 }
-
- else { 0 }
- }
- else { 0 }
- }
- ) ? sub () { 1 } : sub () { 0 };
+ # During 5.13 dev cycle HELEMs started to leak on copy
+ PEEPEENESS =>
+ # request for all tests would force "non-leaky" illusion and vice-versa
+ defined $ENV{DBICTEST_ALL_LEAKS} ? !$ENV{DBICTEST_ALL_LEAKS}
+ # otherwise confess that this perl is busted ONLY on smokers
+ : eval { DBICTest::RunMode->is_smoker } && ($] >= 5.013005 and $] <= 5.013006) ? 1
+ # otherwise we are good
+ : 0
+ ,
+ };
+ if ($] < 5.009_005) {
+ require MRO::Compat;
+ constant->import( OLD_MRO => 1 );
+ }
+ else {
+ require mro;
+ constant->import( OLD_MRO => 0 );
+ }
}
use mro 'c3';
use base qw/DBIx::Class::Componentised DBIx::Class::AccessorGroup/;
use DBIx::Class::StartupCheck;
+use DBIx::Class::Exception;
__PACKAGE__->mk_group_accessors(inherited => '_skip_namespace_frames');
__PACKAGE__->_skip_namespace_frames('^DBIx::Class|^SQL::Abstract|^Try::Tiny|^Class::Accessor::Grouped|^Context::Preserve');
=over
-=item * Web Site: L<http://www.dbix-class.org/>
-
=item * IRC: irc.perl.org#dbix-class
=for html
=item * Mailing list: L<http://lists.scsys.co.uk/mailman/listinfo/dbix-class>
+=item * Twitter: L<http://www.twitter.com/dbix_class>
+
+=item * Web Site: L<http://www.dbix-class.org/>
+
=item * RT Bug Tracker: L<https://rt.cpan.org/Dist/Display.html?Queue=DBIx-Class>
-=item * gitweb: L<http://git.shadowcat.co.uk/gitweb/gitweb.cgi?p=dbsrgits/DBIx-Class.git>
+=back
+
+The project is maintained in a git repository, accessible from the following sources:
+
+=over
=item * git: L<git://git.shadowcat.co.uk/dbsrgits/DBIx-Class.git>
-=item * twitter L<http://www.twitter.com/dbix_class>
+=item * gitweb: L<http://git.shadowcat.co.uk/gitweb/gitweb.cgi?p=dbsrgits/DBIx-Class.git>
+
+=item * github mirror: L<https://github.com/dbsrgits/DBIx-Class>
+
+=item * authorized committers: L<ssh://dbsrgits@git.shadowcat.co.uk/DBIx-Class.git>
+
+=item * Travis-CI log: L<http://travis-ci.org/dbsrgits/dbix-class/builds>
+
+=for html
+<img src="https://secure.travis-ci.org/dbsrgits/dbix-class.png?branch=master" />
=back
my $cd = $millennium_cds_rs->next; # SELECT ... FROM cds JOIN artists ...
my $cd_artist_name = $cd->artist->name; # Already has the data so no 2nd query
- # new() makes a DBIx::Class::Row object but doesnt insert it into the DB.
+  # new() makes a Result object but doesn't insert it into the DB.
# create() is the same as new() then insert().
my $new_cd = $schema->resultset('CD')->new({ title => 'Spoon' });
$new_cd->artist($cd->artist);
Alexander Keusch <cpan@keusch.at>
+alexrj: Alessandro Ranellucci <aar@cpan.org>
+
alnewkirk: Al Newkirk <we@ana.im>
amiri: Amiri Barksdale <amiri@metalabel.com>
amoore: Andrew Moore <amoore@cpan.org>
+andrewalker: Andre Walker <andre@andrewalker.net>
+
andyg: Andy Grundman <andy@hybridized.org>
ank: Andres Kievsky
jhannah: Jay Hannah <jay@jays.net>
+jmac: Jason McIntosh <jmac@appleseed-sc.com>
+
jnapiorkowski: John Napiorkowski <jjn1056@yahoo.com>
jon: Jon Schutz <jjschutz@cpan.org>
milki: Jonathan Chu <milki@rescomp.berkeley.edu>
+mjemmeson: Michael Jemmeson <michael.jemmeson@gmail.com>
+
mstratman: Mark A. Stratman <stratman@gmail.com>
ned: Neil de Carteret
Robert Olson <bob@rdolson.org>
-Roman: Roman Filippov <romanf@cpan.org>
+moltar: Roman Filippov <romanf@cpan.org>
Sadrak: Felix Antonius Wilhelm Ostmann <sadrak@cpan.org>
use warnings;
use base qw/Class::Accessor::Grouped/;
-use Scalar::Util qw/weaken/;
+use Scalar::Util qw/weaken blessed/;
use namespace::clean;
my $successfully_loaded_components;
sub get_component_class {
my $class = $_[0]->get_inherited($_[1]);
+ # It's already an object, just go for it.
+ return $class if blessed $class;
+
if (defined $class and ! $successfully_loaded_components->{$class} ) {
$_[0]->ensure_class_loaded($class);
This class now exists in its own right on CPAN as Class::Accessor::Grouped
-=head1 AUTHORS
+=head1 AUTHOR AND CONTRIBUTORS
-Matt S. Trout <mst@shadowcatsystems.co.uk>
+See L<AUTHOR|DBIx::Class/AUTHOR> and L<CONTRIBUTORS|DBIx::Class/CONTRIBUTORS> in DBIx::Class
=head1 LICENSE
package # hide from PAUSE
DBIx::Class::Admin::Descriptive;
+use warnings;
+use strict;
use base 'Getopt::Long::Descriptive';
package # hide from PAUSE
DBIx::Class::Admin::Usage;
+use warnings;
+use strict;
use base 'Getopt::Long::Descriptive::Usage';
=back
-=head1 AUTHORS
+=head1 AUTHOR AND CONTRIBUTORS
-Matt S. Trout <mst@shadowcatsystems.co.uk>
+See L<AUTHOR|DBIx::Class/AUTHOR> and L<CONTRIBUTORS|DBIx::Class/CONTRIBUTORS> in DBIx::Class
=head1 LICENSE
-package DBIx::Class::Carp;
+package # hide from pause
+ DBIx::Class::Carp;
use strict;
use warnings;
## FIXME FIXME FIXME - something is tripping up V::M on 5.8.1, leading
# to segfaults. When n::c/B::H::EndOfScope is rewritten in terms of tie()
# see if this starts working
- unless DBIx::Class::_ENV_::BROKEN_NAMESPACE_CLEAN();
+ unless DBIx::Class::_ENV_::BROKEN_NAMESPACE_CLEAN;
}
sub unimport {
=back
-=head1 AUTHORS
+A better overview of the methods available in a Result class can be found
+in L<DBIx::Class::Manual::ResultClass>.
-Matt S. Trout <mst@shadowcatsystems.co.uk>
+=head1 AUTHOR AND CONTRIBUTORS
+
+See L<AUTHOR|DBIx::Class/AUTHOR> and L<CONTRIBUTORS|DBIx::Class/CONTRIBUTORS> in DBIx::Class
=head1 LICENSE
=head1 SYNOPSIS
my $cursor = $schema->resultset('CD')->cursor();
- my $first_cd = $cursor->next;
+
+ # raw values off the database handle in resultset columns/select order
+ my @next_cd_column_values = $cursor->next;
+
+ # list of all raw values as arrayrefs
+ my @all_cds_column_values = $cursor->all;
=head1 DESCRIPTION
Hidden.
-=begin hidden head2 storage
+=begin hidden
+
+=head2 storage
Sets or gets the storage backend. Defaults to L<DBIx::Class::Storage::DBI>.
=cut
-=begin hidden head2 class_resolver
+=begin hidden
+
+=head2 class_resolver
****DEPRECATED****
__PACKAGE__->mk_classdata('class_resolver' =>
'DBIx::Class::ClassResolver::PassThrough');
-=begin hidden head2 connection
+=begin hidden
+
+=head2 connection
__PACKAGE__->connection($dsn, $user, $pass, $attrs);
$class->schema_instance->connection(@info);
}
-=begin hidden head2 setup_schema_instance
+=begin hidden
+
+=head2 setup_schema_instance
Creates a class method ->schema_instance which contains a DBIx::Class::Schema;
all class-method operations are proxies through to this object. If you don't
$class->mk_classdata('schema_instance' => $schema);
}
-=begin hidden head2 txn_begin
+=begin hidden
+
+=head2 txn_begin
Begins a transaction (does nothing if AutoCommit is off).
sub txn_begin { shift->schema_instance->txn_begin(@_); }
-=begin hidden head2 txn_commit
+=begin hidden
+
+=head2 txn_commit
Commits the current transaction.
sub txn_commit { shift->schema_instance->txn_commit(@_); }
-=begin hidden head2 txn_rollback
+=begin hidden
+
+=head2 txn_rollback
Rolls back the current transaction.
sub txn_rollback { shift->schema_instance->txn_rollback(@_); }
-=begin hidden head2 txn_do
+=begin hidden
+
+=head2 txn_do
Executes a block of code transactionally. If this code reference
throws an exception, the transaction is rolled back and the exception
}
}
-=begin hidden head2 resultset_instance
+=begin hidden
+
+=head2 resultset_instance
Returns an instance of a resultset for this class - effectively
mapping the L<Class::DBI> connection-as-classdata paradigm into the
$_[0]->result_source_instance->resultset
}
-=begin hidden head2 result_source_instance
+=begin hidden
+
+=head2 result_source_instance
Returns an instance of the result source for this class
return $source;
}
-=begin hidden head2 resolve_class
+=begin hidden
+
+=head2 resolve_class
****DEPRECATED****
=end hidden
-=begin hidden head2 dbi_commit
+=begin hidden
+
+=head2 dbi_commit
****DEPRECATED****
=end hidden
-=begin hidden head2 dbi_rollback
+=begin hidden
+
+=head2 dbi_rollback
****DEPRECATED****
=end hidden
-=head1 AUTHORS
+=head1 AUTHOR AND CONTRIBUTORS
-Matt S. Trout <mst@shadowcatsystems.co.uk>
+See L<AUTHOR|DBIx::Class/AUTHOR> and L<CONTRIBUTORS|DBIx::Class/CONTRIBUTORS> in DBIx::Class
=head1 LICENSE
-You may distribute this code under the same terms as Perl itself.
+You may distribute this code under the same terms as Perl itself.
=cut
die shift;
}
-=head1 AUTHORS
+=head1 AUTHOR AND CONTRIBUTORS
-Brandon L. Black <blblack@gmail.com>
+See L<AUTHOR|DBIx::Class/AUTHOR> and L<CONTRIBUTORS|DBIx::Class/CONTRIBUTORS> in DBIx::Class
=head1 LICENSE
+++ /dev/null
-# This is just a concept-test. If works as intended will ship in its own
-# right as Devel::GlobalDestruction::PP or perhaps even as part of rafls
-# D::GD itself
-
-package # hide from pause
- DBIx::Class::GlobalDestruction;
-
-use strict;
-use warnings;
-
-use base 'Exporter';
-our @EXPORT = 'in_global_destruction';
-
-use DBIx::Class::Exception;
-
-if (defined ${^GLOBAL_PHASE}) {
- eval 'sub in_global_destruction () { ${^GLOBAL_PHASE} eq q[DESTRUCT] }';
-}
-elsif (eval { require Devel::GlobalDestruction }) { # use the XS version if available
- *in_global_destruction = \&Devel::GlobalDestruction::in_global_destruction;
-}
-else {
- my ($in_global_destruction, $before_is_installed);
-
- eval <<'PP_IGD';
-
-sub in_global_destruction () { $in_global_destruction }
-
-END {
- # SpeedyCGI runs END blocks every cycle but keeps object instances
- # hence we have to disable the globaldestroy hatch, and rely on the
- # eval traps (which appears to work, but are risky done so late)
- $in_global_destruction = 1 unless $CGI::SpeedyCGI::i_am_speedy;
-}
-
-# threads do not execute the global ENDs (it would be stupid). However
-# one can register a new END via simple string eval within a thread, and
-# achieve the same result. A logical place to do this would be CLONE, which
-# is claimed to run in the context of the new thread. However this does
-# not really seem to be the case - any END evaled in a CLONE is ignored :(
-# Hence blatantly hooking threads::create
-if ($INC{'threads.pm'}) {
- require Class::Method::Modifiers;
- Class::Method::Modifiers::install_modifier( threads => before => create => sub {
- my $orig_target_cref = $_[1];
- $_[1] = sub {
- { local $@; eval 'END { $in_global_destruction = 1 }' }
- $orig_target_cref->();
- };
- });
- $before_is_installed = 1;
-}
-
-# just in case threads got loaded after DBIC (silly)
-sub CLONE {
- DBIx::Class::Exception->throw("You must load the 'threads' module before @{[ __PACKAGE__ ]}")
- unless $before_is_installed;
-}
-
-PP_IGD
-
-}
-
-1;
=head1 SYNOPSIS
- # In your table classes
- __PACKAGE__->inflate_column('column_name', {
- inflate => sub { ... },
- deflate => sub { ... },
- });
+ # In your table classes
+ __PACKAGE__->inflate_column('column_name', {
+ inflate => sub {
+ my ($raw_value_from_db, $result_object) = @_;
+ ...
+ },
+ deflate => sub {
+ my ($inflated_value_from_user, $result_object) = @_;
+ ...
+ },
+ });
=head1 DESCRIPTION
corresponding table class using something like:
__PACKAGE__->inflate_column('insert_time', {
- inflate => sub { DateTime::Format::Pg->parse_datetime(shift); },
- deflate => sub { DateTime::Format::Pg->format_datetime(shift); },
+ inflate => sub {
+ my ($insert_time_raw_value, $event_result_object) = @_;
+ DateTime->from_epoch( epoch => $insert_time_raw_value );
+ },
+ deflate => sub {
+ my ($insert_time_dt_object, $event_result_object) = @_;
+ $insert_time_dt_object->epoch;
+ },
});
-(Replace L<DateTime::Format::Pg> with the appropriate module for your
-database, or consider L<DateTime::Format::DBI>.)
-
The coderefs you set for inflate and deflate are called with two parameters:
-the first is the value of the column to be inflated/deflated, the second is the
-row object itself. Thus you can call C<< ->result_source->schema->storage->dbh >> in your inflate/defalte subs, to feed to L<DateTime::Format::DBI>.
+the first is the value of the column to be inflated/deflated, the second is
+the result object itself.
In this example, calls to an event's C<insert_time> accessor return a
-L<DateTime> object. This L<DateTime> object is later "deflated" when
-used in the database layer.
+L<DateTime> object. This L<DateTime> object is later "deflated" back
+to the integer epoch representation when used in the database layer.
+For a much more thorough handling of the above example, please see
+L<DBIx::Class::DateTime::Epoch>
=cut
$parser->$method($value);
}
catch {
- $self->throw_exception ("Error while inflating ${value} for $info->{__dbic_colname} on ${self}: $_")
+ $self->throw_exception ("Error while inflating '$value' for $info->{__dbic_colname} on ${self}: $_")
unless $info->{datetime_undef_if_invalid};
undef; # rv
};
__PACKAGE__->table_class('DBIx::Class::ResultSource::View');
- # ->table, ->add_columns, etc.
+ # For the time being this is necessary even for virtual views
+ __PACKAGE__->table($view_name);
+
+ #
+ # ->add_columns, etc.
+ #
# do not attempt to deploy() this view
__PACKAGE__->result_source_instance->is_virtual(1);
my $rs = $cdrs->search({
year => {
'=' => $cdrs->search(
- { artist_id => { '=' => { -ident => 'me.artist_id' } } },
- { alias => 'inner' }
+ { artist_id => { -ident => 'me.artist_id' } },
+ { alias => 'sub_query' }
)->get_column('year')->max_rs->as_query,
},
});
SELECT me.cdid, me.artist, me.title, me.year, me.genreid, me.single_track
FROM cd me
- WHERE year = (
- SELECT MAX(inner.year)
- FROM cd inner
- WHERE artist_id = me.artist_id
- )
+ WHERE year = (
+ SELECT MAX(sub_query.year)
+ FROM cd sub_query
+ WHERE artist_id = me.artist_id
+ )
=head2 Predefined searches
See also L<SQL::Abstract/Literal SQL with placeholders and bind values
(subqueries)>.
+=head2 Software Limits
+
+When your RDBMS does not have a working SQL limit mechanism (e.g. Sybase ASE)
+and L<GenericSubQ|SQL::Abstract::Limit/GenericSubQ> is either too slow or does
+not work at all, you can try the
+L<software_limit|DBIx::Class::ResultSet/software_limit>
+L<resultset|DBIx::Class::ResultSet> attribute, which skips over records to
+simulate limits in the Perl layer.
+
+For example:
+
+ my $paged_rs = $rs->search({}, {
+ rows => 25,
+ page => 3,
+ order_by => [ 'me.last_name' ],
+ software_limit => 1,
+ });
+
+You can set it as a default for your schema by placing the following in your
+C<Schema.pm>:
+
+ __PACKAGE__->default_resultset_attributes({ software_limit => 1 });
+
+B<WARNING:> If you are dealing with large resultsets and your L<DBI> or
+ODBC/ADO driver does not have proper cursor support (i.e. it loads the whole
+resultset into memory) then this feature will be extremely slow and use huge
+amounts of memory at best. At worst it may cause your process to run out
+of memory and destabilize your server - beware!
+
=head1 JOINS AND PREFETCHING
=head2 Using joins and prefetch
=head1 ROW-LEVEL OPERATIONS
-=head2 Retrieving a row object's Schema
+=head2 Retrieving a result object's Schema
-It is possible to get a Schema object from a row object like so:
+It is possible to get a Schema object from a result object like so:
my $schema = $cd->result_source->schema;
# use the schema as normal:
Alternatively you can use L<DBIx::Class::DynamicSubclass> that implements
exactly the above functionality.
-=head2 Skip row object creation for faster results
+=head2 Skip result object creation for faster results
DBIx::Class is not built for speed, it's built for convenience and
ease of use, but sometimes you just need to get the data, and skip the
=head2 Creating a result set from a set of rows
-Sometimes you have a (set of) row objects that you want to put into a
+Sometimes you have a (set of) result objects that you want to put into a
resultset without the need to hit the DB again. You can do that by using the
L<set_cache|DBIx::Class::Resultset/set_cache> method:
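+
+A minimal sketch (the C<CD> source and the search condition are illustrative):
+
+  my @cds = $schema->resultset('CD')->search({ year => 2012 })->all;
+
+  my $cached_rs = $schema->resultset('CD');
+  $cached_rs->set_cache(\@cds);  # iterating $cached_rs hits no DB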
For example, say that you have three columns, C<id>, C<number>, and
C<squared>. You would like to make changes to C<number> and have
C<squared> be automagically set to the value of C<number> squared.
-You can accomplish this by wrapping the C<number> accessor with
-L<Class::Method::Modifiers>:
+You can accomplish this by wrapping the C<number> accessor with the C<around>
+method modifier, available through either L<Class::Method::Modifiers>,
+L<Moose|Moose::Manual::MethodModifiers>, or L<Moose-like|Moo> modules:
around number => sub {
my ($orig, $self) = (shift, shift);
}
$self->$orig(@_);
- }
+ };
Note that the hard work is done by the call to C<< $self->$orig >>, which
redispatches your call to store_column in the superclass(es).
=item *
Use L<populate|DBIx::Class::ResultSet/populate> in void context to insert data
-when you don't need the resulting L<DBIx::Class::Row> objects, if possible, but
-see the caveats.
+when you don't need the resulting L<result|DBIx::Class::Manual::ResultClass> objects,
+if possible, but see the caveats.
When inserting many rows, for best results, populate a large number of rows at a
time, but not so large that the table is locked for an unacceptably long time.
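+
+For instance, a minimal sketch (the source and column names are illustrative):
+
+  # void context engages the fast codepath - no result objects are built
+  $schema->resultset('Artist')->populate([
+    [ 'name' ],
+    map { [ $_ ] } @artist_names,
+  ]);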
=item .. insert a row with an auto incrementing primary key?
This happens automatically. After
-L<creating|DBIx::Class::ResultSet/create> a row object, the primary
+L<creating|DBIx::Class::ResultSet/create> a result object, the primary
key value created by your database can be fetched by calling C<id> (or
the accessor of your primary key column) on the object.
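+
+For example, a minimal sketch (assuming an C<Artist> source with an
+auto-incrementing C<artistid> primary key):
+
+  my $artist = $schema->resultset('Artist')->create({ name => 'Fred' });
+  my $new_id = $artist->id;  # filled in by the database during the insert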
if you create a resultset using C<search> in scalar context, no query
is executed. You can create further resultset refinements by calling
search again or relationship accessors. The SQL query is only run when
-you ask the resultset for an actual row object.
+you ask the resultset for an actual result object.
=item How do I deal with tables that lack a primary key?
=item How do I reduce the overhead of database queries?
You can reduce the overhead of object creation within L<DBIx::Class>
-using the tips in L<DBIx::Class::Manual::Cookbook/"Skip row object creation for faster results">
+using the tips in L<DBIx::Class::Manual::Cookbook/"Skip result object creation for faster results">
and L<DBIx::Class::Manual::Cookbook/"Get raw data for blindingly fast results">
=item How do I override a run time method (e.g. a relationship accessor)?
=head2 Result class
A Result class defines both a source of data (usually one per table),
-and the methods that will be available in the L</Row> objects created
-using that source.
+and the methods that will be available in the L</Result> objects
+created using that source.
One Result class is needed per data source (table, view, query) used
in your application; they should inherit from L<DBIx::Class::Core>.
+See also: L<DBIx::Class::Manual::ResultClass>
+
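+A minimal sketch of one (the table and column names are illustrative):
+
+  package My::Schema::Result::Artist;
+  use base 'DBIx::Class::Core';
+
+  __PACKAGE__->table('artist');
+  __PACKAGE__->add_columns(qw/ artistid name /);
+  __PACKAGE__->set_primary_key('artistid');
+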
=head2 ResultSource
ResultSource objects represent the source of your data, these are
=head2 Record
-See Row.
+See Result.
=head2 Row
-Row objects contain your actual data. They are returned from ResultSet objects.
+See Result.
+
+=head2 Result
+
+Result objects contain your actual data. They are returned from
+ResultSet objects. These are sometimes (incorrectly) called
+row objects, including in older versions of the DBIC documentation.
+
+See also: L<DBIx::Class::Manual::ResultClass>
=head2 Object
-See Row.
+See Result.
=head2 join
+See Join.
+
=head2 prefetch
+Similar to a join, except the related result objects are fetched and
+cached for future use instead of being used directly from the ResultSet. This
+allows you to jump to different relationships within a Result without
+worrying about generating a ton of extra SELECT statements.
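+
+For example, a minimal sketch (assuming a C<CD> source with an C<artist>
+relationship):
+
+  # one query fetches the CDs and their artists
+  my $rs = $schema->resultset('CD')->search({}, { prefetch => 'artist' });
+
+  while (my $cd = $rs->next) {
+    print $cd->artist->name . "\n";  # no additional SELECT is issued
+  }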
=head1 SQL TERMS
+=head2 CRUD
+
+Create, Read, Update, Delete. A general concept of something that can
+do all four operations (INSERT, SELECT, UPDATE, DELETE), usually at a
+row-level.
+
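+As a rough DBIC illustration (C<$rs> is a hypothetical C<Artist> resultset):
+
+  my $artist = $rs->create({ name => 'Fred' });  # Create (INSERT)
+  my $found  = $rs->find($artist->id);           # Read   (SELECT)
+  $found->update({ name => 'Freddy' });          # Update (UPDATE)
+  $found->delete;                                # Delete (DELETE)
+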
=head2 Join
This is an SQL keyword; it is used to link multiple tables in one SQL
=head2 Related data
In SQL, related data actually refers to data that are normalised into
-the same table. (Yes. DBIC does mis-use this term).
+the same table. (Yes. DBIC does mis-use this term.)
+
+=head1 AUTHOR AND CONTRIBUTORS
+
+See L<AUTHOR|DBIx::Class/AUTHOR> and L<CONTRIBUTORS|DBIx::Class/CONTRIBUTORS> in DBIx::Class
+
+=head1 LICENSE
+
+You may distribute this code under the same terms as Perl itself.
=head2 Search results are returned as Rows
Rows of the search from the database are blessed into
-L<DBIx::Class::Row> objects.
+L<Result|DBIx::Class::Manual::ResultClass> objects.
=head1 SETTING UP DBIx::Class
size => 16,
is_nullable => 0,
is_auto_increment => 1,
- default_value => '',
},
artist =>
{ data_type => 'integer',
size => 16,
is_nullable => 0,
- is_auto_increment => 0,
- default_value => '',
},
title =>
{ data_type => 'varchar',
size => 256,
is_nullable => 0,
- is_auto_increment => 0,
- default_value => '',
},
rank =>
{ data_type => 'integer',
size => 16,
is_nullable => 0,
- is_auto_increment => 0,
- default_value => '',
+ default_value => 0,
}
);
DBIx::Class doesn't directly use most of this data yet, but various related
-modules such as L<DBIx::Class::WebForm> make use of it. Also it allows you to
-create your database tables from your Schema, instead of the other way around.
+modules such as L<HTML::FormHandler::Model::DBIC> make use of it.
+Also it allows you to create your database tables from your Schema,
+instead of the other way around.
See L<DBIx::Class::Schema/deploy> for details.
See L<DBIx::Class::ResultSource> for more details of the possible column
my @albums = My::Schema->resultset('Album')->search(
{ artist => 'Bob Marley' },
- { rows => 2, order_by => 'year DESC' }
+ { rows => 2, order_by => { -desc => 'year' } }
);
C<@albums> then holds the two most recent Bob Marley albums.
on all your result sources B<even if the underlying RDBMS does not have one>.
In a pinch one can always declare each row identifiable by all its columns:
- __PACKAGE__->set_primary_keys (__PACKAGE__->columns);
+ __PACKAGE__->set_primary_key(__PACKAGE__->columns);
Note that DBIx::Class is smart enough to store a copy of the PK values before
any row-object changes take place, so even if you change the values of PK
=item *
+L<$obj|DBIx::Class> - Reference to the source class or object definition
+
+All arguments and return values should provide a link to the object's
+class documentation or definition, even if it's the same class as the current
+documentation. For example:
+
+ ## Correct, if stated within DBIx::Class::ResultSet
+ L<$resultset|/new>
+
+ ## Correct, if stated outside DBIx::Class::ResultSet
+ L<$resultset|DBIx::Class::ResultSet>
+
+=item *
+
? - Optional, should be placed after the argument type and name.
## Correct
=back
-The second item starts with the text "Return value:". The remainder of
-the line is either the text "undefined", a text describing the result of
-the method, or a variable with a descriptive name.
+The second item starts with the text "Return Value:". The remainder of
+the line is either the text "not defined" or a variable with a descriptive
+name.
## Good examples
- =item Return value: undefined
- =item Return value: A schema object
- =item Return value: $classname
+ =item Return Value: not defined
+ =item Return Value: L<$schema|DBIx::Class::Schema>
+ =item Return Value: $classname
## Bad examples
- =item Return value: The names
+ =item Return Value: The names
-"undefined" means the method does not deliberately return a value, and
-the caller should not use or rely on anything it does return. (Perl
+"not defined" means the method does not deliberately return a value, and
+the caller should not use or rely on anything it does return. (Perl
functions always return something, usually the result of the last code
-statement, if there is no explicit return statement.)
+statement, if there is no explicit return statement.) This is different
+from specifying "undef", which means that the method explicitly returns undef,
+though usually this is used as an alternate return (like C<$obj | undef>).
=item *
-The argument list is followed by a single paragraph describing what
+The argument/return list is followed by a single paragraph describing what
the method does.
=item *
=item *
-The argument list is followed by some examples of how to use the
+The argument/return list is followed by some examples of how to use the
method, using its various types of arguments.
The examples can also include ways to use the results if
=back
-=head1 AUTHORS
+=head1 AUTHOR AND CONTRIBUTORS
-see L<DBIx::Class>
+See L<AUTHOR|DBIx::Class/AUTHOR> and L<CONTRIBUTORS|DBIx::Class/CONTRIBUTORS> in DBIx::Class
=head1 LICENSE
You may distribute this code under the same terms as Perl itself.
=cut
-
-
-
--- /dev/null
+#
+# This is what eventually becomes lib/DBIx/Class/Manual/ResultClass.pod
+# Courtesy of maint/gen_pod_inherit and Pod::Inherit
+#
+
+=head1 NAME
+
+DBIx::Class::Manual::ResultClass - Representing a single result (row) from
+a DB query
+
+=head1 SYNOPSIS
+
+ package My::Schema::Result::Track;
+
+ use parent 'DBIx::Class::Core';
+
+ __PACKAGE__->table('tracks');
+
+ __PACKAGE__->add_columns({
+ id => {
+ data_type => 'int',
+ is_auto_increment => 1,
+ },
+ cd_id => {
+ data_type => 'int',
+ },
+ title => {
+ data_type => 'varchar',
+ size => 50,
+ },
+ rank => {
+ data_type => 'int',
+ is_nullable => 1,
+ },
+ });
+
+ __PACKAGE__->set_primary_key('id');
+ __PACKAGE__->add_unique_constraint(u_title => ['cd_id', 'title']);
+
+=head1 DESCRIPTION
+
+In L<DBIx::Class>, a user normally receives query results as instances of a
+certain C<Result Class>, depending on the main query source. Besides being
+the primary "toolset" for interaction with your data, a C<Result Class> also
+serves to establish source metadata, which is then used during initialization
+of your L<DBIx::Class::Schema> instance.
+
+Because of these multiple seemingly conflicting purposes, it is hard to
+aggregate the documentation of various methods available on a typical
+C<Result Class>. This document serves as a general overview of C<Result Class>
+declaration best practices, and offers an index of the available methods
+(and the Components/Roles which provide them).
+
+=head1 AUTHOR AND CONTRIBUTORS
+
+See L<AUTHOR|DBIx::Class/AUTHOR> and L<CONTRIBUTORS|DBIx::Class/CONTRIBUTORS> in DBIx::Class
+
+=head1 LICENSE
+
+You may distribute this code under the same terms as Perl itself.
This issue is due to perl doing an exhaustive search of blessed objects
under certain circumstances. The problem shows up as performance
-degradation exponential to the number of L<DBIx::Class> row objects in
+degradation exponential to the number of L<DBIx::Class> result objects in
memory, so can be unnoticeable with certain data sets, but with huge
performance impacts on other datasets.
It has been observed, using L<DBD::ODBC>, that creating a L<DBIx::Class::Row>
object which includes a column of data type TEXT/BLOB/etc. will allocate
LongReadLen bytes. This allocation does not leak, but if LongReadLen
-is large in size, and many such row objects are created, e.g. as the
+is large in size, and many such result objects are created, e.g. as the
output of a ResultSet query, the memory footprint of the Perl interpreter
can grow very large.
'namespace::autoclean' => '0.09',
};
+my $admin_script = {
+ %$moose_basic,
+ %$admin_basic,
+ 'Getopt::Long::Descriptive' => '0.081',
+ 'Text::CSV' => '1.16',
+};
+
my $datetime_basic = {
'DateTime' => '0.55',
'DateTime::Format::Strptime' => '1.2',
};
my $reqs = {
- dist => {
- #'Module::Install::Pod::Inherit' => '0.01',
+ dist_podinherit => {
+ req => {
+ 'Pod::Inherit' => '0.90',
+ 'Pod::Tree' => '0',
+ }
},
replicated => {
admin_script => {
req => {
- %$moose_basic,
- %$admin_basic,
- 'Getopt::Long::Descriptive' => '0.081',
- 'Text::CSV' => '1.16',
+ %$admin_script,
},
pod => {
title => 'dbicadmin',
deploy => {
req => {
- 'SQL::Translator' => '0.11006',
+ 'SQL::Translator' => '0.11016',
},
pod => {
title => 'Storage::DBI::deploy()',
},
},
- test_notabs => {
+ test_whitespace => {
req => {
+ 'Test::EOL' => '1.0',
'Test::NoTabs' => '0.9',
},
},
- test_eol => {
+ test_strictures => {
req => {
- 'Test::EOL' => '1.0',
+ 'Test::Strict' => '0.16',
},
},
req => $json_any,
},
+ test_admin_script => {
+ req => {
+ %$admin_script,
+ 'JSON' => 0,
+ 'JSON::XS' => 0,
+ $^O eq 'MSWin32'
+ # for t/admin/10script.t
+ ? ('Win32::ShellQuote' => 0)
+ # DWIW does not compile (./configure even) on win32
+ : ('JSON::DWIW' => 0 )
+ ,
+ }
+ },
+
test_leaks => {
req => {
'Test::Memory::Cycle' => '0',
rdbms_pg => {
req => {
+ # when changing this list make sure to adjust xt/optional_deps.t
%$rdbms_pg,
},
pod => {
req => {
$ENV{DBICTEST_PG_DSN}
? (
+ # when changing this list make sure to adjust xt/optional_deps.t
%$rdbms_pg,
($^O ne 'MSWin32' ? ('Sys::SigAction' => '0') : ()),
'DBD::Pg' => '2.009002',
# This is to be called by the author only (automatically in Makefile.PL)
sub _gen_pod {
- my ($class, $distver) = @_;
-
- my $modfn = __PACKAGE__ . '.pm';
- $modfn =~ s/\:\:/\//g;
+ my ($class, $distver, $pod_dir) = @_;
- my $podfn = __FILE__;
- $podfn =~ s/\.pm$/\.pod/;
+ die "No POD root dir supplied" unless $pod_dir;
$distver ||=
eval { require DBIx::Class; DBIx::Class->VERSION; }
"\n\n---------------------------------------------------------------------\n"
;
+ # do not ask for a recent version, use 1.x API calls
+ # this *may* execute on a smoker with old perl or whatnot
+ require File::Path;
+
+ (my $modfn = __PACKAGE__ . '.pm') =~ s|::|/|g;
+
+ (my $podfn = "$pod_dir/$modfn") =~ s/\.pm$/\.pod/;
+ (my $dir = $podfn) =~ s|/[^/]+$||;
+
+ File::Path::mkpath([$dir]);
+
my $sqltver = $class->req_list_for ('deploy')->{'SQL::Translator'}
or die "Hrmm? No sqlt dep?";
my @chunks = (
- <<'EOC',
+ <<"EOC",
#########################################################################
##################### A U T O G E N E R A T E D ########################
#########################################################################
'=head2 req_group_list',
'=over',
'=item Arguments: none',
- '=item Returns: \%list_of_requirement_groups',
+ '=item Return Value: \%list_of_requirement_groups',
'=back',
<<'EOD',
This method should be used by DBIx::Class packagers, to get a hashref of all
'=head2 req_list_for',
'=over',
'=item Arguments: $group_name',
- '=item Returns: \%list_of_module_version_pairs',
+ '=item Return Value: \%list_of_module_version_pairs',
'=back',
<<'EOD',
This method should be used by DBIx::Class extension authors, to determine the
'=head2 req_ok_for',
'=over',
'=item Arguments: $group_name',
- '=item Returns: 1|0',
+ '=item Return Value: 1|0',
'=back',
<<'EOD',
Returns true or false depending on whether all modules required by
'=head2 req_missing_for',
'=over',
'=item Arguments: $group_name',
- '=item Returns: $error_message_string',
+ '=item Return Value: $error_message_string',
'=back',
<<"EOD",
Returns a single line string suitable for inclusion in larger error messages.
'=head2 req_errorlist_for',
'=over',
'=item Arguments: $group_name',
- '=item Returns: \%list_of_loaderrors_per_module',
+ '=item Return Value: \%list_of_loaderrors_per_module',
'=back',
<<'EOD',
Returns a hashref containing the actual errors that occurred while attempting
open (my $fh, '>', $podfn) or Carp::croak "Unable to write to $podfn: $!";
print $fh join ("\n\n", @chunks);
+ print $fh "\n";
close ($fh);
}
return defined $lsib ? $lsib : 0;
}
-# an optimized method to get the last sibling position value without inflating a row object
+# an optimized method to get the last sibling position value without inflating a result object
sub _last_sibling_posval {
my $self = shift;
my $position_column = $self->position_column;
$ord = 'desc';
}
- $self->_group_rs
- ->search ({ $position_column => { -between => \@between } })
- ->update ({ $position_column => \ "$position_column $op 1" } );
+  my $shift_rs = $self->_group_rs->search ({ $position_column => { -between => \@between } });
+
+ # some databases (sqlite, pg, perhaps others) are dumb and can not do a
+ # blanket increment/decrement without violating a unique constraint.
+ # So what we do here is check if the position column is part of a unique
+ # constraint, and do a one-by-one update if this is the case.
+ my $rsrc = $self->result_source;
+
+ # set in case there are more cascades combined with $rs->update => $rs_update_all overrides
+ local $rsrc->schema->{_ORDERED_INTERNAL_UPDATE} = 1;
+ my @pcols = $rsrc->primary_columns;
+ if (
+ first { $_ eq $position_column } ( map { @$_ } (values %{{ $rsrc->unique_constraints }} ) )
+ ) {
+ my $cursor = $shift_rs->search (
+ {}, { order_by => { "-$ord", $position_column }, select => [$position_column, @pcols] }
+ )->cursor;
+ my $rs = $rsrc->resultset;
+
+ my @all_data = $cursor->all;
+ while (my $data = shift @all_data) {
+ my $pos = shift @$data;
+ my $cond;
+ for my $i (0.. $#pcols) {
+ $cond->{$pcols[$i]} = $data->[$i];
+ }
+
+ $rs->find($cond)->update ({ $position_column => $pos + ( ($op eq '+') ? 1 : -1 ) });
+ }
+ }
+ else {
+ $shift_rs->update ({ $position_column => \ "$position_column $op 1" } );
+ }
}
if (@missing && $self->in_storage) {
$self->throw_exception (
- 'Unable to uniquely identify row object with missing PK columns: '
+ 'Unable to uniquely identify result object with missing PK columns: '
. join (', ', @missing )
);
}
=head2 ID
-Returns a unique id string identifying a row object by primary key.
+Returns a unique id string identifying a result object by primary key.
Used by L<DBIx::Class::CDBICompat::LiveObjectIndex> and
L<DBIx::Class::ObjectCache>.
if (@undef && $self->in_storage) {
$self->throw_exception (
- 'Unable to construct row object identity condition due to NULL PK columns: '
+ 'Unable to construct result object identity condition due to NULL PK columns: '
. join (', ', @undef)
);
}
1;
-=head1 AUTHORS
+=head1 AUTHOR AND CONTRIBUTORS
-Matt S. Trout <mst@shadowcatsystems.co.uk>
+See L<AUTHOR|DBIx::Class/AUTHOR> and L<CONTRIBUTORS|DBIx::Class/CONTRIBUTORS> in DBIx::Class
=head1 LICENSE
The code that was handled here is now in ResultSource, and is being proxied to
Row as well.
-=head1 AUTHORS
+=head1 AUTHOR AND CONTRIBUTORS
-Matt S. Trout <mst@shadowcatsystems.co.uk>
+See L<AUTHOR|DBIx::Class/AUTHOR> and L<CONTRIBUTORS|DBIx::Class/CONTRIBUTORS> in DBIx::Class
=head1 LICENSE
Just load PK::Auto instead; auto-inc is now handled by Storage.
-=head1 AUTHORS
+=head1 AUTHOR AND CONTRIBUTORS
-Matt S Trout <mst@shadowcatsystems.co.uk>
+See L<AUTHOR|DBIx::Class/AUTHOR> and L<CONTRIBUTORS|DBIx::Class/CONTRIBUTORS> in DBIx::Class
=head1 LICENSE
Just load PK::Auto instead; auto-inc is now handled by Storage.
-=head1 AUTHORS
+=head1 AUTHOR AND CONTRIBUTORS
-Matt S Trout <mst@shadowcatsystems.co.uk>
+See L<AUTHOR|DBIx::Class/AUTHOR> and L<CONTRIBUTORS|DBIx::Class/CONTRIBUTORS> in DBIx::Class
=head1 LICENSE
Just load PK::Auto instead; auto-inc is now handled by Storage.
-=head1 AUTHORS
+=head1 AUTHOR AND CONTRIBUTORS
-Matt S Trout <mst@shadowcatsystems.co.uk>
+See L<AUTHOR|DBIx::Class/AUTHOR> and L<CONTRIBUTORS|DBIx::Class/CONTRIBUTORS> in DBIx::Class
=head1 LICENSE
Just load PK::Auto instead; auto-inc is now handled by Storage.
-=head1 AUTHORS
+=head1 AUTHOR AND CONTRIBUTORS
-Matt S Trout <mst@shadowcatsystems.co.uk>
+See L<AUTHOR|DBIx::Class/AUTHOR> and L<CONTRIBUTORS|DBIx::Class/CONTRIBUTORS> in DBIx::Class
=head1 LICENSE
Just load PK::Auto instead; auto-inc is now handled by Storage.
-=head1 AUTHORS
+=head1 AUTHOR AND CONTRIBUTORS
-Matt S Trout <mst@shadowcatsystems.co.uk>
+See L<AUTHOR|DBIx::Class/AUTHOR> and L<CONTRIBUTORS|DBIx::Class/CONTRIBUTORS> in DBIx::Class
=head1 LICENSE
Just load PK::Auto instead; auto-inc is now handled by Storage.
-=head1 AUTHORS
+=head1 AUTHOR AND CONTRIBUTORS
-Matt S Trout <mst@shadowcatsystems.co.uk>
+See L<AUTHOR|DBIx::Class/AUTHOR> and L<CONTRIBUTORS|DBIx::Class/CONTRIBUTORS> in DBIx::Class
=head1 LICENSE
L<DBIx::Class::Manual::Glossary/"Row"> objects that represent the items
of your table. From L<DBIx::Class::Manual::Glossary/"ResultSet"> objects,
the relationships can be searched using the "search_related" method.
-In list context, each returns a list of Row objects for the related class,
+In list context, each returns a list of Result objects for the related class,
in scalar context, a new ResultSet representing the joined tables is
returned. Thus, the calls can be chained to produce complex queries.
Since the database is not actually queried until you attempt to retrieve
=item accessor_name
This argument is the name of the method you can call on a
-L<DBIx::Class::Row> object to retrieve the instance of the foreign
+L<Result|DBIx::Class::Manual::ResultClass> object to retrieve the instance of the foreign
class matching this relationship. This is often called the
C<relation(ship) name>.
=over 4
-=item Arguments: $accessor_name, $related_class, $their_fk_column|\%cond|\@cond|\&cond?, \%attrs?
+=item Arguments: $accessor_name, $related_class, $their_fk_column|\%cond|\@cond|\&cond?, L<\%attrs?|DBIx::Class::ResultSet/ATTRIBUTES>
=back
=item accessor_name
This argument is the name of the method you can call on a
-L<DBIx::Class::Row> object to retrieve a resultset of the related
-class restricted to the ones related to the row object. In list
-context it returns the row objects. This is often called the
+L<Result|DBIx::Class::Manual::ResultClass> object to retrieve a resultset of the related
+class restricted to the ones related to the result object. In list
+context it returns the result objects. This is often called the
C<relation(ship) name>.
Use this accessor_name in L<DBIx::Class::ResultSet/join>
=over 4
-=item Arguments: $accessor_name, $related_class, $their_fk_column|\%cond|\@cond|\&cond?, \%attrs?
+=item Arguments: $accessor_name, $related_class, $their_fk_column|\%cond|\@cond|\&cond?, L<\%attrs?|DBIx::Class::ResultSet/ATTRIBUTES>
=back
=item accessor_name
This argument is the name of the method you can call on a
-L<DBIx::Class::Row> object to retrieve the instance of the foreign
+L<Result|DBIx::Class::Manual::ResultClass> object to retrieve the instance of the foreign
class matching this relationship. This is often called the
C<relation(ship) name>.
=over 4
-=item Arguments: $accessor_name, $related_class, $their_fk_column|\%cond|\@cond|\&cond?, \%attrs?
+=item Arguments: $accessor_name, $related_class, $their_fk_column|\%cond|\@cond|\&cond?, L<\%attrs?|DBIx::Class::ResultSet/ATTRIBUTES>
=back
=item accessor_name
This argument is the name of the method you can call on a
-L<DBIx::Class::Row> object to retrieve the instance of the foreign
+L<Result|DBIx::Class::Manual::ResultClass> object to retrieve the instance of the foreign
class matching this relationship. This is often called the
C<relation(ship) name>.
=over 4
-=item Arguments: $accessor_name, $link_rel_name, $foreign_rel_name, \%attrs?
+=item Arguments: $accessor_name, $link_rel_name, $foreign_rel_name, L<\%attrs?|DBIx::Class::ResultSet/ATTRIBUTES>
=back
=item accessor_name
This argument is the name of the method you can call on a
-L<DBIx::Class::Row> object to retrieve the rows matching this
+L<Result|DBIx::Class::Manual::ResultClass> object to retrieve the rows matching this
relationship.
On a many_to_many, unlike other relationships, this cannot be used in
1;
-=head1 AUTHORS
+=head1 AUTHOR AND CONTRIBUTORS
-see L<DBIx::Class>
+See L<AUTHOR|DBIx::Class/AUTHOR> and L<CONTRIBUTORS|DBIx::Class/CONTRIBUTORS> in DBIx::Class
=head1 LICENSE
}
};
} elsif ($acc_type eq 'filter') {
- $class->throw_exception("No such column $rel to filter")
+ $class->throw_exception("No such column '$rel' to filter")
unless $class->has_column($rel);
my $f_class = $class->relationship_info($rel)->{class};
$class->inflate_column($rel,
},
deflate => sub {
my ($val, $self) = @_;
- $self->throw_exception("$val isn't a $f_class") unless $val->isa($f_class);
+ $self->throw_exception("'$val' isn't a $f_class") unless $val->isa($f_class);
return ($val->_ident_values)[0];
# WARNING: probably breaks for multi-pri sometimes. FIXME
}
$meth{"${rel}_rs"} = sub { shift->search_related_rs($rel, @_) };
$meth{"add_to_${rel}"} = sub { shift->create_related($rel, @_); };
} else {
- $class->throw_exception("No such relationship accessor type $acc_type");
+ $class->throw_exception("No such relationship accessor type '$acc_type'");
}
{
no strict 'refs';
While every coderef-based condition must return a valid C<ON> clause, it may
elect to additionally return a simplified join-free condition hashref when
-invoked as C<< $row_object->relationship >>, as opposed to
-C<< $rs->related_resultset('relationship') >>. In this case C<$row_object> is
+invoked as C<< $result->relationship >>, as opposed to
+C<< $rs->related_resultset('relationship') >>. In this case C<$result> is
passed to the coderef as C<< $args->{self_rowobj} >>, so a user can do the
following:
metadata. Currently the supplied coderef is executed as:
$relationship_info->{cond}->({
- self_alias => The alias of the invoking resultset ('me' in case of a row object),
+ self_alias => The alias of the invoking resultset ('me' in case of a result object),
foreign_alias => The alias of the to-be-joined resultset (often matches relname),
self_resultsource => The invocant's resultsource,
foreign_relname => The relationship name (does *not* always match foreign_alias),
- self_rowobj => The invocant itself in case of $row_obj->relationship
+ self_rowobj => The invocant itself in case of a $result_object->$relationship call
});
=head3 attributes
=item proxy =E<gt> $column | \@columns | \%column
+The 'proxy' attribute can be used to retrieve values, and to perform
+updates if the relationship has 'cascade_update' set. The 'might_have'
+and 'has_one' relationships have this set by default; if you want a proxy
+to update across a 'belongs_to' relationship, you must set the attribute
+yourself.
+
=over 4
=item \@columns
$cd->notes('Notes go here'); # set notes -- LinerNotes object is
# created if it doesn't exist
+For a 'belongs_to' relationship, note the 'cascade_update':
+
+  MyApp::Schema::Track->belongs_to( cd => 'MyApp::Schema::CD', 'cd',
+ { proxy => ['title'], cascade_update => 1 }
+ );
+ $track->title('New Title');
+ $track->update; # updates title in CD
+
=item \%column
A hashref where each key is the accessor you want installed in the main class,
proxy => { cd_title => 'title' },
});
-This will create an accessor named C<cd_title> on the C<$track> row object.
+This will create an accessor named C<cd_title> on the C<$track> result object.
=back
per-relationship basis by supplying C<< cascade_update => 0 >> in
the relationship attributes.
+The C<belongs_to> relationship does not cascade updates by default, so
+if you have a 'proxy' attribute on a belongs_to relationship and want to
+use 'update' on it, you must set C<< cascade_update => 1 >>.
+
This is not a RDMS style cascade update - it purely means that when
an object has update called on it, all the related objects also
have update called. It will not change foreign keys automatically -
=over 4
-=item Arguments: $relname, $rel_info
+=item Arguments: $rel_name, $rel_info
=back
=over 4
-=item Arguments: $relationship_name
+=item Arguments: $rel_name
-=item Return Value: $related_resultset
+=item Return Value: L<$related_resultset|DBIx::Class::ResultSet>
=back
$rs = $cd->related_resultset('artist');
Returns a L<DBIx::Class::ResultSet> for the relationship named
-$relationship_name.
+$rel_name.
+
+=head2 $relationship_accessor
+
+=over 4
+
+=item Arguments: none
+
+=item Return Value: L<$result|DBIx::Class::Manual::ResultClass> | L<$related_resultset|DBIx::Class::ResultSet> | undef
+
+=back
+
+ # These pairs do the same thing
+  $result = $cd->related_resultset('artist')->single; # belongs_to relationship
+  $result = $cd->artist;
+ $rs = $cd->related_resultset('tracks'); # has_many relationship
+ $rs = $cd->tracks;
+
+This is the recommended way to traverse through relationships, based
+on the L</accessor> name given in the relationship definition.
+
+This will return either a L<Result|DBIx::Class::Manual::ResultClass> or a
+L<ResultSet|DBIx::Class::ResultSet>, depending on whether the relationship is
+C<single> (returns only one row) or C<multi> (returns many rows). The
+method may also return C<undef> if the relationship doesn't exist for
+this instance (like in the case of C<might_have> relationships).
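+
+For example, a C<might_have> accessor needs a definedness check before use
+(a sketch, assuming a hypothetical C<liner_notes> might_have relationship,
+with a C<content> column, on the CD class):
+
+  if ( my $notes = $cd->liner_notes ) {  # undef when no related row exists
+    print $notes->content;
+  }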
=cut
unless ref $self;
my $rel = shift;
my $rel_info = $self->relationship_info($rel);
- $self->throw_exception( "No such relationship ${rel}" )
+ $self->throw_exception( "No such relationship '$rel'" )
unless $rel_info;
return $self->{related_resultsets}{$rel} ||= do {
# keep in mind that the following if() block is part of a do{} - no return()s!!!
if ($is_crosstable) {
$self->throw_exception (
- "A cross-table relationship condition returned for statically declared '$rel'")
- unless ref $rel_info->{cond} eq 'CODE';
+ "A cross-table relationship condition returned for statically declared '$rel'"
+ ) unless ref $rel_info->{cond} eq 'CODE';
# A WHOREIFFIC hack to reinvoke the entire condition resolution
# with the correct alias. Another way of doing this involves a
=head2 search_related
- @objects = $rs->search_related('relname', $cond, $attrs);
- $objects_rs = $rs->search_related('relname', $cond, $attrs);
+=over 4
+
+=item Arguments: $rel_name, $cond?, L<\%attrs?|DBIx::Class::ResultSet/ATTRIBUTES>
+
+=item Return Value: L<$resultset|DBIx::Class::ResultSet> (scalar context) | L<@result_objs|DBIx::Class::Manual::ResultClass> (list context)
+
+=back
Run a search on a related resultset. The search will be restricted to the
-item or items represented by the L<DBIx::Class::ResultSet> it was called
-upon. This method can be called on a ResultSet, a Row or a ResultSource class.
+results represented by the L<DBIx::Class::ResultSet> it was called
+upon.
+
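+For example, assuming an C<Artist> class with a C<cds> has_many
+relationship (a sketch, not part of any particular schema):
+
+  # a resultset of this artist's CDs released in 2005
+  my $cds_rs = $artist->search_related( cds => { year => 2005 } );
+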
+See L<DBIx::Class::ResultSet/search_related> for more information.
=cut
=head2 search_related_rs
- ( $objects_rs ) = $rs->search_related_rs('relname', $cond, $attrs);
-
This method works exactly the same as search_related, except that
it guarantees a resultset, even in list context.
=head2 count_related
- $obj->count_related('relname', $cond, $attrs);
+=over 4
+
+=item Arguments: $rel_name, $cond?, L<\%attrs?|DBIx::Class::ResultSet/ATTRIBUTES>
-Returns the count of all the items in the related resultset, restricted by the
-current item or where conditions. Can be called on a
-L<DBIx::Class::Manual::Glossary/"ResultSet"> or a
-L<DBIx::Class::Manual::Glossary/"Row"> object.
+=item Return Value: $count
+
+=back
+
+Returns the count of all the rows in the related resultset, restricted by the
+current result or where conditions.
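+
+For example (same hypothetical C<cds> relationship as above):
+
+  my $recent = $artist->count_related( cds => { year => { '>' => 2000 } } );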
=cut
sub count_related {
- my $self = shift;
- return $self->search_related(@_)->count;
+ shift->search_related(@_)->count;
}
=head2 new_related
- my $new_obj = $obj->new_related('relname', \%col_data);
+=over 4
+
+=item Arguments: $rel_name, \%col_data
+
+=item Return Value: L<$result|DBIx::Class::Manual::ResultClass>
-Create a new item of the related foreign class. If called on a
-L<Row|DBIx::Class::Manual::Glossary/"Row"> object, it will magically
-set any foreign key columns of the new object to the related primary
-key columns of the source object for you. The newly created item will
-not be saved into your storage until you call L<DBIx::Class::Row/insert>
-on it.
+=back
+
+Create a new result object of the related foreign class. It will magically set
+any foreign key columns of the new object to the related primary key columns
+of the source object for you. The newly created result will not be saved into
+your storage until you call L<DBIx::Class::Row/insert> on it.
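+
+For example (a sketch, using the hypothetical C<cds> relationship again):
+
+  my $new_cd = $artist->new_related( cds => { title => 'Blue Plates' } );
+  # the new CD's foreign key to $artist is already set at this point
+  $new_cd->insert; # only now does the row hit the database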
=cut
sub new_related {
- my ($self, $rel, $values, $attrs) = @_;
+ my ($self, $rel, $values) = @_;
# FIXME - this is a bad position for this (also an identical copy in
# set_from_related), but I have no saner way to hook, and I absolutely
}
}
- my $row = $self->search_related($rel)->new($values, $attrs);
- return $row;
+ return $self->search_related($rel)->new_result($values);
}
=head2 create_related
- my $new_obj = $obj->create_related('relname', \%col_data);
+=over 4
+
+=item Arguments: $rel_name, \%col_data
+
+=item Return Value: L<$result|DBIx::Class::Manual::ResultClass>
+
+=back
+
+ my $result = $obj->create_related($rel_name, \%col_data);
-Creates a new item, similarly to new_related, and also inserts the item's data
-into your storage medium. See the distinction between C<create> and C<new>
-in L<DBIx::Class::ResultSet> for details.
+Creates a new result object, similarly to new_related, and also inserts the
+result's data into your storage medium. See the distinction between C<create>
+and C<new> in L<DBIx::Class::ResultSet> for details.
=cut
=head2 find_related
- my $found_item = $obj->find_related('relname', @pri_vals | \%pri_vals);
+=over 4
+
+=item Arguments: $rel_name, \%col_data | @pk_values, { key => $unique_constraint, L<%attrs|DBIx::Class::ResultSet/ATTRIBUTES> }?
+
+=item Return Value: L<$result|DBIx::Class::Manual::ResultClass> | undef
+
+=back
+
+ my $result = $obj->find_related($rel_name, \%col_data);
Attempt to find a related object using its primary key or unique constraints.
See L<DBIx::Class::ResultSet/find> for details.
=cut
sub find_related {
- my $self = shift;
- my $rel = shift;
- return $self->search_related($rel)->find(@_);
+ #my ($self, $rel, @args) = @_;
+ return shift->search_related(shift)->find(@_);
}
=head2 find_or_new_related
- my $new_obj = $obj->find_or_new_related('relname', \%col_data);
+=over 4
+
+=item Arguments: $rel_name, \%col_data, { key => $unique_constraint, L<%attrs|DBIx::Class::ResultSet/ATTRIBUTES> }?
-Find an item of a related class. If none exists, instantiate a new item of the
-related class. The object will not be saved into your storage until you call
-L<DBIx::Class::Row/insert> on it.
+=item Return Value: L<$result|DBIx::Class::Manual::ResultClass>
+
+=back
+
+Find a result object of a related class. See L<DBIx::Class::ResultSet/find_or_new>
+for details.
=cut
=head2 find_or_create_related
- my $new_obj = $obj->find_or_create_related('relname', \%col_data);
+=over 4
-Find or create an item of a related class. See
+=item Arguments: $rel_name, \%col_data, { key => $unique_constraint, L<%attrs|DBIx::Class::ResultSet/ATTRIBUTES> }?
+
+=item Return Value: L<$result|DBIx::Class::Manual::ResultClass>
+
+=back
+
+Find or create a result object of a related class. See
L<DBIx::Class::ResultSet/find_or_create> for details.
=cut
=head2 update_or_create_related
- my $updated_item = $obj->update_or_create_related('relname', \%col_data, \%attrs?);
+=over 4
+
+=item Arguments: $rel_name, \%col_data, { key => $unique_constraint, L<%attrs|DBIx::Class::ResultSet/ATTRIBUTES> }?
+
+=item Return Value: L<$result|DBIx::Class::Manual::ResultClass>
+
+=back
-Update or create an item of a related class. See
+Update or create a result object of a related class. See
L<DBIx::Class::ResultSet/update_or_create> for details.
=cut
sub update_or_create_related {
- my $self = shift;
- my $rel = shift;
- return $self->related_resultset($rel)->update_or_create(@_);
+ #my ($self, $rel, @args) = @_;
+ shift->related_resultset(shift)->update_or_create(@_);
}
=head2 set_from_related
+=over 4
+
+=item Arguments: $rel_name, L<$result|DBIx::Class::Manual::ResultClass>
+
+=item Return Value: not defined
+
+=back
+
$book->set_from_related('author', $author_obj);
$book->author($author_obj); ## same thing
my $rsrc = $self->result_source;
my $rel_info = $rsrc->relationship_info($rel)
- or $self->throw_exception( "No such relationship ${rel}" );
+ or $self->throw_exception( "No such relationship '$rel'" );
if (defined $f_obj) {
my $f_class = $rel_info->{class};
- $self->throw_exception( "Object $f_obj isn't a ".$f_class )
+ $self->throw_exception( "Object '$f_obj' isn't a ".$f_class )
unless blessed $f_obj and $f_obj->isa($f_class);
}
=head2 update_from_related
+=over 4
+
+=item Arguments: $rel_name, L<$result|DBIx::Class::Manual::ResultClass>
+
+=item Return Value: not defined
+
+=back
+
$book->update_from_related('author', $author_obj);
The same as L</"set_from_related">, but the changes are immediately updated
=head2 delete_related
- $obj->delete_related('relname', $cond, $attrs);
+=over 4
+
+=item Arguments: $rel_name, $cond?, L<\%attrs?|DBIx::Class::ResultSet/ATTRIBUTES>
+
+=item Return Value: $underlying_storage_rv
+
+=back
+
+Delete any related row, subject to the given conditions. Internally, this
+calls:
+
+ $self->search_related(@_)->delete
-Delete any related item subject to the given conditions.
+And returns the result of that.
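+
+For example, to delete all of an artist's CDs from 2001 (hypothetical
+C<cds> relationship again):
+
+  $artist->delete_related( cds => { year => 2001 } );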
=cut
=head2 add_to_$rel
-B<Currently only available for C<has_many>, C<many-to-many> and 'multi' type
+B<Currently only available for C<has_many>, C<many_to_many> and 'multi' type
relationships.>
+=head3 has_many / multi
+
+=over 4
+
+=item Arguments: \%col_data
+
+=item Return Value: L<$result|DBIx::Class::Manual::ResultClass>
+
+=back
+
+Creates/inserts a new result object. Internally, this calls:
+
+ $self->create_related($rel, @_)
+
+And returns the result of that.
+
+=head3 many_to_many
+
=over 4
-=item Arguments: ($foreign_vals | $obj), $link_vals?
+=item Arguments: (\%col_data | L<$result|DBIx::Class::Manual::ResultClass>), \%link_col_data?
+
+=item Return Value: L<$result|DBIx::Class::Manual::ResultClass>
=back
my $role = $schema->resultset('Role')->find(1);
$actor->add_to_roles($role);
- # creates a My::DBIC::Schema::ActorRoles linking table row object
+ # creates a My::DBIC::Schema::ActorRoles linking table result object
$actor->add_to_roles({ name => 'lead' }, { salary => 15_000_000 });
- # creates a new My::DBIC::Schema::Role row object and the linking table
+ # creates a new My::DBIC::Schema::Role result object and the linking table
# object with an extra column in the link
-Adds a linking table object for C<$obj> or C<$foreign_vals>. If the first
-argument is a hash reference, the related object is created first with the
-column values in the hash. If an object reference is given, just the linking
-table object is created. In either case, any additional column values for the
-linking table object can be specified in C<$link_vals>.
+Adds a linking table object. If the first argument is a hash reference, the
+related object is created first with the column values in the hash. If an object
+reference is given, just the linking table object is created. In either case,
+any additional column values for the linking table object can be specified in
+C<\%link_col_data>.
+
+See L<DBIx::Class::Relationship/many_to_many> for additional details.
=head2 set_$rel
-B<Currently only available for C<many-to-many> relationships.>
+B<Currently only available for C<many_to_many> relationships.>
=over 4
-=item Arguments: (\@hashrefs | \@objs), $link_vals?
+=item Arguments: (\@hashrefs_of_col_data | L<\@result_objs|DBIx::Class::Manual::ResultClass>), \%link_col_data?
+
+=item Return Value: not defined
=back
=head2 remove_from_$rel
-B<Currently only available for C<many-to-many> relationships.>
+B<Currently only available for C<many_to_many> relationships.>
=over 4
-=item Arguments: $obj
+=item Arguments: L<$result|DBIx::Class::Manual::ResultClass>
+
+=item Return Value: not defined
=back
my $role = $schema->resultset('Role')->find(1);
$actor->remove_from_roles($role);
- # removes $role's My::DBIC::Schema::ActorRoles linking table row object
+ # removes $role's My::DBIC::Schema::ActorRoles linking table result object
Removes the link between the current object and the related object. Note that
the related object itself won't be deleted unless you call ->delete() on
it. This method just removes the link between the two objects.
-=head1 AUTHORS
+=head1 AUTHOR AND CONTRIBUTORS
-Matt S. Trout <mst@shadowcatsystems.co.uk>
+See L<AUTHOR|DBIx::Class/AUTHOR> and L<CONTRIBUTORS|DBIx::Class/CONTRIBUTORS> in DBIx::Class
=head1 LICENSE
$class->ensure_class_loaded($f_class);
my %f_primaries = map { $_ => 1 } try { $f_class->_pri_cols }
catch {
- $class->throw_exception( "Can't infer join condition for ${rel} on ${class}: $_");
+ $class->throw_exception( "Can't infer join condition for '$rel' on ${class}: $_");
};
my ($pri, $too_many) = keys %f_primaries;
$class->throw_exception(
- "Can't infer join condition for ${rel} on ${class}; ".
- "${f_class} has multiple primary keys"
+ "Can't infer join condition for '$rel' on ${class}: "
+ . "${f_class} has multiple primary keys"
) if $too_many;
my $fk = defined $cond ? $cond : $rel;
$class->throw_exception(
- "Can't infer join condition for ${rel} on ${class}; ".
- "$fk is not a column of $class"
+ "Can't infer join condition for '$rel' on ${class}: "
+ . "'$fk' is not a column of $class"
) unless $class->has_column($fk);
$cond = { "foreign.${pri}" => "self.${fk}" };
$class->ensure_class_loaded($f_class);
my ($pri, $too_many) = try { $class->_pri_cols }
catch {
- $class->throw_exception("Can't infer join condition for ${rel} on ${class}: $_");
+ $class->throw_exception("Can't infer join condition for '$rel' on ${class}: $_");
};
$class->throw_exception(
my $f_class_loaded = try { $f_class->columns };
$class->throw_exception(
- "No such column ${f_key} on foreign class ${f_class} ($guess)"
+ "No such column '$f_key' on foreign class ${f_class} ($guess)"
) if $f_class_loaded && !$f_class->has_column($f_key);
$cond = { "foreign.${f_key}" => "self.${pri}" };
$guess = "using primary key of foreign class for foreign key";
}
$class->throw_exception(
- "No such column ${f_key} on foreign class ${f_class} ($guess)"
+ "No such column '$f_key' on foreign class ${f_class} ($guess)"
) if $f_class_loaded && !$f_class->has_column($f_key);
$cond = { "foreign.${f_key}" => "self.${pri}" };
}
# warning
return unless $self_id =~ /^self\.(.*)$/;
my $key = $1;
- $class->throw_exception("Defining rel on ${class} that includes ${key} but no such column defined here yet")
+ $class->throw_exception("Defining rel on ${class} that includes '$key' but no such column defined here yet")
unless $class->has_column($key);
my $column_info = $class->column_info($key);
if ( $column_info->{is_nullable} ) {
DBIx::Class is faster than older ORMs like Class::DBI but it still isn't
designed primarily for speed. Sometimes you need to quickly retrieve the data
-from a massive resultset, while skipping the creation of fancy row objects.
+from a massive resultset, while skipping the creation of fancy result objects.
Specifying this class as a C<result_class> for a resultset will change C<< $rs->next >>
to return a plain data hash-ref (or a list of such hash-refs if C<< $rs->all >> is used).
use warnings;
use base qw/DBIx::Class/;
use DBIx::Class::Carp;
-use DBIx::Class::Exception;
use DBIx::Class::ResultSetColumn;
use Scalar::Util qw/blessed weaken/;
use Try::Tiny;
=head1 SYNOPSIS
- my $users_rs = $schema->resultset('User');
+ my $users_rs = $schema->resultset('User');
while( $user = $users_rs->next) {
print $user->username;
}
- my $registered_users_rs = $schema->resultset('User')->search({ registered => 1 });
+ my $registered_users_rs = $schema->resultset('User')->search({ registered => 1 });
my @cds_in_2005 = $schema->resultset('CD')->search({ year => 2005 })->all();
=head1 DESCRIPTION
=over 4
-=item Arguments: $source, \%$attrs
+=item Arguments: L<$source|DBIx::Class::ResultSource>, L<\%attrs?|/ATTRIBUTES>
-=item Return Value: $rs
+=item Return Value: L<$resultset|/search>
=back
L</ATTRIBUTES> below). Does not perform any queries -- these are
executed as needed by the other methods.
-Generally you won't need to construct a resultset manually. You'll
-automatically get one from e.g. a L</search> called in scalar context:
+Generally you never construct a resultset manually. Instead you get one
+from e.g. a
+C<< $schema->L<resultset|DBIx::Class::Schema/resultset>('$source_name') >>
+or C<< $another_resultset->L<search|/search>(...) >> (the latter called in
+scalar context):
my $rs = $schema->resultset('CD')->search({ title => '100th Window' });
-IMPORTANT: If called on an object, proxies to new_result instead so
+=over
+
+=item WARNING
+
+If called on an object, proxies to L</new_result> instead, so
my $cd = $schema->resultset('CD')->new({ title => 'Spoon' });
-will return a CD object, not a ResultSet.
+will return a CD object, not a ResultSet, and is equivalent to:
+
+ my $cd = $schema->resultset('CD')->new_result({ title => 'Spoon' });
+
+Please also keep in mind that many internals call L</new_result> directly,
+so overloading this method with the idea of intercepting new result object
+creation B<will not work>. See also warning pertaining to L</create>.
+
+=back
=cut
=over 4
-=item Arguments: $cond, \%attrs?
+=item Arguments: L<$cond|DBIx::Class::SQLMaker> | undef, L<\%attrs?|/ATTRIBUTES>
-=item Return Value: $resultset (scalar context) || @row_objs (list context)
+=item Return Value: $resultset (scalar context) | L<@result_objs|DBIx::Class::Manual::ResultClass> (list context)
=back
# year = 2005 OR year = 2004
In list context, C<< ->all() >> is called implicitly on the resultset, thus
-returning a list of row objects instead. To avoid that, use L</search_rs>.
+returning a list of L<result|DBIx::Class::Manual::ResultClass> objects instead.
+To avoid that, use L</search_rs>.
If you need to pass in additional attributes but no additional condition,
call it as C<search(undef, \%attrs)>.
Note that L</search> does not process/deflate any of the values passed in the
L<SQL::Abstract>-compatible search condition structure. This is unlike other
-condition-bound methods L</new>, L</create> and L</find>. The user must ensure
+condition-bound methods L</new_result>, L</create> and L</find>. The user must ensure
manually that any value passed to this method will stringify to something the
RDBMS knows how to deal with. A notable example is the handling of L<DateTime>
objects, for more info see:
=over 4
-=item Arguments: $cond, \%attrs?
+=item Arguments: L<$cond|DBIx::Class::SQLMaker>, L<\%attrs?|/ATTRIBUTES>
-=item Return Value: $resultset
+=item Return Value: L<$resultset|/search>
=back
sub search_rs {
my $self = shift;
- # Special-case handling for (undef, undef).
- if ( @_ == 2 && !defined $_[1] && !defined $_[0] ) {
- @_ = ();
- }
+ my $rsrc = $self->result_source;
+ my ($call_cond, $call_attrs);
- my $call_attrs = {};
- if (@_ > 1) {
- if (ref $_[-1] eq 'HASH') {
- # copy for _normalize_selection
- $call_attrs = { %{ pop @_ } };
- }
- elsif (! defined $_[-1] ) {
- pop @_; # search({}, undef)
+ # Special-case handling for (undef, undef) or (undef)
+ # Note that (foo => undef) is valid deprecated syntax
+ @_ = () if not scalar grep { defined $_ } @_;
+
+ # just a cond
+ if (@_ == 1) {
+ $call_cond = shift;
+ }
+ # fish out attrs in the ($condref, $attr) case
+ elsif (@_ == 2 and ( ! defined $_[0] or (ref $_[0]) ne '') ) {
+ ($call_cond, $call_attrs) = @_;
+ }
+ elsif (@_ % 2) {
+ $self->throw_exception('Odd number of arguments to search')
+ }
+ # legacy search
+ elsif (@_) {
+ carp_unique 'search( %condition ) is deprecated, use search( \%condition ) instead'
+ unless $rsrc->result_class->isa('DBIx::Class::CDBICompat');
+
+ for my $i (0 .. $#_) {
+ next if $i % 2;
+ $self->throw_exception ('All keys in condition key/value pairs must be plain scalars')
+ if (! defined $_[$i] or ref $_[$i] ne '');
}
+
+ $call_cond = { @_ };
}
# see if we can keep the cache (no $rs changes)
$cache = $self->get_cache;
}
- my $rsrc = $self->result_source;
-
my $old_attrs = { %{$self->{attrs}} };
my $old_having = delete $old_attrs->{having};
my $old_where = delete $old_attrs->{where};
my $new_attrs = { %$old_attrs };
# take care of call attrs (only if anything is changing)
- if (keys %$call_attrs) {
+ if ($call_attrs and keys %$call_attrs) {
+
+ # copy for _normalize_selection
+ $call_attrs = { %$call_attrs };
my @selector_attrs = qw/select as columns cols +select +as +columns include_columns/;
}
- # rip apart the rest of @_, parse a condition
- my $call_cond = do {
-
- if (ref $_[0] eq 'HASH') {
- (keys %{$_[0]}) ? $_[0] : undef
- }
- elsif (@_ == 1) {
- $_[0]
- }
- elsif (@_ % 2) {
- $self->throw_exception('Odd number of arguments to search')
- }
- else {
- +{ @_ }
- }
-
- } if @_;
-
- if( @_ > 1 and ! $rsrc->result_class->isa('DBIx::Class::CDBICompat') ) {
- carp_unique 'search( %condition ) is deprecated, use search( \%condition ) instead';
- }
-
for ($old_where, $call_cond) {
if (defined $_) {
$new_attrs->{where} = $self->_stack_cond (
=head2 search_literal
+B<CAVEAT>: C<search_literal> is provided for Class::DBI compatibility and
+should only be used in that context. C<search_literal> is a convenience
+method. It is equivalent to calling C<< $rs->search(\[]) >>, but if you
+want to ensure columns are bound correctly, use L</search>.
+
+See L<DBIx::Class::Manual::Cookbook/Searching> and
+L<DBIx::Class::Manual::FAQ/Searching> for searching techniques that do not
+require C<search_literal>.
+
=over 4
-=item Arguments: $sql_fragment, @bind_values
+=item Arguments: $sql_fragment, @standalone_bind_values
-=item Return Value: $resultset (scalar context) || @row_objs (list context)
+=item Return Value: L<$resultset|/search> (scalar context) | L<@result_objs|DBIx::Class::Manual::ResultClass> (list context)
=back
Pass a literal chunk of SQL to be added to the conditional part of the
resultset query.
-CAVEAT: C<search_literal> is provided for Class::DBI compatibility and should
-only be used in that context. C<search_literal> is a convenience method.
-It is equivalent to calling $schema->search(\[]), but if you want to ensure
-columns are bound correctly, use C<search>.
-
Example of how to use C<search> instead of C<search_literal>
my @cds = $cd_rs->search_literal('cdid = ? AND (artist = ? OR artist = ?)', (2, 1, 2));
my @cds = $cd_rs->search(\[ 'cdid = ? AND (artist = ? OR artist = ?)', [ 'cdid', 2 ], [ 'artist', 1 ], [ 'artist', 2 ] ]);
-
-See L<DBIx::Class::Manual::Cookbook/Searching> and
-L<DBIx::Class::Manual::FAQ/Searching> for searching techniques that do not
-require C<search_literal>.
-
=cut
sub search_literal {
if ( @bind && ref($bind[-1]) eq 'HASH' ) {
$attr = pop @bind;
}
- return $self->search(\[ $sql, map [ __DUMMY__ => $_ ], @bind ], ($attr || () ));
+ return $self->search(\[ $sql, map [ {} => $_ ], @bind ], ($attr || () ));
}
=head2 find
=over 4
-=item Arguments: \%columns_values | @pk_values, \%attrs?
+=item Arguments: \%columns_values | @pk_values, { key => $unique_constraint, L<%attrs|/ATTRIBUTES> }?
-=item Return Value: $row_object | undef
+=item Return Value: L<$result|DBIx::Class::Manual::ResultClass> | undef
=back
you need to search with arbitrary conditions - use L</search>. If the query
resulting from this fallback produces more than one row, a warning to the
effect is issued, though only the first row is constructed and returned as
-C<$row_object>.
+C<$result>.
In addition to C<key>, L</find> recognizes and applies standard
L<resultset attributes|/ATTRIBUTES> in the same way as L</search> does.
and
!$ENV{DBIC_NULLABLE_KEY_NOWARN}
and
- my @undefs = grep { ! defined $final_cond->{$_} } (keys %$final_cond)
+ my @undefs = sort grep { ! defined $final_cond->{$_} } (keys %$final_cond)
) {
carp_unique ( sprintf (
"NULL/undef values supplied for requested unique constraint '%s' (NULL "
=over 4
-=item Arguments: $rel, $cond?, \%attrs?
+=item Arguments: $rel_name, $cond?, L<\%attrs?|/ATTRIBUTES>
-=item Return Value: $new_resultset (scalar context) || @row_objs (list context)
+=item Return Value: L<$resultset|/search> (scalar context) | L<@result_objs|DBIx::Class::Manual::ResultClass> (list context)
=back
attributes for matching records. See L</ATTRIBUTES> for more information.
In list context, C<< ->all() >> is called implicitly on the resultset, thus
-returning a list of row objects instead. To avoid that, use L</search_related_rs>.
+returning a list of result objects instead. To avoid that, use L</search_related_rs>.
See also L</search_related_rs>.
=item Arguments: none
-=item Return Value: $cursor
+=item Return Value: L<$cursor|DBIx::Class::Cursor>
=back
=cut
sub cursor {
- my ($self) = @_;
-
- my $attrs = $self->_resolved_attrs_copy;
+ my $self = shift;
- return $self->{cursor}
- ||= $self->result_source->storage->select($attrs->{from}, $attrs->{select},
- $attrs->{where},$attrs);
+ return $self->{cursor} ||= do {
+ my $attrs = { %{$self->_resolved_attrs } };
+ $self->result_source->storage->select(
+ $attrs->{from}, $attrs->{select}, $attrs->{where}, $attrs
+ );
+ };
}
=head2 single
=over 4
-=item Arguments: $cond?
+=item Arguments: L<$cond?|DBIx::Class::SQLMaker>
-=item Return Value: $row_object | undef
+=item Return Value: L<$result|DBIx::Class::Manual::ResultClass> | undef
=back
$self->throw_exception('single() only takes search conditions, no attributes. You want ->search( $cond, $attrs )->single()');
}
- my $attrs = $self->_resolved_attrs_copy;
+ my $attrs = { %{$self->_resolved_attrs} };
$self->throw_exception(
'single() can not be used on resultsets prefetching has_many. Use find( \%cond ) or next() instead'
=over 4
-=item Arguments: $cond?
+=item Arguments: L<$cond?|DBIx::Class::SQLMaker>
-=item Return Value: $resultsetcolumn
+=item Return Value: L<$resultsetcolumn|DBIx::Class::ResultSetColumn>
=back
=over 4
-=item Arguments: $cond, \%attrs?
+=item Arguments: L<$cond|DBIx::Class::SQLMaker>, L<\%attrs?|/ATTRIBUTES>
-=item Return Value: $resultset (scalar context) || @row_objs (list context)
+=item Return Value: L<$resultset|/search> (scalar context) | L<@result_objs|DBIx::Class::Manual::ResultClass> (list context)
=back
=item Arguments: $first, $last
-=item Return Value: $resultset (scalar context) || @row_objs (list context)
+=item Return Value: L<$resultset|/search> (scalar context) | L<@result_objs|DBIx::Class::Manual::ResultClass> (list context)
=back
=item Arguments: none
-=item Return Value: $result | undef
+=item Return Value: L<$result|DBIx::Class::Manual::ResultClass> | undef
=back
push @$rows, do { my @r = $cursor->next; @r ? \@r : () };
}
# instead of looping over ->next, use ->all in stealth mode
+ # *without* calling a ->reset afterwards
# FIXME - encapsulation breach, got to be a better way
- elsif (! $cursor->{done}) {
+ elsif (! $cursor->{_done}) {
push @$rows, $cursor->all;
- $cursor->{done} = 1;
+ $cursor->{_done} = 1;
$fetch_all = 1;
}
}
=over 4
-=item Arguments: $result_source?
+=item Arguments: L<$result_source?|DBIx::Class::ResultSource>
-=item Return Value: $result_source
+=item Return Value: L<$result_source|DBIx::Class::ResultSource>
=back
=back
-An accessor for the class to use when creating row objects. Defaults to
+An accessor for the class to use when creating result objects. Defaults to
C<< result_source->result_class >> - which in most cases is the name of the
L<"table"|DBIx::Class::Manual::Glossary/"ResultSource"> class.
=over 4
-=item Arguments: $cond, \%attrs??
+=item Arguments: L<$cond|DBIx::Class::SQLMaker>, L<\%attrs?|/ATTRIBUTES>
=item Return Value: $count
return $self->search(@_)->count if @_ and defined $_[0];
return scalar @{ $self->get_cache } if $self->get_cache;
- my $attrs = $self->_resolved_attrs_copy;
+ my $attrs = { %{ $self->_resolved_attrs } };
# this is a little optimization - it is faster to do the limit
# adjustments in software, instead of a subquery
=over 4
-=item Arguments: $cond, \%attrs??
+=item Arguments: L<$cond|DBIx::Class::SQLMaker>, L<\%attrs?|/ATTRIBUTES>
-=item Return Value: $count_rs
+=item Return Value: L<$count_rs|DBIx::Class::ResultSetColumn>
=back
my ($lquote, $rquote, $sep) = map { quotemeta $_ } ($sql_maker->_quote_chars, $sql_maker->name_sep);
- my $sql = $sql_maker->_parse_rs_attrs ({ having => $attrs->{having} });
+ my $having_sql = $sql_maker->_parse_rs_attrs ({ having => $attrs->{having} });
+ my %seen_having;
# search for both a proper quoted qualified string, for a naive unquoted scalarref
# and if all fails for an utterly naive quoted scalar-with-function
- while ($sql =~ /
+ while ($having_sql =~ /
$rquote $sep $lquote (.+?) $rquote
|
[\s,] \w+ \. (\w+) [\s,]
|
[\s,] $lquote (.+?) $rquote [\s,]
/gx) {
- push @parts, ($1 || $2 || $3); # one of them matched if we got here
+ my $part = $1 || $2 || $3; # one of them matched if we got here
+ unless ($seen_having{$part}++) {
+ push @parts, $part;
+ }
}
}
=head2 count_literal
+B<CAVEAT>: C<count_literal> is provided for Class::DBI compatibility and
+should only be used in that context. See L</search_literal> for further info.
+
=over 4
-=item Arguments: $sql_fragment, @bind_values
+=item Arguments: $sql_fragment, @standalone_bind_values
=item Return Value: $count
=item Arguments: none
-=item Return Value: @objects
+=item Return Value: L<@result_objs|DBIx::Class::Manual::ResultClass>
=back
sub reset {
my ($self) = @_;
- delete @{$self}{qw/_attrs stashed_rows stashed_objects/};
-
+ delete @{$self}{qw/stashed_rows stashed_objects/};
$self->{all_cache_position} = 0;
$self->cursor->reset;
return $self;
=item Arguments: none
-=item Return Value: $object | undef
+=item Return Value: L<$result|DBIx::Class::Manual::ResultClass> | undef
=back
-Resets the resultset and returns an object for the first result (or C<undef>
-if the resultset is empty).
+L<Resets|/reset> the resultset (causing a fresh query to storage) and returns
+an object for the first result (or C<undef> if the resultset is empty).
=cut
sub _rs_update_delete {
my ($self, $op, $values) = @_;
- my $cond = $self->{cond};
my $rsrc = $self->result_source;
my $storage = $rsrc->schema->storage;
my $attrs = { %{$self->_resolved_attrs} };
- # "needs" is a strong word here - if the subquery is part of an IN clause - no point of
- # even adding the group_by. It will really be used only when composing a poor-man's
- # multicolumn-IN equivalent OR set
- my $needs_group_by_subq = defined $attrs->{group_by};
-
- # simplify the joinmap and maybe decide if a grouping (and thus subquery) is necessary
- my $relation_classifications;
- if (ref($attrs->{from}) eq 'ARRAY') {
- $attrs->{from} = $storage->_prune_unused_joins ($attrs->{from}, $attrs->{select}, $cond, $attrs);
-
- $relation_classifications = $storage->_resolve_aliastypes_from_select_args (
- [ @{$attrs->{from}}[1 .. $#{$attrs->{from}}] ],
- $attrs->{select},
- $cond,
- $attrs
- ) unless $needs_group_by_subq; # we already know we need a group, no point of resolving them
- }
- else {
- $needs_group_by_subq ||= 1; # if {from} is unparseable assume the worst
- }
-
- $needs_group_by_subq ||= exists $relation_classifications->{multiplying};
+ my $join_classifications;
+ my $existing_group_by = delete $attrs->{group_by};
- # if no subquery - life is easy-ish
- unless (
- $needs_group_by_subq
+ # do we need a subquery for any reason?
+ my $needs_subq = (
+ defined $existing_group_by
or
- keys %$relation_classifications # if any joins at all - need to wrap a subq
+ # if {from} is unparseable wrap a subq
+ ref($attrs->{from}) ne 'ARRAY'
or
- $self->_has_resolved_attr(qw/rows offset/) # limits call for a subq
- ) {
- # Most databases do not allow aliasing of tables in UPDATE/DELETE. Thus
- # a condition containing 'me' or other table prefixes will not work
- # at all. What this code tries to do (badly) is to generate a condition
- # with the qualifiers removed, by exploiting the quote mechanism of sqla
- #
- # this is atrocious and should be replaced by normal sqla introspection
- # one sunny day
- my ($sql, @bind) = do {
- my $sqla = $rsrc->storage->sql_maker;
- local $sqla->{_dequalify_idents} = 1;
- $sqla->_recurse_where($self->{cond});
- } if $self->{cond};
+ # limits call for a subq
+ $self->_has_resolved_attr(qw/rows offset/)
+ );
- return $rsrc->storage->$op(
- $rsrc,
- $op eq 'update' ? $values : (),
- $self->{cond} ? \[$sql, @bind] : (),
- );
+ # simplify the joinmap, so we can further decide if a subq is necessary
+ if (!$needs_subq and @{$attrs->{from}} > 1) {
+ $attrs->{from} = $storage->_prune_unused_joins ($attrs->{from}, $attrs->{select}, $self->{cond}, $attrs);
+
+ # check if there are any joins left after the prune
+ if ( @{$attrs->{from}} > 1 ) {
+ $join_classifications = $storage->_resolve_aliastypes_from_select_args (
+ [ @{$attrs->{from}}[1 .. $#{$attrs->{from}}] ],
+ $attrs->{select},
+ $self->{cond},
+ $attrs
+ );
+
+ # any non-pruneable joins imply subq
+ $needs_subq = scalar keys %{ $join_classifications->{restricting} || {} };
+ }
}
- # we got this far - means it is time to wrap a subquery
- my $idcols = $rsrc->_identifying_column_set || $self->throw_exception(
- sprintf(
- "Unable to perform complex resultset %s() without an identifying set of columns on source '%s'",
- $op,
- $rsrc->source_name,
- )
+ # check if the head is composite (by now all joins are thrown out unless $needs_subq)
+ $needs_subq ||= (
+ (ref $attrs->{from}[0]) ne 'HASH'
+ or
+ ref $attrs->{from}[0]{ $attrs->{from}[0]{-alias} }
);
- my $existing_group_by = delete $attrs->{group_by};
-
- # make a new $rs selecting only the PKs (that's all we really need for the subq)
- delete @{$attrs}{qw/collapse select _prefetch_selector_range as/};
- $attrs->{columns} = [ map { "$attrs->{alias}.$_" } @$idcols ];
- $attrs->{group_by} = \ ''; # FIXME - this is an evil hack, it causes the optimiser to kick in and throw away the LEFT joins
- my $subrs = (ref $self)->new($rsrc, $attrs);
- if (@$idcols == 1) {
- return $storage->$op (
- $rsrc,
- $op eq 'update' ? $values : (),
- { $idcols->[0] => { -in => $subrs->as_query } },
- );
+ my ($cond, $guard);
+ # do we need anything like a subquery?
+ if (! $needs_subq) {
+ # Most databases do not allow aliasing of tables in UPDATE/DELETE. Thus
+ # a condition containing 'me' or other table prefixes will not work
+ # at all. Tell SQLMaker to dequalify idents via a gross hack.
+ $cond = do {
+ my $sqla = $rsrc->storage->sql_maker;
+ local $sqla->{_dequalify_idents} = 1;
+ \[ $sqla->_recurse_where($self->{cond}) ];
+ };
}
- elsif ($storage->_use_multicolumn_in) {
- # This is hideously ugly, but SQLA does not understand multicol IN expressions
- my $sql_maker = $storage->sql_maker;
- my ($sql, @bind) = @${$subrs->as_query};
- $sql = sprintf ('(%s) IN %s', # the as_query already comes with a set of parenthesis
- join (', ', map { $sql_maker->_quote ($_) } @$idcols),
- $sql,
+ else {
+ # we got this far - means it is time to wrap a subquery
+ my $idcols = $rsrc->_identifying_column_set || $self->throw_exception(
+ sprintf(
+ "Unable to perform complex resultset %s() without an identifying set of columns on source '%s'",
+ $op,
+ $rsrc->source_name,
+ )
);
- return $storage->$op (
- $rsrc,
- $op eq 'update' ? $values : (),
- \[$sql, @bind],
- );
- }
- else {
- # if all else fails - get all primary keys and operate over a ORed set
- # wrap in a transaction for consistency
- # this is where the group_by starts to matter
- my $subq_group_by;
- if ($needs_group_by_subq) {
- $subq_group_by = $attrs->{columns};
-
- # make sure if there is a supplied group_by it matches the columns compiled above
- # perfectly. Anything else can not be sanely executed on most databases so croak
- # right then and there
- if ($existing_group_by) {
- my @current_group_by = map
- { $_ =~ /\./ ? $_ : "$attrs->{alias}.$_" }
- @$existing_group_by
- ;
+ # make a new $rs selecting only the PKs (that's all we really need for the subq)
+ delete $attrs->{$_} for qw/collapse select _prefetch_selector_range as/;
+ $attrs->{columns} = [ map { "$attrs->{alias}.$_" } @$idcols ];
+ $attrs->{group_by} = \ ''; # FIXME - this is an evil hack, it causes the optimiser to kick in and throw away the LEFT joins
+ my $subrs = (ref $self)->new($rsrc, $attrs);
- if (
- join ("\x00", sort @current_group_by)
- ne
- join ("\x00", sort @$subq_group_by )
- ) {
- $self->throw_exception (
- "You have just attempted a $op operation on a resultset which does group_by"
- . ' on columns other than the primary keys, while DBIC internally needs to retrieve'
- . ' the primary keys in a subselect. All sane RDBMS engines do not support this'
- . ' kind of queries. Please retry the operation with a modified group_by or'
- . ' without using one at all.'
- );
+ if (@$idcols == 1) {
+ $cond = { $idcols->[0] => { -in => $subrs->as_query } };
+ }
+ elsif ($storage->_use_multicolumn_in) {
+ # no syntax for calling this properly yet
+ # !!! EXPERIMENTAL API !!! WILL CHANGE !!!
+ $cond = $storage->sql_maker->_where_op_multicolumn_in (
+ $idcols, # how do I convey a list of idents...? can binds reside on lhs?
+ $subrs->as_query
+      );
+ }
+ else {
+ # if all else fails - get all primary keys and operate over a ORed set
+ # wrap in a transaction for consistency
+ # this is where the group_by/multiplication starts to matter
+ if (
+ $existing_group_by
+ or
+ keys %{ $join_classifications->{multiplying} || {} }
+ ) {
+ # make sure if there is a supplied group_by it matches the columns compiled above
+ # perfectly. Anything else can not be sanely executed on most databases so croak
+ # right then and there
+ if ($existing_group_by) {
+ my @current_group_by = map
+ { $_ =~ /\./ ? $_ : "$attrs->{alias}.$_" }
+ @$existing_group_by
+ ;
+
+ if (
+ join ("\x00", sort @current_group_by)
+ ne
+ join ("\x00", sort @{$attrs->{columns}} )
+ ) {
+ $self->throw_exception (
+ "You have just attempted a $op operation on a resultset which does group_by"
+ . ' on columns other than the primary keys, while DBIC internally needs to retrieve'
+ . ' the primary keys in a subselect. All sane RDBMS engines do not support this'
+ . ' kind of queries. Please retry the operation with a modified group_by or'
+ . ' without using one at all.'
+ );
+ }
}
+
+ $subrs = $subrs->search({}, { group_by => $attrs->{columns} });
}
- }
- my $guard = $storage->txn_scope_guard;
+ $guard = $storage->txn_scope_guard;
- my @op_condition;
- for my $row ($subrs->search({}, { group_by => $subq_group_by })->cursor->all) {
- push @op_condition, { map
- { $idcols->[$_] => $row->[$_] }
- (0 .. $#$idcols)
- };
+ $cond = [];
+ for my $row ($subrs->cursor->all) {
+ push @$cond, { map
+ { $idcols->[$_] => $row->[$_] }
+ (0 .. $#$idcols)
+ };
+ }
}
+ }
- my $res = $storage->$op (
- $rsrc,
- $op eq 'update' ? $values : (),
- \@op_condition,
- );
+ my $res = $storage->$op (
+ $rsrc,
+ $op eq 'update' ? $values : (),
+ $cond,
+ );
- $guard->commit;
+ $guard->commit if $guard;
- return $res;
- }
+ return $res;
}
=head2 update
=item Arguments: \%values
-=item Return Value: $storage_rv
+=item Return Value: $underlying_storage_rv
=back
Sets the specified columns in the resultset to the supplied values in a
single query. Note that this will not run any accessor/set_column/update
-triggers, nor will it update any row object instances derived from this
+triggers, nor will it update any result object instances derived from this
resultset (this includes the contents of the L<resultset cache|/set_cache>
if any). See L</update_all> if you need to execute any on-update
triggers or cascades defined either by you or a
=item Arguments: none
-=item Return Value: $storage_rv
+=item Return Value: $underlying_storage_rv
=back
Deletes the rows matching this resultset in a single query. Note that this
will not run any delete triggers, nor will it alter the
-L<in_storage|DBIx::Class::Row/in_storage> status of any row object instances
+L<in_storage|DBIx::Class::Row/in_storage> status of any result object instances
derived from this resultset (this includes the contents of the
L<resultset cache|/set_cache> if any). See L</delete_all> if you need to
execute any on-delete triggers or cascades defined either by you or a
=over 4
-=item Arguments: \@data;
+=item Arguments: [ \@column_list, \@row_values+ ] | [ \%col_data+ ]
+
+=item Return Value: L<\@result_objects|DBIx::Class::Manual::ResultClass> (scalar context) | L<@result_objects|DBIx::Class::Manual::ResultClass> (list context)
=back
-Accepts either an arrayref of hashrefs or alternatively an arrayref of arrayrefs.
-For the arrayref of hashrefs style each hashref should be a structure suitable
-for submitting to a $resultset->create(...) method.
+Accepts either an arrayref of hashrefs or alternatively an arrayref of
+arrayrefs.
+
+=over
+
+=item NOTE
-In void context, C<insert_bulk> in L<DBIx::Class::Storage::DBI> is used
-to insert the data, as this is a faster method.
+The context of this method call has an important effect on what is
+submitted to storage. In void context data is fed directly to fastpath
+insertion routines provided by the underlying storage (most often
+L<DBI/execute_for_fetch>), bypassing the L<new|DBIx::Class::Row/new> and
+L<insert|DBIx::Class::Row/insert> calls on the
+L<Result|DBIx::Class::Manual::ResultClass> class, including any
+augmentation of these methods provided by components. For example if you
+are using something like L<DBIx::Class::UUIDColumns> to create primary
+keys for you, you will find that your PKs are empty. In this case you
+will have to explicitly force scalar or list context in order to create
+those values.
-Otherwise, each set of data is inserted into the database using
-L<DBIx::Class::ResultSet/create>, and the resulting objects are
-accumulated into an array. The array itself, or an array reference
-is returned depending on scalar or list context.
+=back
-Example: Assuming an Artist Class that has many CDs Classes relating:
+In non-void (scalar or list) context, this method is simply a wrapper
+for L</create>. Depending on list or scalar context either a list of
+L<Result|DBIx::Class::Manual::ResultClass> objects or an arrayref
+containing these objects is returned.
+
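+For example, assigning the return value forces non-void context, so the
+usual L<DBIx::Class::Row/new> and L<DBIx::Class::Row/insert> code paths
+(including any component overrides) still run (a sketch):
+
+  my $created = $schema->resultset('Artist')->populate([
+    { name => 'Singer A' },
+    { name => 'Singer B' },
+  ]); # arrayref of the new Artist result objects
+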
+When supplying data in "arrayref of arrayrefs" invocation style, the
+first element should be an arrayref of column names, and each subsequent
+element an arrayref of data values in the specified column order.
+For example:
+
+  $Artist_rs->populate([
+ [ qw( artistid name ) ],
+ [ 100, 'A Formally Unknown Singer' ],
+ [ 101, 'A singer that jumped the shark two albums ago' ],
+ [ 102, 'An actually cool singer' ],
+ ]);
- my $Artist_rs = $schema->resultset("Artist");
+For the arrayref of hashrefs style each hashref should be a structure
+suitable for passing to L</create>. Multi-create is also permitted with
+this syntax.
- ## Void Context Example
- $Artist_rs->populate([
+ $schema->resultset("Artist")->populate([
{ artistid => 4, name => 'Manufactured Crap', cds => [
{ title => 'My First CD', year => 2006 },
{ title => 'Yet More Tweeny-Pop crap', year => 2007 },
},
]);
- ## Array Context Example
- my ($ArtistOne, $ArtistTwo, $ArtistThree) = $Artist_rs->populate([
- { name => "Artist One"},
- { name => "Artist Two"},
- { name => "Artist Three", cds=> [
- { title => "First CD", year => 2007},
- { title => "Second CD", year => 2008},
- ]}
- ]);
-
- print $ArtistOne->name; ## response is 'Artist One'
- print $ArtistThree->cds->count ## reponse is '2'
-
-For the arrayref of arrayrefs style, the first element should be a list of the
-fieldsnames to which the remaining elements are rows being inserted. For
-example:
-
- $Arstist_rs->populate([
- [qw/artistid name/],
- [100, 'A Formally Unknown Singer'],
- [101, 'A singer that jumped the shark two albums ago'],
- [102, 'An actually cool singer'],
- ]);
-
-Please note an important effect on your data when choosing between void and
-wantarray context. Since void context goes straight to C<insert_bulk> in
-L<DBIx::Class::Storage::DBI> this will skip any component that is overriding
-C<insert>. So if you are using something like L<DBIx-Class-UUIDColumns> to
-create primary keys for you, you will find that your PKs are empty. In this
-case you will have to use the wantarray context in order to create those
-values.
+If you attempt a void-context multi-create as in the example above (each
+Artist also has the related list of CDs), and B<do not> supply the
+necessary autoinc foreign key information, this method will proxy to the
+less efficient L</create>, and then throw the Result objects away. In this
+case there are obviously no benefits to using this method over L</create>.
=cut
return unless @$data;
if(defined wantarray) {
- my @created;
- foreach my $item (@$data) {
- push(@created, $self->create($item));
- }
+ my @created = map { $self->create($_) } @$data;
return wantarray ? @created : \@created;
}
else {
## inherit the data locked in the conditions of the resultset
my ($rs_data) = $self->_merge_with_rscond({});
delete @{$rs_data}{@columns};
- my @inherit_cols = keys %$rs_data;
- my @inherit_data = values %$rs_data;
## do bulk insert on current row
$rsrc->storage->insert_bulk(
$rsrc,
- [@columns, @inherit_cols],
- [ map { [ @$_{@columns}, @inherit_data ] } @$data ],
+ [@columns, keys %$rs_data],
+ [ map { [ @$_{@columns}, values %$rs_data ] } @$data ],
);
## do the has_many relationships
=item Arguments: none
-=item Return Value: $pager
+=item Return Value: L<$pager|Data::Page>
=back
-Return Value a L<Data::Page> object for the current resultset. Only makes
+Returns a L<Data::Page> object for the current resultset. Only makes
sense for queries with a C<page> attribute.
To get the full count of entries for a paged resultset, call
=item Arguments: $page_number
-=item Return Value: $rs
+=item Return Value: L<$resultset|/search>
=back
=over 4
-=item Arguments: \%vals
+=item Arguments: \%col_data
-=item Return Value: $rowobject
+=item Return Value: L<$result|DBIx::Class::Manual::ResultClass>
=back
-Creates a new row object in the resultset's result class and returns
+Creates a new result object in the resultset's result class and returns
it. The row is not inserted into the database at this point, call
L<DBIx::Class::Row/insert> to do that. Calling L<DBIx::Class::Row/in_storage>
-will tell you whether the row object has been inserted or not.
+will tell you whether the result object has been inserted or not.
Passes the hashref of input on to L<DBIx::Class::Row/new>.
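+
+A minimal sketch:
+
+  my $artist = $schema->resultset('Artist')->new_result({ name => 'Shiny New Band' });
+  print $artist->in_storage; # false - nothing has touched the database yet
+  $artist->insert;           # now the row exists in storage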
sub new_result {
my ($self, $values) = @_;
- $self->throw_exception( "new_result needs a hash" )
+
+ $self->throw_exception( "new_result takes only one argument - a hashref of values" )
+ if @_ > 2;
+
+ $self->throw_exception( "new_result expects a hashref" )
unless (ref $values eq 'HASH');
my ($merged_cond, $cols_from_relations) = $self->_merge_with_rscond($values);
=item Arguments: none
-=item Return Value: \[ $sql, @bind ]
+=item Return Value: \[ $sql, L<@bind_values|/DBIC BIND VALUES> ]
=back
sub as_query {
my $self = shift;
- my $attrs = $self->_resolved_attrs_copy;
+ my $attrs = { %{ $self->_resolved_attrs } };
# For future use:
#
=over 4
-=item Arguments: \%vals, \%attrs?
+=item Arguments: \%col_data, { key => $unique_constraint, L<%attrs|/ATTRIBUTES> }?
-=item Return Value: $rowobject
+=item Return Value: L<$result|DBIx::Class::Manual::ResultClass>
=back
=over 4
-=item Arguments: \%vals
+=item Arguments: \%col_data
-=item Return Value: a L<DBIx::Class::Row> $object
+=item Return Value: L<$result|DBIx::Class::Manual::ResultClass>
=back
with an arbitrary depth and width, as long as the relationships actually
exist and the correct column data has been supplied.
-
Instead of hashrefs of plain related data (key/value pairs), you may
also pass new or inserted objects. New objects (not inserted yet, see
-L</new>), will be inserted into their appropriate tables.
+L</new_result>) will be inserted into their appropriate tables.
-Effectively a shortcut for C<< ->new_result(\%vals)->insert >>.
+Effectively a shortcut for C<< ->new_result(\%col_data)->insert >>.
Example of creating a new row.
When subclassing ResultSet never attempt to override this method. Since
it is a simple shortcut for C<< $self->new_result($attrs)->insert >>, a
lot of the internals simply never call it, so your override will be
-bypassed more often than not. Override either L<new|DBIx::Class::Row/new>
-or L<insert|DBIx::Class::Row/insert> depending on how early in the
-L</create> process you need to intervene.
+bypassed more often than not. Override either L<DBIx::Class::Row/new>
+or L<DBIx::Class::Row/insert> depending on how early in the
+L</create> process you need to intervene. See also warning pertaining to
+L</new>.
=back
=over 4
-=item Arguments: \%vals, \%attrs?
+=item Arguments: \%col_data, { key => $unique_constraint, L<%attrs|/ATTRIBUTES> }?
-=item Return Value: $rowobject
+=item Return Value: L<$result|DBIx::Class::Manual::ResultClass>
=back
year => 2005,
});
- if( $cd->in_storage ) {
+ if( !$cd->in_storage ) {
# do some stuff
$cd->insert;
}
=over 4
-=item Arguments: \%col_values, { key => $unique_constraint }?
+=item Arguments: \%col_data, { key => $unique_constraint, L<%attrs|/ATTRIBUTES> }?
-=item Return Value: $row_object
+=item Return Value: L<$result|DBIx::Class::Manual::ResultClass>
=back
$resultset->update_or_create({ col => $val, ... });
Like L</find_or_create>, but if a row is found it is immediately updated via
-C<< $found_row->update (\%col_values) >>.
+C<< $found_row->update (\%col_data) >>.
Takes an optional C<key> attribute to search on a specific unique constraint.
to call L<DBIx::Class::Row/insert> to save the newly created row to the
database!
- my $cd = $schema->resultset('CD')->update_or_new(
- {
- artist => 'Massive Attack',
- title => 'Mezzanine',
- year => 1998,
- },
- { key => 'cd_artist_title' }
- );
-
- if( $cd->in_storage ) {
- # do some stuff
- $cd->insert;
- }
-
=cut
sub update_or_create {
=over 4
-=item Arguments: \%col_values, { key => $unique_constraint }?
+=item Arguments: \%col_data, { key => $unique_constraint, L<%attrs|/ATTRIBUTES> }?
-=item Return Value: $rowobject
+=item Return Value: L<$result|DBIx::Class::Manual::ResultClass>
=back
$resultset->update_or_new({ col => $val, ... });
Like L</find_or_new> but if a row is found it is immediately updated via
-C<< $found_row->update (\%col_values) >>.
+C<< $found_row->update (\%col_data) >>.
For example:
=item Arguments: none
-=item Return Value: \@cache_objects | undef
+=item Return Value: L<\@result_objs|DBIx::Class::Manual::ResultClass> | undef
=back
=over 4
-=item Arguments: \@cache_objects
+=item Arguments: L<\@result_objs|DBIx::Class::Manual::ResultClass>
-=item Return Value: \@cache_objects
+=item Return Value: L<\@result_objs|DBIx::Class::Manual::ResultClass>
=back
Sets the contents of the cache for the resultset. Expects an arrayref
of objects of the same class as those produced by the resultset. Note that
-if the cache is set the resultset will return the cached objects rather
+if the cache is set, the resultset will return the cached objects rather
than re-querying the database even if the cache attr is not set.
The contents of the cache can also be populated by using the
=over 4
-=item Arguments: $relationship_name
+=item Arguments: $rel_name
-=item Return Value: $resultset
+=item Return Value: L<$resultset|/search>
=back
=cut
sub current_source_alias {
- my ($self) = @_;
-
- return ($self->{attrs} || {})->{alias} || 'me';
+ return (shift->{attrs} || {})->{alias} || 'me';
}
=head2 as_subselect_rs
=item Arguments: none
-=item Return Value: $resultset
+=item Return Value: L<$resultset|/search>
=back
return {%$attrs, from => $from, seen_join => $seen};
}
-# too many times we have to do $attrs = { %{$self->_resolved_attrs} }
-sub _resolved_attrs_copy {
- my $self = shift;
- return { %{$self->_resolved_attrs (@_)} };
-}
-
sub _resolved_attrs {
my $self = shift;
return $self->{_attrs} if $self->{_attrs};
if (my $cols = delete $attrs->{columns}) {
for my $c (ref $cols eq 'ARRAY' ? @$cols : $cols) {
if (ref $c eq 'HASH') {
- for my $as (keys %$c) {
+ for my $as (sort keys %$c) {
push @sel, $c->{$as};
push @as, $as;
}
# default order for collapsing unless the user asked for something
$attrs->{order_by} = [ map { "$alias.$_" } $source->primary_columns ];
$attrs->{_ordered_for_collapse} = 1;
+ $attrs->{_order_is_artificial} = 1;
}
# if both page and offset are specified, produce a combined offset
C<\%attrs> argument. See L</search>, L</search_rs>, L</find>,
L</count>.
+Default attributes can be set on the result class using
+L<DBIx::Class::ResultSource/resultset_attributes>. (Please read
+the CAVEATS on that feature before using it!)
+
These are in no particular order:
=head2 order_by
If you want to fetch related objects from other tables as well, see C<prefetch>
below.
+ NOTE: An internal join-chain pruner will discard certain joins while
+ constructing the actual SQL query, as long as the joins in question do not
+ affect the retrieved result. This includes, for example, 1:1 left joins
+ that are neither a part of the restriction specification (WHERE/HAVING)
+ nor a part of the query selection.
+
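+For example (the relationship name is illustrative), joining a C<might_have>
+relationship that is referenced neither in the conditions nor in the selected
+columns:
+
+  $rs->search({}, { join => 'liner_notes' });
+
+may render SQL containing no JOIN at all.
+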
For more help on using joins with search, see L<DBIx::Class::Manual::Joining>.
=head2 prefetch
}
);
-In fact, C<DBIx::Class> will emit the following warning:
-
- Prefetching multiple has_many rels tracks and cd_to_producer at top
- level will explode the number of row objects retrievable via ->next
- or ->all. Use at your own risk.
-
The collapser currently can't identify duplicate tuples for multiple
L<has_many|DBIx::Class::Relationship/has_many> relationships and as a
result the second L<has_many|DBIx::Class::Relationship/has_many>
=back
+=head2 alias
+
+=over 4
+
+=item Value: $source_alias
+
+=back
+
+Sets the source alias for the query. Normally, this defaults to C<me>, but
+nested search queries (sub-SELECTs) might need specific aliases set to
+reference inner queries. For example:
+
+ my $q = $rs
+ ->related_resultset('CDs')
+ ->related_resultset('Tracks')
+ ->search({
+ 'track.id' => { -ident => 'none_search.id' },
+ })
+ ->as_query;
+
+ my $ids = $self->search({
+ -not_exists => $q,
+ }, {
+ alias => 'none_search',
+ group_by => 'none_search.id',
+ })->get_column('id')->as_query;
+
+ $self->search({ id => { -in => $ids } })
+
+This attribute is directly tied to L</current_source_alias>.
+
=head2 page
=over 4
Adds to the WHERE clause.
# only return rows WHERE deleted IS NULL for all searches
- __PACKAGE__->resultset_attributes({ where => { deleted => undef } }); )
+ __PACKAGE__->resultset_attributes({ where => { deleted => undef } });
Can be overridden by passing C<< { where => undef } >> as an attribute
to a resultset.
=over 4
-=item Value: ( 'update' | 'shared' )
+=item Value: ( 'update' | 'shared' | \$scalar )
=back
Set to 'update' for a SELECT ... FOR UPDATE or 'shared' for a SELECT
-... FOR SHARED.
+... FOR SHARED. If a C<\$scalar> reference is passed, the dereferenced string
+is embedded in the query verbatim after C<FOR>.
+
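+For example (the C<NOWAIT> variant below is database-specific and
+illustrative only):
+
+  $rs->search(undef, { for => \'UPDATE NOWAIT' });
+
+renders the statement as C<SELECT ... FOR UPDATE NOWAIT>.
+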
+=head1 DBIC BIND VALUES
+
+Because DBIC may need more information to bind values than just the column name
+and value itself, it uses a special format for both passing and receiving bind
+values. Each bind value should be an arrayref of the form
+C<< [ \%args => $val ] >>. The currently recognized keys of C<< \%args >> are:
+
+=over 4
+
+=item dbd_attrs
+
+If present (in any form), this is passed directly to bind_param. Note that
+different DBDs expect different bind args (e.g. DBD::SQLite takes a single
+numerical type, while DBD::Pg takes a hashref of bind options).
+
+If this is specified, all other bind options described below are ignored.
+
+=item sqlt_datatype
+
+If present, this is used to infer the actual bind attribute by passing to
+C<< $resolved_storage->bind_attribute_by_data_type() >>. Defaults to the
+"data_type" from the L<add_columns column info|DBIx::Class::ResultSource/add_columns>.
+
+Note that the data type is somewhat freeform (hence the sqlt_ prefix);
+currently drivers are expected to "Do the Right Thing" when given a common
+datatype name. (Not ideal, but that's what we have at this point.)
+
+=item sqlt_size
+
+Currently used to correctly allocate buffers for bind_param_inout().
+Defaults to "size" from the L<add_columns column info|DBIx::Class::ResultSource/add_columns>,
+or to a sensible value based on the "data_type".
+
+=item dbic_colname
+
+Used to fill in missing sqlt_datatype and sqlt_size attributes (if they are
+explicitly specified they are never overridden). Also used by some weird DBDs,
+where the column name should be available at bind_param time (e.g. Oracle).
+
+=back
+
+For backwards compatibility and convenience, the following shortcuts are
+supported:
+
+ [ $name => $val ] === [ { dbic_colname => $name }, $val ]
+ [ \$dt => $val ] === [ { sqlt_datatype => $dt }, $val ]
+ [ undef, $val ] === [ {}, $val ]
+
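+For example, a literal-SQL condition carrying a typed bind value (the
+column and datatype below are illustrative only):
+
+  $rs->search( \[ 'year = ?', [ { sqlt_datatype => 'integer' } => 1998 ] ] );
+
+  # same condition, using the \$dt shortcut from above
+  $rs->search( \[ 'year = ?', [ \'integer' => 1998 ] ] );
+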
+=head1 AUTHOR AND CONTRIBUTORS
+
+See L<AUTHOR|DBIx::Class/AUTHOR> and L<CONTRIBUTORS|DBIx::Class/CONTRIBUTORS> in DBIx::Class
+
+=head1 LICENSE
+
+You may distribute this code under the same terms as Perl itself.
=cut
use base 'DBIx::Class';
use DBIx::Class::Carp;
-use DBIx::Class::Exception;
# not importing first() as it will clash with our own method
use List::Util ();
=item Arguments: none
-=item Return Value: \[ $sql, @bind ]
+=item Return Value: \[ $sql, L<@bind_values|DBIx::Class::ResultSet/DBIC BIND VALUES> ]
=back
there are none).
Much like L<DBIx::Class::ResultSet/all> but returns values rather
-than row objects.
+than result objects.
=cut
=item Arguments: none
-=item Return Value: $resultset
+=item Return Value: L<$resultset|DBIx::Class::ResultSet>
=back
=item Arguments: none
-=item Return Value: $resultset
+=item Return Value: L<$resultset|DBIx::Class::ResultSet>
=back
=item Arguments: none
-=item Return Value: $resultset
+=item Return Value: L<$resultset|DBIx::Class::ResultSet>
=back
=item Arguments: $function
-=item Return Value: $resultset
+=item Return Value: L<$resultset|DBIx::Class::ResultSet>
=back
=cut
sub throw_exception {
- my $self=shift;
+ my $self = shift;
if (ref $self && $self->{_parent_resultset}) {
$self->{_parent_resultset}->throw_exception(@_);
1;
-=head1 AUTHORS
-
-Luke Saunders <luke.saunders@gmail.com>
+=head1 AUTHOR AND CONTRIBUTORS
-Jess Robinson
+See L<AUTHOR|DBIx::Class/AUTHOR> and L<CONTRIBUTORS|DBIx::Class/CONTRIBUTORS> in DBIx::Class
=head1 LICENSE
use DBIx::Class::ResultSet;
use DBIx::Class::ResultSourceHandle;
-use DBIx::Class::Exception;
use DBIx::Class::Carp;
-use DBIx::Class::GlobalDestruction;
+use Devel::GlobalDestruction;
use Try::Tiny;
use List::Util 'first';
use Scalar::Util qw/blessed weaken isweak/;
$schema->source($source_name);
-=item From a Row object:
+=item From a Result object:
$row->result_source;
=item Arguments: @columns
-=item Return value: The ResultSource object
+=item Return Value: L<$result_source|/new>
=back
calls of this method will add more columns, not replace them.
The column names given will be created as accessor methods on your
-L<DBIx::Class::Row> objects. You can change the name of the accessor
+L<Result|DBIx::Class::Manual::ResultClass> objects. You can change the name of the accessor
by supplying an L</accessor> in the column_info hash.
If a column name beginning with a plus sign ('+col1') is provided, the
=item Arguments: $colname, \%columninfo?
-=item Return value: 1/0 (true/false)
+=item Return Value: 1/0 (true/false)
=back
=item Arguments: $colname
-=item Return value: 1/0 (true/false)
+=item Return Value: 1/0 (true/false)
=back
=item Arguments: $colname
-=item Return value: Hashref of info
+=item Return Value: Hashref of info
=back
=over
-=item Arguments: None
+=item Arguments: none
-=item Return value: Ordered list of column names
+=item Return Value: Ordered list of column names
=back
=item Arguments: \@colnames ?
-=item Return value: Hashref of column name/info pairs
+=item Return Value: Hashref of column name/info pairs
=back
=item Arguments: @colnames
-=item Return value: undefined
+=item Return Value: not defined
=back
=item Arguments: $colname
-=item Return value: undefined
+=item Return Value: not defined
=back
=item Arguments: @cols
-=item Return value: undefined
+=item Return Value: not defined
=back
=over 4
-=item Arguments: None
+=item Arguments: none
-=item Return value: Ordered list of primary column names
+=item Return Value: Ordered list of primary column names
=back
=item Arguments: $sequence_name
-=item Return value: undefined
+=item Return Value: not defined
=back
=item Arguments: $name?, \@colnames
-=item Return value: undefined
+=item Return Value: not defined
=back
=item Arguments: @constraints
-=item Return value: undefined
+=item Return Value: not defined
=back
=item Arguments: \@colnames
-=item Return value: Constraint name
+=item Return Value: Constraint name
=back
=over 4
-=item Arguments: None
+=item Arguments: none
-=item Return value: Hash of unique constraint data
+=item Return Value: Hash of unique constraint data
=back
=over 4
-=item Arguments: None
+=item Arguments: none
-=item Return value: Unique constraint names
+=item Return Value: Unique constraint names
=back
=item Arguments: $constraintname
-=item Return value: List of constraint columns
+=item Return Value: List of constraint columns
=back
=item Arguments: $callback_name | \&callback_code
-=item Return value: $callback_name | \&callback_code
+=item Return Value: $callback_name | \&callback_code
=back
}
}
+=head2 result_class
+
+=over 4
+
+=item Arguments: $classname
+
+=item Return Value: $classname
+
+=back
+
+ use My::Schema::ResultClass::Inflator;
+ ...
+
+ use My::Schema::Artist;
+ ...
+ __PACKAGE__->result_class('My::Schema::ResultClass::Inflator');
+
+Set the default result class for this source. You can use this to create
+and use your own result inflator. See L<DBIx::Class::ResultSet/result_class>
+for more details.
+
+Please note that setting this to something like
+L<DBIx::Class::ResultClass::HashRefInflator> will make every result unblessed
+and make life more difficult. Inflators like those are better suited to
+temporary usage via L<DBIx::Class::ResultSet/result_class>.
+
=head2 resultset
=over 4
-=item Arguments: None
+=item Arguments: none
-=item Return value: $resultset
+=item Return Value: L<$resultset|DBIx::Class::ResultSet>
=back
=item Arguments: $classname
-=item Return value: $classname
+=item Return Value: $classname
=back
=over 4
-=item Arguments: \%attrs
+=item Arguments: L<\%attrs|DBIx::Class::ResultSet/ATTRIBUTES>
-=item Return value: \%attrs
+=item Return Value: L<\%attrs|DBIx::Class::ResultSet/ATTRIBUTES>
=back
$source->resultset_attributes({ order_by => [ 'id' ] });
Store a collection of resultset attributes, that will be set on every
-L<DBIx::Class::ResultSet> produced from this result source. For a full
-list see L<DBIx::Class::ResultSet/ATTRIBUTES>.
+L<DBIx::Class::ResultSet> produced from this result source.
+
+B<CAVEAT>: C<resultset_attributes> comes with its own set of issues and
+bugs! While C<resultset_attributes> isn't deprecated per se, its usage is
+not recommended!
+
+Since relationships use attributes to link tables together, the "default"
+attributes you set may cause unpredictable and undesired behavior. Furthermore,
+the defaults cannot be turned off, so you are stuck with them.
+
+In most cases, what you should actually be using are project-specific methods:
+
+ package My::Schema::ResultSet::Artist;
+ use base 'DBIx::Class::ResultSet';
+ ...
+
+ # BAD IDEA!
+ #__PACKAGE__->resultset_attributes({ prefetch => 'tracks' });
+
+ # GOOD IDEA!
+ sub with_tracks { shift->search({}, { prefetch => 'tracks' }) }
+
+ # in your code
+ $schema->resultset('Artist')->with_tracks->...
+
+This gives you the flexibility of not using it when you don't need it.
+
+For more complex situations, another solution would be to use a virtual view
+via L<DBIx::Class::ResultSource::View>.
=cut
=over 4
-=item Arguments: None
+=item Arguments: none
-=item Result value: $name
+=item Return Value: $name
=over 4
-=item Arguments: None
+=item Arguments: none
-=item Return value: FROM clause
+=item Return Value: FROM clause
=back
=over 4
-=item Arguments: $schema
+=item Arguments: L<$schema?|DBIx::Class::Schema>
-=item Return value: A schema object
+=item Return Value: L<$schema|DBIx::Class::Schema>
=back
=over 4
-=item Arguments: None
+=item Arguments: none
-=item Return value: A Storage object
+=item Return Value: L<$storage|DBIx::Class::Storage>
=back
$source->storage->debug(1);
-Returns the storage handle for the current schema.
-
-See also: L<DBIx::Class::Storage>
+Returns the L<storage handle|DBIx::Class::Storage> for the current schema.
=cut
=over 4
-=item Arguments: $relname, $related_source_name, \%cond, [ \%attrs ]
+=item Arguments: $rel_name, $related_source_name, \%cond, \%attrs?
-=item Return value: 1/true if it succeeded
+=item Return Value: 1/true if it succeeded
=back
- $source->add_relationship('relname', 'related_source', $cond, $attrs);
+ $source->add_relationship('rel_name', 'related_source', $cond, $attrs);
L<DBIx::Class::Relationship> describes a series of methods which
create pre-defined useful types of relationships. Look there first
=over 4
-=item Arguments: None
+=item Arguments: none
-=item Return value: List of relationship names
+=item Return Value: L<@rel_names|DBIx::Class::Relationship>
=back
=over 4
-=item Arguments: $relname
+=item Arguments: L<$rel_name|DBIx::Class::Relationship>
-=item Return value: Hashref of relation data,
+=item Return Value: L<\%rel_data|DBIx::Class::Relationship::Base/add_relationship>
=back
Returns a hash of relationship information for the specified relationship
-name. The keys/values are as specified for L</add_relationship>.
+name. The keys/values are as specified for L<DBIx::Class::Relationship::Base/add_relationship>.
=cut
sub relationship_info {
- my ($self, $rel) = @_;
- return $self->_relationships->{$rel};
+ #my ($self, $rel) = @_;
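+  # the unary plus keeps perl from autoquoting the bareword as the
+  # literal hash key 'shift', so shift() is actually called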
+ return shift->_relationships->{+shift};
}
=head2 has_relationship
=over 4
-=item Arguments: $rel
+=item Arguments: L<$rel_name|DBIx::Class::Relationship>
-=item Return value: 1/0 (true/false)
+=item Return Value: 1/0 (true/false)
=back
=cut
sub has_relationship {
- my ($self, $rel) = @_;
- return exists $self->_relationships->{$rel};
+ #my ($self, $rel) = @_;
+ return exists shift->_relationships->{+shift};
}
=head2 reverse_relationship_info
=over 4
-=item Arguments: $relname
+=item Arguments: L<$rel_name|DBIx::Class::Relationship>
-=item Return value: Hashref of relationship data
+=item Return Value: L<\%rel_data|DBIx::Class::Relationship::Base/add_relationship>
=back
# having already been inserted. Takes the name of the relationship and a
# hashref of columns of the related object.
sub _pk_depends_on {
- my ($self, $relname, $rel_data) = @_;
+ my ($self, $rel_name, $rel_data) = @_;
- my $relinfo = $self->relationship_info($relname);
+ my $relinfo = $self->relationship_info($rel_name);
# don't assume things if the relationship direction is specified
return $relinfo->{attrs}{is_foreign_key_constraint}
# assume anything that references our PK probably is dependent on us
# rather than vice versa, unless the far side is (a) defined or (b)
# auto-increment
- my $rel_source = $self->related_source($relname);
+ my $rel_source = $self->related_source($rel_name);
foreach my $p ($self->primary_columns) {
if (exists $keyhash->{$p}) {
# list of non-trivial values (normally conditions) returned as part
# of a joinfree condition hash
sub _resolve_condition {
- my ($self, $cond, $as, $for, $relname) = @_;
+ my ($self, $cond, $as, $for, $rel_name) = @_;
my $obj_rel = !!blessed $for;
self_alias => $obj_rel ? $as : $for,
foreign_alias => $relalias,
self_resultsource => $self,
- foreign_relname => $relname || ($obj_rel ? $as : $for),
+ foreign_relname => $rel_name || ($obj_rel ? $as : $for),
self_rowobj => $obj_rel ? $for : undef
});
# FIXME sanity check until things stabilize, remove at some point
$self->throw_exception (
- "A join-free condition returned for relationship '$relname' without a row-object to chain from"
+ "A join-free condition returned for relationship '$rel_name' without a row-object to chain from"
) unless $obj_rel;
# FIXME another sanity check
first { $_ !~ /^\Q$relalias.\E.+/ } keys %$joinfree_cond
) {
$self->throw_exception (
- "The join-free condition returned for relationship '$relname' must be a hash "
+ "The join-free condition returned for relationship '$rel_name' must be a hash "
.'reference with all keys being valid columns on the related result source'
);
}
}
# see which parts of the joinfree cond are conditionals
- my $relcol_list = { map { $_ => 1 } $self->related_source($relname)->columns };
+ my $relcol_list = { map { $_ => 1 } $self->related_source($rel_name)->columns };
for my $c (keys %$joinfree_cond) {
my ($colname) = $c =~ /^ (?: \Q$relalias.\E )? (.+)/x;
elsif (ref $cond eq 'ARRAY') {
my (@ret, $crosstable);
for (@$cond) {
- my ($cond, $crosstab) = $self->_resolve_condition($_, $as, $for, $relname);
+ my ($cond, $crosstab) = $self->_resolve_condition($_, $as, $for, $rel_name);
push @ret, $cond;
$crosstable ||= $crosstab;
}
return wantarray ? (\@ret, $crosstable) : \@ret;
}
else {
- $self->throw_exception ("Can't handle condition $cond for relationship '$relname' yet :(");
+ $self->throw_exception ("Can't handle condition $cond for relationship '$rel_name' yet :(");
}
}
=over 4
-=item Arguments: $relname
+=item Arguments: $rel_name
-=item Return value: $source
+=item Return Value: $source
=back
=over 4
-=item Arguments: $relname
+=item Arguments: $rel_name
-=item Return value: $classname
+=item Return Value: $classname
=back
=over 4
-=item Arguments: None
+=item Arguments: none
-=item Return value: $source_handle
+=item Return Value: L<$source_handle|DBIx::Class::ResultSourceHandle>
=back
=item Arguments: 1/0 (default: 0)
-=item Return value: 1/0
+=item Return Value: 1/0
=back
should not be used. It will be removed before 1.0.
-=head1 AUTHORS
+=head1 AUTHOR AND CONTRIBUTORS
-Matt S. Trout <mst@shadowcatsystems.co.uk>
+See L<AUTHOR|DBIx::Class/AUTHOR> and L<CONTRIBUTORS|DBIx::Class/CONTRIBUTORS> in DBIx::Class
=head1 LICENSE
1;
-=head1 AUTHORS
+=head1 AUTHOR AND CONTRIBUTORS
-Matt S. Trout <mst@shadowcatsystems.co.uk>
+See L<AUTHOR|DBIx::Class/AUTHOR> and L<CONTRIBUTORS|DBIx::Class/CONTRIBUTORS> in DBIx::Class
=head1 LICENSE
use base qw/DBIx::Class/;
-use DBIx::Class::Exception;
use Try::Tiny;
-
use namespace::clean;
use overload
return $class->result_source_instance->name;
}
+=head2 table_class
+
+ __PACKAGE__->table_class('DBIx::Class::ResultSource::Table');
+
+Gets or sets the table class used for construction and validation.
+
+=cut
+
=head2 has_column
if ($obj->has_column($col)) { ... }
1;
-=head1 AUTHORS
+=head1 AUTHOR AND CONTRIBUTORS
-Matt S. Trout <mst@shadowcatsystems.co.uk>
+See L<AUTHOR|DBIx::Class/AUTHOR> and L<CONTRIBUTORS|DBIx::Class/CONTRIBUTORS> in DBIx::Class
=head1 LICENSE
use base qw/DBIx::Class/;
-use DBIx::Class::Exception;
use Scalar::Util 'blessed';
use List::Util 'first';
use Try::Tiny;
This class is responsible for defining and doing basic operations on rows
derived from L<DBIx::Class::ResultSource> objects.
-Row objects are returned from L<DBIx::Class::ResultSet>s using the
+Result objects are returned from L<DBIx::Class::ResultSet>s using the
L<create|DBIx::Class::ResultSet/create>, L<find|DBIx::Class::ResultSet/find>,
L<next|DBIx::Class::ResultSet/next> and L<all|DBIx::Class::ResultSet/all> methods,
as well as invocations of 'single' (
L<belongs_to|DBIx::Class::Relationship/belongs_to>,
L<has_one|DBIx::Class::Relationship/has_one> or
L<might_have|DBIx::Class::Relationship/might_have>)
-relationship accessors of L<DBIx::Class::Row> objects.
+relationship accessors of L<Result|DBIx::Class::Manual::ResultClass> objects.
+
+=head1 NOTE
+
+All "Row objects" derived from a Schema-attached L<DBIx::Class::ResultSet>
+object (such as a typical
+C<< L<search|DBIx::Class::ResultSet/search>->L<next|DBIx::Class::ResultSet/next> >>
+call) are actually Result
+instances, based on your application's
+L<Result class|DBIx::Class::Manual::Glossary/Result_class>.
+
+L<DBIx::Class::Row> implements most of the row-based communication with the
+underlying storage, but a Result class B<should not inherit from it directly>.
+Usually, Result classes inherit from L<DBIx::Class::Core>, which in turn
+combines the methods from several classes, one of them being
+L<DBIx::Class::Row>. Therefore, while many of the methods available to a
+L<DBIx::Class::Core>-derived Result class are described in the following
+documentation, it does not detail all of the methods available to Result
+objects. Refer to L<DBIx::Class::Manual::ResultClass> for more info.
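+
+For example, a minimal Result class (all names are illustrative):
+
+  package MyApp::Schema::Result::Artist;
+  use base 'DBIx::Class::Core';   # not DBIx::Class::Row directly
+
+  __PACKAGE__->table('artist');
+  __PACKAGE__->add_columns(qw/ artistid name /);
+  __PACKAGE__->set_primary_key('artistid');
+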
=head1 METHODS
=item Arguments: \%attrs or \%colsandvalues
-=item Returns: A Row object
+=item Return Value: L<$result|DBIx::Class::Manual::ResultClass>
=back
-While you can create a new row object by calling C<new> directly on
+While you can create a new result object by calling C<new> directly on
this class, you are better off calling it on a
L<DBIx::Class::ResultSet> object.
next;
}
}
- $new->throw_exception("No such column $key on $class")
+ $new->throw_exception("No such column '$key' on $class")
unless $class->has_column($key);
$new->store_column($key => $attrs->{$key});
}
return $new;
}
+=head2 $column_accessor
+
+ # Each pair does the same thing
+
+ # (un-inflated, regular column)
+ my $val = $row->get_column('first_name');
+ my $val = $row->first_name;
+
+ $row->set_column('first_name' => $val);
+ $row->first_name($val);
+
+ # (inflated column via DBIx::Class::InflateColumn::DateTime)
+ my $val = $row->get_inflated_column('last_modified');
+ my $val = $row->last_modified;
+
+ $row->set_inflated_column('last_modified' => $val);
+ $row->last_modified($val);
+
+=over
+
+=item Arguments: $value?
+
+=item Return Value: $value
+
+=back
+
+A column accessor method is created for each column, which is used for
+getting/setting the value for that column.
+
+The actual method name is based on the
+L<accessor|DBIx::Class::ResultSource/accessor> name given during the
+L<Result Class|DBIx::Class::Manual::ResultClass> L<column definition
+|DBIx::Class::ResultSource/add_columns>. Like L</set_column>, this
+will not store the data in the database until L</insert> or L</update>
+is called on the row.
+
=head2 insert
$row->insert;
=item Arguments: none
-=item Returns: The Row object
+=item Return Value: L<$result|DBIx::Class::Manual::ResultClass>
=back
it isn't already in there. Returns the object itself. To insert an
entirely new row into the database, use L<DBIx::Class::ResultSet/create>.
-To fetch an uninserted row object, call
-L<new|DBIx::Class::ResultSet/new> on a resultset.
+To fetch an uninserted result object, call
+L<new_result|DBIx::Class::ResultSet/new_result> on a resultset.
This will also insert any uninserted, related objects held inside this
one, see L<DBIx::Class::ResultSet/create> for more details.
=item Arguments: none or 1|0
-=item Returns: 1|0
+=item Return Value: 1|0
=back
L<DBIx::Class::ResultSet/create> or L<DBIx::Class::ResultSet/insert>
are used.
-Creating a row object using L<DBIx::Class::ResultSet/new>, or calling
-L</delete> on one, sets it to false.
+Creating a result object using L<DBIx::Class::ResultSet/new_result>, or
+calling L</delete> on one, sets it to false.
=cut
=item Arguments: none or a hashref
-=item Returns: The Row object
+=item Return Value: L<$result|DBIx::Class::Manual::ResultClass>
=back
-Throws an exception if the row object is not yet in the database,
+Throws an exception if the result object is not yet in the database,
according to L</in_storage>.
This method issues an SQL UPDATE query to commit any changes to the
$row->update({ last_modified => \'NOW()' });
The update will pass the values verbatim into SQL. (See
-L<SQL::Abstract> docs). The values in your Row object will NOT change
+L<SQL::Abstract> docs). The values in your Result object will NOT change
as a result of the update call, if you want the object to be updated
with the actual values from the database, call L</discard_changes>
after the update.
=item Arguments: none
-=item Returns: The Row object
+=item Return Value: L<$result|DBIx::Class::Manual::ResultClass>
=back
rows.
If you delete an object within a txn_do() (see L<DBIx::Class::Storage/txn_do>)
-and the transaction subsequently fails, the row object will remain marked as
+and the transaction subsequently fails, the result object will remain marked as
not being in storage. If you know for a fact that the object is still in
storage (i.e. by inspecting the cause of the transaction's failure), you can
use C<< $obj->in_storage(1) >> to restore consistency between the object and
=item Arguments: $columnname
-=item Returns: The value of the column
+=item Return Value: The value of the column
=back
Throws an exception if the column name given doesn't exist according
to L<has_column|DBIx::Class::ResultSource/has_column>.
-Returns a raw column value from the row object, if it has already
+Returns a raw column value from the result object, if it has already
been fetched from the database or set by an accessor.
If an L<inflated value|DBIx::Class::InflateColumn> has been set, it
=item Arguments: $columnname
-=item Returns: 0|1
+=item Return Value: 0|1
=back
=item Arguments: none
-=item Returns: A hash of columnname, value pairs.
+=item Return Value: A hash of columnname, value pairs.
=back
=item Arguments: none
-=item Returns: A hash of column, value pairs
+=item Return Value: A hash of column, value pairs
=back
=item Arguments: $columnname
-=item Returns: undefined
+=item Return Value: not defined
=back
=item Arguments: none
-=item Returns: A hash of column, object|value pairs
+=item Return Value: A hash of column, object|value pairs
=back
=item Arguments: $columnname, $value
-=item Returns: $value
+=item Return Value: $value
=back
If passed an object or reference as a value, this method will happily
attempt to store it, and a later L</insert> or L</update> will try to
stringify/numify as appropriate. To set an object to be deflated
-instead, see L</set_inflated_columns>.
+instead, see L</set_inflated_columns>, or better yet, use L</$column_accessor>.
=cut
=item Arguments: \%columndata
-=item Returns: The Row object
+=item Return Value: L<$result|DBIx::Class::Manual::ResultClass>
=back
=item Arguments: \%columndata
-=item Returns: The Row object
+=item Return Value: L<$result|DBIx::Class::Manual::ResultClass>
=back
Sets more than one column value at once. Any inflated values are
deflated and the raw values stored.
-Any related values passed as Row objects, using the relation name as a
+Any related values passed as Result objects, using the relation name as a
key, are reduced to the appropriate foreign key values and stored. If
-instead of related row objects, a hashref of column, value data is
+instead of related result objects, a hashref of column, value data is
passed, the related object is created first and then stored.
Will even accept arrayrefs of data as a value to a
=item Arguments: \%replacementdata
-=item Returns: The Row object copy
+=item Return Value: L<$result|DBIx::Class::Manual::ResultClass> copy
=back
=item Arguments: $columnname, $value
-=item Returns: The value sent to storage
+=item Return Value: The value sent to storage
=back
method is used internally by L</set_column> which you should probably
be using.
-This is the lowest level at which data is set on a row object,
+This is the lowest level at which data is set on a result object,
extend this method to catch all data setting methods.
=cut
=over
-=item Arguments: $result_source, \%columndata, \%prefetcheddata
+=item Arguments: L<$result_source|DBIx::Class::ResultSource>, \%columndata, \%prefetcheddata
-=item Returns: A Row object
+=item Return Value: L<$result|DBIx::Class::Manual::ResultClass>
=back
All L<DBIx::Class::ResultSet> methods that retrieve data from the
-database and turn it into row objects call this method.
+database and turn it into result objects call this method.
Extend this method in your Result classes to hook into this process,
for example to rebless the result into a different class.
=item Arguments: none
-=item Returns: Result of update or insert operation
+=item Return Value: Result of update or insert operation
=back
=item Arguments: none
-=item Returns: 0|1 or @columnnames
+=item Return Value: 0|1 or @columnnames
=back
=item Arguments: $columnname
-=item Returns: 0|1
+=item Return Value: 0|1
=back
=over
-=item Arguments: $result_source_instance
+=item Arguments: L<$result_source?|DBIx::Class::ResultSource>
-=item Returns: a ResultSource instance
+=item Return Value: L<$result_source|DBIx::Class::ResultSource>
=back
=item Arguments: $columnname, \%columninfo
-=item Returns: undefined
+=item Return Value: not defined
=back
=item Arguments: \%attrs
-=item Returns: A Row object
+=item Return Value: A Result object
=back
-Fetches a fresh copy of the Row object from the database and returns it.
+Fetches a fresh copy of the Result object from the database and returns it.
Throws an exception if a proper WHERE clause identifying the database row
cannot be constructed (i.e. if the original object does not contain its
entire
). If passed the \%attrs argument, these attributes are first applied to
the resultset used to find the row.
-This copy can then be used to compare to an existing row object, to
+This copy can then be used to compare to an existing result object, to
determine if any changes have been made in the database since it was
created.
-To just update your Row object with any latest changes from the
+To just update your Result object with any latest changes from the
database, use L</discard_changes> instead.
The \%attrs argument should be compatible with
return $resultset->find($self->_storage_ident_condition);
}
-=head2 discard_changes ($attrs?)
+=head2 discard_changes
$row->discard_changes
=item Arguments: none or $attrs
-=item Returns: self (updates object in-place)
+=item Return Value: self (updates object in-place)
=back
Returns the primary key(s) for a row. Can't be called as a class method.
Actually implemented in L<DBIx::Class::PK>
-=head1 AUTHORS
+=head1 AUTHOR AND CONTRIBUTORS
-Matt S. Trout <mst@shadowcatsystems.co.uk>
+See L<AUTHOR|DBIx::Class/AUTHOR> and L<CONTRIBUTORS|DBIx::Class/CONTRIBUTORS> in DBIx::Class
=head1 LICENSE
package # Hide from PAUSE
DBIx::Class::SQLAHacks;
+use warnings;
+use strict;
+
use base qw/DBIx::Class::SQLMaker/;
1;
package # Hide from PAUSE
DBIx::Class::SQLAHacks::MSSQL;
+use warnings;
+use strict;
+
use base qw( DBIx::Class::SQLMaker::MSSQL );
1;
package # Hide from PAUSE
DBIx::Class::SQLAHacks::MySQL;
+use warnings;
+use strict;
+
use base qw( DBIx::Class::SQLMaker::MySQL );
1;
package # Hide from PAUSE
DBIx::Class::SQLAHacks::Oracle;
+use warnings;
+use strict;
+
use base qw( DBIx::Class::SQLMaker::Oracle );
1;
package # Hide from PAUSE
DBIx::Class::SQLAHacks::OracleJoins;
+use warnings;
+use strict;
+
use base qw( DBIx::Class::SQLMaker::OracleJoins );
1;
package # Hide from PAUSE
DBIx::Class::SQLAHacks::SQLite;
+use warnings;
+use strict;
+
use base qw( DBIx::Class::SQLMaker::SQLite );
1;
=item * Support of C<...FOR UPDATE> type of select statement modifiers
-=item * The L</-ident> operator
-
-=item * The L</-value> operator
-
=back
=cut
use Sub::Name 'subname';
use DBIx::Class::Carp;
-use DBIx::Class::Exception;
use namespace::clean;
__PACKAGE__->mk_group_accessors (simple => qw/quote_char name_sep limit_dialect/);
my($func) = (caller(1))[3];
__PACKAGE__->throw_exception("[$func] Fatal: " . join ('', @_));
};
-
- # Current SQLA pollutes its namespace - clean for the time being
- namespace::clean->clean_subroutines(qw/SQL::Abstract carp croak confess/);
}
# the "oh noes offset/top without limit" constant
);
}
-sub new {
- my $self = shift->next::method(@_);
-
- # use the same coderefs, they are prepared to handle both cases
- my @extra_dbic_syntax = (
- { regex => qr/^ ident $/xi, handler => '_where_op_IDENT' },
- { regex => qr/^ value $/xi, handler => '_where_op_VALUE' },
- );
-
- push @{$self->{special_ops}}, @extra_dbic_syntax;
- push @{$self->{unary_ops}}, @extra_dbic_syntax;
-
- $self;
-}
-
-sub _where_op_IDENT {
- my $self = shift;
- my ($op, $rhs) = splice @_, -2;
- if (ref $rhs) {
- $self->throw_exception("-$op takes a single scalar argument (a quotable identifier)");
- }
-
- # in case we are called as a top level special op (no '=')
- my $lhs = shift;
-
- $_ = $self->_convert($self->_quote($_)) for ($lhs, $rhs);
-
- return $lhs
- ? "$lhs = $rhs"
- : $rhs
- ;
-}
-
-sub _where_op_VALUE {
- my $self = shift;
- my ($op, $rhs) = splice @_, -2;
-
- # in case we are called as a top level special op (no '=')
- my $lhs = shift;
-
- my @bind = [
- ($lhs || $self->{_nested_func_lhs} || $self->throw_exception("Unable to find bindtype for -value $rhs") ),
- $rhs
- ];
-
- return $lhs
- ? (
- $self->_convert($self->_quote($lhs)) . ' = ' . $self->_convert('?'),
- @bind
- )
- : (
- $self->_convert('?'),
- @bind,
- )
- ;
-}
-
sub _where_op_NEST {
carp_unique ("-nest in search conditions is deprecated, you most probably wanted:\n"
.q|{..., -and => [ \%cond0, \@cond1, \'cond2', \[ 'cond3', [ col => bind ] ], etc. ], ... }|
($sql, @bind) = $self->next::method ($table, $fields, $where);
- my $limiter =
- $self->can ('emulate_limit') # also backcompat hook from SQLA::Limit
- ||
- do {
- my $dialect = $self->limit_dialect
- or $self->throw_exception( "Unable to generate SQL-limit - no limit dialect specified on $self, and no emulate_limit method found" );
- $self->can ("_$dialect")
- or $self->throw_exception(__PACKAGE__ . " does not implement the requested dialect '$dialect'");
- }
- ;
+ my $limiter;
+
+ if( $limiter = $self->can ('emulate_limit') ) {
+ carp_unique(
+ 'Support for the legacy emulate_limit() mechanism inherited from '
+ . 'SQL::Abstract::Limit has been deprecated, and will be removed when '
+ . 'DBIC transitions to Data::Query. If your code uses this type of '
+ . 'limit specification please file an RT and provide the source of '
+ . 'your emulate_limit() implementation, so an acceptable upgrade-path '
+ . 'can be devised'
+ );
+ }
+ else {
+ my $dialect = $self->limit_dialect
+ or $self->throw_exception( "Unable to generate SQL-limit - no limit dialect specified on $self" );
+
+ $limiter = $self->can ("_$dialect")
+ or $self->throw_exception(__PACKAGE__ . " does not implement the requested dialect '$dialect'");
+ }
$sql = $self->$limiter (
$sql,
};
sub _lock_select {
my ($self, $type) = @_;
- my $sql = $for_syntax->{$type} || $self->throw_exception( "Unknown SELECT .. FOR type '$type' requested" );
+
+ my $sql;
+ if (ref($type) eq 'SCALAR') {
+ $sql = "FOR $$type";
+ }
+ else {
+ $sql = $for_syntax->{$type} || $self->throw_exception( "Unknown SELECT .. FOR type '$type' requested" );
+ }
+
return " $sql";
}
return $self->_recurse_where($cond);
}
-1;
-
-=head1 OPERATORS
-
-=head2 -ident
-
-Used to explicitly specify an SQL identifier. Takes a plain string as value
-which is then invariably treated as a column name (and is being properly
-quoted if quoting has been requested). Most useful for comparison of two
-columns:
-
- my %where = (
- priority => { '<', 2 },
- requestor => { -ident => 'submitter' }
- );
-
-which results in:
-
- $stmt = 'WHERE "priority" < ? AND "requestor" = "submitter"';
- @bind = ('2');
-
-=head2 -value
+# This is hideously ugly, but SQLA does not understand multicol IN expressions
+# FIXME TEMPORARY - DQ should have native syntax for this
+# moved here to raise API questions
+#
+# !!! EXPERIMENTAL API !!! WILL CHANGE !!!
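+#
+# Takes a list of column names (or literal SQL) on the left and a subquery
+# or literal on the right, and renders a parenthesized multicolumn IN,
+# roughly:  ( col1, col2 ) IN ( SELECT ... )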
+sub _where_op_multicolumn_in {
+ my ($self, $lhs, $rhs) = @_;
+
+ if (! ref $lhs or ref $lhs eq 'ARRAY') {
+ my (@sql, @bind);
+ for (ref $lhs ? @$lhs : $lhs) {
+ if (! ref $_) {
+ push @sql, $self->_quote($_);
+ }
+ elsif (ref $_ eq 'SCALAR') {
+ push @sql, $$_;
+ }
+ elsif (ref $_ eq 'REF' and ref $$_ eq 'ARRAY') {
+ my ($s, @b) = @$$_;
+ push @sql, $s;
+ push @bind, @b;
+ }
+ else {
+ $self->throw_exception("ARRAY of @{[ ref $_ ]}es unsupported for multicolumn IN lhs...");
+ }
+ }
+ $lhs = \[ join(', ', @sql), @bind];
+ }
+ elsif (ref $lhs eq 'SCALAR') {
+ $lhs = \[ $$lhs ];
+ }
+ elsif (ref $lhs eq 'REF' and ref $$lhs eq 'ARRAY' ) {
+ # noop
+ }
+ else {
+ $self->throw_exception( ref($lhs) . "es unsupported for multicolumn IN lhs...");
+ }
-The -value operator signals that the argument to the right is a raw bind value.
-It will be passed straight to DBI, without invoking any of the SQL::Abstract
-condition-parsing logic. This allows you to, for example, pass an array as a
-column value for databases that support array datatypes, e.g.:
+ # is this proper...?
+ $rhs = \[ $self->_recurse_where($rhs) ];
- my %where = (
- array => { -value => [1, 2, 3] }
- );
+ for ($lhs, $rhs) {
+ $$_->[0] = "( $$_->[0] )"
+ unless $$_->[0] =~ /^ \s* \( .* \) \s* $/xs;
+ }
-which results in:
+ \[ join( ' IN ', shift @$$lhs, shift @$$rhs ), @$$lhs, @$$rhs ];
+}
- $stmt = 'WHERE array = ?';
- @bind = ([1, 2, 3]);
+1;
=head1 AUTHORS
# Whatever order bindvals there are, they will be re-aliased and
# reselected, and need to show up at the end of the initial inner select
push @{$self->{select_bind}}, @{$self->{order_bind}};
-
- # if this is a part of something bigger, we need to add back all
- # the extra order_by's, as they may be relied upon by the outside
- # of a prefetch or something
- if ($rs_attrs->{_is_internal_subuery}) {
- $sq_attrs->{selection_outer} .= sprintf ", $extra_order_sel->{$_} AS $_"
- for sort
- { $extra_order_sel->{$a} cmp $extra_order_sel->{$b} }
- grep { $_ !~ /[^\w\-]/ } # ignore functions
- keys %$extra_order_sel
- ;
- }
}
# and this is order re-alias magic
next if $in_sel_index->{$chunk};
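+ # (counter is zero-padded so the generated aliases sort lexically
+ # in creation order)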
$extra_order_sel->{$chunk} ||= $self->_quote (
- 'ORDER__BY__' . scalar keys %{$extra_order_sel||{}}
+ 'ORDER__BY__' . sprintf '%03d', scalar keys %{$extra_order_sel||{}}
);
}
package # Hide from PAUSE
DBIx::Class::SQLMaker::MSSQL;
+use warnings;
+use strict;
+
use base qw( DBIx::Class::SQLMaker );
#
package # Hide from PAUSE
DBIx::Class::SQLMaker::MySQL;
+use warnings;
+use strict;
+
use base qw( DBIx::Class::SQLMaker );
#
return $self->next::method($join_type);
}
+my $force_double_subq;
+$force_double_subq = sub {
+ my ($self, $sql) = @_;
+
+ require Text::Balanced;
+ my $new_sql;
+ while (1) {
+
+ my ($prefix, $parenthesized);
+
+ ($parenthesized, $sql, $prefix) = do {
+ # idiotic design - writes to $@ but *DOES NOT* throw exceptions
+ local $@;
+ Text::Balanced::extract_bracketed( $sql, '()', qr/[^\(]*/ );
+ };
+
+ # this is how an error is indicated, in addition to crapping in $@
+ last unless $parenthesized;
+
+ if ($parenthesized =~ $self->{_modification_target_referenced_re}) {
+ # is this a select subquery?
+ if ( $parenthesized =~ /^ \( \s* SELECT \s+ /xi ) {
+ $parenthesized = "( SELECT * FROM $parenthesized `_forced_double_subquery` )";
+ }
+ # then drill down until we find it (if at all)
+ else {
+ $parenthesized =~ s/^ \( (.+) \) $/$1/x;
+ $parenthesized = join ' ', '(', $self->$force_double_subq( $parenthesized ), ')';
+ }
+ }
+
+ $new_sql .= $prefix . $parenthesized;
+ }
+
+ return $new_sql . $sql;
+};
+
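+# MySQL refuses to let an UPDATE/DELETE statement select from its own
+# target table in a subquery. When the generated SQL does so, wrap the
+# offending SELECT in an extra derived table (the "double subquery"
+# trick), which MySQL materializes and accepts. Roughly:
+#   ... WHERE id IN ( SELECT id FROM t ... )
+# becomes
+#   ... WHERE id IN ( SELECT * FROM ( SELECT id FROM t ... ) `_forced_double_subquery` )
+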
+sub update {
+ my $self = shift;
+
+ # short-circuit unless understood identifier
+ return $self->next::method(@_) unless $self->{_modification_target_referenced_re};
+
+ my ($sql, @bind) = $self->next::method(@_);
+
+ $sql = $self->$force_double_subq($sql)
+ if $sql =~ $self->{_modification_target_referenced_re};
+
+ return ($sql, @bind);
+}
+
+sub delete {
+ my $self = shift;
+
+ # short-circuit unless understood identifier
+ return $self->next::method(@_) unless $self->{_modification_target_referenced_re};
+
+ my ($sql, @bind) = $self->next::method(@_);
+
+ $sql = $self->$force_double_subq($sql)
+ if $sql =~ $self->{_modification_target_referenced_re};
+
+ return ($sql, @bind);
+}
+
# LOCK IN SHARE MODE
my $for_syntax = {
update => 'FOR UPDATE',
package # Hide from PAUSE
DBIx::Class::SQLMaker::SQLite;
+use warnings;
+use strict;
+
use base qw( DBIx::Class::SQLMaker );
#
use strict;
use warnings;
-use DBIx::Class::Exception;
use DBIx::Class::Carp;
use Try::Tiny;
use Scalar::Util qw/weaken blessed/;
use Sub::Name 'subname';
use B 'svref_2object';
-use DBIx::Class::GlobalDestruction;
+use Devel::GlobalDestruction;
use namespace::clean;
use base qw/DBIx::Class/;
my $ns = shift || ref $proto || $proto;
require Module::Find;
- my @mods = Module::Find::findallmod($ns);
- # try to untaint module names. mods where this fails
- # are left alone so we don't have to change the old behavior
- no locale; # localized \w doesn't untaint expression
- return map { $_ =~ m/^( (?:\w+::)* \w+ )$/x ? $1 : $_ } @mods;
+ # untaint the module names - a regex capture clears perl's taint flag
+ return map { $_ =~ /(.+)/ } Module::Find::findallmod($ns);
}
# returns a hash of $shortname => $fullname for every package
=item Arguments: $storage_type|{$storage_type, \%args}
-=item Return value: $storage_type|{$storage_type, \%args}
+=item Return Value: $storage_type|{$storage_type, \%args}
=item Default value: DBIx::Class::Storage::DBI
=item Arguments: $code_reference
-=item Return value: $code_reference
+=item Return Value: $code_reference
=item Default value: None
=over 4
-=item Arguments: $source_name
+=item Arguments: L<$source_name|DBIx::Class::ResultSource/source_name>
-=item Return Value: $resultset
+=item Return Value: L<$resultset|DBIx::Class::ResultSet>
=back
=cut
sub resultset {
- my ($self, $moniker) = @_;
+ my ($self, $source_name) = @_;
$self->throw_exception('resultset() expects a source name')
- unless defined $moniker;
- return $self->source($moniker)->resultset;
+ unless defined $source_name;
+ return $self->source($source_name)->resultset;
}
=head2 sources
=over 4
-=item Return Value: @source_names
+=item Return Value: L<@source_names|DBIx::Class::ResultSource/source_name>
=back
=over 4
-=item Arguments: $source_name
+=item Arguments: L<$source_name|DBIx::Class::ResultSource/source_name>
-=item Return Value: $result_source
+=item Return Value: L<$result_source|DBIx::Class::ResultSource>
=back
$self->throw_exception("source() expects a source name")
unless @_;
- my $moniker = shift;
+ my $source_name = shift;
my $sreg = $self->source_registrations;
- return $sreg->{$moniker} if exists $sreg->{$moniker};
+ return $sreg->{$source_name} if exists $sreg->{$source_name};
# if we got here, they probably passed a full class name
- my $mapped = $self->class_mappings->{$moniker};
- $self->throw_exception("Can't find source for ${moniker}")
+ my $mapped = $self->class_mappings->{$source_name};
+ $self->throw_exception("Can't find source for ${source_name}")
unless $mapped && exists $sreg->{$mapped};
return $sreg->{$mapped};
}
=over 4
-=item Arguments: $source_name
+=item Arguments: L<$source_name|DBIx::Class::ResultSource/source_name>
=item Return Value: $classname
=cut
sub class {
- my ($self, $moniker) = @_;
- return $self->source($moniker)->result_class;
+ return shift->source(shift)->result_class;
}
=head2 txn_do
=over 4
-=item Arguments: $source_name, \@data;
+=item Arguments: L<$source_name|DBIx::Class::ResultSource/source_name>, [ \@column_list, \@row_values+ ] | [ \%col_data+ ]
-=item Return value: \@$objects | nothing
+=item Return Value: L<\@result_objects|DBIx::Class::Manual::ResultClass> (scalar context) | L<@result_objects|DBIx::Class::Manual::ResultClass> (list context)
=back
-Pass this method a resultsource name, and an arrayref of
-arrayrefs. The arrayrefs should contain a list of column names,
-followed by one or many sets of matching data for the given columns.
-
-In void context, C<insert_bulk> in L<DBIx::Class::Storage::DBI> is used
-to insert the data, as this is a fast method. However, insert_bulk currently
-assumes that your datasets all contain the same type of values, using scalar
-references in a column in one row, and not in another will probably not work.
+A convenience shortcut to L<DBIx::Class::ResultSet/populate>. Equivalent to:
-Otherwise, each set of data is inserted into the database using
-L<DBIx::Class::ResultSet/create>, and a arrayref of the resulting row
-objects is returned.
+ $schema->resultset($source_name)->populate([...]);
-e.g.
+=over 4
- $schema->populate('Artist', [
- [ qw/artistid name/ ],
- [ 1, 'Popular Band' ],
- [ 2, 'Indie Band' ],
- ...
- ]);
+=item NOTE
-Since wantarray context is basically the same as looping over $rs->create(...)
-you won't see any performance benefits and in this case the method is more for
-convenience. Void context sends the column information directly to storage
-using <DBI>s bulk insert method. So the performance will be much better for
-storages that support this method.
+The context of this method call has an important effect on what is
+submitted to storage. In void context data is fed directly to fastpath
+insertion routines provided by the underlying storage (most often
+L<DBI/execute_for_fetch>), bypassing the L<new|DBIx::Class::Row/new> and
+L<insert|DBIx::Class::Row/insert> calls on the
+L<Result|DBIx::Class::Manual::ResultClass> class, including any
+augmentation of these methods provided by components. For example if you
+are using something like L<DBIx::Class::UUIDColumns> to create primary
+keys for you, you will find that your PKs are empty. In this case you
+will have to explicitly force scalar or list context in order to create
+those values.
-Because of this difference in the way void context inserts rows into your
-database you need to note how this will effect any loaded components that
-override or augment insert. For example if you are using a component such
-as L<DBIx::Class::UUIDColumns> to populate your primary keys you MUST use
-wantarray context if you want the PKs automatically created.
+=back
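+
+For example (resultset and column names are illustrative):
+
+  # void context - fastpath bulk insert, Row-level hooks are bypassed
+  $schema->populate('Artist', [
+    [ qw/artistid name/ ],
+    [ 1, 'Popular Band' ],
+  ]);
+
+  # list context - a full create() per row, components run as usual
+  my @artists = $schema->populate('Artist', [ { name => 'Indie Band' } ]);
+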
=cut
sub populate {
my ($self, $name, $data) = @_;
- if(my $rs = $self->resultset($name)) {
- if(defined wantarray) {
- return $rs->populate($data);
- } else {
- $rs->populate($data);
- }
- } else {
- $self->throw_exception("$name is not a resultset");
- }
+ my $rs = $self->resultset($name)
+ or $self->throw_exception("'$name' is not a resultset");
+
+ return $rs->populate($data);
}
=head2 connection
# my ($self, $target, $base) = @_;
# my $schema = $self->clone;
-# foreach my $moniker ($schema->sources) {
-# my $source = $schema->source($moniker);
-# my $target_class = "${target}::${moniker}";
+# foreach my $source_name ($schema->sources) {
+# my $source = $schema->source($source_name);
+# my $target_class = "${target}::${source_name}";
# $self->inject_base(
# $target_class => $source->result_class, ($base ? $base : ())
# );
# $source->result_class($target_class);
# $target_class->result_source_instance($source)
# if $target_class->can('result_source_instance');
-# $schema->register_source($moniker, $source);
+# $schema->register_source($source_name, $source);
# }
# return $schema;
# }
use warnings qw/redefine/;
no strict qw/refs/;
- foreach my $moniker ($self->sources) {
- my $orig_source = $self->source($moniker);
+ foreach my $source_name ($self->sources) {
+ my $orig_source = $self->source($source_name);
- my $target_class = "${target}::${moniker}";
+ my $target_class = "${target}::${source_name}";
$self->inject_base($target_class, $orig_source->result_class, ($base || ()) );
# register_source examines result_class, and then returns us a clone
- my $new_source = $schema->register_source($moniker, bless
+ my $new_source = $schema->register_source($source_name, bless
{ %$orig_source, result_class => $target_class },
ref $orig_source,
);
$self->class_mappings({ %{$from->class_mappings} });
$self->source_registrations({ %{$from->source_registrations} });
- foreach my $moniker ($from->sources) {
- my $source = $from->source($moniker);
+ foreach my $source_name ($from->sources) {
+ my $source = $from->source($source_name);
my $new = $source->new($source);
# we use extra here as we want to leave the class_mappings as they are
# but overwrite the source_registrations entry with the new source
- $self->register_extra_source($moniker => $new);
+ $self->register_extra_source($source_name => $new);
}
if ($from->storage) {
=cut
-my $false_exception_action_warned;
sub throw_exception {
my $self = shift;
." (original error: $_[0])"
);
}
- elsif(! $false_exception_action_warned++) {
- carp (
- "The exception_action handler installed on $self returned false instead"
- .' of throwing an exception. This behavior has been deprecated, adjust your'
- .' handler to always rethrow the supplied error.'
- );
- }
+
+ carp_unique (
+ "The exception_action handler installed on $self returned false instead"
+ .' of throwing an exception. This behavior has been deprecated, adjust your'
+ .' handler to always rethrow the supplied error.'
+ );
}
DBIx::Class::Exception->throw($_[0], $self->stacktrace);
=item Arguments: See L<DBIx::Class::Storage::DBI/deployment_statements>
-=item Return value: $listofstatements
+=item Return Value: $listofstatements
=back
=item Arguments: $database-type, $version, $directory, $preversion
-=item Return value: $normalised_filename
+=item Return Value: $normalised_filename
=back
=over 4
-=item Arguments: $moniker, $component_class
+=item Arguments: $source_name, $component_class
=back
Registers a class which isa DBIx::Class::ResultSourceProxy. Equivalent to
calling:
- $schema->register_source($moniker, $component_class->result_source_instance);
+ $schema->register_source($source_name, $component_class->result_source_instance);
=cut
sub register_class {
- my ($self, $moniker, $to_register) = @_;
- $self->register_source($moniker => $to_register->result_source_instance);
+ my ($self, $source_name, $to_register) = @_;
+ $self->register_source($source_name => $to_register->result_source_instance);
}
=head2 register_source
=over 4
-=item Arguments: $moniker, $result_source
+=item Arguments: $source_name, L<$result_source|DBIx::Class::ResultSource>
=back
This method is called by L</register_class>.
Registers the L<DBIx::Class::ResultSource> in the schema with the given
-moniker.
+source name.
=cut
=over 4
-=item Arguments: $moniker
+=item Arguments: $source_name
=back
-Removes the L<DBIx::Class::ResultSource> from the schema for the given moniker.
+Removes the L<DBIx::Class::ResultSource> from the schema for the given source name.
=cut
=over 4
-=item Arguments: $moniker, $result_source
+=item Arguments: $source_name, L<$result_source|DBIx::Class::ResultSource>
=back
sub register_extra_source { shift->_register_source(@_, { extra => 1 }) }
sub _register_source {
- my ($self, $moniker, $source, $params) = @_;
+ my ($self, $source_name, $source, $params) = @_;
- $source = $source->new({ %$source, source_name => $moniker });
+ $source = $source->new({ %$source, source_name => $source_name });
$source->schema($self);
weaken $source->{schema} if ref($self);
my %reg = %{$self->source_registrations};
- $reg{$moniker} = $source;
+ $reg{$source_name} = $source;
$self->source_registrations(\%reg);
return $source if $params->{extra};
if (
exists $map{$rs_class}
and
- $map{$rs_class} ne $moniker
+ $map{$rs_class} ne $source_name
and
$rsrc ne $_[2] # orig_source
) {
;
}
- $map{$rs_class} = $moniker;
+ $map{$rs_class} = $source_name;
$self->class_mappings(\%map);
}
my $self = shift;
my $srcs = $self->source_registrations;
- for my $moniker (keys %$srcs) {
+ for my $source_name (keys %$srcs) {
# find first source that is not about to be GCed (someone other than $self
# holds a reference to it) and reattach to it, weakening our own link
#
# which will serve as a signal to not try doing anything else
# however beware - on older perls the exception seems randomly untrappable
# due to some weird race condition during thread joining :(((
- if (ref $srcs->{$moniker} and svref_2object($srcs->{$moniker})->REFCNT > 1) {
+ if (ref $srcs->{$source_name} and svref_2object($srcs->{$source_name})->REFCNT > 1) {
local $@;
eval {
- $srcs->{$moniker}->schema($self);
- weaken $srcs->{$moniker};
+ $srcs->{$source_name}->schema($self);
+ weaken $srcs->{$source_name};
1;
} or do {
$global_phase_destroy = 1;
}
sub _unregister_source {
- my ($self, $moniker) = @_;
+ my ($self, $source_name) = @_;
my %reg = %{$self->source_registrations};
- my $source = delete $reg{$moniker};
+ my $source = delete $reg{$source_name};
$self->source_registrations(\%reg);
if ($source->result_class) {
my %map = %{$self->class_mappings};
if ($self eq $target) {
# Pathological case, largely caused by the docs on early C::M::DBIC::Plain
- foreach my $moniker ($self->sources) {
- my $source = $self->source($moniker);
+ foreach my $source_name ($self->sources) {
+ my $source = $self->source($source_name);
my $class = $source->result_class;
$self->inject_base($class, $base);
$class->mk_classdata(resultset_instance => $source->resultset);
}
$schema->connection(@info);
- foreach my $moniker ($schema->sources) {
- my $source = $schema->source($moniker);
+ foreach my $source_name ($schema->sources) {
+ my $source = $schema->source($source_name);
my $class = $source->result_class;
- #warn "$moniker $class $source ".$source->storage;
+ #warn "$source_name $class $source ".$source->storage;
$class->mk_classdata(result_source_instance => $source);
$class->mk_classdata(resultset_instance => $source->resultset);
$class->mk_classdata(class_resolver => $schema);
1;
-=head1 AUTHORS
+=head1 AUTHOR AND CONTRIBUTORS
-Matt S. Trout <mst@shadowcatsystems.co.uk>
+See L<AUTHOR|DBIx::Class/AUTHOR> and L<CONTRIBUTORS|DBIx::Class/CONTRIBUTORS> in DBIx::Class
=head1 LICENSE
=over 4
-=item Returns: a list of version numbers, ordered from lowest to highest
+=item Return Value: a list of version numbers, ordered from lowest to highest
=back
1;
-=head1 AUTHORS
+=head1 AUTHOR AND CONTRIBUTORS
-Jess Robinson <castaway@desert-island.me.uk>
-Luke Saunders <luke@shadowcatsystems.co.uk>
+See L<AUTHOR|DBIx::Class/AUTHOR> and L<CONTRIBUTORS|DBIx::Class/CONTRIBUTORS> in DBIx::Class
=head1 LICENSE
=head1 DESCRIPTION
-This component adds hooks for Storable so that row objects can be
-serialized. It assumes that your row object class (C<result_class>) is
+This component adds hooks for Storable so that result objects can be
+serialized. It assumes that your result object class (C<result_class>) is
the same as your table class, which is the normal situation.
=head1 HOOKS
The deserializing hook called on the object during deserialization.
-=head1 AUTHORS
+=head1 AUTHOR AND CONTRIBUTORS
-David Kamholz <dkamholz@cpan.org>
+See L<AUTHOR|DBIx::Class/AUTHOR> and L<CONTRIBUTORS|DBIx::Class/CONTRIBUTORS> in DBIx::Class
=head1 LICENSE
package DBIx::Class::StartupCheck;
+use strict;
+use warnings;
+
=head1 NAME
DBIx::Class::StartupCheck - Run environment checks on startup
DBIx::Class::Storage::BlockRunner->new(
storage => $self,
run_code => $coderef,
- run_args => \@_, # take a ref instead of a copy, to preserve coderef @_ aliasing semantics
+ run_args => @_
+ ? \@_ # take a ref instead of a copy, to preserve @_ aliasing
+ : [] # semantics within the coderef, but only if needed
+ , # (pseudoforking doesn't like this trick much)
wrap_txn => 1,
retry_handler => sub { ! ( $_[0]->retried_count or $_[0]->storage->connected ) },
)->run;
L<DBIx::Class::Storage::DBI> - reference storage implementation using
SQL::Abstract and DBI.
-=head1 AUTHORS
+=head1 AUTHOR AND CONTRIBUTORS
-Matt S. Trout <mst@shadowcatsystems.co.uk>
-
-Andy Grundman <andy@hybridized.org>
+See L<AUTHOR|DBIx::Class/AUTHOR> and L<CONTRIBUTORS|DBIx::Class/CONTRIBUTORS> in DBIx::Class
=head1 LICENSE
default => quote_sub(q{ 0 }),
lazy => 1,
trigger => quote_sub(q{
- DBIx::Class::Exception->throw(sprintf (
+ $_[0]->throw_exception( sprintf (
'Exceeded max_retried_count amount of %d, latest exception: %s',
$_[0]->max_retried_count, $_[0]->last_exception
)) if $_[0]->max_retried_count < ($_[1]||0);
sub last_exception { shift->exception_stack->[-1] }
+sub throw_exception { shift->storage->throw_exception (@_) }
+
sub run {
my $self = shift;
- DBIx::Class::Exception->throw('run() takes no arguments') if @_;
+ $self->throw_exception('run() takes no arguments') if @_;
$self->_reset_exception_stack;
$self->_reset_retried_count;
};
}
-=head1 AUTHORS
+=head1 AUTHOR AND CONTRIBUTORS
-see L<DBIx::Class>
+See L<AUTHOR|DBIx::Class/AUTHOR> and L<CONTRIBUTORS|DBIx::Class/CONTRIBUTORS> in DBIx::Class
=head1 LICENSE
use mro 'c3';
use DBIx::Class::Carp;
-use DBIx::Class::Exception;
use Scalar::Util qw/refaddr weaken reftype blessed/;
use List::Util qw/first/;
use Sub::Name 'subname';
# class, as _use_X may be hardcoded class-wide, and _supports_X calls
# _determine_supports_X which obv. needs a correct driver as well
my @rdbms_specific_methods = qw/
- deployment_statements
sqlt_type
sql_maker
build_datetime_parser
my %seek_and_destroy;
sub _arm_global_destructor {
- my $self = shift;
- my $key = refaddr ($self);
- $seek_and_destroy{$key} = $self;
- weaken ($seek_and_destroy{$key});
+ weaken (
+ $seek_and_destroy{ refaddr($_[0]) } = $_[0]
+ );
}
END {
local $?; # just in case the DBI destructor changes it somehow
- # destroy just the object if not native to this process/thread
+ # destroy just the object if not native to this process
$_->_verify_pid for (grep
{ defined $_ }
values %seek_and_destroy
# As per DBI's recommendation, DBIC disconnects all handles as
# soon as possible (DBIC will reconnect only on demand from within
# the thread)
- for (values %seek_and_destroy) {
- next unless $_;
+ my @instances = grep { defined $_ } values %seek_and_destroy;
+ for (@instances) {
$_->{_dbh_gen}++; # so that existing cursors will drop as well
$_->_dbh(undef);
$_->transaction_depth(0);
$_->savepoints([]);
}
+
+ # properly renumber all existing refs
+ %seek_and_destroy = ();
+ $_->_arm_global_destructor for @instances;
}
}
my $self = shift;
# some databases spew warnings on implicit disconnect
- $self->_verify_pid;
+ $self->_verify_pid unless DBIx::Class::_ENV_::BROKEN_FORK;
local $SIG{__WARN__} = sub {};
$self->_dbh(undef);
return $self->$run_target($self->_get_dbh, @_)
if $self->{_in_do_block} or $self->transaction_depth;
- my $args = \@_;
+ # take a ref instead of a copy, to preserve @_ aliasing
+ # semantics within the coderef, but only if needed
+ # (pseudoforking doesn't like this trick much)
+ my $args = @_ ? \@_ : [];
DBIx::Class::Storage::BlockRunner->new(
storage => $self,
sub _seems_connected {
my $self = shift;
- $self->_verify_pid;
+ $self->_verify_pid unless DBIx::Class::_ENV_::BROKEN_FORK;
my $dbh = $self->_dbh
or return 0;
# this is the internal "get dbh or connect (don't check)" method
sub _get_dbh {
my $self = shift;
- $self->_verify_pid;
+ $self->_verify_pid unless DBIx::Class::_ENV_::BROKEN_FORK;
$self->_populate_dbh unless $self->_dbh;
return $self->_dbh;
}
$self->_dbh($self->_connect(@info));
- $self->_conn_pid($$) if $^O ne 'MSWin32'; # on win32 these are in fact threads
+ $self->_conn_pid($$) unless DBIx::Class::_ENV_::BROKEN_FORK; # on win32 these are in fact threads
$self->_determine_driver;
$info = {};
- my $server_version = try { $self->_get_server_version };
+ my $server_version = try {
+ $self->_get_server_version
+ } catch {
+ # driver determination *may* use this codepath
+ # in which case we must rethrow
+ $self->throw_exception($_) if $self->{_in_determine_driver};
+
+    # otherwise leave $server_version undefined on failure
+ undef;
+ };
if (defined $server_version) {
$info->{dbms_version} = $server_version;
unless defined $info;
}
- return try { $self->_get_dbh->get_info($info) } || undef;
+ return $self->_get_dbh->get_info($info);
+}
+
+sub _describe_connection {
+ require DBI::Const::GetInfoReturn;
+
+ my $self = shift;
+ $self->ensure_connected;
+
+ my $res = {
+ DBIC_DSN => $self->_dbi_connect_info->[0],
+ DBI_VER => DBI->VERSION,
+ DBIC_VER => DBIx::Class->VERSION,
+ DBIC_DRIVER => ref $self,
+ };
+
+ for my $inf (
+ #keys %DBI::Const::GetInfoType::GetInfoType,
+ qw/
+ SQL_CURSOR_COMMIT_BEHAVIOR
+ SQL_CURSOR_ROLLBACK_BEHAVIOR
+ SQL_CURSOR_SENSITIVITY
+ SQL_DATA_SOURCE_NAME
+ SQL_DBMS_NAME
+ SQL_DBMS_VER
+ SQL_DEFAULT_TXN_ISOLATION
+ SQL_DM_VER
+ SQL_DRIVER_NAME
+ SQL_DRIVER_ODBC_VER
+ SQL_DRIVER_VER
+ SQL_EXPRESSIONS_IN_ORDERBY
+ SQL_GROUP_BY
+ SQL_IDENTIFIER_CASE
+ SQL_IDENTIFIER_QUOTE_CHAR
+ SQL_MAX_CATALOG_NAME_LEN
+ SQL_MAX_COLUMN_NAME_LEN
+ SQL_MAX_IDENTIFIER_LEN
+ SQL_MAX_TABLE_NAME_LEN
+ SQL_MULTIPLE_ACTIVE_TXN
+ SQL_MULT_RESULT_SETS
+ SQL_NEED_LONG_DATA_LEN
+ SQL_NON_NULLABLE_COLUMNS
+ SQL_ODBC_VER
+ SQL_QUALIFIER_NAME_SEPARATOR
+ SQL_QUOTED_IDENTIFIER_CASE
+ SQL_TXN_CAPABLE
+ SQL_TXN_ISOLATION_OPTION
+ /
+ ) {
+ my $v = $self->_dbh_get_info($inf);
+ next unless defined $v;
+
+ #my $key = sprintf( '%s(%s)', $inf, $DBI::Const::GetInfoType::GetInfoType{$inf} );
+ my $expl = DBI::Const::GetInfoReturn::Explain($inf, $v);
+ $res->{$inf} = DBI::Const::GetInfoReturn::Format($inf, $v) . ( $expl ? " ($expl)" : '' );
+ }
+
+ $res;
}
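
A hedged usage sketch of the new diagnostic (an internal method - the call
below mirrors the bug-report flow in _warn_undetermined_driver further down;
the output shape is illustrative):

    use Data::Dumper::Concise;
    print Dumper( $schema->storage->_describe_connection );
    # hash keys include DBIC_DSN, DBI_VER, DBIC_VER, DBIC_DRIVER,
    # SQL_DBMS_NAME, SQL_DBMS_VER, ...
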
sub _determine_driver {
if ($self->_dbh) { # we are connected
$driver = $self->_dbh->{Driver}{Name};
$started_connected = 1;
- } else {
+ }
+ else {
# if connect_info is a CODEREF, we have no choice but to connect
if (ref $self->_dbi_connect_info->[0] &&
reftype $self->_dbi_connect_info->[0] eq 'CODE') {
bless $self, $storage_class;
$self->_rebless();
}
+ else {
+ $self->_warn_undetermined_driver(
+ 'This version of DBIC does not yet seem to supply a driver for '
+ . "your particular RDBMS and/or connection method ('$driver')."
+ );
+ }
+ }
+ else {
+ $self->_warn_undetermined_driver(
+ 'Unable to extract a driver name from connect info - this '
+ . 'should not have happened.'
+ );
}
}
Class::C3->reinitialize() if DBIx::Class::_ENV_::OLD_MRO;
+ if ($self->can('source_bind_attributes')) {
+ $self->throw_exception(
+ "Your storage subclass @{[ ref $self ]} provides (or inherits) the method "
+ . 'source_bind_attributes() for which support has been removed as of Jan 2013. '
+ . 'If you are not sure how to proceed please contact the development team via '
+ . 'http://search.cpan.org/dist/DBIx-Class/lib/DBIx/Class.pm#GETTING_HELP/SUPPORT'
+ );
+ }
+
$self->_init; # run driver-specific initializations
$self->_run_connection_actions
}
}
+sub _determine_connector_driver {
+ my ($self, $conn) = @_;
+
+ my $dbtype = $self->_dbh_get_info('SQL_DBMS_NAME');
+
+ if (not $dbtype) {
+ $self->_warn_undetermined_driver(
+ 'Unable to retrieve RDBMS type (SQL_DBMS_NAME) of the engine behind your '
+ . "$conn connector - this should not have happened."
+ );
+ return;
+ }
+
+ $dbtype =~ s/\W/_/gi;
+
+ my $subclass = "DBIx::Class::Storage::DBI::${conn}::${dbtype}";
+ return if $self->isa($subclass);
+
+ if ($self->load_optional_class($subclass)) {
+ bless $self, $subclass;
+ $self->_rebless;
+ }
+ else {
+ $self->_warn_undetermined_driver(
+ 'This version of DBIC does not yet seem to supply a driver for '
+ . "your particular RDBMS and/or connection method ('$conn/$dbtype')."
+ );
+ }
+}
+
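For example (values assumed), an ODBC connection to SQL Server reports a
SQL_DBMS_NAME that the transformation above turns into a loadable subclass:

    my $dbtype = 'Microsoft SQL Server';   # as returned by the driver
    $dbtype =~ s/\W/_/gi;
    my $subclass = "DBIx::Class::Storage::DBI::ODBC::${dbtype}";
    # DBIx::Class::Storage::DBI::ODBC::Microsoft_SQL_Server
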
+sub _warn_undetermined_driver {
+ my ($self, $msg) = @_;
+
+ require Data::Dumper::Concise;
+
+ carp_once ($msg . ' While we will attempt to continue anyway, the results '
+ . 'are likely to be underwhelming. Please upgrade DBIC, and if this message '
+    . "does not go away, file a bug report including the following info:\n"
+ . Data::Dumper::Concise::Dumper($self->_describe_connection)
+ );
+}
+
sub _do_connection_actions {
my $self = shift;
my $method_prefix = shift;
$dbh = DBI->connect(@info);
}
- if (!$dbh) {
- die $DBI::errstr;
- }
+ die $DBI::errstr unless $dbh;
+
+ die sprintf ("%s fresh DBI handle with a *false* 'Active' attribute. "
+ . 'This handle is disconnected as far as DBIC is concerned, and we can '
+ . 'not continue',
+ ref $info[0] eq 'CODE'
+ ? "Connection coderef $info[0] returned a"
+ : 'DBI->connect($schema->storage->connect_info) resulted in a'
+ ) unless $dbh->FETCH('Active');
+ # sanity checks unless asked otherwise
unless ($self->unsafe) {
$self->throw_exception(
sub txn_commit {
my $self = shift;
- $self->_verify_pid if $self->_dbh;
+ $self->_verify_pid unless DBIx::Class::_ENV_::BROKEN_FORK;
$self->throw_exception("Unable to txn_commit() on a disconnected storage")
unless $self->_dbh;
sub txn_rollback {
my $self = shift;
- $self->_verify_pid if $self->_dbh;
+ $self->_verify_pid unless DBIx::Class::_ENV_::BROKEN_FORK;
$self->throw_exception("Unable to txn_rollback() on a disconnected storage")
unless $self->_dbh;
no strict qw/refs/;
*{__PACKAGE__ ."::$meth"} = subname $meth => sub {
my $self = shift;
- $self->_verify_pid if $self->_dbh;
+ $self->_verify_pid unless DBIx::Class::_ENV_::BROKEN_FORK;
$self->throw_exception("Unable to $meth() on a disconnected storage")
unless $self->_dbh;
$self->next::method(@_);
sub _gen_sql_bind {
my ($self, $op, $ident, $args) = @_;
- my ($sql, @bind) = $self->sql_maker->$op(
- blessed($ident) ? $ident->from : $ident,
- @$args,
- );
+ my ($colinfos, $from);
+ if ( blessed($ident) ) {
+ $from = $ident->from;
+ $colinfos = $ident->columns_info;
+ }
+
+ my ($sql, @bind) = $self->sql_maker->$op( ($from || $ident), @$args );
if (
! $ENV{DBIC_DT_SEARCH_OK}
}
return( $sql, $self->_resolve_bindattrs(
- $ident, [ @{$args->[2]{bind}||[]}, @bind ]
+ $ident, [ @{$args->[2]{bind}||[]}, @bind ], $colinfos
));
}
if $self->debug;
}
-my $sba_compat;
sub _dbi_attrs_for_bind {
my ($self, $ident, $bind) = @_;
- if (! defined $sba_compat) {
- $self->_determine_driver;
- $sba_compat = $self->can('source_bind_attributes') == \&source_bind_attributes
- ? 0
- : 1
- ;
- }
-
- my $sba_attrs;
- if ($sba_compat) {
- my $class = ref $self;
- carp_unique (
- "The source_bind_attributes() override in $class relies on a deprecated codepath. "
- .'You are strongly advised to switch your code to override bind_attribute_by_datatype() '
- .'instead. This legacy compat shim will also disappear some time before DBIC 0.09'
- );
-
- my $sba_attrs = $self->source_bind_attributes
- }
-
my @attrs;
for (map { $_->[0] } @$bind) {
}
$cache->{$_->{sqlt_datatype}};
}
- elsif ($sba_attrs and $_->{dbic_colname}) {
- $sba_attrs->{$_->{dbic_colname}} || undef;
- }
else {
undef; # always push something at this position
}
'_dbh_execute',
$sql,
$bind,
- $self->_dbi_attrs_for_bind($ident, $bind)
+ $ident,
);
}
sub _dbh_execute {
- my ($self, undef, $sql, $bind, $bind_attrs) = @_;
+ my ($self, undef, $sql, $bind, $ident) = @_;
$self->_query_start( $sql, $bind );
+
+ my $bind_attrs = $self->_dbi_attrs_for_bind($ident, $bind);
+
my $sth = $self->_sth($sql);
for my $i (0 .. $#$bind) {
}
sub _prefetch_autovalues {
- my ($self, $source, $to_insert) = @_;
-
- my $colinfo = $source->columns_info;
+ my ($self, $source, $colinfo, $to_insert) = @_;
my %values;
for my $col (keys %$colinfo) {
sub insert {
my ($self, $source, $to_insert) = @_;
- my $prefetched_values = $self->_prefetch_autovalues($source, $to_insert);
+ my $col_infos = $source->columns_info;
+
+ my $prefetched_values = $self->_prefetch_autovalues($source, $col_infos, $to_insert);
# fuse the values, but keep a separate list of prefetched_values so that
# they can be fused once again with the final return
# FIXME - we seem to assume undef values as non-supplied. This is wrong.
  # Investigate what it takes to s/defined/exists/
- my $col_infos = $source->columns_info;
my %pcols = map { $_ => 1 } $source->primary_columns;
my (%retrieve_cols, $autoinc_supplied, $retrieve_autoinc_col);
for my $col ($source->columns) {
# can't just hand SQLA a set of some known "values" (e.g. hashrefs that
# can be later matched up by address), because we want to supply a real
# value on which perhaps e.g. datatype checks will be performed
- my ($proto_data, $value_type_idx);
+ my ($proto_data, $value_type_by_col_idx);
for my $i (@col_range) {
my $colname = $cols->[$i];
if (ref $data->[0][$i] eq 'SCALAR') {
# store value-less (attrs only) bind info - we will be comparing all
# supplied binds against this for sanity
- $value_type_idx->{$i} = [ map { $_->[0] } @$resolved_bind ];
+ $value_type_by_col_idx->{$i} = [ map { $_->[0] } @$resolved_bind ];
$proto_data->{$colname} = \[ $sql, map { [
# inject slice order to use for $proto_bind construction
- { %{$resolved_bind->[$_][0]}, _bind_data_slice_idx => $i }
+ { %{$resolved_bind->[$_][0]}, _bind_data_slice_idx => $i, _literal_bind_subindex => $_+1 }
=>
$resolved_bind->[$_][1]
] } (0 .. $#bind)
];
}
else {
- $value_type_idx->{$i} = 0;
+ $value_type_by_col_idx->{$i} = undef;
$proto_data->{$colname} = \[ '?', [
{ dbic_colname => $colname, _bind_data_slice_idx => $i }
[ $proto_data ],
);
- if (! @$proto_bind and keys %$value_type_idx) {
+ if (! @$proto_bind and keys %$value_type_by_col_idx) {
# if the bindlist is empty and we had some dynamic binds, this means the
# storage ate them away (e.g. the NoBindVars component) and interpolated
# them directly into the SQL. This obviously can't be good for multi-inserts
for my $row_idx (1..$#$data) { # we are comparing against what we got from [0] above, hence start from 1
my $val = $data->[$row_idx][$col_idx];
- if (! exists $value_type_idx->{$col_idx}) { # literal no binds
+ if (! exists $value_type_by_col_idx->{$col_idx}) { # literal no binds
if (ref $val ne 'SCALAR') {
$bad_slice_report_cref->(
"Incorrect value (expecting SCALAR-ref \\'$$reference_val')",
);
}
}
- elsif (! $value_type_idx->{$col_idx} ) { # regular non-literal value
+ elsif (! defined $value_type_by_col_idx->{$col_idx} ) { # regular non-literal value
if (ref $val eq 'SCALAR' or (ref $val eq 'REF' and ref $$val eq 'ARRAY') ) {
$bad_slice_report_cref->("Literal SQL found where a plain bind value is expected", $row_idx, $col_idx);
}
# need to check the bind attrs - a bind will happen only once for
# the entire dataset, so any changes further down will be ignored.
elsif (! Data::Compare::Compare(
- $value_type_idx->{$col_idx},
+ $value_type_by_col_idx->{$col_idx},
[
map
{ $_->[0] }
# alphabetical ordering by colname). We actually do want to
# preserve this behavior so that prepare_cached has a better
# chance of matching on unrelated calls
- my %data_reorder = map { $proto_bind->[$_][0]{_bind_data_slice_idx} => $_ } @idx_range;
my $fetch_row_idx = -1; # saner loop this way
my $fetch_tuple = sub {
return undef if ++$fetch_row_idx > $#$data;
- return [ map
- { (ref $_ eq 'REF' and ref $$_ eq 'ARRAY')
- ? map { $_->[-1] } @{$$_}[1 .. $#$$_]
- : $_
- }
- map
- { $data->[$fetch_row_idx][$_]}
- sort
- { $data_reorder{$a} <=> $data_reorder{$b} }
- keys %data_reorder
- ];
+ return [ map { defined $_->{_literal_bind_subindex}
+ ? ${ $data->[ $fetch_row_idx ]->[ $_->{_bind_data_slice_idx} ]}
+ ->[ $_->{_literal_bind_subindex} ]
+ ->[1]
+ : $data->[ $fetch_row_idx ]->[ $_->{_bind_data_slice_idx} ]
+ } map { $_->[0] } @$proto_bind];
};
my $tuple_status = [];
}
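
An editorial sketch (data shapes assumed, key names taken from the patch) of
how the two locator keys address a value inside a single dataset row: a plain
value is found by its slice index alone, while a bind buried in a literal-SQL
element additionally needs its 1-based subindex:

    # one row: a plain value plus a literal-SQL element carrying two binds
    my $row = [ 42, \[ '? + ?', [ {} => 1 ], [ {} => 2 ] ] ];

    for my $loc (
        { _bind_data_slice_idx => 0 },                              # plain
        { _bind_data_slice_idx => 1, _literal_bind_subindex => 2 }, # literal
    ) {
        my $val = defined $loc->{_literal_bind_subindex}
            ? ${ $row->[ $loc->{_bind_data_slice_idx} ] }
                ->[ $loc->{_literal_bind_subindex} ][1]
            : $row->[ $loc->{_bind_data_slice_idx} ];
        print "$val\n";   # prints 42, then 2
    }
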
# try to simplify the joinmap further (prune unreferenced type-single joins)
- $ident = $self->_prune_unused_joins ($ident, $select, $where, $attrs);
+ if (
+ ref $ident
+ and
+ reftype $ident eq 'ARRAY'
+ and
+ @$ident != 1
+ ) {
+ $ident = $self->_prune_unused_joins ($ident, $select, $where, $attrs);
+ }
###
# This would be the point to deflate anything found in $where
# (and leave $attrs->{bind} intact). Problem is - inflators historically
- # expect a row object. And all we have is a resultsource (it is trivial
+ # expect a result object. And all we have is a resultsource (it is trivial
# to extract deflator coderefs via $alias2source above).
#
# I don't see a way forward other than changing the way deflators are
return { count => '*' };
}
-sub source_bind_attributes {
- shift->throw_exception(
- 'source_bind_attributes() was never meant to be a callable public method - '
- .'please contact the DBIC dev-team and describe your use case so that a reasonable '
- .'solution can be provided'
- ."\nhttp://search.cpan.org/dist/DBIx-Class/lib/DBIx/Class.pm#GETTING_HELP/SUPPORT"
- );
-}
-
=head2 select
=over 4
attribute for C<< $dbh->bind_param($val,$attribute) >> or nothing if we will
let the database planner just handle it.
-Generally only needed for special case column types, like bytea in postgres.
+This method is always called after the driver has been determined and a DBI
+connection has been established. Therefore you can refer to C<DBI::$constant>
+and/or C<DBD::$driver::$constant> directly, without worrying about loading
+the correct modules.
=cut
=over 4
-=item Arguments: $schema \@databases, $version, $directory, $preversion, \%sqlt_args
+=item Arguments: $schema, \@databases, $version, $directory, $preversion, \%sqlt_args
=back
} else {
-d $dir
or
- (require File::Path and File::Path::make_path ("$dir")) # make_path does not like objects (i.e. Path::Class::Dir)
+ (require File::Path and File::Path::mkpath (["$dir"])) # mkpath does not like objects (i.e. Path::Class::Dir)
or
$self->throw_exception(
"Failed to create '$dir': " . ($! || $@ || 'error unknown')
=item Arguments: $relname, $join_count
+=item Return Value: $alias
+
=back
L<DBIx::Class> uses L<DBIx::Class::Relationship> names as table aliases in
|national\s*character\s*varying))\z/xi);
}
+# Determine if a data_type is some type of a binary type
+sub _is_binary_type {
+ my ($self, $data_type) = @_;
+ $data_type && ($self->_is_binary_lob_type($data_type)
+ || $data_type =~ /(?:var)?(?:binary|bit|graphic)(?:\s*varying)?/i);
+}
+
1;
=head1 USAGE NOTES
be with raw DBI.
-=head1 AUTHORS
-
-Matt S. Trout <mst@shadowcatsystems.co.uk>
+=head1 AUTHOR AND CONTRIBUTORS
-Andy Grundman <andy@hybridized.org>
+See L<AUTHOR|DBIx::Class/AUTHOR> and L<CONTRIBUTORS|DBIx::Class/CONTRIBUTORS> in DBIx::Class
=head1 LICENSE
$columns_info->{$_}{is_auto_increment}
} keys %$columns_info;
- if (not $autoinc_col) {
- $self->throw_exception(
-'empty insert only supported for tables with an autoincrement column'
- );
- }
+ $self->throw_exception(
+ 'empty insert only supported for tables with an autoincrement column'
+ ) unless $autoinc_col;
my $table = $source->from;
$table = $$table if ref $table;
package DBIx::Class::Storage::DBI::ADO;
+use warnings;
+use strict;
+
use base 'DBIx::Class::Storage::DBI';
use mro 'c3';
=cut
-sub _rebless {
- my $self = shift;
-
- my $dbtype = $self->_dbh_get_info('SQL_DBMS_NAME');
-
- if (not $dbtype) {
- warn "Unable to determine ADO driver, failling back to generic support.\n";
- return;
- }
-
- $dbtype =~ s/\W/_/gi;
-
- my $subclass = "DBIx::Class::Storage::DBI::ADO::${dbtype}";
-
- return if $self->isa($subclass);
-
- if ($self->load_optional_class($subclass)) {
- bless $self, $subclass;
- $self->_rebless;
- }
- else {
- warn "Expected driver '$subclass' not found, using generic support. " .
- "Please file an RT.\n";
- }
-}
+sub _rebless { shift->_determine_connector_driver('ADO') }
# cleanup some warnings from DBD::ADO
# RT#65563, not fixed as of DBD::ADO v2.98
our @EXPORT_OK = qw/_normalize_guids _strip_trailing_binary_nulls/;
sub _strip_trailing_binary_nulls {
- my ($select, $col_infos, $data) = @_;
+ my ($select, $col_infos, $data, $storage) = @_;
foreach my $select_idx (0..$#$select) {
or next;
$data->[$select_idx] =~ s/\0+\z//
- if $data_type =~ /binary|image/i;
+ if $storage->_is_binary_type($data_type);
}
}
_normalize_guids($select, $col_infos, \@row, $self);
- _strip_trailing_binary_nulls($select, $col_infos, \@row);
+ _strip_trailing_binary_nulls($select, $col_infos, \@row, $self);
return @row;
}
my $select = $self->args->[1];
_normalize_guids($select, $col_infos, \@row, $storage);
- _strip_trailing_binary_nulls($select, $col_infos, \@row);
+ _strip_trailing_binary_nulls($select, $col_infos, \@row, $storage);
return @row;
}
for (@rows) {
_normalize_guids($select, $col_infos, $_, $storage);
- _strip_trailing_binary_nulls($select, $col_infos, $_);
+ _strip_trailing_binary_nulls($select, $col_infos, $_, $storage);
}
return @rows;
use namespace::clean;
__PACKAGE__->mk_group_accessors('simple' =>
- qw/sth storage args pos attrs _dbh_gen/
+ qw/sth storage args attrs/
);
=head1 NAME
=head1 SYNOPSIS
my $cursor = $schema->resultset('CD')->cursor();
- my $first_cd = $cursor->next;
+
+ # raw values off the database handle in resultset columns/select order
+ my @next_cd_column_values = $cursor->next;
+
+ # list of all raw values as arrayrefs
+ my @all_cds_column_values = $cursor->all;
=head1 DESCRIPTION
my $new = {
storage => $storage,
args => $args,
- pos => 0,
attrs => $attrs,
_dbh_gen => $storage->{_dbh_gen},
+ _pos => 0,
+ _done => 0,
};
return bless ($new, $class);
if (
$self->{attrs}{software_limit}
&& $self->{attrs}{rows}
- && $self->{pos} >= $self->{attrs}{rows}
+ && $self->{_pos} >= $self->{attrs}{rows}
) {
$self->sth->finish if $self->sth->{Active};
$self->sth(undef);
- $self->{done} = 1;
+ $self->{_done} = 1;
}
- return if $self->{done};
+
+ return if $self->{_done};
+
unless ($self->sth) {
$self->sth(($storage->_select(@{$self->{args}}))[1]);
if ($self->{attrs}{software_limit}) {
}
my @row = $self->sth->fetchrow_array;
if (@row) {
- $self->{pos}++;
+ $self->{_pos}++;
} else {
$self->sth(undef);
- $self->{done} = 1;
+ $self->{_done} = 1;
}
return @row;
}
my ($self) = @_;
$self->sth(undef);
- delete $self->{done};
- $self->{pos} = 0;
+ $self->{_done} = 0;
+ $self->{_pos} = 0;
}
sub _check_dbh_gen {
sub _execute {
my $self = shift;
- my ($op) = @_;
my ($rv, $sth, @rest) = $self->next::method(@_);
$self->__last_insert_id($sth->{ix_sqlerrd}[1])
sub _execute {
my $self = shift;
- my ($op) = @_;
# always list ctx - we need the $sth
my ($rv, $sth, @bind) = $self->next::method(@_);
scalar $self->_extract_order_criteria ($attrs->{order_by})
) {
$self->throw_exception(
- 'An ordered subselect encountered - this is not safe! Please see "Ordered Subselects" in DBIx::Class::Storage::DBI::MSSQL
- ') unless $attrs->{unsafe_subselect_ok};
+ 'An ordered subselect encountered - this is not safe! Please see "Ordered Subselects" in DBIx::Class::Storage::DBI::MSSQL'
+ ) unless $attrs->{unsafe_subselect_ok};
my $max = $self->sql_maker->__max_int;
$sql =~ s/^ \s* SELECT \s/SELECT TOP $max /xi;
}
use base qw/DBIx::Class::Storage::DBI/;
use mro 'c3';
-sub _rebless {
- my ($self) = @_;
-
- if (my $dbtype = $self->_dbh_get_info('SQL_DBMS_NAME')) {
- # Translate the backend name into a perl identifier
- $dbtype =~ s/\W/_/gi;
- my $subclass = "DBIx::Class::Storage::DBI::ODBC::${dbtype}";
-
- return if $self->isa($subclass);
-
- if ($self->load_optional_class($subclass)) {
- bless $self, $subclass;
- $self->_rebless;
- }
- else {
- warn "Expected driver '$subclass' not found, using generic support. " .
- "Please file an RT.\n";
- }
- }
- else {
- warn "Could not determine your database type, using generic support.\n";
- }
-}
+sub _rebless { shift->_determine_connector_driver('ODBC') }
-# Whether or not we are connecting via the freetds ODBC driver.
+# Whether or not we are connecting via the freetds ODBC driver
sub _using_freetds {
my $self = shift;
my $self = shift;
my $dbh = $self->_get_dbh;
- if (eval { DBD::ODBC->VERSION('1.35_01') }) {
+ if (eval { DBD::ODBC->VERSION(1.35_01) }) {
$dbh->{odbc_array_operations} = 0;
}
- elsif (eval { DBD::ODBC->VERSION('1.33_01') }) {
+ elsif (eval { DBD::ODBC->VERSION(1.33_01) }) {
$dbh->{odbc_disable_array_operations} = 1;
}
}
sudo aptitude install tdsodbc libdbd-odbc-perl unixodbc
-In case it is not already there put the following in C</etc/odbcinst.ini>:
+In case it is not already there, put the following (adjust for non-64bit arch) in
+C</etc/odbcinst.ini>:
[FreeTDS]
Description = FreeTDS
- Driver = /usr/lib/odbc/libtdsodbc.so
- Setup = /usr/lib/odbc/libtdsS.so
+ Driver = /usr/lib/x86_64-linux-gnu/odbc/libtdsodbc.so
+ Setup = /usr/lib/x86_64-linux-gnu/odbc/libtdsS.so
UsageCount = 1
Set your C<$dsn> in L<connect_info|DBIx::Class::Storage::DBI/connect_info> as follows:
}
if (my ($data_source) = $dsn =~ /^dbi:ODBC:([\w-]+)\z/i) { # prefix with DSN
- warn "Bare DSN in ODBC connect string, rewriting as 'dsn=$data_source'"
+ carp_unique "Bare DSN in ODBC connect string, rewriting as 'dsn=$data_source'"
." for MARS\n";
$dsn = "dbi:ODBC:dsn=$data_source";
}
to your Schema class.
-=head1 AUTHORS
+=head1 AUTHOR AND CONTRIBUTORS
-David Jack Olrik C<< <djo@cpan.org> >>
+See L<AUTHOR|DBIx::Class/AUTHOR> and L<CONTRIBUTORS|DBIx::Class/CONTRIBUTORS> in DBIx::Class
=head1 LICENSE
}
else {
$self->throw_exception( sprintf (
- "Unable to introspect trigger '%s' for column %s.%s (references multiple sequences). "
+ "Unable to introspect trigger '%s' for column '%s.%s' (references multiple sequences). "
. "You need to specify the correct 'sequence' explicitly in '%s's column_info.",
$triggers[0]{name},
$source_name,
}
else {
$self->throw_exception( sprintf (
- "Unable to reliably select a BEFORE INSERT trigger for column %s.%s (possibilities: %s). "
+ "Unable to reliably select a BEFORE INSERT trigger for column '%s.%s' (possibilities: %s). "
. "You need to specify the correct 'sequence' explicitly in '%s's column_info.",
$source_name,
$col,
}
$self->throw_exception( sprintf (
- "No suitable BEFORE INSERT triggers found for column %s.%s. "
+ "No suitable BEFORE INSERT triggers found for column '%s.%s'. "
. "You need to specify the correct 'sequence' explicitly in '%s's column_info.",
$source_name,
$col,
}
sub _dbh_execute {
- my ($self, $dbh, $sql, $bind) = @_;
+ #my ($self, $dbh, $sql, $bind, $ident) = @_;
+ my ($self, $bind) = @_[0,3];
- # Turn off sth caching for multi-part LOBs. See _prep_for_execute above.
+ # Turn off sth caching for multi-part LOBs. See _prep_for_execute below
local $self->{disable_sth_caching} = 1 if first {
($_->[0]{_ora_lob_autosplit_part}||0)
>
my ($final_sql, @final_binds);
if ($op eq 'update') {
- $self->throw_exception('Update with complex WHERE clauses currently not supported')
+ $self->throw_exception('Update with complex WHERE clauses involving BLOB columns currently not supported')
if $sql =~ /\bWHERE\b .+ \bWHERE\b/xs;
my $where_sql;
for my $col (@cols) {
my $seq = ( $col_info->{$col}{sequence} ||= $self->dbh_do('_dbh_get_autoinc_seq', $source, $col) )
or $self->throw_exception( sprintf(
- 'could not determine sequence for column %s.%s, please consider adding a schema-qualified sequence to its column info',
+ "Could not determine sequence for column '%s.%s', please consider adding a schema-qualified sequence to its column info",
$source->name,
$col,
));
$seq_expr = '' unless defined $seq_expr;
$schema = "$schema." if defined $schema && length $schema;
$self->throw_exception( sprintf (
- 'no sequence found for %s%s.%s, check the RDBMS table definition or explicitly set the '.
+ "No sequence found for '%s%s.%s', check the RDBMS table definition or explicitly set the ".
"'sequence' for this column in %s",
$schema ? "$schema." : '',
$table,
use namespace::clean -except => 'meta';
+=encoding utf8
+
=head1 NAME
DBIx::Class::Storage::DBI::Replicated - BETA Replicated database support
_arm_global_destructor
_verify_pid
- source_bind_attributes
-
get_use_dbms_capability
set_use_dbms_capability
get_dbms_capability
_dbh_details
_dbh_get_info
+ _determine_connector_driver
+ _describe_connection
+ _warn_undetermined_driver
+
sql_limit_dialect
sql_quote_char
sql_name_sep
_max_column_bytesize
_is_lob_type
_is_binary_lob_type
+ _is_binary_type
_is_text_lob_type
sth
for my $method (@{$method_dispatch->{unimplemented}}) {
__PACKAGE__->meta->add_method($method, sub {
my $self = shift;
- $self->throw_exception("$method must not be called on ".(blessed $self).' objects');
+ $self->throw_exception("$method() must not be called on ".(blessed $self).' objects');
});
}
around connect_info => sub {
my ($next, $self, $info, @extra) = @_;
+ $self->throw_exception(
+ 'connect_info can not be retrieved from a replicated storage - '
+ . 'accessor must be called on a specific pool instance'
+ ) unless defined $info;
+
my $merge = Hash::Merge->new('LEFT_PRECEDENT');
my %opts;
} elsif(my $replicant = $self->pool->replicants->{$forced_pool}) {
return $replicant;
} else {
- $self->master->throw_exception("$forced_pool is not a named replicant.");
+ $self->master->throw_exception("'$forced_pool' is not a named replicant.");
}
}
sub _prefetch_autovalues {
my $self = shift;
- my ($source, $to_insert) = @_;
+ my ($source, $colinfo, $to_insert) = @_;
my $values = $self->next::method(@_);
- my $colinfo = $source->columns_info;
-
my $identity_col =
first { $colinfo->{$_}{is_auto_increment} } keys %$colinfo;
use DBIx::Class::Carp;
use Scalar::Util 'looks_like_number';
+use Try::Tiny;
use namespace::clean;
__PACKAGE__->sql_maker_class('DBIx::Class::SQLMaker::SQLite');
$self->_dbh->do("ROLLBACK TRANSACTION TO SAVEPOINT $name");
}
+sub _ping {
+ my $self = shift;
+
+ # Be extremely careful what we do here. SQLite is notoriously bad at
+ # synchronizing its internal transaction state with {AutoCommit}
+ # https://metacpan.org/source/ADAMK/DBD-SQLite-1.37/lib/DBD/SQLite.pm#L921
+ # There is a function http://www.sqlite.org/c3ref/get_autocommit.html
+ # but DBD::SQLite does not expose it (nor does it seem to properly use it)
+
+ # Therefore only execute a "ping" when we have no other choice *AND*
+ # scrutinize the thrown exceptions to make sure we are where we think we are
+ my $dbh = $self->_dbh or return undef;
+ return undef unless $dbh->FETCH('Active');
+ return undef unless $dbh->ping;
+
+ # since we do not have access to sqlite3_get_autocommit(), do a trick
+  # to attempt to *safely* determine what state we are *actually* in.
+ # FIXME
+ # also using T::T here leads to bizarre leaks - will figure it out later
+ my $really_not_in_txn = do {
+ local $@;
+
+ # older versions of DBD::SQLite do not properly detect multiline BEGIN/COMMIT
+ # statements to adjust their {AutoCommit} state. Hence use such a statement
+ # pair here as well, in order to escape from poking {AutoCommit} needlessly
+ # https://rt.cpan.org/Public/Bug/Display.html?id=80087
+ eval {
+ # will fail instantly if already in a txn
+ $dbh->do("-- multiline\nBEGIN");
+ $dbh->do("-- multiline\nCOMMIT");
+ 1;
+ } or do {
+ ($@ =~ /transaction within a transaction/)
+ ? 0
+ : undef
+ ;
+ };
+ };
+
+ my $ping_fail;
+
+ # if we were unable to determine this - we may very well be dead
+ if (not defined $really_not_in_txn) {
+ $ping_fail = 1;
+ }
+ # check the AC sync-state
+ elsif ($really_not_in_txn xor $dbh->{AutoCommit}) {
+ carp_unique (sprintf
+ 'Internal transaction state of handle %s (apparently %s a transaction) does not seem to '
+ . 'match its AutoCommit attribute setting of %s - this is an indication of a '
+ . 'potentially serious bug in your transaction handling logic',
+ $dbh,
+ $really_not_in_txn ? 'NOT in' : 'in',
+ $dbh->{AutoCommit} ? 'TRUE' : 'FALSE',
+ );
+
+ # it is too dangerous to execute anything else in this state
+ # assume everything works (safer - worst case scenario next statement throws)
+ return 1;
+ }
+ else {
+ # do the actual test
+ $ping_fail = ! try { $dbh->do('SELECT * FROM sqlite_master LIMIT 1'); 1 };
+ }
+
+ if ($ping_fail) {
+ # it is possible to have a proper "connection", and have "ping" return
+ # false anyway (e.g. corrupted file). In such cases DBD::SQLite still
+ # keeps the actual file handle open. We don't really want this to happen,
+ # so force-close the handle via DBI itself
+ #
+    local $@; # so that we do not clobber the real error as set above
+ eval { $dbh->disconnect }; # if it fails - it fails
+ return undef # the actual RV of _ping()
+ }
+ else {
+ return 1;
+ }
+}
+
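An editorial note rather than part of the patch: in the storage base class
_ping() is what ultimately backs connected(), so the caution above feeds
straight into the everyday reconnect idiom:

    # true only when the handle is demonstrably alive
    $schema->storage->connected;

    # transparently reconnects when _ping() reported death
    $schema->storage->ensure_connected;
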
sub deployment_statements {
my $self = shift;
my ($schema, $type, $version, $dir, $sqltargs, @rest) = @_;
sub bind_attribute_by_data_type {
$_[1] =~ /^ (?: int(?:eger)? | (?:tiny|small|medium)int ) $/ix
- ? do { require DBI; DBI::SQL_INTEGER() }
+ ? DBI::SQL_INTEGER()
: undef
;
}
1;
-=head1 AUTHORS
+=head1 AUTHOR AND CONTRIBUTORS
-Matt S. Trout <mst@shadowcatsystems.co.uk>
+See L<AUTHOR|DBIx::Class/AUTHOR> and L<CONTRIBUTORS|DBIx::Class/CONTRIBUTORS> in DBIx::Class
=head1 LICENSE
try {
$dbtype = @{$self->_get_dbh->selectrow_arrayref(qq{sp_server_info \@attribute_id=1})}[2]
} catch {
- $self->throw_exception("Unable to estable connection to determine database type: $_")
+ $self->throw_exception("Unable to establish connection to determine database type: $_")
};
if ($dbtype) {
sub _prep_for_execute {
my $self = shift;
- my ($op, $ident) = @_;
+ my $ident = $_[1];
#
### This is commented out because all tests pass. However I am leaving it
### BTW it doesn't currently work exactly - need better sensitivity to
# currently set value
#
+ #my ($op, $ident) = @_;
+ #
# inherit these from the parent for the duration of _prep_for_execute
# Don't know how to make a localizing loop with if's, otherwise I would
#local $self->{_autoinc_supplied_for_op}
sub _execute {
my $self = shift;
- my ($op) = @_;
-
my ($rv, $sth, @bind) = $self->next::method(@_);
$self->_identity( ($sth->fetchall_arrayref)->[0][0] )
See L</connect_call_datetime_setup> to setup date formats
for L<DBIx::Class::InflateColumn::DateTime>.
+=head1 LIMITED QUERIES
+
+Because ASE does not have a good way to limit results in SQL that works for all
+types of queries, the limit dialect is set to
+L<GenericSubQ|SQL::Abstract::Limit/GenericSubQ>.
+
+Fortunately, ASE and L<DBD::Sybase> support cursors properly, so when
+L<GenericSubQ|SQL::Abstract::Limit/GenericSubQ> is too slow you can use
+the L<software_limit|DBIx::Class::ResultSet/software_limit> attribute of
+L<DBIx::Class::ResultSet> to simulate limited queries by skipping over
+records.
+
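For example (an editorial sketch - attribute names as documented in
L<DBIx::Class::ResultSet>):

  my @cds = $schema->resultset('CD')->search({}, {
    software_limit => 1,
    rows           => 25,
    offset         => 100,
  });
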
=head1 TEXT/IMAGE COLUMNS
L<DBD::Sybase> compiled with FreeTDS will B<NOT> allow you to insert or update
package DBIx::Class::Storage::DBI::Sybase::ASE::NoBindVars;
+use warnings;
+use strict;
+
use base qw/
DBIx::Class::Storage::DBI::NoBindVars
DBIx::Class::Storage::DBI::Sybase::ASE
$schema->storage_type('::DBI::Sybase::MSSQL');
$schema->connect_info('dbi:Sybase:....', ...);
-=head1 AUTHORS
+=head1 AUTHOR AND CONTRIBUTORS
-Brandon L Black <blblack@gmail.com>
-
-Justin Hunter <justin.d.hunter@gmail.com>
+See L<AUTHOR|DBIx::Class/AUTHOR> and L<CONTRIBUTORS|DBIx::Class/CONTRIBUTORS> in DBIx::Class
=head1 LICENSE
sub _prefetch_autovalues {
my $self = shift;
- my ($source, $to_insert) = @_;
-
- my $col_info = $source->columns_info;
+ my ($source, $col_info, $to_insert) = @_;
my %guid_cols;
my @pk_cols = $source->primary_columns;
if (not defined $guid_method) {
$self->throw_exception(
- 'You must set new_guid on your storage. See perldoc '
+ 'You must set new_guid() on your storage. See perldoc '
.'DBIx::Class::Storage::DBI::UniqueIdentifier'
);
}
use base qw/DBIx::Class::Storage::DBI/;
+use List::Util 'first';
+use namespace::clean;
+
__PACKAGE__->sql_maker_class('DBIx::Class::SQLMaker::MySQL');
__PACKAGE__->sql_limit_dialect ('LimitXY');
__PACKAGE__->sql_quote_char ('`');
$dbh->{mysql_insertid};
}
+sub _prep_for_execute {
+ my $self = shift;
+ #(my $op, $ident, $args) = @_;
+
+ # Only update and delete need special double-subquery treatment
+ # Insert referencing the same table (i.e. SELECT MAX(id) + 1) seems
+ # to work just fine on MySQL
+ return $self->next::method(@_) if ( $_[0] eq 'select' or $_[0] eq 'insert' );
+
+
+ # FIXME FIXME FIXME - this is a terrible, gross, incomplete hack
+ # it should be trivial for mst to port this to DQ (and a good
+ # exercise as well, since we do not yet have such wide tree walking
+ # in place). For the time being this will work in limited cases,
+ # mainly complex update/delete, which is really all we want it for
+ # currently (allows us to fix some bugs without breaking MySQL in
+ # the process, and is also crucial for Shadow to be usable)
+
+ # extract the source name, construct modification indicator re
+ my $sm = $self->sql_maker;
+
+ my $target_name = $_[1]->from;
+
+ if (ref $target_name) {
+ if (
+ ref $target_name eq 'SCALAR'
+ and
+ $$target_name =~ /^ (?:
+ \` ( [^`]+ ) \` #`
+ | ( [\w\-]+ )
+ ) $/x
+ ) {
+ # this is just a plain-ish name, which has been literal-ed for
+ # whatever reason
+ $target_name = first { defined $_ } ($1, $2);
+ }
+ else {
+ # this is something very complex, perhaps a custom result source or whatnot
+ # can't deal with it
+ undef $target_name;
+ }
+ }
+
+ local $sm->{_modification_target_referenced_re} =
+ qr/ (?<!DELETE) [\s\)] FROM \s (?: \` \Q$target_name\E \` | \Q$target_name\E ) [\s\(] /xi
+ if $target_name;
+
+ $self->next::method(@_);
+}
+
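An editorial sketch (assumed table name, regex copied from the patch) of the
kind of statement the guard is meant to flag - MySQL cannot select from the
very table being modified, hence the double-subquery rewrite:

    my $t  = 'artist';
    my $re = qr/ (?<!DELETE) [\s\)] FROM \s (?: \` \Q$t\E \` | \Q$t\E ) [\s\(] /xi;

    # matches on the inner FROM only - the lookbehind skips "DELETE FROM"
    print "needs rewrite\n"
      if 'DELETE FROM artist WHERE id IN (SELECT id FROM artist WHERE rank > 2)' =~ $re;
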
# here may seem like an odd place to override, but this is the first
# method called after we are connected *and* the driver is determined
# ($self is reblessed). See code flow in ::Storage::DBI::_populate_dbh
$self->throw_exception ('Complex prefetches are not supported on resultsets with a custom from attribute')
if (ref $from ne 'ARRAY' || ref $from->[0] ne 'HASH' || ref $from->[1] ne 'ARRAY');
-
# generate inner/outer attribute lists, remove stuff that doesn't apply
my $outer_attrs = { %$attrs };
delete $outer_attrs->{$_} for qw/where bind rows offset group_by having/;
- my $inner_attrs = { %$attrs, _is_internal_subuery => 1 };
+ my $inner_attrs = { %$attrs };
delete $inner_attrs->{$_} for qw/for collapse _prefetch_selector_range select as/;
+ # if the user did not request it, there is no point using it inside
+ delete $inner_attrs->{order_by} if delete $inner_attrs->{_order_is_artificial};
+
# generate the inner/outer select lists
# for inside we consider only stuff *not* brought in by the prefetch
# on the outside we substitute any function for its alias
my $outer_select = [ @$select ];
my $inner_select = [];
+ my ($root_source, $root_source_offset);
+
+ for my $i (0 .. $#$from) {
+ my $node = $from->[$i];
+ my $h = (ref $node eq 'HASH') ? $node
+ : (ref $node eq 'ARRAY' and ref $node->[0] eq 'HASH') ? $node->[0]
+ : next
+ ;
+
+ if ( ($h->{-alias}||'') eq $attrs->{alias} and $root_source = $h->{-rsrc} ) {
+ $root_source_offset = $i;
+ last;
+ }
+ }
+
+ $self->throw_exception ('Complex prefetches are not supported on resultsets with a custom from attribute')
+ unless $root_source;
+
+ # use the heavy duty resolver to take care of aliased/nonaliased naming
+ my $colinfo = $self->_resolve_column_info($from);
+ my $selected_root_columns;
+
my ($p_start, $p_end) = @{$outer_attrs->{_prefetch_selector_range}};
for my $i (0 .. $p_start - 1, $p_end + 1 .. $#$outer_select) {
my $sel = $outer_select->[$i];
$sel->{-as} ||= $attrs->{as}[$i];
$outer_select->[$i] = join ('.', $attrs->{alias}, ($sel->{-as} || "inner_column_$i") );
}
+ elsif (! ref $sel and my $ci = $colinfo->{$sel}) {
+ $selected_root_columns->{$ci->{-colname}} = 1;
+ }
push @$inner_select, $sel;
push @{$inner_attrs->{as}}, $attrs->{as}[$i];
}
+ # We will need to fetch all native columns in the inner subquery, which may be a part
+ # of an *outer* join condition. We can not just fetch everything because a potential
+ # has_many restricting join collapse *will not work* on heavy data types.
+ # Time for more horrible SQL parsing, aughhhh
+
+  # MASSIVE FIXME - in fact when we are fully transitioned to DQ and the support
+ # is sane - we will need to trim the select list to *only* fetch stuff that is
+ # necessary to build joins. In the current implementation if I am selecting a blob
+ # and the group_by kicks in - we are fucked, and all the user can do is not select
+ # that column. This is silly!
+
+ my $retardo_sqla_cache = {};
+ for my $cond ( map { $_->[1] } @{$from}[$root_source_offset + 1 .. $#$from] ) {
+ for my $col (@{$self->_extract_condition_columns($cond, $retardo_sqla_cache)}) {
+ my $ci = $colinfo->{$col};
+ if (
+ $ci
+ and
+ $ci->{-source_alias} eq $attrs->{alias}
+ and
+ ! $selected_root_columns->{$ci->{-colname}}++
+ ) {
+ # adding it to both to keep limits not supporting dark selectors happy
+ push @$inner_select, $ci->{-fq_colname};
+ push @{$inner_attrs->{as}}, $ci->{-fq_colname};
+ }
+ }
+ }
+
# construct the inner $from and lock it in a subquery
# we need to prune first, because this will determine if we need a group_by below
# the fake group_by is so that the pruner throws away all non-selecting, non-restricting
# - it is part of the restrictions, in which case we need to collapse the outer
# result by tackling yet another group_by to the outside of the query
+ # work on a shallow copy
$from = [ @$from ];
- # so first generate the outer_from, up to the substitution point
my @outer_from;
- while (my $j = shift @$from) {
- $j = [ $j ] unless ref $j eq 'ARRAY'; # promote the head-from to an AoH
- if ($j->[0]{-alias} eq $attrs->{alias}) { # time to swap
+ # we may not be the head
+ if ($root_source_offset) {
+ # first generate the outer_from, up to the substitution point
+ @outer_from = splice @$from, 0, $root_source_offset;
- push @outer_from, [
- {
- -alias => $attrs->{alias},
- -rsrc => $j->[0]{-rsrc},
- $attrs->{alias} => $inner_subq,
- },
- @{$j}[1 .. $#$j],
- ];
- last; # we'll take care of what's left in $from below
- }
- else {
- push @outer_from, $j;
- }
+ my $root_node = shift @$from;
+
+ push @outer_from, [
+ {
+ -alias => $attrs->{alias},
+ -rsrc => $root_node->[0]{-rsrc},
+ $attrs->{alias} => $inner_subq,
+ },
+ @{$root_node}[1 .. $#$root_node],
+ ];
+ }
+ else {
+ my $root_node = shift @$from;
+
+ @outer_from = {
+ -alias => $attrs->{alias},
+ -rsrc => $root_node->{-rsrc},
+ $attrs->{alias} => $inner_subq,
+ };
}
# scan the *remaining* from spec against different attributes, and see which joins are needed
}
}
- # demote the outer_from head
- $outer_from[0] = $outer_from[0][0];
-
if ($need_outer_group_by and ! $outer_attrs->{group_by}) {
my $unprocessed_order_chunks;
# yet another atrocity: attempt to extract all columns from a
# where condition by hooking _quote
sub _extract_condition_columns {
- my ($self, $cond, $sql_maker) = @_;
+ my ($self, $cond, $sql_maker_cache) = @_;
return [] unless $cond;
- $sql_maker ||= $self->{_sql_ident_capturer} ||= do {
+ my $sm = $sql_maker_cache->{condparser} ||= $self->{_sql_ident_capturer} ||= do {
# FIXME - replace with a Moo trait
my $orig_sm_class = ref $self->sql_maker;
my $smic_class = "${orig_sm_class}::_IdentCapture_";
$smic_class->new();
};
- $sql_maker->_recurse_where($cond);
+ $sm->_recurse_where($cond);
- return [ sort keys %{$sql_maker->_get_captured_idents} ];
+ return [ sort keys %{$sm->_get_captured_idents} ];
}
sub _extract_order_criteria {
1;
-=head1 AUTHORS
+=head1 AUTHOR AND CONTRIBUTORS
-Cory G. Watson <gphat@cpan.org>
+See L<AUTHOR|DBIx::Class/AUTHOR> and L<CONTRIBUTORS|DBIx::Class/CONTRIBUTORS> in DBIx::Class
=head1 LICENSE
-You may distribute this code under the same license as Perl itself.
+You may distribute this code under the same terms as Perl itself.
=cut
use Try::Tiny;
use Scalar::Util qw/weaken blessed refaddr/;
use DBIx::Class;
-use DBIx::Class::Exception;
use DBIx::Class::Carp;
use namespace::clean;
-my ($guards_count, $compat_handler, $foreign_handler);
-
sub new {
my ($class, $storage) = @_;
bless $guard, ref $class || $class;
- # install a callback carefully
- if (DBIx::Class::_ENV_::INVISIBLE_DOLLAR_AT and !$guards_count) {
-
- # if the thrown exception is a plain string, wrap it in our
- # own exception class
- # this is actually a pretty cool idea, may very well keep it
- # after perl is fixed
- $compat_handler ||= bless(
- sub {
- $@ = (blessed($_[0]) or ref($_[0]))
- ? $_[0]
- : bless ( { msg => $_[0] }, 'DBIx::Class::Exception')
- ;
- die;
- },
- '__TxnScopeGuard__FIXUP__',
- );
-
- if ($foreign_handler = $SIG{__DIE__}) {
- $SIG{__DIE__} = bless (
- sub {
- # we trust the foreign handler to do whatever it wants, all we do is set $@
- eval { $compat_handler->(@_) };
- $foreign_handler->(@_);
- },
- '__TxnScopeGuard__FIXUP__',
- );
- }
- else {
- $SIG{__DIE__} = $compat_handler;
- }
- }
-
- $guards_count++;
-
$guard;
}
sub DESTROY {
my $self = shift;
- $guards_count--;
-
- # don't touch unless it's ours, and there are no more of us left
- if (
- DBIx::Class::_ENV_::INVISIBLE_DOLLAR_AT
- and
- !$guards_count
- ) {
-
- if (ref $SIG{__DIE__} eq '__TxnScopeGuard__FIXUP__') {
- # restore what we saved
- if ($foreign_handler) {
- $SIG{__DIE__} = $foreign_handler;
- }
- else {
- delete $SIG{__DIE__};
- }
- }
-
- # make sure we do not leak the foreign one in case it exists
- undef $foreign_handler;
- }
-
return if $self->{inactivated};
# if our dbh is not ours anymore, the $dbh weakref will go undef
- $self->{storage}->_verify_pid;
+ $self->{storage}->_verify_pid unless DBIx::Class::_ENV_::BROKEN_FORK;
return unless $self->{dbh};
my $exception = $@ if (
}
}
- $@ = $exception unless DBIx::Class::_ENV_::INVISIBLE_DOLLAR_AT;
+ $@ = $exception;
}
1;
use SQL::Translator::Utils qw(debug normalize_name);
use DBIx::Class::Carp qw/^SQL::Translator|^DBIx::Class|^Try::Tiny/;
use DBIx::Class::Exception;
-use Scalar::Util qw/weaken blessed/;
+use Scalar::Util 'blessed';
use Try::Tiny;
use namespace::clean;
$dbicschema ||= $args->{'package'};
my $limit_sources = $args->{'sources'};
- # this is a hack to prevent schema leaks due to a retarded SQLT implementation
- # DO NOT REMOVE (until SQLT2 is out, the all of this will be rewritten anyway)
- ref $_ and weaken $_
- for $_[1], $dbicschema, @{$args}{qw/DBIx::Schema DBIx::Class::Schema package/};
-
DBIx::Class::Exception->throw('No DBIx::Class::Schema') unless ($dbicschema);
if (!ref $dbicschema) {
eval "require $dbicschema"
my $relsource = try { $source->related_source($rel) };
unless ($relsource) {
- warn "Ignoring relationship '$rel' - related resultsource '$rel_info->{class}' is not registered with this schema\n";
+ carp "Ignoring relationship '$rel' - related resultsource '$rel_info->{class}' is not registered with this schema\n";
next;
};
);
}, 'xt');
-my $xt_tests = join (' ', map { File::Spec->catfile($_, '*.t') } sort keys %$xt_dirs );
+my @xt_tests = map { File::Spec->catfile($_, '*.t') } sort keys %$xt_dirs;
# this will add the xt tests to the `make test` target among other things
-Meta->tests(join (' ', map { $_ || () } Meta->tests, $xt_tests ) );
+Meta->tests(join (' ', map { $_ || () } Meta->tests, @xt_tests ) );
-# inject an explicit xt test run for making a tarball (distdir is exempt)
+# inject an explicit xt test run, mainly to check the contents of
+# lib and the generated PODs *before* anything is copied around
+#
+# at the end rerun the whitespace test in the distdir, to make sure everything
+# is pristine
postamble <<"EOP";
-.PHONY: test_xt
+dbic_clonedir_copy_generated_pod : test_xt
-dist : test_xt
+test_xt : pm_to_blib
+@{[
+ # When xt tests are explicitly requested, we want to run with RELEASE_TESTING=1
+ # so that all optdeps are turned into a hard failure
+  # However, portably modifying ENV for a single command is surprisingly hard
+ # So instead we (ab)use perl's ability to stack -e options, and simply modify
+ # the ENV from within perl itself
+ $mm_proto->test_via_harness(
+ # perl cmd
+ join( ' ',
+ '$(ABSPERLRUN)',
+ map { $mm_proto->quote_literal($_) } qw(-e $ENV{RELEASE_TESTING}=1;$ENV{DBICTEST_NO_MAKEFILE_VERIFICATION}=1;)
+ ),
+ # test list
+ join( ' ',
+ map { $mm_proto->quote_literal($_) } @xt_tests
+ ),
+ )
+]}
-test_xt :
-\tPERL_DL_NONLAZY=1 RELEASE_TESTING=1 \$(FULLPERLRUN) "-MExtUtils::Command::MM" "-e" "test_harness(\$(TEST_VERBOSE), 'inc', '\$(INST_LIB)', '\$(INST_ARCHLIB)')" $xt_tests
+create_distdir : dbic_distdir_retest_whitespace
-EOP
+dbic_distdir_retest_whitespace :
+\t@{[
+ $mm_proto->cd (
+ '$(DISTVNAME)',
+ $mm_proto->test_via_harness(
+ # perl cmd
+ join( ' ',
+ '$(ABSPERLRUN)',
+ map { $mm_proto->quote_literal($_) } qw(-Ilib -e $ENV{RELEASE_TESTING}=1;$ENV{DBICTEST_NO_MAKEFILE_VERIFICATION}=1;)
+ ),
+ 'xt/whitespace.t'
+ )
+ )
+]}
+EOP
# keep the Makefile.PL eval happy
1;
# this will run after the Makefile is written and the main Makefile.PL terminates
#
END {
+ # shit already hit the fan
+ return if $?;
+
# Re-write META.yml at the end to _exclude_ all forced build-requires (we do not
# want to ship this). We are also not using M::I::AuthorRequires as this will be
# an extra dep, and deps in Makefile.PL still suck
Meta->write;
}
+ # strip possible crlf from META
+ if ($^O eq 'MSWin32' or $^O eq 'cygwin') {
+ local $ENV{PERLIO} = 'unix';
+ system( $^X, qw( -MExtUtils::Command -e dos2unix -- META.yml), );
+ }
+
# test that we really took things away (just in case, happened twice somehow)
if (! -f 'META.yml') {
warn "No META.yml generated?! aborting...\n";
lib/DBIx/Class/Admin
lib/DBIx/Class/PK/Auto
lib/DBIx/Class/CDBICompat
+ maint
|);
no_index package => $_ for (qw/
DBIx::Class::Storage::DBIHacks
+ DBIx::Class::Storage::BlockRunner
DBIx::Class::Carp
DBIx::Class::ResultSet::Pager
/);
--- /dev/null
+
+my $dbic_ver_re = qr/ (\d) \. (\d{2}) (\d{3}) (?: _ (\d{2}) )? /x; # not anchored!!!
+
+my $version_string = Meta->version;
+my $version_value = eval $version_string;
+
+my ($v_maj, $v_min, $v_point, $v_dev) = $version_string =~ /^$dbic_ver_re$/
+ or die sprintf (
+ "Invalid version %s (as specified in %s)\nCurrently valid version formats are M.VVPPP or M.VVPPP_DD\n",
+ $version_string,
+ Meta->{values}{version_from} || Meta->{values}{all_from} || 'Makefile.PL',
+ )
+;
+
+if ($v_maj != 0 or $v_min > 8) {
+ die "Illegal version $version_string - we are still in the 0.08 cycle\n"
+}
+
+
+# all odd releases *after* 0.08200 generate a -TRIAL, no exceptions
+Meta->makemaker_args->{DISTVNAME} = Meta->name . "-$version_string-TRIAL"
+ if ( $v_point > 200 and int($v_point / 100) % 2 );
+
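A worked illustration (version string assumed) of the regex and the -TRIAL
rule above:

    my ($maj, $min, $point, $dev) = '0.08300_01' =~ /^$dbic_ver_re$/;
    # yields ('0', '08', '300', '01'); since 300 > 200 and int(300/100) % 2
    # is true, the resulting distdir is named DBIx-Class-0.08300_01-TRIAL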
+
+my $tags = { map { chomp $_; $_ => 1} `git tag` };
+# git may not be available
+if (keys %$tags) {
+ my $shipped_versions;
+ my $shipped_dev_versions;
+
+ for (keys %$tags) {
+ if ($_ =~ /^v$dbic_ver_re$/) {
+ if (defined $4) {
+ $shipped_dev_versions->{"$1.$2$3$4"} = 1;
+ }
+ else {
+ $shipped_versions->{"$1.$2$3"} = 1;
+ }
+ delete $tags->{$_};
+ }
+ }
+
+ die sprintf "Tags in unknown format found: %s\n", join ', ', keys %$tags
+ if keys %$tags;
+}
+
+# keep the Makefile.PL eval happy
+1;
--- /dev/null
+# Split create_distdir into several subtargets, allowing us to generate
+# stuff, inject it into lib/, manifest it, and then clean all of it up
+{
+ package MY;
+ sub distdir {
+ (my $snippet = shift->SUPER::distdir(@_)) =~ s/^create_distdir :/create_distdir_copy_manifested :/;
+ return <<"EOM";
+$snippet
+
+create_distdir : clonedir_generate_files clonedir_post_generate_files fresh_manifest create_distdir_copy_manifested clonedir_cleanup_generated_files
+\t\$(NOECHO) \$(NOOP)
+
+clonedir_generate_files :
+\t\$(NOECHO) \$(NOOP)
+
+clonedir_post_generate_files :
+\t\$(NOECHO) \$(NOOP)
+
+clonedir_cleanup_generated_files :
+\t\$(NOECHO) \$(NOOP)
+
+EOM
+ }
+}
+
+# EU::MM BUG - workaround
+# somehow the init_PM of EUMM (in MM_Unix) interprets ResultClass.pod.proto
+# as a valid ResultClass.pod. While this has no effect on dist-building
+# it royally screws up the local Makefile.PL $TO_INST_PM and friends,
+# making it impossible to make/make test from a checkout
+# just rip it out here (remember - this is only executed under author mode)
+{
+ package MY;
+ sub init_PM {
+ my $self = shift;
+ my $rv = $self->SUPER::init_PM(@_);
+ delete @{$self->{PM}}{qw(lib/DBIx/Class/Manual/ResultClass.pod lib/DBIx/Class/Manual/ResultClass.pod.proto)};
+ $rv
+ }
+}
+
+# make the install (and friends) target a noop - instead of
+# doing a perl Makefile.PL && make && make install (which will leave pod
+# behind), one ought to assemble a distdir first
+
+{
+ package MY;
+ sub install {
+ (my $snippet = shift->SUPER::install(@_))
+ =~ s/^( (?: install [^\:]+ | \w+_install \s) \:+ )/$1 block_install_from_checkout/mxg;
+ return <<"EOM";
+$snippet
+
+block_install_from_checkout :
+\t\$(NOECHO) \$(ECHO) Installation directly from a checkout is not possible. You need to prepare a distdir, enter it, and run the installation from within.
+\t\$(NOECHO) \$(FALSE)
+
+EOM
+ }
+}
+
+# keep the Makefile.PL eval happy
+1;
+++ /dev/null
-# Makefile syntax allows adding extra dep-specs for already-existing targets,
-# and simply appends them on *LAST*-come *FIRST*-serve basis.
-# This allows us to inject extra depenencies for standard EUMM targets
-
-postamble <<"EOP";
-
-.PHONY: dbic_clonedir_cleanup_readme dbic_clonedir_gen_readme
-
-distdir : dbic_clonedir_cleanup_readme
-
-create_distdir : dbic_clonedir_gen_readme
-
-dbic_clonedir_gen_readme :
-\tpod2text lib/DBIx/Class.pm > README
-
-dbic_clonedir_cleanup_readme :
-\t\$(RM_F) README
-
-EOP
-
-# keep the Makefile.PL eval happy
-1;
--- /dev/null
+# make sure manifest is deleted and generated anew on distdir
+# preparation, and is deleted on realclean
+
+postamble <<"EOM";
+
+fresh_manifest : remove_manifest manifest
+
+remove_manifest :
+\t\$(RM_F) MANIFEST
+
+realclean :: remove_manifest
+
+EOM
+
+# keep the Makefile.PL eval happy
+1;
--- /dev/null
+# When a long-standing branch is updated a README may still linger around
+unlink 'README' if -f 'README';
+
+# Makefile syntax allows adding extra dep-specs for already-existing targets,
+# and simply appends them on *LAST*-come *FIRST*-serve basis.
+# This allows us to inject extra dependencies for standard EUMM targets
+
+require File::Spec;
+my $dir = File::Spec->catdir(qw(maint .Generated_Pod));
+my $fn = File::Spec->catfile($dir, 'README');
+
+postamble <<"EOP";
+
+clonedir_generate_files : dbic_clonedir_gen_readme
+
+dbic_clonedir_gen_readme :
+\t@{[ $mm_proto->oneliner('mkpath', ['-MExtUtils::Command']) ]} $dir
+\tpod2text lib/DBIx/Class.pm > $fn
+
+EOP
+
+# keep the Makefile.PL eval happy
+1;
+++ /dev/null
-# Makefile syntax allows adding extra dep-specs for already-existing targets,
-# and simply appends them on *LAST*-come *FIRST*-serve basis.
-# This allows us to inject extra depenencies for standard EUMM targets
-
-postamble <<"EOP";
-
-.PHONY: dbic_distdir_dbicadmin_pod_inject
-
-distdir : dbic_distdir_dbicadmin_pod_inject
-
-# The pod self-injection code is in fact a hidden option in
-# dbicadmin itself, we execute the one in the distdir
-dbic_distdir_dbicadmin_pod_inject :
-\t\$(ABSPERL) -I\$(DISTVNAME)/lib \$(DISTVNAME)/script/dbicadmin --selfinject-pod
-
-EOP
-
-# keep the Makefile.PL eval happy
-1;
+++ /dev/null
-# generate the pod as both a clone-dir step, and a makefile distdir step
-my $ver = Meta->version;
-
-print "Regenerating Optional/Dependencies.pod\n";
-require DBIx::Class::Optional::Dependencies;
-DBIx::Class::Optional::Dependencies->_gen_pod ($ver);
-
-postamble <<"EOP";
-
-.PHONY: dbic_clonedir_gen_optdeps_pod
-
-create_distdir : dbic_clonedir_gen_optdeps_pod
-
-dbic_clonedir_gen_optdeps_pod :
-\t\$(ABSPERL) -Ilib -MDBIx::Class::Optional::Dependencies -e 'DBIx::Class::Optional::Dependencies->_gen_pod($ver)'
-
-EOP
-
-
-# keep the Makefile.PL eval happy
-1;
--- /dev/null
+use File::Path();
+use File::Glob();
+
+# leftovers in old checkouts
+unlink 'lib/DBIx/Class/Optional/Dependencies.pod'
+ if -f 'lib/DBIx/Class/Optional/Dependencies.pod';
+File::Path::rmtree( File::Glob::bsd_glob('.generated_pod'), { verbose => 0 } )
+ if -d '.generated_pod';
+
+my $pod_dir = 'maint/.Generated_Pod';
+my $ver = Meta->version;
+
+# cleanup the generated pod dir (again - kill leftovers from old checkouts)
+if (-d $pod_dir) {
+ File::Path::rmtree( File::Glob::bsd_glob("$pod_dir/*"), { verbose => 0 } );
+}
+else {
+ mkdir $pod_dir or die "Unable to create $pod_dir: $!";
+}
+
+# generate the OptDeps pod both in the clone-dir and during the makefile distdir
+{
+ print "Regenerating Optional/Dependencies.pod\n";
+
+ # this should always succeed - hence no error checking
+ # if someone breaks OptDeps - travis should catch it
+ require DBIx::Class::Optional::Dependencies;
+ DBIx::Class::Optional::Dependencies->_gen_pod ($ver, "$pod_dir/lib");
+
+ postamble <<"EOP";
+
+clonedir_generate_files : dbic_clonedir_gen_optdeps_pod
+
+dbic_clonedir_gen_optdeps_pod :
+\t@{[
+ $mm_proto->oneliner("DBIx::Class::Optional::Dependencies->_gen_pod(q($ver), q($pod_dir/lib))", [qw/-Ilib -MDBIx::Class::Optional::Dependencies/])
+]}
+
+EOP
+}
+
+
+# generate the script/dbicadmin pod
+{
+ print "Regenerating script/dbicadmin.pod\n";
+
+ # generating it in the root of $pod_dir
+ # it will *not* be copied over due to not being listed at the top
+ # of MANIFEST.SKIP - this is a *good* thing
+ # we only want to ship a script/dbicadmin, with the POD appended
+ # (see inject_dbicadmin_pod.pl), but still want to spellcheck and
+ # whatnot the intermediate step
+ my $pod_fn = "$pod_dir/dbicadmin.pod";
+
+  # if the author doesn't have the prereqs, don't fail the initial "perl Makefile.PL" step
+ my $great_success;
+ {
+ local @ARGV = ('--documentation-as-pod', $pod_fn);
+ local *CORE::GLOBAL::exit = sub { $great_success++; die; };
+ do 'script/dbicadmin';
+ }
+ if (!$great_success and ($@ || $!) ) {
+ printf ("FAILED!!! Subsequent `make dist` will fail. %s\n",
+ $ENV{DBICDIST_DEBUG}
+ ? 'Full error: ' . ($@ || $!)
+ : 'Re-run with $ENV{DBICDIST_DEBUG} set for more info'
+ );
+ }
+
+ postamble <<"EOP";
+
+clonedir_generate_files : dbic_clonedir_gen_dbicadmin_pod
+
+dbic_clonedir_gen_dbicadmin_pod :
+\t\$(ABSPERLRUN) -Ilib -- script/dbicadmin --documentation-as-pod @{[ $mm_proto->quote_literal($pod_fn) ]}
+
+EOP
+}
+
+
+# generate the inherit pods only during distbuilding phase
+# it is too slow to do at regular Makefile.PL
+{
+ postamble <<"EOP";
+
+clonedir_generate_files : dbic_clonedir_gen_inherit_pods
+
+dbic_clonedir_gen_inherit_pods :
+\t\$(ABSPERLRUN) -Ilib maint/gen_pod_inherit
+
+EOP
+}
+
+
+# on some OSes generated files may have an incorrect \n - fix it
+# so that the xt tests pass on a fresh checkout (also shipping a
+# dist with CRLFs is beyond obnoxious)
+if ($^O eq 'MSWin32' or $^O eq 'cygwin') {
+ {
+ local $ENV{PERLIO} = 'unix';
+ system( $^X, qw( -MExtUtils::Command -e dos2unix -- ), $pod_dir );
+ }
+
+ postamble <<"EOP";
+
+clonedir_post_generate_files : pod_crlf_fixup
+
+pod_crlf_fixup :
+@{[ $crlf_fixup->($pod_dir) ]}
+
+EOP
+}
+
+{
+ postamble <<"EOP";
+
+clonedir_post_generate_files : dbic_clonedir_copy_generated_pod
+
+dbic_clonedir_copy_generated_pod :
+\t\$(RM_F) $pod_dir.packlist
+\t@{[
+ $mm_proto->oneliner("install([ from_to => {q($pod_dir) => File::Spec->curdir(), write => q($pod_dir.packlist)}, verbose => 0, uninstall_shadows => 0, skip => [] ])", ['-MExtUtils::Install'])
+]}
+
+EOP
+}
+
+
+# everything that came from $pod_dir, needs to be removed from the workdir
+{
+ postamble <<"EOP";
+
+clonedir_cleanup_generated_files : dbic_clonedir_cleanup_generated_pod_copies
+
+dbic_clonedir_cleanup_generated_pod_copies :
+\t@{[ $mm_proto->oneliner('chomp && unlink || die', ['-n']) ]} $pod_dir.packlist
+\t\$(RM_F) $pod_dir.packlist
+
+EOP
+}
+
+# keep the Makefile.PL eval happy
+1;
+++ /dev/null
-# FIXME Disabled due to unsolved issues, ask theorbtwo
-#require Module::Install::Pod::Inherit;
-#PodInherit();
-
-# keep the Makefile.PL eval happy
-1;
--- /dev/null
+require File::Spec;
+my $ddl_fn = File::Spec->catfile(qw(t lib sqlite.sql));
+
+# If the author doesn't have the prereqs, we will end up obliterating
+# the ddl file, and all tests will fail, therefore don't do anything
+# on error
+# The EUMM build-stage generation will run unconditionally and
+# errors will not be trapped
+require DBIx::Class::Optional::Dependencies;
+if ( DBIx::Class::Optional::Dependencies->req_ok_for ('deploy') ) {
+ print "Regenerating t/lib/sqlite.sql\n";
+ if (my $out = ` "$^X" -Ilib maint/gen_schema `) {
+ open (my $fh, '>:unix', $ddl_fn) or die "Unable to open $ddl_fn: $!";
+ print $fh $out;
+ close $fh;
+
+    # if we don't do this, some git tools (e.g. gitk) get confused into
+    # thinking the ddl file is modified, when it clearly isn't
+ system('git status --porcelain >' . File::Spec->devnull);
+ }
+}
+
+postamble <<"EOP";
+
+clonedir_generate_files : dbic_clonedir_regen_test_ddl
+
+dbic_clonedir_regen_test_ddl :
+\t\$(ABSPERLRUN) -Ilib -- maint/gen_schema > @{[ $mm_proto->quote_literal($ddl_fn) ]}
+@{[ $crlf_fixup->($ddl_fn) ]}
+EOP
+
+# keep the Makefile.PL eval happy
+1;
+++ /dev/null
-# Makefile syntax allows adding extra dep-specs for already-existing targets,
-# and simply appends them on *LAST*-come *FIRST*-serve basis.
-# This allows us to inject extra depenencies for standard EUMM targets
-
-print "Removing MANIFEST, will regenerate on next `make dist(dir)`\n";
-unlink 'MANIFEST';
-
-# preamble. so that the manifest target is first, hence executes last
-preamble <<"EOP";
-
-create_distdir : manifest
-
-EOP
-
-# keep the Makefile.PL eval happy
-1;
--- /dev/null
+# without having the pod in the file itself, perldoc may very
+# well show a *different* document, because perl and perldoc
+# search @INC differently (crazy right?)
+#
+# make sure we delete and re-create the file - just an append
+# will not do what one expects, because on unixy systems the
+# target is symlinked to the original
+postamble <<"EOP";
+
+create_distdir : dbic_distdir_dbicadmin_pod_inject
+
+dbic_distdir_dbicadmin_pod_inject :
+\t\$(RM_F) \$(DISTVNAME)/script/dbicadmin
+\t@{[ $mm_proto->oneliner('cat', ['-MExtUtils::Command']) ]} script/dbicadmin maint/.Generated_Pod/dbicadmin.pod > \$(DISTVNAME)/script/dbicadmin
+
+# FIXME also on win32 EU::Command::cat() adds crlf even if the
+# source files do not contain any :(
+@{[ $crlf_fixup->('$(DISTVNAME)/script/dbicadmin') ]}
+EOP
+
+# keep the Makefile.PL eval happy
+1;
--- /dev/null
+#!/usr/bin/env perl
+
+use warnings;
+use strict;
+
+my $lib_dir = 'lib';
+my $pod_dir = 'maint/.Generated_Pod';
+
+my $result_metapod_fn = "$lib_dir/DBIx/Class/Manual/ResultClass.pod";
+
+die "POD generator must be executed from the dist root\n"
+ unless -d $lib_dir and -d $pod_dir;
+
+require File::Copy;
+File::Copy::copy(
+ "$result_metapod_fn.proto",
+ "$result_metapod_fn",
+) or die "Copying ResultClass proto pod ($result_metapod_fn) failed: $!";
+
+# cleanup
+END {
+ local ($@, $!, $?);
+ unlink $result_metapod_fn;
+}
+
+require Pod::Inherit;
+
+Pod::Inherit->new({
+ input_files => $lib_dir,
+ out_dir => "$pod_dir/lib",
+ force_permissions => 1,
+ class_map => {
+ "DBIx::Class::Relationship::HasMany" => "DBIx::Class::Relationship",
+ "DBIx::Class::Relationship::HasOne" => "DBIx::Class::Relationship",
+ "DBIx::Class::Relationship::BelongsTo" => "DBIx::Class::Relationship",
+ "DBIx::Class::Relationship::ManyToMany" => "DBIx::Class::Relationship",
+ "DBIx::Class::ResultSourceProxy" => "DBIx::Class::ResultSource",
+ },
+ # skip the deprecated classes that give out *DEPRECATED* warnings
+ skip_classes => [ qw(
+ lib/DBIx/Class/Storage/DBI/Sybase/MSSQL.pm
+ lib/DBIx/Class/Serialize/Storable.pm
+ lib/DBIx/Class/ResultSetManager.pm
+ lib/DBIx/Class/InflateColumn/File.pm
+ lib/DBIx/Class/DB.pm
+ lib/DBIx/Class/CDBICompat/
+ lib/DBIx/Class/CDBICompat.pm
+ ),
+ # skip the ::Storage:: family for now
+ qw(
+ lib/DBIx/Class/Storage/
+ lib/DBIx/Class/Storage.pm
+ ),
+ 'lib/DBIx/Class/Storage/DBI/Replicated/Pool.pm', # this one just errors out with: The 'add_attribute' method cannot be called on an immutable instance
+ 'lib/DBIx/Class/Relationship.pm', # it already documents its own inheritors
+ 'lib/DBIx/Class/Core.pm', # we actually don't want this populated in favor of redirecting users to the ResultClass docs
+ 'lib/DBIx/Class/Optional/Dependencies.pm' # the POD is already auto-generated
+ ],
+ # these appear everywhere, and are typically lower-level methods not used by the general user
+ skip_inherits => [ qw/
+ DBIx::Class
+ DBIx::Class::Componentised
+ Class::C3::Componentised
+ DBIx::Class::AccessorGroup
+ Class::Accessor::Grouped
+ Moose::Object
+ Exporter
+ / ],
+ force_inherits => {
+ 'DBIx::Class::Manual::ResultClass' => 'DBIx::Class::Core', # this forces the contents of ::Core to be dumped into the POD doc for ::ResultClass
+ },
+ dead_links => '',
+ method_format => 'L<%m|%c/%m>',
+ #debug => 1,
+})->write_pod;
+
+# important - write_pod returns undef >.<
+1;
'SQLite',
undef,
undef,
- { producer_args => { no_transaction => 1 } }
+ {
+ producer_args => { no_transaction => 1 },
+ quote_identifiers => 1,
+ no_comments => 1,
+ },
));
--- /dev/null
+#!/bin/bash
+
+source maint/travis-ci_scripts/common.bash
+if [[ -n "$SHORT_CIRCUIT_SMOKE" ]] ; then return ; fi
+
+if [[ -n "$BREWVER" ]] ; then
+ # .travis.yml already restricts branches to master, topic/* and smoke/*
+ # do some extra short-circuiting here
+
+  # pull requests are not smoked with custom-compiled perls at all
+  if [[ "$TRAVIS_PULL_REQUEST" != "false" ]]; then
+    echo_err "$(tstamp) pull-request smoking with custom perl compilation requested - bailing out"
+    export SHORT_CIRCUIT_SMOKE=1
+  # when smoking master do not attempt bleadperl (not release-critical)
+  elif [[ "$TRAVIS_BRANCH" = "master" ]] && [[ "$BREWVER" = "blead" ]]; then
+ echo_err "$(tstamp) master branch is not smoked with bleadperl - bailing out"
+ export SHORT_CIRCUIT_SMOKE=1
+ # on topic/ branches test only with travis perls
+ elif [[ "$TRAVIS_BRANCH" =~ "topic/" ]]; then
+ echo_err "$(tstamp) non-smoke branch and custom perl compilation requested - bailing out"
+ export SHORT_CIRCUIT_SMOKE=1
+ fi
+
+ if [[ -n "$SHORT_CIRCUIT_SMOKE" ]]; then
+ sleep 20 # give the console time to attach, otherwise it hangs
+ return # this is like an `exit 0` in sourcing
+ fi
+fi
+
+# different boxes we run on may have a different number of hw threads,
+# hence we need to query
+# the result is 1.5 times the number of physical threads
+export NUMTHREADS=$(( ( $(cut -f 2 -d '-' /sys/devices/system/cpu/online) + 1 ) * 15 / 10 ))
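+# e.g. (illustrative) a 4-way box exposes "0-3" in the file above: the cut
+# yields 3, and NUMTHREADS becomes ( 3 + 1 ) * 15 / 10 == 6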
+
+if [[ "$CLEANTEST" != "true" ]]; then
+### apt-get invocation - faster to grab everything at once
+ #
+ # FIXME these debconf lines should automate the firebird config but do not :(((
+ sudo bash -c 'echo -e "firebird2.5-super\tshared/firebird/enabled\tboolean\ttrue" | debconf-set-selections'
+ sudo bash -c 'echo -e "firebird2.5-super\tshared/firebird/sysdba_password/new_password\tpassword\t123" | debconf-set-selections'
+
+ APT_PACKAGES="memcached firebird2.5-super firebird2.5-dev expect"
+ run_or_err "Installing packages ($APT_PACKAGES)" "sudo apt-get install --allow-unauthenticated -y $APT_PACKAGES"
+
+### config memcached
+ export DBICTEST_MEMCACHED=127.0.0.1:11211
+
+### config mysql
+ run_or_err "Creating MySQL TestDB" "mysql -e 'create database dbic_test;'"
+ export DBICTEST_MYSQL_DSN='dbi:mysql:database=dbic_test;host=127.0.0.1'
+ export DBICTEST_MYSQL_USER=root
+
+### config pg
+ run_or_err "Creating PostgreSQL TestDB" "psql -c 'create database dbic_test;' -U postgres"
+ export DBICTEST_PG_DSN='dbi:Pg:database=dbic_test;host=127.0.0.1'
+ export DBICTEST_PG_USER=postgres
+
+### config firebird
+ # poor man's deb config
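+  # assumption: the \177 chars below are DEL characters, rubbing out any
+  # pre-filled answer in the dialog before "yes" is typed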
+ EXPECT_FB_SCRIPT='
+ spawn dpkg-reconfigure --frontend=text firebird2.5-super
+ expect "Enable Firebird server?"
+ send "\177\177\177\177yes\r"
+ expect "Password for SYSDBA"
+ send "123\r"
+ sleep 1
+ wait
+ sleep 1
+ '
+ # creating testdb
+ # FIXME - this step still fails from time to time >:(((
+ # has to do with the FB reconfiguration I suppose
+ # for now if it fails twice - simply skip FB testing
+ for i in 1 2 ; do
+
+ run_or_err "Re-configuring Firebird" "
+ sync
+ DEBIAN_FRONTEND=text sudo expect -c '$EXPECT_FB_SCRIPT'
+ sleep 1
+ sync
+ # restart the server for good measure
+ sudo /etc/init.d/firebird2.5-super stop || true
+ sleep 1
+ sync
+ sudo /etc/init.d/firebird2.5-super start
+ sleep 1
+ sync
+ "
+
+ if run_or_err "Creating Firebird TestDB" \
+ "echo \"CREATE DATABASE '/var/lib/firebird/2.5/data/dbic_test.fdb';\" | sudo isql-fb -u sysdba -p 123"
+ then
+ export DBICTEST_FIREBIRD_DSN=dbi:Firebird:dbname=/var/lib/firebird/2.5/data/dbic_test.fdb
+ export DBICTEST_FIREBIRD_USER=SYSDBA
+ export DBICTEST_FIREBIRD_PASS=123
+
+ export DBICTEST_FIREBIRD_INTERBASE_DSN=dbi:InterBase:dbname=/var/lib/firebird/2.5/data/dbic_test.fdb
+ export DBICTEST_FIREBIRD_INTERBASE_USER=SYSDBA
+ export DBICTEST_FIREBIRD_INTERBASE_PASS=123
+
+ break
+ fi
+
+ done
+
+### oracle
+ # FIXME: todo
+ #DBICTEST_ORA_DSN=dbi:Oracle:host=localhost;sid=XE
+ #DBICTEST_ORA_USER=dbic_test
+ #DBICTEST_ORA_PASS=123
+ #DBICTEST_ORA_EXTRAUSER_DSN=dbi:Oracle:host=localhost;sid=XE
+ #DBICTEST_ORA_EXTRAUSER_USER=dbic_test_extra
+ #DBICTEST_ORA_EXTRAUSER_PASS=123
+ #ORACLE_HOME=/usr/lib/oracle/xe/app/oracle/product/10.2.0/client
+fi
--- /dev/null
+#!/bin/bash
+
+source maint/travis-ci_scripts/common.bash
+if [[ -n "$SHORT_CIRCUIT_SMOKE" ]] ; then return ; fi
+
+TRAVIS_CPAN_MIRROR=$(echo "$PERL_CPANM_OPT" | grep -oP -- '--mirror\s+\S+' | head -n 1 | cut -d ' ' -f 2)
+if ! [[ "$TRAVIS_CPAN_MIRROR" =~ "http://" ]] ; then
+ echo_err "Unable to extract primary cpan mirror from PERL_CPANM_OPT - something is wrong"
+ echo_err "PERL_CPANM_OPT: $PERL_CPANM_OPT"
+ exit 1
+fi
+
+export PERL_MM_USE_DEFAULT=1 PERL_MM_NONINTERACTIVE=1 PERL_AUTOINSTALL_PREFER_CPAN=1 PERLBREW_CPAN_MIRROR="$TRAVIS_CPAN_MIRROR"
+
+# Fixup CPANM_OPT to behave more like a traditional cpan client
+export PERL_CPANM_OPT="$( echo $PERL_CPANM_OPT | sed 's/--skip-satisfied//' ) --verbose --no-interactive"
+
+if [[ -n "$BREWVER" ]] ; then
+ run_or_err "Compiling/installing Perl $BREWVER (without testing, may take up to 5 minutes)" \
+ "perlbrew install --as $BREWVER --notest --verbose $BREWOPTS -j $NUMTHREADS $BREWVER"
+
+  # can not do 'perlbrew use' in the run_or_err subshell above
+  perlbrew use $BREWVER || \
+    ( echo_err -e "Unable to switch to $BREWVER - compilation failed?\n$LASTOUT"; exit 1 )
+fi
+
+# configure CPAN.pm - older versions go into an endless loop
+# when trying to auto-configure themselves
+CPAN_CFG_SCRIPT="
+ require CPAN;
+ require CPAN::FirstTime;
+ *CPAN::FirstTime::conf_sites = sub {};
+ CPAN::Config->load;
+ \$CPAN::Config->{urllist} = [qw{ $TRAVIS_CPAN_MIRROR }];
+ \$CPAN::Config->{halt_on_failure} = 1;
+ CPAN::Config->commit;
+"
+run_or_err "Configuring CPAN.pm" "perl -e '$CPAN_CFG_SCRIPT'"
--- /dev/null
+#!/bin/bash
+
+source maint/travis-ci_scripts/common.bash
+if [[ -n "$SHORT_CIRCUIT_SMOKE" ]] ; then return ; fi
+
+# try Schwern's latest offering on a stock perl and a threaded blead
+# can't do this with CLEANTEST=true yet because a lot of our deps fail
+# tests left and right under T::B 1.5
+if [[ "$CLEANTEST" != "true" ]] && ( [[ -z "$BREWVER" ]] || [[ "$BREWVER" = "blead" ]] ) ; then
+  # FIXME - there's got to be a way to ask metacpan for this dynamically
+ TEST_BUILDER_BETA_CPAN_TARBALL="M/MS/MSCHWERN/Test-Simple-1.005000_005.tar.gz"
+fi
+
+
+if [[ "$CLEANTEST" = "true" ]]; then
+ # get the last inc/ off cpan - we will get rid of MI
+ # soon enough, but till then this will do
+ # the point is to have a *really* clean perl (the ones
+ # we build are guaranteed to be clean, without side
+ # effects from travis preinstalls)
+
+ # trick cpanm into executing true as shell - we just need the find+unpack
+ run_or_err "Downloading DBIC inc/ from CPAN" \
+ "SHELL=/bin/true cpanm --look DBIx::Class"
+
+ mv ~/.cpanm/latest-build/DBIx-Class-*/inc .
+
+  # this should be installable anywhere, regardless of prereqs
+ if [[ -n "$TEST_BUILDER_BETA_CPAN_TARBALL" ]] ; then
+ run_or_err "Pre-installing dev-beta of Test::Builder ($TEST_BUILDER_BETA_CPAN_TARBALL)" \
+ "cpan $TEST_BUILDER_BETA_CPAN_TARBALL"
+ fi
+
+ # older perls do not have a CPAN which understands configure_requires
+  # properly, and what is worse a `cpan Foo` run exits with 0 even if some
+ # modules failed to install
+ # The first CPAN which is somewhat sane is around 1.94_56 (perl 5.12)
+ # The problem is that the first sane version also brings a *lot* of
+ # deps with it, notably things like YAML and HTTP::Tiny
+ # The goal of CLEANTEST is to have as little extra stuff installed as
+ # possible, mainly to catch "but X is perl core" mistakes
+ # So instead we still use our stock (possibly old) CPAN, and add some
+ # handholding
+ CPAN_is_sane || \
+ run_or_err "Pre-installing ExtUtils::MakeMaker and Module::Build" \
+ "cpan ExtUtils::MakeMaker Module::Build"
+
+ if ! perl -MModule::Build -e 1 &> /dev/null ; then
+ echo_err -e "Module::Build installation failed\n$LASTOUT"
+ exit 1
+ fi
+
+  # DBI has by far the longest test runtime - run fewer tests
+ # FIXME horrible horrible hack, need to implement in DBI itself
+ run_or_err "Downloading latest DBI distdir from CPAN" \
+ "SHELL=/bin/true cpanm --look DBI"
+ cd ~/.cpanm/latest-build/DBI-*/
+ perl -p -i -e 's/(create_.+?_tests) => 1/$1 => 0/' Makefile.PL
+ run_or_err "Pre-installing DBI, but running less tests" "perl Makefile.PL && make && make test && make install"
+ cd - &>/dev/null
+
+else
+ # we will be running all dbic tests - preinstall lots of stuff, run basic tests
+ # using SQLT and set up whatever databases necessary
+ export DBICTEST_SQLT_DEPLOY=1
+
+  # do the preinstall in several passes to minimize the amount of cross-deps
+  # installing multiple times, and to avoid a module re-architecture breaking
+  # another install
+ # (e.g. once Carp is upgraded there's no more Carp::Heavy)
+ #
+ parallel_installdeps_notest ExtUtils::MakeMaker
+ parallel_installdeps_notest Carp
+ parallel_installdeps_notest Module::Build
+ parallel_installdeps_notest Module::Runtime ExtUtils::Depends File::Spec Data::Dumper
+ parallel_installdeps_notest Test::Exception LWP
+ parallel_installdeps_notest Test::Fatal Test::Warn bareword::filehandles
+ parallel_installdeps_notest namespace::clean Class::XSAccessor MRO::Compat
+ parallel_installdeps_notest DBD::SQLite Moo Class::Accessor::Grouped
+ parallel_installdeps_notest Module::Install DateTime::Format::Strptime
+ parallel_installdeps_notest JSON::DWIW JSON JSON::XS Test::Pod::Coverage Test::EOL
+ parallel_installdeps_notest MooseX::Types JSON::Any Class::DBI
+
+ if [[ -n "DBICTEST_FIREBIRD_DSN" ]] ; then
+ # the official version is full of 5.10-isms, but works perfectly fine on 5.8
+ # pull in our patched copy
+ run_or_err "Fetching patched DBD::Firebird" \
+ "git clone https://github.com/dbsrgits/perl-dbd-firebird-5.8.git ~/dbd-firebird"
+
+ # the official version is very much outdated and does not compile on 5.14+
+ # use this rather updated source tree (needs to go to PAUSE):
+ # https://github.com/pilcrow/perl-dbd-interbase
+ run_or_err "Fetching patched DBD::InterBase" \
+ "git clone https://github.com/dbsrgits/perl-dbd-interbase ~/dbd-interbase"
+
+ parallel_installdeps_notest ~/dbd-interbase/ ~/dbd-firebird/
+ fi
+
+fi
+
+# generate the makefile which will have different deps depending on
+# the runmode and envvars set above
+run_or_err "Configure on current branch" "perl Makefile.PL"
+
+# install (remaining) dependencies, sometimes with a gentle push
+if [[ "$CLEANTEST" = "true" ]]; then
+ # we may need to prepend some stuff to that list
+ HARD_DEPS="$(echo $(make listdeps))"
+
+ # this is a fucked CPAN - won't understand configure_requires of
+ # various pieces we may run into
+ CPAN_is_sane || HARD_DEPS="ExtUtils::Depends B::Hooks::OP::Check $HARD_DEPS"
+
+##### TEMPORARY WORKAROUNDS
+
+ # The unicode-in-yaml bug on older cpan clients
+  # FIXME there's got to be a saner way to fix this...
+ perl -M5.008008 -e 1 &> /dev/null || \
+ run_or_err "Installing multidimensional and bareword::filehandles via cpanm" \
+ "cpanm multidimensional bareword::filehandles"
+
+  # work around Params::Validate not having a Makefile.PL, which leaves
+  # really old toolchains unable to figure out what the prereqs are ;(
+ # Need to do more research before filing a bug requesting Makefile inclusion
+ perl -M5.008008 -e 1 &> /dev/null || \
+ HARD_DEPS="$(extract_prereqs Params::Validate) $HARD_DEPS"
+
+##### END TEMPORARY WORKAROUNDS
+
+ run_or_err "Installing/testing dependencies (may take up to 3 minutes): $HARD_DEPS" "cpan $HARD_DEPS"
+
+ # this is a fucked CPAN - save the log as we may need it
+ CPAN_is_sane || INSTALLDEPS_OUT="$LASTOUT"
+
+else
+ # listalldeps is deliberate - will upgrade everything it can find
+ parallel_installdeps_notest $(make listalldeps)
+
+ if [[ -n "$TEST_BUILDER_BETA_CPAN_TARBALL" ]] ; then
+ parallel_installdeps_notest $TEST_BUILDER_BETA_CPAN_TARBALL
+ fi
+fi
+
+echo_err "$(tstamp) Dependency configuration finished"
+# this will display list of available versions
+perl Makefile.PL
+
+# make sure we got everything we need
+if [[ -n "$(make listdeps)" ]] ; then
+ echo_err "$(tstamp) Not all deps installed - something went wrong :("
+ sleep 1 # without this the echo below confuses the console listener >.<
+ CPAN_is_sane || echo_err -e "Outdated CPAN.pm used - full logs follows\n$INSTALLDEPS_OUT\n\nSearch for 'NOT OK' in the text above\n\nDeps still missing:"
+ sleep 3 # without this the above echo confuses the console listener >.<
+ make listdeps
+ exit 1
+fi
+
+# announce what we are running
+echo_err "
+===================== DEPENDENCY CONFIGURATION COMPLETE =====================
+$(tstamp) Configuration phase seems to have taken $(date -ud "@$SECONDS" '+%H:%M:%S') (@$SECONDS)
+
+= CPUinfo
+$(perl -0777 -p -e 's/.+\n\n(?!\z)//s' < /proc/cpuinfo)
+
+= Meminfo
+$(free -m -t)
+
+= Environment
+$(env | grep -P 'TEST|TRAVIS|PERL|DBIC' | LC_ALL=C sort | cat -v)
+
+= Perl in use
+$(perl -V)
+============================================================================="
--- /dev/null
+#!/bin/bash
+
+source maint/travis-ci_scripts/common.bash
+if [[ -n "$SHORT_CIRCUIT_SMOKE" ]] ; then return ; fi
+
+export HARNESS_TIMER=1 HARNESS_OPTIONS=c:j$NUMTHREADS
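+# (HARNESS_OPTIONS per the Test::Harness docs: "c" enables colored harness
+# output, "j$NUMTHREADS" runs that many test scripts in parallel)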
+
+START_TIME=$SECONDS
+if [[ "$CLEANTEST" = "true" ]] ; then
+ echo_err "$(tstamp) Running tests with plain \`make test\`"
+ run_or_err "Prepare blib" "make pure_all"
+ make test
+else
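+  # prove flags: -l adds lib/ to @INC, -r recurses into t and xt, -s
+  # shuffles test order, -w enables warnings, -j$NUMTHREADS parallelizes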
+ PROVECMD="prove -lrswj$NUMTHREADS t xt"
+ echo_err "$(tstamp) running tests with \`$PROVECMD\`"
+ $PROVECMD
+fi
+
+echo "$(tstamp) Testing took a total of $(( $SECONDS - $START_TIME ))s"
--- /dev/null
+#!/bin/bash
+
+# !!! Nothing here will be executed !!!
+# The source-line calling this script is commented out in .travis.yml
+
+source maint/travis-ci_scripts/common.bash
+if [[ -n "$SHORT_CIRCUIT_SMOKE" ]] ; then return ; fi
+
+echo_err "Nothing to do"
+
+return 0
--- /dev/null
+#!/bin/bash
+
+source maint/travis-ci_scripts/common.bash
+if [[ -n "$SHORT_CIRCUIT_SMOKE" ]] ; then return ; fi
+
+[[ "$CLEANTEST" = "true" ]] || run_or_err "Attempt to build a dist with all prereqs present" "make dist"
--- /dev/null
+#!/bin/bash
+
+# !!! Nothing here will be executed !!!
+# The source-line calling this script is commented out in .travis.yml
+
+source maint/travis-ci_scripts/common.bash
+if [[ -n "$SHORT_CIRCUIT_SMOKE" ]] ; then return ; fi
+
+echo_err "Nothing to do"
+
+return 0
--- /dev/null
+#!/bin/bash
+
+set -e
+
+echo_err() { echo "$@" 1>&2 ; }
+
+if [[ "$TRAVIS" != "true" ]] ; then
+ echo_err "Running this script makes no sense outside of travis-ci"
+ exit 1
+fi
+
+tstamp() { echo -n "[$(date '+%H:%M:%S')]" ; }
+
+run_or_err() {
+ echo_err -n "$(tstamp) $1 ... "
+
+ LASTEXIT=0
+ START_TIME=$SECONDS
+ LASTOUT=$( bash -c "$2" 2>&1 ) || LASTEXIT=$?
+ DELTA_TIME=$(( $SECONDS - $START_TIME ))
+
+ if [[ "$LASTEXIT" != "0" ]] ; then
+ echo_err -e "FAILED !!! (after ${DELTA_TIME}s)\nCommand executed:\n$2\nSTDOUT+STDERR:\n$LASTOUT"
+ return $LASTEXIT
+ else
+ echo_err "done (took ${DELTA_TIME}s)"
+ fi
+}
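+# typical use (sketch, hypothetical command): run_or_err "Frobnicating" "frob --nicate"
+# prints a timestamped progress line, and on failure echoes both the exact
+# command and its combined output before returning the failing exit code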
+
+extract_prereqs() {
+ # once --verbose is set, --no-verbose can't disable it
+ # do this by hand
+ ORIG_CPANM_OPT="$PERL_CPANM_OPT"
+ PERL_CPANM_OPT="$( echo $PERL_CPANM_OPT | sed 's/--verbose//' )"
+
+ # hack-hack-hack
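+  # split stderr from stdout: the inner $(...) captures stdout into $stdout
+  # while stderr escapes to the outer capture; the sentinel string lets us
+  # carve the two apart again below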
+ LASTEXIT=0
+ COMBINED_OUT="$( { stdout="$(cpanm --quiet --scandeps --format tree "$@")" ; } 2>&1; echo "!!!STDERRSTDOUTSEPARATOR!!!$stdout")" \
+ || LASTEXIT=$?
+
+ PERL_CPANM_OPT="$ORIG_CPANM_OPT"
+
+ OUT=${COMBINED_OUT#*!!!STDERRSTDOUTSEPARATOR!!!}
+ ERR=$(grep -v " is up to date." <<< "${COMBINED_OUT%!!!STDERRSTDOUTSEPARATOR!!!*}")
+
+ if [[ "$LASTEXIT" != "0" ]] || [[ -n "$ERR" ]] ; then
+ echo_err "$(echo -e "Error occured (exit code $LASTEXIT) retrieving dependencies of $@:\n$ERR\n$OUT")"
+ exit 1
+ fi
+
+ # throw away non-children (what was in $@), throw away ascii art, convert to modnames
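+  # e.g. (illustrative) a child line "  \_ Try-Tiny-0.12" loses its ascii-art
+  # prefix and trailing version, coming out as "Try::Tiny"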
+ perl -p -e 's/^[a-z].+//i; s/^[^a-z]+//i; s/\-[^\-]+$/ /; s/\-/::/g' <<< "$OUT"
+}
+
+parallel_installdeps_notest() {
+ if [[ -z "$@" ]] ; then return; fi
+
+ # flatten list into one string
+ MODLIST=$(echo "$@")
+
+ # The reason we do things so "non-interactively" is that xargs -P will have the
+ # latest cpanm instance overwrite the buildlog. There seems to be no way to
+ # specify a custom buildlog, hence we just collect the verbose output
+ # and display it in case of failure
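+  # (xargs -n 1 hands each module to its own cpanm invocation, and -P keeps
+  # up to $NUMTHREADS of them running at once)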
+ run_or_err "Installing (without testing) $MODLIST" \
+ "echo $MODLIST | xargs -n 1 -P $NUMTHREADS cpanm --notest --no-man-pages"
+}
+
+
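+# "sane" = a CPAN.pm new enough (1.94_56, first shipped with perl 5.12) to
+# properly understand configure_requires - see the CLEANTEST notes above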
+CPAN_is_sane() { perl -MCPAN\ 1.94_56 -e 1 &>/dev/null ; }
['Actions'],
["action" => hidden => { one_of => [
    ['create' => 'Create version diffs (needs preversion)'],
- ['upgrade' => 'Upgrade the database to the current schema '],
+ ['upgrade' => 'Upgrade the database to the current schema'],
['install' => 'Install the schema version tables to an existing database'],
['deploy' => 'Deploy the schema to the database'],
['select' => 'Select data from the schema'],
['delete' => 'Delete data from the schema'],
    ['op:s' => 'compatibility option, all of the above can be supplied as --op=<action>'],
['help' => 'display this help', { implies => { schema_class => '__dummy__' } } ],
- ['selfinject-pod' => 'hidden', { implies => { schema_class => '__dummy__' } } ],
+ ['documentation-as-pod:s' => 'hidden', { implies => { schema_class => '__dummy__' } } ],
], required => 1 }],
['Arguments'],
["configuration" => hidden => { one_of => [
)
);
-if($opts->{selfinject_pod}) {
-
- die "This is an internal method, do not call!!!\n"
- unless $ENV{MAKELEVEL};
-
- $usage->synopsis($synopsis_text);
- $usage->short_description($short_description);
- exec (
- $^X,
- qw/-p -0777 -i -e/,
- (
- 's/^# auto_pod_begin.*^# auto_pod_end/'
- . quotemeta($usage->pod)
- . '/ms'
- ),
- __FILE__
- );
+if(defined (my $fn = $opts->{documentation_as_pod}) ) {
+ $usage->synopsis($synopsis_text);
+ $usage->short_description($short_description);
+
+ if ($fn) {
+ require File::Spec;
+ require File::Path;
+ my $dir = File::Spec->catpath( (File::Spec->splitpath($fn))[0,1] );
+ File::Path::mkpath([$dir]);
+ }
+
+ local *STDOUT if $fn;
+ open (STDOUT, '>', $fn) or die "Unable to open $fn: $!\n" if $fn;
+
+ print STDOUT "\n";
+ print STDOUT $usage->pod;
+ print STDOUT "\n";
+
+ close STDOUT if $fn;
+ exit 0;
}
# FIXME - lowercasing will eventually go away when Getopt::Long::Descriptive is fixed
}
}
+1;
__END__
-
-# auto_pod_begin
-#
-# This will be replaced by the actual pod when selfinject-pod is invoked
-#
-# auto_pod_end
-
-# vim: et ft=perl
#################### DEPLOY
- $schema->deploy( { add_drop_table => 1 } );
+ $schema->deploy;
#################### DOES ORDERING WORK?
#################### DEPLOY2
- warnings_exist { $schema2->deploy( { add_drop_table => 1 } ) }
+ warnings_exist { $schema2->deploy }
[qr/no such table: main.aba_name_artists/],
"Deploying the bad schema produces a warning: aba_name_artists was not created.";
--- /dev/null
+package ResultClassInflator;
+
+sub new { bless {}, __PACKAGE__ }
+
+1;
+
+package main;
+
+use strict;
+use warnings;
+
+use Test::More;
+use Test::Exception;
+use lib qw(t/lib);
+use DBICTest;
+
+my $schema = DBICTest->init_schema();
+
+my $source = $schema->source('CD');
+
+lives_ok {
+ $source->result_class('ResultClassInflator');
+ is($source->result_class => 'ResultClassInflator', "result_class gives us back class");
+ is($source->get_component_class('result_class') => 'ResultClassInflator',
+ "and so does get_component_class");
+
+ } 'Result class still works with class';
+lives_ok {
+ my $obj = ResultClassInflator->new();
+ $source->result_class($obj);
+ is($source->result_class => $obj, "result_class gives us back obj");
+ is($source->get_component_class('result_class') => $obj, "and so does get_component_class");
+ } 'Result class works with object';
+
+done_testing;
use strict;
+use warnings;
use Test::More;
use Data::Dumper;
use warnings;
use Test::More;
+use lib qw(t/lib);
+use DBICTest;
+
plan skip_all => 'DBIC does not actively support threads before perl 5.8.5'
if $] < '5.008005';
-use lib qw(t/lib);
-use DBICTest;
+plan skip_all => 'Potential problems on Win32 Perl < 5.14 and Variable::Magic - investigation pending'
+ if $^O eq 'MSWin32' && $] < 5.014 && DBICTest::RunMode->is_plain;
# README: If you set the env var to a number greater than 10,
# we will use that many children
use lib qw(t/lib);
use DBICTest::RunMode;
-use DBICTest::Util qw/populate_weakregistry assert_empty_weakregistry/;
+use DBICTest::Util::LeakTracer qw/populate_weakregistry assert_empty_weakregistry/;
+use Scalar::Util 'refaddr';
use DBIx::Class;
use B 'svref_2object';
BEGIN {
require DBI;
require DBD::SQLite;
require FileHandle;
+ require Moo;
%$weak_registry = ();
}
leaky_resultset => $rs_bind_circref,
leaky_resultset_cond => $cond_rowobj,
- leaky_resultset_member => $rs_bind_circref->next,
};
+ # this needs to fire, even if it can't find anything
+ # see FIXME below
+ # we run this only on smokers - trying to establish a pattern
+ $rs_bind_circref->next
+ if ( ($ENV{TRAVIS}||'') ne 'true' and DBICTest::RunMode->is_smoker);
+
require Storable;
%$base_collection = (
%$base_collection,
# Moo keeps globals around, this is normal
delete $weak_registry->{$slot};
}
- elsif ($slot =~ /^SQL::Translator/) {
- # SQLT is a piece of shit, leaks all over
- delete $weak_registry->{$slot};
+ elsif ($slot =~ /^SQL::Translator::Generator::DDL::SQLite/) {
+ # SQLT::Producer::SQLite keeps global generators around for quoted
+ # and non-quoted DDL, allow one for each quoting style
+ delete $weak_registry->{$slot}
+ unless $cleared->{sqlt_ddl_sqlite}->{@{$weak_registry->{$slot}{weakref}->quote_chars}}++;
}
elsif ($slot =~ /^Hash::Merge/) {
# only clear one object of a specific behavior - more would indicate trouble
delete $weak_registry->{$slot}
unless $cleared->{mk_row_parser_dd_singleton}++;
}
- elsif (DBIx::Class::_ENV_::INVISIBLE_DOLLAR_AT and $slot =~ /^__TxnScopeGuard__FIXUP__/) {
- delete $weak_registry->{$slot}
- }
elsif ($slot =~ /^DateTime::TimeZone/) {
# DT is going through a refactor it seems - let it leak zones for now
delete $weak_registry->{$slot};
# half of it is in XS no leaktracer sees it, and Devel::FindRef is equally
# stumped when trying to trace the origin. The problem is:
#
-# $cond_object --> result_source --> schema --> storage --> $dbh --> {cached_kids}
+# $cond_object --> result_source --> schema --> storage --> $dbh --> {CachedKids}
# ^ /
# \-------- bound value on prepared/cached STH <-----------/
#
-TODO: {
- local $TODO = 'Not sure how to fix this yet, an entanglment could be an option';
- my $r = $weak_registry->{'basic leaky_resultset_cond'}{weakref};
- ok(! defined $r, 'We no longer leak!')
- or $r->result_source(undef);
+{
+ local $TODO = 'This fails intermittently - see RT#82942';
+ if ( my $r = $weak_registry->{'basic leaky_resultset_cond'}{weakref} ) {
+ ok(! defined $r, 'Self-referential RS conditions no longer leak!')
+ or $r->result_source(undef);
+ }
}
assert_empty_weakregistry ($weak_registry);
strict
warnings
+ constant
+ Config
+
base
mro
overload
Exporter
B
- locale
-
+ Devel::GlobalDestruction
namespace::clean
Try::Tiny
Context::Preserve
# this subclass is expected to inherit whatever crap comes
# from the parent
'DBIx::Class::ResultSet::Pager',
-
- # this is not part of the inheritance tree (plus is a temporary fix anyway)
- 'DBIx::Class::GlobalDestruction',
-
- # Moo does not name its generated methods, fix pending
- 'DBIx::Class::Storage::BlockRunner',
) };
my $has_cmop = eval { require Class::MOP };
for my $name (keys %all_method_like) {
- next if ( DBIx::Class::_ENV_::BROKEN_NAMESPACE_CLEAN() and $name =~ /^carp(?:_unique|_once)?$/ );
+ next if ( DBIx::Class::_ENV_::BROKEN_NAMESPACE_CLEAN and $name =~ /^carp(?:_unique|_once)?$/ );
# overload is a funky thing - it is not cleaned, and its imports are named funny
next if $name =~ /^\(/;
my $gv = svref_2object($all_method_like{$name})->GV;
my $origin = $gv->STASH->NAME;
- TODO: {
- local $TODO;
- if ($name =~ /^__CAG_/) {
- $TODO = 'CAG does not clean its BEGIN constants';
- }
-
- is ($gv->NAME, $name, "Properly named $name method at $origin" . ($origin eq $mod
- ? ''
- : " (inherited by $mod)"
- ));
- }
+ is ($gv->NAME, $name, "Properly named $name method at $origin" . ($origin eq $mod
+ ? ''
+ : " (inherited by $mod)"
+ ));
next if $seen->{"${origin}:${name}"}++;
}
}
- next if DBIx::Class::_ENV_::BROKEN_NAMESPACE_CLEAN();
+ next if DBIx::Class::_ENV_::BROKEN_NAMESPACE_CLEAN;
# some common import names (these should never ever be methods)
for my $f (qw/carp carp_once carp_unique croak confess cluck try catch finally/) {
);
}
+# test to make sure that calling ->new() on a resultset object gives
+# us a row object
+{
+ my $new_artist = $schema->resultset('Artist')->new({});
+ isa_ok( $new_artist, 'DBIx::Class::Row', '$rs->new gives a row object' );
+}
+
+
# make sure we got rid of the compat shims
SKIP: {
- skip "Remove in 0.082", 3 if $DBIx::Class::VERSION < 0.082;
+ my $remove_version = 0.083;
+ skip "Remove in $remove_version", 3 if $DBIx::Class::VERSION < $remove_version;
for (qw/compare_relationship_keys pk_depends_on resolve_condition/) {
- ok (! DBIx::Class::ResultSource->can ($_), "$_ no longer provided by DBIx::Class::ResultSource");
+ ok (! DBIx::Class::ResultSource->can ($_), "$_ no longer provided by DBIx::Class::ResultSource, removed before $remove_version");
}
}
my ($dsn, $user, $pass) = @ENV{map { "DBICTEST_MYSQL_${_}" } qw/DSN USER PASS/};
-#warn "$dsn $user $pass";
-
plan skip_all => 'Set $ENV{DBICTEST_MYSQL_DSN}, _USER and _PASS to run this test'
unless ($dsn && $user);
-my $schema = DBICTest::Schema->connect($dsn, $user, $pass);
+my $schema = DBICTest::Schema->connect($dsn, $user, $pass, { quote_names => 1 });
my $dbh = $schema->storage->dbh;
is_same_sql_bind (
$rs->as_query,
'(
- SELECT me.cdid, me.artist, me.title, me.year, me.genreid, me.single_track,
- artist.artistid, artist.name, artist.rank, artist.charfield
- FROM cd me
- INNER JOIN artist artist ON artist.artistid = me.artist
+ SELECT `me`.`cdid`, `me`.`artist`, `me`.`title`, `me`.`year`, `me`.`genreid`, `me`.`single_track`,
+ `artist`.`artistid`, `artist`.`name`, `artist`.`rank`, `artist`.`charfield`
+ FROM cd `me`
+ INNER JOIN `artist` `artist` ON `artist`.`artistid` = `me`.`artist`
)',
[],
  'overridden default join type works',
is_same_sql_bind (
$cdsrc->resultset->search({}, { prefetch => 'straight_artist' })->as_query,
'(
- SELECT me.cdid, me.artist, me.title, me.year, me.genreid, me.single_track,
- straight_artist.artistid, straight_artist.name, straight_artist.rank, straight_artist.charfield
- FROM cd me
- STRAIGHT_JOIN artist straight_artist ON straight_artist.artistid = me.artist
+ SELECT `me`.`cdid`, `me`.`artist`, `me`.`title`, `me`.`year`, `me`.`genreid`, `me`.`single_track`,
+ `straight_artist`.`artistid`, `straight_artist`.`name`, `straight_artist`.`rank`, `straight_artist`.`charfield`
+ FROM cd `me`
+ STRAIGHT_JOIN `artist` `straight_artist` ON `straight_artist`.`artistid` = `me`.`artist`
)',
[],
'straight joins correctly supported for mysql'
}, 'count on grouped columns with the same name does not throw');
}
+# a more contrived^Wcomplicated self-referential double-subquery test
+{
+ my $rs = $schema->resultset('Artist')->search({ name => { -like => 'baby_%' } });
+
+ $rs->populate([map { [$_] } ('name', map { "baby_$_" } (1..10) ) ]);
+
+ my ($count_sql, @count_bind) = @${$rs->count_rs->as_query};
+
+ my $complex_rs = $schema->resultset('Artist')->search(
+ { artistid => {
+ -in => $rs->get_column('artistid')
+ ->as_query
+ } },
+ );
+
+ $complex_rs->update({ name => \[ "CONCAT( `name`, '_bell_out_of_', $count_sql )", @count_bind ] });
+
+ for (1..10) {
+ is (
+ $schema->resultset('Artist')->search({ name => "baby_${_}_bell_out_of_10" })->count,
+ 1,
+ "Correctly updated babybell $_",
+ );
+ }
+
+ my $ac = $schema->resultset('Artist')->count_rs;
+ my $old_count = $ac->next;
+ $ac->reset;
+
+ my $orig_debug = $schema->storage->debug;
+ $schema->storage->debug(1);
+ my $query_count = 0;
+ $schema->storage->debugcb(sub { $query_count++ });
+ $complex_rs->delete;
+ $schema->storage->debugcb(undef);
+ $schema->storage->debug($orig_debug);
+
+ is ($query_count, 1, 'One delete query fired');
+ is ($old_count - $ac->next, 10, '10 Artists correctly deleted');
+}
+
ZEROINSEARCH: {
my $cds_per_year = {
2001 => 2,
# kill our $dbh
$schema_autorecon->storage->_dbh(undef);
- TODO: {
+ {
local $TODO = "Perl $] is known to leak like a sieve"
- if DBIx::Class::_ENV_::PEEPEENESS();
+ if DBIx::Class::_ENV_::PEEPEENESS;
ok (! defined $orig_dbh, 'Parent $dbh handle is gone');
}
# try to do something dbic-esque
$rs->create({ name => "Hardcore Forker $$" });
- TODO: {
+ {
local $TODO = "Perl $] is known to leak like a sieve"
- if DBIx::Class::_ENV_::PEEPEENESS();
+ if DBIx::Class::_ENV_::PEEPEENESS;
ok (! defined $orig_dbh, 'DBIC operation triggered reconnect - old $dbh is gone');
}
} 'find by arrayref (equal)';
# test inferred condition for creation
- TODO: for my $cond (
+ for my $cond (
{ -value => [3,4] },
\[ '= ?' => [arrayfield => [3, 4]] ],
) {
'insert returning capability guessed correctly'
);
+isa_ok (DBICTest::Schema->connect($dsn, $user, $pass)->storage->sql_maker, 'DBIx::Class::SQLMaker::Oracle');
+
+# see if determining a driver with bad credentials throws properly
+throws_ok {
+ DBICTest::Schema->connect($dsn, "BORKED BORKED USER $user", $pass)->storage->sql_maker;
+} qr/DBI Connection failed/;
+
##########
# the recyclebin (new for 10g) sometimes comes in the way
my $on_connect_sql = $v >= 10 ? ["ALTER SESSION SET recyclebin = OFF"] : [];
);
# test complex join (exercise orajoins)
- lives_ok {
- my @hri = $schema->resultset('CD')->search(
+ lives_ok { is_deeply (
+ $schema->resultset('CD')->search(
{ 'artist.name' => 'pop_art_1', 'me.cdid' => { '!=', 999} },
{ join => 'artist', prefetch => 'tracks', rows => 4, order_by => 'tracks.trackid' }
- )->hri_dump->all;
-
- my $expect = [{
+ )->all_hri,
+ [{
artist => 1,
cdid => 1,
genreid => undef,
},
],
year => 2003
- }];
-
- is_deeply (
- \@hri,
- $expect,
- 'Correct set of data prefetched',
- );
-
- } 'complex prefetch ok';
+ }],
+ 'Correct set of data prefetched',
+ ) } 'complex prefetch ok';
# test sequence detection from a different schema
SKIP: {
# http://download.oracle.com/docs/cd/A87860_01/doc/server.817/a76961/ch294.htm#993
# Oracle Database Reference 10g Release 2 (10.2)
# http://download.oracle.com/docs/cd/B19306_01/server.102/b14237/statviews_2107.htm#sthref1297
- local $TODO = "On Oracle8i all_triggers view is empty, i don't yet know why..."
+ todo_skip "On Oracle8i all_triggers view is empty, i don't yet know why...", 1
if $schema->storage->_server_info->{normalized_dbms_version} < 9;
my $schema2 = $schema->connect($dsn2, $user2, $pass2, $opt);
sub _run_blob_tests {
SKIP: {
-TODO: {
my ($schema, $opt) = @_;
my %binstr = ( 'small' => join('', map { chr($_) } ( 1 .. 127 )) );
$binstr{'large'} = $binstr{'small'} x 1024;
ok (try { $objs[0]->blob }||'' eq "blob:$str", 'blob inserted/retrieved correctly');
ok (try { $objs[0]->clob }||'' eq "clob:$str", 'clob inserted/retrieved correctly');
- TODO: {
+ {
local $TODO = '-like comparison on blobs not tested before ora 10 (fails on 8i)'
if $schema->storage->_server_info->{normalized_dbms_version} < 10;
}
$schema->storage->debug ($orig_debug);
-}}
+}
do_clean ($dbh);
}
is( $lim->all, 2, 'Number of ->all objects matches count' );
# Limit with select-lock
-TODO: {
+{
local $TODO = "Seems we can't SELECT ... FOR ... on subqueries";
lives_ok {
$schema->txn_do (sub {
my $test_type = "Dialect:$dialect Quoted:$quoted";
# basic limit support
- TODO: {
+ {
my $art_rs = $schema->resultset ('Artist');
$art_rs->delete;
$art_rs->create({ name => 'Artist ' . $_ }) for (1..6);
is ($owners->page(1)->count, 3, "$test_type: has-many prefetch returns correct count");
is ($owners->page(3)->count, 2, "$test_type: has-many prefetch returns correct count");
- TODO: {
+ {
local $TODO = "Top-limit does not work when your limit ends up past the resultset"
if $dialect eq 'Top';
is ($owners->page(3)->all, 2, "$test_type: has_many prefetch returns correct number of rows");
is ($books->page(1)->count, 2, "$test_type: Prefetched grouped search returns correct count");
is ($books->page(2)->count, 1, "$test_type: Prefetched grouped search returns correct count");
- TODO: {
+ {
local $TODO = "Top-limit does not work when your limit ends up past the resultset"
if $dialect eq 'Top';
is ($books->page(2)->all, 1, "$test_type: Prefetched grouped search returns correct number of rows");
SQL
});
- TODO: {
+ {
my $freetds_and_dynamic_cursors = 1
if $opts_name eq 'use_dynamic_cursors' &&
$schema->storage->_using_freetds;
}
# test insert in an outer transaction when there's an active cursor
- TODO: {
+ {
local $TODO = 'this should work once we have eager cursors';
# clear state, or we get a deadlock on $row->delete
$rs->delete;
}
- # test transaction handling on a disconnected handle
my $wrappers = {
no_transaction => sub { shift->() },
txn_do => sub { my $code = shift; $schema->txn_do(sub { $code->() } ) },
txn_begin => sub { $schema->txn_begin; shift->(); $schema->txn_commit },
txn_guard => sub { my $g = $schema->txn_scope_guard; shift->(); $g->commit },
};
+
+ # test transaction handling on a disconnected handle
for my $wrapper (keys %$wrappers) {
$rs->delete;
} "transaction on disconnected handle with $wrapper wrapper";
}
- TODO: {
+ # test transaction handling on a disconnected handle with multiple active
+ # statements
+ for my $wrapper (keys %$wrappers) {
+ $schema->storage->disconnect;
+ $rs->delete;
+ $rs->reset;
+ $rs->create({ amount => 1000 + $_ }) for (1..3);
+
+ my $artist_rs = $schema->resultset('Artist')->search({
+ name => { -like => 'Artist %' }
+  });
+
+ $rs->next;
+
+ my $map = [ ['Artist 1', '1002.00'], ['Artist 2', '1003.00'] ];
+
+ weaken(my $a_rs_cp = $artist_rs);
+
local $TODO = 'Transaction handling with multiple active statements will '
- .'need eager cursor support.';
-
- # test transaction handling on a disconnected handle with multiple active
- # statements
- my $wrappers = {
- no_transaction => sub { shift->() },
- txn_do => sub { my $code = shift; $schema->txn_do(sub { $code->() } ) },
- txn_begin => sub { $schema->txn_begin; shift->(); $schema->txn_commit },
- txn_guard => sub { my $g = $schema->txn_scope_guard; shift->(); $g->commit },
- };
- for my $wrapper (keys %$wrappers) {
- $rs->reset;
- $rs->delete;
- $rs->create({ amount => 1000 + $_ }) for (1..3);
-
- my $artist_rs = $schema->resultset('Artist')->search({
- name => { -like => 'Artist %' }
- });;
-
- $rs->next;
-
- my $map = [ ['Artist 1', '1002.00'], ['Artist 2', '1003.00'] ];
-
- weaken(my $a_rs_cp = $artist_rs);
-
- lives_and {
- my @results;
- $wrappers->{$wrapper}->( sub {
- while (my $money = $rs_cp->next) {
- my $artist = $a_rs_cp->next;
- push @results, [ $artist->name, $money->amount ];
- };
- });
-
- is_deeply \@results, $map;
- } "transactions with multiple active statement with $wrapper wrapper";
- }
+ .'need eager cursor support.'
+ unless $wrapper eq 'no_transaction';
+
+ lives_and {
+ my @results;
+
+ $wrappers->{$wrapper}->( sub {
+ while (my $money = $rs_cp->next) {
+ my $artist = $a_rs_cp->next;
+ push @results, [ $artist->name, $money->amount ];
+ };
+ });
+
+ is_deeply \@results, $map;
+ } "transactions with multiple active statement with $wrapper wrapper";
}
# test RNO detection when version detection fails
next unless $dsn;
+ note "Testing with ${prefix}_DSN";
+
skip ("Testing with ${prefix}_DSN needs " . DBIx::Class::Optional::Dependencies->req_missing_for( $env2optdep->{$prefix} ), 1)
unless DBIx::Class::Optional::Dependencies->req_ok_for($env2optdep->{$prefix});
# test savepoints
throws_ok {
$schema->txn_do(sub {
+ my ($schema, $ars) = @_;
eval {
$schema->txn_do(sub {
$ars->create({ name => 'in_savepoint' });
'savepoint rolled back');
$ars->create({ name => 'in_outer_txn' });
die "rolling back outer txn";
- });
+ }, $schema, $ars);
} qr/rolling back outer txn/,
'correct exception for rollback';
use Test::More;
use Test::Exception;
use Test::Warn;
+use Time::HiRes 'time';
use Config;
use lib qw(t/lib);
'rollback from inner transaction';
}
+# check that we work somewhat OK with braindead SQLite transaction handling
+#
+# As per https://metacpan.org/source/ADAMK/DBD-SQLite-1.37/lib/DBD/SQLite.pm#L921
+# SQLite does *not* try to synchronize
+
+for my $prefix_comment (qw/Begin_only Commit_only Begin_and_Commit/) {
+ note "Testing with comment prefixes on $prefix_comment";
+
+  # FIXME: the warning below won't help us for the time being, so silence it;
+  # perhaps when (if ever) DBD::SQLite gets fixed
+  # we can do something extra here
+ local $SIG{__WARN__} = sub { warn @_ if $_[0] !~ /Internal transaction state .+? does not seem to match/ }
+ unless $ENV{TEST_VERBOSE};
+
+ my ($c_begin, $c_commit) = map { $prefix_comment =~ $_ ? 1 : 0 } (qr/Begin/, qr/Commit/);
+
+ my $schema = DBICTest->init_schema( no_deploy => 1 );
+ my $ars = $schema->resultset('Artist');
+
+ ok (! $schema->storage->connected, 'No connection yet');
+
+ $schema->storage->dbh->do(<<'DDL');
+CREATE TABLE artist (
+ artistid INTEGER PRIMARY KEY NOT NULL,
+ name varchar(100),
+ rank integer DEFAULT 13,
+ charfield char(10) NULL
+);
+DDL
+
+ my $artist = $ars->create({ name => 'Artist_' . time() });
+ is ($ars->count, 1, 'Inserted artist ' . $artist->name);
+
+ ok ($schema->storage->connected, 'Connected');
+ ok ($schema->storage->_dbh->{AutoCommit}, 'DBD not in txn yet');
+
+ $schema->storage->dbh->do(join "\n",
+ $c_begin ? '-- comment' : (),
+ 'BEGIN TRANSACTION'
+ );
+ ok ($schema->storage->connected, 'Still connected');
+ {
+    local $TODO = 'SQLite is braindead wrt detecting BEGIN' if $c_begin;
+ ok (! $schema->storage->_dbh->{AutoCommit}, "DBD aware of txn begin with comments on $prefix_comment");
+ }
+
+ $schema->storage->dbh->do(join "\n",
+ $c_commit ? '-- comment' : (),
+ 'COMMIT'
+ );
+ ok ($schema->storage->connected, 'Still connected');
+ {
+    local $TODO = 'SQLite is braindead wrt detecting COMMIT' if $c_commit and ! $c_begin;
+    ok ($schema->storage->_dbh->{AutoCommit}, "DBD aware of txn end with comments on $prefix_comment");
+ }
+
+ is ($ars->count, 1, 'Inserted artists still there');
+
+ {
+ # this never worked in the 1st place
+    local $TODO = 'SQLite is braindead wrt detecting COMMIT' if ! $c_begin and $c_commit;
+
+ # odd argument passing, because such nested crefs leak on 5.8
+ lives_ok {
+ $schema->storage->txn_do (sub {
+ ok ($_[0]->find({ name => $_[1] }), "Artist still where we left it after cycle with comments on $prefix_comment");
+ }, $ars, $artist->name );
+ } "Succesfull transaction with comments on $prefix_comment";
+ }
+}
+
+
my $schema = DBICTest->init_schema();
# make sure the side-effects of RT#67581 do not result in data loss
+# test upper/lower boundaries for sqlite and some values in between
# range is -(2**63) .. 2**63 - 1
-for my $bi (qw/
- -9223372036854775808
- -9223372036854775807
- -8694837494948124658
- -6848440844435891639
- -5664812265578554454
- -5380388020020483213
- -2564279463598428141
- 2442753333597784273
- 4790993557925631491
- 6773854980030157393
- 7627910776496326154
- 8297530189347439311
- 9223372036854775806
- 9223372036854775807
-/) {
- $row = $schema->resultset('BigIntArtist')->create({ bigint => $bi });
- is ($row->bigint, $bi, "value in object correct ($bi)");
-
- TODO: {
- local $TODO = 'This perl does not seem to have 64bit int support - DBI roundtrip of large int will fail'
- unless $Config{ivsize} >= 8;
+SKIP: {
+ skip 'This perl does not seem to have 64bit int support - DBI roundtrip of large int will fail with DBD::SQLite < 1.37', 1
+ if ($Config{ivsize} < 8 and ! eval { DBD::SQLite->VERSION(1.37); 1 });
+
+ for my $bi (qw/
+ -9223372036854775808
+ -9223372036854775807
+ -8694837494948124658
+ -6848440844435891639
+ -5664812265578554454
+ -5380388020020483213
+ -2564279463598428141
+ 2442753333597784273
+ 4790993557925631491
+ 6773854980030157393
+ 7627910776496326154
+ 8297530189347439311
+ 9223372036854775806
+ 9223372036854775807
+ /) {
+ $row = $schema->resultset('BigIntArtist')->create({ bigint => $bi });
+ is ($row->bigint, $bi, "value in object correct ($bi)");
$row->discard_changes;
is ($row->bigint, $bi, "value in database correct ($bi)");
# bind values are always alphabetically ordered by column, thus [1]
# the single quotes are an artefact of the debug-system
-TODO: {
+{
local $TODO = "This has been broken since rev 1191, Mar 2006";
is ($bind[1], "'$bytestream_title'", 'INSERT: raw bytes sent to the database');
}
$cd->title('something_else');
ok( $cd->is_column_changed('title'), 'column is dirty after setting to something completely different');
-TODO: {
+{
local $TODO = 'There is currently no way to propagate aliases to inflate_result()';
$cd = $schema->resultset('CD')->find ({ title => $utf8_title }, { select => 'title', as => 'name' });
ok (utf8::is_utf8( $cd->get_column ('name') ), 'utf8 flag propagates via as');
use Test::More;
use Config;
+use File::Spec;
use lib qw(t/lib);
use DBICTest;
BEGIN {
- require DBIx::Class;
- plan skip_all => 'Test needs ' . DBIx::Class::Optional::Dependencies->req_missing_for('admin_script')
- unless DBIx::Class::Optional::Dependencies->req_ok_for('admin_script');
+ require DBIx::Class;
+ plan skip_all => 'Test needs ' .
+ DBIx::Class::Optional::Dependencies->req_missing_for('test_admin_script')
+ unless DBIx::Class::Optional::Dependencies->req_ok_for('test_admin_script');
+
+ # just in case the user env has stuff in it
+ delete $ENV{JSON_ANY_ORDER};
}
+use JSON::Any;
+
$ENV{PATH} = '';
$ENV{PERL5LIB} = join ($Config{path_sep}, @INC);
SKIP: {
skip ("JSON backend $js is not available, skip testing", 1) if $@;
- $ENV{JSON_ANY_ORDER} = $js;
+ local $ENV{JSON_ANY_ORDER} = $js;
eval { test_dbicadmin () };
diag $@ if $@;
}
test_exec( default_args(), qw|--op=insert --set={"name":"Aran"}| );
SKIP: {
- skip ("MSWin32 doesn't support -| either", 1) if $^O eq 'MSWin32';
+ skip ("MSWin32 doesn't support -|", 1) if $^O eq 'MSWin32';
my ($perl) = $^X =~ /(.*)/;
- open(my $fh, "-|", ( $perl, 'script/dbicadmin', default_args(), qw|--op=select --attrs={"order_by":"name"}| ) ) or die $!;
+ open(my $fh, "-|", ( $perl, '-MDBICTest::RunMode', 'script/dbicadmin', default_args(), qw|--op=select --attrs={"order_by":"name"}| ) ) or die $!;
my $data = do { local $/; <$fh> };
close($fh);
if (!ok( ($data=~/Aran.*Trout/s), "$ENV{JSON_ANY_ORDER}: select with attrs" )) {
}
sub default_args {
- my $dbname = DBICTest->_sqlite_dbfilename;
+ my $dsn = JSON::Any->encode([
+ 'dbi:SQLite:dbname=' . DBICTest->_sqlite_dbfilename,
+ '',
+ '',
+ { AutoCommit => 1 },
+ ]);
+
return (
qw|--quiet --schema=DBICTest::Schema --class=Employee|,
- qq|--connect=["dbi:SQLite:dbname=$dbname","","",{"AutoCommit":1}]|,
+ qq|--connect=$dsn|,
qw|--force -I testincludenoniterference|,
);
}
-# Why do we need this crap? Apparently MSWin32 can not pass through quotes properly
-# (sometimes it will and sometimes not, depending on what compiler was used to build
-# perl). So we go the extra mile to escape all the quotes. We can't also use ' instead
-# of ", because JSON::XS (proudly) does not support "malformed JSON" as the author
-# calls it. Bleh.
-#
sub test_exec {
my ($perl) = $^X =~ /(.*)/;
- my @args = ('script/dbicadmin', @_);
+ my @args = ($perl, '-MDBICTest::RunMode', File::Spec->catfile(qw(script dbicadmin)), @_);
- if ( $^O eq 'MSWin32' ) {
- $perl = qq|"$perl"|; # execution will fail if $^X contains paths
- for (@args) {
- $_ =~ s/"/\\"/g;
- }
+ if ($^O eq 'MSWin32') {
+ require Win32::ShellQuote; # included in test optdeps
+ @args = Win32::ShellQuote::quote_system_list(@args);
}
- system ($perl, @args);
+ system @args;
}
use strict;
+use warnings;
use Test::More;
use lib 't/cdbi/testlib';
use strict;
+use warnings;
+
use Test::More;
use Scalar::Util 'refaddr';
use namespace::clean;
use strict;
+use warnings;
+
use Test::More;
#----------------------------------------------------------------------
use strict;
+use warnings;
use Test::More;
@YA::Film::ISA = 'Film';
use strict;
+use warnings;
use Test::More;
use lib 't/cdbi/testlib';
use strict;
+use warnings;
use Test::More;
use lib 't/cdbi/testlib';
like $@, qr/class/, "add_to_actors must be object method";
eval { my $pj = $btaste->add_to_actors(%pj_data) };
-like $@, qr/needs/, "add_to_actors takes hash";
+like $@, qr/expects a hashref/, "add_to_actors takes hash";
ok(
my $pj = $btaste->add_to_actors(
use strict;
+use warnings;
use Test::More;
use lib 't/cdbi/testlib';
use strict;
+use warnings;
use Test::More;
use lib 't/cdbi/testlib';
use strict;
+use warnings;
use Test::More;
use lib 't/cdbi/testlib';
use strict;
+use warnings;
use Test::More;
use lib 't/cdbi/testlib';
use strict;
+use warnings;
use Test::More;
INIT {
};
is $@, '', "No errors";
-TODO: { local $TODO = 'TODOifying failing tests, waiting for Schwern'; ok (1, 'remove me');
eval {
my $data = { %$data };
$data->{NumExplodingSheep} = 3;
ok my $bt = Film->find_or_create($data),
"find_or_create Modified accessor - create with column name";
isa_ok $bt, "Film";
+
+ local $TODO = 'TODOifying failing tests, waiting for Schwern';
is $bt->sheep, 3, 'sheep bursting violently';
};
is $@, '', "No errors";
ok my $bt = Film->find_or_create($data),
"find_or_create Modified accessor - create with accessor";
isa_ok $bt, "Film";
+
+ local $TODO = 'TODOifying failing tests, waiting for Schwern';
is $bt->sheep, 4, 'sheep bursting violently';
};
is $@, '', "No errors";
my @film = Film->search({ sheep => 1 });
is @film, 2, "Can search with modified accessor";
};
-is $@, '', "No errors";
-
+{
+ local $TODO = 'TODOifying failing tests, waiting for Schwern';
+ is $@, '', "No errors";
}
{
use strict;
+use warnings;
use Test::More;
use lib 't/cdbi/testlib';
use strict;
+use warnings;
use Test::More;
use lib 't/cdbi/testlib';
use strict;
+use warnings;
use Test::More;
use lib 't/cdbi/testlib';
use strict;
+use warnings;
use Test::More;
use lib 't/cdbi/testlib';
$| = 1;
+use warnings;
use strict;
use Test::More;
-use Test::More;
-
use strict;
+use warnings;
+use Test::More;
use lib 't/cdbi/testlib';
use Actor;
use strict;
+use warnings;
use Test::More;
use Data::Dumper;
use strict;
+use warnings;
use Test::More;
use Test::Warn;
use strict;
+use warnings;
use Test::More;
use lib 't/cdbi/testlib';
use strict;
+use warnings;
use Test::More;
use lib 't/cdbi/testlib';
use strict;
+use warnings;
use Test::More;
#----------------------------------------------------------------------
use strict;
+use warnings;
use Test::More;
use lib 't/cdbi/testlib';
use strict;
+use warnings;
use Test::More;
use Test::Warn;
use strict;
+use warnings;
use Test::More;
use lib 't/cdbi/testlib';
use strict;
+use warnings;
use Test::More;
INIT {
use strict;
+use warnings;
use Test::More;
INIT {
use strict;
+use warnings;
use Test::More;
use strict;
+use warnings;
use Test::More;
use Class::Inspector ();
use strict;
+use warnings;
use Test::More;
use lib 't/cdbi/testlib';
use strict;
+use warnings;
use Test::More;
#----------------------------------------------------------------------
use strict;
+use warnings;
use Test::More;
INIT {
use strict;
+use warnings;
use Test::More;
use lib 't/cdbi/testlib';
use strict;
+use warnings;
use Test::More;
$| = 1;
use strict;
+use warnings;
use Test::More;
INIT {
use strict;
+use warnings;
use Test::More;
use lib 't/cdbi/testlib';
use strict;
+use warnings;
use Test::More;
use Test::Exception;
package # hide from PAUSE
Blurb;
+use warnings;
use strict;
+
use base 'DBIC::Test::SQLite';
__PACKAGE__->set_table('Blurbs');
package # hide from PAUSE
CDBase;
+use warnings;
use strict;
+
use base qw(DBIC::Test::SQLite);
1;
package # hide from PAUSE
Director;
+use warnings;
use strict;
+
use base 'DBIC::Test::SQLite';
__PACKAGE__->set_table('Directors');
package # hide from PAUSE
Film;
-use base 'DBIC::Test::SQLite';
+use warnings;
use strict;
+use base 'DBIC::Test::SQLite';
+
__PACKAGE__->set_table('Movies');
__PACKAGE__->columns('Primary', 'Title');
__PACKAGE__->columns('Essential', qw( Title ));
package # hide from PAUSE
Lazy;
-use base 'DBIC::Test::SQLite';
+use warnings;
use strict;
+use base 'DBIC::Test::SQLite';
+
__PACKAGE__->set_table("Lazy");
__PACKAGE__->columns('Primary', qw(this));
__PACKAGE__->columns('Essential', qw(opop));
package # hide from PAUSE
Log;
+use warnings;
+use strict;
+
use base 'MyBase';
-use strict;
use Time::Piece::MySQL;
use POSIX;
package # hide from PAUSE
MyBase;
+use warnings;
use strict;
+
use DBI;
use lib 't/lib';
package # hide from PAUSE
MyFilm;
+use warnings;
+use strict;
+
use base 'MyBase';
use MyStarLink;
-use strict;
-
__PACKAGE__->set_table();
__PACKAGE__->columns(All => qw/filmid title/);
__PACKAGE__->has_many(_stars => 'MyStarLink');
package # hide from PAUSE
MyFoo;
+use warnings;
+use strict;
+
use base 'MyBase';
use Date::Simple 3.03;
-use strict;
-
__PACKAGE__->set_table();
__PACKAGE__->columns(All => qw/myid name val tdate/);
__PACKAGE__->has_a(
package # hide from PAUSE
MyStar;
-use base 'MyBase';
-
+use warnings;
use strict;
+use base 'MyBase';
+
__PACKAGE__->set_table();
__PACKAGE__->columns(All => qw/starid name/);
__PACKAGE__->has_many(films => [ MyStarLink => 'film' ]);
package # hide from PAUSE
MyStarLink;
-use base 'MyBase';
-
+use warnings;
use strict;
+use base 'MyBase';
+
__PACKAGE__->set_table();
__PACKAGE__->columns(All => qw/linkid film star/);
__PACKAGE__->has_a(film => 'MyFilm');
package # hide from PAUSE
MyStarLinkMCPK;
+use warnings;
+use strict;
+
use base 'MyBase';
use MyStar;
use MyFilm;
-use strict;
-
# This is a many-to-many mapping table that uses the two foreign keys
# as its own primary key - there's no extra 'auto-inc' column here
package # hide from PAUSE
Order;
+use warnings;
use strict;
+
use base 'DBIC::Test::SQLite';
__PACKAGE__->set_table('orders');
package # hide from PAUSE
OtherFilm;
+use warnings;
use strict;
+
use base 'Film';
__PACKAGE__->set_table('Different_Film');
package OtherThing;
+
+use warnings;
+use strict;
+
use base 'DBIC::Test::SQLite';
OtherThing->set_table("other_thing");
package Thing;
+
+use warnings;
+use strict;
+
use base 'DBIC::Test::SQLite';
Thing->set_table("thing");
is ($crs->next, 2, 'Correct artist count (each with one 2001 cd)');
}
+# count with two having clauses
+{
+ my $rs = $schema->resultset("Artist")->search(
+ {},
+ {
+ join => 'cds',
+ group_by => 'me.artistid',
+ '+select' => [ { max => 'cds.year', -as => 'newest_cd_year' } ],
+ '+as' => ['newest_cd_year'],
+ having => { 'newest_cd_year' => [ '1998', '2001' ] }
+ }
+ );
+
+ my $crs = $rs->count_rs;
+
+ is_same_sql_bind (
+ $crs->as_query,
+ '(SELECT COUNT( * )
+ FROM (
+ SELECT me.artistid, MAX( cds.year ) AS newest_cd_year
+ FROM artist me
+ LEFT JOIN cd cds ON cds.artist = me.artistid
+ GROUP BY me.artistid
+ HAVING newest_cd_year = ? OR newest_cd_year = ?
+ ) me
+ )',
+ [
+ [ { dbic_colname => 'newest_cd_year' }
+ => '1998' ],
+ [ { dbic_colname => 'newest_cd_year' }
+ => '2001' ],
+ ],
+ 'count with having clause keeps sql as alias',
+ );
+
+ is ($crs->next, 3, 'Correct artist count (each with one 1998 or 2001 cd)');
+}
+
done_testing;
throws_ok(
sub { my $row = $schema->resultset('Tag')->search({}, { select => { distinct => [qw/tag cd/] } })->first },
- qr/select => { distinct => \.\.\. } syntax is not supported for multiple columns/,
+ qr/\Qselect => { distinct => ... } syntax is not supported for multiple columns/,
'throw on unsupported syntax'
);
is ($rs->next->get_column ('num_cds'), 3, 'Function aliased correctly');
}
+# and check distinct has_many join count
+{
+ my $rs = $schema->resultset('Artist')->search(
+ { 'cds.title' => { '!=', 'fooooo' } },
+ {
+ join => 'cds',
+ distinct => 1,
+ '+select' => [ { count => 'cds.cdid', -as => 'amount_of_cds' } ],
+ '+as' => [qw/num_cds/],
+ order_by => { -desc => 'amount_of_cds' },
+ }
+ );
+
+ is_same_sql_bind (
+ $rs->as_query,
+ '(
+ SELECT me.artistid, me.name, me.rank, me.charfield, COUNT( cds.cdid ) AS amount_of_cds
+ FROM artist me
+ LEFT JOIN cd cds
+ ON cds.artist = me.artistid
+ WHERE cds.title != ?
+ GROUP BY me.artistid, me.name, me.rank, me.charfield
+ ORDER BY amount_of_cds DESC
+ )',
+ [
+ [{
+ sqlt_datatype => 'varchar',
+ dbic_colname => 'cds.title',
+ sqlt_size => 100,
+ } => 'fooooo' ],
+ ],
+ );
+
+ is_same_sql_bind (
+ $rs->count_rs->as_query,
+ '(
+ SELECT COUNT( * )
+ FROM (
+ SELECT me.artistid, me.name, me.rank, me.charfield
+ FROM artist me
+ LEFT JOIN cd cds
+ ON cds.artist = me.artistid
+ WHERE cds.title != ?
+ GROUP BY me.artistid, me.name, me.rank, me.charfield
+ ) me
+ )',
+ [
+ [{
+ sqlt_datatype => 'varchar',
+ dbic_colname => 'cds.title',
+ sqlt_size => 100,
+ } => 'fooooo' ],
+ ],
+ );
+
+ is ($rs->next->get_column ('num_cds'), 3, 'Function aliased correctly');
+}
+
# These two rely on the database to throw an exception. This might not be the case one day. Please revise.
dies_ok(sub { my $count = $schema->resultset('Tag')->search({}, { '+select' => \'tagid AS tag_id', distinct => 1 })->count }, 'expecting to die');
$a2_cds->search ({}, { rows => 1})->delete;
is ($cdrs->count, $total_cds -= 1, 'related + limit delete ok');
-TODO: {
+{
local $TODO = 'delete_related is based on search_related which is based on search which does not understand object arguments';
local $SIG{__WARN__} = sub {}; # trap the non-numeric warning, remove when the TODO is removed
} [$dt_warn_re],
'using a DateTime object in ->search generates a warning';
-TODO: {
+{
local $TODO = "We can't do this yet before 0.09" if DBIx::Class->VERSION < 0.09;
is(eval { $row->id }, 1, 'DT in search');
use DBICTest;
use Scope::Guard ();
-my ($dsn, $user, $pass) = @ENV{map { "DBICTEST_FIREBIRD_${_}" } qw/DSN USER PASS/};
-my ($dsn2, $user2, $pass2) = @ENV{map { "DBICTEST_FIREBIRD_INTERBASE_${_}" } qw/DSN USER PASS/};
-my ($dsn3, $user3, $pass3) = @ENV{map { "DBICTEST_FIREBIRD_ODBC_${_}" } qw/DSN USER PASS/};
-
-plan skip_all => 'Test needs ' .
- (join ' and ', map { $_ ? $_ : () }
- DBIx::Class::Optional::Dependencies->req_missing_for('test_dt'),
- (join ' or ', map { $_ ? $_ : () }
- DBIx::Class::Optional::Dependencies->req_missing_for('test_rdbms_firebird'),
- DBIx::Class::Optional::Dependencies->req_missing_for('test_rdbms_firebird_interbase'),
- DBIx::Class::Optional::Dependencies->req_missing_for('test_rdbms_firebird_odbc')))
- unless
- DBIx::Class::Optional::Dependencies->req_ok_for ('test_dt') && (
- $dsn && DBIx::Class::Optional::Dependencies->req_ok_for('test_rdbms_firebird')
- or
- $dsn2 && DBIx::Class::Optional::Dependencies->req_ok_for('test_rdbms_firebird_interbase')
- or
- $dsn3 && DBIx::Class::Optional::Dependencies->req_ok_for('test_rdbms_firebird_odbc'))
- or (not $dsn || $dsn2 || $dsn3);
-
-if (not ($dsn || $dsn2)) {
- plan skip_all => <<'EOF';
-Set $ENV{DBICTEST_FIREBIRD_DSN} and/or $ENV{DBICTEST_FIREBIRD_INTERBASE_DSN}
-and/or $ENV{DBICTEST_FIREBIRD_ODBC_DSN}
-_USER and _PASS to run this test'.
-Warning: This test drops and creates a table called 'event'";
-EOF
-}
+my $env2optdep = {
+ DBICTEST_FIREBIRD => 'test_rdbms_firebird',
+ DBICTEST_FIREBIRD_INTERBASE => 'test_rdbms_firebird_interbase',
+ DBICTEST_FIREBIRD_ODBC => 'test_rdbms_firebird_odbc',
+};
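+# (each key doubles as the prefix for the _DSN/_USER/_PASS env vars
+# interrogated below)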
+
+plan skip_all => join (' ',
+ 'Set $ENV{DBICTEST_FIREBIRD_DSN} and/or $ENV{DBICTEST_FIREBIRD_INTERBASE_DSN}',
+ 'and/or $ENV{DBICTEST_FIREBIRD_ODBC_DSN},',
+ '_USER and _PASS to run these tests.',
-my @info = (
- [ $dsn, $user, $pass ],
- [ $dsn2, $user2, $pass2 ],
- [ $dsn3, $user3, $pass3 ],
-);
+ "WARNING: This test drops and creates a table called 'event'",
+) unless grep { $ENV{"${_}_DSN"} } keys %$env2optdep;
+
+plan skip_all => ( 'Test needs ' . DBIx::Class::Optional::Dependencies->req_missing_for('test_dt') )
+ unless DBIx::Class::Optional::Dependencies->req_ok_for ('test_dt');
my $schema;
-foreach my $conn_idx (0..$#info) {
- my ($dsn, $user, $pass) = @{ $info[$conn_idx] || [] };
+for my $prefix (keys %$env2optdep) { SKIP: {
+
+ my ($dsn, $user, $pass) = map { $ENV{"${prefix}_$_"} } qw/DSN USER PASS/;
next unless $dsn;
+ note "Testing with ${prefix}_DSN";
+
+ skip ("Testing with ${prefix}_DSN needs " . DBIx::Class::Optional::Dependencies->req_missing_for( $env2optdep->{$prefix} ), 1)
+ unless DBIx::Class::Optional::Dependencies->req_ok_for($env2optdep->{$prefix});
+
$schema = DBICTest::Schema->connect($dsn, $user, $pass, {
quote_char => '"',
name_sep => '.',
'fractional part of a second survived';
is $row->starts_at, $date_only, 'DATE as DateTime roundtrip';
-}
+} }
done_testing;
};
$dbh->do("CREATE TABLE track (trackid NUMBER(12), cd NUMBER(12), position NUMBER(12), title VARCHAR(255), last_updated_on DATE, last_updated_at $timestamp_datatype)");
-TODO: {
+# TODO is in effect for the rest of the tests
local $TODO = 'FIXME - something odd is going on with Oracle < 9 datetime support'
if ($schema->storage->_server_info->{normalized_dbms_version}||0) < 9;
+
lives_ok {
# insert a row to play with
is( int $track->last_updated_at->nanosecond, int 500_000_000,
'TIMESTAMP nanoseconds survived' );
-} 'dateteime operations executed correctly' } # end of lives_ok/TODO block
+} 'datetime operations executed correctly';
done_testing;
$fc->file({ handle => $fh, filename => $new_fname });
$fc->update;
-TODO: {
+{
local $TODO = 'design change required';
ok ( ! -e $storage, 'old storage does not exist' );
};
$fc->file->{filename},
);
-TODO: {
+{
local $TODO = 'need resultset delete override to delete_all';
$rs->delete;
ok ( ! -e $storage, 'storage does not exist after $rs->delete' );
-};
+}
package DBICNSTest::Bogus::A;
+
+use warnings;
+use strict;
+
use base qw/DBIx::Class::Core/;
__PACKAGE__->table('a');
__PACKAGE__->add_columns('a');
package DBICNSTest::Result::B;
+
+use warnings;
+use strict;
+
use base qw/DBIx::Class::Core/;
__PACKAGE__->table('b');
__PACKAGE__->add_columns('b');
package DBICNSTest::Bogus::Bigos;
+use warnings;
+use strict;
+
1;
package DBICNSTest::OtherRslt::D;
+
+use warnings;
+use strict;
+
use base qw/DBIx::Class::Core/;
__PACKAGE__->table('d');
__PACKAGE__->add_columns('d');
package DBICNSTest::RSBase;
+
+use warnings;
+use strict;
+
use base qw/DBIx::Class::ResultSet/;
1;
package DBICNSTest::RSet::A;
+
+use warnings;
+use strict;
+
use base qw/DBIx::Class::ResultSet/;
1;
package DBICNSTest::RSet::C;
+
+use warnings;
+use strict;
+
use base qw/DBIx::Class::ResultSet/;
1;
package DBICNSTest::Result::A;
+
+use warnings;
+use strict;
+
use base qw/DBIx::Class::Core/;
__PACKAGE__->table('a');
__PACKAGE__->add_columns('a');
package DBICNSTest::Result::B;
+
+use warnings;
+use strict;
+
use base qw/DBIx::Class::Core/;
__PACKAGE__->table('b');
__PACKAGE__->add_columns('b');
package DBICNSTest::Result::D;
+
+use warnings;
+use strict;
+
use base qw/DBIx::Class::Core/;
__PACKAGE__->table('d');
__PACKAGE__->add_columns('d');
package DBICNSTest::ResultSet::A;
+
+use warnings;
+use strict;
+
use base qw/DBIx::Class::ResultSet/;
1;
package DBICNSTest::ResultSet::C;
+
+use warnings;
+use strict;
+
use base qw/DBIx::Class::ResultSet/;
1;
package DBICNSTest::ResultSet::D;
+
+use warnings;
+use strict;
+
1;
package DBICNSTest::Rslt::A;
+
+use warnings;
+use strict;
+
use base qw/DBIx::Class::Core/;
__PACKAGE__->table('a');
__PACKAGE__->add_columns('a');
package DBICNSTest::Rslt::B;
+
+use warnings;
+use strict;
+
use base qw/DBIx::Class::Core/;
__PACKAGE__->table('b');
__PACKAGE__->add_columns('b');
use warnings;
use DBICTest::RunMode;
use DBICTest::Schema;
-use DBICTest::Util qw/populate_weakregistry assert_empty_weakregistry local_umask/;
+use DBICTest::Util::LeakTracer qw/populate_weakregistry assert_empty_weakregistry/;
+use DBICTest::Util 'local_umask';
use Carp;
use Path::Class::File ();
use File::Spec;
-use Fcntl qw/:flock/;
+use Fcntl qw/:DEFAULT :flock/;
=head1 NAME
sub import {
my $self = shift;
- my $lockpath = File::Spec->tmpdir . '/.dbictest_global.lock';
+ my $lockpath = DBICTest::RunMode->tmpdir->file('.dbictest_global.lock');
{
my $u = local_umask(0); # so that the file opens as 666, and any user can lock
- open ($global_lock_fh, '>', $lockpath)
+ sysopen ($global_lock_fh, $lockpath, O_RDWR|O_CREAT)
or die "Unable to open $lockpath: $!";
}
}
sub __mk_disconnect_guard {
- return if DBIx::Class::_ENV_::PEEPEENESS(); # leaks handles, delaying DESTROY, can't work right
+ return if DBIx::Class::_ENV_::PEEPEENESS; # leaks handles, delaying DESTROY, can't work right
my $db_file = shift;
return unless -f $db_file;
use strict;
use warnings;
+# must load before any DBIx::Class* namespaces
+use DBICTest::RunMode;
+
+use base 'DBIx::Class::Core';
+
#use base qw/DBIx::Class::Relationship::Cascade::Rekey DBIx::Class::Core/;
-use base qw/DBIx::Class::Core/;
-use DBICTest::BaseResultSet;
__PACKAGE__->table ('bogus');
__PACKAGE__->resultset_class ('DBICTest::BaseResultSet');
use strict;
use warnings;
-use base qw/DBIx::Class::ResultSet/;
+# must load before any DBIx::Class* namespaces
+use DBICTest::RunMode;
-sub hri_dump {
- return shift->search ({}, { result_class => 'DBIx::Class::ResultClass::HashRefInflator' });
+use base 'DBIx::Class::ResultSet';
+
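+# Replaces the old hri_dump() helper: returns an arrayref of plain hashes
+# (via HashRefInflator), convenient for is_deeply() comparisons in tests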
+sub all_hri {
+ return [ shift->search ({}, { result_class => 'DBIx::Class::ResultClass::HashRefInflator' })->all ];
}
1;
--- /dev/null
+package # hide from PAUSE
+ DBICTest::BaseSchema;
+
+use strict;
+use warnings;
+
+# must load before any DBIx::Class* namespaces
+use DBICTest::RunMode;
+
+use base 'DBIx::Class::Schema';
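+
+# Test schemas inherit from this class (instead of DBIx::Class::Schema
+# directly) so that the RunMode load-order guarantee above applies to
+# every schema in the suite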
+
+1;
package # hide from PAUSE
DBICTest::ResultSetManager;
-use base 'DBIx::Class::Schema';
+
+use warnings;
+use strict;
+
+use base 'DBICTest::BaseSchema';
__PACKAGE__->load_classes("Foo");
package # hide from PAUSE
DBICTest::ResultSetManager::Foo;
+
+use warnings;
+use strict;
+
use base 'DBIx::Class::Core';
__PACKAGE__->load_components(qw/ ResultSetManager /);
}
use Path::Class qw/file dir/;
+use File::Spec;
_check_author_makefile() unless $ENV{DBICTEST_NO_MAKEFILE_VERIFICATION};
+# PathTools has a bug where on MSWin32 it will often return / as a tmpdir.
+# This is *really* stupid and the result of having our lockfiles all over
+# the place is also rather obnoxious. So we use our own heuristics instead
+# https://rt.cpan.org/Ticket/Display.html?id=76663
+my $tmpdir;
+sub tmpdir {
+ dir ($tmpdir ||= do {
+
+ my $dir = dir(File::Spec->tmpdir);
+
+ my @parts = File::Spec->splitdir($dir);
+ if (@parts == 2 and $parts[1] eq '') {
+      # This means we were given the root dir (C:\ or something equally unacceptable)
+      # Replace with our local project tmpdir. This will make multiple runs
+      # from different checkouts conflict with each other, but is much better
+      # than polluting the root dir with random crap
+ $dir = _find_co_root()->subdir('t')->subdir('var');
+ $dir->mkpath;
+ }
+
+ $dir->stringify;
+ });
+}
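+
+# For illustration (hypothetical paths): on a sane system the heuristic above
+# simply returns dir('/tmp'); on an afflicted MSWin32 install where tmpdir()
+# degrades to the drive root, it returns <checkout>/t/var instead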
+
+
# Die if the author did not update his makefile
#
# This is pretty heavy handed, so the check is pretty solid:
}
sub is_smoker {
- return ( $ENV{AUTOMATED_TESTING} && ! $ENV{PERL5_CPANM_IS_RUNNING} && ! $ENV{RELEASE_TESTING} )
+ return
+ ( ($ENV{TRAVIS}||'') eq 'true' )
+ ||
+ ( $ENV{AUTOMATED_TESTING} && ! $ENV{PERL5_CPANM_IS_RUNNING} && ! $ENV{RELEASE_TESTING} )
+ ;
}
sub is_plain {
use warnings;
no warnings 'qw';
-use base 'DBIx::Class::Schema';
+use base 'DBICTest::BaseSchema';
use Fcntl qw/:DEFAULT :seek :flock/;
use Time::HiRes 'sleep';
-use Path::Class::File;
-use File::Spec;
-use DBICTest::Util qw/populate_weakregistry assert_empty_weakregistry local_umask/;
+use DBICTest::RunMode;
+use DBICTest::Util::LeakTracer qw/populate_weakregistry assert_empty_weakregistry/;
+use DBICTest::Util 'local_umask';
use namespace::clean;
__PACKAGE__->mk_group_accessors(simple => 'custom_attr');
# Also if there is no connection - there is no lock to be had
if ($locktype and (!$locker or $locker->{type} ne $locktype)) {
- warn "$$ $0 $locktype" if $locktype eq 'generic' or $locktype eq 'SQLite';
+ warn "$$ $0 $locktype" if (
+ ($locktype eq 'generic' or $locktype eq 'SQLite')
+ and
+ DBICTest::RunMode->is_author
+ );
- my $lockpath = File::Spec->tmpdir . "/.dbictest_$locktype.lock";
+ my $lockpath = DBICTest::RunMode->tmpdir->file(".dbictest_$locktype.lock");
my $lock_fh;
{
package # hide from PAUSE
DBICTest::Schema::Artist;
+use warnings;
+use strict;
+
use base qw/DBICTest::BaseResult/;
use Carp qw/confess/;
package # hide from PAUSE
DBICTest::Schema::ArtistGUID;
+use warnings;
+use strict;
+
use base qw/DBICTest::BaseResult/;
# test MSSQL uniqueidentifier type
package # hide from PAUSE
DBICTest::Schema::ArtistSourceName;
+use warnings;
+use strict;
+
use base 'DBICTest::Schema::Artist';
__PACKAGE__->table(__PACKAGE__->table);
__PACKAGE__->source_name('SourceNameArtists');
package # hide from PAUSE
DBICTest::Schema::ArtistSubclass;
+use warnings;
+use strict;
+
use base 'DBICTest::Schema::Artist';
__PACKAGE__->table(__PACKAGE__->table);
package # hide from PAUSE
DBICTest::Schema::ArtistUndirectedMap;
+use warnings;
+use strict;
+
use base qw/DBICTest::BaseResult/;
__PACKAGE__->table('artist_undirected_map');
package # hide from PAUSE
DBICTest::Schema::Artwork;
+use warnings;
+use strict;
+
use base qw/DBICTest::BaseResult/;
use Carp qw/confess/;
package # hide from PAUSE
DBICTest::Schema::Artwork_to_Artist;
+use warnings;
+use strict;
+
use base qw/DBICTest::BaseResult/;
use Carp qw/confess/;
package # hide from PAUSE
DBICTest::Schema::BindType;
+use warnings;
+use strict;
+
use base qw/DBICTest::BaseResult/;
__PACKAGE__->table('bindtype_test');
package # hide from PAUSE
DBICTest::Schema::Bookmark;
-use base qw/DBICTest::BaseResult/;
-
use strict;
use warnings;
+use base qw/DBICTest::BaseResult/;
+
__PACKAGE__->table('bookmark');
__PACKAGE__->add_columns(
'id' => {
package # hide from PAUSE
DBICTest::Schema::BooksInLibrary;
+use warnings;
+use strict;
+
use base qw/DBICTest::BaseResult/;
__PACKAGE__->table('books');
package # hide from PAUSE
DBICTest::Schema::CD;
+use warnings;
+use strict;
+
use base qw/DBICTest::BaseResult/;
# this tests table name as scalar ref
package # hide from PAUSE
DBICTest::Schema::CD_to_Producer;
+use warnings;
+use strict;
+
use base qw/DBICTest::BaseResult/;
__PACKAGE__->table('cd_to_producer');
package # hide from PAUSE
DBICTest::Schema::Collection;
+use warnings;
+use strict;
+
use base qw/DBICTest::BaseResult/;
__PACKAGE__->table('collection');
package # hide from PAUSE
DBICTest::Schema::CollectionObject;
+use warnings;
+use strict;
+
use base qw/DBICTest::BaseResult/;
__PACKAGE__->table('collection_object');
# for sybase and mssql computed column tests
+use warnings;
+use strict;
+
use base qw/DBICTest::BaseResult/;
__PACKAGE__->table('computed_column_test');
package # hide from PAUSE
DBICTest::Schema::CustomSql;
+use warnings;
+use strict;
+
use base qw/DBICTest::Schema::Artist/;
__PACKAGE__->table('dummy');
package # hide from PAUSE
DBICTest::Schema::Dummy;
-use base qw/DBICTest::BaseResult/;
-
use strict;
use warnings;
+use base qw/DBICTest::BaseResult/;
+
__PACKAGE__->table('dummy');
__PACKAGE__->add_columns(
'id' => {
package # hide from PAUSE
DBICTest::Schema::Employee;
+use warnings;
+use strict;
+
use base qw/DBICTest::BaseResult/;
__PACKAGE__->load_components(qw( Ordered ));
package # hide from PAUSE
DBICTest::Schema::Encoded;
-use base qw/DBICTest::BaseResult/;
-
use strict;
use warnings;
+use base qw/DBICTest::BaseResult/;
+
__PACKAGE__->table('encoded');
__PACKAGE__->add_columns(
'id' => {
use strict;
use warnings;
+
use base qw/DBICTest::BaseResult/;
__PACKAGE__->load_components(qw/InflateColumn::DateTime/);
use strict;
use warnings;
+
use base qw/DBICTest::BaseResult/;
__PACKAGE__->load_components(qw/InflateColumn::DateTime/);
use strict;
use warnings;
+
use base qw/DBICTest::BaseResult/;
__PACKAGE__->load_components(qw/InflateColumn::DateTime/);
use strict;
use warnings;
+
use base qw/DBICTest::BaseResult/;
__PACKAGE__->load_components(qw/InflateColumn::DateTime/);
use strict;
use warnings;
+
use base qw/DBICTest::BaseResult/;
__PACKAGE__->load_components(qw/InflateColumn::DateTime/);
package # hide from PAUSE
DBICTest::Schema::ForceForeign;
+use warnings;
+use strict;
+
use base qw/DBICTest::BaseResult/;
__PACKAGE__->table('forceforeign');
package # hide from PAUSE
DBICTest::Schema::FourKeys;
+use warnings;
+use strict;
+
use base qw/DBICTest::BaseResult/;
__PACKAGE__->table('fourkeys');
package # hide from PAUSE
DBICTest::Schema::FourKeys_to_TwoKeys;
+use warnings;
+use strict;
+
use base qw/DBICTest::BaseResult/;
__PACKAGE__->table('fourkeys_to_twokeys');
package DBICTest::Schema::Genre;
+use warnings;
use strict;
use base qw/DBICTest::BaseResult/;
package # hide from PAUSE
DBICTest::Schema::Image;
+use warnings;
+use strict;
+
use base qw/DBICTest::BaseResult/;
__PACKAGE__->table('images');
package # hide from PAUSE
DBICTest::Schema::LinerNotes;
+use warnings;
+use strict;
+
use base qw/DBICTest::BaseResult/;
__PACKAGE__->table('liner_notes');
package # hide from PAUSE
DBICTest::Schema::Link;
-use base qw/DBICTest::BaseResult/;
-
use strict;
use warnings;
+use base qw/DBICTest::BaseResult/;
+
__PACKAGE__->table('link');
__PACKAGE__->add_columns(
'id' => {
package # hide from PAUSE
DBICTest::Schema::LyricVersion;
+use warnings;
+use strict;
+
use base qw/DBICTest::BaseResult/;
__PACKAGE__->table('lyric_versions');
package # hide from PAUSE
DBICTest::Schema::Lyrics;
+use warnings;
+use strict;
+
use base qw/DBICTest::BaseResult/;
__PACKAGE__->table('lyrics');
package # hide from PAUSE
DBICTest::Schema::Money;
+use warnings;
+use strict;
+
use base qw/DBICTest::BaseResult/;
__PACKAGE__->table('money_test');
package # hide from PAUSE
DBICTest::Schema::NoPrimaryKey;
+use warnings;
+use strict;
+
use base qw/DBICTest::BaseResult/;
__PACKAGE__->table('noprimarykey');
package DBICTest::Schema::NoSuchClass;
+use warnings;
+use strict;
+
## This is purposefully not a real DBIC class
## Used in t/102load_classes.t
package # hide from PAUSE
DBICTest::Schema::OneKey;
+use warnings;
+use strict;
+
use base qw/DBICTest::BaseResult/;
__PACKAGE__->table('onekey');
package # hide from PAUSE
DBICTest::Schema::Owners;
+use warnings;
+use strict;
+
use base qw/DBICTest::BaseResult/;
__PACKAGE__->table('owners');
package # hide from PAUSE
DBICTest::Schema::Producer;
+use warnings;
+use strict;
+
use base qw/DBICTest::BaseResult/;
__PACKAGE__->table('producer');
package # hide from PAUSE
DBICTest::Schema::PunctuatedColumnName;
+use warnings;
+use strict;
+
use base qw/DBICTest::BaseResult/;
__PACKAGE__->table('punctuated_column_name');
package # hide from PAUSE
DBICTest::Schema::SelfRef;
+use warnings;
+use strict;
+
use base qw/DBICTest::BaseResult/;
__PACKAGE__->table('self_ref');
package # hide from PAUSE
DBICTest::Schema::SelfRefAlias;
+use warnings;
+use strict;
+
use base qw/DBICTest::BaseResult/;
__PACKAGE__->table('self_ref_alias');
package # hide from PAUSE
DBICTest::Schema::SequenceTest;
+use warnings;
+use strict;
+
use base qw/DBICTest::BaseResult/;
__PACKAGE__->table('sequence_test');
package # hide from PAUSE
DBICTest::Schema::Serialized;
+use warnings;
+use strict;
+
use base qw/DBICTest::BaseResult/;
__PACKAGE__->table('serialized');
package # hide from PAUSE
DBICTest::Schema::Tag;
+use warnings;
+use strict;
+
use base qw/DBICTest::BaseResult/;
__PACKAGE__->table('tags');
package # hide from PAUSE
DBICTest::Schema::TimestampPrimaryKey;
+use warnings;
+use strict;
+
use base qw/DBICTest::BaseResult/;
__PACKAGE__->table('timestamp_primary_key_test');
package # hide from PAUSE
DBICTest::Schema::Track;
+use warnings;
+use strict;
+
use base qw/DBICTest::BaseResult/;
use Carp qw/confess/;
package # hide from PAUSE
DBICTest::Schema::TreeLike;
+use warnings;
+use strict;
+
use base qw/DBICTest::BaseResult/;
__PACKAGE__->table('treelike');
package # hide from PAUSE
DBICTest::Schema::TwoKeyTreeLike;
+use warnings;
+use strict;
+
use base qw/DBICTest::BaseResult/;
__PACKAGE__->table('twokeytreelike');
package # hide from PAUSE
DBICTest::Schema::TwoKeys;
+use warnings;
+use strict;
+
use base qw/DBICTest::BaseResult/;
__PACKAGE__->table('twokeys');
package # hide from PAUSE
DBICTest::Schema::TypedObject;
+use warnings;
+use strict;
+
use base qw/DBICTest::BaseResult/;
__PACKAGE__->table('typed_object');
package # hide from PAUSE
DBICTest::Schema::VaryingMAX;
+use warnings;
+use strict;
+
use base qw/DBICTest::BaseResult/;
# Test VARCHAR(MAX) type for MSSQL (used in ADO tests)
DBICTest::Schema::Year1999CDs;
## Used in 104view.t
+use warnings;
+use strict;
+
use base qw/DBICTest::BaseResult/;
__PACKAGE__->table_class('DBIx::Class::ResultSource::View');
package # hide from PAUSE
DBICTest::Schema::Year2000CDs;
+use warnings;
+use strict;
+
use base qw/DBICTest::Schema::CD/;
__PACKAGE__->table_class('DBIx::Class::ResultSource::View');
package DBICErrorTest::SyntaxError;
use strict;
+use warnings;
I'm a syntax error!
package # hide from PAUSE
DBICTest::Taint::Classes::Auto;
+use warnings;
+use strict;
+
use base 'DBIx::Class::Core';
__PACKAGE__->table('test');
package # hide from PAUSE
DBICTest::Taint::Classes::Manual;
+use warnings;
+use strict;
+
use base 'DBIx::Class::Core';
__PACKAGE__->table('test');
package # hide from PAUSE
DBICTest::Taint::Namespaces::Result::Test;
+use warnings;
+use strict;
+
use base 'DBIx::Class::Core';
__PACKAGE__->table('test');
use strict;
use Carp;
-use Scalar::Util qw/isweak weaken blessed reftype refaddr/;
use Config;
use base 'Exporter';
-our @EXPORT_OK = qw/local_umask stacktrace populate_weakregistry assert_empty_weakregistry/;
+our @EXPORT_OK = qw/local_umask stacktrace/;
sub local_umask {
return unless defined $Config{d_umask};
return join "\tinvoked as ", map { sprintf ("%s at %s line %d\n", @$_ ) } @stack;
}
-my $refs_traced = 0;
-sub populate_weakregistry {
- my ($reg, $target, $slot) = @_;
-
- croak 'Target is not a reference' unless defined ref $target;
-
- $slot ||= (sprintf '%s%s(0x%x)', # so we don't trigger stringification
- (defined blessed $target) ? blessed($target) . '=' : '',
- reftype $target,
- refaddr $target,
- );
-
- if (defined $reg->{$slot}{weakref}) {
- if ( refaddr($reg->{$slot}{weakref}) != (refaddr $target) ) {
- print STDERR "Bail out! Weak Registry slot collision: $reg->{$slot}{weakref} / $target\n";
- exit 255;
- }
- }
- else {
- $refs_traced++;
- weaken( $reg->{$slot}{weakref} = $target );
- $reg->{$slot}{stacktrace} = stacktrace(1);
- }
-
- $target;
-}
-
-my $leaks_found;
-sub assert_empty_weakregistry {
- my ($weak_registry, $quiet) = @_;
-
- croak 'Expecting a registry hashref' unless ref $weak_registry eq 'HASH';
-
- return unless keys %$weak_registry;
-
- my $tb = eval { Test::Builder->new }
- or croak 'Calling test_weakregistry without a loaded Test::Builder makes no sense';
-
- for my $slot (sort keys %$weak_registry) {
- next if ! defined $weak_registry->{$slot}{weakref};
- $tb->BAILOUT("!!!! WEAK REGISTRY SLOT $slot IS NOT A WEAKREF !!!!")
- unless isweak( $weak_registry->{$slot}{weakref} );
- }
-
-
- for my $slot (sort keys %$weak_registry) {
- ! defined $weak_registry->{$slot}{weakref} and next if $quiet;
-
- $tb->ok (! defined $weak_registry->{$slot}{weakref}, "No leaks of $slot") or do {
- $leaks_found = 1;
-
- my $diag = '';
-
- $diag .= Devel::FindRef::track ($weak_registry->{$slot}{weakref}, 20) . "\n"
- if ( $ENV{TEST_VERBOSE} && eval { require Devel::FindRef });
-
- if (my $stack = $weak_registry->{$slot}{stacktrace}) {
- $diag .= " Reference first seen$stack";
- }
-
- $tb->diag($diag) if $diag;
- };
- }
-}
-
-END {
- if ($INC{'Test/Builder.pm'}) {
- my $tb = Test::Builder->new;
-
- # we check for test passage - a leak may be a part of a TODO
- if ($leaks_found and !$tb->is_passing) {
-
- $tb->diag(sprintf
- "\n\n%s\n%s\n\nInstall Devel::FindRef and re-run the test with set "
- . '$ENV{TEST_VERBOSE} (prove -v) to see a more detailed leak-report'
- . "\n\n%s\n%s\n\n", ('#' x 16) x 4
- ) if ( !$ENV{TEST_VERBOSE} or !$INC{'Devel/FindRef.pm'} );
-
- }
- else {
- $tb->note("Auto checked $refs_traced references for leaks - none detected");
- }
- }
-}
-
1;
--- /dev/null
+package DBICTest::Util::LeakTracer;
+
+use warnings;
+use strict;
+
+use Carp;
+use Scalar::Util qw/isweak weaken blessed reftype refaddr/;
+use DBICTest::Util 'stacktrace';
+
+use base 'Exporter';
+our @EXPORT_OK = qw/populate_weakregistry assert_empty_weakregistry/;
+
+my $refs_traced = 0;
+my $leaks_found;
+my %reg_of_regs;
+
+sub populate_weakregistry {
+ my ($weak_registry, $target, $slot) = @_;
+
+ croak 'Expecting a registry hashref' unless ref $weak_registry eq 'HASH';
+ croak 'Target is not a reference' unless length ref $target;
+
+ $slot ||= (sprintf '%s%s(0x%x)', # so we don't trigger stringification
+ (defined blessed $target) ? blessed($target) . '=' : '',
+ reftype $target,
+ refaddr $target,
+ );
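+  # auto-generated slots look like 'DBICTest::Schema=HASH(0x1234abc)' -
+  # class prefix only for blessed targets (address here is illustrative)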
+
+ if (defined $weak_registry->{$slot}{weakref}) {
+ if ( refaddr($weak_registry->{$slot}{weakref}) != (refaddr $target) ) {
+ print STDERR "Bail out! Weak Registry slot collision: $weak_registry->{$slot}{weakref} / $target\n";
+ exit 255;
+ }
+ }
+ else {
+ $refs_traced++;
+ weaken( $weak_registry->{$slot}{weakref} = $target );
+ $weak_registry->{$slot}{stacktrace} = stacktrace(1);
+ $weak_registry->{$slot}{renumber} = 1 unless $_[2];
+ }
+
+ weaken( $reg_of_regs{ refaddr($weak_registry) } = $weak_registry )
+ unless( $reg_of_regs{ refaddr($weak_registry) } );
+
+ $target;
+}
+
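+# A minimal usage sketch (hypothetical - $rs stands in for any suspect ref):
+#
+#   my %weak_registry;
+#   populate_weakregistry( \%weak_registry, $rs );  # slot name auto-generated
+#   undef $rs;                                      # drop the last strong ref
+#   assert_empty_weakregistry( \%weak_registry );   # ok() per registered slot
+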
+# Renumber everything we auto-named on a thread spawn
+sub CLONE {
+ my @individual_regs = grep { scalar keys %{$_||{}} } values %reg_of_regs;
+ %reg_of_regs = ();
+
+ for my $reg (@individual_regs) {
+ my @live_slots = grep { defined $reg->{$_}{weakref} } keys %$reg
+ or next;
+
+ my @live_instances = @{$reg}{@live_slots};
+
+ $reg = {}; # get a fresh hashref in the new thread ctx
+ weaken( $reg_of_regs{refaddr($reg)} = $reg );
+
+ while (@live_slots) {
+ my $slot = shift @live_slots;
+ my $inst = shift @live_instances;
+
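+      # rename the slot to reflect the new address in this thread, e.g.
+      # 'HASH(0x1234)' becoming 'HASH(0x5678)' (addresses illustrative)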
+      $slot =~ s/0x[0-9A-F]+/sprintf ('0x%x', refaddr($inst->{weakref}))/ieg
+ if $inst->{renumber};
+
+ $reg->{$slot} = $inst;
+ }
+ }
+}
+
+sub assert_empty_weakregistry {
+ my ($weak_registry, $quiet) = @_;
+
+ croak 'Expecting a registry hashref' unless ref $weak_registry eq 'HASH';
+
+ return unless keys %$weak_registry;
+
+ my $tb = eval { Test::Builder->new }
+ or croak 'Calling test_weakregistry without a loaded Test::Builder makes no sense';
+
+ for my $slot (sort keys %$weak_registry) {
+ next if ! defined $weak_registry->{$slot}{weakref};
+ $tb->BAILOUT("!!!! WEAK REGISTRY SLOT $slot IS NOT A WEAKREF !!!!")
+ unless isweak( $weak_registry->{$slot}{weakref} );
+ }
+
+
+ for my $slot (sort keys %$weak_registry) {
+ ! defined $weak_registry->{$slot}{weakref} and next if $quiet;
+
+ $tb->ok (! defined $weak_registry->{$slot}{weakref}, "No leaks of $slot") or do {
+ $leaks_found = 1;
+
+ my $diag = '';
+
+ $diag .= Devel::FindRef::track ($weak_registry->{$slot}{weakref}, 20) . "\n"
+ if ( $ENV{TEST_VERBOSE} && eval { require Devel::FindRef });
+
+ if (my $stack = $weak_registry->{$slot}{stacktrace}) {
+ $diag .= " Reference first seen$stack";
+ }
+
+ $tb->diag($diag) if $diag;
+ };
+ }
+}
+
+END {
+ if ($INC{'Test/Builder.pm'}) {
+ my $tb = Test::Builder->new;
+
+ # we check for test passage - a leak may be a part of a TODO
+ if ($leaks_found and !$tb->is_passing) {
+
+ $tb->diag(sprintf
+ "\n\n%s\n%s\n\nInstall Devel::FindRef and re-run the test with set "
+ . '$ENV{TEST_VERBOSE} (prove -v) to see a more detailed leak-report'
+ . "\n\n%s\n%s\n\n", ('#' x 16) x 4
+ ) if ( !$ENV{TEST_VERBOSE} or !$INC{'Devel/FindRef.pm'} );
+
+ }
+ else {
+ $tb->note("Auto checked $refs_traced references for leaks - none detected");
+ }
+ }
+}
+
+1;
__PACKAGE__->set_primary_key('Version');
package DBICVersion::Schema;
-use base 'DBIx::Class::Schema';
+use base 'DBICTest::BaseSchema';
use strict;
use warnings;
__PACKAGE__->set_primary_key('Version');
package DBICVersion::Schema;
-use base 'DBIx::Class::Schema';
+use base 'DBICTest::BaseSchema';
use strict;
use warnings;
__PACKAGE__->set_primary_key('Version');
package DBICVersion::Schema;
-use base 'DBIx::Class::Schema';
+use base 'DBICTest::BaseSchema';
use strict;
use warnings;
--- /dev/null
+package
+ PrefetchBug;
+
+use strict;
+use warnings;
+
+use base qw/DBIx::Class::Schema/;
+
+__PACKAGE__->load_classes();
+
+1;
--- /dev/null
+package PrefetchBug::Left;
+
+use strict;
+use warnings;
+
+use base 'DBIx::Class::Core';
+
+__PACKAGE__->table('prefetchbug_left');
+__PACKAGE__->add_columns(
+ id => { data_type => 'integer', is_auto_increment => 1 },
+);
+
+__PACKAGE__->set_primary_key('id');
+
+__PACKAGE__->has_many(
+ prefetch_leftright => 'PrefetchBug::LeftRight',
+ 'left_id'
+);
+
+1;
--- /dev/null
+package
+ PrefetchBug::LeftRight;
+
+use strict;
+use warnings;
+
+use base 'DBIx::Class::Core';
+
+__PACKAGE__->table('prefetchbug_left_right');
+__PACKAGE__->add_columns(
+ left_id => { data_type => 'integer' },
+ right_id => { data_type => 'integer' },
+ value => {});
+
+__PACKAGE__->set_primary_key('left_id', 'right_id');
+__PACKAGE__->belongs_to(left => 'PrefetchBug::Left', 'left_id');
+__PACKAGE__->belongs_to(
+ right => 'PrefetchBug::Right',
+ 'right_id',
+# {join_type => 'left'}
+);
+
+
+1;
--- /dev/null
+package
+ PrefetchBug::Right;
+
+use strict;
+use warnings;
+
+use base 'DBIx::Class::Core';
+
+__PACKAGE__->table('prefetchbug_right');
+__PACKAGE__->add_columns(qw/ id name category description propagates locked/);
+__PACKAGE__->set_primary_key('id');
+
+__PACKAGE__->has_many('prefetch_leftright', 'PrefetchBug::LeftRight', 'right_id');
+1;
use strict;
use warnings;
-use base 'DBIx::Class::Schema';
+use base 'DBICTest::BaseSchema';
__PACKAGE__->load_namespaces;
use strict;
use warnings;
-use base 'DBIx::Class::Schema';
+use base 'DBICTest::BaseSchema';
__PACKAGE__->load_namespaces;
---
--- Created by SQL::Translator::Producer::SQLite
--- Created on Fri Mar 2 18:22:33 2012
---
-
---
--- Table: artist
---
-CREATE TABLE artist (
- artistid INTEGER PRIMARY KEY NOT NULL,
- name varchar(100),
- rank integer NOT NULL DEFAULT 13,
- charfield char(10)
-);
-
-CREATE INDEX artist_name_hookidx ON artist (name);
-
-CREATE UNIQUE INDEX artist_name ON artist (name);
-
-CREATE UNIQUE INDEX u_nullable ON artist (charfield, rank);
-
---
--- Table: bindtype_test
---
-CREATE TABLE bindtype_test (
- id INTEGER PRIMARY KEY NOT NULL,
- bytea blob,
- blob blob,
- clob clob,
- a_memo memo
-);
-
---
--- Table: collection
---
-CREATE TABLE collection (
- collectionid INTEGER PRIMARY KEY NOT NULL,
- name varchar(100) NOT NULL
-);
-
---
--- Table: encoded
---
-CREATE TABLE encoded (
- id INTEGER PRIMARY KEY NOT NULL,
- encoded varchar(100)
-);
-
---
--- Table: event
---
-CREATE TABLE event (
- id INTEGER PRIMARY KEY NOT NULL,
- starts_at date NOT NULL,
- created_on timestamp NOT NULL,
- varchar_date varchar(20),
- varchar_datetime varchar(20),
- skip_inflation datetime,
- ts_without_tz datetime
-);
-
---
--- Table: fourkeys
---
-CREATE TABLE fourkeys (
- foo integer NOT NULL,
- bar integer NOT NULL,
- hello integer NOT NULL,
- goodbye integer NOT NULL,
- sensors character(10) NOT NULL,
- read_count int,
- PRIMARY KEY (foo, bar, hello, goodbye)
-);
-
---
--- Table: genre
---
-CREATE TABLE genre (
- genreid INTEGER PRIMARY KEY NOT NULL,
- name varchar(100) NOT NULL
-);
-
-CREATE UNIQUE INDEX genre_name ON genre (name);
-
---
--- Table: link
---
-CREATE TABLE link (
- id INTEGER PRIMARY KEY NOT NULL,
- url varchar(100),
- title varchar(100)
-);
-
---
--- Table: money_test
---
-CREATE TABLE money_test (
- id INTEGER PRIMARY KEY NOT NULL,
- amount money
-);
-
---
--- Table: noprimarykey
---
-CREATE TABLE noprimarykey (
- foo integer NOT NULL,
- bar integer NOT NULL,
- baz integer NOT NULL
-);
-
-CREATE UNIQUE INDEX foo_bar ON noprimarykey (foo, bar);
+CREATE TABLE "artist" (
+ "artistid" INTEGER PRIMARY KEY NOT NULL,
+ "name" varchar(100),
+ "rank" integer NOT NULL DEFAULT 13,
+ "charfield" char(10)
+);
+
+CREATE INDEX "artist_name_hookidx" ON "artist" ("name");
+
+CREATE UNIQUE INDEX "artist_name" ON "artist" ("name");
+
+CREATE UNIQUE INDEX "u_nullable" ON "artist" ("charfield", "rank");
+
+CREATE TABLE "bindtype_test" (
+ "id" INTEGER PRIMARY KEY NOT NULL,
+ "bytea" blob,
+ "blob" blob,
+ "clob" clob,
+ "a_memo" memo
+);
+
+CREATE TABLE "collection" (
+ "collectionid" INTEGER PRIMARY KEY NOT NULL,
+ "name" varchar(100) NOT NULL
+);
+
+CREATE TABLE "encoded" (
+ "id" INTEGER PRIMARY KEY NOT NULL,
+ "encoded" varchar(100)
+);
+
+CREATE TABLE "event" (
+ "id" INTEGER PRIMARY KEY NOT NULL,
+ "starts_at" date NOT NULL,
+ "created_on" timestamp NOT NULL,
+ "varchar_date" varchar(20),
+ "varchar_datetime" varchar(20),
+ "skip_inflation" datetime,
+ "ts_without_tz" datetime
+);
+
+CREATE TABLE "fourkeys" (
+ "foo" integer NOT NULL,
+ "bar" integer NOT NULL,
+ "hello" integer NOT NULL,
+ "goodbye" integer NOT NULL,
+ "sensors" character(10) NOT NULL,
+ "read_count" int,
+ PRIMARY KEY ("foo", "bar", "hello", "goodbye")
+);
+
+CREATE TABLE "genre" (
+ "genreid" INTEGER PRIMARY KEY NOT NULL,
+ "name" varchar(100) NOT NULL
+);
---
--- Table: onekey
---
-CREATE TABLE onekey (
- id INTEGER PRIMARY KEY NOT NULL,
- artist integer NOT NULL,
- cd integer NOT NULL
-);
-
---
--- Table: owners
---
-CREATE TABLE owners (
- id INTEGER PRIMARY KEY NOT NULL,
- name varchar(100) NOT NULL
-);
-
-CREATE UNIQUE INDEX owners_name ON owners (name);
-
---
--- Table: producer
---
-CREATE TABLE producer (
- producerid INTEGER PRIMARY KEY NOT NULL,
- name varchar(100) NOT NULL
+CREATE UNIQUE INDEX "genre_name" ON "genre" ("name");
+
+CREATE TABLE "link" (
+ "id" INTEGER PRIMARY KEY NOT NULL,
+ "url" varchar(100),
+ "title" varchar(100)
+);
+
+CREATE TABLE "money_test" (
+ "id" INTEGER PRIMARY KEY NOT NULL,
+ "amount" money
+);
+
+CREATE TABLE "noprimarykey" (
+ "foo" integer NOT NULL,
+ "bar" integer NOT NULL,
+ "baz" integer NOT NULL
+);
+
+CREATE UNIQUE INDEX "foo_bar" ON "noprimarykey" ("foo", "bar");
+
+CREATE TABLE "onekey" (
+ "id" INTEGER PRIMARY KEY NOT NULL,
+ "artist" integer NOT NULL,
+ "cd" integer NOT NULL
);
-CREATE UNIQUE INDEX prod_name ON producer (name);
-
---
--- Table: self_ref
---
-CREATE TABLE self_ref (
- id INTEGER PRIMARY KEY NOT NULL,
- name varchar(100) NOT NULL
+CREATE TABLE "owners" (
+ "id" INTEGER PRIMARY KEY NOT NULL,
+ "name" varchar(100) NOT NULL
);
-
---
--- Table: sequence_test
---
-CREATE TABLE sequence_test (
- pkid1 integer NOT NULL,
- pkid2 integer NOT NULL,
- nonpkid integer NOT NULL,
- name varchar(100),
- PRIMARY KEY (pkid1, pkid2)
+
+CREATE UNIQUE INDEX "owners_name" ON "owners" ("name");
+
+CREATE TABLE "producer" (
+ "producerid" INTEGER PRIMARY KEY NOT NULL,
+ "name" varchar(100) NOT NULL
+);
+
+CREATE UNIQUE INDEX "prod_name" ON "producer" ("name");
+
+CREATE TABLE "self_ref" (
+ "id" INTEGER PRIMARY KEY NOT NULL,
+ "name" varchar(100) NOT NULL
+);
+
+CREATE TABLE "sequence_test" (
+ "pkid1" integer NOT NULL,
+ "pkid2" integer NOT NULL,
+ "nonpkid" integer NOT NULL,
+ "name" varchar(100),
+ PRIMARY KEY ("pkid1", "pkid2")
);
---
--- Table: serialized
---
-CREATE TABLE serialized (
- id INTEGER PRIMARY KEY NOT NULL,
- serialized text NOT NULL
+CREATE TABLE "serialized" (
+ "id" INTEGER PRIMARY KEY NOT NULL,
+ "serialized" text NOT NULL
);
---
--- Table: timestamp_primary_key_test
---
-CREATE TABLE timestamp_primary_key_test (
- id timestamp NOT NULL DEFAULT current_timestamp,
- PRIMARY KEY (id)
+CREATE TABLE "timestamp_primary_key_test" (
+ "id" timestamp NOT NULL DEFAULT current_timestamp,
+ PRIMARY KEY ("id")
);
---
--- Table: treelike
---
-CREATE TABLE treelike (
- id INTEGER PRIMARY KEY NOT NULL,
- parent integer,
- name varchar(100) NOT NULL
+CREATE TABLE "treelike" (
+ "id" INTEGER PRIMARY KEY NOT NULL,
+ "parent" integer,
+ "name" varchar(100) NOT NULL,
+ FOREIGN KEY ("parent") REFERENCES "treelike"("id") ON DELETE CASCADE ON UPDATE CASCADE
);
-CREATE INDEX treelike_idx_parent ON treelike (parent);
+CREATE INDEX "treelike_idx_parent" ON "treelike" ("parent");
---
--- Table: twokeytreelike
---
-CREATE TABLE twokeytreelike (
- id1 integer NOT NULL,
- id2 integer NOT NULL,
- parent1 integer NOT NULL,
- parent2 integer NOT NULL,
- name varchar(100) NOT NULL,
- PRIMARY KEY (id1, id2)
+CREATE TABLE "twokeytreelike" (
+ "id1" integer NOT NULL,
+ "id2" integer NOT NULL,
+ "parent1" integer NOT NULL,
+ "parent2" integer NOT NULL,
+ "name" varchar(100) NOT NULL,
+ PRIMARY KEY ("id1", "id2"),
+ FOREIGN KEY ("parent1", "parent2") REFERENCES "twokeytreelike"("id1", "id2")
);
-CREATE INDEX twokeytreelike_idx_parent1_parent2 ON twokeytreelike (parent1, parent2);
+CREATE INDEX "twokeytreelike_idx_parent1_parent2" ON "twokeytreelike" ("parent1", "parent2");
-CREATE UNIQUE INDEX tktlnameunique ON twokeytreelike (name);
+CREATE UNIQUE INDEX "tktlnameunique" ON "twokeytreelike" ("name");
---
--- Table: typed_object
---
-CREATE TABLE typed_object (
- objectid INTEGER PRIMARY KEY NOT NULL,
- type varchar(100) NOT NULL,
- value varchar(100) NOT NULL
+CREATE TABLE "typed_object" (
+ "objectid" INTEGER PRIMARY KEY NOT NULL,
+ "type" varchar(100) NOT NULL,
+ "value" varchar(100) NOT NULL
);
---
--- Table: artist_undirected_map
---
-CREATE TABLE artist_undirected_map (
- id1 integer NOT NULL,
- id2 integer NOT NULL,
- PRIMARY KEY (id1, id2)
+CREATE TABLE "artist_undirected_map" (
+ "id1" integer NOT NULL,
+ "id2" integer NOT NULL,
+ PRIMARY KEY ("id1", "id2"),
+ FOREIGN KEY ("id1") REFERENCES "artist"("artistid") ON DELETE RESTRICT ON UPDATE CASCADE,
+ FOREIGN KEY ("id2") REFERENCES "artist"("artistid")
);
-CREATE INDEX artist_undirected_map_idx_id1 ON artist_undirected_map (id1);
+CREATE INDEX "artist_undirected_map_idx_id1" ON "artist_undirected_map" ("id1");
-CREATE INDEX artist_undirected_map_idx_id2 ON artist_undirected_map (id2);
+CREATE INDEX "artist_undirected_map_idx_id2" ON "artist_undirected_map" ("id2");
---
--- Table: bookmark
---
-CREATE TABLE bookmark (
- id INTEGER PRIMARY KEY NOT NULL,
- link integer
+CREATE TABLE "bookmark" (
+ "id" INTEGER PRIMARY KEY NOT NULL,
+ "link" integer,
+ FOREIGN KEY ("link") REFERENCES "link"("id") ON DELETE SET NULL ON UPDATE CASCADE
);
-CREATE INDEX bookmark_idx_link ON bookmark (link);
+CREATE INDEX "bookmark_idx_link" ON "bookmark" ("link");
---
--- Table: books
---
-CREATE TABLE books (
- id INTEGER PRIMARY KEY NOT NULL,
- source varchar(100) NOT NULL,
- owner integer NOT NULL,
- title varchar(100) NOT NULL,
- price integer
+CREATE TABLE "books" (
+ "id" INTEGER PRIMARY KEY NOT NULL,
+ "source" varchar(100) NOT NULL,
+ "owner" integer NOT NULL,
+ "title" varchar(100) NOT NULL,
+ "price" integer,
+ FOREIGN KEY ("owner") REFERENCES "owners"("id") ON DELETE CASCADE ON UPDATE CASCADE
);
-CREATE INDEX books_idx_owner ON books (owner);
+CREATE INDEX "books_idx_owner" ON "books" ("owner");
-CREATE UNIQUE INDEX books_title ON books (title);
+CREATE UNIQUE INDEX "books_title" ON "books" ("title");
---
--- Table: employee
---
-CREATE TABLE employee (
- employee_id INTEGER PRIMARY KEY NOT NULL,
- position integer NOT NULL,
- group_id integer,
- group_id_2 integer,
- group_id_3 integer,
- name varchar(100),
- encoded integer
+CREATE TABLE "employee" (
+ "employee_id" INTEGER PRIMARY KEY NOT NULL,
+ "position" integer NOT NULL,
+ "group_id" integer,
+ "group_id_2" integer,
+ "group_id_3" integer,
+ "name" varchar(100),
+ "encoded" integer,
+ FOREIGN KEY ("encoded") REFERENCES "encoded"("id") ON DELETE CASCADE ON UPDATE CASCADE
);
-CREATE INDEX employee_idx_encoded ON employee (encoded);
+CREATE INDEX "employee_idx_encoded" ON "employee" ("encoded");
---
--- Table: forceforeign
---
-CREATE TABLE forceforeign (
- artist INTEGER PRIMARY KEY NOT NULL,
- cd integer NOT NULL
+CREATE TABLE "forceforeign" (
+ "artist" INTEGER PRIMARY KEY NOT NULL,
+ "cd" integer NOT NULL,
+ FOREIGN KEY ("artist") REFERENCES "artist"("artistid")
);
---
--- Table: self_ref_alias
---
-CREATE TABLE self_ref_alias (
- self_ref integer NOT NULL,
- alias integer NOT NULL,
- PRIMARY KEY (self_ref, alias)
+CREATE TABLE "self_ref_alias" (
+ "self_ref" integer NOT NULL,
+ "alias" integer NOT NULL,
+ PRIMARY KEY ("self_ref", "alias"),
+ FOREIGN KEY ("alias") REFERENCES "self_ref"("id"),
+ FOREIGN KEY ("self_ref") REFERENCES "self_ref"("id") ON DELETE CASCADE ON UPDATE CASCADE
);
-CREATE INDEX self_ref_alias_idx_alias ON self_ref_alias (alias);
+CREATE INDEX "self_ref_alias_idx_alias" ON "self_ref_alias" ("alias");
-CREATE INDEX self_ref_alias_idx_self_ref ON self_ref_alias (self_ref);
+CREATE INDEX "self_ref_alias_idx_self_ref" ON "self_ref_alias" ("self_ref");
---
--- Table: track
---
-CREATE TABLE track (
- trackid INTEGER PRIMARY KEY NOT NULL,
- cd integer NOT NULL,
- position int NOT NULL,
- title varchar(100) NOT NULL,
- last_updated_on datetime,
- last_updated_at datetime
+CREATE TABLE "track" (
+ "trackid" INTEGER PRIMARY KEY NOT NULL,
+ "cd" integer NOT NULL,
+ "position" int NOT NULL,
+ "title" varchar(100) NOT NULL,
+ "last_updated_on" datetime,
+ "last_updated_at" datetime,
+ FOREIGN KEY ("cd") REFERENCES "cd"("cdid") ON DELETE CASCADE ON UPDATE CASCADE
);
-CREATE INDEX track_idx_cd ON track (cd);
+CREATE INDEX "track_idx_cd" ON "track" ("cd");
-CREATE UNIQUE INDEX track_cd_position ON track (cd, position);
+CREATE UNIQUE INDEX "track_cd_position" ON "track" ("cd", "position");
-CREATE UNIQUE INDEX track_cd_title ON track (cd, title);
+CREATE UNIQUE INDEX "track_cd_title" ON "track" ("cd", "title");
---
--- Table: cd
---
-CREATE TABLE cd (
- cdid INTEGER PRIMARY KEY NOT NULL,
- artist integer NOT NULL,
- title varchar(100) NOT NULL,
- year varchar(100) NOT NULL,
- genreid integer,
- single_track integer
+CREATE TABLE "cd" (
+ "cdid" INTEGER PRIMARY KEY NOT NULL,
+ "artist" integer NOT NULL,
+ "title" varchar(100) NOT NULL,
+ "year" varchar(100) NOT NULL,
+ "genreid" integer,
+ "single_track" integer,
+ FOREIGN KEY ("artist") REFERENCES "artist"("artistid") ON DELETE CASCADE ON UPDATE CASCADE,
+ FOREIGN KEY ("single_track") REFERENCES "track"("trackid") ON DELETE CASCADE,
+ FOREIGN KEY ("genreid") REFERENCES "genre"("genreid") ON DELETE SET NULL ON UPDATE CASCADE
);
-CREATE INDEX cd_idx_artist ON cd (artist);
+CREATE INDEX "cd_idx_artist" ON "cd" ("artist");
-CREATE INDEX cd_idx_genreid ON cd (genreid);
+CREATE INDEX "cd_idx_single_track" ON "cd" ("single_track");
-CREATE INDEX cd_idx_single_track ON cd (single_track);
+CREATE INDEX "cd_idx_genreid" ON "cd" ("genreid");
-CREATE UNIQUE INDEX cd_artist_title ON cd (artist, title);
+CREATE UNIQUE INDEX "cd_artist_title" ON "cd" ("artist", "title");
---
--- Table: collection_object
---
-CREATE TABLE collection_object (
- collection integer NOT NULL,
- object integer NOT NULL,
- PRIMARY KEY (collection, object)
+CREATE TABLE "collection_object" (
+ "collection" integer NOT NULL,
+ "object" integer NOT NULL,
+ PRIMARY KEY ("collection", "object"),
+ FOREIGN KEY ("collection") REFERENCES "collection"("collectionid") ON DELETE CASCADE ON UPDATE CASCADE,
+ FOREIGN KEY ("object") REFERENCES "typed_object"("objectid") ON DELETE CASCADE ON UPDATE CASCADE
);
-CREATE INDEX collection_object_idx_collection ON collection_object (collection);
+CREATE INDEX "collection_object_idx_collection" ON "collection_object" ("collection");
-CREATE INDEX collection_object_idx_object ON collection_object (object);
+CREATE INDEX "collection_object_idx_object" ON "collection_object" ("object");
---
--- Table: lyrics
---
-CREATE TABLE lyrics (
- lyric_id INTEGER PRIMARY KEY NOT NULL,
- track_id integer NOT NULL
+CREATE TABLE "lyrics" (
+ "lyric_id" INTEGER PRIMARY KEY NOT NULL,
+ "track_id" integer NOT NULL,
+ FOREIGN KEY ("track_id") REFERENCES "track"("trackid") ON DELETE CASCADE
);
-CREATE INDEX lyrics_idx_track_id ON lyrics (track_id);
+CREATE INDEX "lyrics_idx_track_id" ON "lyrics" ("track_id");
---
--- Table: cd_artwork
---
-CREATE TABLE cd_artwork (
- cd_id INTEGER PRIMARY KEY NOT NULL
+CREATE TABLE "cd_artwork" (
+ "cd_id" INTEGER PRIMARY KEY NOT NULL,
+ FOREIGN KEY ("cd_id") REFERENCES "cd"("cdid") ON DELETE CASCADE
);
---
--- Table: liner_notes
---
-CREATE TABLE liner_notes (
- liner_id INTEGER PRIMARY KEY NOT NULL,
- notes varchar(100) NOT NULL
+CREATE TABLE "liner_notes" (
+ "liner_id" INTEGER PRIMARY KEY NOT NULL,
+ "notes" varchar(100) NOT NULL,
+ FOREIGN KEY ("liner_id") REFERENCES "cd"("cdid") ON DELETE CASCADE
);
---
--- Table: lyric_versions
---
-CREATE TABLE lyric_versions (
- id INTEGER PRIMARY KEY NOT NULL,
- lyric_id integer NOT NULL,
- text varchar(100) NOT NULL
+CREATE TABLE "lyric_versions" (
+ "id" INTEGER PRIMARY KEY NOT NULL,
+ "lyric_id" integer NOT NULL,
+ "text" varchar(100) NOT NULL,
+ FOREIGN KEY ("lyric_id") REFERENCES "lyrics"("lyric_id") ON DELETE CASCADE ON UPDATE CASCADE
);
-CREATE INDEX lyric_versions_idx_lyric_id ON lyric_versions (lyric_id);
+CREATE INDEX "lyric_versions_idx_lyric_id" ON "lyric_versions" ("lyric_id");
+
+CREATE UNIQUE INDEX "lyric_versions_lyric_id_text" ON "lyric_versions" ("lyric_id", "text");
---
--- Table: tags
---
-CREATE TABLE tags (
- tagid INTEGER PRIMARY KEY NOT NULL,
- cd integer NOT NULL,
- tag varchar(100) NOT NULL
+CREATE TABLE "tags" (
+ "tagid" INTEGER PRIMARY KEY NOT NULL,
+ "cd" integer NOT NULL,
+ "tag" varchar(100) NOT NULL,
+ FOREIGN KEY ("cd") REFERENCES "cd"("cdid") ON DELETE CASCADE ON UPDATE CASCADE
);
-CREATE INDEX tags_idx_cd ON tags (cd);
+CREATE INDEX "tags_idx_cd" ON "tags" ("cd");
-CREATE UNIQUE INDEX tagid_cd ON tags (tagid, cd);
+CREATE UNIQUE INDEX "tagid_cd" ON "tags" ("tagid", "cd");
-CREATE UNIQUE INDEX tagid_cd_tag ON tags (tagid, cd, tag);
+CREATE UNIQUE INDEX "tagid_cd_tag" ON "tags" ("tagid", "cd", "tag");
-CREATE UNIQUE INDEX tags_tagid_tag ON tags (tagid, tag);
+CREATE UNIQUE INDEX "tags_tagid_tag" ON "tags" ("tagid", "tag");
-CREATE UNIQUE INDEX tags_tagid_tag_cd ON tags (tagid, tag, cd);
+CREATE UNIQUE INDEX "tags_tagid_tag_cd" ON "tags" ("tagid", "tag", "cd");
---
--- Table: cd_to_producer
---
-CREATE TABLE cd_to_producer (
- cd integer NOT NULL,
- producer integer NOT NULL,
- attribute integer,
- PRIMARY KEY (cd, producer)
+CREATE TABLE "cd_to_producer" (
+ "cd" integer NOT NULL,
+ "producer" integer NOT NULL,
+ "attribute" integer,
+ PRIMARY KEY ("cd", "producer"),
+ FOREIGN KEY ("cd") REFERENCES "cd"("cdid") ON DELETE CASCADE ON UPDATE CASCADE,
+ FOREIGN KEY ("producer") REFERENCES "producer"("producerid")
);
-CREATE INDEX cd_to_producer_idx_cd ON cd_to_producer (cd);
+CREATE INDEX "cd_to_producer_idx_cd" ON "cd_to_producer" ("cd");
-CREATE INDEX cd_to_producer_idx_producer ON cd_to_producer (producer);
+CREATE INDEX "cd_to_producer_idx_producer" ON "cd_to_producer" ("producer");
---
--- Table: images
---
-CREATE TABLE images (
- id INTEGER PRIMARY KEY NOT NULL,
- artwork_id integer NOT NULL,
- name varchar(100) NOT NULL,
- data blob
+CREATE TABLE "images" (
+ "id" INTEGER PRIMARY KEY NOT NULL,
+ "artwork_id" integer NOT NULL,
+ "name" varchar(100) NOT NULL,
+ "data" blob,
+ FOREIGN KEY ("artwork_id") REFERENCES "cd_artwork"("cd_id") ON DELETE CASCADE ON UPDATE CASCADE
);
-CREATE INDEX images_idx_artwork_id ON images (artwork_id);
+CREATE INDEX "images_idx_artwork_id" ON "images" ("artwork_id");
---
--- Table: twokeys
---
-CREATE TABLE twokeys (
- artist integer NOT NULL,
- cd integer NOT NULL,
- PRIMARY KEY (artist, cd)
+CREATE TABLE "twokeys" (
+ "artist" integer NOT NULL,
+ "cd" integer NOT NULL,
+ PRIMARY KEY ("artist", "cd"),
+ FOREIGN KEY ("artist") REFERENCES "artist"("artistid") ON DELETE CASCADE ON UPDATE CASCADE,
+ FOREIGN KEY ("cd") REFERENCES "cd"("cdid")
);
-CREATE INDEX twokeys_idx_artist ON twokeys (artist);
+CREATE INDEX "twokeys_idx_artist" ON "twokeys" ("artist");
---
--- Table: artwork_to_artist
---
-CREATE TABLE artwork_to_artist (
- artwork_cd_id integer NOT NULL,
- artist_id integer NOT NULL,
- PRIMARY KEY (artwork_cd_id, artist_id)
+CREATE TABLE "artwork_to_artist" (
+ "artwork_cd_id" integer NOT NULL,
+ "artist_id" integer NOT NULL,
+ PRIMARY KEY ("artwork_cd_id", "artist_id"),
+ FOREIGN KEY ("artist_id") REFERENCES "artist"("artistid") ON DELETE CASCADE ON UPDATE CASCADE,
+ FOREIGN KEY ("artwork_cd_id") REFERENCES "cd_artwork"("cd_id") ON DELETE CASCADE ON UPDATE CASCADE
);
-CREATE INDEX artwork_to_artist_idx_artist_id ON artwork_to_artist (artist_id);
+CREATE INDEX "artwork_to_artist_idx_artist_id" ON "artwork_to_artist" ("artist_id");
-CREATE INDEX artwork_to_artist_idx_artwork_cd_id ON artwork_to_artist (artwork_cd_id);
+CREATE INDEX "artwork_to_artist_idx_artwork_cd_id" ON "artwork_to_artist" ("artwork_cd_id");
---
--- Table: fourkeys_to_twokeys
---
-CREATE TABLE fourkeys_to_twokeys (
- f_foo integer NOT NULL,
- f_bar integer NOT NULL,
- f_hello integer NOT NULL,
- f_goodbye integer NOT NULL,
- t_artist integer NOT NULL,
- t_cd integer NOT NULL,
- autopilot character NOT NULL,
- pilot_sequence integer,
- PRIMARY KEY (f_foo, f_bar, f_hello, f_goodbye, t_artist, t_cd)
+CREATE TABLE "fourkeys_to_twokeys" (
+ "f_foo" integer NOT NULL,
+ "f_bar" integer NOT NULL,
+ "f_hello" integer NOT NULL,
+ "f_goodbye" integer NOT NULL,
+ "t_artist" integer NOT NULL,
+ "t_cd" integer NOT NULL,
+ "autopilot" character NOT NULL,
+ "pilot_sequence" integer,
+ PRIMARY KEY ("f_foo", "f_bar", "f_hello", "f_goodbye", "t_artist", "t_cd"),
+ FOREIGN KEY ("f_foo", "f_bar", "f_hello", "f_goodbye") REFERENCES "fourkeys"("foo", "bar", "hello", "goodbye") ON DELETE CASCADE ON UPDATE CASCADE,
+ FOREIGN KEY ("t_artist", "t_cd") REFERENCES "twokeys"("artist", "cd") ON DELETE CASCADE ON UPDATE CASCADE
);
-CREATE INDEX fourkeys_to_twokeys_idx_f_foo_f_bar_f_hello_f_goodbye ON fourkeys_to_twokeys (f_foo, f_bar, f_hello, f_goodbye);
+CREATE INDEX "fourkeys_to_twokeys_idx_f_foo_f_bar_f_hello_f_goodbye" ON "fourkeys_to_twokeys" ("f_foo", "f_bar", "f_hello", "f_goodbye");
-CREATE INDEX fourkeys_to_twokeys_idx_t_artist_t_cd ON fourkeys_to_twokeys (t_artist, t_cd);
+CREATE INDEX "fourkeys_to_twokeys_idx_t_artist_t_cd" ON "fourkeys_to_twokeys" ("t_artist", "t_cd");
---
--- View: year2000cds
---
-CREATE VIEW year2000cds AS
+CREATE VIEW "year2000cds" AS
SELECT cdid, artist, title, year, genreid, single_track FROM cd WHERE year = "2000";
package DBICTestAdminInc;
-use base 'DBIx::Class::Schema';
+
+use warnings;
+use strict;
+
+use base 'DBICTest::BaseSchema';
sub connect { exit 70 } # this is what the test will expect to see
package DBICTestConfig;
-use base 'DBIx::Class::Schema';
+
+use warnings;
+use strict;
+
+use base 'DBICTest::BaseSchema';
sub connect {
my($self, @opt) = @_;
#
# ribasushi
-TODO: { my $f = __FILE__; local $TODO = "See comment at top of $f for discussion of the TODO";
+my $TODO_msg = "See comment at top of @{[ __FILE__ ]} for discussion of the TODO";
{
my $counts;
$counts->{$_} = $schema->resultset($_)->count for qw/Track CD Genre/;
- lives_ok (sub {
- my $existing_nogen_cd = $schema->resultset('CD')->search (
- { 'genre.genreid' => undef },
- { join => 'genre' },
- )->first;
-
- $schema->resultset('Track')->create ({
- title => 'Sugar-coated',
- cd => {
- title => $existing_nogen_cd->title,
- genre => {
- name => 'sugar genre',
- }
+ my $existing_nogen_cd = $schema->resultset('CD')->search (
+ { 'genre.genreid' => undef },
+ { join => 'genre' },
+ )->first;
+
+ $schema->resultset('Track')->create ({
+ title => 'Sugar-coated',
+ cd => {
+ title => $existing_nogen_cd->title,
+ genre => {
+ name => 'sugar genre',
}
- });
+ }
+ });
- is ($schema->resultset('Track')->count, $counts->{Track} + 1, '1 new track');
- is ($schema->resultset('CD')->count, $counts->{CD}, 'No new cds');
- is ($schema->resultset('Genre')->count, $counts->{Genre} + 1, '1 new genre');
+ is ($schema->resultset('Track')->count, $counts->{Track} + 1, '1 new track');
+ is ($schema->resultset('CD')->count, $counts->{CD}, 'No new cds');
+ TODO: {
+ todo_skip $TODO_msg, 1;
+ is ($schema->resultset('Genre')->count, $counts->{Genre} + 1, '1 new genre');
is ($existing_nogen_cd->genre->title, 'sugar genre', 'Correct genre assigned to CD');
- }, 'create() did not throw');
+ }
}
+
{
my $counts;
$counts->{$_} = $schema->resultset($_)->count for qw/Artist CD Producer/;
is ($schema->resultset('Artist')->count, $counts->{Artist}, 'No new artists');
is ($schema->resultset('Producer')->count, $counts->{Producer} + 1, '1 new producers');
+
+ local $TODO = $TODO_msg;
is ($schema->resultset('CD')->count, $counts->{CD} + 2, '2 new cds');
is ($producer->cds->count, 2, 'CDs assigned to correct producer');
}, 'create() did not throw');
}
-}
-
done_testing;
use warnings;
use Test::More;
+BEGIN {
+ plan skip_all => 'Disable test entirely until multicreate is rewritten in terms of subqueries';
+}
+
use Test::Exception;
use lib qw(t/lib);
use DBICTest;
$schema->storage->debugcb (sub { push @{$query_stats->{$_[0]}}, $_[1] });
$schema->storage->debug (1);
-TODO: {
- local $TODO = 'This is an optimization task, will wait... a while';
-
lives_ok (sub {
undef $query_stats;
$schema->resultset('Artist')->create ({
|| $ENV{DBIC_MULTICREATE_DEBUG} && diag join "\n", @{$query_stats->{SELECT} || []};
});
-}
-
done_testing;
use lib qw(t/lib);
use DBICTest;
-use POSIX qw(ceil);
-
my $schema = DBICTest->init_schema();
-plan tests => 1;
-
{
my $artist = $schema->resultset ('Artist')->search ({}, { rows => 1})->single; # braindead sqlite
my $cd = $schema->resultset ('CD')->create ({
lives_ok (sub { $cd->delete}, "Cascade delete on ordered has_many doesn't bomb");
}
-1;
+done_testing;
--- /dev/null
+use strict;
+use warnings;
+
+use Test::More;
+use Test::Exception;
+use lib qw(t/lib);
+use DBICTest;
+
+my $schema = DBICTest->init_schema();
+
+my $cd = $schema->resultset('CD')->next;
+
+lives_ok {
+ $cd->tracks->delete;
+
+ my @tracks = map
+ { $cd->create_related('tracks', { title => "t_$_", position => $_ }) }
+ (4,2,5,1,3)
+ ;
+
+ for (@tracks) {
+ $_->discard_changes;
+ $_->delete;
+ }
+} 'Creation/deletion of out-of-order tracks successful';
+
+done_testing;
use warnings;
+use strict;
use Test::More;
use lib qw(t/lib);
);
-TODO: {
+{
local $TODO = "Chaining with prefetch is fundamentally broken";
my $queries;
use warnings;
+use strict;
use Test::More;
use lib qw(t/lib);
--- /dev/null
+use warnings;
+use strict;
+
+use Test::More;
+
+use lib qw(t/lib);
+use DBICTest;
+
+my $schema = DBICTest->init_schema(
+ no_populate => 1,
+);
+
+$schema->resultset('CD')->create({
+ cdid => 0,
+ artist => {
+ artistid => 0,
+ name => '',
+ rank => 0,
+ charfield => 0,
+ },
+ title => '',
+ year => 0,
+ genreid => 0,
+ single_track => 0,
+});
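+
+# Note: the all-false column values above appear deliberate - a prefetched
+# row consisting entirely of 0s and empty strings must still inflate into
+# real objects rather than being treated as an absent join row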
+
+my $orig_debug = $schema->storage->debug;
+
+my $queries = 0;
+$schema->storage->debugcb(sub { $queries++; });
+$schema->storage->debug(1);
+
+my $cd = $schema->resultset('CD')->search( {}, { prefetch => 'artist' })->next;
+
+is_deeply
+ { $cd->get_columns },
+ {
+ artist => 0,
+ cdid => 0,
+ genreid => 0,
+ single_track => 0,
+ title => '',
+ year => 0,
+ },
+ 'Expected CD columns present',
+;
+
+is_deeply
+ { $cd->artist->get_columns },
+ {
+ artistid => 0,
+ charfield => 0,
+ name => "",
+ rank => 0,
+ },
+ 'Expected Artist columns present',
+;
+
+is $queries, 1, 'Only one query fired - prefetch worked';
+
+$schema->storage->debugcb(undef);
+$schema->storage->debug($orig_debug);
+
+done_testing;
FROM cd me
JOIN artist artist ON artist.artistid = me.artist
GROUP BY me.cdid, me.artist, me.title, me.year, me.genreid, me.single_track
- ORDER BY me.cdid
) me
JOIN artist artist ON artist.artistid = me.artist
ORDER BY me.cdid
JOIN artist artist ON artist.artistid = me.artist
WHERE ( tracks.title != ? )
GROUP BY me.cdid, me.artist, me.title, me.year, me.genreid, me.single_track
- ORDER BY me.cdid
) me
LEFT JOIN track tracks ON tracks.cd = me.cdid
JOIN artist artist ON artist.artistid = me.artist
'Sensible error message on mis-specified "as"',
);
+# check complex limiting prefetch without the join-able columns
+{
+ my $pref_rs = $schema->resultset('Owners')->search({}, {
+ rows => 3,
+ offset => 1,
+ columns => 'name', # only the owner name, still prefetch all the books
+ prefetch => 'books',
+ });
+
+ lives_ok {
+    is ($pref_rs->all, 1, 'Expected count of objects on limited prefetch')
+ } "Complex limited prefetch works with non-selected join condition";
+}
+
+
done_testing;
use warnings;
+use strict;
use Test::More;
use lib qw(t/lib);
# cds belong to the second and third artist, respectively, and there's no sixth
# row
is_deeply (
- [ $filtered_cd_rs->hri_dump ],
+ $filtered_cd_rs->all_hri,
[
{
'artist' => '2',
--- /dev/null
+use strict;
+use warnings;
+
+use Test::More;
+use lib qw(t/lib);
+use DBICTest;
+use PrefetchBug;
+
+my $schema = PrefetchBug->connect( DBICTest->_database (quote_char => '"') );
+ok( $schema, 'Connected to PrefetchBug schema OK' );
+
+$schema->storage->dbh->do(<<"EOF");
+CREATE TABLE prefetchbug_left (
+ id INTEGER PRIMARY KEY
+)
+EOF
+
+$schema->storage->dbh->do(<<"EOF");
+CREATE TABLE prefetchbug_right (
+ id INTEGER PRIMARY KEY,
+ name TEXT,
+ category TEXT,
+ description TEXT,
+ propagates INT,
+ locked INT
+)
+EOF
+
+$schema->storage->dbh->do(<<"EOF");
+CREATE TABLE prefetchbug_left_right (
+ left_id INTEGER REFERENCES prefetchbug_left(id),
+ right_id INTEGER REFERENCES prefetchbug_right(id),
+ value TEXT,
+ PRIMARY KEY (left_id, right_id)
+)
+EOF
+
+# Test simple has_many prefetch:
+
+my $leftc = $schema->resultset('Left')->create({});
+
+my $rightc = $schema->resultset('Right')->create({ id => 60, name => 'Johnny', category => 'something', description=> 'blah', propagates => 0, locked => 1 });
+$rightc->create_related('prefetch_leftright', { left => $leftc, value => 'lr' });
+
+# start over with a freshly fetched row object
+my $left = $schema->resultset('Left')->find({ id => $leftc->id });
+
+my @left_rights = $left->search_related('prefetch_leftright', {}, { prefetch => 'right' });
+ok(defined $left_rights[0]->right, 'Prefetched Right side correctly');
+
+done_testing;
is($rs->all, 1, 'distinct with prefetch (objects)');
is($rs->count, 1, 'distinct with prefetch (count)');
- TODO: {
- local $TODO = "This makes another 2 trips to the database, it can't be right";
-
$queries = 0;
$schema->storage->debugcb ($debugcb);
$schema->storage->debug (1);
is($rs->search_related('cds')->all, 2, 'prefetched distinct with prefetch (objects)');
is($rs->search_related('cds')->count, 2, 'prefetched distinct with prefetch (count)');
- is ($queries, 0, 'No extra queries fired (prefetch survives search_related)');
+ {
+ local $TODO = "This makes another 2 trips to the database, it can't be right";
+ is ($queries, 0, 'No extra queries fired (prefetch survives search_related)');
+ }
$schema->storage->debugcb (undef);
$schema->storage->debug ($orig_debug);
- }
-
}, 'distinct generally works with prefetch on deep search_related chains');
done_testing;
FROM cd me
JOIN artist artist ON artist.artistid = me.artist
WHERE ( ( artist.name = ? AND me.year = ? ) )
- ORDER BY me.cdid
LIMIT ?
) me
LEFT JOIN track tracks
year => 2005,
} );
- TODO: {
+ {
local $TODO = "Can't fix right now" if $DBIx::Class::VERSION < 0.09;
lives_ok { $big_flop->genre} "Don't throw exception when col is not loaded after insert";
};
);
-TODO: {
+{
local $TODO = "relationship checking needs fixing";
# try to add a bogus relationship using the wrong cols
throws_ok {
'Resultset-class attributes do not seep outside of the subselect',
);
+
+is_same_sql_bind(
+ $schema->resultset('CD')->search ({}, {
+ rows => 2,
+ join => [ 'genre', { artist => 'cds' } ],
+ distinct => 1,
+ columns => {
+ title => 'me.title',
+ artist__name => 'artist.name',
+ genre__name => 'genre.name',
+ cds_for_artist => \ '(SELECT COUNT(*) FROM cds WHERE cd.artist = artist.id)',
+ },
+ order_by => { -desc => 'me.year' },
+ })->count_rs->as_query,
+ '(
+ SELECT COUNT( * )
+ FROM (
+ SELECT artist.name AS artist__name, (SELECT COUNT(*) FROM cds WHERE cd.artist = artist.id), genre.name AS genre__name, me.title, me.year
+ FROM cd me
+ LEFT JOIN genre genre
+ ON genre.genreid = me.genreid
+ JOIN artist artist ON artist.artistid = me.artist
+ GROUP BY artist.name, (SELECT COUNT(*) FROM cds WHERE cd.artist = artist.id), genre.name, me.title, me.year
+ LIMIT ?
+ ) me
+ )',
+ [ [{ sqlt_datatype => 'integer' } => 2 ] ],
+);
+
+
done_testing;
my $rs;
-TODO: {
+{
local $TODO = 'bind args order needs fixing (semifor)';
# First, the simple cases...
->search({}, $where_bind);
is ( $rs->count, 1, 'where/bind last' );
+
+ # and the complex case
+ local $TODO = 'bind args order needs fixing (semifor)';
+ $rs = $schema->resultset('CustomSql')->search({}, { bind => [ 1999 ] })
+ ->search({ 'artistid' => 1 }, {
+ where => \'title like ?',
+ bind => [ 'Spoon%' ] });
+ is ( $rs->count, 1, '...cookbook + chained search with extra bind' );
}
{
);
}
-TODO: {
- local $TODO = 'bind args order needs fixing (semifor)';
- $rs = $schema->resultset('Complex')->search({}, { bind => [ 1999 ] })
- ->search({ 'artistid' => 1 }, {
- where => \'title like ?',
- bind => [ 'Spoon%' ] });
- is ( $rs->count, 1, '...cookbook + chained search with extra bind' );
-}
-
done_testing;
# ramifications of changing this. Thus the value override and the
# TODO to go with it. Delete all of this if ever resolved.
my %todo_rel_inflation_override = ( artist => $basecols{artist} );
-TODO: {
+{
local $TODO = 'Treating relationships as inflatable data is wrong - see comment in ' . __FILE__;
ok (! keys %todo_rel_inflation_override);
}
use lib qw(t/lib);
use Test::More;
use Test::Exception;
+
+use DBICTest::Schema::CD;
+BEGIN {
+ # the default scalarref table name will not work well for this test
+ DBICTest::Schema::CD->table('cd');
+}
+
use DBICTest;
use DBIC::DebugObj;
use DBIC::SqlMakerTest;
my $tkfks = $schema->resultset('FourKeys_to_TwoKeys');
-my ($fa, $fb) = $tkfks->related_resultset ('fourkeys')->populate ([
+my ($fa, $fb, $fc) = $tkfks->related_resultset ('fourkeys')->populate ([
[qw/foo bar hello goodbye sensors read_count/],
[qw/1 1 1 1 a 10 /],
[qw/2 2 2 2 b 20 /],
+ [qw/1 1 1 2 c 30 /],
]);
# This is already provided by DBICTest
#
# create a resultset matching $fa and $fb only
-my $fks = $schema->resultset ('FourKeys')
- ->search ({ map { $_ => [1, 2] } qw/foo bar hello goodbye/}, { join => 'fourkeys_to_twokeys' });
+my $fks = $schema->resultset ('FourKeys')->search (
+ {
+ sensors => { '!=', 'c' },
+ ( map { $_ => [1, 2] } qw/foo bar hello goodbye/ ),
+ }, { join => 'fourkeys_to_twokeys'}
+);
is ($fks->count, 4, 'Joined FourKey count correct (2x2)');
\@bind,
'UPDATE fourkeys
SET read_count = read_count + 1
+ WHERE ( ( ( bar = ? OR bar = ? ) AND ( foo = ? OR foo = ? ) AND ( goodbye = ? OR goodbye = ? ) AND ( hello = ? OR hello = ? ) AND sensors != ? ) )
+ ',
+ [ ("'1'", "'2'") x 4, "'c'" ],
+ 'Correct update-SQL with multijoin with pruning',
+);
+
+is ($fa->discard_changes->read_count, 11, 'Update ran only once on discard-join resultset');
+is ($fb->discard_changes->read_count, 21, 'Update ran only once on discard-join resultset');
+is ($fc->discard_changes->read_count, 30, 'Update did not touch outlier');
+
+# now restrict on the joined table itself, so the join can no longer be pruned away
+$fks = $fks->search({ 'fourkeys_to_twokeys.pilot_sequence' => { '!=' => 666 } });
+
+$schema->storage->debugobj ($debugobj);
+$schema->storage->debug (1);
+$fks->update ({ read_count => \ 'read_count + 1' });
+$schema->storage->debugobj ($orig_debugobj);
+$schema->storage->debug ($orig_debug);
+
+is_same_sql_bind (
+ $sql,
+ \@bind,
+ 'UPDATE fourkeys
+ SET read_count = read_count + 1
WHERE ( bar = ? AND foo = ? AND goodbye = ? AND hello = ? ) OR ( bar = ? AND foo = ? AND goodbye = ? AND hello = ? )',
[ map { "'$_'" } ( (1) x 4, (2) x 4 ) ],
- 'Correct update-SQL without multicolumn in support',
+ 'Correct update-SQL with multijoin without pruning',
);
-is ($fa->discard_changes->read_count, 11, 'Update ran only once on joined resultset');
-is ($fb->discard_changes->read_count, 21, 'Update ran only once on joined resultset');
+is ($fa->discard_changes->read_count, 12, 'Update ran only once on joined resultset');
+is ($fb->discard_changes->read_count, 22, 'Update ran only once on joined resultset');
+is ($fc->discard_changes->read_count, 30, 'Update did not touch outlier');
# try the same sql with forced multicolumn in
$schema->storage->_use_multicolumn_in (1);
$schema->storage->debugobj ($debugobj);
$schema->storage->debug (1);
-eval { $fks->update ({ read_count => \ 'read_count + 1' }) }; # this can't actually execute, we just need the "as_query"
+throws_ok { $fks->update ({ read_count => \ 'read_count + 1' }) } # this can't actually execute, we just need the "as_query"
+ qr/\Q DBI Exception:/ or do { $sql = ''; @bind = () };
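+  # (if no exception fired, wipe the capture so the SQL comparison below fails loudly)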
$schema->storage->_use_multicolumn_in (undef);
$schema->storage->debugobj ($orig_debugobj);
$schema->storage->debug ($orig_debug);
(foo, bar, hello, goodbye) IN (
SELECT me.foo, me.bar, me.hello, me.goodbye
FROM fourkeys me
- WHERE ( bar = ? OR bar = ? ) AND ( foo = ? OR foo = ? ) AND ( goodbye = ? OR goodbye = ? ) AND ( hello = ? OR hello = ? )
+ LEFT JOIN fourkeys_to_twokeys fourkeys_to_twokeys ON
+ fourkeys_to_twokeys.f_bar = me.bar
+ AND fourkeys_to_twokeys.f_foo = me.foo
+ AND fourkeys_to_twokeys.f_goodbye = me.goodbye
+ AND fourkeys_to_twokeys.f_hello = me.hello
+ WHERE fourkeys_to_twokeys.pilot_sequence != ? AND ( bar = ? OR bar = ? ) AND ( foo = ? OR foo = ? ) AND ( goodbye = ? OR goodbye = ? ) AND ( hello = ? OR hello = ? ) AND sensors != ?
)
)
',
- [ map { "'$_'" } ( (1, 2) x 4 ) ],
+ [
+ "'666'",
+ ("'1'", "'2'") x 4,
+ "'c'",
+ ],
'Correct update-SQL with multicolumn in support',
);
is ($tkfks->count, $tkfk_cnt -= 1, 'Only one row deleted');
-# Make sure prefetch is properly stripped too
-# check with sql-equality, as sqlite will accept bad sql just fine
+# check with sql-equality, as sqlite will accept most bad sql just fine
$schema->storage->debugobj ($debugobj);
$schema->storage->debug (1);
-$schema->resultset('CD')->search(
- { year => { '!=' => 2010 } },
- { prefetch => 'liner_notes' },
-)->delete;
+
+{
+ my $rs = $schema->resultset('CD')->search(
+ { 'me.year' => { '!=' => 2010 } },
+ );
+
+ $rs->search({}, { join => 'liner_notes' })->delete;
+ is_same_sql_bind (
+ $sql,
+ \@bind,
+ 'DELETE FROM cd WHERE ( year != ? )',
+ ["'2010'"],
+ 'Non-restricting multijoins properly thrown out'
+ );
+
+ $rs->search({}, { prefetch => 'liner_notes' })->delete;
+ is_same_sql_bind (
+ $sql,
+ \@bind,
+ 'DELETE FROM cd WHERE ( year != ? )',
+ ["'2010'"],
+ 'Non-restricting multiprefetch thrown out'
+ );
+
+ $rs->search({}, { prefetch => 'artist' })->delete;
+ is_same_sql_bind (
+ $sql,
+ \@bind,
+ 'DELETE FROM cd WHERE ( cdid IN ( SELECT me.cdid FROM cd me JOIN artist artist ON artist.artistid = me.artist WHERE ( me.year != ? ) ) )',
+ ["'2010'"],
+ 'Restricting prefetch left in, selector thrown out'
+ );
+
+ $rs->result_source->name('schema_qualified.cd');
+ # this is expected to fail - we only want to collect the generated SQL
+ eval { $rs->delete };
+ is_same_sql_bind (
+ $sql,
+ \@bind,
+ 'DELETE FROM schema_qualified.cd WHERE ( year != ? )',
+ ["'2010'"],
+    'delete with fully qualified table name correct'
+ );
+
+ # this is expected to fail - we only want to collect the generated SQL
+ eval { $rs->search({}, { prefetch => 'artist' })->delete };
+ is_same_sql_bind (
+ $sql,
+ \@bind,
+ 'DELETE FROM schema_qualified.cd WHERE ( cdid IN ( SELECT me.cdid FROM schema_qualified.cd me JOIN artist artist ON artist.artistid = me.artist WHERE ( me.year != ? ) ) )',
+ ["'2010'"],
+ 'delete with fully qualified table name and subquery correct'
+ );
+
+ $rs->result_source->name('cd');
+
+  # check that as_subselect_rs works ok: the inner query is left untouched,
+  # and simply wrapped in a bare selector inside an IN condition
+ $schema->resultset('CD')->search({
+ 'me.cdid' => 1,
+ 'artist.name' => 'partytimecity',
+ }, {
+ join => 'artist',
+ })->as_subselect_rs->delete;
+
+ is_same_sql_bind (
+ $sql,
+ \@bind,
+ '
+ DELETE FROM cd
+ WHERE (
+ cdid IN (
+ SELECT me.cdid
+ FROM (
+ SELECT me.cdid, me.artist, me.title, me.year, me.genreid, me.single_track
+ FROM cd me
+ JOIN artist artist ON artist.artistid = me.artist
+ WHERE artist.name = ? AND me.cdid = ?
+ ) me
+ )
+ )
+ ',
+ ["'partytimecity'", "'1'"],
+ 'Delete from as_subselect_rs works correctly'
+ );
+}
$schema->storage->debugobj ($orig_debugobj);
$schema->storage->debug ($orig_debug);
-is_same_sql_bind (
- $sql,
- \@bind,
- 'DELETE FROM cd WHERE ( cdid IN ( SELECT me.cdid FROM cd me WHERE ( year != ? ) ) )',
- ["'2010'"],
- 'Update on prefetching resultset strips prefetch correctly'
-);
-
done_testing;
)
);
-TODO: {
- local $TODO = 'New objects should also be inflated';
- my $user = $schema->resultset('User')->create($user_data);
- my $admin = $schema->resultset('User')->create($admin_data);
+{
+ my $user = $schema->resultset('User')->create($user_data);
+ my $admin = $schema->resultset('User')->create($admin_data);
- is( ref $user, 'My::Schema::Result::User' );
- is( ref $admin, 'My::Schema::Result::User::Admin' );
+ is( ref $user, 'My::Schema::Result::User' );
+
+ local $TODO = 'New objects should also be inflated';
+ is( ref $admin, 'My::Schema::Result::User::Admin' );
}
my $user = $schema->resultset('User')->single($user_data);
);
}
+# Test that the base class renders for => \'FOO' into a proper query.
+# for => 'READ'|'SHARE' is tested in db-specific subclasses.
+# We have to instantiate the base class because SQLMaker::SQLite disables _lock_select
+{
+ require DBIx::Class::SQLMaker;
+ my $sa = DBIx::Class::SQLMaker->new;
+ {
+ my ($sql, @bind) = $sa->select('foo', '*', {}, { for => 'update' } );
+ is_same_sql_bind(
+ $sql,
+ \@bind,
+ 'SELECT * FROM foo FOR UPDATE',
+ [],
+ );
+ }
+
+ {
+ my ($sql, @bind) = $sa->select('bar', '*', {}, { for => \'baz' } );
+ is_same_sql_bind(
+ $sql,
+ \@bind,
+ 'SELECT * FROM bar FOR baz',
+ [],
+ );
+ }
+
+}
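+
+# at resultset level the same attribute simply passes through, e.g.
+# ->search({}, { for => 'update' }) - a usage sketch, not exercised here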
+
+
+
# Make sure the carp/croak override in SQLA works (via SQLMaker)
my $file = quotemeta (__FILE__);
throws_ok (sub {
use warnings;
use Test::More;
+use Test::Warn;
use lib qw(t/lib);
use DBICTest;
$s->storage->sql_maker_class ('DBICTest::SQLMaker::CustomDialect');
my $rs = $s->resultset ('CD');
-is_same_sql_bind (
+
+warnings_exist { is_same_sql_bind (
$rs->search ({}, { rows => 1, offset => 3,columns => [
{ id => 'foo.id' },
{ 'bar.id' => 'bar.id' },
)',
[],
'Rownum subsel aliasing works correctly'
-);
+ )}
+ qr/\Qthe legacy emulate_limit() mechanism inherited from SQL::Abstract::Limit has been deprecated/,
+ 'deprecation warning'
+;
done_testing;
{
order_by => \'title DESC',
order_inner => 'title DESC',
- order_outer => 'ORDER__BY__1 ASC',
- order_req => 'ORDER__BY__1 DESC',
- exselect_outer => 'ORDER__BY__1',
- exselect_inner => 'title AS ORDER__BY__1',
+ order_outer => 'ORDER__BY__001 ASC',
+ order_req => 'ORDER__BY__001 DESC',
+ exselect_outer => 'ORDER__BY__001',
+ exselect_inner => 'title AS ORDER__BY__001',
},
{
order_by => { -asc => 'title' },
order_inner => 'title ASC',
- order_outer => 'ORDER__BY__1 DESC',
- order_req => 'ORDER__BY__1 ASC',
- exselect_outer => 'ORDER__BY__1',
- exselect_inner => 'title AS ORDER__BY__1',
+ order_outer => 'ORDER__BY__001 DESC',
+ order_req => 'ORDER__BY__001 ASC',
+ exselect_outer => 'ORDER__BY__001',
+ exselect_inner => 'title AS ORDER__BY__001',
},
{
order_by => { -desc => 'title' },
order_inner => 'title DESC',
- order_outer => 'ORDER__BY__1 ASC',
- order_req => 'ORDER__BY__1 DESC',
- exselect_outer => 'ORDER__BY__1',
- exselect_inner => 'title AS ORDER__BY__1',
+ order_outer => 'ORDER__BY__001 ASC',
+ order_req => 'ORDER__BY__001 DESC',
+ exselect_outer => 'ORDER__BY__001',
+ exselect_inner => 'title AS ORDER__BY__001',
},
{
order_by => 'title',
order_inner => 'title',
- order_outer => 'ORDER__BY__1 DESC',
- order_req => 'ORDER__BY__1',
- exselect_outer => 'ORDER__BY__1',
- exselect_inner => 'title AS ORDER__BY__1',
+ order_outer => 'ORDER__BY__001 DESC',
+ order_req => 'ORDER__BY__001',
+ exselect_outer => 'ORDER__BY__001',
+ exselect_inner => 'title AS ORDER__BY__001',
},
{
order_by => [ qw{ title me.owner} ],
order_inner => 'title, me.owner',
- order_outer => 'ORDER__BY__1 DESC, me.owner DESC',
- order_req => 'ORDER__BY__1, me.owner',
- exselect_outer => 'ORDER__BY__1',
- exselect_inner => 'title AS ORDER__BY__1',
+ order_outer => 'ORDER__BY__001 DESC, me.owner DESC',
+ order_req => 'ORDER__BY__001, me.owner',
+ exselect_outer => 'ORDER__BY__001',
+ exselect_inner => 'title AS ORDER__BY__001',
},
{
order_by => ['title', { -desc => 'bar' } ],
order_inner => 'title, bar DESC',
- order_outer => 'ORDER__BY__1 DESC, ORDER__BY__2 ASC',
- order_req => 'ORDER__BY__1, ORDER__BY__2 DESC',
- exselect_outer => 'ORDER__BY__1, ORDER__BY__2',
- exselect_inner => 'title AS ORDER__BY__1, bar AS ORDER__BY__2',
+ order_outer => 'ORDER__BY__001 DESC, ORDER__BY__002 ASC',
+ order_req => 'ORDER__BY__001, ORDER__BY__002 DESC',
+ exselect_outer => 'ORDER__BY__001, ORDER__BY__002',
+ exselect_inner => 'title AS ORDER__BY__001, bar AS ORDER__BY__002',
},
{
order_by => { -asc => [qw{ title bar }] },
order_inner => 'title ASC, bar ASC',
- order_outer => 'ORDER__BY__1 DESC, ORDER__BY__2 DESC',
- order_req => 'ORDER__BY__1 ASC, ORDER__BY__2 ASC',
- exselect_outer => 'ORDER__BY__1, ORDER__BY__2',
- exselect_inner => 'title AS ORDER__BY__1, bar AS ORDER__BY__2',
+ order_outer => 'ORDER__BY__001 DESC, ORDER__BY__002 DESC',
+ order_req => 'ORDER__BY__001 ASC, ORDER__BY__002 ASC',
+ exselect_outer => 'ORDER__BY__001, ORDER__BY__002',
+ exselect_inner => 'title AS ORDER__BY__001, bar AS ORDER__BY__002',
},
{
order_by => [
{ -asc => [qw{me.owner sensors}]},
],
order_inner => 'title, bar DESC, me.owner ASC, sensors ASC',
- order_outer => 'ORDER__BY__1 DESC, ORDER__BY__2 ASC, me.owner DESC, ORDER__BY__3 DESC',
- order_req => 'ORDER__BY__1, ORDER__BY__2 DESC, me.owner ASC, ORDER__BY__3 ASC',
- exselect_outer => 'ORDER__BY__1, ORDER__BY__2, ORDER__BY__3',
- exselect_inner => 'title AS ORDER__BY__1, bar AS ORDER__BY__2, sensors AS ORDER__BY__3',
+ order_outer => 'ORDER__BY__001 DESC, ORDER__BY__002 ASC, me.owner DESC, ORDER__BY__003 DESC',
+ order_req => 'ORDER__BY__001, ORDER__BY__002 DESC, me.owner ASC, ORDER__BY__003 ASC',
+ exselect_outer => 'ORDER__BY__001, ORDER__BY__002, ORDER__BY__003',
+ exselect_inner => 'title AS ORDER__BY__001, bar AS ORDER__BY__002, sensors AS ORDER__BY__003',
},
) {
my $o_sel = $ord_set->{exselect_outer}
$books_45_and_owners->search ({}, { group_by => 'title', order_by => 'title' })->as_query,
'(SELECT me.id, me.source, me.owner, me.price, owner.id, owner.name
FROM (
- SELECT me.id, me.source, me.owner, me.price, ORDER__BY__1 AS title
+ SELECT me.id, me.source, me.owner, me.price
FROM (
- SELECT me.id, me.source, me.owner, me.price, ORDER__BY__1
+ SELECT me.id, me.source, me.owner, me.price, ORDER__BY__001
FROM (
- SELECT me.id, me.source, me.owner, me.price, title AS ORDER__BY__1
+ SELECT me.id, me.source, me.owner, me.price, title AS ORDER__BY__001
FROM books me
JOIN owners owner ON owner.id = me.owner
WHERE ( source = ? )
ORDER BY title
FETCH FIRST 5 ROWS ONLY
) me
- ORDER BY ORDER__BY__1 DESC
+ ORDER BY ORDER__BY__001 DESC
FETCH FIRST 2 ROWS ONLY
) me
- ORDER BY ORDER__BY__1
+ ORDER BY ORDER__BY__001
) me
JOIN owners owner ON owner.id = me.owner
WHERE ( source = ? )
order_by => 'me.id',
});
-# SELECT [owner_name], [owner_books] FROM (
-# SELECT [owner_name], [owner_books], [ORDER__BY__1], ROW_NUMBER() OVER( ORDER BY [ORDER__BY__1] ) AS [rno__row__index] FROM (
-# SELECT [owner].[name] AS [owner_name], (SELECT COUNT( * ) FROM [owners] [owner] WHERE ( ( [count].[id] = [owner].[id] AND [count].[name] = ? ) )) AS [owner_books], [me].[id] AS [ORDER__BY__1] FROM [books] [me] JOIN [owners] [owner] ON [owner].[id] = [me].[owner] WHERE ( [source] = ? )
-# ) [me]
-# ) [me] WHERE [rno__row__index] >= ? AND [rno__row__index] <= ?
-
is_same_sql_bind(
$rs_selectas_rel->as_query,
'(
SELECT [owner_name], [owner_books]
FROM (
- SELECT [owner_name], [owner_books], ROW_NUMBER() OVER( ORDER BY [ORDER__BY__1] ) AS [rno__row__index]
+ SELECT [owner_name], [owner_books], ROW_NUMBER() OVER( ORDER BY [ORDER__BY__001] ) AS [rno__row__index]
FROM (
SELECT [owner].[name] AS [owner_name],
( SELECT COUNT( * ) FROM [owners] [owner]
WHERE [count].[id] = [owner].[id] and [count].[name] = ? ) AS [owner_books],
- [me].[id] AS [ORDER__BY__1]
+ [me].[id] AS [ORDER__BY__001]
FROM [books] [me]
JOIN [owners] [owner] ON [owner].[id] = [me].[owner]
WHERE ( [source] = ? )
{
order_by => \'title DESC',
order_inner => 'title DESC',
- order_outer => 'ORDER__BY__1 ASC',
- order_req => 'ORDER__BY__1 DESC',
- exselect_outer => 'ORDER__BY__1',
- exselect_inner => 'title AS ORDER__BY__1',
+ order_outer => 'ORDER__BY__001 ASC',
+ order_req => 'ORDER__BY__001 DESC',
+ exselect_outer => 'ORDER__BY__001',
+ exselect_inner => 'title AS ORDER__BY__001',
},
{
order_by => { -asc => 'title' },
order_inner => 'title ASC',
- order_outer => 'ORDER__BY__1 DESC',
- order_req => 'ORDER__BY__1 ASC',
- exselect_outer => 'ORDER__BY__1',
- exselect_inner => 'title AS ORDER__BY__1',
+ order_outer => 'ORDER__BY__001 DESC',
+ order_req => 'ORDER__BY__001 ASC',
+ exselect_outer => 'ORDER__BY__001',
+ exselect_inner => 'title AS ORDER__BY__001',
},
{
order_by => { -desc => 'title' },
order_inner => 'title DESC',
- order_outer => 'ORDER__BY__1 ASC',
- order_req => 'ORDER__BY__1 DESC',
- exselect_outer => 'ORDER__BY__1',
- exselect_inner => 'title AS ORDER__BY__1',
+ order_outer => 'ORDER__BY__001 ASC',
+ order_req => 'ORDER__BY__001 DESC',
+ exselect_outer => 'ORDER__BY__001',
+ exselect_inner => 'title AS ORDER__BY__001',
},
{
order_by => 'title',
order_inner => 'title',
- order_outer => 'ORDER__BY__1 DESC',
- order_req => 'ORDER__BY__1',
- exselect_outer => 'ORDER__BY__1',
- exselect_inner => 'title AS ORDER__BY__1',
+ order_outer => 'ORDER__BY__001 DESC',
+ order_req => 'ORDER__BY__001',
+ exselect_outer => 'ORDER__BY__001',
+ exselect_inner => 'title AS ORDER__BY__001',
},
{
order_by => [ qw{ title me.owner} ],
order_inner => 'title, me.owner',
- order_outer => 'ORDER__BY__1 DESC, me.owner DESC',
- order_req => 'ORDER__BY__1, me.owner',
- exselect_outer => 'ORDER__BY__1',
- exselect_inner => 'title AS ORDER__BY__1',
+ order_outer => 'ORDER__BY__001 DESC, me.owner DESC',
+ order_req => 'ORDER__BY__001, me.owner',
+ exselect_outer => 'ORDER__BY__001',
+ exselect_inner => 'title AS ORDER__BY__001',
},
{
order_by => ['title', { -desc => 'bar' } ],
order_inner => 'title, bar DESC',
- order_outer => 'ORDER__BY__1 DESC, ORDER__BY__2 ASC',
- order_req => 'ORDER__BY__1, ORDER__BY__2 DESC',
- exselect_outer => 'ORDER__BY__1, ORDER__BY__2',
- exselect_inner => 'title AS ORDER__BY__1, bar AS ORDER__BY__2',
+ order_outer => 'ORDER__BY__001 DESC, ORDER__BY__002 ASC',
+ order_req => 'ORDER__BY__001, ORDER__BY__002 DESC',
+ exselect_outer => 'ORDER__BY__001, ORDER__BY__002',
+ exselect_inner => 'title AS ORDER__BY__001, bar AS ORDER__BY__002',
},
{
order_by => { -asc => [qw{ title bar }] },
order_inner => 'title ASC, bar ASC',
- order_outer => 'ORDER__BY__1 DESC, ORDER__BY__2 DESC',
- order_req => 'ORDER__BY__1 ASC, ORDER__BY__2 ASC',
- exselect_outer => 'ORDER__BY__1, ORDER__BY__2',
- exselect_inner => 'title AS ORDER__BY__1, bar AS ORDER__BY__2',
+ order_outer => 'ORDER__BY__001 DESC, ORDER__BY__002 DESC',
+ order_req => 'ORDER__BY__001 ASC, ORDER__BY__002 ASC',
+ exselect_outer => 'ORDER__BY__001, ORDER__BY__002',
+ exselect_inner => 'title AS ORDER__BY__001, bar AS ORDER__BY__002',
},
{
order_by => [
{ -asc => [qw{me.owner sensors}]},
],
order_inner => 'title, bar DESC, me.owner ASC, sensors ASC',
- order_outer => 'ORDER__BY__1 DESC, ORDER__BY__2 ASC, me.owner DESC, ORDER__BY__3 DESC',
- order_req => 'ORDER__BY__1, ORDER__BY__2 DESC, me.owner ASC, ORDER__BY__3 ASC',
- exselect_outer => 'ORDER__BY__1, ORDER__BY__2, ORDER__BY__3',
- exselect_inner => 'title AS ORDER__BY__1, bar AS ORDER__BY__2, sensors AS ORDER__BY__3',
+ order_outer => 'ORDER__BY__001 DESC, ORDER__BY__002 ASC, me.owner DESC, ORDER__BY__003 DESC',
+ order_req => 'ORDER__BY__001, ORDER__BY__002 DESC, me.owner ASC, ORDER__BY__003 ASC',
+ exselect_outer => 'ORDER__BY__001, ORDER__BY__002, ORDER__BY__003',
+ exselect_inner => 'title AS ORDER__BY__001, bar AS ORDER__BY__002, sensors AS ORDER__BY__003',
},
) {
my $o_sel = $ord_set->{exselect_outer}
$books_45_and_owners->search ({}, { group_by => 'title', order_by => 'title' })->as_query,
'(SELECT me.id, me.source, me.owner, me.price, owner.id, owner.name
FROM (
- SELECT me.id, me.source, me.owner, me.price, ORDER__BY__1 AS title
+ SELECT me.id, me.source, me.owner, me.price
FROM (
SELECT TOP 2
- me.id, me.source, me.owner, me.price, ORDER__BY__1
+ me.id, me.source, me.owner, me.price, ORDER__BY__001
FROM (
SELECT TOP 5
- me.id, me.source, me.owner, me.price, title AS ORDER__BY__1
+ me.id, me.source, me.owner, me.price, title AS ORDER__BY__001
FROM books me
JOIN owners owner ON owner.id = me.owner
WHERE ( source = ? )
GROUP BY title
ORDER BY title
) me
- ORDER BY ORDER__BY__1 DESC
+ ORDER BY ORDER__BY__001 DESC
) me
- ORDER BY ORDER__BY__1
+ ORDER BY ORDER__BY__001
) me
JOIN owners owner ON owner.id = me.owner
WHERE ( source = ? )
use warnings;
use Test::More;
+use Test::Exception;
use lib qw(t/lib);
use DBICTest;
use DBIC::SqlMakerTest;
my $schema = DBICTest->init_schema;
+my $native_limit_dialect = $schema->storage->sql_maker->{limit_dialect};
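+# (the only limit dialect we can execute for real against this storage,
+# aside from the universally valid GenericSubQ - see the bottom of the loop)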
my $attr = {};
my @where_bind = (
[ { sqlt_datatype => 'integer' } => 3 ],
],
],
+ limit_offset_prefetch => [
+ '(
+ SELECT me.name, books.id, books.source, books.owner, books.title, books.price
+ FROM (
+ SELECT me.name, me.id
+ FROM owners me
+ LIMIT ? OFFSET ?
+ ) me
+ LEFT JOIN books books
+ ON books.owner = me.id
+ ORDER BY me.id
+ )',
+ [
+ [ { sqlt_datatype => 'integer' } => 3 ],
+ [ { sqlt_datatype => 'integer' } => 1 ],
+ ]
+ ],
},
LimitXY => {
[ { sqlt_datatype => 'integer' } => 4 ],
],
],
+ limit_offset_prefetch => [
+ '(
+ SELECT me.name, books.id, books.source, books.owner, books.title, books.price
+ FROM (
+ SELECT me.name, me.id
+ FROM owners me
+ LIMIT ?,?
+ ) me
+ LEFT JOIN books books
+ ON books.owner = me.id
+ ORDER BY me.id
+ )',
+ [
+ [ { sqlt_datatype => 'integer' } => 1 ],
+ [ { sqlt_datatype => 'integer' } => 3 ],
+ ]
+ ],
},
SkipFirst => {
@order_bind,
],
],
+ limit_offset_prefetch => [
+ '(
+ SELECT me.name, books.id, books.source, books.owner, books.title, books.price
+ FROM (
+ SELECT SKIP ? FIRST ? me.name, me.id
+ FROM owners me
+ ) me
+ LEFT JOIN books books
+ ON books.owner = me.id
+ ORDER BY me.id
+ )',
+ [
+ [ { sqlt_datatype => 'integer' } => 1 ],
+ [ { sqlt_datatype => 'integer' } => 3 ],
+ ]
+ ],
},
FirstSkip => {
@order_bind,
],
],
+ limit_offset_prefetch => [
+ '(
+ SELECT me.name, books.id, books.source, books.owner, books.title, books.price
+ FROM (
+ SELECT FIRST ? SKIP ? me.name, me.id
+ FROM owners me
+ ) me
+ LEFT JOIN books books
+ ON books.owner = me.id
+ ORDER BY me.id
+ )',
+ [
+ [ { sqlt_datatype => 'integer' } => 3 ],
+ [ { sqlt_datatype => 'integer' } => 1 ],
+ ]
+ ],
},
RowNumberOver => do {
my $ordered_sql = '(
SELECT me.id, owner__id, owner__name, bar, baz
FROM (
- SELECT me.id, owner__id, owner__name, bar, baz, ROW_NUMBER() OVER( ORDER BY ORDER__BY__1, ORDER__BY__2 ) AS rno__row__index
+ SELECT me.id, owner__id, owner__name, bar, baz, ROW_NUMBER() OVER( ORDER BY ORDER__BY__001, ORDER__BY__002 ) AS rno__row__index
FROM (
SELECT me.id, owner.id AS owner__id, owner.name AS owner__name, ? * ? AS bar, ? AS baz,
- ? / ? AS ORDER__BY__1, ? AS ORDER__BY__2
+ ? / ? AS ORDER__BY__001, ? AS ORDER__BY__002
FROM books me
JOIN owners owner
ON owner.id = me.owner
[ { sqlt_datatype => 'integer' } => 7 ],
],
],
+ limit_offset_prefetch => [
+ '(
+ SELECT me.name, books.id, books.source, books.owner, books.title, books.price
+ FROM (
+ SELECT me.name, me.id
+ FROM (
+ SELECT me.name, me.id, ROW_NUMBER() OVER() AS rno__row__index
+ FROM (
+ SELECT me.name, me.id FROM owners me
+ ) me
+ ) me
+ WHERE rno__row__index >= ? AND rno__row__index <= ?
+ ) me
+ LEFT JOIN books books
+ ON books.owner = me.id
+ ORDER BY me.id
+ )',
+ [
+ [ { sqlt_datatype => 'integer' } => 2 ],
+ [ { sqlt_datatype => 'integer' } => 4 ],
+ ]
+ ],
};
},
[ { sqlt_datatype => 'integer' } => 4 ],
],
],
+ limit_offset_prefetch => [
+ '(
+ SELECT me.name, books.id, books.source, books.owner, books.title, books.price
+ FROM (
+ SELECT me.name, me.id
+ FROM (
+ SELECT me.name, me.id, ROWNUM rownum__index
+ FROM (
+ SELECT me.name, me.id
+ FROM owners me
+ ) me
+ ) me WHERE rownum__index BETWEEN ? AND ?
+ ) me
+ LEFT JOIN books books
+ ON books.owner = me.id
+ ORDER BY me.id
+ )',
+ [
+ [ { sqlt_datatype => 'integer' } => 2 ],
+ [ { sqlt_datatype => 'integer' } => 4 ],
+ ]
+ ],
};
},
'(
SELECT me.id, owner__id, owner__name, bar, baz
FROM (
- SELECT me.id, owner__id, owner__name, bar, baz, ORDER__BY__1, ORDER__BY__2
+ SELECT me.id, owner__id, owner__name, bar, baz, ORDER__BY__001, ORDER__BY__002
FROM (
- SELECT me.id, owner.id AS owner__id, owner.name AS owner__name, ? * ? AS bar, ? AS baz, ? / ? AS ORDER__BY__1, ? AS ORDER__BY__2
+ SELECT me.id, owner.id AS owner__id, owner.name AS owner__name, ? * ? AS bar, ? AS baz, ? / ? AS ORDER__BY__001, ? AS ORDER__BY__002
FROM books me
JOIN owners owner
ON owner.id = me.owner
ORDER BY ? / ?, ?
FETCH FIRST 7 ROWS ONLY
) me
- ORDER BY ORDER__BY__1 DESC, ORDER__BY__2 DESC
+ ORDER BY ORDER__BY__001 DESC, ORDER__BY__002 DESC
FETCH FIRST 4 ROWS ONLY
) me
- ORDER BY ORDER__BY__1, ORDER__BY__2
+ ORDER BY ORDER__BY__001, ORDER__BY__002
)',
[
@select_bind,
(map { [ @$_ ] } @order_bind), # without this is_deeply throws a fit
],
],
+ limit_offset_prefetch => [
+ '(
+ SELECT me.name, books.id, books.source, books.owner, books.title, books.price
+ FROM (
+ SELECT me.name, me.id
+ FROM (
+ SELECT me.name, me.id
+ FROM owners me
+ ORDER BY me.id
+ FETCH FIRST 4 ROWS ONLY
+ ) me
+ ORDER BY me.id DESC
+ FETCH FIRST 3 ROWS ONLY
+ ) me
+ LEFT JOIN books books
+ ON books.owner = me.id
+ ORDER BY me.id
+ )',
+ [],
+ ],
},
Top => {
'(
SELECT me.id, owner__id, owner__name, bar, baz
FROM (
- SELECT TOP 4 me.id, owner__id, owner__name, bar, baz, ORDER__BY__1, ORDER__BY__2
+ SELECT TOP 4 me.id, owner__id, owner__name, bar, baz, ORDER__BY__001, ORDER__BY__002
FROM (
- SELECT TOP 7 me.id, owner.id AS owner__id, owner.name AS owner__name, ? * ? AS bar, ? AS baz, ? / ? AS ORDER__BY__1, ? AS ORDER__BY__2
+ SELECT TOP 7 me.id, owner.id AS owner__id, owner.name AS owner__name, ? * ? AS bar, ? AS baz, ? / ? AS ORDER__BY__001, ? AS ORDER__BY__002
FROM books me
JOIN owners owner
ON owner.id = me.owner
HAVING ?
ORDER BY ? / ?, ?
) me
- ORDER BY ORDER__BY__1 DESC, ORDER__BY__2 DESC
+ ORDER BY ORDER__BY__001 DESC, ORDER__BY__002 DESC
) me
- ORDER BY ORDER__BY__1, ORDER__BY__2
+ ORDER BY ORDER__BY__001, ORDER__BY__002
)',
[
@select_bind,
(map { [ @$_ ] } @order_bind), # without this is_deeply throws a fit
],
],
+ limit_offset_prefetch => [
+ '(
+ SELECT me.name, books.id, books.source, books.owner, books.title, books.price
+ FROM (
+ SELECT TOP 3 me.name, me.id
+ FROM (
+ SELECT TOP 4 me.name, me.id
+ FROM owners me
+ ORDER BY me.id
+ ) me
+ ORDER BY me.id DESC
+ ) me
+ LEFT JOIN books books
+ ON books.owner = me.id
+ ORDER BY me.id
+ )',
+ [],
+ ],
},
RowCountOrGenericSubQ => {
[ { sqlt_datatype => 'integer' } => 6 ],
],
],
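+    # the GenericSubQ fallback emulates an offset window by counting the
+    # preceding rows in a correlated subquery - hence the BETWEEN on COUNT(*)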
+ limit_offset_prefetch => [
+ '(
+ SELECT me.name, books.id, books.source, books.owner, books.title, books.price
+ FROM (
+ SELECT me.name, me.id
+ FROM (
+ SELECT me.name, me.id FROM owners me
+ ) me
+ WHERE (
+ SELECT COUNT(*)
+ FROM owners rownum__emulation
+ WHERE rownum__emulation.id < me.id
+ ) BETWEEN ? AND ?
+ ORDER BY me.id
+ ) me
+ LEFT JOIN books books
+ ON books.owner = me.id
+ ORDER BY me.id
+ )',
+ [
+ [ { sqlt_datatype => 'integer' } => 1 ],
+ [ { sqlt_datatype => 'integer' } => 3 ],
+ ],
+ ],
}
};
@{$tests->{$limtype}{ordered_limit_offset}},
"$limtype: Ordered limit+offset with select/group/having",
) if $tests->{$limtype}{ordered_limit_offset};
+
+ # complex prefetch on partial-fetch root with limit
+ my $pref_rs = $schema->resultset('Owners')->search({}, {
+ rows => 3,
+ offset => 1,
+ columns => 'name', # only the owner name, still prefetch all the books
+ prefetch => 'books',
+ ($limtype =~ /GenericSubQ/ ? ( order_by => 'me.id' ) : () ), # needs a simple-column stable order to be happy
+ });
+
+ is_same_sql_bind (
+ $pref_rs->as_query,
+ @{$tests->{$limtype}{limit_offset_prefetch}},
+ "$limtype: Prefetch with limit+offset",
+ ) if $tests->{$limtype}{limit_offset_prefetch};
+
+ # we can actually run the query
+ if ($limtype eq $native_limit_dialect or $limtype eq 'GenericSubQ') {
+    lives_ok { is ($pref_rs->all, 1, 'Expected count of objects on limited prefetch') }
+ "Complex limited prefetch works with supported limit $limtype"
+ }
}
done_testing;
--- /dev/null
+use strict;
+use warnings;
+
+use Test::More;
+
+use lib qw(t/lib);
+use DBICTest;
+use DBICTest::Schema;
+use DBIC::SqlMakerTest;
+use DBIC::DebugObj;
+
+my $schema = DBICTest::Schema->connect (DBICTest->_database, { quote_char => '`' });
+# cheat: pose as a mysql storage so the mysql-specific SQLMaker
+# behavior kicks in, without needing a live mysql connection
+require DBIx::Class::Storage::DBI::mysql;
+bless ( $schema->storage, 'DBIx::Class::Storage::DBI::mysql' );
+
+# check that double-subqueries are properly wrapped
+{
+ my ($sql, @bind);
+ my $debugobj = DBIC::DebugObj->new (\$sql, \@bind);
+ my $orig_debugobj = $schema->storage->debugobj;
+ my $orig_debug = $schema->storage->debug;
+
+ $schema->storage->debugobj ($debugobj);
+ $schema->storage->debug (1);
+
+ # the expected SQL may seem wastefully nonsensical - this is due to
+ # CD's tablename being \'cd', which triggers the "this can be anything"
+ # mode, and forces a subquery. This in turn forces *another* subquery
+ # because mysql is being mysql
+  # Also we know it will fail - nothing was ever deployed. All we care
+  # about is the SQL to compare
+ eval { $schema->resultset ('CD')->update({ genreid => undef }) };
+ is_same_sql_bind (
+ $sql,
+ \@bind,
+ 'UPDATE cd SET `genreid` = ? WHERE `cdid` IN ( SELECT * FROM ( SELECT `me`.`cdid` FROM cd `me` ) `_forced_double_subquery` )',
+ [ 'NULL' ],
+ 'Correct update-SQL with double-wrapped subquery',
+ );
+
+ # same comment as above
+ eval { $schema->resultset ('CD')->delete };
+ is_same_sql_bind (
+ $sql,
+ \@bind,
+ 'DELETE FROM cd WHERE `cdid` IN ( SELECT * FROM ( SELECT `me`.`cdid` FROM cd `me` ) `_forced_double_subquery` )',
+ [],
+ 'Correct delete-SQL with double-wrapped subquery',
+ );
+
+ # and a really contrived example (we test it live in t/71mysql.t)
+ my $rs = $schema->resultset('Artist')->search({ name => { -like => 'baby_%' } });
+ my ($count_sql, @count_bind) = @${$rs->count_rs->as_query};
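+  # as_query returns \[ $sql, @bind ] (a ref to an arrayref) - the double
+  # deref unpacks both pieces for splicing into the literal SQL below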
+ eval {
+ $schema->resultset('Artist')->search(
+ { artistid => {
+ -in => $rs->get_column('artistid')
+ ->as_query
+ } },
+ )->update({ name => \[ "CONCAT( `name`, '_bell_out_of_', $count_sql )", @count_bind ] });
+ };
+
+ is_same_sql_bind (
+ $sql,
+ \@bind,
+ q(
+ UPDATE `artist`
+ SET `name` = CONCAT(`name`, '_bell_out_of_', (
+ SELECT *
+ FROM (
+ SELECT COUNT( * )
+ FROM `artist` `me`
+ WHERE `name` LIKE ?
+ ) `_forced_double_subquery`
+ ))
+ WHERE
+ `artistid` IN (
+ SELECT *
+ FROM (
+ SELECT `me`.`artistid`
+ FROM `artist` `me`
+ WHERE `name` LIKE ?
+ ) `_forced_double_subquery` )
+ ),
+ [ ("'baby_%'") x 2 ],
+ );
+
+ $schema->storage->debugobj ($orig_debugobj);
+ $schema->storage->debug ($orig_debug);
+}
+
+done_testing;
+++ /dev/null
-use strict;
-use warnings;
-
-use Test::More;
-
-use lib qw(t/lib);
-use DBIC::SqlMakerTest;
-
-use_ok('DBICTest');
-
-my $schema = DBICTest->init_schema();
-
-my $sql_maker = $schema->storage->sql_maker;
-
-for my $q ('', '"') {
-
- $sql_maker->quote_char($q);
-
- is_same_sql_bind (
- \[ $sql_maker->select ('artist', '*', { 'artist.name' => { -ident => 'artist.pseudonym' } } ) ],
- "SELECT *
- FROM ${q}artist${q}
- WHERE ${q}artist${q}.${q}name${q} = ${q}artist${q}.${q}pseudonym${q}
- ",
- [],
- );
-
- is_same_sql_bind (
- \[ $sql_maker->update ('artist',
- { 'artist.name' => { -ident => 'artist.pseudonym' } },
- { 'artist.name' => { '!=' => { -ident => 'artist.pseudonym' } } },
- ) ],
- "UPDATE ${q}artist${q}
- SET ${q}artist${q}.${q}name${q} = ${q}artist${q}.${q}pseudonym${q}
- WHERE ${q}artist${q}.${q}name${q} != ${q}artist${q}.${q}pseudonym${q}
- ",
- [],
- );
-}
-
-done_testing;
+++ /dev/null
-use strict;
-use warnings;
-
-use Test::More;
-
-use lib qw(t/lib);
-use DBIC::SqlMakerTest;
-
-use_ok('DBICTest');
-
-my $schema = DBICTest->init_schema();
-
-my $sql_maker = $schema->storage->sql_maker;
-
-for my $q ('', '"') {
-
- $sql_maker->quote_char($q);
-
- is_same_sql_bind (
- \[ $sql_maker->select ('artist', '*', { arr1 => { -value => [1,2] }, arr2 => { '>', { -value => [3,4] } }, field => [5,6] } ) ],
- "SELECT *
- FROM ${q}artist${q}
- WHERE ${q}arr1${q} = ? AND
- ${q}arr2${q} > ? AND
- ( ${q}field${q} = ? OR ${q}field${q} = ? )
- ",
- [
- [ arr1 => [1,2] ],
- [ arr2 => [3,4] ],
- [ field => 5 ],
- [ field => 6 ],
- ],
- );
-}
-
-done_testing;
use DBIC::SqlMakerTest;
sub test_order {
-
- TODO: {
my $rs = shift;
my $args = shift;
],
) || diag Dumper $args->{order_by};
};
- }
}
my @tests = (
my $schema = DBICTest->init_schema();
my $storage = $schema->storage;
+# test (re)connection
+for my $disconnect (0, 1) {
+ $schema->storage->_dbh->disconnect if $disconnect;
+ is_deeply (
+ $schema->storage->dbh_do(sub { $_[1]->selectall_arrayref('SELECT 1') }),
+ [ [ 1 ] ],
+ 'dbh_do on fresh handle worked',
+ );
+}
+
my @args;
my $test_func = sub { @args = @_ };
[ $storage, $storage->dbh, "baz", "buz" ],
);
-# test aliasing
+# test nested aliasing
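+# dbh_do invokes its coderef as ($storage, $dbh, @args); forwarding the
+# outer @_ (minus the shifted storage) pushes $res to index 3 inside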
my $res = 'original';
-$storage->dbh_do (sub { $_[2] = 'changed' }, $res);
+$storage->dbh_do (sub {
+ shift->dbh_do(sub { $_[3] = 'changed' }, @_)
+}, $res);
is ($res, 'changed', "Arguments properly aliased for dbh_do");
ok( -d $test_dir_1, 'create_ddl_dir did a make_path on its target dir' );
ok( scalar( glob $test_dir_1.'/*.sql' ), 'there are sql files in there' );
-TODO: {
- local $TODO = 'we should probably add some tests here for actual deployability of the DDL?';
- ok( 0 );
+{
+ local $TODO = 'we should probably add some tests here for actual deployability of the DDL?';
+ ok( 0 );
}
END {
--- /dev/null
+use strict;
+use warnings;
+
+use Test::More;
+use Test::Warn;
+use Test::Exception;
+use lib qw(t/lib);
+use DBICTest;
+
+{
+ package DBICTest::Legacy::Storage;
+ use base 'DBIx::Class::Storage::DBI::SQLite';
+
+ use Data::Dumper::Concise;
+
+ sub source_bind_attributes { return {} }
+}
+
+
+my $schema = DBICTest::Schema->clone;
+$schema->storage_type('DBICTest::Legacy::Storage');
+$schema->connection('dbi:SQLite::memory:');
+
+throws_ok
+ { $schema->storage->ensure_connected }
+ qr/\Qstorage subclass DBICTest::Legacy::Storage provides (or inherits) the method source_bind_attributes()/,
+ 'deprecated use of source_bind_attributes throws',
+;
+
+done_testing;
use Test::Exception;
use lib qw(t/lib);
-use_ok( 'DBICTest' );
-use_ok( 'DBICTest::Schema' );
+use DBICTest;
my $schema = DBICTest->init_schema;
# exception fallback:
SKIP: {
- if (DBIx::Class::_ENV_::PEEPEENESS()) {
+ if (DBIx::Class::_ENV_::PEEPEENESS) {
skip "Your perl version $] appears to leak like a sieve - skipping garbage collected \$schema test", 1;
}
use warnings;
use Test::More;
-use Test::Exception;
use DBIx::Class::Optional::Dependencies ();
use lib qw(t/lib);
use DBICTest;
-plan skip_all => 'Test segfaults on Win32' if $^O eq 'MSWin32';
-
-for my $type (qw/PG MYSQL/) {
+for my $type (qw/PG MYSQL SQLite/) {
SKIP: {
- skip "Skipping $type tests without DBICTEST_${type}_DSN", 1
- unless $ENV{"DBICTEST_${type}_DSN"};
+ my @dsn = $type eq 'SQLite'
+ ? DBICTest->_database(sqlite_use_file => 1)
+ : do {
+ skip "Skipping $type tests without DBICTEST_${type}_DSN", 1
+ unless $ENV{"DBICTEST_${type}_DSN"};
+ @ENV{map { "DBICTEST_${type}_${_}" } qw/DSN USER PASS/}
+ }
+ ;
if ($type eq 'PG') {
skip "skipping Pg tests without dependencies installed", 1
      unless DBIx::Class::Optional::Dependencies->req_ok_for('test_rdbms_pg');
}
- my $schema = DBICTest::Schema->connect (@ENV{map { "DBICTEST_${type}_${_}" } qw/DSN USER PASS/});
+ my $schema = DBICTest::Schema->connect (@dsn);
# emulate a singleton-factory, just cache the object *somewhere in a different package*
# to induce out-of-order destruction
ok (!$schema->storage->connected, "$type: start disconnected");
- lives_ok (sub {
- $schema->txn_do (sub {
-
- ok ($schema->storage->connected, "$type: transaction starts connected");
+ $schema->txn_do (sub {
- my $pid = fork();
- SKIP: {
- skip "Fork failed: $!", 1 if (! defined $pid);
+ ok ($schema->storage->connected, "$type: transaction starts connected");
- if ($pid) {
- note "Parent $$ sleeping...";
- wait();
- note "Parent $$ woken up after child $pid exit";
- }
- else {
- note "Child $$ terminating";
- undef $DBICTest::FakeSchemaFactory::schema;
- exit 0;
- }
+ my $pid = fork();
+ SKIP: {
+ skip "Fork failed: $!", 1 if (! defined $pid);
- ok ($schema->storage->connected, "$type: parent still connected (in txn_do)");
+ if ($pid) {
+ note "Parent $$ sleeping...";
+ wait();
+ note "Parent $$ woken up after child $pid exit";
}
- });
+ else {
+ note "Child $$ terminating";
+ undef $DBICTest::FakeSchemaFactory::schema;
+ exit 0;
+ }
+
+ ok ($schema->storage->connected, "$type: parent still connected (in txn_do)");
+ }
});
ok ($schema->storage->connected, "$type: parent still connected (outside of txn_do)");
my $schema = DBICTest->init_schema( sqlite_use_file => 1 );
# Make sure we're connected by doing something
-my @art = $schema->resultset("Artist")->search({ }, { order_by => 'name DESC'});
+my @art = $schema->resultset("Artist")->search({ }, { order_by => { -desc => 'name' }});
cmp_ok(@art, '==', 3, "Three artists returned");
# Disconnect the dbh, and be sneaky about it
# 2. It catches the exception, checks ->{Active}/->ping, sees the disconnected state...
# 3. Reconnects, and retries the operation
# 4. Success!
-my @art_two = $schema->resultset("Artist")->search({ }, { order_by => 'name DESC'});
+my @art_two = $schema->resultset("Artist")->search({ }, { order_by => { -desc => 'name' }});
cmp_ok(@art_two, '==', 3, "Three artists returned");
### Now, disconnect the dbh, and move the db file;
-# create a new one and chmod 000 to prevent SQLite from connecting.
+# create a new one full of garbage, preventing SQLite from connecting.
$schema->storage->_dbh->disconnect;
move( $db_orig, $db_tmp )
or die "failed to move $db_orig to $db_tmp: $!";
-open DBFILE, '>', $db_orig;
-print DBFILE 'THIS IS NOT A REAL DATABASE';
-close DBFILE;
-chmod 0000, $db_orig;
+open my $db_file, '>', $db_orig;
+print $db_file 'THIS IS NOT A REAL DATABASE';
+close $db_file;
-### Try the operation again... it should fail, since there's no db
+### Try the operation again... it should fail, since there's no valid db
{
- # Catch the DBI connection error
- local $SIG{__WARN__} = sub {};
- dies_ok {
- my @art_three = $schema->resultset("Artist")->search( {}, { order_by => 'name DESC' } );
- } 'The operation failed';
+ # Catch the DBI connection error
+ local $SIG{__WARN__} = sub {};
+ throws_ok {
+ my @art_three = $schema->resultset("Artist")->search( {}, { order_by => { -desc => 'name' } } );
+ } qr/not a database/, 'The operation failed';
}
-# otherwise can't unlink the fake db file
-$schema->storage->_dbh->disconnect if $^O eq 'MSWin32';
+ok (! $schema->storage->connected, 'We are not connected' );
### Now, move the db file back to the correct name
unlink($db_orig) or die "could not delete $db_orig: $!";
### Try the operation again... this time, it should succeed
my @art_four;
lives_ok {
- @art_four = $schema->resultset("Artist")->search( {}, { order_by => 'name DESC' } );
+ @art_four = $schema->resultset("Artist")->search( {}, { order_by => { -desc => 'name' } } );
} 'The operation succeeded';
cmp_ok( @art_four, '==', 3, "Three artists returned" );
## Get the Schema and set the replication storage type
sub init_schema {
- # current SQLT SQLite producer does not handle DROP TABLE IF EXISTS, trap warnings here
- local $SIG{__WARN__} = sub { warn @_ unless $_[0] =~ /no such table.+DROP TABLE/s };
-
- my ($class, $schema_method) = @_;
-
- my $method = "get_schema_$schema_method";
- my $schema = $class->$method;
-
- return $schema;
+ #my ($class, $schema_getter) = @_;
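+  # dynamic dispatch: invoke $class->get_schema_<getter> and return its result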
+ shift->${\ ( 'get_schema_' . shift ) };
}
sub get_schema_by_storage_type {
+++ /dev/null
-use strict;
-use warnings;
-
-use Test::More;
-use Test::Warn;
-use Test::Exception;
-use lib qw(t/lib);
-use DBICTest;
-
-{
- package DBICTest::Legacy::Storage;
- use base 'DBIx::Class::Storage::DBI::SQLite';
-
- use Data::Dumper::Concise;
-
- sub source_bind_attributes { return {} }
-}
-
-
-my $schema = DBICTest::Schema->clone;
-$schema->storage_type('DBICTest::Legacy::Storage');
-$schema->connection('dbi:SQLite::memory:');
-
-$schema->storage->dbh_do( sub { $_[1]->do(<<'EOS') } );
-CREATE TABLE artist (
- artistid INTEGER PRIMARY KEY NOT NULL,
- name varchar(100),
- rank integer NOT NULL DEFAULT 13,
- charfield char(10)
-)
-EOS
-
-my $legacy = sub { $schema->resultset('Artist')->search({ name => 'foo'})->next };
-if (DBIx::Class->VERSION >= 0.09) {
- &throws_ok(
- $legacy,
- qr/XXXXXXXXX not sure what error to put here yet XXXXXXXXXXXXXXX/,
- 'deprecated use of source_bind_attributes throws',
- );
-}
-else {
- &warnings_exist (
- $legacy,
- qr/\QThe source_bind_attributes() override in DBICTest::Legacy::Storage relies on a deprecated codepath/,
- 'Warning issued during invocation of legacy storage codepath',
- );
-}
-
-done_testing;
});
$guard->commit;
- } qr/No such column made_up_column .*? at .*?\Q$fn\E line \d+/s, "Error propogated okay";
+ } qr/No such column 'made_up_column' .*? at .*?\Q$fn\E line \d+/s, "Error propogated okay";
ok(!$artist_rs->find({name => 'Death Cab for Cutie'}), "Artist not created");
use strict;
use warnings;
use Test::More;
-use Benchmark;
use lib qw(t/lib);
+
+BEGIN {
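+  # decide on skipping *before* Benchmark and the rest of the test deps load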
+ plan skip_all =>
+ 'Skipping RH perl performance bug tests as DBIC_NO_WARN_BAD_PERL set'
+ if ( $ENV{DBIC_NO_WARN_BAD_PERL} );
+
+ require DBICTest::RunMode;
+ plan skip_all => 'Skipping as system appears to be a smoker'
+ if DBICTest::RunMode->is_smoker;
+}
+
+use Benchmark;
use DBICTest ':GlobalLock';
# This is a rather unusual test.
# Perl Performance Issues on Red Hat Systems in
# L<DBIx::Class::Manual::Troubleshooting>
-plan skip_all =>
- 'Skipping RH perl performance bug tests as DBIC_NO_WARN_BAD_PERL set'
- if ( $ENV{DBIC_NO_WARN_BAD_PERL} );
-
-plan skip_all => 'Skipping as system appears to be a smoker'
- if DBICTest::RunMode->is_smoker;
-
plan tests => 3;
ok( 1, 'Dummy - prevents next test timing out' );
+++ /dev/null
-use warnings;
-use strict;
-
-use Test::More;
-use lib 't/lib';
-use DBICTest;
-
-require DBIx::Class;
-unless ( DBIx::Class::Optional::Dependencies->req_ok_for ('test_eol') ) {
- my $missing = DBIx::Class::Optional::Dependencies->req_missing_for ('test_eol');
- $ENV{RELEASE_TESTING}
- ? die ("Failed to load release-testing module requirements: $missing")
- : plan skip_all => "Test needs: $missing"
-}
-
-Test::EOL::all_perl_files_ok({ trailing_whitespace => 1 },
- qw/t xt lib script examples/,
- DBICTest::RunMode->is_author ? ('maint') : (),
-);
-
-# Changes is not a "perl file", hence checked separately
-Test::EOL::eol_unix_ok('Changes', { trailing_whitespace => 1 });
-
-# FIXME - Test::EOL declares 'no_plan' which conflicts with done_testing
-# https://github.com/schwern/test-more/issues/14
-#done_testing;
+++ /dev/null
-use warnings;
-use strict;
-
-use Test::More;
-use lib 't/lib';
-use DBICTest;
-
-require DBIx::Class;
-unless ( DBIx::Class::Optional::Dependencies->req_ok_for ('test_notabs') ) {
- my $missing = DBIx::Class::Optional::Dependencies->req_missing_for ('test_notabs');
- $ENV{RELEASE_TESTING}
- ? die ("Failed to load release-testing module requirements: $missing")
- : plan skip_all => "Test needs: $missing"
-}
-
-Test::NoTabs::all_perl_files_ok(
- qw/t xt lib script examples/,
- DBICTest::RunMode->is_author ? ('maint') : (),
-);
-
-# Changes is not a "perl file", hence checked separately
-Test::NoTabs::notabs_ok('Changes');
-
-# FIXME - Test::NoTabs declares 'no_plan' which conflicts with done_testing
-# https://github.com/schwern/test-more/issues/14
-#done_testing;
is_deeply(
DBIx::Class::Optional::Dependencies->req_list_for('test_rdbms_pg'),
{
- 'Sys::SigAction' => '0',
+ $^O ne 'MSWin32' ? ('Sys::SigAction' => '0') : (),
'DBD::Pg' => '2.009002',
}, 'optional dependencies for testing Postgres with ENV var ok');
: plan skip_all => "Test needs: $missing"
}
-Test::Pod::all_pod_files_ok();
+# this has already been required but leave it here for CPANTS static analysis
+require Test::Pod;
+
+my $generated_pod_dir = 'maint/.Generated_Pod';
+Test::Pod::all_pod_files_ok( 'lib', -d $generated_pod_dir ? $generated_pod_dir : () );
use Test::More;
use List::Util 'first';
-use lib qw(t/lib);
+use lib qw(t/lib maint/.Generated_Pod/lib);
use DBICTest;
use namespace::clean;
: plan skip_all => "Test needs: $missing"
}
+# this has already been required but leave it here for CPANTS static analysis
+require Test::Pod::Coverage;
+
# Since this is about checking documentation, a little documentation
# of what this is doing might be in order.
# The exceptions structure below is a hash keyed by the module
MULTICREATE_DEBUG
/],
},
- 'DBIx::Class::Storage::TxnScopeGuard' => {
- ignore => [qw/
- IS_BROKEN_PERL
- /],
- },
'DBIx::Class::FilterColumn' => {
ignore => [qw/
new
$ex_lookup->{$re} = $ex;
}
-my @modules = sort { $a cmp $b } (Test::Pod::Coverage::all_modules());
+my @modules = sort { $a cmp $b } Test::Pod::Coverage::all_modules('lib');
foreach my $module (@modules) {
SKIP: {
--- /dev/null
+use warnings;
+use strict;
+
+use Test::More;
+use lib 't/lib';
+use DBICTest ':GlobalLock';
+
+unless ( DBIx::Class::Optional::Dependencies->req_ok_for ('test_strictures') ) {
+ my $missing = DBIx::Class::Optional::Dependencies->req_missing_for ('test_strictures');
+ $ENV{RELEASE_TESTING}
+ ? die ("Failed to load release-testing module requirements: $missing")
+ : plan skip_all => "Test needs: $missing"
+}
+
+
+use File::Find;
+
+find({
+ wanted => sub {
+ -f $_ or return;
+ m/\.(?: pm | pl | t )$ /ix or return;
+
+ return if m{^(?:
+ maint/Makefile.PL.inc/.+ # all the maint inc snippets are auto-strictured
+ |
+ lib/DBIx/Class/Admin/Types.pm # MooseX::Types undetected
+ |
+ lib/DBIx/Class/Storage/DBI/Replicated/Types.pm # MooseX::Types undetected
+ |
+ lib/DBIx/Class/Storage/BlockRunner.pm # Moo undetected
+ |
+        t/lib/DBICTest/Util/OverrideRequire.pm # no strictures by design (load order sensitive)
+ )$}x;
+
+ my $f = $_;
+
+ Test::Strict::strict_ok($f);
+ Test::Strict::warnings_ok($f);
+
+ #Test::Strict::syntax_ok($f) if $f =~ /^ (?: lib )/x;
+ },
+ no_chdir => 1,
+}, (qw(lib t examples maint)) );
+
+done_testing;
--- /dev/null
+use warnings;
+use strict;
+
+use Test::More;
+use File::Glob 'bsd_glob';
+use lib 't/lib';
+use DBICTest ':GlobalLock';
+
+require DBIx::Class;
+unless ( DBIx::Class::Optional::Dependencies->req_ok_for ('test_whitespace') ) {
+ my $missing = DBIx::Class::Optional::Dependencies->req_missing_for ('test_whitespace');
+ $ENV{RELEASE_TESTING}
+ ? die ("Failed to load release-testing module requirements: $missing")
+ : plan skip_all => "Test needs: $missing"
+}
+
+# FIXME - temporary workaround for RT#82032, RT#82033
+# also add all scripts (no extension) and some extra extensions
+# we want to check
+{
+ no warnings 'redefine';
+ my $is_pm = sub {
+ $_[0] !~ /\./ || $_[0] =~ /\.(?:pm|pod|skip|sql|json|proto)$/i || $_[0] =~ /::/;
+ };
+
+ *Test::EOL::_is_perl_module = $is_pm;
+ *Test::NoTabs::_is_perl_module = $is_pm;
+}
+
+my @pl_targets = qw/t xt lib script examples maint/;
+Test::EOL::all_perl_files_ok({ trailing_whitespace => 1 }, @pl_targets);
+Test::NoTabs::all_perl_files_ok(@pl_targets);
+
+# check some non-"perl files" in the root separately
+# use .gitignore as a guide of what to skip
+# (or do not test at all if no .gitignore is found)
+if (open(my $gi, '<', '.gitignore')) {
+ my $skipnames;
+ while (my $ln = <$gi>) {
+ next if $ln =~ /^\s*$/;
+ chomp $ln;
+ $skipnames->{$_}++ for bsd_glob($ln);
+ }
+
+  # ... except META.yml, which we want to check anyway
+ delete $skipnames->{'META.yml'};
+
+ for my $fn (bsd_glob('*')) {
+ next if $skipnames->{$fn};
+ next unless -f $fn;
+ Test::EOL::eol_unix_ok($fn, { trailing_whitespace => 1 });
+ Test::NoTabs::notabs_ok($fn);
+ }
+}
+
+# FIXME - Test::NoTabs and Test::EOL declare 'no_plan' which conflicts with done_testing
+# https://github.com/schwern/test-more/issues/14
+#done_testing;