_build/
blib/
inc/
+lib/DBIx/Class/Optional/Dependencies.pod
pm_to_blib
t/var/
Revision history for DBIx::Class
+ - Fix regression where SQL files with comments were not
+ handled properly by ::Schema::Versioned.
+ - Fix regression on not properly throwing when $obj->relationship
+ is unresolvable
+ - Add has_relationship method to row objects
+ - Fix regression in set_column on PK-less objects
+ - Fix for SQLite to ignore the { for => ... } attribute
+
+0.08120 2010-02-24 08:58:00 (UTC)
+ - Make sure possibly overwritten deployment_statements methods in
+ schemas get called on $schema->deploy
+ - Fix count() with group_by aliased-function resultsets
+ - with_deferred_fk_checks() Oracle support
+ - Massive refactor and cleanup of primary key handling
+ - Fixed regression losing custom result_class (really this time)
+ (RT#54697)
+ - Fixed regression in DBIC SQLT::Parser failing with a classname
+ (as opposed to a schema object)
+ - Changes to Storage::DBI::Oracle to accommodate changes in latest
+ SQL::Translator (quote handling)
+ - Make sure deployment_statements is per-storage overridable
+ - Fix dbicadmin's (lack of) POD
+
+0.08119 2010-02-15 09:36:00 (UTC)
+ - Add $rs->is_ordered to test for existing order_by on a resultset
+ - Add as_subselect_rs to DBIC::ResultSet from
+ DBIC::Helper::ResultSet::VirtualView::as_virtual_view
+ - Refactor dbicadmin adding DDL manipulation capabilities
+ - New optional dependency manager to aid extension writers
+ - Depend on newest bugfixed Moose
+ - Make resultset chaining consistent wrt selection specification
+ - Storage::DBI::Replicated cleanup
+ - Fix autoinc PKs without an autoinc flag on Sybase ASA
+
+0.08118 2010-02-08 11:53:00 (UTC)
+ - Fix a bug causing UTF8 columns not to be decoded (RT#54395)
+ - Fix bug in One->Many->One prefetch-collapse handling (RT#54039)
+ - Cleanup handling of relationship accessor types
+
+0.08117 2010-02-05 17:10:00 (UTC)
- Perl 5.8.1 is now the minimum supported version
- DBIx::Class::InflateColumn::File entered deprecated state
- Massive optimization of the join resolution code - now joins
are not referenced by anything
- Subqueries no longer marked experimental
- Support for Informix RDBMS (limit/offset and auto-inc columns)
+ - Support for Sybase SQLAnywhere, both native and via ODBC
- might_have/has_one now warn if applied calling class's column
has is_nullable set to true.
- Fixed regression in deploy() with a {sources} table limit applied
parsed by SQL::Translator::Parser::DBIx::Class
- Stop the SQLT parser from auto-adding indexes identical to the
Primary Key
+ - InflateColumn::DateTime refactoring to allow fine grained method
+ overloads
- Fix ResultSetColumn improperly selecting more than the requested
column when +columns/+select is present
+ - Fix failure when update/delete of resultsets with complex WHERE
+ SQLA structures
- Fix regression in context sensitiveness of deployment_statements
- Fix regression resulting in overcomplicated query on
search_related from prefetching resultsets
-use inc::Module::Install 0.89;
+use inc::Module::Install 0.93;
use strict;
use warnings;
use POSIX ();
use 5.008001;
-# ****** DO NOT ADD OPTIONAL DEPENDENCIES. EVER. --mst ******
+use FindBin;
+use lib "$FindBin::Bin/lib";
-name 'DBIx-Class';
-perl_version '5.008001';
-all_from 'lib/DBIx/Class.pm';
+# adjust ENV for $AUTHOR system() calls
+use Config;
+$ENV{PERL5LIB} = join ($Config{path_sep}, @INC);
-test_requires 'Test::Builder' => '0.33';
-test_requires 'Test::Deep' => '0';
-test_requires 'Test::Exception' => '0';
-test_requires 'Test::More' => '0.92';
-test_requires 'Test::Warn' => '0.21';
-
-test_requires 'File::Temp' => '0.22';
-
-
-# Core
-requires 'List::Util' => '0';
-requires 'Scalar::Util' => '0';
-requires 'Storable' => '0';
-
-# Dependencies (keep in alphabetical order)
-requires 'Carp::Clan' => '6.0';
-requires 'Class::Accessor::Grouped' => '0.09002';
-requires 'Class::C3::Componentised' => '1.0005';
-requires 'Class::Inspector' => '1.24';
-requires 'Data::Page' => '2.00';
-requires 'DBD::SQLite' => '1.25';
-requires 'DBI' => '1.609';
-requires 'JSON::Any' => '1.18';
-requires 'MRO::Compat' => '0.09';
-requires 'Module::Find' => '0.06';
-requires 'Path::Class' => '0.16';
-requires 'Scope::Guard' => '0.03';
-requires 'SQL::Abstract' => '1.60';
-requires 'SQL::Abstract::Limit' => '0.13';
-requires 'Sub::Name' => '0.04';
-requires 'Data::Dumper::Concise' => '1.000';
-
-my %replication_requires = (
- 'Moose', => '0.90',
- 'MooseX::Types', => '0.21',
- 'namespace::clean' => '0.11',
- 'Hash::Merge', => '0.11',
-);
-
-#************************************************************************#
-# Make *ABSOLUTELY SURE* that nothing on this list is a real require, #
-# since every module listed in %force_requires_if_author is deleted #
-# from the final META.yml (thus will never make it as a CPAN dependency) #
-#************************************************************************#
-my %force_requires_if_author = (
- %replication_requires,
-
- # when changing also adjust $DBIx::Class::Storage::DBI::minimum_sqlt_version
- 'SQL::Translator' => '0.11002',
-
-# 'Module::Install::Pod::Inherit' => '0.01',
-
- # when changing also adjust version in t/02pod.t
- 'Test::Pod' => '1.26',
-
- # when changing also adjust version in t/06notabs.t
-# 'Test::NoTabs' => '0.9',
-
- # when changing also adjust version in t/07eol.t
-# 'Test::EOL' => '0.6',
-
- # when changing also adjust version in t/03podcoverage.t
- 'Test::Pod::Coverage' => '1.08',
- 'Pod::Coverage' => '0.20',
-
- # CDBI-compat related
- 'DBIx::ContextualFetch' => '0',
- 'Class::DBI::Plugin::DeepAbstractSearch' => '0',
- 'Class::Trigger' => '0',
- 'Time::Piece::MySQL' => '0',
- 'Clone' => '0',
- 'Date::Simple' => '3.03',
-
- # t/52cycle.t
- 'Test::Memory::Cycle' => '0',
- 'Devel::Cycle' => '1.10',
-
- # t/36datetime.t
- # t/60core.t
- 'DateTime::Format::SQLite' => '0',
-
- # t/96_is_deteministic_value.t
- 'DateTime::Format::Strptime'=> '0',
-
- # database-dependent reqs
- #
- $ENV{DBICTEST_PG_DSN}
- ? (
- 'Sys::SigAction' => '0',
- 'DBD::Pg' => '2.009002',
- 'DateTime::Format::Pg' => '0',
- ) : ()
- ,
-
- $ENV{DBICTEST_MYSQL_DSN}
- ? (
- 'DateTime::Format::MySQL' => '0',
- ) : ()
- ,
-
- $ENV{DBICTEST_ORA_DSN}
- ? (
- 'DateTime::Format::Oracle' => '0',
- ) : ()
- ,
-
- $ENV{DBICTEST_SYBASE_DSN}
- ? (
- 'DateTime::Format::Sybase' => 0,
- ) : ()
- ,
-);
-#************************************************************************#
-# Make ABSOLUTELY SURE that nothing on the list above is a real require, #
-# since every module listed in %force_requires_if_author is deleted #
-# from the final META.yml (thus will never make it as a CPAN dependency) #
-#************************************************************************#
+###
+### DO NOT ADD OPTIONAL DEPENDENCIES HERE, EVEN AS recommends()
+### All of them should go to DBIx::Class::Optional::Dependencies
+###
-install_script (qw|
- script/dbicadmin
-|);
-
-tests_recursive (qw|
- t
-|);
-
-resources 'IRC' => 'irc://irc.perl.org/#dbix-class';
-resources 'license' => 'http://dev.perl.org/licenses/';
-resources 'repository' => 'http://dev.catalyst.perl.org/repos/bast/DBIx-Class/';
-resources 'MailingList' => 'http://lists.scsys.co.uk/cgi-bin/mailman/listinfo/dbix-class';
-
-# Deprecated/internal modules need no exposure
-no_index directory => $_ for (qw|
- lib/DBIx/Class/SQLAHacks
- lib/DBIx/Class/PK/Auto
-|);
-no_index package => $_ for (qw/
- DBIx::Class::Storage::DBI::AmbiguousGlob
- DBIx::Class::SQLAHacks DBIx::Class::Storage::DBIHacks
-/);
-
-# re-build README and require extra modules for testing if we're in a checkout
+name 'DBIx-Class';
+perl_version '5.008001';
+all_from 'lib/DBIx/Class.pm';
+my $build_requires = {
+ 'DBD::SQLite' => '1.25',
+};
+
+my $test_requires = {
+ 'File::Temp' => '0.22',
+ 'Test::Builder' => '0.33',
+ 'Test::Exception' => '0',
+ 'Test::More' => '0.92',
+ 'Test::Warn' => '0.21',
+};
+
+my $runtime_requires = {
+ 'Carp::Clan' => '6.0',
+ 'Class::Accessor::Grouped' => '0.09002',
+ 'Class::C3::Componentised' => '1.0005',
+ 'Class::Inspector' => '1.24',
+ 'Data::Page' => '2.00',
+ 'DBI' => '1.609',
+ 'MRO::Compat' => '0.09',
+ 'Module::Find' => '0.06',
+ 'Path::Class' => '0.18',
+ 'SQL::Abstract' => '1.61',
+ 'SQL::Abstract::Limit' => '0.13',
+ 'Sub::Name' => '0.04',
+ 'Data::Dumper::Concise' => '1.000',
+ 'Scope::Guard' => '0.03',
+ 'Context::Preserve' => '0.01',
+};
+
+# this is so we can order requires alphabetically
+# copies are needed for author requires injection
+my $reqs = {
+ build_requires => { %$build_requires },
+ requires => { %$runtime_requires },
+ test_requires => { %$test_requires },
+};
+
+
+# require extra modules for testing if we're in a checkout
if ($Module::Install::AUTHOR) {
warn <<'EOW';
******************************************************************************
EOW
- foreach my $module (sort keys %force_requires_if_author) {
- build_requires ($module => $force_requires_if_author{$module});
+ require DBIx::Class::Optional::Dependencies;
+ $reqs->{test_requires} = {
+ %{$reqs->{test_requires}},
+ %{DBIx::Class::Optional::Dependencies->_all_optional_requirements},
+ };
+}
+
+# compose final req list, for alphabetical ordering
+my %final_req;
+for my $rtype (keys %$reqs) {
+ for my $mod (keys %{$reqs->{$rtype}} ) {
+
+ # sanity check req duplications
+ if ($final_req{$mod}) {
+ die "$mod specified as both a '$rtype' and a '$final_req{$mod}[0]'\n";
+ }
+
+ $final_req{$mod} = [ $rtype, $reqs->{$rtype}{$mod}||0 ],
}
+}
+
+# actual require
+for my $mod (sort keys %final_req) {
+ my ($rtype, $ver) = @{$final_req{$mod}};
+ no strict 'refs';
+ $rtype->($mod, $ver);
+}
+
+auto_install();
+
+# re-create various autogenerated documentation bits
+if ($Module::Install::AUTHOR) {
print "Regenerating README\n";
system('pod2text lib/DBIx/Class.pm > README');
unlink 'MANIFEST';
}
-# require Module::Install::Pod::Inherit;
-# PodInherit();
+ print "Regenerating Optional/Dependencies.pod\n";
+ require DBIx::Class::Optional::Dependencies;
+ DBIx::Class::Optional::Dependencies->_gen_pod;
+
+ # FIXME Disabled due to unsolved issues, ask theorbtwo
+ # require Module::Install::Pod::Inherit;
+ # PodInherit();
}
-auto_install();
+tests_recursive (qw|
+ t
+|);
+
+install_script (qw|
+ script/dbicadmin
+|);
+
+
+### Mangle makefile - read the comments for more info
+#
+postamble <<"EOP";
+
+# This will add an extra dep-spec for the distdir target,
+# which `make` will fold together in a first-come first-serve
+# fashion. What we do here is essentially adding extra
+# commands to execute once the distdir is assembled (via
+# create_distdir), but before control is returned to a higher
+# calling rule.
+distdir : dbicadmin_pod_inject
+
+# The pod self-injection code is in fact a hidden option in
+# dbicadmin itself
+dbicadmin_pod_inject :
+\tcd \$(DISTVNAME) && \$(ABSPERL) -Ilib script/dbicadmin --selfinject-pod
+
+# Regenerate manifest before running create_distdir.
+create_distdir : manifest
+
+EOP
+
+
+
+resources 'IRC' => 'irc://irc.perl.org/#dbix-class';
+resources 'license' => 'http://dev.perl.org/licenses/';
+resources 'repository' => 'http://dev.catalyst.perl.org/repos/bast/DBIx-Class/';
+resources 'MailingList' => 'http://lists.scsys.co.uk/cgi-bin/mailman/listinfo/dbix-class';
+
+# Deprecated/internal modules need no exposure
+no_index directory => $_ for (qw|
+ lib/DBIx/Class/Admin
+ lib/DBIx/Class/SQLAHacks
+ lib/DBIx/Class/PK/Auto
+ lib/DBIx/Class/CDBICompat
+|);
+no_index package => $_ for (qw/
+ DBIx::Class::SQLAHacks DBIx::Class::Storage::DBIHacks
+/);
+
WriteAll();
+
# Re-write META.yml to _exclude_ all forced requires (we do not want to ship this)
if ($Module::Install::AUTHOR) {
+ # FIXME test_requires is not yet part of META
+ my %original_build_requires = ( %$build_requires, %$test_requires );
+
+ print "Regenerating META with author requires excluded\n";
Meta->{values}{build_requires} = [ grep
- { not exists $force_requires_if_author{$_->[0]} }
- ( @{Meta->{values}{build_requires}} )
+ { exists $original_build_requires{$_->[0]} }
+ ( @{Meta->{values}{build_requires}} )
];
Meta->write;
use MRO::Compat;
use mro 'c3';
+use DBIx::Class::Optional::Dependencies;
+
use vars qw($VERSION);
use base qw/DBIx::Class::Componentised Class::Accessor::Grouped/;
use DBIx::Class::StartupCheck;
# Always remember to do all digits for the version even if they're 0
# i.e. first release of 0.XX *must* be 0.XX000. This avoids fBSD ports
# brain damage and presumably various other packaging systems too
-$VERSION = '0.08115';
+$VERSION = '0.08120_1';
-$VERSION = eval $VERSION; # numify for warning-free dev releases
+$VERSION = eval $VERSION if $VERSION =~ /_/; # numify for warning-free dev releases
sub MODIFY_CODE_ATTRIBUTES {
my ($class,$code,@attrs) = @_;
The community can be found via:
- Mailing list: http://lists.scsys.co.uk/mailman/listinfo/dbix-class/
+=over
+
+=item * IRC: L<irc.perl.org#dbix-class (click for instant chatroom login)
+|http://mibbit.com/chat/#dbix-class@irc.perl.org>
+
+=item * Mailing list: L<http://lists.scsys.co.uk/mailman/listinfo/dbix-class>
- SVN: http://dev.catalyst.perl.org/repos/bast/DBIx-Class/
+=item * RT Bug Tracker: L<https://rt.cpan.org/Dist/Display.html?Queue=DBIx-Class>
- SVNWeb: http://dev.catalyst.perl.org/svnweb/bast/browse/DBIx-Class/
+=item * SVNWeb: L<http://dev.catalyst.perl.org/svnweb/bast/browse/DBIx-Class/0.08>
- IRC: irc.perl.org#dbix-class
+=item * SVN: L<http://dev.catalyst.perl.org/repos/bast/DBIx-Class/0.08>
+
+=back
=head1 SYNOPSIS
aherzog: Adam Herzog <adam@herzogdesigns.com>
+amoore: Andrew Moore <amoore@cpan.org>
+
andyg: Andy Grundman <andy@hybridized.org>
ank: Andres Kievsky
bluefeet: Aran Deltac <bluefeet@cpan.org>
+boghead: Bryan Beeley <cpan@beeley.org>
+
bricas: Brian Cassidy <bricas@cpan.org>
brunov: Bruno Vecchi <vecchi.b@gmail.com>
debolaz: Anders Nor Berle <berle@cpan.org>
+dew: Dan Thomas <dan@godders.org>
+
dkubb: Dan Kubb <dan.kubb-cpan@onautopilot.com>
dnm: Justin Wheeler <jwheeler@datademons.com>
+dpetrov: Dimitar Petrov <mitakaa@gmail.com>
+
dwc: Daniel Westermann-Clark <danieltwc@cpan.org>
dyfrgi: Michael Leuchtenburg <michael@slashhome.org>
frew: Arthur Axel "fREW" Schmidt <frioux@gmail.com>
+goraxe: Gordon Irving <goraxe@cpan.org>
+
gphat: Cory G Watson <gphat@cpan.org>
groditi: Guillermo Roditi <groditi@cpan.org>
norbi: Norbert Buchmuller <norbi@nix.hu>
+nuba: Nuba Princigalli <nuba@cpan.org>
+
Numa: Dan Sully <daniel@cpan.org>
ovid: Curtis "Ovid" Poe <ovid@cpan.org>
=head1 COPYRIGHT
-Copyright (c) 2005 - 2009 the DBIx::Class L</AUTHOR> and L</CONTRIBUTORS>
+Copyright (c) 2005 - 2010 the DBIx::Class L</AUTHOR> and L</CONTRIBUTORS>
as listed above.
=head1 LICENSE
--- /dev/null
+package DBIx::Class::Admin;
+
+# check deps
+BEGIN {
+ use Carp::Clan qw/^DBIx::Class/;
+ use DBIx::Class;
+ croak('The following modules are required for DBIx::Class::Admin ' . DBIx::Class::Optional::Dependencies->req_missing_for ('admin') )
+ unless DBIx::Class::Optional::Dependencies->req_ok_for ('admin');
+}
+
+use Moose;
+use MooseX::Types::Moose qw/Int Str Any Bool/;
+use DBIx::Class::Admin::Types qw/DBICConnectInfo DBICHashRef/;
+use MooseX::Types::JSON qw(JSON);
+use MooseX::Types::Path::Class qw(Dir File);
+use Try::Tiny;
+use JSON::Any qw(DWIW XS JSON);
+use namespace::autoclean;
+
+=head1 NAME
+
+DBIx::Class::Admin - Administration object for schemas
+
+=head1 SYNOPSIS
+
+ $ dbicadmin --help
+
+ $ dbicadmin --schema=MyApp::Schema \
+ --connect='["dbi:SQLite:my.db", "", ""]' \
+ --deploy
+
+ $ dbicadmin --schema=MyApp::Schema --class=Employee \
+ --connect='["dbi:SQLite:my.db", "", ""]' \
+ --op=update --set='{ "name": "New_Employee" }'
+
+ use DBIx::Class::Admin;
+
+ # ddl manipulation
+ my $admin = DBIx::Class::Admin->new(
+ schema_class=> 'MY::Schema',
+ sql_dir=> $sql_dir,
+ connect_info => { dsn => $dsn, user => $user, password => $pass },
+ );
+
+ # create SQLite sql
+ $admin->create('SQLite');
+
+ # create SQL diff for an upgrade
+ $admin->create('SQLite', {} , "1.0");
+
+ # upgrade a database
+ $admin->upgrade();
+
+ # install a version for an unversioned schema
+ $admin->install("3.0");
+
+=head1 REQUIREMENTS
+
+The Admin interface has additional requirements not currently part of
+L<DBIx::Class>. See L<DBIx::Class::Optional::Dependencies> for more details.
+
+=head1 ATTRIBUTES
+
+=head2 schema_class
+
+the class of the schema to load
+
+=cut
+
+has 'schema_class' => (
+ is => 'ro',
+ isa => Str,
+);
+
+
+=head2 schema
+
+A pre-connected schema object can be provided for manipulation
+
+=cut
+
+has 'schema' => (
+ is => 'ro',
+ isa => 'DBIx::Class::Schema',
+ lazy_build => 1,
+);
+
+sub _build_schema {
+ my ($self) = @_;
+ require Class::MOP;
+ Class::MOP::load_class($self->schema_class);
+
+ $self->connect_info->[3]->{ignore_version} =1;
+ return $self->schema_class->connect(@{$self->connect_info()} ); # , $self->connect_info->[3], { ignore_version => 1} );
+}
+
+
+=head2 resultset
+
+a resultset from the schema to operate on
+
+=cut
+
+has 'resultset' => (
+ is => 'rw',
+ isa => Str,
+);
+
+
+=head2 where
+
+a hash ref or json string to be used for identifying data to manipulate
+
+=cut
+
+has 'where' => (
+ is => 'rw',
+ isa => DBICHashRef,
+ coerce => 1,
+);
+
+
+=head2 set
+
+a hash ref or json string to be used for inserting or updating data
+
+=cut
+
+has 'set' => (
+ is => 'rw',
+ isa => DBICHashRef,
+ coerce => 1,
+);
+
+
+=head2 attrs
+
+a hash ref or json string to be used for passing additional info to the ->search call
+
+=cut
+
+has 'attrs' => (
+ is => 'rw',
+ isa => DBICHashRef,
+ coerce => 1,
+);
+
+
+=head2 connect_info
+
+connect_info the arguments to provide to the connect call of the schema_class
+
+=cut
+
+has 'connect_info' => (
+ is => 'ro',
+ isa => DBICConnectInfo,
+ lazy_build => 1,
+ coerce => 1,
+);
+
+sub _build_connect_info {
+ my ($self) = @_;
+ return $self->_find_stanza($self->config, $self->config_stanza);
+}
+
+
+=head2 config_file
+
+config_file provide a config_file to read connect_info from, if this is provided
+config_stanza should also be provided to locate where the connect_info is in the config
+The config file should be in a format readable by Config::General
+
+=cut
+
+has config_file => (
+ is => 'ro',
+ isa => File,
+ coerce => 1,
+);
+
+
+=head2 config_stanza
+
+config_stanza for use with config_file should be a '::' delimited 'path' to the connection information
+designed for use with catalyst config files
+
+=cut
+
+has 'config_stanza' => (
+ is => 'ro',
+ isa => Str,
+);
+
+
+=head2 config
+
+Instead of loading from a file the configuration can be provided directly as a hash ref. Please note
+config_stanza will still be required.
+
+=cut
+
+has config => (
+ is => 'ro',
+ isa => DBICHashRef,
+ lazy_build => 1,
+);
+
+sub _build_config {
+ my ($self) = @_;
+
+ eval { require Config::Any }
+ or die ("Config::Any is required to parse the config file.\n");
+
+ my $cfg = Config::Any->load_files ( {files => [$self->config_file], use_ext =>1, flatten_to_hash=>1});
+
+ # just grab the config from the config file
+ $cfg = $cfg->{$self->config_file};
+ return $cfg;
+}
+
+
+=head2 sql_dir
+
+The location where sql ddl files should be created or found for an upgrade.
+
+=cut
+
+has 'sql_dir' => (
+ is => 'ro',
+ isa => Dir,
+ coerce => 1,
+);
+
+
+=head2 version
+
+Used for install, the version which will be 'installed' in the schema
+
+=cut
+
+has version => (
+ is => 'rw',
+ isa => Str,
+);
+
+
+=head2 preversion
+
+Previous version of the schema to create an upgrade diff for; the full sql for that version must be in the sql_dir
+
+=cut
+
+has preversion => (
+ is => 'rw',
+ isa => Str,
+);
+
+
+=head2 force
+
+Try and force certain operations.
+
+=cut
+
+has force => (
+ is => 'rw',
+ isa => Bool,
+);
+
+
+=head2 quiet
+
+Be less verbose about actions
+
+=cut
+
+has quiet => (
+ is => 'rw',
+ isa => Bool,
+);
+
+has '_confirm' => (
+ is => 'bare',
+ isa => Bool,
+);
+
+
+=head1 METHODS
+
+=head2 create
+
+=over 4
+
+=item Arguments: $sqlt_type, \%sqlt_args, $preversion
+
+=back
+
+C<create> will generate sql for the supplied schema_class in sql_dir. The flavour of sql to
+generate can be controlled by supplying a sqlt_type which should be a L<SQL::Translator> name.
+
+Arguments for L<SQL::Translator> can be supplied in the sqlt_args hashref.
+
+Optional preversion can be supplied to generate a diff to be used by upgrade.
+
+=cut
+
+sub create {
+ my ($self, $sqlt_type, $sqlt_args, $preversion) = @_;
+
+ $preversion ||= $self->preversion();
+
+ my $schema = $self->schema();
+ # create the dir if does not exist
+ $self->sql_dir->mkpath() if ( ! -d $self->sql_dir);
+
+ $schema->create_ddl_dir( $sqlt_type, (defined $schema->schema_version ? $schema->schema_version : ""), $self->sql_dir->stringify, $preversion, $sqlt_args );
+}
+
+
+=head2 upgrade
+
+=over 4
+
+=item Arguments: <none>
+
+=back
+
+upgrade will attempt to upgrade the connected database to the same version as the schema_class.
+B<MAKE SURE YOU BACKUP YOUR DB FIRST>
+
+=cut
+
+sub upgrade {
+ my ($self) = @_;
+ my $schema = $self->schema();
+ if (!$schema->get_db_version()) {
+ # schema is unversioned
+ $schema->throw_exception ("Could not determine current schema version, please either install() or deploy().\n");
+ } else {
+ my $ret = $schema->upgrade();
+ return $ret;
+ }
+}
+
+
+=head2 install
+
+=over 4
+
+=item Arguments: $version
+
+=back
+
+install is here to help when you want to move to L<DBIx::Class::Schema::Versioned> and have an existing
+database. install will take a version and add the version tracking tables and 'install' the version. No
+further ddl modification takes place. Setting the force attribute to a true value will allow overriding of
+already versioned databases.
+
+=cut
+
+sub install {
+ my ($self, $version) = @_;
+
+ my $schema = $self->schema();
+ $version ||= $self->version();
+ if (!$schema->get_db_version() ) {
+ # schema is unversioned
+ print "Going to install schema version\n";
+ my $ret = $schema->install($version);
+ print "return is $ret\n";
+ }
+ elsif ($schema->get_db_version() and $self->force ) {
+ carp "Forcing install may not be a good idea";
+ if($self->_confirm() ) {
+ $self->schema->_set_db_version({ version => $version});
+ }
+ }
+ else {
+ $schema->throw_exception ("Schema already has a version. Try upgrade instead.\n");
+ }
+
+}
+
+
+=head2 deploy
+
+=over 4
+
+=item Arguments: $args
+
+=back
+
+deploy will create the schema at the connected database. C<$args> are passed straight to
+L<DBIx::Class::Schema/deploy>.
+
+=cut
+
+sub deploy {
+ my ($self, $args) = @_;
+ my $schema = $self->schema();
+ if (!$schema->get_db_version() ) {
+ # schema is unversioned
+ $schema->deploy( $args, $self->sql_dir)
+ or $schema->throw_exception ("Could not deploy schema.\n"); # FIXME deploy() does not return 1/0 on success/fail
+ } else {
+ $schema->throw_exception("A versioned schema has already been deployed, try upgrade instead.\n");
+ }
+}
+
+=head2 insert
+
+=over 4
+
+=item Arguments: $rs, $set
+
+=back
+
+insert takes the name of a resultset from the schema_class and a hashref of data to insert
+into that resultset
+
+=cut
+
+sub insert {
+ my ($self, $rs, $set) = @_;
+
+ $rs ||= $self->resultset();
+ $set ||= $self->set();
+ my $resultset = $self->schema->resultset($rs);
+ my $obj = $resultset->create( $set );
+ print ''.ref($resultset).' ID: '.join(',',$obj->id())."\n" if (!$self->quiet);
+}
+
+
+=head2 update
+
+=over 4
+
+=item Arguments: $rs, $set, $where
+
+=back
+
+update takes the name of a resultset from the schema_class, a hashref of data to update and
+a where hash used to form the search for the rows to update.
+
+=cut
+
+sub update {
+ my ($self, $rs, $set, $where) = @_;
+
+ $rs ||= $self->resultset();
+ $where ||= $self->where();
+ $set ||= $self->set();
+ my $resultset = $self->schema->resultset($rs);
+ $resultset = $resultset->search( ($where||{}) );
+
+ my $count = $resultset->count();
+ print "This action will modify $count ".ref($resultset)." records.\n" if (!$self->quiet);
+
+ if ( $self->force || $self->_confirm() ) {
+ $resultset->update_all( $set );
+ }
+}
+
+
+=head2 delete
+
+=over 4
+
+=item Arguments: $rs, $where, $attrs
+
+=back
+
+delete takes the name of a resultset from the schema_class, a where hashref and attrs to pass to ->search.
+The found data is deleted and cannot be recovered.
+
+=cut
+
+sub delete {
+ my ($self, $rs, $where, $attrs) = @_;
+
+ $rs ||= $self->resultset();
+ $where ||= $self->where();
+ $attrs ||= $self->attrs();
+ my $resultset = $self->schema->resultset($rs);
+ $resultset = $resultset->search( ($where||{}), ($attrs||()) );
+
+ my $count = $resultset->count();
+ print "This action will delete $count ".ref($resultset)." records.\n" if (!$self->quiet);
+
+ if ( $self->force || $self->_confirm() ) {
+ $resultset->delete_all();
+ }
+}
+
+
+=head2 select
+
+=over 4
+
+=item Arguments: $rs, $where, $attrs
+
+=back
+
+select takes the name of a resultset from the schema_class, a where hashref and attrs to pass to ->search.
+The found data is returned in an array ref where the first row will be the columns list.
+
+=cut
+
+sub select {
+ my ($self, $rs, $where, $attrs) = @_;
+
+ $rs ||= $self->resultset();
+ $where ||= $self->where();
+ $attrs ||= $self->attrs();
+ my $resultset = $self->schema->resultset($rs);
+ $resultset = $resultset->search( ($where||{}), ($attrs||()) );
+
+ my @data;
+ my @columns = $resultset->result_source->columns();
+ push @data, [@columns];#
+
+ while (my $row = $resultset->next()) {
+ my @fields;
+ foreach my $column (@columns) {
+ push( @fields, $row->get_column($column) );
+ }
+ push @data, [@fields];
+ }
+
+ return \@data;
+}
+
+sub _confirm {
+ my ($self) = @_;
+ print "Are you sure you want to do this? (type YES to confirm) \n";
+ # mainly here for testing
+ return 1 if ($self->meta->get_attribute('_confirm')->get_value($self));
+ my $response = <STDIN>;
+ return 1 if ($response=~/^YES/);
+ return;
+}
+
+sub _find_stanza {
+ my ($self, $cfg, $stanza) = @_;
+ my @path = split /::/, $stanza;
+ while (my $path = shift @path) {
+ if (exists $cfg->{$path}) {
+ $cfg = $cfg->{$path};
+ }
+ else {
+ die ("Could not find $stanza in config, $path does not seem to exist.\n");
+ }
+ }
+ return $cfg;
+}
+
+=head1 AUTHOR
+
+See L<DBIx::Class/CONTRIBUTORS>.
+
+=head1 LICENSE
+
+You may distribute this code under the same terms as Perl itself
+
+=cut
+
+1;
--- /dev/null
+package # hide from PAUSE
+ DBIx::Class::Admin::Descriptive;
+
+use DBIx::Class::Admin::Usage;
+
+use base 'Getopt::Long::Descriptive';
+
+sub usage_class { 'DBIx::Class::Admin::Usage'; }
+
+1;
--- /dev/null
+package # hide from PAUSE
+ DBIx::Class::Admin::Types;
+
+use MooseX::Types -declare => [qw(
+ DBICConnectInfo
+ DBICArrayRef
+ DBICHashRef
+)];
+use MooseX::Types::Moose qw/Int HashRef ArrayRef Str Any Bool/;
+use MooseX::Types::JSON qw(JSON);
+
+subtype DBICArrayRef,
+ as ArrayRef;
+
+subtype DBICHashRef,
+ as HashRef;
+
+coerce DBICArrayRef,
+ from JSON,
+ via { _json_to_data ($_) };
+
+coerce DBICHashRef,
+ from JSON,
+ via { _json_to_data($_) };
+
+subtype DBICConnectInfo,
+ as ArrayRef;
+
+coerce DBICConnectInfo,
+ from JSON,
+ via { return _json_to_data($_) } ;
+
+coerce DBICConnectInfo,
+ from Str,
+ via { return _json_to_data($_) };
+
+coerce DBICConnectInfo,
+ from HashRef,
+ via { [ $_ ] };
+
+sub _json_to_data {
+ my ($json_str) = @_;
+ my $json = JSON::Any->new(allow_barekey => 1, allow_singlequote => 1, relaxed=>1);
+ my $ret = $json->jsonToObj($json_str);
+ return $ret;
+}
+
+1;
--- /dev/null
+package # hide from PAUSE
+ DBIx::Class::Admin::Usage;
+
+
+use base 'Getopt::Long::Descriptive::Usage';
+
+use base 'Class::Accessor::Grouped';
+
+use Class::C3;
+
+__PACKAGE__->mk_group_accessors('simple', 'synopsis', 'short_description');
+
+sub prog_name {
+ Getopt::Long::Descriptive::prog_name();
+}
+
+sub set_simple {
+ my ($self,$field, $value) = @_;
+ my $prog_name = prog_name();
+ $value =~ s/%c/$prog_name/g;
+ $self->next::method($field, $value);
+}
+
+
+
+# This returns the usage formatted as a POD document
+sub pod {
+ my ($self) = @_;
+ return join qq{\n}, $self->pod_leader_text, $self->pod_option_text, $self->pod_authorlic_text;
+}
+
+sub pod_leader_text {
+ my ($self) = @_;
+
+ return qq{=head1 NAME\n\n}.prog_name()." - ".$self->short_description().qq{\n\n}.
+ qq{=head1 SYNOPSIS\n\n}.$self->leader_text().qq{\n}.$self->synopsis().qq{\n\n};
+
+}
+
+sub pod_authorlic_text {
+
+ return join ("\n\n",
+ '=head1 AUTHORS',
+ 'See L<DBIx::Class/CONTRIBUTORS>',
+ '=head1 LICENSE',
+ 'You may distribute this code under the same terms as Perl itself',
+ '=cut',
+ );
+}
+
+
+sub pod_option_text {
+ my ($self) = @_;
+ my @options = @{ $self->{options} || [] };
+ my $string = q{};
+ return $string unless @options;
+
+ $string .= "=head1 OPTIONS\n\n=over\n\n";
+
+ foreach my $opt (@options) {
+ my $spec = $opt->{spec};
+ my $desc = $opt->{desc};
+ next if ($desc eq 'hidden');
+ if ($desc eq 'spacer') {
+ $string .= "=back\n\n=head2 $spec\n\n=cut\n\n=over\n\n";
+ next;
+ }
+
+ $spec = Getopt::Long::Descriptive->_strip_assignment($spec);
+ $string .= "=item " . join " or ", map { length > 1 ? "B<--$_>" : "B<-$_>" }
+ split /\|/, $spec;
+ $string .= "\n\n$desc\n\n=cut\n\n";
+
+ }
+ $string .= "=back\n\n";
+ return $string;
+}
+
+1;
=head2 Choosing Features
-In fact, this class is just a receipe containing all the features emulated.
+In fact, this class is just a recipe containing all the features emulated.
If you like, you can choose which features to emulate by building your
own class and loading it like this:
=item Relationships
-Relationships between tables (has_a, has_many...) must be delcared after all tables in the relationship have been declared. Thus the usual CDBI idiom of declaring columns and relationships for each class together will not work. They must instead be done like so:
+Relationships between tables (has_a, has_many...) must be declared after all tables in the relationship have been declared. Thus the usual CDBI idiom of declaring columns and relationships for each class together will not work. They must instead be done like so:
package Foo;
use base qw(Class::DBI);
=head1 SYNOPSIS
-See DBIx::Class::CDBICompat for directions for use.
+See DBIx::Class::CDBICompat for usage directions.
=head1 DESCRIPTION
=head1 SYNOPSIS
-See DBIx::Class::CDBICompat for directions for use.
+See DBIx::Class::CDBICompat for usage directions.
=head1 DESCRIPTION
=head1 SYNOPSIS
-See DBIx::Class::CDBICompat for directions for use.
+See DBIx::Class::CDBICompat for usage directions.
=head1 DESCRIPTION
=head1 SYNOPSIS
-See DBIx::Class::CDBICompat for directions for use.
+See DBIx::Class::CDBICompat for usage directions.
=head1 DESCRIPTION
It can be used, for example, to automatically convert to and from
L<DateTime> objects for your date and time fields. There's a
-conveniece component to actually do that though, try
+convenience component to actually do that though, try
L<DBIx::Class::InflateColumn::DateTime>.
It will handle all types of references except scalar references. It
$self->throw_exception("inflate_column needs attr hashref")
unless ref $attrs eq 'HASH';
$self->column_info($col)->{_inflate_info} = $attrs;
- $self->mk_group_accessors('inflated_column' => [$self->column_info($col)->{accessor} || $col, $col]);
+ my $acc = $self->column_info($col)->{accessor};
+ $self->mk_group_accessors('inflated_column' => [ (defined $acc ? $acc : $col), $col]);
return 1;
}
Fetch a column value in its inflated state. This is directly
analogous to L<DBIx::Class::Row/get_column> in that it only fetches a
-column already retreived from the database, and then inflates it.
+column already retrieved from the database, and then inflates it.
Throws an exception if the column requested is not an inflated column.
=cut
}
}
- my $timezone;
if ( defined $info->{extra}{timezone} ) {
carp "Putting timezone into extra => { timezone => '...' } has been deprecated, ".
"please put it directly into the '$column' column definition.";
- $timezone = $info->{extra}{timezone};
+ $info->{timezone} = $info->{extra}{timezone} unless defined $info->{timezone};
}
- my $locale;
if ( defined $info->{extra}{locale} ) {
carp "Putting locale into extra => { locale => '...' } has been deprecated, ".
"please put it directly into the '$column' column definition.";
- $locale = $info->{extra}{locale};
+ $info->{locale} = $info->{extra}{locale} unless defined $info->{locale};
}
- $locale = $info->{locale} if defined $info->{locale};
- $timezone = $info->{timezone} if defined $info->{timezone};
-
my $undef_if_invalid = $info->{datetime_undef_if_invalid};
if ($type eq 'datetime' || $type eq 'date' || $type eq 'timestamp') {
$self->throw_exception ("Error while inflating ${value} for ${column} on ${self}: $err");
}
- $dt->set_time_zone($timezone) if $timezone;
- $dt->set_locale($locale) if $locale;
- return $dt;
+ return $obj->_post_inflate_datetime( $dt, \%info );
},
deflate => sub {
my ($value, $obj) = @_;
- if ($timezone) {
- carp "You're using a floating timezone, please see the documentation of"
- . " DBIx::Class::InflateColumn::DateTime for an explanation"
- if ref( $value->time_zone ) eq 'DateTime::TimeZone::Floating'
- and not $info{floating_tz_ok}
- and not $ENV{DBIC_FLOATING_TZ_OK};
- $value->set_time_zone($timezone);
- $value->set_locale($locale) if $locale;
- }
+
+ $value = $obj->_pre_deflate_datetime( $value, \%info );
$obj->_deflate_from_datetime( $value, \%info );
},
}
shift->result_source->storage->datetime_parser (@_);
}
+# _post_inflate_datetime ($dt, \%info)
+#
+# Hook applied to every freshly inflated DateTime object: sets the time
+# zone and locale from the column info hash when they are configured.
+# Returns the (possibly modified) DateTime. Storage-specific components
+# may override this for extra post-inflation tweaks.
+sub _post_inflate_datetime {
+ my( $self, $dt, $info ) = @_;
+
+ $dt->set_time_zone($info->{timezone}) if defined $info->{timezone};
+ $dt->set_locale($info->{locale}) if defined $info->{locale};
+
+ return $dt;
+}
+
+# _pre_deflate_datetime ($dt, \%info)
+#
+# Hook applied to a DateTime object just before deflation back to the
+# column's string form. When the column declares a timezone it is forced
+# onto the object; a warning is emitted if the object currently carries
+# a floating timezone (silenced by the floating_tz_ok column flag or the
+# DBIC_FLOATING_TZ_OK env var). The locale is applied likewise.
+# Returns the (possibly modified) DateTime.
+sub _pre_deflate_datetime {
+ my( $self, $dt, $info ) = @_;
+
+ if (defined $info->{timezone}) {
+ carp "You're using a floating timezone, please see the documentation of"
+ . " DBIx::Class::InflateColumn::DateTime for an explanation"
+ if ref( $dt->time_zone ) eq 'DateTime::TimeZone::Floating'
+ and not $info->{floating_tz_ok}
+ and not $ENV{DBIC_FLOATING_TZ_OK};
+
+ $dt->set_time_zone($info->{timezone});
+ }
+
+ $dt->set_locale($info->{locale}) if defined $info->{locale};
+
+ return $dt;
+}
+
1;
__END__
=head2 _file_column_callback ($file,$ret,$target)
-method made to be overridden for callback purposes.
+Method made to be overridden for callback purposes.
=cut
=head2 Experimental
-These components are under development, there interfaces may
+These components are under development, their interfaces may
change, they may not work, etc. So, use them if you want, but
be warned.
);
... and you'll get back a perfect L<DBIx::Class::ResultSet> (except, of course,
-that you cannot modify the rows it contains, ie. cannot call L</update>,
+that you cannot modify the rows it contains, e.g. cannot call L</update>,
L</delete>, ... on it).
Note that you cannot have bind parameters unless is_virtual is set to true.
# SELECT name name, LENGTH( name )
# FROM artist
-Note that the C<as> attribute B<has absolutely nothing to do> with the sql
+Note that the C<as> attribute B<has absolutely nothing to do> with the SQL
syntax C< SELECT foo AS bar > (see the documentation in
L<DBIx::Class::ResultSet/ATTRIBUTES>). You can control the C<AS> part of the
generated SQL via the C<-as> field attribute as follows:
artist_id => { 'IN' => $inside_rs->get_column('id')->as_query },
});
-The usual operators ( =, !=, IN, NOT IN, etc) are supported.
+The usual operators ( =, !=, IN, NOT IN, etc.) are supported.
B<NOTE>: You have to explicitly use '=' when doing an equality comparison.
The following will B<not> work:
Using SQL functions on the left hand side of a comparison is generally not a
good idea since it requires a scan of the entire table. (Unless your RDBMS
-supports indexes on expressions - including return values of functions -, and
+supports indexes on expressions - including return values of functions - and
you create an index on the return value of the function in question.) However,
it can be accomplished with C<DBIx::Class> when necessary.
package My::App::Schema;
- use base DBIx::Class::Schema;
+ use base 'DBIx::Class::Schema';
# load subclassed classes from My::App::Schema::Result/ResultSet
__PACKAGE__->load_namespaces;
use strict;
use warnings;
- use base My::Shared::Model::Result::Baz;
+ use base 'My::Shared::Model::Result::Baz';
# WARNING: Make sure you call table() again in your subclass,
# otherwise DBIx::Class::ResultSourceProxy::Table will not be called
for admin. We would like like to give the admin users
objects (L<DBIx::Class::Row>) the same methods as a regular user but
also special admin only methods. It doesn't make sense to create two
-seperate proxy-class files for this. We would be copying all the user
+separate proxy-class files for this. We would be copying all the user
methods into the Admin class. There is a cleaner way to accomplish
this.
my $schema = MySchema->connect("dbi:Pg:dbname=my_db");
# Start a transaction. Every database change from here on will only be
- # commited into the database if the eval block succeeds.
+ # committed into the database if the eval block succeeds.
eval {
$schema->txn_do(sub {
# SQL: BEGIN WORK;
};
if ($@) {
# There was an error while handling the $job. Rollback all changes
- # since the transaction started, including the already commited
+ # since the transaction started, including the already committed
# ('released') savepoints. There will be neither a new $job nor any
# $thing entry in the database.
Add the L<DBIx::Class::Schema::Versioned> schema component to your
Schema class. This will add a new table to your database called
C<dbix_class_schema_vesion> which will keep track of which version is installed
-and warn if the user trys to run a newer schema version than the
+and warn if the user tries to run a newer schema version than the
database thinks it has.
-Alternatively, you can send the conversion sql scripts to your
+Alternatively, you can send the conversion SQL scripts to your
customers as above.
=head2 Setting quoting for the generated SQL
}
);
-In conditions (eg. C<\%cond> in the L<DBIx::Class::ResultSet/search> family of
+In conditions (e.g. C<\%cond> in the L<DBIx::Class::ResultSet/search> family of
methods) you cannot directly use array references (since this is interpreted as
a list of values to be C<OR>ed), but you can use the following syntax to force
passing them as bind values:
title TEXT NOT NULL
);
-and create the sqlite database file:
+and create the SQLite database file:
sqlite3 example.db < example.sql
use strict;
my $schema = MyDatabase::Main->connect('dbi:SQLite:db/example.db');
- # for other DSNs, e.g. MySql, see the perldoc for the relevant dbd
+ # for other DSNs, e.g. MySQL, see the perldoc for the relevant dbd
# driver, e.g perldoc L<DBD::mysql>.
get_tracks_by_cd('Bad');
=head1 Notes
-A reference implentation of the database and scripts in this example
+A reference implementation of the database and scripts in this example
are available in the main distribution for DBIx::Class under the
directory F<t/examples/Schema>.
the tables are to be joined. The condition may contain as many fields
as you like. See L<DBIx::Class::Relationship::Base>.
-=item .. define a relatiopnship across an intermediate table? (many-to-many)
+=item .. define a relationship across an intermediate table? (many-to-many)
Read the documentation on L<DBIx::Class::Relationship/many_to_many>.
second one will use a default port of 5433, while L<DBD::Pg> is compiled with a
default port of 5432.
-You can chance the port setting in C<postgresql.conf>.
+You can change the port setting in C<postgresql.conf>.
=item I've lost or forgotten my mysql password
__PACKAGE__->load_components(qw/ Ordered /);
__PACKAGE__->position_column('rank');
+Ordered will refer to a field called 'position' unless otherwise directed. Here you are defining
+the ordering field to be named 'rank'. (NOTE: Insert errors may occur if you use the Ordered
+component but have not defined a position column and do not have a 'position' field in your row.)
+
Set the table for your class:
__PACKAGE__->table('album');
Note that L<DBIx::Class::Schema> does not cache connections for you. If you use
multiple connections, you need to do this manually.
-To execute some sql statements on every connect you can add them as an option in
+To execute some SQL statements on every connect you can add them as an option in
a special fifth argument to connect:
my $another_schema = My::Schema->connect(
=head2 Whole related objects
-To fetch entire related objects, eg CDs and all Track data, use the
+To fetch entire related objects, e.g. CDs and all Track data, use the
'prefetch' attribute:
$schema->resultset('CD')->search(
SELECT cd.ID, cd.Title, cd.Year, tracks.id, tracks.Name, tracks.Artist FROM CD JOIN Tracks ON CD.ID = tracks.CDID WHERE cd.Title = 'Funky CD' ORDER BY 'tracks.id';
The syntax of 'prefetch' is the same as 'join' and implies the
-joining, so no need to use both together.
+joining, so there is no need to use both together.
=head2 Subset of related fields
To perform joins using relations of the tables you are joining to, use
a hashref to indicate the join depth. This can theoretically go as
-deep as you like (warning, contrived examples!):
+deep as you like (warning: contrived examples!):
join => { room => { table => 'leg' } }
Methods should be documented in the files which also contain the code
for the method, or that file should be hidden from PAUSE completely,
in which case the methods are documented in the file which loads
-it. Methods may also be documented and refered to in files
+it. Methods may also be documented and referred to in files
representing the major objects or components on which they can be
called.
For example, L<DBIx::Class::Relationship> documents the methods
actually coded in the helper relationship classes like
DBIx::Class::Relationship::BelongsTo. The BelongsTo file itself is
-hidden from pause as it has no documentation. The accessors created by
+hidden from PAUSE as it has no documentation. The accessors created by
relationships should be mentioned in L<DBIx::Class::Row>, the major
object that they will be called on.
what the method returns.
The first item provides a list of all possible values for the
-arguments of the method in order, separated by C<, >, preceeded by the
+arguments of the method in order, separated by C<, >, preceded by the
text "Arguments: "
Example (for the belongs_to relationship):
=item *
The argument list is followed by some examples of how to use the
-method, using it's various types of arguments.
+method, using its various types of arguments.
The examples can also include ways to use the results if
-applicable. For instance if the documentation is for a relationship
+applicable. For instance, if the documentation is for a relationship
type, the examples can include how to call the resulting relation
accessor, how to use the relation name in a search and so on.
$schema->storage->debugfh(IO::File->new('/tmp/trace.out', 'w');
-Alternatively you can do this with the environment variable too:-
+Alternatively you can do this with the environment variable, too:-
export DBIC_TRACE="1=/tmp/trace.out"
There's likely a syntax error in the table class referred to elsewhere
in this error message. In particular make sure that the package
-declaration is correct, so for a schema C< MySchema > you need to
-specify a fully qualified namespace: C< package MySchema::MyTable; >
-for example.
+declaration is correct. For example, for a schema C< MySchema >
+you need to specify a fully qualified namespace: C< package MySchema::MyTable; >.
=head2 syntax error at or near "<something>" ...
=head2 column "foo DESC" does not exist ...
This can happen if you are still using the obsolete order hack, and also
-happen to turn on sql-quoting.
+happen to turn on SQL-quoting.
$rs->search( {}, { order_by => [ 'name DESC' ] } );
Fedora 8 - perl-5.8.8-41.fc8
RHEL5 - perl-5.8.8-15.el5_2.1
-The issue is due to perl doing an exhaustive search of blessed objects
+This issue is due to perl doing an exhaustive search of blessed objects
under certain circumstances. The problem shows up as performance
-degredation exponential to the number of L<DBIx::Class> row objects in
-memory, so can be unoticeable with certain data sets, but with huge
+degradation exponential to the number of L<DBIx::Class> row objects in
+memory, so can be unnoticeable with certain data sets, but with huge
performance impacts on other datasets.
-A pair of tests for susceptability to the issue, and performance effects
+A pair of tests for susceptibility to the issue and performance effects
of the bless/overload problem can be found in the L<DBIx::Class> test
-suite in the file C<t/99rh_perl_perf_bug.t>
+suite, in the C<t/99rh_perl_perf_bug.t> file.
Further information on this issue can be found in
L<https://bugzilla.redhat.com/show_bug.cgi?id=379791>,
=head2 Excessive Memory Allocation with TEXT/BLOB/etc. Columns and Large LongReadLen
-It has been observed, using L<DBD::ODBC>, that a creating a L<DBIx::Class::Row>
+It has been observed, using L<DBD::ODBC>, that creating a L<DBIx::Class::Row>
object which includes a column of data type TEXT/BLOB/etc. will allocate
LongReadLen bytes. This allocation does not leak, but if LongReadLen
is large in size, and many such row objects are created, e.g. as the
--- /dev/null
+package DBIx::Class::Optional::Dependencies;
+
+use warnings;
+use strict;
+
+use Carp;
+
+# NO EXTERNAL NON-5.8.1 CORE DEPENDENCIES EVER (e.g. C::A::G)
+# This module is to be loaded by Makefile.PL on a pristine system
+
+# POD is generated automatically by calling _gen_pod from the
+# Makefile.PL in $AUTHOR mode
+
+# Shared requirement sets, interpolated into several groups below.
+# Minimum Moose stack required by any Moose-based feature.
+my $moose_basic = {
+ 'Moose' => '0.98',
+ 'MooseX::Types' => '0.21',
+};
+
+# Everything the DBIx::Class::Admin library needs on top of Moose.
+my $admin_basic = {
+ %$moose_basic,
+ 'MooseX::Types::Path::Class' => '0.05',
+ 'MooseX::Types::JSON' => '0.02',
+ 'JSON::Any' => '1.22',
+ 'namespace::autoclean' => '0.09',
+};
+
+# Master registry of optional-dependency groups. Each group may carry:
+#   req => { 'Module::Name' => $min_version, ... }   (0/'0' = any version)
+#   pod => { title => ..., desc => ... }             (consumed by _gen_pod)
+# The rdbms_* groups only demand their drivers when the corresponding
+# DBICTEST_* environment variable is set at load time.
+# Fix: the deploy group's POD link read "deploymen_statements", which
+# produced a broken L<> link; the method is deployment_statements.
+my $reqs = {
+ dist => {
+ #'Module::Install::Pod::Inherit' => '0.01',
+ },
+
+ replicated => {
+ req => {
+ %$moose_basic,
+ 'namespace::clean' => '0.11',
+ 'Hash::Merge' => '0.12',
+ },
+ pod => {
+ title => 'Storage::Replicated',
+ desc => 'Modules required for L<DBIx::Class::Storage::DBI::Replicated>',
+ },
+ },
+
+ admin => {
+ req => {
+ %$admin_basic,
+ },
+ pod => {
+ title => 'DBIx::Class::Admin',
+ desc => 'Modules required for the DBIx::Class administrative library',
+ },
+ },
+
+ admin_script => {
+ req => {
+ %$moose_basic,
+ %$admin_basic,
+ 'Getopt::Long::Descriptive' => '0.081',
+ 'Text::CSV' => '1.16',
+ },
+ pod => {
+ title => 'dbicadmin',
+ desc => 'Modules required for the CLI DBIx::Class interface dbicadmin',
+ },
+ },
+
+ deploy => {
+ req => {
+ 'SQL::Translator' => '0.11005',
+ },
+ pod => {
+ title => 'Storage::DBI::deploy()',
+ desc => 'Modules required for L<DBIx::Class::Storage::DBI/deploy> and L<DBIx::Class::Storage::DBI/deployment_statements>',
+ },
+ },
+
+
+ test_pod => {
+ req => {
+ 'Test::Pod' => '1.41',
+ },
+ },
+
+ test_podcoverage => {
+ req => {
+ 'Test::Pod::Coverage' => '1.08',
+ 'Pod::Coverage' => '0.20',
+ },
+ },
+
+ test_notabs => {
+ req => {
+ #'Test::NoTabs' => '0.9',
+ },
+ },
+
+ test_eol => {
+ req => {
+ #'Test::EOL' => '0.6',
+ },
+ },
+
+ test_cycle => {
+ req => {
+ 'Test::Memory::Cycle' => '0',
+ 'Devel::Cycle' => '1.10',
+ },
+ },
+
+ test_dtrelated => {
+ req => {
+ # t/36datetime.t
+ # t/60core.t
+ 'DateTime::Format::SQLite' => '0',
+
+ # t/96_is_deteministic_value.t
+ 'DateTime::Format::Strptime'=> '0',
+
+ # t/inflate/datetime_mysql.t
+ # (doesn't need Mysql itself)
+ 'DateTime::Format::MySQL' => '0',
+
+ # t/inflate/datetime_pg.t
+ # (doesn't need PG itself)
+ 'DateTime::Format::Pg' => '0',
+ },
+ },
+
+ cdbicompat => {
+ req => {
+ 'DBIx::ContextualFetch' => '0',
+ 'Class::DBI::Plugin::DeepAbstractSearch' => '0',
+ 'Class::Trigger' => '0',
+ 'Time::Piece::MySQL' => '0',
+ 'Clone' => '0',
+ 'Date::Simple' => '3.03',
+ },
+ },
+
+ rdbms_pg => {
+ req => {
+ $ENV{DBICTEST_PG_DSN}
+ ? (
+ 'Sys::SigAction' => '0',
+ 'DBD::Pg' => '2.009002',
+ ) : ()
+ },
+ },
+
+ rdbms_mysql => {
+ req => {
+ $ENV{DBICTEST_MYSQL_DSN}
+ ? (
+ 'DBD::mysql' => '0',
+ ) : ()
+ },
+ },
+
+ rdbms_oracle => {
+ req => {
+ $ENV{DBICTEST_ORA_DSN}
+ ? (
+ 'DateTime::Format::Oracle' => '0',
+ ) : ()
+ },
+ },
+
+ rdbms_ase => {
+ req => {
+ $ENV{DBICTEST_SYBASE_DSN}
+ ? (
+ 'DateTime::Format::Sybase' => 0,
+ ) : ()
+ },
+ },
+
+ rdbms_asa => {
+ req => {
+ (scalar grep { $ENV{$_} } (qw/DBICTEST_SYBASE_ASA_DSN DBICTEST_SYBASE_ASA_ODBC_DSN/) )
+ ? (
+ 'DateTime::Format::Strptime' => 0,
+ ) : ()
+ },
+ },
+
+ rdbms_db2 => {
+ req => {
+ $ENV{DBICTEST_DB2_DSN}
+ ? (
+ 'DBD::DB2' => 0,
+ ) : ()
+ },
+ },
+
+};
+
+
+# _all_optional_requirements
+#
+# Flattens every group's req hash into one { Module => version } map.
+# Later groups overwrite earlier duplicates (key order is unsorted).
+sub _all_optional_requirements {
+ return { map { %{ $reqs->{$_}{req} || {} } } (keys %$reqs) };
+}
+
+# req_list_for ($group_name)
+#
+# Returns a shallow copy of the { Module => $min_version } hash for the
+# named group. Croaks when no group name is given or the group does not
+# exist. Copying prevents callers from mutating the master $reqs table.
+sub req_list_for {
+ my ($class, $group) = @_;
+
+ croak "req_list_for() expects a requirement group name"
+ unless $group;
+
+ my $deps = $reqs->{$group}{req}
+ or croak "Requirement group '$group' does not exist";
+
+ return { %$deps };
+}
+
+
+# Per-group cache of load results, populated lazily by _check_deps():
+#   $group => { status => 0|1, missing => $str, errorlist => \%errs }
+our %req_availability_cache;
+
+# req_ok_for ($group_name)
+#
+# Returns true when every module required by the group is present and
+# loadable, false otherwise. Croaks without a group name. The probe is
+# performed at most once per group per process (see the cache above).
+sub req_ok_for {
+ my ($class, $group) = @_;
+
+ croak "req_ok_for() expects a requirement group name"
+ unless $group;
+
+ $class->_check_deps ($group) unless $req_availability_cache{$group};
+
+ return $req_availability_cache{$group}{status};
+}
+
+# req_missing_for ($group_name)
+#
+# Returns a one-line human-readable list of the group's missing modules
+# (empty string when nothing is missing), suitable for embedding in a
+# larger error message. Croaks without a group name; results are cached.
+sub req_missing_for {
+ my ($class, $group) = @_;
+
+ croak "req_missing_for() expects a requirement group name"
+ unless $group;
+
+ $class->_check_deps ($group) unless $req_availability_cache{$group};
+
+ return $req_availability_cache{$group}{missing};
+}
+
+# req_errorlist_for ($group_name)
+#
+# Returns a hashref of { Module => $load_error } for every module in the
+# group that failed to load (empty hashref when all loaded). Croaks
+# without a group name; results are cached per process.
+sub req_errorlist_for {
+ my ($class, $group) = @_;
+
+ croak "req_errorlist_for() expects a requirement group name"
+ unless $group;
+
+ $class->_check_deps ($group) unless $req_availability_cache{$group};
+
+ return $req_availability_cache{$group}{errorlist};
+}
+
+# _check_deps ($group_name)
+#
+# Probes every module of the group and records the outcome in
+# %req_availability_cache. A versioned requirement is probed with a
+# string-eval'd "use Module VERSION ()" (deliberate: this is the only
+# way to perform a runtime minimum-version check); unversioned ones
+# with "require Module". Only hardcoded names from $reqs reach the
+# eval, never external input.
+sub _check_deps {
+ my ($class, $group) = @_;
+
+ my $deps = $class->req_list_for ($group);
+
+ my %errors;
+ for my $mod (keys %$deps) {
+ if (my $ver = $deps->{$mod}) {
+ eval "use $mod $ver ()";
+ }
+ else {
+ eval "require $mod";
+ }
+
+ $errors{$mod} = $@ if $@;
+ }
+
+ if (keys %errors) {
+ # Build the human-readable summary, e.g. "Foo >= 1.23, Bar"; point
+ # the user at this module's POD only for groups that have POD.
+ my $missing = join (', ', map { $deps->{$_} ? "$_ >= $deps->{$_}" : $_ } (sort keys %errors) );
+ $missing .= " (see $class for details)" if $reqs->{$group}{pod};
+ $req_availability_cache{$group} = {
+ status => 0,
+ errorlist => { %errors },
+ missing => $missing,
+ };
+ }
+ else {
+ $req_availability_cache{$group} = {
+ status => 1,
+ errorlist => {},
+ missing => '',
+ };
+ }
+}
+
+# This is to be called by the author only (automatically in Makefile.PL)
+#
+# _gen_pod
+#
+# Renders POD describing every requirement group in $reqs (NAME,
+# SYNOPSIS, one =head2 per documented group, METHODS, AUTHOR, LICENSE)
+# and writes it beside this module as Optional/Dependencies.pod,
+# croaking if the file cannot be opened. Not part of the public API.
+# Fixes: "onbly" -> "only" above; "occured" -> "occurred" in the
+# req_errorlist_for POD; completed the dangling sentence in the
+# CURRENT REQUIREMENT GROUPS section.
+sub _gen_pod {
+ my $class = shift;
+ my $modfn = __PACKAGE__ . '.pm';
+ $modfn =~ s/\:\:/\//g;
+
+ require DBIx::Class;
+ my $distver = DBIx::Class->VERSION;
+
+ my @chunks = (
+ <<"EOC",
+#########################################################################
+##################### A U T O G E N E R A T E D ########################
+#########################################################################
+#
+# The contents of this POD file are auto-generated. Any changes you make
+# will be lost. If you need to change the generated text edit _gen_pod()
+# at the end of $modfn
+#
+EOC
+ '=head1 NAME',
+ "$class - Optional module dependency specifications (for module authors)",
+ '=head1 SYNOPSIS (EXPERIMENTAL)',
+ <<EOS,
+B<THE USAGE SHOWN HERE IS EXPERIMENTAL>
+
+Somewhere in your build-file (e.g. L<Module::Install>'s Makefile.PL):
+
+ ...
+
+ configure_requires 'DBIx::Class' => '$distver';
+
+ require $class;
+
+ my \$deploy_deps = $class->req_list_for ('deploy');
+
+ for (keys %\$deploy_deps) {
+ requires \$_ => \$deploy_deps->{\$_};
+ }
+
+ ...
+
+Note that there are some caveats regarding C<configure_requires()>, more info
+can be found at L<Module::Install/configure_requires>
+EOS
+ '=head1 DESCRIPTION',
+ <<'EOD',
+Some of the less-frequently used features of L<DBIx::Class> have external
+module dependencies on their own. In order not to burden the average user
+with modules he will never use, these optional dependencies are not included
+in the base Makefile.PL. Instead an exception with a descriptive message is
+thrown when a specific feature is missing one or several modules required for
+its operation. This module is the central holding place for the current list
+of such dependencies, for DBIx::Class core authors, and DBIx::Class extension
+authors alike.
+EOD
+ '=head1 CURRENT REQUIREMENT GROUPS',
+ <<'EOD',
+Dependencies are organized in C<groups> and each group can list one or more
+required modules, with an optional minimum version (or 0 for any version).
+The group name can be used in the methods described below.
+EOD
+ );
+
+ for my $group (sort keys %$reqs) {
+ my $p = $reqs->{$group}{pod}
+ or next;
+
+ my $modlist = $reqs->{$group}{req}
+ or next;
+
+ next unless keys %$modlist;
+
+ push @chunks, (
+ "=head2 $p->{title}",
+ "$p->{desc}",
+ '=over',
+ ( map { "=item * $_" . ($modlist->{$_} ? " >= $modlist->{$_}" : '') } (sort keys %$modlist) ),
+ '=back',
+ "Requirement group: B<$group>",
+ );
+ }
+
+ push @chunks, (
+ '=head1 METHODS',
+ '=head2 req_list_for',
+ '=over',
+ '=item Arguments: $group_name',
+ '=item Returns: \%list_of_module_version_pairs',
+ '=back',
+ <<EOD,
+This method should be used by DBIx::Class extension authors, to determine the
+version of modules a specific feature requires in the B<current> version of
+DBIx::Class. See the L<SYNOPSIS|/SYNOPSIS (EXPERIMENTAL)> for a real-world
+example.
+EOD
+
+ '=head2 req_ok_for',
+ '=over',
+ '=item Arguments: $group_name',
+ '=item Returns: 1|0',
+ '=back',
+ 'Returns true or false depending on whether all modules required by C<$group_name> are present on the system and loadable',
+
+ '=head2 req_missing_for',
+ '=over',
+ '=item Arguments: $group_name',
+ '=item Returns: $error_message_string',
+ '=back',
+ <<EOD,
+Returns a single line string suitable for inclusion in larger error messages.
+This method would normally be used by DBIx::Class core-module author, to
+indicate to the user that he needs to install specific modules before he will
+be able to use a specific feature.
+
+For example if the requirements for C<replicated> are not available, the
+returned string would look like:
+
+ Moose >= 0.98, MooseX::Types >= 0.21, namespace::clean (see $class for details)
+
+The author is expected to prepend the necessary text to this message before
+returning the actual error seen by the user.
+EOD
+
+ '=head2 req_errorlist_for',
+ '=over',
+ '=item Arguments: $group_name',
+ '=item Returns: \%list_of_loaderrors_per_module',
+ '=back',
+ <<'EOD',
+Returns a hashref containing the actual errors that occurred while attempting
+to load each module in the requirement group.
+EOD
+ '=head1 AUTHOR',
+ 'See L<DBIx::Class/CONTRIBUTORS>.',
+ '=head1 LICENSE',
+ 'You may distribute this code under the same terms as Perl itself',
+ );
+
+ my $fn = __FILE__;
+ $fn =~ s/\.pm$/\.pod/;
+
+ open (my $fh, '>', $fn) or croak "Unable to write to $fn: $!";
+ print $fh join ("\n\n", @chunks);
+ close ($fh);
+}
+
+1;
This method specifies a value of L</position_column> which B<would
never be assigned to a row> during normal operation. When
a row is moved, its position is set to this value temporarily, so
-that any unique constrainst can not be violated. This value defaults
+that any unique constraints can not be violated. This value defaults
to 0, which should work for all cases except when your positions do
indeed start from 0.
if (grep { $_ eq $position_column } ( map { @$_ } (values %{{ $rsrc->unique_constraints }} ) ) ) {
- my @pcols = $rsrc->primary_columns;
+ my @pcols = $rsrc->_pri_cols;
my $cursor = $shift_rs->search ({}, { order_by => { "-$ord", $position_column }, columns => \@pcols } )->cursor;
my $rs = $self->result_source->resultset;
- while (my @pks = $cursor->next ) {
-
+ my @all_pks = $cursor->all;
+ while (my $pks = shift @all_pks) {
my $cond;
for my $i (0.. $#pcols) {
- $cond->{$pcols[$i]} = $pks[$i];
+ $cond->{$pcols[$i]} = $pks->[$i];
}
$rs->search($cond)->update ({ $position_column => \ "$position_column $op 1" } );
triggering any of the positioning integrity code).
Some day you might get confronted by datasets that have ambiguous
-positioning data (i.e. duplicate position values within the same group,
+positioning data (e.g. duplicate position values within the same group,
in a table without unique constraints). When manually fixing such data
keep in mind that you can not invoke L<DBIx::Class::Row/update> like
you normally would, as it will get confused by the wrong data before
=head2 Multiple Moves
-Be careful when issueing move_* methods to multiple objects. If
+Be careful when issuing move_* methods to multiple objects. If
you've pre-loaded the objects then when you move one of the objects
the position of the other object will not reflect their new value
until you reload them from the database - see
L<DBIx::Class::Row/discard_changes>.
There are times when you will want to move objects as groups, such
-as changeing the parent of several objects at once - this directly
+as changing the parent of several objects at once - this directly
conflicts with this problem. One solution is for us to write a
ResultSet class that supports a parent() method, for example. Another
solution is to somehow automagically modify the objects that exist
my ($self) = @_;
$self->throw_exception( "Can't call id() as a class method" )
unless ref $self;
- my @pk = $self->_ident_values;
- return (wantarray ? @pk : $pk[0]);
+ my @id_vals = $self->_ident_values;
+ return (wantarray ? @id_vals : $id_vals[0]);
}
sub _ident_values {
 my ($self) = @_;
- return (map { $self->{_column_data}{$_} } $self->primary_columns);
+ # Collect the current PK column values via get_column(), tracking any
+ # PK columns that are both undef and never loaded from storage.
+ my (@ids, @missing);
+
+ for ($self->_pri_cols) {
+ push @ids, $self->get_column($_);
+ push @missing, $_ if (! defined $ids[-1] and ! $self->has_column_loaded ($_) );
+ }
+
+ # An in-storage row whose PK parts were never fetched cannot be
+ # uniquely addressed - refuse to return a bogus identity.
+ if (@missing && $self->in_storage) {
+ $self->throw_exception (
+ 'Unable to uniquely identify row object with missing PK columns: '
+ . join (', ', @missing )
+ );
+ }
+
+ return @ids;
}
=head2 ID
$self->throw_exception( "Can't call ID() as a class method" )
unless ref $self;
return undef unless $self->in_storage;
- return $self->_create_ID(map { $_ => $self->{_column_data}{$_} }
- $self->primary_columns);
+ return $self->_create_ID(%{$self->ident_condition});
}
sub _create_ID {
- my ($self,%vals) = @_;
+ my ($self, %vals) = @_;
return undef unless 0 == grep { !defined } values %vals;
return join '|', ref $self || $self, $self->result_source->name,
map { $_ . '=' . $vals{$_} } sort keys %vals;
sub ident_condition {
 my ($self, $alias) = @_;
- my %cond;
+
+ # Pair each primary-key column with its current value; _ident_values
+ # already throws for in-storage rows with unloaded PK columns.
+ my @pks = $self->_pri_cols;
+ my @vals = $self->_ident_values;
+
+ my (%cond, @undef);
 my $prefix = defined $alias ? $alias.'.' : '';
- $cond{$prefix.$_} = $self->get_column($_) for $self->primary_columns;
+ for my $col (@pks) {
+ if (! defined ($cond{$prefix.$col} = shift @vals) ) {
+ push @undef, $col;
+ }
+ }
+
+ # A NULL PK value makes the condition unable to match a stored row;
+ # only a problem when the object claims to be in storage.
+ if (@undef && $self->in_storage) {
+ $self->throw_exception (
+ 'Unable to construct row object identity condition due to NULL PK columns: '
+ . join (', ', @undef)
+ );
+ }
+
 return \%cond;
}
you want to use the default value for it, but still want to set C<\%attrs>.
See L<DBIx::Class::Relationship::Base> for documentation on the
-attrubutes that are allowed in the C<\%attrs> argument.
+attributes that are allowed in the C<\%attrs> argument.
=head2 belongs_to
Creates a one-to-many relationship where the foreign class refers to
this class's primary key. This relationship refers to zero or more
-records in the foreign table (ie, a C<LEFT JOIN>). This relationship
+records in the foreign table (e.g. a C<LEFT JOIN>). This relationship
defaults to using the end of this classes namespace as the foreign key
in C<$related_class> to resolve the join, unless C<$their_fk_column>
specifies the foreign key column in C<$related_class> or C<cond>
use strict;
use warnings;
use Sub::Name ();
-use Class::Inspector ();
our %_pod_inherit_config =
(
__PACKAGE__->add_relationship('relname', 'Foreign::Class', $cond, $attrs);
+=head3 condition
+
The condition needs to be an L<SQL::Abstract>-style representation of the
join between the tables. When resolving the condition for use in a C<JOIN>,
keys using the pseudo-table C<foreign> are resolved to mean "the Table on the
To add an C<OR>ed condition, use an arrayref of hashrefs. See the
L<SQL::Abstract> documentation for more details.
-In addition to the
-L<standard ResultSet attributes|DBIx::Class::ResultSet/ATTRIBUTES>,
-the following attributes are also valid:
+=head3 attributes
+
+The L<standard ResultSet attributes|DBIx::Class::ResultSet/ATTRIBUTES> may
+be used as relationship attributes. In particular, the 'where' attribute is
+useful for filtering relationships:
+
+ __PACKAGE__->has_many( 'valid_users', 'MyApp::Schema::User',
+ { 'foreign.user_id' => 'self.user_id' },
+ { where => { valid => 1 } }
+ );
+
+The following attributes are also valid:
=over 4
my $query = ((@_ > 1) ? {@_} : shift);
my $source = $self->result_source;
- my $cond = $source->_resolve_condition(
- $rel_info->{cond}, $rel, $self
- );
+
+ # condition resolution may fail if an incomplete master-object prefetch
+ # is encountered - that is ok during prefetch construction (not yet in_storage)
+ my $cond = eval { $source->_resolve_condition( $rel_info->{cond}, $rel, $self ) };
+ if (my $err = $@) {
+ if ($self->in_storage) {
+ $self->throw_exception ($err);
+ }
+ else {
+ $cond = $DBIx::Class::ResultSource::UNRESOLVABLE_CONDITION;
+ }
+ }
+
if ($cond eq $DBIx::Class::ResultSource::UNRESOLVABLE_CONDITION) {
my $reverse = $source->reverse_relationship_info($rel);
foreach my $rev_rel (keys %$reverse) {
- if ($reverse->{$rev_rel}{attrs}{accessor} eq 'multi') {
+ if ($reverse->{$rev_rel}{attrs}{accessor} && $reverse->{$rev_rel}{attrs}{accessor} eq 'multi') {
$attrs->{related_objects}{$rev_rel} = [ $self ];
Scalar::Util::weaken($attrs->{related_object}{$rev_rel}[0]);
} else {
( $objects_rs ) = $rs->search_related_rs('relname', $cond, $attrs);
This method works exactly the same as search_related, except that
-it guarantees a restultset, even in list context.
+it guarantees a resultset, even in list context.
=cut
call set_from_related on the book.
This is called internally when you pass existing objects as values to
-L<DBIx::Class::ResultSet/create>, or pass an object to a belongs_to acessor.
+L<DBIx::Class::ResultSet/create>, or pass an object to a belongs_to accessor.
The columns are only set in the local copy of the object, call L</update> to
set them in the storage.
# no join condition or just a column name
if (!ref $cond) {
$class->ensure_class_loaded($f_class);
- my %f_primaries = map { $_ => 1 } eval { $f_class->primary_columns };
+ my %f_primaries = map { $_ => 1 } eval { $f_class->_pri_cols };
$class->throw_exception(
- "Can't infer join condition for ${rel} on ${class}; ".
- "unable to load ${f_class}: $@"
+ "Can't infer join condition for ${rel} on ${class}: $@"
) if $@;
my ($pri, $too_many) = keys %f_primaries;
$class->throw_exception(
"Can't infer join condition for ${rel} on ${class}; ".
- "${f_class} has no primary keys"
- ) unless defined $pri;
- $class->throw_exception(
- "Can't infer join condition for ${rel} on ${class}; ".
"${f_class} has multiple primary keys"
) if $too_many;
my @cascade = grep { $rels{$_}{attrs}{cascade_update} } keys %rels;
foreach my $rel (@cascade) {
next if (
+ $rels{$rel}{attrs}{accessor}
+ &&
$rels{$rel}{attrs}{accessor} eq 'single'
- && !exists($self->{_relationship_data}{$rel})
+ &&
+ !exists($self->{_relationship_data}{$rel})
);
$_->update for grep defined, $self->$rel;
}
unless (ref $cond) {
$class->ensure_class_loaded($f_class);
- my ($pri, $too_many) = $class->primary_columns;
+ my ($pri, $too_many) = eval { $class->_pri_cols };
+ $class->throw_exception(
+ "Can't infer join condition for ${rel} on ${class}: $@"
+ ) if $@;
$class->throw_exception(
"has_many can only infer join for a single primary key; ".
$class->ensure_class_loaded($f_class);
my $pri = $class->_get_primary_key;
-
+
$class->throw_exception(
"might_have/has_one needs a primary key to infer a join; ".
"${class} has none"
sub _get_primary_key {
my ( $class, $target_class ) = @_;
$target_class ||= $class;
- my ($pri, $too_many) = $target_class->primary_columns;
+ my ($pri, $too_many) = eval { $target_class->_pri_cols };
+ $class->throw_exception(
+ "Can't infer join condition on ${target_class}: $@"
+ ) if $@;
+
$class->throw_exception(
"might_have/has_one can only infer join for a single primary key; ".
"${class} has more"
=head1 SYNOPSIS
my $users_rs = $schema->resultset('User');
+ while( $user = $users_rs->next) {
+ print $user->username;
+ }
+
my $registered_users_rs = $schema->resultset('User')->search({ registered => 1 });
my @cds_in_2005 = $schema->resultset('CD')->search({ year => 2005 })->all();
=head1 OVERLOADING
If a resultset is used in a numeric context it returns the L</count>.
-However, if it is used in a booleand context it is always true. So if
+However, if it is used in a boolean context it is always true. So if
you want to check if a resultset has any results use C<if $rs != 0>.
C<if $rs> will always be true.
$rows = $self->get_cache;
}
+ # reset the selector list
+ if (List::Util::first { exists $attrs->{$_} } qw{columns select as}) {
+ delete @{$our_attrs}{qw{select as columns +select +as +columns include_columns}};
+ }
+
my $new_attrs = { %{$our_attrs}, %{$attrs} };
# merge new attrs into inherited
- foreach my $key (qw/join prefetch +select +as bind/) {
+ foreach my $key (qw/join prefetch +select +as +columns include_columns bind/) {
next unless exists $attrs->{$key};
$new_attrs->{$key} = $self->_merge_attr($our_attrs->{$key}, $attrs->{$key});
}
# in ::Relationship::Base::search_related (the row method), and furthermore
# the relationship is of the 'single' type. This means that the condition
# provided by the relationship (already attached to $self) is sufficient,
- # as there can be only one row in the databse that would satisfy the
+ # as there can be only one row in the database that would satisfy the
# relationship
}
else {
}
# Run the query
- my $rs = $self->search ($query, {result_class => $self->result_class, %$attrs});
+ my $rs = $self->search ($query, $attrs);
if (keys %{$rs->_resolved_attrs->{collapse}}) {
my $row = $rs->next;
carp "Query returned more than one row" if $rs->next;
=head2 search_related_rs
This method works exactly the same as search_related, except that
-it guarantees a restultset, even in list context.
+it guarantees a resultset, even in list context.
=cut
=item B<Note>
-As of 0.08100, this method enforces the assumption that the preceeding
+As of 0.08100, this method enforces the assumption that the preceding
query returns only one row. If more than one row is returned, you will receive
a warning:
if ($result_class) {
$self->ensure_class_loaded($result_class);
$self->_result_class($result_class);
+ $self->{attrs}{result_class} = $result_class if ref $self;
}
$self->_result_class;
}
# if we multi-prefetch we group_by primary keys only as this is what we would
# get out of the rs via ->next/->all. We *DO WANT* to clobber old group_by regardless
if ( keys %{$attrs->{collapse}} ) {
- $sub_attrs->{group_by} = [ map { "$attrs->{alias}.$_" } ($rsrc->primary_columns) ]
+ $sub_attrs->{group_by} = [ map { "$attrs->{alias}.$_" } ($rsrc->_pri_cols) ]
}
- $sub_attrs->{select} = $rsrc->storage->_subq_count_select ($rsrc, $sub_attrs);
+ $sub_attrs->{select} = $rsrc->storage->_subq_count_select ($rsrc, $attrs);
# this is so that the query can be simplified e.g.
# * ordering can be thrown away in things like Top limit
my $attrs = $self->_resolved_attrs_copy;
delete $attrs->{$_} for qw/collapse select as/;
- $attrs->{columns} = [ map { "$attrs->{alias}.$_" } ($self->result_source->primary_columns) ];
+ $attrs->{columns} = [ map { "$attrs->{alias}.$_" } ($self->result_source->_pri_cols) ];
if ($needs_group_by_subq) {
# make sure no group_by was supplied, or if there is one - make sure it matches
will not run DBIC cascade triggers. See L</delete_all> if you need triggers
to run. See also L<DBIx::Class::Row/delete>.
-Return value will be the amount of rows deleted; exact type of return value
+Return value will be the number of rows deleted; exact type of return value
is storage-dependent.
=cut
],
},
{ artistid => 5, name => 'Angsty-Whiny Girl', cds => [
- { title => 'My parents sold me to a record company' ,year => 2005 },
+ { title => 'My parents sold me to a record company', year => 2005 },
{ title => 'Why Am I So Ugly?', year => 2006 },
{ title => 'I Got Surgery and am now Popular', year => 2007 }
],
[qw/artistid name/],
[100, 'A Formally Unknown Singer'],
[101, 'A singer that jumped the shark two albums ago'],
- [102, 'An actually cool singer.'],
+ [102, 'An actually cool singer'],
]);
Please note an important effect on your data when choosing between void and
B<keyed on the relationship name>. If the relationship is of type C<multi>
(L<DBIx::Class::Relationship/has_many>) - pass an arrayref of hashrefs.
The process will correctly identify columns holding foreign keys, and will
-transparrently populate them from the keys of the corresponding relation.
+transparently populate them from the keys of the corresponding relation.
This can be applied recursively, and will work correctly for a structure
with an arbitrary depth and width, as long as the relationships actually
exist and the correct column data has been supplied.
return !!$self->{attrs}{page};
}
+=head2 is_ordered
+
+=over 4
+
+=item Arguments: none
+
+=item Return Value: true if the resultset has been ordered with C<order_by>.
+
+=back
+
+=cut
+
+sub is_ordered {
+ my ($self) = @_;
+ return scalar $self->result_source->storage->_parse_order_by($self->{attrs}{order_by});
+}
+
=head2 related_resultset
=over 4
->relname_to_table_alias($rel, $join_count);
# since this is search_related, and we already slid the select window inwards
- # (the select/as attrs were deleted in the beginning), we need to flip all
+ # (the select/as attrs were deleted in the beginning), we need to flip all
# left joins to inner, so we get the expected results
# read the comment on top of the actual function to see what this does
$attrs->{from} = $rsrc->schema->storage->_straight_join_to_node ($attrs->{from}, $alias);
return ($self->{attrs} || {})->{alias} || 'me';
}
+=head2 as_subselect_rs
+
+=over 4
+
+=item Arguments: none
+
+=item Return Value: $resultset
+
+=back
+
+Act as a barrier to SQL symbols. The resultset provided will be made into a
+"virtual view" by including it as a subquery within the from clause. From this
+point on, any joined tables are inaccessible to ->search on the resultset (as if
+it were simply where-filtered without joins). For example:
+
+ my $rs = $schema->resultset('Bar')->search({'x.name' => 'abc'},{ join => 'x' });
+
+ # 'x' now pollutes the query namespace
+
+ # So the following works as expected
+ my $ok_rs = $rs->search({'x.other' => 1});
+
+ # But this doesn't: instead of finding a 'Bar' related to two x rows (abc and
+ # def) we look for one row with contradictory terms and join in another table
+ # (aliased 'x_2') which we never use
+ my $broken_rs = $rs->search({'x.name' => 'def'});
+
+ my $rs2 = $rs->as_subselect_rs;
+
+ # doesn't work - 'x' is no longer accessible in $rs2, having been sealed away
+ my $not_joined_rs = $rs2->search({'x.other' => 1});
+
+  # works as expected: finds a 'Bar' row related to two x rows (abc and def)
+ my $correctly_joined_rs = $rs2->search({'x.name' => 'def'});
+
+Another example of when one might use this would be to select a subset of
+columns in a group by clause:
+
+ my $rs = $schema->resultset('Bar')->search(undef, {
+ group_by => [qw{ id foo_id baz_id }],
+ })->as_subselect_rs->search(undef, {
+ columns => [qw{ id foo_id }]
+ });
+
+In the above example normally columns would have to be equal to the group by,
+but because we isolated the group by into a subselect the above works.
+
+=cut
+
+sub as_subselect_rs {
+ my $self = shift;
+
+ return $self->result_source->resultset->search( undef, {
+ alias => $self->current_source_alias,
+ from => [{
+ $self->current_source_alias => $self->as_query,
+ -alias => $self->current_source_alias,
+ -source_handle => $self->result_source->handle,
+ }]
+ });
+}
+
# This code is called by search_related, and makes sure there
# is clear separation between the joins before, during, and
# after the relationship. This information is needed later
# build columns (as long as select isn't set) into a set of as/select hashes
unless ( $attrs->{select} ) {
- my @cols = ( ref($attrs->{columns}) eq 'ARRAY' )
- ? @{ delete $attrs->{columns}}
- : (
- ( delete $attrs->{columns} )
- ||
- $source->columns
- )
- ;
+ my @cols;
+ if ( ref $attrs->{columns} eq 'ARRAY' ) {
+ @cols = @{ delete $attrs->{columns}}
+ } elsif ( defined $attrs->{columns} ) {
+ @cols = delete $attrs->{columns}
+ } else {
+ @cols = $source->columns
+ }
- @colbits = map {
- ( ref($_) eq 'HASH' )
- ? $_
- : {
- (
- /^\Q${alias}.\E(.+)$/
- ? "$1"
- : "$_"
- )
- =>
- (
- /\./
- ? "$_"
- : "${alias}.$_"
- )
- }
- } @cols;
+ for (@cols) {
+ if ( ref $_ eq 'HASH' ) {
+ push @colbits, $_
+ } else {
+ my $key = /^\Q${alias}.\E(.+)$/
+ ? "$1"
+ : "$_";
+ my $value = /\./
+ ? "$_"
+ : "${alias}.$_";
+ push @colbits, { $key => $value };
+ }
+ }
}
# add the additional columns on
- foreach ( 'include_columns', '+columns' ) {
- push @colbits, map {
- ( ref($_) eq 'HASH' )
- ? $_
- : { ( split( /\./, $_ ) )[-1] => ( /\./ ? $_ : "${alias}.$_" ) }
- } ( ref($attrs->{$_}) eq 'ARRAY' ) ? @{ delete $attrs->{$_} } : delete $attrs->{$_} if ( $attrs->{$_} );
+ foreach (qw{include_columns +columns}) {
+ if ( $attrs->{$_} ) {
+ my @list = ( ref($attrs->{$_}) eq 'ARRAY' )
+ ? @{ delete $attrs->{$_} }
+ : delete $attrs->{$_};
+ for (@list) {
+ if ( ref($_) eq 'HASH' ) {
+ push @colbits, $_
+ } else {
+ my $key = ( split /\./, $_ )[-1];
+ my $value = ( /\./ ? $_ : "$alias.$_" );
+ push @colbits, { $key => $value };
+ }
+ }
+ }
}
# start with initial select items
( ref $attrs->{select} eq 'ARRAY' )
? [ @{ $attrs->{select} } ]
: [ $attrs->{select} ];
- $attrs->{as} = (
- $attrs->{as}
- ? (
- ref $attrs->{as} eq 'ARRAY'
- ? [ @{ $attrs->{as} } ]
- : [ $attrs->{as} ]
+
+ if ( $attrs->{as} ) {
+ $attrs->{as} =
+ (
+ ref $attrs->{as} eq 'ARRAY'
+ ? [ @{ $attrs->{as} } ]
+ : [ $attrs->{as} ]
)
- : [ map { m/^\Q${alias}.\E(.+)$/ ? $1 : $_ } @{ $attrs->{select} } ]
- );
+ } else {
+ $attrs->{as} = [ map {
+ m/^\Q${alias}.\E(.+)$/
+ ? $1
+ : $_
+ } @{ $attrs->{select} }
+ ]
+ }
}
else {
}
# now add colbits to select/as
- push( @{ $attrs->{select} }, map { values( %{$_} ) } @colbits );
- push( @{ $attrs->{as} }, map { keys( %{$_} ) } @colbits );
+ push @{ $attrs->{select} }, map values %{$_}, @colbits;
+ push @{ $attrs->{as} }, map keys %{$_}, @colbits;
- my $adds;
- if ( $adds = delete $attrs->{'+select'} ) {
+ if ( my $adds = delete $attrs->{'+select'} ) {
$adds = [$adds] unless ref $adds eq 'ARRAY';
- push(
- @{ $attrs->{select} },
- map { /\./ || ref $_ ? $_ : "${alias}.$_" } @$adds
- );
+ push @{ $attrs->{select} },
+ map { /\./ || ref $_ ? $_ : "$alias.$_" } @$adds;
}
- if ( $adds = delete $attrs->{'+as'} ) {
+ if ( my $adds = delete $attrs->{'+as'} ) {
$adds = [$adds] unless ref $adds eq 'ARRAY';
- push( @{ $attrs->{as} }, @$adds );
+ push @{ $attrs->{as} }, @$adds;
}
- $attrs->{from} ||= [ {
+ $attrs->{from} ||= [{
-source_handle => $source->handle,
-alias => $self->{attrs}{alias},
$self->{attrs}{alias} => $source->from,
- } ];
+ }];
if ( $attrs->{join} || $attrs->{prefetch} ) {
$join,
$alias,
{ %{ $attrs->{seen_join} || {} } },
- ($attrs->{seen_join} && keys %{$attrs->{seen_join}})
+ ( $attrs->{seen_join} && keys %{$attrs->{seen_join}})
? $attrs->{from}[-1][0]{-join_path}
: []
,
my %already_grouped = map { $_ => 1 } (@{$attrs->{group_by}});
my $storage = $self->result_source->schema->storage;
- my $sql_maker = $storage->sql_maker;
- local $sql_maker->{quote_char}; #disable quoting
my $rs_column_list = $storage->_resolve_column_info ($attrs->{from});
- my @chunks = $sql_maker->_order_by_chunks ($attrs->{order_by});
- for my $chunk (map { ref $_ ? @$_ : $_ } (@chunks) ) {
- $chunk =~ s/\s+ (?: ASC|DESC ) \s* $//ix;
+ for my $chunk ($storage->_parse_order_by($attrs->{order_by})) {
if ($rs_column_list->{$chunk} && not $already_grouped{$chunk}++) {
push @{$attrs->{group_by}}, $chunk;
}
will fail miserably.
To get around this limitation, you can supply literal SQL to your
-C<select> attibute that contains the C<AS alias> text, eg:
+C<select> attribute that contains the C<AS alias> text, e.g.
select => [\'myfield AS alias']
C<prefetch> can be used with the following relationship types: C<belongs_to>,
C<has_one> (or if you're using C<add_relationship>, any relationship declared
with an accessor type of 'single' or 'filter'). A more complex example that
-prefetches an artists cds, the tracks on those cds, and the tags associted
+prefetches an artists cds, the tracks on those cds, and the tags associated
with that artist is given below (assuming many-to-many from artists to tags):
my $rs = $schema->resultset('Artist')->search(
=back
-Specifes the maximum number of rows for direct retrieval or the number of
+Specifies the maximum number of rows for direct retrieval or the number of
rows per page if the page attribute or method is used.
=head2 offset
$rs->throw_exception('column must be supplied') unless $column;
my $orig_attrs = $rs->_resolved_attrs;
- my $new_parent_rs = $rs->search_rs;
- my $new_attrs = $new_parent_rs->{attrs} ||= {};
-
- # since what we do is actually chain to the original resultset, we need to throw
- # away all selectors (otherwise they'll chain)
- delete $new_attrs->{$_} for (qw/columns +columns select +select as +as cols include_columns/);
-
- # prefetch causes additional columns to be fetched, but we can not just make a new
- # rs via the _resolved_attrs trick - we need to retain the separation between
- # +select/+as and select/as. At the same time we want to preserve any joins that the
- # prefetch would otherwise generate.
- $new_attrs->{join} = $rs->_merge_attr( delete $new_attrs->{join}, delete $new_attrs->{prefetch} );
+ my $alias = $rs->current_source_alias;
# If $column can be found in the 'as' list of the parent resultset, use the
# corresponding element of its 'select' list (to keep any custom column
my $as_index = List::Util::first { ($as_list->[$_] || "") eq $column } 0..$#$as_list;
my $select = defined $as_index ? $select_list->[$as_index] : $column;
+ my $new_parent_rs;
+  # analyze the order_by, and see if it is done over a function/non-existent column
+ # if this is the case we will need to wrap a subquery since the result of RSC
+ # *must* be a single column select
+ my %collist = map
+ { $_ => 1, ($_ =~ /\./) ? () : ( "$alias.$_" => 1 ) }
+ ($rs->result_source->columns, $column)
+ ;
+ if (
+ scalar grep
+ { ! $collist{$_} }
+ ( $rs->result_source->schema->storage->_parse_order_by ($orig_attrs->{order_by} ) )
+ ) {
+ # nuke the prefetch before collapsing to sql
+ my $subq_rs = $rs->search;
+ $subq_rs->{attrs}{join} = $subq_rs->_merge_attr( $subq_rs->{attrs}{join}, delete $subq_rs->{attrs}{prefetch} );
+ $new_parent_rs = $subq_rs->as_subselect_rs;
+ }
+
+ $new_parent_rs ||= $rs->search_rs;
+ my $new_attrs = $new_parent_rs->{attrs} ||= {};
+
+ # prefetch causes additional columns to be fetched, but we can not just make a new
+ # rs via the _resolved_attrs trick - we need to retain the separation between
+ # +select/+as and select/as. At the same time we want to preserve any joins that the
+ # prefetch would otherwise generate.
+ $new_attrs->{join} = $rs->_merge_attr( $new_attrs->{join}, delete $new_attrs->{prefetch} );
+
# {collapse} would mean a has_many join was injected, which in turn means
# we need to group *IF WE CAN* (only if the column in question is unique)
- if (!$new_attrs->{group_by} && keys %{$orig_attrs->{collapse}}) {
+ if (!$orig_attrs->{group_by} && keys %{$orig_attrs->{collapse}}) {
# scan for a constraint that would contain our column only - that'd be proof
# enough it is unique
return @{shift->_primaries||[]};
}
+sub _pri_cols {
+ my $self = shift;
+ my @pcols = $self->primary_columns
+ or $self->throw_exception (sprintf(
+ 'Operation requires a primary key to be declared on %s via set_primary_key',
+ ref $self,
+ ));
+ return @pcols;
+}
+
=head2 add_unique_constraint
=over 4
return $found;
}
-sub resolve_join {
- carp 'resolve_join is a private method, stop calling it';
- my $self = shift;
- $self->_resolve_join (@_);
-}
-
# Returns the {from} structure used to express JOIN conditions
sub _resolve_join {
my ($self, $join, $alias, $seen, $jpath, $parent_force_left) = @_;
: $rel_info->{attrs}{join_type}
,
-join_path => [@$jpath, { $join => $as } ],
- -is_single => (List::Util::first { $rel_info->{attrs}{accessor} eq $_ } (qw/single filter/) ),
+ -is_single => (
+ $rel_info->{attrs}{accessor}
+ &&
+ List::Util::first { $rel_info->{attrs}{accessor} eq $_ } (qw/single filter/)
+ ),
-alias => $as,
-relation_chain_depth => $seen->{-relation_chain_depth} || 0,
},
}
}
-# Legacy code, needs to go entirely away (fully replaced by _resolve_prefetch)
-sub resolve_prefetch {
- carp 'resolve_prefetch is a private method, stop calling it';
-
- my ($self, $pre, $alias, $seen, $order, $collapse) = @_;
- $seen ||= {};
- if( ref $pre eq 'ARRAY' ) {
- return
- map { $self->resolve_prefetch( $_, $alias, $seen, $order, $collapse ) }
- @$pre;
- }
- elsif( ref $pre eq 'HASH' ) {
- my @ret =
- map {
- $self->resolve_prefetch($_, $alias, $seen, $order, $collapse),
- $self->related_source($_)->resolve_prefetch(
- $pre->{$_}, "${alias}.$_", $seen, $order, $collapse)
- } keys %$pre;
- return @ret;
- }
- elsif( ref $pre ) {
- $self->throw_exception(
- "don't know how to resolve prefetch reftype ".ref($pre));
- }
- else {
- my $count = ++$seen->{$pre};
- my $as = ($count > 1 ? "${pre}_${count}" : $pre);
- my $rel_info = $self->relationship_info( $pre );
- $self->throw_exception( $self->name . " has no such relationship '$pre'" )
- unless $rel_info;
- my $as_prefix = ($alias =~ /^.*?\.(.+)$/ ? $1.'.' : '');
- my $rel_source = $self->related_source($pre);
-
- if (exists $rel_info->{attrs}{accessor}
- && $rel_info->{attrs}{accessor} eq 'multi') {
- $self->throw_exception(
- "Can't prefetch has_many ${pre} (join cond too complex)")
- unless ref($rel_info->{cond}) eq 'HASH';
- my $dots = @{[$as_prefix =~ m/\./g]} + 1; # +1 to match the ".${as_prefix}"
- if (my ($fail) = grep { @{[$_ =~ m/\./g]} == $dots }
- keys %{$collapse}) {
- my ($last) = ($fail =~ /([^\.]+)$/);
- carp (
- "Prefetching multiple has_many rels ${last} and ${pre} "
- .(length($as_prefix)
- ? "at the same level (${as_prefix}) "
- : "at top level "
- )
- . 'will explode the number of row objects retrievable via ->next or ->all. '
- . 'Use at your own risk.'
- );
- }
- #my @col = map { (/^self\.(.+)$/ ? ("${as_prefix}.$1") : ()); }
- # values %{$rel_info->{cond}};
- $collapse->{".${as_prefix}${pre}"} = [ $rel_source->primary_columns ];
- # action at a distance. prepending the '.' allows simpler code
- # in ResultSet->_collapse_result
- my @key = map { (/^foreign\.(.+)$/ ? ($1) : ()); }
- keys %{$rel_info->{cond}};
- my @ord = (ref($rel_info->{attrs}{order_by}) eq 'ARRAY'
- ? @{$rel_info->{attrs}{order_by}}
- : (defined $rel_info->{attrs}{order_by}
- ? ($rel_info->{attrs}{order_by})
- : ()));
- push(@$order, map { "${as}.$_" } (@key, @ord));
- }
-
- return map { [ "${as}.$_", "${as_prefix}${pre}.$_", ] }
- $rel_source->columns;
- }
-}
# Accepts one or more relationships for the current source and returns an
# array of column names for each of those relationships. Column names are
my $as_prefix = ($alias =~ /^.*?\.(.+)$/ ? $1.'.' : '');
my $rel_source = $self->related_source($pre);
- if (exists $rel_info->{attrs}{accessor}
- && $rel_info->{attrs}{accessor} eq 'multi') {
+ if ($rel_info->{attrs}{accessor} && $rel_info->{attrs}{accessor} eq 'multi') {
$self->throw_exception(
"Can't prefetch has_many ${pre} (join cond too complex)")
unless ref($rel_info->{cond}) eq 'HASH';
keys %{$rel_info->{cond}};
my @ord = (ref($rel_info->{attrs}{order_by}) eq 'ARRAY'
? @{$rel_info->{attrs}{order_by}}
- : (defined $rel_info->{attrs}{order_by}
+
+ : (defined $rel_info->{attrs}{order_by}
? ($rel_info->{attrs}{order_by})
: ()));
push(@$order, map { "${as}.$_" } (@key, @ord));
__PACKAGE__->column_info_from_storage(1);
Enables the on-demand automatic loading of the above column
-metadata from storage as neccesary. This is *deprecated*, and
+metadata from storage as necessary. This is *deprecated*, and
should not be used. It will be removed before 1.0.
=head1 DESCRIPTION
-Table object that inherits from L<DBIx::Class::ResultSource>
+Table object that inherits from L<DBIx::Class::ResultSource>.
=head1 METHODS
=head2 STORABLE_thaw
Thaws frozen handle. Resets the internal schema reference to the package
-variable C<$thaw_schema>. The recomened way of setting this is to use
+variable C<$thaw_schema>. The recommended way of setting this is to use
C<< $schema->thaw($ice) >> which handles this for you.
=cut
shift->result_source_instance->primary_columns(@_);
}
+sub _pri_cols {
+ shift->result_source_instance->_pri_cols(@_);
+}
+
sub add_unique_constraint {
shift->result_source_instance->add_unique_constraint(@_);
}
shift->result_source_instance->relationship_info(@_);
}
+sub has_relationship {
+ shift->result_source_instance->has_relationship(@_);
+}
1;
$new->throw_exception("Can't do multi-create without result source")
unless $source;
my $info = $source->relationship_info($key);
- if ($info && $info->{attrs}{accessor}
- && $info->{attrs}{accessor} eq 'single')
- {
+ my $acc_type = $info->{attrs}{accessor} || '';
+ if ($acc_type eq 'single') {
my $rel_obj = delete $attrs->{$key};
if(!Scalar::Util::blessed($rel_obj)) {
$rel_obj = $new->__new_related_find_or_new_helper($key, $rel_obj);
$related->{$key} = $rel_obj;
next;
- } elsif ($info && $info->{attrs}{accessor}
- && $info->{attrs}{accessor} eq 'multi'
- && ref $attrs->{$key} eq 'ARRAY') {
+ }
+ elsif ($acc_type eq 'multi' && ref $attrs->{$key} eq 'ARRAY' ) {
my $others = delete $attrs->{$key};
my $total = @$others;
my @objects;
}
$related->{$key} = \@objects;
next;
- } elsif ($info && $info->{attrs}{accessor}
- && $info->{attrs}{accessor} eq 'filter')
- {
+ }
+ elsif ($acc_type eq 'filter') {
## 'filter' should disappear and get merged in with 'single' above!
my $rel_obj = delete $attrs->{$key};
if(!Scalar::Util::blessed($rel_obj)) {
to C<update>, e.g. ( { %{ $href } } )
If the values passed or any of the column values set on the object
-contain scalar references, eg:
+contain scalar references, e.g.:
$row->last_modified(\'NOW()');
# OR
sub update {
my ($self, $upd) = @_;
$self->throw_exception( "Not in database" ) unless $self->in_storage;
- my $ident_cond = $self->ident_condition;
- $self->throw_exception("Cannot safely update a row in a PK-less table")
+
+ my $ident_cond = $self->{_orig_ident} || $self->ident_condition;
+
+ $self->throw_exception('Unable to update a row with incomplete or no identity')
if ! keys %$ident_cond;
$self->set_inflated_columns($upd) if $upd;
my %to_update = $self->get_dirty_columns;
return $self unless keys %to_update;
my $rows = $self->result_source->storage->update(
- $self->result_source, \%to_update,
- $self->{_orig_ident} || $ident_cond
- );
+ $self->result_source, \%to_update, $ident_cond
+ );
if ($rows == 0) {
$self->throw_exception( "Can't update ${self}: row not found" );
} elsif ($rows > 1) {
}
$self->{_dirty_columns} = {};
$self->{related_resultsets} = {};
- undef $self->{_orig_ident};
+ delete $self->{_orig_ident};
return $self;
}
my $self = shift;
if (ref $self) {
$self->throw_exception( "Not in database" ) unless $self->in_storage;
+
my $ident_cond = $self->{_orig_ident} || $self->ident_condition;
- $self->throw_exception("Cannot safely delete a row in a PK-less table")
+ $self->throw_exception('Unable to delete a row with incomplete or no identity')
if ! keys %$ident_cond;
- foreach my $column (keys %$ident_cond) {
- $self->throw_exception("Can't delete the object unless it has loaded the primary keys")
- unless exists $self->{_column_data}{$column};
- }
+
$self->result_source->storage->delete(
- $self->result_source, $ident_cond);
+ $self->result_source, $ident_cond
+ );
+
+ delete $self->{_orig_ident};
$self->in_storage(undef);
- } else {
+ }
+ else {
$self->throw_exception("Can't do class delete without a ResultSource instance")
unless $self->can('result_source_instance');
my $attrs = @_ > 1 && ref $_[$#_] eq 'HASH' ? { %{pop(@_)} } : {};
for my $col (keys %loaded_colinfo) {
if (exists $loaded_colinfo{$col}{accessor}) {
my $acc = $loaded_colinfo{$col}{accessor};
- if (defined $acc) {
- $inflated{$col} = $self->$acc;
- }
+ $inflated{$col} = $self->$acc if defined $acc;
}
else {
$inflated{$col} = $self->$col;
sub set_column {
my ($self, $column, $new_value) = @_;
- $self->{_orig_ident} ||= $self->ident_condition;
- my $old_value = $self->get_column($column);
+ # if we can't get an ident condition on first try - mark the object as unidentifiable
+ $self->{_orig_ident} ||= (eval { $self->ident_condition }) || {};
+ my $old_value = $self->get_column($column);
$new_value = $self->store_column($column, $new_value);
my $dirty;
L<DBIx::Class::Relationship/has_many> key, and create the related
objects if necessary.
-Be aware that the input hashref might be edited in place, so dont rely
+Be aware that the input hashref might be edited in place, so don't rely
on it being the same after a call to C<set_inflated_columns>. If you
need to preserve the hashref, it is sufficient to pass a shallow copy
to C<set_inflated_columns>, e.g. ( { %{ $href } } )
foreach my $key (keys %$upd) {
if (ref $upd->{$key}) {
my $info = $self->relationship_info($key);
- if ($info && $info->{attrs}{accessor}
- && $info->{attrs}{accessor} eq 'single')
- {
+ my $acc_type = $info->{attrs}{accessor} || '';
+ if ($acc_type eq 'single') {
my $rel = delete $upd->{$key};
$self->set_from_related($key => $rel);
$self->{_relationship_data}{$key} = $rel;
- } elsif ($info && $info->{attrs}{accessor}
- && $info->{attrs}{accessor} eq 'multi') {
- $self->throw_exception(
- "Recursive update is not supported over relationships of type multi ($key)"
- );
}
- elsif ($self->has_column($key)
- && exists $self->column_info($key)->{_inflate_info})
- {
+ elsif ($acc_type eq 'multi') {
+ $self->throw_exception(
+ "Recursive update is not supported over relationships of type '$acc_type' ($key)"
+ );
+ }
+ elsif ($self->has_column($key) && exists $self->column_info($key)->{_inflate_info}) {
$self->set_inflated_column($key, delete $upd->{$key});
}
}
the new object.
Relationships will be followed by the copy procedure B<only> if the
-relationship specifes a true value for its
+relationship specifies a true value for its
L<cascade_copy|DBIx::Class::Relationship::Base> attribute. C<cascade_copy>
is set by default on C<has_many> relationships and unset on all others.
$new->insert;
# Its possible we'll have 2 relations to the same Source. We need to make
- # sure we don't try to insert the same row twice esle we'll violate unique
+ # sure we don't try to insert the same row twice else we'll violate unique
# constraints
my $rels_copied = {};
my ($source_handle) = $source;
if ($source->isa('DBIx::Class::ResultSourceHandle')) {
- $source = $source_handle->resolve
- } else {
- $source_handle = $source->handle
+ $source = $source_handle->resolve
+ }
+ else {
+ $source_handle = $source->handle
}
my $new = {
};
bless $new, (ref $class || $class);
- my $schema;
foreach my $pre (keys %{$prefetch||{}}) {
- my $pre_val = $prefetch->{$pre};
- my $pre_source = $source->related_source($pre);
- $class->throw_exception("Can't prefetch non-existent relationship ${pre}")
- unless $pre_source;
- if (ref($pre_val->[0]) eq 'ARRAY') { # multi
- my @pre_objects;
- for my $me_pref (@$pre_val) {
+ my $pre_source = $source->related_source($pre)
+ or $class->throw_exception("Can't prefetch non-existent relationship ${pre}");
+
+ my $accessor = $source->relationship_info($pre)->{attrs}{accessor}
+ or $class->throw_exception("No accessor for prefetched $pre");
+ my @pre_vals;
+ if (ref $prefetch->{$pre}[0] eq 'ARRAY') {
+ @pre_vals = @{$prefetch->{$pre}};
+ }
+ elsif ($accessor eq 'multi') {
+ $class->throw_exception("Implicit prefetch (via select/columns) not supported with accessor 'multi'");
+ }
+ else {
+ @pre_vals = $prefetch->{$pre};
+ }
+
+ my @pre_objects;
+ for my $me_pref (@pre_vals) {
+
+ # FIXME - this should not be necessary
# the collapser currently *could* return bogus elements with all
# columns set to undef
my $has_def;
push @pre_objects, $pre_source->result_class->inflate_result(
$pre_source, @$me_pref
);
- }
+ }
- $new->related_resultset($pre)->set_cache(\@pre_objects);
- } elsif (defined $pre_val->[0]) {
- my $fetched;
- unless ($pre_source->primary_columns == grep { exists $pre_val->[0]{$_}
- and !defined $pre_val->[0]{$_} } $pre_source->primary_columns)
- {
- $fetched = $pre_source->result_class->inflate_result(
- $pre_source, @{$pre_val});
- }
- my $accessor = $source->relationship_info($pre)->{attrs}{accessor};
- $class->throw_exception("No accessor for prefetched $pre")
- unless defined $accessor;
- if ($accessor eq 'single') {
- $new->{_relationship_data}{$pre} = $fetched;
- } elsif ($accessor eq 'filter') {
- $new->{_inflated_column}{$pre} = $fetched;
- } else {
- $class->throw_exception("Implicit prefetch (via select/columns) not supported with accessor '$accessor'");
- }
- $new->related_resultset($pre)->set_cache([ $fetched ]);
+ if ($accessor eq 'single') {
+ $new->{_relationship_data}{$pre} = $pre_objects[0];
}
+ elsif ($accessor eq 'filter') {
+ $new->{_inflated_column}{$pre} = $pre_objects[0];
+ }
+
+ $new->related_resultset($pre)->set_cache(\@pre_objects);
}
$new->in_storage (1);
$resultset = $resultset->search(undef, $attrs);
}
- return $resultset->find($self->{_orig_ident} || $self->ident_condition);
+ my $ident_cond = $self->{_orig_ident} || $self->ident_condition;
+
+ $self->throw_exception('Unable to requery a row with incomplete or no identity')
+ if ! keys %$ident_cond;
+
+ return $resultset->find($ident_cond);
}
=head2 discard_changes ($attrs)
$self->_sqlcase($func),
$self->_recurse_fields($args),
$as
- ? sprintf (' %s %s', $self->_sqlcase('as'), $as)
+ ? sprintf (' %s %s', $self->_sqlcase('as'), $self->_quote ($as) )
: ''
);
This module was originally written to support Oracle < 9i where ANSI joins
weren't supported at all, but became the module for Oracle >= 8 because
-Oracle's optimising of ANSI joins is horrible. (See:
-http://scsys.co.uk:8001/7495)
+Oracle's optimising of ANSI joins is horrible.
=head1 SYNOPSIS
--- /dev/null
+package # Hide from PAUSE
+ DBIx::Class::SQLAHacks::SQLite;
+
+use base qw( DBIx::Class::SQLAHacks );
+use Carp::Clan qw/^DBIx::Class|^SQL::Abstract/;
+
+#
+# SQLite does not understand SELECT ... FOR UPDATE
+# Adjust SQL here instead
+#
+sub select {
+ my $self = shift;
+ local $self->{_dbic_rs_attrs}{for} = undef;
+ return $self->SUPER::select (@_);
+}
+
+1;
With no arguments, this method uses L<Module::Find> to load all your
Result classes from a sub-namespace F<Result> under your Schema class'
-namespace. Eg. With a Schema of I<MyDB::Schema> all files in
+namespace, i.e. with a Schema of I<MyDB::Schema> all files in
I<MyDB::Schema::Result> are assumed to be Result classes.
It also finds all ResultSet classes in the namespace F<ResultSet> and
L<DBIx::Class::ResultSet/create>, and a arrayref of the resulting row
objects is returned.
-i.e.,
+e.g.
$schema->populate('Artist', [
[ qw/artistid name/ ],
It also attaches a corresponding L<DBIx::Class::ResultSource> object to the
new $schema object. If C<$additional_base_class> is given, the new composed
-classes will inherit from first the corresponding classe from the current
+classes will inherit from first the corresponding class from the current
schema then the base class.
For example, for a schema with My::Schema::CD and My::Schema::Artist classes,
Provided as the recommended way of thawing schema objects. You can call
C<Storable::thaw> directly if you wish, but the thawed objects will not have a
-reference to any schema, so are rather useless
+reference to any schema, so are rather useless.
=cut
=head2 freeze
-This doesn't actualy do anything more than call L<Storable/freeze>, it is just
-provided here for symetry.
+This doesn't actually do anything more than call L<Storable/freeze>, it is just
+provided here for symmetry.
=cut
=back
-Virtual method that should be overriden to create an upgrade file.
+Virtual method that should be overridden to create an upgrade file.
This is useful in the case of upgrading across multiple versions
to concatenate several files to create one upgrade file.
=back
-Virtual method that should be overriden to return an ordered list
+Virtual method that should be overridden to return an ordered list
of schema versions. This is then used to produce a set of steps to
upgrade through to achieve the required schema version.
then it is assumed you can do the upgrade as a single step). It
then iterates through the list of versions between the current db
version and the schema version applying one update at a time until
-all relvant updates are applied.
+all relevant updates are applied.
The individual update steps are performed by using
L</upgrade_single_step>, which will apply the update and also
compatibility between the old versions table (SchemaVersions) and the new one
(dbix_class_schema_versions).
-To avoid the checks on connect, set the env var DBIC_NO_VERSION_CHECK or alternatively you can set the ignore_version attr in the forth argument like so:
+To avoid the checks on connect, set the environment var DBIC_NO_VERSION_CHECK or alternatively you can set the ignore_version attr in the fourth argument like so:
my $schema = MyApp::Schema->connect(
$dsn,
return;
}
- $self->throw_exception($self->storage->_sqlt_version_error)
- if (not $self->storage->_sqlt_version_ok);
+ unless (DBIx::Class::Optional::Dependencies->req_ok_for ('deploy')) {
+ $self->throw_exception("Unable to proceed without " . DBIx::Class::Optional::Dependencies->req_missing_for ('deploy') );
+ }
my $db_tr = SQL::Translator->new({
add_drop_table => 1,
my @data = split /\n/, join '', <$fh>;
close $fh;
- @data = grep {
- $_ &&
- !/^--/ &&
- !/^(BEGIN|BEGIN TRANSACTION|COMMIT)/m
- } split /;/,
- join '', @data;
+ @data = split /;/,
+ join '',
+ grep { $_ &&
+ !/^--/ &&
+ !/^(BEGIN|BEGIN TRANSACTION|COMMIT)/mi }
+ @data;
return \@data;
}
triggers, incorrectly flagging those versions of perl to be buggy. A
more comprehensive check has been moved into the test suite in
C<t/99rh_perl_perf_bug.t> and further information about the bug has been
-put in L<DBIx::Class::Manual::Troubleshooting>
+put in L<DBIx::Class::Manual::Troubleshooting>.
Other checks may be added from time to time.
=head2 debugfh
Set or retrieve the filehandle used for trace/debug output. This should be
-an IO::Handle compatible ojbect (only the C<print> method is used. Initially
+an IO::Handle compatible object (only the C<print> method is used. Initially
set to be STDERR - although see information on the
L<DBIC_TRACE> environment variable.
use Data::Dumper::Concise();
use Sub::Name ();
-# what version of sqlt do we require if deploy() without a ddl_dir is invoked
-# when changing also adjust the corresponding author_require in Makefile.PL
-my $minimum_sqlt_version = '0.11002';
-
-
__PACKAGE__->mk_group_accessors('simple' =>
qw/_connect_info _dbi_connect_info _dbh _sql_maker _sql_maker_opts _conn_pid
_conn_tid transaction_depth _dbh_autocommit _driver_determined savepoints/
# Each of these methods need _determine_driver called before itself
# in order to function reliably. This is a purely DRY optimization
my @rdbms_specific_methods = qw/
+ deployment_statements
sqlt_type
build_datetime_parser
datetime_parser_type
In addition to the standard L<DBI|DBI/ATTRIBUTES_COMMON_TO_ALL_HANDLES>
L<connection|DBI/Database_Handle_Attributes> attributes, DBIx::Class recognizes
the following connection options. These options can be mixed in with your other
-L<DBI> connection attributes, or placed in a seperate hashref
+L<DBI> connection attributes, or placed in a separate hashref
(C<\%extra_attributes>) as shown above.
Every time C<connect_info> is invoked, any previous settings for
=item name_sep
This only needs to be used in conjunction with C<quote_char>, and is used to
-specify the charecter that seperates elements (schemas, tables, columns) from
+specify the character that separates elements (schemas, tables, columns) from
each other. In most cases this is simply a C<.>.
The consequences of not supplying this value is that L<SQL::Abstract>
=back
-Verifies that the the current database handle is active and ready to execute
-an SQL statement (i.e. the connection did not get stale, server is still
+Verifies that the current database handle is active and ready to execute
+an SQL statement (e.g. the connection did not get stale, server is still
answering, etc.) This method is used internally by L</dbh>.
=cut
my $rsrc = $rs->result_source;
# quick check if we got a sane rs on our hands
- my @pcols = $rsrc->primary_columns;
- unless (@pcols) {
- $self->throw_exception (
- sprintf (
- "You must declare primary key(s) on source '%s' (via set_primary_key) in order to update or delete complex resultsets",
- $rsrc->source_name || $rsrc->from
- )
- );
- }
+ my @pcols = $rsrc->_pri_cols;
my $sel = $rs->_resolved_attrs->{select};
$sel = [ $sel ] unless ref $sel eq 'ARRAY';
my ($rs, $op, $values) = @_;
my $rsrc = $rs->result_source;
- my @pcols = $rsrc->primary_columns;
+ my @pcols = $rsrc->_pri_cols;
my $guard = $self->txn_scope_guard;
&&
(ref $ident eq 'ARRAY' && @$ident > 1) # indicates a join
&&
- scalar $sql_maker->_order_by_chunks ($attrs->{order_by})
+ scalar $self->_parse_order_by ($attrs->{order_by})
) {
# the RNO limit dialect above mangles the SQL such that the join gets lost
# wrap a subquery here
#
sub _subq_count_select {
my ($self, $source, $rs_attrs) = @_;
- return $rs_attrs->{group_by} if $rs_attrs->{group_by};
+
+ if (my $groupby = $rs_attrs->{group_by}) {
+
+ my $avail_columns = $self->_resolve_column_info ($rs_attrs->{from});
+
+ my $sel_index;
+ for my $sel (@{$rs_attrs->{select}}) {
+ if (ref $sel eq 'HASH' and $sel->{-as}) {
+ $sel_index->{$sel->{-as}} = $sel;
+ }
+ }
+
+ my @selection;
+ for my $g_part (@$groupby) {
+ if (ref $g_part or $avail_columns->{$g_part}) {
+ push @selection, $g_part;
+ }
+ elsif ($sel_index->{$g_part}) {
+ push @selection, $sel_index->{$g_part};
+ }
+ else {
+ $self->throw_exception ("group_by criteria '$g_part' not contained within current resultset source(s)");
+ }
+ }
+
+ return \@selection;
+ }
my @pcols = map { join '.', $rs_attrs->{alias}, $_ } ($source->primary_columns);
return @pcols ? \@pcols : [ 1 ];
=cut
sub _dbh_last_insert_id {
- # All Storage's need to register their own _dbh_last_insert_id
- # the old SQLite-based method was highly inappropriate
+ my ($self, $dbh, $source, $col) = @_;
- my $self = shift;
- my $class = ref $self;
- $self->throw_exception (<<EOE);
+ my $id = eval { $dbh->last_insert_id (undef, undef, $source->name, $col) };
+
+ return $id if defined $id;
-No _dbh_last_insert_id() method found in $class.
-Since the method of obtaining the autoincrement id of the last insert
-operation varies greatly between different databases, this method must be
-individually implemented for every storage class.
-EOE
+ my $class = ref $self;
+ $self->throw_exception ("No storage specific _dbh_last_insert_id() method implemented in $class, and the generic DBI::last_insert_id() failed");
}
sub last_insert_id {
%{$sqltargs || {}}
};
- $self->throw_exception("Can't create a ddl file without SQL::Translator: " . $self->_sqlt_version_error)
- if !$self->_sqlt_version_ok;
+ unless (DBIx::Class::Optional::Dependencies->req_ok_for ('deploy')) {
+ $self->throw_exception("Can't create a ddl file without " . DBIx::Class::Optional::Dependencies->req_missing_for ('deploy') );
+ }
my $sqlt = SQL::Translator->new( $sqltargs );
return join('', @rows);
}
- $self->throw_exception("Can't deploy without either SQL::Translator or a ddl_dir: " . $self->_sqlt_version_error )
- if !$self->_sqlt_version_ok;
+ unless (DBIx::Class::Optional::Dependencies->req_ok_for ('deploy') ) {
+ $self->throw_exception("Can't deploy without a ddl_dir or " . DBIx::Class::Optional::Dependencies->req_missing_for ('deploy') );
+ }
# sources needs to be a parser arg, but for simplicty allow at top level
# coming in
}
$self->_query_end($line);
};
- my @statements = $self->deployment_statements($schema, $type, undef, $dir, { %{ $sqltargs || {} }, no_comments => 1 } );
+ my @statements = $schema->deployment_statements($type, undef, $dir, { %{ $sqltargs || {} }, no_comments => 1 } );
if (@statements > 1) {
foreach my $statement (@statements) {
$deploy->( $statement );
return;
}
-# SQLT version handling
-{
- my $_sqlt_version_ok; # private
- my $_sqlt_version_error; # private
-
- sub _sqlt_version_ok {
- if (!defined $_sqlt_version_ok) {
- eval "use SQL::Translator $minimum_sqlt_version";
- if ($@) {
- $_sqlt_version_ok = 0;
- $_sqlt_version_error = $@;
- }
- else {
- $_sqlt_version_ok = 1;
- }
- }
- return $_sqlt_version_ok;
- }
-
- sub _sqlt_version_error {
- shift->_sqlt_version_ok unless defined $_sqlt_version_ok;
- return $_sqlt_version_error;
- }
-
- sub _sqlt_minimum_version { $minimum_sqlt_version };
-}
-
=head2 relname_to_table_alias
=over 4
This hook is to allow specific L<DBIx::Class::Storage> drivers to change the
way these aliases are named.
-The default behavior is C<"$relname_$join_count" if $join_count > 1>, otherwise
-C<"$relname">.
+The default behavior is C<< "$relname_$join_count" if $join_count > 1 >>,
+otherwise C<"$relname">.
=cut
# some databases need this to stop spewing warnings
if (my $dbh = $self->_dbh) {
local $@;
- eval { $dbh->disconnect };
+ eval {
+ %{ $dbh->{CachedKids} } = ();
+ $dbh->disconnect;
+ };
}
$self->_dbh(undef);
+++ /dev/null
-package DBIx::Class::Storage::DBI::AmbiguousGlob;
-
-use strict;
-use warnings;
-
-use base 'DBIx::Class::Storage::DBI';
-use mro 'c3';
-
-=head1 NAME
-
-DBIx::Class::Storage::DBI::AmbiguousGlob - Storage component for RDBMS choking on count(*)
-
-=head1 DESCRIPTION
-
-Some servers choke on things like:
-
- COUNT(*) FROM (SELECT tab1.col, tab2.col FROM tab1 JOIN tab2 ... )
-
-claiming that col is a duplicate column (it loses the table specifiers by
-the time it gets to the *). Thus for any subquery count we select only the
-primary keys of the main table in the inner query. This hopefully still
-hits the indexes and keeps the server happy.
-
-At this point the only overriden method is C<_subq_count_select()>
-
-=cut
-
-sub _subq_count_select {
- my ($self, $source, $rs_attrs) = @_;
-
- return $rs_attrs->{group_by} if $rs_attrs->{group_by};
-
- my @pcols = map { join '.', $rs_attrs->{alias}, $_ } ($source->primary_columns);
- return @pcols ? \@pcols : [ 1 ];
-}
-
-=head1 AUTHORS
-
-See L<DBIx::Class/CONTRIBUTORS>
-
-=head1 LICENSE
-
-You may distribute this code under the same terms as Perl itself.
-
-=cut
-
-1;
throw implicit type conversion errors.
As long as a column L<data_type|DBIx::Class::ResultSource/add_columns> is
-defined, and it resolves to a base RDBMS native type via L</_native_data_type> as
+defined and resolves to a base RDBMS native type via L</_native_data_type> as
defined in your Storage driver, the placeholder for this column will be
converted to:
use strict;
use warnings;
-use base qw/DBIx::Class::Storage::DBI::AmbiguousGlob DBIx::Class::Storage::DBI/;
+use base qw/DBIx::Class::Storage::DBI/;
use mro 'c3';
use List::Util();
# see if this is an ordered subquery
my $attrs = $_[3];
- if ( scalar $self->sql_maker->_order_by_chunks ($attrs->{order_by}) ) {
+ if ( scalar $self->_parse_order_by ($attrs->{order_by}) ) {
$self->throw_exception(
'An ordered subselect encountered - this is not safe! Please see "Ordered Subselects" in DBIx::Class::Storage::DBI::MSSQL
') unless $attrs->{unsafe_subselect_ok};
Thus compromise between usability and perfection is the MSSQL-specific
L<resultset attribute|DBIx::Class::ResultSet/ATTRIBUTES> C<unsafe_subselect_ok>.
It is deliberately not possible to set this on the Storage level, as the user
-should inspect (and preferrably regression-test) the return of every such
+should inspect (and preferably regression-test) the return of every such
ResultSet individually. The example above would work if written like:
$rs->search ({}, {
If it is possible to rewrite the search() in a way that will avoid the need
for this flag - you are urged to do so. If DBIC internals insist that an
ordered subselect is necessary for an operation, and you believe there is a
-differnt/better way to get the same result - please file a bugreport.
+different/better way to get the same result - please file a bugreport.
=head1 AUTHOR
The storage class for any such RDBMS should inherit from this class, in order
to dramatically speed up update/delete operations on joined multipk resultsets.
-At this point the only overriden method is C<_multipk_update_delete()>
+At this point the only overridden method is C<_multipk_update_delete()>
=cut
my ($rs, $op, $values) = @_;
my $rsrc = $rs->result_source;
- my @pcols = $rsrc->primary_columns;
+ my @pcols = $rsrc->_pri_cols;
my $attrs = $rs->_resolved_attrs;
# naive check - this is an internal method after all, we should know what we are doing
}
}
-sub _dbh_last_insert_id {
- my ($self, $dbh, $source, $col) = @_;
-
- # punt: if there is no derived class for the specific backend, attempt
- # to use the DBI->last_insert_id, which may not be sufficient (see the
- # discussion of last_insert_id in perldoc DBI)
- return $dbh->last_insert_id(undef, undef, $source->from, $col);
-}
-
1;
=head1 NAME
=head1 IMPLEMENTATION NOTES
-MS Access supports the @@IDENTITY function for retriving the id of the latest inserted row.
+MS Access supports the @@IDENTITY function for retrieving the id of the latest inserted row.
@@IDENTITY is global to the connection, so to support the possibility of getting the last inserted
id for different tables, the insert() function stores the inserted id on a per table basis.
last_insert_id() then just returns the stored value.
--- /dev/null
+package DBIx::Class::Storage::DBI::ODBC::SQL_Anywhere;
+
+use strict;
+use warnings;
+use base qw/DBIx::Class::Storage::DBI::SQLAnywhere/;
+use mro 'c3';
+
+1;
+
+=head1 NAME
+
+DBIx::Class::Storage::DBI::ODBC::SQL_Anywhere - Driver for using Sybase SQL
+Anywhere through ODBC
+
+=head1 SYNOPSIS
+
+All functionality is provided by L<DBIx::Class::Storage::DBI::SQLAnywhere>, see
+that module for details.
+
+=head1 AUTHOR
+
+See L<DBIx::Class/AUTHOR> and L<DBIx::Class/CONTRIBUTORS>.
+
+=head1 LICENSE
+
+You may distribute this code under the same terms as Perl itself.
+
+=cut
use strict;
use warnings;
+use Scope::Guard ();
+use Context::Preserve ();
=head1 NAME
use base qw/DBIx::Class::Storage::DBI/;
use mro 'c3';
+sub deployment_statements {
+  my $self = shift;
+ my ($schema, $type, $version, $dir, $sqltargs, @rest) = @_;
+
+ $sqltargs ||= {};
+ my $quote_char = $self->schema->storage->sql_maker->quote_char;
+ $sqltargs->{quote_table_names} = $quote_char ? 1 : 0;
+ $sqltargs->{quote_field_names} = $quote_char ? 1 : 0;
+
+ my $oracle_version = eval { $self->_get_dbh->get_info(18) };
+
+ $sqltargs->{producer_args}{oracle_version} = $oracle_version;
+
+ $self->next::method($schema, $type, $version, $dir, $sqltargs, @rest);
+}
+
sub _dbh_last_insert_id {
my ($self, $dbh, $source, @columns) = @_;
my @ids = ();
sub _dbh_get_autoinc_seq {
my ($self, $dbh, $source, $col) = @_;
- # look up the correct sequence automatically
- my $sql = q{
- SELECT trigger_body FROM ALL_TRIGGERS t
- WHERE t.table_name = ?
- AND t.triggering_event = 'INSERT'
- AND t.status = 'ENABLED'
- };
-
- # trigger_body is a LONG
- local $dbh->{LongReadLen} = 64 * 1024 if ($dbh->{LongReadLen} < 64 * 1024);
-
- my $sth;
+ my $sql_maker = $self->sql_maker;
my $source_name;
- if ( ref $source->name ne 'SCALAR' ) {
- $source_name = $source->name;
+ if ( ref $source->name eq 'SCALAR' ) {
+ $source_name = ${$source->name};
}
else {
- $source_name = ${$source->name};
+ $source_name = $source->name;
}
+ $source_name = uc($source_name) unless $sql_maker->quote_char;
+
+ # trigger_body is a LONG
+ local $dbh->{LongReadLen} = 64 * 1024 if ($dbh->{LongReadLen} < 64 * 1024);
+
+ # disable default bindtype
+ local $sql_maker->{bindtype} = 'normal';
+
+ # look up the correct sequence automatically
+ my ( $schema, $table ) = $source_name =~ /(\w+)\.(\w+)/;
+ my ($sql, @bind) = $sql_maker->select (
+ 'ALL_TRIGGERS',
+ ['trigger_body'],
+ {
+ $schema ? (owner => $schema) : (),
+ table_name => $table || $source_name,
+ triggering_event => 'INSERT',
+ status => 'ENABLED',
+ },
+ );
+ my $sth = $dbh->prepare($sql);
+ $sth->execute (@bind);
- # check for fully-qualified name (eg. SCHEMA.TABLENAME)
- if ( my ( $schema, $table ) = $source_name =~ /(\w+)\.(\w+)/ ) {
- $sql = q{
- SELECT trigger_body FROM ALL_TRIGGERS t
- WHERE t.owner = ? AND t.table_name = ?
- AND t.triggering_event = 'INSERT'
- AND t.status = 'ENABLED'
- };
- $sth = $dbh->prepare($sql);
- $sth->execute( uc($schema), uc($table) );
- }
- else {
- $sth = $dbh->prepare($sql);
- $sth->execute( uc( $source_name ) );
- }
while (my ($insert_trigger) = $sth->fetchrow_array) {
- return uc($1) if $insert_trigger =~ m!(\w+)\.nextval!i; # col name goes here???
+ return $1 if $insert_trigger =~ m!("?\w+"?)\.nextval!i; # col name goes here???
}
- $self->throw_exception("Unable to find a sequence INSERT trigger on table '" . $source->name . "'.");
+ $self->throw_exception("Unable to find a sequence INSERT trigger on table '$source_name'.");
}
sub _sequence_fetch {
sub columns_info_for {
my ($self, $table) = @_;
- $self->next::method(uc($table));
+ $self->next::method($table);
}
=head2 datetime_parser_type
my %column_bind_attrs = $self->bind_attribute_by_data_type($data_type);
if ($data_type =~ /^[BC]LOB$/i) {
+ if ($DBD::Oracle::VERSION eq '1.23') {
+ $self->throw_exception(
+"BLOB/CLOB support in DBD::Oracle == 1.23 is broken, use an earlier or later ".
+"version.\n\nSee: https://rt.cpan.org/Public/Bug/Display.html?id=46016\n"
+ );
+ }
+
$column_bind_attrs{'ora_type'} = uc($data_type) eq 'CLOB'
? DBD::Oracle::ORA_CLOB()
: DBD::Oracle::ORA_BLOB()
return $new_alias;
}
+=head2 with_deferred_fk_checks
+
+Runs a coderef between:
+
+ alter session set constraints = deferred
+ ...
+ alter session set constraints = immediate
+
+to defer foreign key checks.
+
+Constraints must be declared C<DEFERRABLE> for this to work.
+
+=cut
+
+sub with_deferred_fk_checks {
+ my ($self, $sub) = @_;
+
+ my $txn_scope_guard = $self->txn_scope_guard;
+
+ $self->_do_query('alter session set constraints = deferred');
+
+ my $sg = Scope::Guard->new(sub {
+ $self->_do_query('alter session set constraints = immediate');
+ });
+
+ return Context::Preserve::preserve_context(sub { $sub->() },
+ after => sub { $txn_scope_guard->commit });
+}
+
=head1 AUTHOR
See L<DBIx::Class/CONTRIBUTORS>.
This module was originally written to support Oracle < 9i where ANSI joins
weren't supported at all, but became the module for Oracle >= 8 because
-Oracle's optimising of ANSI joins is horrible. (See:
-http://scsys.co.uk:8001/7495)
+Oracle's optimising of ANSI joins is horrible.
=head1 SYNOPSIS
It should properly support left joins, and right joins. Full outer joins are
not possible due to the fact that Oracle requires the entire query be written
to union the results of a left and right join, and by the time this module is
-called to create the where query and table definition part of the sql query,
+called to create the where query and table definition part of the SQL query,
it's already too late.
=head1 METHODS
use DBD::Pg qw(:pg_types);
# Ask for a DBD::Pg with array support
-warn "DBD::Pg 2.9.2 or greater is strongly recommended\n"
+warn __PACKAGE__.": DBD::Pg 2.9.2 or greater is strongly recommended\n"
if ($DBD::Pg::VERSION < 2.009002); # pg uses (used?) version::qv()
sub with_deferred_fk_checks {
BEGIN {
use Carp::Clan qw/^DBIx::Class/;
-
- ## Modules required for Replication support not required for general DBIC
- ## use, so we explicitly test for these.
-
- my %replication_required = (
- 'Moose' => '0.90',
- 'MooseX::Types' => '0.21',
- 'namespace::clean' => '0.11',
- 'Hash::Merge' => '0.11'
- );
-
- my @didnt_load;
-
- for my $module (keys %replication_required) {
- eval "use $module $replication_required{$module}";
- push @didnt_load, "$module $replication_required{$module}"
- if $@;
- }
-
- croak("@{[ join ', ', @didnt_load ]} are missing and are required for Replication")
- if @didnt_load;
+ use DBIx::Class;
+ croak('The following modules are required for Replication ' . DBIx::Class::Optional::Dependencies->req_missing_for ('replicated') )
+ unless DBIx::Class::Optional::Dependencies->req_ok_for ('replicated');
}
use Moose;
use DBIx::Class::Storage::DBI::Replicated::Types qw/BalancerClassNamePart DBICSchema DBICStorageDBI/;
use MooseX::Types::Moose qw/ClassName HashRef Object/;
use Scalar::Util 'reftype';
-use Hash::Merge 'merge';
+use Hash::Merge;
+use List::Util qw/min max/;
use namespace::clean -except => 'meta';
=head1 SYNOPSIS
The Following example shows how to change an existing $schema to a replicated
-storage type, add some replicated (readonly) databases, and perform reporting
+storage type, add some replicated (read-only) databases, and perform reporting
tasks.
You should set the 'storage_type attribute to a replicated type. You should
Warning: This class is marked BETA. This has been running a production
website using MySQL native replication as its backend and we have some decent
test coverage but the code hasn't yet been stressed by a variety of databases.
-Individual DB's may have quirks we are not aware of. Please use this in first
+Individual DBs may have quirks we are not aware of. Please use this in early
development and pass along your experiences/bug fixes.
This class implements replicated data store for DBI. Currently you can define
to all existing storages. This way our storage class is a drop in replacement
for L<DBIx::Class::Storage::DBI>.
-Read traffic is spread across the replicants (slaves) occuring to a user
+Read traffic is spread across the replicants (slaves) according to a user
selected algorithm. The default algorithm is random weighted.
=head1 NOTES
-The consistancy betweeen master and replicants is database specific. The Pool
+The consistency between master and replicants is database specific. The Pool
gives you a method to validate its replicants, removing and replacing them
when they fail/pass predefined criteria. Please make careful use of the ways
to force a query to run against Master when needed.
=head1 REQUIREMENTS
-Replicated Storage has additional requirements not currently part of L<DBIx::Class>
-
- Moose => '0.90',
- MooseX::Types => '0.21',
- namespace::clean => '0.11',
- Hash::Merge => '0.11'
-
-You will need to install these modules manually via CPAN or make them part of the
-Makefile for your distribution.
+Replicated Storage has additional requirements not currently part of
+L<DBIx::Class>. See L<DBIx::Class::Optional::Dependencies> for more details.
=head1 ATTRIBUTES
select
select_single
columns_info_for
+ _dbh_columns_info_for
+ _select
/],
);
=head2 write_handler
-Defines an object that implements the write side of L<BIx::Class::Storage::DBI>.
+Defines an object that implements the write side of L<DBIx::Class::Storage::DBI>,
+as well as methods that don't write or read that can be called on only one
+storage, methods that return a C<$dbh>, and any methods that don't make sense to
+run on a replicant.
=cut
handles=>[qw/
on_connect_do
on_disconnect_do
+ on_connect_call
+ on_disconnect_call
connect_info
+ _connect_info
throw_exception
sql_maker
sqlt_type
svp_rollback
svp_begin
svp_release
+ relname_to_table_alias
+ _straight_join_to_node
+ _dbh_last_insert_id
+ _fix_bind_params
+ _default_dbi_connect_attributes
+ _dbi_connect_info
+ auto_savepoint
+ _sqlt_version_ok
+ _query_end
+ bind_attribute_by_data_type
+ transaction_depth
+ _dbh
+ _select_args
+ _dbh_execute_array
+ _sql_maker_args
+ _sql_maker
+ _query_start
+ _sqlt_version_error
+ _per_row_update_delete
+ _dbh_begin_work
+ _dbh_execute_inserts_with_no_binds
+ _select_args_to_query
+ _svp_generate_name
+ _multipk_update_delete
+ source_bind_attributes
+ _normalize_connect_info
+ _parse_connect_do
+ _dbh_commit
+ _execute_array
+ _placeholders_supported
+ _verify_pid
+ savepoints
+ _sqlt_minimum_version
+ _sql_maker_opts
+ _conn_pid
+ _typeless_placeholders_supported
+ _conn_tid
+ _dbh_autocommit
+ _native_data_type
+ _get_dbh
+ sql_maker_class
+ _dbh_rollback
+ _adjust_select_args_for_complex_prefetch
+ _resolve_ident_sources
+ _resolve_column_info
+ _prune_unused_joins
+ _strip_cond_qualifiers
+ _parse_order_by
+ _resolve_aliastypes_from_select_args
+ _execute
+ _do_query
+ _dbh_sth
+ _dbh_execute
/],
);
=head2 around: connect_info
-Preserve master's C<connect_info> options (for merging with replicants.)
-Also set any Replicated related options from connect_info, such as
+Preserves master's C<connect_info> options (for merging with replicants.)
+Also sets any Replicated-related options from connect_info, such as
C<pool_type>, C<pool_args>, C<balancer_type> and C<balancer_args>.
=cut
my $wantarray = wantarray;
+ my $merge = Hash::Merge->new('LEFT_PRECEDENT');
+
my %opts;
for my $arg (@$info) {
next unless (reftype($arg)||'') eq 'HASH';
- %opts = %{ merge($arg, \%opts) };
+ %opts = %{ $merge->merge($arg, \%opts) };
}
delete $opts{dsn};
if $opts{pool_type};
$self->pool_args(
- merge((delete $opts{pool_args} || {}), $self->pool_args)
+ $merge->merge((delete $opts{pool_args} || {}), $self->pool_args)
);
$self->pool($self->_build_pool)
if $opts{balancer_type};
$self->balancer_args(
- merge((delete $opts{balancer_args} || {}), $self->balancer_args)
+ $merge->merge((delete $opts{balancer_args} || {}), $self->balancer_args)
);
$self->balancer($self->_build_balancer)
my $master = $self->master;
$master->_determine_driver;
Moose::Meta::Class->initialize(ref $master);
+
DBIx::Class::Storage::DBI::Replicated::WithDSN->meta->apply($master);
+ # link pool back to master
+ $self->pool->master($master);
+
$wantarray ? @res : $res;
};
$self->throw_exception('too many hashrefs in connect_info')
if @hashes > 2;
- my %opts = %{ merge(reverse @hashes) };
+ my $merge = Hash::Merge->new('LEFT_PRECEDENT');
+ my %opts = %{ $merge->merge(reverse @hashes) };
# delete them
splice @$r, $i+1, ($#{$r} - $i), ();
delete $master_opts{dbh_maker};
# merge with master
- %opts = %{ merge(\%opts, \%master_opts) };
+ %opts = %{ $merge->merge(\%opts, \%master_opts) };
# update
$r->[$i] = \%opts;
=head2 execute_reliably ($coderef, ?@args)
Given a coderef, saves the current state of the L</read_handler>, forces it to
-use reliable storage (ie sets it to the master), executes a coderef and then
+use reliable storage (i.e. sets it to the master), executes a coderef and then
restores the original state.
Example:
=head2 set_balanced_storage
Sets the current $schema to be use the </balancer> for all reads, while all
-writea are sent to the master only
+writes are sent to the master only
=cut
=head2 debugobj
-set a debug object across all storages
+set a debug object
=cut
sub debugobj {
my $self = shift @_;
- if(@_) {
- foreach my $source ($self->all_storages) {
- $source->debugobj(@_);
- }
- }
- return $self->master->debugobj;
+ return $self->master->debugobj(@_);
}
=head2 debugfh
-set a debugfh object across all storages
+set a debugfh object
=cut
sub debugfh {
my $self = shift @_;
- if(@_) {
- foreach my $source ($self->all_storages) {
- $source->debugfh(@_);
- }
- }
- return $self->master->debugfh;
+ return $self->master->debugfh(@_);
}
=head2 debugcb
-set a debug callback across all storages
+set a debug callback
=cut
sub debugcb {
my $self = shift @_;
- if(@_) {
- foreach my $source ($self->all_storages) {
- $source->debugcb(@_);
- }
- }
- return $self->master->debugcb;
+ return $self->master->debugcb(@_);
}
=head2 disconnect
$self->master->cursor_class;
}
+=head2 cursor
+
+set cursor class on all storages, or return master's, alias for L</cursor_class>
+above.
+
+=cut
+
+sub cursor {
+ my ($self, $cursor_class) = @_;
+
+ if ($cursor_class) {
+ $_->cursor($cursor_class) for $self->all_storages;
+ }
+ $self->master->cursor;
+}
+
+=head2 unsafe
+
+sets the L<DBIx::Class::Storage::DBI/unsafe> option on all storages or returns
+master's current setting
+
+=cut
+
+sub unsafe {
+ my $self = shift;
+
+ if (@_) {
+ $_->unsafe(@_) for $self->all_storages;
+ }
+
+ return $self->master->unsafe;
+}
+
+=head2 disable_sth_caching
+
+sets the L<DBIx::Class::Storage::DBI/disable_sth_caching> option on all storages
+or returns master's current setting
+
+=cut
+
+sub disable_sth_caching {
+ my $self = shift;
+
+ if (@_) {
+ $_->disable_sth_caching(@_) for $self->all_storages;
+ }
+
+ return $self->master->disable_sth_caching;
+}
+
+=head2 lag_behind_master
+
+returns the highest Replicant L<DBIx::Class::Storage::DBI/lag_behind_master>
+setting
+
+=cut
+
+sub lag_behind_master {
+ my $self = shift;
+
+ return max map $_->lag_behind_master, $self->replicants;
+}
+
+=head2 is_replicating
+
+returns true if all replicants return true for
+L<DBIx::Class::Storage::DBI/is_replicating>
+
+=cut
+
+sub is_replicating {
+ my $self = shift;
+
+ return (grep $_->is_replicating, $self->replicants) == ($self->replicants);
+}
+
+=head2 connect_call_datetime_setup
+
+calls L<DBIx::Class::Storage::DBI/connect_call_datetime_setup> for all storages
+
+=cut
+
+sub connect_call_datetime_setup {
+ my $self = shift;
+ $_->connect_call_datetime_setup for $self->all_storages;
+}
+
+sub _populate_dbh {
+ my $self = shift;
+ $_->_populate_dbh for $self->all_storages;
+}
+
+sub _connect {
+ my $self = shift;
+ $_->_connect for $self->all_storages;
+}
+
+sub _rebless {
+ my $self = shift;
+ $_->_rebless for $self->all_storages;
+}
+
+sub _determine_driver {
+ my $self = shift;
+ $_->_determine_driver for $self->all_storages;
+}
+
+sub _driver_determined {
+ my $self = shift;
+
+ if (@_) {
+ $_->_driver_determined(@_) for $self->all_storages;
+ }
+
+ return $self->master->_driver_determined;
+}
+
+sub _init {
+ my $self = shift;
+
+ $_->_init for $self->all_storages;
+}
+
+sub _run_connection_actions {
+ my $self = shift;
+
+ $_->_run_connection_actions for $self->all_storages;
+}
+
+sub _do_connection_actions {
+ my $self = shift;
+
+ if (@_) {
+ $_->_do_connection_actions(@_) for $self->all_storages;
+ }
+}
+
+sub connect_call_do_sql {
+ my $self = shift;
+ $_->connect_call_do_sql(@_) for $self->all_storages;
+}
+
+sub disconnect_call_do_sql {
+ my $self = shift;
+ $_->disconnect_call_do_sql(@_) for $self->all_storages;
+}
+
+sub _seems_connected {
+ my $self = shift;
+
+ return min map $_->_seems_connected, $self->all_storages;
+}
+
+sub _ping {
+ my $self = shift;
+
+ return min map $_->_ping, $self->all_storages;
+}
+
=head1 GOTCHAS
Due to the fact that replicants can lag behind a master, you must take care to
This method should be defined in the class which consumes this role.
Given a pool object, return the next replicant that will serve queries. The
-default behavior is to grap the first replicant it finds but you can write
+default behavior is to grab the first replicant it finds but you can write
your own subclasses of L<DBIx::Class::Storage::DBI::Replicated::Balancer> to
support other balance systems.
database's (L<DBIx::Class::Storage::DBI::Replicated::Replicant>), defines a
method by which query load can be spread out across each replicant in the pool.
-This Balancer just get's whatever is the first replicant in the pool
+This Balancer just gets whichever is the first replicant in the pool.
=head1 ATTRIBUTES
This is an introductory document for L<DBIx::Class::Storage::Replication>.
This document is not an overview of what replication is or why you should be
-using it. It is not a document explaing how to setup MySQL native replication
-either. Copious external resources are avialable for both. This document
+using it. It is not a document explaining how to set up MySQL native replication
+either. Copious external resources are available for both. This document
presumes you have the basics down.
=head1 DESCRIPTION
For an easy way to start playing with MySQL native replication, see:
L<MySQL::Sandbox>.
-If you are using this with a L<Catalyst> based appplication, you may also wish
+If you are using this with a L<Catalyst> based application, you may also want
to see more recent updates to L<Catalyst::Model::DBIC::Schema>, which has
support for replication configuration options as well.
By default, when you start L<DBIx::Class>, your Schema (L<DBIx::Class::Schema>)
is assigned a storage_type, which when fully connected will reflect your
-underlying storage engine as defined by your choosen database driver. For
+underlying storage engine as defined by your chosen database driver. For
example, if you connect to a MySQL database, your storage_type will be
L<DBIx::Class::Storage::DBI::mysql> Your storage type class will contain
database specific code to help smooth over the differences between databases
and let L<DBIx::Class> do its thing.
If you want to use replication, you will override this setting so that the
-replicated storage engine will 'wrap' your underlying storages and present to
-the end programmer a unified interface. This wrapper storage class will
+replicated storage engine will 'wrap' your underlying storages and present
+a unified interface to the end programmer. This wrapper storage class will
delegate method calls to either a master database or one or more replicated
databases based on if they are read only (by default sent to the replicants)
or write (reserved for the master). Additionally, the Replicated storage
storage itself (L<DBIx::Class::Storage::DBI::Replicated>). A replicated storage
takes a pool of replicants (L<DBIx::Class::Storage::DBI::Replicated::Pool>)
and a software balancer (L<DBIx::Class::Storage::DBI::Replicated::Pool>). The
-balancer does the job of splitting up all the read traffic amongst each
-replicant in the Pool. Currently there are two types of balancers, a Random one
+balancer does the job of splitting up all the read traffic amongst the
+replicants in the Pool. Currently there are two types of balancers, a Random one
which chooses a Replicant in the Pool using a naive randomizer algorithm, and a
First replicant, which just uses the first one in the Pool (and obviously is
only of value when you have a single replicant).
balancers have the 'auto_validate_every' option. This is the number of seconds
we allow to pass between validation checks on a load balanced replicant. So
the higher the number, the more possibility that your reads to the replicant
-may be inconsistant with what's on the master. Setting this number too low
+may be inconsistent with what's on the master. Setting this number too low
will result in increased database loads, so choose a number with care. Our
experience is that setting the number around 5 seconds results in a good
performance / integrity balance.
This object (L<DBIx::Class::Storage::DBI::Replicated::Pool>) manages all the
declared replicants. 'maximum_lag' is the number of seconds a replicant is
allowed to lag behind the master before being temporarily removed from the pool.
-Keep in mind that the Balancer option 'auto_validate_every' determins how often
+Keep in mind that the Balancer option 'auto_validate_every' determines how often
a replicant is tested against this condition, so the true possible lag can be
higher than the number you set. The default is zero.
No matter how low you set the maximum_lag or the auto_validate_every settings,
there is always the chance that your replicants will lag a bit behind the
master for the supported replication system built into MySQL. You can ensure
-reliabily reads by using a transaction, which will force both read and write
+reliable reads by using a transaction, which will force both read and write
activity to the master, however this will increase the load on your master
database.
use DBI ();
use Carp::Clan qw/^DBIx::Class/;
use MooseX::Types::Moose qw/Num Int ClassName HashRef/;
+use DBIx::Class::Storage::DBI::Replicated::Types 'DBICStorageDBI';
use namespace::clean -except => 'meta';
=head1 DESCRIPTION
In a replicated storage type, there is at least one replicant to handle the
-read only traffic. The Pool class manages this replicant, or list of
+read-only traffic. The Pool class manages this replicant, or list of
replicants, and gives some methods for querying information about their status.
=head1 ATTRIBUTES
This is an integer representing a time since the last time the replicants were
validated. It's nothing fancy, just an integer provided via the perl L<time|perlfunc/time>
-builtin.
+built-in.
=cut
=head2 replicants
A hashref of replicant, with the key being the dsn and the value returning the
-actual replicant storage. For example if the $dsn element is something like:
+actual replicant storage. For example, if the $dsn element is something like:
"dbi:SQLite:dbname=dbfile"
=item delete_replicant ($key)
-removes the replicant under $key from the pool
+Removes the replicant under $key from the pool
=back
},
);
+=head2 master
+
+Reference to the master Storage.
+
+=cut
+
+has master => (is => 'rw', isa => DBICStorageDBI, weak_ref => 1);
+
=head1 METHODS
This class defines the following methods.
$replicant->_determine_driver
});
- DBIx::Class::Storage::DBI::Replicated::Replicant->meta->apply($replicant);
+ Moose::Meta::Class->initialize(ref $replicant);
+
+ DBIx::Class::Storage::DBI::Replicated::Replicant->meta->apply($replicant);
+
+ # link back to master
+ $replicant->master($self->master);
+
return $replicant;
}
connect. For the master database this is desirable, but since replicants are
allowed to fail, this behavior is not desirable. This method wraps the call
to ensure_connected in an eval in order to catch any generated errors. That
-way a slave can go completely offline (ie, the box itself can die) without
+way a slave can go completely offline (e.g. the box itself can die) without
bringing down your entire pool of databases.
=cut
inactive, and thus removed from the replication pool.
This tests L<all_replicants>, since a replicant that has been previous marked
-as inactive can be reactived should it start to pass the validation tests again.
+as inactive can be reactivated should it start to pass the validation tests again.
See L<DBIx::Class::Storage::DBI> for more about checking if a replicating
connection is not following a master or is lagging.
requires qw/_query_start/;
with 'DBIx::Class::Storage::DBI::Replicated::WithDSN';
use MooseX::Types::Moose qw/Bool Str/;
+use DBIx::Class::Storage::DBI::Replicated::Types 'DBICStorageDBI';
use namespace::clean -except => 'meta';
=head2 active
This is a boolean which allows you to programmatically activate or deactivate a
-replicant from the pool. This way to you do stuff like disallow a replicant
-when it get's too far behind the master, if it stops replicating, etc.
+replicant from the pool. This way you can do stuff like disallow a replicant
+when it gets too far behind the master, if it stops replicating, etc.
This attribute DOES NOT reflect a replicant's internal status, i.e. if it is
properly replicating from a master and has not fallen too many seconds behind a
reliability threshold. For that, use L</is_replicating> and L</lag_behind_master>.
Since the implementation of those functions database specific (and not all DBIC
-supported DB's support replication) you should refer your database specific
+supported DBs support replication) you should refer to your database-specific
storage driver for more information.
=cut
has dsn => (is => 'rw', isa => Str);
has id => (is => 'rw', isa => Str);
+=head2 master
+
+Reference to the master Storage.
+
+=cut
+
+has master => (is => 'rw', isa => DBICStorageDBI, weak_ref => 1);
+
=head1 METHODS
This class defines the following methods.
=cut
sub debugobj {
- return shift->schema->storage->debugobj;
+ my $self = shift;
+
+ return $self->master->debugobj;
}
=head1 ALSO SEE
This package defines the following attributes.
-head2 _query_count
+=head2 _query_count
Is the attribute holding the current query count. It defines a public reader
called 'query_count' which you can use to access the total number of queries
=head2 _query_start
-override on the method so that we count the queries.
+Override on the method so that we count the queries.
=cut
--- /dev/null
+package DBIx::Class::Storage::DBI::SQLAnywhere;
+
+use strict;
+use warnings;
+use base qw/DBIx::Class::Storage::DBI/;
+use mro 'c3';
+use List::Util ();
+
+__PACKAGE__->mk_group_accessors(simple => qw/
+ _identity
+/);
+
+=head1 NAME
+
+DBIx::Class::Storage::DBI::SQLAnywhere - Driver for Sybase SQL Anywhere
+
+=head1 DESCRIPTION
+
+This class implements autoincrements for Sybase SQL Anywhere, selects the
+RowNumberOver limit implementation and provides
+L<DBIx::Class::InflateColumn::DateTime> support.
+
+You need the C<DBD::SQLAnywhere> driver that comes with the SQL Anywhere
+distribution, B<NOT> the one on CPAN. It is usually under a path such as:
+
+ /opt/sqlanywhere11/sdk/perl
+
+Recommended L<connect_info|DBIx::Class::Storage::DBI/connect_info> settings:
+
+ on_connect_call => 'datetime_setup'
+
+=head1 METHODS
+
+=cut
+
+sub last_insert_id { shift->_identity }
+
+sub insert {
+ my $self = shift;
+ my ($source, $to_insert) = @_;
+
+ my $identity_col = List::Util::first {
+ $source->column_info($_)->{is_auto_increment}
+ } $source->columns;
+
+# user might have an identity PK without is_auto_increment
+ if (not $identity_col) {
+ foreach my $pk_col ($source->primary_columns) {
+ if (not exists $to_insert->{$pk_col}) {
+ $identity_col = $pk_col;
+ last;
+ }
+ }
+ }
+
+ if ($identity_col && (not exists $to_insert->{$identity_col})) {
+ my $dbh = $self->_get_dbh;
+ my $table_name = $source->from;
+ $table_name = $$table_name if ref $table_name;
+
+ my ($identity) = $dbh->selectrow_array("SELECT GET_IDENTITY('$table_name')");
+
+ $to_insert->{$identity_col} = $identity;
+
+ $self->_identity($identity);
+ }
+
+ return $self->next::method(@_);
+}
+
+# this sub stolen from DB2
+
+sub _sql_maker_opts {
+ my ( $self, $opts ) = @_;
+
+ if ( $opts ) {
+ $self->{_sql_maker_opts} = { %$opts };
+ }
+
+ return { limit_dialect => 'RowNumberOver', %{$self->{_sql_maker_opts}||{}} };
+}
+
+# this sub stolen from MSSQL
+
+sub build_datetime_parser {
+ my $self = shift;
+ my $type = "DateTime::Format::Strptime";
+ eval "use ${type}";
+ $self->throw_exception("Couldn't load ${type}: $@") if $@;
+ return $type->new( pattern => '%Y-%m-%d %H:%M:%S.%6N' );
+}
+
+=head2 connect_call_datetime_setup
+
+Used as:
+
+ on_connect_call => 'datetime_setup'
+
+In L<connect_info|DBIx::Class::Storage::DBI/connect_info> to set the date and
+timestamp formats (as temporary options for the session) for use with
+L<DBIx::Class::InflateColumn::DateTime>.
+
+The C<TIMESTAMP> data type supports up to 6 digits after the decimal point for
+second precision. The full precision is used.
+
+The C<DATE> data type supposedly stores hours and minutes too, according to the
+documentation, but I could not get that to work. It seems to only store the
+date.
+
+You will need the L<DateTime::Format::Strptime> module for inflation to work.
+
+=cut
+
+sub connect_call_datetime_setup {
+ my $self = shift;
+
+ $self->_do_query(
+ "set temporary option timestamp_format = 'yyyy-mm-dd hh:mm:ss.ssssss'"
+ );
+ $self->_do_query(
+ "set temporary option date_format = 'yyyy-mm-dd hh:mm:ss.ssssss'"
+ );
+}
+
+sub _svp_begin {
+ my ($self, $name) = @_;
+
+ $self->_get_dbh->do("SAVEPOINT $name");
+}
+
+# can't release savepoints that have been rolled back
+sub _svp_release { 1 }
+
+sub _svp_rollback {
+ my ($self, $name) = @_;
+
+ $self->_get_dbh->do("ROLLBACK TO SAVEPOINT $name")
+}
+
+1;
+
+=head1 MAXIMUM CURSORS
+
+A L<DBIx::Class> application can use a lot of cursors, due to the usage of
+L<prepare_cached|DBI/prepare_cached>.
+
+The default cursor maximum is C<50>, which can be a bit too low. This limit can
+be turned off (or increased) by the DBA by executing:
+
+ set option max_statement_count = 0
+ set option max_cursor_count = 0
+
+Highly recommended.
+
+=head1 AUTHOR
+
+See L<DBIx::Class/AUTHOR> and L<DBIx::Class/CONTRIBUTORS>.
+
+=head1 LICENSE
+
+You may distribute this code under the same terms as Perl itself.
+
+=cut
use File::Copy;
use File::Spec;
-sub _dbh_last_insert_id {
- my ($self, $dbh, $source, $col) = @_;
- $dbh->func('last_insert_rowid');
-}
+__PACKAGE__->sql_maker_class('DBIx::Class::SQLAHacks::SQLite');
sub backup
{
# check for empty insert
# INSERT INTO foo DEFAULT VALUES -- does not work with Sybase
- # try to insert explicit 'DEFAULT's instead (except for identity)
+ # try to insert explicit 'DEFAULT's instead (except for identity, timestamp
+ # and computed columns)
if (not %$to_insert) {
for my $col ($source->columns) {
next if $col eq $identity_col;
+
+ my $info = $source->column_info($col);
+
+ next if ref $info->{default_value} eq 'SCALAR'
+ || (exists $info->{data_type} && (not defined $info->{data_type}));
+
+ next if $info->{data_type} && $info->{data_type} =~ /^timestamp\z/i;
+
$to_insert->{$col} = \'DEFAULT';
}
}
sub _update_blobs {
my ($self, $source, $blob_cols, $where) = @_;
- my (@primary_cols) = $source->primary_columns;
-
- $self->throw_exception('Cannot update TEXT/IMAGE column(s) without a primary key')
- unless @primary_cols;
+ my @primary_cols = eval { $source->_pri_cols };
+ $self->throw_exception("Cannot update TEXT/IMAGE column(s): $@")
+ if $@;
# check if we're updating a single row by PK
my $pk_cols_in_where = 0;
my $table = $source->name;
my %row = %$row;
- my (@primary_cols) = $source->primary_columns;
-
- $self->throw_exception('Cannot update TEXT/IMAGE column(s) without a primary key')
- unless @primary_cols;
+ my @primary_cols = eval { $source->_pri_cols} ;
+ $self->throw_exception("Cannot update TEXT/IMAGE column(s): $@")
+ if $@;
$self->throw_exception('Cannot update TEXT/IMAGE column(s) without primary key values')
if ((grep { defined $row{$_} } @primary_cols) != @primary_cols);
=head1 Schema::Loader Support
-There is an experimental branch of L<DBIx::Class::Schema::Loader> that will
-allow you to dump a schema from most (if not all) versions of Sybase.
-
-It is available via subversion from:
-
- http://dev.catalyst.perl.org/repos/bast/branches/DBIx-Class-Schema-Loader/current/
+As of version C<0.05000>, L<DBIx::Class::Schema::Loader> should work well with
+most (if not all) versions of Sybase ASE.
=head1 FreeTDS
definitions in your Result classes, and are mapped to a Sybase type (if it isn't
already) using a mapping based on L<SQL::Translator>.
-In other configurations, placeholers will work just as they do with the Sybase
+In other configurations, placeholders will work just as they do with the Sybase
Open Client libraries.
Inserts or updates of TEXT/IMAGE columns will B<NOT> work with FreeTDS.
=head1 TRANSACTIONS
-Due to limitations of the TDS protocol, L<DBD::Sybase>, or both; you cannot
-begin a transaction while there are active cursors; nor can you use multiple
+Due to limitations of the TDS protocol, L<DBD::Sybase>, or both, you cannot
+begin a transaction while there are active cursors, nor can you use multiple
active cursors within a transaction. An active cursor is, for example, a
L<ResultSet|DBIx::Class::ResultSet> that has been executed using C<next> or
C<first> but has not been exhausted or L<reset|DBIx::Class::ResultSet/reset>.
When inserting IMAGE columns using this method, you'll need to use
L</connect_call_blob_setup> as well.
+=head1 COMPUTED COLUMNS
+
+If you have columns such as:
+
+ created_dtm AS getdate()
+
+represent them in your Result classes as:
+
+ created_dtm => {
+ data_type => undef,
+ default_value => \'getdate()',
+ is_nullable => 0,
+ }
+
+The C<data_type> must exist and must be C<undef>. Then empty inserts will work
+on tables with such columns.
+
+=head1 TIMESTAMP COLUMNS
+
+C<timestamp> columns in Sybase ASE are not really timestamps, see:
+L<http://dba.fyicenter.com/Interview-Questions/SYBASE/The_timestamp_datatype_in_Sybase_.html>.
+
+They should be defined in your Result classes as:
+
+ ts => {
+ data_type => 'timestamp',
+ is_nullable => 0,
+ inflate_datetime => 0,
+ }
+
+The C<< inflate_datetime => 0 >> is necessary if you use
+L<DBIx::Class::InflateColumn::DateTime>, and most people do, and still want to
+be able to read these values.
+
+The values will come back as hexadecimal.
+
=head1 TODO
=over
=head1 DESCRIPTION
-If you're using this driver than your version of Sybase, or the libraries you
-use to connect to it, do not support placeholders.
+If you're using this driver then your version of Sybase or the libraries you
+use to connect to it do not support placeholders.
You can also enable this driver explicitly using:
$sth->execute >> for details on the pros and cons of using placeholders.
One advantage of not using placeholders is that C<select @@identity> will work
-for obtainging the last insert id of an C<IDENTITY> column, instead of having to
+for obtaining the last insert id of an C<IDENTITY> column, instead of having to
do C<select max(col)> in a transaction as the base Sybase driver does.
When using this driver, bind variables will be interpolated (properly quoted of
use base qw/
DBIx::Class::Storage::DBI::MultiColumnIn
- DBIx::Class::Storage::DBI::AmbiguousGlob
DBIx::Class::Storage::DBI
/;
use mro 'c3';
}
# construct the inner $from for the subquery
- # we need to prune first, because this will determine if we need a group_bu below
+ # we need to prune first, because this will determine if we need a group_by below
my $inner_from = $self->_prune_unused_joins ($from, $inner_select, $where, $inner_attrs);
# if a multi-type join was needed in the subquery - add a group_by to simulate the
my $group_by_sql = $sql_maker->_order_by({
map { $_ => $attrs->{$_} } qw/group_by having/
});
- my @order_by_chunks = (map
- { ref $_ ? $_->[0] : $_ }
- $sql_maker->_order_by_chunks ($attrs->{order_by})
- );
+ my @order_by_chunks = ($self->_parse_order_by ($attrs->{order_by}) );
# match every alias to the sql chunks above
for my $alias (keys %$alias_list) {
for (my $i = 0; $i < @cond; $i++) {
my $entry = $cond[$i];
my $hash;
- if (ref $entry eq 'HASH') {
+ my $ref = ref $entry;
+ if ($ref eq 'HASH' or $ref eq 'ARRAY') {
$hash = $self->_strip_cond_qualifiers($entry);
}
- else {
+ elsif (! $ref) {
$entry =~ /([^.]+)$/;
$hash->{$1} = $cond[++$i];
}
+ else {
+ $self->throw_exception ("_strip_cond_qualifiers() is unable to handle a condition reftype $ref");
+ }
push @{$cond->{-and}}, $hash;
}
}
return $cond;
}
+sub _parse_order_by {
+ my ($self, $order_by) = @_;
+
+ return scalar $self->sql_maker->_order_by_chunks ($order_by)
+ unless wantarray;
+
+ my $sql_maker = $self->sql_maker;
+ local $sql_maker->{quote_char}; #disable quoting
+ my @chunks;
+ for my $chunk (map { ref $_ ? @$_ : $_ } ($sql_maker->_order_by_chunks ($order_by) ) ) {
+ $chunk =~ s/\s+ (?: ASC|DESC ) \s* $//ix;
+ push @chunks, $chunk;
+ }
+
+ return @chunks;
+}
1;
=head2 commit
Commit the transaction, and stop guarding the scope. If this method is not
-called and this object goes out of scope (i.e. an exception is thrown) then
+called and this object goes out of scope (e.g. an exception is thrown) then
the transaction is rolled back, via L<DBIx::Class::Storage/txn_rollback>
=cut
Ash Berlin, 2008.
-Insipred by L<Scope::Guard> by chocolateboy.
+Inspired by L<Scope::Guard> by chocolateboy.
This module is free software. It may be used, redistributed and/or modified
under the same terms as Perl itself.
use strict;
use warnings;
use base qw/DBIx::Class/;
-use utf8;
__PACKAGE__->mk_classdata( '_utf8_columns' );
# override this if you want to force everything to be encoded/decoded
sub _is_utf8_column {
- return (shift->utf8_columns || {})->{shift};
+ # my ($self, $col) = @_;
+ return ($_[0]->utf8_columns || {})->{$_[1]};
}
=head1 AUTHORS
sub parse {
# this is a hack to prevent schema leaks due to a retarded SQLT implementation
# DO NOT REMOVE (until SQLT2 is out, the all of this will be rewritten anyway)
- Scalar::Util::weaken ($_[1]);
+ Scalar::Util::weaken ($_[1]) if ref ($_[1]);
my ($tr, $data) = @_;
my $args = $tr->parser_args;
--- /dev/null
+#!/usr/bin/perl
+
+use warnings;
+use strict;
+
+use CPANDB;
+use DBIx::Class::Schema::Loader 0.05;
+use Data::Dumper::Concise;
+
+{
+ package CPANDB::Schema;
+ use base qw/DBIx::Class::Schema::Loader/;
+
+ __PACKAGE__->loader_options (
+ naming => 'v5',
+ );
+}
+
+my $s = CPANDB::Schema->connect (sub { CPANDB->dbh } );
+
+# reference names are unstable - just create rels manually
+# is there a saner way to do that?
+my $distclass = $s->class('Distribution');
+$distclass->has_many (
+ 'deps',
+ $s->class('Dependency'),
+ 'distribution',
+);
+$s->unregister_source ('Distribution');
+$s->register_class ('Distribution', $distclass);
+
+
+# a proof of concept how to find out who uses us *AND* SQLT
+my $us_and_sqlt = $s->resultset('Distribution')->search (
+ {
+ 'deps.dependency' => 'DBIx-Class',
+ 'deps_2.dependency' => 'SQL-Translator',
+ },
+ {
+ join => [qw/deps deps/],
+ order_by => 'me.author',
+ select => [ 'me.distribution', 'me.author', map { "$_.phase" } (qw/deps deps_2/)],
+ as => [qw/dist_name dist_author req_dbic_at req_sqlt_at/],
+ result_class => 'DBIx::Class::ResultClass::HashRefInflator',
+ },
+);
+
+print Dumper [$us_and_sqlt->all];
#!/usr/bin/perl
+
use strict;
use warnings;
-use Getopt::Long;
-use Pod::Usage;
-use JSON::Any;
-
-
-my $json = JSON::Any->new(allow_barekey => 1, allow_singlequote => 1);
-
-GetOptions(
- 'schema=s' => \my $schema_class,
- 'class=s' => \my $resultset_class,
- 'connect=s' => \my $connect,
- 'op=s' => \my $op,
- 'set=s' => \my $set,
- 'where=s' => \my $where,
- 'attrs=s' => \my $attrs,
- 'format=s' => \my $format,
- 'force' => \my $force,
- 'trace' => \my $trace,
- 'quiet' => \my $quiet,
- 'help' => \my $help,
- 'tlibs' => \my $t_libs,
-);
-
-if ($t_libs) {
- unshift( @INC, 't/lib', 'lib' );
+BEGIN {
+ use DBIx::Class;
+ die ( 'The following modules are required for the dbicadmin utility: '
+ . DBIx::Class::Optional::Dependencies->req_missing_for ('admin_script')
+ . "\n"
+ ) unless DBIx::Class::Optional::Dependencies->req_ok_for ('admin_script');
}
-pod2usage(1) if ($help);
-$ENV{DBIC_TRACE} = 1 if ($trace);
-
-die('No op specified') if(!$op);
-die('Invalid op') if ($op!~/^insert|update|delete|select$/s);
-my $csv_class;
-if ($op eq 'select') {
- $format ||= 'tsv';
- die('Invalid format') if ($format!~/^tsv|csv$/s);
- $csv_class = 'Text::CSV_XS';
- eval{ require Text::CSV_XS };
- if ($@) {
- $csv_class = 'Text::CSV_PP';
- eval{ require Text::CSV_PP };
- die('The select op requires either the Text::CSV_XS or the Text::CSV_PP module') if ($@);
- }
-}
-
-die('No schema specified') if(!$schema_class);
-eval("require $schema_class");
-die('Unable to load schema') if ($@);
-$connect = $json->jsonToObj( $connect ) if ($connect);
-my $schema = $schema_class->connect(
- ( $connect ? @$connect : () )
+use DBIx::Class::Admin::Descriptive;
+#use Getopt::Long::Descriptive;
+use DBIx::Class::Admin;
+
+my $short_description = "utility for administrating DBIx::Class schemata";
+my $synopsis_text =q|
+ deploy a schema to a database
+ %c --schema=MyApp::Schema \
+ --connect='["dbi:SQLite:my.db", "", ""]' \
+ --deploy
+
+ update an existing record
+ %c --schema=MyApp::Schema --class=Employee \
+ --connect='["dbi:SQLite:my.db", "", ""]' \
+ --op=update --set='{ "name": "New_Employee" }'
+|;
+
+my ($opts, $usage) = describe_options(
+ "%c: %o",
+ (
+ ['Actions'],
+ ["action" => hidden => { one_of => [
+ ['create' => 'Create version diffs needs preversion',],
+ ['upgrade' => 'Upgrade the database to the current schema '],
+ ['install' => 'Install the schema version tables to an existing database',],
+ ['deploy' => 'Deploy the schema to the database',],
+ ['select' => 'Select data from the schema', ],
+ ['insert' => 'Insert data into the schema', ],
+ ['update' => 'Update data in the schema', ],
+ ['delete' => 'Delete data from the schema',],
+ ['op:s' => 'compatiblity option all of the above can be suppied as --op=<action>'],
+ ['help' => 'display this help', { implies => { schema_class => '__dummy__' } } ],
+ ['selfinject-pod' => 'hidden', { implies => { schema_class => '__dummy__' } } ],
+ ], required=> 1 }],
+ ['Arguments'],
+ ['schema-class:s' => 'The class of the schema to load', { required => 1 } ],
+ ['resultset|resultset-class|class:s' => 'The resultset to operate on for data manipulation' ],
+ ['config-stanza:s' => 'Where in the config to find the connection_info, supply in form MyApp::Model::DB',],
+ ['config:s' => 'Supply the config file for parsing by Config::Any', { depends => 'config_stanza'} ],
+ ['connect-info:s%' => 'Supply the connect info as additonal options ie -I dsn=<dsn> user=<user> password=<pass> '],
+ ['connect:s' => 'Supply the connect info as a json string' ],
+ ['sql-dir:s' => 'The directory where sql diffs will be created'],
+ ['sql-type:s' => 'The RDBMs flavour you wish to use'],
+ ['version:i' => 'Supply a version install'],
+ ['preversion:s' => 'The previous version to diff against',],
+ ['set:s' => 'JSON data used to perform data operations' ],
+ ['attrs:s' => 'JSON string to be used for the second argument for search'],
+ ['where:s' => 'JSON string to be used for the where clause of search'],
+ ['force' => 'Be forceful with some operations'],
+ ['trace' => 'Turn on DBIx::Class trace output'],
+ ['quiet' => 'Be less verbose'],
+ )
);
-die('No class specified') if(!$resultset_class);
-my $resultset = eval{ $schema->resultset($resultset_class) };
-die('Unable to load the class with the schema') if ($@);
-
-$set = $json->jsonToObj( $set ) if ($set);
-$where = $json->jsonToObj( $where ) if ($where);
-$attrs = $json->jsonToObj( $attrs ) if ($attrs);
-
-if ($op eq 'insert') {
- die('Do not use the where option with the insert op') if ($where);
- die('Do not use the attrs option with the insert op') if ($attrs);
- my $obj = $resultset->create( $set );
- print ''.ref($resultset).' ID: '.join(',',$obj->id())."\n" if (!$quiet);
-}
-elsif ($op eq 'update') {
- $resultset = $resultset->search( ($where||{}) );
- my $count = $resultset->count();
- print "This action will modify $count ".ref($resultset)." records.\n" if (!$quiet);
- if ( $force || confirm() ) {
- $resultset->update_all( $set );
- }
-}
-elsif ($op eq 'delete') {
- die('Do not use the set option with the delete op') if ($set);
- $resultset = $resultset->search( ($where||{}), ($attrs||()) );
- my $count = $resultset->count();
- print "This action will delete $count ".ref($resultset)." records.\n" if (!$quiet);
- if ( $force || confirm() ) {
- $resultset->delete_all();
- }
-}
-elsif ($op eq 'select') {
- die('Do not use the set option with the select op') if ($set);
- my $csv = $csv_class->new({
- sep_char => ( $format eq 'tsv' ? "\t" : ',' ),
- });
- $resultset = $resultset->search( ($where||{}), ($attrs||()) );
- my @columns = $resultset->result_source->columns();
- $csv->combine( @columns );
- print $csv->string()."\n";
- while (my $row = $resultset->next()) {
- my @fields;
- foreach my $column (@columns) {
- push( @fields, $row->get_column($column) );
- }
- $csv->combine( @fields );
- print $csv->string()."\n";
- }
+die "please only use one of --config or --connect-info\n" if ($opts->{config} and $opts->{connect_info});
+
+if($opts->{selfinject_pod}) {
+
+ die "This is an internal method, do not call!!!\n"
+ unless $ENV{MAKELEVEL};
+
+ $usage->synopsis($synopsis_text);
+ $usage->short_description($short_description);
+ exec (
+ $^X,
+ qw/-p -0777 -i -e/,
+ (
+ 's/^# auto_pod_begin.*^# auto_pod_end/'
+ . quotemeta($usage->pod)
+ . '/ms'
+ ),
+ __FILE__
+ );
}
-sub confirm {
- print "Are you sure you want to do this? (type YES to confirm) ";
- my $response = <STDIN>;
- return 1 if ($response=~/^YES/);
- return;
+if($opts->{help}) {
+ $usage->die();
}
-__END__
-
-=head1 NAME
-
-dbicadmin - Execute operations upon DBIx::Class objects.
-
-=head1 SYNOPSIS
-
- dbicadmin --op=insert --schema=My::Schema --class=Class --set=JSON
- dbicadmin --op=update --schema=My::Schema --class=Class --set=JSON --where=JSON
- dbicadmin --op=delete --schema=My::Schema --class=Class --where=JSON
- dbicadmin --op=select --schema=My::Schema --class=Class --where=JSON --format=tsv
-
-=head1 DESCRIPTION
-
-This utility provides the ability to run INSERTs, UPDATEs,
-DELETEs, and SELECTs on any DBIx::Class object.
-
-=head1 OPTIONS
-
-=head2 op
-
-The type of operation. Valid values are insert, update, delete,
-and select.
-
-=head2 schema
-
-The name of your schema class.
-
-=head2 class
-
-The name of the class, within your schema, that you want to run
-the operation on.
-
-=head2 connect
-
-A JSON array to be passed to your schema class upon connecting.
-The array will need to be compatible with whatever the DBIC
-->connect() method requires.
-
-=head2 set
-
-This option must be valid JSON data string and is passed in to
-the DBIC update() method. Use this option with the update
-and insert ops.
-
-=head2 where
-
-This option must be valid JSON data string and is passed in as
-the first argument to the DBIC search() method. Use this
-option with the update, delete, and select ops.
-
-=head2 attrs
-
-This option must be valid JSON data string and is passed in as
-the second argument to the DBIC search() method. Use this
-option with the update, delete, and select ops.
-
-=head2 help
-
-Display this help page.
-
-=head2 force
-
-Suppresses the confirmation dialogues that are usually displayed
-when someone runs a DELETE or UPDATE action.
-
-=head2 quiet
-
-Do not display status messages.
-
-=head2 trace
+# option compatibility mangle
+if($opts->{connect}) {
+ $opts->{connect_info} = delete $opts->{connect};
+}
-Turns on tracing on the DBI storage, thus printing SQL as it is
-executed.
+my $admin = DBIx::Class::Admin->new( %$opts );
-=head2 tlibs
-This option is purely for testing during the DBIC installation. Do
-not use it.
+my $action = $opts->{action};
-=head1 JSON
+$action = $opts->{op} if ($action eq 'op');
-JSON is a lightweight data-interchange format. It allows you
-to express complex data structures for use in the where and
-set options.
+print "Performig action $action...\n";
-This module turns on L<JSON>'s BareKey and QuotApos options so
-that your data can look a bit more readable.
+my $res = $admin->$action();
+if ($action eq 'select') {
- --where={"this":"that"} # generic JSON
- --where={this:'that'} # with BareKey and QuoteApos
+ my $format = $opts->{format} || 'tsv';
+ die('Invalid format') if ($format!~/^tsv|csv$/s);
-Consider wrapping your JSON in outer quotes so that you don't
-have to escape your inner quotes.
+ require Text::CSV;
- --where={this:\"that\"} # no outer quote
- --where='{this:"that"}' # outer quoted
+ my $csv = Text::CSV->new({
+ sep_char => ( $format eq 'tsv' ? "\t" : ',' ),
+ });
-=head1 AUTHOR
+ foreach my $row (@$res) {
+ $csv->combine( @$row );
+ print $csv->string()."\n";
+ }
+}
-Aran Deltac <bluefeet@cpan.org>
-=head1 LICENSE
+__END__
-You may distribute this code under the same terms as Perl itself.
+# auto_pod_begin
+#
+# This will be replaced by the actual pod when selfinject-pod is invoked
+#
+# auto_pod_end
+# vim: et ft=perl
use lib qw(t/lib);
use DBICTest;
-my @MODULES = (
- 'Test::Pod 1.26',
-);
-
# Don't run tests for installs
unless ( DBICTest::AuthorCheck->is_author || $ENV{AUTOMATED_TESTING} || $ENV{RELEASE_TESTING} ) {
plan( skip_all => "Author tests not required for installation" );
}
-# Load the testing modules
-foreach my $MODULE ( @MODULES ) {
- eval "use $MODULE";
- if ( $@ ) {
- $ENV{RELEASE_TESTING}
- ? die( "Failed to load required release-testing module $MODULE" )
- : plan( skip_all => "$MODULE not available for testing" );
- }
+require DBIx::Class;
+unless ( DBIx::Class::Optional::Dependencies->req_ok_for ('test_pod') ) {
+ my $missing = DBIx::Class::Optional::Dependencies->req_missing_for ('test_pod');
+ $ENV{RELEASE_TESTING} || DBICTest::AuthorCheck->is_author
+ ? die ("Failed to load release-testing module requirements: $missing")
+ : plan skip_all => "Test needs: $missing"
}
-all_pod_files_ok();
+Test::Pod::all_pod_files_ok();
use lib qw(t/lib);
use DBICTest;
-my @MODULES = (
- 'Test::Pod::Coverage 1.08',
- 'Pod::Coverage 0.20',
-);
-
# Don't run tests for installs
unless ( DBICTest::AuthorCheck->is_author || $ENV{AUTOMATED_TESTING} || $ENV{RELEASE_TESTING} ) {
plan( skip_all => "Author tests not required for installation" );
}
-# Load the testing modules
-foreach my $MODULE ( @MODULES ) {
- eval "use $MODULE";
- if ( $@ ) {
- $ENV{RELEASE_TESTING}
- ? die( "Failed to load required release-testing module $MODULE" )
- : plan( skip_all => "$MODULE not available for testing" );
- }
+require DBIx::Class;
+unless ( DBIx::Class::Optional::Dependencies->req_ok_for ('test_podcoverage') ) {
+ my $missing = DBIx::Class::Optional::Dependencies->req_missing_for ('test_podcoverage');
+ $ENV{RELEASE_TESTING} || DBICTest::AuthorCheck->is_author
+ ? die ("Failed to load release-testing module requirements: $missing")
+ : plan skip_all => "Test needs: $missing"
}
# Since this is about checking documentation, a little documentation
/]
},
+ 'DBIx::Class::Storage::DBI::Replicated*' => {
+ ignore => [ qw/
+ connect_call_do_sql
+ disconnect_call_do_sql
+ /]
+ },
+
+ 'DBIx::Class::Admin::*' => { skip => 1 },
'DBIx::Class::ClassResolver::PassThrough' => { skip => 1 },
'DBIx::Class::Componentised' => { skip => 1 },
'DBIx::Class::Relationship::*' => { skip => 1 },
'DBIx::Class::Storage::DBI::Replicated::Types' => { skip => 1 },
# test some specific components whose parents are exempt below
- 'DBIx::Class::Storage::DBI::Replicated*' => {},
'DBIx::Class::Relationship::Base' => {},
# internals
if exists($ex->{ignore});
# run the test with the potentially modified parm set
- pod_coverage_ok($module, $parms, "$module POD coverage");
+ Test::Pod::Coverage::pod_coverage_ok($module, $parms, "$module POD coverage");
}
}
--- /dev/null
+use strict;
+use warnings;
+no warnings qw/once/;
+
+use Test::More;
+use lib qw(t/lib);
+use Scalar::Util; # load before we break require()
+
+use_ok 'DBIx::Class::Optional::Dependencies';
+
+my $sqlt_dep = DBIx::Class::Optional::Dependencies->req_list_for ('deploy');
+is_deeply (
+ [ keys %$sqlt_dep ],
+ [ 'SQL::Translator' ],
+ 'Correct deploy() dependency list',
+);
+
+# make module loading impossible, regardless of actual libpath contents
+@INC = (sub { die('Optional Dep Test') } );
+
+ok (
+ ! DBIx::Class::Optional::Dependencies->req_ok_for ('deploy'),
+ 'deploy() deps missing',
+);
+
+like (
+ DBIx::Class::Optional::Dependencies->req_missing_for ('deploy'),
+ qr/^SQL::Translator \>\= \d/,
+ 'expected missing string contents',
+);
+
+like (
+ DBIx::Class::Optional::Dependencies->req_errorlist_for ('deploy')->{'SQL::Translator'},
+ qr/Optional Dep Test/,
+ 'custom exception found in errorlist',
+);
+
+
+# make it so the module appears loaded
+$INC{'SQL/Translator.pm'} = 1;
+$SQL::Translator::VERSION = 999;
+
+ok (
+ ! DBIx::Class::Optional::Dependencies->req_ok_for ('deploy'),
+ 'deploy() deps missing cached properly',
+);
+
+#reset cache
+%DBIx::Class::Optional::Dependencies::req_availability_cache = ();
+
+
+ok (
+ DBIx::Class::Optional::Dependencies->req_ok_for ('deploy'),
+ 'deploy() deps present',
+);
+
+is (
+ DBIx::Class::Optional::Dependencies->req_missing_for ('deploy'),
+ '',
+ 'expected null missing string',
+);
+
+is_deeply (
+ DBIx::Class::Optional::Dependencies->req_errorlist_for ('deploy'),
+ {},
+ 'expected empty errorlist',
+);
+
+
+done_testing;
use lib qw(t/lib);
BEGIN {
- eval { require Test::Memory::Cycle; require Devel::Cycle };
- if ($@ or Devel::Cycle->VERSION < 1.10) {
- plan skip_all => "leak test needs Test::Memory::Cycle and Devel::Cycle >= 1.10";
- };
+ require DBIx::Class;
+ plan skip_all => 'Test needs: ' . DBIx::Class::Optional::Dependencies->req_missing_for ('test_cycle')
+ unless ( DBIx::Class::Optional::Dependencies->req_ok_for ('test_cycle') );
}
use DBICTest;
# make sure we got rid of the compat shims
SKIP: {
- skip "Remove in 0.09", 5 if $DBIx::Class::VERSION < 0.09;
+ skip "Remove in 0.082", 3 if $DBIx::Class::VERSION < 0.082;
- for (qw/compare_relationship_keys pk_depends_on resolve_condition resolve_join resolve_prefetch/) {
+ for (qw/compare_relationship_keys pk_depends_on resolve_condition/) {
ok (! DBIx::Class::ResultSource->can ($_), "$_ no longer provided by DBIx::Class::ResultSource");
}
}
my $schema = DBICTest->init_schema();
-BEGIN {
- eval "use DBD::SQLite";
- plan $@ ? (skip_all => 'needs DBD::SQLite for testing') : (tests => 6);
-}
-
my $art = $schema->resultset("Artist")->find(1);
isa_ok $art => 'DBICTest::Artist';
ok($art->name($name) eq $name, 'update');
-{
+{
my @changed_keys = $art->is_changed;
is( scalar (@changed_keys), 0, 'field changed but same value' );
-}
+}
$art->discard_changes;
my $art_100 = $schema->resultset("Artist")->find(100);
$art_100->artistid(101);
ok($art_100->update(), 'update allows pk mutation via column accessor');
+
+done_testing;
$dbh->do("DROP SEQUENCE nonpkid_seq");
$dbh->do("DROP TABLE artist");
$dbh->do("DROP TABLE sequence_test");
- $dbh->do("DROP TABLE cd");
$dbh->do("DROP TABLE track");
+ $dbh->do("DROP TABLE cd");
};
$dbh->do("CREATE SEQUENCE artist_seq START WITH 1 MAXVALUE 999999 MINVALUE 0");
$dbh->do("CREATE SEQUENCE cd_seq START WITH 1 MAXVALUE 999999 MINVALUE 0");
$dbh->do("CREATE SEQUENCE pkid1_seq START WITH 1 MAXVALUE 999999 MINVALUE 0");
$dbh->do("CREATE SEQUENCE pkid2_seq START WITH 10 MAXVALUE 999999 MINVALUE 0");
$dbh->do("CREATE SEQUENCE nonpkid_seq START WITH 20 MAXVALUE 999999 MINVALUE 0");
+
$dbh->do("CREATE TABLE artist (artistid NUMBER(12), name VARCHAR(255), rank NUMBER(38), charfield VARCHAR2(10))");
+$dbh->do("ALTER TABLE artist ADD (CONSTRAINT artist_pk PRIMARY KEY (artistid))");
+
$dbh->do("CREATE TABLE sequence_test (pkid1 NUMBER(12), pkid2 NUMBER(12), nonpkid NUMBER(12), name VARCHAR(255))");
-$dbh->do("CREATE TABLE cd (cdid NUMBER(12), artist NUMBER(12), title VARCHAR(255), year VARCHAR(4), genreid NUMBER(12), single_track NUMBER(12))");
-$dbh->do("CREATE TABLE track (trackid NUMBER(12), cd NUMBER(12), position NUMBER(12), title VARCHAR(255), last_updated_on DATE, last_updated_at DATE, small_dt DATE)");
+$dbh->do("ALTER TABLE sequence_test ADD (CONSTRAINT sequence_test_constraint PRIMARY KEY (pkid1, pkid2))");
-$dbh->do("ALTER TABLE artist ADD (CONSTRAINT artist_pk PRIMARY KEY (artistid))");
+$dbh->do("CREATE TABLE cd (cdid NUMBER(12), artist NUMBER(12), title VARCHAR(255), year VARCHAR(4), genreid NUMBER(12), single_track NUMBER(12))");
$dbh->do("ALTER TABLE cd ADD (CONSTRAINT cd_pk PRIMARY KEY (cdid))");
-$dbh->do("ALTER TABLE sequence_test ADD (CONSTRAINT sequence_test_constraint PRIMARY KEY (pkid1, pkid2))");
+
+$dbh->do("CREATE TABLE track (trackid NUMBER(12), cd NUMBER(12) REFERENCES cd(cdid) DEFERRABLE, position NUMBER(12), title VARCHAR(255), last_updated_on DATE, last_updated_at DATE, small_dt DATE)");
+
$dbh->do(qq{
CREATE OR REPLACE TRIGGER artist_insert_trg
BEFORE INSERT ON artist
is( scalar @results, 1, "Group by with limit OK" );
}
+# test with_deferred_fk_checks
+lives_ok {
+ $schema->storage->with_deferred_fk_checks(sub {
+ $schema->resultset('Track')->create({
+ trackid => 999, cd => 999, position => 1, title => 'deferred FK track'
+ });
+ $schema->resultset('CD')->create({
+ artist => 1, cdid => 999, year => '2003', title => 'deferred FK cd'
+ });
+ });
+} 'with_deferred_fk_checks code survived';
+
+is eval { $schema->resultset('Track')->find(999)->title }, 'deferred FK track',
+ 'code in with_deferred_fk_checks worked';
+
+throws_ok {
+ $schema->resultset('Track')->create({
+ trackid => 1, cd => 9999, position => 1, title => 'Track1'
+ });
+} qr/constraint/i, 'with_deferred_fk_checks is off';
+
# test auto increment using sequences WITHOUT triggers
for (1..5) {
my $st = $schema->resultset('SequenceTest')->create({ name => 'foo' });
is($st->pkid1, 55, "Oracle Auto-PK without trigger: First primary key set manually");
SKIP: {
- skip 'buggy BLOB support in DBD::Oracle 1.23', 8
- if $DBD::Oracle::VERSION == 1.23;
-
my %binstr = ( 'small' => join('', map { chr($_) } ( 1 .. 127 )) );
$binstr{'large'} = $binstr{'small'} x 1024;
my $rs = $schema->resultset('BindType');
my $id = 0;
+ if ($DBD::Oracle::VERSION eq '1.23') {
+ throws_ok { $rs->create({ id => 1, blob => $binstr{large} }) }
+ qr/broken/,
+ 'throws on blob insert with DBD::Oracle == 1.23';
+
+ skip 'buggy BLOB support in DBD::Oracle 1.23', 7;
+ }
+
foreach my $type (qw( blob clob )) {
foreach my $size (qw( small large )) {
$id++;
$dbh->do("DROP SEQUENCE nonpkid_seq");
$dbh->do("DROP TABLE artist");
$dbh->do("DROP TABLE sequence_test");
- $dbh->do("DROP TABLE cd");
$dbh->do("DROP TABLE track");
+ $dbh->do("DROP TABLE cd");
$dbh->do("DROP TABLE bindtype_test");
}
}
having => \['1 = ?', [ test => 1 ] ], #test having propagation
prefetch => 'owner',
rows => 2, # 3 results total
- order_by => { -desc => 'owner' },
+ order_by => { -desc => 'me.owner' },
unsafe_subselect_ok => 1,
},
);
my ($dsn, $user, $pass) = @ENV{map { "DBICTEST_SYBASE_${_}" } qw/DSN USER PASS/};
-my $TESTS = 63 + 2;
+my $TESTS = 66 + 2;
if (not ($dsn && $user)) {
plan skip_all =>
'updated money value to NULL round-trip'
);
diag $@ if $@;
+
+# Test computed columns and timestamps
+ $schema->storage->dbh_do (sub {
+ my ($storage, $dbh) = @_;
+ eval { $dbh->do("DROP TABLE computed_column_test") };
+ $dbh->do(<<'SQL');
+CREATE TABLE computed_column_test (
+ id INT IDENTITY PRIMARY KEY,
+ a_computed_column AS getdate(),
+ a_timestamp timestamp,
+ charfield VARCHAR(20) DEFAULT 'foo'
+)
+SQL
+ });
+
+ require DBICTest::Schema::ComputedColumn;
+ $schema->register_class(
+ ComputedColumn => 'DBICTest::Schema::ComputedColumn'
+ );
+
+ ok (($rs = $schema->resultset('ComputedColumn')),
+ 'got rs for ComputedColumn');
+
+ lives_ok { $row = $rs->create({}) }
+ 'empty insert for a table with computed columns survived';
+
+ lives_ok {
+ $row->update({ charfield => 'bar' })
+ } 'update of a table with computed columns survived';
}
is $ping_count, 0, 'no pings';
END {
if (my $dbh = eval { $schema->storage->_dbh }) {
eval { $dbh->do("DROP TABLE $_") }
- for qw/artist bindtype_test money_test/;
+ for qw/artist bindtype_test money_test computed_column_test/;
}
}
--- /dev/null
+use strict;
+use warnings;
+
+use Test::More;
+use Test::Exception;
+use lib qw(t/lib);
+use DBICTest;
+
+# tests stolen from 748informix.t
+
+my ($dsn, $user, $pass) = @ENV{map { "DBICTEST_SYBASE_ASA_${_}" } qw/DSN USER PASS/};
+my ($dsn2, $user2, $pass2) = @ENV{map { "DBICTEST_SYBASE_ASA_ODBC_${_}" } qw/DSN USER PASS/};
+
+plan skip_all => <<'EOF' unless $dsn || $dsn2;
+Set $ENV{DBICTEST_SYBASE_ASA_DSN} and/or $ENV{DBICTEST_SYBASE_ASA_ODBC_DSN},
+_USER and _PASS to run these tests
+EOF
+
+my @info = (
+ [ $dsn, $user, $pass ],
+ [ $dsn2, $user2, $pass2 ],
+);
+
+my @handles_to_clean;
+
+foreach my $info (@info) {
+ my ($dsn, $user, $pass) = @$info;
+
+ next unless $dsn;
+
+ my $schema = DBICTest::Schema->connect($dsn, $user, $pass, {
+ auto_savepoint => 1
+ });
+
+ my $dbh = $schema->storage->dbh;
+
+ push @handles_to_clean, $dbh;
+
+ eval { $dbh->do("DROP TABLE artist") };
+
+ $dbh->do(<<EOF);
+ CREATE TABLE artist (
+ artistid INT IDENTITY PRIMARY KEY,
+ name VARCHAR(255) NULL,
+ charfield CHAR(10) NULL,
+ rank INT DEFAULT 13
+ )
+EOF
+
+ my $ars = $schema->resultset('Artist');
+ is ( $ars->count, 0, 'No rows at first' );
+
+# test primary key handling
+ my $new = $ars->create({ name => 'foo' });
+ ok($new->artistid, "Auto-PK worked");
+
+# test explicit key spec
+ $new = $ars->create ({ name => 'bar', artistid => 66 });
+ is($new->artistid, 66, 'Explicit PK worked');
+ $new->discard_changes;
+ is($new->artistid, 66, 'Explicit PK assigned');
+
+# test savepoints
+ eval {
+ $schema->txn_do(sub {
+ eval {
+ $schema->txn_do(sub {
+ $ars->create({ name => 'in_savepoint' });
+ die "rolling back savepoint";
+ });
+ };
+ ok ((not $ars->search({ name => 'in_savepoint' })->first),
+ 'savepoint rolled back');
+ $ars->create({ name => 'in_outer_txn' });
+ die "rolling back outer txn";
+ });
+ };
+
+ like $@, qr/rolling back outer txn/,
+ 'correct exception for rollback';
+
+ ok ((not $ars->search({ name => 'in_outer_txn' })->first),
+ 'outer txn rolled back');
+
+# test populate
+ lives_ok (sub {
+ my @pop;
+ for (1..2) {
+ push @pop, { name => "Artist_$_" };
+ }
+ $ars->populate (\@pop);
+ });
+
+# test populate with explicit key
+ lives_ok (sub {
+ my @pop;
+ for (1..2) {
+ push @pop, { name => "Artist_expkey_$_", artistid => 100 + $_ };
+ }
+ $ars->populate (\@pop);
+ });
+
+# count what we did so far
+ is ($ars->count, 6, 'Simple count works');
+
+# test LIMIT support
+ my $lim = $ars->search( {},
+ {
+ rows => 3,
+ offset => 4,
+ order_by => 'artistid'
+ }
+ );
+ is( $lim->count, 2, 'ROWS+OFFSET count ok' );
+ is( $lim->all, 2, 'Number of ->all objects matches count' );
+
+# test iterator
+ $lim->reset;
+ is( $lim->next->artistid, 101, "iterator->next ok" );
+ is( $lim->next->artistid, 102, "iterator->next ok" );
+ is( $lim->next, undef, "next past end of resultset ok" );
+
+# test empty insert
+ {
+ local $ars->result_source->column_info('artistid')->{is_auto_increment} = 0;
+
+ lives_ok { $ars->create({}) }
+ 'empty insert works';
+ }
+
+# test blobs (stolen from 73oracle.t)
+ eval { $dbh->do('DROP TABLE bindtype_test') };
+ $dbh->do(qq[
+ CREATE TABLE bindtype_test
+ (
+ id INT NOT NULL PRIMARY KEY,
+ bytea INT NULL,
+ blob LONG BINARY NULL,
+ clob LONG VARCHAR NULL
+ )
+ ],{ RaiseError => 1, PrintError => 1 });
+
+ my %binstr = ( 'small' => join('', map { chr($_) } ( 1 .. 127 )) );
+ $binstr{'large'} = $binstr{'small'} x 1024;
+
+ my $maxloblen = length $binstr{'large'};
+ local $dbh->{'LongReadLen'} = $maxloblen;
+
+ my $rs = $schema->resultset('BindType');
+ my $id = 0;
+
+ foreach my $type (qw( blob clob )) {
+ foreach my $size (qw( small large )) {
+ $id++;
+
+# turn off horrendous binary DBIC_TRACE output
+ local $schema->storage->{debug} = 0;
+
+ lives_ok { $rs->create( { 'id' => $id, $type => $binstr{$size} } ) }
+ "inserted $size $type without dying";
+
+ ok($rs->find($id)->$type eq $binstr{$size}, "verified inserted $size $type" );
+ }
+ }
+}
+
+done_testing;
+
+# clean up our mess
+END {
+ foreach my $dbh (@handles_to_clean) {
+ eval { $dbh->do("DROP TABLE $_") } for qw/artist bindtype_test/;
+ }
+}
my $schema = DBICTest->init_schema();
-BEGIN {
- eval "use DBD::SQLite";
- plan $@ ? (skip_all => 'needs DBD::SQLite for testing') : (tests => 10);
-}
-
# test LIMIT
my $it = $schema->resultset("CD")->search( {},
{ rows => 3,
);
is( $it->count, 1, "complex abstract count ok" );
+done_testing;
my $orig_debug = $schema->storage->debug;
-BEGIN {
- eval "use DBD::SQLite";
- plan $@
- ? ( skip_all => 'needs DBD::SQLite for testing' )
- : ( tests => 33 );
-}
-
# test the abstract join => SQL generator
my $sa = new DBIx::Class::SQLAHacks;
is(cd_count(), 5, '5 rows in table cd');
is(tk_count(), 3, '3 rows in table twokeys');
}
+
+done_testing;
$sub_rs->single,
{
artist => 1,
- track_position => 2,
- tracks => {
+ tracks => {
+ title => 'Apiary',
trackid => 17,
- title => 'Apiary',
},
},
'columns/select/as fold properly on sub-searches',
use Test::Warn;
use lib qw(t/lib);
use DBICTest;
-use utf8;
warning_like (
sub {
DBICTest::Schema::CD->utf8_columns('title');
Class::C3->reinitialize();
-my $cd = $schema->resultset('CD')->create( { artist => 1, title => 'øni', year => '2048' } );
-my $utf8_char = 'uniuni';
-
+my $cd = $schema->resultset('CD')->create( { artist => 1, title => "weird\x{466}stuff", year => '2048' } );
ok( utf8::is_utf8( $cd->title ), 'got title with utf8 flag' );
+ok(! utf8::is_utf8( $cd->{_column_data}{title} ), 'store title without utf8' );
+
ok(! utf8::is_utf8( $cd->year ), 'got year without utf8 flag' );
+ok(! utf8::is_utf8( $cd->{_column_data}{year} ), 'store year without utf8' );
-utf8::decode($utf8_char);
-$cd->title($utf8_char);
+$cd->title('nonunicode');
+ok(! utf8::is_utf8( $cd->title ), 'got title without utf8 flag' );
ok(! utf8::is_utf8( $cd->{_column_data}{title} ), 'store utf8-less chars' );
use DBICTest;
BEGIN {
- require DBIx::Class::Storage::DBI;
+ require DBIx::Class;
plan skip_all =>
- 'Test needs SQL::Translator ' . DBIx::Class::Storage::DBI->_sqlt_minimum_version
- if not DBIx::Class::Storage::DBI->_sqlt_version_ok;
+ 'Test needs ' . DBIx::Class::Optional::Dependencies->req_missing_for ('deploy')
+ unless DBIx::Class::Optional::Dependencies->req_ok_for ('deploy')
+}
+
+my $custom_deployment_statements_called = 0;
+
+sub DBICTest::Schema::deployment_statements {
+ $custom_deployment_statements_called = 1;
+ my $self = shift;
+ return $self->next::method(@_);
}
my $schema = DBICTest->init_schema (no_deploy => 1);
-# replace the sqlt calback with a custom version ading an index
-$schema->source('Track')->sqlt_deploy_callback(sub {
- my ($self, $sqlt_table) = @_;
+{
+ my $deploy_hook_called = 0;
- is (
- $sqlt_table->schema->translator->producer_type,
- join ('::', 'SQL::Translator::Producer', $schema->storage->sqlt_type),
- 'Production type passed to translator object',
- );
+  # replace the sqlt callback with a custom version adding an index
+ $schema->source('Track')->sqlt_deploy_callback(sub {
+ my ($self, $sqlt_table) = @_;
- if ($schema->storage->sqlt_type eq 'SQLite' ) {
- $sqlt_table->add_index( name => 'track_title', fields => ['title'] )
- or die $sqlt_table->error;
- }
+ $deploy_hook_called = 1;
- $self->default_sqlt_deploy_hook($sqlt_table);
-});
+ is (
+ $sqlt_table->schema->translator->producer_type,
+ join ('::', 'SQL::Translator::Producer', $schema->storage->sqlt_type),
+ 'Production type passed to translator object',
+ );
-$schema->deploy; # do not remove, this fires the is() test in the callback above
+ if ($schema->storage->sqlt_type eq 'SQLite' ) {
+ $sqlt_table->add_index( name => 'track_title', fields => ['title'] )
+ or die $sqlt_table->error;
+ }
+ $self->default_sqlt_deploy_hook($sqlt_table);
+ });
+
+ $schema->deploy; # do not remove, this fires the is() test in the callback above
+ ok($deploy_hook_called, 'deploy hook got called');
+ ok($custom_deployment_statements_called, '->deploy used the schemas deploy_statements method');
+}
my $translator = SQL::Translator->new(
# test +select/+as for single column
my $psrs = $schema->resultset('CD')->search({},
{
- '+select' => \'COUNT(*)',
- '+as' => 'count'
+ '+select' => \'MAX(year)',
+ '+as' => 'last_year'
}
);
-lives_ok(sub { $psrs->get_column('count')->next }, '+select/+as additional column "count" present (scalar)');
+lives_ok(sub { $psrs->get_column('last_year')->next }, '+select/+as additional column "last_year" present (scalar)');
dies_ok(sub { $psrs->get_column('noSuchColumn')->next }, '+select/+as nonexistent column throws exception');
# test +select/+as for overriding a column
# test +select/+as for multiple columns
$psrs = $schema->resultset('CD')->search({},
{
- '+select' => [ \'COUNT(*)', 'title' ],
- '+as' => [ 'count', 'addedtitle' ]
+ '+select' => [ \'LENGTH(title) AS title_length', 'title' ],
+ '+as' => [ 'tlength', 'addedtitle' ]
}
);
-lives_ok(sub { $psrs->get_column('count')->next }, '+select/+as multiple additional columns, "count" column present');
+lives_ok(sub { $psrs->get_column('tlength')->next }, '+select/+as multiple additional columns, "tlength" column present');
lives_ok(sub { $psrs->get_column('addedtitle')->next }, '+select/+as multiple additional columns, "addedtitle" column present');
# test that +select/+as specs do not leak
);
is_same_sql_bind (
- $psrs->get_column('count')->as_query,
- '(SELECT COUNT(*) FROM cd me)',
+ $psrs->get_column('tlength')->as_query,
+ '(SELECT LENGTH(title) AS title_length FROM cd me)',
[],
'Correct SQL for get_column/+as func'
);
-
+# test that order_by over a function forces a subquery
+lives_ok ( sub {
+ is_deeply (
+ [ $psrs->search ({}, { order_by => { -desc => 'title_length' } })->get_column ('title')->all ],
+ [
+ "Generic Manufactured Singles",
+ "Come Be Depressed With Us",
+ "Caterwaulin' Blues",
+ "Spoonful of bees",
+ "Forkful of bees",
+ ],
+ 'Subquery count induced by aliased ordering function',
+ );
+});
+
+# test for prefetch not leaking
{
my $rs = $schema->resultset("CD")->search({}, { prefetch => 'artist' });
my $rsc = $rs->get_column('year');
WHERE
cdid > CAST(? AS INT)
AND tracks.last_updated_at IS NOT NULL
- AND tracks.last_updated_on < CAST (? AS yyy)
+ AND tracks.last_updated_on < CAST (? AS DateTime)
AND tracks.position = ?
AND tracks.single_track = CAST(? AS INT)
)',
|| plan skip_all => 'Test needs Time::HiRes';
Time::HiRes->import(qw/time sleep/);
- require DBIx::Class::Storage::DBI;
+ require DBIx::Class;
plan skip_all =>
- 'Test needs SQL::Translator ' . DBIx::Class::Storage::DBI->_sqlt_minimum_version
- if not DBIx::Class::Storage::DBI->_sqlt_version_ok;
+ 'Test needs ' . DBIx::Class::Optional::Dependencies->req_missing_for ('deploy')
+ unless DBIx::Class::Optional::Dependencies->req_ok_for ('deploy')
}
use lib qw(t/lib);
is($schema_v3->get_db_version(), '3.0', 'db version number upgraded');
}
+# Now, try a v1 -> v3 upgrade with a file that has comments strategically placed in it.
+# First put the v1 schema back again...
+{
+ # drop all the tables...
+ eval { $schema_v1->storage->dbh->do('drop table ' . $version_table_name) };
+ eval { $schema_v1->storage->dbh->do('drop table ' . $old_table_name) };
+ eval { $schema_v1->storage->dbh->do('drop table TestVersion') };
+
+ {
+ local $DBICVersion::Schema::VERSION = '1.0';
+ $schema_v1->deploy;
+ }
+ is($schema_v1->get_db_version(), '1.0', 'get_db_version 1.0 ok');
+}
+
+# add a "harmless" comment before one of the statements.
+system( qq($^X -pi -e "s/ALTER/-- this is a comment\nALTER/" $fn->{trans_v23};) );
+
+# Then attempt v1 -> v3 upgrade
+{
+  # Suppress only the expected "Attempting upgrade." warnings; pass everything
+  # else through verbatim. (The original used bare `warn`, which discards the
+  # actual message text - it must re-emit $_[0].)
+  local $SIG{__WARN__} = sub { warn $_[0] if $_[0] !~ /Attempting upgrade\.$/ };
+  $schema_v3->upgrade();
+  is($schema_v3->get_db_version(), '3.0', 'db version number upgraded to 3.0');
+
+  # make sure that the column added after the comment is actually added.
+  lives_ok ( sub {
+    $schema_v3->storage->dbh->do('select ExtraColumn from TestVersion');
+  }, 'new column created');
+}
+
+
# check behaviour of DBIC_NO_VERSION_CHECK env var and ignore_version connect attr
{
my $schema_version = DBICVersion::Schema->connect($dsn, $user, $pass);
use Scalar::Util ();
BEGIN {
- require DBIx::Class::Storage::DBI;
+ require DBIx::Class;
plan skip_all =>
- 'Test needs SQL::Translator ' . DBIx::Class::Storage::DBI->_sqlt_minimum_version
- if not DBIx::Class::Storage::DBI->_sqlt_version_ok;
+ 'Test needs ' . DBIx::Class::Optional::Dependencies->req_missing_for ('deploy')
+ unless DBIx::Class::Optional::Dependencies->req_ok_for ('deploy')
}
# Test for SQLT-related leaks
{
my $s = DBICTest::Schema->clone;
- create_schema ({ schema => $s });
+ my $sqlt_schema = create_schema ({ schema => $s });
Scalar::Util::weaken ($s);
ok (!$s, 'Schema not leaked');
+
+ isa_ok ($sqlt_schema, 'SQL::Translator::Schema', 'SQLT schema object produced');
}
+# make sure classname-style works
+lives_ok { isa_ok (create_schema ({ schema => 'DBICTest::Schema' }), 'SQL::Translator::Schema', 'SQLT schema object produced') };
+
my $schema = DBICTest->init_schema();
# Dummy was yanked out by the sqlt hook test
--- /dev/null
+use strict;
+use warnings;
+
+use Test::More;
+
+BEGIN {
+ require DBIx::Class;
+ plan skip_all => 'Test needs ' . DBIx::Class::Optional::Dependencies->req_missing_for('admin')
+ unless DBIx::Class::Optional::Dependencies->req_ok_for('admin');
+}
+
+use_ok 'DBIx::Class::Admin';
+
+
+done_testing;
--- /dev/null
+use strict;
+use warnings;
+
+use Test::More;
+use Test::Exception;
+use Test::Warn;
+
+BEGIN {
+ require DBIx::Class;
+ plan skip_all => 'Test needs ' . DBIx::Class::Optional::Dependencies->req_missing_for('admin')
+ unless DBIx::Class::Optional::Dependencies->req_ok_for('admin');
+
+ plan skip_all => 'Test needs ' . DBIx::Class::Optional::Dependencies->req_missing_for('deploy')
+ unless DBIx::Class::Optional::Dependencies->req_ok_for('deploy');
+}
+
+use lib qw(t/lib);
+use DBICTest;
+
+use Path::Class;
+
+use_ok 'DBIx::Class::Admin';
+
+
+my $sql_dir = dir(qw/t var/);
+my @connect_info = DBICTest->_database(
+ no_deploy=>1,
+ no_populate=>1,
+ sqlite_use_file => 1,
+);
+{ # create the schema
+
+# make sure we are clean
+clean_dir($sql_dir);
+
+
+my $admin = DBIx::Class::Admin->new(
+ schema_class=> "DBICTest::Schema",
+ sql_dir=> $sql_dir,
+ connect_info => \@connect_info,
+);
+isa_ok ($admin, 'DBIx::Class::Admin', 'create the admin object');
+lives_ok { $admin->create('MySQL'); } 'Can create MySQL sql';
+lives_ok { $admin->create('SQLite'); } 'Can Create SQLite sql';
+}
+
+{ # upgrade schema
+
+#my $schema = DBICTest->init_schema(
+# no_deploy => 1,
+# no_populate => 1,
+# sqlite_use_file => 1,
+#);
+
+clean_dir($sql_dir);
+require DBICVersion_v1;
+
+my $admin = DBIx::Class::Admin->new(
+ schema_class => 'DBICVersion::Schema',
+ sql_dir => $sql_dir,
+ connect_info => \@connect_info,
+);
+
+my $schema = $admin->schema();
+
+lives_ok { $admin->create($schema->storage->sqlt_type(), {add_drop_table=>0}); } 'Can create DBICVersionOrig sql in ' . $schema->storage->sqlt_type;
+lives_ok { $admin->deploy( ) } 'Can Deploy schema';
+
+# connect to now deployed schema
+lives_ok { $schema = DBICVersion::Schema->connect(@{$schema->storage->connect_info()}); } 'Connect to deployed Database';
+
+is($schema->get_db_version, $DBICVersion::Schema::VERSION, 'Schema deployed and versions match');
+
+
+require DBICVersion_v2;
+
+$admin = DBIx::Class::Admin->new(
+ schema_class => 'DBICVersion::Schema',
+ sql_dir => $sql_dir,
+ connect_info => \@connect_info
+);
+
+lives_ok { $admin->create($schema->storage->sqlt_type(), {}, "1.0" ); } 'Can create diff for ' . $schema->storage->sqlt_type;
+{
+ local $SIG{__WARN__} = sub { warn $_[0] unless $_[0] =~ /DB version .+? is lower than the schema version/ };
+ lives_ok {$admin->upgrade();} 'upgrade the schema';
+}
+
+is($schema->get_db_version, $DBICVersion::Schema::VERSION, 'Schema and db versions match');
+
+}
+
+{ # install
+
+clean_dir($sql_dir);
+
+my $admin = DBIx::Class::Admin->new(
+  schema_class  => 'DBICVersion::Schema',
+  sql_dir      => $sql_dir,
+  _confirm      => 1,
+  connect_info  => \@connect_info,
+);
+
+$admin->version("3.0");
+lives_ok { $admin->install(); } 'install schema version 3.0';
+is($admin->schema->get_db_version, "3.0", 'db thinks its version 3.0');
+dies_ok { $admin->install("4.0"); } 'cannot install to already existing version';
+
+# forcing the install should emit a warning but succeed anyway
+$admin->force(1);
+warnings_exist ( sub {
+  lives_ok { $admin->install("4.0") } 'can force install to already existing version'
+}, qr/Forcing install may not be a good idea/, 'Force warning emitted' );
+is($admin->schema->get_db_version, "4.0", 'db thinks its version 4.0');
+#clean_dir($sql_dir);
+}
+
+sub clean_dir {
+ my ($dir) = @_;
+ $dir = $dir->resolve;
+ if ( ! -d $dir ) {
+ $dir->mkpath();
+ }
+ foreach my $file ($dir->children) {
+ # skip any hidden files
+ next if ($file =~ /^\./);
+ unlink $file;
+ }
+}
+
+done_testing;
--- /dev/null
+use strict;
+use warnings;
+
+use Test::More;
+
+use Test::Exception;
+
+BEGIN {
+ require DBIx::Class;
+ plan skip_all => 'Test needs ' . DBIx::Class::Optional::Dependencies->req_missing_for('admin')
+ unless DBIx::Class::Optional::Dependencies->req_ok_for('admin');
+}
+
+use lib 't/lib';
+use DBICTest;
+
+use_ok 'DBIx::Class::Admin';
+
+
+{ # test data manipulation functions
+
+  # create a DBICTest so we can steal its connect info
+  my $schema = DBICTest->init_schema(
+    sqlite_use_file => 1,
+  );
+
+  my $admin = DBIx::Class::Admin->new(
+    schema_class=> "DBICTest::Schema",
+    connect_info => $schema->storage->connect_info(),
+    quiet  => 1,
+    _confirm=>1,
+  );
+  isa_ok ($admin, 'DBIx::Class::Admin', 'create the admin object');
+
+  $admin->insert('Employee', { name => 'Matt' });
+  my $employees = $schema->resultset('Employee');
+  is ($employees->count(), 1, "insert okay" );
+
+  my $employee = $employees->find(1);
+  is($employee->name(), 'Matt', "insert valid" );
+
+  $admin->update('Employee', {name => 'Trout'}, {name => 'Matt'});
+
+  $employee = $employees->find(1);
+  is($employee->name(), 'Trout', "update Matt to Trout" );
+
+  $admin->insert('Employee', {name =>'Aran'});
+
+  my $expected_data = [
+    [$employee->result_source->columns() ],
+    [1,1,undef,undef,undef,'Trout'],
+    [2,2,undef,undef,undef,'Aran']
+  ];
+  my $data;
+  lives_ok { $data = $admin->select('Employee')} 'can retrieve data from database';
+  is_deeply($data, $expected_data, 'DB matches whats expected');
+
+  $admin->delete('Employee', {name=>'Trout'});
+  my $del_rs = $employees->search({name => 'Trout'});
+  is($del_rs->count(), 0, "delete Trout" );
+  is ($employees->count(), 1, "left Aran" );
+}
+
+done_testing;
# vim: filetype=perl
use strict;
-use warnings;
+use warnings;
use Test::More;
+use Config;
use lib qw(t/lib);
+$ENV{PERL5LIB} = join ($Config{path_sep}, @INC);
use DBICTest;
-eval 'require JSON::Any';
-plan skip_all => 'Install JSON::Any to run this test' if ($@);
-
-eval 'require Text::CSV_XS';
-if ($@) {
- eval 'require Text::CSV_PP';
- plan skip_all => 'Install Text::CSV_XS or Text::CSV_PP to run this test' if ($@);
+BEGIN {
+ require DBIx::Class;
+ plan skip_all => 'Test needs ' . DBIx::Class::Optional::Dependencies->req_missing_for('admin_script')
+ unless DBIx::Class::Optional::Dependencies->req_ok_for('admin_script');
}
my @json_backends = qw/XS JSON DWIW/;
open(my $fh, "-|", _prepare_system_args( qw|--op=select --attrs={"order_by":"name"}| ) ) or die $!;
my $data = do { local $/; <$fh> };
close($fh);
- ok( ($data=~/Aran.*Trout/s), "$ENV{JSON_ANY_ORDER}: select with attrs" );
+ if (!ok( ($data=~/Aran.*Trout/s), "$ENV{JSON_ANY_ORDER}: select with attrs" )) {
+ diag ("data from select is $data")
+ };
}
system( _prepare_system_args( qw|--op=delete --where={"name":"Trout"}| ) );
#
sub _prepare_system_args {
my $perl = $^X;
+
my @args = (
- qw|script/dbicadmin --quiet --schema=DBICTest::Schema --class=Employee --tlibs|,
+ qw|script/dbicadmin --quiet --schema=DBICTest::Schema --class=Employee|,
q|--connect=["dbi:SQLite:dbname=t/var/DBIxClass.db","","",{"AutoCommit":1}]|,
- qw|--force --tlibs|,
+ qw|--force|,
@_,
);
my $schema = DBICTest->init_schema;
-BEGIN {
- eval "use DBD::SQLite";
- plan $@
- ? ( skip_all => 'needs DBD::SQLite for testing' )
- : ( tests => 13 );
-}
-
my $where_bind = {
where => \'name like ?',
bind => [ 'Cat%' ],
->search({ artistid => 1});
is ( $rs->count, 1, 'where/bind first' );
-
+
$rs = $schema->resultset('Artist')->search({ artistid => 1})
->search({}, $where_bind);
$rs = $schema->resultset('Complex')->search({}, { bind => [ 1999 ] })->search({}, { where => \"title LIKE ?", bind => [ 'Spoon%' ] });
is_same_sql_bind(
$rs->as_query,
- "(SELECT me.artistid, me.name, me.rank, me.charfield FROM (SELECT a.*, cd.cdid AS cdid, cd.title AS title, cd.year AS year FROM artist a JOIN cd ON cd.artist = a.artistid WHERE cd.year = ?) WHERE title LIKE ?)",
+ "(SELECT me.artistid, me.name, me.rank, me.charfield FROM (SELECT a.*, cd.cdid AS cdid, cd.title AS title, cd.year AS year FROM artist a JOIN cd ON cd.artist = a.artistid WHERE cd.year = ?) me WHERE title LIKE ?)",
[
[ '!!dummy' => '1999' ],
[ '!!dummy' => 'Spoon%' ]
$rs = $schema->resultset('CustomSql')->search({}, { bind => [ 1999 ] })->search({}, { where => \"title LIKE ?", bind => [ 'Spoon%' ] });
is_same_sql_bind(
$rs->as_query,
- "(SELECT me.artistid, me.name, me.rank, me.charfield FROM (SELECT a.*, cd.cdid AS cdid, cd.title AS title, cd.year AS year FROM artist a JOIN cd ON cd.artist = a.artistid WHERE cd.year = ?) WHERE title LIKE ?)",
+ "(SELECT me.artistid, me.name, me.rank, me.charfield FROM (SELECT a.*, cd.cdid AS cdid, cd.title AS title, cd.year AS year FROM artist a JOIN cd ON cd.artist = a.artistid WHERE cd.year = ?) me WHERE title LIKE ?)",
[
[ '!!dummy' => '1999' ],
[ '!!dummy' => 'Spoon%' ]
bind => [ 'Spoon%' ] });
is ( $rs->count, 1, '...cookbook + chained search with extra bind' );
}
+
+done_testing;
next;
}
+plan skip_all => 'Set $ENV{DBICTEST_MYSQL_DSN}, _USER and _PASS to run this test'
+ unless ($ENV{DBICTEST_MYSQL_DSN} && $ENV{DBICTEST_MYSQL_USER});
+
eval { require Time::Piece::MySQL };
plan skip_all => "Need Time::Piece::MySQL for this test" if $@;
-use lib 't/cdbi/testlib';
-eval { require 't/cdbi/testlib/Log.pm' };
-plan skip_all => "Need MySQL for this test" if $@;
+plan tests => 3;
-plan tests => 2;
+use lib 't/cdbi/testlib';
+use_ok ('Log');
package main;
$rs = $schema->resultset('Tag')->search({ tag => 'Blue' }, { '+select' => { max => 'tagid' }, distinct => 1 });
is($get_count->($rs), 4, 'Count with +select aggreggate');
- $rs = $schema->resultset('Tag')->search({}, { select => 'length(me.tag)', distinct => 1 });
+ $rs = $schema->resultset('Tag')->search({}, { select => [\'length(me.tag)'], distinct => 1 });
is($get_count->($rs), 3, 'Count by distinct function result as select literal');
}
--- /dev/null
+use strict;
+use warnings;
+
+use Test::More;
+
+use lib qw(t/lib);
+
+use DBICTest;
+
+my $schema = DBICTest->init_schema();
+
+# Group CDs by the first letter of their title (aliased 'initial') and count
+# rows per group - exercises function-based select, group_by and order_by
+# over the function alias
+my $rs = $schema->resultset ('CD')->search ({}, {
+  select => [
+    { substr => [ 'title', 1, 1 ], -as => 'initial' },
+    { count => '*' },
+  ],
+  as => [qw/title_initial cnt/],
+  group_by => ['initial'],
+  order_by => { -desc => 'initial' },
+  result_class => 'DBIx::Class::ResultClass::HashRefInflator',
+});
+
+# groups must come back in descending 'initial' order
+is_deeply (
+  [$rs->all],
+  [
+    { title_initial => 'S', cnt => '1' },
+    { title_initial => 'G', cnt => '1' },
+    { title_initial => 'F', cnt => '1' },
+    { title_initial => 'C', cnt => '2' },
+  ],
+  'Correct result',
+);
+
+# count() on a grouped resultset must count groups, not underlying rows
+is ($rs->count, 4, 'Correct count');
+
+done_testing;
--- /dev/null
+use strict;
+use warnings;
+
+use Test::More;
+use lib qw(t/lib);
+use DBICTest;
+
+my $schema = DBICTest->init_schema();
+my $artist_rs = $schema->resultset ('Artist');
+
+my $init_count = $artist_rs->count;
+ok ($init_count, 'Some artists in database');
+
+$artist_rs->populate ([
+  {
+    name => 'foo',
+  },
+  {
+    name => 'bar',
+  }
+]);
+
+is ($artist_rs->count, $init_count + 2, '2 Artists created');
+
+# delete with a nested -and / implicit-or condition structure
+$artist_rs->search ({
+  -and => [
+    { 'me.artistid' => { '!=', undef } },
+    [ { 'me.name' => 'foo' }, { 'me.name' => 'bar' } ],
+  ],
+})->delete;
+
+is ($artist_rs->count, $init_count, 'Correct amount of artists deleted');
+
+done_testing;
+
is_same_sql_bind(
$cdrs2->as_query,
- "(SELECT me.cdid,me.artist,me.title,me.year,me.genreid,me.single_track FROM cd me WHERE artist_id IN ( SELECT id FROM artist me LIMIT 1 ))",
+ "(SELECT me.cdid, me.artist, me.title, me.year, me.genreid, me.single_track FROM cd me WHERE artist_id IN ( SELECT id FROM artist me LIMIT 1 ))",
[],
);
}
is_same_sql_bind(
$rs->as_query,
- "(SELECT cd2.cdid, cd2.artist, cd2.title, cd2.year, cd2.genreid, cd2.single_track FROM (SELECT me.cdid,me.artist,me.title,me.year,me.genreid,me.single_track FROM cd me WHERE ( id > ? ) ) cd2)",
+ "(SELECT cd2.cdid, cd2.artist, cd2.title, cd2.year, cd2.genreid, cd2.single_track FROM (
+ SELECT me.cdid, me.artist, me.title, me.year, me.genreid, me.single_track FROM cd me WHERE ( id > ? )
+ ) cd2)",
[
[ 'id', 20 ]
],
is_same_sql_bind(
$rs->as_query,
- "(SELECT cd2.cdid, cd2.artist, cd2.title, cd2.year, cd2.genreid, cd2.single_track
- FROM
- (SELECT cd3.cdid,cd3.artist,cd3.title,cd3.year,cd3.genreid,cd3.single_track
- FROM
- (SELECT me.cdid,me.artist,me.title,me.year,me.genreid,me.single_track
+ "(SELECT cd2.cdid, cd2.artist, cd2.title, cd2.year, cd2.genreid, cd2.single_track
+ FROM
+ (SELECT cd3.cdid, cd3.artist, cd3.title, cd3.year, cd3.genreid, cd3.single_track
+ FROM
+ (SELECT me.cdid, me.artist, me.title, me.year, me.genreid, me.single_track
FROM cd me WHERE ( id < ? ) ) cd3
WHERE ( id > ? ) ) cd2)",
[
is_same_sql_bind(
$rs->as_query,
- "(SELECT cd2.cdid, cd2.artist, cd2.title, cd2.year, cd2.genreid, cd2.single_track FROM (SELECT me.cdid,me.artist,me.title,me.year,me.genreid,me.single_track FROM cd me WHERE ( title = ? ) ) cd2)",
+ "(SELECT cd2.cdid, cd2.artist, cd2.title, cd2.year, cd2.genreid, cd2.single_track FROM (
+ SELECT me.cdid, me.artist, me.title, me.year, me.genreid, me.single_track FROM cd me WHERE ( title = ? )
+ ) cd2)",
[ [ 'title', 'Thriller' ] ],
);
}
if ($@) {
plan skip_all => 'needs DateTime and DateTime::Format::Sybase for testing';
}
- else {
- plan tests => (4 * 2 * 2) + 2; # (tests * dt_types * storage_types) + storage_tests
- }
}
my @storage_types = (
$schema->storage->dbh->do(<<"SQL");
CREATE TABLE track (
trackid INT IDENTITY PRIMARY KEY,
- cd INT,
- position INT,
- $col $type,
+ cd INT NULL,
+ position INT NULL,
+ $col $type NULL
)
SQL
ok(my $dt = DateTime::Format::Sybase->parse_datetime($sample_dt));
);
is( $row->$col, $dt, 'DateTime roundtrip' );
}
+
+ # test a computed datetime column
+ eval { $schema->storage->dbh->do("DROP TABLE track") };
+ $schema->storage->dbh->do(<<"SQL");
+CREATE TABLE track (
+ trackid INT IDENTITY PRIMARY KEY,
+ cd INT NULL,
+ position INT NULL,
+ title VARCHAR(100) NULL,
+ last_updated_on DATETIME NULL,
+ last_updated_at AS getdate(),
+ small_dt SMALLDATETIME NULL
+)
+SQL
+
+ my $now = DateTime->now;
+ sleep 1;
+ my $new_row = $schema->resultset('Track')->create({});
+ $new_row->discard_changes;
+
+ lives_and {
+ cmp_ok (($new_row->last_updated_at - $now)->seconds, '>=', 1)
+ } 'getdate() computed column works';
}
+done_testing;
+
# clean up our mess
END {
if (my $dbh = eval { $schema->storage->_dbh }) {
--- /dev/null
+use strict;
+use warnings;
+
+use Test::More;
+use Test::Exception;
+use lib qw(t/lib);
+use DBICTest;
+
+my ($dsn, $user, $pass) = @ENV{map { "DBICTEST_SYBASE_ASA_${_}" } qw/DSN USER PASS/};
+my ($dsn2, $user2, $pass2) = @ENV{map { "DBICTEST_SYBASE_ASA_ODBC_${_}" } qw/DSN USER PASS/};
+
+if (not ($dsn || $dsn2)) {
+  # no connection info at all - skip; message cleaned of leftover quoting
+  # artifacts from the pre-heredoc version of this text
+  plan skip_all => <<'EOF';
+Set $ENV{DBICTEST_SYBASE_ASA_DSN} and/or $ENV{DBICTEST_SYBASE_ASA_ODBC_DSN}
+_USER and _PASS to run this test.
+Warning: This test drops and creates a table called 'track'.
+EOF
+} else {
+  eval "use DateTime; use DateTime::Format::Strptime;";
+  if ($@) {
+    plan skip_all => 'needs DateTime and DateTime::Format::Strptime for testing';
+  }
+}
+
+my @info = (
+ [ $dsn, $user, $pass ],
+ [ $dsn2, $user2, $pass2 ],
+);
+
+my @handles_to_clean;
+
+foreach my $info (@info) {
+ my ($dsn, $user, $pass) = @$info;
+
+ next unless $dsn;
+
+ my $schema = DBICTest::Schema->clone;
+
+ $schema->connection($dsn, $user, $pass, {
+ on_connect_call => [ 'datetime_setup' ],
+ });
+
+ push @handles_to_clean, $schema->storage->dbh;
+
+# coltype, col, date
+ my @dt_types = (
+ ['TIMESTAMP', 'last_updated_at', '2004-08-21 14:36:48.080445'],
+# date only (but minute precision according to ASA docs)
+ ['DATE', 'small_dt', '2004-08-21 00:00:00.000000'],
+ );
+
+ for my $dt_type (@dt_types) {
+ my ($type, $col, $sample_dt) = @$dt_type;
+
+ eval { $schema->storage->dbh->do("DROP TABLE track") };
+ $schema->storage->dbh->do(<<"SQL");
+ CREATE TABLE track (
+ trackid INT IDENTITY PRIMARY KEY,
+ cd INT,
+ position INT,
+ $col $type,
+ )
+SQL
+ ok(my $dt = $schema->storage->datetime_parser->parse_datetime($sample_dt));
+
+ my $row;
+ ok( $row = $schema->resultset('Track')->create({
+ $col => $dt,
+ cd => 1,
+ }));
+ ok( $row = $schema->resultset('Track')
+ ->search({ trackid => $row->trackid }, { select => [$col] })
+ ->first
+ );
+ is( $row->$col, $dt, 'DateTime roundtrip' );
+
+ is $row->$col->nanosecond, $dt->nanosecond,
+ 'nanoseconds survived' if 0+$dt->nanosecond;
+ }
+}
+
+done_testing;
+
+# clean up our mess
+END {
+ foreach my $dbh (@handles_to_clean) {
+ eval { $dbh->do("DROP TABLE $_") } for qw/track/;
+ }
+}
my $cd1 = $rs->find ({cdid => 1});
is_deeply ( $cd1, $datahashref1, 'first/find return the same thing');
+
+ my $cd2 = $rs->search({ cdid => 1 })->single;
+ is_deeply ( $cd2, $datahashref1, 'first/search+single return the same thing');
}
sub check_cols_of {
my $root = _find_co_root()
or return;
+ my $optdeps = file('lib/DBIx/Class/Optional/Dependencies.pm');
+
# not using file->stat as it invokes File::stat which in turn breaks stat(_)
- my ($mf_pl_mtime, $mf_mtime) = ( map
+ my ($mf_pl_mtime, $mf_mtime, $optdeps_mtime) = ( map
{ (stat ($root->file ($_)) )[9] }
- qw/Makefile.PL Makefile/
+ (qw|Makefile.PL Makefile|, $optdeps)
);
return unless $mf_pl_mtime; # something went wrong during co_root detection ?
- if (
- not -d $root->subdir ('inc')
- or
- not $mf_mtime
- or
- $mf_mtime < $mf_pl_mtime
- ) {
- print STDERR <<'EOE';
+ my @fail_reasons;
+ if(not -d $root->subdir ('inc')) {
+ push @fail_reasons, "Missing ./inc directory";
+ }
+ if (not $mf_mtime) {
+ push @fail_reasons, "Missing ./Makefile";
+ }
+ elsif($mf_mtime < $mf_pl_mtime) {
+ push @fail_reasons, "./Makefile.PL is newer than ./Makefile";
+ }
+
+ if ($mf_mtime < $optdeps_mtime) {
+ push @fail_reasons, "./$optdeps is newer than ./Makefile";
+ }
+
+ if (@fail_reasons) {
+ print STDERR <<'EOE';
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
The DBIC team
+Reasons you received this message:
EOE
+ foreach my $r (@fail_reasons) {
+ print STDERR " * $r\n";
+ }
+ print STDERR "\n\n\n";
+
exit 1;
}
}
--- /dev/null
+package # hide from PAUSE
+  DBICTest::Schema::ComputedColumn;
+
+# for sybase and mssql computed column tests
+
+use base qw/DBICTest::BaseResult/;
+
+__PACKAGE__->table('computed_column_test');
+
+__PACKAGE__->add_columns(
+  # plain autoincrementing PK
+  'id' => {
+    data_type => 'integer',
+    is_auto_increment => 1,
+  },
+  # data_type left undef: the value is computed server-side; the scalar-ref
+  # default is passed through to DDL as a literal getdate() expression
+  'a_computed_column' => {
+    data_type => undef,
+    is_nullable => 0,
+    default_value => \'getdate()',
+  },
+  'a_timestamp' => {
+    data_type => 'timestamp',
+    is_nullable => 0,
+  },
+  # ordinary column with a plain (quoted, non-literal) default
+  'charfield' => {
+    data_type => 'varchar',
+    size => 20,
+    default_value => 'foo',
+    is_nullable => 0,
+  }
+);
+
+__PACKAGE__->set_primary_key('id');
+
+1;
# Normally this would not appear as a FK constraint
# since it uses the PK
-__PACKAGE__->might_have(
- 'artist_1', 'DBICTest::Schema::Artist', {
- 'foreign.artistid' => 'self.artist',
- }, {
- is_foreign_key_constraint => 1,
- },
+__PACKAGE__->might_have('artist_1', 'DBICTest::Schema::Artist',
+ { 'foreign.artistid' => 'self.artist' },
+ { is_foreign_key_constraint => 1 },
);
# Normally this would appear as a FK constraint
-__PACKAGE__->might_have(
- 'cd_1', 'DBICTest::Schema::CD', {
- 'foreign.cdid' => 'self.cd',
- }, {
- is_foreign_key_constraint => 0,
- },
+__PACKAGE__->might_have('cd_1', 'DBICTest::Schema::CD',
+ { 'foreign.cdid' => 'self.cd' },
+ { is_foreign_key_constraint => 0 },
);
# Normally this would appear as a FK constraint
-__PACKAGE__->belongs_to(
- 'cd_3', 'DBICTest::Schema::CD', {
- 'foreign.cdid' => 'self.cd',
- }, {
- is_foreign_key_constraint => 0,
- },
+__PACKAGE__->belongs_to('cd_3', 'DBICTest::Schema::CD',
+ { 'foreign.cdid' => 'self.cd' },
+ { is_foreign_key_constraint => 0 },
);
1;
--
-- Created by SQL::Translator::Producer::SQLite
--- Created on Thu Jan 28 11:26:22 2010
+-- Created on Sat Jan 30 19:18:55 2010
--
;
use lib qw(t/lib);
use DBICTest;
-plan tests => 12;
-
my $schema = DBICTest->init_schema();
# Test various new() invocations - this is all about backcompat, making
}
{
- my $new_artist = $schema->resultset("Artist")->new_result({ 'name' => 'Depeche Mode 2: Insertion Boogaloo' });
- my $new_related_cd = $new_artist->new_related('cds', { 'title' => 'Leave Loudly While Singing Off Key', 'year' => 1982});
+ my $new_cd = $schema->resultset('CD')->new ({ 'title' => 'Leave Loudly While Singing Off Key', 'year' => 1982});
+ my $new_artist = $schema->resultset("Artist")->new ({ 'name' => 'Depeche Mode 2: Insertion Boogaloo' });
+ $new_cd->artist ($new_artist);
+
eval {
- $new_related_cd->insert;
+ $new_cd->insert;
};
is ($@, '', 'CD insertion survives by inserting artist');
+ ok($new_cd->in_storage, 'new_related_cd inserted');
ok($new_artist->in_storage, 'artist inserted');
- ok($new_related_cd->in_storage, 'new_related_cd inserted');
+
+ my $retrieved_cd = $schema->resultset('CD')->find ({ 'title' => 'Leave Loudly While Singing Off Key'});
+ ok ($retrieved_cd, 'CD found in db');
+ is ($retrieved_cd->artist->name, 'Depeche Mode 2: Insertion Boogaloo', 'Correct artist attached to cd');
+}
+
+# test both sides of a 1:(1|0)
+{
+ for my $reldir ('might_have', 'belongs_to') {
+ my $artist = $schema->resultset('Artist')->next;
+
+ my $new_track = $schema->resultset('Track')->new ({
+ title => "$reldir: First track of latest cd",
+ cd => {
+ title => "$reldir: Latest cd",
+ year => 2666,
+ artist => $artist,
+ },
+ });
+
+ my $new_single = $schema->resultset('CD')->new ({
+ artist => $artist,
+ title => "$reldir: Awesome first single",
+ year => 2666,
+ });
+
+ if ($reldir eq 'might_have') {
+ $new_track->cd_single ($new_single);
+ $new_track->insert;
+ }
+ else {
+ $new_single->single_track ($new_track);
+ $new_single->insert;
+ }
+
+ ok ($new_single->in_storage, "$reldir single inserted");
+ ok ($new_track->in_storage, "$reldir track inserted");
+
+ my $new_cds = $artist->search_related ('cds',
+ { year => '2666' },
+ { prefetch => 'tracks', order_by => 'cdid' }
+ );
+
+ is_deeply (
+ [$new_cds->search ({}, { result_class => 'DBIx::Class::ResultClass::HashRefInflator'})->all ],
+ [
+ {
+ artist => 1,
+ cdid => 9,
+ genreid => undef,
+ single_track => undef,
+ title => "$reldir: Latest cd",
+ tracks => [
+ {
+ cd => 9,
+ last_updated_at => undef,
+ last_updated_on => undef,
+ position => 1,
+ small_dt => undef,
+ title => "$reldir: First track of latest cd",
+ trackid => 19
+ }
+ ],
+ year => 2666
+ },
+ {
+ artist => 1,
+ cdid => 10,
+ genreid => undef,
+ single_track => 19,
+ title => "$reldir: Awesome first single",
+ tracks => [],
+ year => 2666
+ },
+ ],
+ 'Expected rows created in database',
+ );
+
+ $new_cds->delete_all;
+ }
}
{
ok($new_related_artist->in_storage, 'related artist inserted');
ok($new_cd->in_storage, 'cd inserted');
}
+
+done_testing;
],
});
},
- qr/Recursive update is not supported over relationships of type multi/,
+ qr/Recursive update is not supported over relationships of type 'multi'/,
'create via update of multi relationships throws an exception'
);
'(
SELECT me.cd, me.track_count, cd.cdid, cd.artist, cd.title, cd.year, cd.genreid, cd.single_track
FROM (
- SELECT me.cd, COUNT (me.trackid) AS track_count,
+ SELECT me.cd, COUNT (me.trackid) AS track_count
FROM track me
JOIN cd cd ON cd.cdid = me.cd
WHERE ( me.cd IN ( ?, ?, ?, ?, ? ) )
GROUP BY me.cd
- ) as me
+ ) me
JOIN cd cd ON cd.cdid = me.cd
WHERE ( me.cd IN ( ?, ?, ?, ?, ? ) )
)',
tracks.trackid, tracks.cd, tracks.position, tracks.title, tracks.last_updated_on, tracks.last_updated_at, tracks.small_dt,
liner_notes.liner_id, liner_notes.notes
FROM (
- SELECT me.cdid, COUNT( tracks.trackid ) AS track_count, MAX( tracks.trackid ) AS maxtr,
+ SELECT me.cdid, COUNT( tracks.trackid ) AS track_count, MAX( tracks.trackid ) AS maxtr
FROM cd me
LEFT JOIN track tracks ON tracks.cd = me.cdid
WHERE ( me.cdid IS NOT NULL )
--- /dev/null
+use strict;
+use warnings;
+
+use Test::More;
+use Test::Exception;
+
+use lib qw(t/lib);
+use DBICTest;
+
+my $schema = DBICTest->init_schema();
+
+my $artist = $schema->resultset ('Artist')->find ({artistid => 1});
+is ($artist->cds->count, 3, 'Correct number of CDs');
+is ($artist->cds->search_related ('genre')->count, 1, 'Only one of the cds has a genre');
+
+my $queries = 0;
+my $orig_cb = $schema->storage->debugcb;
+$schema->storage->debugcb(sub { $queries++ });
+$schema->storage->debug(1);
+
+
+my $pref = $schema->resultset ('Artist')
+ ->search ({ 'me.artistid' => $artist->id }, { prefetch => { cds => 'genre' } })
+ ->next;
+
+is ($pref->cds->count, 3, 'Correct number of CDs prefetched');
+is ($pref->cds->search_related ('genre')->count, 1, 'Only one of the prefetched cds has a prefetched genre');
+
+
+is ($queries, 1, 'All happened within one query only');
+$schema->storage->debugcb($orig_cb);
+$schema->storage->debug(0);
+
+
+done_testing;
} );
$track->set_from_related( cd => $cd );
+# has_relationship - description must name the relationship actually tested ('disc')
+ok(! $track->has_relationship( 'foo' ), 'Track has no relationship "foo"');
+ok($track->has_relationship( 'disc' ), 'Track has relationship "disc"' );
+
is($track->disc->cdid, 4, 'set_from_related ok, including alternative accessor' );
$track->set_from_related( cd => undef );
--- /dev/null
+use strict;
+use warnings;
+
+use Test::More;
+use Test::Exception;
+
+use lib qw(t/lib);
+use DBICTest;
+
+my $schema = DBICTest->init_schema();
+
+my $cd = $schema->resultset('CD')->search ({}, { columns => ['year'], rows => 1 })->single;
+
+
+throws_ok (
+ sub { $cd->tracks },
+ qr/Unable to resolve relationship .+ column .+ not loaded from storage/,
+ 'Correct exception on nonresolvable object-based condition'
+);
+
+done_testing;
--- /dev/null
+use strict;
+use warnings;
+
+use Test::More;
+use Test::Exception;
+
+use lib qw(t/lib);
+use DBICTest;
+use DBIC::SqlMakerTest;
+
+my $schema = DBICTest->init_schema();
+
+my $new_rs = $schema->resultset('Artist')->search({
+ 'artwork_to_artist.artist_id' => 1
+}, {
+ join => 'artwork_to_artist'
+});
+lives_ok { $new_rs->count } 'regular search works';
+lives_ok { $new_rs->search({ 'artwork_to_artist.artwork_cd_id' => 1})->count }
+ '... and chaining off that using join works';
+lives_ok { $new_rs->search({ 'artwork_to_artist.artwork_cd_id' => 1})->as_subselect_rs->count }
+ '... and chaining off the virtual view works';
+dies_ok { $new_rs->as_subselect_rs->search({'artwork_to_artist.artwork_cd_id'=> 1})->count }
+ q{... but chaining off of a virtual view using join doesn't work};
+done_testing;
--- /dev/null
+use strict;
+use warnings;
+
+use lib qw(t/lib);
+use Test::More;
+use Test::Exception;
+use DBICTest;
+
+my $schema = DBICTest->init_schema();
+my $rs = $schema->resultset('Artist');
+
+ok !$rs->is_ordered, 'vanilla resultset is not ordered';
+
+# Simple ordering with a single column
+{
+ my $ordered = $rs->search(undef, { order_by => 'artistid' });
+ ok $ordered->is_ordered, 'Simple column ordering detected by is_ordered';
+}
+
+# Hashref order direction
+{
+ my $ordered = $rs->search(undef, { order_by => { -desc => 'artistid' } });
+ ok $ordered->is_ordered, 'resultset with order direction is_ordered';
+}
+
+# Column ordering with literal SQL
+{
+ my $ordered = $rs->search(undef, { order_by => \'artistid DESC' });
+ ok $ordered->is_ordered, 'resultset with literal SQL is_ordered';
+}
+
+# Multiple column ordering
+{
+ my $ordered = $rs->search(undef, { order_by => ['artistid', 'name'] });
+ ok $ordered->is_ordered, 'ordering with multiple columns as arrayref is ordered';
+}
+
+# More complicated ordering
+{
+ my $ordered = $rs->search(undef, {
+ order_by => [
+ { -asc => 'artistid' },
+ { -desc => 'name' },
+ ]
+ });
+ ok $ordered->is_ordered, 'more complicated resultset ordering is_ordered';
+}
+
+# Empty multi-column ordering arrayref
+{
+ my $ordered = $rs->search(undef, { order_by => [] });
+ ok !$ordered->is_ordered, 'ordering with empty arrayref is not ordered';
+}
+
+# Multi-column ordering syntax with empty hashref
+{
+ my $ordered = $rs->search(undef, { order_by => [{}] });
+ ok !$ordered->is_ordered, 'ordering with [{}] is not ordered';
+}
+
+# Remove ordering after being set
+{
+ my $ordered = $rs->search(undef, { order_by => 'artistid' });
+ ok $ordered->is_ordered, 'resultset with ordering applied works..';
+ my $unordered = $ordered->search(undef, { order_by => undef });
+ ok !$unordered->is_ordered, '..and is not ordered with ordering removed';
+}
+
+# Search without ordering
+{
+ my $ordered = $rs->search({ name => 'We Are Goth' }, { join => 'cds' });
+ ok !$ordered->is_ordered, 'WHERE clause but no order_by is not ordered';
+}
+
+# Other functions without ordering
+{
+ # Join
+ my $joined = $rs->search(undef, { join => 'cds' });
+ ok !$joined->is_ordered, 'join but no order_by is not ordered';
+
+ # Group By
+ my $grouped = $rs->search(undef, { group_by => 'rank' });
+ ok !$grouped->is_ordered, 'group_by but no order_by is not ordered';
+
+ # Paging
+ my $paged = $rs->search(undef, { page=> 5 });
+ ok !$paged->is_ordered, 'paging but no order_by is not ordered';
+}
+
+done_testing;
--- /dev/null
+use strict;
+use warnings;
+
+use Test::More;
+use Test::Exception;
+
+use lib qw(t/lib);
+use DBICTest;
+
+my $schema = DBICTest->init_schema();
+
+my $rs = $schema->resultset('NoPrimaryKey');
+
+my $row = $rs->create ({ foo => 1, bar => 1, baz => 1 });
+
+lives_ok (sub {
+ $row->foo (2);
+}, 'Set on pkless object works');
+
+is ($row->foo, 2, 'Column updated in-object');
+
+dies_ok (sub {
+ $row->update ({baz => 3});
+}, 'update() fails on pk-less object');
+
+is ($row->foo, 2, 'Column not updated by failed update()');
+
+dies_ok (sub {
+ $row->delete;
+}, 'delete() fails on pk-less object');
+
+done_testing;
--- /dev/null
+use strict;
+use warnings;
+
+use Test::More;
+use Test::Exception;
+
+use lib qw(t/lib);
+use DBIC::SqlMakerTest;
+use DBICTest;
+
+
+my $schema = DBICTest->init_schema();
+
+my @chain = (
+ {
+ columns => [ 'cdid' ],
+ '+columns' => [ { title_lc => { lower => 'title' } } ],
+ '+select' => [ 'genreid' ],
+ '+as' => [ 'genreid' ],
+ } => 'SELECT me.cdid, LOWER( title ), me.genreid FROM cd me',
+
+ {
+ '+columns' => [ { max_year => { max => 'me.year' }}, ],
+ '+select' => [ { count => 'me.cdid' }, ],
+ '+as' => [ 'cnt' ],
+ } => 'SELECT me.cdid, LOWER( title ), MAX( me.year ), me.genreid, COUNT( me.cdid ) FROM cd me',
+
+ {
+ select => [ { min => 'me.cdid' }, ],
+ as => [ 'min_id' ],
+ } => 'SELECT MIN( me.cdid ) FROM cd me',
+
+ {
+ '+columns' => [ { cnt => { count => 'cdid' } } ],
+ } => 'SELECT MIN( me.cdid ), COUNT ( cdid ) FROM cd me',
+
+ {
+ columns => [ 'year' ],
+ } => 'SELECT me.year FROM cd me',
+);
+
+my $rs = $schema->resultset('CD');
+
+my $testno = 1;
+while (@chain) {
+ my $attrs = shift @chain;
+ my $sql = shift @chain;
+
+ $rs = $rs->search ({}, $attrs);
+
+ is_same_sql_bind (
+ $rs->as_query,
+ "($sql)",
+ [],
+ "Test $testno of SELECT assembly ok",
+ );
+
+ $testno++;
+}
+
+done_testing;
search => \[ "title = ? AND year LIKE ?", 'buahaha', '20%' ],
attrs => { rows => 5 },
sqlbind => \[
- "( SELECT me.cdid,me.artist,me.title,me.year,me.genreid,me.single_track FROM cd me WHERE (title = ? AND year LIKE ?) LIMIT 5)",
+ "( SELECT me.cdid, me.artist, me.title, me.year, me.genreid, me.single_track FROM cd me WHERE (title = ? AND year LIKE ?) LIMIT 5)",
'buahaha',
'20%',
],
artist_id => { 'in' => $art_rs->search({}, { rows => 1 })->get_column( 'id' )->as_query },
},
sqlbind => \[
- "( SELECT me.cdid,me.artist,me.title,me.year,me.genreid,me.single_track FROM cd me WHERE artist_id IN ( SELECT id FROM artist me LIMIT 1 ) )",
+ "( SELECT me.cdid, me.artist, me.title, me.year, me.genreid, me.single_track FROM cd me WHERE artist_id IN ( SELECT id FROM artist me LIMIT 1 ) )",
],
},
],
},
sqlbind => \[
- "( SELECT cd2.cdid, cd2.artist, cd2.title, cd2.year, cd2.genreid, cd2.single_track FROM (SELECT me.cdid,me.artist,me.title,me.year,me.genreid,me.single_track FROM cd me WHERE id > ?) cd2 )",
+ "( SELECT cd2.cdid, cd2.artist, cd2.title, cd2.year, cd2.genreid, cd2.single_track FROM (
+ SELECT me.cdid, me.artist, me.title, me.year, me.genreid, me.single_track FROM cd me WHERE id > ?
+ ) cd2
+ )",
[ 'id', 20 ]
],
},
sqlbind => \[
"( SELECT cd2.cdid, cd2.artist, cd2.title, cd2.year, cd2.genreid, cd2.single_track
FROM
- (SELECT cd3.cdid,cd3.artist,cd3.title,cd3.year,cd3.genreid,cd3.single_track
+ (SELECT cd3.cdid, cd3.artist, cd3.title, cd3.year, cd3.genreid, cd3.single_track
FROM
- (SELECT me.cdid,me.artist,me.title,me.year,me.genreid,me.single_track
+ (SELECT me.cdid, me.artist, me.title, me.year, me.genreid, me.single_track
FROM cd me WHERE id < ?) cd3
WHERE id > ?) cd2
)",
],
},
sqlbind => \[
- "(SELECT cd2.cdid, cd2.artist, cd2.title, cd2.year, cd2.genreid, cd2.single_track FROM (SELECT me.cdid,me.artist,me.title,me.year,me.genreid,me.single_track FROM cd me WHERE title = ?) cd2)",
+ "(SELECT cd2.cdid, cd2.artist, cd2.title, cd2.year, cd2.genreid, cd2.single_track FROM (
+ SELECT me.cdid, me.artist, me.title, me.year, me.genreid, me.single_track FROM cd me WHERE title = ?
+ ) cd2
+ )",
[ 'title',
'Thriller'
]
{
order_by => [ qw{ foo bar} ],
order_req => 'foo, bar',
- order_inner => 'foo ASC,bar ASC',
+ order_inner => 'foo ASC, bar ASC',
order_outer => 'foo DESC, bar DESC',
},
{
use lib qw(t/lib);
use DBIC::SqlMakerTest;
-BEGIN {
- eval "use DBD::SQLite";
- plan $@
- ? ( skip_all => 'needs DBD::SQLite for testing' )
- : ( tests => 7 );
-}
-
use_ok('DBICTest');
use_ok('DBIC::DebugObj');
$schema->storage->sql_maker->name_sep('.');
is($schema->storage->sql_maker->update('group', \%data), 'UPDATE `group` SET `name` = ?, `order` = ?', 'quoted table names for UPDATE');
+
+done_testing;
use lib qw(t/lib);
use DBIC::SqlMakerTest;
-BEGIN {
- eval "use DBD::SQLite";
- plan $@
- ? ( skip_all => 'needs DBD::SQLite for testing' )
- : ( tests => 7 );
-}
-
use_ok('DBICTest');
use_ok('DBIC::DebugObj');
);
is($schema->storage->sql_maker->update('group', \%data), 'UPDATE `group` SET `name` = ?, `order` = ?', 'quoted table names for UPDATE');
+
+done_testing;
--- /dev/null
+use strict;
+use warnings;
+
+use Test::More;
+use lib qw(t/lib);
+use DBICTest;
+use DBIC::SqlMakerTest;
+
+my $schema = DBICTest->init_schema;
+
+is_same_sql_bind(
+ $schema->resultset('Artist')->search ({}, {for => 'update'})->as_query,
+ '(SELECT me.artistid, me.name, me.rank, me.charfield FROM artist me)', [],
+);
+
+done_testing;
my @cds = $schema->resultset('CD')->search( { artist => 1, cdid => { -between => [ 1, 3 ] }, } );
is_same_sql_bind(
$sql, \@bind,
- "SELECT me.cdid, me.artist, me.title, me.year, me.genreid, me.single_track FROM cd me WHERE ( artist = ? AND (cdid BETWEEN ? AND ?) ): '1', '1', '3'",
+ "SELECT me.cdid, me.artist, me.title, me.year, me.genreid, me.single_track FROM cd me WHERE ( artist = ? AND (cdid BETWEEN ? AND ?) )",
[qw/'1' '1' '3'/],
'got correct SQL with all bind parameters (debugcb)'
);
-use Class::C3;
use strict;
-use Test::More;
use warnings;
-BEGIN {
- eval "use DBD::SQLite";
- plan $@
- ? ( skip_all => 'needs DBD::SQLite for testing' )
- : ( tests => 4 );
-}
+use Test::More;
+use Test::Warn;
+use Test::Exception;
use lib qw(t/lib);
-
use_ok( 'DBICTest' );
use_ok( 'DBICTest::Schema' );
+
my $schema = DBICTest->init_schema;
-{
- my $warnings;
- local $SIG{__WARN__} = sub { $warnings .= $_[0] };
- eval {
- $schema->resultset('CD')
- ->create({ title => 'vacation in antarctica' })
- };
- like $@, qr/NULL/; # as opposed to some other error
- unlike( $warnings, qr/uninitialized value/, "No warning from Storage" );
-}
+warnings_are ( sub {
+ throws_ok (sub {
+ $schema->resultset('CD')->create({ title => 'vacation in antarctica' });
+ }, qr/NULL/); # as opposed to some other error
+}, [], 'No warnings besides exception' );
+done_testing;
use IO::Handle;
BEGIN {
- eval "use DBIx::Class::Storage::DBI::Replicated; use Test::Moose";
- plan skip_all => "Deps not installed: $@" if $@;
+ eval { require Test::Moose; Test::Moose->import() };
+ plan skip_all => "Need Test::Moose to run this test" if $@;
+ require DBIx::Class;
+
+ plan skip_all => 'Test needs ' . DBIx::Class::Optional::Dependencies->req_missing_for ('replicated')
+ unless DBIx::Class::Optional::Dependencies->req_ok_for ('replicated');
}
use_ok 'DBIx::Class::Storage::DBI::Replicated::Pool';
=> 'configured balancer_type';
}
+### check that all Storage::DBI methods are handled by ::Replicated
+{
+ my @storage_dbi_methods = Class::MOP::Class
+ ->initialize('DBIx::Class::Storage::DBI')->get_all_method_names;
+
+ my @replicated_methods = DBIx::Class::Storage::DBI::Replicated->meta
+ ->get_all_method_names;
+
+# remove constants and OTHER_CRAP
+ @storage_dbi_methods = grep !/^[A-Z_]+\z/, @storage_dbi_methods;
+
+# remove CAG accessors
+ @storage_dbi_methods = grep !/_accessor\z/, @storage_dbi_methods;
+
+# remove DBIx::Class (the root parent, with CAG and stuff) methods
+ my @root_methods = Class::MOP::Class->initialize('DBIx::Class')
+ ->get_all_method_names;
+ my %count;
+ $count{$_}++ for (@storage_dbi_methods, @root_methods);
+
+ @storage_dbi_methods = grep $count{$_} != 2, @storage_dbi_methods;
+
+# make hashes
+ my %storage_dbi_methods;
+ @storage_dbi_methods{@storage_dbi_methods} = ();
+ my %replicated_methods;
+ @replicated_methods{@replicated_methods} = ();
+
+# remove ::Replicated-specific methods
+ for my $method (@replicated_methods) {
+ delete $replicated_methods{$method}
+ unless exists $storage_dbi_methods{$method};
+ }
+ @replicated_methods = keys %replicated_methods;
+
+# check that what's left is implemented
+ %count = ();
+ $count{$_}++ for (@storage_dbi_methods, @replicated_methods);
+
+ if ((grep $count{$_} == 2, @storage_dbi_methods) == @storage_dbi_methods) {
+ pass 'all DBIx::Class::Storage::DBI methods implemented';
+ }
+ else {
+ my @unimplemented = grep $count{$_} == 1, @storage_dbi_methods;
+
+ fail 'the following DBIx::Class::Storage::DBI methods are unimplemented: '
+ . "@unimplemented";
+ }
+}
+
ok $replicated->schema->storage->meta
=> 'has a meta object';