'Class::Data::Accessor' => 0.01,
'Carp::Clan' => 0,
'DBI' => 1.40,
+ 'Module::Find' => 0,
+ 'Class::Inspector' => 0,
},
build_requires => {
'DBD::SQLite' => 1.11,
},
- recommends => {
- 'Data::UUID' => 0,
- 'Module::Find' => 0,
- 'Class::Inspector' => 0,
- },
create_makefile_pl => 'passthrough',
create_readme => 1,
- test_files => [ glob('t/*.t'), glob('t/*/*.t') ]
+ test_files => [ glob('t/*.t'), glob('t/*/*.t') ],
+ script_files => [ glob('script/*') ],
);
Module::Build->new(%arguments)->create_build_script;
Revision history for DBIx::Class
-0.06003
+ - rename _parent_rs to _parent_source in ResultSet
+
+0.06999_05 2006-07-04 14:40:01
+ - fix issue with incorrect $rs->{attrs}{alias}
+ - fix subclassing issue with source_name
+ - tweak quotes test to output text on failure
+ - fix Schema->txn_do to not fail as a classmethod
+
+0.06999_04 2006-06-29 20:18:47
+ - disable cdbi-t/02-Film.t warning tests under AS perl
+ - fixups to MySQL tests (aka "work round mysql being retarded")
+ - compat tweaks for Storage debug logging
+
+0.06999_03 2006-06-26 21:04:44
+ - various documentation improvements
+ - fixes to pass test suite on Windows
+ - rewrote and cleaned up SQL::Translator tests
+ - changed relationship helpers to only call ensure_class_loaded when the
+ join condition is inferred
+ - rewrote many_to_many implementation, now provides helpers for adding
+ and deleting objects without dealing with the link table
+ - reworked InflateColumn implementation to lazily deflate where
+ possible; now handles passing an inflated object to new()
+ - changed join merging to not create a rel_2 alias when adding a join
+ that already exists in a parent resultset
+ - Storage::DBI::deployment_statements now calls ensure_connected
+ if it isn't passed a type
+ - fixed Componentized::ensure_class_loaded
+ - InflateColumn::DateTime supports date as well as datetime
+ - split Storage::DBI::MSSQL into MSSQL and Sybase::MSSQL
+ - fixed wrong debugging hook call in Storage::DBI
+ - set connect_info properly before setting any ->sql_maker things
+
+0.06999_02 2006-06-09 23:58:33
+ - Fixed up POD::Coverage tests, filled in some POD holes
+ - Added a warning for incorrect component order in load_components
+ - Fixed resultset bugs to do with related searches
+ - added code and tests for Componentized::ensure_class_found and
+ load_optional_class
+ - NoBindVars + Sybase + MSSQL stuff
+ - only rebless S::DBI if it is still S::DBI and not a subclass
+ - Added `use' statement for DBD::Pg in Storage::DBI::Pg
+ - stopped test relying on order of unordered search
+ - bugfix for join-types in nested joins using the from attribute
+ - obscure prefetch problem fixed
+ - tightened up deep search_related
+ - Fixed 'DBIx/Class/DB.pm did not return a true value' error
+ - Revert change to test for deprecated find usage and swallow warnings
+ - Slight wording change to new_related() POD
+ - new specific test for connect_info coderefs
+ - POD clarification and content bugfixing + a few code formatting fixes
+ - POD::Coverage additions
+ - fixed debugfh
+ - Fix column_info stomping
+
+0.06999_01 2006-05-28 17:19:30
+ - add automatic naming of unique constraints
+ - marked DB.pm as deprecated and noted it will be removed by 1.0
+ - add ResultSetColumn
+ - refactor ResultSet code to resolve attrs as late as possible
+ - merge prefetch attrs into join attrs
+ - add +select and +as attributes to ResultSet
+ - added InflateColumn::DateTime component
+ - refactor debugging to allow for profiling using Storage::Statistics
+ - removed Data::UUID from deps, made other optionals required
+ - modified SQLT parser to skip dupe table names
+ - added remove_column(s) to ResultSource/ResultSourceProxy
+ - added add_column alias to ResultSourceProxy
+ - added source_name to ResultSource
+ - load_classes now uses source_name and sets it if necessary
+ - add update_or_create_related to Relationship::Base
+ - add find_or_new to ResultSet/ResultSetProxy and find_or_new_related
+ to Relationship::Base
+ - add accessors for unique constraint names and columns to
+ ResultSource/ResultSourceProxy
+ - rework ResultSet::find() to search unique constraints
+ - CDBICompat: modify retrieve to fix column casing when ColumnCase is
+ loaded
+ - CDBICompat: override find_or_create to fix column casing when
+ ColumnCase is loaded
+ - reorganized and simplified tests
+ - added Ordered
+ - added the ability to set on_connect_do and the various sql_maker
+ options as part of Storage::DBI's connect_info.
+
+0.06003 2006-05-19 15:37:30
- make find_or_create_related check defined() instead of truth
- don't unnecessarily fetch rels for cascade_update
- don't set_columns explicitly in update_or_create; instead use
- columns_info_for made more robust / informative
- ithreads compat added, fork compat improved
- weaken result_source in all resultsets
- - Make pg seq extractor less sensitive.
+ - Make pg seq extractor less sensitive.
0.05999_03 2006-03-14 01:58:10
- has_many prefetch fixes
- remove build dependency on version.pm
0.05004 2006-02-13 20:59:00
- - allow specification of related columns via cols attr when primary
+ - allow specification of related columns via cols attr when primary
keys of the related table are not fetched
- fix count for group_by as scalar
- add horrific fix to make Oracle's retarded limit syntax work
# for developers only :)
^TODO$
+^VERSIONING\.SKETCH$
# Avoid Module::Build generated and utility files.
\bBuild$
# Save this 'cause CPAN will chdir all over the place.
my $cwd = Cwd::cwd();
- my $makefile = File::Spec->rel2abs($0);
- CPAN::Shell->install('Module::Build::Compat')
- or die " *** Cannot install without Module::Build. Exiting ...\n";
+ CPAN::Shell->install('Module::Build::Compat');
+ CPAN::Shell->expand("Module", "Module::Build::Compat")->uptodate
+ or die "Couldn't install Module::Build, giving up.\n";
chdir $cwd or die "Cannot chdir() back to $cwd: $!";
}
eval "use Module::Build::Compat 0.02; 1" or die $@;
- use lib '_build/lib';
+
Module::Build::Compat->run_build_pl(args => \@ARGV);
require Module::Build;
Module::Build::Compat->write_makefile(build_class => 'Module::Build');
+2005-04-16 by mst
+ - set_from_related should take undef
+ - ResultSource objects caching ->resultset causes interesting problems
+ - find why XSUB dumper kills schema in Catalyst (may be Pg only?)
+
+2006-04-11 by castaway
+ - using PK::Auto should set is_auto_increment for the PK columns, so that copy() "just works"
+ - docs of copy() should say that is_auto_increment is essential for auto_incrementing keys
+
+2006-03-25 by mst
+ - Refactor ResultSet::new to be less hairy
+ - we should move the setup of select, as, and from out of here
+ - these should be local rs attrs, not main attrs, and extra joins
+ provided on search should be merged
+ - find a way to un-wantarray search without breaking compat
+ - audit logging component
+ - delay relationship setup if done via ->load_classes
+ - double-sided relationships
+ - incremental deploy
+ - make short form of class specifier in relationships work
2006-01-31 by bluefeet
- Create a DBIx::Class::FilterColumn to replace inflate/deflate. This
We should still support the old inflate/deflate syntax, but this new
way should be recommended.
-2006-02-07 by JR
+2006-02-07 by castaway
- Extract DBIC::SQL::Abstract into a separate module for CPAN
- Chop PK::Auto::Foo up to have PK::Auto refer to an appropriate
DBIx::Storage::DBI::Foo, which will be loaded on connect from Driver info?
+(done -> 0.06001!)
- Add deploy method to Schema, which will create DB tables from Schema, via
SQLT
+(sorta done)
2006-03-18 by bluefeet
- Support table locking.
+2006-03-21 by bluefeet
+ - When subclassing a dbic class make it so you don't have to do
+ __PACKAGE__->table(__PACKAGE__->table()); for the result set to
+ return the correct object type.
+
+2006-03-27 by mst
+ Add the ability for deploy to be given a directory and grab <dbname>.sql
+ out of there if available. Try SQL::Translator if not. If none of the above,
+ cry (and die()). Then you can have a script that pre-gens for all available
+ SQLT modules so an app can do its own deploy without SQLT on the target
+ system
+
+2006-05-25 by mst (TODOed by bluefeet)
+ Add the search attributes "limit" and "rows_per_page".
+ limit: work as expected just like offset does
+ rows_per_page: only be used if you used the page attr or called $rs->page
+ rows: modify to be an alias that gets used to populate either as appropriate,
+ if you haven't specified one of the others
+
--- /dev/null
+Schema versioning/deployment ideas from Jess (with input from theorbtwo and mst):
+1) Add a method to storage to:
+ - take args of DB type, version, and optional file/pathname
+ - create an SQL file, via SQLT, for the current schema
+ - passing prev. version + version will create an sqlt-diff'ed upgrade file, such as
+ - $preversion->$currentversion-$dbtype.sql, which contains ALTER foo statements.
+2) Make deploy/deploy_statements able to load from the appropriate file, for the current DB, or on the fly? - Compare against current schema version..
+3) Add an on_connect_cb (callback) thingy to storage.
+4) create a component to deploy version/updates:
+ - it hooks itself into on_connect_cb ?
+ - when run it:
+ - Attempts or prompts a backup of the database. (commands for these per-rdbms can be stored in storage::dbi::<dbtype> ?)
+ - Checks the version of the current schema being used
+ - Compares it to some schema table containing the installed version
+ - If none such exists, we can attempt to sqlt-diff the DB structure with the schema
+ - If version does exist, we use an array of user-defined upgrade paths,
+ eg: version = '3.x'; schema = '1.x', upgrade paths = ('1.x->2.x', '2.x->3.x')
+ - Find the appropriate upgrade-path file, parse into two chunks:
+ a) the commands which do not contain "DROP"
+ b) the ones that do
+ - Calls user callbacks for "pre-upgrade"
+ - Runs the first set of commands on the DB
+ - Calls user callbacks for "post-alter"
+ - Runs drop commands
+ - Calls user callbacks for "post-drop"
+ - The user will need to define (or ignore) the following callbacks:
+ - "pre-upgrade", any code to be run before the upgrade, called with schema object, version-from, version-to, db-type .. bear in mind that here any new fields in the schema will not work, but can be used via scalarrefs.
+ - "post-alter", this is the main callback, at this stage, all old and new fields will be available, to allow data migration.
+ - "post-drop", this is the clean-up stage, now only new fields are available.
+
# i.e. first release of 0.XX *must* be 0.XX000. This avoids fBSD ports
# brain damage and presumably various other packaging systems too
-$VERSION = '0.06003';
+$VERSION = '0.06999_05';
sub MODIFY_CODE_ATTRIBUTES {
my ($class,$code,@attrs) = @_;
ank: Andres Kievsky
-blblack: Brandon Black
+blblack: Brandon L. Black <blblack@gmail.com>
+
+bluefeet: Aran Deltac <bluefeet@cpan.org>
LTJake: Brian Cassidy <bricas@cpan.org>
jguenther: Justin Guenther <jguenther@cpan.org>
+captainL: Luke Saunders <luke.saunders@gmail.com>
+
draven: Marcus Ramberg <mramberg@cpan.org>
nigel: Nigel Metheringham <nigelm@cpan.org>
sszabo: Stephan Szabo <sszabo@bigpanda.com>
-captainL: Luke Saunders <luke.saunders@gmail.com>
-
Todd Lipcon
wdh: Will Hawes
+gphat: Cory G Watson <gphat@cpan.org>
+
=head1 LICENSE
You may distribute this code under the same terms as Perl itself.
sub _attribute_store {
my $self = shift;
my $vals = @_ == 1 ? shift: {@_};
- my (@cols) = keys %$vals;
- @{$self->{_column_data}}{@cols} = @{$vals}{@cols};
+ $self->store_column($_, $vals->{$_}) for keys %{$vals};
}
sub _attribute_set {
sub _attribute_exists {
my ($self, $attr) = @_;
- exists $self->{_column_data}{$attr};
+ $self->has_column_loaded($attr);
}
1;
return $class->next::method(lc($col));
}
+# _build_query
+#
+# Build a query hash for find, et al. Overrides Retrieve::_build_query.
+
+sub _build_query {
+ my ($self, $query) = @_;
+
+ my %new_query;
+ $new_query{lc $_} = $query->{$_} for keys %$query;
+
+ return \%new_query;
+}
+
sub _mk_group_accessors {
my ($class, $type, $group, @fields) = @_;
#warn join(', ', map { ref $_ ? (@$_) : ($_) } @fields);
sub has_a {
my ($self, $col, $f_class, %args) = @_;
$self->throw_exception( "No such column ${col}" ) unless $self->has_column($col);
- eval "require $f_class";
+ $self->ensure_class_loaded($f_class);
if ($args{'inflate'} || $args{'deflate'}) { # Non-database has_a
if (!ref $args{'inflate'}) {
my $meth = $args{'inflate'};
my $class = shift;
\r
my $rs = $class->search(@_);
- unless ($rs->{page}) {
+ unless ($rs->{attrs}{page}) {
$rs = $rs->page(1);
}
return ( $rs->pager, $rs );
use warnings FATAL => 'all';
-sub retrieve {
- die "No args to retrieve" unless @_ > 1;
- shift->find(@_);
+sub retrieve {
+ my $self = shift;
+ die "No args to retrieve" unless @_ > 0;
+
+ my @cols = $self->primary_columns;
+
+ my $query;
+ if (ref $_[0] eq 'HASH') {
+ $query = { %{$_[0]} };
+ }
+ elsif (@_ == @cols) {
+ $query = {};
+ @{$query}{@cols} = @_;
+ }
+ else {
+ $query = {@_};
+ }
+
+ $query = $self->_build_query($query);
+ $self->find($query);
+}
+
+sub find_or_create {
+ my $self = shift;
+ my $query = ref $_[0] eq 'HASH' ? shift : {@_};
+
+ $query = $self->_build_query($query);
+ $self->next::method($query);
+}
+
+# _build_query
+#
+# Build a query hash. Defaults to a no-op; ColumnCase overrides.
+
+sub _build_query {
+ my ($self, $query) = @_;
+
+ return $query;
}
sub retrieve_from_sql {
use warnings;
use Class::C3;
+use Class::Inspector;
+use Carp::Clan qw/DBIx::Class/;
sub inject_base {
my ($class, $target, @to_inject) = @_;
{
no strict 'refs';
- my %seen;
- unshift( @{"${target}::ISA"},
- grep { !$seen{ $_ }++ && $target ne $_ && !$target->isa($_) }
- @to_inject
- );
+ foreach my $to (reverse @to_inject) {
+ my @comps = qw(DigestColumns ResultSetManager Ordered UTF8Columns);
+ # Add components here that need to be loaded before Core
+ foreach my $first_comp (@comps) {
+ if ($to eq 'DBIx::Class::Core' &&
+ $target->isa("DBIx::Class::${first_comp}")) {
+ warn "Possible incorrect order of components in ".
+ "${target}::load_components($first_comp) call: Core loaded ".
+ "before $first_comp. See the documentation for ".
+ "DBIx::Class::$first_comp for more information";
+ }
+ }
+ unshift( @{"${target}::ISA"}, $to )
+ unless ($target eq $to || $target->isa($to));
+ }
}
# Yes, this is hack. But it *does* work. Please don't submit tickets about
sub _load_components {
my ($class, @comp) = @_;
foreach my $comp (@comp) {
- eval "use $comp";
- die $@ if $@;
+ $class->ensure_class_loaded($comp);
}
$class->inject_base($class => @comp);
}
+# Given a class name, tests to see if it is already loaded or otherwise
+# defined. If it is not yet loaded, the package is require'd, and an exception
+# is thrown if the class is still not loaded.
+#
+# TODO: handle ->has_many('rel', 'Class'...) instead of
+# ->has_many('rel', 'Some::Schema::Class'...)
+#
+# BUG: For some reason, packages with syntax errors are added to %INC on
+# require
+sub ensure_class_loaded {
+ my ($class, $f_class) = @_;
+ return if Class::Inspector->loaded($f_class);
+ eval "require $f_class"; # require needs a bareword or filename
+ if ($@) {
+ if ($class->can('throw_exception')) {
+ $class->throw_exception($@);
+ } else {
+ croak $@;
+ }
+ }
+}
+
+# Returns true if the specified class is installed or already loaded, false
+# otherwise
+sub ensure_class_found {
+ my ($class, $f_class) = @_;
+ return Class::Inspector->loaded($f_class) ||
+ Class::Inspector->installed($f_class);
+}
+
+# Returns a true value if the specified class is installed and loaded
+# successfully, throws an exception if the class is found but not loaded
+# successfully, and false if the class is not installed
+sub load_optional_class {
+ my ($class, $f_class) = @_;
+ if ($class->ensure_class_found($f_class)) {
+ $class->ensure_class_loaded($f_class);
+ return 1;
+ } else {
+ return 0;
+ }
+}
+
1;
Serialize::Storable
InflateColumn
Relationship
+ PK::Auto
PK
Row
ResultSourceProxy::Table
=item L<DBIx::Class::Relationship>
+=item L<DBIx::Class::PK::Auto>
+
=item L<DBIx::Class::PK>
=item L<DBIx::Class::Row>
sub storage { shift->schema_instance(@_)->storage; }
-sub resultset_instance {
- my $class = ref $_[0] || $_[0];
- my $source = $class->result_source_instance;
- if ($source->result_class ne $class) {
- $source = $source->new($source);
- $source->result_class($class);
- }
- return $source->resultset;
-}
-
=head1 NAME
-DBIx::Class::DB - Non-recommended classdata schema component
+DBIx::Class::DB - (DEPRECATED) classdata schema component
=head1 SYNOPSIS
This class is designed to support the Class::DBI connection-as-classdata style
for DBIx::Class. You are *strongly* recommended to use a DBIx::Class::Schema
-instead; DBIx::Class::DB will continue to be supported but new development
-will be focused on Schema-based DBIx::Class setups.
+instead; DBIx::Class::DB will not undergo new development and will be moved
+to being a CDBICompat-only component before 1.0.
=head1 METHODS
}
}
-1;
+=head2 resultset_instance
+
+Returns an instance of a resultset for this class - effectively
+mapping the L<Class::DBI> connection-as-classdata paradigm into the
+native L<DBIx::Class::ResultSet> system.
+
+=cut
+
+sub resultset_instance {
+ my $class = ref $_[0] || $_[0];
+ my $source = $class->result_source_instance;
+ if ($source->result_class ne $class) {
+ $source = $source->new($source);
+ $source->result_class($class);
+ }
+ return $source->resultset;
+}
+
+=head2 resolve_class
+
+****DEPRECATED****
+
+See L<class_resolver>
+
+=head2 dbi_commit
+
+****DEPRECATED****
+
+Alias for L<txn_commit>
+
+=head2 dbi_rollback
+
+****DEPRECATED****
+
+Alias for L<txn_rollback>
=head1 AUTHORS
=cut
+1;
use strict;
use warnings;
-
+use Scalar::Util qw/blessed/;
use base qw/DBIx::Class::Row/;
return $deflate->($value, $self);
}
+=head2 get_inflated_column
+
+ my $val = $obj->get_inflated_column($col);
+
+Fetch a column value in its inflated state. This is directly
+analogous to L<DBIx::Class::Row/get_column> in that it only fetches a
+column already retrieved from the database, and then inflates it.
+Throws an exception if the column requested is not an inflated column.
+
+=cut
+
sub get_inflated_column {
my ($self, $col) = @_;
$self->throw_exception("$col is not an inflated column")
unless exists $self->column_info($col)->{_inflate_info};
-
return $self->{_inflated_column}{$col}
if exists $self->{_inflated_column}{$col};
return $self->{_inflated_column}{$col} =
$self->_inflated_column($col, $self->get_column($col));
}
+=head2 set_inflated_column
+
+ my $copy = $obj->set_inflated_column($col => $val);
+
+Sets a column value from an inflated value. This is directly
+analogous to L<DBIx::Class::Row/set_column>.
+
+=cut
+
sub set_inflated_column {
- my ($self, $col, @rest) = @_;
- my $ret = $self->_inflated_column_op('set', $col, @rest);
- return $ret;
+ my ($self, $col, $obj) = @_;
+ $self->set_column($col, $self->_deflated_column($col, $obj));
+ if (blessed $obj) {
+ $self->{_inflated_column}{$col} = $obj;
+ } else {
+ delete $self->{_inflated_column}{$col};
+ }
+ return $obj;
}
+=head2 store_inflated_column
+
+ my $copy = $obj->store_inflated_column($col => $val);
+
+Sets a column value from an inflated value without marking the column
+as dirty. This is directly analogous to L<DBIx::Class::Row/store_column>.
+
+=cut
+
sub store_inflated_column {
- my ($self, $col, @rest) = @_;
- my $ret = $self->_inflated_column_op('store', $col, @rest);
- return $ret;
+ my ($self, $col, $obj) = @_;
+ unless (blessed $obj) {
+ delete $self->{_inflated_column}{$col};
+ $self->store_column($col => $obj);
+ return $obj;
+ }
+ delete $self->{_column_data}{$col};
+ return $self->{_inflated_column}{$col} = $obj;
+}
+
+=head2 get_column
+
+Gets a column value in the same way as L<DBIx::Class::Row/get_column>. If there
+is an inflated value stored that has not yet been deflated, it is deflated
+when the method is invoked.
+
+=cut
+
+sub get_column {
+ my ($self, $col) = @_;
+ if (exists $self->{_inflated_column}{$col}
+ && !exists $self->{_column_data}{$col}) {
+ $self->store_column($col, $self->_deflated_column($col, $self->{_inflated_column}{$col}));
+ }
+ return $self->next::method($col);
}
-sub _inflated_column_op {
- my ($self, $op, $col, $obj) = @_;
- my $meth = "${op}_column";
- unless (ref $obj) {
- delete $self->{_inflated_column}{$col};
- return $self->$meth($col, $obj);
+=head2 get_columns
+
+Returns the get_column info for all columns as a hash,
+just like L<DBIx::Class::Row/get_columns>. Handles inflation just
+like L</get_column>.
+
+=cut
+
+sub get_columns {
+ my $self = shift;
+ if (exists $self->{_inflated_column}) {
+ foreach my $col (keys %{$self->{_inflated_column}}) {
+ $self->store_column($col, $self->_deflated_column($col, $self->{_inflated_column}{$col}))
+ unless exists $self->{_column_data}{$col};
+ }
}
+ return $self->next::method;
+}
- my $deflated = $self->_deflated_column($col, $obj);
- # Do this now so we don't store if it's invalid
+=head2 has_column_loaded
- $self->{_inflated_column}{$col} = $obj;
- $self->$meth($col, $deflated);
- return $obj;
+Like L<DBIx::Class::Row/has_column_loaded>, but also returns true if there
+is an inflated value stored.
+
+=cut
+
+sub has_column_loaded {
+ my ($self, $col) = @_;
+ return 1 if exists $self->{_inflated_column}{$col};
+ return $self->next::method($col);
}
+=head2 update
+
+Updates a row in the same way as L<DBIx::Class::Row/update>, handling
+inflation and deflation of columns appropriately.
+
+=cut
+
sub update {
my ($class, $attrs, @rest) = @_;
- $attrs ||= {};
- foreach my $key (keys %$attrs) {
+ foreach my $key (keys %{$attrs||{}}) {
if (ref $attrs->{$key}
&& exists $class->column_info($key)->{_inflate_info}) {
-# $attrs->{$key} = $class->_deflated_column($key, $attrs->{$key});
- $class->set_inflated_column ($key, delete $attrs->{$key});
+ $class->set_inflated_column($key, delete $attrs->{$key});
}
}
return $class->next::method($attrs, @rest);
}
+=head2 new
+
+Creates a row in the same way as L<DBIx::Class::Row/new>, handling
+inflation and deflation of columns appropriately.
+
+=cut
+
sub new {
my ($class, $attrs, @rest) = @_;
- $attrs ||= {};
- foreach my $key (keys %$attrs) {
- if (ref $attrs->{$key}
- && exists $class->column_info($key)->{_inflate_info}) {
- $attrs->{$key} = $class->_deflated_column($key, $attrs->{$key});
- }
+ my $inflated;
+ foreach my $key (keys %{$attrs||{}}) {
+ $inflated->{$key} = delete $attrs->{$key}
+ if ref $attrs->{$key} && exists $class->column_info($key)->{_inflate_info};
}
- return $class->next::method($attrs, @rest);
+ my $obj = $class->next::method($attrs, @rest);
+ $obj->{_inflated_column} = $inflated if $inflated;
+ return $obj;
}
=head1 SEE ALSO
--- /dev/null
+package DBIx::Class::InflateColumn::DateTime;
+
+use strict;
+use warnings;
+use base qw/DBIx::Class/;
+
+=head1 NAME
+
+DBIx::Class::InflateColumn::DateTime - Auto-create DateTime objects from date and datetime columns.
+
+=head1 SYNOPSIS
+
+Load this component and then declare one or more
+columns to be of the datetime, timestamp or date datatype.
+
+ package Event;
+ __PACKAGE__->load_components(qw/InflateColumn::DateTime/);
+ __PACKAGE__->add_columns(
+ starts_when => { data_type => 'datetime' }
+ );
+
+Then you can treat the specified column as a L<DateTime> object.
+
+ print "This event starts the month of ".
+ $event->starts_when->month_name();
+
+=head1 DESCRIPTION
+
+This module figures out the type of DateTime::Format::* class to
+inflate/deflate with based on the type of DBIx::Class::Storage::DBI::*
+that you are using. If you switch from one database to a different
+one your code should continue to work without modification (though note
+that this feature is new as of 0.07, so it may not be perfect yet - bug
+reports to the list very much welcome).
+
+=cut
+
+__PACKAGE__->load_components(qw/InflateColumn/);
+
+__PACKAGE__->mk_group_accessors('simple' => '__datetime_parser');
+
+=head2 register_column
+
+Chains with the L<DBIx::Class::Row/register_column> method, and sets
+up datetime columns appropriately. This would not normally be
+directly called by end users.
+
+=cut
+
+sub register_column {
+ my ($self, $column, $info, @rest) = @_;
+ $self->next::method($column, $info, @rest);
+ return unless defined($info->{data_type});
+ my $type = lc($info->{data_type});
+ $type = 'datetime' if ($type eq 'timestamp');
+ if ($type eq 'datetime' || $type eq 'date') {
+ my ($parse, $format) = ("parse_${type}", "format_${type}");
+ $self->inflate_column(
+ $column =>
+ {
+ inflate => sub {
+ my ($value, $obj) = @_;
+ $obj->_datetime_parser->$parse($value);
+ },
+ deflate => sub {
+ my ($value, $obj) = @_;
+ $obj->_datetime_parser->$format($value);
+ },
+ }
+ );
+ }
+}
+
+sub _datetime_parser {
+ my $self = shift;
+ if (my $parser = $self->__datetime_parser) {
+ return $parser;
+ }
+ my $parser = $self->result_source->storage->datetime_parser(@_);
+ return $self->__datetime_parser($parser);
+}
+
+1;
+__END__
+
+=head1 SEE ALSO
+
+=over 4
+
+=item More information about the add_columns method, and column metadata,
+ can be found in the documentation for L<DBIx::Class::ResultSource>.
+
+=back
+
+=head1 AUTHOR
+
+Matt S. Trout <mst@shadowcatsystems.co.uk>
+
+=head1 CONTRIBUTORS
+
+Aran Deltac <bluefeet@cpan.org>
+
+=head1 LICENSE
+
+You may distribute this code under the same terms as Perl itself.
+
L<DBIx::Class::HTMLWidget> - Like FromForm but with DBIx::Class and HTML::Widget.
+L<DBIx::Class::Ordered> - Modify the position of objects in an ordered list.
+
L<DBIx::Class::PK::Auto> - Retrieve automatically created primary keys upon insert.
L<DBIx::Class::QueriesTime> - Display the amount of time it takes to run queries.
This can be useful when you don't want to pass around a Schema object to every
method.
+=head2 Profiling
+
+When you enable L<DBIx::Class::Storage::DBI>'s debugging it prints the SQL
+executed as well as notifications of query completion and transaction
+begin/commit. If you'd like to profile the SQL you can subclass the
+L<DBIx::Class::Storage::Statistics> class and write your own profiling
+mechanism:
+
+ package My::Profiler;
+ use strict;
+
+ use base 'DBIx::Class::Storage::Statistics';
+
+ use Time::HiRes qw(time);
+
+ my $start;
+
+ sub query_start {
+ my $self = shift();
+ my $sql = shift();
+ my $params = @_;
+
+ print "Executing $sql: ".join(', ', @params)."\n";
+ $start = time();
+ }
+
+ sub query_end {
+ my $self = shift();
+ my $sql = shift();
+ my @params = @_;
+
+ printf("Execution took %0.4f seconds.\n", time() - $start);
+ $start = undef;
+ }
+
+ 1;
+
+You can then install that class as the debugging object:
+
+ __PACKAGE__->storage()->debugobj(new My::Profiler());
+ __PACKAGE__->storage()->debug(1);
+
+A more complicated example might involve storing each execution of SQL in an
+array:
+
+ sub query_end {
+ my $self = shift();
+ my $sql = shift();
+ my @params = @_;
+
+ my $elapsed = time() - $start;
+ push(@{ $calls{$sql} }, {
+ params => \@params,
+ elapsed => $elapsed
+ });
+ }
+
+You could then create average, high and low execution times for an SQL
+statement and dig down to see if certain parameters cause aberrant behavior.
+
=head2 Getting the value of the primary key for the last database insert
AKA getting last_insert_id
--- /dev/null
+=head1 NAME
+
+DBIx::Class::Manual::FAQ - Frequently Asked Questions (in theory)
+
+=head1 DESCRIPTION
+
+This document is intended as an anti-map of the documentation. If you
+know what you want to do, but not how to do it in L<DBIx::Class>, then
+look here. It does B<not> contain any code or examples, it just gives
+explanations and pointers to the correct pieces of documentation to
+read.
+
+=head1 FAQs
+
+How Do I:
+
+=head2 Getting started
+
+=over 4
+
+=item .. create a database to use?
+
+First, choose a database. For testing/experimenting, we recommend
+L<DBD::SQLite>, which is a self-contained small database. (i.e. all
+you need to do is to install the DBD from CPAN, and it's usable).
+
+Next, spend some time defining which data you need to store, and how
+it relates to the other data you have. For some help on normalisation,
+go to L<http://b62.tripod.com/doc/dbbase.htm> or
+L<http://209.197.234.36/db/simple.html>.
+
+Now, decide whether you want to have the database itself be the
+definitive source of information about the data layout, or your
+DBIx::Class schema. If it's the former, look up the documentation for
+your database, eg. L<http://sqlite.org/lang_createtable.html>, on how
+to create tables, and start creating them. For a nice universal
+interface to your database, you can try L<DBI::Shell>. If you decided
+on the latter choice, read the FAQ on setting up your classes
+manually, and the one on creating tables from your schema.
+
+=item .. use DBIx::Class with L<Catalyst>?
+
+Install L<Catalyst::Model::DBIC::Schema> from CPAN. See its
+documentation, or below, for further details.
+
+=item .. set up my DBIx::Class classes automatically from my database?
+
+Install L<DBIx::Class::Schema::Loader> from CPAN, and read its documentation.
+
+=item .. set up my DBIx::Class classes manually?
+
+Look at the L<DBIx::Class::Manual::Example>, come back here if you get lost.
+
+=item .. create my database tables from my DBIx::Class schema?
+
+Create your classes manually, as above. Write a script that calls
+L<DBIx::Class::Schema/deploy>. See there for details, or the
+L<DBIx::Class::Manual::Cookbook>.
+
+=back
+
+=head2 Relationships
+
+=over 4
+
+=item .. tell DBIx::Class about relationships between my tables?
+
+There are a variety of relationship types that come pre-defined for you to use. These are all listed in L<DBIx::Class::Relationship>. If you need a non-standard type, or more information, look in L<DBIx::Class::Relationship::Base>.
+
+=item .. define a one-to-many relationship?
+
+This is called a C<has_many> relationship on the one side, and a C<belongs_to> relationship on the many side. Currently these need to be set up individually on each side. See L<DBIx::Class::Relationship> for details.
+
+=item .. define a relationship where this table contains another table's primary key? (foreign key)
+
+Create a C<belongs_to> relationship for the field containing the foreign key. L<DBIx::Class::Relationship/belongs_to>.
+
+=item .. define a foreign key relationship where the key field may contain NULL?
+
+Just create a C<belongs_to> relationship, as above. If
+the column is NULL then the inflation to the foreign object will not
+happen. This has a side effect of not always fetching all the relevant
+data, if you use a nullable foreign-key relationship in a JOIN, then
+you probably want to set the join_type to 'left'.
+
+=item .. define a relationship where the key consists of more than one column?
+
+Instead of supplying a single column name, all relationship types also
+allow you to supply a hashref containing the condition across which
+the tables are to be joined. The condition may contain as many fields
+as you like. See L<DBIx::Class::Relationship::Base>.
+
+=item .. define a relationship across an intermediate table? (many-to-many)
+
+Read the documentation on L<DBIx::Class::Relationship/many_to_many>.
+
+=item .. stop DBIx::Class from attempting to cascade deletes on my has_many relationships?
+
+By default, DBIx::Class cascades deletes and updates across
+C<has_many> relationships. If your database already does this (and
+probably better), turn it off by supplying C<< cascade_delete => 0 >> in
+the relationship attributes. See L<DBIx::Class::Relationship::Base>.
+
+=item .. use a relationship?
+
+Use its name. An accessor is created using the name. See examples in L<DBIx::Class::Manual::Cookbook/Using relationships>.
+
+=back
+
+=head2 Searching
+
+=over 4
+
+=item .. search for data?
+
+=item .. search using database functions?
+
+=item .. sort the results of my search?
+
+=item .. group the results of my search?
+
+=item .. filter the results of my search?
+
+=item .. search in several tables simultaneously?
+
+=item .. find more help on constructing searches?
+
+Behind the scenes, DBIx::Class uses L<SQL::Abstract> to help construct
+its SQL searches. So if you fail to find help in the
+L<DBIx::Class::Manual::Cookbook>, try looking in the SQL::Abstract
+documentation.
+
+=back
+
+=head2 Fetching data
+
+=over 4
+
+=item .. fetch as much data as possible in as few select calls as possible? (prefetch)
+
+See the prefetch examples in the L<Cookbook|DBIx::Class::Manual::Cookbook>.
+
+=back
+
+=over 4
+
+=head2 Inserting and updating data
+
+=over 4
+
+=item .. insert many rows of data efficiently?
+
+=item .. update a collection of rows at the same time?
+
+=item .. use database functions when updating rows?
+
+=item .. update a column using data from another column?
+
+=back
+
+=head2 Misc
+
+=over 4
+
+=item How do I store my own (non-db) data in my DBIx::Class objects?
+
+=item How do I use DBIx::Class objects in my TT templates?
+
+=item See the SQL statements my code is producing?
+
+=item Why didn't my search run any SQL?
+
+
+=back
--- /dev/null
+# vim: ts=8:sw=4:sts=4:et
+package DBIx::Class::Ordered;
+use strict;
+use warnings;
+use base qw( DBIx::Class );
+
+=head1 NAME
+
+DBIx::Class::Ordered - Modify the position of objects in an ordered list.
+
+=head1 SYNOPSIS
+
+Create a table for your ordered data.
+
+ CREATE TABLE items (
+ item_id INTEGER PRIMARY KEY AUTOINCREMENT,
+ name TEXT NOT NULL,
+ position INTEGER NOT NULL
+ );
+ # Optional: group_id INTEGER NOT NULL
+
+In your Schema or DB class add Ordered to the top
+of the component list.
+
+ __PACKAGE__->load_components(qw( Ordered ... ));
+
+Specify the column that stores the position number for
+each row.
+
+ package My::Item;
+ __PACKAGE__->position_column('position');
+ __PACKAGE__->grouping_column('group_id'); # optional
+
+That's it, now you can change the position of your objects.
+
+    #!/usr/bin/perl
+ use My::Item;
+
+ my $item = My::Item->create({ name=>'Matt S. Trout' });
+ # If using grouping_column:
+ my $item = My::Item->create({ name=>'Matt S. Trout', group_id=>1 });
+
+ my $rs = $item->siblings();
+ my @siblings = $item->siblings();
+
+ my $sibling;
+ $sibling = $item->first_sibling();
+ $sibling = $item->last_sibling();
+ $sibling = $item->previous_sibling();
+ $sibling = $item->next_sibling();
+
+ $item->move_previous();
+ $item->move_next();
+ $item->move_first();
+ $item->move_last();
+ $item->move_to( $position );
+
+=head1 DESCRIPTION
+
+This module provides a simple interface for modifying the ordered
+position of DBIx::Class objects.
+
+=head1 AUTO UPDATE
+
+All of the move_* methods automatically update the rows involved in
+the query. This is not configurable and is due to the fact that if you
+move a record it always causes other records in the list to be updated.
+
+=head1 METHODS
+
+=head2 position_column
+
+ __PACKAGE__->position_column('position');
+
+Sets and retrieves the name of the column that stores the
+positional value of each record. Defaults to "position".
+
+=cut
+
+__PACKAGE__->mk_classdata( 'position_column' => 'position' );
+
+=head2 grouping_column
+
+ __PACKAGE__->grouping_column('group_id');
+
+This method specifies a column to limit all queries in
+this module by. This effectively allows you to have multiple
+ordered lists within the same table.
+
+=cut
+
+__PACKAGE__->mk_classdata( 'grouping_column' );
+
+=head2 siblings
+
+ my $rs = $item->siblings();
+ my @siblings = $item->siblings();
+
+Returns either a result set or an array of all other objects
+excluding the one you called it on.
+
+=cut
+
+# Returns every row in the same group (per _grouping_clause) except this
+# one, ordered by position.  List context yields the objects, scalar
+# context the resultset.
+sub siblings {
+    my( $self ) = @_;
+    my $position_column = $self->position_column;
+    my $rs = $self->result_source->resultset->search(
+        {
+            $position_column => { '!=' => $self->get_column($position_column) },
+            $self->_grouping_clause(),
+        },
+        { order_by => $self->position_column },
+    );
+    return $rs->all() if (wantarray());
+    return $rs;
+}
+
+=head2 first_sibling
+
+ my $sibling = $item->first_sibling();
+
+Returns the first sibling object, or 0 if the first sibling
+is this sibling.
+
+=cut
+
+sub first_sibling {
+    my( $self ) = @_;
+    # This row already occupies position 1 - it *is* the first sibling.
+    return 0 if ($self->get_column($self->position_column())==1);
+    # Fetch whichever row in the same group holds position 1.
+    return ($self->result_source->resultset->search(
+        {
+            $self->position_column => 1,
+            $self->_grouping_clause(),
+        },
+    )->all())[0];
+}
+
+=head2 last_sibling
+
+ my $sibling = $item->last_sibling();
+
+Return the last sibling, or 0 if the last sibling is this
+sibling.
+
+=cut
+
+sub last_sibling {
+    my( $self ) = @_;
+    # The group's row count doubles as its highest position number
+    # (positions are assumed contiguous, starting at 1).
+    my $count = $self->result_source->resultset->search({$self->_grouping_clause()})->count();
+    return 0 if ($self->get_column($self->position_column())==$count);
+    return ($self->result_source->resultset->search(
+        {
+            $self->position_column => $count,
+            $self->_grouping_clause(),
+        },
+    )->all())[0];
+}
+
+=head2 previous_sibling
+
+ my $sibling = $item->previous_sibling();
+
+Returns the sibling that resides one position back. 0
+is returned if the current object is the first one.
+
+=cut
+
+sub previous_sibling {
+    my( $self ) = @_;
+    my $position_column = $self->position_column;
+    my $position = $self->get_column( $position_column );
+    # No earlier sibling exists when this row is first; note the code
+    # returns 0 here (not undef).
+    return 0 if ($position==1);
+    return ($self->result_source->resultset->search(
+        {
+            $position_column => $position - 1,
+            $self->_grouping_clause(),
+        }
+    )->all())[0];
+}
+
+=head2 next_sibling
+
+ my $sibling = $item->next_sibling();
+
+Returns the sibling that resides one position forward. 0
+is returned if the current object is the last one.
+
+=cut
+
+sub next_sibling {
+    my( $self ) = @_;
+    my $position_column = $self->position_column;
+    my $position = $self->get_column( $position_column );
+    # Row count == highest position; if we're there, no next sibling.
+    # Note the code returns 0 here (not undef).
+    my $count = $self->result_source->resultset->search({$self->_grouping_clause()})->count();
+    return 0 if ($position==$count);
+    return ($self->result_source->resultset->search(
+        {
+            $position_column => $position + 1,
+            $self->_grouping_clause(),
+        },
+    )->all())[0];
+}
+
+=head2 move_previous
+
+ $item->move_previous();
+
+Swaps position with the sibling on position previous in the list.
+1 is returned on success, and 0 is returned if the object is already
+the first one.
+
+=cut
+
+sub move_previous {
+    my( $self ) = @_;
+    my $position = $self->get_column( $self->position_column() );
+    # move_to() returns 0 when $position - 1 < 1, i.e. already first.
+    return $self->move_to( $position - 1 );
+}
+
+=head2 move_next
+
+ $item->move_next();
+
+Swaps position with the sibling in the next position. 1 is returned on
+success, and 0 is returned if the object is already the last in the list.
+
+=cut
+
+sub move_next {
+    my( $self ) = @_;
+    my $position = $self->get_column( $self->position_column() );
+    # Guard against moving past the end of the list; 0 means "already last".
+    my $count = $self->result_source->resultset->search({$self->_grouping_clause()})->count();
+    return 0 if ($position==$count);
+    return $self->move_to( $position + 1 );
+}
+
+=head2 move_first
+
+ $item->move_first();
+
+Moves the object to the first position. 1 is returned on
+success, and 0 is returned if the object is already the first.
+
+=cut
+
+sub move_first {
+    my( $self ) = @_;
+    # Position 1 is always the head of the list.
+    return $self->move_to( 1 );
+}
+
+=head2 move_last
+
+ $item->move_last();
+
+Moves the object to the very last position. 1 is returned on
+success, and 0 is returned if the object is already the last one.
+
+=cut
+
+sub move_last {
+    my( $self ) = @_;
+    # Row count of the group == last position (positions are contiguous).
+    my $count = $self->result_source->resultset->search({$self->_grouping_clause()})->count();
+    return $self->move_to( $count );
+}
+
+=head2 move_to
+
+ $item->move_to( $position );
+
+Moves the object to the specified position. 1 is returned on
+success, and 0 is returned if the object is already at the
+specified position.
+
+=cut
+
+sub move_to {
+    my( $self, $to_position ) = @_;
+    my $position_column = $self->position_column;
+    my $from_position = $self->get_column( $position_column );
+    return 0 if ( $to_position < 1 );
+    return 0 if ( $from_position==$to_position );
+    # NOTE(review): a $to_position beyond the last position is not
+    # clamped here - the move_* helpers guard this, but direct callers
+    # could leave a gap in the sequence.  Verify callers.
+    # The range of positions that must shift by one to free the target slot.
+    my @between = (
+        ( $from_position < $to_position )
+        ? ( $from_position+1, $to_position )
+        : ( $to_position, $from_position-1 )
+    );
+    my $rs = $self->result_source->resultset->search({
+        $position_column => { -between => [ @between ] },
+        $self->_grouping_clause(),
+    });
+    # Shift the intervening rows up or down by one (literal SQL via
+    # scalar ref), then drop this row into the freed position.
+    my $op = ($from_position>$to_position) ? '+' : '-';
+    $rs->update({ $position_column => \"$position_column $op 1" });
+    $self->update({ $position_column => $to_position });
+    return 1;
+}
+
+=head2 insert
+
+Overrides the DBIC insert() method by providing a default
+position number. The default will be the number of rows in
+the table +1, thus positioning the new record at the last position.
+
+=cut
+
+sub insert {
+    my $self = shift;
+    my $position_column = $self->position_column;
+    # Default the position to the end of the list (group row count + 1)
+    # when none was supplied.  See the POD's "Race Condition on Insert":
+    # the COUNT and the INSERT are not atomic.
+    $self->set_column( $position_column => $self->result_source->resultset->search( {$self->_grouping_clause()} )->count()+1 )
+        if (!$self->get_column($position_column));
+    return $self->next::method( @_ );
+}
+
+=head2 delete
+
+Overrides the DBIC delete() method by first moving the object
+to the last position, then deleting it, thus ensuring the
+integrity of the positions.
+
+=cut
+
+sub delete {
+    my $self = shift;
+    # Shuffle this row to the end first so the remaining rows close
+    # ranks, keeping the position sequence contiguous after the delete.
+    $self->move_last;
+    return $self->next::method( @_ );
+}
+
+=head1 PRIVATE METHODS
+
+These methods are used internally. You should never have the
+need to use them.
+
+=head2 _grouping_clause
+
+This method returns a name=>value pair for limiting a search
+by the collection column. If the collection column is not
+defined then this will return an empty list.
+
+=cut
+
+sub _grouping_clause {
+    my( $self ) = @_;
+    my $col = $self->grouping_column();
+    # With no grouping column configured, return an empty list so the
+    # clause simply vanishes from the caller's search conditions.
+    if ($col) {
+        return ( $col => $self->get_column($col) );
+    }
+    return ();
+}
+
+1;
+__END__
+
+=head1 BUGS
+
+=head2 Unique Constraints
+
+Unique indexes and constraints on the position column are not
+supported at this time. It would make sense to support them,
+but there are some unexpected database issues that make this
+hard to do. The main problem from the author's view is that
+SQLite (the DB engine that we use for testing) does not support
+ORDER BY on updates.
+
+=head2 Race Condition on Insert
+
+If a position is not specified for an insert then a position
+will be chosen based on COUNT(*)+1. But, it first selects the
+count then inserts the record. The space of time between select
+and insert introduces a race condition. To fix this we need the
+ability to lock tables in DBIC. I've added an entry in the TODO
+about this.
+
+=head2 Multiple Moves
+
+Be careful when issuing move_* methods to multiple objects. If
+you've pre-loaded the objects then when you move one of the objects
+the position of the other object will not reflect their new value
+until you reload them from the database.
+
+There are times when you will want to move objects as groups, such
+as changing the parent of several objects at once - this directly
+conflicts with this problem. One solution is for us to write a
+ResultSet class that supports a parent() method, for example. Another
+solution is to somehow automagically modify the objects that exist
+in the current object's result set to have the new position value.
+
+=head1 AUTHOR
+
+Aran Deltac <bluefeet@cpan.org>
+
+=head1 LICENSE
+
+You may distribute this code under the same terms as Perl itself.
+
map { $_ . '=' . $vals{$_} } sort keys %vals;
}
+=head2 ident_condition
+
+ my $cond = $result_source->ident_condition();
+
+ my $cond = $result_source->ident_condition('alias');
+
+Produces a condition hash to locate a row based on the primary key(s).
+
+=cut
+
sub ident_condition {
my ($self, $alias) = @_;
my %cond;
holding the foreign key. If $cond is not given, the relname is used as
the column name.
+If the relationship is optional - ie the column containing the foreign
+key can be NULL - then the belongs_to relationship does the right
+thing - so in the example above C<$obj->author> would return C<undef>.
+However in this case you would probably want to set the C<join_type>
+attribute so that a C<LEFT JOIN> is done, which makes complex
+resultsets involving C<join> or C<prefetch> operations work correctly.
+The modified declaration is shown below:-
+
+ # in a Book class (where Author has many Books)
+ __PACKAGE__->belongs_to(author => 'My::DBIC::Schema::Author',
+ 'author', {join_type => 'left'});
+
+
Cascading deletes are off per default on a C<belongs_to> relationship, to turn
them on, pass C<< cascade_delete => 1 >> in the $attr hashref.
{ prefetch => [qw/book/],
});
my @book_objs = $obj->books;
+ my $books_rs = $obj->books;
+ ( $books_rs ) = $obj->books_rs;
$obj->add_to_books(\%col_data);
columns. You should pass the name of the column in the foreign class as the
$cond argument, or specify a complete join condition.
-As well as the accessor method, a method named C<< add_to_<relname> >>
-will also be added to your Row items, this allows you to insert new
-related items, using the same mechanism as in L<DBIx::Class::Relationship::Base/"create_related">.
+Three methods are created when you create a has_many relationship. The first
+method is the expected accessor method. The second is almost exactly the same
+as the accessor method but "_rs" is added to the end of the method name. This
+method works just like the normal accessor, except that it returns a resultset
+no matter what, even in list context. The third method, named
+C<< add_to_<relname> >>, will also be added to your Row items, this allows
+you to insert new related items, using the same mechanism as in
+L<DBIx::Class::Relationship::Base/"create_related">.
If you delete an object in a class with a C<has_many> relationship, all
the related objects will be deleted as well. However, any database-level
=head2 many_to_many
+=over 4
+
+=item Arguments: $accessor_name, $link_rel_name, $foreign_rel_name
+
+=back
+
My::DBIC::Schema::Actor->has_many( actor_roles =>
'My::DBIC::Schema::ActorRoles',
'actor' );
My::DBIC::Schema::Actor->many_to_many( roles => 'actor_roles',
'role' );
- ...
-
- my @role_objs = $actor->roles;
+Creates accessors bridging two relationships; not strictly a relationship in
+its own right, although the accessor will return a resultset or collection of
+objects just as a has_many would.
-Creates an accessor bridging two relationships; not strictly a relationship
-in its own right, although the accessor will return a resultset or collection
-of objects just as a has_many would.
To use many_to_many, existing relationships from the original table to the link
table, and from the link table to the end table must already exist, these
relation names are then used in the many_to_many call.
);
} elsif ($acc_type eq 'multi') {
$meth{$rel} = sub { shift->search_related($rel, @_) };
+ $meth{"${rel}_rs"} = sub { shift->search_related_rs($rel, @_) };
$meth{"add_to_${rel}"} = sub { shift->create_related($rel, @_); };
} else {
$class->throw_exception("No such relationship accessor type $acc_type");
use strict;
use warnings;
+use Scalar::Util ();
use base qw/DBIx::Class/;
=head1 NAME
=head2 search_related
- $rs->search_related('relname', $cond, $attrs);
+ @objects = $rs->search_related('relname', $cond, $attrs);
+ $objects_rs = $rs->search_related('relname', $cond, $attrs);
Run a search on a related resultset. The search will be restricted to the
item or items represented by the L<DBIx::Class::ResultSet> it was called
return shift->related_resultset(shift)->search(@_);
}
+=head2 search_related_rs
+
+ ( $objects_rs ) = $rs->search_related_rs('relname', $cond, $attrs);
+
+This method works exactly the same as search_related, except that
+it guarantees a resultset, even in list context.
+
+=cut
+
+sub search_related_rs {
+    # Same as search_related() but goes through search_rs(), which
+    # returns a resultset regardless of calling context.
+    return shift->related_resultset(shift)->search_rs(@_);
+}
+
=head2 count_related
$obj->count_related('relname', $cond, $attrs);
my $new_obj = $obj->new_related('relname', \%col_data);
Create a new item of the related foreign class. If called on a
-L<DBIx::Class::Manual::Glossary/"Row"> object, it will magically set any
-primary key values into foreign key columns for you. The newly created item
-will not be saved into your storage until you call L<DBIx::Class::Row/insert>
+L<DBIx::Class::Manual::Glossary/"Row"> object, it will magically
+set any foreign key columns of the new object to the related primary
+key columns of the source object for you. The newly created item will
+not be saved into your storage until you call L<DBIx::Class::Row/insert>
on it.
=cut
return $self->search_related($rel)->find(@_);
}
+=head2 find_or_new_related
+
+ my $new_obj = $obj->find_or_new_related('relname', \%col_data);
+
+Find an item of a related class. If none exists, instantiate a new item of the
+related class. The object will not be saved into your storage until you call
+L<DBIx::Class::Row/insert> on it.
+
+=cut
+
+sub find_or_new_related {
+    my $self = shift;
+    # Reuse an existing related row if one matches; otherwise build an
+    # unsaved new one (the caller must insert() it).
+    return $self->find_related(@_) || $self->new_related(@_);
+}
+
=head2 find_or_create_related
my $new_obj = $obj->find_or_create_related('relname', \%col_data);
Find or create an item of a related class. See
-L<DBIx::Class::ResultSet/"find_or_create"> for details.
+L<DBIx::Class::ResultSet/find_or_create> for details.
=cut
return (defined($obj) ? $obj : $self->create_related(@_));
}
+=head2 update_or_create_related
+
+ my $updated_item = $obj->update_or_create_related('relname', \%col_data, \%attrs?);
+
+Update or create an item of a related class. See
+L<DBIx::Class::ResultSet/update_or_create> for details.
+
+=cut
+
+sub update_or_create_related {
+    my $self = shift;
+    my $rel = shift;
+    # Delegate to the related resultset's update_or_create().
+    return $self->related_resultset($rel)->update_or_create(@_);
+}
+
=head2 set_from_related
$book->set_from_related('author', $author_obj);
if (defined $f_obj) {
my $f_class = $self->result_source->schema->class($rel_obj->{class});
$self->throw_exception( "Object $f_obj isn't a ".$f_class )
- unless $f_obj->isa($f_class);
+ unless Scalar::Util::blessed($f_obj) and $f_obj->isa($f_class);
}
$self->set_columns(
$self->result_source->resolve_condition(
return $obj;
}
-1;
+=head2 add_to_$rel
+
+B<Currently only available for C<has_many>, C<many-to-many> and 'multi' type
+relationships.>
+
+=over 4
+
+=item Arguments: ($foreign_vals | $obj), $link_vals?
+
+=back
+
+ my $role = $schema->resultset('Role')->find(1);
+ $actor->add_to_roles($role);
+ # creates a My::DBIC::Schema::ActorRoles linking table row object
+
+ $actor->add_to_roles({ name => 'lead' }, { salary => 15_000_000 });
+ # creates a new My::DBIC::Schema::Role row object and the linking table
+ # object with an extra column in the link
+
+Adds a linking table object for C<$obj> or C<$foreign_vals>. If the first
+argument is a hash reference, the related object is created first with the
+column values in the hash. If an object reference is given, just the linking
+table object is created. In either case, any additional column values for the
+linking table object can be specified in C<$link_vals>.
+
+=head2 set_$rel
+
+B<Currently only available for C<many-to-many> relationships.>
+
+=over 4
+
+=item Arguments: (@hashrefs | @objs)
+
+=back
+
+ my $actor = $schema->resultset('Actor')->find(1);
+ my @roles = $schema->resultset('Role')->search({ role =>
+    { '-in' => ['Fred', 'Barney'] } } );
+
+ $actor->set_roles(@roles);
+ # Replaces all of $actors previous roles with the two named
+
+Replace all the related objects with the given list of objects. This does a
+C<delete> B<on the link table resultset> to remove the association between the
+current object and all related objects, then calls C<add_to_$rel> repeatedly to
+link all the new objects.
+
+Note that this means that this method will B<not> delete any objects in the
+table on the right side of the relation, merely that it will delete the link
+between them.
+
+=head2 remove_from_$rel
+
+B<Currently only available for C<many-to-many> relationships.>
+
+=over 4
+
+=item Arguments: $obj
+
+=back
+
+ my $role = $schema->resultset('Role')->find(1);
+ $actor->remove_from_roles($role);
+ # removes $role's My::DBIC::Schema::ActorRoles linking table row object
+
+Removes the link between the current object and the related object. Note that
+the related object itself won't be deleted unless you call ->delete() on
+it. This method just removes the link between the two objects.
=head1 AUTHORS
=cut
+1;
-package DBIx::Class::Relationship::BelongsTo;
+package # hide from PAUSE
+ DBIx::Class::Relationship::BelongsTo;
+
+# Documentation for these methods can be found in
+# DBIx::Class::Relationship
use strict;
use warnings;
sub belongs_to {
my ($class, $rel, $f_class, $cond, $attrs) = @_;
- eval "require $f_class";
- if ($@) {
- $class->throw_exception($@) unless $@ =~ /Can't locate/;
- }
-
# no join condition or just a column name
if (!ref $cond) {
+ $class->ensure_class_loaded($f_class);
my %f_primaries = map { $_ => 1 } eval { $f_class->primary_columns };
- $class->throw_exception("Can't infer join condition for ${rel} on ${class}; unable to load ${f_class}")
- if $@;
+ $class->throw_exception(
+ "Can't infer join condition for ${rel} on ${class}; ".
+ "unable to load ${f_class}: $@"
+ ) if $@;
my ($pri, $too_many) = keys %f_primaries;
- $class->throw_exception("Can't infer join condition for ${rel} on ${class}; ${f_class} has no primary keys")
- unless defined $pri;
- $class->throw_exception("Can't infer join condition for ${rel} on ${class}; ${f_class} has multiple primary keys")
- if $too_many;
+ $class->throw_exception(
+ "Can't infer join condition for ${rel} on ${class}; ".
+ "${f_class} has no primary keys"
+ ) unless defined $pri;
+ $class->throw_exception(
+ "Can't infer join condition for ${rel} on ${class}; ".
+ "${f_class} has multiple primary keys"
+ ) if $too_many;
my $fk = defined $cond ? $cond : $rel;
- $class->throw_exception("Can't infer join condition for ${rel} on ${class}; $fk is not a column")
- unless $class->has_column($fk);
+ $class->throw_exception(
+ "Can't infer join condition for ${rel} on ${class}; ".
+ "$fk is not a column of $class"
+ ) unless $class->has_column($fk);
my $acc_type = $class->has_column($rel) ? 'filter' : 'single';
$class->add_relationship($rel, $f_class,
}
$cond_rel->{"foreign.$_"} = "self.".$cond->{$_};
}
- my $acc_type = (keys %$cond_rel == 1 and $class->has_column($rel)) ? 'filter' : 'single';
+ my $acc_type = (keys %$cond_rel == 1 and $class->has_column($rel))
+ ? 'filter'
+ : 'single';
$class->add_relationship($rel, $f_class,
$cond_rel,
{ accessor => $acc_type, %{$attrs || {}} }
);
}
else {
- $class->throw_exception('third argument for belongs_to must be undef, a column name, or a join condition');
+ $class->throw_exception(
+ 'third argument for belongs_to must be undef, a column name, '.
+ 'or a join condition'
+ );
}
return 1;
}
sub has_many {
my ($class, $rel, $f_class, $cond, $attrs) = @_;
-
- eval "require $f_class";
- if ($@) {
- $class->throw_exception($@) unless $@ =~ /Can't locate/;
- }
unless (ref $cond) {
+ $class->ensure_class_loaded($f_class);
my ($pri, $too_many) = $class->primary_columns;
- $class->throw_exception( "has_many can only infer join for a single primary key; ${class} has more" )
- if $too_many;
+
+ $class->throw_exception(
+ "has_many can only infer join for a single primary key; ".
+ "${class} has more"
+ ) if $too_many;
my ($f_key,$guess);
if (defined $cond && length $cond) {
}
my $f_class_loaded = eval { $f_class->columns };
- $class->throw_exception("No such column ${f_key} on foreign class ${f_class} ($guess)")
- if $f_class_loaded && !$f_class->has_column($f_key);
+ $class->throw_exception(
+ "No such column ${f_key} on foreign class ${f_class} ($guess)"
+ ) if $f_class_loaded && !$f_class->has_column($f_key);
$cond = { "foreign.${f_key}" => "self.${pri}" };
}
- $class->add_relationship($rel, $f_class, $cond,
- { accessor => 'multi',
- join_type => 'LEFT',
- cascade_delete => 1,
- cascade_copy => 1,
- %{$attrs||{}} } );
+ $class->add_relationship($rel, $f_class, $cond, {
+ accessor => 'multi',
+ join_type => 'LEFT',
+ cascade_delete => 1,
+ cascade_copy => 1,
+ %{$attrs||{}}
+ });
}
1;
sub _has_one {
my ($class, $join_type, $rel, $f_class, $cond, $attrs) = @_;
- eval "require $f_class";
- if ($@) {
- $class->throw_exception($@) unless $@ =~ /Can't locate/;
- }
-
unless (ref $cond) {
+ $class->ensure_class_loaded($f_class);
my ($pri, $too_many) = $class->primary_columns;
- $class->throw_exception( "might_have/has_one can only infer join for a single primary key; ${class} has more" )
- if $too_many;
+ $class->throw_exception(
+ "might_have/has_one can only infer join for a single primary key; ".
+ "${class} has more"
+ ) if $too_many;
my $f_class_loaded = eval { $f_class->columns };
my ($f_key,$guess);
if (defined $cond && length $cond) {
$guess = "using given relationship '$rel' for foreign key";
} else {
($f_key, $too_many) = $f_class->primary_columns;
- $class->throw_exception( "might_have/has_one can only infer join for a single primary key; ${f_class} has more" )
- if $too_many;
+ $class->throw_exception(
+ "might_have/has_one can only infer join for a single primary key; ".
+ "${f_class} has more"
+ ) if $too_many;
$guess = "using primary key of foreign class for foreign key";
}
- $class->throw_exception("No such column ${f_key} on foreign class ${f_class} ($guess)")
- if $f_class_loaded && !$f_class->has_column($f_key);
+ $class->throw_exception(
+ "No such column ${f_key} on foreign class ${f_class} ($guess)"
+ ) if $f_class_loaded && !$f_class->has_column($f_key);
$cond = { "foreign.${f_key}" => "self.${pri}" };
}
$class->add_relationship($rel, $f_class,
{
no strict 'refs';
no warnings 'redefine';
+
+ my $add_meth = "add_to_${meth}";
+ my $remove_meth = "remove_from_${meth}";
+ my $set_meth = "set_${meth}";
+
*{"${class}::${meth}"} = sub {
my $self = shift;
my $attrs = @_ > 1 && ref $_[$#_] eq 'HASH' ? pop(@_) : {};
- $self->search_related($rel)->search_related($f_rel, @_ > 0 ? @_ : undef, { %{$rel_attrs||{}}, %$attrs });
+ $self->search_related($rel)->search_related(
+ $f_rel, @_ > 0 ? @_ : undef, { %{$rel_attrs||{}}, %$attrs }
+ );
+ };
+
+ *{"${class}::${add_meth}"} = sub {
+ my $self = shift;
+ @_ > 0 or $self->throw_exception(
+ "${add_meth} needs an object or hashref"
+ );
+ my $source = $self->result_source;
+ my $schema = $source->schema;
+ my $rel_source_name = $source->relationship_info($rel)->{source};
+ my $rel_source = $schema->resultset($rel_source_name)->result_source;
+ my $f_rel_source_name = $rel_source->relationship_info($f_rel)->{source};
+ my $f_rel_rs = $schema->resultset($f_rel_source_name);
+ my $obj = ref $_[0]
+ ? ( ref $_[0] eq 'HASH' ? $f_rel_rs->create($_[0]) : $_[0] )
+ : ( $f_rel_rs->create({@_}) );
+ my $link_vals = @_ > 1 && ref $_[$#_] eq 'HASH' ? pop(@_) : {};
+ my $link = $self->search_related($rel)->new_result({});
+ $link->set_from_related($f_rel, $obj);
+ $link->set_columns($link_vals);
+ $link->insert();
+ };
+
+ *{"${class}::${set_meth}"} = sub {
+ my $self = shift;
+ @_ > 0 or $self->throw_exception(
+ "{$set_meth} needs a list of objects or hashrefs"
+ );
+ $self->search_related($rel, {})->delete;
+ $self->$add_meth(shift) while (defined $_[0]);
+ };
+
+ *{"${class}::${remove_meth}"} = sub {
+ my $self = shift;
+ @_ > 0 && ref $_[0] ne 'HASH'
+ or $self->throw_exception("${remove_meth} needs an object");
+ my $obj = shift;
+ my $rel_source = $self->search_related($rel)->result_source;
+ my $cond = $rel_source->relationship_info($f_rel)->{cond};
+ my $link_cond = $rel_source->resolve_condition(
+ $cond, $obj, $f_rel
+ );
+ $self->search_related($rel, $link_cond)->delete;
};
+
}
}
'0+' => \&count,
'bool' => sub { 1; },
fallback => 1;
+use Carp::Clan qw/^DBIx::Class/;
use Data::Page;
use Storable;
-use Scalar::Util qw/weaken/;
-
+use DBIx::Class::ResultSetColumn;
use base qw/DBIx::Class/;
__PACKAGE__->load_components(qw/AccessorGroup/);
__PACKAGE__->mk_group_accessors('simple' => qw/result_source result_class/);
sub new {
my $class = shift;
return $class->new_result(@_) if ref $class;
-
- my ($source, $attrs) = @_;
- weaken $source;
- $attrs = Storable::dclone($attrs || {}); # { %{ $attrs || {} } };
- #use Data::Dumper; warn Dumper($attrs);
- my $alias = ($attrs->{alias} ||= 'me');
-
- $attrs->{columns} ||= delete $attrs->{cols} if $attrs->{cols};
- delete $attrs->{as} if $attrs->{columns};
- $attrs->{columns} ||= [ $source->columns ] unless $attrs->{select};
- $attrs->{select} = [
- map { m/\./ ? $_ : "${alias}.$_" } @{delete $attrs->{columns}}
- ] if $attrs->{columns};
- $attrs->{as} ||= [
- map { m/^\Q$alias.\E(.+)$/ ? $1 : $_ } @{$attrs->{select}}
- ];
- if (my $include = delete $attrs->{include_columns}) {
- push(@{$attrs->{select}}, @$include);
- push(@{$attrs->{as}}, map { m/([^.]+)$/; $1; } @$include);
- }
- #use Data::Dumper; warn Dumper(@{$attrs}{qw/select as/});
-
- $attrs->{from} ||= [ { $alias => $source->from } ];
- $attrs->{seen_join} ||= {};
- my %seen;
- if (my $join = delete $attrs->{join}) {
- foreach my $j (ref $join eq 'ARRAY' ? @$join : ($join)) {
- if (ref $j eq 'HASH') {
- $seen{$_} = 1 foreach keys %$j;
- } else {
- $seen{$j} = 1;
- }
- }
- push(@{$attrs->{from}}, $source->resolve_join(
- $join, $attrs->{alias}, $attrs->{seen_join})
- );
- }
-
- $attrs->{group_by} ||= $attrs->{select} if delete $attrs->{distinct};
- $attrs->{order_by} = [ $attrs->{order_by} ] if
- $attrs->{order_by} and !ref($attrs->{order_by});
- $attrs->{order_by} ||= [];
- my $collapse = $attrs->{collapse} || {};
- if (my $prefetch = delete $attrs->{prefetch}) {
- my @pre_order;
- foreach my $p (ref $prefetch eq 'ARRAY' ? @$prefetch : ($prefetch)) {
- if ( ref $p eq 'HASH' ) {
- foreach my $key (keys %$p) {
- push(@{$attrs->{from}}, $source->resolve_join($p, $attrs->{alias}))
- unless $seen{$key};
- }
- } else {
- push(@{$attrs->{from}}, $source->resolve_join($p, $attrs->{alias}))
- unless $seen{$p};
- }
- my @prefetch = $source->resolve_prefetch(
- $p, $attrs->{alias}, {}, \@pre_order, $collapse);
- push(@{$attrs->{select}}, map { $_->[0] } @prefetch);
- push(@{$attrs->{as}}, map { $_->[1] } @prefetch);
- }
- push(@{$attrs->{order_by}}, @pre_order);
- }
- $attrs->{collapse} = $collapse;
-# use Data::Dumper; warn Dumper($collapse) if keys %{$collapse};
+ my ($source, $attrs) = @_;
+ #weaken $source;
if ($attrs->{page}) {
$attrs->{rows} ||= 10;
$attrs->{offset} += ($attrs->{rows} * ($attrs->{page} - 1));
}
+ $attrs->{alias} ||= 'me';
+ $attrs->{_orig_alias} ||= $attrs->{alias};
+
bless {
result_source => $source,
result_class => $attrs->{result_class} || $source->result_class,
cond => $attrs->{where},
- from => $attrs->{from},
- collapse => $collapse,
+# from => $attrs->{from},
+# collapse => $collapse,
count => undef,
- page => delete $attrs->{page},
pager => undef,
attrs => $attrs
}, $class;
sub search {
my $self = shift;
-
- my $attrs = { %{$self->{attrs}} };
- my $having = delete $attrs->{having};
- $attrs = { %$attrs, %{ pop(@_) } } if @_ > 1 and ref $_[$#_] eq 'HASH';
+ my $rs = $self->search_rs( @_ );
+ return (wantarray ? $rs->all : $rs);
+}
+
+=head2 search_rs
+
+=over 4
+
+=item Arguments: $cond, \%attrs?
+
+=item Return Value: $resultset
+
+=back
+
+This method does the same exact thing as search() except it will
+always return a resultset, even in list context.
+
+=cut
+
+sub search_rs {
+ my $self = shift;
+
+ my $attrs = {};
+ $attrs = pop(@_) if @_ > 1 and ref $_[$#_] eq 'HASH';
+ my $our_attrs = exists $attrs->{_parent_attrs}
+ ? { %{delete $attrs->{_parent_attrs}} }
+ : { %{$self->{attrs}} };
+ my $having = delete $our_attrs->{having};
+
+ # XXX should only maintain _live_join_stack and generate _live_join_h from that
+ if ($attrs->{_live_join_stack}) {
+ foreach my $join (reverse @{$attrs->{_live_join_stack}}) {
+ $attrs->{_live_join_h} = defined $attrs->{_live_join_h}
+ ? { $join => $attrs->{_live_join_h} }
+ : $join;
+ }
+ }
+ # merge new attrs into inherited
+ foreach my $key (qw/join prefetch/) {
+ next unless exists $attrs->{$key};
+ if (my $live_join = $attrs->{_live_join_stack} || $our_attrs->{_live_join_stack}) {
+ foreach my $join (reverse @{$live_join}) {
+ $attrs->{$key} = { $join => $attrs->{$key} };
+ }
+ }
+
+ $our_attrs->{$key} = $self->_merge_attr($our_attrs->{$key}, delete $attrs->{$key});
+ }
+
+ $our_attrs->{join} = $self->_merge_attr(
+ $our_attrs->{join}, $attrs->{_live_join_h}
+ ) if ($attrs->{_live_join_h});
+
+ if (defined $our_attrs->{prefetch}) {
+ $our_attrs->{join} = $self->_merge_attr(
+ $our_attrs->{join}, $our_attrs->{prefetch}
+ );
+ }
+
+ my $new_attrs = { %{$our_attrs}, %{$attrs} };
my $where = (@_
- ? ((@_ == 1 || ref $_[0] eq "HASH")
- ? shift
- : ((@_ % 2)
- ? $self->throw_exception(
- "Odd number of arguments to search")
- : {@_}))
- : undef());
+ ? (
+ (@_ == 1 || ref $_[0] eq "HASH")
+ ? shift
+ : (
+ (@_ % 2)
+ ? $self->throw_exception("Odd number of arguments to search")
+ : {@_}
+ )
+ )
+ : undef
+ );
+
if (defined $where) {
- $attrs->{where} = (defined $attrs->{where}
- ? { '-and' =>
- [ map { ref $_ eq 'ARRAY' ? [ -or => $_ ] : $_ }
- $where, $attrs->{where} ] }
- : $where);
+ $new_attrs->{where} = (
+ defined $new_attrs->{where}
+ ? { '-and' => [
+ map {
+ ref $_ eq 'ARRAY' ? [ -or => $_ ] : $_
+ } $where, $new_attrs->{where}
+ ]
+ }
+ : $where);
}
if (defined $having) {
- $attrs->{having} = (defined $attrs->{having}
- ? { '-and' =>
- [ map { ref $_ eq 'ARRAY' ? [ -or => $_ ] : $_ }
- $having, $attrs->{having} ] }
- : $having);
+ $new_attrs->{having} = (
+ defined $new_attrs->{having}
+ ? { '-and' => [
+ map {
+ ref $_ eq 'ARRAY' ? [ -or => $_ ] : $_
+ } $having, $new_attrs->{having}
+ ]
+ }
+ : $having);
}
- my $rs = (ref $self)->new($self->result_source, $attrs);
+ my $rs = (ref $self)->new($self->result_source, $new_attrs);
+ $rs->{_parent_source} = $self->{_parent_source} if $self->{_parent_source};
- unless (@_) { # no search, effectively just a clone
+ unless (@_) { # no search, effectively just a clone
my $rows = $self->get_cache;
if ($rows) {
$rs->set_cache($rows);
}
}
-
- return (wantarray ? $rs->all : $rs);
+ return $rs;
}
=head2 search_literal
=back
-Finds a row based on its primary key or unique constraint. For example:
+Finds a row based on its primary key or unique constraint. For example, to find
+a row by its primary key:
my $cd = $schema->resultset('CD')->find(5);
-Also takes an optional C<key> attribute, to search by a specific key or unique
-constraint. For example:
+You can also find a row by a specific unique constraint using the C<key>
+attribute. For example:
+
+ my $cd = $schema->resultset('CD')->find('Massive Attack', 'Mezzanine', {
+ key => 'cd_artist_title'
+ });
+
+Additionally, you can specify the columns explicitly by name:
my $cd = $schema->resultset('CD')->find(
{
artist => 'Massive Attack',
title => 'Mezzanine',
},
- { key => 'artist_title' }
+ { key => 'cd_artist_title' }
);
-See also L</find_or_create> and L</update_or_create>.
+If the C<key> is specified as C<primary>, it searches only on the primary key.
+
+If no C<key> is specified, it searches on all unique constraints defined on the
+source, including the primary key.
+
+See also L</find_or_create> and L</update_or_create>. For information on how to
+declare unique constraints, see
+L<DBIx::Class::ResultSource/add_unique_constraint>.
=cut
sub find {
- my ($self, @vals) = @_;
- my $attrs = (@vals > 1 && ref $vals[$#vals] eq 'HASH' ? pop(@vals) : {});
+ my $self = shift;
+ my $attrs = (@_ > 1 && ref $_[$#_] eq 'HASH' ? pop(@_) : {});
- my @cols = $self->result_source->primary_columns;
- if (exists $attrs->{key}) {
- my %uniq = $self->result_source->unique_constraints;
- $self->throw_exception(
- "Unknown key $attrs->{key} on '" . $self->result_source->name . "'"
- ) unless exists $uniq{$attrs->{key}};
- @cols = @{ $uniq{$attrs->{key}} };
- }
- #use Data::Dumper; warn Dumper($attrs, @vals, @cols);
+ # Default to the primary key, but allow a specific key
+ my @cols = exists $attrs->{key}
+ ? $self->result_source->unique_constraint_columns($attrs->{key})
+ : $self->result_source->primary_columns;
$self->throw_exception(
"Can't find unless a primary key or unique constraint is defined"
) unless @cols;
- my $query;
- if (ref $vals[0] eq 'HASH') {
- $query = { %{$vals[0]} };
- } elsif (@cols == @vals) {
- $query = {};
- @{$query}{@cols} = @vals;
- } else {
- $query = {@vals};
+ # Parse out a hashref from input
+ my $input_query;
+ if (ref $_[0] eq 'HASH') {
+ $input_query = { %{$_[0]} };
}
- foreach my $key (grep { ! m/\./ } keys %$query) {
- $query->{"$self->{attrs}{alias}.$key"} = delete $query->{$key};
+ elsif (@_ == @cols) {
+ $input_query = {};
+ @{$input_query}{@cols} = @_;
}
- #warn Dumper($query);
-
+ else {
+ # Compatibility: Allow e.g. find(id => $value)
+ carp "Find by key => value deprecated; please use a hashref instead";
+ $input_query = {@_};
+ }
+
+ my @unique_queries = $self->_unique_queries($input_query, $attrs);
+
+ # Handle cases where the ResultSet defines the query, or where the user is
+ # abusing find
+ my $query = @unique_queries ? \@unique_queries : $input_query;
+
+ # Run the query
if (keys %$attrs) {
- my $rs = $self->search($query,$attrs);
- return keys %{$rs->{collapse}} ? $rs->next : $rs->single;
- } else {
- return keys %{$self->{collapse}} ?
- $self->search($query)->next :
- $self->single($query);
+ my $rs = $self->search($query, $attrs);
+ return keys %{$rs->_resolved_attrs->{collapse}} ? $rs->next : $rs->single;
}
+ else {
+ return keys %{$self->_resolved_attrs->{collapse}}
+ ? $self->search($query)->next
+ : $self->single($query);
+ }
+}
+
+# _unique_queries
+#
+# Build a list of queries which satisfy unique constraints.
+
+sub _unique_queries {
+ my ($self, $query, $attrs) = @_;
+
+ my @constraint_names = exists $attrs->{key}
+ ? ($attrs->{key})
+ : $self->result_source->unique_constraint_names;
+
+ my @unique_queries;
+ foreach my $name (@constraint_names) {
+ my @unique_cols = $self->result_source->unique_constraint_columns($name);
+ my $unique_query = $self->_build_unique_query($query, \@unique_cols);
+
+ next unless scalar keys %$unique_query;
+
+ # Add the ResultSet's alias
+ my $alias = $self->{attrs}{alias};
+ foreach my $key (grep { ! m/\./ } keys %$unique_query) {
+ $unique_query->{"$alias.$key"} = delete $unique_query->{$key};
+ }
+
+ push @unique_queries, $unique_query;
+ }
+
+ return @unique_queries;
+}
+
+# _build_unique_query
+#
+# Constrain the specified query hash based on the specified column names.
+
+sub _build_unique_query {
+ my ($self, $query, $unique_cols) = @_;
+
+ return {
+ map { $_ => $query->{$_} }
+ grep { exists $query->{$_} }
+ @$unique_cols
+ };
}
=head2 search_related
=over 4
-=item Arguments: $cond, \%attrs?
+=item Arguments: $rel, $cond, \%attrs?
=item Return Value: $new_resultset
sub cursor {
my ($self) = @_;
- my $attrs = { %{$self->{attrs}} };
+
+ my $attrs = { %{$self->_resolved_attrs} };
return $self->{cursor}
- ||= $self->result_source->storage->select($self->{from}, $attrs->{select},
+ ||= $self->result_source->storage->select($attrs->{from}, $attrs->{select},
$attrs->{where},$attrs);
}
my $cd = $schema->resultset('CD')->single({ year => 2001 });
Inflates the first result without creating a cursor if the resultset has
-any records in it; if not returns nothing. Used by find() as an optimisation.
+any records in it; if not returns nothing. Used by L</find> as an optimisation.
Can optionally take an additional condition *only* - this is a fast-code-path
method; if you need to add extra joins or similar call ->search and then
sub single {
my ($self, $where) = @_;
- my $attrs = { %{$self->{attrs}} };
+ my $attrs = { %{$self->_resolved_attrs} };
if ($where) {
if (defined $attrs->{where}) {
$attrs->{where} = {
$attrs->{where} = $where;
}
}
+
+ unless ($self->_is_unique_query($attrs->{where})) {
+ carp "Query not guaranteed to return a single row"
+ . "; please declare your unique constraints or use search instead";
+ }
+
my @data = $self->result_source->storage->select_single(
- $self->{from}, $attrs->{select},
- $attrs->{where},$attrs);
+ $attrs->{from}, $attrs->{select},
+ $attrs->{where}, $attrs
+ );
+
return (@data ? $self->_construct_object(@data) : ());
}
+# _is_unique_query
+#
+# Try to determine if the specified query is guaranteed to be unique, based on
+# the declared unique constraints.
+
+sub _is_unique_query {
+ my ($self, $query) = @_;
+
+ my $collapsed = $self->_collapse_query($query);
+ my $alias = $self->{attrs}{alias};
+
+ foreach my $name ($self->result_source->unique_constraint_names) {
+ my @unique_cols = map {
+ "$alias.$_"
+ } $self->result_source->unique_constraint_columns($name);
+
+ # Count the values for each unique column
+ my %seen = map { $_ => 0 } @unique_cols;
+
+ foreach my $key (keys %$collapsed) {
+ my $aliased = $key =~ /\./ ? $key : "$alias.$key";
+ next unless exists $seen{$aliased}; # Additional constraints are okay
+ $seen{$aliased} = scalar @{ $collapsed->{$key} };
+ }
+
+ # If we get 0 or more than 1 value for a column, it's not necessarily unique
+ return 1 unless grep { $_ != 1 } values %seen;
+ }
+
+ return 0;
+}
+
+# _collapse_query
+#
+# Recursively collapse the query, accumulating values for each column.
+
+sub _collapse_query {
+ my ($self, $query, $collapsed) = @_;
+
+ $collapsed ||= {};
+
+ if (ref $query eq 'ARRAY') {
+ foreach my $subquery (@$query) {
+ next unless ref $subquery; # -or
+# warn "ARRAY: " . Dumper $subquery;
+ $collapsed = $self->_collapse_query($subquery, $collapsed);
+ }
+ }
+ elsif (ref $query eq 'HASH') {
+ if (keys %$query and (keys %$query)[0] eq '-and') {
+ foreach my $subquery (@{$query->{-and}}) {
+# warn "HASH: " . Dumper $subquery;
+ $collapsed = $self->_collapse_query($subquery, $collapsed);
+ }
+ }
+ else {
+# warn "LEAF: " . Dumper $query;
+ foreach my $key (keys %$query) {
+ push @{$collapsed->{$key}}, $query->{$key};
+ }
+ }
+ }
+
+ return $collapsed;
+}
+
+=head2 get_column
+
+=over 4
+
+=item Arguments: $cond?
+
+=item Return Value: $resultsetcolumn
+
+=back
+
+ my $max_length = $rs->get_column('length')->max;
+
+Returns a ResultSetColumn instance for $column based on $self
+
+=cut
+
+sub get_column {
+ my ($self, $column) = @_;
+ my $new = DBIx::Class::ResultSetColumn->new($self, $column);
+ return $new;
+}
=head2 search_like
print $cd->title;
}
-Note that you need to store the resultset object, and call C<next> on it.
+Note that you need to store the resultset object, and call C<next> on it.
Calling C<< resultset('Table')->next >> repeatedly will always return the
first record from the resultset.
$self->{all_cache_position} = 1;
return ($self->all)[0];
}
- my @row = (exists $self->{stashed_row} ?
- @{delete $self->{stashed_row}} :
- $self->cursor->next
+ my @row = (
+ exists $self->{stashed_row}
+ ? @{delete $self->{stashed_row}}
+ : $self->cursor->next
);
-# warn Dumper(\@row); use Data::Dumper;
return unless (@row);
return $self->_construct_object(@row);
}
-sub _construct_object {
- my ($self, @row) = @_;
- my @as = @{ $self->{attrs}{as} };
+sub _resolved_attrs {
+ my $self = shift;
+ return $self->{_attrs} if $self->{_attrs};
+
+ my $attrs = $self->{attrs};
+ my $source = $self->{_parent_source} || $self->{result_source};
+ my $alias = $attrs->{_orig_alias};
+
+ # XXX - lose storable dclone
+ my $record_filter = delete $attrs->{record_filter};
+ $attrs = Storable::dclone($attrs || {}); # { %{ $attrs || {} } };
+ $attrs->{record_filter} = $record_filter if $record_filter;
+
+ $attrs->{columns} ||= delete $attrs->{cols} if exists $attrs->{cols};
+ if ($attrs->{columns}) {
+ delete $attrs->{as};
+ } elsif (!$attrs->{select}) {
+ $attrs->{columns} = [ $self->{result_source}->columns ];
+ }
- my $info = $self->_collapse_result(\@as, \@row);
+ my $select_alias = $self->{attrs}{alias};
+ $attrs->{select} ||= [
+ map { m/\./ ? $_ : "${select_alias}.$_" } @{delete $attrs->{columns}}
+ ];
+ $attrs->{as} ||= [
+ map { m/^\Q${alias}.\E(.+)$/ ? $1 : $_ } @{$attrs->{select}}
+ ];
- my $new = $self->result_class->inflate_result($self->result_source, @$info);
+ my $adds;
+ if ($adds = delete $attrs->{include_columns}) {
+ $adds = [$adds] unless ref $adds eq 'ARRAY';
+ push(@{$attrs->{select}}, @$adds);
+ push(@{$attrs->{as}}, map { m/([^.]+)$/; $1 } @$adds);
+ }
+ if ($adds = delete $attrs->{'+select'}) {
+ $adds = [$adds] unless ref $adds eq 'ARRAY';
+ push(@{$attrs->{select}}, map { /\./ || ref $_ ? $_ : "${alias}.$_" } @$adds);
+ }
+ if (my $adds = delete $attrs->{'+as'}) {
+ $adds = [$adds] unless ref $adds eq 'ARRAY';
+ push(@{$attrs->{as}}, @$adds);
+ }
+
+ $attrs->{from} ||= [ { $alias => $source->from } ];
+ $attrs->{seen_join} ||= {};
+ my %seen;
+ if (my $join = delete $attrs->{join}) {
+ foreach my $j (ref $join eq 'ARRAY' ? @$join : ($join)) {
+ if (ref $j eq 'HASH') {
+ $seen{$_} = 1 foreach keys %$j;
+ } else {
+ $seen{$j} = 1;
+ }
+ }
+ push(@{$attrs->{from}},
+ $source->resolve_join($join, $alias, $attrs->{seen_join})
+ );
+ }
+
+ $attrs->{group_by} ||= $attrs->{select} if delete $attrs->{distinct};
+ if ($attrs->{order_by}) {
+ $attrs->{order_by} = [ $attrs->{order_by} ] unless ref $attrs->{order_by};
+ } else {
+ $attrs->{order_by} ||= [];
+ }
+
+ my $collapse = $attrs->{collapse} || {};
+ if (my $prefetch = delete $attrs->{prefetch}) {
+ my @pre_order;
+ foreach my $p (ref $prefetch eq 'ARRAY' ? @$prefetch : ($prefetch)) {
+ if ( ref $p eq 'HASH' ) {
+ foreach my $key (keys %$p) {
+ push(@{$attrs->{from}}, $source->resolve_join($p, $alias))
+ unless $seen{$key};
+ }
+ } else {
+ push(@{$attrs->{from}}, $source->resolve_join($p, $alias))
+ unless $seen{$p};
+ }
+ # bring joins back to level of current class
+ $p = $self->_reduce_joins($p, $attrs) if $attrs->{_live_join_stack};
+ if ($p) {
+ my @prefetch = $self->result_source->resolve_prefetch(
+ $p, $alias, {}, \@pre_order, $collapse
+ );
+ push(@{$attrs->{select}}, map { $_->[0] } @prefetch);
+ push(@{$attrs->{as}}, map { $_->[1] } @prefetch);
+ }
+ }
+ push(@{$attrs->{order_by}}, @pre_order);
+ }
+ $attrs->{collapse} = $collapse;
+
+ return $self->{_attrs} = $attrs;
+}
+
+sub _merge_attr {
+ my ($self, $a, $b) = @_;
+ return $b unless $a;
- $new = $self->{attrs}{record_filter}->($new)
- if exists $self->{attrs}{record_filter};
+ if (ref $b eq 'HASH' && ref $a eq 'HASH') {
+ foreach my $key (keys %{$b}) {
+ if (exists $a->{$key}) {
+ $a->{$key} = $self->_merge_attr($a->{$key}, $b->{$key});
+ } else {
+ $a->{$key} = $b->{$key};
+ }
+ }
+ return $a;
+ } else {
+ $a = [$a] unless ref $a eq 'ARRAY';
+ $b = [$b] unless ref $b eq 'ARRAY';
+
+ my $hash = {};
+ my @array;
+ foreach my $x ($a, $b) {
+ foreach my $element (@{$x}) {
+ if (ref $element eq 'HASH') {
+ $hash = $self->_merge_attr($hash, $element);
+ } elsif (ref $element eq 'ARRAY') {
+ push(@array, @{$element});
+ } else {
+ push(@array, $element) unless $b == $x
+ && grep { $_ eq $element } @array;
+ }
+ }
+ }
+
+ @array = grep { !exists $hash->{$_} } @array;
+
+ return keys %{$hash}
+ ? ( scalar(@array)
+ ? [$hash, @array]
+ : $hash
+ )
+ : \@array;
+ }
+}
+
+# bring the joins (which are from the original class) to the level
+# of the current class so that we can resolve them properly
+sub _reduce_joins {
+ my ($self, $p, $attrs) = @_;
+
+ STACK:
+ foreach my $join (@{$attrs->{_live_join_stack}}) {
+ if (ref $p eq 'HASH') {
+ return undef unless exists $p->{$join};
+ $p = $p->{$join};
+ } elsif (ref $p eq 'ARRAY') {
+ foreach my $pe (@{$p}) {
+ return undef if $pe eq $join;
+ if (ref $pe eq 'HASH' && exists $pe->{$join}) {
+ $p = $pe->{$join};
+ next STACK;
+ }
+ }
+ return undef;
+ } else {
+ return undef;
+ }
+ }
+ return $p;
+}
+
+sub _construct_object {
+ my ($self, @row) = @_;
+ my $info = $self->_collapse_result($self->{_attrs}{as}, \@row);
+ my $new = $self->result_class->inflate_result($self->result_source, @$info);
+ $new = $self->{_attrs}{record_filter}->($new)
+ if exists $self->{_attrs}{record_filter};
return $new;
}
my ($self, $as, $row, $prefix) = @_;
my %const;
-
my @copy = @$row;
+
foreach my $this_as (@$as) {
my $val = shift @copy;
if (defined $prefix) {
}
}
+ my $alias = $self->{attrs}{alias};
my $info = [ {}, {} ];
foreach my $key (keys %const) {
- if (length $key) {
+ if (length $key && $key ne $alias) {
my $target = $info;
my @parts = split(/\./, $key);
foreach my $p (@parts) {
$info->[0] = $const{$key};
}
}
-
+
my @collapse;
if (defined $prefix) {
@collapse = map {
m/^\Q${prefix}.\E(.+)$/ ? ($1) : ()
- } keys %{$self->{collapse}}
+ } keys %{$self->{_attrs}{collapse}}
} else {
- @collapse = keys %{$self->{collapse}};
+ @collapse = keys %{$self->{_attrs}{collapse}};
};
if (@collapse) {
$target = $target->[1]->{$p} ||= [];
}
my $c_prefix = (defined($prefix) ? "${prefix}.${c}" : $c);
- my @co_key = @{$self->{collapse}{$c_prefix}};
- my %co_check = map { ($_, $target->[0]->{$_}); } @co_key;
+ my @co_key = @{$self->{_attrs}{collapse}{$c_prefix}};
my $tree = $self->_collapse_result($as, $row, $c_prefix);
+ my %co_check = map { ($_, $tree->[0]->{$_}); } @co_key;
my (@final, @raw);
- while ( !(grep {
- !defined($tree->[0]->{$_}) ||
- $co_check{$_} ne $tree->[0]->{$_}
- } @co_key) ) {
+
+ while (
+ !(
+ grep {
+ !defined($tree->[0]->{$_}) || $co_check{$_} ne $tree->[0]->{$_}
+ } @co_key
+ )
+ ) {
push(@final, $tree);
last unless (@raw = $self->cursor->next);
$row = $self->{stashed_row} = \@raw;
# single empty result to indicate an empty prefetched has_many
}
+ #print "final info: " . Dumper($info);
return $info;
}
my $self = shift;
return $self->search(@_)->count if @_ and defined $_[0];
return scalar @{ $self->get_cache } if $self->get_cache;
-
my $count = $self->_count;
return 0 unless $count;
sub _count { # Separated out so pager can get the full count
my $self = shift;
my $select = { count => '*' };
- my $attrs = { %{ $self->{attrs} } };
+
+ my $attrs = { %{$self->_resolved_attrs} };
if (my $group_by = delete $attrs->{group_by}) {
delete $attrs->{having};
my @distinct = (ref $group_by ? @$group_by : ($group_by));
# todo: try CONCAT for multi-column pk
my @pk = $self->result_source->primary_columns;
if (@pk == 1) {
+ my $alias = $attrs->{_orig_alias};
foreach my $column (@distinct) {
- if ($column =~ qr/^(?:\Q$attrs->{alias}.\E)?$pk[0]$/) {
+ if ($column =~ qr/^(?:\Q${alias}.\E)?$pk[0]$/) {
@distinct = ($column);
last;
}
}
$select = { count => { distinct => \@distinct } };
- #use Data::Dumper; die Dumper $select;
}
$attrs->{select} = $select;
# offset, order by and page are not needed to count. record_filter is cdbi
delete $attrs->{$_} for qw/rows offset order_by page pager record_filter/;
-
- my ($count) = (ref $self)->new($self->result_source, $attrs)->cursor->next;
+ my $tmp_rs = (ref $self)->new($self->result_source, $attrs);
+ $tmp_rs->{_parent_source} = $self->{_parent_source} if $self->{_parent_source};
+ #XXX - hack to pass through parent of related resultsets
+
+ my ($count) = $tmp_rs->cursor->next;
return $count;
}
my @obj;
- if (keys %{$self->{collapse}}) {
+ # TODO: don't call resolve here
+ if (keys %{$self->_resolved_attrs->{collapse}}) {
+# if ($self->{attrs}{prefetch}) {
# Using $self->cursor->all is really just an optimisation.
# If we're collapsing has_many prefetches it probably makes
# very little difference, and this is cleaner than hacking
# _construct_object to survive the approach
- $self->cursor->reset;
my @row = $self->cursor->next;
while (@row) {
push(@obj, $self->_construct_object(@row));
sub reset {
my ($self) = @_;
+ delete $self->{_attrs} if exists $self->{_attrs};
$self->{all_cache_position} = 0;
$self->cursor->reset;
return $self;
my ($self) = @_;
my $cond = {};
- if (!ref($self->{cond})) {
- # No-op. No condition, we're updating/deleting everything
- }
- elsif (ref $self->{cond} eq 'ARRAY') {
+ # No-op. No condition, we're updating/deleting everything
+ return $cond unless ref $self->{cond};
+
+ if (ref $self->{cond} eq 'ARRAY') {
$cond = [
map {
my %hash;
$cond->{-and} = [];
my @cond = @{$self->{cond}{-and}};
- for (my $i = 0; $i < @cond - 1; $i++) {
+ for (my $i = 0; $i < @cond; $i++) {
my $entry = $cond[$i];
my %hash;
}
else {
$entry =~ /([^.]+)$/;
- $hash{$entry} = $cond[++$i];
+ $hash{$1} = $cond[++$i];
}
push @{$cond->{-and}}, \%hash;
sub delete {
my ($self) = @_;
- my $del = {};
my $cond = $self->_cond_for_update_delete;
my ($self) = @_;
my $attrs = $self->{attrs};
$self->throw_exception("Can't create pager for non-paged rs")
- unless $self->{page};
+ unless $self->{attrs}{page};
$attrs->{rows} ||= 10;
return $self->{pager} ||= Data::Page->new(
- $self->_count, $attrs->{rows}, $self->{page});
+ $self->_count, $attrs->{rows}, $self->{attrs}{page});
}
=head2 page
sub page {
my ($self, $page) = @_;
- my $attrs = { %{$self->{attrs}} };
- $attrs->{page} = $page;
- return (ref $self)->new($self->result_source, $attrs);
+ return (ref $self)->new($self->result_source, { %{$self->{attrs}}, page => $page });
}
=head2 new_result
"Can't abstract implicit construct, condition not a hash"
) if ($self->{cond} && !(ref $self->{cond} eq 'HASH'));
my %new = %$values;
- my $alias = $self->{attrs}{alias};
+ my $alias = $self->{attrs}{_orig_alias};
foreach my $key (keys %{$self->{cond}||{}}) {
$new{$1} = $self->{cond}{$key} if ($key =~ m/^(?:\Q${alias}.\E)?([^.]+)$/);
}
return $obj;
}
+=head2 find_or_new
+
+=over 4
+
+=item Arguments: \%vals, \%attrs?
+
+=item Return Value: $object
+
+=back
+
+Find an existing record from this resultset. If none exists, instantiate a new
+result object and return it. The object will not be saved into your storage
+until you call L<DBIx::Class::Row/insert> on it.
+
+If you want objects to be saved immediately, use L</find_or_create> instead.
+
+=cut
+
+sub find_or_new {
+ my $self = shift;
+ my $attrs = (@_ > 1 && ref $_[$#_] eq 'HASH' ? pop(@_) : {});
+ my $hash = ref $_[0] eq 'HASH' ? shift : {@_};
+ my $exists = $self->find($hash, $attrs);
+ return defined $exists ? $exists : $self->new_result($hash);
+}
+
=head2 create
=over 4
$class->find_or_create({ key => $val, ... });
-Searches for a record matching the search condition; if it doesn't find one,
-creates one and returns that instead.
+Tries to find a record based on its primary key or unique constraint; if none
+is found, creates one and returns that instead.
my $cd = $schema->resultset('CD')->find_or_create({
cdid => 5,
artist => 'Massive Attack',
title => 'Mezzanine',
},
- { key => 'artist_title' }
+ { key => 'cd_artist_title' }
);
-See also L</find> and L</update_or_create>.
+See also L</find> and L</update_or_create>. For information on how to declare
+unique constraints, see L<DBIx::Class::ResultSource/add_unique_constraint>.
=cut
title => 'Mezzanine',
year => 1998,
},
- { key => 'artist_title' }
+ { key => 'cd_artist_title' }
);
If no C<key> is specified, it searches on all unique constraints defined on the
If the C<key> is specified as C<primary>, it searches only on the primary key.
-See also L</find> and L</find_or_create>.
+See also L</find> and L</find_or_create>. For information on how to declare
+unique constraints, see L<DBIx::Class::ResultSource/add_unique_constraint>.
=cut
sub update_or_create {
my $self = shift;
my $attrs = (@_ > 1 && ref $_[$#_] eq 'HASH' ? pop(@_) : {});
- my $hash = ref $_[0] eq 'HASH' ? shift : {@_};
-
- my %unique_constraints = $self->result_source->unique_constraints;
- my @constraint_names = (exists $attrs->{key}
- ? ($attrs->{key})
- : keys %unique_constraints);
-
- my @unique_hashes;
- foreach my $name (@constraint_names) {
- my @unique_cols = @{ $unique_constraints{$name} };
- my %unique_hash =
- map { $_ => $hash->{$_} }
- grep { exists $hash->{$_} }
- @unique_cols;
-
- push @unique_hashes, \%unique_hash
- if (scalar keys %unique_hash == scalar @unique_cols);
- }
+ my $cond = ref $_[0] eq 'HASH' ? shift : {@_};
- if (@unique_hashes) {
- my $row = $self->single(\@unique_hashes);
- if (defined $row) {
- $row->update($hash);
- return $row;
- }
+ my $row = $self->find($cond);
+ if (defined $row) {
+ $row->update($cond);
+ return $row;
}
- return $self->create($hash);
+ return $self->create($cond);
}
=head2 get_cache
sub set_cache {
my ( $self, $data ) = @_;
$self->throw_exception("set_cache requires an arrayref")
- if defined($data) && (ref $data ne 'ARRAY');
+ if defined($data) && (ref $data ne 'ARRAY');
$self->{all_cache} = $data;
}
=cut
sub related_resultset {
- my ( $self, $rel ) = @_;
+ my ($self, $rel) = @_;
+
$self->{related_resultsets} ||= {};
return $self->{related_resultsets}{$rel} ||= do {
- #warn "fetching related resultset for rel '$rel'";
- my $rel_obj = $self->result_source->relationship_info($rel);
- $self->throw_exception(
- "search_related: result source '" . $self->result_source->name .
- "' has no such relationship ${rel}")
- unless $rel_obj; #die Dumper $self->{attrs};
-
- my $rs = $self->search(undef, { join => $rel });
- my $alias = defined $rs->{attrs}{seen_join}{$rel}
- && $rs->{attrs}{seen_join}{$rel} > 1
- ? join('_', $rel, $rs->{attrs}{seen_join}{$rel})
- : $rel;
-
- $self->result_source->schema->resultset($rel_obj->{class}
- )->search( undef,
- { %{$rs->{attrs}},
- alias => $alias,
- select => undef,
- as => undef }
- );
+ my $rel_obj = $self->result_source->relationship_info($rel);
+
+ $self->throw_exception(
+ "search_related: result source '" . $self->result_source->name .
+ "' has no such relationship $rel")
+ unless $rel_obj;
+
+ my @live_join_stack = @{$self->{attrs}{_live_join_stack}||[]};
+
+ # XXX mst: I'm sure this is wrong, somehow
+ # something with complex joins early on could die on search_rel
+ # followed by a prefetch. I think. need a test case.
+
+ my $join_count = scalar(grep { $_ eq $rel } @live_join_stack);
+ my $alias = $join_count ? join('_', $rel, $join_count+1) : $rel;
+
+ push(@live_join_stack, $rel);
+
+ my $rs = $self->result_source->schema->resultset($rel_obj->{class})->search(
+ undef, {
+ select => undef,
+ as => undef,
+ alias => $alias,
+ _live_join_stack => \@live_join_stack,
+ _parent_attrs => $self->{attrs}}
+ );
+
+ # keep reference of the original resultset
+ $rs->{_parent_source} = $self->{_parent_source} || $self->result_source;
+
+ return $rs;
};
}
through directly to SQL, so you can give e.g. C<year DESC> for a
descending order on the column `year'.
-Please note that if you have quoting enabled (see
+Please note that if you have quoting enabled (see
L<DBIx::Class::Storage/quote_char>) you will need to do C<\'year DESC' > to
specify an order. (The scalar ref causes it to be passed as raw sql to the DB,
so you will need to manually quote things as appropriate.)
attribute, the column names returned are storage-dependent. E.g. MySQL would
return a column named C<count(employeeid)> in the above example.
+=head2 +select
+
+=over 4
+
+Indicates additional columns to be selected from storage. Works the same as
+L<select> but adds columns to the selection.
+
+=back
+
+=head2 +as
+
+=over 4
+
+Indicates additional column names for those added via L<+select>.
+
+=back
+
=head2 as
=over 4
Makes the resultset paged and specifies the page to retrieve. Effectively
identical to creating a non-paged resultset and then calling ->page($page)
-on it.
+on it.
If L<rows> attribute is not specified it defaults to 10 rows per page.
HAVING is a select statement attribute that is applied between GROUP BY and
ORDER BY. It is applied after the grouping calculations have been
-done.
+done.
having => { 'count(employee)' => { '>=', 100 } }
Set to 1 to group by all columns.
+=head2 where
+
+=over 4
+
+Adds to the WHERE clause.
+
+ # only return rows WHERE deleted IS NULL for all searches
+  __PACKAGE__->resultset_attributes({ where => { deleted => undef } });
+
+Can be overridden by passing C<{ where => undef }> as an attribute
+to a resultset.
+
+=back
+
=head2 cache
Set to 1 to cache search results. This prevents extra SQL queries if you
revisit rows in your ResultSet:
my $resultset = $schema->resultset('Artist')->search( undef, { cache => 1 } );
-
+
while( my $artist = $resultset->next ) {
... do stuff ...
}
--- /dev/null
+package DBIx::Class::ResultSetColumn;
+use strict;
+use warnings;
+use base 'DBIx::Class';
+
+=head1 NAME
+
+ DBIx::Class::ResultSetColumn - helpful methods for messing
+ with a single column of the resultset
+
+=head1 SYNOPSIS
+
+ $rs = $schema->resultset('CD')->search({ artist => 'Tool' });
+ $rs_column = $rs->get_column('year');
+ $max_year = $rs_column->max; #returns latest year
+
+=head1 DESCRIPTION
+
+A convenience class used to perform operations on a specific column of a resultset.
+
+=cut
+
+=head1 METHODS
+
+=head2 new
+
+ my $obj = DBIx::Class::ResultSetColumn->new($rs, $column);
+
+Creates a new resultset column object from the resultset and column passed as params
+
+=cut
+
+sub new {
+ my ($class, $rs, $column) = @_;
+ $class = ref $class if ref $class;
+
+ my $object_ref = { _column => $column,
+ _parent_resultset => $rs };
+
+ my $new = bless $object_ref, $class;
+ $new->throw_exception("column must be supplied") unless ($column);
+ return $new;
+}
+
+=head2 next
+
+=over 4
+
+=item Arguments: none
+
+=item Return Value: $value
+
+=back
+
+Returns the next value of the column in the resultset (C<undef> if there is none).
+
+Much like $rs->next but just returning the one value
+
+=cut
+
+sub next {
+ my $self = shift;
+
+ $self->{_resultset} = $self->{_parent_resultset}->search(undef, {select => [$self->{_column}], as => [$self->{_column}]}) unless ($self->{_resultset});
+ my ($row) = $self->{_resultset}->cursor->next;
+ return $row;
+}
+
+=head2 all
+
+=over 4
+
+=item Arguments: none
+
+=item Return Value: @values
+
+=back
+
+Returns all values of the column in the resultset (C<undef> if there are none).
+
+Much like $rs->all but returns values rather than row objects
+
+=cut
+
+sub all {
+ my $self = shift;
+ return map {$_->[0]} $self->{_parent_resultset}->search(undef, {select => [$self->{_column}], as => [$self->{_column}]})->cursor->all;
+}
+
+=head2 min
+
+=over 4
+
+=item Arguments: none
+
+=item Return Value: $lowest_value
+
+=back
+
+Wrapper for ->func. Returns the lowest value of the column in the resultset (C<undef> if there are none).
+
+=cut
+
+sub min {
+ my $self = shift;
+ return $self->func('MIN');
+}
+
+=head2 max
+
+=over 4
+
+=item Arguments: none
+
+=item Return Value: $highest_value
+
+=back
+
+Wrapper for ->func. Returns the highest value of the column in the resultset (C<undef> if there are none).
+
+=cut
+
+sub max {
+ my $self = shift;
+ return $self->func('MAX');
+}
+
+=head2 sum
+
+=over 4
+
+=item Arguments: none
+
+=item Return Value: $sum_of_values
+
+=back
+
+Wrapper for ->func. Returns the sum of all the values in the column of the resultset. Use on varchar-like columns at your own risk.
+
+=cut
+
+sub sum {
+ my $self = shift;
+ return $self->func('SUM');
+}
+
+=head2 func
+
+=over 4
+
+=item Arguments: $function
+
+=item Return Value: $function_return_value
+
+=back
+
+Runs a query using the function on the column and returns the value. For example
+ $rs = $schema->resultset("CD")->search({});
+ $length = $rs->get_column('title')->func('LENGTH');
+
+Produces the following SQL
+ SELECT LENGTH( title ) from cd me
+
+=cut
+
+sub func {
+ my $self = shift;
+ my $function = shift;
+
+ my ($row) = $self->{_parent_resultset}->search(undef, {select => {$function => $self->{_column}}, as => [$self->{_column}]})->cursor->next;
+ return $row;
+}
+
+1;
+
+=head1 AUTHORS
+
+Luke Saunders <luke.saunders@gmail.com>
+
+=head1 LICENSE
+
+You may distribute this code under the same terms as Perl itself.
+
+=cut
=head1 NAME
- DBIx::Class::ResultSetManager - helpful methods for managing
- resultset classes (EXPERIMENTAL)
+DBIx::Class::ResultSetManager - helpful methods for managing resultset
+classes (EXPERIMENTAL)
=head1 SYNOPSIS
# in a table class
__PACKAGE__->load_components(qw/ResultSetManager Core/); # note order!
__PACKAGE__->load_resultset_components(qw/AlwaysRS/);
-
+
# will be removed from the table class and inserted into a
# table-specific resultset class
sub search_by_year_desc : ResultSet {
__PACKAGE__->base_resultset_class('DBIx::Class::ResultSet');
__PACKAGE__->table_resultset_class_suffix('::_resultset');
+=head2 table
+
+Stacks on top of the normal L<DBIx::Class> C<table> method. Any
+methods tagged with the C<ResultSet> attribute will be moved into a
+table-specific resultset class (by default called
+C<Class::_resultset>, but configurable via
+C<table_resultset_class_suffix>). The magic for this is done within
+this C<< __PACKAGE__->table >> call.
+
+=cut
+
sub table {
my ($self,@rest) = @_;
my $ret = $self->next::method(@rest);
return $ret;
}
+=head2 load_resultset_components
+
+ # in a table class
+ __PACKAGE__->load_components(qw/ResultSetManager Core/); # note order!
+ __PACKAGE__->load_resultset_components(qw/AlwaysRS/);
+
+C<load_resultset_components> loads components in addition to
+C<DBIx::Class::ResultSet> (or whatever you set as
+C<base_resultset_class>).
+
+=cut
+
sub load_resultset_components {
my ($self,@comp) = @_;
my $resultset_class = $self->_setup_resultset_class;
my $self = shift;
my $cache = $self->_attr_cache;
return if keys %$cache == 0;
-
+
foreach my $meth (@{Class::Inspector->methods($self) || []}) {
my $attrs = $cache->{$self->can($meth)};
next unless $attrs;
sub find { shift->resultset_instance->find(@_); }
sub create { shift->resultset_instance->create(@_); }
sub find_or_create { shift->resultset_instance->find_or_create(@_); }
+sub find_or_new { shift->resultset_instance->find_or_new(@_); }
sub update_or_create { shift->resultset_instance->update_or_create(@_); }
1;
schema from _relationships/);
__PACKAGE__->mk_group_accessors('component_class' => qw/resultset_class
- result_class/);
+ result_class source_name/);
=head1 NAME
=head1 METHODS
+=pod
+
+=head2 new
+
+ $class->new();
+
+ $class->new({attribute_name => value});
+
+Creates a new ResultSource object. Not normally called directly by end users.
+
=cut
sub new {
sub add_columns {
my ($self, @cols) = @_;
$self->_ordered_columns(\@cols) unless $self->_ordered_columns;
-
+
my @added;
my $columns = $self->_columns;
while (my $col = shift @cols) {
{
$self->{_columns_info_loaded}++;
my $info;
+ my $lc_info;
# eval for the case of storage without table
- eval { $info = $self->storage->columns_info_for($self->from) };
+ eval { $info = $self->storage->columns_info_for( $self->from, keys %{$self->_columns} ) };
unless ($@) {
+ for my $realcol ( keys %{$info} ) {
+ $lc_info->{lc $realcol} = $info->{$realcol};
+ }
foreach my $col ( keys %{$self->_columns} ) {
- foreach my $i ( keys %{$info->{$col}} ) {
- $self->_columns->{$col}{$i} = $info->{$col}{$i};
- }
+ $self->_columns->{$col} = { %{ $self->_columns->{$col}}, %{$info->{$col} || $lc_info->{lc $col}} };
}
}
}
return @{$self->{_ordered_columns}||[]};
}
+=head2 remove_columns
+
+ $table->remove_columns(qw/col1 col2 col3/);
+
+Removes columns from the result source.
+
+=head2 remove_column
+
+ $table->remove_column('col');
+
+Convenience alias to remove_columns.
+
+=cut
+
+sub remove_columns {
+ my ($self, @cols) = @_;
+
+ return unless $self->_ordered_columns;
+
+ my $columns = $self->_columns;
+ my @remaining;
+
+ foreach my $col (@{$self->_ordered_columns}) {
+ push @remaining, $col unless grep(/$col/, @cols);
+ }
+
+ foreach (@cols) {
+ undef $columns->{$_};
+ };
+
+ $self->_ordered_columns(\@remaining);
+}
+
+*remove_column = \&remove_columns;
+
=head2 set_primary_key
=over 4
=head2 add_unique_constraint
Declare a unique constraint on this source. Call once for each unique
-constraint. Unique constraints are used when you call C<find> on a
-L<DBIx::Class::ResultSet>. Only columns in the constraint are searched,
-for example:
+constraint.
# For UNIQUE (column1, column2)
__PACKAGE__->add_unique_constraint(
constraint_name => [ qw/column1 column2/ ],
);
+Alternatively, you can specify only the columns:
+
+ __PACKAGE__->add_unique_constraint([ qw/column1 column2/ ]);
+
+This will result in a unique constraint named C<table_column1_column2>, where
+C<table> is replaced with the table name.
+
+Unique constraints are used, for example, when you call
+L<DBIx::Class::ResultSet/find>. Only columns in the constraint are searched.
+
=cut
sub add_unique_constraint {
- my ($self, $name, $cols) = @_;
+ my $self = shift;
+ my $cols = pop @_;
+ my $name = shift;
+
+ $name ||= $self->name_unique_constraint($cols);
foreach my $col (@$cols) {
$self->throw_exception("No such column $col on table " . $self->name)
$self->_unique_constraints(\%unique_constraints);
}
+=head2 name_unique_constraint
+
+Return a name for a unique constraint containing the specified columns. These
+names consist of the table name and each column name, separated by underscores.
+
+For example, a constraint on a table named C<cd> containing the columns
+C<artist> and C<title> would result in a constraint name of C<cd_artist_title>.
+
+=cut
+
+sub name_unique_constraint {
+ my ($self, $cols) = @_;
+
+ return join '_', $self->name, @$cols;
+}
+
=head2 unique_constraints
Read-only accessor which returns the list of unique constraints on this source.
return %{shift->_unique_constraints||{}};
}
+=head2 unique_constraint_names
+
+Returns the list of unique constraint names defined on this source.
+
+=cut
+
+sub unique_constraint_names {
+ my ($self) = @_;
+
+ my %unique_constraints = $self->unique_constraints;
+
+ return keys %unique_constraints;
+}
+
+=head2 unique_constraint_columns
+
+Returns the list of columns that make up the specified unique constraint.
+
+=cut
+
+sub unique_constraint_columns {
+ my ($self, $constraint_name) = @_;
+
+ my %unique_constraints = $self->unique_constraints;
+
+ $self->throw_exception(
+ "Unknown unique constraint $constraint_name on '" . $self->name . "'"
+ ) unless exists $unique_constraints{$constraint_name};
+
+ return @{ $unique_constraints{$constraint_name} };
+}
+
=head2 from
Returns an expression of the source to be supplied to storage to specify
An arrayref containing a list of accessors in the foreign class to proxy in
the main class. If, for example, you do the following:
-
+
CD->might_have(liner_notes => 'LinerNotes', undef, {
proxy => [ qw/notes/ ],
});
-
+
Then, assuming LinerNotes has an accessor named notes, you can do:
my $cd = CD->find(1);
my $f_source = $self->schema->source($f_source_name);
unless ($f_source) {
- eval "require $f_source_name;";
- if ($@) {
- die $@ unless $@ =~ /Can't locate/;
- }
+ $self->ensure_class_loaded($f_source_name);
$f_source = $f_source_name->result_source;
#my $s_class = ref($self->schema);
#$f_source_name =~ m/^${s_class}::(.*)$/;
return exists $self->_relationships->{$rel};
}
+=head2 reverse_relationship_info
+
+=over 4
+
+=item Arguments: $relname
+
+=back
+
+Returns an array of hash references of relationship information for
+the other side of the specified relationship name.
+
+=cut
+
+sub reverse_relationship_info {
+ my ($self, $rel) = @_;
+ my $rel_info = $self->relationship_info($rel);
+ my $ret = {};
+
+ return $ret unless ((ref $rel_info->{cond}) eq 'HASH');
+
+ my @cond = keys(%{$rel_info->{cond}});
+ my @refkeys = map {/^\w+\.(\w+)$/} @cond;
+ my @keys = map {$rel_info->{cond}->{$_} =~ /^\w+\.(\w+)$/} @cond;
+
+ # Get the related result source for this relationship
+ my $othertable = $self->related_source($rel);
+
+ # Get all the relationships for that source that related to this source
+ # whose foreign column set are our self columns on $rel and whose self
+ # columns are our foreign columns on $rel.
+ my @otherrels = $othertable->relationships();
+ my $otherrelationship;
+ foreach my $otherrel (@otherrels) {
+ my $otherrel_info = $othertable->relationship_info($otherrel);
+
+ my $back = $othertable->related_source($otherrel);
+ next unless $back->name eq $self->name;
+
+ my @othertestconds;
+
+ if (ref $otherrel_info->{cond} eq 'HASH') {
+ @othertestconds = ($otherrel_info->{cond});
+ }
+ elsif (ref $otherrel_info->{cond} eq 'ARRAY') {
+ @othertestconds = @{$otherrel_info->{cond}};
+ }
+ else {
+ next;
+ }
+
+ foreach my $othercond (@othertestconds) {
+ my @other_cond = keys(%$othercond);
+ my @other_refkeys = map {/^\w+\.(\w+)$/} @other_cond;
+ my @other_keys = map {$othercond->{$_} =~ /^\w+\.(\w+)$/} @other_cond;
+ next if (!$self->compare_relationship_keys(\@refkeys, \@other_keys) ||
+ !$self->compare_relationship_keys(\@other_refkeys, \@keys));
+ $ret->{$otherrel} = $otherrel_info;
+ }
+ }
+ return $ret;
+}
+
+=head2 compare_relationship_keys
+
+=over 4
+
+=item Arguments: $keys1, $keys2
+
+=back
+
+Returns true if both sets of keynames are the same, false otherwise.
+
+=cut
+
+sub compare_relationship_keys {
+ my ($self, $keys1, $keys2) = @_;
+
+ # Make sure every keys1 is in keys2
+ my $found;
+ foreach my $key (@$keys1) {
+ $found = 0;
+ foreach my $prim (@$keys2) {
+ if ($prim eq $key) {
+ $found = 1;
+ last;
+ }
+ }
+ last unless $found;
+ }
+
+ # Make sure every key2 is in key1
+ if ($found) {
+ foreach my $prim (@$keys2) {
+ $found = 0;
+ foreach my $key (@$keys1) {
+ if ($prim eq $key) {
+ $found = 1;
+ last;
+ }
+ }
+ last unless $found;
+ }
+ }
+
+ return $found;
+}
+
=head2 resolve_join
=over 4
);
}
+=head2 source_name
+
+=over 4
+
+=item Arguments: $source_name
+
+=back
+
+Set the name of the result source when it is loaded into a schema.
+This is useful if you want to refer to a result source by a name other than
+its class name.
+
+ package ArchivedBooks;
+ use base qw/DBIx::Class/;
+ __PACKAGE__->table('books_archive');
+ __PACKAGE__->source_name('Books');
+
+ # from your schema...
+ $schema->resultset('Books')->find(1);
+
=head2 throw_exception
See L<DBIx::Class::Schema/"throw_exception">.
sub iterator_class { shift->result_source_instance->resultset_class(@_) }
sub resultset_class { shift->result_source_instance->resultset_class(@_) }
+sub source_name { shift->result_source_instance->source_name(@_) }
sub resultset_attributes {
shift->result_source_instance->resultset_attributes(@_);
}
}
+*add_column = \&add_columns;
+
sub has_column {
my ($self, $column) = @_;
return $self->result_source_instance->has_column($column);
return $self->result_source_instance->column_info($column);
}
-
+
sub columns {
return shift->result_source_instance->columns(@_);
}
-
+
+sub remove_columns {
+ return shift->result_source_instance->remove_columns(@_);
+}
+
+*remove_column = \&remove_columns;
+
sub set_primary_key {
shift->result_source_instance->set_primary_key(@_);
}
shift->result_source_instance->unique_constraints(@_);
}
+sub unique_constraint_names {
+ shift->result_source_instance->unique_constraint_names(@_);
+}
+
+sub unique_constraint_columns {
+ shift->result_source_instance->unique_constraint_columns(@_);
+}
+
sub add_relationship {
my ($class, $rel, @rest) = @_;
my $source = $class->result_source_instance;
%{$class->result_source_instance} : (),
name => $table,
result_class => $class,
+ source_name => undef,
});
}
$class->mk_classdata('result_source_instance' => $table);
return undef;
}
+=head2 has_column_loaded
+
+ if ( $obj->has_column_loaded($col) ) {
+ print "$col has been loaded from db";
+ }
+
+Returns a true value if the column value has been loaded from the
+database (or set locally).
+
+=cut
+
sub has_column_loaded {
my ($self, $column) = @_;
$self->throw_exception( "Can't call has_column data as class method" ) unless ref $self;
Updates the object if it's already in the db, else inserts it.
+=head2 insert_or_update
+
+ $obj->insert_or_update
+
+Alias for L</update_or_insert>
+
=cut
*insert_or_update = \&update_or_insert;
=head2 is_changed
- my @changed_col_names = $obj->is_changed
+ my @changed_col_names = $obj->is_changed();
+ if ($obj->is_changed()) { ... }
+
+In array context returns a list of columns with uncommitted changes, or
+in scalar context returns a true value if there are uncommitted
+changes.
=cut
return keys %{shift->{_dirty_columns} || {}};
}
+=head2 is_column_changed
+
+ if ($obj->is_column_changed('col')) { ... }
+
+Returns a true value if the column has uncommitted changes.
+
+=cut
+
+sub is_column_changed {
+ my( $self, $col ) = @_;
+ return exists $self->{_dirty_columns}->{$col};
+}
+
=head2 result_source
- Accessor to the ResultSource this object was created from
+ my $resultsource = $object->result_source;
-=head2 register_column
+Accessor to the ResultSource this object was created from
-=over 4
+=head2 register_column
-=item Arguments: $column, $column_info
+ $column_info = { .... };
+ $class->register_column($column_name, $column_info);
-=back
+Registers a column on the class. If the column_info has an 'accessor'
+key, creates an accessor named after the value if defined; if there is
+no such key, creates an accessor with the same name as the column
- Registers a column on the class. If the column_info has an 'accessor' key,
- creates an accessor named after the value if defined; if there is no such
- key, creates an accessor with the same name as the column
+The column_info attributes are described in
+L<DBIx::Class::ResultSource/add_columns>
=cut
package Library::Schema;
use base qw/DBIx::Class::Schema/;
-
+
# load Library::Schema::CD, Library::Schema::Book, Library::Schema::DVD
__PACKAGE__->load_classes(qw/CD Book DVD/);
$password,
{ AutoCommit => 0 },
);
-
+
my $schema2 = Library::Schema->connect($coderef_returning_dbh);
# fetch objects using Library::Schema::DVD
sub load_classes {
my ($class, @params) = @_;
-
+
my %comps_for;
-
+
if (@params) {
foreach my $param (@params) {
if (ref $param eq 'ARRAY') {
# filter out commented entries
my @modules = grep { $_ !~ /^#/ } @$param;
-
+
push (@{$comps_for{$class}}, @modules);
}
elsif (ref $param eq 'HASH') {
foreach my $prefix (keys %comps_for) {
foreach my $comp (@{$comps_for{$prefix}||[]}) {
my $comp_class = "${prefix}::${comp}";
- eval "use $comp_class"; # If it fails, assume the user fixed it
- if ($@) {
- $comp_class =~ s/::/\//g;
- die $@ unless $@ =~ /Can't locate.+$comp_class\.pm\sin\s\@INC/;
- warn $@ if $@;
- }
- push(@to_register, [ $comp, $comp_class ]);
+ $class->ensure_class_loaded($comp_class);
+ $comp_class->source_name($comp) unless $comp_class->source_name;
+
+ push(@to_register, [ $comp_class->source_name, $comp_class ]);
}
}
}
sub txn_do {
my ($self, $coderef, @args) = @_;
- ref $self or $self->throw_exception
- ('Cannot execute txn_do as a class method');
+ $self->storage or $self->throw_exception
+ ('txn_do called on $schema without storage');
ref $coderef eq 'CODE' or $self->throw_exception
('$coderef must be a CODE reference');
$self->storage->deploy($self, undef, $sqltargs);
}
+=head2 create_ddl_dir (EXPERIMENTAL)
+
+=over 4
+
+=item Arguments: \@databases, $version, $directory, $sqlt_args
+
+=back
+
+Creates an SQL file based on the Schema, for each of the specified
+database types, in the given directory.
+
+Note that this feature is currently EXPERIMENTAL and may not work correctly
+across all databases, or fully handle complex relationships.
+
+=cut
+
+sub create_ddl_dir
+{
+ my $self = shift;
+
+ $self->throw_exception("Can't create_ddl_dir without storage") unless $self->storage;
+ $self->storage->create_ddl_dir($self, @_);
+}
+
+=head2 ddl_filename (EXPERIMENTAL)
+
+ my $filename = $table->ddl_filename($type, $dir, $version)
+
+Creates a filename for a SQL file based on the table class name. Not
+intended for direct end user use.
+
+=cut
+
+sub ddl_filename
+{
+ my ($self, $type, $dir, $version) = @_;
+
+ my $filename = ref($self);
+ $filename =~ s/^.*:://;
+ $filename = "$dir$filename-$version-$type.sql";
+
+ return $filename;
+}
+
1;
=head1 AUTHORS
# in a table class definition
__PACKAGE__->load_components(qw/Serialize::Storable/);
-
+
# meanwhile, in a nearby piece of code
my $cd = $schema->resultset('CD')->find(12);
# if the cache uses Storable, this will work automatically
serialized. It assumes that your row object class (C<result_class>) is
the same as your table class, which is the normal situation.
+=head1 HOOKS
+
+The following hooks are defined for L<Storable> - see the
+documentation for L<Storable/Hooks> for detailed information on these
+hooks.
+
+=head2 STORABLE_freeze
+
+The serializing hook, called on the object during serialization. It
+can be inherited, or defined in the class itself, like any other
+method.
+
+=head2 STORABLE_thaw
+
+The deserializing hook called on the object during deserialization.
+
=head1 AUTHORS
David Kamholz <dkamholz@cpan.org>
package DBIx::Class::Storage::DBI;
+# -*- mode: cperl; cperl-indent-level: 2 -*-
use base 'DBIx::Class::Storage';
use DBI;
use SQL::Abstract::Limit;
use DBIx::Class::Storage::DBI::Cursor;
+use DBIx::Class::Storage::Statistics;
use IO::File;
use Carp::Clan qw/DBIx::Class/;
-
BEGIN {
package DBIC::SQL::Abstract; # Would merge upstream, but nate doesn't reply :(
# check whether a join type exists
my $join_clause = '';
- if (ref($to) eq 'HASH' and exists($to->{-join_type})) {
- $join_clause = ' '.uc($to->{-join_type}).' JOIN ';
+ my $to_jt = ref($to) eq 'ARRAY' ? $to->[0] : $to;
+ if (ref($to_jt) eq 'HASH' and exists($to_jt->{-join_type})) {
+ $join_clause = ' '.uc($to_jt->{-join_type}).' JOIN ';
} else {
$join_clause = ' JOIN ';
}
$self->SUPER::_RowNum(@_);
}
-# Accessor for setting limit dialect. This is useful
-# for JDBC-bridge among others where the remote SQL-dialect cannot
-# be determined by the name of the driver alone.
-#
sub limit_dialect {
my $self = shift;
$self->{limit_dialect} = shift if @_;
return $self->{name_sep};
}
-
-
-
-package DBIx::Class::Storage::DBI::DebugCallback;
-
-sub print {
- my ($self, $string) = @_;
- $string =~ m/^(\w+)/;
- ${$self}->($1, $string);
-}
-
} # End of BEGIN block
use base qw/DBIx::Class/;
__PACKAGE__->load_components(qw/AccessorGroup/);
__PACKAGE__->mk_group_accessors('simple' =>
- qw/connect_info _dbh _sql_maker _conn_pid _conn_tid debug debugfh
+ qw/_connect_info _dbh _sql_maker _conn_pid _conn_tid debug debugobj
cursor on_connect_do transaction_depth/);
+=head1 NAME
+
+DBIx::Class::Storage::DBI - DBI storage handler
+
+=head1 SYNOPSIS
+
+=head1 DESCRIPTION
+
+This class represents the connection to the database
+
+=head1 METHODS
+
+=head2 new
+
+=cut
+
sub new {
my $new = bless({}, ref $_[0] || $_[0]);
$new->cursor("DBIx::Class::Storage::DBI::Cursor");
$new->transaction_depth(0);
+
+ $new->debugobj(new DBIx::Class::Storage::Statistics());
+
+ my $fh;
if (defined($ENV{DBIX_CLASS_STORAGE_DBI_DEBUG}) &&
($ENV{DBIX_CLASS_STORAGE_DBI_DEBUG} =~ /=(.+)$/)) {
- $new->debugfh(IO::File->new($1, 'w'))
+ $fh = IO::File->new($1, 'w')
or $new->throw_exception("Cannot open trace file $1");
} else {
- $new->debugfh(IO::File->new('>&STDERR'));
+ $fh = IO::File->new('>&STDERR');
}
+ $new->debugfh($fh);
$new->debug(1) if $ENV{DBIX_CLASS_STORAGE_DBI_DEBUG};
return $new;
}
+=head2 throw_exception
+
+Throws an exception - croaks.
+
+=cut
+
sub throw_exception {
my ($self, $msg) = @_;
croak($msg);
}
-=head1 NAME
+=head2 connect_info
-DBIx::Class::Storage::DBI - DBI storage handler
+The arguments of C<connect_info> are always a single array reference.
-=head1 SYNOPSIS
+This is normally accessed via L<DBIx::Class::Schema/connection>, which
+encapsulates its argument list in an arrayref before calling
+C<connect_info> here.
-=head1 DESCRIPTION
+The arrayref can either contain the same set of arguments one would
+normally pass to L<DBI/connect>, or a lone code reference which returns
+a connected database handle.
-This class represents the connection to the database
+In either case, there is an optional final element within the arrayref
+which can hold a hashref of connection-specific Storage::DBI options.
+These include C<on_connect_do>, and the sql_maker options
+C<limit_dialect>, C<quote_char>, and C<name_sep>. Examples:
-=head1 METHODS
-
-=cut
+ ->connect_info([ 'dbi:SQLite:./foo.db' ]);
-=head2 on_connect_do
-
-Executes the sql statements given as a listref on every db connect.
+ ->connect_info([ sub { DBI->connect(...) } ]);
-=head2 quote_char
+ ->connect_info(
+ [
+ 'dbi:Pg:dbname=foo',
+ 'postgres',
+ 'my_pg_password',
+ { AutoCommit => 0 },
+ { quote_char => q{`}, name_sep => q{@} },
+ ]
+ );
-Specifies what characters to use to quote table and column names. If
-you use this you will want to specify L<name_sep> as well.
+ ->connect_info(
+ [
+ sub { DBI->connect(...) },
+ { quote_char => q{`}, name_sep => q{@} },
+ ]
+ );
-quote_char expectes either a single character, in which case is it is placed
-on either side of the table/column, or an array of length 2 in which case the
-table/column name is placed between the elements.
+=head2 on_connect_do
-For example under MySQL you'd use C<quote_char('`')>, and user SQL Server you'd
-use C<quote_char(qw/[ ]/)>.
+ $schema->storage->on_connect_do(['PRAGMA synchronous = OFF']);
-=head2 name_sep
+Call this after C<< $schema->connect >> to have the sql statements
+given executed on every db connect.
-This only needs to be used in conjunction with L<quote_char>, and is used to
-specify the charecter that seperates elements (schemas, tables, columns) from
-each other. In most cases this is simply a C<.>.
+This option can also be set via L</connect_info>.
=head2 debug
-Causes SQL trace information to be emitted on C<debugfh> filehandle
-(or C<STDERR> if C<debugfh> has not specifically been set).
+Causes SQL trace information to be emitted on the C<debugobj> object.
+(or C<STDERR> if C<debugobj> has not specifically been set).
=head2 debugfh
-Sets or retrieves the filehandle used for trace/debug output. This
-should be an IO::Handle compatible object (only the C<print> method is
-used). Initially set to be STDERR - although see information on the
+Set or retrieve the filehandle used for trace/debug output. This should be
+an IO::Handle compatible object (only the C<print> method is used). Initially
+set to be STDERR - although see information on the
L<DBIX_CLASS_STORAGE_DBI_DEBUG> environment variable.
+=cut
+
+sub debugfh {
+ my $self = shift;
+
+ if ($self->debugobj->can('debugfh')) {
+ return $self->debugobj->debugfh(@_);
+ }
+}
+
+=head2 debugobj
+
+Sets or retrieves the object used for metric collection. Defaults to an instance
+of L<DBIx::Class::Storage::Statistics> that is compatible with the original
+method of using a coderef as a callback. See the aforementioned Statistics
+class for more information.
+
=head2 debugcb
Sets a callback to be executed each time a statement is run; takes a sub
-reference. Overrides debugfh. Callback is executed as $sub->($op, $info)
-where $op is SELECT/INSERT/UPDATE/DELETE and $info is what would normally
-be printed.
+reference. Callback is executed as $sub->($op, $info) where $op is
+SELECT/INSERT/UPDATE/DELETE and $info is what would normally be printed.
+
+See L<debugobj> for a better way.
=cut
sub debugcb {
- my ($self, $cb) = @_;
- my $cb_obj = bless(\$cb, 'DBIx::Class::Storage::DBI::DebugCallback');
- $self->debugfh($cb_obj);
+ my $self = shift;
+
+ if ($self->debugobj->can('callback')) {
+ return $self->debugobj->callback(@_);
+ }
}
+=head2 disconnect
+
+Disconnect the L<DBI> handle, performing a rollback first if the
+database is not in C<AutoCommit> mode.
+
+=cut
+
sub disconnect {
my ($self) = @_;
}
}
-sub connected {
- my ($self) = @_;
+=head2 connected
+
+Check if the L<DBI> handle is connected. Returns true if the handle
+is connected.
+
+=cut
+
+sub connected { my ($self) = @_;
if(my $dbh = $self->_dbh) {
if(defined $self->_conn_tid && $self->_conn_tid != threads->tid) {
return 0;
}
+=head2 ensure_connected
+
+Check whether the database handle is connected - if not then make a
+connection.
+
+=cut
+
sub ensure_connected {
my ($self) = @_;
return $self->_dbh;
}
+sub _sql_maker_args {
+ my ($self) = @_;
+
+ return ( limit_dialect => $self->dbh );
+}
+
+=head2 sql_maker
+
+Returns a C<sql_maker> object - normally an object of class
+C<DBIC::SQL::Abstract>.
+
+=cut
+
sub sql_maker {
my ($self) = @_;
unless ($self->_sql_maker) {
- $self->_sql_maker(new DBIC::SQL::Abstract( limit_dialect => $self->dbh ));
+ $self->_sql_maker(new DBIC::SQL::Abstract( $self->_sql_maker_args ));
}
return $self->_sql_maker;
}
+sub connect_info {
+ my ($self, $info_arg) = @_;
+
+ if($info_arg) {
+ my %sql_maker_opts;
+ my $info = [ @$info_arg ]; # copy because we can alter it
+ my $last_info = $info->[-1];
+ if(ref $last_info eq 'HASH') {
+ my $used;
+ if(my $on_connect_do = $last_info->{on_connect_do}) {
+ $used = 1;
+ $self->on_connect_do($on_connect_do);
+ }
+ for my $sql_maker_opt (qw/limit_dialect quote_char name_sep/) {
+ if(my $opt_val = $last_info->{$sql_maker_opt}) {
+ $used = 1;
+ $sql_maker_opts{$sql_maker_opt} = $opt_val;
+ }
+ }
+
+ # remove our options hashref if it was there, to avoid confusing
+ # DBI in the case the user didn't use all 4 DBI options, as in:
+ # [ 'dbi:SQLite:foo.db', { quote_char => q{`} } ]
+ pop(@$info) if $used;
+ }
+
+ $self->_connect_info($info);
+ $self->sql_maker->$_($sql_maker_opts{$_}) for(keys %sql_maker_opts);
+ }
+
+ $self->_connect_info;
+}
+
sub _populate_dbh {
my ($self) = @_;
- my @info = @{$self->connect_info || []};
+ my @info = @{$self->_connect_info || []};
$self->_dbh($self->_connect(@info));
- my $driver = $self->_dbh->{Driver}->{Name};
- eval "require DBIx::Class::Storage::DBI::${driver}";
- unless ($@) {
- bless $self, "DBIx::Class::Storage::DBI::${driver}";
+
+ if(ref $self eq 'DBIx::Class::Storage::DBI') {
+ my $driver = $self->_dbh->{Driver}->{Name};
+ if ($self->load_optional_class("DBIx::Class::Storage::DBI::${driver}")) {
+ bless $self, "DBIx::Class::Storage::DBI::${driver}";
+ $self->_rebless() if $self->can('_rebless');
+ }
}
+
# if on-connect sql statements are given execute them
foreach my $sql_statement (@{$self->on_connect_do || []}) {
+ $self->debugobj->query_start($sql_statement) if $self->debug();
$self->_dbh->do($sql_statement);
+ $self->debugobj->query_end($sql_statement) if $self->debug();
}
$self->_conn_pid($$);
}
eval {
- if(ref $info[0] eq 'CODE') {
- $dbh = &{$info[0]};
- }
- else {
- $dbh = DBI->connect(@info);
- }
+ $dbh = ref $info[0] eq 'CODE'
+ ? &{$info[0]}
+ : DBI->connect(@info);
};
$DBI::connect_via = $old_connect_via if $old_connect_via;
if ($self->{transaction_depth}++ == 0) {
my $dbh = $self->dbh;
if ($dbh->{AutoCommit}) {
- $self->debugfh->print("BEGIN WORK\n")
+ $self->debugobj->txn_begin()
if ($self->debug);
$dbh->begin_work;
}
my $dbh = $self->dbh;
if ($self->{transaction_depth} == 0) {
unless ($dbh->{AutoCommit}) {
- $self->debugfh->print("COMMIT\n")
+ $self->debugobj->txn_commit()
if ($self->debug);
$dbh->commit;
}
}
else {
if (--$self->{transaction_depth} == 0) {
- $self->debugfh->print("COMMIT\n")
+ $self->debugobj->txn_commit()
if ($self->debug);
$dbh->commit;
}
my $dbh = $self->dbh;
if ($self->{transaction_depth} == 0) {
unless ($dbh->{AutoCommit}) {
- $self->debugfh->print("ROLLBACK\n")
+ $self->debugobj->txn_rollback()
if ($self->debug);
$dbh->rollback;
}
}
else {
if (--$self->{transaction_depth} == 0) {
- $self->debugfh->print("ROLLBACK\n")
+ $self->debugobj->txn_rollback()
if ($self->debug);
$dbh->rollback;
}
my ($sql, @bind) = $self->sql_maker->$op($ident, @args);
unshift(@bind, @$extra_bind) if $extra_bind;
if ($self->debug) {
- my $bind_str = join(', ', map {
- defined $_ ? qq{`$_'} : q{`NULL'}
- } @bind);
- $self->debugfh->print("$sql ($bind_str)\n");
+ my @debug_bind = map { defined $_ ? qq{'$_'} : q{'NULL'} } @bind;
+ $self->debugobj->query_start($sql, @debug_bind);
}
my $sth = eval { $self->sth($sql,$op) };
);
}
@bind = map { ref $_ ? ''.$_ : $_ } @bind; # stringify args
- my $rv = eval { $sth->execute(@bind) };
- if ($@ || !$rv) {
- my $bind_str = join(', ', map {
- defined $_ ? qq{`$_'} : q{`NULL'}
- } @bind);
- $self->throw_exception(
- "Error executing '$sql' ($bind_str): ".($@ || $sth->errstr)
- );
+ my $rv;
+ if ($sth) {
+ my $time = time();
+ $rv = eval { $sth->execute(@bind) };
+
+ if ($@ || !$rv) {
+ $self->throw_exception("Error executing '$sql': ".($@ || $sth->errstr));
+ }
+ } else {
+ $self->throw_exception("'$sql' did not generate a statement.");
+ }
+ if ($self->debug) {
+ my @debug_bind = map { defined $_ ? qq{`$_'} : q{`NULL'} } @bind;
+ $self->debugobj->query_end($sql, @debug_bind);
}
return (wantarray ? ($rv, $sth, @bind) : $rv);
}
return $self->_execute(@args);
}
+=head2 select
+
+Handle a SQL select statement.
+
+=cut
+
sub select {
my $self = shift;
my ($ident, $select, $condition, $attrs) = @_;
return $self->cursor->new($self, \@_, $attrs);
}
+=head2 select_single
+
+Performs a select, fetch and return of data - handles a single row
+only.
+
+=cut
+
# Need to call finish() to work round broken DBDs
sub select_single {
return @row;
}
+=head2 sth
+
+Returns a L<DBI> sth (statement handle) for the supplied SQL.
+
+=cut
+
sub sth {
my ($self, $sql) = @_;
# 3 is the if_active parameter which avoids active sth re-use
$dbh->{RaiseError} = 1;
$dbh->{PrintError} = 0;
eval {
- my $sth = $dbh->column_info( undef, undef, $table, '%' );
+ my ($schema,$tab) = $table =~ /^(.+?)\.(.+)$/ ? ($1,$2) : (undef,$table);
+ my $sth = $dbh->column_info( undef,$schema, $tab, '%' );
$sth->execute();
while ( my $info = $sth->fetchrow_hashref() ){
my %column_info;
$column_info{size} = $info->{COLUMN_SIZE};
$column_info{is_nullable} = $info->{NULLABLE} ? 1 : 0;
$column_info{default_value} = $info->{COLUMN_DEF};
+ my $col_name = $info->{COLUMN_NAME};
+ $col_name =~ s/^\"(.*)\"$/$1/;
- $result{$info->{COLUMN_NAME}} = \%column_info;
+ $result{$col_name} = \%column_info;
}
};
$dbh->{RaiseError} = $old_raise_err;
return \%result;
}
+=head2 last_insert_id
+
+Return the row id of the last insert.
+
+=cut
+
sub last_insert_id {
my ($self, $row) = @_;
}
+=head2 sqlt_type
+
+Returns the database driver name.
+
+=cut
+
sub sqlt_type { shift->dbh->{Driver}->{Name} }
+=head2 create_ddl_dir (EXPERIMENTAL)
+
+=over 4
+
+=item Arguments: $schema, \@databases, $version, $directory, $sqlt_args
+
+=back
+
+Creates an SQL file based on the Schema, for each of the specified
+database types, in the given directory.
+
+Note that this feature is currently EXPERIMENTAL and may not work correctly
+across all databases, or fully handle complex relationships.
+
+=cut
+
+sub create_ddl_dir
+{
+ my ($self, $schema, $databases, $version, $dir, $sqltargs) = @_;
+
+ if(!$dir || !-d $dir)
+ {
+ warn "No directory given, using ./\n";
+ $dir = "./";
+ }
+ $databases ||= ['MySQL', 'SQLite', 'PostgreSQL'];
+ $databases = [ $databases ] if(ref($databases) ne 'ARRAY');
+ $version ||= $schema->VERSION || '1.x';
+
+ eval "use SQL::Translator";
+ $self->throw_exception("Can't deploy without SQL::Translator: $@") if $@;
+
+ my $sqlt = SQL::Translator->new({
+# debug => 1,
+ add_drop_table => 1,
+ });
+ foreach my $db (@$databases)
+ {
+ $sqlt->reset();
+ $sqlt->parser('SQL::Translator::Parser::DBIx::Class');
+# $sqlt->parser_args({'DBIx::Class' => $schema);
+ $sqlt->data($schema);
+ $sqlt->producer($db);
+
+ my $file;
+ my $filename = $schema->ddl_filename($db, $dir, $version);
+ if(-e $filename)
+ {
+ $self->throw_exception("$filename already exists, skipping $db");
+ next;
+ }
+ open($file, ">$filename")
+ or $self->throw_exception("Can't open $filename for writing ($!)");
+ my $output = $sqlt->translate;
+#use Data::Dumper;
+# print join(":", keys %{$schema->source_registrations});
+# print Dumper($sqlt->schema);
+ if(!$output)
+ {
+ $self->throw_exception("Failed to translate to $db. (" . $sqlt->error . ")");
+ next;
+ }
+ print $file $output;
+ close($file);
+ }
+
+}
+
+=head2 deployment_statements
+
+Create the statements for L</deploy> and
+L<DBIx::Class::Schema/deploy>.
+
+=cut
+
sub deployment_statements {
- my ($self, $schema, $type, $sqltargs) = @_;
+ my ($self, $schema, $type, $version, $dir, $sqltargs) = @_;
+ # Need to be connected to get the correct sqlt_type
+ $self->ensure_connected() unless $type;
$type ||= $self->sqlt_type;
+ $version ||= $schema->VERSION || '1.x';
+ $dir ||= './';
eval "use SQL::Translator";
- $self->throw_exception("Can't deploy without SQL::Translator: $@") if $@;
- eval "use SQL::Translator::Parser::DBIx::Class;";
- $self->throw_exception($@) if $@;
- eval "use SQL::Translator::Producer::${type};";
- $self->throw_exception($@) if $@;
- my $tr = SQL::Translator->new(%$sqltargs);
- SQL::Translator::Parser::DBIx::Class::parse( $tr, $schema );
- return "SQL::Translator::Producer::${type}"->can('produce')->($tr);
+ if(!$@)
+ {
+ eval "use SQL::Translator::Parser::DBIx::Class;";
+ $self->throw_exception($@) if $@;
+ eval "use SQL::Translator::Producer::${type};";
+ $self->throw_exception($@) if $@;
+ my $tr = SQL::Translator->new(%$sqltargs);
+ SQL::Translator::Parser::DBIx::Class::parse( $tr, $schema );
+ return "SQL::Translator::Producer::${type}"->can('produce')->($tr);
+ }
+
+ my $filename = $schema->ddl_filename($type, $dir, $version);
+ if(!-f $filename)
+ {
+# $schema->create_ddl_dir([ $type ], $version, $dir, $sqltargs);
+ $self->throw_exception("No SQL::Translator, and no Schema file found, aborting deploy");
+ return;
+ }
+ my $file;
+ open($file, "<$filename")
+ or $self->throw_exception("Can't open $filename ($!)");
+ my @rows = <$file>;
+ close($file);
+
+ return join('', @rows);
+
}
+=head2 deploy
+
+Sends the appropriate statements to create or modify tables to the
+db. This would normally be called through
+L<DBIx::Class::Schema/deploy>.
+
+=cut
+
sub deploy {
my ($self, $schema, $type, $sqltargs) = @_;
- foreach my $statement ( $self->deployment_statements($schema, $type, $sqltargs) ) {
+ foreach my $statement ( $self->deployment_statements($schema, $type, undef, undef, $sqltargs) ) {
for ( split(";\n", $statement)) {
- $self->debugfh->print("$_\n") if $self->debug;
+ next if($_ =~ /^--/);
+ next if(!$_);
+# next if($_ =~ /^DROP/m);
+ next if($_ =~ /^BEGIN TRANSACTION/m);
+ next if($_ =~ /^COMMIT/m);
+ $self->debugobj->query_start($_) if $self->debug;
$self->dbh->do($_) or warn "SQL was:\n $_";
+ $self->debugobj->query_end($_) if $self->debug;
}
}
}
+=head2 datetime_parser
+
+Returns the datetime parser class
+
+=cut
+
+sub datetime_parser {
+ my $self = shift;
+ return $self->{datetime_parser} ||= $self->build_datetime_parser(@_);
+}
+
+=head2 datetime_parser_type
+
+Defines (returns) the datetime parser class - currently hardwired to
+L<DateTime::Format::MySQL>
+
+=cut
+
+sub datetime_parser_type { "DateTime::Format::MySQL"; }
+
+=head2 build_datetime_parser
+
+See L</datetime_parser>
+
+=cut
+
+sub build_datetime_parser {
+ my $self = shift;
+ my $type = $self->datetime_parser_type(@_);
+ eval "use ${type}";
+ $self->throw_exception("Couldn't load ${type}: $@") if $@;
+ return $type;
+}
+
sub DESTROY { shift->disconnect }
1;
+=head1 SQL METHODS
+
+The module defines a set of methods within the DBIC::SQL::Abstract
+namespace. These build on L<SQL::Abstract::Limit> to provide the
+SQL query functions.
+
+The following methods are extended:-
+
+=over 4
+
+=item delete
+
+=item insert
+
+=item select
+
+=item update
+
+=item limit_dialect
+
+Accessor for setting limit dialect. This is useful
+for JDBC-bridge among others where the remote SQL-dialect cannot
+be determined by the name of the driver alone.
+
+This option can also be set via L</connect_info>.
+
+=item quote_char
+
+Specifies what characters to use to quote table and column names. If
+you use this you will want to specify L<name_sep> as well.
+
+quote_char expects either a single character, in which case it is placed
+on either side of the table/column, or an arrayref of length 2 in which case the
+table/column name is placed between the elements.
+
+For example under MySQL you'd use C<quote_char('`')>, and under SQL Server you'd
+use C<quote_char(qw/[ ]/)>.
+
+This option can also be set via L</connect_info>.
+
+=item name_sep
+
+This only needs to be used in conjunction with L<quote_char>, and is used to
+specify the character that separates elements (schemas, tables, columns) from
+each other. In most cases this is simply a C<.>.
+
+This option can also be set via L</connect_info>.
+
+=back
+
=head1 ENVIRONMENT VARIABLES
=head2 DBIX_CLASS_STORAGE_DBI_DEBUG
}
+sub datetime_parser_type { "DateTime::Format::DB2"; }
+
1;
=head1 NAME
package DBIx::Class::Storage::DBI::MSSQL;
-\r
+
use strict;
use warnings;
-\r
+
use base qw/DBIx::Class::Storage::DBI/;
-\r
-# __PACKAGE__->load_components(qw/PK::Auto/);
-\r
+
sub last_insert_id {
my( $id ) = $_[0]->_dbh->selectrow_array('SELECT @@IDENTITY' );
return $id;
}
-\r
+
+sub build_datetime_parser {
+ my $self = shift;
+ my $type = "DateTime::Format::Strptime";
+ eval "use ${type}";
+ $self->throw_exception("Couldn't load ${type}: $@") if $@;
+ return $type->new( pattern => '%m/%d/%Y %H:%M:%S' );
+}
+
1;
-\r
+
=head1 NAME
-\r
-DBIx::Class::Storage::DBI::MSSQL - Automatic primary key class for MSSQL
-\r
+
+DBIx::Class::Storage::DBI::MSSQL - Storage::DBI subclass for MSSQL
+
=head1 SYNOPSIS
-\r
- # In your table classes
- __PACKAGE__->load_components(qw/PK::Auto Core/);
- __PACKAGE__->set_primary_key('id');
-\r
-=head1 DESCRIPTION
-\r
-This class implements autoincrements for MSSQL.
-\r
+
+This subclass supports MSSQL, and can in theory be used directly
+via the C<storage_type> mechanism:
+
+ $schema->storage_type('::DBI::MSSQL');
+ $schema->connect_info('dbi:....', ...);
+
+However, as there is no L<DBD::MSSQL>, you will probably want to use
+one of the other DBD-specific MSSQL classes, such as
+L<DBIx::Class::Storage::DBI::Sybase::MSSQL>. These classes will
+merge this class with a DBD-specific class to obtain fully
+correct behavior for your scenario.
+
=head1 AUTHORS
-\r
+
Brian Cassidy <bricas@cpan.org>
-\r
+
=head1 LICENSE
-\r
+
You may distribute this code under the same terms as Perl itself.
-\r
+
=cut
--- /dev/null
+package DBIx::Class::Storage::DBI::NoBindVars;
+
+use strict;
+use warnings;
+
+use base 'DBIx::Class::Storage::DBI';
+
+sub _execute {
+ my ($self, $op, $extra_bind, $ident, @args) = @_;
+ my ($sql, @bind) = $self->sql_maker->$op($ident, @args);
+ unshift(@bind, @$extra_bind) if $extra_bind;
+ if ($self->debug) {
+ my @debug_bind = map { defined $_ ? qq{'$_'} : q{'NULL'} } @bind;
+ $self->debugobj->query_start($sql, @debug_bind);
+ }
+
+ while(my $bvar = shift @bind) {
+ $bvar = $self->dbh->quote($bvar);
+ $sql =~ s/\?/$bvar/;
+ }
+
+ my $sth = eval { $self->sth($sql,$op) };
+
+ if (!$sth || $@) {
+ $self->throw_exception(
+ 'no sth generated via sql (' . ($@ || $self->_dbh->errstr) . "): $sql"
+ );
+ }
+
+ my $rv;
+ if ($sth) {
+ my $time = time();
+ $rv = eval { $sth->execute };
+
+ if ($@ || !$rv) {
+ $self->throw_exception("Error executing '$sql': ".($@ || $sth->errstr));
+ }
+ } else {
+ $self->throw_exception("'$sql' did not generate a statement.");
+ }
+ if ($self->debug) {
+ my @debug_bind = map { defined $_ ? qq{`$_'} : q{`NULL'} } @bind;
+ $self->debugobj->query_end($sql, @debug_bind);
+ }
+ return (wantarray ? ($rv, $sth, @bind) : $rv);
+}
+
+1;
+
+=head1 NAME
+
+DBIx::Class::Storage::DBI::NoBindVars - Sometimes DBDs have poor to no support for bind variables
+
+=head1 SYNOPSIS
+
+=head1 DESCRIPTION
+
+This class allows queries to work when the DBD or underlying library does not
+support the usual C<?> placeholders, or at least doesn't support them very
+well, as is the case with L<DBD::Sybase>.
+
+=head1 AUTHORS
+
+Brandon Black <blblack@gmail.com>
+Trym Skaar <trym@tryms.no>
+
+=head1 LICENSE
+
+You may distribute this code under the same terms as Perl itself.
+
+=cut
--- /dev/null
+package DBIx::Class::Storage::DBI::ODBC;
+use strict;
+use warnings;
+
+use base qw/DBIx::Class::Storage::DBI/;
+
+sub _rebless {
+ my ($self) = @_;
+
+ my $dbh = $self->_dbh;
+ my $dbtype = eval { $dbh->get_info(17) };
+ unless ( $@ ) {
+ # Translate the backend name into a perl identifier
+ $dbtype =~ s/\W/_/gi;
+ my $class = "DBIx::Class::Storage::DBI::ODBC::${dbtype}";
+ eval "require $class";
+ bless $self, $class unless $@;
+ }
+}
+
+
+1;
+
+=head1 NAME
+
+DBIx::Class::Storage::DBI::ODBC - Base class for ODBC drivers
+
+=head1 SYNOPSIS
+
+ # In your table classes
+ __PACKAGE__->load_components(qw/Core/);
+
+
+=head1 DESCRIPTION
+
+This class simply provides a mechanism for discovering and loading a sub-class
+for a specific ODBC backend. It should be transparent to the user.
+
+
+=head1 AUTHORS
+
+Marc Mims C<< <marc@sssonline.com> >>
+
+=head1 LICENSE
+
+You may distribute this code under the same terms as Perl itself.
+
+=cut
--- /dev/null
+package DBIx::Class::Storage::DBI::ODBC::DB2_400_SQL;
+use strict;
+use warnings;
+
+use base qw/DBIx::Class::Storage::DBI::ODBC/;
+
+sub last_insert_id
+{
+ my ($self) = @_;
+
+ my $dbh = $self->_dbh;
+
+ # get the schema/table separator:
+ # '.' when SQL naming is active
+ # '/' when system naming is active
+ my $sep = $dbh->get_info(41);
+ my $sth = $dbh->prepare_cached(
+ "SELECT IDENTITY_VAL_LOCAL() FROM SYSIBM${sep}SYSDUMMY1", {}, 3);
+ $sth->execute();
+
+ my @res = $sth->fetchrow_array();
+
+ return @res ? $res[0] : undef;
+}
+
+sub _sql_maker_args {
+ my ($self) = @_;
+
+ return (
+ limit_dialect => 'FetchFirst',
+ name_sep => $self->_dbh->get_info(41)
+ );
+}
+
+1;
+
+=head1 NAME
+
+DBIx::Class::Storage::DBI::ODBC::DB2_400_SQL - Support specific to DB2/400
+over ODBC
+
+=head1 SYNOPSIS
+
+ # In your table classes
+ __PACKAGE__->load_components(qw/PK::Auto Core/);
+ __PACKAGE__->set_primary_key('id');
+
+
+=head1 DESCRIPTION
+
+This class implements support specific to DB2/400 over ODBC, including
+auto-increment primary keys, SQL::Abstract::Limit dialect, and name separator
+for connections using either SQL naming or System naming.
+
+
+=head1 AUTHORS
+
+Marc Mims C<< <marc@sssonline.com> >>
+
+Based on DBIx::Class::Storage::DBI::DB2 by Jess Robinson.
+
+=head1 LICENSE
+
+You may distribute this code under the same terms as Perl itself.
+
+=cut
--- /dev/null
+package DBIx::Class::Storage::DBI::ODBC400;
+use strict;
+use warnings;
+
+use base qw/DBIx::Class::Storage::DBI/;
+
+sub last_insert_id
+{
+ my ($self) = @_;
+
+ my $dbh = $self->_dbh;
+
+ # get the schema/table separator:
+ # '.' when SQL naming is active
+ # '/' when system naming is active
+ my $sep = $dbh->get_info(41);
+ my $sth = $dbh->prepare_cached(
+ "SELECT IDENTITY_VAL_LOCAL() FROM SYSIBM${sep}SYSDUMMY1", {}, 3);
+ $sth->execute();
+
+ my @res = $sth->fetchrow_array();
+
+ return @res ? $res[0] : undef;
+}
+
+1;
+
+=head1 NAME
+
+DBIx::Class::Storage::DBI::ODBC400 - Automatic primary key class for DB2/400
+over ODBC
+
+=head1 SYNOPSIS
+
+ # In your table classes
+ __PACKAGE__->load_components(qw/PK::Auto Core/);
+ __PACKAGE__->set_primary_key('id');
+
+
+=head1 DESCRIPTION
+
+This class implements autoincrements for DB2/400 over ODBC.
+
+
+=head1 AUTHORS
+
+Marc Mims C<< <marc@questright.com> >>
+
+Based on DBIx::Class::Storage::DBI::DB2 by Jess Robinson.
+
+=head1 LICENSE
+
+You may distribute this code under the same terms as Perl itself.
+
+=cut
use strict;
use warnings;
+use DBD::Pg;
+
use base qw/DBIx::Class::Storage::DBI/;
# __PACKAGE__->load_components(qw/PK::Auto/);
+# Warn about problematic versions of DBD::Pg
+warn "DBD::Pg 1.49 is strongly recommended"
+ if ($DBD::Pg::VERSION < 1.49);
+
sub last_insert_id {
my ($self,$source,$col) = @_;
my $seq = ($source->column_info($col)->{sequence} ||= $self->get_autoinc_seq($source,$col));
my ($schema,$table) = $source->name =~ /^(.+)\.(.+)$/ ? ($1,$2)
: (undef,$source->name);
while (my $col = shift @pri) {
- my $info = $dbh->column_info(undef,$schema,$table,$col)->fetchrow_arrayref;
- if (defined $info->[12] and $info->[12] =~
+ my $info = $dbh->column_info(undef,$schema,$table,$col)->fetchrow_hashref;
+ if (defined $info->{COLUMN_DEF} and $info->{COLUMN_DEF} =~
/^nextval\(+'([^']+)'::(?:text|regclass)\)/)
{
- return $1; # may need to strip quotes -- see if this works
+ my $seq = $1;
+ return $seq =~ /\./ ? $seq : $info->{TABLE_SCHEM} . "." . $seq; # may need to strip quotes -- see if this works
}
}
}
return 'PostgreSQL';
}
+sub datetime_parser_type { return "DateTime::Format::Pg"; }
+
1;
=head1 NAME
--- /dev/null
+package DBIx::Class::Storage::DBI::Sybase;
+
+use strict;
+use warnings;
+
+use base qw/DBIx::Class::Storage::DBI::NoBindVars/;
+
+1;
+
+=head1 NAME
+
+DBIx::Class::Storage::DBI::Sybase - Storage::DBI subclass for Sybase
+
+=head1 SYNOPSIS
+
+This subclass supports L<DBD::Sybase> for real Sybase databases. If
+you are using an MSSQL database via L<DBD::Sybase>, see
+L<DBIx::Class::Storage::DBI::Sybase::MSSQL>.
+
+=head1 AUTHORS
+
+Brandon L Black <blblack@gmail.com>
+
+=head1 LICENSE
+
+You may distribute this code under the same terms as Perl itself.
+
+=cut
--- /dev/null
+package DBIx::Class::Storage::DBI::Sybase::MSSQL;
+
+use strict;
+use warnings;
+
+use base qw/DBIx::Class::Storage::DBI::MSSQL DBIx::Class::Storage::DBI::Sybase/;
+
+1;
+
+=head1 NAME
+
+DBIx::Class::Storage::DBI::Sybase::MSSQL - Storage::DBI subclass for MSSQL via
+DBD::Sybase
+
+=head1 SYNOPSIS
+
+This subclass supports MSSQL connected via L<DBD::Sybase>.
+
+ $schema->storage_type('::DBI::Sybase::MSSQL');
+ $schema->connect_info('dbi:Sybase:....', ...);
+
+=head1 AUTHORS
+
+Brandon L Black <blblack@gmail.com>
+
+=head1 LICENSE
+
+You may distribute this code under the same terms as Perl itself.
+
+=cut
--- /dev/null
+package DBIx::Class::Storage::Statistics;
+use strict;
+
+use base qw/DBIx::Class::AccessorGroup Class::Data::Accessor/;
+__PACKAGE__->mk_group_accessors(simple => qw/callback debugfh/);
+
+=head1 NAME
+
+DBIx::Class::Storage::Statistics - SQL Statistics
+
+=head1 SYNOPSIS
+
+=head1 DESCRIPTION
+
+This class is called by DBIx::Class::Storage::DBI as a means of collecting
+statistics on its actions. Using this class alone merely prints the SQL
+executed, the fact that it completes and begin/end notification for
+transactions.
+
+To really use this class you should subclass it and create your own method
+for collecting the statistics as discussed in L<DBIx::Class::Manual::Cookbook>.
+
+=head1 METHODS
+
+=cut
+
+=head2 new
+
+Returns a new L<DBIx::Class::Storage::Statistics> object.
+
+=cut
+sub new {
+ my $self = bless({}, ref($_[0]) || $_[0]);
+
+ return $self;
+}
+
+=head2 debugfh
+
+Sets or retrieves the filehandle used for trace/debug output. This should
+be an IO::Handle compatible object (only the C<print> method is used). Initially
+should be set to STDERR - although see information on the
+L<DBIX_CLASS_STORAGE_DBI_DEBUG> environment variable.
+
+=head2 txn_begin
+
+Called when a transaction begins.
+
+=cut
+sub txn_begin {
+ my $self = shift();
+
+ $self->debugfh->print("BEGIN WORK\n");
+}
+
+=head2 txn_rollback
+
+Called when a transaction is rolled back.
+
+=cut
+sub txn_rollback {
+ my $self = shift();
+
+ $self->debugfh->print("ROLLBACK\n");
+}
+
+=head2 txn_commit
+
+Called when a transaction is committed.
+
+=cut
+sub txn_commit {
+ my $self = shift();
+
+ $self->debugfh->print("COMMIT\n");
+}
+
+=head2 query_start
+
+Called before a query is executed. The first argument is the SQL string being
+executed and subsequent arguments are the parameters used for the query.
+
+=cut
+sub query_start {
+ my $self = shift();
+ my $string = shift();
+
+ if(defined($self->callback())) {
+ $string =~ m/^(\w+)/;
+ $self->callback()->($1, $string);
+ return;
+ }
+
+ $self->debugfh->print("$string: " . join(', ', @_) . "\n");
+}
+
+=head2 query_end
+
+Called when a query finishes executing. Has the same arguments as query_start.
+
+=cut
+sub query_end {
+ my $self = shift();
+ my $string = shift();
+}
+
+1;
+
+=head1 AUTHORS
+
+Cory G. Watson <gphat@cpan.org>
+
+=head1 LICENSE
+
+You may distribute this code under the same license as Perl itself.
+
+=cut
use base qw/DBIx::Class/;
-__PACKAGE__->load_components(qw/PK::Auto CDBICompat Core DB/);
+__PACKAGE__->load_components(qw/CDBICompat Core DB/);
use File::Temp qw/tempfile/;
my (undef, $DB) = tempfile();
+++ /dev/null
-package DBIx::Class::UUIDColumns;
-
-use strict;
-use warnings;
-
-use base qw/DBIx::Class/;
-
-__PACKAGE__->mk_classdata( 'uuid_auto_columns' => [] );
-__PACKAGE__->mk_classdata( 'uuid_maker' );
-__PACKAGE__->uuid_class( __PACKAGE__->_find_uuid_module );
-
-# be compatible with Class::DBI::UUID
-sub uuid_columns {
- my $self = shift;
- for (@_) {
- $self->throw_exception("column $_ doesn't exist") unless $self->has_column($_);
- }
- $self->uuid_auto_columns(\@_);
-}
-
-sub uuid_class {
- my ($self, $class) = @_;
-
- if ($class) {
- $class = "DBIx::Class::UUIDMaker$class" if $class =~ /^::/;
-
- if (!eval "require $class") {
- $self->throw_exception("$class could not be loaded: $@");
- } elsif (!$class->isa('DBIx::Class::UUIDMaker')) {
- $self->throw_exception("$class is not a UUIDMaker subclass");
- } else {
- $self->uuid_maker($class->new);
- };
- };
-
- return ref $self->uuid_maker;
-};
-
-sub insert {
- my $self = shift;
- for my $column (@{$self->uuid_auto_columns}) {
- $self->store_column( $column, $self->get_uuid )
- unless defined $self->get_column( $column );
- }
- $self->next::method(@_);
-}
-
-sub get_uuid {
- return shift->uuid_maker->as_string;
-}
-
-sub _find_uuid_module {
- if (eval{require Data::UUID}) {
- return '::Data::UUID';
- } elsif ($^O ne 'openbsd' && eval{require APR::UUID}) {
- # APR::UUID on openbsd causes some as yet unfound nastiness for XS
- return '::APR::UUID';
- } elsif (eval{require UUID}) {
- return '::UUID';
- } elsif (eval{
- # squelch the 'too late for INIT' warning in Win32::API::Type
- local $^W = 0;
- require Win32::Guidgen;
- }) {
- return '::Win32::Guidgen';
- } elsif (eval{require Win32API::GUID}) {
- return '::Win32API::GUID';
- } else {
- shift->throw_exception('no suitable uuid module could be found')
- };
-};
-
-1;
-__END__
-
-=head1 NAME
-
-DBIx::Class::UUIDColumns - Implicit uuid columns
-
-=head1 SYNOPSIS
-
- package Artist;
- __PACKAGE__->load_components(qw/UUIDColumns Core DB/);
- __PACKAGE__->uuid_columns( 'artist_id' );
-
-=head1 DESCRIPTION
-
-This L<DBIx::Class> component resembles the behaviour of
-L<Class::DBI::UUID>, to make some columns implicitly created as uuid.
-
-When loaded, C<UUIDColumns> will search for a suitable uuid generation module
-from the following list of supported modules:
-
- Data::UUID
- APR::UUID*
- UUID
- Win32::Guidgen
- Win32API::GUID
-
-If no supporting module can be found, an exception will be thrown.
-
-*APR::UUID will not be loaded under OpenBSD due to an as yet unidentified XS
-issue.
-
-If you would like to use a specific module, you can set C<uuid_class>:
-
- __PACKAGE__->uuid_class('::Data::UUID');
- __PACKAGE__->uuid_class('MyUUIDGenerator');
-
-Note that the component needs to be loaded before Core.
-
-=head1 METHODS
-
-=head2 uuid_columns(@columns)
-
-Takes a list of columns to be filled with uuids during insert.
-
- __PACKAGE__->uuid_columns('id');
-
-=head2 uuid_class($classname)
-
-Takes the name of a UUIDMaker subclass to be used for uuid value generation.
-This can be a fully qualified class name, or a shortcut name starting with ::
-that matches one of the available DBIx::Class::UUIDMaker subclasses:
-
- __PACKAGE__->uuid_class('CustomUUIDGenerator');
- # loads CustomeUUIDGenerator
-
- __PACKAGE->uuid_class('::Data::UUID');
- # loads DBIx::Class::UUIDMaker::Data::UUID;
-
-Note that C<uuid_class> chacks to see that the specified class isa
-DBIx::Class::UUIDMaker subbclass and throws and exception if it isn't.
-
-=head2 uuid_maker
-
-Returns the current UUIDMaker instance for the given module.
-
- my $uuid = __PACKAGE__->uuid_maker->as_string;
-
-=head1 SEE ALSO
-
-L<DBIx::Class::UUIDMaker>
-
-=head1 AUTHORS
-
-Chia-liang Kao <clkao@clkao.org>
-Chris Laco <claco@chrislaco.com>
-
-=head1 LICENSE
-
-You may distribute this code under the same terms as Perl itself.
+++ /dev/null
-package DBIx::Class::UUIDMaker;
-
-use strict;
-use warnings;
-
-sub new {
- return bless {}, shift;
-};
-
-sub as_string {
- return undef;
-};
-
-1;
-__END__
-
-=head1 NAME
-
-DBIx::Class::UUIDMaker - UUID wrapper module
-
-=head1 SYNOPSIS
-
- package CustomUUIDMaker;
- use base qw/DBIx::Class::/;
-
- sub as_string {
- my $uuid;
- ...magic incantations...
- return $uuid;
- };
-
-=head1 DESCRIPTION
-
-DBIx::Class::UUIDMaker is a base class used by the various uuid generation
-subclasses.
-
-=head1 METHODS
-
-=head2 as_string
-
-Returns the new uuid as a string.
-
-=head1 SEE ALSO
-
-L<DBIx::Class::UUIDMaker>,
-L<DBIx::Class::UUIDMaker::UUID>,
-L<DBIx::Class::UUIDMaker::APR::UUID>,
-L<DBIx::Class::UUIDMaker::Data::UUID>,
-L<DBIx::Class::UUIDMaker::Win32::Guidgen>,
-L<DBIx::Class::UUIDMaker::Win32API::GUID>,
-L<DBIx::Class::UUIDMaker::Data::Uniqid>
-
-=head1 AUTHOR
-
-Chris Laco <claco@chrislaco.com>
-
-=head1 LICENSE
-
-You may distribute this code under the same terms as Perl itself.
+++ /dev/null
-package DBIx::Class::UUIDMaker::APR::UUID;
-
-use strict;
-use warnings;
-
-use base qw/DBIx::Class::UUIDMaker/;
-use APR::UUID ();
-
-sub as_string {
- return APR::UUID->new->format;
-};
-
-1;
-__END__
-
-=head1 NAME
-
-DBIx::Class::UUIDMaker::APR::UUID - Create uuids using APR::UUID
-
-=head1 SYNOPSIS
-
- package Artist;
- __PACKAGE__->load_components(qw/UUIDColumns Core DB/);
- __PACKAGE__->uuid_columns( 'artist_id' );
- __PACKAGE__->uuid_class('::APR::UUID');
-
-=head1 DESCRIPTION
-
-This DBIx::Class::UUIDMaker subclass uses APR::UUID to generate uuid
-strings in the following format:
-
- 098f2470-bae0-11cd-b579-08002b30bfeb
-
-=head1 METHODS
-
-=head2 as_string
-
-Returns the new uuid as a string.
-
-=head1 SEE ALSO
-
-L<APR::UUID>
-
-=head1 AUTHOR
-
-Chris Laco <claco@chrislaco.com>
-
-=head1 LICENSE
-
-You may distribute this code under the same terms as Perl itself.
+++ /dev/null
-package DBIx::Class::UUIDMaker::Data::UUID;
-
-use strict;
-use warnings;
-
-use base qw/DBIx::Class::UUIDMaker/;
-use Data::UUID ();
-
-sub as_string {
- return Data::UUID->new->to_string(Data::UUID->new->create);
-};
-
-1;
-__END__
-
-=head1 NAME
-
-DBIx::Class::UUIDMaker::Data::UUID - Create uuids using Data::UUID
-
-=head1 SYNOPSIS
-
- package Artist;
- __PACKAGE__->load_components(qw/UUIDColumns Core DB/);
- __PACKAGE__->uuid_columns( 'artist_id' );
- __PACKAGE__->uuid_class('::Data::UUID');
-
-=head1 DESCRIPTION
-
-This DBIx::Class::UUIDMaker subclass uses Data::UUID to generate uuid
-strings in the following format:
-
- 098f2470-bae0-11cd-b579-08002b30bfeb
-
-=head1 METHODS
-
-=head2 as_string
-
-Returns the new uuid as a string.
-
-=head1 SEE ALSO
-
-L<Data::UUID>
-
-=head1 AUTHOR
-
-Chris Laco <claco@chrislaco.com>
-
-=head1 LICENSE
-
-You may distribute this code under the same terms as Perl itself.
+++ /dev/null
-package DBIx::Class::UUIDMaker::Data::Uniqid;
-
-use strict;
-use warnings;
-
-use base qw/DBIx::Class::UUIDMaker/;
-use Data::Uniqid ();
-
-sub as_string {
- return Data::Uniqid->luniqid;
-};
-
-1;
-__END__
-
-=head1 NAME
-
-DBIx::Class::UUIDMaker::Data::Uniqid - Create uuids using Data::Uniqid
-
-=head1 SYNOPSIS
-
- package Artist;
- __PACKAGE__->load_components(qw/UUIDColumns Core DB/);
- __PACKAGE__->uuid_columns( 'artist_id' );
- __PACKAGE__->uuid_class('::Data::Uniqid');
-
-=head1 DESCRIPTION
-
-This DBIx::Class::UUIDMaker subclass uses Data::Uniqid to generate uuid
-strings using Data::Uniqid::luniqid.
-
-=head1 METHODS
-
-=head2 as_string
-
-Returns the new uuid as a string.
-
-=head1 SEE ALSO
-
-L<Data::Data::Uniqid>
-
-=head1 AUTHOR
-
-Chris Laco <claco@chrislaco.com>
-
-=head1 LICENSE
-
-You may distribute this code under the same terms as Perl itself.
+++ /dev/null
-package DBIx::Class::UUIDMaker::UUID;
-
-use strict;
-use warnings;
-
-use base qw/DBIx::Class::UUIDMaker/;
-use UUID ();
-
-sub as_string {
- my ($uuid, $uuidstring);
- UUID::generate($uuid);
- UUID::unparse($uuid, $uuidstring);
-
- return $uuidstring;
-};
-
-1;
-__END__
-
-=head1 NAME
-
-DBIx::Class::UUIDMaker::UUID - Create uuids using UUID
-
-=head1 SYNOPSIS
-
- package Artist;
- __PACKAGE__->load_components(qw/UUIDColumns Core DB/);
- __PACKAGE__->uuid_columns( 'artist_id' );
- __PACKAGE__->uuid_class('::UUID');
-
-=head1 DESCRIPTION
-
-This DBIx::Class::UUIDMaker subclass uses UUID to generate uuid
-strings in the following format:
-
- 098f2470-bae0-11cd-b579-08002b30bfeb
-
-=head1 METHODS
-
-=head2 as_string
-
-Returns the new uuid as a string.
-
-=head1 SEE ALSO
-
-L<UUID>
-
-=head1 AUTHOR
-
-Chris Laco <claco@chrislaco.com>
-
-=head1 LICENSE
-
-You may distribute this code under the same terms as Perl itself.
+++ /dev/null
-package DBIx::Class::UUIDMaker::Win32::Guidgen;
-
-use strict;
-use warnings;
-
-use base qw/DBIx::Class::UUIDMaker/;
-use Win32::Guidgen ();
-
-sub as_string {
- my $uuid = Win32::Guidgen::create();
- $uuid =~ s/(^\{|\}$)//g;
-
- return $uuid;
-};
-
-1;
-__END__
-
-=head1 NAME
-
-DBIx::Class::UUIDMaker::Win32::Guidgen - Create uuids using Win32::Guidgen
-
-=head1 SYNOPSIS
-
- package Artist;
- __PACKAGE__->load_components(qw/UUIDColumns Core DB/);
- __PACKAGE__->uuid_columns( 'artist_id' );
- __PACKAGE__->uuid_class('::Win32::Guidgen');
-
-=head1 DESCRIPTION
-
-This DBIx::Class::UUIDMaker subclass uses Win32::Guidgen to generate uuid
-strings in the following format:
-
- 098f2470-bae0-11cd-b579-08002b30bfeb
-
-=head1 METHODS
-
-=head2 as_string
-
-Returns the new uuid as a string.
-
-=head1 SEE ALSO
-
-L<Win32::Guidgen>
-
-=head1 AUTHOR
-
-Chris Laco <claco@chrislaco.com>
-
-=head1 LICENSE
-
-You may distribute this code under the same terms as Perl itself.
+++ /dev/null
-package DBIx::Class::UUIDMaker::Win32API::GUID;
-
-use strict;
-use warnings;
-
-use base qw/DBIx::Class::UUIDMaker/;
-use Win32API::GUID ();
-
-sub as_string {
- return Win32API::GUID::CreateGuid();
-};
-
-1;
-__END__
-
-=head1 NAME
-
-DBIx::Class::UUIDMaker::Win32API::GUID - Create uuids using Win32API::GUID
-
-=head1 SYNOPSIS
-
- package Artist;
- __PACKAGE__->load_components(qw/UUIDColumns Core DB/);
- __PACKAGE__->uuid_columns( 'artist_id' );
- __PACKAGE__->uuid_class('::Win32API::GUID');
-
-=head1 DESCRIPTION
-
-This DBIx::Class::UUIDMaker subclass uses Win32API::GUID to generate uuid
-strings in the following format:
-
- 098f2470-bae0-11cd-b579-08002b30bfeb
-
-=head1 METHODS
-
-=head2 as_string
-
-Returns the new uuid as a string.
-
-=head1 SEE ALSO
-
-L<Win32API::GUID>
-
-=head1 AUTHOR
-
-Chris Laco <claco@chrislaco.com>
-
-=head1 LICENSE
-
-You may distribute this code under the same terms as Perl itself.
+++ /dev/null
-package DBIx::Class::Validation;
-
-use strict;
-use warnings;
-
-use base qw( DBIx::Class );
-use English qw( -no_match_vars );
-
-#local $^W = 0; # Silence C:D:I redefined sub errors.
-# Switched to C::D::Accessor which doesn't do this. Hate hate hate hate.
-
-our $VERSION = '0.01';
-
-__PACKAGE__->mk_classdata( 'validation_module' => 'FormValidator::Simple' );
-__PACKAGE__->mk_classdata( 'validation_profile' );
-__PACKAGE__->mk_classdata( 'validation_auto' => 1 );
-
-sub validation_module {
- my $class = shift;
- my $module = shift;
-
- eval("use $module");
- $class->throw_exception("Unable to load the validation module '$module' because $EVAL_ERROR") if ($EVAL_ERROR);
- $class->throw_exception("The '$module' module does not support the check method") if (!$module->can('check'));
-
- $class->_validation_module_accessor( $module );
-}
-
-sub validation {
- my $class = shift;
- my %args = @_;
-
- $class->validation_module( $args{module} ) if (exists $args{module});
- $class->validation_profile( $args{profile} ) if (exists $args{profile});
- $class->validation_auto( $args{auto} ) if (exists $args{auto});
-}
-
-sub validate {
- my $self = shift;
- my %data = $self->get_columns();
- my $module = $self->validation_module();
- my $profile = $self->validation_profile();
- my $result = $module->check( \%data => $profile );
- return $result if ($result->success());
- $self->throw_exception( $result );
-}
-
-sub insert {
- my $self = shift;
- $self->validate if ($self->validation_auto());
- $self->next::method(@_);
-}
-
-sub update {
- my $self = shift;
- $self->validate if ($self->validation_auto());
- $self->next::method(@_);
-}
-
-1;
-__END__
-
-=head1 NAME
-
-DBIx::Class::Validation - Validate all data before submitting to your database.
-
-=head1 SYNOPSIS
-
-In your base DBIC package:
-
- __PACKAGE__->load_components(qw/... Validation/);
-
-And in your subclasses:
-
- __PACKAGE__->validation(
- module => 'FormValidator::Simple',
- profile => { ... },
- auto => 1,
- );
-
-And then somewhere else:
-
- eval{ $obj->validate() };
- if( my $results = $EVAL_ERROR ){
- ...
- }
-
-=head1 METHODS
-
-=head2 validation
-
- __PACKAGE__->validation(
- module => 'FormValidator::Simple',
- profile => { ... },
- auto => 1,
- );
-
-Calls validation_module(), validation_profile(), and validation_auto() if the corresponding
-argument is defined.
-
-=head2 validation_module
-
- __PACKAGE__->validation_module('Data::FormValidator');
-
-Sets the validation module to use. Any module that supports a check() method just like
-Data::FormValidator's can be used here, such as FormValidator::Simple.
-
-Defaults to FormValidator::Simple.
-
-=head2 validation_profile
-
- __PACKAGE__->validation_profile(
- { ... }
- );
-
-Sets the profile that will be passed to the validation module.
-
-=head2 validation_auto
-
- __PACKAGE__->validation_auto( 1 );
-
-This flag, when enabled, causes any updates or inserts of the class
-to call validate() before actually executing.
-
-=head2 validate
-
- $obj->validate();
-
-Validates all the data in the object against the pre-defined validation
-module and profile. If there is a problem then a hard error will be
-thrown. If you put the validation in an eval you can capture whatever
-the module's check() method returned.
-
-=head2 auto_validate
-
- __PACKAGE__->auto_validate( 0 );
-
-Turns on and off auto-validation. This feature makes all UPDATEs and
-INSERTs call the validate() method before doing anything. The default
-is for auto-validation to be on.
-
-Defaults to on.
-
-=head1 AUTHOR
-
-Aran C. Deltac <bluefeet@cpan.org>
-
-=head1 LICENSE
-
-You may distribute this code under the same terms as Perl itself.
-
# print Dumper($dbixschema->registered_classes);
#foreach my $tableclass ($dbixschema->registered_classes)
+
+ my %seen_tables;
+
foreach my $moniker ($dbixschema->sources)
{
#eval "use $tableclass";
#print("Can't load $tableclass"), next if($@);
my $source = $dbixschema->source($moniker);
+ next if $seen_tables{$source->name}++;
+
my $table = $schema->add_table(
name => $source->name,
type => 'TABLE',
}
$table->primary_key($source->primary_columns);
+ my @primary = $source->primary_columns;
+ my %unique_constraints = $source->unique_constraints;
+ foreach my $uniq (keys %unique_constraints) {
+ if (!$source->compare_relationship_keys($unique_constraints{$uniq}, \@primary)) {
+ $table->add_constraint(
+ type => 'unique',
+ name => "$uniq",
+ fields => $unique_constraints{$uniq}
+ );
+ }
+ }
+
my @rels = $source->relationships();
foreach my $rel (@rels)
{
my $rel_info = $source->relationship_info($rel);
- my $rel_table = $source->related_source($rel)->name;
-
# Ignore any rel cond that isn't a straight hash
next unless ref $rel_info->{cond} eq 'HASH';
+ my $othertable = $source->related_source($rel);
+ my $rel_table = $othertable->name;
+
# Get the key information, mapping off the foreign/self markers
my @cond = keys(%{$rel_info->{cond}});
my @refkeys = map {/^\w+\.(\w+)$/} @cond;
if($rel_table)
{
- #Decide if this is a foreign key based on whether the self
- #items are our primary columns.
+ my $reverse_rels = $source->reverse_relationship_info($rel);
+ my ($otherrelname, $otherrelationship) = each %{$reverse_rels};
- # Make sure every self key is in the primary key list
- my $found;
- foreach my $key (@keys) {
- $found = 0;
- foreach my $prim ($source->primary_columns) {
- if ($prim eq $key) {
- $found = 1;
- last;
- }
- }
- last unless $found;
- }
+ my $on_delete = '';
+ my $on_update = '';
- # Make sure every primary key column is in the self keys
- if ($found) {
- foreach my $prim ($source->primary_columns) {
- $found = 0;
- foreach my $key (@keys) {
- if ($prim eq $key) {
- $found = 1;
- last;
- }
- }
- last unless $found;
- }
+ if (defined $otherrelationship) {
+ $on_delete = $otherrelationship->{'attrs'}->{cascade_delete} ? 'CASCADE' : '';
+ $on_update = $otherrelationship->{'attrs'}->{cascade_copy} ? 'CASCADE' : '';
}
- # if $found then the two sets are equal.
+ #Decide if this is a foreign key based on whether the self
+ #items are our primary columns.
# If the sets are different, then we assume it's a foreign key from
# us to another table.
- if (!$found) {
+ if (!$source->compare_relationship_keys(\@keys, \@primary)) {
$table->add_constraint(
type => 'foreign_key',
name => "fk_$keys[0]",
fields => \@keys,
reference_fields => \@refkeys,
reference_table => $rel_table,
+ on_delete => $on_delete,
+ on_update => $on_update
);
}
}
}
1;
+
--- /dev/null
+#!/usr/bin/perl
+use strict;
+use warnings;
+use lib qw(lib t/lib);
+
+# USAGE:
+# maint/inheritance_pod.pl Some::Module
+
+my $module = $ARGV[0];
+eval(" require $module; ");
+
+my @modules = Class::C3::calculateMRO($module);
+shift( @modules );
+
+print "=head1 INHERITED METHODS\n\n";
+
+foreach my $module (@modules) {
+ print "=head2 $module\n\n";
+ print "=over 4\n\n";
+ my $file = $module;
+ $file =~ s/::/\//g;
+ $file .= '.pm';
+ foreach my $path (@INC){
+ if (-e "$path/$file") {
+ open(MODULE,"<$path/$file");
+ while (my $line = <MODULE>) {
+ if ($line=~/^\s*sub ([a-z][a-z_]+) \{/) {
+ my $method = $1;
+ print "=item *\n\n";
+ print "L<$method|$module/$method>\n\n";
+ }
+ }
+ close(MODULE);
+ last;
+ }
+ }
+ print "=back\n\n";
+}
+
+1;
--- /dev/null
+#!/usr/bin/perl
+use strict;
+use warnings;
+
+use Getopt::Long;
+use Pod::Usage;
+use JSON qw( jsonToObj );
+
+$JSON::BareKey = 1;
+$JSON::QuotApos = 1;
+
+GetOptions(
+ 'schema=s' => \my $schema_class,
+ 'class=s' => \my $resultset_class,
+ 'connect=s' => \my $connect,
+ 'op=s' => \my $op,
+ 'set=s' => \my $set,
+ 'where=s' => \my $where,
+ 'attrs=s' => \my $attrs,
+ 'format=s' => \my $format,
+ 'force' => \my $force,
+ 'trace' => \my $trace,
+ 'quiet' => \my $quiet,
+ 'help' => \my $help,
+ 'tlibs' => \my $t_libs,
+);
+
+if ($t_libs) {
+ unshift( @INC, 't/lib', 'lib' );
+}
+
+pod2usage(1) if ($help);
+$ENV{DBIX_CLASS_STORAGE_DBI_DEBUG} = 1 if ($trace);
+
+die('No op specified') if(!$op);
+die('Invalid op') if ($op!~/^insert|update|delete|select$/s);
+my $csv_class;
+if ($op eq 'select') {
+ $format ||= 'tsv';
+ die('Invalid format') if ($format!~/^tsv|csv$/s);
+ $csv_class = 'Text::CSV_XS';
+ eval{ require Text::CSV_XS };
+ if ($@) {
+ $csv_class = 'Text::CSV_PP';
+ eval{ require Text::CSV_PP };
+ die('The select op requires either the Text::CSV_XS or the Text::CSV_PP module') if ($@);
+ }
+}
+
+die('No schema specified') if(!$schema_class);
+eval("require $schema_class");
+die('Unable to load schema') if ($@);
+$connect = jsonToObj( $connect ) if ($connect);
+my $schema = $schema_class->connect(
+ ( $connect ? @$connect : () )
+);
+
+die('No class specified') if(!$resultset_class);
+my $resultset = eval{ $schema->resultset($resultset_class) };
+die('Unable to load the class with the schema') if ($@);
+
+$set = jsonToObj( $set ) if ($set);
+$where = jsonToObj( $where ) if ($where);
+$attrs = jsonToObj( $attrs ) if ($attrs);
+
+if ($op eq 'insert') {
+ die('Do not use the where option with the insert op') if ($where);
+ die('Do not use the attrs option with the insert op') if ($attrs);
+ my $obj = $resultset->create( $set );
+ print ''.ref($resultset).' ID: '.join(',',$obj->id())."\n";
+}
+elsif ($op eq 'update') {
+ $resultset = $resultset->search( ($where||{}) );
+ my $count = $resultset->count();
+ print "This action will modify $count ".ref($resultset)." records.\n" if (!$quiet);
+ if ( $force || confirm() ) {
+ $resultset->update_all( $set );
+ }
+}
+elsif ($op eq 'delete') {
+ die('Do not use the set option with the delete op') if ($set);
+ $resultset = $resultset->search( ($where||{}), ($attrs||()) );
+ my $count = $resultset->count();
+ print "This action will delete $count ".ref($resultset)." records.\n" if (!$quiet);
+ if ( $force || confirm() ) {
+ $resultset->delete_all();
+ }
+}
+elsif ($op eq 'select') {
+ die('Do not use the set option with the select op') if ($set);
+ my $csv = $csv_class->new({
+ sep_char => ( $format eq 'tsv' ? "\t" : ',' ),
+ });
+ $resultset = $resultset->search( ($where||{}), ($attrs||()) );
+ my @columns = $resultset->result_source->columns();
+ $csv->combine( @columns );
+ print $csv->string()."\n";
+ while (my $row = $resultset->next()) {
+ my @fields;
+ foreach my $column (@columns) {
+ push( @fields, $row->get_column($column) );
+ }
+ $csv->combine( @fields );
+ print $csv->string()."\n";
+ }
+}
+
+sub confirm {
+ print "Are you sure you want to do this? (type YES to confirm) ";
+ my $response = <STDIN>;
+ return 1 if ($response=~/^YES/);
+ return;
+}
+
+__END__
+
+=head1 NAME
+
+dbicadmin - Execute operations upon DBIx::Class objects.
+
+=head1 SYNOPSIS
+
+ dbicadmin --op=insert --schema=My::Schema --class=Class --set=JSON
+ dbicadmin --op=update --schema=My::Schema --class=Class --set=JSON --where=JSON
+ dbicadmin --op=delete --schema=My::Schema --class=Class --where=JSON
+ dbicadmin --op=select --schema=My::Schema --class=Class --where=JSON --format=tsv
+
+=head1 DESCRIPTION
+
+This utility provides the ability to run INSERTs, UPDATEs,
+DELETEs, and SELECTs on any DBIx::Class object.
+
+=head1 OPTIONS
+
+=head2 op
+
+The type of operation. Valid values are insert, update, delete,
+and select.
+
+=head2 schema
+
+The name of your schema class.
+
+=head2 class
+
+The name of the class, within your schema, that you want to run
+the operation on.
+
+=head2 connect
+
+A JSON array to be passed to your schema class upon connecting.
+The array will need to be compatible with whatever the DBIC
+->connect() method requires.
+
+=head2 set
+
+This option must be a valid JSON data string and is passed in to
+the DBIC update() method. Use this option with the update
+and insert ops.
+
+=head2 where
+
+This option must be a valid JSON data string and is passed in as
+the first argument to the DBIC search() method. Use this
+option with the update, delete, and select ops.
+
+=head2 attrs
+
+This option must be a valid JSON data string and is passed in as
+the second argument to the DBIC search() method. Use this
+option with the update, delete, and select ops.
+
+=head2 help
+
+Display this help page.
+
+=head2 force
+
+Suppresses the confirmation dialogues that are usually displayed
+when someone runs a DELETE or UPDATE action.
+
+=head2 quiet
+
+Do not display status messages.
+
+=head2 trace
+
+Turns on tracing on the DBI storage, thus printing SQL as it is
+executed.
+
+=head2 tlibs
+
+This option is purely for testing during the DBIC installation. Do
+not use it.
+
+=head1 JSON
+
+JSON is a lightweight data-interchange format. It allows you
+to express complex data structures for use in the where and
+set options.
+
+This module turns on L<JSON>'s BareKey and QuotApos options so
+that your data can look a bit more readable.
+
+ --where={"this":"that"} # generic JSON
+ --where={this:'that'} # with BareKey and QuotApos
+
+Consider wrapping your JSON in outer quotes so that you don't
+have to escape your inner quotes.
+
+ --where={this:\"that\"} # no outer quote
+ --where='{this:"that"}' # outer quoted
+
+=head1 AUTHOR
+
+Aran Deltac <bluefeet@cpan.org>
+
+=head1 LICENSE
+
+You may distribute this code under the same terms as Perl itself.
+
--- /dev/null
+use Test::More;
+
+eval "use Test::Pod::Coverage 1.04";
+plan skip_all => 'Test::Pod::Coverage 1.04 required' if $@;
+plan skip_all => 'set TEST_POD to enable this test' unless $ENV{TEST_POD};
+
+my @modules = sort { $a cmp $b } (Test::Pod::Coverage::all_modules());
+plan tests => scalar(@modules);
+
+# Since this is about checking documentation, a little documentation
+# of what this is doing might be in order...
+# The exceptions structure below is a hash keyed by the module
+# name. The value for each is a hash, which contains one or more
+# (although currently more than one makes no sense) of the following
+# things:-
+# skip => a true value means this module is not checked
+# ignore => array ref containing list of methods which
+# do not need to be documented.
+my $exceptions = {
+ 'DBIx::Class' => {
+ ignore => [
+ qw/MODIFY_CODE_ATTRIBUTES
+ component_base_class
+ mk_classdata/
+ ]
+ },
+ 'DBIx::Class::CDBICompat::AccessorMapping' => { skip => 1 },
+ 'DBIx::Class::CDBICompat::AttributeAPI' => { skip => 1 },
+ 'DBIx::Class::CDBICompat::AutoUpdate' => { skip => 1 },
+ 'DBIx::Class::CDBICompat::ColumnCase' => { skip => 1 },
+ 'DBIx::Class::CDBICompat::ColumnGroups' => { skip => 1 },
+ 'DBIx::Class::CDBICompat::Constraints' => { skip => 1 },
+ 'DBIx::Class::CDBICompat::Constructor' => { skip => 1 },
+ 'DBIx::Class::CDBICompat::DestroyWarning' => { skip => 1 },
+ 'DBIx::Class::CDBICompat::GetSet' => { skip => 1 },
+ 'DBIx::Class::CDBICompat::HasA' => { skip => 1 },
+ 'DBIx::Class::CDBICompat::HasMany' => { skip => 1 },
+ 'DBIx::Class::CDBICompat::ImaDBI' => { skip => 1 },
+ 'DBIx::Class::CDBICompat::LazyLoading' => { skip => 1 },
+ 'DBIx::Class::CDBICompat::LiveObjectIndex' => { skip => 1 },
+ 'DBIx::Class::CDBICompat::MightHave' => { skip => 1 },
+ 'DBIx::Class::CDBICompat::ObjIndexStubs' => { skip => 1 },
+ 'DBIx::Class::CDBICompat::Pager' => { skip => 1 },
+ 'DBIx::Class::CDBICompat::ReadOnly' => { skip => 1 },
+ 'DBIx::Class::CDBICompat::Retrieve' => { skip => 1 },
+ 'DBIx::Class::CDBICompat::Stringify' => { skip => 1 },
+ 'DBIx::Class::CDBICompat::TempColumns' => { skip => 1 },
+ 'DBIx::Class::CDBICompat::Triggers' => { skip => 1 },
+ 'DBIx::Class::ClassResolver::PassThrough' => { skip => 1 },
+ 'DBIx::Class::Componentised' => { skip => 1 },
+ 'DBIx::Class::Relationship::Accessor' => { skip => 1 },
+ 'DBIx::Class::Relationship::BelongsTo' => { skip => 1 },
+ 'DBIx::Class::Relationship::CascadeActions' => { skip => 1 },
+ 'DBIx::Class::Relationship::HasMany' => { skip => 1 },
+ 'DBIx::Class::Relationship::HasOne' => { skip => 1 },
+ 'DBIx::Class::Relationship::Helpers' => { skip => 1 },
+ 'DBIx::Class::Relationship::ManyToMany' => { skip => 1 },
+ 'DBIx::Class::Relationship::ProxyMethods' => { skip => 1 },
+ 'DBIx::Class::ResultSetProxy' => { skip => 1 },
+ 'DBIx::Class::ResultSourceProxy' => { skip => 1 },
+ 'DBIx::Class::Storage' => { skip => 1 },
+ 'DBIx::Class::Storage::DBI::DB2' => { skip => 1 },
+ 'DBIx::Class::Storage::DBI::MSSQL' => { skip => 1 },
+ 'DBIx::Class::Storage::DBI::MultiDistinctEmulation' => { skip => 1 },
+ 'DBIx::Class::Storage::DBI::ODBC400' => { skip => 1 },
+ 'DBIx::Class::Storage::DBI::ODBC::DB2_400_SQL' => { skip => 1 },
+ 'DBIx::Class::Storage::DBI::Oracle' => { skip => 1 },
+ 'DBIx::Class::Storage::DBI::Pg' => { skip => 1 },
+ 'DBIx::Class::Storage::DBI::SQLite' => { skip => 1 },
+ 'DBIx::Class::Storage::DBI::mysql' => { skip => 1 },
+ 'SQL::Translator::Parser::DBIx::Class' => { skip => 1 },
+ 'SQL::Translator::Producer::DBIx::Class::File' => { skip => 1 },
+};
+
+foreach my $module (@modules) {
+ SKIP:
+ {
+ skip "No real methods", 1 if ($exceptions->{$module}{skip});
+
+ # build parms up from ignore list
+ my $parms = {};
+ $parms->{trustme} =
+ [ map { qr/^$_$/ } @{ $exceptions->{$module}{ignore} } ]
+ if exists($exceptions->{$module}{ignore});
+
+ # run the test with the potentially modified parm set
+ pod_coverage_ok($module, $parms, "$module POD coverage");
+ }
+}
+++ /dev/null
-use Test::More;
-
-eval "use Test::Pod::Coverage 1.04";
-plan skip_all => 'Test::Pod::Coverage 1.04 required' if $@;
-plan skip_all => 'set TEST_POD to enable this test' unless $ENV{TEST_POD};
-
-all_pod_coverage_ok();
use lib qw(t/lib);
use DBICTest::ForeignComponent;
-plan tests => 2;
+plan tests => 5;
# Tests if foreign component was loaded by calling foreign's method
ok( DBICTest::ForeignComponent->foreign_test_method, 'foreign component' );
# Test for inject_base to filter out duplicates
{ package DBICTest::_InjectBaseTest;
use base qw/ DBIx::Class /;
+ package DBICTest::_InjectBaseTest::A;
+ package DBICTest::_InjectBaseTest::B;
+ package DBICTest::_InjectBaseTest::C;
}
DBICTest::_InjectBaseTest->inject_base( 'DBICTest::_InjectBaseTest', qw/
DBICTest::_InjectBaseTest::A
/],
'inject_base filters duplicates'
);
+
+# Test for a warning with incorrect order in load_components
+my @warnings = ();
+{
+ package A::Test;
+ our @ISA = 'DBIx::Class';
+ {
+ local $SIG{__WARN__} = sub { push @warnings, shift};
+ __PACKAGE__->load_components(qw(Core UTF8Columns));
+ }
+}
+like( $warnings[0], qr/Core loaded before UTF8Columns/,
+ 'warning issued for incorrect order in load_components()' );
+is( scalar @warnings, 1,
+ 'only one warning issued for incorrect load_components call' );
+
+# Test that no warning is issued for the correct order in load_components
+{
+ @warnings = ();
+ package B::Test;
+ our @ISA = 'DBIx::Class';
+ {
+ local $SIG{__WARN__} = sub { push @warnings, shift };
+ __PACKAGE__->load_components(qw(UTF8Columns Core));
+ }
+}
+is( scalar @warnings, 0,
+ 'warning not issued for correct order in load_components()' );
use strict;
+use warnings;
+
use Test::More;
use IO::File;
eval "use DBD::SQLite";
plan $@
? ( skip_all => 'needs DBD::SQLite for testing' )
- : ( tests => 7 );
+ : ( tests => 6 );
}
use lib qw(t/lib);
use_ok('DBICTest');
-
-use_ok('DBICTest::HelperRels');
+DBICTest->init_schema();
DBICTest->schema->storage->sql_maker->quote_char("'");
DBICTest->schema->storage->sql_maker->name_sep('.');
$rs = DBICTest::CD->search({},
{ 'order_by' => 'year DESC'});
{
- my $warnings;
+ my $warnings = '';
local $SIG{__WARN__} = sub { $warnings .= $_[0] };
my $first = eval{ $rs->first() };
- ok( $warnings =~ /ORDER BY terms/, "Problem with ORDER BY quotes" );
+ like( $warnings, qr/ORDER BY terms/, "Problem with ORDER BY quotes" );
}
my $order = 'year DESC';
$rs = DBICTest::CD->search({},
{ 'order_by' => \$order });
{
- my $warnings;
+ my $warnings = '';
local $SIG{__WARN__} = sub { $warnings .= $_[0] };
my $first = $rs->first();
ok( $warnings !~ /ORDER BY terms/,
BEGIN {
eval "use DBD::SQLite";
plan $ENV{DATA_DUMPER_TEST}
- ? ( tests => 3 )
+ ? ( tests => 2 )
: ( skip_all => 'Set $ENV{DATA_DUMPER_TEST} to run this test' );
}
use_ok('DBICTest');
-use_ok('DBICTest::HelperRels');
-
-my $rs = DBICTest::CD->search(
- { 'artist.name' => 'We Are Goth',
- 'liner_notes.notes' => 'Kill Yourself!' },
- { join => [ qw/artist liner_notes/ ] });
+my $schema = DBICTest->init_schema();
+my $rs = $schema->resultset('CD')->search({
+ 'artist.name' => 'We Are Goth',
+ 'liner_notes.notes' => 'Kill Yourself!',
+}, {
+ join => [ qw/artist liner_notes/ ],
+});
Dumper($rs);
-$rs = DBICTest::CD->search(
- { 'artist.name' => 'We Are Goth',
- 'liner_notes.notes' => 'Kill Yourself!' },
- { join => [ qw/artist liner_notes/ ] });
+$rs = $schema->resultset('CD')->search({
+ 'artist.name' => 'We Are Goth',
+ 'liner_notes.notes' => 'Kill Yourself!',
+}, {
+ join => [ qw/artist liner_notes/ ],
+});
-cmp_ok( $rs + 0, '==', 1, "Single record in after death with dumper");
+cmp_ok( $rs->count(), '==', 1, "Single record in after death with dumper");
1;
--- /dev/null
+#!/usr/bin/perl
+
+use strict;
+use warnings;
+use Test::More;
+
+BEGIN {
+ eval "use DBD::SQLite";
+ plan $@
+ ? ( skip_all => 'needs DBD::SQLite for testing' )
+ : ( tests => 12 );
+}
+
+use lib qw(t/lib);
+
+use_ok('DBICTest');
+DBICTest->init_schema();
+
+my $cbworks = 0;
+
+DBICTest->schema->storage->debugcb(sub { $cbworks = 1; });
+DBICTest->schema->storage->debug(0);
+my $rs = DBICTest::CD->search({});
+$rs->count();
+ok(!$cbworks, 'Callback not called with debug disabled');
+
+DBICTest->schema->storage->debug(1);
+
+$rs->count();
+ok($cbworks, 'Debug callback worked.');
+
+my $prof = new DBIx::Test::Profiler();
+DBICTest->schema->storage->debugobj($prof);
+
+# Test non-transaction calls.
+$rs->count();
+ok($prof->{'query_start'}, 'query_start called');
+ok($prof->{'query_end'}, 'query_end called');
+ok(!$prof->{'txn_begin'}, 'txn_begin not called');
+ok(!$prof->{'txn_commit'}, 'txn_commit not called');
+
+$prof->reset();
+
+# Test transaction calls
+DBICTest->schema->txn_begin();
+ok($prof->{'txn_begin'}, 'txn_begin called');
+
+$rs = DBICTest::CD->search({});
+$rs->count();
+ok($prof->{'query_start'}, 'query_start called');
+ok($prof->{'query_end'}, 'query_end called');
+
+DBICTest->schema->txn_commit();
+ok($prof->{'txn_commit'}, 'txn_commit called');
+
+$prof->reset();
+
+# Test a rollback
+DBICTest->schema->txn_begin();
+$rs = DBICTest::CD->search({});
+$rs->count();
+DBICTest->schema->txn_rollback();
+ok($prof->{'txn_rollback'}, 'txn_rollback called');
+
+DBICTest->schema->storage->debug(0);
+
+package DBIx::Test::Profiler;
+use strict;
+
+sub new {
+ my $self = bless({});
+}
+
+sub query_start {
+ my $self = shift();
+ $self->{'query_start'} = 1;
+}
+
+sub query_end {
+ my $self = shift();
+ $self->{'query_end'} = 1;
+}
+
+sub txn_begin {
+ my $self = shift();
+ $self->{'txn_begin'} = 1;
+}
+
+sub txn_rollback {
+ my $self = shift();
+ $self->{'txn_rollback'} = 1;
+}
+
+sub txn_commit {
+ my $self = shift();
+ $self->{'txn_commit'} = 1;
+}
+
+sub reset {
+ my $self = shift();
+
+ $self->{'query_start'} = 0;
+ $self->{'query_end'} = 0;
+ $self->{'txn_begin'} = 0;
+ $self->{'txn_rollback'} = 0;
+ $self->{'txn_end'} = 0;
+}
+
+1;
--- /dev/null
+use strict;
+use warnings;
+
+use Test::More;
+use lib qw(t/lib);
+use DBICTest;
+
+plan tests => 1;
+
+# Set up the "usual" sqlite for DBICTest
+my $normal_schema = DBICTest->init_schema;
+
+# Steal the dsn, which should be like 'dbi:SQLite:t/var/DBIxClass.db'
+my $normal_dsn = $normal_schema->storage->connect_info->[0];
+
+# Make sure we have no active connection
+$normal_schema->storage->disconnect;
+
+# Make a new clone with a new connection, using a code reference
+my $code_ref_schema = $normal_schema->connect(sub { DBI->connect($normal_dsn); });
+
+# Stolen from 60core.t - this just verifies things seem to work at all
+my @art = $code_ref_schema->resultset("Artist")->search({ }, { order_by => 'name DESC'});
+cmp_ok(@art, '==', 3, "Three artists returned");
}
}
-use DBICTest::Extra; # uses Class::Inspector
+use DBICTest::ResultSetManager; # uses Class::Inspector
-my $schema = DBICTest::Extra->compose_connection('DB', 'foo');
+my $schema = DBICTest::ResultSetManager->compose_connection('DB', 'foo');
my $rs = $schema->resultset('Foo');
ok( !DB::Foo->can('bar'), 'Foo class does not have bar method' );
ok( $rs->can('bar'), 'Foo resultset class has bar method' );
-isa_ok( $rs, 'DBICTest::Extra::Foo::_resultset', 'Foo resultset class is correct' );
+isa_ok( $rs, 'DBICTest::ResultSetManager::Foo::_resultset', 'Foo resultset class is correct' );
is( $rs->bar, 'good', 'bar method works' );
--- /dev/null
+use Test::More;
+use strict;
+use warnings;
+use lib qw(t/lib);
+use DBICTest;
+
+plan tests => 7;
+
+my $schema = DBICTest->init_schema();
+my $total_cds = $schema->resultset('CD')->count;
+cmp_ok($total_cds, '>', 0, 'need cd records');
+
+# test that delete_related w/o conditions deletes all related records only
+my $artist = $schema->resultset("Artist")->find(3);
+my $artist_cds = $artist->cds->count;
+cmp_ok($artist_cds, '<', $total_cds, 'need more cds than just related cds');
+
+ok($artist->delete_related('cds'));
+cmp_ok($schema->resultset('CD')->count, '==', ($total_cds - $artist_cds), 'too many cds were deleted');
+
+$total_cds -= $artist_cds;
+
+# test that delete_related w/conditions deletes just the matched related records only
+my $artist2 = $schema->resultset("Artist")->find(2);
+my $artist2_cds = $artist2->search_related('cds')->count;
+cmp_ok($artist2_cds, '<', $total_cds, 'need more cds than related cds');
+
+ok($artist2->delete_related('cds', {title => {like => '%'}}));
+cmp_ok($schema->resultset('CD')->count, '==', ($total_cds - $artist2_cds), 'too many cds were deleted');
+
-sub run_tests {
-my $schema = shift;
+use strict;
+use warnings;
-plan tests => 47;
+use Test::More;
+use lib qw(t/lib);
+use DBICTest;
+
+my $schema = DBICTest->init_schema();
+
+plan tests => 62;
# figure out if we've got a version of sqlite that is older than 3.2.6, in
# which case COUNT(DISTINCT()) doesn't work
ok($art->update, 'Update run');
+my $record_jp = $schema->resultset("Artist")->search(undef, { join => 'cds' })->search(undef, { prefetch => 'cds' })->next;
+
+ok($record_jp, "prefetch on same rel okay");
+
+my $record_fn = $schema->resultset("Artist")->search(undef, { join => 'cds' })->search({'cds.cdid' => '1'}, {join => 'artist_undirected_maps'})->next;
+
+ok($record_fn, "funny join is okay");
+
@art = $schema->resultset("Artist")->search({ name => 'We Are In Rehab' });
cmp_ok(@art, '==', 1, "Changed artist returned by search");
$new->update;
-$new_again = $schema->resultset("Artist")->find(4);
+my $new_again = $schema->resultset("Artist")->find(4);
is($new_again->name, 'Man With A Spoon', 'Retrieved correctly');
is($new_again->ID, 'DBICTest::Artist|artist|artistid=4', 'unique object id generated correctly');
+# Test backwards compatibility
+{
+ my $warnings = '';
+ local $SIG{__WARN__} = sub { $warnings .= $_[0] };
+
+ my $artist_by_hash = $schema->resultset('Artist')->find(artistid => 4);
+ is($artist_by_hash->name, 'Man With A Spoon', 'Retrieved correctly');
+ is($artist_by_hash->ID, 'DBICTest::Artist|artist|artistid=4', 'unique object id generated correctly');
+ like($warnings, qr/deprecated/, 'warned about deprecated find usage');
+}
+
is($schema->resultset("Artist")->count, 4, 'count ok');
+# test find_or_new
+{
+ my $existing_obj = $schema->resultset('Artist')->find_or_new({
+ artistid => 4,
+ });
+
+ is($existing_obj->name, 'Man With A Spoon', 'find_or_new: found existing artist');
+ ok($existing_obj->in_storage, 'existing artist is in storage');
+
+ my $new_obj = $schema->resultset('Artist')->find_or_new({
+ artistid => 5,
+ name => 'find_or_new',
+ });
+
+ is($new_obj->name, 'find_or_new', 'find_or_new: instantiated a new artist');
+ ok(! $new_obj->in_storage, 'new artist is not in storage');
+}
+
my $cd = $schema->resultset("CD")->find(1);
my %cols = $cd->get_columns;
my $search = [ { 'tags.tag' => 'Cheesy' }, { 'tags.tag' => 'Blue' } ];
-my $or_rs = $schema->resultset("CD")->search($search, { join => 'tags',
+my( $or_rs ) = $schema->resultset("CD")->search_rs($search, { join => 'tags',
order_by => 'cdid' });
cmp_ok($or_rs->count, '==', 5, 'Search with OR ok');
is($art->name, 'Test _cond_for_update_delete', 'updated second artist name');
}
-#test cascade_delete thru many_many relations
-my $art_del = $schema->resultset("Artist")->find({ artistid => 1 });
-$art_del->delete;
-cmp_ok( $schema->resultset("CD")->search({artist => 1}), '==', 0, 'Cascading through has_many top level.');
-cmp_ok( $schema->resultset("CD_to_Producer")->search({cd => 1}), '==', 0, 'Cascading through has_many children.');
+# test source_name
+{
+ # source_name should be set for normal modules
+ is($schema->source('CD')->source_name, 'CD', 'source_name is set to moniker');
-$schema->source("Artist")->{_columns}{'artistid'} = {};
+ # test the result source that sets source_name explicitly
+ ok($schema->source('SourceNameArtists'), 'SourceNameArtists result source exists');
-my $typeinfo = $schema->source("Artist")->column_info('artistid');
-is($typeinfo->{data_type}, 'INTEGER', 'column_info ok');
-$schema->source("Artist")->column_info('artistid');
-ok($schema->source("Artist")->{_columns_info_loaded} == 1, 'Columns info flag set');
+ my @artsn = $schema->resultset('SourceNameArtists')->search({}, { order_by => 'name DESC' });
+ cmp_ok(@artsn, '==', 4, "Four artists returned");
+
+ # make sure subclasses that don't set source_name are ok
+ ok($schema->source('ArtistSubclass', 'ArtistSubclass exists'));
+}
my $newbook = $schema->resultset( 'Bookmark' )->find(1);
};
ok(!$@, "stringify to false value doesn't cause error");
+# test cascade_delete through many_to_many relations
+{
+ my $art_del = $schema->resultset("Artist")->find({ artistid => 1 });
+ $art_del->delete;
+ cmp_ok( $schema->resultset("CD")->search({artist => 1}), '==', 0, 'Cascading through has_many top level.');
+ cmp_ok( $schema->resultset("CD_to_Producer")->search({cd => 1}), '==', 0, 'Cascading through has_many children.');
+}
+
+# test column_info
+{
+ $schema->source("Artist")->{_columns}{'artistid'} = {};
+
+ my $typeinfo = $schema->source("Artist")->column_info('artistid');
+ is($typeinfo->{data_type}, 'INTEGER', 'column_info ok');
+ $schema->source("Artist")->column_info('artistid');
+ ok($schema->source("Artist")->{_columns_info_loaded} == 1, 'Columns info flag set');
+}
+
+# test remove_columns
+{
+ is_deeply([$schema->source('CD')->columns], [qw/cdid artist title year/]);
+ $schema->source('CD')->remove_columns('year');
+ is_deeply([$schema->source('CD')->columns], [qw/cdid artist title/]);
}
-1;
-sub run_tests {
-my $schema = shift;
-
+use strict;
+use warnings;
+
+use Test::More;
+use lib qw(t/lib);
+use DBICTest;
+
+my $schema = DBICTest->init_schema();
+
plan tests => 3;
# add some rows inside a transaction and commit it
'name' => {
'data_type' => 'varchar',
'is_nullable' => 0,
- }
+ },
};
is_deeply($type_info, $test_type_info, 'columns_info_for - column data types');
-}
-
-1;
-sub run_tests {
-my $schema = shift;
+use strict;
+use warnings;
-plan tests => 4;
-$artist = DBICTest::Artist->find(1);
+use Test::More;
+use lib qw(t/lib);
+use DBICTest;
+
+my $schema = DBICTest->init_schema();
+
+plan tests => 5;
+
+my $artist = DBICTest::Artist->find(1);
ok($artist->find_related('twokeys', {cd => 1}), "find multiple pks using relationships + args");
+
+ok($schema->resultset("FourKeys")->search({ foo => 1, bar => 2 })->find({ hello => 3, goodbye => 4 }), "search on partial key followed by a find");
ok($schema->resultset("FourKeys")->find(1,2,3,4), "find multiple pks without hash");
ok($schema->resultset("FourKeys")->find(5,4,3,6), "find multiple pks without hash");
is($schema->resultset("FourKeys")->find(1,2,3,4)->ID, 'DBICTest::FourKeys|fourkeys|bar=2|foo=1|goodbye=4|hello=3', 'unique object id ok for multiple pks');
-}
-
-1;
--- /dev/null
+use strict;
+use warnings;
+
+use Test::More;
+use lib qw(t/lib);
+use DBICTest;
+
+my $schema = DBICTest->init_schema();
+
+plan tests => 54;
+
+# has_a test
+my $cd = $schema->resultset("CD")->find(4);
+my ($artist) = ($INC{'DBICTest/HelperRels'}
+ ? $cd->artist
+ : $cd->search_related('artist'));
+is($artist->name, 'Random Boy Band', 'has_a search_related ok');
+
+# has_many test with an order_by clause defined
+$artist = $schema->resultset("Artist")->find(1);
+my @cds = ($INC{'DBICTest/HelperRels'}
+ ? $artist->cds
+ : $artist->search_related('cds'));
+is( $cds[1]->title, 'Spoonful of bees', 'has_many search_related with order_by ok' );
+
+# search_related with additional abstract query
+@cds = ($INC{'DBICTest/HelperRels'}
+ ? $artist->cds({ title => { like => '%of%' } })
+ : $artist->search_related('cds', { title => { like => '%of%' } } )
+ );
+is( $cds[1]->title, 'Forkful of bees', 'search_related with abstract query ok' );
+
+# creating a related object
+if ($INC{'DBICTest/HelperRels.pm'}) {
+ $artist->add_to_cds({ title => 'Big Flop', year => 2005 });
+} else {
+ $artist->create_related( 'cds', {
+ title => 'Big Flop',
+ year => 2005,
+ } );
+}
+
+is( ($artist->search_related('cds'))[3]->title, 'Big Flop', 'create_related ok' );
+
+my( $rs_from_list ) = $artist->search_related_rs('cds');
+is( ref($rs_from_list), 'DBIx::Class::ResultSet', 'search_related_rs in list context returns rs' );
+
+( $rs_from_list ) = $artist->cds_rs();
+is( ref($rs_from_list), 'DBIx::Class::ResultSet', 'relation_rs in list context returns rs' );
+
+# count_related
+is( $artist->count_related('cds'), 4, 'count_related ok' );
+
+# set_from_related
+my $track = $schema->resultset("Track")->create( {
+ trackid => 1,
+ cd => 3,
+ position => 98,
+ title => 'Hidden Track'
+} );
+$track->set_from_related( cd => $cd );
+
+is($track->disc->cdid, 4, 'set_from_related ok, including alternative accessor' );
+
+$track->set_from_related( cd => undef );
+
+ok( !defined($track->cd), 'set_from_related with undef ok');
+
+
+# update_from_related, the same as set_from_related, but it calls update afterwards
+$track = $schema->resultset("Track")->create( {
+ trackid => 2,
+ cd => 3,
+ position => 99,
+ title => 'Hidden Track'
+} );
+$track->update_from_related( cd => $cd );
+
+my $t_cd = ($schema->resultset("Track")->search( cd => 4, position => 99 ))[0]->cd;
+
+is( $t_cd->cdid, 4, 'update_from_related ok' );
+
+# find_or_create_related with an existing record
+$cd = $artist->find_or_create_related( 'cds', { title => 'Big Flop' } );
+is( $cd->year, 2005, 'find_or_create_related on existing record ok' );
+
+# find_or_create_related creating a new record
+$cd = $artist->find_or_create_related( 'cds', {
+ title => 'Greatest Hits',
+ year => 2006,
+} );
+is( $cd->title, 'Greatest Hits', 'find_or_create_related new record ok' );
+@cds = $artist->search_related('cds');
+is( ($artist->search_related('cds'))[4]->title, 'Greatest Hits', 'find_or_create_related new record search ok' );
+
+$artist->delete_related( cds => { title => 'Greatest Hits' });
+cmp_ok( $schema->resultset("CD")->search( title => 'Greatest Hits' ), '==', 0, 'delete_related ok' );
+
+# find_or_new_related with an existing record
+$cd = $artist->find_or_new_related( 'cds', { title => 'Big Flop' } );
+is( $cd->year, 2005, 'find_or_new_related on existing record ok' );
+ok( $cd->in_storage, 'find_or_new_related on existing record: is in_storage' );
+
+# find_or_new_related instantiating a new record
+$cd = $artist->find_or_new_related( 'cds', {
+ title => 'Greatest Hits 2: Louder Than Ever',
+ year => 2007,
+} );
+is( $cd->title, 'Greatest Hits 2: Louder Than Ever', 'find_or_new_related new record ok' );
+ok( ! $cd->in_storage, 'find_or_new_related on a new record: not in_storage' );
+
+SKIP: {
+ skip "relationship checking needs fixing", 1;
+ # try to add a bogus relationship using the wrong cols
+ eval {
+ DBICTest::Schema::Artist->add_relationship(
+ tracks => 'DBICTest::Schema::Track',
+ { 'foreign.cd' => 'self.cdid' }
+ );
+ };
+ like($@, qr/Unknown column/, 'failed when creating a rel with invalid key, ok');
+}
+
+# another bogus relationship using no join condition
+eval {
+ DBICTest::Schema::Artist->add_relationship( tracks => 'DBICTest::Track' );
+};
+like($@, qr/join condition/, 'failed when creating a rel without join condition, ok');
+
+# many_to_many helper tests
+$cd = $schema->resultset("CD")->find(1);
+my @producers = $cd->producers();
+is( $producers[0]->name, 'Matt S Trout', 'many_to_many ok' );
+is( $cd->producers_sorted->next->name, 'Bob The Builder',
+ 'sorted many_to_many ok' );
+is( $cd->producers_sorted(producerid => 3)->next->name, 'Fred The Phenotype',
+ 'sorted many_to_many with search condition ok' );
+
+$cd = $schema->resultset('CD')->find(2);
+my $prod_rs = $cd->producers();
+my $prod_before_count = $schema->resultset('Producer')->count;
+is( $prod_rs->count, 0, "CD doesn't yet have any producers" );
+my $prod = $schema->resultset('Producer')->find(1);
+$cd->add_to_producers($prod);
+is( $prod_rs->count(), 1, 'many_to_many add_to_$rel($obj) count ok' );
+is( $prod_rs->first->name, 'Matt S Trout',
+ 'many_to_many add_to_$rel($obj) ok' );
+$cd->remove_from_producers($prod);
+is( $schema->resultset('Producer')->find(1)->name, 'Matt S Trout',
+ "producer object exists after remove of link" );
+is( $prod_rs->count, 0, 'many_to_many remove_from_$rel($obj) ok' );
+$cd->add_to_producers({ name => 'Testy McProducer' });
+is( $schema->resultset('Producer')->count, $prod_before_count+1,
+ 'add_to_$rel($hash) inserted a new producer' );
+is( $prod_rs->count(), 1, 'many_to_many add_to_$rel($hash) count ok' );
+is( $prod_rs->first->name, 'Testy McProducer',
+ 'many_to_many add_to_$rel($hash) ok' );
+$cd->add_to_producers({ name => 'Jack Black' });
+is( $prod_rs->count(), 2, 'many_to_many add_to_$rel($hash) count ok' );
+$cd->set_producers($schema->resultset('Producer')->all);
+is( $cd->producers->count(), $prod_before_count+2,
+ 'many_to_many set_$rel(@objs) count ok' );
+$cd->set_producers($schema->resultset('Producer')->find(1));
+is( $cd->producers->count(), 1, 'many_to_many set_$rel($obj) count ok' );
+
+eval { $cd->remove_from_producers({ fake => 'hash' }); };
+like( $@, qr/needs an object/, 'remove_from_$rel($hash) dies correctly' );
+
+eval { $cd->add_to_producers(); };
+like( $@, qr/needs an object or hashref/,
+ 'add_to_$rel(undef) dies correctly' );
+
+# many_to_many stresstest
+my $twokey = $schema->resultset('TwoKeys')->find(1,1);
+my $fourkey = $schema->resultset('FourKeys')->find(1,2,3,4);
+
+is( $twokey->fourkeys->count, 0, 'twokey has no fourkeys' );
+$twokey->add_to_fourkeys($fourkey, { autopilot => 'engaged' });
+my $got_fourkey = $twokey->fourkeys({ sensors => 'online' })->first;
+is( $twokey->fourkeys->count, 1, 'twokey has one fourkey' );
+is( $got_fourkey->$_, $fourkey->$_,
+ 'fourkeys row has the correct value for column '.$_ )
+ for (qw(foo bar hello goodbye sensors));
+$twokey->remove_from_fourkeys($fourkey);
+is( $twokey->fourkeys->count, 0, 'twokey has no fourkeys' );
+is( $twokey->fourkeys_to_twokeys->count, 0,
+ 'twokey has no links to fourkey' );
+
+
+# test undirected many-to-many relationship (e.g. "related artists")
+my $undir_maps = $schema->resultset("Artist")->find(1)->artist_undirected_maps;
+is($undir_maps->count, 1, 'found 1 undirected map for artist 1');
+
+$undir_maps = $schema->resultset("Artist")->find(2)->artist_undirected_maps;
+is($undir_maps->count, 1, 'found 1 undirected map for artist 2');
+
+my $mapped_rs = $undir_maps->search_related('mapped_artists');
+
+my @art = $mapped_rs->all;
+
+cmp_ok(@art, '==', 2, "Both artist returned from map");
+
+my $searched = $mapped_rs->search({'mapped_artists.artistid' => {'!=', undef}});
+
+cmp_ok($searched->count, '==', 2, "Both artist returned from map after adding another condition");
+
+# check join through cascaded has_many relationships
+$artist = $schema->resultset("Artist")->find(1);
+my $trackset = $artist->cds->search_related('tracks');
+# LEFT join means we also see the trackless additional album...
+cmp_ok($trackset->count, '==', 11, "Correct number of tracks for artist");
+
+# now see about updating everything that belongs to artist 2 to artist 3
+$artist = $schema->resultset("Artist")->find(2);
+my $nartist = $schema->resultset("Artist")->find(3);
+cmp_ok($artist->cds->count, '==', 1, "Correct orig #cds for artist");
+cmp_ok($nartist->cds->count, '==', 1, "Correct orig #cds for artist");
+$artist->cds->update({artist => $nartist->id});
+cmp_ok($artist->cds->count, '==', 0, "Correct new #cds for artist");
+cmp_ok($nartist->cds->count, '==', 2, "Correct new #cds for artist");
+
-sub run_tests {
-my $schema = shift;
+use strict;
+use warnings;
+
+use Test::More;
+use lib qw(t/lib);
+use DBICTest;
+
+my $schema = DBICTest->init_schema();
plan tests => 12;
is( $it->next->title, "Generic Manufactured Singles", "software iterator->next ok" );
-}
-
-1;
-sub run_tests {
-my $schema = shift;
+use strict;
+use warnings;
+
+use Test::More;
+use lib qw(t/lib);
+use DBICTest;
+
+DBICTest::Schema::CD->add_column('year');
+my $schema = DBICTest->init_schema();
eval { require DateTime };
plan skip_all => "Need DateTime for inflation tests" if $@;
-plan tests => 3;
+plan tests => 4;
DBICTest::Schema::CD->inflate_column( 'year',
{ inflate => sub { DateTime->new( year => shift ) },
is( ref($cd->year), 'DateTime', 'year is a DateTime, ok' );
+is( $cd->year->year, 1997, 'inflated year ok' );
+
is( $cd->year->month, 1, 'inflated month ok' );
# deflate test
($cd) = $schema->resultset("CD")->search( year => $now->year );
is( $cd->year->year, $now->year, 'deflate ok' );
-}
-
-1;
-sub run_tests {
-my $schema = shift;
+use strict;
+use warnings;
+
+use Test::More;
+use lib qw(t/lib);
+use DBICTest;
+
+my $schema = DBICTest->init_schema();
eval { require DateTime };
plan skip_all => "Need DateTime for inflation tests" if $@;
($cd) = $schema->resultset("CD")->search( year => $now->year );
is( $cd->year->year, $now->year, 'deflate ok' );
-}
-
-1;
-sub run_tests {
-my $schema = shift;
+use strict;
+use warnings;
+
+use Test::More;
+use lib qw(t/lib);
+use DBICTest;
+
+my $schema = DBICTest->init_schema();
use Data::Dumper;
ok($inflated = $entry->serialized, 'arrayref inflation ok');
is_deeply($inflated, $complex2->{serialized}, 'inflated array matches original');
-}
-
-1;
-sub run_tests {
-my $schema = shift;
+use strict;
+use warnings;
+
+use Test::More;
+use lib qw(t/lib);
+use DBICTest;
+
+my $schema = DBICTest->init_schema();
BEGIN {
eval "use DBD::SQLite";
$art->discard_changes;
-}
-
-1;
-sub run_tests {
-my $schema = shift;
+use strict;
+use warnings;
+
+use Test::More;
+use lib qw(t/lib);
+use DBICTest;
+
+my $schema = DBICTest->init_schema();
plan tests => 2;
my $copied = $artist->copy({ name => 'Don\'t tell the RIAA', artistid => undef });
is($copied->name, 'Don\'t tell the RIAA', "Copied with PKs ok.");
-}
-
-1;
-sub run_tests {
-my $schema = shift;
+use strict;
+use warnings;
+
+use Test::More;
+use lib qw(t/lib);
+use DBICTest;
+use DBI::Const::GetInfoType;
my ($dsn, $user, $pass) = @ENV{map { "DBICTEST_MYSQL_${_}" } qw/DSN USER PASS/};
#warn "$dsn $user $pass";
-plan skip_all, 'Set $ENV{DBICTEST_MYSQL_DSN}, _USER and _PASS to run this test'
+plan skip_all => 'Set $ENV{DBICTEST_MYSQL_DSN}, _USER and _PASS to run this test'
unless ($dsn && $user);
plan tests => 5;
'default_value' => undef,
},
'charfield' => {
- 'data_type' => 'VARCHAR',
+ 'data_type' => 'CHAR',
'is_nullable' => 1,
'size' => 10,
'default_value' => undef,
},
};
+SKIP: {
+ my $mysql_version = $dbh->get_info( $GetInfoType{SQL_DBMS_VER} );
+ skip "Cannot determine MySQL server version", 1 if !$mysql_version;
-my $type_info = MySQLTest->schema->storage->columns_info_for('artist');
-is_deeply($type_info, $test_type_info, 'columns_info_for - column data types');
+ my ($v1, $v2, $v3) = $mysql_version =~ /^(\d+)\.(\d+)(?:\.(\d+))?/;
+ skip "Cannot determine MySQL server version", 1 if !$v1 || !defined($v2);
+ $v3 ||= 0;
+ if( ($v1 < 5) || ($v1 == 5 && $v2 == 0 && $v3 <= 3) ) {
+ $test_type_info->{charfield}->{data_type} = 'VARCHAR';
+ }
-# clean up our mess
-$dbh->do("DROP TABLE artist");
-
+ my $type_info = MySQLTest->schema->storage->columns_info_for('artist');
+ is_deeply($type_info, $test_type_info, 'columns_info_for - column data types');
}
-1;
+# clean up our mess
+$dbh->do("DROP TABLE artist");
--- /dev/null
+use strict;
+use warnings;
+
+use Test::More;
+use lib qw(t/lib);
+use DBICTest;
+
+{
+ package DBICTest::Schema::Casecheck;
+
+ use strict;
+ use warnings;
+ use base 'DBIx::Class';
+
+ __PACKAGE__->load_components(qw/PK::Auto Core/);
+ __PACKAGE__->table('casecheck');
+ __PACKAGE__->add_columns(qw/id name NAME uc_name/);
+ __PACKAGE__->set_primary_key('id');
+
+}
+
+my ($dsn, $user, $pass) = @ENV{map { "DBICTEST_PG_${_}" } qw/DSN USER PASS/};
+
+#warn "$dsn $user $pass";
+
+plan skip_all => 'Set $ENV{DBICTEST_PG_DSN}, _USER and _PASS to run this test'
+ . ' (note: creates and drops tables named artist and casecheck!)' unless ($dsn && $user);
+
+plan tests => 8;
+
+DBICTest::Schema->load_classes( 'Casecheck' );
+DBICTest::Schema->compose_connection('PgTest' => $dsn, $user, $pass);
+
+my $dbh = PgTest->schema->storage->dbh;
+PgTest->schema->source("Artist")->name("testschema.artist");
+$dbh->do("CREATE SCHEMA testschema;");
+$dbh->do("CREATE TABLE testschema.artist (artistid serial PRIMARY KEY, name VARCHAR(100), charfield CHAR(10));");
+ok ( $dbh->do('CREATE TABLE testschema.casecheck (id serial PRIMARY KEY, "name" VARCHAR(1), "NAME" VARCHAR(2), "UC_NAME" VARCHAR(3));'), 'Creation of casecheck table');
+
+PgTest::Artist->load_components('PK::Auto');
+
+my $new = PgTest::Artist->create({ name => 'foo' });
+
+is($new->artistid, 1, "Auto-PK worked");
+
+$new = PgTest::Artist->create({ name => 'bar' });
+
+is($new->artistid, 2, "Auto-PK worked");
+
+my $test_type_info = {
+ 'artistid' => {
+ 'data_type' => 'integer',
+ 'is_nullable' => 0,
+ 'size' => 4,
+ },
+ 'name' => {
+ 'data_type' => 'character varying',
+ 'is_nullable' => 1,
+ 'size' => 100,
+ 'default_value' => undef,
+ },
+ 'charfield' => {
+ 'data_type' => 'character',
+ 'is_nullable' => 1,
+ 'size' => 10,
+ 'default_value' => undef,
+ },
+};
+
+
+my $type_info = PgTest->schema->storage->columns_info_for('testschema.artist');
+my $artistid_defval = delete $type_info->{artistid}->{default_value};
+like($artistid_defval,
+ qr/^nextval\('([^\.]*\.){0,1}artist_artistid_seq'::(?:text|regclass)\)/,
+ 'columns_info_for - sequence matches Pg get_autoinc_seq expectations');
+is_deeply($type_info, $test_type_info,
+ 'columns_info_for - column data types');
+
+my $name_info = PgTest::Casecheck->column_info( 'name' );
+is( $name_info->{size}, 1, "Case sensitive matching info for 'name'" );
+
+my $NAME_info = PgTest::Casecheck->column_info( 'NAME' );
+is( $NAME_info->{size}, 2, "Case sensitive matching info for 'NAME'" );
+
+my $uc_name_info = PgTest::Casecheck->column_info( 'uc_name' );
+is( $uc_name_info->{size}, 3, "Case insensitive matching info for 'uc_name'" );
+
+$dbh->do("DROP TABLE testschema.artist;");
+$dbh->do("DROP TABLE testschema.casecheck;");
+$dbh->do("DROP SCHEMA testschema;");
+
-sub run_tests {
-my $schema = shift;
+use strict;
+use warnings;
+
+use Test::More;
+use lib qw(t/lib);
+use DBICTest;
my ($dsn, $user, $pass) = @ENV{map { "DBICTEST_ORA_${_}" } qw/DSN USER PASS/};
-plan skip_all, 'Set $ENV{DBICTEST_ORA_DSN}, _USER and _PASS to run this test. ' .
+plan skip_all => 'Set $ENV{DBICTEST_ORA_DSN}, _USER and _PASS to run this test. ' .
'Warning: This test drops and creates tables called \'artist\', \'cd\' and \'track\''
unless ($dsn && $user && $pass);
my $tcount = OraTest::Track->search(
{},
{
- select => [{count => {distinct => ['position', 'title']}}],
- as => ['count']
+ select => [{count => {distinct => ['position', 'title']}}],
+ as => ['count']
}
);
$dbh->do("DROP TABLE cd");
$dbh->do("DROP TABLE track");
-}
-
-1;
-sub run_tests {
-my $schema = shift;
+use strict;
+use warnings;
+
+use Test::More;
+use lib qw(t/lib);
+use DBICTest;
my ($dsn, $user, $pass) = @ENV{map { "DBICTEST_DB2_${_}" } qw/DSN USER PASS/};
#warn "$dsn $user $pass";
-plan skip_all, 'Set $ENV{DBICTEST_DB2_DSN}, _USER and _PASS to run this test'
+plan skip_all => 'Set $ENV{DBICTEST_DB2_DSN}, _USER and _PASS to run this test'
unless ($dsn && $user);
plan tests => 6;
my $dbh = DB2Test->schema->storage->dbh;
-{
- local $SIG{__WARN__} = sub {};
- $dbh->do("DROP TABLE artist;");
-}
+$dbh->do("DROP TABLE artist", { RaiseError => 0, PrintError => 0 });
$dbh->do("CREATE TABLE artist (artistid INTEGER GENERATED BY DEFAULT AS IDENTITY (START WITH 1, INCREMENT BY 1), name VARCHAR(255), charfield CHAR(10));");
# clean up our mess
$dbh->do("DROP TABLE artist");
-}
-
-1;
-sub run_tests {
-my $schema = shift;
+use strict;
+use warnings;
-my ($dsn, $user, $pass) = @ENV{map { "DBICTEST_DB2_${_}" } qw/DSN USER PASS/};
+use Test::More;
+use lib qw(t/lib);
+use DBICTest;
+
+my ($dsn, $user, $pass) = @ENV{map { "DBICTEST_DB2_400_${_}" } qw/DSN USER PASS/};
#warn "$dsn $user $pass";
-plan skip_all, 'Set $ENV{DBICTEST_DB2_DSN}, _USER and _PASS to run this test'
+# Probably best to pass the DBQ option in the DSN to specify a specific
+# library. Something like:
+# DBICTEST_DB2_400_DSN='dbi:ODBC:dsn=MyAS400;DBQ=MYLIB'
+plan skip_all => 'Set $ENV{DBICTEST_DB2_400_DSN}, _USER and _PASS to run this test'
unless ($dsn && $user);
plan tests => 6;
my $dbh = DB2Test->schema->storage->dbh;
-{
- local $SIG{__WARN__} = sub {};
- $dbh->do("DROP TABLE artist;");
-}
-
-$dbh->do("CREATE TABLE artist (artistid INTEGER GENERATED BY DEFAULT AS IDENTITY (START WITH 1, INCREMENT BY 1), name VARCHAR(255), charfield CHAR(10));");
+$dbh->do("DROP TABLE artist", { RaiseError => 0, PrintError => 0 });
-#'dbi:mysql:host=localhost;database=dbic_test', 'dbic_test', '');
+$dbh->do("CREATE TABLE artist (artistid INTEGER GENERATED BY DEFAULT AS IDENTITY (START WITH 1, INCREMENT BY 1), name VARCHAR(255), charfield CHAR(10))");
DB2Test::Artist->load_components('PK::Auto');
# clean up our mess
$dbh->do("DROP TABLE artist");
-}
-
-1;
-sub run_tests {
-my $schema = shift;
+use strict;
+use warnings;
+
+use Test::More;
+use lib qw(t/lib);
+use DBICTest;
my ($dsn, $user, $pass) = @ENV{map { "DBICTEST_MSSQL_${_}" } qw/DSN USER PASS/};
#warn "$dsn $user $pass";
-plan skip_all, 'Set $ENV{DBICTEST_MSSQL_DSN}, _USER and _PASS to run this test'
+plan skip_all => 'Set $ENV{DBICTEST_MSSQL_DSN}, _USER and _PASS to run this test'
unless ($dsn);
plan tests => 4;
-$schema->compose_connection( 'MSSQLTest' => $dsn, $user, $pass );
+my $storage_type = '::DBI::MSSQL';
+$storage_type = '::DBI::Sybase::MSSQL' if $dsn =~ /^dbi:Sybase:/;
+# Add more for others in the future when they exist (ODBC? ADO? JDBC?)
+
+DBICTest::Schema->storage_type($storage_type);
+DBICTest::Schema->compose_connection( 'MSSQLTest' => $dsn, $user, $pass );
my $dbh = MSSQLTest->schema->storage->dbh;
$it->next;
is( $it->next, undef, "next past end of resultset ok" );
-}
-
-1;
-sub run_tests {
-my $schema = shift;
+use strict;
+use warnings;
+
+use Test::More;
+use lib qw(t/lib);
+use DBICTest;
+
+my $schema = DBICTest->init_schema();
BEGIN {
eval "use DBD::SQLite";
);
is( $it->count, 1, "complex abstract count ok" );
-}
-
-1;
-sub run_tests {
-my $schema = shift;
+use strict;
+use warnings;
+
+use Test::More;
+use lib qw(t/lib);
+use DBICTest;
+
+my $schema = DBICTest->init_schema();
use IO::File;
eval "use DBD::SQLite";
plan $@
? ( skip_all => 'needs DBD::SQLite for testing' )
- : ( tests => 44 );
+ : ( tests => 47 );
}
# figure out if we've got a version of sqlite that is older than 3.2.6, in
is( $sa->_recurse_from(@j3), $match, 'join 3 (inner join) ok');
+my @j4 = (
+ { mother => 'person' },
+ [ [ { child => 'person', -join_type => 'left' },
+ [ { father => 'person', -join_type => 'right' },
+ { 'father.person_id' => 'child.father_id' }
+ ]
+ ],
+ { 'mother.person_id' => 'child.mother_id' }
+ ],
+);
+$match = 'person mother LEFT JOIN (person child RIGHT JOIN person father ON ('
+ . ' father.person_id = child.father_id )) ON ( mother.person_id = '
+ . 'child.mother_id )'
+ ;
+is( $sa->_recurse_from(@j4), $match, 'join 4 (nested joins + join types) ok');
+
my $rs = $schema->resultset("CD")->search(
{ 'year' => 2001, 'artist.name' => 'Caterwauler McCrae' },
{ from => [ { 'me' => 'cd' },
);
cmp_ok( scalar $rs->all, '==', scalar $rs->slice(0, $rs->count - 1), 'slice() with join has same count as all()' );
-eval { $rs->search(undef, { rows => 0, offset => 3 })->all; };
-
-ok($@, "rows => 0 errors: $@");
-
$rs = $schema->resultset("Artist")->search(
{ 'liner_notes.notes' => 'Kill Yourself!' },
{ join => { 'cds' => 'liner_notes' } });
cmp_ok($queries, '==', 1, 'Only one query run');
-# has_many resulting in an additional select if no records available despite prefetch
-my $track = $schema->resultset("Artist")->create( {
- artistid => 4,
- name => 'Artist without CDs',
-} );
+$tree_like = $schema->resultset('TreeLike')->search({'me.id' => 1});
+$tree_like = $tree_like->search_related('children')->search_related('children')->search_related('children')->first;
+is($tree_like->name, 'quux', 'Tree search_related ok');
-$queries = 0;
-$schema->storage->debug(1);
+$tree_like = $schema->resultset('TreeLike')->search({ 'children.id' => 2 });
+$tree_like = $tree_like->search_related('children', undef, { prefetch => { children => 'children' } })->first;
+is($tree_like->children->first->name, 'baz', 'Tree search_related with prefetch ok');
-my $artist_without_cds = $schema->resultset("Artist")->find(4, {
- join => [qw/ cds /],
- prefetch => [qw/ cds /],
-});
-my @no_cds = $artist_without_cds->cds;
+$tree_like = $schema->resultset('TreeLike')->search(
+ { 'children.id' => 2, 'children_2.id' => 5 },
+ { join => [qw/children children/] })->first;
+is($tree_like->name, 'foo', 'Tree with multiple has_many joins ok');
-is($queries, 1, 'prefetch ran only 1 sql statement');
+# test that collapsed joins don't get a _2 appended to the alias
-$schema->storage->debug(0);
+my $sql = '';
+$schema->storage->debugcb(sub { $sql = $_[1] });
+$schema->storage->debug(1);
-} # end run_tests
+eval {
+ my $row = $schema->resultset('Artist')->search_related('cds', undef, {
+ join => 'tracks',
+ prefetch => 'tracks',
+ })->search_related('tracks')->first;
+};
-1;
+like( $sql, qr/^SELECT tracks.trackid/, "collapsed join didn't add _2 to alias" );
+
+$schema->storage->debug(0);
-sub run_tests {
-my $schema = shift;
+use strict;
+use warnings;
+
+use Test::More;
+use lib qw(t/lib);
+use DBICTest;
+
+my $schema = DBICTest->init_schema();
eval "use DBD::SQLite";
plan skip_all => 'needs DBD::SQLite for testing' if $@;
{ join => [ qw/tags liner_notes/ ] } ),
'==', 2, "Mixed count ok");
-}
-
-1;
-sub run_tests {
-my $schema = shift;\r
-\r
-# this test will check to see if you can have 2 columns\r
-# in the same class pointing at the same other class\r
-#\r
-# example:\r
-#\r
-# +---------+ +--------------+\r
-# | SelfRef | | SelfRefAlias |\r
-# +---------+ 1-M +--------------+\r
-# | id |-------| self_ref | --+\r
-# | name | | alias | --+\r
-# +---------+ +--------------+ |\r
-# /|\ |\r
-# | |\r
-# +--------------------------------+\r
-#\r
-# see http://use.perl.org/~LTjake/journal/24876 for the\r
-# issue with CDBI\r
-\r
-plan tests => 4;\r
-\r
-my $item = $schema->resultset("SelfRef")->find( 1 );\r
-is( $item->name, 'First', 'proper start item' );\r
-\r
-my @aliases = $item->aliases;\r
-\r
-is( scalar @aliases, 1, 'proper number of aliases' );\r
-\r
-my $orig = $aliases[ 0 ]->self_ref;\r
-my $alias = $aliases[ 0 ]->alias;\r
-\r
-is( $orig->name, 'First', 'proper original' );\r
-is( $alias->name, 'Second', 'proper alias' );\r
-\r
-}\r
-\r
-1;\r
+use strict;
+use warnings;
+
+use Test::More;
+use lib qw(t/lib);
+use DBICTest;
+
+my $schema = DBICTest->init_schema();
+
+# this test will check to see if you can have 2 columns
+# in the same class pointing at the same other class
+#
+# example:
+#
+# +---------+ +--------------+
+# | SelfRef | | SelfRefAlias |
+# +---------+ 1-M +--------------+
+# | id |-------| self_ref | --+
+# | name | | alias | --+
+# +---------+ +--------------+ |
+# /|\ |
+# | |
+# +--------------------------------+
+#
+# see http://use.perl.org/~LTjake/journal/24876 for the
+# issue with CDBI
+
+plan tests => 4;
+
+my $item = $schema->resultset("SelfRef")->find( 1 );
+is( $item->name, 'First', 'proper start item' );
+
+my @aliases = $item->aliases;
+
+is( scalar @aliases, 1, 'proper number of aliases' );
+
+my $orig = $aliases[ 0 ]->self_ref;
+my $alias = $aliases[ 0 ]->alias;
+
+is( $orig->name, 'First', 'proper original' );
+is( $alias->name, 'Second', 'proper alias' );
+
--- /dev/null
+use strict;
+use warnings;
+
+use Test::More;
+use lib qw(t/lib);
+use DBICTest;
+
+my $schema = DBICTest->init_schema();
+
+plan tests => 36;
+
+is_deeply([ sort $schema->source('CD')->unique_constraint_names ], [ qw/cd_artist_title primary/ ], 'CD source has an automatically named unique constraint');
+is_deeply([ sort $schema->source('Producer')->unique_constraint_names ], [ qw/primary prod_name/ ], 'Producer source has a named unique constraint');
+
+my $artistid = 1;
+my $title = 'UNIQUE Constraint';
+
+my $cd1 = $schema->resultset('CD')->find_or_create({
+ artist => $artistid,
+ title => $title,
+ year => 2005,
+});
+
+my $cd2 = $schema->resultset('CD')->find(
+ {
+ artist => $artistid,
+ title => $title,
+ },
+ { key => 'cd_artist_title' }
+);
+
+is($cd2->get_column('artist'), $cd1->get_column('artist'), 'find by specific key: artist is correct');
+is($cd2->title, $cd1->title, 'title is correct');
+is($cd2->year, $cd1->year, 'year is correct');
+
+my $cd3 = $schema->resultset('CD')->find($artistid, $title, { key => 'cd_artist_title' });
+
+is($cd3->get_column('artist'), $cd1->get_column('artist'), 'find by specific key, ordered columns: artist is correct');
+is($cd3->title, $cd1->title, 'title is correct');
+is($cd3->year, $cd1->year, 'year is correct');
+
+my $cd4 = $schema->resultset('CD')->update_or_create(
+ {
+ artist => $artistid,
+ title => $title,
+ year => 2007,
+ },
+);
+
+ok(! $cd4->is_changed, 'update_or_create without key: row is clean');
+is($cd4->cdid, $cd2->cdid, 'cdid is correct');
+is($cd4->get_column('artist'), $cd2->get_column('artist'), 'artist is correct');
+is($cd4->title, $cd2->title, 'title is correct');
+is($cd4->year, 2007, 'updated year is correct');
+
+my $cd5 = $schema->resultset('CD')->update_or_create(
+ {
+ artist => $artistid,
+ title => $title,
+ year => 2007,
+ },
+ { key => 'cd_artist_title' }
+);
+
+ok(! $cd5->is_changed, 'update_or_create by specific key: row is clean');
+is($cd5->cdid, $cd2->cdid, 'cdid is correct');
+is($cd5->get_column('artist'), $cd2->get_column('artist'), 'artist is correct');
+is($cd5->title, $cd2->title, 'title is correct');
+is($cd5->year, 2007, 'updated year is correct');
+
+my $cd6 = $schema->resultset('CD')->update_or_create(
+ {
+ cdid => $cd2->cdid,
+ artist => 1,
+ title => $cd2->title,
+ year => 2005,
+ },
+ { key => 'primary' }
+);
+
+ok(! $cd6->is_changed, 'update_or_create by PK: row is clean');
+is($cd6->cdid, $cd2->cdid, 'cdid is correct');
+is($cd6->get_column('artist'), $cd2->get_column('artist'), 'artist is correct');
+is($cd6->title, $cd2->title, 'title is correct');
+is($cd6->year, 2005, 'updated year is correct');
+
+my $cd7 = $schema->resultset('CD')->find_or_create(
+ {
+ artist => $artistid,
+ title => $title,
+ year => 2010,
+ },
+ { key => 'cd_artist_title' }
+);
+
+is($cd7->cdid, $cd1->cdid, 'find_or_create by specific key: cdid is correct');
+is($cd7->get_column('artist'), $cd1->get_column('artist'), 'artist is correct');
+is($cd7->title, $cd1->title, 'title is correct');
+is($cd7->year, $cd1->year, 'year is correct');
+
+my $artist = $schema->resultset('Artist')->find($artistid);
+my $cd8 = $artist->find_or_create_related('cds',
+ {
+ title => $title,
+ year => 2020,
+ },
+ { key => 'cd_artist_title' }
+);
+
+is($cd8->cdid, $cd1->cdid, 'find_or_create related by specific key: cdid is correct');
+is($cd8->get_column('artist'), $cd1->get_column('artist'), 'artist is correct');
+is($cd8->title, $cd1->title, 'title is correct');
+is($cd8->year, $cd1->year, 'year is correct');
+
+my $cd9 = $artist->update_or_create_related('cds',
+ {
+ title => $title,
+ year => 2021,
+ },
+ { key => 'cd_artist_title' }
+);
+
+ok(! $cd9->is_changed, 'update_or_create by specific key: row is clean');
+is($cd9->cdid, $cd1->cdid, 'cdid is correct');
+is($cd9->get_column('artist'), $cd1->get_column('artist'), 'artist is correct');
+is($cd9->title, $cd1->title, 'title is correct');
+is($cd9->year, 2021, 'year is correct');
+
-sub run_tests {
-my $schema = shift;
+use strict;
+use warnings;
+
+use Test::More;
+use lib qw(t/lib);
+use DBICTest;
+
+my $schema = DBICTest->init_schema();
+
plan tests => 39;
my $code = sub {
eval {
(ref $schema)->txn_do(sub{});
};
- like($@, qr/class method/, '$self parameter check ok');
+ like($@, qr/storage/, "can't call txn_do without storage");
eval {
$schema->txn_do('');
};
# Force txn_rollback() to throw an exception
no warnings 'redefine';
+ no strict 'refs';
local *{"DBIx::Class::Schema::txn_rollback"} = sub{die 'FAILED'};
eval {
})->first;
ok(!defined($cd), q{failed txn_do didn't add failed txn's cd});
}
-}
-1;
use strict;
-use warnings;
+use warnings;
-sub run_tests {
-my $schema = shift;
+use Test::More;
+use lib qw(t/lib);
+use DBICTest;
+
+my $schema = DBICTest->init_schema();
plan tests => 4;
my $artist = $schema->resultset('Artist')->find(1);
my $cover_band;
{
- no warnings 'redefine';
+ no warnings qw(redefine once);
local *DBICTest::Artist::result_source_instance = \&DBICTest::Schema::Artist::result_source_instance;
$cover_band = $artist->copy;
cmp_ok($cover_cds->search_related('tags')->count, '==',
$artist_cds->search_related('tags')->count , 'duplicated count ok');
-}
-1;
-sub run_tests {
-my $schema = shift;
+use strict;
+use warnings;
+
+use Test::More;
+use lib qw(t/lib);
+use DBICTest;
+
+my $schema = DBICTest->init_schema();
my $queries;
$schema->storage->debugcb( sub{ $queries++ } );
is( scalar @{$rs->get_cache}, 2, 'set_cache() is functional' );
-$cd = $schema->resultset('CD')->find(1);
+my $cd = $schema->resultset('CD')->find(1);
$rs->clear_cache;
$schema->storage->debug(0);
-}
-
-1;
+use strict;
+use warnings;
+
+use Test::More;
+use lib qw(t/lib);
+use DBICTest;
use Storable;
-sub run_tests {
-my $schema = shift;
+my $schema = DBICTest->init_schema();
plan tests => 1;
my $copy = eval { Storable::dclone($artist) };
is_deeply($copy, $artist, 'serialize row object works');
-}
-
-1;
--- /dev/null
+use strict;
+use warnings;
+
+use Test::More;
+use lib qw(t/lib);
+use DBICTest;
+
+my $schema = DBICTest->init_schema();
+
+eval 'use Encode ; 1'
+  or plan skip_all => 'Install Encode to run this test';
+
+plan tests => 2;
+
+DBICTest::Schema::Artist->load_components('UTF8Columns');
+DBICTest::Schema::Artist->utf8_columns('name');
+Class::C3->reinitialize();
+
+my $artist = $schema->resultset("Artist")->create( { name => 'uni' } );
+ok( Encode::is_utf8( $artist->name ), 'got name with utf8 flag' );
+
+my $utf8_char = 'uniuni';
+Encode::_utf8_on($utf8_char);
+$artist->name($utf8_char);
+ok( !Encode::is_utf8( $artist->{_column_data}->{name} ),
+ 'store utf8 less chars' );
+
-sub run_tests {
-my $schema = shift;
+use strict;
+use warnings;
+
+use Test::More;
+use lib qw(t/lib);
+use DBICTest;
+
+my $schema = DBICTest->init_schema();
my $queries;
#$schema->storage->debugfh(IO::File->new('t/var/temp.trace', 'w'));
liner_notes on update');
$schema->storage->debug(0);
-}
-1;
--- /dev/null
+use strict;
+use warnings;
+
+use Test::More;
+use lib qw(t/lib);
+use DBICTest;
+
+eval "use SQL::Translator";
+plan skip_all => 'SQL::Translator required' if $@;
+
+my $schema = DBICTest->init_schema;
+
+plan tests => 53;
+
+my $translator = SQL::Translator->new(
+ parser_args => {
+ 'DBIx::Schema' => $schema,
+ },
+ producer_args => {},
+);
+
+$translator->parser('SQL::Translator::Parser::DBIx::Class');
+$translator->producer('SQLite');
+
+my $output = $translator->translate();
+
+# Note that the constraints listed here are the only ones that are tested -- if
+# more exist in the Schema than are listed here and all listed constraints are
+# correct, the test will still pass. If you add a class with UNIQUE or FOREIGN
+# KEY constraints to DBICTest::Schema, add tests here if you think the existing
+# test coverage is not sufficient
+
+my %fk_constraints = (
+
+ # TwoKeys
+ twokeys => [
+ {
+ 'display' => 'twokeys->cd',
+ 'selftable' => 'twokeys', 'foreigntable' => 'cd',
+ 'selfcols' => ['cd'], 'foreigncols' => ['cdid'],
+ on_delete => '', on_update => '',
+ },
+ {
+ 'display' => 'twokeys->artist',
+ 'selftable' => 'twokeys', 'foreigntable' => 'artist',
+ 'selfcols' => ['artist'], 'foreigncols' => ['artistid'],
+ on_delete => 'CASCADE', on_update => 'CASCADE',
+ },
+ ],
+
+ # FourKeys_to_TwoKeys
+ fourkeys_to_twokeys => [
+ {
+ 'display' => 'fourkeys_to_twokeys->twokeys',
+ 'selftable' => 'fourkeys_to_twokeys', 'foreigntable' => 'twokeys',
+ 'selfcols' => ['t_artist', 't_cd'], 'foreigncols' => ['artist', 'cd'],
+ on_delete => 'CASCADE', on_update => 'CASCADE',
+ },
+ {
+ 'display' => 'fourkeys_to_twokeys->fourkeys',
+ 'selftable' => 'fourkeys_to_twokeys', 'foreigntable' => 'fourkeys',
+ 'selfcols' => [qw(f_foo f_bar f_hello f_goodbye)],
+ 'foreigncols' => [qw(foo bar hello goodbye)],
+ on_delete => 'CASCADE', on_update => 'CASCADE',
+ },
+ ],
+
+ # CD_to_Producer
+ cd_to_producer => [
+ {
+ 'display' => 'cd_to_producer->cd',
+ 'selftable' => 'cd_to_producer', 'foreigntable' => 'cd',
+ 'selfcols' => ['cd'], 'foreigncols' => ['cdid'],
+ on_delete => 'CASCADE', on_update => 'CASCADE',
+ },
+ {
+ 'display' => 'cd_to_producer->producer',
+ 'selftable' => 'cd_to_producer', 'foreigntable' => 'producer',
+ 'selfcols' => ['producer'], 'foreigncols' => ['producerid'],
+ on_delete => '', on_update => '',
+ },
+ ],
+
+ # Self_ref_alias
+ self_ref_alias => [
+ {
+ 'display' => 'self_ref_alias->self_ref for self_ref',
+ 'selftable' => 'self_ref_alias', 'foreigntable' => 'self_ref',
+ 'selfcols' => ['self_ref'], 'foreigncols' => ['id'],
+ on_delete => 'CASCADE', on_update => 'CASCADE',
+ },
+ {
+ 'display' => 'self_ref_alias->self_ref for alias',
+ 'selftable' => 'self_ref_alias', 'foreigntable' => 'self_ref',
+ 'selfcols' => ['alias'], 'foreigncols' => ['id'],
+ on_delete => '', on_update => '',
+ },
+ ],
+
+ # CD
+ cd => [
+ {
+ 'display' => 'cd->artist',
+ 'selftable' => 'cd', 'foreigntable' => 'artist',
+ 'selfcols' => ['artist'], 'foreigncols' => ['artistid'],
+ on_delete => 'CASCADE', on_update => 'CASCADE',
+ },
+ ],
+
+ # Artist_undirected_map
+ artist_undirected_map => [
+ {
+ 'display' => 'artist_undirected_map->artist for id1',
+ 'selftable' => 'artist_undirected_map', 'foreigntable' => 'artist',
+ 'selfcols' => ['id1'], 'foreigncols' => ['artistid'],
+ on_delete => 'CASCADE', on_update => '',
+ },
+ {
+ 'display' => 'artist_undirected_map->artist for id2',
+ 'selftable' => 'artist_undirected_map', 'foreigntable' => 'artist',
+ 'selfcols' => ['id2'], 'foreigncols' => ['artistid'],
+ on_delete => 'CASCADE', on_update => '',
+ },
+ ],
+
+ # Track
+ track => [
+ {
+ 'display' => 'track->cd',
+ 'selftable' => 'track', 'foreigntable' => 'cd',
+ 'selfcols' => ['cd'], 'foreigncols' => ['cdid'],
+ on_delete => 'CASCADE', on_update => 'CASCADE',
+ },
+ ],
+
+ # TreeLike
+ treelike => [
+ {
+ 'display' => 'treelike->treelike for parent',
+ 'selftable' => 'treelike', 'foreigntable' => 'treelike',
+ 'selfcols' => ['parent'], 'foreigncols' => ['id'],
+ on_delete => 'CASCADE', on_update => 'CASCADE',
+ },
+ ],
+
+ # TwoKeyTreeLike
+ twokeytreelike => [
+ {
+ 'display' => 'twokeytreelike->twokeytreelike for parent1,parent2',
+ 'selftable' => 'twokeytreelike', 'foreigntable' => 'twokeytreelike',
+ 'selfcols' => ['parent1', 'parent2'], 'foreigncols' => ['id1','id2'],
+ on_delete => '', on_update => '',
+ },
+ ],
+
+ # Tags
+ tags => [
+ {
+ 'display' => 'tags->cd',
+ 'selftable' => 'tags', 'foreigntable' => 'cd',
+ 'selfcols' => ['cd'], 'foreigncols' => ['cdid'],
+ on_delete => 'CASCADE', on_update => 'CASCADE',
+ },
+ ],
+
+ # Bookmark
+ bookmark => [
+ {
+ 'display' => 'bookmark->link',
+ 'selftable' => 'bookmark', 'foreigntable' => 'link',
+ 'selfcols' => ['link'], 'foreigncols' => ['id'],
+ on_delete => '', on_update => '',
+ },
+ ],
+);
+
+my %unique_constraints = (
+ # CD
+ cd => [
+ {
+ 'display' => 'cd artist and title unique',
+ 'table' => 'cd', 'cols' => ['artist', 'title'],
+ },
+ ],
+
+ # Producer
+ producer => [
+ {
+ 'display' => 'producer name unique',
+ 'table' => 'producer', 'cols' => ['name'],
+ },
+ ],
+
+ # TwoKeyTreeLike
+ twokeytreelike => [
+ {
+ 'display' => 'twokeytreelike name unique',
+ 'table' => 'twokeytreelike', 'cols' => ['name'],
+ },
+ ],
+
+ # Employee
+# Constraint is commented out in DBICTest/Schema/Employee.pm
+# employee => [
+# {
+# 'display' => 'employee position and group_id unique',
+# 'table' => 'employee', cols => ['position', 'group_id'],
+# },
+# ],
+);
+
+my $tschema = $translator->schema();
+
+# Test that nonexistent constraints are not found
+my $constraint = get_constraint('FOREIGN KEY', 'cd', ['title'], 'cd', ['year']);
+ok( !defined($constraint), 'nonexistent FOREIGN KEY constraint not found' );
+$constraint = get_constraint('UNIQUE', 'cd', ['artist']);
+ok( !defined($constraint), 'nonexistent UNIQUE constraint not found' );
+
+for my $expected_constraints (keys %fk_constraints) {
+ for my $expected_constraint (@{ $fk_constraints{$expected_constraints} }) {
+ my $desc = $expected_constraint->{display};
+ my $constraint = get_constraint(
+ 'FOREIGN KEY',
+ $expected_constraint->{selftable}, $expected_constraint->{selfcols},
+ $expected_constraint->{foreigntable}, $expected_constraint->{foreigncols},
+ );
+ ok( defined($constraint), "FOREIGN KEY constraint matching `$desc' found" );
+ test_fk($expected_constraint, $constraint);
+ }
+}
+
+for my $expected_constraints (keys %unique_constraints) {
+ for my $expected_constraint (@{ $unique_constraints{$expected_constraints} }) {
+ my $desc = $expected_constraint->{display};
+ my $constraint = get_constraint(
+ 'UNIQUE', $expected_constraint->{table}, $expected_constraint->{cols},
+ );
+ ok( defined($constraint), "UNIQUE constraint matching `$desc' found" );
+ }
+}
+
+# Returns the Constraint object for the specified constraint type, table and
+# columns from the SQL::Translator schema, or undef if no matching constraint
+# is found.
+#
+# NB: $type is either 'FOREIGN KEY' or 'UNIQUE'. In UNIQUE constraints the last
+# two parameters are not used.
+sub get_constraint {
+ my ($type, $table_name, $cols, $f_table, $f_cols) = @_;
+ $f_table ||= ''; # For UNIQUE constraints, reference_table is ''
+ $f_cols ||= [];
+
+ my $table = $tschema->get_table($table_name);
+
+ my %fields = map { $_ => 1 } @$cols;
+ my %f_fields = map { $_ => 1 } @$f_cols;
+
+ CONSTRAINT:
+ for my $constraint ( $table->get_constraints ) {
+ next unless $constraint->type eq $type;
+ next unless $constraint->reference_table eq $f_table;
+
+ my %rev_fields = map { $_ => 1 } $constraint->fields;
+ my %rev_f_fields = map { $_ => 1 } $constraint->reference_fields;
+
+ # Check that the given fields are a subset of the constraint's fields
+ for my $field ($constraint->fields) {
+ next CONSTRAINT unless $fields{$field};
+ }
+ if ($type eq 'FOREIGN KEY') {
+ for my $f_field ($constraint->reference_fields) {
+ next CONSTRAINT unless $f_fields{$f_field};
+ }
+ }
+
+ # Check that the constraint's fields are a subset of the given fields
+ for my $field (@$cols) {
+ next CONSTRAINT unless $rev_fields{$field};
+ }
+ if ($type eq 'FOREIGN KEY') {
+ for my $f_field (@$f_cols) {
+ next CONSTRAINT unless $rev_f_fields{$f_field};
+ }
+ }
+
+ return $constraint; # everything passes, found the constraint
+ }
+ return undef; # didn't find a matching constraint
+}
+
+# Test parameters in a FOREIGN KEY constraint other than columns
+sub test_fk {
+ my ($expected, $got) = @_;
+ my $desc = $expected->{display};
+ is( $got->on_delete, $expected->{on_delete},
+ "on_delete parameter correct for `$desc'" );
+ is( $got->on_update, $expected->{on_update},
+ "on_update parameter correct for `$desc'" );
+}
--- /dev/null
+# vim: filetype=perl
+use strict;
+use warnings;
+
+use Test::More;
+use lib qw(t/lib);
+use DBICTest;
+
+my $schema = DBICTest->init_schema();
+
+plan tests => 321;
+
+my $employees = $schema->resultset('Employee');
+$employees->delete();
+
+foreach (1..5) {
+ $employees->create({ name=>'temp' });
+}
+$employees = $employees->search(undef,{order_by=>'position'});
+ok( check_rs($employees), "initial positions" );
+
+hammer_rs( $employees );
+
+DBICTest::Employee->grouping_column('group_id');
+$employees->delete();
+foreach my $group_id (1..3) {
+ foreach (1..6) {
+ $employees->create({ name=>'temp', group_id=>$group_id });
+ }
+}
+$employees = $employees->search(undef,{order_by=>'group_id,position'});
+
+foreach my $group_id (1..3) {
+ my $group_employees = $employees->search({group_id=>$group_id});
+ $group_employees->all();
+    ok( check_rs($group_employees), "group initial positions" );
+ hammer_rs( $group_employees );
+}
+
+sub hammer_rs {
+ my $rs = shift;
+ my $employee;
+ my $count = $rs->count();
+ my $position_column = $rs->result_class->position_column();
+ my $row;
+
+ foreach my $position (1..$count) {
+
+ ($row) = $rs->search({ $position_column=>$position })->all();
+ $row->move_previous();
+ ok( check_rs($rs), "move_previous( $position )" );
+
+ ($row) = $rs->search({ $position_column=>$position })->all();
+ $row->move_next();
+ ok( check_rs($rs), "move_next( $position )" );
+
+ ($row) = $rs->search({ $position_column=>$position })->all();
+ $row->move_first();
+ ok( check_rs($rs), "move_first( $position )" );
+
+ ($row) = $rs->search({ $position_column=>$position })->all();
+ $row->move_last();
+ ok( check_rs($rs), "move_last( $position )" );
+
+ foreach my $to_position (1..$count) {
+ ($row) = $rs->search({ $position_column=>$position })->all();
+ $row->move_to($to_position);
+ ok( check_rs($rs), "move_to( $position => $to_position )" );
+ }
+
+ ($row) = $rs->search({ position=>$position })->all();
+ if ($position==1) {
+ ok( !$row->previous_sibling(), 'no previous sibling' );
+ ok( !$row->first_sibling(), 'no first sibling' );
+ }
+ else {
+ ok( $row->previous_sibling(), 'previous sibling' );
+ ok( $row->first_sibling(), 'first sibling' );
+ }
+ if ($position==$count) {
+ ok( !$row->next_sibling(), 'no next sibling' );
+ ok( !$row->last_sibling(), 'no last sibling' );
+ }
+ else {
+ ok( $row->next_sibling(), 'next sibling' );
+ ok( $row->last_sibling(), 'last sibling' );
+ }
+
+ }
+}
+
+sub check_rs {
+ my( $rs ) = @_;
+ $rs->reset();
+ my $position_column = $rs->result_class->position_column();
+ my $expected_position = 0;
+ while (my $row = $rs->next()) {
+ $expected_position ++;
+ if ($row->get_column($position_column)!=$expected_position) {
+ return 0;
+ }
+ }
+ return 1;
+}
+
--- /dev/null
+use strict;
+use warnings;
+
+use Test::More;
+use lib qw(t/lib);
+use DBICTest;
+
+my $schema = DBICTest->init_schema();
+
+plan tests => 8;
+
+my $cd;
+my $rs = $cd = $schema->resultset("CD")->search({});
+
+my $rs_title = $rs->get_column('title');
+my $rs_year = $rs->get_column('year');
+
+is($rs_title->next, 'Spoonful of bees', "next okay");
+
+my @all = $rs_title->all;
+cmp_ok(scalar @all, '==', 5, "five titles returned");
+
+cmp_ok($rs_year->max, '==', 2001, "max okay for year");
+is($rs_title->min, 'Caterwaulin\' Blues', "min okay for title");
+
+cmp_ok($rs_year->sum, '==', 9996, "sum okay for year");
+
+my $psrs = $schema->resultset('CD')->search({},
+ {
+ '+select' => \'COUNT(*)',
+ '+as' => 'count'
+ }
+);
+ok(defined($psrs->get_column('count')), '+select/+as count');
+
+$psrs = $schema->resultset('CD')->search({},
+ {
+ '+select' => [ \'COUNT(*)', 'title' ],
+ '+as' => [ 'count', 'addedtitle' ]
+ }
+);
+ok(defined($psrs->get_column('count')), '+select/+as arrayref count');
+ok(defined($psrs->get_column('addedtitle')), '+select/+as title');
+
--- /dev/null
+# vim: filetype=perl
+use strict;
+use warnings;
+
+use Test::More;
+use lib qw(t/lib);
+use DBICTest;
+
+my $schema = DBICTest->init_schema();
+
+eval 'require JSON';
+plan skip_all => 'Install JSON to run this test' if ($@);
+
+eval 'require Text::CSV_XS';
+if ($@) {
+ eval 'require Text::CSV_PP';
+ plan skip_all => 'Install Text::CSV_XS or Text::CSV_PP to run this test' if ($@);
+}
+
+plan tests => 5;
+
+# double quotes round the arguments and single-quote within to make sure the
+# tests run on windows as well
+
+my $employees = $schema->resultset('Employee');
+my $cmd = qq|perl script/dbicadmin --schema=DBICTest::Schema --class=Employee --tlibs --connect="['dbi:SQLite:dbname=t/var/DBIxClass.db','','']" --force|;
+
+`$cmd --op=insert --set="{name:'Matt'}"`;
+ok( ($employees->count()==1), 'insert count' );
+
+my $employee = $employees->find(1);
+ok( ($employee->name() eq 'Matt'), 'insert valid' );
+
+`$cmd --op=update --set="{name:'Trout'}"`;
+$employee = $employees->find(1);
+ok( ($employee->name() eq 'Trout'), 'update' );
+
+`$cmd --op=insert --set="{name:'Aran'}"`;
+my $data = `$cmd --op=select --attrs="{order_by:'name'}"`;
+ok( ($data=~/Aran.*Trout/s), 'select with attrs' );
+
+`$cmd --op=delete --where="{name:'Trout'}"`;
+ok( ($employees->count()==1), 'delete' );
+
--- /dev/null
+use strict;
+use warnings;
+
+use Test::More;
+use lib qw(t/lib);
+use DBICTest;
+
+my $schema = DBICTest->init_schema();
+
+eval { require DateTime::Format::MySQL };
+plan skip_all => "Need DateTime::Format::MySQL for inflation tests" if $@;
+
+plan tests => 8;
+
+# inflation test
+my $event = $schema->resultset("Event")->find(1);
+
+isa_ok($event->starts_at, 'DateTime', 'DateTime returned');
+
+# klunky, but makes older Test::More installs happy
+my $starts = $event->starts_at . '';
+is($starts, '2006-04-25T22:24:33', 'Correct date/time');
+
+# create using DateTime
+my $created = $schema->resultset('Event')->create({
+ starts_at => DateTime->new(year=>2006, month=>6, day=>18),
+ created_on => DateTime->new(year=>2006, month=>6, day=>23)
+});
+my $created_start = $created->starts_at;
+
+isa_ok($created->starts_at, 'DateTime', 'DateTime returned');
+is($created_start, '2006-06-18T00:00:00', 'Correct date/time');
+
+## timestamp field
+isa_ok($event->created_on, 'DateTime', 'DateTime returned');
+
+# klunky, but makes older Test::More installs happy
+my $createo = $event->created_on . '';
+is($createo, '2006-06-22T21:00:05', 'Correct date/time');
+
+my $created_cron = $created->created_on;
+
+isa_ok($created->created_on, 'DateTime', 'DateTime returned');
+is($created_cron, '2006-06-23T00:00:00', 'Correct date/time');
--- /dev/null
+use strict;
+use warnings;
+
+use Test::More;
+use lib qw(t/lib);
+use DBICTest;
+use Class::Inspector;
+
+BEGIN {
+ package TestPackage::A;
+ sub some_method {}
+}
+
+my $schema = DBICTest->init_schema();
+
+plan tests => 19;
+
+# Test ensure_class_found
+ok( $schema->ensure_class_found('DBIx::Class::Schema'),
+ 'loaded package DBIx::Class::Schema was found' );
+ok( !Class::Inspector->loaded('DBICTest::FakeComponent'),
+ 'DBICTest::FakeComponent not loaded yet' );
+ok( $schema->ensure_class_found('DBICTest::FakeComponent'),
+ 'package DBICTest::FakeComponent was found' );
+ok( !Class::Inspector->loaded('DBICTest::FakeComponent'),
+ 'DBICTest::FakeComponent not loaded by ensure_class_found()' );
+ok( $schema->ensure_class_found('TestPackage::A'),
+ 'anonymous package TestPackage::A found' );
+ok( !$schema->ensure_class_found('FAKE::WONT::BE::FOUND'),
+ 'fake package not found' );
+
+# Test load_optional_class
+my $retval = eval { $schema->load_optional_class('ANOTHER::FAKE::PACKAGE') };
+ok( !$@, 'load_optional_class on a nonexistent class did not throw' );
+ok( !$retval, 'nonexistent package not loaded' );
+$retval = eval { $schema->load_optional_class('DBICTest::OptionalComponent') };
+ok( !$@, 'load_optional_class on an existing class did not throw' );
+ok( $retval, 'DBICTest::OptionalComponent loaded' );
+eval { $schema->load_optional_class('DBICTest::ErrorComponent') };
+like( $@, qr/did not return a true value/,
+ 'DBICTest::ErrorComponent threw ok' );
+
+# Test ensure_class_loaded
+ok( Class::Inspector->loaded('TestPackage::A'), 'anonymous package exists' );
+eval { $schema->ensure_class_loaded('TestPackage::A'); };
+ok( !$@, 'ensure_class_loaded detected an anon. class' );
+eval { $schema->ensure_class_loaded('FakePackage::B'); };
+like( $@, qr/Can't locate/,
+ 'ensure_class_loaded threw exception for nonexistent class' );
+ok( !Class::Inspector->loaded('DBICTest::FakeComponent'),
+ 'DBICTest::FakeComponent not loaded yet' );
+eval { $schema->ensure_class_loaded('DBICTest::FakeComponent'); };
+ok( !$@, 'ensure_class_loaded detected an existing but non-loaded class' );
+ok( Class::Inspector->loaded('DBICTest::FakeComponent'),
+ 'DBICTest::FakeComponent now loaded' );
+
+{
+  # Squash warnings about syntax errors in SyntaxErrorComponent*.pm
+ local $SIG{__WARN__} = sub {
+ my $warning = shift;
+ warn $warning unless (
+ $warning =~ /String found where operator expected/ or
+ $warning =~ /Missing operator before/
+ );
+ };
+
+ eval { $schema->ensure_class_loaded('DBICTest::SyntaxErrorComponent1') };
+ like( $@, qr/syntax error/,
+ 'ensure_class_loaded(DBICTest::SyntaxErrorComponent1) threw ok' );
+ eval { $schema->load_optional_class('DBICTest::SyntaxErrorComponent2') };
+ like( $@, qr/syntax error/,
+ 'load_optional_class(DBICTest::SyntaxErrorComponent2) threw ok' );
+}
+
+1;
--- /dev/null
+use strict;
+use warnings;
+
+use Test::More;
+use lib qw(t/lib);
+use DBICTest;
+use Data::Dumper;
+my $schema = DBICTest->init_schema();
+
+plan tests => 17;
+
+my @rs1a_results = $schema->resultset("Artist")->search_related('cds', {title => 'Forkful of bees'}, {order_by => 'title'});
+is($rs1a_results[0]->title, 'Forkful of bees', "bare field conditions okay after search related");
+my $rs1 = $schema->resultset("Artist")->search({ 'tags.tag' => 'Blue' }, { join => {'cds' => 'tracks'}, prefetch => {'cds' => 'tags'} });
+my @artists = $rs1->all;
+cmp_ok(@artists, '==', 1, "One artist returned");
+
+my $rs2 = $rs1->search({ artistid => '1' }, { join => {'cds' => {'cd_to_producer' => 'producer'} } });
+
+my @artists2 = $rs2->search({ 'producer.name' => 'Matt S Trout' });
+my @cds = $artists2[0]->cds;
+cmp_ok(scalar @cds, '==', 1, "condition based on inherited join okay");
+
+#this is wrong, should accept me.title really
+my $rs3 = $rs2->search_related('cds');
+cmp_ok($rs3->count, '==', 9, "Nine CDs returned");
+
+my $rs4 = $schema->resultset("CD")->search({ 'artist.artistid' => '1' }, { join => ['tracks', 'artist'], prefetch => 'artist' });
+my @rs4_results = $rs4->all;
+
+is($rs4_results[0]->cdid, 1, "correct artist returned");
+
+my $rs5 = $rs4->search({'tracks.title' => 'Sticky Honey'});
+is($rs5->count, 1, "search without using previous joins okay");
+
+my $record_rs = $schema->resultset("Artist")->search(undef, { join => 'cds' })->search(undef, { prefetch => { 'cds' => 'tracks' }});
+my $record_jp = $record_rs->next;
+ok($record_jp, "prefetch on same rel okay");
+
+my $artist = $schema->resultset("Artist")->find(1);
+my $cds = $artist->cds;
+is($cds->find(2)->title, 'Forkful of bees', "find on has many rs okay");
+
+my $cd = $cds->search({'me.title' => 'Forkful of bees'}, { prefetch => 'tracks' })->first;
+my @tracks = $cd->tracks->all;
+is(scalar(@tracks), 3, 'right number of prefetched tracks after has many');
+
+#causes ambig col error due to order_by
+#my $tracks_rs = $cds->search_related('tracks', { 'tracks.position' => '2', 'disc.title' => 'Forkful of bees' });
+#my $first_tracks_rs = $tracks_rs->first;
+
+my $related_rs = $schema->resultset("Artist")->search({ name => 'Caterwauler McCrae' })->search_related('cds', { year => '2001'})->search_related('tracks', { 'position' => '2' });
+is($related_rs->first->trackid, '5', 'search related on search related okay');
+
+#causes ambig col error due to order_by
+#$related_rs->search({'cd.year' => '2001'}, {join => ['cd', 'cd']})->all;
+
+my $title = $schema->resultset("Artist")->search_related('twokeys')->search_related('cd')->search({'tracks.position' => '2'}, {join => 'tracks', order_by => 'tracks.trackid'})->next->title;
+is($title, 'Forkful of bees', 'search relateds with order by okay');
+
+my $prod_rs = $schema->resultset("CD")->find(1)->producers_sorted;
+my $prod_rs2 = $prod_rs->search({ name => 'Matt S Trout' });
+my $prod_first = $prod_rs2->first;
+is($prod_first->id, '1', 'somewhat pointless search on rel with order_by on it okay');
+
+my $prod_map_rs = $schema->resultset("Artist")->find(1)->cds->search_related('cd_to_producer', {}, { join => 'producer', prefetch => 'producer' });
+ok($prod_map_rs->next->producer, 'search related with prefetch okay');
+
+my $stupid = $schema->resultset("Artist")->search_related('artist_undirected_maps', {}, { prefetch => 'artist1' })->search_related('mapped_artists')->search_related('cds', {'cds.cdid' => '2'}, { prefetch => 'tracks' });
+
+my $cd_final = $schema->resultset("Artist")->search_related('artist_undirected_maps', {}, { prefetch => 'artist1' })->search_related('mapped_artists')->search_related('cds', {'cds.cdid' => '2'}, { prefetch => 'tracks' })->first;
+is($cd_final->cdid, '2', 'bonkers search_related-with-join-midway okay');
+
+# should end up with cds and cds_2 joined
+my $merge_rs_1 = $schema->resultset("Artist")->search({ 'cds_2.cdid' => '2' }, { join => ['cds', 'cds'] });
+is(scalar(@{$merge_rs_1->{attrs}->{join}}), 2, 'both joins kept');
+ok($merge_rs_1->next, 'query on double joined rel runs okay');
+
+# should only end up with cds joined
+my $merge_rs_2 = $schema->resultset("Artist")->search({ }, { join => 'cds' })->search({ 'cds.cdid' => '2' }, { join => 'cds' });
+is(scalar(@{$merge_rs_2->{attrs}->{join}}), 1, 'only one join kept when inherited');
+my $merge_rs_2_cd = $merge_rs_2->next;
+
+1;
--- /dev/null
+use strict;
+use warnings;
+
+use Test::More;
+use lib qw(t/lib);
+use DBICTest;
+
+my $schema = DBICTest->init_schema();
+
+plan tests => 2;
+
+ok ( $schema->storage->debug(1), 'debug' );
+ok ( defined(
+ $schema->storage->debugfh(
+ IO::File->new('t/var/sql.log', 'w')
+ )
+ ),
+ 'debugfh'
+ );
+
+1;
--- /dev/null
+use strict;
+use warnings;
+
+use Test::More;
+use lib qw(t/lib);
+use DBICTest;
+
+plan tests => 1;
+
+my $schema = DBICTest->init_schema();
+
+is( ref($schema->storage), 'DBIx::Class::Storage::DBI::SQLite',
+ 'Storage reblessed correctly into DBIx::Class::Storage::DBI::SQLite' );
+
+1;
+++ /dev/null
-use Test::More;
-use lib qw(t/lib);
-use DBICTest;
-use DBICTest::BasicRels;
-
-require "t/run/01core.tl";
-run_tests(DBICTest->schema);
+++ /dev/null
-use Test::More;
-use lib qw(t/lib);
-use DBICTest;
-use DBICTest::BasicRels;
-
-require "t/run/04db.tl";
-run_tests(DBICTest->schema);
+++ /dev/null
-use Test::More;
-use lib qw(t/lib);
-use DBICTest;
-use DBICTest::BasicRels;
-
-require "t/run/05multipk.tl";
-run_tests(DBICTest->schema);
+++ /dev/null
-use Test::More;
-use lib qw(t/lib);
-use DBICTest;
-use DBICTest::BasicRels;
-
-require "t/run/06relationship.tl";
-run_tests(DBICTest->schema);
+++ /dev/null
-use Test::More;
-use lib qw(t/lib);
-use DBICTest;
-use DBICTest::BasicRels;
-
-require "t/run/07pager.tl";
-run_tests(DBICTest->schema);
+++ /dev/null
-use Test::More;
-use lib qw(t/lib);
-use DBICTest;
-use DBICTest::BasicRels;
-
-require "t/run/08inflate.tl";
-run_tests(DBICTest->schema);
+++ /dev/null
-use Test::More;
-use lib qw(t/lib);
-use DBICTest;
-use DBICTest::BasicRels;
-
-require "t/run/08inflate_has_a.tl";
-run_tests(DBICTest->schema);
+++ /dev/null
-use Test::More;
-use lib qw(t/lib);
-use DBICTest;
-use DBICTest::BasicRels;
-
-require "t/run/08inflate_serialize.tl";
-run_tests(DBICTest->schema);
+++ /dev/null
-use Test::More;
-use lib qw(t/lib);
-use DBICTest;
-use DBICTest::BasicRels;
-
-require "t/run/09update.tl";
-run_tests(DBICTest->schema);
+++ /dev/null
-use Test::More;
-use lib qw(t/lib);
-use DBICTest;
-use DBICTest::BasicRels;
-
-require "t/run/10auto.tl";
-run_tests(DBICTest->schema);
+++ /dev/null
-use Test::More;
-use lib qw(t/lib);
-use DBICTest;
-use DBICTest::BasicRels;
-
-require "t/run/11mysql.tl";
-run_tests(DBICTest->schema);
+++ /dev/null
-use Test::More;
-use lib qw(t/lib);
-use DBICTest;
-use DBICTest::BasicRels;
-
-require "t/run/12pg.tl";
-run_tests(DBICTest->schema);
+++ /dev/null
-use Test::More;
-use lib qw(t/lib);
-use DBICTest;
-use DBICTest::BasicRels;
-
-require "t/run/13oracle.tl";
-run_tests(DBICTest->schema);
+++ /dev/null
-use Test::More;
-use lib qw(t/lib);
-use DBICTest;
-use DBICTest::BasicRels;
-
-require "t/run/145db2.tl";
-run_tests(DBICTest->schema);
+++ /dev/null
-use Test::More;
-use lib qw(t/lib);
-use DBICTest;
-use DBICTest::BasicRels;
-
-require "t/run/14mssql.tl";
-run_tests(DBICTest->schema);
+++ /dev/null
-use Test::More;
-use lib qw(t/lib);
-use DBICTest;
-use DBICTest::BasicRels;
-
-require "t/run/15limit.tl";
-run_tests(DBICTest->schema);
+++ /dev/null
-use Test::More;
-use lib qw(t/lib);
-use DBICTest;
-use DBICTest::BasicRels;
-
-require "t/run/16joins.tl";
-run_tests(DBICTest->schema);
+++ /dev/null
-use Test::More;
-use lib qw(t/lib);
-use DBICTest;
-use DBICTest::BasicRels;
-
-require "t/run/17join_count.tl";
-run_tests(DBICTest->schema);
+++ /dev/null
-use Test::More;
-use lib qw(t/lib);
-use DBICTest;
-use DBICTest::BasicRels;
-
-require "t/run/18self_referencial.tl";
-run_tests(DBICTest->schema);
+++ /dev/null
-use Test::More;
-use lib qw(t/lib);
-use DBICTest;
-use DBICTest::BasicRels;
-
-require "t/run/19uuid.tl";
-run_tests(DBICTest->schema);
+++ /dev/null
-use Test::More;
-use lib qw(t/lib);
-use DBICTest;
-use DBICTest::BasicRels;
-
-require "t/run/20unique.tl";
-run_tests(DBICTest->schema);
+++ /dev/null
-use Test::More;
-use lib qw(t/lib);
-use DBICTest;
-use DBICTest::BasicRels;
-
-require "t/run/21transactions.tl";
-run_tests(DBICTest->schema);
+++ /dev/null
-use Test::More;
-use lib qw(t/lib);
-use DBICTest;
-use DBICTest::BasicRels;
-
-require "t/run/22cascade_copy.tl";
-run_tests(DBICTest->schema);
+++ /dev/null
-use Test::More;
-use lib qw(t/lib);
-use DBICTest;
-use DBICTest::BasicRels;
-
-require "t/run/23cache.tl";
-run_tests(DBICTest->schema);
+++ /dev/null
-use Test::More;
-use lib qw(t/lib);
-use DBICTest;
-use DBICTest::BasicRels;
-
-require "t/run/24serialize.tl";
-run_tests(DBICTest->schema);
+++ /dev/null
-use Test::More;
-use lib qw(t/lib);
-use DBICTest;
-use DBICTest::BasicRels;
-
-require "t/run/25utf8.tl";
-run_tests(DBICTest->schema);
+++ /dev/null
-use Test::More;
-use lib qw(t/lib);
-use DBICTest;
-use DBICTest::BasicRels;
-
-require "t/run/26might_have.tl";
-run_tests(DBICTest->schema);
use lib 't/lib';
-use_ok('DBICTest::HelperRels');
+use_ok('DBICTest');
+DBICTest->init_schema();
DBICTest::CD->load_components(qw/CDBICompat::Pager/);
City->table('City');
City->columns(All => qw/Name State Population/);
-City->has_a(State => 'State');
+{
+ # Disable the `no such table' warning
+ local $SIG{__WARN__} = sub {
+ my $warning = shift;
+ warn $warning unless ($warning =~ /\Qno such table: City(1)\E/);
+ };
+
+ City->has_a(State => 'State');
+}
#-------------------------------------------------------------------------
package CD;
{
eval { my $id = Film->title };
- like $@, qr/class method/, "Can't get title with no object";
+ #like $@, qr/class method/, "Can't get title with no object";
+ ok $@, "Can't get title with no object";
}
eval { my $duh = Film->create; };
$btaste->discard_changes;
is($btaste->Director, $orig_director, 'discard_changes()');
-{
+SKIP: {
+ skip "ActiveState perl produces additional warnings", 3
+ if ($^O eq 'MSWin32');
+
Film->autoupdate(1);
my $btaste2 = Film->retrieve($btaste->id);
$btaste->NumExplodingSheep(18);
$btaste3->NumExplodingSheep(13);
}
is @warnings, 1, "DESTROY without update warns";
-print join("\n", @warnings);
Film->autoupdate(0);
}
ok $pj = $btaste->Director, "Bad taste now hasa() director";
isa_ok $pj => 'Director';
{
- no warnings 'redefine';
+ no warnings qw(redefine once);
local *Ima::DBI::st::execute =
sub { ::fail("Shouldn't need to query db"); };
is $pj->id, 'Peter Jackson', 'ID already stored';
+++ /dev/null
-use Test::More;
-use lib qw(t/lib);
-use DBICTest;
-use DBICTest::HelperRels;
-
-require "t/run/01core.tl";
-run_tests(DBICTest->schema);
+++ /dev/null
-use Test::More;
-use lib qw(t/lib);
-use DBICTest;
-use DBICTest::HelperRels;
-
-require "t/run/04db.tl";
-run_tests(DBICTest->schema);
+++ /dev/null
-use Test::More;
-use lib qw(t/lib);
-use DBICTest;
-use DBICTest::HelperRels;
-
-require "t/run/05multipk.tl";
-run_tests(DBICTest->schema);
+++ /dev/null
-use Test::More;
-use lib qw(t/lib);
-use DBICTest;
-use DBICTest::HelperRels;
-
-require "t/run/06relationship.tl";
-run_tests(DBICTest->schema);
+++ /dev/null
-use Test::More;
-use lib qw(t/lib);
-use DBICTest;
-use DBICTest::HelperRels;
-
-require "t/run/07pager.tl";
-run_tests(DBICTest->schema);
+++ /dev/null
-use Test::More;
-use lib qw(t/lib);
-use DBICTest;
-use DBICTest::HelperRels;
-
-require "t/run/08inflate.tl";
-run_tests(DBICTest->schema);
+++ /dev/null
-use Test::More;
-use lib qw(t/lib);
-use DBICTest;
-use DBICTest::HelperRels;
-
-require "t/run/08inflate_has_a.tl";
-run_tests(DBICTest->schema);
+++ /dev/null
-use Test::More;
-use lib qw(t/lib);
-use DBICTest;
-use DBICTest::HelperRels;
-
-require "t/run/08inflate_serialize.tl";
-run_tests(DBICTest->schema);
+++ /dev/null
-use Test::More;
-use lib qw(t/lib);
-use DBICTest;
-use DBICTest::HelperRels;
-
-require "t/run/09update.tl";
-run_tests(DBICTest->schema);
+++ /dev/null
-use Test::More;
-use lib qw(t/lib);
-use DBICTest;
-use DBICTest::HelperRels;
-
-require "t/run/10auto.tl";
-run_tests(DBICTest->schema);
+++ /dev/null
-use Test::More;
-use lib qw(t/lib);
-use DBICTest;
-use DBICTest::HelperRels;
-
-require "t/run/11mysql.tl";
-run_tests(DBICTest->schema);
+++ /dev/null
-use Test::More;
-use lib qw(t/lib);
-use DBICTest;
-use DBICTest::HelperRels;
-
-require "t/run/12pg.tl";
-run_tests(DBICTest->schema);
+++ /dev/null
-use Test::More;
-use lib qw(t/lib);
-use DBICTest;
-use DBICTest::HelperRels;
-
-require "t/run/13oracle.tl";
-run_tests(DBICTest->schema);
+++ /dev/null
-use Test::More;
-use lib qw(t/lib);
-use DBICTest;
-use DBICTest::HelperRels;
-
-require "t/run/145db2.tl";
-run_tests(DBICTest->schema);
+++ /dev/null
-use Test::More;
-use lib qw(t/lib);
-use DBICTest;
-use DBICTest::HelperRels;
-
-require "t/run/14mssql.tl";
-run_tests(DBICTest->schema);
+++ /dev/null
-use Test::More;
-use lib qw(t/lib);
-use DBICTest;
-use DBICTest::HelperRels;
-
-require "t/run/15limit.tl";
-run_tests(DBICTest->schema);
+++ /dev/null
-use Test::More;
-use lib qw(t/lib);
-use DBICTest;
-use DBICTest::HelperRels;
-
-require "t/run/16joins.tl";
-run_tests(DBICTest->schema);
+++ /dev/null
-use Test::More;
-use lib qw(t/lib);
-use DBICTest;
-use DBICTest::HelperRels;
-
-require "t/run/17join_count.tl";
-run_tests(DBICTest->schema);
+++ /dev/null
-use Test::More;
-use lib qw(t/lib);
-use DBICTest;
-use DBICTest::HelperRels;
-
-require "t/run/18self_referencial.tl";
-run_tests(DBICTest->schema);
+++ /dev/null
-use Test::More;
-use lib qw(t/lib);
-use DBICTest;
-use DBICTest::HelperRels;
-
-require "t/run/19uuid.tl";
-run_tests(DBICTest->schema);
+++ /dev/null
-use Test::More;
-use lib qw(t/lib);
-use DBICTest;
-use DBICTest::HelperRels;
-
-require "t/run/20unique.tl";
-run_tests(DBICTest->schema);
+++ /dev/null
-use Test::More;
-use lib qw(t/lib);
-use DBICTest;
-use DBICTest::HelperRels;
-
-require "t/run/21transactions.tl";
-run_tests(DBICTest->schema);
+++ /dev/null
-use Test::More;
-use lib qw(t/lib);
-use DBICTest;
-use DBICTest::HelperRels;
-
-require "t/run/22cascade_copy.tl";
-run_tests(DBICTest->schema);
+++ /dev/null
-use Test::More;
-use lib qw(t/lib);
-use DBICTest;
-use DBICTest::HelperRels;
-
-require "t/run/23cache.tl";
-run_tests(DBICTest->schema);
+++ /dev/null
-use Test::More;
-use lib qw(t/lib);
-use DBICTest;
-use DBICTest::HelperRels;
-
-require "t/run/24serialize.tl";
-run_tests(DBICTest->schema);
+++ /dev/null
-use Test::More;
-use lib qw(t/lib);
-use DBICTest;
-use DBICTest::HelperRels;
-
-require "t/run/25utf8.tl";
-run_tests(DBICTest->schema);
+++ /dev/null
-use Test::More;
-use lib qw(t/lib);
-use DBICTest;
-use DBICTest::HelperRels;
-
-require "t/run/26might_have.tl";
-run_tests(DBICTest->schema);
+++ /dev/null
-use Test::More;
-use lib qw(t/lib);
-use DBICTest;
-use DBICTest::HelperRels;
-
-eval "use SQL::Translator";
-plan skip_all => 'SQL::Translator required' if $@;
-
-my $schema = DBICTest::Schema;
-
-plan tests => 29;
-
-my $translator = SQL::Translator->new(
- parser_args => {
- 'DBIx::Schema' => $schema,
- },
- producer_args => {
- },
-);
-
-$translator->parser('SQL::Translator::Parser::DBIx::Class');
-$translator->producer('SQLite');
-
-my $output = $translator->translate();
-
-my @constraints =
- (
- {'display' => 'twokeys->cd',
- 'selftable' => 'twokeys', 'foreigntable' => 'cd',
- 'selfcols' => ['cd'], 'foreigncols' => ['cdid'],
- 'needed' => 1, on_delete => '', on_update => ''},
- {'display' => 'twokeys->artist',
- 'selftable' => 'twokeys', 'foreigntable' => 'artist',
- 'selfcols' => ['artist'], 'foreigncols' => ['artistid'],
- 'needed' => 1, on_delete => '', on_update => ''},
- {'display' => 'cd_to_producer->cd',
- 'selftable' => 'cd_to_producer', 'foreigntable' => 'cd',
- 'selfcols' => ['cd'], 'foreigncols' => ['cdid'],
- 'needed' => 1, on_delete => '', on_update => ''},
- {'display' => 'cd_to_producer->producer',
- 'selftable' => 'cd_to_producer', 'foreigntable' => 'producer',
- 'selfcols' => ['producer'], 'foreigncols' => ['producerid'],
- 'needed' => 1, on_delete => '', on_update => ''},
- {'display' => 'self_ref_alias -> self_ref for self_ref',
- 'selftable' => 'self_ref_alias', 'foreigntable' => 'self_ref',
- 'selfcols' => ['self_ref'], 'foreigncols' => ['id'],
- 'needed' => 1, on_delete => '', on_update => ''},
- {'display' => 'self_ref_alias -> self_ref for alias',
- 'selftable' => 'self_ref_alias', 'foreigntable' => 'self_ref',
- 'selfcols' => ['alias'], 'foreigncols' => ['id'],
- 'needed' => 1, on_delete => '', on_update => ''},
- {'display' => 'cd -> artist',
- 'selftable' => 'cd', 'foreigntable' => 'artist',
- 'selfcols' => ['artist'], 'foreigncols' => ['artistid'],
- 'needed' => 1, on_delete => '', on_update => ''},
- {'display' => 'artist_undirected_map -> artist for id1',
- 'selftable' => 'artist_undirected_map', 'foreigntable' => 'artist',
- 'selfcols' => ['id1'], 'foreigncols' => ['artistid'],
- 'needed' => 1, on_delete => '', on_update => ''},
- {'display' => 'artist_undirected_map -> artist for id2',
- 'selftable' => 'artist_undirected_map', 'foreigntable' => 'artist',
- 'selfcols' => ['id2'], 'foreigncols' => ['artistid'],
- 'needed' => 1, on_delete => '', on_update => ''},
- {'display' => 'track->cd',
- 'selftable' => 'track', 'foreigntable' => 'cd',
- 'selfcols' => ['cd'], 'foreigncols' => ['cdid'],
- 'needed' => 2, on_delete => '', on_update => ''},
- {'display' => 'treelike -> treelike for parent',
- 'selftable' => 'treelike', 'foreigntable' => 'treelike',
- 'selfcols' => ['parent'], 'foreigncols' => ['id'],
- 'needed' => 1, on_delete => '', on_update => ''},
- {'display' => 'twokeytreelike -> twokeytreelike for parent1,parent2',
- 'selftable' => 'twokeytreelike', 'foreigntable' => 'twokeytreelike',
- 'selfcols' => ['parent1', 'parent2'], 'foreigncols' => ['id1','id2'],
- 'needed' => 1, on_delete => '', on_update => ''},
- {'display' => 'tags -> cd',
- 'selftable' => 'tags', 'foreigntable' => 'cd',
- 'selfcols' => ['cd'], 'foreigncols' => ['cdid'],
- 'needed' => 1, on_delete => '', on_update => ''},
- {'display' => 'bookmark -> link',
- 'selftable' => 'bookmark', 'foreigntable' => 'link',
- 'selfcols' => ['link'], 'foreigncols' => ['id'],
- 'needed' => 1, on_delete => '', on_update => ''},
- );
-
-my $tschema = $translator->schema();
-for my $table ($tschema->get_tables) {
- my $table_name = $table->name;
- for my $c ( $table->get_constraints ) {
- next unless $c->type eq 'FOREIGN KEY';
-
- ok(check($table_name, scalar $c->fields,
- $c->reference_table, scalar $c->reference_fields,
- $c->on_delete, $c->on_update), "Constraint on $table_name matches an expected constraint");
- }
-}
-
-my $i;
-for ($i = 0; $i <= $#constraints; ++$i) {
- ok(!$constraints[$i]->{'needed'}, "Constraint $constraints[$i]->{display}");
-}
-
-sub check {
- my ($selftable, $selfcol, $foreigntable, $foreigncol, $ondel, $onupd) = @_;
-
- $ondel = '' if (!defined($ondel));
- $onupd = '' if (!defined($onupd));
-
- my $i;
- for ($i = 0; $i <= $#constraints; ++$i) {
- if ($selftable eq $constraints[$i]->{'selftable'} &&
- $foreigntable eq $constraints[$i]->{'foreigntable'} &&
- ($ondel eq $constraints[$i]->{on_delete}) &&
- ($onupd eq $constraints[$i]->{on_update})) {
- # check columns
-
- my $found = 0;
- for (my $j = 0; $j <= $#$selfcol; ++$j) {
- $found = 0;
- for (my $k = 0; $k <= $#{$constraints[$i]->{'selfcols'}}; ++$k) {
- if ($selfcol->[$j] eq $constraints[$i]->{'selfcols'}->[$k] &&
- $foreigncol->[$j] eq $constraints[$i]->{'foreigncols'}->[$k]) {
- $found = 1;
- last;
- }
- }
- last unless $found;
- }
-
- if ($found) {
- for (my $j = 0; $j <= $#{$constraints[$i]->{'selfcols'}}; ++$j) {
- $found = 0;
- for (my $k = 0; $k <= $#$selfcol; ++$k) {
- if ($selfcol->[$k] eq $constraints[$i]->{'selfcols'}->[$j] &&
- $foreigncol->[$k] eq $constraints[$i]->{'foreigncols'}->[$j]) {
- $found = 1;
- last;
- }
- }
- last unless $found;
- }
- }
-
- if ($found) {
- --$constraints[$i]->{needed};
- return 1;
- }
- }
- }
- return 0;
-}
use warnings;
use DBICTest::Schema;
-sub initialise {
+=head1 NAME
- my $db_file = "t/var/DBIxClass.db";
-
- unlink($db_file) if -e $db_file;
- unlink($db_file . "-journal") if -e $db_file . "-journal";
- mkdir("t/var") unless -d "t/var";
-
- my $dsn = "dbi:SQLite:${db_file}";
+DBICTest - Library to be used by DBIx::Class test scripts.
+
+=head1 SYNOPSIS
+
+ use lib qw(t/lib);
+ use DBICTest;
+ use Test::More;
- return DBICTest::Schema->compose_connection('DBICTest' => $dsn);
+ my $schema = DBICTest->init_schema();
+
+=head1 DESCRIPTION
+
+This module provides the basic utilities to write tests against
+DBIx::Class.
+
+=head1 METHODS
+
+=head2 init_schema
+
+ my $schema = DBICTest->init_schema(
+ no_deploy=>1,
+ no_populate=>1,
+ );
+
+This method removes the test SQLite database in t/var/DBIxClass.db
+and then creates a new, empty database.
+
+This method will call deploy_schema() by default, unless the
+no_deploy flag is set.
+
+Also, this method will call populate_schema() by default,
+unless the no_deploy or no_populate flags are set.
+
+=cut
+
+sub init_schema {
+ my $self = shift;
+ my %args = @_;
+ my $db_file = "t/var/DBIxClass.db";
+
+ unlink($db_file) if -e $db_file;
+ unlink($db_file . "-journal") if -e $db_file . "-journal";
+ mkdir("t/var") unless -d "t/var";
+
+ my $dsn = $ENV{"DBICTEST_DSN"} || "dbi:SQLite:${db_file}";
+ my $dbuser = $ENV{"DBICTEST_DBUSER"} || '';
+ my $dbpass = $ENV{"DBICTEST_DBPASS"} || '';
+
+ my $schema = DBICTest::Schema->compose_connection('DBICTest' => $dsn, $dbuser, $dbpass);
+ $schema->storage->on_connect_do(['PRAGMA synchronous = OFF']);
+ if ( !$args{no_deploy} ) {
+ __PACKAGE__->deploy_schema( $schema );
+ __PACKAGE__->populate_schema( $schema ) if( !$args{no_populate} );
+ }
+ return $schema;
}
-
+
+=head2 deploy_schema
+
+ DBICTest->deploy_schema( $schema );
+
+This method does one of two things to the schema. It either calls
+the experimental $schema->deploy() if the DBICTEST_SQLT_DEPLOY environment
+variable is set, or, by default, it reads in the t/lib/sqlite.sql
+file and executes the SQL within. Either way you end up with a fresh set
+of tables for testing.
+
+=cut
+
+sub deploy_schema {
+ my $self = shift;
+ my $schema = shift;
+
+ if ($ENV{"DBICTEST_SQLT_DEPLOY"}) {
+ return $schema->deploy();
+ } else {
+ open IN, "t/lib/sqlite.sql";
+ my $sql;
+ { local $/ = undef; $sql = <IN>; }
+ close IN;
+ $schema->storage->dbh->do($_) for split(/;\n/, $sql);
+ }
+}
+
+=head2 populate_schema
+
+ DBICTest->populate_schema( $schema );
+
+After you deploy your schema you can use this method to populate
+the tables with test data.
+
+=cut
+
+sub populate_schema {
+ my $self = shift;
+ my $schema = shift;
+
+ $schema->populate('Artist', [
+ [ qw/artistid name/ ],
+ [ 1, 'Caterwauler McCrae' ],
+ [ 2, 'Random Boy Band' ],
+ [ 3, 'We Are Goth' ],
+ ]);
+
+ $schema->populate('CD', [
+ [ qw/cdid artist title year/ ],
+ [ 1, 1, "Spoonful of bees", 1999 ],
+ [ 2, 1, "Forkful of bees", 2001 ],
+ [ 3, 1, "Caterwaulin' Blues", 1997 ],
+ [ 4, 2, "Generic Manufactured Singles", 2001 ],
+ [ 5, 3, "Come Be Depressed With Us", 1998 ],
+ ]);
+
+ $schema->populate('LinerNotes', [
+ [ qw/liner_id notes/ ],
+ [ 2, "Buy Whiskey!" ],
+ [ 4, "Buy Merch!" ],
+ [ 5, "Kill Yourself!" ],
+ ]);
+
+ $schema->populate('Tag', [
+ [ qw/tagid cd tag/ ],
+ [ 1, 1, "Blue" ],
+ [ 2, 2, "Blue" ],
+ [ 3, 3, "Blue" ],
+ [ 4, 5, "Blue" ],
+ [ 5, 2, "Cheesy" ],
+ [ 6, 4, "Cheesy" ],
+ [ 7, 5, "Cheesy" ],
+ [ 8, 2, "Shiny" ],
+ [ 9, 4, "Shiny" ],
+ ]);
+
+ $schema->populate('TwoKeys', [
+ [ qw/artist cd/ ],
+ [ 1, 1 ],
+ [ 1, 2 ],
+ [ 2, 2 ],
+ ]);
+
+ $schema->populate('FourKeys', [
+ [ qw/foo bar hello goodbye sensors/ ],
+ [ 1, 2, 3, 4, 'online' ],
+ [ 5, 4, 3, 6, 'offline' ],
+ ]);
+
+ $schema->populate('OneKey', [
+ [ qw/id artist cd/ ],
+ [ 1, 1, 1 ],
+ [ 2, 1, 2 ],
+ [ 3, 2, 2 ],
+ ]);
+
+ $schema->populate('SelfRef', [
+ [ qw/id name/ ],
+ [ 1, 'First' ],
+ [ 2, 'Second' ],
+ ]);
+
+ $schema->populate('SelfRefAlias', [
+ [ qw/self_ref alias/ ],
+ [ 1, 2 ]
+ ]);
+
+ $schema->populate('ArtistUndirectedMap', [
+ [ qw/id1 id2/ ],
+ [ 1, 2 ]
+ ]);
+
+ $schema->populate('Producer', [
+ [ qw/producerid name/ ],
+ [ 1, 'Matt S Trout' ],
+ [ 2, 'Bob The Builder' ],
+ [ 3, 'Fred The Phenotype' ],
+ ]);
+
+ $schema->populate('CD_to_Producer', [
+ [ qw/cd producer/ ],
+ [ 1, 1 ],
+ [ 1, 2 ],
+ [ 1, 3 ],
+ ]);
+
+ $schema->populate('TreeLike', [
+ [ qw/id parent name/ ],
+ [ 1, 0, 'foo' ],
+ [ 2, 1, 'bar' ],
+ [ 5, 1, 'blop' ],
+ [ 3, 2, 'baz' ],
+ [ 4, 3, 'quux' ],
+ ]);
+
+ $schema->populate('Track', [
+ [ qw/trackid cd position title/ ],
+ [ 4, 2, 1, "Stung with Success"],
+ [ 5, 2, 2, "Stripy"],
+ [ 6, 2, 3, "Sticky Honey"],
+ [ 7, 3, 1, "Yowlin"],
+ [ 8, 3, 2, "Howlin"],
+ [ 9, 3, 3, "Fowlin"],
+ [ 10, 4, 1, "Boring Name"],
+ [ 11, 4, 2, "Boring Song"],
+ [ 12, 4, 3, "No More Ideas"],
+ [ 13, 5, 1, "Sad"],
+ [ 14, 5, 2, "Under The Weather"],
+ [ 15, 5, 3, "Suicidal"],
+ [ 16, 1, 1, "The Bees Knees"],
+ [ 17, 1, 2, "Apiary"],
+ [ 18, 1, 3, "Beehind You"],
+ ]);
+
+ $schema->populate('Event', [
+ [ qw/id starts_at created_on/ ],
+ [ 1, '2006-04-25 22:24:33', '2006-06-22 21:00:05'],
+ ]);
+
+ $schema->populate('Link', [
+ [ qw/id title/ ],
+ [ 1, 'aaa' ]
+ ]);
+
+ $schema->populate('Bookmark', [
+ [ qw/id link/ ],
+ [ 1, 1 ]
+ ]);
+}
+
1;
+++ /dev/null
-package # hide from PAUSE
- DBICTest::BasicRels;
-
-use DBICTest::Schema;
-use DBICTest::Schema::BasicRels;
-use DBICTest::Setup;
-
-1;
--- /dev/null
+# belongs to t/run/90ensure_class_loaded.tl
+package # hide from PAUSE
+ DBICTest::ErrorComponent;
+use warnings;
+use strict;
+
+# this is missing on purpose
+# 1;
--- /dev/null
+# belongs to t/run/90ensure_class_loaded.tl
+package # hide from PAUSE
+ DBICTest::FakeComponent;
+use warnings;
+use strict;
+
+1;
+++ /dev/null
-package # hide from PAUSE
- DBICTest::HelperRels;
-
-use DBICTest::Schema;
-use DBICTest::Schema::HelperRels;
-use DBICTest::Setup;
-
-1;
--- /dev/null
+# belongs to t/run/90ensure_class_loaded.tl
+package # hide from PAUSE
+ DBICTest::OptionalComponent;
+use warnings;
+use strict;
+
+1;
package # hide from PAUSE
- DBICTest::Extra;
+ DBICTest::ResultSetManager;
use base 'DBIx::Class::Schema';
__PACKAGE__->load_classes("Foo");
package # hide from PAUSE
- DBICTest::Extra::Foo;
+ DBICTest::ResultSetManager::Foo;
use base 'DBIx::Class';
__PACKAGE__->load_components(qw/ ResultSetManager Core /);
-package # hide from PAUSE
+package # hide from PAUSE
DBICTest::Schema;
use base qw/DBIx::Class::Schema/;
__PACKAGE__->load_classes(qw/
Artist
+ Employee
CD
Link
Bookmark
/]},
(
'FourKeys',
+ 'FourKeys_to_TwoKeys',
'#dummy',
'SelfRef',
'ArtistUndirectedMap',
+ 'ArtistSourceName',
+ 'ArtistSubclass',
'Producer',
'CD_to_Producer',
),
- qw/SelfRefAlias TreeLike TwoKeyTreeLike/
+ qw/SelfRefAlias TreeLike TwoKeyTreeLike Event/
);
1;
use base 'DBIx::Class::Core';
-__PACKAGE__->load_components('PK::Auto');
-
-DBICTest::Schema::Artist->table('artist');
-DBICTest::Schema::Artist->add_columns(
+__PACKAGE__->table('artist');
+__PACKAGE__->add_columns(
'artistid' => {
data_type => 'integer',
is_auto_increment => 1
is_nullable => 1,
},
);
-DBICTest::Schema::Artist->set_primary_key('artistid');
+__PACKAGE__->set_primary_key('artistid');
__PACKAGE__->mk_classdata('field_name_for', {
artistid => 'primary key',
name => 'artist name',
});
+__PACKAGE__->has_many(
+ cds => 'DBICTest::Schema::CD', undef,
+ { order_by => 'year' },
+);
+
+__PACKAGE__->has_many( twokeys => 'DBICTest::Schema::TwoKeys' );
+__PACKAGE__->has_many( onekeys => 'DBICTest::Schema::OneKey' );
+
+__PACKAGE__->has_many(
+ artist_undirected_maps => 'DBICTest::Schema::ArtistUndirectedMap',
+ [ {'foreign.id1' => 'self.artistid'}, {'foreign.id2' => 'self.artistid'} ],
+ { cascade_copy => 0 } # this would *so* not make sense
+);
+
1;
--- /dev/null
+package # hide from PAUSE
+ DBICTest::Schema::ArtistSourceName;
+
+use base 'DBICTest::Schema::Artist';
+
+__PACKAGE__->source_name('SourceNameArtists');
+
+1;
--- /dev/null
+package # hide from PAUSE
+ DBICTest::Schema::ArtistSubclass;
+
+use base 'DBICTest::Schema::Artist';
+
+__PACKAGE__->table(__PACKAGE__->table);
+
+1;
\ No newline at end of file
);
__PACKAGE__->set_primary_key(qw/id1 id2/);
+__PACKAGE__->belongs_to( 'artist1', 'DBICTest::Schema::Artist', 'id1' );
+__PACKAGE__->belongs_to( 'artist2', 'DBICTest::Schema::Artist', 'id2');
+__PACKAGE__->has_many(
+ 'mapped_artists', 'DBICTest::Schema::Artist',
+ [ {'foreign.artistid' => 'self.id1'}, {'foreign.artistid' => 'self.id2'} ],
+);
+
1;
+++ /dev/null
-package # hide from PAUSE
- DBICTest::Schema::BasicRels;
-
-use base 'DBIx::Class::Core';
-
-DBICTest::Schema::Artist->add_relationship(
- cds => 'DBICTest::Schema::CD',
- { 'foreign.artist' => 'self.artistid' },
- { order_by => 'year', join_type => 'LEFT', cascade_delete => 1, cascade_copy => 1, accessor => 'multi' }
-);
-DBICTest::Schema::Artist->add_relationship(
- twokeys => 'DBICTest::Schema::TwoKeys',
- { 'foreign.artist' => 'self.artistid' },
- { cascade_copy => 1 }
-);
-DBICTest::Schema::Artist->add_relationship(
- onekeys => 'DBICTest::Schema::OneKey',
- { 'foreign.artist' => 'self.artistid' }
-);
-DBICTest::Schema::Artist->add_relationship(
- artist_undirected_maps => 'DBICTest::Schema::ArtistUndirectedMap',
- [{'foreign.id1' => 'self.artistid'}, {'foreign.id2' => 'self.artistid'}],
- { accessor => 'multi' }
-);
-DBICTest::Schema::ArtistUndirectedMap->add_relationship(
- 'mapped_artists', 'DBICTest::Schema::Artist',
- [{'foreign.artistid' => 'self.id1'}, {'foreign.artistid' => 'self.id2'}]
-);
-DBICTest::Schema::CD->add_relationship(
- artist => 'DBICTest::Schema::Artist',
- { 'foreign.artistid' => 'self.artist' },
- { accessor => 'filter' },
-);
-DBICTest::Schema::CD->add_relationship(
- tracks => 'DBICTest::Schema::Track',
- { 'foreign.cd' => 'self.cdid' },
- { join_type => 'LEFT', cascade_delete => 1 }
-);
-DBICTest::Schema::CD->add_relationship(
- tags => 'DBICTest::Schema::Tag',
- { 'foreign.cd' => 'self.cdid' },
- { join_type => 'LEFT', cascade_delete => 1, cascade_copy => 1, accessor => 'multi', order_by => 'tag' }
-);
-#DBICTest::Schema::CD->might_have(liner_notes => 'DBICTest::Schema::LinerNotes' => qw/notes/);
-DBICTest::Schema::CD->add_relationship(
- liner_notes => 'DBICTest::Schema::LinerNotes',
- { 'foreign.liner_id' => 'self.cdid' },
- { join_type => 'LEFT', accessor => 'single' }
-);
-DBICTest::Schema::CD->add_relationship(
- cd_to_producer => 'DBICTest::Schema::CD_to_Producer',
- { 'foreign.cd' => 'self.cdid' },
- { join_type => 'LEFT', cascade_delete => 1 }
-);
-
-DBICTest::Schema::SelfRefAlias->add_relationship(
- self_ref => 'DBICTest::Schema::SelfRef',
- { 'foreign.id' => 'self.self_ref' },
- { accessor => 'single' }
-
-);
-DBICTest::Schema::SelfRefAlias->add_relationship(
- alias => 'DBICTest::Schema::SelfRef',
- { 'foreign.id' => 'self.alias' },
- { accessor => 'single' }
-);
-
-DBICTest::Schema::SelfRef->add_relationship(
- aliases => 'DBICTest::Schema::SelfRefAlias',
- { 'foreign.self_ref' => 'self.id' },
- { accessor => 'multi' }
-);
-
-DBICTest::Schema::Tag->add_relationship(
- cd => 'DBICTest::Schema::CD',
- { 'foreign.cdid' => 'self.cd' },
- { accessor => 'single' }
-);
-
-DBICTest::Schema::Track->add_relationship(
- cd => 'DBICTest::Schema::CD',
- { 'foreign.cdid' => 'self.cd' }
-);
-
-DBICTest::Schema::TwoKeys->add_relationship(
- artist => 'DBICTest::Schema::Artist',
- { 'foreign.artistid' => 'self.artist' }
-);
-DBICTest::Schema::TwoKeys->add_relationship(
- cd => 'DBICTest::Schema::CD',
- { 'foreign.cdid' => 'self.cd' }
-);
-
-DBICTest::Schema::CD_to_Producer->add_relationship(
- cd => 'DBICTest::Schema::CD',
- { 'foreign.cdid' => 'self.cd' }
-);
-DBICTest::Schema::CD_to_Producer->add_relationship(
- producer => 'DBICTest::Schema::Producer',
- { 'foreign.producerid' => 'self.producer' }
-);
-
-# now the Helpers
-DBICTest::Schema::CD->many_to_many( 'producers', 'cd_to_producer', 'producer');
-DBICTest::Schema::CD->many_to_many( 'producers_sorted', 'cd_to_producer', 'producer', { order_by => 'producer.name' });
-
-1;
use strict;
use warnings;
-__PACKAGE__->load_components(qw/PK::Auto Core/);
__PACKAGE__->table('bookmark');
__PACKAGE__->add_columns(qw/id link/);
__PACKAGE__->add_columns(
use base 'DBIx::Class::Core';
-__PACKAGE__->load_components('PK::Auto');
-
-DBICTest::Schema::CD->table('cd');
-DBICTest::Schema::CD->add_columns(
+__PACKAGE__->table('cd');
+__PACKAGE__->add_columns(
'cdid' => {
data_type => 'integer',
is_auto_increment => 1,
size => 100,
},
);
-DBICTest::Schema::CD->set_primary_key('cdid');
-DBICTest::Schema::CD->add_unique_constraint(artist_title => [ qw/artist title/ ]);
+__PACKAGE__->set_primary_key('cdid');
+__PACKAGE__->add_unique_constraint([ qw/artist title/ ]);
+
+__PACKAGE__->belongs_to( artist => 'DBICTest::Schema::Artist' );
+
+__PACKAGE__->has_many( tracks => 'DBICTest::Schema::Track' );
+__PACKAGE__->has_many(
+ tags => 'DBICTest::Schema::Tag', undef,
+ { order_by => 'tag' },
+);
+__PACKAGE__->has_many(
+ cd_to_producer => 'DBICTest::Schema::CD_to_Producer' => 'cd'
+);
+
+__PACKAGE__->might_have(
+ liner_notes => 'DBICTest::Schema::LinerNotes', undef,
+ { proxy => [ qw/notes/ ] },
+);
+__PACKAGE__->many_to_many( producers => cd_to_producer => 'producer' );
+__PACKAGE__->many_to_many(
+ producers_sorted => cd_to_producer => 'producer',
+ { order_by => 'producer.name' },
+);
1;
);
__PACKAGE__->set_primary_key(qw/cd producer/);
+__PACKAGE__->belongs_to(
+ 'cd', 'DBICTest::Schema::CD',
+ { 'foreign.cdid' => 'self.cd' }
+);
+
+__PACKAGE__->belongs_to(
+ 'producer', 'DBICTest::Schema::Producer',
+ { 'foreign.producerid' => 'self.producer' }
+);
+
1;
--- /dev/null
+package # hide from PAUSE
+ DBICTest::Schema::Employee;
+
+use base 'DBIx::Class::Core';
+
+__PACKAGE__->load_components(qw( Ordered ));
+
+__PACKAGE__->table('employee');
+
+__PACKAGE__->add_columns(
+ employee_id => {
+ data_type => 'integer',
+ is_auto_increment => 1
+ },
+ position => {
+ data_type => 'integer',
+ },
+ group_id => {
+ data_type => 'integer',
+ is_nullable => 1,
+ },
+ name => {
+ data_type => 'varchar',
+ size => 100,
+ is_nullable => 1,
+ },
+);
+
+__PACKAGE__->set_primary_key('employee_id');
+__PACKAGE__->position_column('position');
+
+#__PACKAGE__->add_unique_constraint(position_group => [ qw/position group_id/ ]);
+
+__PACKAGE__->mk_classdata('field_name_for', {
+ employee_id => 'primary key',
+ position => 'list position',
+ group_id => 'collection column',
+ name => 'employee name',
+});
+
+1;
--- /dev/null
+package DBICTest::Schema::Event;
+
+use strict;
+use warnings;
+use base qw/DBIx::Class::Core/;
+
+__PACKAGE__->load_components(qw/InflateColumn::DateTime/);
+
+__PACKAGE__->table('event');
+
+__PACKAGE__->add_columns(
+ id => { data_type => 'integer', is_auto_increment => 1 },
+ starts_at => { data_type => 'datetime' },
+ created_on => { data_type => 'timestamp' }
+);
+
+__PACKAGE__->set_primary_key('id');
+
+1;
'bar' => { data_type => 'integer' },
'hello' => { data_type => 'integer' },
'goodbye' => { data_type => 'integer' },
+ 'sensors' => { data_type => 'character' },
);
DBICTest::Schema::FourKeys->set_primary_key(qw/foo bar hello goodbye/);
+DBICTest::Schema::FourKeys->has_many(
+ 'fourkeys_to_twokeys', 'DBICTest::Schema::FourKeys_to_TwoKeys', {
+ 'foreign.f_foo' => 'self.foo',
+ 'foreign.f_bar' => 'self.bar',
+ 'foreign.f_hello' => 'self.hello',
+ 'foreign.f_goodbye' => 'self.goodbye',
+});
+
+DBICTest::Schema::FourKeys->many_to_many(
+ 'twokeys', 'fourkeys_to_twokeys', 'twokeys',
+);
+
1;
--- /dev/null
+package # hide from PAUSE
+ DBICTest::Schema::FourKeys_to_TwoKeys;
+
+use base 'DBIx::Class::Core';
+
+__PACKAGE__->table('fourkeys_to_twokeys');
+__PACKAGE__->add_columns(
+ 'f_foo' => { data_type => 'integer' },
+ 'f_bar' => { data_type => 'integer' },
+ 'f_hello' => { data_type => 'integer' },
+ 'f_goodbye' => { data_type => 'integer' },
+ 't_artist' => { data_type => 'integer' },
+ 't_cd' => { data_type => 'integer' },
+ 'autopilot' => { data_type => 'character' },
+);
+__PACKAGE__->set_primary_key(
+ qw/f_foo f_bar f_hello f_goodbye t_artist t_cd/
+);
+
+__PACKAGE__->belongs_to('fourkeys', 'DBICTest::Schema::FourKeys', {
+ 'foreign.foo' => 'self.f_foo',
+ 'foreign.bar' => 'self.f_bar',
+ 'foreign.hello' => 'self.f_hello',
+ 'foreign.goodbye' => 'self.f_goodbye',
+});
+
+__PACKAGE__->belongs_to('twokeys', 'DBICTest::Schema::TwoKeys', {
+ 'foreign.artist' => 'self.t_artist',
+ 'foreign.cd' => 'self.t_cd',
+});
+
+1;
+++ /dev/null
-package # hide from PAUSE
- DBICTest::Schema::HelperRels;
-
-use base 'DBIx::Class::Core';
-
-DBICTest::Schema::Artist->has_many(cds => 'DBICTest::Schema::CD', undef,
- { order_by => 'year' });
-DBICTest::Schema::Artist->has_many(twokeys => 'DBICTest::Schema::TwoKeys');
-DBICTest::Schema::Artist->has_many(onekeys => 'DBICTest::Schema::OneKey');
-
-DBICTest::Schema::CD->belongs_to('artist', 'DBICTest::Schema::Artist');
-
-DBICTest::Schema::CD->has_many(tracks => 'DBICTest::Schema::Track');
-DBICTest::Schema::CD->has_many(tags => 'DBICTest::Schema::Tag', undef,
- { order_by => 'tag' });
-DBICTest::Schema::CD->has_many(cd_to_producer => 'DBICTest::Schema::CD_to_Producer' => 'cd');
-
-DBICTest::Schema::CD->might_have(liner_notes => 'DBICTest::Schema::LinerNotes',
- undef, { proxy => [ qw/notes/ ] });
-
-DBICTest::Schema::SelfRefAlias->belongs_to(
- self_ref => 'DBICTest::Schema::SelfRef');
-DBICTest::Schema::SelfRefAlias->belongs_to(
- alias => 'DBICTest::Schema::SelfRef');
-
-DBICTest::Schema::SelfRef->has_many(
- aliases => 'DBICTest::Schema::SelfRefAlias' => 'self_ref');
-
-DBICTest::Schema::Tag->belongs_to('cd', 'DBICTest::Schema::CD');
-
-DBICTest::Schema::Track->belongs_to('cd', 'DBICTest::Schema::CD');
-DBICTest::Schema::Track->belongs_to('disc', 'DBICTest::Schema::CD', 'cd');
-
-DBICTest::Schema::TwoKeys->belongs_to('artist', 'DBICTest::Schema::Artist');
-DBICTest::Schema::TwoKeys->belongs_to('cd', 'DBICTest::Schema::CD');
-
-DBICTest::Schema::CD_to_Producer->belongs_to(
- 'cd', 'DBICTest::Schema::CD',
- { 'foreign.cdid' => 'self.cd' }
-);
-DBICTest::Schema::CD_to_Producer->belongs_to(
- 'producer', 'DBICTest::Schema::Producer',
- { 'foreign.producerid' => 'self.producer' }
-);
-DBICTest::Schema::Artist->has_many(
- 'artist_undirected_maps', 'DBICTest::Schema::ArtistUndirectedMap',
- [{'foreign.id1' => 'self.artistid'}, {'foreign.id2' => 'self.artistid'}],
- { cascade_copy => 0 } # this would *so* not make sense
-);
-DBICTest::Schema::ArtistUndirectedMap->belongs_to(
- 'artist1', 'DBICTest::Schema::Artist', 'id1');
-DBICTest::Schema::ArtistUndirectedMap->belongs_to(
- 'artist2', 'DBICTest::Schema::Artist', 'id2');
-DBICTest::Schema::ArtistUndirectedMap->has_many(
- 'mapped_artists', 'DBICTest::Schema::Artist',
- [{'foreign.artistid' => 'self.id1'}, {'foreign.artistid' => 'self.id2'}]);
-
-# now the Helpers
-DBICTest::Schema::CD->many_to_many( 'producers', 'cd_to_producer', 'producer');
-DBICTest::Schema::CD->many_to_many( 'producers_sorted', 'cd_to_producer', 'producer', { order_by => 'producer.name' });
-
-1;
use strict;
use warnings;
-__PACKAGE__->load_components(qw/PK::Auto Core/);
__PACKAGE__->table('link');
__PACKAGE__->add_columns(
'id' => {
use base 'DBIx::Class::Core';
-__PACKAGE__->load_components('PK::Auto');
-
DBICTest::Schema::OneKey->table('onekey');
DBICTest::Schema::OneKey->add_columns(
'id' => {
},
);
__PACKAGE__->set_primary_key('producerid');
+__PACKAGE__->add_unique_constraint(prod_name => [ qw/name/ ]);
1;
);\r
__PACKAGE__->set_primary_key('id');\r
\r
+__PACKAGE__->has_many( aliases => 'DBICTest::Schema::SelfRefAlias' => 'self_ref' );\r
+\r
1;\r
);\r
__PACKAGE__->set_primary_key(qw/self_ref alias/);\r
\r
+__PACKAGE__->belongs_to( self_ref => 'DBICTest::Schema::SelfRef' );\r
+__PACKAGE__->belongs_to( alias => 'DBICTest::Schema::SelfRef' );\r
+\r
1;\r
use base qw/DBIx::Class::Core/;
-__PACKAGE__->load_components('PK::Auto');
-
-DBICTest::Schema::Tag->table('tags');
-DBICTest::Schema::Tag->add_columns(
+__PACKAGE__->table('tags');
+__PACKAGE__->add_columns(
'tagid' => {
data_type => 'integer',
is_auto_increment => 1,
size => 100,
},
);
-DBICTest::Schema::Tag->set_primary_key('tagid');
+__PACKAGE__->set_primary_key('tagid');
+
+__PACKAGE__->belongs_to( cd => 'DBICTest::Schema::CD' );
1;
use base 'DBIx::Class::Core';
-DBICTest::Schema::Track->table('track');
-DBICTest::Schema::Track->add_columns(
+__PACKAGE__->table('track');
+__PACKAGE__->add_columns(
'trackid' => {
data_type => 'integer',
is_auto_increment => 1,
size => 100,
},
);
-DBICTest::Schema::Track->set_primary_key('trackid');
+__PACKAGE__->set_primary_key('trackid');
+
+__PACKAGE__->belongs_to( cd => 'DBICTest::Schema::CD' );
+__PACKAGE__->belongs_to( disc => 'DBICTest::Schema::CD' => 'cd');
1;
package # hide from PAUSE
DBICTest::Schema::TreeLike;
-use base qw/DBIx::Class/;
-
-__PACKAGE__->load_components(qw/PK::Auto::SQLite Core/);
+use base qw/DBIx::Class::Core/;
__PACKAGE__->table('treelike');
__PACKAGE__->add_columns(
__PACKAGE__->set_primary_key(qw/id/);
__PACKAGE__->belongs_to('parent', 'TreeLike',
{ 'foreign.id' => 'self.parent' });
+__PACKAGE__->has_many('children', 'TreeLike', { 'foreign.parent' => 'self.id' });
1;
package # hide from PAUSE
DBICTest::Schema::TwoKeyTreeLike;
-use base qw/DBIx::Class/;
-
-__PACKAGE__->load_components(qw/Core/);
+use base qw/DBIx::Class::Core/;
__PACKAGE__->table('twokeytreelike');
__PACKAGE__->add_columns(
},
);
__PACKAGE__->set_primary_key(qw/id1 id2/);
-__PACKAGE__->belongs_to('parent', 'TwoKeyTreeLike',
+__PACKAGE__->add_unique_constraint('tktlnameunique' => ['name']);
+__PACKAGE__->belongs_to('parent', 'DBICTest::Schema::TwoKeyTreeLike',
{ 'foreign.id1' => 'self.parent1', 'foreign.id2' => 'self.parent2'});
1;
use base 'DBIx::Class::Core';
-DBICTest::Schema::TwoKeys->table('twokeys');
-DBICTest::Schema::TwoKeys->add_columns(
+__PACKAGE__->table('twokeys');
+__PACKAGE__->add_columns(
'artist' => { data_type => 'integer' },
'cd' => { data_type => 'integer' },
);
-DBICTest::Schema::TwoKeys->set_primary_key(qw/artist cd/);
+__PACKAGE__->set_primary_key(qw/artist cd/);
+
+__PACKAGE__->belongs_to( artist => 'DBICTest::Schema::Artist' );
+__PACKAGE__->belongs_to( cd => 'DBICTest::Schema::CD' );
+
+__PACKAGE__->has_many(
+ 'fourkeys_to_twokeys', 'DBICTest::Schema::FourKeys_to_TwoKeys', {
+ 'foreign.t_artist' => 'self.artist',
+ 'foreign.t_cd' => 'self.cd',
+});
+
+__PACKAGE__->many_to_many(
+ 'fourkeys', 'fourkeys_to_twokeys', 'fourkeys',
+);
1;
+++ /dev/null
-use strict;
-use warnings;
-use DBICTest;
-
-my $schema = DBICTest->initialise;
-
-$schema->storage->on_connect_do([ "PRAGMA synchronous = OFF" ]);
-
-my $dbh = $schema->storage->dbh;
-
-if ($ENV{"DBICTEST_SQLT_DEPLOY"}) {
- $schema->deploy;
-} else {
- open IN, "t/lib/sqlite.sql";
-
- my $sql;
-
- { local $/ = undef; $sql = <IN>; }
-
- close IN;
-
- $dbh->do($_) for split(/\n\n/, $sql);
-}
-
-$schema->storage->dbh->do("PRAGMA synchronous = OFF");
-
-$schema->populate('Artist', [
- [ qw/artistid name/ ],
- [ 1, 'Caterwauler McCrae' ],
- [ 2, 'Random Boy Band' ],
- [ 3, 'We Are Goth' ],
-]);
-
-$schema->populate('CD', [
- [ qw/cdid artist title year/ ],
- [ 1, 1, "Spoonful of bees", 1999 ],
- [ 2, 1, "Forkful of bees", 2001 ],
- [ 3, 1, "Caterwaulin' Blues", 1997 ],
- [ 4, 2, "Generic Manufactured Singles", 2001 ],
- [ 5, 3, "Come Be Depressed With Us", 1998 ],
-]);
-
-$schema->populate('LinerNotes', [
- [ qw/liner_id notes/ ],
- [ 2, "Buy Whiskey!" ],
- [ 4, "Buy Merch!" ],
- [ 5, "Kill Yourself!" ],
-]);
-
-$schema->populate('Tag', [
- [ qw/tagid cd tag/ ],
- [ 1, 1, "Blue" ],
- [ 2, 2, "Blue" ],
- [ 3, 3, "Blue" ],
- [ 4, 5, "Blue" ],
- [ 5, 2, "Cheesy" ],
- [ 6, 4, "Cheesy" ],
- [ 7, 5, "Cheesy" ],
- [ 8, 2, "Shiny" ],
- [ 9, 4, "Shiny" ],
-]);
-
-$schema->populate('TwoKeys', [
- [ qw/artist cd/ ],
- [ 1, 1 ],
- [ 1, 2 ],
- [ 2, 2 ],
-]);
-
-$schema->populate('FourKeys', [
- [ qw/foo bar hello goodbye/ ],
- [ 1, 2, 3, 4 ],
- [ 5, 4, 3, 6 ],
-]);
-
-$schema->populate('OneKey', [
- [ qw/id artist cd/ ],
- [ 1, 1, 1 ],
- [ 2, 1, 2 ],
- [ 3, 2, 2 ],
-]);
-
-$schema->populate('SelfRef', [
- [ qw/id name/ ],
- [ 1, 'First' ],
- [ 2, 'Second' ],
-]);
-
-$schema->populate('SelfRefAlias', [
- [ qw/self_ref alias/ ],
- [ 1, 2 ]
-]);
-
-$schema->populate('ArtistUndirectedMap', [
- [ qw/id1 id2/ ],
- [ 1, 2 ]
-]);
-
-$schema->populate('Producer', [
- [ qw/producerid name/ ],
- [ 1, 'Matt S Trout' ],
- [ 2, 'Bob The Builder' ],
- [ 3, 'Fred The Phenotype' ],
-]);
-
-$schema->populate('CD_to_Producer', [
- [ qw/cd producer/ ],
- [ 1, 1 ],
- [ 1, 2 ],
- [ 1, 3 ],
-]);
-
-$schema->populate('TreeLike', [
- [ qw/id parent name/ ],
- [ 1, 0, 'foo' ],
- [ 2, 1, 'bar' ],
- [ 3, 2, 'baz' ],
- [ 4, 3, 'quux' ],
-]);
-
-$schema->populate('Track', [
- [ qw/trackid cd position title/ ],
- [ 4, 2, 1, "Stung with Success"],
- [ 5, 2, 2, "Stripy"],
- [ 6, 2, 3, "Sticky Honey"],
- [ 7, 3, 1, "Yowlin"],
- [ 8, 3, 2, "Howlin"],
- [ 9, 3, 3, "Fowlin"],
- [ 10, 4, 1, "Boring Name"],
- [ 11, 4, 2, "Boring Song"],
- [ 12, 4, 3, "No More Ideas"],
- [ 13, 5, 1, "Sad"],
- [ 14, 5, 2, "Under The Weather"],
- [ 15, 5, 3, "Suicidal"],
- [ 16, 1, 1, "The Bees Knees"],
- [ 17, 1, 2, "Apiary"],
- [ 18, 1, 3, "Beehind You"],
-]);
-
-$schema->populate('Link', [
- [ qw/id title/ ],
- [ 1, 'aaa' ]
-]);
-
-$schema->populate('Bookmark', [
- [ qw/id link/ ],
- [ 1, 1 ]
-]);
-
-1;
--- /dev/null
+# belongs to t/run/90ensure_class_loaded.tl
+package # hide from PAUSE
+ DBICTest::SyntaxErrorComponent1;
+use warnings;
+use strict;
+
+my $str ''; # syntax error
+
+1;
--- /dev/null
+# belongs to t/run/90ensure_class_loaded.tl
+package # hide from PAUSE
+ DBICTest::SyntaxErrorComponent2;
+use warnings;
+use strict;
+
+my $str ''; # syntax error
+
+1;
--
-- Created by SQL::Translator::Producer::SQLite
--- Created on Fri May 12 01:09:57 2006
+-- Created on Thu Jun 22 22:47:36 2006
--
BEGIN TRANSACTION;
--
--- Table: serialized
+-- Table: employee
--
-CREATE TABLE serialized (
- id INTEGER PRIMARY KEY NOT NULL,
- serialized text NOT NULL
+CREATE TABLE employee (
+ employee_id INTEGER PRIMARY KEY NOT NULL,
+ position integer NOT NULL,
+ group_id integer,
+ name varchar(100)
);
--
--- Table: liner_notes
+-- Table: serialized
--
-CREATE TABLE liner_notes (
- liner_id INTEGER PRIMARY KEY NOT NULL,
- notes varchar(100) NOT NULL
+CREATE TABLE serialized (
+ id INTEGER PRIMARY KEY NOT NULL,
+ serialized text NOT NULL
);
--
);
--
+-- Table: liner_notes
+--
+CREATE TABLE liner_notes (
+ liner_id INTEGER PRIMARY KEY NOT NULL,
+ notes varchar(100) NOT NULL
+);
+
+--
-- Table: artist
--
CREATE TABLE artist (
);
--
+-- Table: fourkeys_to_twokeys
+--
+CREATE TABLE fourkeys_to_twokeys (
+ f_foo integer NOT NULL,
+ f_bar integer NOT NULL,
+ f_hello integer NOT NULL,
+ f_goodbye integer NOT NULL,
+ t_artist integer NOT NULL,
+ t_cd integer NOT NULL,
+ autopilot character NOT NULL,
+ PRIMARY KEY (f_foo, f_bar, f_hello, f_goodbye, t_artist, t_cd)
+);
+
+--
-- Table: twokeytreelike
--
CREATE TABLE twokeytreelike (
);
--
--- Table: link
+-- Table: treelike
--
-CREATE TABLE link (
+CREATE TABLE treelike (
id INTEGER PRIMARY KEY NOT NULL,
- url varchar(100),
- title varchar(100)
+ parent integer NOT NULL,
+ name varchar(100) NOT NULL
);
--
);
--
--- Table: treelike
+-- Table: link
--
-CREATE TABLE treelike (
+CREATE TABLE link (
id INTEGER PRIMARY KEY NOT NULL,
- parent integer NOT NULL,
- name varchar(100) NOT NULL
+ url varchar(100),
+ title varchar(100)
);
--
);
--
+-- Table: event
+--
+CREATE TABLE event (
+ id INTEGER PRIMARY KEY NOT NULL,
+ starts_at datetime NOT NULL,
+ created_on timestamp NOT NULL
+);
+
+--
-- Table: twokeys
--
CREATE TABLE twokeys (
bar integer NOT NULL,
hello integer NOT NULL,
goodbye integer NOT NULL,
+ sensors character NOT NULL,
PRIMARY KEY (foo, bar, hello, goodbye)
);
cd integer NOT NULL
);
+CREATE UNIQUE INDEX tktlnameunique_twokeytreelike on twokeytreelike (name);
+CREATE UNIQUE INDEX cd_artist_title_cd on cd (artist, title);
+CREATE UNIQUE INDEX prod_name_producer on producer (name);
COMMIT;
+++ /dev/null
-sub run_tests {
-my $schema = shift;
-
-use strict;
-use warnings;
-plan tests => 26;
-
-# has_a test
-my $cd = $schema->resultset("CD")->find(4);
-my ($artist) = ($INC{'DBICTest/HelperRels'}
- ? $cd->artist
- : $cd->search_related('artist'));
-is($artist->name, 'Random Boy Band', 'has_a search_related ok');
-
-# has_many test with an order_by clause defined
-$artist = $schema->resultset("Artist")->find(1);
-my @cds = ($INC{'DBICTest/HelperRels'}
- ? $artist->cds
- : $artist->search_related('cds'));
-is( $cds[1]->title, 'Spoonful of bees', 'has_many search_related with order_by ok' );
-
-# search_related with additional abstract query
-@cds = ($INC{'DBICTest/HelperRels'}
- ? $artist->cds({ title => { like => '%of%' } })
- : $artist->search_related('cds', { title => { like => '%of%' } } )
- );
-is( $cds[1]->title, 'Forkful of bees', 'search_related with abstract query ok' );
-
-# creating a related object
-if ($INC{'DBICTest/HelperRels.pm'}) {
- $artist->add_to_cds({ title => 'Big Flop', year => 2005 });
-} else {
- $artist->create_related( 'cds', {
- title => 'Big Flop',
- year => 2005,
- } );
-}
-
-is( ($artist->search_related('cds'))[3]->title, 'Big Flop', 'create_related ok' );
-
-# count_related
-is( $artist->count_related('cds'), 4, 'count_related ok' );
-
-# set_from_related
-my $track = $schema->resultset("Track")->create( {
- trackid => 1,
- cd => 3,
- position => 98,
- title => 'Hidden Track'
-} );
-$track->set_from_related( cd => $cd );
-
-if ($INC{'DBICTest/HelperRels.pm'}) { # expect inflated object
- is($track->disc->cdid, 4, 'set_from_related ok, including alternative accessor' );
-} else {
- is( $track->cd, 4, 'set_from_related ok' );
-}
-
-$track->set_from_related( cd => undef );
-
-ok( !defined($track->cd), 'set_from_related with undef ok');
-
-
-# update_from_related, the same as set_from_related, but it calls update afterwards
-$track = $schema->resultset("Track")->create( {
- trackid => 2,
- cd => 3,
- position => 99,
- title => 'Hidden Track'
-} );
-$track->update_from_related( cd => $cd );
-
-my $t_cd = ($schema->resultset("Track")->search( cd => 4, position => 99 ))[0]->cd;
-
-if ($INC{'DBICTest/HelperRels.pm'}) { # except inflated object
- is( $t_cd->cdid, 4, 'update_from_related ok' );
-} else {
- is( $t_cd, 4, 'update_from_related ok' );
-}
-
-# find_or_create_related with an existing record
-$cd = $artist->find_or_create_related( 'cds', { title => 'Big Flop' } );
-is( $cd->year, 2005, 'find_or_create_related on existing record ok' );
-
-# find_or_create_related creating a new record
-$cd = $artist->find_or_create_related( 'cds', {
- title => 'Greatest Hits',
- year => 2006,
-} );
-is( $cd->title, 'Greatest Hits', 'find_or_create_related new record ok' );
-@cds = $artist->search_related('cds');
-is( ($artist->search_related('cds'))[4]->title, 'Greatest Hits', 'find_or_create_related new record search ok' );
-
-$artist->delete_related( cds => { title => 'Greatest Hits' });
-cmp_ok( $schema->resultset("CD")->search( title => 'Greatest Hits' ), '==', 0, 'delete_related ok' );
-
-SKIP: {
- skip "relationship checking needs fixing", 1;
- # try to add a bogus relationship using the wrong cols
- eval {
- DBICTest::Schema::Artist->add_relationship(
- tracks => 'DBICTest::Schema::Track',
- { 'foreign.cd' => 'self.cdid' }
- );
- };
- like($@, qr/Unknown column/, 'failed when creating a rel with invalid key, ok');
-}
-
-# another bogus relationship using no join condition
-eval {
- DBICTest::Schema::Artist->add_relationship( tracks => 'DBICTest::Track' );
-};
-like($@, qr/join condition/, 'failed when creating a rel without join condition, ok');
-
-# many_to_many helper test
-$cd = $schema->resultset("CD")->find(1);
-my @producers = $cd->producers();
-is( $producers[0]->name, 'Matt S Trout', 'many_to_many ok' );
-is( $cd->producers_sorted->next->name, 'Bob The Builder', 'sorted many_to_many ok' );
-is( $cd->producers_sorted(producerid => 3)->next->name, 'Fred The Phenotype', 'sorted many_to_many with search condition ok' );
-
-# test undirected many-to-many relationship (e.g. "related artists")
-my $undir_maps = $schema->resultset("Artist")->find(1)->artist_undirected_maps;
-is($undir_maps->count, 1, 'found 1 undirected map for artist 1');
-
-$undir_maps = $schema->resultset("Artist")->find(2)->artist_undirected_maps;
-is($undir_maps->count, 1, 'found 1 undirected map for artist 2');
-
-my $mapped_rs = $undir_maps->search_related('mapped_artists');
-
-my @art = $mapped_rs->all;
-
-cmp_ok(@art, '==', 2, "Both artist returned from map");
-
-my $searched = $mapped_rs->search({'mapped_artists.artistid' => {'!=', undef}});
-
-cmp_ok($searched->count, '==', 2, "Both artist returned from map after adding another condition");
-
-# check join through cascaded has_many relationships
-$artist = $schema->resultset("Artist")->find(1);
-my $trackset = $artist->cds->search_related('tracks');
-# LEFT join means we also see the trackless additional album...
-cmp_ok($trackset->count, '==', 11, "Correct number of tracks for artist");
-
-# now see about updating eveything that belongs to artist 2 to artist 3
-$artist = $schema->resultset("Artist")->find(2);
-my $nartist = $schema->resultset("Artist")->find(3);
-cmp_ok($artist->cds->count, '==', 1, "Correct orig #cds for artist");
-cmp_ok($nartist->cds->count, '==', 1, "Correct orig #cds for artist");
-$artist->cds->update({artist => $nartist->id});
-cmp_ok($artist->cds->count, '==', 0, "Correct new #cds for artist");
-cmp_ok($nartist->cds->count, '==', 2, "Correct new #cds for artist");
-
-}
-
-1;
+++ /dev/null
-sub run_tests {
-my $schema = shift;
-
-my ($dsn, $user, $pass) = @ENV{map { "DBICTEST_PG_${_}" } qw/DSN USER PASS/};
-
-#warn "$dsn $user $pass";
-
-plan skip_all, 'Set $ENV{DBICTEST_PG_DSN}, _USER and _PASS to run this test'
- . ' (note: creates and drops a table named artist!)' unless ($dsn && $user);
-
-plan tests => 4;
-
-DBICTest::Schema->compose_connection('PgTest' => $dsn, $user, $pass);
-
-my $dbh = PgTest->schema->storage->dbh;
-
-$dbh->do("CREATE TABLE artist (artistid serial PRIMARY KEY, name VARCHAR(255), charfield CHAR(10));");
-
-PgTest::Artist->load_components('PK::Auto');
-
-my $new = PgTest::Artist->create({ name => 'foo' });
-
-is($new->artistid, 1, "Auto-PK worked");
-
-$new = PgTest::Artist->create({ name => 'bar' });
-
-is($new->artistid, 2, "Auto-PK worked");
-
-my $test_type_info = {
- 'artistid' => {
- 'data_type' => 'integer',
- 'is_nullable' => 0,
- 'size' => 4,
- },
- 'name' => {
- 'data_type' => 'character varying',
- 'is_nullable' => 1,
- 'size' => 255,
- 'default_value' => undef,
- },
- 'charfield' => {
- 'data_type' => 'character',
- 'is_nullable' => 1,
- 'size' => 10,
- 'default_value' => undef,
- },
-};
-
-
-my $type_info = PgTest->schema->storage->columns_info_for('artist');
-my $artistid_defval = delete $type_info->{artistid}->{default_value};
-like($artistid_defval,
- qr/^nextval\('public\.artist_artistid_seq'::(?:text|regclass)\)/,
- 'columns_info_for - sequence matches Pg get_autoinc_seq expectations');
-is_deeply($type_info, $test_type_info,
- 'columns_info_for - column data types');
-
-$dbh->do("DROP TABLE artist;");
-
-}
-
-1;
+++ /dev/null
-sub run_tests {
-my $schema = shift;
-
-eval 'use Data::UUID ; 1'
- or plan skip_all, 'Install Data::UUID run this test';
-
-plan tests => 1;
-DBICTest::Schema::Artist->load_components('UUIDColumns');
-DBICTest::Schema::Artist->uuid_columns('name');
-Class::C3->reinitialize();
-
-my $artist = $schema->resultset("Artist")->create( { artistid => 100 } );
-like $artist->name, qr/[\w-]{36}/, 'got something like uuid';
-
-}
-
-1;
+++ /dev/null
-sub run_tests {
-my $schema = shift;
-
-plan tests => 18;
-
-my $artistid = 1;
-my $title = 'UNIQUE Constraint';
-
-my $cd1 = $schema->resultset('CD')->find_or_create({
- artist => $artistid,
- title => $title,
- year => 2005,
-});
-
-my $cd2 = $schema->resultset('CD')->find(
- {
- artist => $artistid,
- title => $title,
- },
- { key => 'artist_title' }
-);
-
-is($cd2->get_column('artist'), $cd1->get_column('artist'), 'find by specific key: artist is correct');
-is($cd2->title, $cd1->title, 'title is correct');
-is($cd2->year, $cd1->year, 'year is correct');
-
-my $cd3 = $schema->resultset('CD')->update_or_create(
- {
- artist => $artistid,
- title => $title,
- year => 2007,
- },
-);
-
-ok(! $cd3->is_changed, 'update_or_create without key: row is clean');
-is($cd3->cdid, $cd2->cdid, 'cdid is correct');
-is($cd3->get_column('artist'), $cd2->get_column('artist'), 'artist is correct');
-is($cd3->title, $cd2->title, 'title is correct');
-is($cd3->year, 2007, 'updated year is correct');
-
-my $cd4 = $schema->resultset('CD')->update_or_create(
- {
- artist => $artistid,
- title => $title,
- year => 2007,
- },
- { key => 'artist_title' }
-);
-
-ok(! $cd4->is_changed, 'update_or_create by specific key: row is clean');
-is($cd4->cdid, $cd2->cdid, 'cdid is correct');
-is($cd4->get_column('artist'), $cd2->get_column('artist'), 'artist is correct');
-is($cd4->title, $cd2->title, 'title is correct');
-is($cd4->year, 2007, 'updated year is correct');
-
-my $cd5 = $schema->resultset('CD')->update_or_create(
- {
- cdid => $cd2->cdid,
- artist => 1,
- title => $cd2->title,
- year => 2005,
- },
- { key => 'primary' }
-);
-
-ok(! $cd5->is_changed, 'update_or_create by PK: row is clean');
-is($cd5->cdid, $cd2->cdid, 'cdid is correct');
-is($cd5->get_column('artist'), $cd2->get_column('artist'), 'artist is correct');
-is($cd5->title, $cd2->title, 'title is correct');
-is($cd5->year, 2005, 'updated year is correct');
-
-}
-
-1;
+++ /dev/null
-sub run_tests {
- my $schema = shift;
-
- eval 'use Encode ; 1'
- or plan skip_all, 'Install Encode run this test';
-
- plan tests => 2;
-
- DBICTest::Schema::Artist->load_components('UTF8Columns');
- DBICTest::Schema::Artist->utf8_columns('name');
- Class::C3->reinitialize();
-
- my $artist = $schema->resultset("Artist")->create( { name => 'uni' } );
- ok( Encode::is_utf8( $artist->name ), 'got name with utf8 flag' );
-
- my $utf8_char = 'uniuni';
- Encode::_utf8_on($utf8_char);
- $artist->name($utf8_char);
- ok( !Encode::is_utf8( $artist->{_column_data}->{name} ),
- 'store utf8 less chars' );
-}
-
-1;