From: David Kamholz Date: Thu, 6 Jul 2006 21:40:24 +0000 (+0000) Subject: Merge 'trunk' into 'DBIx-Class-current' X-Git-Tag: v0.07002~75^2~49 X-Git-Url: http://git.shadowcat.co.uk/gitweb/gitweb.cgi?a=commitdiff_plain;h=3ffa4e3b9cbe6bfc1d0ff1c8f20891aacdf13c09;hp=102a298406840b8ec81fa0b2643fcd15884dedf3;p=dbsrgits%2FDBIx-Class.git Merge 'trunk' into 'DBIx-Class-current' r13443@haferschleim (orig r2096): antirice | 2006-07-04 05:57:26 +0200 SQL::Abstract compatability r13444@haferschleim (orig r2097): matthewt | 2006-07-04 06:00:22 +0200 reverted commit that should have gone on branches/subselect r13533@haferschleim (orig r2117): jguenther | 2006-07-06 01:35:53 +0200 updated my contact info --- diff --git a/Build.PL b/Build.PL index f1d2ad8..2ab62b9 100644 --- a/Build.PL +++ b/Build.PL @@ -15,18 +15,16 @@ my %arguments = ( 'Class::Data::Accessor' => 0.01, 'Carp::Clan' => 0, 'DBI' => 1.40, + 'Module::Find' => 0, + 'Class::Inspector' => 0, }, build_requires => { 'DBD::SQLite' => 1.11, }, - recommends => { - 'Data::UUID' => 0, - 'Module::Find' => 0, - 'Class::Inspector' => 0, - }, create_makefile_pl => 'passthrough', create_readme => 1, - test_files => [ glob('t/*.t'), glob('t/*/*.t') ] + test_files => [ glob('t/*.t'), glob('t/*/*.t') ], + script_files => [ glob('script/*') ], ); Module::Build->new(%arguments)->create_build_script; diff --git a/Changes b/Changes index 54514fb..19080c1 100644 --- a/Changes +++ b/Changes @@ -1,6 +1,91 @@ Revision history for DBIx::Class -0.06003 + - rename _parent_rs to _parent_source in ResultSet + +0.06999_05 2006-07-04 14:40:01 + - fix issue with incorrect $rs->{attrs}{alias} + - fix subclassing issue with source_name + - tweak quotes test to output text on failure + - fix Schema->txn_do to not fail as a classmethod + +0.06999_04 2006-06-29 20:18:47 + - disable cdbi-t/02-Film.t warning tests under AS perl + - fixups to MySQL tests (aka "work round mysql being retarded") + - compat tweaks for Storage debug logging + +0.06999_03 
2006-06-26 21:04:44 + - various documentation improvements + - fixes to pass test suite on Windows + - rewrote and cleaned up SQL::Translator tests + - changed relationship helpers to only call ensure_class_loaded when the + join condition is inferred + - rewrote many_to_many implementation, now provides helpers for adding + and deleting objects without dealing with the link table + - reworked InflateColumn implementation to lazily deflate where + possible; now handles passing an inflated object to new() + - changed join merging to not create a rel_2 alias when adding a join + that already exists in a parent resultset + - Storage::DBI::deployment_statements now calls ensure_connected + if it isn't passed a type + - fixed Componentized::ensure_class_loaded + - InflateColumn::DateTime supports date as well as datetime + - split Storage::DBI::MSSQL into MSSQL and Sybase::MSSQL + - fixed wrong debugging hook call in Storage::DBI + - set connect_info properly before setting any ->sql_maker things + +0.06999_02 2006-06-09 23:58:33 + - Fixed up POD::Coverage tests, filled in some POD holes + - Added a warning for incorrect component order in load_components + - Fixed resultset bugs to do with related searches + - added code and tests for Componentized::ensure_class_found and + load_optional_class + - NoBindVars + Sybase + MSSQL stuff + - only rebless S::DBI if it is still S::DBI and not a subclass + - Added `use' statement for DBD::Pg in Storage::DBI::Pg + - stopped test relying on order of unordered search + - bugfix for join-types in nested joins using the from attribute + - obscure prefetch problem fixed + - tightened up deep search_related + - Fixed 'DBIx/Class/DB.pm did not return a true value' error + - Revert change to test for deprecated find usage and swallow warnings + - Slight wording change to new_related() POD + - new specific test for connect_info coderefs + - POD clarification and content bugfixing + a few code formatting fixes + - POD::Coverage additions + 
- fixed debugfh + - Fix column_info stomping + +0.06999_01 2006-05-28 17:19:30 + - add automatic naming of unique constraints + - marked DB.pm as deprecated and noted it will be removed by 1.0 + - add ResultSetColumn + - refactor ResultSet code to resolve attrs as late as possible + - merge prefetch attrs into join attrs + - add +select and +as attributes to ResultSet + - added InflateColumn::DateTime component + - refactor debugging to allow for profiling using Storage::Statistics + - removed Data::UUID from deps, made other optionals required + - modified SQLT parser to skip dupe table names + - added remove_column(s) to ResultSource/ResultSourceProxy + - added add_column alias to ResultSourceProxy + - added source_name to ResultSource + - load_classes now uses source_name and sets it if necessary + - add update_or_create_related to Relationship::Base + - add find_or_new to ResultSet/ResultSetProxy and find_or_new_related + to Relationship::Base + - add accessors for unique constraint names and coulums to + ResultSource/ResultSourceProxy + - rework ResultSet::find() to search unique constraints + - CDBICompat: modify retrieve to fix column casing when ColumnCase is + loaded + - CDBICompat: override find_or_create to fix column casing when + ColumnCase is loaded + - reorganized and simplified tests + - added Ordered + - added the ability to set on_connect_do and the various sql_maker + options as part of Storage::DBI's connect_info. + +0.06003 2006-05-19 15:37:30 - make find_or_create_related check defined() instead of truth - don't unnecessarily fetch rels for cascade_update - don't set_columns explicitly in update_or_create; instead use @@ -45,7 +130,7 @@ Revision history for DBIx::Class - columns_info_for made more robust / informative - ithreads compat added, fork compat improved - weaken result_source in all resultsets - - Make pg seq extractor less sensitive. + - Make pg seq extractor less sensitive. 
0.05999_03 2006-03-14 01:58:10 - has_many prefetch fixes @@ -92,7 +177,7 @@ Revision history for DBIx::Class - remove build dependency on version.pm 0.05004 2006-02-13 20:59:00 - - allow specification of related columns via cols attr when primary + - allow specification of related columns via cols attr when primary keys of the related table are not fetched - fix count for group_by as scalar - add horrific fix to make Oracle's retarded limit syntax work diff --git a/MANIFEST.SKIP b/MANIFEST.SKIP index 94080e9..10c77a4 100644 --- a/MANIFEST.SKIP +++ b/MANIFEST.SKIP @@ -14,6 +14,7 @@ # for developers only :) ^TODO$ +^VERSIONING\.SKETCH$ # Avoid Module::Build generated and utility files. \bBuild$ diff --git a/Makefile.PL b/Makefile.PL index 51d31fd..192903a 100644 --- a/Makefile.PL +++ b/Makefile.PL @@ -17,15 +17,15 @@ # Save this 'cause CPAN will chdir all over the place. my $cwd = Cwd::cwd(); - my $makefile = File::Spec->rel2abs($0); - CPAN::Shell->install('Module::Build::Compat') - or die " *** Cannot install without Module::Build. Exiting ...\n"; + CPAN::Shell->install('Module::Build::Compat'); + CPAN::Shell->expand("Module", "Module::Build::Compat")->uptodate + or die "Couldn't install Module::Build, giving up.\n"; chdir $cwd or die "Cannot chdir() back to $cwd: $!"; } eval "use Module::Build::Compat 0.02; 1" or die $@; - use lib '_build/lib'; + Module::Build::Compat->run_build_pl(args => \@ARGV); require Module::Build; Module::Build::Compat->write_makefile(build_class => 'Module::Build'); diff --git a/TODO b/TODO index d0726b3..e22c6ba 100644 --- a/TODO +++ b/TODO @@ -1,3 +1,23 @@ +2005-04-16 by mst + - set_from_related should take undef + - ResultSource objects caching ->resultset causes interesting problems + - find why XSUB dumper kills schema in Catalyst (may be Pg only?) 
+ +2006-04-11 by castaway + - using PK::Auto should set is_auto_increment for the PK columns, so that copy() "just works" + - docs of copy() should say that is_auto_increment is essential for auto_incrementing keys + +2006-03-25 by mst + - Refactor ResultSet::new to be less hairy + - we should move the setup of select, as, and from out of here + - these should be local rs attrs, not main attrs, and extra joins + provided on search should be merged + - find a way to un-wantarray search without breaking compat + - audit logging component + - delay relationship setup if done via ->load_classes + - double-sided relationships + - incremental deploy + - make short form of class specifier in relationships work 2006-01-31 by bluefeet - Create a DBIx::Class::FilterColumn to replace inflate/deflate. This @@ -7,13 +27,34 @@ We should still support the old inflate/deflate syntax, but this new way should be recommended. -2006-02-07 by JR +2006-02-07 by castaway - Extract DBIC::SQL::Abstract into a separate module for CPAN - Chop PK::Auto::Foo up to have PK::Auto refer to an appropriate DBIx::Storage::DBI::Foo, which will be loaded on connect from Driver info? +(done -> 0.06001!) - Add deploy method to Schema, which will create DB tables from Schema, via SQLT +(sorta done) 2006-03-18 by bluefeet - Support table locking. +2006-03-21 by bluefeet + - When subclassing a dbic class make it so you don't have to do + __PACKAGE__->table(__PACKAGE__->table()); for the result set to + return the correct object type. + +2006-03-27 by mst + Add the ability for deploy to be given a directory and grab .sql + out of there if available. Try SQL::Translator if not. If none of the above, + cry (and die()). Then you can have a script that pre-gens for all available + SQLT modules so an app can do its own deploy without SQLT on the target + system + +2006-05-25 by mst (TODOed by bluefeet) + Add the search attributes "limit" and "rows_per_page". 
+ limit: work as expected just like offset does + rows_per_page: only be used if you used the page attr or called $rs->page + rows: modify to be an alias that gets used to populate either as appropriate, + if you haven't specified one of the others + diff --git a/VERSIONING.SKETCH b/VERSIONING.SKETCH new file mode 100644 index 0000000..03e6ea1 --- /dev/null +++ b/VERSIONING.SKETCH @@ -0,0 +1,30 @@ +Schema versioning/deployment ideas from Jess (with input from theorbtwo and mst): +1) Add a method to storage to: + - take args of DB type, version, and optional file/pathname + - create an SQL file, via SQLT, for the current schema + - passing prev. version + version will create an sqlt-diff'ed upgrade file, such as + - $preversion->$currentversion-$dbtype.sql, which contains ALTER foo statements. +2) Make deploy/deploy_statements able to to load from the appropriate file, for the current DB, or on the fly? - Compare against current schema version.. +3) Add an on_connect_cb (callback) thingy to storage. +4) create a component to deploy version/updates: + - it hooks itself into on_connect_cb ? + - when run it: + - Attempts or prompts a backup of the database. (commands for these per-rdbms can be stored in storage::dbi:: ?) 
+ - Checks the version of the current schema being used + - Compares it to some schema table containing the installed version + - If none such exists, we can attempt to sqlt-diff the DB structure with the schema + - If version does exist, we use an array of user-defined upgrade paths, + eg: version = '3x.'; schema = '1.x', upgrade paths = ('1.x->2.x', '2.x->3.x') + - Find the appropriate upgrade-path file, parse into two chunks: + a) the commands which do not contain "DROP" + b) the ones that do + - Calls user callbacks for "pre-upgrade" + - Runs the first set of commands on the DB + - Calls user callbacks for "post-alter" + - Runs drop commands + - Calls user callbacks for "post-drop" + - The user will need to define (or ignore) the following callbacks: + - "pre-upgrade", any code to be run before the upgrade, called with schema object, version-from, version-to, db-type .. bear in mind that here any new fields in the schema will not work, but can be used via scalarrefs. + - "post-alter", this is the main callback, at this stage, all old and new fields will be available, to allow data migration. + - "post-drop", this is the clean-up stage, now only new fields are available. + diff --git a/lib/DBIx/Class.pm b/lib/DBIx/Class.pm index 2f05309..69c9385 100644 --- a/lib/DBIx/Class.pm +++ b/lib/DBIx/Class.pm @@ -13,7 +13,7 @@ sub component_base_class { 'DBIx::Class' } # i.e. first release of 0.XX *must* be 0.XX000. This avoids fBSD ports # brain damage and presumably various other packaging systems too -$VERSION = '0.06003'; +$VERSION = '0.06999_05'; sub MODIFY_CODE_ATTRIBUTES { my ($class,$code,@attrs) = @_; @@ -180,7 +180,9 @@ andyg: Andy Grundman ank: Andres Kievsky -blblack: Brandon Black +blblack: Brandon L. 
Black + +bluefeet: Aran Deltac LTJake: Brian Cassidy @@ -206,6 +208,8 @@ quicksilver: Jules Bean jguenther: Justin Guenther +captainL: Luke Saunders + draven: Marcus Ramberg nigel: Nigel Metheringham @@ -222,12 +226,12 @@ scotty: Scotty Allen sszabo: Stephan Szabo -captainL: Luke Saunders - Todd Lipcon wdh: Will Hawes +gphat: Cory G Watson + =head1 LICENSE You may distribute this code under the same terms as Perl itself. diff --git a/lib/DBIx/Class/CDBICompat/AttributeAPI.pm b/lib/DBIx/Class/CDBICompat/AttributeAPI.pm index b3d4752..abf9ac0 100644 --- a/lib/DBIx/Class/CDBICompat/AttributeAPI.pm +++ b/lib/DBIx/Class/CDBICompat/AttributeAPI.pm @@ -14,8 +14,7 @@ sub _attrs { sub _attribute_store { my $self = shift; my $vals = @_ == 1 ? shift: {@_}; - my (@cols) = keys %$vals; - @{$self->{_column_data}}{@cols} = @{$vals}{@cols}; + $self->store_column($_, $vals->{$_}) for keys %{$vals}; } sub _attribute_set { @@ -31,7 +30,7 @@ sub _attribute_delete { sub _attribute_exists { my ($self, $attr) = @_; - exists $self->{_column_data}{$attr}; + $self->has_column_loaded($attr); } 1; diff --git a/lib/DBIx/Class/CDBICompat/ColumnCase.pm b/lib/DBIx/Class/CDBICompat/ColumnCase.pm index 9d0c96f..9be24ff 100644 --- a/lib/DBIx/Class/CDBICompat/ColumnCase.pm +++ b/lib/DBIx/Class/CDBICompat/ColumnCase.pm @@ -66,6 +66,19 @@ sub find_column { return $class->next::method(lc($col)); } +# _build_query +# +# Build a query hash for find, et al. Overrides Retrieve::_build_query. + +sub _build_query { + my ($self, $query) = @_; + + my %new_query; + $new_query{lc $_} = $query->{$_} for keys %$query; + + return \%new_query; +} + sub _mk_group_accessors { my ($class, $type, $group, @fields) = @_; #warn join(', ', map { ref $_ ? 
(@$_) : ($_) } @fields); diff --git a/lib/DBIx/Class/CDBICompat/HasA.pm b/lib/DBIx/Class/CDBICompat/HasA.pm index 6930f3b..647674f 100644 --- a/lib/DBIx/Class/CDBICompat/HasA.pm +++ b/lib/DBIx/Class/CDBICompat/HasA.pm @@ -7,7 +7,7 @@ use warnings; sub has_a { my ($self, $col, $f_class, %args) = @_; $self->throw_exception( "No such column ${col}" ) unless $self->has_column($col); - eval "require $f_class"; + $self->ensure_class_loaded($f_class); if ($args{'inflate'} || $args{'deflate'}) { # Non-database has_a if (!ref $args{'inflate'}) { my $meth = $args{'inflate'}; diff --git a/lib/DBIx/Class/CDBICompat/Pager.pm b/lib/DBIx/Class/CDBICompat/Pager.pm index c4bc3a4..8d02dc7 100644 --- a/lib/DBIx/Class/CDBICompat/Pager.pm +++ b/lib/DBIx/Class/CDBICompat/Pager.pm @@ -10,7 +10,7 @@ sub page { my $class = shift; my $rs = $class->search(@_); - unless ($rs->{page}) { + unless ($rs->{attrs}{page}) { $rs = $rs->page(1); } return ( $rs->pager, $rs ); diff --git a/lib/DBIx/Class/CDBICompat/Retrieve.pm b/lib/DBIx/Class/CDBICompat/Retrieve.pm index 899ed69..1186ae4 100644 --- a/lib/DBIx/Class/CDBICompat/Retrieve.pm +++ b/lib/DBIx/Class/CDBICompat/Retrieve.pm @@ -5,9 +5,44 @@ use strict; use warnings FATAL => 'all'; -sub retrieve { - die "No args to retrieve" unless @_ > 1; - shift->find(@_); +sub retrieve { + my $self = shift; + die "No args to retrieve" unless @_ > 0; + + my @cols = $self->primary_columns; + + my $query; + if (ref $_[0] eq 'HASH') { + $query = { %{$_[0]} }; + } + elsif (@_ == @cols) { + $query = {}; + @{$query}{@cols} = @_; + } + else { + $query = {@_}; + } + + $query = $self->_build_query($query); + $self->find($query); +} + +sub find_or_create { + my $self = shift; + my $query = ref $_[0] eq 'HASH' ? shift : {@_}; + + $query = $self->_build_query($query); + $self->next::method($query); +} + +# _build_query +# +# Build a query hash. Defaults to a no-op; ColumnCase overrides. 
+ +sub _build_query { + my ($self, $query) = @_; + + return $query; } sub retrieve_from_sql { diff --git a/lib/DBIx/Class/Componentised.pm b/lib/DBIx/Class/Componentised.pm index 7e62354..2b3bf83 100644 --- a/lib/DBIx/Class/Componentised.pm +++ b/lib/DBIx/Class/Componentised.pm @@ -5,16 +5,28 @@ use strict; use warnings; use Class::C3; +use Class::Inspector; +use Carp::Clan qw/DBIx::Class/; sub inject_base { my ($class, $target, @to_inject) = @_; { no strict 'refs'; - my %seen; - unshift( @{"${target}::ISA"}, - grep { !$seen{ $_ }++ && $target ne $_ && !$target->isa($_) } - @to_inject - ); + foreach my $to (reverse @to_inject) { + my @comps = qw(DigestColumns ResultSetManager Ordered UTF8Columns); + # Add components here that need to be loaded before Core + foreach my $first_comp (@comps) { + if ($to eq 'DBIx::Class::Core' && + $target->isa("DBIx::Class::${first_comp}")) { + warn "Possible incorrect order of components in ". + "${target}::load_components($first_comp) call: Core loaded ". + "before $first_comp. See the documentation for ". + "DBIx::Class::$first_comp for more information"; + } + } + unshift( @{"${target}::ISA"}, $to ) + unless ($target eq $to || $target->isa($to)); + } } # Yes, this is hack. But it *does* work. Please don't submit tickets about @@ -42,10 +54,52 @@ sub load_own_components { sub _load_components { my ($class, @comp) = @_; foreach my $comp (@comp) { - eval "use $comp"; - die $@ if $@; + $class->ensure_class_loaded($comp); } $class->inject_base($class => @comp); } +# Given a class name, tests to see if it is already loaded or otherwise +# defined. If it is not yet loaded, the package is require'd, and an exception +# is thrown if the class is still not loaded. +# +# TODO: handle ->has_many('rel', 'Class'...) instead of +# ->has_many('rel', 'Some::Schema::Class'...) 
+# +# BUG: For some reason, packages with syntax errors are added to %INC on +# require +sub ensure_class_loaded { + my ($class, $f_class) = @_; + return if Class::Inspector->loaded($f_class); + eval "require $f_class"; # require needs a bareword or filename + if ($@) { + if ($class->can('throw_exception')) { + $class->throw_exception($@); + } else { + croak $@; + } + } +} + +# Returns true if the specified class is installed or already loaded, false +# otherwise +sub ensure_class_found { + my ($class, $f_class) = @_; + return Class::Inspector->loaded($f_class) || + Class::Inspector->installed($f_class); +} + +# Returns a true value if the specified class is installed and loaded +# successfully, throws an exception if the class is found but not loaded +# successfully, and false if the class is not installed +sub load_optional_class { + my ($class, $f_class) = @_; + if ($class->ensure_class_found($f_class)) { + $class->ensure_class_loaded($f_class); + return 1; + } else { + return 0; + } +} + 1; diff --git a/lib/DBIx/Class/Core.pm b/lib/DBIx/Class/Core.pm index 96a6a9a..4f9a59c 100644 --- a/lib/DBIx/Class/Core.pm +++ b/lib/DBIx/Class/Core.pm @@ -10,6 +10,7 @@ __PACKAGE__->load_components(qw/ Serialize::Storable InflateColumn Relationship + PK::Auto PK Row ResultSourceProxy::Table @@ -41,6 +42,8 @@ The core modules currently are: =item L +=item L + =item L =item L diff --git a/lib/DBIx/Class/DB.pm b/lib/DBIx/Class/DB.pm index aa5eeb3..0fb7e8a 100644 --- a/lib/DBIx/Class/DB.pm +++ b/lib/DBIx/Class/DB.pm @@ -19,19 +19,9 @@ __PACKAGE__->load_components(qw/ResultSetProxy/); sub storage { shift->schema_instance(@_)->storage; } -sub resultset_instance { - my $class = ref $_[0] || $_[0]; - my $source = $class->result_source_instance; - if ($source->result_class ne $class) { - $source = $source->new($source); - $source->result_class($class); - } - return $source->resultset; -} - =head1 NAME -DBIx::Class::DB - Non-recommended classdata schema component +DBIx::Class::DB - 
(DEPRECATED) classdata schema component =head1 SYNOPSIS @@ -54,8 +44,8 @@ DBIx::Class::DB - Non-recommended classdata schema component This class is designed to support the Class::DBI connection-as-classdata style for DBIx::Class. You are *strongly* recommended to use a DBIx::Class::Schema -instead; DBIx::Class::DB will continue to be supported but new development -will be focused on Schema-based DBIx::Class setups. +instead; DBIx::Class::DB will not undergo new development and will be moved +to being a CDBICompat-only component before 1.0. =head1 METHODS @@ -150,7 +140,41 @@ sub txn_do { shift->schema_instance->txn_do(@_); } } } -1; +=head2 resultset_instance + +Returns an instance of a resultset for this class - effectively +mapping the L connection-as-classdata paradigm into the +native L system. + +=cut + +sub resultset_instance { + my $class = ref $_[0] || $_[0]; + my $source = $class->result_source_instance; + if ($source->result_class ne $class) { + $source = $source->new($source); + $source->result_class($class); + } + return $source->resultset; +} + +=head2 resolve_class + +****DEPRECATED**** + +See L + +=head2 dbi_commit + +****DEPRECATED**** + +Alias for L + +=head2 dbi_rollback + +****DEPRECATED**** + +Alias for L =head1 AUTHORS @@ -162,3 +186,4 @@ You may distribute this code under the same terms as Perl itself. =cut +1; diff --git a/lib/DBIx/Class/InflateColumn.pm b/lib/DBIx/Class/InflateColumn.pm index d9817fe..de68b23 100644 --- a/lib/DBIx/Class/InflateColumn.pm +++ b/lib/DBIx/Class/InflateColumn.pm @@ -2,7 +2,7 @@ package DBIx::Class::InflateColumn; use strict; use warnings; - +use Scalar::Util qw/blessed/; use base qw/DBIx::Class::Row/; @@ -94,68 +94,151 @@ sub _deflated_column { return $deflate->($value, $self); } +=head2 get_inflated_column + + my $val = $obj->get_inflated_column($col); + +Fetch a column value in its inflated state. 
This is directly +analogous to L in that it only fetches a +column already retreived from the database, and then inflates it. +Throws an exception if the column requested is not an inflated column. + +=cut + sub get_inflated_column { my ($self, $col) = @_; $self->throw_exception("$col is not an inflated column") unless exists $self->column_info($col)->{_inflate_info}; - return $self->{_inflated_column}{$col} if exists $self->{_inflated_column}{$col}; return $self->{_inflated_column}{$col} = $self->_inflated_column($col, $self->get_column($col)); } +=head2 set_inflated_column + + my $copy = $obj->set_inflated_column($col => $val); + +Sets a column value from an inflated value. This is directly +analogous to L. + +=cut + sub set_inflated_column { - my ($self, $col, @rest) = @_; - my $ret = $self->_inflated_column_op('set', $col, @rest); - return $ret; + my ($self, $col, $obj) = @_; + $self->set_column($col, $self->_deflated_column($col, $obj)); + if (blessed $obj) { + $self->{_inflated_column}{$col} = $obj; + } else { + delete $self->{_inflated_column}{$col}; + } + return $obj; } +=head2 store_inflated_column + + my $copy = $obj->store_inflated_column($col => $val); + +Sets a column value from an inflated value without marking the column +as dirty. This is directly analogous to L. + +=cut + sub store_inflated_column { - my ($self, $col, @rest) = @_; - my $ret = $self->_inflated_column_op('store', $col, @rest); - return $ret; + my ($self, $col, $obj) = @_; + unless (blessed $obj) { + delete $self->{_inflated_column}{$col}; + $self->store_column($col => $obj); + return $obj; + } + delete $self->{_column_data}{$col}; + return $self->{_inflated_column}{$col} = $obj; +} + +=head2 get_column + +Gets a column value in the same way as L. If there +is an inflated value stored that has not yet been deflated, it is deflated +when the method is invoked. 
+ +=cut + +sub get_column { + my ($self, $col) = @_; + if (exists $self->{_inflated_column}{$col} + && !exists $self->{_column_data}{$col}) { + $self->store_column($col, $self->_deflated_column($col, $self->{_inflated_column}{$col})); + } + return $self->next::method($col); } -sub _inflated_column_op { - my ($self, $op, $col, $obj) = @_; - my $meth = "${op}_column"; - unless (ref $obj) { - delete $self->{_inflated_column}{$col}; - return $self->$meth($col, $obj); +=head2 get_columns + +Returns the get_column info for all columns as a hash, +just like L. Handles inflation just +like L. + +=cut + +sub get_columns { + my $self = shift; + if (exists $self->{_inflated_column}) { + foreach my $col (keys %{$self->{_inflated_column}}) { + $self->store_column($col, $self->_deflated_column($col, $self->{_inflated_column}{$col})) + unless exists $self->{_column_data}{$col}; + } } + return $self->next::method; +} - my $deflated = $self->_deflated_column($col, $obj); - # Do this now so we don't store if it's invalid +=head2 has_column_loaded - $self->{_inflated_column}{$col} = $obj; - $self->$meth($col, $deflated); - return $obj; +Like L, but also returns true if there +is an inflated value stored. + +=cut + +sub has_column_loaded { + my ($self, $col) = @_; + return 1 if exists $self->{_inflated_column}{$col}; + return $self->next::method($col); } +=head2 update + +Updates a row in the same way as L, handling +inflation and deflation of columns appropriately. 
+ +=cut + sub update { my ($class, $attrs, @rest) = @_; - $attrs ||= {}; - foreach my $key (keys %$attrs) { + foreach my $key (keys %{$attrs||{}}) { if (ref $attrs->{$key} && exists $class->column_info($key)->{_inflate_info}) { -# $attrs->{$key} = $class->_deflated_column($key, $attrs->{$key}); - $class->set_inflated_column ($key, delete $attrs->{$key}); + $class->set_inflated_column($key, delete $attrs->{$key}); } } return $class->next::method($attrs, @rest); } +=head2 new + +Creates a row in the same way as L, handling +inflation and deflation of columns appropriately. + +=cut + sub new { my ($class, $attrs, @rest) = @_; - $attrs ||= {}; - foreach my $key (keys %$attrs) { - if (ref $attrs->{$key} - && exists $class->column_info($key)->{_inflate_info}) { - $attrs->{$key} = $class->_deflated_column($key, $attrs->{$key}); - } + my $inflated; + foreach my $key (keys %{$attrs||{}}) { + $inflated->{$key} = delete $attrs->{$key} + if ref $attrs->{$key} && exists $class->column_info($key)->{_inflate_info}; } - return $class->next::method($attrs, @rest); + my $obj = $class->next::method($attrs, @rest); + $obj->{_inflated_column} = $inflated if $inflated; + return $obj; } =head1 SEE ALSO diff --git a/lib/DBIx/Class/InflateColumn/DateTime.pm b/lib/DBIx/Class/InflateColumn/DateTime.pm new file mode 100644 index 0000000..f05523c --- /dev/null +++ b/lib/DBIx/Class/InflateColumn/DateTime.pm @@ -0,0 +1,106 @@ +package DBIx::Class::InflateColumn::DateTime; + +use strict; +use warnings; +use base qw/DBIx::Class/; + +=head1 NAME + +DBIx::Class::InflateColumn::DateTime - Auto-create DateTime objects from date and datetime columns. + +=head1 SYNOPSIS + +Load this component and then declare one or more +columns to be of the datetime, timestamp or date datatype. + + package Event; + __PACKAGE__->load_components(qw/InflateColumn::DateTime/); + __PACKAGE__->add_columns( + starts_when => { data_type => 'datetime' } + ); + +Then you can treat the specified column as a L object. 
+ + print "This event starts the month of ". + $event->starts_when->month_name(); + +=head1 DESCRIPTION + +This module figures out the type of DateTime::Format::* class to +inflate/deflate with based on the type of DBIx::Class::Storage::DBI::* +that you are using. If you switch from one database to a different +one your code should continue to work without modification (though note +that this feature is new as of 0.07, so it may not be perfect yet - bug +reports to the list very much welcome). + +=cut + +__PACKAGE__->load_components(qw/InflateColumn/); + +__PACKAGE__->mk_group_accessors('simple' => '__datetime_parser'); + +=head2 register_column + +Chains with the L method, and sets +up datetime columns appropriately. This would not normally be +directly called by end users. + +=cut + +sub register_column { + my ($self, $column, $info, @rest) = @_; + $self->next::method($column, $info, @rest); + return unless defined($info->{data_type}); + my $type = lc($info->{data_type}); + $type = 'datetime' if ($type eq 'timestamp'); + if ($type eq 'datetime' || $type eq 'date') { + my ($parse, $format) = ("parse_${type}", "format_${type}"); + $self->inflate_column( + $column => + { + inflate => sub { + my ($value, $obj) = @_; + $obj->_datetime_parser->$parse($value); + }, + deflate => sub { + my ($value, $obj) = @_; + $obj->_datetime_parser->$format($value); + }, + } + ); + } +} + +sub _datetime_parser { + my $self = shift; + if (my $parser = $self->__datetime_parser) { + return $parser; + } + my $parser = $self->result_source->storage->datetime_parser(@_); + return $self->__datetime_parser($parser); +} + +1; +__END__ + +=head1 SEE ALSO + +=over 4 + +=item More information about the add_columns method, and column metadata, + can be found in the documentation for L. + +=back + +=head1 AUTHOR + +Matt S. Trout + +=head1 CONTRIBUTORS + +Aran Deltac + +=head1 LICENSE + +You may distribute this code under the same terms as Perl itself. 
+ diff --git a/lib/DBIx/Class/Manual/Component.pod b/lib/DBIx/Class/Manual/Component.pod index 2607e36..9bbe684 100644 --- a/lib/DBIx/Class/Manual/Component.pod +++ b/lib/DBIx/Class/Manual/Component.pod @@ -90,6 +90,8 @@ L - Build forms with multiple interconnected objects. L - Like FromForm but with DBIx::Class and HTML::Widget. +L - Modify the position of objects in an ordered list. + L - Retrieve automatically created primary keys upon insert. L - Display the amount of time it takes to run queries. diff --git a/lib/DBIx/Class/Manual/Cookbook.pod b/lib/DBIx/Class/Manual/Cookbook.pod index f0004b2..7e9a810 100644 --- a/lib/DBIx/Class/Manual/Cookbook.pod +++ b/lib/DBIx/Class/Manual/Cookbook.pod @@ -783,6 +783,66 @@ It is possible to get a Schema object from a row object like so, This can be useful when you don't want to pass around a Schema object to every method. +=head2 Profiling + +When you enable L's debugging it prints the SQL +executed as well as notifications of query completion and transaction +begin/commit. 
If you'd like to profile the SQL you can subclass the +L class and write your own profiling +mechanism: + + package My::Profiler; + use strict; + + use base 'DBIx::Class::Storage::Statistics'; + + use Time::HiRes qw(time); + + my $start; + + sub query_start { + my $self = shift(); + my $sql = shift(); + my $params = @_; + + print "Executing $sql: ".join(', ', @params)."\n"; + $start = time(); + } + + sub query_end { + my $self = shift(); + my $sql = shift(); + my @params = @_; + + printf("Execution took %0.4f seconds.\n", time() - $start); + $start = undef; + } + + 1; + +You can then install that class as the debugging object: + + __PACKAGE__->storage()->debugobj(new My::Profiler()); + __PACKAGE__->storage()->debug(1); + +A more complicated example might involve storing each execution of SQL in an +array: + + sub query_end { + my $self = shift(); + my $sql = shift(); + my @params = @_; + + my $elapsed = time() - $start; + push(@{ $calls{$sql} }, { + params => \@params, + elapsed => $elapsed + }); + } + +You could then create average, high and low execution times for an SQL +statement and dig down to see if certain parameters cause aberrant behavior. + =head2 Getting the value of the primary key for the last database insert AKA getting last_insert_id diff --git a/lib/DBIx/Class/Manual/FAQ.pod b/lib/DBIx/Class/Manual/FAQ.pod new file mode 100644 index 0000000..d7ff6cc --- /dev/null +++ b/lib/DBIx/Class/Manual/FAQ.pod @@ -0,0 +1,174 @@ +=head1 NAME + +DBIx::Class::Manual::FAQ - Frequently Asked Questions (in theory) + +=head1 DESCRIPTION + +This document is intended as an anti-map of the documentation. If you +know what you want to do, but not how to do it in L, then +look here. It does B contain any code or examples, it just gives +explanations and pointers to the correct pieces of documentation to +read. + +=head1 FAQs + +How Do I: + +=head2 Getting started + +=over 4 + +=item .. create a database to use? + +First, choose a database. 
For testing/experimenting, we recommend +L, which is a self-contained small database. (i.e. all +you need to do is to install the DBD from CPAN, and it's usable). + +Next, spend some time defining which data you need to store, and how +it relates to the other data you have. For some help on normalisation, +go to L or +L. + +Now, decide whether you want to have the database itself be the +definitive source of information about the data layout, or your +DBIx::Class schema. If it's the former, look up the documentation for +your database, eg. L, on how +to create tables, and start creating them. For a nice universal +interface to your database, you can try L. If you decided +on the latter choice, read the FAQ on setting up your classes +manually, and the one on creating tables from your schema. + +=item .. use DBIx::Class with L? + +Install L from CPAN. See its +documentation, or below, for further details. + +=item .. set up my DBIx::Class classes automatically from my database? + +Install L from CPAN, and read its documentation. + +=item .. set up my DBIx::Class classes manually? + +Look at the L, come back here if you get lost. + +=item .. create my database tables from my DBIx::Class schema? + +Create your classes manually, as above. Write a script that calls +L. See there for details, or the +L. + +=back + +=head2 Relationships + +=over 4 + +=item .. tell DBIx::Class about relationships between my tables? + +There are a variety of relationship types that come pre-defined for you to use. These are all listed in L. If you need a non-standard type, or more information, look in L. + +=item .. define a one-to-many relationship? + +This is called a C relationship on the one side, and a C relationship on the many side. Currently these need to be set up individually on each side. See L for details. + +=item .. define a relationship where this table contains another table's primary key? (foreign key) + +Create a C relationship for the field containing the foreign key. 
L. + +=item .. define a foreign key relationship where the key field may contain NULL? + +Just create a C relationship, as above. If +the column is NULL then the inflation to the foreign object will not +happen. This has a side effect of not always fetching all the relevant +data, if you use a nullable foreign-key relationship in a JOIN, then +you probably want to set the join_type to 'left'. + +=item .. define a relationship where the key consists of more than one column? + +Instead of supplying a single column name, all relationship types also +allow you to supply a hashref containing the condition across which +the tables are to be joined. The condition may contain as many fields +as you like. See L. + +=item .. define a relationship across an intermediate table? (many-to-many) + +Read the documentation on L. + +=item .. stop DBIx::Class from attempting to cascade deletes on my has_many relationships? + +By default, DBIx::Class cascades deletes and updates across +C relationships. If your database already does this (and +probably better), turn it off by supplying C<< cascade_delete => 0 >> in +the relationship attributes. See L. + +=item .. use a relationship? + +Use its name. An accessor is created using the name. See examples in L. + +=back + +=head2 Searching + +=over 4 + +=item .. search for data? + +=item .. search using database functions? + +=item .. sort the results of my search? + +=item .. group the results of my search? + +=item .. filter the results of my search? + +=item .. search in several tables simultaneously? + +=item .. find more help on constructing searches? + +Behind the scenes, DBIx::Class uses L to help construct +its SQL searches. So if you fail to find help in the +L, try looking in the SQL::Abstract +documentation. + +=back + +=head2 Fetching data + +=over 4 + +=item .. fetch as much data as possible in as few select calls as possible? (prefetch) + +See the prefetch examples in the L. 
+ +=back + +=over 4 + +=head2 Inserting and updating data + +=over 4 + +=item .. insert many rows of data efficiently? + +=item .. update a collection of rows at the same time? + +=item .. use database functions when updating rows? + +=item .. update a column using data from another column? + +=back + +=head2 Misc + +=over 4 + +=item How do I store my own (non-db) data in my DBIx::Class objects? + +=item How do I use DBIx::Class objects in my TT templates? + +=item See the SQL statements my code is producing? + +=item Why didn't my search run any SQL? + + +=back diff --git a/lib/DBIx/Class/Ordered.pm b/lib/DBIx/Class/Ordered.pm new file mode 100644 index 0000000..8e2c74d --- /dev/null +++ b/lib/DBIx/Class/Ordered.pm @@ -0,0 +1,393 @@ +# vim: ts=8:sw=4:sts=4:et +package DBIx::Class::Ordered; +use strict; +use warnings; +use base qw( DBIx::Class ); + +=head1 NAME + +DBIx::Class::Ordered - Modify the position of objects in an ordered list. + +=head1 SYNOPSIS + +Create a table for your ordered data. + + CREATE TABLE items ( + item_id INTEGER PRIMARY KEY AUTOINCREMENT, + name TEXT NOT NULL, + position INTEGER NOT NULL + ); + # Optional: group_id INTEGER NOT NULL + +In your Schema or DB class add Ordered to the top +of the component list. + + __PACKAGE__->load_components(qw( Ordered ... )); + +Specify the column that stores the position number for +each row. + + package My::Item; + __PACKAGE__->position_column('position'); + __PACKAGE__->grouping_column('group_id'); # optional + +That's it, now you can change the position of your objects. + + #!/usr/bin/perl + use My::Item; + + my $item = My::Item->create({ name=>'Matt S. Trout' }); + # If using grouping_column: + my $item = My::Item->create({ name=>'Matt S. 
Trout', group_id=>1 }); + + my $rs = $item->siblings(); + my @siblings = $item->siblings(); + + my $sibling; + $sibling = $item->first_sibling(); + $sibling = $item->last_sibling(); + $sibling = $item->previous_sibling(); + $sibling = $item->next_sibling(); + + $item->move_previous(); + $item->move_next(); + $item->move_first(); + $item->move_last(); + $item->move_to( $position ); + +=head1 DESCRIPTION + +This module provides a simple interface for modifying the ordered +position of DBIx::Class objects. + +=head1 AUTO UPDATE + +All of the move_* methods automatically update the rows involved in +the query. This is not configurable and is due to the fact that if you +move a record it always causes other records in the list to be updated. + +=head1 METHODS + +=head2 position_column + + __PACKAGE__->position_column('position'); + +Sets and retrieves the name of the column that stores the +positional value of each record. Default to "position". + +=cut + +__PACKAGE__->mk_classdata( 'position_column' => 'position' ); + +=head2 grouping_column + + __PACKAGE__->grouping_column('group_id'); + +This method specified a column to limit all queries in +this module by. This effectively allows you to have multiple +ordered lists within the same table. + +=cut + +__PACKAGE__->mk_classdata( 'grouping_column' ); + +=head2 siblings + + my $rs = $item->siblings(); + my @siblings = $item->siblings(); + +Returns either a result set or an array of all other objects +excluding the one you called it on. 
+ +=cut + +sub siblings { + my( $self ) = @_; + my $position_column = $self->position_column; + my $rs = $self->result_source->resultset->search( + { + $position_column => { '!=' => $self->get_column($position_column) }, + $self->_grouping_clause(), + }, + { order_by => $self->position_column }, + ); + return $rs->all() if (wantarray()); + return $rs; +} + +=head2 first_sibling + + my $sibling = $item->first_sibling(); + +Returns the first sibling object, or 0 if the first sibling +is this sibliing. + +=cut + +sub first_sibling { + my( $self ) = @_; + return 0 if ($self->get_column($self->position_column())==1); + return ($self->result_source->resultset->search( + { + $self->position_column => 1, + $self->_grouping_clause(), + }, + )->all())[0]; +} + +=head2 last_sibling + + my $sibling = $item->last_sibling(); + +Return the last sibling, or 0 if the last sibling is this +sibling. + +=cut + +sub last_sibling { + my( $self ) = @_; + my $count = $self->result_source->resultset->search({$self->_grouping_clause()})->count(); + return 0 if ($self->get_column($self->position_column())==$count); + return ($self->result_source->resultset->search( + { + $self->position_column => $count, + $self->_grouping_clause(), + }, + )->all())[0]; +} + +=head2 previous_sibling + + my $sibling = $item->previous_sibling(); + +Returns the sibling that resides one position back. Undef +is returned if the current object is the first one. + +=cut + +sub previous_sibling { + my( $self ) = @_; + my $position_column = $self->position_column; + my $position = $self->get_column( $position_column ); + return 0 if ($position==1); + return ($self->result_source->resultset->search( + { + $position_column => $position - 1, + $self->_grouping_clause(), + } + )->all())[0]; +} + +=head2 next_sibling + + my $sibling = $item->next_sibling(); + +Returns the sibling that resides one position foward. Undef +is returned if the current object is the last one. 
+ +=cut + +sub next_sibling { + my( $self ) = @_; + my $position_column = $self->position_column; + my $position = $self->get_column( $position_column ); + my $count = $self->result_source->resultset->search({$self->_grouping_clause()})->count(); + return 0 if ($position==$count); + return ($self->result_source->resultset->search( + { + $position_column => $position + 1, + $self->_grouping_clause(), + }, + )->all())[0]; +} + +=head2 move_previous + + $item->move_previous(); + +Swaps position with the sibling on position previous in the list. +1 is returned on success, and 0 is returned if the objects is already +the first one. + +=cut + +sub move_previous { + my( $self ) = @_; + my $position = $self->get_column( $self->position_column() ); + return $self->move_to( $position - 1 ); +} + +=head2 move_next + + $item->move_next(); + +Swaps position with the sibling in the next position. 1 is returned on +success, and 0 is returned if the object is already the last in the list. + +=cut + +sub move_next { + my( $self ) = @_; + my $position = $self->get_column( $self->position_column() ); + my $count = $self->result_source->resultset->search({$self->_grouping_clause()})->count(); + return 0 if ($position==$count); + return $self->move_to( $position + 1 ); +} + +=head2 move_first + + $item->move_first(); + +Moves the object to the first position. 1 is returned on +success, and 0 is returned if the object is already the first. + +=cut + +sub move_first { + my( $self ) = @_; + return $self->move_to( 1 ); +} + +=head2 move_last + + $item->move_last(); + +Moves the object to the very last position. 1 is returned on +success, and 0 is returned if the object is already the last one. + +=cut + +sub move_last { + my( $self ) = @_; + my $count = $self->result_source->resultset->search({$self->_grouping_clause()})->count(); + return $self->move_to( $count ); +} + +=head2 move_to + + $item->move_to( $position ); + +Moves the object to the specified position. 
1 is returned on +success, and 0 is returned if the object is already at the +specified position. + +=cut + +sub move_to { + my( $self, $to_position ) = @_; + my $position_column = $self->position_column; + my $from_position = $self->get_column( $position_column ); + return 0 if ( $to_position < 1 ); + return 0 if ( $from_position==$to_position ); + my @between = ( + ( $from_position < $to_position ) + ? ( $from_position+1, $to_position ) + : ( $to_position, $from_position-1 ) + ); + my $rs = $self->result_source->resultset->search({ + $position_column => { -between => [ @between ] }, + $self->_grouping_clause(), + }); + my $op = ($from_position>$to_position) ? '+' : '-'; + $rs->update({ $position_column => \"$position_column $op 1" }); + $self->update({ $position_column => $to_position }); + return 1; +} + +=head2 insert + +Overrides the DBIC insert() method by providing a default +position number. The default will be the number of rows in +the table +1, thus positioning the new record at the last position. + +=cut + +sub insert { + my $self = shift; + my $position_column = $self->position_column; + $self->set_column( $position_column => $self->result_source->resultset->search( {$self->_grouping_clause()} )->count()+1 ) + if (!$self->get_column($position_column)); + return $self->next::method( @_ ); +} + +=head2 delete + +Overrides the DBIC delete() method by first moving the object +to the last position, then deleting it, thus ensuring the +integrity of the positions. + +=cut + +sub delete { + my $self = shift; + $self->move_last; + return $self->next::method( @_ ); +} + +=head1 PRIVATE METHODS + +These methods are used internally. You should never have the +need to use them. + +=head2 _grouping_clause + +This method returns a name=>value pare for limiting a search +by the collection column. If the collection column is not +defined then this will return an empty list. 
+ +=cut + +sub _grouping_clause { + my( $self ) = @_; + my $col = $self->grouping_column(); + if ($col) { + return ( $col => $self->get_column($col) ); + } + return (); } + +1; +__END__ + +=head1 BUGS + +=head2 Unique Constraints + +Unique indexes and constraints on the position column are not +supported at this time. It would make sense to support them, +but there are some unexpected database issues that make this +hard to do. The main problem from the author's view is that +SQLite (the DB engine that we use for testing) does not support +ORDER BY on updates. + +=head2 Race Condition on Insert + +If a position is not specified for an insert then a position +will be chosen based on COUNT(*)+1. But, it first selects the +count then inserts the record. The space of time between select +and insert introduces a race condition. To fix this we need the +ability to lock tables in DBIC. I've added an entry in the TODO +about this. + +=head2 Multiple Moves + +Be careful when issuing move_* methods to multiple objects. If +you've pre-loaded the objects then when you move one of the objects +the position of the other object will not reflect their new value +until you reload them from the database. + +There are times when you will want to move objects as groups, such +as changing the parent of several objects at once - this directly +conflicts with this problem. One solution is for us to write a +ResultSet class that supports a parent() method, for example. Another +solution is to somehow automagically modify the objects that exist +in the current object's result set to have the new position value. + +=head1 AUTHOR + +Aran Deltac + +=head1 LICENSE + +You may distribute this code under the same terms as Perl itself. + diff --git a/lib/DBIx/Class/PK.pm b/lib/DBIx/Class/PK.pm index d093d93..9895edb 100644 --- a/lib/DBIx/Class/PK.pm +++ b/lib/DBIx/Class/PK.pm @@ -91,6 +91,16 @@ sub _create_ID { map { $_ . '=' . 
$vals{$_} } sort keys %vals; } +=head2 ident_condition + + my $cond = $result_source->ident_condition(); + + my $cond = $result_source->ident_condition('alias'); + +Produces a condition hash to locate a row based on the primary key(s). + +=cut + sub ident_condition { my ($self, $alias) = @_; my %cond; diff --git a/lib/DBIx/Class/Relationship.pm b/lib/DBIx/Class/Relationship.pm index 6048fd0..3a23108 100644 --- a/lib/DBIx/Class/Relationship.pm +++ b/lib/DBIx/Class/Relationship.pm @@ -118,6 +118,19 @@ instead of a join condition hash, that is used as the name of the column holding the foreign key. If $cond is not given, the relname is used as the column name. +If the relationship is optional - ie the column containing the foreign +key can be NULL - then the belongs_to relationship does the right +thing - so in the example above C<$obj->author> would return C. +However in this case you would probably want to set the C +attribute so that a C is done, which makes complex +resultsets involving C or C operations work correctly. +The modified declaration is shown below:- + + # in a Book class (where Author has many Books) + __PACKAGE__->belongs_to(author => 'My::DBIC::Schema::Author', + 'author', {join_type => 'left'}); + + Cascading deletes are off per default on a C relationship, to turn them on, pass C<< cascade_delete => 1 >> in the $attr hashref. @@ -134,6 +147,8 @@ of C. { prefetch => [qw/book/], }); my @book_objs = $obj->books; + my $books_rs = $obj->books; + ( $books_rs ) = $obj->books_rs; $obj->add_to_books(\%col_data); @@ -142,9 +157,14 @@ foreign class store the calling class's primary key in one (or more) of its columns. You should pass the name of the column in the foreign class as the $cond argument, or specify a complete join condition. -As well as the accessor method, a method named C<< add_to_ >> -will also be added to your Row items, this allows you to insert new -related items, using the same mechanism as in L. 
+Three methods are created when you create a has_many relationship. The first +method is the expected accessor method. The second is almost exactly the same +as the accessor method but "_rs" is added to the end of the method name. This +method works just like the normal accessor, except that it returns a resultset +no matter what, even in list context. The third method, named +C<< add_to_ >>, will also be added to your Row items, this allows +you to insert new related items, using the same mechanism as in +L. If you delete an object in a class with a C relationship, all the related objects will be deleted as well. However, any database-level @@ -180,6 +200,12 @@ left join. =head2 many_to_many +=over 4 + +=item Arguments: $accessor_name, $link_rel_name, $foreign_rel_name + +=back + My::DBIC::Schema::Actor->has_many( actor_roles => 'My::DBIC::Schema::ActorRoles', 'actor' ); @@ -191,13 +217,10 @@ left join. My::DBIC::Schema::Actor->many_to_many( roles => 'actor_roles', 'role' ); - ... - - my @role_objs = $actor->roles; +Creates a accessors bridging two relationships; not strictly a relationship in +its own right, although the accessor will return a resultset or collection of +objects just as a has_many would. -Creates an accessor bridging two relationships; not strictly a relationship -in its own right, although the accessor will return a resultset or collection -of objects just as a has_many would. To use many_to_many, existing relationships from the original table to the link table, and from the link table to the end table must already exist, these relation names are then used in the many_to_many call. 
diff --git a/lib/DBIx/Class/Relationship/Accessor.pm b/lib/DBIx/Class/Relationship/Accessor.pm index 035661a..b20eb16 100644 --- a/lib/DBIx/Class/Relationship/Accessor.pm +++ b/lib/DBIx/Class/Relationship/Accessor.pm @@ -48,6 +48,7 @@ sub add_relationship_accessor { ); } elsif ($acc_type eq 'multi') { $meth{$rel} = sub { shift->search_related($rel, @_) }; + $meth{"${rel}_rs"} = sub { shift->search_related_rs($rel, @_) }; $meth{"add_to_${rel}"} = sub { shift->create_related($rel, @_); }; } else { $class->throw_exception("No such relationship accessor type $acc_type"); diff --git a/lib/DBIx/Class/Relationship/Base.pm b/lib/DBIx/Class/Relationship/Base.pm index 05f4c52..3e74d54 100644 --- a/lib/DBIx/Class/Relationship/Base.pm +++ b/lib/DBIx/Class/Relationship/Base.pm @@ -3,6 +3,7 @@ package DBIx::Class::Relationship::Base; use strict; use warnings; +use Scalar::Util (); use base qw/DBIx::Class/; =head1 NAME @@ -175,7 +176,8 @@ sub related_resultset { =head2 search_related - $rs->search_related('relname', $cond, $attrs); + @objects = $rs->search_related('relname', $cond, $attrs); + $objects_rs = $rs->search_related('relname', $cond, $attrs); Run a search on a related resultset. The search will be restricted to the item or items represented by the L it was called @@ -187,6 +189,19 @@ sub search_related { return shift->related_resultset(shift)->search(@_); } +=head2 search_related_rs + + ( $objects_rs ) = $rs->search_related_rs('relname', $cond, $attrs); + +This method works exactly the same as search_related, except that +it garauntees a restultset, even in list context. + +=cut + +sub search_related_rs { + return shift->related_resultset(shift)->search_rs(@_); +} + =head2 count_related $obj->count_related('relname', $cond, $attrs); @@ -208,9 +223,10 @@ sub count_related { my $new_obj = $obj->new_related('relname', \%col_data); Create a new item of the related foreign class. 
If called on a -L object, it will magically set any -primary key values into foreign key columns for you. The newly created item -will not be saved into your storage until you call L +L object, it will magically +set any foreign key columns of the new object to the related primary +key columns of the source object for you. The newly created item will +not be saved into your storage until you call L on it. =cut @@ -253,12 +269,27 @@ sub find_related { return $self->search_related($rel)->find(@_); } +=head2 find_or_new_related + + my $new_obj = $obj->find_or_new_related('relname', \%col_data); + +Find an item of a related class. If none exists, instantiate a new item of the +related class. The object will not be saved into your storage until you call +L on it. + +=cut + +sub find_or_new_related { + my $self = shift; + return $self->find_related(@_) || $self->new_related(@_); +} + =head2 find_or_create_related my $new_obj = $obj->find_or_create_related('relname', \%col_data); Find or create an item of a related class. See -L for details. +L for details. =cut @@ -268,6 +299,21 @@ sub find_or_create_related { return (defined($obj) ? $obj : $self->create_related(@_)); } +=head2 update_or_create_related + + my $updated_item = $obj->update_or_create_related('relname', \%col_data, \%attrs?); + +Update or create an item of a related class. See +L for details. 
+ +=cut + +sub update_or_create_related { + my $self = shift; + my $rel = shift; + return $self->related_resultset($rel)->update_or_create(@_); +} + =head2 set_from_related $book->set_from_related('author', $author_obj); @@ -295,7 +341,7 @@ sub set_from_related { if (defined $f_obj) { my $f_class = $self->result_source->schema->class($rel_obj->{class}); $self->throw_exception( "Object $f_obj isn't a ".$f_class ) - unless $f_obj->isa($f_class); + unless Scalar::Util::blessed($f_obj) and $f_obj->isa($f_class); } $self->set_columns( $self->result_source->resolve_condition( @@ -333,7 +379,74 @@ sub delete_related { return $obj; } -1; +=head2 add_to_$rel + +B, C and 'multi' type +relationships.> + +=over 4 + +=item Arguments: ($foreign_vals | $obj), $link_vals? + +=back + + my $role = $schema->resultset('Role')->find(1); + $actor->add_to_roles($role); + # creates a My::DBIC::Schema::ActorRoles linking table row object + + $actor->add_to_roles({ name => 'lead' }, { salary => 15_000_000 }); + # creates a new My::DBIC::Schema::Role row object and the linking table + # object with an extra column in the link + +Adds a linking table object for C<$obj> or C<$foreign_vals>. If the first +argument is a hash reference, the related object is created first with the +column values in the hash. If an object reference is given, just the linking +table object is created. In either case, any additional column values for the +linking table object can be specified in C<$link_vals>. + +=head2 set_$rel + +B relationships.> + +=over 4 + +=item Arguments: (@hashrefs | @objs) + +=back + + my $actor = $schema->resultset('Actor')->find(1); + my @roles = $schema->resultset('Role')->search({ role => + { '-in' -> ['Fred', 'Barney'] } } ); + + $actor->set_roles(@roles); + # Replaces all of $actors previous roles with the two named + +Replace all the related objects with the given list of objects. 
This does a +C B to remove the association between the +current object and all related objects, then calls C repeatedly to +link all the new objects. + +Note that this means that this method will B delete any objects in the +table on the right side of the relation, merely that it will delete the link +between them. + +=head2 remove_from_$rel + +B relationships.> + +=over 4 + +=item Arguments: $obj + +=back + + my $role = $schema->resultset('Role')->find(1); + $actor->remove_from_roles($role); + # removes $role's My::DBIC::Schema::ActorRoles linking table row object + +Removes the link between the current object and the related object. Note that +the related object itself won't be deleted unless you call ->delete() on +it. This method just removes the link between the two objects. =head1 AUTHORS @@ -345,3 +458,4 @@ You may distribute this code under the same terms as Perl itself. =cut +1; diff --git a/lib/DBIx/Class/Relationship/BelongsTo.pm b/lib/DBIx/Class/Relationship/BelongsTo.pm index 535fa75..b871266 100644 --- a/lib/DBIx/Class/Relationship/BelongsTo.pm +++ b/lib/DBIx/Class/Relationship/BelongsTo.pm @@ -1,30 +1,38 @@ -package DBIx::Class::Relationship::BelongsTo; +package # hide from PAUSE + DBIx::Class::Relationship::BelongsTo; + +# Documentation for these methods can be found in +# DBIx::Class::Relationship use strict; use warnings; sub belongs_to { my ($class, $rel, $f_class, $cond, $attrs) = @_; - eval "require $f_class"; - if ($@) { - $class->throw_exception($@) unless $@ =~ /Can't locate/; - } - # no join condition or just a column name if (!ref $cond) { + $class->ensure_class_loaded($f_class); my %f_primaries = map { $_ => 1 } eval { $f_class->primary_columns }; - $class->throw_exception("Can't infer join condition for ${rel} on ${class}; unable to load ${f_class}") - if $@; + $class->throw_exception( + "Can't infer join condition for ${rel} on ${class}; ". 
+ "unable to load ${f_class}: $@" + ) if $@; my ($pri, $too_many) = keys %f_primaries; - $class->throw_exception("Can't infer join condition for ${rel} on ${class}; ${f_class} has no primary keys") - unless defined $pri; - $class->throw_exception("Can't infer join condition for ${rel} on ${class}; ${f_class} has multiple primary keys") - if $too_many; + $class->throw_exception( + "Can't infer join condition for ${rel} on ${class}; ". + "${f_class} has no primary keys" + ) unless defined $pri; + $class->throw_exception( + "Can't infer join condition for ${rel} on ${class}; ". + "${f_class} has multiple primary keys" + ) if $too_many; my $fk = defined $cond ? $cond : $rel; - $class->throw_exception("Can't infer join condition for ${rel} on ${class}; $fk is not a column") - unless $class->has_column($fk); + $class->throw_exception( + "Can't infer join condition for ${rel} on ${class}; ". + "$fk is not a column of $class" + ) unless $class->has_column($fk); my $acc_type = $class->has_column($rel) ? 'filter' : 'single'; $class->add_relationship($rel, $f_class, @@ -42,14 +50,19 @@ sub belongs_to { } $cond_rel->{"foreign.$_"} = "self.".$cond->{$_}; } - my $acc_type = (keys %$cond_rel == 1 and $class->has_column($rel)) ? 'filter' : 'single'; + my $acc_type = (keys %$cond_rel == 1 and $class->has_column($rel)) + ? 'filter' + : 'single'; $class->add_relationship($rel, $f_class, $cond_rel, { accessor => $acc_type, %{$attrs || {}} } ); } else { - $class->throw_exception('third argument for belongs_to must be undef, a column name, or a join condition'); + $class->throw_exception( + 'third argument for belongs_to must be undef, a column name, '. 
+ 'or a join condition' + ); } return 1; } diff --git a/lib/DBIx/Class/Relationship/HasMany.pm b/lib/DBIx/Class/Relationship/HasMany.pm index a709d6a..2c9a3bb 100644 --- a/lib/DBIx/Class/Relationship/HasMany.pm +++ b/lib/DBIx/Class/Relationship/HasMany.pm @@ -6,16 +6,15 @@ use warnings; sub has_many { my ($class, $rel, $f_class, $cond, $attrs) = @_; - - eval "require $f_class"; - if ($@) { - $class->throw_exception($@) unless $@ =~ /Can't locate/; - } unless (ref $cond) { + $class->ensure_class_loaded($f_class); my ($pri, $too_many) = $class->primary_columns; - $class->throw_exception( "has_many can only infer join for a single primary key; ${class} has more" ) - if $too_many; + + $class->throw_exception( + "has_many can only infer join for a single primary key; ". + "${class} has more" + ) if $too_many; my ($f_key,$guess); if (defined $cond && length $cond) { @@ -28,18 +27,20 @@ sub has_many { } my $f_class_loaded = eval { $f_class->columns }; - $class->throw_exception("No such column ${f_key} on foreign class ${f_class} ($guess)") - if $f_class_loaded && !$f_class->has_column($f_key); + $class->throw_exception( + "No such column ${f_key} on foreign class ${f_class} ($guess)" + ) if $f_class_loaded && !$f_class->has_column($f_key); $cond = { "foreign.${f_key}" => "self.${pri}" }; } - $class->add_relationship($rel, $f_class, $cond, - { accessor => 'multi', - join_type => 'LEFT', - cascade_delete => 1, - cascade_copy => 1, - %{$attrs||{}} } ); + $class->add_relationship($rel, $f_class, $cond, { + accessor => 'multi', + join_type => 'LEFT', + cascade_delete => 1, + cascade_copy => 1, + %{$attrs||{}} + }); } 1; diff --git a/lib/DBIx/Class/Relationship/HasOne.pm b/lib/DBIx/Class/Relationship/HasOne.pm index 4efbec0..568078c 100644 --- a/lib/DBIx/Class/Relationship/HasOne.pm +++ b/lib/DBIx/Class/Relationship/HasOne.pm @@ -14,15 +14,13 @@ sub has_one { sub _has_one { my ($class, $join_type, $rel, $f_class, $cond, $attrs) = @_; - eval "require $f_class"; - if ($@) { - 
$class->throw_exception($@) unless $@ =~ /Can't locate/; - } - unless (ref $cond) { + $class->ensure_class_loaded($f_class); my ($pri, $too_many) = $class->primary_columns; - $class->throw_exception( "might_have/has_one can only infer join for a single primary key; ${class} has more" ) - if $too_many; + $class->throw_exception( + "might_have/has_one can only infer join for a single primary key; ". + "${class} has more" + ) if $too_many; my $f_class_loaded = eval { $f_class->columns }; my ($f_key,$guess); if (defined $cond && length $cond) { @@ -33,12 +31,15 @@ sub _has_one { $guess = "using given relationship '$rel' for foreign key"; } else { ($f_key, $too_many) = $f_class->primary_columns; - $class->throw_exception( "might_have/has_one can only infer join for a single primary key; ${f_class} has more" ) - if $too_many; + $class->throw_exception( + "might_have/has_one can only infer join for a single primary key; ". + "${f_class} has more" + ) if $too_many; $guess = "using primary key of foreign class for foreign key"; } - $class->throw_exception("No such column ${f_key} on foreign class ${f_class} ($guess)") - if $f_class_loaded && !$f_class->has_column($f_key); + $class->throw_exception( + "No such column ${f_key} on foreign class ${f_class} ($guess)" + ) if $f_class_loaded && !$f_class->has_column($f_key); $cond = { "foreign.${f_key}" => "self.${pri}" }; } $class->add_relationship($rel, $f_class, diff --git a/lib/DBIx/Class/Relationship/ManyToMany.pm b/lib/DBIx/Class/Relationship/ManyToMany.pm index 387fc0b..23b971e 100644 --- a/lib/DBIx/Class/Relationship/ManyToMany.pm +++ b/lib/DBIx/Class/Relationship/ManyToMany.pm @@ -9,11 +9,62 @@ sub many_to_many { { no strict 'refs'; no warnings 'redefine'; + + my $add_meth = "add_to_${meth}"; + my $remove_meth = "remove_from_${meth}"; + my $set_meth = "set_${meth}"; + *{"${class}::${meth}"} = sub { my $self = shift; my $attrs = @_ > 1 && ref $_[$#_] eq 'HASH' ? 
pop(@_) : {}; - $self->search_related($rel)->search_related($f_rel, @_ > 0 ? @_ : undef, { %{$rel_attrs||{}}, %$attrs }); + $self->search_related($rel)->search_related( + $f_rel, @_ > 0 ? @_ : undef, { %{$rel_attrs||{}}, %$attrs } + ); + }; + + *{"${class}::${add_meth}"} = sub { + my $self = shift; + @_ > 0 or $self->throw_exception( + "${add_meth} needs an object or hashref" + ); + my $source = $self->result_source; + my $schema = $source->schema; + my $rel_source_name = $source->relationship_info($rel)->{source}; + my $rel_source = $schema->resultset($rel_source_name)->result_source; + my $f_rel_source_name = $rel_source->relationship_info($f_rel)->{source}; + my $f_rel_rs = $schema->resultset($f_rel_source_name); + my $obj = ref $_[0] + ? ( ref $_[0] eq 'HASH' ? $f_rel_rs->create($_[0]) : $_[0] ) + : ( $f_rel_rs->create({@_}) ); + my $link_vals = @_ > 1 && ref $_[$#_] eq 'HASH' ? pop(@_) : {}; + my $link = $self->search_related($rel)->new_result({}); + $link->set_from_related($f_rel, $obj); + $link->set_columns($link_vals); + $link->insert(); + }; + + *{"${class}::${set_meth}"} = sub { + my $self = shift; + @_ > 0 or $self->throw_exception( + "{$set_meth} needs a list of objects or hashrefs" + ); + $self->search_related($rel, {})->delete; + $self->$add_meth(shift) while (defined $_[0]); + }; + + *{"${class}::${remove_meth}"} = sub { + my $self = shift; + @_ > 0 && ref $_[0] ne 'HASH' + or $self->throw_exception("${remove_meth} needs an object"); + my $obj = shift; + my $rel_source = $self->search_related($rel)->result_source; + my $cond = $rel_source->relationship_info($f_rel)->{cond}; + my $link_cond = $rel_source->resolve_condition( + $cond, $obj, $f_rel + ); + $self->search_related($rel, $link_cond)->delete; }; + } } diff --git a/lib/DBIx/Class/ResultSet.pm b/lib/DBIx/Class/ResultSet.pm index 21fc256..4132d5f 100644 --- a/lib/DBIx/Class/ResultSet.pm +++ b/lib/DBIx/Class/ResultSet.pm @@ -6,10 +6,10 @@ use overload '0+' => \&count, 'bool' => sub { 1; }, 
fallback => 1; +use Carp::Clan qw/^DBIx::Class/; use Data::Page; use Storable; -use Scalar::Util qw/weaken/; - +use DBIx::Class::ResultSetColumn; use base qw/DBIx::Class/; __PACKAGE__->load_components(qw/AccessorGroup/); __PACKAGE__->mk_group_accessors('simple' => qw/result_source result_class/); @@ -82,71 +82,9 @@ will return a CD object, not a ResultSet. sub new { my $class = shift; return $class->new_result(@_) if ref $class; - - my ($source, $attrs) = @_; - weaken $source; - $attrs = Storable::dclone($attrs || {}); # { %{ $attrs || {} } }; - #use Data::Dumper; warn Dumper($attrs); - my $alias = ($attrs->{alias} ||= 'me'); - - $attrs->{columns} ||= delete $attrs->{cols} if $attrs->{cols}; - delete $attrs->{as} if $attrs->{columns}; - $attrs->{columns} ||= [ $source->columns ] unless $attrs->{select}; - $attrs->{select} = [ - map { m/\./ ? $_ : "${alias}.$_" } @{delete $attrs->{columns}} - ] if $attrs->{columns}; - $attrs->{as} ||= [ - map { m/^\Q$alias.\E(.+)$/ ? $1 : $_ } @{$attrs->{select}} - ]; - if (my $include = delete $attrs->{include_columns}) { - push(@{$attrs->{select}}, @$include); - push(@{$attrs->{as}}, map { m/([^.]+)$/; $1; } @$include); - } - #use Data::Dumper; warn Dumper(@{$attrs}{qw/select as/}); - - $attrs->{from} ||= [ { $alias => $source->from } ]; - $attrs->{seen_join} ||= {}; - my %seen; - if (my $join = delete $attrs->{join}) { - foreach my $j (ref $join eq 'ARRAY' ? @$join : ($join)) { - if (ref $j eq 'HASH') { - $seen{$_} = 1 foreach keys %$j; - } else { - $seen{$j} = 1; - } - } - push(@{$attrs->{from}}, $source->resolve_join( - $join, $attrs->{alias}, $attrs->{seen_join}) - ); - } - - $attrs->{group_by} ||= $attrs->{select} if delete $attrs->{distinct}; - $attrs->{order_by} = [ $attrs->{order_by} ] if - $attrs->{order_by} and !ref($attrs->{order_by}); - $attrs->{order_by} ||= []; - my $collapse = $attrs->{collapse} || {}; - if (my $prefetch = delete $attrs->{prefetch}) { - my @pre_order; - foreach my $p (ref $prefetch eq 'ARRAY' ? 
@$prefetch : ($prefetch)) { - if ( ref $p eq 'HASH' ) { - foreach my $key (keys %$p) { - push(@{$attrs->{from}}, $source->resolve_join($p, $attrs->{alias})) - unless $seen{$key}; - } - } else { - push(@{$attrs->{from}}, $source->resolve_join($p, $attrs->{alias})) - unless $seen{$p}; - } - my @prefetch = $source->resolve_prefetch( - $p, $attrs->{alias}, {}, \@pre_order, $collapse); - push(@{$attrs->{select}}, map { $_->[0] } @prefetch); - push(@{$attrs->{as}}, map { $_->[1] } @prefetch); - } - push(@{$attrs->{order_by}}, @pre_order); - } - $attrs->{collapse} = $collapse; -# use Data::Dumper; warn Dumper($collapse) if keys %{$collapse}; + my ($source, $attrs) = @_; + #weaken $source; if ($attrs->{page}) { $attrs->{rows} ||= 10; @@ -154,14 +92,16 @@ sub new { $attrs->{offset} += ($attrs->{rows} * ($attrs->{page} - 1)); } + $attrs->{alias} ||= 'me'; + $attrs->{_orig_alias} ||= $attrs->{alias}; + bless { result_source => $source, result_class => $attrs->{result_class} || $source->result_class, cond => $attrs->{where}, - from => $attrs->{from}, - collapse => $collapse, +# from => $attrs->{from}, +# collapse => $collapse, count => undef, - page => delete $attrs->{page}, pager => undef, attrs => $attrs }, $class; @@ -195,45 +135,114 @@ call it as C. sub search { my $self = shift; - - my $attrs = { %{$self->{attrs}} }; - my $having = delete $attrs->{having}; - $attrs = { %$attrs, %{ pop(@_) } } if @_ > 1 and ref $_[$#_] eq 'HASH'; + my $rs = $self->search_rs( @_ ); + return (wantarray ? $rs->all : $rs); +} + +=head2 search_rs + +=over 4 + +=item Arguments: $cond, \%attrs? + +=item Return Value: $resultset + +=back + +This method does the same exact thing as search() except it will +always return a resultset, even in list context. + +=cut + +sub search_rs { + my $self = shift; + + my $attrs = {}; + $attrs = pop(@_) if @_ > 1 and ref $_[$#_] eq 'HASH'; + my $our_attrs = exists $attrs->{_parent_attrs} + ? 
{ %{delete $attrs->{_parent_attrs}} } + : { %{$self->{attrs}} }; + my $having = delete $our_attrs->{having}; + + # XXX should only maintain _live_join_stack and generate _live_join_h from that + if ($attrs->{_live_join_stack}) { + foreach my $join (reverse @{$attrs->{_live_join_stack}}) { + $attrs->{_live_join_h} = defined $attrs->{_live_join_h} + ? { $join => $attrs->{_live_join_h} } + : $join; + } + } + # merge new attrs into inherited + foreach my $key (qw/join prefetch/) { + next unless exists $attrs->{$key}; + if (my $live_join = $attrs->{_live_join_stack} || $our_attrs->{_live_join_stack}) { + foreach my $join (reverse @{$live_join}) { + $attrs->{$key} = { $join => $attrs->{$key} }; + } + } + + $our_attrs->{$key} = $self->_merge_attr($our_attrs->{$key}, delete $attrs->{$key}); + } + + $our_attrs->{join} = $self->_merge_attr( + $our_attrs->{join}, $attrs->{_live_join_h} + ) if ($attrs->{_live_join_h}); + + if (defined $our_attrs->{prefetch}) { + $our_attrs->{join} = $self->_merge_attr( + $our_attrs->{join}, $our_attrs->{prefetch} + ); + } + + my $new_attrs = { %{$our_attrs}, %{$attrs} }; my $where = (@_ - ? ((@_ == 1 || ref $_[0] eq "HASH") - ? shift - : ((@_ % 2) - ? $self->throw_exception( - "Odd number of arguments to search") - : {@_})) - : undef()); + ? ( + (@_ == 1 || ref $_[0] eq "HASH") + ? shift + : ( + (@_ % 2) + ? $self->throw_exception("Odd number of arguments to search") + : {@_} + ) + ) + : undef + ); + if (defined $where) { - $attrs->{where} = (defined $attrs->{where} - ? { '-and' => - [ map { ref $_ eq 'ARRAY' ? [ -or => $_ ] : $_ } - $where, $attrs->{where} ] } - : $where); + $new_attrs->{where} = ( + defined $new_attrs->{where} + ? { '-and' => [ + map { + ref $_ eq 'ARRAY' ? [ -or => $_ ] : $_ + } $where, $new_attrs->{where} + ] + } + : $where); } if (defined $having) { - $attrs->{having} = (defined $attrs->{having} - ? { '-and' => - [ map { ref $_ eq 'ARRAY' ? 
[ -or => $_ ] : $_ } - $having, $attrs->{having} ] } - : $having); + $new_attrs->{having} = ( + defined $new_attrs->{having} + ? { '-and' => [ + map { + ref $_ eq 'ARRAY' ? [ -or => $_ ] : $_ + } $having, $new_attrs->{having} + ] + } + : $having); } - my $rs = (ref $self)->new($self->result_source, $attrs); + my $rs = (ref $self)->new($self->result_source, $new_attrs); + $rs->{_parent_source} = $self->{_parent_source} if $self->{_parent_source}; - unless (@_) { # no search, effectively just a clone + unless (@_) { # no search, effectively just a clone my $rows = $self->get_cache; if ($rows) { $rs->set_cache($rows); } } - - return (wantarray ? $rs->all : $rs); + return $rs; } =head2 search_literal @@ -271,71 +280,133 @@ sub search_literal { =back -Finds a row based on its primary key or unique constraint. For example: +Finds a row based on its primary key or unique constraint. For example, to find +a row by its primary key: my $cd = $schema->resultset('CD')->find(5); -Also takes an optional C attribute, to search by a specific key or unique -constraint. For example: +You can also find a row by a specific unique constraint using the C +attribute. For example: + + my $cd = $schema->resultset('CD')->find('Massive Attack', 'Mezzanine', { + key => 'cd_artist_title' + }); + +Additionally, you can specify the columns explicitly by name: my $cd = $schema->resultset('CD')->find( { artist => 'Massive Attack', title => 'Mezzanine', }, - { key => 'artist_title' } + { key => 'cd_artist_title' } ); -See also L and L. +If the C is specified as C, it searches only on the primary key. + +If no C is specified, it searches on all unique constraints defined on the +source, including the primary key. + +See also L and L. For information on how to +declare unique constraints, see +L. =cut sub find { - my ($self, @vals) = @_; - my $attrs = (@vals > 1 && ref $vals[$#vals] eq 'HASH' ? pop(@vals) : {}); + my $self = shift; + my $attrs = (@_ > 1 && ref $_[$#_] eq 'HASH' ? 
pop(@_) : {}); - my @cols = $self->result_source->primary_columns; - if (exists $attrs->{key}) { - my %uniq = $self->result_source->unique_constraints; - $self->throw_exception( - "Unknown key $attrs->{key} on '" . $self->result_source->name . "'" - ) unless exists $uniq{$attrs->{key}}; - @cols = @{ $uniq{$attrs->{key}} }; - } - #use Data::Dumper; warn Dumper($attrs, @vals, @cols); + # Default to the primary key, but allow a specific key + my @cols = exists $attrs->{key} + ? $self->result_source->unique_constraint_columns($attrs->{key}) + : $self->result_source->primary_columns; $self->throw_exception( "Can't find unless a primary key or unique constraint is defined" ) unless @cols; - my $query; - if (ref $vals[0] eq 'HASH') { - $query = { %{$vals[0]} }; - } elsif (@cols == @vals) { - $query = {}; - @{$query}{@cols} = @vals; - } else { - $query = {@vals}; + # Parse out a hashref from input + my $input_query; + if (ref $_[0] eq 'HASH') { + $input_query = { %{$_[0]} }; } - foreach my $key (grep { ! m/\./ } keys %$query) { - $query->{"$self->{attrs}{alias}.$key"} = delete $query->{$key}; + elsif (@_ == @cols) { + $input_query = {}; + @{$input_query}{@cols} = @_; } - #warn Dumper($query); - + else { + # Compatibility: Allow e.g. find(id => $value) + carp "Find by key => value deprecated; please use a hashref instead"; + $input_query = {@_}; + } + + my @unique_queries = $self->_unique_queries($input_query, $attrs); + + # Handle cases where the ResultSet defines the query, or where the user is + # abusing find + my $query = @unique_queries ? \@unique_queries : $input_query; + + # Run the query if (keys %$attrs) { - my $rs = $self->search($query,$attrs); - return keys %{$rs->{collapse}} ? $rs->next : $rs->single; - } else { - return keys %{$self->{collapse}} ? - $self->search($query)->next : - $self->single($query); + my $rs = $self->search($query, $attrs); + return keys %{$rs->_resolved_attrs->{collapse}} ? 
$rs->next : $rs->single; } + else { + return keys %{$self->_resolved_attrs->{collapse}} + ? $self->search($query)->next + : $self->single($query); + } +} + +# _unique_queries +# +# Build a list of queries which satisfy unique constraints. + +sub _unique_queries { + my ($self, $query, $attrs) = @_; + + my @constraint_names = exists $attrs->{key} + ? ($attrs->{key}) + : $self->result_source->unique_constraint_names; + + my @unique_queries; + foreach my $name (@constraint_names) { + my @unique_cols = $self->result_source->unique_constraint_columns($name); + my $unique_query = $self->_build_unique_query($query, \@unique_cols); + + next unless scalar keys %$unique_query; + + # Add the ResultSet's alias + my $alias = $self->{attrs}{alias}; + foreach my $key (grep { ! m/\./ } keys %$unique_query) { + $unique_query->{"$alias.$key"} = delete $unique_query->{$key}; + } + + push @unique_queries, $unique_query; + } + + return @unique_queries; +} + +# _build_unique_query +# +# Constrain the specified query hash based on the specified column names. + +sub _build_unique_query { + my ($self, $query, $unique_cols) = @_; + + return { + map { $_ => $query->{$_} } + grep { exists $query->{$_} } + @$unique_cols + }; } =head2 search_related =over 4 -=item Arguments: $cond, \%attrs? +=item Arguments: $rel, $cond, \%attrs? =item Return Value: $new_resultset @@ -371,9 +442,10 @@ L for more information. sub cursor { my ($self) = @_; - my $attrs = { %{$self->{attrs}} }; + + my $attrs = { %{$self->_resolved_attrs} }; return $self->{cursor} - ||= $self->result_source->storage->select($self->{from}, $attrs->{select}, + ||= $self->result_source->storage->select($attrs->{from}, $attrs->{select}, $attrs->{where},$attrs); } @@ -390,7 +462,7 @@ sub cursor { my $cd = $schema->resultset('CD')->single({ year => 2001 }); Inflates the first result without creating a cursor if the resultset has -any records in it; if not returns nothing. Used by find() as an optimisation. 
+any records in it; if not returns nothing. Used by L as an optimisation. Can optionally take an additional condition *only* - this is a fast-code-path method; if you need to add extra joins or similar call ->search and then @@ -400,7 +472,7 @@ method; if you need to add extra joins or similar call ->search and then sub single { my ($self, $where) = @_; - my $attrs = { %{$self->{attrs}} }; + my $attrs = { %{$self->_resolved_attrs} }; if ($where) { if (defined $attrs->{where}) { $attrs->{where} = { @@ -412,12 +484,107 @@ sub single { $attrs->{where} = $where; } } + + unless ($self->_is_unique_query($attrs->{where})) { + carp "Query not guaranteed to return a single row" + . "; please declare your unique constraints or use search instead"; + } + my @data = $self->result_source->storage->select_single( - $self->{from}, $attrs->{select}, - $attrs->{where},$attrs); + $attrs->{from}, $attrs->{select}, + $attrs->{where}, $attrs + ); + return (@data ? $self->_construct_object(@data) : ()); } +# _is_unique_query +# +# Try to determine if the specified query is guaranteed to be unique, based on +# the declared unique constraints. + +sub _is_unique_query { + my ($self, $query) = @_; + + my $collapsed = $self->_collapse_query($query); + my $alias = $self->{attrs}{alias}; + + foreach my $name ($self->result_source->unique_constraint_names) { + my @unique_cols = map { + "$alias.$_" + } $self->result_source->unique_constraint_columns($name); + + # Count the values for each unique column + my %seen = map { $_ => 0 } @unique_cols; + + foreach my $key (keys %$collapsed) { + my $aliased = $key =~ /\./ ? 
$key : "$alias.$key"; + next unless exists $seen{$aliased}; # Additional constraints are okay + $seen{$aliased} = scalar @{ $collapsed->{$key} }; + } + + # If we get 0 or more than 1 value for a column, it's not necessarily unique + return 1 unless grep { $_ != 1 } values %seen; + } + + return 0; +} + +# _collapse_query +# +# Recursively collapse the query, accumulating values for each column. + +sub _collapse_query { + my ($self, $query, $collapsed) = @_; + + $collapsed ||= {}; + + if (ref $query eq 'ARRAY') { + foreach my $subquery (@$query) { + next unless ref $subquery; # -or +# warn "ARRAY: " . Dumper $subquery; + $collapsed = $self->_collapse_query($subquery, $collapsed); + } + } + elsif (ref $query eq 'HASH') { + if (keys %$query and (keys %$query)[0] eq '-and') { + foreach my $subquery (@{$query->{-and}}) { +# warn "HASH: " . Dumper $subquery; + $collapsed = $self->_collapse_query($subquery, $collapsed); + } + } + else { +# warn "LEAF: " . Dumper $query; + foreach my $key (keys %$query) { + push @{$collapsed->{$key}}, $query->{$key}; + } + } + } + + return $collapsed; +} + +=head2 get_column + +=over 4 + +=item Arguments: $cond? + +=item Return Value: $resultsetcolumn + +=back + + my $max_length = $rs->get_column('length')->max; + +Returns a ResultSetColumn instance for $column based on $self + +=cut + +sub get_column { + my ($self, $column) = @_; + my $new = DBIx::Class::ResultSetColumn->new($self, $column); + return $new; +} =head2 search_like @@ -496,7 +663,7 @@ Can be used to efficiently iterate over records in the resultset: print $cd->title; } -Note that you need to store the resultset object, and call C on it. +Note that you need to store the resultset object, and call C on it. Calling C<< resultset('Table')->next >> repeatedly will always return the first record from the resultset. @@ -512,25 +679,186 @@ sub next { $self->{all_cache_position} = 1; return ($self->all)[0]; } - my @row = (exists $self->{stashed_row} ? 
- @{delete $self->{stashed_row}} : - $self->cursor->next + my @row = ( + exists $self->{stashed_row} + ? @{delete $self->{stashed_row}} + : $self->cursor->next ); -# warn Dumper(\@row); use Data::Dumper; return unless (@row); return $self->_construct_object(@row); } -sub _construct_object { - my ($self, @row) = @_; - my @as = @{ $self->{attrs}{as} }; +sub _resolved_attrs { + my $self = shift; + return $self->{_attrs} if $self->{_attrs}; + + my $attrs = $self->{attrs}; + my $source = $self->{_parent_source} || $self->{result_source}; + my $alias = $attrs->{_orig_alias}; + + # XXX - lose storable dclone + my $record_filter = delete $attrs->{record_filter}; + $attrs = Storable::dclone($attrs || {}); # { %{ $attrs || {} } }; + $attrs->{record_filter} = $record_filter if $record_filter; + + $attrs->{columns} ||= delete $attrs->{cols} if exists $attrs->{cols}; + if ($attrs->{columns}) { + delete $attrs->{as}; + } elsif (!$attrs->{select}) { + $attrs->{columns} = [ $self->{result_source}->columns ]; + } - my $info = $self->_collapse_result(\@as, \@row); + my $select_alias = $self->{attrs}{alias}; + $attrs->{select} ||= [ + map { m/\./ ? $_ : "${select_alias}.$_" } @{delete $attrs->{columns}} + ]; + $attrs->{as} ||= [ + map { m/^\Q${alias}.\E(.+)$/ ? $1 : $_ } @{$attrs->{select}} + ]; - my $new = $self->result_class->inflate_result($self->result_source, @$info); + my $adds; + if ($adds = delete $attrs->{include_columns}) { + $adds = [$adds] unless ref $adds eq 'ARRAY'; + push(@{$attrs->{select}}, @$adds); + push(@{$attrs->{as}}, map { m/([^.]+)$/; $1 } @$adds); + } + if ($adds = delete $attrs->{'+select'}) { + $adds = [$adds] unless ref $adds eq 'ARRAY'; + push(@{$attrs->{select}}, map { /\./ || ref $_ ? 
$_ : "${alias}.$_" } @$adds); + } + if (my $adds = delete $attrs->{'+as'}) { + $adds = [$adds] unless ref $adds eq 'ARRAY'; + push(@{$attrs->{as}}, @$adds); + } + + $attrs->{from} ||= [ { $alias => $source->from } ]; + $attrs->{seen_join} ||= {}; + my %seen; + if (my $join = delete $attrs->{join}) { + foreach my $j (ref $join eq 'ARRAY' ? @$join : ($join)) { + if (ref $j eq 'HASH') { + $seen{$_} = 1 foreach keys %$j; + } else { + $seen{$j} = 1; + } + } + push(@{$attrs->{from}}, + $source->resolve_join($join, $alias, $attrs->{seen_join}) + ); + } + + $attrs->{group_by} ||= $attrs->{select} if delete $attrs->{distinct}; + if ($attrs->{order_by}) { + $attrs->{order_by} = [ $attrs->{order_by} ] unless ref $attrs->{order_by}; + } else { + $attrs->{order_by} ||= []; + } + + my $collapse = $attrs->{collapse} || {}; + if (my $prefetch = delete $attrs->{prefetch}) { + my @pre_order; + foreach my $p (ref $prefetch eq 'ARRAY' ? @$prefetch : ($prefetch)) { + if ( ref $p eq 'HASH' ) { + foreach my $key (keys %$p) { + push(@{$attrs->{from}}, $source->resolve_join($p, $alias)) + unless $seen{$key}; + } + } else { + push(@{$attrs->{from}}, $source->resolve_join($p, $alias)) + unless $seen{$p}; + } + # bring joins back to level of current class + $p = $self->_reduce_joins($p, $attrs) if $attrs->{_live_join_stack}; + if ($p) { + my @prefetch = $self->result_source->resolve_prefetch( + $p, $alias, {}, \@pre_order, $collapse + ); + push(@{$attrs->{select}}, map { $_->[0] } @prefetch); + push(@{$attrs->{as}}, map { $_->[1] } @prefetch); + } + } + push(@{$attrs->{order_by}}, @pre_order); + } + $attrs->{collapse} = $collapse; + + return $self->{_attrs} = $attrs; +} + +sub _merge_attr { + my ($self, $a, $b) = @_; + return $b unless $a; - $new = $self->{attrs}{record_filter}->($new) - if exists $self->{attrs}{record_filter}; + if (ref $b eq 'HASH' && ref $a eq 'HASH') { + foreach my $key (keys %{$b}) { + if (exists $a->{$key}) { + $a->{$key} = $self->_merge_attr($a->{$key}, $b->{$key}); + 
} else { + $a->{$key} = $b->{$key}; + } + } + return $a; + } else { + $a = [$a] unless ref $a eq 'ARRAY'; + $b = [$b] unless ref $b eq 'ARRAY'; + + my $hash = {}; + my @array; + foreach my $x ($a, $b) { + foreach my $element (@{$x}) { + if (ref $element eq 'HASH') { + $hash = $self->_merge_attr($hash, $element); + } elsif (ref $element eq 'ARRAY') { + push(@array, @{$element}); + } else { + push(@array, $element) unless $b == $x + && grep { $_ eq $element } @array; + } + } + } + + @array = grep { !exists $hash->{$_} } @array; + + return keys %{$hash} + ? ( scalar(@array) + ? [$hash, @array] + : $hash + ) + : \@array; + } +} + +# bring the joins (which are from the original class) to the level +# of the current class so that we can resolve them properly +sub _reduce_joins { + my ($self, $p, $attrs) = @_; + + STACK: + foreach my $join (@{$attrs->{_live_join_stack}}) { + if (ref $p eq 'HASH') { + return undef unless exists $p->{$join}; + $p = $p->{$join}; + } elsif (ref $p eq 'ARRAY') { + foreach my $pe (@{$p}) { + return undef if $pe eq $join; + if (ref $pe eq 'HASH' && exists $pe->{$join}) { + $p = $pe->{$join}; + next STACK; + } + } + return undef; + } else { + return undef; + } + } + return $p; +} + +sub _construct_object { + my ($self, @row) = @_; + my $info = $self->_collapse_result($self->{_attrs}{as}, \@row); + my $new = $self->result_class->inflate_result($self->result_source, @$info); + $new = $self->{_attrs}{record_filter}->($new) + if exists $self->{_attrs}{record_filter}; return $new; } @@ -538,8 +866,8 @@ sub _collapse_result { my ($self, $as, $row, $prefix) = @_; my %const; - my @copy = @$row; + foreach my $this_as (@$as) { my $val = shift @copy; if (defined $prefix) { @@ -554,9 +882,10 @@ sub _collapse_result { } } + my $alias = $self->{attrs}{alias}; my $info = [ {}, {} ]; foreach my $key (keys %const) { - if (length $key) { + if (length $key && $key ne $alias) { my $target = $info; my @parts = split(/\./, $key); foreach my $p (@parts) { @@ -567,14 
+896,14 @@ sub _collapse_result { $info->[0] = $const{$key}; } } - + my @collapse; if (defined $prefix) { @collapse = map { m/^\Q${prefix}.\E(.+)$/ ? ($1) : () - } keys %{$self->{collapse}} + } keys %{$self->{_attrs}{collapse}} } else { - @collapse = keys %{$self->{collapse}}; + @collapse = keys %{$self->{_attrs}{collapse}}; }; if (@collapse) { @@ -584,14 +913,18 @@ sub _collapse_result { $target = $target->[1]->{$p} ||= []; } my $c_prefix = (defined($prefix) ? "${prefix}.${c}" : $c); - my @co_key = @{$self->{collapse}{$c_prefix}}; - my %co_check = map { ($_, $target->[0]->{$_}); } @co_key; + my @co_key = @{$self->{_attrs}{collapse}{$c_prefix}}; my $tree = $self->_collapse_result($as, $row, $c_prefix); + my %co_check = map { ($_, $tree->[0]->{$_}); } @co_key; my (@final, @raw); - while ( !(grep { - !defined($tree->[0]->{$_}) || - $co_check{$_} ne $tree->[0]->{$_} - } @co_key) ) { + + while ( + !( + grep { + !defined($tree->[0]->{$_}) || $co_check{$_} ne $tree->[0]->{$_} + } @co_key + ) + ) { push(@final, $tree); last unless (@raw = $self->cursor->next); $row = $self->{stashed_row} = \@raw; @@ -601,6 +934,7 @@ sub _collapse_result { # single empty result to indicate an empty prefetched has_many } + #print "final info: " . Dumper($info); return $info; } @@ -646,7 +980,6 @@ sub count { my $self = shift; return $self->search(@_)->count if @_ and defined $_[0]; return scalar @{ $self->get_cache } if $self->get_cache; - my $count = $self->_count; return 0 unless $count; @@ -659,15 +992,17 @@ sub count { sub _count { # Separated out so pager can get the full count my $self = shift; my $select = { count => '*' }; - my $attrs = { %{ $self->{attrs} } }; + + my $attrs = { %{$self->_resolved_attrs} }; if (my $group_by = delete $attrs->{group_by}) { delete $attrs->{having}; my @distinct = (ref $group_by ? 
@$group_by : ($group_by)); # todo: try CONCAT for multi-column pk my @pk = $self->result_source->primary_columns; if (@pk == 1) { + my $alias = $attrs->{_orig_alias}; foreach my $column (@distinct) { - if ($column =~ qr/^(?:\Q$attrs->{alias}.\E)?$pk[0]$/) { + if ($column =~ qr/^(?:\Q${alias}.\E)?$pk[0]$/) { @distinct = ($column); last; } @@ -675,7 +1010,6 @@ sub _count { # Separated out so pager can get the full count } $select = { count => { distinct => \@distinct } }; - #use Data::Dumper; die Dumper $select; } $attrs->{select} = $select; @@ -683,8 +1017,11 @@ sub _count { # Separated out so pager can get the full count # offset, order by and page are not needed to count. record_filter is cdbi delete $attrs->{$_} for qw/rows offset order_by page pager record_filter/; - - my ($count) = (ref $self)->new($self->result_source, $attrs)->cursor->next; + my $tmp_rs = (ref $self)->new($self->result_source, $attrs); + $tmp_rs->{_parent_source} = $self->{_parent_source} if $self->{_parent_source}; + #XXX - hack to pass through parent of related resultsets + + my ($count) = $tmp_rs->cursor->next; return $count; } @@ -726,12 +1063,13 @@ sub all { my @obj; - if (keys %{$self->{collapse}}) { + # TODO: don't call resolve here + if (keys %{$self->_resolved_attrs->{collapse}}) { +# if ($self->{attrs}{prefetch}) { # Using $self->cursor->all is really just an optimisation. # If we're collapsing has_many prefetches it probably makes # very little difference, and this is cleaner than hacking # _construct_object to survive the approach - $self->cursor->reset; my @row = $self->cursor->next; while (@row) { push(@obj, $self->_construct_object(@row)); @@ -763,6 +1101,7 @@ Resets the resultset's cursor, so you can iterate through the elements again. 
sub reset { my ($self) = @_; + delete $self->{_attrs} if exists $self->{_attrs}; $self->{all_cache_position} = 0; $self->cursor->reset; return $self; @@ -797,10 +1136,10 @@ sub _cond_for_update_delete { my ($self) = @_; my $cond = {}; - if (!ref($self->{cond})) { - # No-op. No condition, we're updating/deleting everything - } - elsif (ref $self->{cond} eq 'ARRAY') { + # No-op. No condition, we're updating/deleting everything + return $cond unless ref $self->{cond}; + + if (ref $self->{cond} eq 'ARRAY') { $cond = [ map { my %hash; @@ -817,7 +1156,7 @@ sub _cond_for_update_delete { $cond->{-and} = []; my @cond = @{$self->{cond}{-and}}; - for (my $i = 0; $i < @cond - 1; $i++) { + for (my $i = 0; $i < @cond; $i++) { my $entry = $cond[$i]; my %hash; @@ -829,7 +1168,7 @@ sub _cond_for_update_delete { } else { $entry =~ /([^.]+)$/; - $hash{$entry} = $cond[++$i]; + $hash{$1} = $cond[++$i]; } push @{$cond->{-and}}, \%hash; @@ -923,7 +1262,6 @@ to run. sub delete { my ($self) = @_; - my $del = {}; my $cond = $self->_cond_for_update_delete; @@ -971,10 +1309,10 @@ sub pager { my ($self) = @_; my $attrs = $self->{attrs}; $self->throw_exception("Can't create pager for non-paged rs") - unless $self->{page}; + unless $self->{attrs}{page}; $attrs->{rows} ||= 10; return $self->{pager} ||= Data::Page->new( - $self->_count, $attrs->{rows}, $self->{page}); + $self->_count, $attrs->{rows}, $self->{attrs}{page}); } =head2 page @@ -995,9 +1333,7 @@ attribute set on the resultset (10 by default). 
sub page { my ($self, $page) = @_; - my $attrs = { %{$self->{attrs}} }; - $attrs->{page} = $page; - return (ref $self)->new($self->result_source, $attrs); + return (ref $self)->new($self->result_source, { %{$self->{attrs}}, page => $page }); } =head2 new_result @@ -1022,7 +1358,7 @@ sub new_result { "Can't abstract implicit construct, condition not a hash" ) if ($self->{cond} && !(ref $self->{cond} eq 'HASH')); my %new = %$values; - my $alias = $self->{attrs}{alias}; + my $alias = $self->{attrs}{_orig_alias}; foreach my $key (keys %{$self->{cond}||{}}) { $new{$1} = $self->{cond}{$key} if ($key =~ m/^(?:\Q${alias}.\E)?([^.]+)$/); } @@ -1031,6 +1367,32 @@ sub new_result { return $obj; } +=head2 find_or_new + +=over 4 + +=item Arguments: \%vals, \%attrs? + +=item Return Value: $object + +=back + +Find an existing record from this resultset. If none exists, instantiate a new +result object and return it. The object will not be saved into your storage +until you call L on it. + +If you want objects to be saved immediately, use L instead. + +=cut + +sub find_or_new { + my $self = shift; + my $attrs = (@_ > 1 && ref $_[$#_] eq 'HASH' ? pop(@_) : {}); + my $hash = ref $_[0] eq 'HASH' ? shift : {@_}; + my $exists = $self->find($hash, $attrs); + return defined $exists ? $exists : $self->new_result($hash); +} + =head2 create =over 4 @@ -1066,8 +1428,8 @@ sub create { $class->find_or_create({ key => $val, ... }); -Searches for a record matching the search condition; if it doesn't find one, -creates one and returns that instead. +Tries to find a record based on its primary key or unique constraint; if none +is found, creates one and returns that instead. my $cd = $schema->resultset('CD')->find_or_create({ cdid => 5, @@ -1084,10 +1446,11 @@ constraint. For example: artist => 'Massive Attack', title => 'Mezzanine', }, - { key => 'artist_title' } + { key => 'cd_artist_title' } ); -See also L and L. +See also L and L. For information on how to declare +unique constraints, see L. 
=cut @@ -1126,7 +1489,7 @@ For example: title => 'Mezzanine', year => 1998, }, - { key => 'artist_title' } + { key => 'cd_artist_title' } ); If no C is specified, it searches on all unique constraints defined on the @@ -1134,41 +1497,23 @@ source, including the primary key. If the C is specified as C, it searches only on the primary key. -See also L and L. +See also L and L. For information on how to declare +unique constraints, see L. =cut sub update_or_create { my $self = shift; my $attrs = (@_ > 1 && ref $_[$#_] eq 'HASH' ? pop(@_) : {}); - my $hash = ref $_[0] eq 'HASH' ? shift : {@_}; - - my %unique_constraints = $self->result_source->unique_constraints; - my @constraint_names = (exists $attrs->{key} - ? ($attrs->{key}) - : keys %unique_constraints); - - my @unique_hashes; - foreach my $name (@constraint_names) { - my @unique_cols = @{ $unique_constraints{$name} }; - my %unique_hash = - map { $_ => $hash->{$_} } - grep { exists $hash->{$_} } - @unique_cols; - - push @unique_hashes, \%unique_hash - if (scalar keys %unique_hash == scalar @unique_cols); - } + my $cond = ref $_[0] eq 'HASH' ? shift : {@_}; - if (@unique_hashes) { - my $row = $self->single(\@unique_hashes); - if (defined $row) { - $row->update($hash); - return $row; - } + my $row = $self->find($cond); + if (defined $row) { + $row->update($cond); + return $row; } - return $self->create($hash); + return $self->create($cond); } =head2 get_cache @@ -1209,7 +1554,7 @@ than re-querying the database even if the cache attr is not set. sub set_cache { my ( $self, $data ) = @_; $self->throw_exception("set_cache requires an arrayref") - if defined($data) && (ref $data ne 'ARRAY'); + if defined($data) && (ref $data ne 'ARRAY'); $self->{all_cache} = $data; } @@ -1248,29 +1593,41 @@ Returns a related resultset for the supplied relationship name. 
=cut sub related_resultset { - my ( $self, $rel ) = @_; + my ($self, $rel) = @_; + $self->{related_resultsets} ||= {}; return $self->{related_resultsets}{$rel} ||= do { - #warn "fetching related resultset for rel '$rel'"; - my $rel_obj = $self->result_source->relationship_info($rel); - $self->throw_exception( - "search_related: result source '" . $self->result_source->name . - "' has no such relationship ${rel}") - unless $rel_obj; #die Dumper $self->{attrs}; - - my $rs = $self->search(undef, { join => $rel }); - my $alias = defined $rs->{attrs}{seen_join}{$rel} - && $rs->{attrs}{seen_join}{$rel} > 1 - ? join('_', $rel, $rs->{attrs}{seen_join}{$rel}) - : $rel; - - $self->result_source->schema->resultset($rel_obj->{class} - )->search( undef, - { %{$rs->{attrs}}, - alias => $alias, - select => undef, - as => undef } - ); + my $rel_obj = $self->result_source->relationship_info($rel); + + $self->throw_exception( + "search_related: result source '" . $self->result_source->name . + "' has no such relationship $rel") + unless $rel_obj; + + my @live_join_stack = @{$self->{attrs}{_live_join_stack}||[]}; + + # XXX mst: I'm sure this is wrong, somehow + # something with complex joins early on could die on search_rel + # followed by a prefetch. I think. need a test case. + + my $join_count = scalar(grep { $_ eq $rel } @live_join_stack); + my $alias = $join_count ? join('_', $rel, $join_count+1) : $rel; + + push(@live_join_stack, $rel); + + my $rs = $self->result_source->schema->resultset($rel_obj->{class})->search( + undef, { + select => undef, + as => undef, + alias => $alias, + _live_join_stack => \@live_join_stack, + _parent_attrs => $self->{attrs}} + ); + + # keep reference of the original resultset + $rs->{_parent_source} = $self->{_parent_source} || $self->result_source; + + return $rs; }; } @@ -1304,7 +1661,7 @@ Which column(s) to order the results by. This is currently passed through directly to SQL, so you can give e.g. C for a descending order on the column `year'. 
-Please note that if you have quoting enabled (see +Please note that if you have quoting enabled (see L) you will need to do C<\'year DESC' > to specify an order. (The scalar ref causes it to be passed as raw sql to the DB, so you will need to manually quote things as appropriate.) @@ -1364,6 +1721,23 @@ When you use function/stored procedure names and do not supply an C attribute, the column names returned are storage-dependent. E.g. MySQL would return a column named C in the above example. +=head2 +select + +=over 4 + +Indicates additional columns to be selected from storage. Works the same as +L