X-Git-Url: http://git.shadowcat.co.uk/gitweb/gitweb.cgi?a=blobdiff_plain;f=lib%2FDBIx%2FClass%2FResultSet.pm;h=5609572668b18853a38d307976773e3c7dbcf081;hb=37aafa2ede65e38af8fe9eda374ad4626290932f;hp=c2287431960bb678f35f226c37406b39807fec8b;hpb=7d3139ac1ff52213e2dad35fc9c9d1057711256a;p=dbsrgits%2FDBIx-Class.git diff --git a/lib/DBIx/Class/ResultSet.pm b/lib/DBIx/Class/ResultSet.pm index c228743..5609572 100644 --- a/lib/DBIx/Class/ResultSet.pm +++ b/lib/DBIx/Class/ResultSet.pm @@ -10,10 +10,17 @@ use Storable; use DBIx::Class::ResultSetColumn; use DBIx::Class::ResultSourceHandle; use List::Util (); +use Hash::Merge (); use Scalar::Util qw/blessed weaken/; use Try::Tiny; +use Storable qw/nfreeze thaw/; use namespace::clean; +BEGIN { + # De-duplication in _merge_attr() is disabled, but left in for reference + *__HM_DEDUP = sub () { 0 }; +} + use overload '0+' => "count", 'bool' => "_bool", @@ -244,12 +251,32 @@ documentation for the first argument, see L. For more help on using joins with search, see L. +=head3 CAVEAT + +Note that L does not process/deflate any of the values passed in the +L-compatible search condition structure. This is unlike other +condition-bound methods L, L and L. The user must ensure +manually that any value passed to this method will stringify to something the +RDBMS knows how to deal with. A notable example is the handling of L +objects, for more info see: +L. + =cut sub search { my $self = shift; my $rs = $self->search_rs( @_ ); - return (wantarray ? $rs->all : $rs); + + my $want = wantarray; + if ($want) { + return $rs->all; + } + elsif (defined $want) { + return $rs; + } + else { + $self->throw_exception ('->search is *not* a mutator, calling it in void context makes no sense'); + } } =head2 search_rs @@ -272,106 +299,105 @@ sub search_rs { # Special-case handling for (undef, undef). if ( @_ == 2 && !defined $_[1] && !defined $_[0] ) { - pop(@_); pop(@_); + @_ = (); } - my $attrs = {}; - $attrs = pop(@_) if @_ > 1 and ref $_[$#_] eq 'HASH'; - my $our_attrs = { %{$self->{attrs}} }; - my $having = delete $our_attrs->{having}; - my $where = delete $our_attrs->{where}; - - my $rows; + my $call_attrs = {}; + $call_attrs = pop(@_) if ( + @_ > 1 and ( ! defined $_[-1] or ref $_[-1] eq 'HASH' ) + ); + # see if we can keep the cache (no $rs changes) + my $cache; my %safe = (alias => 1, cache => 1); - - unless ( - (@_ && defined($_[0])) # @_ == () or (undef) - || - (keys %$attrs # empty attrs or only 'safe' attrs - && List::Util::first { !$safe{$_} } keys %$attrs) - ) { - # no search, effectively just a clone - $rows = $self->get_cache; + if ( ! List::Util::first { !$safe{$_} } keys %$call_attrs and ( + ! defined $_[0] + or + ref $_[0] eq 'HASH' && ! keys %{$_[0]} + or + ref $_[0] eq 'ARRAY' && ! 
@{$_[0]} + )) { + $cache = $self->get_cache; } + my $old_attrs = { %{$self->{attrs}} }; + my $old_having = delete $old_attrs->{having}; + my $old_where = delete $old_attrs->{where}; + # reset the selector list - if (List::Util::first { exists $attrs->{$_} } qw{columns select as}) { - delete @{$our_attrs}{qw{select as columns +select +as +columns include_columns}}; + if (List::Util::first { exists $call_attrs->{$_} } qw{columns select as}) { + delete @{$old_attrs}{qw{select as columns +select +as +columns include_columns}}; } - my $new_attrs = { %{$our_attrs}, %{$attrs} }; + my $new_attrs = { %{$old_attrs}, %{$call_attrs} }; # merge new attrs into inherited - foreach my $key (qw/join prefetch +select +as +columns include_columns bind/) { - next unless exists $attrs->{$key}; - $new_attrs->{$key} = $self->_merge_attr($our_attrs->{$key}, $attrs->{$key}); + foreach my $key (qw/join prefetch/) { + next unless exists $call_attrs->{$key}; + $new_attrs->{$key} = $self->_merge_joinpref_attr($old_attrs->{$key}, $call_attrs->{$key}); + } + foreach my $key (qw/+select +as +columns include_columns bind/) { + next unless exists $call_attrs->{$key}; + $new_attrs->{$key} = $self->_merge_attr($old_attrs->{$key}, $call_attrs->{$key}); } - my $cond = (@_ - ? ( - (@_ == 1 || ref $_[0] eq "HASH") - ? ( - (ref $_[0] eq 'HASH') - ? ( - (keys %{ $_[0] } > 0) - ? shift - : undef - ) - : shift - ) - : ( - (@_ % 2) - ? $self->throw_exception("Odd number of arguments to search") - : {@_} - ) - ) - : undef - ); + # rip apart the rest of @_, parse a condition + my $call_cond = do { - if (defined $where) { - $new_attrs->{where} = ( - defined $new_attrs->{where} - ? { '-and' => [ - map { - ref $_ eq 'ARRAY' ? [ -or => $_ ] : $_ - } $where, $new_attrs->{where} - ] - } - : $where); - } + if (ref $_[0] eq 'HASH') { + (keys %{$_[0]}) ? $_[0] : undef + } + elsif (@_ == 1) { + $_[0] + } + elsif (@_ % 2) { + $self->throw_exception('Odd number of arguments to search') + } + else { + +{ @_ } + } - if (defined $cond) { - $new_attrs->{where} = ( - defined $new_attrs->{where} - ? { '-and' => [ - map { - ref $_ eq 'ARRAY' ? [ -or => $_ ] : $_ - } $cond, $new_attrs->{where} - ] - } - : $cond); + } if @_; + + carp 'search( %condition ) is deprecated, use search( \%condition ) instead' + if (@_ > 1 and ! $self->result_source->result_class->isa('DBIx::Class::CDBICompat') ); + + for ($old_where, $call_cond) { + if (defined $_) { + $new_attrs->{where} = $self->_stack_cond ( + $_, $new_attrs->{where} + ); + } } - if (defined $having) { - $new_attrs->{having} = ( - defined $new_attrs->{having} - ? { '-and' => [ - map { - ref $_ eq 'ARRAY' ? [ -or => $_ ] : $_ - } $having, $new_attrs->{having} - ] - } - : $having); + if (defined $old_having) { + $new_attrs->{having} = $self->_stack_cond ( + $old_having, $new_attrs->{having} + ) } my $rs = (ref $self)->new($self->result_source, $new_attrs); - $rs->set_cache($rows) if ($rows); + $rs->set_cache($cache) if ($cache); return $rs; } +sub _stack_cond { + my ($self, $left, $right) = @_; + if (defined $left xor defined $right) { + return defined $left ? $left : $right; + } + elsif (defined $left) { + return { -and => [ map + { ref $_ eq 'ARRAY' ? [ -or => $_ ] : $_ } + ($left, $right) + ]}; + } + + return undef; +} + =head2 search_literal =over 4 @@ -418,25 +444,56 @@ sub search_literal { =over 4 -=item Arguments: @values | \%cols, \%attrs? +=item Arguments: \%columns_values | @pk_values, \%attrs? =item Return Value: $row_object | undef =back -Finds a row based on its primary key or unique constraint. 
For example, to find -a row by its primary key: +Finds and returns a single row based on supplied criteria. Takes either a +hashref with the same format as L (including inference of foreign +keys from related objects), or a list of primary key values in the same +order as the L +declaration on the L. + +In either case an attempt is made to combine conditions already existing on +the resultset with the condition passed to this method. + +To aid with preparing the correct query for the storage you may supply the +C attribute, which is the name of a +L (the +unique constraint corresponding to the +L is always named +C). If the C attribute has been supplied, and DBIC is unable +to construct a query that satisfies the named unique constraint fully ( +non-NULL values for each column member of the constraint) an exception is +thrown. + +If no C is specified, the search is carried over all unique constraints +which are fully defined by the available condition. + +If no such constraint is found, C currently defaults to a simple +C<< search->(\%column_values) >> which may or may not do what you expect. +Note that this fallback behavior may be deprecated in further versions. If +you need to search with arbitrary conditions - use L. If the query +resulting from this fallback produces more than one row, a warning to the +effect is issued, though only the first row is constructed and returned as +C<$row_object>. - my $cd = $schema->resultset('CD')->find(5); +In addition to C, L recognizes and applies standard +L in the same way as L does. -You can also find a row by a specific unique constraint using the C -attribute. For example: +Note that if you have extra concerns about the correctness of the resulting +query you need to specify the C attribute and supply the entire condition +as an argument to find (since it is not always possible to perform the +combination of the resultset condition with the supplied one, especially if +the resultset condition contains literal sql). - my $cd = $schema->resultset('CD')->find('Massive Attack', 'Mezzanine', { - key => 'cd_artist_title' - }); +For example, to find a row by its primary key: + + my $cd = $schema->resultset('CD')->find(5); -Additionally, you can specify the columns explicitly by name: +You can also find a row by a specific unique constraint: my $cd = $schema->resultset('CD')->find( { @@ -446,24 +503,7 @@ Additionally, you can specify the columns explicitly by name: { key => 'cd_artist_title' } ); -If the C is specified as C, it searches only on the primary key. - -If no C is specified, it searches on all unique constraints defined on the -source for which column data is provided, including the primary key. - -If your table does not have a primary key, you B provide a value for the -C attribute matching one of the unique constraints on the source. - -In addition to C, L recognizes and applies standard -L in the same way as L does. - -Note: If your query does not return only one row, a warning is generated: - - Query returned more than one row - -See also L and L. For information on how to -declare unique constraints, see -L. +See also L and L. =cut @@ -471,57 +511,64 @@ sub find { my $self = shift; my $attrs = (@_ > 1 && ref $_[$#_] eq 'HASH' ? pop(@_) : {}); - # Default to the primary key, but allow a specific key - my @cols = exists $attrs->{key} - ? 
$self->result_source->unique_constraint_columns($attrs->{key}) - : $self->result_source->primary_columns; - $self->throw_exception( - "Can't find unless a primary key is defined or unique constraint is specified" - ) unless @cols; + my $rsrc = $self->result_source; - # Parse out a hashref from input - my $input_query; + # Parse out the condition from input + my $call_cond; if (ref $_[0] eq 'HASH') { - $input_query = { %{$_[0]} }; - } - elsif (@_ == @cols) { - $input_query = {}; - @{$input_query}{@cols} = @_; + $call_cond = { %{$_[0]} }; } else { - # Compatibility: Allow e.g. find(id => $value) - carp "Find by key => value deprecated; please use a hashref instead"; - $input_query = {@_}; - } - - my (%related, $info); - - KEY: foreach my $key (keys %$input_query) { - if (ref($input_query->{$key}) - && ($info = $self->result_source->relationship_info($key))) { - my $val = delete $input_query->{$key}; - next KEY if (ref($val) eq 'ARRAY'); # has_many for multi_create - my $rel_q = $self->result_source->_resolve_condition( - $info->{cond}, $val, $key - ); - die "Can't handle OR join condition in find" if ref($rel_q) eq 'ARRAY'; + my $constraint = exists $attrs->{key} ? $attrs->{key} : 'primary'; + my @c_cols = $rsrc->unique_constraint_columns($constraint); + + $self->throw_exception( + "No constraint columns, maybe a malformed '$constraint' constraint?" + ) unless @c_cols; + + $self->throw_exception ( + 'find() expects either a column/value hashref, or a list of values ' + . "corresponding to the columns of the specified unique constraint '$constraint'" + ) unless @c_cols == @_; + + $call_cond = {}; + @{$call_cond}{@c_cols} = @_; + } + + my %related; + for my $key (keys %$call_cond) { + if ( + my $keyref = ref($call_cond->{$key}) + and + my $relinfo = $rsrc->relationship_info($key) + ) { + my $val = delete $call_cond->{$key}; + + next if $keyref eq 'ARRAY'; # has_many for multi_create + + my $rel_q = $rsrc->_resolve_condition( + $relinfo->{cond}, $val, $key + ); + die "Can't handle complex relationship conditions in find" if ref($rel_q) ne 'HASH'; @related{keys %$rel_q} = values %$rel_q; } } - if (my @keys = keys %related) { - @{$input_query}{@keys} = values %related; - } + # relationship conditions take precedence (?) + @{$call_cond}{keys %related} = values %related; - # Build the final query: Default to the disjunction of the unique queries, - # but allow the input query in case the ResultSet defines the query or the - # user is abusing find my $alias = exists $attrs->{alias} ? $attrs->{alias} : $self->{attrs}{alias}; - my $query; + my $final_cond; if (exists $attrs->{key}) { - my @unique_cols = $self->result_source->unique_constraint_columns($attrs->{key}); - my $unique_query = $self->_build_unique_query($input_query, \@unique_cols); - $query = $self->_add_alias($unique_query, $alias); + $final_cond = $self->_qualify_cond_columns ( + + $self->_build_unique_cond ( + $attrs->{key}, + $call_cond, + ), + + $alias, + ); } elsif ($self->{attrs}{accessor} and $self->{attrs}{accessor} eq 'single') { # This means that we got here after a merger of relationship conditions @@ -532,14 +579,28 @@ sub find { # relationship } else { - my @unique_queries = $self->_unique_queries($input_query, $attrs); - $query = @unique_queries - ? 
[ map { $self->_add_alias($_, $alias) } @unique_queries ] - : $self->_add_alias($input_query, $alias); + # no key was specified - fall down to heuristics mode: + # run through all unique queries registered on the resultset, and + # 'OR' all qualifying queries together + my (@unique_queries, %seen_column_combinations); + for my $c_name ($rsrc->unique_constraint_names) { + next if $seen_column_combinations{ + join "\x00", sort $rsrc->unique_constraint_columns($c_name) + }++; + + push @unique_queries, try { + $self->_build_unique_cond ($c_name, $call_cond) + } || (); + } + + $final_cond = @unique_queries + ? [ map { $self->_qualify_cond_columns($_, $alias) } @unique_queries ] + : $self->_non_unique_find_fallback ($call_cond, $attrs) + ; } # Run the query, passing the result_class since it should propagate for find - my $rs = $self->search ($query, {result_class => $self->result_class, %$attrs}); + my $rs = $self->search ($final_cond, {result_class => $self->result_class, %$attrs}); if (keys %{$rs->_resolved_attrs->{collapse}}) { my $row = $rs->next; carp "Query returned more than one row" if $rs->next; @@ -550,71 +611,65 @@ sub find { } } -# _add_alias +# This is a stop-gap method as agreed during the discussion on find() cleanup: +# http://lists.scsys.co.uk/pipermail/dbix-class/2010-October/009535.html # -# Add the specified alias to the specified query hash. A copy is made so the -# original query is not modified. +# It is invoked when find() is called in legacy-mode with insufficiently-unique +# condition. It is provided for overrides until a saner way forward is devised +# +# *NOTE* This is not a public method, and it's *GUARANTEED* to disappear down +# the road. Please adjust your tests accordingly to catch this situation early +# DBIx::Class::ResultSet->can('_non_unique_find_fallback') is reasonable +# +# The method will not be removed without an adequately complete replacement +# for strict-mode enforcement +sub _non_unique_find_fallback { + my ($self, $cond, $attrs) = @_; + + return $self->_qualify_cond_columns( + $cond, + exists $attrs->{alias} + ? $attrs->{alias} + : $self->{attrs}{alias} + ); +} -sub _add_alias { - my ($self, $query, $alias) = @_; - my %aliased = %$query; - foreach my $col (grep { ! m/\./ } keys %aliased) { - $aliased{"$alias.$col"} = delete $aliased{$col}; +sub _qualify_cond_columns { + my ($self, $cond, $alias) = @_; + + my %aliased = %$cond; + for (keys %aliased) { + $aliased{"$alias.$_"} = delete $aliased{$_} + if $_ !~ /\./; } return \%aliased; } -# _unique_queries -# -# Build a list of queries which satisfy unique constraints. - -sub _unique_queries { - my ($self, $query, $attrs) = @_; - - my @constraint_names = exists $attrs->{key} - ? 
($attrs->{key}) - : $self->result_source->unique_constraint_names; +sub _build_unique_cond { + my ($self, $constraint_name, $extra_cond) = @_; - my $where = $self->_collapse_cond($self->{attrs}{where} || {}); - my $num_where = scalar keys %$where; + my @c_cols = $self->result_source->unique_constraint_columns($constraint_name); - my (@unique_queries, %seen_column_combinations); - foreach my $name (@constraint_names) { - my @constraint_cols = $self->result_source->unique_constraint_columns($name); - - my $constraint_sig = join "\x00", sort @constraint_cols; - next if $seen_column_combinations{$constraint_sig}++; - - my $unique_query = $self->_build_unique_query($query, \@constraint_cols); + # combination may fail if $self->{cond} is non-trivial + my ($final_cond) = try { + $self->_merge_with_rscond ($extra_cond) + } catch { + +{ %$extra_cond } + }; - my $num_cols = scalar @constraint_cols; - my $num_query = scalar keys %$unique_query; + # trim out everything not in $columns + $final_cond = { map { $_ => $final_cond->{$_} } @c_cols }; - my $total = $num_query + $num_where; - if ($num_query && ($num_query == $num_cols || $total == $num_cols)) { - # The query is either unique on its own or is unique in combination with - # the existing where clause - push @unique_queries, $unique_query; - } + if (my @missing = grep { ! defined $final_cond->{$_} } (@c_cols) ) { + $self->throw_exception( sprintf ( "Unable to satisfy requested constraint '%s', no values for column(s): %s", + $constraint_name, + join (', ', map { "'$_'" } @missing), + ) ); } - return @unique_queries; -} - -# _build_unique_query -# -# Constrain the specified query hash based on the specified column names. - -sub _build_unique_query { - my ($self, $query, $unique_cols) = @_; - - return { - map { $_ => $query->{$_} } - grep { exists $query->{$_} } - @$unique_cols - }; + return $final_cond; } =head2 search_related @@ -682,15 +737,15 @@ sub cursor { =item Arguments: $cond? -=item Return Value: $row_object? +=item Return Value: $row_object | undef =back my $cd = $schema->resultset('CD')->single({ year => 2001 }); Inflates the first result without creating a cursor if the resultset has -any records in it; if not returns nothing. Used by L as a lean version of -L. +any records in it; if not returns C. Used by L as a lean version +of L. While this method can take an optional search condition (just like L) being a fast-code-path it does not recognize search attributes. If you need to @@ -745,12 +800,6 @@ sub single { } } -# XXX: Disabled since it doesn't infer uniqueness in all cases -# unless ($self->_is_unique_query($attrs->{where})) { -# carp "Query not guaranteed to return a single row" -# . "; please declare your unique constraints or use search instead"; -# } - my @data = $self->result_source->storage->select_single( $attrs->{from}, $attrs->{select}, $attrs->{where}, $attrs @@ -760,38 +809,6 @@ sub single { } -# _is_unique_query -# -# Try to determine if the specified query is guaranteed to be unique, based on -# the declared unique constraints. - -sub _is_unique_query { - my ($self, $query) = @_; - - my $collapsed = $self->_collapse_query($query); - my $alias = $self->{attrs}{alias}; - - foreach my $name ($self->result_source->unique_constraint_names) { - my @unique_cols = map { - "$alias.$_" - } $self->result_source->unique_constraint_columns($name); - - # Count the values for each unique column - my %seen = map { $_ => 0 } @unique_cols; - - foreach my $key (keys %$collapsed) { - my $aliased = $key =~ /\./ ? 
$key : "$alias.$key"; - next unless exists $seen{$aliased}; # Additional constraints are okay - $seen{$aliased} = scalar keys %{ $collapsed->{$key} }; - } - - # If we get 0 or more than 1 value for a column, it's not necessarily unique - return 1 unless grep { $_ != 1 } values %seen; - } - - return 0; -} - # _collapse_query # # Recursively collapse the query, accumulating values for each column. @@ -913,7 +930,7 @@ sub slice { $attrs->{offset} = $self->{attrs}{offset} || 0; $attrs->{offset} += $min; $attrs->{rows} = ($max ? ($max - $min + 1) : 1); - return $self->search(undef(), $attrs); + return $self->search(undef, $attrs); #my $slice = (ref $self)->new($self->result_source, $attrs); #return (wantarray ? $slice->all : $slice); } @@ -924,7 +941,7 @@ sub slice { =item Arguments: none -=item Return Value: $result? +=item Return Value: $result | undef =back @@ -1409,12 +1426,12 @@ sub reset { =item Arguments: none -=item Return Value: $object? +=item Return Value: $object | undef =back -Resets the resultset and returns an object for the first result (if the -resultset returns anything). +Resets the resultset and returns an object for the first result (or C +if the resultset is empty). =cut @@ -1503,8 +1520,25 @@ sub _rs_update_delete { =back Sets the specified columns in the resultset to the supplied values in a -single query. Return value will be true if the update succeeded or false -if no records were updated; exact type of success value is storage-dependent. +single query. Note that this will not run any accessor/set_column/update +triggers, nor will it update any row object instances derived from this +resultset (this includes the contents of the L +if any). See L if you need to execute any on-update +triggers or cascades defined either by you or a +L. + +The return value is a pass through of what the underlying +storage backend returned, and may vary. See L for the most +common case. + +=head3 CAVEAT + +Note that L does not process/deflate any of the values passed in. +This is unlike the corresponding L. The user must +ensure manually that any value passed to this method will stringify to +something the RDBMS knows how to deal with. A notable example is the +handling of L objects, for more info see: +L. =cut @@ -1526,8 +1560,9 @@ sub update { =back -Fetches all objects and updates them one at a time. Note that C -will run DBIC cascade triggers, while L will not. +Fetches all objects and updates them one at a time via +L. Note that C will run DBIC defined +triggers, while L will not. =cut @@ -1552,12 +1587,16 @@ sub update_all { =back -Deletes the contents of the resultset from its result source. Note that this -will not run DBIC cascade triggers. See L if you need triggers -to run. See also L. +Deletes the rows matching this resultset in a single query. Note that this +will not run any delete triggers, nor will it alter the +L status of any row object instances +derived from this resultset (this includes the contents of the +L if any). See L if you need to +execute any on-delete triggers or cascades defined either by you or a +L. -Return value will be the number of rows deleted; exact type of return value -is storage-dependent. +The return value is a pass through of what the underlying storage backend +returned, and may vary. See L for the most common case. =cut @@ -1579,8 +1618,9 @@ sub delete { =back -Fetches all objects and deletes them one at a time. Note that C -will run DBIC cascade triggers, while L will not. +Fetches all objects and deletes them one at a time via +L. 
Note that C will run DBIC defined +triggers, while L will not. =cut @@ -1727,7 +1767,7 @@ sub populate { } ## inherit the data locked in the conditions of the resultset - my ($rs_data) = $self->_merge_cond_with_data({}); + my ($rs_data) = $self->_merge_with_rscond({}); delete @{$rs_data}{@columns}; my @inherit_cols = keys %$rs_data; my @inherit_data = values %$rs_data; @@ -1867,7 +1907,8 @@ my $mk_lazy_count_wizard = sub { # the tie class for 5.8.1 { - package DBIx::Class::__DBIC_LAZY_RS_COUNT__; + package # hide from pause + DBIx::Class::__DBIC_LAZY_RS_COUNT__; use base qw/Tie::Hash/; sub FIRSTKEY { my $dummy = scalar keys %{$_[0]{data}}; each %{$_[0]{data}} } @@ -2034,7 +2075,7 @@ sub new_result { $self->throw_exception( "new_result needs a hash" ) unless (ref $values eq 'HASH'); - my ($merged_cond, $cols_from_relations) = $self->_merge_cond_with_data($values); + my ($merged_cond, $cols_from_relations) = $self->_merge_with_rscond($values); my %new = ( %$merged_cond, @@ -2048,13 +2089,13 @@ sub new_result { return $self->result_class->new(\%new); } -# _merge_cond_with_data +# _merge_with_rscond # # Takes a simple hash of K/V data and returns its copy merged with the # condition already present on the resultset. Additionally returns an # arrayref of value/condition names, which were inferred from related # objects (this is needed for in-memory related objects) -sub _merge_cond_with_data { +sub _merge_with_rscond { my ($self, $data) = @_; my (%new_data, @cols_from_relations); @@ -2080,11 +2121,13 @@ sub _merge_cond_with_data { my %implied = %{$self->_remove_alias($collapsed_cond, $alias)}; while ( my($col, $value) = each %implied ) { - if (ref($value) eq 'HASH' && keys(%$value) && (keys %$value)[0] eq '=') { + my $vref = ref $value; + if ($vref eq 'HASH' && keys(%$value) && (keys %$value)[0] eq '=') { $new_data{$col} = $value->{'='}; - next; } - $new_data{$col} = $value if $self->_is_deterministic_value($value); + elsif( !$vref or $vref eq 'SCALAR' or blessed($value) ) { + $new_data{$col} = $value; + } } } @@ -2096,20 +2139,6 @@ sub _merge_cond_with_data { return (\%new_data, \@cols_from_relations); } -# _is_deterministic_value -# -# Make an effor to strip non-deterministic values from the condition, -# to make sure new_result chokes less - -sub _is_deterministic_value { - my $self = shift; - my $value = shift; - my $ref_type = ref $value; - return 1 if $ref_type eq '' || $ref_type eq 'SCALAR'; - return 1 if blessed $value; - return 0; -} - # _has_resolved_attr # # determines if the resultset defines at least one @@ -2267,17 +2296,18 @@ sub as_query { $cd->cd_to_producer->find_or_new({ producer => $producer }, { key => 'primary }); -Find an existing record from this resultset, based on its primary -key, or a unique constraint. If none exists, instantiate a new result -object and return it. The object will not be saved into your storage -until you call L on it. +Find an existing record from this resultset using L. if none exists, +instantiate a new result object and return it. The object will not be saved +into your storage until you call L on it. + +You most likely want this method when looking for existing rows using a unique +constraint that is not the primary key, or looking for related rows. -You most likely want this method when looking for existing rows using -a unique constraint that is not the primary key, or looking for -related rows. +If you want objects to be saved immediately, use L instead. -If you want objects to be saved immediately, use L -instead. 
+B: Make sure to read the documentation of L and understand the +significance of the C attribute, as its lack may skew your search, and +subsequently result in spurious new objects. B: Take care when using C with a table having columns with default values that you intend to be automatically @@ -2419,6 +2449,10 @@ constraint. For example: { key => 'cd_artist_title' } ); +B: Make sure to read the documentation of L and understand the +significance of the C attribute, as its lack may skew your search, and +subsequently result in spurious row creation. + B: Because find_or_create() reads from the database and then possibly inserts based on the result, this method is subject to a race condition. Another process could create a record in the table after @@ -2452,16 +2486,15 @@ sub find_or_create { =item Arguments: \%col_values, { key => $unique_constraint }? -=item Return Value: $rowobject +=item Return Value: $row_object =back $resultset->update_or_create({ col => $val, ... }); -First, searches for an existing row matching one of the unique constraints -(including the primary key) on the source of this resultset. If a row is -found, updates it with the other given column values. Otherwise, creates a new -row. +Like L, but if a row is found it is immediately updated via +C<< $found_row->update (\%col_values) >>. + Takes an optional C attribute to search on a specific unique constraint. For example: @@ -2483,14 +2516,9 @@ For example: key => 'primary', }); - -If no C is specified, it searches on all unique constraints defined on the -source, including the primary key. - -If the C is specified as C, it searches only on the primary key. - -See also L and L. For information on how to declare -unique constraints, see L. +B: Make sure to read the documentation of L and understand the +significance of the C attribute, as its lack may skew your search, and +subsequently result in spurious row creation. B: Take care when using C with a table having columns with default values that you intend to be automatically @@ -2498,6 +2526,9 @@ supplied by the database (e.g. an auto_increment primary key column). In normal usage, the value of such columns should NOT be included at all in the call to C, even when set to C. +See also L and L. For information on how to declare +unique constraints, see L. + =cut sub update_or_create { @@ -2526,13 +2557,9 @@ sub update_or_create { $resultset->update_or_new({ col => $val, ... }); -First, searches for an existing row matching one of the unique constraints -(including the primary key) on the source of this resultset. If a row is -found, updates it with the other given column values. Otherwise, instantiate -a new result object and return it. The object will not be saved into your storage -until you call L on it. +Like L but if a row is found it is immediately updated via +C<< $found_row->update (\%col_values) >>. -Takes an optional C attribute to search on a specific unique constraint. For example: # In your application @@ -2553,13 +2580,17 @@ For example: $cd->insert; } +B: Make sure to read the documentation of L and understand the +significance of the C attribute, as its lack may skew your search, and +subsequently result in spurious new objects. + B: Take care when using C with a table having columns with default values that you intend to be automatically supplied by the database (e.g. an auto_increment primary key column). In normal usage, the value of such columns should NOT be included at all in the call to C, even when set to C. -See also L, L and L. 
+See also L, L and L. =cut @@ -2583,7 +2614,7 @@ sub update_or_new { =item Arguments: none -=item Return Value: \@cache_objects? +=item Return Value: \@cache_objects | undef =back @@ -2631,7 +2662,7 @@ sub set_cache { =item Arguments: none -=item Return Value: [] +=item Return Value: undef =back @@ -2892,7 +2923,7 @@ sub _chain_relationship { # we need to take the prefetch the attrs into account before we # ->_resolve_join as otherwise they get lost - captainL - my $join = $self->_merge_attr( $attrs->{join}, $attrs->{prefetch} ); + my $join = $self->_merge_joinpref_attr( $attrs->{join}, $attrs->{prefetch} ); delete @{$attrs}{qw/join prefetch collapse group_by distinct select as columns +select +as +columns/}; @@ -2910,7 +2941,7 @@ sub _chain_relationship { # are resolved (prefetch is useless - we are wrapping # a subquery anyway). my $rs_copy = $self->search; - $rs_copy->{attrs}{join} = $self->_merge_attr ( + $rs_copy->{attrs}{join} = $self->_merge_joinpref_attr ( $rs_copy->{attrs}{join}, delete $rs_copy->{attrs}{prefetch}, ); @@ -2993,97 +3024,171 @@ sub _resolved_attrs { my $source = $self->result_source; my $alias = $attrs->{alias}; - $attrs->{columns} ||= delete $attrs->{cols} if exists $attrs->{cols}; - my @colbits; +######## +# resolve selectors, this one is quite hairy - # build columns (as long as select isn't set) into a set of as/select hashes - unless ( $attrs->{select} ) { + my $selection_pieces; - my @cols; - if ( ref $attrs->{columns} eq 'ARRAY' ) { - @cols = @{ delete $attrs->{columns}} - } elsif ( defined $attrs->{columns} ) { - @cols = delete $attrs->{columns} - } else { - @cols = $source->columns - } + $attrs->{columns} ||= delete $attrs->{cols} + if exists $attrs->{cols}; - for (@cols) { - if ( ref $_ eq 'HASH' ) { - push @colbits, $_ - } else { - my $key = /^\Q${alias}.\E(.+)$/ - ? "$1" - : "$_"; - my $value = /\./ - ? "$_" - : "${alias}.$_"; - push @colbits, { $key => $value }; - } - } - } + # disassemble columns / +columns + ( + $selection_pieces->{columns}{select}, + $selection_pieces->{columns}{as}, + $selection_pieces->{'+columns'}{select}, + $selection_pieces->{'+columns'}{as}, + ) = map + { + my (@sel, @as); + + for my $colbit (@$_) { - # add the additional columns on - foreach (qw{include_columns +columns}) { - if ( $attrs->{$_} ) { - my @list = ( ref($attrs->{$_}) eq 'ARRAY' ) - ? @{ delete $attrs->{$_} } - : delete $attrs->{$_}; - for (@list) { - if ( ref($_) eq 'HASH' ) { - push @colbits, $_ - } else { - my $key = ( split /\./, $_ )[-1]; - my $value = ( /\./ ? $_ : "$alias.$_" ); - push @colbits, { $key => $value }; + if (ref $colbit eq 'HASH') { + for my $as (keys %$colbit) { + push @sel, $colbit->{$as}; + push @as, $as; + } + } + elsif ($colbit) { + push @sel, $colbit; + push @as, $colbit; } } + + (\@sel, \@as) + } + ( + (ref $attrs->{columns} eq 'ARRAY' ? delete $attrs->{columns} : [ delete $attrs->{columns} ]), + # include_columns is a legacy add-on to +columns + [ map { ref $_ eq 'ARRAY' ? @$_ : ($_ || () ) } delete @{$attrs}{qw/+columns include_columns/} ] ) + ; + + # make copies of select/as and +select/+as + ( + $selection_pieces->{'select/as'}{select}, + $selection_pieces->{'select/as'}{as}, + $selection_pieces->{'+select/+as'}{select}, + $selection_pieces->{'+select/+as'}{as}, + ) = map + { $_ ? [ ref $_ eq 'ARRAY' ? @$_ : $_ ] : [] } + ( delete @{$attrs}{qw/select as +select +as/} ) + ; + + # default to * only when neither no non-plus selectors are available + if ( + ! @{$selection_pieces->{'select/as'}{select}} + and + ! 
@{$selection_pieces->{'columns'}{select}} + ) { + for ($source->columns) { + push @{$selection_pieces->{'select/as'}{select}}, $_; + push @{$selection_pieces->{'select/as'}{as}}, $_; } } - # start with initial select items - if ( $attrs->{select} ) { - $attrs->{select} = - ( ref $attrs->{select} eq 'ARRAY' ) - ? [ @{ $attrs->{select} } ] - : [ $attrs->{select} ]; + # final composition order (important) + my @sel_pairs = grep { + $selection_pieces->{$_} + && + ( + ( $selection_pieces->{$_}{select} && @{$selection_pieces->{$_}{select}} ) + || + ( $selection_pieces->{$_}{as} && @{$selection_pieces->{$_}{as}} ) + ) + } qw|columns select/as +columns +select/+as|; - if ( $attrs->{as} ) { - $attrs->{as} = - ( - ref $attrs->{as} eq 'ARRAY' - ? [ @{ $attrs->{as} } ] - : [ $attrs->{as} ] - ) - } else { - $attrs->{as} = [ map { - m/^\Q${alias}.\E(.+)$/ - ? $1 - : $_ - } @{ $attrs->{select} } - ] + # fill in missing as bits for each pair + # if it's the last pair we can let things slide ( bare +select is sadly popular) + my $out_of_sync; + + for my $i (0 .. $#sel_pairs) { + + my $pairname = $sel_pairs[$i]; + + my ($sel, $as) = @{$selection_pieces->{$pairname}}{qw/select as/}; + + $self->throw_exception( + "Unable to assemble final selection list: $pairname specified in addition to unbalanced $sel_pairs[$i-1]" + ) if ($out_of_sync); + + if (@$sel == @$as) { + next; + } + elsif (@$sel < @$as) { + $self->throw_exception( + "More 'as' elements than 'select' elements for $pairname, unable to continue" + ); + } + else { + # try to deduce the 'as' part, will work only if all the selectors are "plain", or contain an explicit -as + # if we can not deduce something - stop right there and leave the rest of the selector un-as'ed + # if there is an extra selection pair coming after that - it will die due to out_of_sync being set + for my $j ($#$as+1 .. $#$sel) { + if (my $ref = ref $sel->[$j]) { + if ($ref eq 'HASH' and exists $sel->[$j]{-as}) { + push @$as, $sel->[$j]{-as}; + } + else { + $out_of_sync++; + last; + } + } + else { + push @$as, $sel->[$j]; + } + } } } - else { - # otherwise we intialise select & as to empty - $attrs->{select} = []; - $attrs->{as} = []; + # assume all unqualified selectors to apply to the current alias (legacy stuff) + # disqualify all $alias.col as-bits (collapser mandated) + for (values %$selection_pieces) { + $_->{select} = [ map { (ref $_ or $_ =~ /\./) ? $_ : "$alias.$_" } @{$_->{select}} ]; + $_->{as} = [ map { $_ =~ /^\Q$alias.\E(.+)$/ ? $1 : $_ } @{$_->{as}} ]; + } + + # FIXME !!! + # Blatant bugwardness encoded into multiple tests. 
+ # While columns behaves sensibly, +columns is expected + # to dump *any* foreign columns into the main object + # /me vomits + $selection_pieces->{'+columns'}{as} = [ map + { (split /\./, $_)[-1] } + @{$selection_pieces->{'+columns'}{as}} + ]; + + # merge everything + for (@sel_pairs) { + $attrs->{select} = $self->_merge_attr ($attrs->{select}, $selection_pieces->{$_}{select}); + $attrs->{as} = $self->_merge_attr ($attrs->{as}, $selection_pieces->{$_}{as}); + } + + # de-duplicate the result (remove *identical* select/as pairs) + # and also die on duplicate {as} pointing to different {select}s + # not using a c-style for as the condition is prone to shrinkage + my $seen; + my $i = 0; + while ($i <= $#{$attrs->{as}} ) { + my ($sel, $as) = map { $attrs->{$_}[$i] } (qw/select as/); + + if ($seen->{"$sel \x00\x00 $as"}++) { + splice @$_, $i, 1 + for @{$attrs}{qw/select as/}; + } + elsif ($seen->{$as}++) { + $self->throw_exception( + "inflate_result() alias '$as' specified twice with different SQL-side {select}-ors" + ); + } + else { + $i++; + } } - # now add colbits to select/as - push @{ $attrs->{select} }, map values %{$_}, @colbits; - push @{ $attrs->{as} }, map keys %{$_}, @colbits; +## selector resolution done +######## - if ( my $adds = delete $attrs->{'+select'} ) { - $adds = [$adds] unless ref $adds eq 'ARRAY'; - push @{ $attrs->{select} }, - map { /\./ || ref $_ ? $_ : "$alias.$_" } @$adds; - } - if ( my $adds = delete $attrs->{'+as'} ) { - $adds = [$adds] unless ref $adds eq 'ARRAY'; - push @{ $attrs->{as} }, @$adds; - } $attrs->{from} ||= [{ -source_handle => $source->handle, @@ -3099,7 +3204,7 @@ sub _resolved_attrs { my $join = delete $attrs->{join} || {}; if ( defined $attrs->{prefetch} ) { - $join = $self->_merge_attr( $join, $attrs->{prefetch} ); + $join = $self->_merge_joinpref_attr( $join, $attrs->{prefetch} ); } $attrs->{from} = # have to copy here to avoid corrupting the original @@ -3136,40 +3241,15 @@ sub _resolved_attrs { carp ("Useless use of distinct on a grouped resultset ('distinct' is ignored when a 'group_by' is present)"); } else { - my $storage = $self->result_source->schema->storage; - my $rs_column_list = $storage->_resolve_column_info ($attrs->{from}); - - my $group_spec = $attrs->{group_by} = []; - my %group_index; - - for (@{$attrs->{select}}) { - if (! ref($_) or ref ($_) ne 'HASH' ) { - push @$group_spec, $_; - $group_index{$_}++; - if ($rs_column_list->{$_} and $_ !~ /\./ ) { - # add a fully qualified version as well - $group_index{"$rs_column_list->{$_}{-source_alias}.$_"}++; - } - } - } - # add any order_by parts that are not already present in the group_by - # we need to be careful not to add any named functions/aggregates - # i.e. select => [ ... { count => 'foo', -as 'foocount' } ... 
] - for my $chunk ($storage->_extract_order_columns($attrs->{order_by})) { - - # only consider real columns (for functions the user got to do an explicit group_by) - my $colinfo = $rs_column_list->{$chunk} - or next; - - $chunk = "$colinfo->{-source_alias}.$chunk" if $chunk !~ /\./; - push @$group_spec, $chunk unless $group_index{$chunk}++; - } + $attrs->{group_by} = $source->storage->_group_over_selection ( + @{$attrs}{qw/from select order_by/} + ); } } $attrs->{collapse} ||= {}; if ( my $prefetch = delete $attrs->{prefetch} ) { - $prefetch = $self->_merge_attr( {}, $prefetch ); + $prefetch = $self->_merge_joinpref_attr( {}, $prefetch ); my $prefetch_ordering = []; @@ -3292,7 +3372,7 @@ sub _calculate_score { } } -sub _merge_attr { +sub _merge_joinpref_attr { my ($self, $orig, $import) = @_; return $import unless defined($orig); @@ -3324,7 +3404,7 @@ sub _merge_attr { $orig->[$best_candidate->{position}] = $import_element; } elsif (ref $import_element eq 'HASH') { my ($key) = keys %{$orig_best}; - $orig->[$best_candidate->{position}] = { $key => $self->_merge_attr($orig_best->{$key}, $import_element->{$key}) }; + $orig->[$best_candidate->{position}] = { $key => $self->_merge_joinpref_attr($orig_best->{$key}, $import_element->{$key}) }; } } $seen_keys->{$import_key} = 1; # don't merge the same key twice @@ -3333,6 +3413,89 @@ sub _merge_attr { return $orig; } +{ + my $hm; + + sub _merge_attr { + $hm ||= do { + my $hm = Hash::Merge->new; + + $hm->specify_behavior({ + SCALAR => { + SCALAR => sub { + my ($defl, $defr) = map { defined $_ } (@_[0,1]); + + if ($defl xor $defr) { + return $defl ? $_[0] : $_[1]; + } + elsif (! $defl) { + return (); + } + elsif (__HM_DEDUP and $_[0] eq $_[1]) { + return $_[0]; + } + else { + return [$_[0], $_[1]]; + } + }, + ARRAY => sub { + return $_[1] if !defined $_[0]; + return $_[1] if __HM_DEDUP and List::Util::first { $_ eq $_[0] } @{$_[1]}; + return [$_[0], @{$_[1]}] + }, + HASH => sub { + return $_[1] if !defined $_[0]; + return $_[0] if !keys %{$_[1]}; + return [$_[0], $_[1]] + }, + }, + ARRAY => { + SCALAR => sub { + return $_[0] if !defined $_[1]; + return $_[0] if __HM_DEDUP and List::Util::first { $_ eq $_[1] } @{$_[0]}; + return [@{$_[0]}, $_[1]] + }, + ARRAY => sub { + my @ret = @{$_[0]} or return $_[1]; + return [ @ret, @{$_[1]} ] unless __HM_DEDUP; + my %idx = map { $_ => 1 } @ret; + push @ret, grep { ! defined $idx{$_} } (@{$_[1]}); + \@ret; + }, + HASH => sub { + return [ $_[1] ] if ! 
@{$_[0]}; + return $_[0] if !keys %{$_[1]}; + return $_[0] if __HM_DEDUP and List::Util::first { $_ eq $_[1] } @{$_[0]}; + return [ @{$_[0]}, $_[1] ]; + }, + }, + HASH => { + SCALAR => sub { + return $_[0] if !defined $_[1]; + return $_[1] if !keys %{$_[0]}; + return [$_[0], $_[1]] + }, + ARRAY => sub { + return $_[0] if !@{$_[1]}; + return $_[1] if !keys %{$_[0]}; + return $_[1] if __HM_DEDUP and List::Util::first { $_ eq $_[0] } @{$_[1]}; + return [ $_[0], @{$_[1]} ]; + }, + HASH => sub { + return $_[0] if !keys %{$_[1]}; + return $_[1] if !keys %{$_[0]}; + return $_[0] if $_[0] eq $_[1]; + return [ $_[0], $_[1] ]; + }, + } + } => 'DBIC_RS_ATTR_MERGER'); + $hm; + }; + + return $hm->merge ($_[1], $_[2]); + } +} + sub result_source { my $self = shift; @@ -3343,6 +3506,27 @@ sub result_source { } } + +sub STORABLE_freeze { + my ($self, $cloning) = @_; + my $to_serialize = { %$self }; + + # A cursor in progress can't be serialized (and would make little sense anyway) + delete $to_serialize->{cursor}; + + return nfreeze($to_serialize); +} + +# need this hook for symmetry +sub STORABLE_thaw { + my ($self, $cloning, $serialized) = @_; + + %$self = %{ thaw($serialized) }; + + return $self; +} + + =head2 throw_exception See L for details.
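
The STORABLE_freeze/STORABLE_thaw hooks introduced in this patch make a ResultSet round-trippable with Storable: the in-progress cursor is dropped rather than serialized, and the remaining attribute hash is frozen and later restored. Below is a minimal usage sketch, not part of the patch itself; the C<$schema> handle and the C<'CD'> resultset are illustrative assumptions, and whether the thawed copy can actually re-execute its query depends on the result source handle still being able to resolve back to a connected schema at thaw time.

  use Storable qw(nfreeze thaw);

  my $rs = $schema->resultset('CD')->search({ year => 2001 });

  # freeze()/nfreeze() on a blessed object invokes STORABLE_freeze,
  # which serializes the attribute hash minus any live cursor
  my $frozen = nfreeze($rs);

  # thaw() invokes STORABLE_thaw, restoring the attribute hash into
  # a fresh ResultSet object
  my $copy = thaw($frozen);

  # $copy carries the same condition and attributes as $rs;
  # re-running it (e.g. $copy->all) assumes the source handle can
  # still locate a connected schema in the current process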