X-Git-Url: http://git.shadowcat.co.uk/gitweb/gitweb.cgi?a=blobdiff_plain;f=lib%2FDBIx%2FClass%2FResultSet.pm;h=60da1ca5a6c2a597b7123941d2206a984e5698d8;hb=763026c15bf2047020c261f430bd782bb5180850;hp=13e11744ffd256e0d5f95847ac1c233dbf810e18;hpb=639cf8f9d212c3e950ae5bae680133762c1df0aa;p=dbsrgits%2FDBIx-Class.git diff --git a/lib/DBIx/Class/ResultSet.pm b/lib/DBIx/Class/ResultSet.pm index 13e1174..60da1ca 100644 --- a/lib/DBIx/Class/ResultSet.pm +++ b/lib/DBIx/Class/ResultSet.pm @@ -7,6 +7,7 @@ use overload 'bool' => "_bool", fallback => 1; use Carp::Clan qw/^DBIx::Class/; +use DBIx::Class::Exception; use Data::Page; use Storable; use DBIx::Class::ResultSetColumn; @@ -46,27 +47,13 @@ A new ResultSet is returned from calling L on an existing ResultSet. The new one will contain all the conditions of the original, plus any new conditions added in the C call. -A ResultSet is also an iterator. L is used to return all the -Ls the ResultSet represents. +A ResultSet also incorporates an implicit iterator. L and L +can be used to walk through all the Ls the ResultSet +represents. The query that the ResultSet represents is B executed against the database when these methods are called: - -=over - -=item L - -=item L - -=item L - -=item L - -=item L - -=item L - -=back +L L L L L L =head1 EXAMPLES @@ -527,6 +514,14 @@ sub find { my $unique_query = $self->_build_unique_query($input_query, \@unique_cols); $query = $self->_add_alias($unique_query, $alias); } + elsif ($self->{attrs}{accessor} and $self->{attrs}{accessor} eq 'single') { + # This means that we got here after a merger of relationship conditions + # in ::Relationship::Base::search_related (the row method), and furthermore + # the relationship is of the 'single' type. This means that the condition + # provided by the relationship (already attached to $self) is sufficient, + # as there can be only one row in the databse that would satisfy the + # relationship + } else { my @unique_queries = $self->_unique_queries($input_query, $attrs); $query = @unique_queries @@ -535,27 +530,14 @@ sub find { } # Run the query - if (keys %$attrs) { - my $rs = $self->search($query, $attrs); - if (keys %{$rs->_resolved_attrs->{collapse}}) { - my $row = $rs->next; - carp "Query returned more than one row" if $rs->next; - return $row; - } - else { - return $rs->single; - } + my $rs = $self->search ($query, $attrs); + if (keys %{$rs->_resolved_attrs->{collapse}}) { + my $row = $rs->next; + carp "Query returned more than one row" if $rs->next; + return $row; } else { - if (keys %{$self->_resolved_attrs->{collapse}}) { - my $rs = $self->search($query); - my $row = $rs->next; - carp "Query returned more than one row" if $rs->next; - return $row; - } - else { - return $self->single($query); - } + return $rs->single; } } @@ -589,12 +571,16 @@ sub _unique_queries { my $where = $self->_collapse_cond($self->{attrs}{where} || {}); my $num_where = scalar keys %$where; - my @unique_queries; + my (@unique_queries, %seen_column_combinations); foreach my $name (@constraint_names) { - my @unique_cols = $self->result_source->unique_constraint_columns($name); - my $unique_query = $self->_build_unique_query($query, \@unique_cols); + my @constraint_cols = $self->result_source->unique_constraint_columns($name); + + my $constraint_sig = join "\x00", sort @constraint_cols; + next if $seen_column_combinations{$constraint_sig}++; - my $num_cols = scalar @unique_cols; + my $unique_query = $self->_build_unique_query($query, \@constraint_cols); + + my $num_cols = scalar @constraint_cols; my 
$num_query = scalar keys %$unique_query; my $total = $num_query + $num_where; @@ -674,7 +660,8 @@ L for more information. sub cursor { my ($self) = @_; - my $attrs = { %{$self->_resolved_attrs} }; + my $attrs = $self->_resolved_attrs_copy; + return $self->{cursor} ||= $self->result_source->storage->select($attrs->{from}, $attrs->{select}, $attrs->{where},$attrs); @@ -711,10 +698,14 @@ a warning: Query returned more than one row -In this case, you should be using L or L instead, or if you really +In this case, you should be using L or L instead, or if you really know what you are doing, use the L attribute to explicitly limit the size of the resultset. +This method will also throw an exception if it is called on a resultset prefetching +has_many, as such a prefetch implies fetching multiple rows from the database in +order to assemble the resulting object. + =back =cut @@ -725,7 +716,14 @@ sub single { $self->throw_exception('single() only takes search conditions, no attributes. You want ->search( $cond, $attrs )->single()'); } - my $attrs = { %{$self->_resolved_attrs} }; + my $attrs = $self->_resolved_attrs_copy; + + if (keys %{$attrs->{collapse}}) { + $self->throw_exception( + 'single() can not be used on resultsets prefetching has_many. Use find( \%cond ) or next() instead' + ); + } + if ($where) { if (defined $attrs->{where}) { $attrs->{where} = { @@ -752,6 +750,7 @@ sub single { return (@data ? ($self->_construct_object(@data))[0] : undef); } + # _is_unique_query # # Try to determine if the specified query is guaranteed to be unique, based on @@ -870,10 +869,10 @@ instead. An example conversion is: sub search_like { my $class = shift; - carp join ("\n", - 'search_like() is deprecated and will be removed in 0.09.', - 'Instead use ->search({ x => { -like => "y%" } })', - '(note the outer pair of {}s - they are important!)' + carp ( + 'search_like() is deprecated and will be removed in DBIC version 0.09.' + .' Instead use ->search({ x => { -like => "y%" } })' + .' (note the outer pair of {}s - they are important!)' ); my $attrs = (@_ > 1 && ref $_[$#_] eq 'HASH' ? pop(@_) : {}); my $query = ref $_[0] eq 'HASH' ? { %{shift()} }: {@_}; @@ -963,7 +962,9 @@ sub next { sub _construct_object { my ($self, @row) = @_; - my $info = $self->_collapse_result($self->{_attrs}{as}, \@row); + + my $info = $self->_collapse_result($self->{_attrs}{as}, \@row) + or return (); my @new = $self->result_class->inflate_result($self->result_source, @$info); @new = $self->{_attrs}{record_filter}->(@new) if exists $self->{_attrs}{record_filter}; @@ -973,6 +974,19 @@ sub _construct_object { sub _collapse_result { my ($self, $as_proto, $row) = @_; + # if the first row that ever came in is totally empty - this means we got + # hit by a smooth^Wempty left-joined resultset. Just noop in that case + # instead of producing a {} + # + my $has_def; + for (@$row) { + if (defined $_) { + $has_def++; + last; + } + } + return undef unless $has_def; + my @copy = @$row; # 'foo' => [ undef, 'foo' ] @@ -1145,8 +1159,8 @@ sub result_class { =back Performs an SQL C with the same query as the resultset was built -with to find the number of elements. If passed arguments, does a search -on the resultset and counts the results of that. +with to find the number of elements. 
Passing arguments is equivalent to +C<< $rs->search ($cond, \%attrs)->count >> =cut @@ -1155,75 +1169,209 @@ sub count { return $self->search(@_)->count if @_ and defined $_[0]; return scalar @{ $self->get_cache } if $self->get_cache; - my @subq_attrs = qw/prefetch collapse distinct group_by having having_bind/; - my $attrs = $self->_resolved_attrs; + my $attrs = $self->_resolved_attrs_copy; - # if we are not paged - we are simply asking for a limit - if (not $attrs->{page} and not $attrs->{software_limit}) { - push @subq_attrs, qw/rows offset/; + # this is a little optimization - it is faster to do the limit + # adjustments in software, instead of a subquery + my $rows = delete $attrs->{rows}; + my $offset = delete $attrs->{offset}; + + my $crs; + if ($self->_has_resolved_attr (qw/collapse group_by/)) { + $crs = $self->_count_subq_rs ($attrs); + } + else { + $crs = $self->_count_rs ($attrs); } + my $count = $crs->next; - return $self->_has_attr (@subq_attrs) - ? $self->_count_subq - : $self->_count_simple + $count -= $offset if $offset; + $count = $rows if $rows and $rows < $count; + $count = 0 if ($count < 0); + + return $count; } -sub _count_subq { +=head2 count_rs + +=over 4 + +=item Arguments: $cond, \%attrs?? + +=item Return Value: $count_rs + +=back + +Same as L but returns a L object. +This can be very handy for subqueries: + + ->search( { amount => $some_rs->count_rs->as_query } ) + +As with regular resultsets the SQL query will be executed only after +the resultset is accessed via L or L. That would return +the same single value obtainable via L. + +=cut + +sub count_rs { my $self = shift; + return $self->search(@_)->count_rs if @_; - my $attrs = { %{$self->_resolved_attrs} }; + # this may look like a lack of abstraction (count() does about the same) + # but in fact an _rs *must* use a subquery for the limits, as the + # software based limiting can not be ported if this $rs is to be used + # in a subquery itself (i.e. 
->as_query) + if ($self->_has_resolved_attr (qw/collapse group_by offset rows/)) { + return $self->_count_subq_rs; + } + else { + return $self->_count_rs; + } +} + +# +# returns a ResultSetColumn object tied to the count query +# +sub _count_rs { + my ($self, $attrs) = @_; + + my $rsrc = $self->result_source; + $attrs ||= $self->_resolved_attrs; + + my $tmp_attrs = { %$attrs }; + + # take off any limits, record_filter is cdbi, and no point of ordering a count + delete $tmp_attrs->{$_} for (qw/select as rows offset order_by record_filter/); + + # overwrite the selector (supplied by the storage) + $tmp_attrs->{select} = $rsrc->storage->_count_select ($rsrc, $tmp_attrs); + $tmp_attrs->{as} = 'count'; + + # read the comment on top of the actual function to see what this does + $tmp_attrs->{from} = $self->_switch_to_inner_join_if_needed ( + $tmp_attrs->{from}, $tmp_attrs->{alias} + ); + + my $tmp_rs = $rsrc->resultset_class->new($rsrc, $tmp_attrs)->get_column ('count'); + + return $tmp_rs; +} + +# +# same as above but uses a subquery +# +sub _count_subq_rs { + my ($self, $attrs) = @_; + + my $rsrc = $self->result_source; + $attrs ||= $self->_resolved_attrs_copy; - # copy for the subquery, we need to do some adjustments to it too my $sub_attrs = { %$attrs }; - # these can not go in the subquery either - delete $sub_attrs->{$_} for qw/prefetch select +select as +as columns +columns/; + # extra selectors do not go in the subquery and there is no point of ordering it + delete $sub_attrs->{$_} for qw/collapse select _prefetch_select as order_by/; + + # if we prefetch, we group_by primary keys only as this is what we would get out + # of the rs via ->next/->all. We DO WANT to clobber old group_by regardless + if ( keys %{$attrs->{collapse}} ) { + $sub_attrs->{group_by} = [ map { "$attrs->{alias}.$_" } ($rsrc->primary_columns) ] + } + + $sub_attrs->{select} = $rsrc->storage->_subq_count_select ($rsrc, $sub_attrs); + + # read the comment on top of the actual function to see what this does + $sub_attrs->{from} = $self->_switch_to_inner_join_if_needed ( + $sub_attrs->{from}, $sub_attrs->{alias} + ); + + # this is so that ordering can be thrown away in things like Top limit + $sub_attrs->{-for_count_only} = 1; - # force a group_by and the same set of columns (most databases require this) - $sub_attrs->{columns} = $sub_attrs->{group_by} ||= [ map { "$attrs->{alias}.$_" } ($self->result_source->primary_columns) ]; + my $sub_rs = $rsrc->resultset_class->new ($rsrc, $sub_attrs); $attrs->{from} = [{ - count_subq => (ref $self)->new ($self->result_source, $sub_attrs )->as_query + -alias => 'count_subq', + -source_handle => $rsrc->handle, + count_subq => $sub_rs->as_query, }]; # the subquery replaces this - delete $attrs->{$_} for qw/where bind prefetch collapse group_by having/; + delete $attrs->{$_} for qw/where bind collapse group_by having having_bind rows offset/; - return $self->__count ($attrs); + return $self->_count_rs ($attrs); } -sub _count_simple { - my $self = shift; - - my $count = $self->__count; - return 0 unless $count; - - # need to take offset from resolved attrs - $count -= $self->{_attrs}{offset} if $self->{_attrs}{offset}; - $count = $self->{attrs}{rows} if - $self->{attrs}{rows} and $self->{attrs}{rows} < $count; - $count = 0 if ($count < 0); - return $count; -} +# The DBIC relationship chaining implementation is pretty simple - every +# new related_relationship is pushed onto the {from} stack, and the {select} +# window simply slides further in. 
This means that when we count somewhere +# in the middle, we got to make sure that everything in the join chain is an +# actual inner join, otherwise the count will come back with unpredictable +# results (a resultset may be generated with _some_ rows regardless of if +# the relation which the $rs currently selects has rows or not). E.g. +# $artist_rs->cds->count - normally generates: +# SELECT COUNT( * ) FROM artist me LEFT JOIN cd cds ON cds.artist = me.artistid +# which actually returns the number of artists * (number of cds || 1) +# +# So what we do here is crawl {from}, determine if the current alias is at +# the top of the stack, and if not - make sure the chain is inner-joined down +# to the root. +# +sub _switch_to_inner_join_if_needed { + my ($self, $from, $alias) = @_; + + # subqueries and other oddness is naturally not supported + return $from if ( + ref $from ne 'ARRAY' + || + @$from <= 1 + || + ref $from->[0] ne 'HASH' + || + ! $from->[0]{-alias} + || + $from->[0]{-alias} eq $alias + ); -sub __count { - my ($self, $attrs) = @_; + my $switch_branch; + JOINSCAN: + for my $j (@{$from}[1 .. $#$from]) { + if ($j->[0]{-alias} eq $alias) { + $switch_branch = $j->[0]{-join_path}; + last JOINSCAN; + } + } - $attrs ||= { %{$self->_resolved_attrs} }; + # something else went wrong + return $from unless $switch_branch; - # take off any column specs, any pagers, record_filter is cdbi, and no point of ordering a count - delete $attrs->{$_} for (qw/columns +columns select +select as +as rows offset page pager order_by record_filter/); + # So it looks like we will have to switch some stuff around. + # local() is useless here as we will be leaving the scope + # anyway, and deep cloning is just too fucking expensive + # So replace the inner hashref manually + my @new_from = ($from->[0]); + my $sw_idx = { map { $_ => 1 } @$switch_branch }; - $attrs->{select} = { count => '*' }; - $attrs->{as} = [qw/count/]; + for my $j (@{$from}[1 .. $#$from]) { + my $jalias = $j->[0]{-alias}; - my $tmp_rs = (ref $self)->new($self->result_source, $attrs); - my ($count) = $tmp_rs->cursor->next; + if ($sw_idx->{$jalias}) { + my %attrs = %{$j->[0]}; + delete $attrs{-join_type}; + push @new_from, [ + \%attrs, + @{$j}[ 1 .. $#$j ], + ]; + } + else { + push @new_from, $j; + } + } - return $count; + return \@new_from; } + sub _bool { return 1; } @@ -1270,13 +1418,12 @@ sub all { my @obj; - # TODO: don't call resolve here if (keys %{$self->_resolved_attrs->{collapse}}) { -# if ($self->{attrs}{prefetch}) { - # Using $self->cursor->all is really just an optimisation. - # If we're collapsing has_many prefetches it probably makes - # very little difference, and this is cleaner than hacking - # _construct_object to survive the approach + # Using $self->cursor->all is really just an optimisation. + # If we're collapsing has_many prefetches it probably makes + # very little difference, and this is cleaner than hacking + # _construct_object to survive the approach + $self->cursor->reset; my @row = $self->cursor->next; while (@row) { push(@obj, $self->_construct_object(@row)); @@ -1289,6 +1436,7 @@ sub all { } $self->set_cache(\@obj) if $self->{attrs}{cache}; + return @obj; } @@ -1303,6 +1451,8 @@ sub all { =back Resets the resultset's cursor, so you can iterate through the elements again. +Implicitly resets the storage cursor, so a subsequent L will trigger +another query. 
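As a rough usage sketch of the documented iterate/reset behaviour (C<$rs> here stands for any ordinary resultset; the loop body is illustrative only):

  while (my $row = $rs->next) {
    # ... first pass over the data, cursor is exhausted afterwards
  }

  $rs->reset;            # rewind the implicit iterator
  my $row = $rs->next;   # issues a fresh query against the database
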
=cut @@ -1345,15 +1495,15 @@ sub _rs_update_delete { my $rsrc = $self->result_source; - my $needs_group_by_subq = $self->_has_attr (qw/prefetch distinct join seen_join group_by/); - my $needs_subq = $self->_has_attr (qw/row offset page/); + my $needs_group_by_subq = $self->_has_resolved_attr (qw/collapse group_by -join/); + my $needs_subq = $self->_has_resolved_attr (qw/row offset/); if ($needs_group_by_subq or $needs_subq) { # make a new $rs selecting only the PKs (that's all we really need) - my $attrs = $self->_resolved_attrs; + my $attrs = $self->_resolved_attrs_copy; - delete $attrs->{$_} for qw/prefetch select +select as +as columns +columns/; + delete $attrs->{$_} for qw/collapse select as/; $attrs->{columns} = [ map { "$attrs->{alias}.$_" } ($self->result_source->primary_columns) ]; if ($needs_group_by_subq) { @@ -1364,7 +1514,8 @@ sub _rs_update_delete { if (my $g = $attrs->{group_by}) { my @current_group_by = map { $_ =~ /\./ ? $_ : "$attrs->{alias}.$_" } - (ref $g eq 'ARRAY' ? @$g : $g ); + @$g + ; if ( join ("\x00", sort @current_group_by) @@ -1387,7 +1538,7 @@ sub _rs_update_delete { my $subrs = (ref $self)->new($rsrc, $attrs); - return $self->result_source->storage->subq_update_delete($subrs, $op, $values); + return $self->result_source->storage->_subq_update_delete($subrs, $op, $values); } else { return $rsrc->storage->$op( @@ -1512,7 +1663,7 @@ sub update_all { =item Arguments: none -=item Return Value: 1 +=item Return Value: $storage_rv =back @@ -1520,11 +1671,8 @@ Deletes the contents of the resultset from its result source. Note that this will not run DBIC cascade triggers. See L if you need triggers to run. See also L. -delete may not generate correct SQL for a query with joins or a resultset -chained from a related resultset. In this case it will generate a warning:- - -In these cases you may find that delete_all is more appropriate, or you -need to respecify your query in a way that can be expressed without a join. +Return value will be the amount of rows deleted; exact type of return value +is storage-dependent. =cut @@ -1576,8 +1724,9 @@ In void context, C in L is used to insert the data, as this is a faster method. Otherwise, each set of data is inserted into the database using -L, and a arrayref of the resulting row -objects is returned. +L, and the resulting objects are +accumulated into an array. The array itself, or an array reference +is returned depending on scalar or list context. Example: Assuming an Artist Class that has many CDs Classes relating: @@ -1643,7 +1792,7 @@ sub populate { foreach my $item (@$data) { push(@created, $self->create($item)); } - return @created; + return wantarray ? 
@created : \@created; } else { my ($first, @rest) = @$data; @@ -1653,13 +1802,19 @@ sub populate { ## do the belongs_to relationships foreach my $index (0..$#$data) { - if( grep { !defined $data->[$index]->{$_} } @pks ) { - my @ret = $self->populate($data); - return; + + # delegate to create() for any dataset without primary keys with specified relationships + if (grep { !defined $data->[$index]->{$_} } @pks ) { + for my $r (@rels) { + if (grep { ref $data->[$index]{$r} eq $_ } qw/HASH ARRAY/) { # a related set must be a HASH or AoH + my @ret = $self->populate($data); + return; + } + } } foreach my $rel (@rels) { - next unless $data->[$index]->{$rel} && ref $data->[$index]->{$rel} eq "HASH"; + next unless ref $data->[$index]->{$rel} eq "HASH"; my $result = $self->related_resultset($rel)->create($data->[$index]->{$rel}); my ($reverse) = keys %{$self->result_source->reverse_relationship_info($rel)}; my $related = $result->result_source->_resolve_condition( @@ -1874,22 +2029,31 @@ sub _is_deterministic_value { return 0; } -# _has_attr +# _has_resolved_attr # # determines if the resultset defines at least one # of the attributes supplied # # used to determine if a subquery is neccessary +# +# supports some virtual attributes: +# -join +# This will scan for any joins being present on the resultset. +# It is not a mere key-search but a deep inspection of {from} +# -sub _has_attr { +sub _has_resolved_attr { my ($self, @attr_names) = @_; my $attrs = $self->_resolved_attrs; - my $join_check_req; + my %extra_checks; for my $n (@attr_names) { - ++$join_check_req if $n =~ /join/; + if (grep { $n eq $_ } (qw/-join/) ) { + $extra_checks{$n}++; + next; + } my $attr = $attrs->{$n}; @@ -1906,9 +2070,9 @@ sub _has_attr { } } - # a join can be expressed as a multi-level from + # a resolved join is expressed as a multi-level from return 1 if ( - $join_check_req + $extra_checks{-join} and ref $attrs->{from} eq 'ARRAY' and @@ -1991,7 +2155,22 @@ B: This feature is still experimental. =cut -sub as_query { return shift->cursor->as_query(@_) } +sub as_query { + my $self = shift; + + my $attrs = $self->_resolved_attrs_copy; + + # For future use: + # + # in list ctx: + # my ($sql, \@bind, \%dbi_bind_attrs) = _select_args_to_query (...) + # $sql also has no wrapping parenthesis in list ctx + # + my $sqlbind = $self->result_source->storage + ->_select_args_to_query ($attrs->{from}, $attrs->{select}, $attrs->{where}, $attrs); + + return $sqlbind; +} =head2 find_or_new @@ -2018,13 +2197,14 @@ You most likely want this method when looking for existing rows using a unique constraint that is not the primary key, or looking for related rows. -If you want objects to be saved immediately, use L instead. +If you want objects to be saved immediately, use L +instead. -B: C is probably not what you want when creating a -new row in a table that uses primary keys supplied by the -database. Passing in a primary key column with a value of I -will cause L to attempt to search for a row with a value of -I. +B: Take care when using C with a table having +columns with default values that you intend to be automatically +supplied by the database (e.g. an auto_increment primary key column). +In normal usage, the value of such columns should NOT be included at +all in the call to C, even when set to C. =cut @@ -2032,8 +2212,10 @@ sub find_or_new { my $self = shift; my $attrs = (@_ > 1 && ref $_[$#_] eq 'HASH' ? pop(@_) : {}); my $hash = ref $_[0] eq 'HASH' ? shift : {@_}; - my $exists = $self->find($hash, $attrs); - return defined $exists ? 
$exists : $self->new_result($hash); + if (keys %$hash and my $row = $self->find($hash, $attrs) ) { + return $row; + } + return $self->new_result($hash); } =head2 create @@ -2057,12 +2239,15 @@ store. If the appropriate relationships are set up, foreign key fields can also be passed an object representing the foreign row, and the value will be set to its primary key. -To create related objects, pass a hashref for the value if the related -item is a foreign key relationship (L), -and use the name of the relationship as the key. (NOT the name of the field, -necessarily). For C and C relationships, pass an arrayref -of hashrefs containing the data for each of the rows to create in the foreign -tables, again using the relationship name as the key. +To create related objects, pass a hashref of related-object column values +B. If the relationship is of type C +(L) - pass an arrayref of hashrefs. +The process will correctly identify columns holding foreign keys, and will +transparrently populate them from the keys of the corresponding relation. +This can be applied recursively, and will work correctly for a structure +with an arbitrary depth and width, as long as the relationships actually +exists and the correct column data has been supplied. + Instead of hashrefs of plain related data (key/value pairs), you may also pass new or inserted objects. New objects (not inserted yet, see @@ -2099,6 +2284,19 @@ Cresultset. Note Hashref. } }); +=over + +=item WARNING + +When subclassing ResultSet never attempt to override this method. Since +it is a simple shortcut for C<< $self->new_result($attrs)->insert >>, a +lot of the internals simply never call it, so your override will be +bypassed more often than not. Override either L +or L depending on how early in the +L process you need to intervene. + +=back + =cut sub create { @@ -2119,7 +2317,7 @@ sub create { =back $cd->cd_to_producer->find_or_create({ producer => $producer }, - { key => 'primary }); + { key => 'primary' }); Tries to find a record based on its primary key or unique constraints; if none is found, creates one and returns that instead. @@ -2148,11 +2346,11 @@ condition. Another process could create a record in the table after the find has completed and before the create has started. To avoid this problem, use find_or_create() inside a transaction. -B: C is probably not what you want when creating -a new row in a table that uses primary keys supplied by the -database. Passing in a primary key column with a value of I -will cause L to attempt to search for a row with a value of -I. +B: Take care when using C with a table having +columns with default values that you intend to be automatically +supplied by the database (e.g. an auto_increment primary key column). +In normal usage, the value of such columns should NOT be included at +all in the call to C, even when set to C. See also L and L. For information on how to declare unique constraints, see L. @@ -2163,8 +2361,10 @@ sub find_or_create { my $self = shift; my $attrs = (@_ > 1 && ref $_[$#_] eq 'HASH' ? pop(@_) : {}); my $hash = ref $_[0] eq 'HASH' ? shift : {@_}; - my $exists = $self->find($hash, $attrs); - return defined $exists ? $exists : $self->create($hash); + if (keys %$hash and my $row = $self->find($hash, $attrs) ) { + return $row; + } + return $self->create($hash); } =head2 update_or_create @@ -2213,11 +2413,11 @@ If the C is specified as C, it searches only on the primary key. See also L and L. For information on how to declare unique constraints, see L. 
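For illustration, a hedged sketch of picking a specific constraint via the C<key> attribute (the C<CD> source and its C<cd_artist_title> constraint are hypothetical):

  my $cd = $schema->resultset('CD')->update_or_create(
    {
      artist => $artist_id,
      title  => 'Mezzanine',
      year   => 1998,
    },
    { key => 'cd_artist_title' },  # consult only this unique constraint
  );
  # passing { key => 'primary' } instead would match on the primary key only
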
-B: C is probably not what you want when -looking for a row in a table that uses primary keys supplied by the -database, unless you actually have a key value. Passing in a primary -key column with a value of I will cause L to attempt to -search for a row with a value of I. +B: Take care when using C with a table having +columns with default values that you intend to be automatically +supplied by the database (e.g. an auto_increment primary key column). +In normal usage, the value of such columns should NOT be included at +all in the call to C, even when set to C. =cut @@ -2274,7 +2474,13 @@ For example: $cd->insert; } -See also L, L and L. +B: Take care when using C with a table having +columns with default values that you intend to be automatically +supplied by the database (e.g. an auto_increment primary key column). +In normal usage, the value of such columns should NOT be included at +all in the call to C, even when set to C. + +See also L, L and L. =cut @@ -2379,14 +2585,14 @@ sub related_resultset { $self->{related_resultsets} ||= {}; return $self->{related_resultsets}{$rel} ||= do { - my $rel_obj = $self->result_source->relationship_info($rel); + my $rel_info = $self->result_source->relationship_info($rel); $self->throw_exception( "search_related: result source '" . $self->result_source->source_name . "' has no such relationship $rel") - unless $rel_obj; + unless $rel_info; - my ($from,$seen) = $self->_resolve_from($rel); + my ($from,$seen) = $self->_chain_relationship($rel); my $join_count = $seen->{$rel}; my $alias = ($join_count > 1 ? join('_', $rel, $join_count) : $rel); @@ -2484,31 +2690,93 @@ sub current_source_alias { # in order to properly resolve prefetch aliases (any alias # with a relation_chain_depth less than the depth of the # current prefetch is not considered) -sub _resolve_from { - my ($self, $extra_join) = @_; +# +# The increments happen in 1/2s to make it easier to correlate the +# join depth with the join path. An integer means a relationship +# specified via a search_related, whereas a fraction means an added +# join/prefetch via attributes +sub _chain_relationship { + my ($self, $rel) = @_; my $source = $self->result_source; my $attrs = $self->{attrs}; - my $from = $attrs->{from} - || [ { $attrs->{alias} => $source->from } ]; + my $from = [ @{ + $attrs->{from} + || + [{ + -source_handle => $source->handle, + -alias => $attrs->{alias}, + $attrs->{alias} => $source->from, + }] + }]; + + my $seen = { %{$attrs->{seen_join} || {} } }; + my $jpath = ($attrs->{seen_join} && keys %{$attrs->{seen_join}}) + ? $from->[-1][0]{-join_path} + : []; - my $seen = { %{$attrs->{seen_join}||{}} }; # we need to take the prefetch the attrs into account before we # ->_resolve_join as otherwise they get lost - captainL my $merged = $self->_merge_attr( $attrs->{join}, $attrs->{prefetch} ); - push @$from, $source->_resolve_join($merged, $attrs->{alias}, $seen) if ($merged); + my @requested_joins = $source->_resolve_join( + $merged, + $attrs->{alias}, + $seen, + $jpath, + ); + + push @$from, @requested_joins; - ++$seen->{-relation_chain_depth}; + $seen->{-relation_chain_depth} += 0.5; - push @$from, $source->_resolve_join($extra_join, $attrs->{alias}, $seen); + # if $self already had a join/prefetch specified on it, the requested + # $rel might very well be already included. 
What we do in this case + # is effectively a no-op (except that we bump up the chain_depth on + # the join in question so we could tell it *is* the search_related) + my $already_joined; - ++$seen->{-relation_chain_depth}; + + # we consider the last one thus reverse + for my $j (reverse @requested_joins) { + if ($rel eq $j->[0]{-join_path}[-1]) { + $j->[0]{-relation_chain_depth} += 0.5; + $already_joined++; + last; + } + } + +# alternative way to scan the entire chain - not backwards compatible +# for my $j (reverse @$from) { +# next unless ref $j eq 'ARRAY'; +# if ($j->[0]{-join_path} && $j->[0]{-join_path}[-1] eq $rel) { +# $j->[0]{-relation_chain_depth} += 0.5; +# $already_joined++; +# last; +# } +# } + + unless ($already_joined) { + push @$from, $source->_resolve_join( + $rel, + $attrs->{alias}, + $seen, + $jpath, + ); + } + + $seen->{-relation_chain_depth} += 0.5; return ($from,$seen); } +# too many times we have to do $attrs = { %{$self->_resolved_attrs} } +sub _resolved_attrs_copy { + my $self = shift; + return { %{$self->_resolved_attrs (@_)} }; +} + sub _resolved_attrs { my $self = shift; return $self->{_attrs} if $self->{_attrs}; @@ -2522,24 +2790,38 @@ sub _resolved_attrs { # build columns (as long as select isn't set) into a set of as/select hashes unless ( $attrs->{select} ) { - @colbits = map { - ( ref($_) eq 'HASH' ) - ? $_ - : { - ( - /^\Q${alias}.\E(.+)$/ - ? "$1" - : "$_" - ) - => - ( - /\./ - ? "$_" - : "${alias}.$_" - ) - } - } ( ref($attrs->{columns}) eq 'ARRAY' ) ? @{ delete $attrs->{columns}} : (delete $attrs->{columns} || $source->columns ); + + my @cols = ( ref($attrs->{columns}) eq 'ARRAY' ) + ? @{ delete $attrs->{columns}} + : ( + ( delete $attrs->{columns} ) + || + $source->storage->_order_select_columns( + $source, + [ $source->columns ], + ) + ) + ; + + @colbits = map { + ( ref($_) eq 'HASH' ) + ? $_ + : { + ( + /^\Q${alias}.\E(.+)$/ + ? "$1" + : "$_" + ) + => + ( + /\./ + ? "$_" + : "${alias}.$_" + ) + } + } @cols; } + # add the additional columns on foreach ( 'include_columns', '+columns' ) { push @colbits, map { @@ -2589,58 +2871,91 @@ sub _resolved_attrs { push( @{ $attrs->{as} }, @$adds ); } - $attrs->{from} ||= [ { $self->{attrs}{alias} => $source->from } ]; + $attrs->{from} ||= [ { + -source_handle => $source->handle, + -alias => $self->{attrs}{alias}, + $self->{attrs}{alias} => $source->from, + } ]; + + if ( $attrs->{join} || $attrs->{prefetch} ) { + + $self->throw_exception ('join/prefetch can not be used with a custom {from}') + if ref $attrs->{from} ne 'ARRAY'; - if ( exists $attrs->{join} || exists $attrs->{prefetch} ) { my $join = delete $attrs->{join} || {}; if ( defined $attrs->{prefetch} ) { $join = $self->_merge_attr( $join, $attrs->{prefetch} ); - } $attrs->{from} = # have to copy here to avoid corrupting the original [ - @{ $attrs->{from} }, - $source->_resolve_join( - $join, $alias, { %{ $attrs->{seen_join} || {} } } - ) + @{ $attrs->{from} }, + $source->_resolve_join( + $join, + $alias, + { %{ $attrs->{seen_join} || {} } }, + ($attrs->{seen_join} && keys %{$attrs->{seen_join}}) + ? $attrs->{from}[-1][0]{-join_path} + : [] + , + ) ]; - } - $attrs->{group_by} ||= $attrs->{select} - if delete $attrs->{distinct}; - if ( $attrs->{order_by} ) { + if ( defined $attrs->{order_by} ) { $attrs->{order_by} = ( ref( $attrs->{order_by} ) eq 'ARRAY' ? 
[ @{ $attrs->{order_by} } ] - : [ $attrs->{order_by} ] + : [ $attrs->{order_by} || () ] ); } - else { - $attrs->{order_by} = []; + + if ($attrs->{group_by} and ref $attrs->{group_by} ne 'ARRAY') { + $attrs->{group_by} = [ $attrs->{group_by} ]; + } + + # generate the distinct induced group_by early, as prefetch will be carried via a + # subquery (since a group_by is present) + if (delete $attrs->{distinct}) { + if ($attrs->{group_by}) { + carp ("Useless use of distinct on a grouped resultset ('distinct' is ignored when a 'group_by' is present)"); + } + else { + $attrs->{group_by} = [ grep { !ref($_) || (ref($_) ne 'HASH') } @{$attrs->{select}} ]; + } } - my $collapse = $attrs->{collapse} || {}; + $attrs->{collapse} ||= {}; if ( my $prefetch = delete $attrs->{prefetch} ) { $prefetch = $self->_merge_attr( {}, $prefetch ); - my @pre_order; - foreach my $p ( ref $prefetch eq 'ARRAY' ? @$prefetch : ($prefetch) ) { - - # bring joins back to level of current class - my $join_map = $self->_joinpath_aliases ($attrs->{from}, $attrs->{seen_join}); - my @prefetch = - $source->_resolve_prefetch( $p, $alias, $join_map, \@pre_order, $collapse ); - push( @{ $attrs->{select} }, map { $_->[0] } @prefetch ); - push( @{ $attrs->{as} }, map { $_->[1] } @prefetch ); - } - push( @{ $attrs->{order_by} }, @pre_order ); + + my $prefetch_ordering = []; + + my $join_map = $self->_joinpath_aliases ($attrs->{from}, $attrs->{seen_join}); + + my @prefetch = + $source->_resolve_prefetch( $prefetch, $alias, $join_map, $prefetch_ordering, $attrs->{collapse} ); + + # we need to somehow mark which columns came from prefetch + $attrs->{_prefetch_select} = [ map { $_->[0] } @prefetch ]; + + push @{ $attrs->{select} }, @{$attrs->{_prefetch_select}}; + push @{ $attrs->{as} }, (map { $_->[1] } @prefetch); + + push( @{$attrs->{order_by}}, @$prefetch_ordering ); + $attrs->{_collapse_order_by} = \@$prefetch_ordering; } - $attrs->{collapse} = $collapse; - if ( $attrs->{page} and not defined $attrs->{offset} ) { - $attrs->{offset} = ( $attrs->{rows} * ( $attrs->{page} - 1 ) ); + # if both page and offset are specified, produce a combined offset + # even though it doesn't make much sense, this is what pre 081xx has + # been doing + if (my $page = delete $attrs->{page}) { + $attrs->{offset} = + ($attrs->{rows} * ($page - 1)) + + + ($attrs->{offset} || 0) + ; } return $self->{_attrs} = $attrs; @@ -2652,14 +2967,22 @@ sub _joinpath_aliases { my $paths = {}; return $paths unless ref $fromspec eq 'ARRAY'; + my $cur_depth = $seen->{-relation_chain_depth} || 0; + + if (int ($cur_depth) != $cur_depth) { + $self->throw_exception ("-relation_chain_depth is not an integer, something went horribly wrong ($cur_depth)"); + } + for my $j (@$fromspec) { next if ref $j ne 'ARRAY'; - next if $j->[0]{-relation_chain_depth} < ( $seen->{-relation_chain_depth} || 0); + next if ($j->[0]{-relation_chain_depth} || 0) < $cur_depth; + + my $jpath = $j->[0]{-join_path}; my $p = $paths; - $p = $p->{$_} ||= {} for @{$j->[0]{-join_path}}; - push @{$p->{-join_aliases} }, $j->[0]{-join_alias}; + $p = $p->{$_} ||= {} for @{$jpath}[$cur_depth .. $#$jpath]; + push @{$p->{-join_aliases} }, $j->[0]{-alias}; } return $paths; @@ -2707,6 +3030,13 @@ sub _rollout_hash { sub _calculate_score { my ($self, $a, $b) = @_; + if (defined $a xor defined $b) { + return 0; + } + elsif (not defined $a) { + return 1; + } + if (ref $b eq 'HASH') { my ($b_key) = keys %{$b}; if (ref $a eq 'HASH') { @@ -2788,12 +3118,13 @@ See L for details. 
sub throw_exception { my $self=shift; + if (ref $self && $self->_source_handle->schema) { $self->_source_handle->schema->throw_exception(@_) - } else { - croak(@_); } - + else { + DBIx::Class::Exception->throw(@_); + } } # XXX: FIXME: Attributes docs need clearing up @@ -2815,10 +3146,15 @@ These are in no particular order: =back -Which column(s) to order the results by. If a single column name, or -an arrayref of names is supplied, the argument is passed through -directly to SQL. The hashref syntax allows for connection-agnostic -specification of ordering direction: +Which column(s) to order the results by. + +[The full list of suitable values is documented in +L; the following is a summary of +common options.] + +If a single column name, or an arrayref of names is supplied, the +argument is passed through directly to SQL. The hashref syntax allows +for connection-agnostic specification of ordering direction: For descending order: @@ -3097,6 +3433,42 @@ with that artist is given below (assuming many-to-many from artists to tags): B If you specify a C attribute, the C and C