X-Git-Url: http://git.shadowcat.co.uk/gitweb/gitweb.cgi?a=blobdiff_plain;f=lib%2FDBIx%2FClass%2FResultSet.pm;h=574b2da18f3dea55c4e8ae5d5065e472921e3ce5;hb=c9733800;hp=c8c89c5bf76b9595222aeaad523f02efc0502039;hpb=be64931c710bcde7abe7334349b4c8a123645332;p=dbsrgits%2FDBIx-Class.git diff --git a/lib/DBIx/Class/ResultSet.pm b/lib/DBIx/Class/ResultSet.pm index c8c89c5..574b2da 100644 --- a/lib/DBIx/Class/ResultSet.pm +++ b/lib/DBIx/Class/ResultSet.pm @@ -927,7 +927,7 @@ sub _build_unique_cond { =over 4 -=item Arguments: $rel, $cond, \%attrs? +=item Arguments: $rel, $cond?, \%attrs? =item Return Value: $new_resultset (scalar context) || @row_objs (list context) @@ -1541,10 +1541,15 @@ sub _count_subq_rs { # extra selectors do not go in the subquery and there is no point of ordering it, nor locking it delete @{$sub_attrs}{qw/collapse columns as select _prefetch_selector_range order_by for/}; - # if we multi-prefetch we group_by primary keys only as this is what we would + # if we multi-prefetch we group_by something unique, as this is what we would # get out of the rs via ->next/->all. We *DO WANT* to clobber old group_by regardless if ( keys %{$attrs->{collapse}} ) { - $sub_attrs->{group_by} = [ map { "$attrs->{alias}.$_" } ($rsrc->_pri_cols) ] + $sub_attrs->{group_by} = [ map { "$attrs->{alias}.$_" } @{ + $rsrc->_identifying_column_set || $self->throw_exception( + 'Unable to construct a unique group_by criteria properly collapsing the ' + . 'has_many prefetch before count()' + ); + } ] } # Calculate subquery selector @@ -1751,14 +1756,19 @@ sub _rs_update_delete { # simplify the joinmap and maybe decide if a grouping (and thus subquery) is necessary my $relation_classifications; if (ref($attrs->{from}) eq 'ARRAY') { - $attrs->{from} = $storage->_prune_unused_joins ($attrs->{from}, $attrs->{select}, $cond, $attrs); - - $relation_classifications = $storage->_resolve_aliastypes_from_select_args ( - [ @{$attrs->{from}}[1 .. $#{$attrs->{from}}] ], - $attrs->{select}, - $cond, - $attrs - ) unless $needs_group_by_subq; # we already know we need a group, no point of resolving them + if (@{$attrs->{from}} == 1) { + # not a fucking JOIN at all, quit with the dickery + $relation_classifications = {}; + } else { + $attrs->{from} = $storage->_prune_unused_joins ($attrs->{from}, $attrs->{select}, $cond, $attrs); + + $relation_classifications = $storage->_resolve_aliastypes_from_select_args ( + [ @{$attrs->{from}}[1 .. $#{$attrs->{from}}] ], + $attrs->{select}, + $cond, + $attrs + ) unless $needs_group_by_subq; # we already know we need a group, no point of resolving them + } } else { $needs_group_by_subq ||= 1; # if {from} is unparseable assume the worst @@ -1776,39 +1786,37 @@ sub _rs_update_delete { ) { # Most databases do not allow aliasing of tables in UPDATE/DELETE. Thus # a condition containing 'me' or other table prefixes will not work - # at all. What this code tries to do (badly) is to generate a condition - # with the qualifiers removed, by exploiting the quote mechanism of sqla - # - # this is atrocious and should be replaced by normal sqla introspection - # one sunny day - my ($sql, @bind) = do { - my $sqla = $rsrc->storage->sql_maker; - local $sqla->{_dequalify_idents} = 1; - $sqla->_recurse_where($self->{cond}); - } if $self->{cond}; - + # at all. Tell SQLMaker to dequalify idents via a gross hack. + my $sqla = $rsrc->storage->sql_maker; + local $sqla->{_dequalify_idents} = 1; return $rsrc->storage->$op( $rsrc, $op eq 'update' ? $values : (), - $self->{cond} ? 
\[$sql, @bind] : (),
+    $self->{cond},
   );
 }
 
 # we got this far - means it is time to wrap a subquery
-  my $pcols = [ $rsrc->_pri_cols ];
+  my $idcols = $rsrc->_identifying_column_set || $self->throw_exception(
+    sprintf(
+      "Unable to perform complex resultset %s() without an identifying set of columns on source '%s'",
+      $op,
+      $rsrc->source_name,
+    )
+  );
 
   my $existing_group_by = delete $attrs->{group_by};
 
   # make a new $rs selecting only the PKs (that's all we really need for the subq)
   delete $attrs->{$_} for qw/collapse _collapse_order_by select _prefetch_selector_range as/;
-  $attrs->{columns} = [ map { "$attrs->{alias}.$_" } @$pcols ];
+  $attrs->{columns} = [ map { "$attrs->{alias}.$_" } @$idcols ];
   $attrs->{group_by} = \ '';  # FIXME - this is an evil hack, it causes the optimiser to kick in and throw away the LEFT joins
 
   my $subrs = (ref $self)->new($rsrc, $attrs);
 
-  if (@$pcols == 1) {
+  if (@$idcols == 1) {
     return $storage->$op (
       $rsrc,
       $op eq 'update' ? $values : (),
-      { $pcols->[0] => { -in => $subrs->as_query } },
+      { $idcols->[0] => { -in => $subrs->as_query } },
     );
   }
   elsif ($storage->_use_multicolumn_in) {
@@ -1816,7 +1824,7 @@ sub _rs_update_delete {
     my $sql_maker = $storage->sql_maker;
     my ($sql, @bind) = @${$subrs->as_query};
     $sql = sprintf ('(%s) IN %s', # the as_query already comes with a set of parenthesis
-      join (', ', map { $sql_maker->_quote ($_) } @$pcols),
+      join (', ', map { $sql_maker->_quote ($_) } @$idcols),
       $sql,
     );
 
@@ -1864,8 +1872,8 @@ sub _rs_update_delete {
     my @op_condition;
     for my $row ($subrs->search({}, { group_by => $subq_group_by })->cursor->all) {
       push @op_condition, { map
-        { $pcols->[$_] => $row->[$_] }
-        (0 .. $#$pcols)
+        { $idcols->[$_] => $row->[$_] }
+        (0 .. $#$idcols)
       };
     }
 
@@ -2701,6 +2709,23 @@ all in the call to C<find_or_create>, even when set to C<undef>.
 See also L</find> and L</update_or_create>. For information on how to declare
 unique constraints, see L<DBIx::Class::ResultSource/add_unique_constraint>.
 
+If you need to know if an existing row was found or a new one created use
+L</find_or_new> and L<DBIx::Class::Row/in_storage> instead. Don't forget
+to call L<DBIx::Class::Row/insert> to save the newly created row to the
+database!
+
+  my $cd = $schema->resultset('CD')->find_or_new({
+    cdid => 5,
+    artist => 'Massive Attack',
+    title => 'Mezzanine',
+    year => 2005,
+  });
+
+  if( ! $cd->in_storage ) {
+    # do some stuff
+    $cd->insert;
+  }
+
 =cut
 
 sub find_or_create {
@@ -2762,6 +2787,25 @@ all in the call to C<update_or_create>, even when set to C<undef>.
 See also L</find> and L</find_or_create>. For information on how to declare
 unique constraints, see L<DBIx::Class::ResultSource/add_unique_constraint>.
 
+If you need to know if an existing row was updated or a new one created use
+L</update_or_new> and L<DBIx::Class::Row/in_storage> instead. Don't forget
+to call L<DBIx::Class::Row/insert> to save the newly created row to the
+database!
+
+  my $cd = $schema->resultset('CD')->update_or_new(
+    {
+      artist => 'Massive Attack',
+      title => 'Mezzanine',
+      year => 1998,
+    },
+    { key => 'cd_artist_title' }
+  );
+
+  if( ! $cd->in_storage ) {
+    # do some stuff
+    $cd->insert;
+  }
+
 =cut
 
 sub update_or_create {
@@ -3051,9 +3095,9 @@ source alias of the current result set:
 
     my $me = $self->current_source_alias;
 
-    return $self->search(
+    return $self->search({
       "$me.modified" => $user->id,
-    );
+    });
   }
 
 =cut
@@ -3435,7 +3479,6 @@ sub _resolved_attrs {
       $attrs->{_collapse_order_by} = \@$prefetch_ordering;
     }
 
-
   # if both page and offset are specified, produce a combined offset
   # even though it doesn't make much sense, this is what pre 081xx has
   # been doing
@@ -4184,6 +4227,24 @@ rows per page if the page attribute or method is used.
 
 Specifies the (zero-based) row number for the first row to be returned, or
 that of the first row of the first page if paging is used.
+=head2 software_limit
+
+=over 4
+
+=item Value: (0 | 1)
+
+=back
+
+When combined with L</rows> and/or L</offset> the generated SQL will not
+include any limit dialect stanzas. Instead the entire result will be selected
+as if no limits were specified, and DBIC will perform the limit locally, by
+artificially advancing and finishing the resulting L</cursor>.
+
+This is the recommended way of performing resultset limiting when no sane RDBMS
+implementation is available (e.g.
+L<Sybase ASE|DBIx::Class::Storage::DBI::Sybase::ASE> using the
+L<Generic Sub Query|DBIx::Class::SQLMaker::LimitDialects/GenericSubQ> hack)
+
 =head2 group_by
 
 =over 4
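For reference, a minimal usage sketch of the C<software_limit> attribute documented in the hunk above. This snippet is not part of the patch; C<$schema>, the C<CD> source and its C<cdid>/C<year> columns are placeholder names assumed for illustration only.

  # With software_limit => 1 the generated SELECT carries no LIMIT/OFFSET
  # (or other dialect-specific limit syntax). DBIC fetches the unrestricted
  # result and honours rows/offset locally by advancing and finishing the
  # cursor, as described in the documentation above.
  my $rs = $schema->resultset('CD')->search(
    { year => { '>=' => 1998 } },
    {
      order_by       => 'cdid',
      rows           => 10,   # keep at most 10 rows
      offset         => 20,   # skip the first 20 rows client-side
      software_limit => 1,    # limit in DBIC rather than in SQL
    },
  );

  my @cds = $rs->all;  # full result transferred from the RDBMS, trimmed locally

As the documentation notes, this is only worthwhile when the storage cannot express the limit in SQL at all; otherwise the dialect-aware SQL limit is the cheaper option.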