X-Git-Url: http://git.shadowcat.co.uk/gitweb/gitweb.cgi?a=blobdiff_plain;f=lib%2FDBIx%2FClass%2FResultSet.pm;h=aa6477484df5864fc65418e2d19a1441c18ab7e0;hb=3904d3c3bd6e3dda5dbc9dc49f8cc778eef114e2;hp=fbd676fb4d6a92409ae8e4115a6db7682945f010;hpb=03e7644dde21b79305ab80e1d3d9d5ca2948dcd5;p=dbsrgits%2FDBIx-Class.git diff --git a/lib/DBIx/Class/ResultSet.pm b/lib/DBIx/Class/ResultSet.pm index fbd676f..aa64774 100644 --- a/lib/DBIx/Class/ResultSet.pm +++ b/lib/DBIx/Class/ResultSet.pm @@ -14,8 +14,12 @@ use DBIx::Class::ResultSetColumn; use DBIx::Class::ResultSourceHandle; use List::Util (); use Scalar::Util (); + use base qw/DBIx::Class/; +#use Test::Deep::NoTest (qw/eq_deeply/); +use Data::Dumper::Concise; + __PACKAGE__->mk_group_accessors('simple' => qw/_result_class _source_handle/); =head1 NAME @@ -141,7 +145,7 @@ See: L, L, L, L, L. =head1 OVERLOADING If a resultset is used in a numeric context it returns the L. -However, if it is used in a booleand context it is always true. So if +However, if it is used in a boolean context it is always true. So if you want to check if a resultset has any results use C. C will always be true. @@ -291,10 +295,15 @@ sub search_rs { $rows = $self->get_cache; } + # reset the selector list + if (List::Util::first { exists $attrs->{$_} } qw{columns select as}) { + delete @{$our_attrs}{qw{select as columns +select +as +columns include_columns}}; + } + my $new_attrs = { %{$our_attrs}, %{$attrs} }; # merge new attrs into inherited - foreach my $key (qw/join prefetch +select +as bind/) { + foreach my $key (qw/join prefetch +select +as +columns include_columns bind/) { next unless exists $attrs->{$key}; $new_attrs->{$key} = $self->_merge_attr($our_attrs->{$key}, $attrs->{$key}); } @@ -357,9 +366,9 @@ sub search_rs { } my $rs = (ref $self)->new($self->result_source, $new_attrs); - if ($rows) { - $rs->set_cache($rows); - } + + $rs->set_cache($rows) if ($rows); + return $rs; } @@ -519,7 +528,7 @@ sub find { # in ::Relationship::Base::search_related (the row method), and furthermore # the relationship is of the 'single' type. This means that the condition # provided by the relationship (already attached to $self) is sufficient, - # as there can be only one row in the databse that would satisfy the + # as there can be only one row in the database that would satisfy the # relationship } else { @@ -530,8 +539,8 @@ sub find { } # Run the query - my $rs = $self->search ($query, $attrs); - if (keys %{$rs->_resolved_attrs->{collapse}}) { + my $rs = $self->search ($query, {result_class => $self->result_class, %$attrs}); + if ($rs->_resolved_attrs->{collapse}) { my $row = $rs->next; carp "Query returned more than one row" if $rs->next; return $row; @@ -634,7 +643,7 @@ sub search_related { =head2 search_related_rs This method works exactly the same as search_related, except that -it guarantees a restultset, even in list context. +it guarantees a resultset, even in list context. =cut @@ -692,7 +701,7 @@ L returned. =item B -As of 0.08100, this method enforces the assumption that the preceeding +As of 0.08100, this method enforces the assumption that the preceding query returns only one row. If more than one row is returned, you will receive a warning: @@ -718,7 +727,7 @@ sub single { my $attrs = $self->_resolved_attrs_copy; - if (keys %{$attrs->{collapse}}) { + if ($attrs->{collapse}) { $self->throw_exception( 'single() can not be used on resultsets prefetching has_many. 
Use find( \%cond ) or next() instead' ); @@ -747,7 +756,10 @@ sub single { $attrs->{where}, $attrs ); - return (@data ? ($self->_construct_object(@data))[0] : undef); + return @data + ? ($self->_construct_objects(@data))[0] + : undef + ; } @@ -955,154 +967,291 @@ sub next { : $self->cursor->next ); return undef unless (@row); - my ($row, @more) = $self->_construct_object(@row); + my ($row, @more) = $self->_construct_objects(@row); $self->{stashed_objects} = \@more if @more; return $row; } -sub _construct_object { +# takes a single DBI-row of data and coinstructs as many objects +# as the resultset attributes call for. +# This can be a bit of an action at a distance - it takes as an argument +# the *current* cursor-row (already taken off the $sth), but if +# collapsing is requested it will keep advancing the cursor either +# until the current row-object is assembled (the collapser was able to +# order the result sensibly) OR until the cursor is exhausted (an +# unordered collapsing resultset effectively triggers ->all) + +# FIXME: why the *FUCK* do we pass around DBI data by copy?! Sadly needs +# assessment before changing... +# +sub _construct_objects { my ($self, @row) = @_; - my $info = $self->_collapse_result($self->{_attrs}{as}, \@row) - or return (); - my @new = $self->result_class->inflate_result($self->result_source, @$info); - @new = $self->{_attrs}{record_filter}->(@new) - if exists $self->{_attrs}{record_filter}; - return @new; -} + my $attrs = $self->_resolved_attrs; + my $keep_collapsing = $attrs->{collapse}; -sub _collapse_result { - my ($self, $as_proto, $row) = @_; + my $res_index; +=begin + do { + my $me_pref_col = $attrs->{_row_parser}->($row_ref); - # if the first row that ever came in is totally empty - this means we got - # hit by a smooth^Wempty left-joined resultset. Just noop in that case - # instead of producing a {} - # - my $has_def; - for (@$row) { - if (defined $_) { - $has_def++; - last; - } - } - return undef unless $has_def; + my $container; + if ($keep_collapsing) { - my @copy = @$row; + # FIXME - we should be able to remove these 2 checks after the design validates + $self->throw_exception ('Collapsing without a top-level collapse-set... can not happen') + unless @{$me_ref_col->[2]}; + $self->throw_exception ('Top-level collapse-set contains a NULL-value... can not happen') + if grep { ! defined $_ } @{$me_pref_col->[2]}; - # 'foo' => [ undef, 'foo' ] - # 'foo.bar' => [ 'foo', 'bar' ] - # 'foo.bar.baz' => [ 'foo.bar', 'baz' ] + my $main_ident = join "\x00", @{$me_pref_col->[2]}; - my @construct_as = map { [ (/^(?:(.*)\.)?([^.]+)$/) ] } @$as_proto; + if (! $res_index->{$main_ident}) { + # this is where we bail out IFF we are ordered, and the $main_ident changes - my %collapse = %{$self->{_attrs}{collapse}||{}}; + $res_index->{$main_ident} = { + all_me_pref => [, + index => scalar keys %$res_index, + }; + } + } - my @pri_index; - # if we're doing collapsing (has_many prefetch) we need to grab records - # until the PK changes, so fill @pri_index. if not, we leave it empty so - # we know we don't have to bother. - # the reason for not using the collapse stuff directly is because if you - # had for e.g. 
two artists in a row with no cds, the collapse info for - # both would be NULL (undef) so you'd lose the second artist + $container = $res_index->{$main_ident}{container}; + }; - # store just the index so we can check the array positions from the row - # without having to contruct the full hash + push @$container, [ @{$me_pref_col}[0,1] ]; - if (keys %collapse) { - my %pri = map { ($_ => 1) } $self->result_source->primary_columns; - foreach my $i (0 .. $#construct_as) { - next if defined($construct_as[$i][0]); # only self table - if (delete $pri{$construct_as[$i][1]}) { - push(@pri_index, $i); - } - last unless keys %pri; # short circuit (Johnny Five Is Alive!) + + + } while ( + $keep_collapsing + && + do { $row_ref = [$self->cursor->next]; $self->{stashed_row} = $row_ref if @$row_ref; scalar @$row_ref } + ); + + # attempt collapse all rows with same collapse identity + if (@to_collapse > 1) { + my @collapsed; + while (@to_collapse) { + $self->_merge_result(\@collapsed, shift @to_collapse); } } +=cut - # no need to do an if, it'll be empty if @pri_index is empty anyway + my $mepref_structs = $self->_collapse_result(\@row) + or return (); - my %pri_vals = map { ($_ => $copy[$_]) } @pri_index; + my $rsrc = $self->result_source; + my $res_class = $self->result_class; + my $inflator = $res_class->can ('inflate_result'); - my @const_rows; + my @objs = map { + $res_class->$inflator ($rsrc, @$_) + } (@$mepref_structs); - do { # no need to check anything at the front, we always want the first row + if (my $f = $attrs->{record_filter}) { + @objs = map { $f->($_) } @objs; + } + + return @objs; +} - my %const; +=begin - foreach my $this_as (@construct_as) { - $const{$this_as->[0]||''}{$this_as->[1]} = shift(@copy); +# two arguments: $as_proto is an arrayref of column names, +# $row_ref is an arrayref of the data. If none of the row data +# is defined we return undef (that's copied from the old +# _collapse_result). Next we decide whether we need to collapse +# the resultset (i.e. we prefetch something) or not. $collapse +# indicates that. The do-while loop will run once if we do not need +# to collapse the result and will run as long as _merge_result returns +# a true value. It will return undef if the current added row does not +# match the previous row. A bit of stashing and cursor magic is +# required so that the cursor is not mixed up. + +# "$rows" is a bit misleading. In the end, there should only be one +# element in this arrayref. + +sub _collapse_result { + my ( $self, $as_proto, $row_ref ) = @_; + my $has_def; + for (@$row_ref) { + if ( defined $_ ) { + $has_def++; + last; + } } + return undef unless $has_def; + + my $collapse = $self->_resolved_attrs->{collapse}; + my $rows = []; + my @row = @$row_ref; + do { + my $i = 0; + my $row = { map { $_ => $row[ $i++ ] } @$as_proto }; + $row = $self->result_source->_parse_row($row, $collapse); + unless ( scalar @$rows ) { + push( @$rows, $row ); + } + $collapse = undef unless ( $self->_merge_result( $rows, $row ) ); + } while ( + $collapse + && do { @row = $self->cursor->next; $self->{stashed_row} = \@row if @row; } + ); + + return $rows->[0]; + +} + +# _merge_result accepts an arrayref of rows objects (again, an arrayref of two elements) +# and a row object which should be merged into the first object. +# First we try to find out whether $row is already in $rows. If this is the case +# we try to merge them by iteration through their relationship data. We call +# _merge_result again on them, so they get merged. 
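# An illustrative sketch of the pairs being merged here (the exact layout is
# whatever _parse_row emits, so treat the field names below as assumptions;
# the sample values are borrowed from the populate() example further down):
#
#   [ { artistid => 5, name => 'Angsty-Whiny Girl' },                      # column data for 'me'
#     { cds => [ { title => 'Why Am I So Ugly?', year => 2006 }, {} ] } ]  # related data, keyed by rel name
#
# Merging therefore compares slot [0] (the column hash) of two such pairs and,
# on a match, recurses into slot [1] (the per-relationship data).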
+ +# If we don't find the $row in $rows, we append it to $rows and return undef. +# _merge_result returns 1 otherwise (i.e. $row has been found in $rows). + +sub _merge_result { + my ( $self, $rows, $row ) = @_; + my ( $columns, $rels ) = @$row; + my $found = undef; + foreach my $seen (@$rows) { + my $match = 1; + foreach my $column ( keys %$columns ) { + if ( defined $seen->[0]->{$column} ^ defined $columns->{$column} + or defined $columns->{$column} + && $seen->[0]->{$column} ne $columns->{$column} ) + { + + $match = 0; + last; + } + } + if ($match) { + $found = $seen; + last; + } + } + if ($found) { + foreach my $rel ( keys %$rels ) { + my $old_rows = $found->[1]->{$rel}; + $self->_merge_result( + ref $found->[1]->{$rel}->[0] eq 'HASH' ? [ $found->[1]->{$rel} ] + : $found->[1]->{$rel}, + ref $rels->{$rel}->[0] eq 'HASH' ? [ $rels->{$rel}->[0], $rels->{$rel}->[1] ] + : $rels->{$rel}->[0] + ); - push(@const_rows, \%const); + my $attrs = $self->_resolved_attrs; + my ($keep_collapsing, $set_ident) = @{$attrs}{qw/collapse _collapse_ident/}; - } until ( # no pri_index => no collapse => drop straight out - !@pri_index - or - do { # get another row, stash it, drop out if different PK + # FIXME this is temporary, need to calculate in _resolved_attrs + $set_ident ||= { me => [ $self->result_source->_pri_cols ], pref => {} }; - @copy = $self->cursor->next; - $self->{stashed_row} = \@copy; + my @cur_row = @$row_ref; + my (@to_collapse, $last_ident); - # last thing in do block, counts as true if anything doesn't match + do { + my $row_hr = { map { $as_proto->[$_] => $cur_row[$_] } (0 .. $#$as_proto) }; - # check xor defined first for NULL vs. NOT NULL then if one is - # defined the other must be so check string equality + # see if we are switching to another object + # this can be turned off and things will still work + # since _merge_prefetch knows about _collapse_ident +# my $cur_ident = [ @{$row_hr}{@$set_ident} ]; + my $cur_ident = []; + $last_ident ||= $cur_ident; - grep { - (defined $pri_vals{$_} ^ defined $copy[$_]) - || (defined $pri_vals{$_} && ($pri_vals{$_} ne $copy[$_])) - } @pri_index; - } +# if ($keep_collapsing = Test::Deep::eq_deeply ($cur_ident, $last_ident)) { +# push @to_collapse, $self->result_source->_parse_row ( +# $row_hr, +# ); +# } + } while ( + $keep_collapsing + && + do { @cur_row = $self->cursor->next; $self->{stashed_row} = \@cur_row if @cur_row; } ); - my $alias = $self->{attrs}{alias}; - my $info = []; - - my %collapse_pos; + die Dumper \@to_collapse; - my @const_keys; - foreach my $const (@const_rows) { - scalar @const_keys or do { - @const_keys = sort { length($a) <=> length($b) } keys %$const; - }; - foreach my $key (@const_keys) { - if (length $key) { - my $target = $info; - my @parts = split(/\./, $key); - my $cur = ''; - my $data = $const->{$key}; - foreach my $p (@parts) { - $target = $target->[1]->{$p} ||= []; - $cur .= ".${p}"; - if ($cur eq ".${key}" && (my @ckey = @{$collapse{$cur}||[]})) { - # collapsing at this point and on final part - my $pos = $collapse_pos{$cur}; - CK: foreach my $ck (@ckey) { - if (!defined $pos->{$ck} || $pos->{$ck} ne $data->{$ck}) { - $collapse_pos{$cur} = $data; - delete @collapse_pos{ # clear all positioning for sub-entries - grep { m/^\Q${cur}.\E/ } keys %collapse_pos - }; - push(@$target, []); - last CK; - } - } - } - if (exists $collapse{$cur}) { - $target = $target->[-1]; - } - } - $target->[0] = $data; - } else { - $info->[0] = $const->{$key}; - } + # attempt collapse all rows with same collapse identity + if (@to_collapse > 
1) { + my @collapsed; + while (@to_collapse) { + $self->_merge_result(\@collapsed, shift @to_collapse); } + @to_collapse = @collapsed; + } + + # still didn't fully collapse + $self->throw_exception ('Resultset collapse failed (theoretically impossible). Maybe a wrong collapse_ident...?') + if (@to_collapse > 1); + + return $to_collapse[0]; +} + + +# two arguments: $as_proto is an arrayref of 'as' column names, +# $row_ref is an arrayref of the data. The do-while loop will run +# once if we do not need to collapse the result and will run as long as +# _merge_result returns a true value. It will return undef if the +# current added row does not match the previous row, which in turn +# means we need to stash the row for the subsequent ->next call +sub _collapse_result { + my ( $self, $as_proto, $row_ref ) = @_; + + my $attrs = $self->_resolved_attrs; + my ($keep_collapsing, $set_ident) = @{$attrs}{qw/collapse _collapse_ident/}; + + die Dumper [$as_proto, $row_ref, $keep_collapsing, $set_ident ]; + + + my @cur_row = @$row_ref; + my (@to_collapse, $last_ident); + + do { + my $row_hr = { map { $as_proto->[$_] => $cur_row[$_] } (0 .. $#$as_proto) }; + + # see if we are switching to another object + # this can be turned off and things will still work + # since _merge_prefetch knows about _collapse_ident +# my $cur_ident = [ @{$row_hr}{@$set_ident} ]; + my $cur_ident = []; + $last_ident ||= $cur_ident; + +# if ($keep_collapsing = eq_deeply ($cur_ident, $last_ident)) { +# push @to_collapse, $self->result_source->_parse_row ( +# $row_hr, +# ); +# } + } while ( + $keep_collapsing + && + do { @cur_row = $self->cursor->next; $self->{stashed_row} = \@cur_row if @cur_row; } + ); + + # attempt collapse all rows with same collapse identity +} +=cut + +# Takes an arrayref of me/pref pairs and a new me/pref pair that should +# be merged on a preexisting matching me (or should be pushed into $merged +# as a new me/pref pair for further invocations). It should be possible to +# use this function to collapse complete ->all results, provided _collapse_result() is adjusted +# to provide everything to this sub not to barf when $merged contains more than one +# arrayref) +sub _merge_prefetch { + my ($self, $merged, $next_row) = @_; + + unless (@$merged) { + push @$merged, $next_row; + return; } - return $info; } =head2 result_source @@ -1144,6 +1293,7 @@ sub result_class { if ($result_class) { $self->ensure_class_loaded($result_class); $self->_result_class($result_class); + $self->{attrs}{result_class} = $result_class if ref $self; } $self->_result_class; } @@ -1247,11 +1397,6 @@ sub _count_rs { $tmp_attrs->{select} = $rsrc->storage->_count_select ($rsrc, $tmp_attrs); $tmp_attrs->{as} = 'count'; - # read the comment on top of the actual function to see what this does - $tmp_attrs->{from} = $self->_switch_to_inner_join_if_needed ( - $tmp_attrs->{from}, $tmp_attrs->{alias} - ); - my $tmp_rs = $rsrc->resultset_class->new($rsrc, $tmp_attrs)->get_column ('count'); return $tmp_rs; @@ -1271,20 +1416,16 @@ sub _count_subq_rs { # extra selectors do not go in the subquery and there is no point of ordering it delete $sub_attrs->{$_} for qw/collapse select _prefetch_select as order_by/; - # if we prefetch, we group_by primary keys only as this is what we would get out - # of the rs via ->next/->all. 
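# Roughly speaking (illustrative SQL only -- aliases and quoting depend on the
# storage class in use), grouping a prefetched count by the primary key gives
# a statement of the shape:
#
#   SELECT COUNT( * ) FROM (
#     SELECT me.artistid
#       FROM artist me LEFT JOIN cd cds ON cds.artist = me.artistid
#     GROUP BY me.artistid
#   ) me
#
# so every root row is counted once, no matter how many related rows join in.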
We DO WANT to clobber old group_by regardless - if ( keys %{$attrs->{collapse}} ) { - $sub_attrs->{group_by} = [ map { "$attrs->{alias}.$_" } ($rsrc->primary_columns) ] + # if we multi-prefetch we group_by primary keys only as this is what we would + # get out of the rs via ->next/->all. We *DO WANT* to clobber old group_by regardless + if ( $attrs->{collapse} ) { + $sub_attrs->{group_by} = [ map { "$attrs->{alias}.$_" } ($rsrc->_pri_cols) ] } - $sub_attrs->{select} = $rsrc->storage->_subq_count_select ($rsrc, $sub_attrs); - - # read the comment on top of the actual function to see what this does - $sub_attrs->{from} = $self->_switch_to_inner_join_if_needed ( - $sub_attrs->{from}, $sub_attrs->{alias} - ); + $sub_attrs->{select} = $rsrc->storage->_subq_count_select ($rsrc, $attrs); - # this is so that ordering can be thrown away in things like Top limit + # this is so that the query can be simplified e.g. + # * ordering can be thrown away in things like Top limit $sub_attrs->{-for_count_only} = 1; my $sub_rs = $rsrc->resultset_class->new ($rsrc, $sub_attrs); @@ -1301,77 +1442,6 @@ sub _count_subq_rs { return $self->_count_rs ($attrs); } - -# The DBIC relationship chaining implementation is pretty simple - every -# new related_relationship is pushed onto the {from} stack, and the {select} -# window simply slides further in. This means that when we count somewhere -# in the middle, we got to make sure that everything in the join chain is an -# actual inner join, otherwise the count will come back with unpredictable -# results (a resultset may be generated with _some_ rows regardless of if -# the relation which the $rs currently selects has rows or not). E.g. -# $artist_rs->cds->count - normally generates: -# SELECT COUNT( * ) FROM artist me LEFT JOIN cd cds ON cds.artist = me.artistid -# which actually returns the number of artists * (number of cds || 1) -# -# So what we do here is crawl {from}, determine if the current alias is at -# the top of the stack, and if not - make sure the chain is inner-joined down -# to the root. -# -sub _switch_to_inner_join_if_needed { - my ($self, $from, $alias) = @_; - - # subqueries and other oddness is naturally not supported - return $from if ( - ref $from ne 'ARRAY' - || - @$from <= 1 - || - ref $from->[0] ne 'HASH' - || - ! $from->[0]{-alias} - || - $from->[0]{-alias} eq $alias - ); - - my $switch_branch; - JOINSCAN: - for my $j (@{$from}[1 .. $#$from]) { - if ($j->[0]{-alias} eq $alias) { - $switch_branch = $j->[0]{-join_path}; - last JOINSCAN; - } - } - - # something else went wrong - return $from unless $switch_branch; - - # So it looks like we will have to switch some stuff around. - # local() is useless here as we will be leaving the scope - # anyway, and deep cloning is just too fucking expensive - # So replace the inner hashref manually - my @new_from = ($from->[0]); - my $sw_idx = { map { $_ => 1 } @$switch_branch }; - - for my $j (@{$from}[1 .. $#$from]) { - my $jalias = $j->[0]{-alias}; - - if ($sw_idx->{$jalias}) { - my %attrs = %{$j->[0]}; - delete $attrs{-join_type}; - push @new_from, [ - \%attrs, - @{$j}[ 1 .. 
$#$j ], - ]; - } - else { - push @new_from, $j; - } - } - - return \@new_from; -} - - sub _bool { return 1; } @@ -1414,30 +1484,32 @@ sub all { $self->throw_exception("all() doesn't take any arguments, you probably wanted ->search(...)->all()"); } - return @{ $self->get_cache } if $self->get_cache; + if (my $c = $self->get_cache) { + return @$c; + } - my @obj; + my @objects; - if (keys %{$self->_resolved_attrs->{collapse}}) { + if ($self->_resolved_attrs->{collapse}) { # Using $self->cursor->all is really just an optimisation. # If we're collapsing has_many prefetches it probably makes # very little difference, and this is cleaner than hacking - # _construct_object to survive the approach + # _construct_objects to survive the approach $self->cursor->reset; my @row = $self->cursor->next; while (@row) { - push(@obj, $self->_construct_object(@row)); + push(@objects, $self->_construct_objects(@row)); @row = (exists $self->{stashed_row} ? @{delete $self->{stashed_row}} : $self->cursor->next); } } else { - @obj = map { $self->_construct_object(@$_) } $self->cursor->all; + @objects = map { $self->_construct_objects($_) } $self->cursor->all; } - $self->set_cache(\@obj) if $self->{attrs}{cache}; + $self->set_cache(\@objects) if $self->{attrs}{cache}; - return @obj; + return @objects; } =head2 reset @@ -1495,8 +1567,12 @@ sub _rs_update_delete { my $rsrc = $self->result_source; + # if a condition exists we need to strip all table qualifiers + # if this is not possible we'll force a subquery below + my $cond = $rsrc->schema->storage->_strip_cond_qualifiers ($self->{cond}); + my $needs_group_by_subq = $self->_has_resolved_attr (qw/collapse group_by -join/); - my $needs_subq = $self->_has_resolved_attr (qw/row offset/); + my $needs_subq = $needs_group_by_subq || (not defined $cond) || $self->_has_resolved_attr(qw/row offset/); if ($needs_group_by_subq or $needs_subq) { @@ -1504,7 +1580,7 @@ sub _rs_update_delete { my $attrs = $self->_resolved_attrs_copy; delete $attrs->{$_} for qw/collapse select as/; - $attrs->{columns} = [ map { "$attrs->{alias}.$_" } ($self->result_source->primary_columns) ]; + $attrs->{columns} = [ map { "$attrs->{alias}.$_" } ($self->result_source->_pri_cols) ]; if ($needs_group_by_subq) { # make sure no group_by was supplied, or if there is one - make sure it matches @@ -1544,70 +1620,11 @@ sub _rs_update_delete { return $rsrc->storage->$op( $rsrc, $op eq 'update' ? $values : (), - $self->_cond_for_update_delete, + $cond, ); } } - -# _cond_for_update_delete -# -# update/delete require the condition to be modified to handle -# the differing SQL syntax available. This transforms the $self->{cond} -# appropriately, returning the new condition. - -sub _cond_for_update_delete { - my ($self, $full_cond) = @_; - my $cond = {}; - - $full_cond ||= $self->{cond}; - # No-op. 
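# Illustration of the transform this routine -- and the _strip_cond_qualifiers
# call that replaces it above -- performs (the column names are only an example):
#
#   { 'me.artistid' => 5, 'me.name' => { -like => 'Angsty%' } }
#
# becomes the alias-free condition that UPDATE/DELETE statements need:
#
#   { artistid => 5, name => { -like => 'Angsty%' } }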
No condition, we're updating/deleting everything - return $cond unless ref $full_cond; - - if (ref $full_cond eq 'ARRAY') { - $cond = [ - map { - my %hash; - foreach my $key (keys %{$_}) { - $key =~ /([^.]+)$/; - $hash{$1} = $_->{$key}; - } - \%hash; - } @{$full_cond} - ]; - } - elsif (ref $full_cond eq 'HASH') { - if ((keys %{$full_cond})[0] eq '-and') { - $cond->{-and} = []; - my @cond = @{$full_cond->{-and}}; - for (my $i = 0; $i < @cond; $i++) { - my $entry = $cond[$i]; - my $hash; - if (ref $entry eq 'HASH') { - $hash = $self->_cond_for_update_delete($entry); - } - else { - $entry =~ /([^.]+)$/; - $hash->{$1} = $cond[++$i]; - } - push @{$cond->{-and}}, $hash; - } - } - else { - foreach my $key (keys %{$full_cond}) { - $key =~ /([^.]+)$/; - $cond->{$1} = $full_cond->{$key}; - } - } - } - else { - $self->throw_exception("Can't update/delete on resultset with condition unless hash or array"); - } - - return $cond; -} - - =head2 update =over 4 @@ -1740,7 +1757,7 @@ Example: Assuming an Artist Class that has many CDs Classes relating: ], }, { artistid => 5, name => 'Angsty-Whiny Girl', cds => [ - { title => 'My parents sold me to a record company' ,year => 2005 }, + { title => 'My parents sold me to a record company', year => 2005 }, { title => 'Why Am I So Ugly?', year => 2006 }, { title => 'I Got Surgery and am now Popular', year => 2007 } ], @@ -1768,7 +1785,7 @@ example: [qw/artistid name/], [100, 'A Formally Unknown Singer'], [101, 'A singer that jumped the shark two albums ago'], - [102, 'An actually cool singer.'], + [102, 'An actually cool singer'], ]); Please note an important effect on your data when choosing between void and @@ -1782,10 +1799,10 @@ values. =cut sub populate { - my $self = shift @_; - my $data = ref $_[0][0] eq 'HASH' - ? $_[0] : ref $_[0][0] eq 'ARRAY' ? $self->_normalize_populate_args($_[0]) : - $self->throw_exception('Populate expects an arrayref of hashes or arrayref of arrayrefs'); + my $self = shift; + + # cruft placed in standalone method + my $data = $self->_normalize_populate_args(@_); if(defined wantarray) { my @created; @@ -1794,15 +1811,19 @@ sub populate { } return wantarray ? @created : \@created; } else { - my ($first, @rest) = @$data; - - require overload; - my @names = grep { - (not ref $first->{$_}) || (ref $first->{$_} eq 'SCALAR') || - (overload::Method($first->{$_}, '""')) - } keys %$first; + my $first = $data->[0]; + + # if a column is a registered relationship, and is a non-blessed hash/array, consider + # it relationship data + my (@rels, @columns); + for (keys %$first) { + my $ref = ref $first->{$_}; + $self->result_source->has_relationship($_) && ($ref eq 'ARRAY' or $ref eq 'HASH') + ? 
push @rels, $_ + : push @columns, $_ + ; + } - my @rels = grep { $self->result_source->has_relationship($_) } keys %$first; my @pks = $self->result_source->primary_columns; ## do the belongs_to relationships @@ -1831,17 +1852,21 @@ sub populate { delete $data->[$index]->{$rel}; $data->[$index] = {%{$data->[$index]}, %$related}; - push @names, keys %$related if $index == 0; + push @columns, keys %$related if $index == 0; } } - ## do bulk insert on current row - my @values = map { [ @$_{@names} ] } @$data; + ## inherit the data locked in the conditions of the resultset + my ($rs_data) = $self->_merge_cond_with_data({}); + delete @{$rs_data}{@columns}; + my @inherit_cols = keys %$rs_data; + my @inherit_data = values %$rs_data; + ## do bulk insert on current row $self->result_source->storage->insert_bulk( $self->result_source, - \@names, - \@values, + [@columns, @inherit_cols], + [ map { [ @$_{@columns}, @inherit_data ] } @$data ], ); ## do the has_many relationships @@ -1850,7 +1875,7 @@ sub populate { foreach my $rel (@rels) { next unless $item->{$rel} && ref $item->{$rel} eq "ARRAY"; - my $parent = $self->find(map {{$_=>$item->{$_}} } @pks) + my $parent = $self->find({map { $_ => $item->{$_} } @pks}) || $self->throw_exception('Cannot find the relating object.'); my $child = $parent->$rel; @@ -1870,26 +1895,27 @@ sub populate { } } -=head2 _normalize_populate_args ($args) - -Private method used by L to normalize its incoming arguments. Factored -out in case you want to subclass and accept new argument structures to the -L method. - -=cut +# populate() argumnets went over several incarnations +# What we ultimately support is AoH sub _normalize_populate_args { - my ($self, $data) = @_; - my @names = @{shift(@$data)}; - my @results_to_create; - foreach my $datum (@$data) { - my %result_to_create; - foreach my $index (0..$#names) { - $result_to_create{$names[$index]} = $$datum[$index]; + my ($self, $arg) = @_; + + if (ref $arg eq 'ARRAY') { + if (ref $arg->[0] eq 'HASH') { + return $arg; + } + elsif (ref $arg->[0] eq 'ARRAY') { + my @ret; + my @colnames = @{$arg->[0]}; + foreach my $values (@{$arg}[1 .. $#$arg]) { + push @ret, { map { $colnames[$_] => $values->[$_] } (0 .. $#colnames) }; + } + return \@ret; } - push @results_to_create, \%result_to_create; } - return \@results_to_create; + + $self->throw_exception('Populate expects an arrayref of hashrefs or arrayref of arrayrefs'); } =head2 pager @@ -1978,46 +2004,66 @@ sub new_result { $self->throw_exception( "new_result needs a hash" ) unless (ref $values eq 'HASH'); - my %new; + my ($merged_cond, $cols_from_relations) = $self->_merge_cond_with_data($values); + + my %new = ( + %$merged_cond, + @$cols_from_relations + ? (-cols_from_relations => $cols_from_relations) + : (), + -source_handle => $self->_source_handle, + -result_source => $self->result_source, # DO NOT REMOVE THIS, REQUIRED + ); + + return $self->result_class->new(\%new); +} + +# _merge_cond_with_data +# +# Takes a simple hash of K/V data and returns its copy merged with the +# condition already present on the resultset. 
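# For instance (purely illustrative values): on a resultset already restricted
# with { 'me.artistid' => 5 }, a call like
#
#   $rs->_merge_cond_with_data({ name => 'A Formally Unknown Singer' })
#
# would come back as
#
#   ( { artistid => 5, name => 'A Formally Unknown Singer' }, [] )
#
# with the resultset condition folded in underneath the caller-supplied data.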
Additionally returns an +# arrayref of value/condition names, which were inferred from related +# objects (this is needed for in-memory related objects) +sub _merge_cond_with_data { + my ($self, $data) = @_; + + my (%new_data, @cols_from_relations); + my $alias = $self->{attrs}{alias}; - if ( - defined $self->{cond} - && $self->{cond} eq $DBIx::Class::ResultSource::UNRESOLVABLE_CONDITION - ) { - %new = %{ $self->{attrs}{related_objects} || {} }; # nothing might have been inserted yet - $new{-from_resultset} = [ keys %new ] if keys %new; - } else { + if (! defined $self->{cond}) { + # just massage $data below + } + elsif ($self->{cond} eq $DBIx::Class::ResultSource::UNRESOLVABLE_CONDITION) { + %new_data = %{ $self->{attrs}{related_objects} || {} }; # nothing might have been inserted yet + @cols_from_relations = keys %new_data; + } + elsif (ref $self->{cond} ne 'HASH') { $self->throw_exception( - "Can't abstract implicit construct, condition not a hash" - ) if ($self->{cond} && !(ref $self->{cond} eq 'HASH')); - - my $collapsed_cond = ( - $self->{cond} - ? $self->_collapse_cond($self->{cond}) - : {} + "Can't abstract implicit construct, resultset condition not a hash" ); - + } + else { # precendence must be given to passed values over values inherited from # the cond, so the order here is important. - my %implied = %{$self->_remove_alias($collapsed_cond, $alias)}; - while( my($col,$value) = each %implied ){ - if(ref($value) eq 'HASH' && keys(%$value) && (keys %$value)[0] eq '='){ - $new{$col} = $value->{'='}; + my $collapsed_cond = $self->_collapse_cond($self->{cond}); + my %implied = %{$self->_remove_alias($collapsed_cond, $alias)}; + + while ( my($col, $value) = each %implied ) { + if (ref($value) eq 'HASH' && keys(%$value) && (keys %$value)[0] eq '=') { + $new_data{$col} = $value->{'='}; next; } - $new{$col} = $value if $self->_is_deterministic_value($value); + $new_data{$col} = $value if $self->_is_deterministic_value($value); } } - %new = ( - %new, - %{ $self->_remove_alias($values, $alias) }, - -source_handle => $self->_source_handle, - -result_source => $self->result_source, # DO NOT REMOVE THIS, REQUIRED + %new_data = ( + %new_data, + %{ $self->_remove_alias($data, $alias) }, ); - return $self->result_class->new(\%new); + return (\%new_data, \@cols_from_relations); } # _is_deterministic_value @@ -2142,7 +2188,7 @@ sub _remove_alias { return \%unaliased; } -=head2 as_query (EXPERIMENTAL) +=head2 as_query =over 4 @@ -2156,8 +2202,6 @@ Returns the SQL query and bind vars associated with the invocant. This is generally used as the RHS for a subquery. -B: This feature is still experimental. - =cut sub as_query { @@ -2248,7 +2292,7 @@ To create related objects, pass a hashref of related-object column values B. If the relationship is of type C (L) - pass an arrayref of hashrefs. The process will correctly identify columns holding foreign keys, and will -transparrently populate them from the keys of the corresponding relation. +transparently populate them from the keys of the corresponding relation. This can be applied recursively, and will work correctly for a structure with an arbitrary depth and width, as long as the relationships actually exists and the correct column data has been supplied. @@ -2586,6 +2630,23 @@ sub is_paged { return !!$self->{attrs}{page}; } +=head2 is_ordered + +=over 4 + +=item Arguments: none + +=item Return Value: true, if the resultset has been ordered with C. 
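A minimal usage sketch (assuming the C<$schema> and Artist class used in the
examples elsewhere in this document):

  my $ordered_rs = $schema->resultset('Artist')->search(undef, { order_by => 'name' });
  $ordered_rs->is_ordered;                  # true
  $schema->resultset('Artist')->is_ordered; # false - no order_by was supplied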
+ +=back + +=cut + +sub is_ordered { + my ($self) = @_; + return scalar $self->result_source->storage->_parse_order_by($self->{attrs}{order_by}); +} + =head2 related_resultset =over 4 @@ -2607,21 +2668,30 @@ sub related_resultset { $self->{related_resultsets} ||= {}; return $self->{related_resultsets}{$rel} ||= do { - my $rel_info = $self->result_source->relationship_info($rel); + my $rsrc = $self->result_source; + my $rel_info = $rsrc->relationship_info($rel); $self->throw_exception( - "search_related: result source '" . $self->result_source->source_name . + "search_related: result source '" . $rsrc->source_name . "' has no such relationship $rel") unless $rel_info; - my ($from,$seen) = $self->_chain_relationship($rel); + my $attrs = $self->_chain_relationship($rel); + + my $join_count = $attrs->{seen_join}{$rel}; + + my $alias = $self->result_source->storage + ->relname_to_table_alias($rel, $join_count); + + # since this is search_related, and we already slid the select window inwards + # (the select/as attrs were deleted in the beginning), we need to flip all + # left joins to inner, so we get the expected results + # read the comment on top of the actual function to see what this does + $attrs->{from} = $rsrc->schema->storage->_straight_join_to_node ($attrs->{from}, $alias); - my $join_count = $seen->{$rel}; - my $alias = ($join_count > 1 ? join('_', $rel, $join_count) : $rel); #XXX - temp fix for result_class bug. There likely is a more elegant fix -groditi - my %attrs = %{$self->{attrs}||{}}; - delete @attrs{qw(result_class alias)}; + delete @{$attrs}{qw(result_class alias)}; my $new_cache; @@ -2632,7 +2702,7 @@ sub related_resultset { } } - my $rel_source = $self->result_source->related_source($rel); + my $rel_source = $rsrc->related_source($rel); my $new = do { @@ -2642,20 +2712,14 @@ sub related_resultset { # to work sanely (e.g. RestrictWithObject wants to be able to add # extra query restrictions, and these may need to be $alias.) - my $attrs = $rel_source->resultset_attributes; - local $attrs->{alias} = $alias; + my $rel_attrs = $rel_source->resultset_attributes; + local $rel_attrs->{alias} = $alias; $rel_source->resultset ->search_rs( undef, { - %attrs, - join => undef, - prefetch => undef, - select => undef, - as => undef, - where => $self->{cond}, - seen_join => $seen, - from => $from, + %$attrs, + where => $attrs->{where}, }); }; $new->set_cache($new_cache) if $new_cache; @@ -2706,6 +2770,68 @@ sub current_source_alias { return ($self->{attrs} || {})->{alias} || 'me'; } +=head2 as_subselect_rs + +=over 4 + +=item Arguments: none + +=item Return Value: $resultset + +=back + +Act as a barrier to SQL symbols. The resultset provided will be made into a +"virtual view" by including it as a subquery within the from clause. From this +point on, any joined tables are inaccessible to ->search on the resultset (as if +it were simply where-filtered without joins). 
For example: + + my $rs = $schema->resultset('Bar')->search({'x.name' => 'abc'},{ join => 'x' }); + + # 'x' now pollutes the query namespace + + # So the following works as expected + my $ok_rs = $rs->search({'x.other' => 1}); + + # But this doesn't: instead of finding a 'Bar' related to two x rows (abc and + # def) we look for one row with contradictory terms and join in another table + # (aliased 'x_2') which we never use + my $broken_rs = $rs->search({'x.name' => 'def'}); + + my $rs2 = $rs->as_subselect_rs; + + # doesn't work - 'x' is no longer accessible in $rs2, having been sealed away + my $not_joined_rs = $rs2->search({'x.other' => 1}); + + # works as expected: finds a 'table' row related to two x rows (abc and def) + my $correctly_joined_rs = $rs2->search({'x.name' => 'def'}); + +Another example of when one might use this would be to select a subset of +columns in a group by clause: + + my $rs = $schema->resultset('Bar')->search(undef, { + group_by => [qw{ id foo_id baz_id }], + })->as_subselect_rs->search(undef, { + columns => [qw{ id foo_id }] + }); + +In the above example normally columns would have to be equal to the group by, +but because we isolated the group by into a subselect the above works. + +=cut + +sub as_subselect_rs { + my $self = shift; + + return $self->result_source->resultset->search( undef, { + alias => $self->current_source_alias, + from => [{ + $self->current_source_alias => $self->as_query, + -alias => $self->current_source_alias, + -source_handle => $self->result_source->handle, + }] + }); +} + # This code is called by search_related, and makes sure there # is clear separation between the joins before, during, and # after the relationship. This information is needed later @@ -2713,37 +2839,67 @@ sub current_source_alias { # with a relation_chain_depth less than the depth of the # current prefetch is not considered) # -# The increments happen in 1/2s to make it easier to correlate the -# join depth with the join path. An integer means a relationship -# specified via a search_related, whereas a fraction means an added -# join/prefetch via attributes +# The increments happen twice per join. An even number means a +# relationship specified via a search_related, whereas an odd +# number indicates a join/prefetch added via attributes +# +# Also this code will wrap the current resultset (the one we +# chain to) in a subselect IFF it contains limiting attributes sub _chain_relationship { my ($self, $rel) = @_; my $source = $self->result_source; - my $attrs = $self->{attrs}; + my $attrs = { %{$self->{attrs}||{}} }; - my $from = [ @{ - $attrs->{from} - || - [{ - -source_handle => $source->handle, - -alias => $attrs->{alias}, - $attrs->{alias} => $source->from, - }] - }]; + # we need to take the prefetch the attrs into account before we + # ->_resolve_join as otherwise they get lost - captainL + my $join = $self->_merge_attr( $attrs->{join}, $attrs->{prefetch} ); - my $seen = { %{$attrs->{seen_join} || {} } }; - my $jpath = ($attrs->{seen_join} && keys %{$attrs->{seen_join}}) - ? 
$from->[-1][0]{-join_path} - : []; + delete @{$attrs}{qw/join prefetch collapse distinct select as columns +select +as +columns/}; + my $seen = { %{ (delete $attrs->{seen_join}) || {} } }; - # we need to take the prefetch the attrs into account before we - # ->_resolve_join as otherwise they get lost - captainL - my $merged = $self->_merge_attr( $attrs->{join}, $attrs->{prefetch} ); + my $from; + my @force_subq_attrs = qw/offset rows group_by having/; + + if ( + ($attrs->{from} && ref $attrs->{from} ne 'ARRAY') + || + $self->_has_resolved_attr (@force_subq_attrs) + ) { + # Nuke the prefetch (if any) before the new $rs attrs + # are resolved (prefetch is useless - we are wrapping + # a subquery anyway). + my $rs_copy = $self->search; + $rs_copy->{attrs}{join} = $self->_merge_attr ( + $rs_copy->{attrs}{join}, + delete $rs_copy->{attrs}{prefetch}, + ); + + $from = [{ + -source_handle => $source->handle, + -alias => $attrs->{alias}, + $attrs->{alias} => $rs_copy->as_query, + }]; + delete @{$attrs}{@force_subq_attrs, 'where'}; + $seen->{-relation_chain_depth} = 0; + } + elsif ($attrs->{from}) { #shallow copy suffices + $from = [ @{$attrs->{from}} ]; + } + else { + $from = [{ + -source_handle => $source->handle, + -alias => $attrs->{alias}, + $attrs->{alias} => $source->from, + }]; + } + + my $jpath = ($seen->{-relation_chain_depth}) + ? $from->[-1][0]{-join_path} + : []; my @requested_joins = $source->_resolve_join( - $merged, + $join, $attrs->{alias}, $seen, $jpath, @@ -2751,7 +2907,7 @@ sub _chain_relationship { push @$from, @requested_joins; - $seen->{-relation_chain_depth} += 0.5; + $seen->{-relation_chain_depth}++; # if $self already had a join/prefetch specified on it, the requested # $rel might very well be already included. What we do in this case @@ -2759,26 +2915,16 @@ sub _chain_relationship { # the join in question so we could tell it *is* the search_related) my $already_joined; - # we consider the last one thus reverse for my $j (reverse @requested_joins) { - if ($rel eq $j->[0]{-join_path}[-1]) { - $j->[0]{-relation_chain_depth} += 0.5; + my ($last_j) = keys %{$j->[0]{-join_path}[-1]}; + if ($rel eq $last_j) { + $j->[0]{-relation_chain_depth}++; $already_joined++; last; } } -# alternative way to scan the entire chain - not backwards compatible -# for my $j (reverse @$from) { -# next unless ref $j eq 'ARRAY'; -# if ($j->[0]{-join_path} && $j->[0]{-join_path}[-1] eq $rel) { -# $j->[0]{-relation_chain_depth} += 0.5; -# $already_joined++; -# last; -# } -# } - unless ($already_joined) { push @$from, $source->_resolve_join( $rel, @@ -2788,9 +2934,9 @@ sub _chain_relationship { ); } - $seen->{-relation_chain_depth} += 0.5; + $seen->{-relation_chain_depth}++; - return ($from,$seen); + return {%$attrs, from => $from, seen_join => $seen}; } # too many times we have to do $attrs = { %{$self->_resolved_attrs} } @@ -2813,41 +2959,46 @@ sub _resolved_attrs { # build columns (as long as select isn't set) into a set of as/select hashes unless ( $attrs->{select} ) { - my @cols = ( ref($attrs->{columns}) eq 'ARRAY' ) - ? @{ delete $attrs->{columns}} - : ( - ( delete $attrs->{columns} ) - || - $source->columns - ) - ; + my @cols; + if ( ref $attrs->{columns} eq 'ARRAY' ) { + @cols = @{ delete $attrs->{columns}} + } elsif ( defined $attrs->{columns} ) { + @cols = delete $attrs->{columns} + } else { + @cols = $source->columns + } - @colbits = map { - ( ref($_) eq 'HASH' ) - ? $_ - : { - ( - /^\Q${alias}.\E(.+)$/ - ? "$1" - : "$_" - ) - => - ( - /\./ - ? 
"$_" - : "${alias}.$_" - ) - } - } @cols; + for (@cols) { + if ( ref $_ eq 'HASH' ) { + push @colbits, $_ + } else { + my $key = /^\Q${alias}.\E(.+)$/ + ? "$1" + : "$_"; + my $value = /\./ + ? "$_" + : "${alias}.$_"; + push @colbits, { $key => $value }; + } + } } # add the additional columns on - foreach ( 'include_columns', '+columns' ) { - push @colbits, map { - ( ref($_) eq 'HASH' ) - ? $_ - : { ( split( /\./, $_ ) )[-1] => ( /\./ ? $_ : "${alias}.$_" ) } - } ( ref($attrs->{$_}) eq 'ARRAY' ) ? @{ delete $attrs->{$_} } : delete $attrs->{$_} if ( $attrs->{$_} ); + foreach (qw{include_columns +columns}) { + if ( $attrs->{$_} ) { + my @list = ( ref($attrs->{$_}) eq 'ARRAY' ) + ? @{ delete $attrs->{$_} } + : delete $attrs->{$_}; + for (@list) { + if ( ref($_) eq 'HASH' ) { + push @colbits, $_ + } else { + my $key = ( split /\./, $_ )[-1]; + my $value = ( /\./ ? $_ : "$alias.$_" ); + push @colbits, { $key => $value }; + } + } + } } # start with initial select items @@ -2856,45 +3007,48 @@ sub _resolved_attrs { ( ref $attrs->{select} eq 'ARRAY' ) ? [ @{ $attrs->{select} } ] : [ $attrs->{select} ]; - $attrs->{as} = ( - $attrs->{as} - ? ( - ref $attrs->{as} eq 'ARRAY' - ? [ @{ $attrs->{as} } ] - : [ $attrs->{as} ] + + if ( $attrs->{as} ) { + $attrs->{as} = + ( + ref $attrs->{as} eq 'ARRAY' + ? [ @{ $attrs->{as} } ] + : [ $attrs->{as} ] ) - : [ map { m/^\Q${alias}.\E(.+)$/ ? $1 : $_ } @{ $attrs->{select} } ] - ); + } else { + $attrs->{as} = [ map { + m/^\Q${alias}.\E(.+)$/ + ? $1 + : $_ + } @{ $attrs->{select} } + ] + } } else { - # otherwise we intialise select & as to empty $attrs->{select} = []; $attrs->{as} = []; } # now add colbits to select/as - push( @{ $attrs->{select} }, map { values( %{$_} ) } @colbits ); - push( @{ $attrs->{as} }, map { keys( %{$_} ) } @colbits ); + push @{ $attrs->{select} }, map values %{$_}, @colbits; + push @{ $attrs->{as} }, map keys %{$_}, @colbits; - my $adds; - if ( $adds = delete $attrs->{'+select'} ) { + if ( my $adds = delete $attrs->{'+select'} ) { $adds = [$adds] unless ref $adds eq 'ARRAY'; - push( - @{ $attrs->{select} }, - map { /\./ || ref $_ ? $_ : "${alias}.$_" } @$adds - ); + push @{ $attrs->{select} }, + map { /\./ || ref $_ ? $_ : "$alias.$_" } @$adds; } - if ( $adds = delete $attrs->{'+as'} ) { + if ( my $adds = delete $attrs->{'+as'} ) { $adds = [$adds] unless ref $adds eq 'ARRAY'; - push( @{ $attrs->{as} }, @$adds ); + push @{ $attrs->{as} }, @$adds; } - $attrs->{from} ||= [ { + $attrs->{from} ||= [{ -source_handle => $source->handle, -alias => $self->{attrs}{alias}, $self->{attrs}{alias} => $source->from, - } ]; + }]; if ( $attrs->{join} || $attrs->{prefetch} ) { @@ -2914,7 +3068,7 @@ sub _resolved_attrs { $join, $alias, { %{ $attrs->{seen_join} || {} } }, - ($attrs->{seen_join} && keys %{$attrs->{seen_join}}) + ( $attrs->{seen_join} && keys %{$attrs->{seen_join}}) ? $attrs->{from}[-1][0]{-join_path} : [] , @@ -2942,30 +3096,89 @@ sub _resolved_attrs { } else { $attrs->{group_by} = [ grep { !ref($_) || (ref($_) ne 'HASH') } @{$attrs->{select}} ]; + + # add any order_by parts that are not already present in the group_by + # we need to be careful not to add any named functions/aggregates + # i.e. select => [ ... { count => 'foo', -as 'foocount' } ... 
] + my %already_grouped = map { $_ => 1 } (@{$attrs->{group_by}}); + + my $storage = $self->result_source->schema->storage; + + my $rs_column_list = $storage->_resolve_column_info ($attrs->{from}); + + for my $chunk ($storage->_parse_order_by($attrs->{order_by})) { + if ($rs_column_list->{$chunk} && not $already_grouped{$chunk}++) { + push @{$attrs->{group_by}}, $chunk; + } + } } } - $attrs->{collapse} ||= {}; + # generate selections based on the prefetch helper if ( my $prefetch = delete $attrs->{prefetch} ) { - $prefetch = $self->_merge_attr( {}, $prefetch ); + $attrs->{collapse} = 1; + + # this is a separate structure (we don't look in {from} directly) + # as the resolver needs to shift things off the lists to work + # properly (identical-prefetches on different branches) + my $join_map = {}; + if (ref $attrs->{from} eq 'ARRAY') { + + my $start_depth = $attrs->{seen_join}{-relation_chain_depth} || 0; + + for my $j ( @{$attrs->{from}}[1 .. $#{$attrs->{from}} ] ) { + next unless $j->[0]{-alias}; + next unless $j->[0]{-join_path}; + next if ($j->[0]{-relation_chain_depth} || 0) < $start_depth; - my $prefetch_ordering = []; + my @jpath = map { keys %$_ } @{$j->[0]{-join_path}}; - my $join_map = $self->_joinpath_aliases ($attrs->{from}, $attrs->{seen_join}); + my $p = $join_map; + $p = $p->{$_} ||= {} for @jpath[ ($start_depth/2) .. $#jpath]; #only even depths are actual jpath boundaries + push @{$p->{-join_aliases} }, $j->[0]{-alias}; + } + } - my @prefetch = - $source->_resolve_prefetch( $prefetch, $alias, $join_map, $prefetch_ordering, $attrs->{collapse} ); + my @prefetch = $source->_resolve_prefetch( $prefetch, $alias, $join_map ); # we need to somehow mark which columns came from prefetch $attrs->{_prefetch_select} = [ map { $_->[0] } @prefetch ]; push @{ $attrs->{select} }, @{$attrs->{_prefetch_select}}; push @{ $attrs->{as} }, (map { $_->[1] } @prefetch); + } + + # run through the resulting joinstructure (starting from our current slot) + # and unset collapse if proven unnesessary + if ($attrs->{collapse} && ref $attrs->{from} eq 'ARRAY') { + + if (@{$attrs->{from}} > 1) { - push( @{$attrs->{order_by}}, @$prefetch_ordering ); - $attrs->{_collapse_order_by} = \@$prefetch_ordering; + # find where our table-spec starts and consider only things after us + my @fromlist = @{$attrs->{from}}; + while (@fromlist) { + my $t = shift @fromlist; + $t = $t->[0] if ref $t eq 'ARRAY'; #me vs join from-spec mismatch + last if ($t->{-alias} && $t->{-alias} eq $alias); + } + + for (@fromlist) { + $attrs->{collapse} = ! 
$_->[0]{-is_single} + and last; + } + } + else { + # no joins - no collapse + $attrs->{collapse} = 0; + } } + # the row parser generates differently depending on whether collapsing is requested + # the need to look at {select} is temporary + $attrs->{_row_parser} = $source->_mk_row_parser ( + @{$attrs}{qw/as collapse select/} + ); + # if both page and offset are specified, produce a combined offset # even though it doesn't make much sense, this is what pre 081xx has # been doing @@ -2980,33 +3193,6 @@ sub _resolved_attrs { return $self->{_attrs} = $attrs; } -sub _joinpath_aliases { - my ($self, $fromspec, $seen) = @_; - - my $paths = {}; - return $paths unless ref $fromspec eq 'ARRAY'; - - my $cur_depth = $seen->{-relation_chain_depth} || 0; - - if (int ($cur_depth) != $cur_depth) { - $self->throw_exception ("-relation_chain_depth is not an integer, something went horribly wrong ($cur_depth)"); - } - - for my $j (@$fromspec) { - - next if ref $j ne 'ARRAY'; - next if ($j->[0]{-relation_chain_depth} || 0) < $cur_depth; - - my $jpath = $j->[0]{-join_path}; - - my $p = $paths; - $p = $p->{$_} ||= {} for @{$jpath}[$cur_depth .. $#$jpath]; - push @{$p->{-join_aliases} }, $j->[0]{-alias}; - } - - return $paths; -} - sub _rollout_attr { my ($self, $attr) = @_; @@ -3260,6 +3446,9 @@ When you use function/stored procedure names and do not supply an C attribute, the column names returned are storage-dependent. E.g. MySQL would return a column named C in the above example. +B You will almost always need a corresponding 'as' entry when you use +'select'. + =head2 +select =over 4 @@ -3324,7 +3513,7 @@ attempting to use the accessor in an C clause or similar will fail miserably. To get around this limitation, you can supply literal SQL to your -C attribute that contains the C text, e.g. select => [\'myfield AS alias'] @@ -3435,7 +3624,7 @@ for a C attribute in the above search. C can be used with the following relationship types: C, C (or if you're using C, any relationship declared with an accessor type of 'single' or 'filter'). A more complex example that -prefetches an artists cds, the tracks on those cds, and the tags associted +prefetches an artists cds, the tracks on those cds, and the tags associated with that artist is given below (assuming many-to-many from artists to tags): my $rs = $schema->resultset('Artist')->search( @@ -3514,7 +3703,7 @@ C on it. =back -Specifes the maximum number of rows for direct retrieval or the number of +Specifies the maximum number of rows for direct retrieval or the number of rows per page if the page attribute or method is used. =head2 offset