# Run the query, passing the result_class since it should propagate for find
my $rs = $self->search ($final_cond, {result_class => $self->result_class, %$attrs});
- if (keys %{$rs->_resolved_attrs->{collapse}}) {
+ if ($rs->_resolved_attrs->{collapse}) {
my $row = $rs->next;
carp "Query returned more than one row" if $rs->next;
return $row;
my $attrs = $self->_resolved_attrs_copy;
- if (keys %{$attrs->{collapse}}) {
+ if ($attrs->{collapse}) {
$self->throw_exception(
'single() can not be used on resultsets prefetching has_many. Use find( \%cond ) or next() instead'
);
$attrs->{where}, $attrs
);
- return (@data ? ($self->_construct_object(@data))[0] : undef);
+ return @data
+ ? ($self->_construct_objects(@data))[0]
+ : undef
+ ;
}
: $self->cursor->next
);
return undef unless (@row);
- my ($row, @more) = $self->_construct_object(@row);
+ my ($row, @more) = $self->_construct_objects(@row);
$self->{stashed_objects} = \@more if @more;
return $row;
}
-sub _construct_object {
+# takes a single DBI-row of data and constructs as many objects
+# as the resultset attributes call for.
+# This can be a bit of an action at a distance - it takes as an argument
+# the *current* cursor-row (already taken off the $sth), but if
+# collapsing is requested it will keep advancing the cursor either
+# until the current row-object is assembled (the collapser was able to
+# order the result sensibly) OR until the cursor is exhausted (an
+# unordered collapsing resultset effectively triggers ->all)
+
+# FIXME: why on earth do we pass around DBI data by copy?! Sadly needs
+# assessment before changing...
+#
+sub _construct_objects {
my ($self, @row) = @_;
+ my $attrs = $self->_resolved_attrs;
+ my $keep_collapsing = $attrs->{collapse};
+
+ my $res_index;
+=begin
+ do {
+ my $me_pref_col = $attrs->{_row_parser}->($row_ref);
+
+ my $container;
+ if ($keep_collapsing) {
+
+ # FIXME - we should be able to remove these 2 checks after the design validates
+ $self->throw_exception ('Collapsing without a top-level collapse-set... can not happen')
+        unless @{$me_pref_col->[2]};
+ $self->throw_exception ('Top-level collapse-set contains a NULL-value... can not happen')
+ if grep { ! defined $_ } @{$me_pref_col->[2]};
+
+ my $main_ident = join "\x00", @{$me_pref_col->[2]};
+
+ if (! $res_index->{$main_ident}) {
+ # this is where we bail out IFF we are ordered, and the $main_ident changes
+
+ $res_index->{$main_ident} = {
+ all_me_pref => [,
+ index => scalar keys %$res_index,
+ };
+ }
+ }
+
+
- my $info = $self->_collapse_result($self->{_attrs}{as}, \@row)
+ $container = $res_index->{$main_ident}{container};
+ };
+
+ push @$container, [ @{$me_pref_col}[0,1] ];
+
+
+
+ } while (
+ $keep_collapsing
+ &&
+ do { $row_ref = [$self->cursor->next]; $self->{stashed_row} = $row_ref if @$row_ref; scalar @$row_ref }
+ );
+
+ # attempt collapse all rows with same collapse identity
+ if (@to_collapse > 1) {
+ my @collapsed;
+ while (@to_collapse) {
+ $self->_merge_result(\@collapsed, shift @to_collapse);
+ }
+ }
+=cut
+
+ my $mepref_structs = $self->_collapse_result($attrs->{as}, \@row, $keep_collapsing)
or return ();
- my @new = $self->result_class->inflate_result($self->result_source, @$info);
- @new = $self->{_attrs}{record_filter}->(@new)
- if exists $self->{_attrs}{record_filter};
- return @new;
+
+ my $rsrc = $self->result_source;
+ my $res_class = $self->result_class;
+ my $inflator = $res_class->can ('inflate_result');
+
+ my @objs =
+ $res_class->$inflator ($rsrc, @$mepref_structs);
+
+ if (my $f = $attrs->{record_filter}) {
+ @objs = map { $f->($_) } @objs;
+ }
+
+ return @objs;
}
+
sub _collapse_result {
- my ($self, $as_proto, $row) = @_;
+ my ( $self, $as_proto, $row_ref, $keep_collapsing ) = @_;
+ my $collapse = $self->_resolved_attrs->{collapse};
+ my $parser = $self->result_source->_mk_row_parser( $as_proto, $collapse );
+ my $result = [];
+ my $register = {};
+ my $rel_register = {};
- my @copy = @$row;
+ my @row = @$row_ref;
+ do {
+ my $row = $parser->( \@row );
- # 'foo' => [ undef, 'foo' ]
- # 'foo.bar' => [ 'foo', 'bar' ]
- # 'foo.bar.baz' => [ 'foo.bar', 'baz' ]
+ # init register
+ $self->_check_register( $register, $row ) unless ( keys %$register );
- my @construct_as = map { [ (/^(?:(.*)\.)?([^.]+)$/) ] } @$as_proto;
+ $self->_merge_result( $result, $row, $rel_register )
+ if ( !$collapse
+ || ( $collapse = $self->_check_register( $register, $row ) ) );
- my %collapse = %{$self->{_attrs}{collapse}||{}};
+ } while (
+ $collapse
+ && do { @row = $self->cursor->next; $self->{stashed_row} = \@row if @row; }
+
+ # run this as long as there is a next row and we are not yet done collapsing
+ );
+ return $result;
+}
- my @pri_index;
- # if we're doing collapsing (has_many prefetch) we need to grab records
- # until the PK changes, so fill @pri_index. if not, we leave it empty so
- # we know we don't have to bother.
- # the reason for not using the collapse stuff directly is because if you
- # had for e.g. two artists in a row with no cds, the collapse info for
- # both would be NULL (undef) so you'd lose the second artist
+# Taubenschlag
+sub _check_register {
+ my ( $self, $register, $obj ) = @_;
+ return undef unless ( ref $obj eq 'ARRAY' && ref $obj->[2] eq 'ARRAY' );
+ my @ids = @{ $obj->[2] };
+ while ( defined( my $id = shift @ids ) ) {
+ return $register->{$id} if ( exists $register->{$id} && !@ids );
+ $register->{$id} = @ids ? {} : $obj unless ( exists $register->{$id} );
+ $register = $register->{$id};
+ }
+ return undef;
+}
- # store just the index so we can check the array positions from the row
- # without having to contruct the full hash
+sub _merge_result {
+ my ( $self, $result, $row, $register ) = @_;
+ return @$result = @$row if ( @$result == 0 ); # initialize with $row
- if (keys %collapse) {
- my %pri = map { ($_ => 1) } $self->result_source->_pri_cols;
- foreach my $i (0 .. $#construct_as) {
- next if defined($construct_as[$i][0]); # only self table
- if (delete $pri{$construct_as[$i][1]}) {
- push(@pri_index, $i);
- }
- last unless keys %pri; # short circuit (Johnny Five Is Alive!)
+ my ( undef, $rels, $ids ) = @$result;
+ my ( undef, $new_rels, $new_ids ) = @$row;
+
+ my @rels = keys %{ { %{$rels||{} }, %{ $new_rels||{} } } };
+ foreach my $rel (@rels) {
+ $register = $register->{$rel} ||= {};
+
+ my $new_data = $new_rels->{$rel};
+ my $data = $rels->{$rel};
+ @$data = [@$data] unless ( ref $data->[0] eq 'ARRAY' );
+
+ $self->_check_register( $register, $data->[0] )
+ unless ( keys %$register );
+
+ if ( my $found = $self->_check_register( $register, $new_data ) ) {
+ $self->_merge_result( $found, $new_data, $register );
+ }
+ else {
+ push( @$data, $new_data );
}
}
+ return 1;
+}
+
+=begin
+
+# two arguments: $as_proto is an arrayref of column names,
+# $row_ref is an arrayref of the data. If none of the row data
+# is defined we return undef (that's copied from the old
+# _collapse_result). Next we decide whether we need to collapse
+# the resultset (i.e. we prefetch something) or not. $collapse
+# indicates that. The do-while loop will run once if we do not need
+# to collapse the result and will run as long as _merge_result returns
+# a true value. It will return undef if the current added row does not
+# match the previous row. A bit of stashing and cursor magic is
+# required so that the cursor is not mixed up.
+
+# "$rows" is a bit misleading. In the end, there should only be one
+# element in this arrayref.
- # no need to do an if, it'll be empty if @pri_index is empty anyway
+sub _collapse_result {
+ my ( $self, $as_proto, $row_ref ) = @_;
+ my $has_def;
+ for (@$row_ref) {
+ if ( defined $_ ) {
+ $has_def++;
+ last;
+ }
+ }
+ return undef unless $has_def;
+
+ my $collapse = $self->_resolved_attrs->{collapse};
+ my $rows = [];
+ my @row = @$row_ref;
+ do {
+ my $i = 0;
+ my $row = { map { $_ => $row[ $i++ ] } @$as_proto };
+ $row = $self->result_source->_parse_row($row, $collapse);
+ unless ( scalar @$rows ) {
+ push( @$rows, $row );
+ }
+ $collapse = undef unless ( $self->_merge_result( $rows, $row ) );
+ } while (
+ $collapse
+ && do { @row = $self->cursor->next; $self->{stashed_row} = \@row if @row; }
+ );
+
+ return $rows->[0];
- my %pri_vals = map { ($_ => $copy[$_]) } @pri_index;
+}
+
+# _merge_result accepts an arrayref of rows objects (again, an arrayref of two elements)
+# and a row object which should be merged into the first object.
+# First we try to find out whether $row is already in $rows. If this is the case
+# we try to merge them by iteration through their relationship data. We call
+# _merge_result again on them, so they get merged.
+
+# If we don't find the $row in $rows, we append it to $rows and return undef.
+# _merge_result returns 1 otherwise (i.e. $row has been found in $rows).
+
+sub _merge_result {
+ my ( $self, $rows, $row ) = @_;
+ my ( $columns, $rels ) = @$row;
+ my $found = undef;
+ foreach my $seen (@$rows) {
+ my $match = 1;
+ foreach my $column ( keys %$columns ) {
+ if ( defined $seen->[0]->{$column} ^ defined $columns->{$column}
+ or defined $columns->{$column}
+ && $seen->[0]->{$column} ne $columns->{$column} )
+ {
+
+ $match = 0;
+ last;
+ }
+ }
+ if ($match) {
+ $found = $seen;
+ last;
+ }
+ }
+ if ($found) {
+ foreach my $rel ( keys %$rels ) {
+ my $old_rows = $found->[1]->{$rel};
+ $self->_merge_result(
+ ref $found->[1]->{$rel}->[0] eq 'HASH' ? [ $found->[1]->{$rel} ]
+ : $found->[1]->{$rel},
+ ref $rels->{$rel}->[0] eq 'HASH' ? [ $rels->{$rel}->[0], $rels->{$rel}->[1] ]
+ : $rels->{$rel}->[0]
+ );
- my @const_rows;
+ my $attrs = $self->_resolved_attrs;
+ my ($keep_collapsing, $set_ident) = @{$attrs}{qw/collapse _collapse_ident/};
+
+ # FIXME this is temporary, need to calculate in _resolved_attrs
+ $set_ident ||= { me => [ $self->result_source->_pri_cols ], pref => {} };
+
+ my @cur_row = @$row_ref;
+ my (@to_collapse, $last_ident);
+
+ do {
+ my $row_hr = { map { $as_proto->[$_] => $cur_row[$_] } (0 .. $#$as_proto) };
+
+ # see if we are switching to another object
+ # this can be turned off and things will still work
+ # since _merge_prefetch knows about _collapse_ident
+# my $cur_ident = [ @{$row_hr}{@$set_ident} ];
+ my $cur_ident = [];
+ $last_ident ||= $cur_ident;
+
+# if ($keep_collapsing = Test::Deep::eq_deeply ($cur_ident, $last_ident)) {
+# push @to_collapse, $self->result_source->_parse_row (
+# $row_hr,
+# );
+# }
+ } while (
+ $keep_collapsing
+ &&
+ do { @cur_row = $self->cursor->next; $self->{stashed_row} = \@cur_row if @cur_row; }
+ );
- do { # no need to check anything at the front, we always want the first row
+ die Dumper \@to_collapse;
- my %const;
- foreach my $this_as (@construct_as) {
- $const{$this_as->[0]||''}{$this_as->[1]} = shift(@copy);
+ # attempt collapse all rows with same collapse identity
+ if (@to_collapse > 1) {
+ my @collapsed;
+ while (@to_collapse) {
+ $self->_merge_result(\@collapsed, shift @to_collapse);
}
+ @to_collapse = @collapsed;
+ }
- push(@const_rows, \%const);
+ # still didn't fully collapse
+ $self->throw_exception ('Resultset collapse failed (theoretically impossible). Maybe a wrong collapse_ident...?')
+ if (@to_collapse > 1);
- } until ( # no pri_index => no collapse => drop straight out
- !@pri_index
- or
- do { # get another row, stash it, drop out if different PK
+ return $to_collapse[0];
+}
- @copy = $self->cursor->next;
- $self->{stashed_row} = \@copy;
- # last thing in do block, counts as true if anything doesn't match
+# two arguments: $as_proto is an arrayref of 'as' column names,
+# $row_ref is an arrayref of the data. The do-while loop will run
+# once if we do not need to collapse the result and will run as long as
+# _merge_result returns a true value. It will return undef if the
+# current added row does not match the previous row, which in turn
+# means we need to stash the row for the subsequent ->next call
+sub _collapse_result {
+ my ( $self, $as_proto, $row_ref ) = @_;
- # check xor defined first for NULL vs. NOT NULL then if one is
- # defined the other must be so check string equality
+ my $attrs = $self->_resolved_attrs;
+ my ($keep_collapsing, $set_ident) = @{$attrs}{qw/collapse _collapse_ident/};
- grep {
- (defined $pri_vals{$_} ^ defined $copy[$_])
- || (defined $pri_vals{$_} && ($pri_vals{$_} ne $copy[$_]))
- } @pri_index;
- }
- );
+ die Dumper [$as_proto, $row_ref, $keep_collapsing, $set_ident ];
- my $alias = $self->{attrs}{alias};
- my $info = [];
- my %collapse_pos;
+ my @cur_row = @$row_ref;
+ my (@to_collapse, $last_ident);
- my @const_keys;
+ do {
+ my $row_hr = { map { $as_proto->[$_] => $cur_row[$_] } (0 .. $#$as_proto) };
- foreach my $const (@const_rows) {
- scalar @const_keys or do {
- @const_keys = sort { length($a) <=> length($b) } keys %$const;
- };
- foreach my $key (@const_keys) {
- if (length $key) {
- my $target = $info;
- my @parts = split(/\./, $key);
- my $cur = '';
- my $data = $const->{$key};
- foreach my $p (@parts) {
- $target = $target->[1]->{$p} ||= [];
- $cur .= ".${p}";
- if ($cur eq ".${key}" && (my @ckey = @{$collapse{$cur}||[]})) {
- # collapsing at this point and on final part
- my $pos = $collapse_pos{$cur};
- CK: foreach my $ck (@ckey) {
- if (!defined $pos->{$ck} || $pos->{$ck} ne $data->{$ck}) {
- $collapse_pos{$cur} = $data;
- delete @collapse_pos{ # clear all positioning for sub-entries
- grep { m/^\Q${cur}.\E/ } keys %collapse_pos
- };
- push(@$target, []);
- last CK;
- }
- }
- }
- if (exists $collapse{$cur}) {
- $target = $target->[-1];
- }
- }
- $target->[0] = $data;
- } else {
- $info->[0] = $const->{$key};
- }
- }
+ # see if we are switching to another object
+ # this can be turned off and things will still work
+ # since _merge_prefetch knows about _collapse_ident
+# my $cur_ident = [ @{$row_hr}{@$set_ident} ];
+ my $cur_ident = [];
+ $last_ident ||= $cur_ident;
+
+# if ($keep_collapsing = eq_deeply ($cur_ident, $last_ident)) {
+# push @to_collapse, $self->result_source->_parse_row (
+# $row_hr,
+# );
+# }
+ } while (
+ $keep_collapsing
+ &&
+ do { @cur_row = $self->cursor->next; $self->{stashed_row} = \@cur_row if @cur_row; }
+ );
+
+ # attempt collapse all rows with same collapse identity
+}
+=cut
+
+# Takes an arrayref of me/pref pairs and a new me/pref pair that should
+# be merged on a preexisting matching me (or should be pushed into $merged
+# as a new me/pref pair for further invocations). It should be possible to
+# use this function to collapse complete ->all results, provided _collapse_result() is adjusted
+# to provide everything to this sub not to barf when $merged contains more than one
+# arrayref)
+sub _merge_prefetch {
+ my ($self, $merged, $next_row) = @_;
+
+ unless (@$merged) {
+ push @$merged, $next_row;
+ return;
}
- return $info;
}
=head2 result_source
# if we multi-prefetch we group_by something unique, as this is what we would
# get out of the rs via ->next/->all. We *DO WANT* to clobber old group_by regardless
- if ( keys %{$attrs->{collapse}} ) {
+ if ( $attrs->{collapse} ) {
$sub_attrs->{group_by} = [ map { "$attrs->{alias}.$_" } @{
$rsrc->_identifying_column_set || $self->throw_exception(
'Unable to construct a unique group_by criteria properly collapsing the '
$self->throw_exception("all() doesn't take any arguments, you probably wanted ->search(...)->all()");
}
- return @{ $self->get_cache } if $self->get_cache;
+ if (my $c = $self->get_cache) {
+ return @$c;
+ }
- my @obj;
+ my @objects;
- if (keys %{$self->_resolved_attrs->{collapse}}) {
+ if ($self->_resolved_attrs->{collapse}) {
# Using $self->cursor->all is really just an optimisation.
# If we're collapsing has_many prefetches it probably makes
# very little difference, and this is cleaner than hacking
- # _construct_object to survive the approach
+ # _construct_objects to survive the approach
$self->cursor->reset;
my @row = $self->cursor->next;
while (@row) {
- push(@obj, $self->_construct_object(@row));
+ push(@objects, $self->_construct_objects(@row));
@row = (exists $self->{stashed_row}
? @{delete $self->{stashed_row}}
: $self->cursor->next);
}
} else {
- @obj = map { $self->_construct_object(@$_) } $self->cursor->all;
+ @objects = map { $self->_construct_objects(@$_) } $self->cursor->all;
}
- $self->set_cache(\@obj) if $self->{attrs}{cache};
+ $self->set_cache(\@objects) if $self->{attrs}{cache};
- return @obj;
+ return @objects;
}
=head2 reset
}
}
- $attrs->{collapse} ||= {};
- if ($attrs->{prefetch}) {
+ # generate selections based on the prefetch helper
+ my $prefetch;
+ $prefetch = $self->_merge_joinpref_attr( {}, delete $attrs->{prefetch} )
+ if defined $attrs->{prefetch};
+
+ if ($prefetch) {
$self->throw_exception("Unable to prefetch, resultset contains an unnamed selector $attrs->{_dark_selector}{string}")
if $attrs->{_dark_selector};
- my $prefetch = $self->_merge_joinpref_attr( {}, delete $attrs->{prefetch} );
-
- my $prefetch_ordering = [];
+ $attrs->{collapse} = 1;
# this is a separate structure (we don't look in {from} directly)
# as the resolver needs to shift things off the lists to work
}
}
- my @prefetch =
- $source->_resolve_prefetch( $prefetch, $alias, $join_map, $prefetch_ordering, $attrs->{collapse} );
+ my @prefetch = $source->_resolve_prefetch( $prefetch, $alias, $join_map );
# we need to somehow mark which columns came from prefetch
if (@prefetch) {
push @{ $attrs->{select} }, (map { $_->[0] } @prefetch);
push @{ $attrs->{as} }, (map { $_->[1] } @prefetch);
+ }
- push( @{$attrs->{order_by}}, @$prefetch_ordering );
- $attrs->{_collapse_order_by} = \@$prefetch_ordering;
+ # run through the resulting joinstructure (starting from our current slot)
+  # and unset collapse if proven unnecessary
+ if ($attrs->{collapse} && ref $attrs->{from} eq 'ARRAY') {
+
+ if (@{$attrs->{from}} > 1) {
+
+ # find where our table-spec starts and consider only things after us
+ my @fromlist = @{$attrs->{from}};
+ while (@fromlist) {
+ my $t = shift @fromlist;
+ $t = $t->[0] if ref $t eq 'ARRAY'; #me vs join from-spec mismatch
+ last if ($t->{-alias} && $t->{-alias} eq $alias);
+ }
+
+ for (@fromlist) {
+ $attrs->{collapse} = ! $_->[0]{-is_single}
+ and last;
+ }
+ }
+ else {
+ # no joins - no collapse
+ $attrs->{collapse} = 0;
+ }
}
# {collapse} would mean a has_many join was injected, which in turn means
# we need to group *IF WE CAN* (only if the column in question is unique)
- if (!$orig_attrs->{group_by} && keys %{$orig_attrs->{collapse}}) {
+ if (!$orig_attrs->{group_by} && $orig_attrs->{collapse}) {
if ($colmap->{$select} and $rsrc->_identifying_column_set([$colmap->{$select}])) {
$new_attrs->{group_by} = [ $select ];
use strict;
use warnings;
+use base qw/DBIx::Class/;
+
use DBIx::Class::ResultSet;
use DBIx::Class::ResultSourceHandle;
use Scalar::Util qw/blessed weaken isweak/;
use namespace::clean;
-use base qw/DBIx::Class/;
-
__PACKAGE__->mk_group_accessors(simple => qw/
source_name name source_info
_ordered_columns _columns _primaries _unique_constraints
# prefixed relative to the current source, in accordance with where they appear
# in the supplied relationships.
sub _resolve_prefetch {
- my ($self, $pre, $alias, $alias_map, $order, $collapse, $pref_path) = @_;
+ my ($self, $pre, $alias, $alias_map, $order, $pref_path) = @_;
$pref_path ||= [];
if (not defined $pre or not length $pre) {
}
elsif( ref $pre eq 'ARRAY' ) {
return
- map { $self->_resolve_prefetch( $_, $alias, $alias_map, $order, $collapse, [ @$pref_path ] ) }
+ map { $self->_resolve_prefetch( $_, $alias, $alias_map, $order, [ @$pref_path ] ) }
@$pre;
}
elsif( ref $pre eq 'HASH' ) {
my @ret =
map {
- $self->_resolve_prefetch($_, $alias, $alias_map, $order, $collapse, [ @$pref_path ] ),
+ $self->_resolve_prefetch($_, $alias, $alias_map, $order, [ @$pref_path ] ),
$self->related_source($_)->_resolve_prefetch(
- $pre->{$_}, "${alias}.$_", $alias_map, $order, $collapse, [ @$pref_path, $_] )
+ $pre->{$_}, "${alias}.$_", $alias_map, $order, [ @$pref_path, $_] )
} keys %$pre;
return @ret;
}
unless ref($rel_info->{cond}) eq 'HASH';
my $dots = @{[$as_prefix =~ m/\./g]} + 1; # +1 to match the ".${as_prefix}"
- if (my ($fail) = grep { @{[$_ =~ m/\./g]} == $dots }
- keys %{$collapse}) {
- my ($last) = ($fail =~ /([^\.]+)$/);
- carp (
- "Prefetching multiple has_many rels ${last} and ${pre} "
- .(length($as_prefix)
- ? "at the same level (${as_prefix}) "
- : "at top level "
- )
- . 'will explode the number of row objects retrievable via ->next or ->all. '
- . 'Use at your own risk.'
- );
- }
-
#my @col = map { (/^self\.(.+)$/ ? ("${as_prefix}.$1") : ()); }
# values %{$rel_info->{cond}};
- $collapse->{".${as_prefix}${pre}"} = [ $rel_source->_pri_cols ];
- # action at a distance. prepending the '.' allows simpler code
- # in ResultSet->_collapse_result
my @key = map { (/^foreign\.(.+)$/ ? ($1) : ()); }
keys %{$rel_info->{cond}};
+
push @$order, map { "${as}.$_" } @key;
if (my $rel_order = $rel_info->{attrs}{order_by}) {
}
}
+# Takes a selection list and generates a collapse-map representing
+# row-object fold-points. Every relationship is assigned a set of unique,
+# non-nullable columns (which may *not even be* from the same resultset)
+# and the collapser will use this information to correctly distinguish
+# data of individual to-be-row-objects.
+sub _resolve_collapse {
+ my ($self, $as, $as_fq_idx, $rel_chain, $parent_info) = @_;
+
+ # for comprehensible error messages put ourselves at the head of the relationship chain
+ $rel_chain ||= [ $self->source_name ];
+
+ # record top-level fully-qualified column index
+ $as_fq_idx ||= { %$as };
+
+ my ($my_cols, $rel_cols);
+ for (keys %$as) {
+ if ($_ =~ /^ ([^\.]+) \. (.+) /x) {
+ $rel_cols->{$1}{$2} = 1;
+ }
+ else {
+ $my_cols->{$_} = {}; # important for ||= below
+ }
+ }
+
+ my $relinfo;
+ # run through relationships, collect metadata, inject non-left fk-bridges from
+ # *INNER-JOINED* children (if any)
+ for my $rel (keys %$rel_cols) {
+ my $rel_src = $self->related_source ($rel);
+ my $inf = $self->relationship_info ($rel);
+
+ $relinfo->{$rel}{is_single} = $inf->{attrs}{accessor} && $inf->{attrs}{accessor} ne 'multi';
+ $relinfo->{$rel}{is_inner} = ( $inf->{attrs}{join_type} || '' ) !~ /^left/i;
+ $relinfo->{$rel}{rsrc} = $rel_src;
+
+ my $cond = $inf->{cond};
+
+ if (
+ ref $cond eq 'HASH'
+ and
+ keys %$cond
+ and
+ ! List::Util::first { $_ !~ /^foreign\./ } (keys %$cond)
+ and
+ ! List::Util::first { $_ !~ /^self\./ } (values %$cond)
+ ) {
+ for my $f (keys %$cond) {
+ my $s = $cond->{$f};
+ $_ =~ s/^ (?: foreign | self ) \.//x for ($f, $s);
+ $relinfo->{$rel}{fk_map}{$s} = $f;
+
+ $my_cols->{$s} ||= { via_fk => "$rel.$f" } # need to know source from *our* pov
+ if ($relinfo->{$rel}{is_inner} && defined $rel_cols->{$rel}{$f}); # only if it is inner and in fact selected of course
+ }
+ }
+ }
+
+ # if the parent is already defined, assume all of its related FKs are selected
+ # (even if they in fact are NOT in the select list). Keep a record of what we
+ # assumed, and if any such phantom-column becomes part of our own collapser,
+ # throw everything assumed-from-parent away and replace with the collapser of
+ # the parent (whatever it may be)
+ my $assumed_from_parent;
+ unless ($parent_info->{underdefined}) {
+ $assumed_from_parent->{columns} = { map
+ # only add to the list if we do not already select said columns
+ { ! exists $my_cols->{$_} ? ( $_ => 1 ) : () }
+ values %{$parent_info->{rel_condition} || {}}
+ };
+
+ $my_cols->{$_} = { via_collapse => $parent_info->{collapse_on} }
+ for keys %{$assumed_from_parent->{columns}};
+ }
+
+ # get colinfo for everything
+ if ($my_cols) {
+ $my_cols->{$_}{colinfo} = (
+ $self->has_column ($_) ? $self->column_info ($_) : undef
+ ) for keys %$my_cols;
+ }
+
+ my $collapse_map;
+
+ # try to resolve based on our columns (plus already inserted FK bridges)
+ if (
+ $my_cols
+ and
+ my $uset = $self->_unique_column_set ($my_cols)
+ ) {
+ # see if the resulting collapser relies on any implied columns,
+ # and fix stuff up if this is the case
+
+ my $parent_collapser_used;
+
+ if (List::Util::first
+ { exists $assumed_from_parent->{columns}{$_} }
+ keys %$uset
+ ) {
+ # remove implied stuff from the uset, we will inject the equivalent collapser a bit below
+ delete @{$uset}{keys %{$assumed_from_parent->{columns}}};
+ $parent_collapser_used = 1;
+ }
+
+ $collapse_map->{-collapse_on} = {
+ %{ $parent_collapser_used ? $parent_info->{collapse_on} : {} },
+ (map
+ {
+ my $fqc = join ('.',
+ @{$rel_chain}[1 .. $#$rel_chain],
+ ( $my_cols->{$_}{via_fk} || $_ ),
+ );
+
+ $fqc => $as_fq_idx->{$fqc};
+ }
+ keys %$uset
+ ),
+ };
+ }
+
+ # don't know how to collapse - keep descending down 1:1 chains - if
+ # a related non-LEFT 1:1 is resolvable - its condition will collapse us
+ # too
+ unless ($collapse_map->{-collapse_on}) {
+ my @candidates;
+
+ for my $rel (keys %$relinfo) {
+ next unless ($relinfo->{$rel}{is_single} && $relinfo->{$rel}{is_inner});
+
+ if ( my $rel_collapse = $relinfo->{$rel}{rsrc}->_resolve_collapse (
+ $rel_cols->{$rel},
+ $as_fq_idx,
+ [ @$rel_chain, $rel ],
+ { underdefined => 1 }
+ )) {
+ push @candidates, $rel_collapse->{-collapse_on};
+ }
+ }
+
+ # get the set with least amount of columns
+ # FIXME - maybe need to implement a data type order as well (i.e. prefer several ints
+ # to a single varchar)
+ if (@candidates) {
+ ($collapse_map->{-collapse_on}) = sort { keys %$a <=> keys %$b } (@candidates);
+ }
+ }
+
+ # Still dont know how to collapse - see if the parent passed us anything
+ # (i.e. reuse collapser over 1:1)
+ unless ($collapse_map->{-collapse_on}) {
+ $collapse_map->{-collapse_on} = $parent_info->{collapse_on}
+ if $parent_info->{collapser_reusable};
+ }
+
+
+ # stop descending into children if we were called by a parent for first-pass
+ # and don't despair if nothing was found (there may be other parallel branches
+ # to dive into)
+ if ($parent_info->{underdefined}) {
+ return $collapse_map->{-collapse_on} ? $collapse_map : undef
+ }
+ # nothing down the chain resolved - can't calculate a collapse-map
+ elsif (! $collapse_map->{-collapse_on}) {
+ $self->throw_exception ( sprintf
+ "Unable to calculate a definitive collapse column set for %s%s: fetch more unique non-nullable columns",
+ $self->source_name,
+ @$rel_chain > 1
+ ? sprintf (' (last member of the %s chain)', join ' -> ', @$rel_chain )
+ : ''
+ ,
+ );
+ }
+
+
+ # If we got that far - we are collapsable - GREAT! Now go down all children
+ # a second time, and fill in the rest
+
+ for my $rel (keys %$relinfo) {
+
+ $collapse_map->{$rel} = $relinfo->{$rel}{rsrc}->_resolve_collapse (
+ { map { $_ => 1 } ( keys %{$rel_cols->{$rel}} ) },
+
+ $as_fq_idx,
+
+ [ @$rel_chain, $rel],
+
+ {
+ collapse_on => { %{$collapse_map->{-collapse_on}} },
+
+ rel_condition => $relinfo->{$rel}{fk_map},
+
+ # if this is a 1:1 our own collapser can be used as a collapse-map
+ # (regardless of left or not)
+ collapser_reusable => $relinfo->{$rel}{is_single},
+ },
+ );
+ }
+
+ return $collapse_map;
+}
+
+sub _unique_column_set {
+ my ($self, $cols) = @_;
+
+ my %unique = $self->unique_constraints;
+
+ # always prefer the PK first, and then shortest constraints first
+ USET:
+ for my $set (delete $unique{primary}, sort { @$a <=> @$b } (values %unique) ) {
+ next unless $set && @$set;
+
+ for (@$set) {
+ next USET unless ($cols->{$_} && $cols->{$_}{colinfo} && !$cols->{$_}{colinfo}{is_nullable} );
+ }
+
+ return { map { $_ => 1 } @$set };
+ }
+
+ return undef;
+}
+
+# Takes an arrayref of {as} dbic column aliases and the collapse and select
+# attributes from the same $rs (the selector requirement is a temporary
+# workaround), and returns a coderef capable of:
+# my $me_pref_clps = $coderef->([$rs->cursor->next])
+# Where the $me_pref_clps arrayref is the future argument to
+# ::ResultSet::_collapse_result.
+#
+# $me_pref_clps->[0] is always returned (even if as an empty hash with no
+# rowdata), however branches of related data in $me_pref_clps->[1] may be
+# pruned short of what was originally requested based on {as}, depending
+# on:
+#
+# * If collapse is requested, a definitive collapse map is calculated for
+# every relationship "fold-point", consisting of a set of values (which
+# may not even be contained in the future 'me' of said relationship
+# (for example a cd.artist_id defines the related inner-joined artist)).
+# Thus a definedness check is carried on all collapse-condition values
+# and if at least one is undef it is assumed that we are dealing with a
+# NULLed right-side of a left-join, so we don't return a related data
+# container at all, which implies no related objects
+#
+# * If we are not collapsing, there is no constraint on having a selector
+# uniquely identifying all possible objects, and the user might have very
+# well requested a column that just *happens* to be all NULLs. What we do
+# in this case is fallback to the old behavior (which is a potential FIXME)
+# by always returning a data container, but only filling it with columns
+# IFF at least one of them is defined. This way we do not get an object
+# with a bunch of has_column_loaded to undef, but at the same time do not
+# further relationships based off this "null" object (e.g. in case the user
+# deliberately skipped link-table values). I am pretty sure there are some
+# tests that codify this behavior, need to find the exact testname.
+#
+# For an example of this coderef in action (and to see its guts) look at
+# t/prefetch/_internals.t
+#
+# This is a huge performance win, as we call the same code for
+# every row returned from the db, thus avoiding repeated method
+# lookups when traversing relationships
+#
+# Also since the coderef is completely stateless (the returned structure is
+# always fresh on every new invocation) this is a very good opportunity for
+# memoization if further speed improvements are needed
+#
+# The way we construct this coderef is somewhat fugly, although I am not
+# sure if the string eval is *that* bad of an idea. The alternative is to
+# have a *very* large number of anon coderefs calling each other in a twisty
+# maze, whereas the current result is a nice, smooth, single-pass function.
+# In any case - the output of this thing is meticulously micro-tested, so
+# any sort of rewrite should be relatively easy
+#
+sub _mk_row_parser {
+ my ($self, $as, $with_collapse, $select) = @_;
+
+ my $as_indexed = { map
+ { $as->[$_] => $_ }
+ ( 0 .. $#$as )
+ };
+
+ # calculate collapse fold-points if needed
+ my $collapse_on = do {
+ # FIXME
+ # only consider real columns (not functions) during collapse resolution
+    # this check shouldn't really be here, as users are not supposed to
+ # alias random crap to existing column names anyway, but still - just in
+ # case (also saves us from select/as mismatches which need fixing as well...)
+
+ my $plain_as = { %$as_indexed };
+ for (keys %$plain_as) {
+ delete $plain_as->{$_} if ref $select->[$plain_as->{$_}];
+ }
+ $self->_resolve_collapse ($plain_as);
+
+ } if $with_collapse;
+
+ my $perl = $self->__visit_as ($as_indexed, $collapse_on);
+ my $cref = eval "sub { $perl }"
+ or die "Oops! _mk_row_parser generated invalid perl:\n$@\n\n$perl\n";
+ return $cref;
+}
+
+{
+  my $visit_as_dumper; # keep our own DD object around so we don't have to futz with quoting
+
+ sub __visit_as {
+ my ($self, $as, $collapse_on, $known_defined) = @_;
+ $known_defined ||= {};
+
+ # prepopulate the known defined map with our own collapse value positions
+ # the rationale is that if an Artist needs column 0 to be uniquely
+ # identified, and related CDs need columns 0 and 1, by the time we get to
+ # CDs we already know that column 0 is defined (otherwise there would be
+ # no related CDs as there is no Artist in the 1st place). So we use this
+ # index to cut on repetitive defined() checks.
+ $known_defined->{$_}++ for ( values %{$collapse_on->{-collapse_on} || {}} );
+
+ my $my_cols = {};
+ my $rel_cols;
+ for (keys %$as) {
+ if ($_ =~ /^ ([^\.]+) \. (.+) /x) {
+ $rel_cols->{$1}{$2} = $as->{$_};
+ }
+ else {
+ $my_cols->{$_} = $as->{$_};
+ }
+ }
+
+ my @relperl;
+ for my $rel (sort keys %$rel_cols) {
+ my $rel_node = $self->__visit_as($rel_cols->{$rel}, $collapse_on->{$rel}, {%$known_defined} );
+
+ my @null_checks;
+ if ($collapse_on->{$rel}{-collapse_on}) {
+ @null_checks = map
+ { "(! defined '__VALPOS__${_}__')" }
+ ( grep
+ { ! $known_defined->{$_} }
+ ( sort
+ { $a <=> $b }
+ values %{$collapse_on->{$rel}{-collapse_on}}
+ )
+ )
+ ;
+ }
+
+ if (@null_checks) {
+ push @relperl, sprintf ( '(%s) ? () : ( %s => %s )',
+ join (' || ', @null_checks ),
+ $rel,
+ $rel_node,
+ );
+ }
+ else {
+ push @relperl, "$rel => $rel_node";
+ }
+ }
+ my $rels = @relperl
+ ? sprintf ('{ %s }', join (',', @relperl))
+ : 'undef'
+ ;
+
+ my $me = {
+ map { $_ => "__VALPOS__$my_cols->{$_}__" } (keys %$my_cols)
+ };
+
+    my $clps = undef; # funny thing, but this prevents a memory leak, I guess it's Data::Dumper's fault (mo)
+ $clps = [
+ map { "__VALPOS__${_}__" } ( sort { $a <=> $b } (values %{$collapse_on->{-collapse_on}}) )
+ ] if $collapse_on->{-collapse_on};
+
+ # we actually will be producing functional perl code here,
+ # thus no second-guessing of what these globals might have
+ # been set to. DO NOT CHANGE!
+ $visit_as_dumper ||= do {
+ require Data::Dumper;
+ Data::Dumper->new([])
+ ->Purity (1)
+ ->Pad ('')
+ ->Useqq (0)
+ ->Terse (1)
+ ->Quotekeys (1)
+ ->Deepcopy (1)
+ ->Deparse (0)
+ ->Maxdepth (0)
+ ->Indent (0)
+ };
+ for ($me, $clps) {
+ $_ = $visit_as_dumper->Values ([$_])->Dump;
+ }
+
+ unless ($collapse_on->{-collapse_on}) { # we are not collapsing, insert a definedness check on 'me'
+ $me = sprintf ( '(%s) ? %s : {}',
+ join (' || ', map { "( defined '__VALPOS__${_}__')" } (sort { $a <=> $b } values %$my_cols) ),
+ $me,
+ );
+ }
+
+ my @rv_list = ($me, $rels, $clps);
+ pop @rv_list while ($rv_list[-1] eq 'undef'); # strip trailing undefs
+
+ # change the quoted placeholders to unquoted alias-references
+ $_ =~ s/ \' __VALPOS__(\d+)__ \' /sprintf ('$_[0][%d]', $1)/gex
+ for grep { defined $_ } @rv_list;
+ return sprintf '[%s]', join (',', @rv_list);
+ }
+}
+
=head2 related_source
=over 4
my @pre_objects;
for my $me_pref (@pre_vals) {
-
- # FIXME - this should not be necessary
- # the collapser currently *could* return bogus elements with all
- # columns set to undef
- my $has_def;
- for (values %{$me_pref->[0]}) {
- if (defined $_) {
- $has_def++;
- last;
- }
- }
- next unless $has_def;
-
push @pre_objects, $pre_source->result_class->inflate_result(
$pre_source, @$me_pref
);
# see if we need to tear the prefetch apart otherwise delegate the limiting to the
# storage, unless software limit was requested
if (
- #limited has_many
- ( $attrs->{rows} && keys %{$attrs->{collapse}} )
+ # limited collapsing has_many
+ ( $attrs->{rows} && $attrs->{collapse} )
||
# grouped prefetch (to satisfy group_by == select)
( $attrs->{group_by}
# bring over all non-collapse-induced order_by into the inner query (if any)
# the outer one will have to keep them all
delete $inner_attrs->{order_by};
- if (my $ord_cnt = @{$outer_attrs->{order_by}} - @{$outer_attrs->{_collapse_order_by}} ) {
+ if (my $ord_cnt = @{$outer_attrs->{order_by}} - @{$outer_attrs->{_collapse_order_by}||[]} ) {
$inner_attrs->{order_by} = [
@{$outer_attrs->{order_by}}[ 0 .. $ord_cnt - 1]
];
use Test::More;
use Test::Exception;
+
use lib qw(t/lib);
use DBICTest;
use DBIC::SqlMakerTest;
my $schema = DBICTest->init_schema();
- {
- my $rs = $schema->resultset( 'CD' )->search(
- {
- 'producer.name' => 'blah',
- 'producer_2.name' => 'foo',
- },
- {
- 'join' => [
- { cd_to_producer => 'producer' },
- { cd_to_producer => 'producer' },
- ],
- 'prefetch' => [
- 'artist',
- { cd_to_producer => 'producer' },
- ],
- }
- );
-
- lives_ok {
- my @rows = $rs->all();
- };
- }
+lives_ok (sub {
+ my $rs = $schema->resultset( 'CD' )->search(
+ {
+ 'producer.name' => 'blah',
+ 'producer_2.name' => 'foo',
+ },
+ {
+ 'join' => [
+ { cd_to_producer => 'producer' },
+ { cd_to_producer => 'producer' },
+ ],
+ 'prefetch' => [
+ 'artist',
+ { cd_to_producer => { producer => 'producer_to_cd' } },
+ ],
+ }
+ );
+
+ my @executed = $rs->all();
+
+ is_same_sql_bind (
+ $rs->as_query,
+ '(
+ SELECT me.cdid, me.artist, me.title, me.year, me.genreid, me.single_track,
+ artist.artistid, artist.name, artist.rank, artist.charfield,
+ cd_to_producer.cd, cd_to_producer.producer, cd_to_producer.attribute,
+ producer.producerid, producer.name,
+ producer_to_cd.cd, producer_to_cd.producer, producer_to_cd.attribute
+ FROM cd me
+ LEFT JOIN cd_to_producer cd_to_producer
+ ON cd_to_producer.cd = me.cdid
+ LEFT JOIN producer producer
+ ON producer.producerid = cd_to_producer.producer
+ LEFT JOIN cd_to_producer producer_to_cd
+ ON producer_to_cd.producer = producer.producerid
+ LEFT JOIN cd_to_producer cd_to_producer_2
+ ON cd_to_producer_2.cd = me.cdid
+ LEFT JOIN producer producer_2
+ ON producer_2.producerid = cd_to_producer_2.producer
+ JOIN artist artist ON artist.artistid = me.artist
+ WHERE ( ( producer.name = ? AND producer_2.name = ? ) )
+ ORDER BY cd_to_producer.cd, producer_to_cd.producer
+ )',
+ [
+ [ 'producer.name' => 'blah' ],
+ [ 'producer_2.name' => 'foo' ],
+ ],
+ );
+}, 'Complex join parsed/executed properly');
my @rs1a_results = $schema->resultset("Artist")->search_related('cds', {title => 'Forkful of bees'}, {order_by => 'title'});
is($rs1a_results[0]->title, 'Forkful of bees', "bare field conditions okay after search related");
my @dbic_reltable = $dbic_obj->$col;
my @hashref_reltable = @{$datahashref->{$col}};
- is (scalar @dbic_reltable, scalar @hashref_reltable, 'number of related entries');
+ is (scalar @hashref_reltable, scalar @dbic_reltable, 'number of related entries');
# for my $index (0..scalar @hashref_reltable) {
for my $index (0..scalar @dbic_reltable) {
{ join_type => 'left'}
);
+# add a non-left single relationship for the complex prefetch tests
+__PACKAGE__->belongs_to( existing_single_track => 'DBICTest::Schema::Track', 'single_track');
+
__PACKAGE__->has_many( tracks => 'DBICTest::Schema::Track' );
__PACKAGE__->has_many(
tags => 'DBICTest::Schema::Tag', undef,
},
);
__PACKAGE__->set_primary_key('id');
+__PACKAGE__->add_unique_constraint ([qw/lyric_id text/]);
__PACKAGE__->belongs_to('lyric', 'DBICTest::Schema::Lyrics', 'lyric_id');
1;
--- /dev/null
+use strict;
+use warnings;
+
+use Data::Dumper;
+BEGIN { $Data::Dumper::Sortkeys = 1 }; # so we can compare the deparsed coderefs below
+
+use Test::More;
+use lib qw(t/lib);
+use DBICTest;
+use B::Deparse;
+
+
+my $schema = DBICTest->init_schema(no_deploy => 1);
+
+my ($as, $vals, @pairs);
+
+# artwork-artist deliberately mixed around
+@pairs = (
+ 'artwork_to_artist.artist_id' => '2',
+
+ 'cd_id' => '1',
+
+ 'artwork_to_artist.artwork_cd_id' => '1',
+
+ 'cd.artist' => '1',
+ 'cd.cdid' => '1',
+ 'cd.title' => 'Spoonful of bees',
+
+ 'cd.artist.artistid' => '7',
+ 'cd.artist.name' => 'Caterwauler McCrae',
+ 'artwork_to_artist.artist.name' => 'xenowhinycide',
+);
+while (@pairs) {
+ push @$as, shift @pairs;
+ push @$vals, shift @pairs;
+}
+
+my $parser = $schema->source ('Artwork')->_mk_row_parser($as, 'collapse requested');
+
+is_deeply (
+ $parser->($vals),
+ [
+ {
+ cd_id => 1,
+ },
+
+ {
+ artwork_to_artist => [
+ {
+ artist_id => 2,
+ artwork_cd_id => 1,
+ },
+ {
+ artist => [
+ {
+ name => 'xenowhinycide',
+ },
+ undef,
+ [ 2, 1 ], # inherited from artwork_to_artist (child-parent definition)
+ ],
+ },
+ [ 2, 1 ] # artwork_to_artist own data, in selection order
+ ],
+
+ cd => [
+ {
+ artist => 1,
+ cdid => 1,
+ title => 'Spoonful of bees',
+ },
+ {
+ artist => [
+ {
+ artistid => 7,
+ name => 'Caterwauler McCrae',
+ },
+ undef,
+ [ 7 ], # our own id
+ ]
+ },
+ [ 1 ], # our cdid fk
+ ]
+ },
+ [ 1 ], # our id
+ ],
+ 'generated row parser works as expected',
+);
+
+undef $_ for ($as, $vals);
+@pairs = (
+ 'name' => 'Caterwauler McCrae',
+ 'cds.tracks.cd' => '3',
+ 'cds.tracks.title' => 'Fowlin',
+ 'cds.tracks.cd_single.title' => 'Awesome single',
+);
+while (@pairs) {
+ push @$as, shift @pairs;
+ push @$vals, shift @pairs;
+}
+$parser = $schema->source ('Artist')->_mk_row_parser($as);
+
+is_deeply (
+ $parser->($vals),
+ [
+ {
+ name => 'Caterwauler McCrae'
+ },
+ {
+ cds => [
+ {},
+ {
+ tracks => [
+ {
+ cd => 3,
+ title => 'Fowlin'
+ },
+ {
+ cd_single => [
+ {
+ title => 'Awesome single',
+ },
+ ],
+ },
+ ]
+ }
+ ]
+ }
+ ],
+ 'generated parser works as expected over missing joins (no collapse)',
+);
+
+undef $_ for ($as, $vals);
+@pairs = (
+ 'tracks.lyrics.lyric_versions.text' => 'unique when combined with the lyric collapsable by the 1:1 tracks-parent',
+ 'existing_single_track.cd.artist.artistid' => 'artist_id (gives uniq. to its entire parent chain)',
+ 'existing_single_track.cd.artist.cds.year' => 'non-unique cds col (year)',
+ 'year' => 'non unique main year',
+ 'genreid' => 'non-unique/nullable main genid',
+ 'tracks.title' => 'non-unique title (missing multicol const. part)',
+ 'existing_single_track.cd.artist.cds.cdid' => 'cds unique id col to give uniquiness to ...tracks.title below',
+ 'latest_cd' => 'random function (not a colname)',
+ 'existing_single_track.cd.artist.cds.tracks.title' => 'unique track title (when combined with ...cds.cdid above)',
+ 'existing_single_track.cd.artist.cds.genreid' => 'nullable cds col (genreid)',
+);
+while (@pairs) {
+ push @$as, shift @pairs;
+ push @$vals, shift @pairs;
+}
+
+is_deeply (
+ $schema->source ('CD')->_resolve_collapse ( { map { $as->[$_] => $_ } (0 .. $#$as) } ),
+ {
+ -collapse_on => {
+ 'existing_single_track.cd.artist.artistid' => 1,
+ },
+
+ existing_single_track => {
+ -collapse_on => {
+ 'existing_single_track.cd.artist.artistid' => 1,
+ },
+
+ cd => {
+ -collapse_on => {
+ 'existing_single_track.cd.artist.artistid' => 1,
+ },
+
+ artist => {
+ -collapse_on => {
+ 'existing_single_track.cd.artist.artistid' => 1,
+ },
+
+ cds => {
+ -collapse_on => {
+ 'existing_single_track.cd.artist.cds.cdid' => 6,
+ },
+
+ tracks => {
+ -collapse_on => {
+ 'existing_single_track.cd.artist.cds.cdid' => 6,
+ 'existing_single_track.cd.artist.cds.tracks.title' => 8,
+ }
+ }
+ }
+ }
+ }
+ },
+ tracks => {
+ -collapse_on => {
+ 'existing_single_track.cd.artist.artistid' => 1,
+ 'tracks.title' => 5,
+ },
+
+ lyrics => {
+ -collapse_on => {
+ 'existing_single_track.cd.artist.artistid' => 1,
+ 'tracks.title' => 5,
+ },
+
+ lyric_versions => {
+ -collapse_on => {
+ 'existing_single_track.cd.artist.artistid' => 1,
+ 'tracks.title' => 5,
+ 'tracks.lyrics.lyric_versions.text' => 0,
+ },
+ },
+ },
+ }
+ },
+ 'Correct collapse map constructed',
+);
+
+
+$parser = $schema->source ('CD')->_mk_row_parser ($as, 'add collapse data');
+
+is_deeply (
+ $parser->($vals),
+ [
+ {
+ latest_cd => 'random function (not a colname)',
+ year => 'non unique main year',
+ genreid => 'non-unique/nullable main genid'
+ },
+ {
+ existing_single_track => [
+ {},
+ {
+ cd => [
+ {},
+ {
+ artist => [
+ { artistid => 'artist_id (gives uniq. to its entire parent chain)' },
+ {
+ cds => [
+ {
+ cdid => 'cds unique id col to give uniquiness to ...tracks.title below',
+ year => 'non-unique cds col (year)',
+ genreid => 'nullable cds col (genreid)'
+ },
+ {
+ tracks => [
+ {
+ title => 'unique track title (when combined with ...cds.cdid above)'
+ },
+ undef,
+ [
+ 'cds unique id col to give uniquiness to ...tracks.title below',
+ 'unique track title (when combined with ...cds.cdid above)',
+ ],
+ ]
+ },
+ [ 'cds unique id col to give uniquiness to ...tracks.title below' ],
+ ]
+ },
+ [ 'artist_id (gives uniq. to its entire parent chain)' ],
+ ]
+ },
+ [ 'artist_id (gives uniq. to its entire parent chain)' ],
+ ]
+ },
+ [ 'artist_id (gives uniq. to its entire parent chain)' ],
+ ],
+ tracks => [
+ {
+ title => 'non-unique title (missing multicol const. part)'
+ },
+ {
+ lyrics => [
+ {},
+ {
+ lyric_versions => [
+ {
+ text => 'unique when combined with the lyric collapsable by the 1:1 tracks-parent',
+ },
+ undef,
+ [
+ 'unique when combined with the lyric collapsable by the 1:1 tracks-parent',
+ 'artist_id (gives uniq. to its entire parent chain)',
+ 'non-unique title (missing multicol const. part)',
+ ],
+ ],
+ },
+ [
+ 'artist_id (gives uniq. to its entire parent chain)',
+ 'non-unique title (missing multicol const. part)',
+ ],
+ ],
+ },
+ [
+ 'artist_id (gives uniq. to its entire parent chain)',
+ 'non-unique title (missing multicol const. part)',
+ ],
+ ],
+ },
+ [ 'artist_id (gives uniq. to its entire parent chain)' ],
+ ],
+ 'Proper row parser constructed',
+);
+
+# For extra insanity test/showcase the parser's guts:
+my $deparser = B::Deparse->new;
+is (
+ $deparser->coderef2text ($parser),
+ $deparser->coderef2text ( sub { package DBIx::Class::ResultSource;
+ [
+ {
+ genreid => $_[0][4],
+ latest_cd => $_[0][7],
+ year => $_[0][3]
+ },
+ {
+
+ existing_single_track => [
+ {},
+ {
+ cd => [
+ {},
+ {
+ artist => [
+ {
+ artistid => $_[0][1]
+ },
+ {
+
+ !defined($_[0][6]) ? () : (
+ cds => [
+ {
+ cdid => $_[0][6],
+ genreid => $_[0][9],
+ year => $_[0][2]
+ },
+ {
+
+ !defined($_[0][8]) ? () : (
+ tracks => [
+ {
+ title => $_[0][8]
+ },
+ undef,
+ [ $_[0][6], $_[0][8] ]
+ ])
+
+ },
+ [ $_[0][6] ]
+ ]),
+
+ },
+ [ $_[0][1] ],
+ ],
+ },
+ [ $_[0][1] ],
+ ],
+ },
+ [ $_[0][1] ],
+ ],
+
+ !defined($_[0][5]) ? () : (
+ tracks => [
+ {
+ title => $_[0][5],
+ },
+ {
+
+ lyrics => [
+ {},
+ {
+
+ !defined($_[0][0]) ? () : (
+ lyric_versions => [
+ {
+ text => $_[0][0]
+ },
+ undef,
+ [ $_[0][0], $_[0][1], $_[0][5] ],
+ ]),
+
+ },
+ [ $_[0][1], $_[0][5] ],
+ ],
+
+ },
+ [ $_[0][1], $_[0][5] ],
+ ]),
+ },
+ [ $_[0][1] ],
+ ];
+ }),
+ 'Deparsed version of the parser coderef looks correct',
+);
+
+done_testing;
prefetch => [ qw/ cds / ],
order_by => [ { -desc => 'me.name' }, 'cds.title' ],
select => [qw/ me.name cds.title / ],
- }
+ },
);
is ($rs->count, 2, 'Correct number of collapsed artists');
}, 'explicit prefetch on a keyless object works');
+lives_ok ( sub {
+
+ my $rs = $schema->resultset('CD')->search(
+ {},
+ {
+ order_by => [ { -desc => 'me.year' } ],
+ }
+ );
+ my $years = [qw/ 2001 2001 1999 1998 1997/];
+
+ is_deeply (
+ [ $rs->search->get_column('me.year')->all ],
+ $years,
+ 'Expected years (at least one duplicate)',
+ );
+
+ my @cds_and_tracks;
+ for my $cd ($rs->all) {
+ my $data->{year} = $cd->year;
+ for my $tr ($cd->tracks->all) {
+ push @{$data->{tracks}}, { $tr->get_columns };
+ }
+ push @cds_and_tracks, $data;
+ }
+
+ my $pref_rs = $rs->search ({}, { columns => ['year'], prefetch => 'tracks' });
+
+ my @pref_cds_and_tracks;
+ for my $cd ($pref_rs->all) {
+ my $data = { $cd->get_columns };
+ for my $tr ($cd->tracks->all) {
+ push @{$data->{tracks}}, { $tr->get_columns };
+ }
+ push @pref_cds_and_tracks, $data;
+ }
+
+ is_deeply (
+ \@pref_cds_and_tracks,
+ \@cds_and_tracks,
+ 'Correct collapsing on non-unique primary object'
+ );
+
+ is_deeply (
+ [ $pref_rs->search ({}, { result_class => 'DBIx::Class::ResultClass::HashRefInflator' })->all ],
+ \@cds_and_tracks,
+ 'Correct HRI collapsing on non-unique primary object'
+ );
+
+}, 'weird collapse lives');
+
+
lives_ok(sub {
# test implicit prefetch as well
--- /dev/null
+use strict;
+use warnings;
+
+use Test::More;
+use Test::Exception;
+use lib qw(t/lib);
+use DBICTest;
+
+my $schema = DBICTest->init_schema();
+
+my $rs = $schema->resultset ('CD')->search ({}, {
+ join => [ 'tracks', { single_track => { cd => { artist => { cds => 'tracks' } } } } ],
+ collapse => 1,
+ columns => [
+ { 'year' => 'me.year' }, # non-unique
+ { 'genreid' => 'me.genreid' }, # nullable
+ { 'tracks.title' => 'tracks.title' }, # non-unique (no me.id)
+    { 'single_track.cd.artist.cds.cdid' => 'cds.cdid' },               # to give uniqueness to ...tracks.title below
+    { 'single_track.cd.artist.artistid' => 'artist.artistid' },        # uniquifies entire parental chain
+ { 'single_track.cd.artist.cds.year' => 'cds.year' }, # non-unique
+ { 'single_track.cd.artist.cds.genreid' => 'cds.genreid' }, # nullable
+ { 'single_track.cd.artist.cds.tracks.title' => 'tracks_2.title' }, # unique when combined with ...cds.cdid above
+ { 'latest_cd' => { max => 'cds.year' } }, # random function
+    { 'title' => 'me.title' },                                         # uniqueness for me
+    { 'artist' => 'me.artist' },                                       # uniqueness for me
+ ],
+ result_class => 'DBIx::Class::ResultClass::HashRefInflator',
+});
+
+use Data::Dumper::Concise;
+die Dumper [$rs->all];
+
+
use Test::More;
use lib qw(t/lib);
use DBICTest;
-use IO::File;
my $schema = DBICTest->init_schema();
my $sdebug = $schema->storage->debug;
-# once the following TODO is complete, remove the 2 warning tests immediately
-# after the TODO block
-# (the TODO block itself contains tests ensuring that the warns are removed)
-TODO: {
- local $TODO = 'Prefetch of multiple has_many rels at the same level (currently warn to protect the clueless git)';
+#( 1 -> M + M )
+my $cd_rs = $schema->resultset('CD')->search( { 'me.title' => 'Forkful of bees' } );
+my $pr_cd_rs = $cd_rs->search( {}, { prefetch => [qw/tracks tags/], } );
- #( 1 -> M + M )
- my $cd_rs = $schema->resultset('CD')->search ({ 'me.title' => 'Forkful of bees' });
- my $pr_cd_rs = $cd_rs->search ({}, {
- prefetch => [qw/tracks tags/],
- });
+my $tracks_rs = $cd_rs->first->tracks;
+my $tracks_count = $tracks_rs->count;
- my $tracks_rs = $cd_rs->first->tracks;
- my $tracks_count = $tracks_rs->count;
+my ( $pr_tracks_rs, $pr_tracks_count );
- my ($pr_tracks_rs, $pr_tracks_count);
+my $queries = 0;
+$schema->storage->debugcb( sub { $queries++ } );
+$schema->storage->debug(1);
- my $queries = 0;
- $schema->storage->debugcb(sub { $queries++ });
- $schema->storage->debug(1);
-
- my $o_mm_warn;
- {
- local $SIG{__WARN__} = sub { $o_mm_warn = shift };
- $pr_tracks_rs = $pr_cd_rs->first->tracks;
- };
- $pr_tracks_count = $pr_tracks_rs->count;
-
- ok(! $o_mm_warn, 'no warning on attempt to prefetch several same level has_many\'s (1 -> M + M)');
-
- is($queries, 1, 'prefetch one->(has_many,has_many) ran exactly 1 query');
- $schema->storage->debugcb (undef);
- $schema->storage->debug ($sdebug);
-
- is($pr_tracks_count, $tracks_count, 'equal count of prefetched relations over several same level has_many\'s (1 -> M + M)');
- is ($pr_tracks_rs->all, $tracks_rs->all, 'equal amount of objects returned with and without prefetch over several same level has_many\'s (1 -> M + M)');
-
- #( M -> 1 -> M + M )
- my $note_rs = $schema->resultset('LinerNotes')->search ({ notes => 'Buy Whiskey!' });
- my $pr_note_rs = $note_rs->search ({}, {
- prefetch => {
- cd => [qw/tracks tags/]
- },
- });
-
- my $tags_rs = $note_rs->first->cd->tags;
- my $tags_count = $tags_rs->count;
-
- my ($pr_tags_rs, $pr_tags_count);
-
- $queries = 0;
- $schema->storage->debugcb(sub { $queries++ });
- $schema->storage->debug(1);
-
- my $m_o_mm_warn;
- {
- local $SIG{__WARN__} = sub { $m_o_mm_warn = shift };
- $pr_tags_rs = $pr_note_rs->first->cd->tags;
- };
- $pr_tags_count = $pr_tags_rs->count;
-
- ok(! $m_o_mm_warn, 'no warning on attempt to prefetch several same level has_many\'s (M -> 1 -> M + M)');
-
- is($queries, 1, 'prefetch one->(has_many,has_many) ran exactly 1 query');
- $schema->storage->debugcb (undef);
- $schema->storage->debug ($sdebug);
-
- is($pr_tags_count, $tags_count, 'equal count of prefetched relations over several same level has_many\'s (M -> 1 -> M + M)');
- is($pr_tags_rs->all, $tags_rs->all, 'equal amount of objects with and without prefetch over several same level has_many\'s (M -> 1 -> M + M)');
-}
-
-# remove this closure once the TODO above is working
+my $o_mm_warn;
{
- my $warn_re = qr/will explode the number of row objects retrievable via/;
-
- my (@w, @dummy);
- local $SIG{__WARN__} = sub { $_[0] =~ $warn_re ? push @w, @_ : warn @_ };
-
- my $rs = $schema->resultset('CD')->search ({ 'me.title' => 'Forkful of bees' }, { prefetch => [qw/tracks tags/] });
- @w = ();
- @dummy = $rs->first;
- is (@w, 1, 'warning on attempt prefetching several same level has_manys (1 -> M + M)');
-
- my $rs2 = $schema->resultset('LinerNotes')->search ({ notes => 'Buy Whiskey!' }, { prefetch => { cd => [qw/tags tracks/] } });
- @w = ();
- @dummy = $rs2->first;
- is (@w, 1, 'warning on attempt prefetching several same level has_manys (M -> 1 -> M + M)');
-}
+ local $SIG{__WARN__} = sub { $o_mm_warn = shift };
+ $pr_tracks_rs = $pr_cd_rs->first->tracks;
+};
+$pr_tracks_count = $pr_tracks_rs->count;
+
+ok( !$o_mm_warn,
+'no warning on attempt to prefetch several same level has_many\'s (1 -> M + M)'
+);
+
+is( $queries, 1, 'prefetch one->(has_many,has_many) ran exactly 1 query' );
+$schema->storage->debugcb(undef);
+$schema->storage->debug($sdebug);
+
+is( $pr_tracks_count, $tracks_count,
+'equal count of prefetched relations over several same level has_many\'s (1 -> M + M)'
+);
+is( $pr_tracks_rs->all, $tracks_rs->all,
+'equal amount of objects returned with and without prefetch over several same level has_many\'s (1 -> M + M)'
+);
+
+#( M -> 1 -> M + M )
+my $note_rs =
+ $schema->resultset('LinerNotes')->search( { notes => 'Buy Whiskey!' } );
+my $pr_note_rs =
+ $note_rs->search( {}, { prefetch => { cd => [qw/tracks tags/] }, } );
+
+my $tags_rs = $note_rs->first->cd->tags;
+my $tags_count = $tags_rs->count;
+
+my ( $pr_tags_rs, $pr_tags_count );
+
+$queries = 0;
+$schema->storage->debugcb( sub { $queries++ } );
+$schema->storage->debug(1);
+
+my $m_o_mm_warn;
+{
+ local $SIG{__WARN__} = sub { $m_o_mm_warn = shift };
+ $pr_tags_rs = $pr_note_rs->first->cd->tags;
+};
+$pr_tags_count = $pr_tags_rs->count;
+
+ok( !$m_o_mm_warn,
+'no warning on attempt to prefetch several same level has_many\'s (M -> 1 -> M + M)'
+);
+
+is( $queries, 1, 'prefetch one->(has_many,has_many) ran exactly 1 query' );
+$schema->storage->debugcb(undef);
+$schema->storage->debug($sdebug);
+
+is( $pr_tags_count, $tags_count,
+'equal count of prefetched relations over several same level has_many\'s (M -> 1 -> M + M)'
+);
+is( $pr_tags_rs->all, $tags_rs->all,
+'equal amount of objects with and without prefetch over several same level has_many\'s (M -> 1 -> M + M)'
+);
done_testing;
--- /dev/null
+use strict;
+use warnings;
+
+use Test::More;
+use Test::Exception;
+use lib qw(t/lib);
+use DBICTest;
+
+my $schema = DBICTest->init_schema();
+
+my $mo_rs = $schema->resultset('Artist')->search(
+ { 'me.artistid' => 4 },
+ {
+ prefetch => [
+ {
+ cds => [
+ { tracks => { cd_single => 'tracks' } },
+ { cd_to_producer => 'producer' }
+ ]
+ },
+ { artwork_to_artist => 'artwork' }
+ ],
+
+ result_class => 'DBIx::Class::ResultClass::HashRefInflator',
+ }
+);
+
+
+$schema->resultset('Artist')->create(
+ {
+ name => 'mo',
+ rank => '1337',
+ cds => [
+ {
+ title => 'Song of a Foo',
+ year => '1999',
+ tracks => [
+ {
+ title => 'Foo Me Baby One More Time',
+ },
+ {
+ title => 'Foo Me Baby One More Time II',
+ },
+ {
+ title => 'Foo Me Baby One More Time III',
+ },
+ {
+ title => 'Foo Me Baby One More Time IV',
+ cd_single =>
+ { artist => 1, title => 'MO! Single', year => 2021, tracks => [
+ { title => 'singled out' }, { title => 'still alone' },
+ ] },
+ }
+ ],
+ cd_to_producer => [
+ { producer => { name => 'riba' } },
+ { producer => { name => 'sushi' } },
+ ]
+ },
+ {
+ title => 'Song of a Foo II',
+ year => '2002',
+ tracks => [
+ {
+ title => 'Quit Playing Games With My Heart',
+ },
+ {
+ title => 'Bar Foo',
+ },
+ {
+ title => 'Foo Bar',
+ cd_single =>
+ { artist => 2, title => 'MO! Single', year => 2020, tracks => [
+ { title => 'singled out' }, { title => 'still alone' },
+ ] },
+ }
+ ],
+ cd_to_producer => [
+ { producer => { name => 'riba' } },
+ { producer => { name => 'sushi' } },
+ ],
+ }
+ ],
+ artwork_to_artist =>
+ [ { artwork => { cd_id => 1 } }, { artwork => { cd_id => 2 } } ]
+ }
+);
+
+my $mo = $mo_rs->next;
+
+is( @{$mo->{cds}}, 2, 'two CDs' );
+
+is_deeply(
+ $mo,
+ {
+ 'cds' => [
+ {
+ 'single_track' => undef,
+ 'tracks' => [
+ {
+ 'small_dt' => undef,
+ 'cd' => '6',
+ 'position' => '1',
+ 'trackid' => '19',
+ 'title' => 'Foo Me Baby One More Time',
+ 'cd_single' => undef,
+ 'last_updated_on' => undef,
+ 'last_updated_at' => undef
+ },
+ {
+ 'small_dt' => undef,
+ 'cd' => '6',
+ 'position' => '2',
+ 'trackid' => '20',
+ 'title' => 'Foo Me Baby One More Time II',
+ 'cd_single' => undef,
+ 'last_updated_on' => undef,
+ 'last_updated_at' => undef
+ },
+ {
+ 'small_dt' => undef,
+ 'cd' => '6',
+ 'position' => '3',
+ 'trackid' => '21',
+ 'title' => 'Foo Me Baby One More Time III',
+ 'cd_single' => undef,
+ 'last_updated_on' => undef,
+ 'last_updated_at' => undef
+ },
+ {
+ 'small_dt' => undef,
+ 'cd' => '6',
+ 'position' => '4',
+ 'trackid' => '22',
+ 'title' => 'Foo Me Baby One More Time IV',
+ 'last_updated_on' => undef,
+ 'last_updated_at' => undef,
+ 'cd_single' => {
+ 'single_track' => '22',
+ 'artist' => '1',
+ 'cdid' => '7',
+ 'title' => 'MO! Single',
+ 'genreid' => undef,
+ 'year' => '2021',
+ 'tracks' => [
+ {
+ 'small_dt' => undef,
+ 'cd' => '7',
+ 'position' => '1',
+ 'title' => 'singled out',
+ 'trackid' => '23',
+ 'last_updated_at' => undef,
+ 'last_updated_on' => undef
+ },
+ {
+ 'small_dt' => undef,
+ 'cd' => '7',
+ 'position' => '2',
+ 'title' => 'still alone',
+ 'trackid' => '24',
+ 'last_updated_at' => undef,
+ 'last_updated_on' => undef
+ },
+ ],
+ },
+ }
+ ],
+ 'artist' => '4',
+ 'cdid' => '6',
+ 'cd_to_producer' => [
+ {
+ 'attribute' => undef,
+ 'cd' => '6',
+ 'producer' => {
+ 'name' => 'riba',
+ 'producerid' => '4'
+ }
+ },
+ {
+ 'attribute' => undef,
+ 'cd' => '6',
+ 'producer' => {
+ 'name' => 'sushi',
+ 'producerid' => '5'
+ }
+ }
+ ],
+ 'title' => 'Song of a Foo',
+ 'genreid' => undef,
+ 'year' => '1999'
+ },
+ {
+ 'single_track' => undef,
+ 'tracks' => [
+ # FIXME
+ # although the positional ordering is correct, SQLite seems to return
+ # the rows randomly if an ORDER BY is not supplied. Of course ordering
+ # by right side of prefetch joins is not yet possible, thus we just hope
+ # that the order is stable
+ {
+ 'small_dt' => undef,
+ 'cd' => '8',
+ 'position' => '2',
+ 'trackid' => '26',
+ 'title' => 'Bar Foo',
+ 'cd_single' => undef,
+ 'last_updated_on' => undef,
+ 'last_updated_at' => undef
+ },
+ {
+ 'small_dt' => undef,
+ 'cd' => '8',
+ 'position' => '1',
+ 'trackid' => '25',
+ 'title' => 'Quit Playing Games With My Heart',
+ 'last_updated_on' => undef,
+ 'last_updated_at' => undef,
+ 'cd_single' => undef,
+ },
+ {
+ 'small_dt' => undef,
+ 'cd' => '8',
+ 'position' => '3',
+ 'trackid' => '27',
+ 'title' => 'Foo Bar',
+ 'last_updated_on' => undef,
+ 'last_updated_at' => undef,
+ 'cd_single' => {
+ 'single_track' => '27',
+ 'artist' => '2',
+ 'cdid' => '9',
+ 'title' => 'MO! Single',
+ 'genreid' => undef,
+ 'year' => '2020',
+ 'tracks' => [
+ {
+ 'small_dt' => undef,
+ 'cd' => '9',
+ 'position' => '1',
+ 'title' => 'singled out',
+ 'trackid' => '28',
+ 'last_updated_at' => undef,
+ 'last_updated_on' => undef
+ },
+ {
+ 'small_dt' => undef,
+ 'cd' => '9',
+ 'position' => '2',
+ 'title' => 'still alone',
+ 'trackid' => '29',
+ 'last_updated_at' => undef,
+ 'last_updated_on' => undef
+ },
+ ],
+
+ },
+ },
+ ],
+ 'artist' => '4',
+ 'cdid' => '8',
+ 'cd_to_producer' => [
+ {
+ 'attribute' => undef,
+ 'cd' => '8',
+ 'producer' => {
+ 'name' => 'riba',
+ 'producerid' => '4'
+ }
+ },
+ {
+ 'attribute' => undef,
+ 'cd' => '8',
+ 'producer' => {
+ 'name' => 'sushi',
+ 'producerid' => '5'
+ }
+ }
+ ],
+ 'title' => 'Song of a Foo II',
+ 'genreid' => undef,
+ 'year' => '2002'
+ }
+ ],
+ 'artistid' => '4',
+ 'charfield' => undef,
+ 'name' => 'mo',
+ 'artwork_to_artist' => [
+ {
+ 'artwork' => { 'cd_id' => '1' },
+ 'artist_id' => '4',
+ 'artwork_cd_id' => '1'
+ },
+ {
+ 'artwork' => { 'cd_id' => '2' },
+ 'artist_id' => '4',
+ 'artwork_cd_id' => '2'
+ }
+ ],
+ 'rank' => '1337'
+ }
+);
+
+done_testing;