$schema->deploy;
my $rs = $schema->resultset ('Artist');
-
-my $hri_rs = $rs->search ({}, { result_class => 'DBIx::Class::ResultClass::HashRefInflator' } );
-
-#DB::enable_profile();
-#my @foo = $hri_rs->all;
-#DB::disable_profile();
-#exit;
+$rs->populate ([ map { { name => "Art_$_"} } (1 .. 10000) ]);
my $dbh = $schema->storage->dbh;
my $sql = sprintf ('SELECT %s FROM %s %s',
  join (',', @{$rs->_resolved_attrs->{select}} ),
  $rs->result_source->name,
  $rs->_resolved_attrs->{alias},
);
-for (1,10,20,50,200,2500,10000) {
- $rs->delete;
- $rs->populate ([ map { { name => "Art_$_"} } (1 .. $_) ]);
- print "\nRetrieval of $_ rows\n";
- bench();
-}
-
-sub bench {
- cmpthese(-3, {
- Cursor => sub { my @r = $rs->cursor->all },
- HRI => sub { my @r = $hri_rs->all },
- RowObj => sub { my @r = $rs->all },
- DBI_AoH => sub { my @r = @{ $dbh->selectall_arrayref ($sql, { Slice => {} }) } },
- DBI_AoA=> sub { my @r = @{ $dbh->selectall_arrayref ($sql) } },
- });
-}
+my $compdbi = sub {
+ my @r = $schema->storage->dbh->selectall_arrayref ('SELECT * FROM ' . ${$rs->as_query}->[0] )
+} if $rs->can ('as_query');
+
+cmpthese(-3, {
+ Cursor => sub { $rs->reset; my @r = $rs->cursor->all },
+ HRI => sub { $rs->reset; my @r = $rs->search ({}, { result_class => 'DBIx::Class::ResultClass::HashRefInflator' } )->all },
+ RowObj => sub { $rs->reset; my @r = $rs->all },
+ RawDBI => sub { my @r = $dbh->selectall_arrayref ($sql) },
+ $compdbi ? (CompDBI => $compdbi) : (),
+});
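# For reference, a self-contained version of the same comparison could look
# like the sketch below (assuming a hypothetical My::Schema class with an
# 'Artist' source carrying a 'name' column - these names are illustrative
# only and are not part of the patch):

use strict;
use warnings;
use Benchmark qw/cmpthese/;
use My::Schema;

my $bench_schema = My::Schema->connect('dbi:SQLite:dbname=:memory:');
$bench_schema->deploy;

my $bench_rs = $bench_schema->resultset('Artist');
$bench_rs->populate([ map { { name => "Art_$_" } } (1 .. 1000) ]);

cmpthese(-3, {
  row_objects => sub { my @r = $bench_rs->all },            # full DBIC row objects
  hashrefs    => sub {
    my @r = $bench_rs->search({}, {
      result_class => 'DBIx::Class::ResultClass::HashRefInflator',
    })->all;                                                # plain hashrefs, no object inflation
  },
  raw_cursor  => sub { my @r = $bench_rs->cursor->all },    # raw arrayrefs straight off the cursor
});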
# Run the query, passing the result_class since it should propagate for find
my $rs = $self->search ($final_cond, {result_class => $self->result_class, %$attrs});
- if ($rs->_resolved_attrs->{collapse}) {
+ if (keys %{$rs->_resolved_attrs->{collapse}}) {
my $row = $rs->next;
carp "Query returned more than one row" if $rs->next;
return $row;
my $attrs = $self->_resolved_attrs_copy;
- $self->throw_exception(
- 'single() can not be used on resultsets prefetching has_many. Use find( \%cond ) or next() instead'
- ) if $attrs->{collapse};
+ if (keys %{$attrs->{collapse}}) {
+ $self->throw_exception(
+ 'single() can not be used on resultsets prefetching has_many. Use find( \%cond ) or next() instead'
+ );
+ }
if ($where) {
if (defined $attrs->{where}) {
}
}
- my $data = [ $self->result_source->storage->select_single(
+ my @data = $self->result_source->storage->select_single(
$attrs->{from}, $attrs->{select},
$attrs->{where}, $attrs
- )];
- return undef unless @$data;
- $self->{stashed_rows} = [ $data ];
- $self->_construct_objects->[0];
+ );
+
+ return (@data ? ($self->_construct_object(@data))[0] : undef);
}
sub next {
my ($self) = @_;
-
if (my $cache = $self->get_cache) {
$self->{all_cache_position} ||= 0;
return $cache->[$self->{all_cache_position}++];
}
-
if ($self->{attrs}{cache}) {
delete $self->{pager};
$self->{all_cache_position} = 1;
return ($self->all)[0];
}
+ if ($self->{stashed_objects}) {
+ my $obj = shift(@{$self->{stashed_objects}});
+ delete $self->{stashed_objects} unless @{$self->{stashed_objects}};
+ return $obj;
+ }
+ my @row = (
+ exists $self->{stashed_row}
+ ? @{delete $self->{stashed_row}}
+ : $self->cursor->next
+ );
+ return undef unless (@row);
+ my ($row, @more) = $self->_construct_object(@row);
+ $self->{stashed_objects} = \@more if @more;
+ return $row;
+}
- return shift(@{$self->{stashed_objects}}) if @{ $self->{stashed_objects}||[] };
-
- $self->{stashed_objects} = $self->_construct_objects
- or return undef;
+sub _construct_object {
+ my ($self, @row) = @_;
- return shift @{$self->{stashed_objects}};
+ my $info = $self->_collapse_result($self->{_attrs}{as}, \@row)
+ or return ();
+ my @new = $self->result_class->inflate_result($self->result_source, @$info);
+ @new = $self->{_attrs}{record_filter}->(@new)
+ if exists $self->{_attrs}{record_filter};
+ return @new;
}
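# The $info arrayref handed to inflate_result() by _construct_object above has
# (roughly) the following shape - the column and relationship names here are
# made up purely for illustration:
#
#   [
#     { artistid => 1, name => 'Some Artist' },            # "me" columns
#     {
#       # single-type relationships map to one [ \%me, \%prefetch ] pair,
#       # has_many-type (collapsed) relationships map to an arrayref of them
#       cds => [
#         [ { cdid => 1, title => 'First CD'  }, {} ],
#         [ { cdid => 2, title => 'Second CD' }, {} ],
#       ],
#     },
#   ]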
-# Constructs as many objects as it can in one pass while respecting
-# cursor laziness. Several modes of operation:
-#
-# * Always builds everything present in @{$self->{stashed_rows}}
-# * If called with $fetch_all true - pulls everything off the cursor and
-# builds all objects in one pass
-# * If $self->_resolved_attrs->{collapse} is true, checks the order_by
-# and if the resultset is ordered properly by the left side:
-# * Fetches stuff off the cursor until the "master object" changes,
-# and saves the last extra row (if any) in @{$self->{stashed_rows}}
-# OR
-# * Just fetches, and collapses/constructs everything as if $fetch_all
-# was requested (there is no other way to collapse except for an
-# eager cursor)
-# * If no collapse is requested - just get the next row, construct and
-# return
-sub _construct_objects {
- my ($self, $fetch_all) = @_;
+sub _collapse_result {
+ my ($self, $as_proto, $row) = @_;
- my $rsrc = $self->result_source;
- my $attrs = $self->_resolved_attrs;
- my $cursor = $self->cursor;
-
- # this will be used as both initial raw-row collector AND as a RV of
- # _construct_objects. Not regrowing the array twice matters a lot...
- # a surprising amount actually
- my $rows = (delete $self->{stashed_rows}) || [];
- if ($fetch_all) {
- # FIXME SUBOPTIMAL - we can do better, cursor->next/all (well diff. methods) should return a ref
- $rows = [ @$rows, $cursor->all ];
- }
- elsif (!$attrs->{collapse}) {
- # FIXME SUBOPTIMAL - we can do better, cursor->next/all (well diff. methods) should return a ref
- push @$rows, do { my @r = $cursor->next; @r ? \@r : () }
- unless @$rows;
- }
- else {
- $attrs->{_ordered_for_collapse} ||= (!$attrs->{order_by}) ? undef : do {
- my $st = $rsrc->schema->storage;
- my @ord_cols = map
- { $_->[0] }
- ( $st->_extract_order_criteria($attrs->{order_by}) )
- ;
+ my @copy = @$row;
- my $colinfos = $st->_resolve_column_info($attrs->{from}, \@ord_cols);
+ # 'foo' => [ undef, 'foo' ]
+ # 'foo.bar' => [ 'foo', 'bar' ]
+ # 'foo.bar.baz' => [ 'foo.bar', 'baz' ]
- for (0 .. $#ord_cols) {
- if (
- ! $colinfos->{$ord_cols[$_]}
- or
- $colinfos->{$ord_cols[$_]}{-result_source} != $rsrc
- ) {
- splice @ord_cols, $_;
- last;
- }
- }
+ my @construct_as = map { [ (/^(?:(.*)\.)?([^.]+)$/) ] } @$as_proto;
- # since all we check here are the start of the order_by belonging to the
- # top level $rsrc, a present identifying set will mean that the resultset
- # is ordered by its leftmost table in a stable manner
- (@ord_cols and $rsrc->_identifying_column_set({ map
- { $colinfos->{$_}{-colname} => $colinfos->{$_} }
- @ord_cols
- })) ? 1 : 0;
- };
+ my %collapse = %{$self->{_attrs}{collapse}||{}};
- if ($attrs->{_ordered_for_collapse}) {
- push @$rows, do { my @r = $cursor->next; @r ? \@r : () };
- }
- # instead of looping over ->next, use ->all in stealth mode
- # FIXME - encapsulation breach, got to be a better way
- elsif (! $cursor->{done}) {
- push @$rows, $cursor->all;
- $cursor->{done} = 1;
- $fetch_all = 1;
+ my @pri_index;
+
+ # if we're doing collapsing (has_many prefetch) we need to grab records
+ # until the PK changes, so fill @pri_index. if not, we leave it empty so
+ # we know we don't have to bother.
+
+ # we don't use the collapse data directly because if you had e.g. two
+ # artists in a row with no cds, the collapse info for both would be NULL
+ # (undef) and you'd lose the second artist
+
+ # store just the index so we can check the array positions from the row
+ # without having to construct the full hash
+
+ if (keys %collapse) {
+ my %pri = map { ($_ => 1) } $self->result_source->_pri_cols;
+ foreach my $i (0 .. $#construct_as) {
+ next if defined($construct_as[$i][0]); # only self table
+ if (delete $pri{$construct_as[$i][1]}) {
+ push(@pri_index, $i);
+ }
+ last unless keys %pri; # short circuit (Johnny Five Is Alive!)
}
}
- return undef unless @$rows;
+ # no need to do an if, it'll be empty if @pri_index is empty anyway
- my $res_class = $self->result_class;
- my $inflator = $res_class->can ('inflate_result')
- or $self->throw_exception("Inflator $res_class does not provide an inflate_result() method");
+ my %pri_vals = map { ($_ => $copy[$_]) } @pri_index;
- my $infmap = $attrs->{as};
+ my @const_rows;
- if (!$attrs->{collapse} and $attrs->{_single_object_inflation}) {
- # construct a much simpler array->hash folder for the one-table cases right here
+ do { # no need to check anything at the front, we always want the first row
- # FIXME SUBOPTIMAL this is a very very very hot spot
- # while rather optimal we can *still* do much better, by
- # building a smarter [Row|HRI]::inflate_result(), and
- # switch to feeding it data via a much leaner interface
- #
- # crude unscientific benchmarking indicated the shortcut eval is not worth it for
- # this particular resultset size
- if (@$rows < 60) {
- my @as_idx = 0..$#$infmap;
- for my $r (@$rows) {
- $r = $inflator->($res_class, $rsrc, { map { $infmap->[$_] => $r->[$_] } @as_idx } );
- }
- }
- else {
- eval sprintf (
- '$_ = $inflator->($res_class, $rsrc, { %s }) for @$rows',
- join (', ', map { "\$infmap->[$_] => \$_->[$_]" } 0..$#$infmap )
- );
+ my %const;
+
+ foreach my $this_as (@construct_as) {
+ $const{$this_as->[0]||''}{$this_as->[1]} = shift(@copy);
}
- }
- else {
- ($self->{_row_parser} ||= eval sprintf 'sub { %s }', $rsrc->_mk_row_parser({
- inflate_map => $infmap,
- selection => $attrs->{select},
- collapse => $attrs->{collapse},
- }) or die $@)->($rows, $fetch_all ? () : (
- # FIXME SUBOPTIMAL - we can do better, cursor->next/all (well diff. methods) should return a ref
- sub { my @r = $cursor->next or return; \@r }, # how the collapser gets more rows
- ($self->{stashed_rows} = []), # where does it stuff excess
- )); # modify $rows in-place, shrinking/extending as necessary
-
- $_ = $inflator->($res_class, $rsrc, @$_) for @$rows;
- }
+ push(@const_rows, \%const);
+
+ } until ( # no pri_index => no collapse => drop straight out
+ !@pri_index
+ or
+ do { # get another row, stash it, drop out if different PK
+
+ @copy = $self->cursor->next;
+ $self->{stashed_row} = \@copy;
+
+ # last thing in do block, counts as true if anything doesn't match
- # CDBI compat stuff
- if ($attrs->{record_filter}) {
- $_ = $attrs->{record_filter}->($_) for @$rows;
+ # check xor defined first for NULL vs. NOT NULL then if one is
+ # defined the other must be so check string equality
+
+ grep {
+ (defined $pri_vals{$_} ^ defined $copy[$_])
+ || (defined $pri_vals{$_} && ($pri_vals{$_} ne $copy[$_]))
+ } @pri_index;
+ }
+ );
+
+ my $alias = $self->{attrs}{alias};
+ my $info = [];
+
+ my %collapse_pos;
+
+ my @const_keys;
+
+ foreach my $const (@const_rows) {
+ scalar @const_keys or do {
+ @const_keys = sort { length($a) <=> length($b) } keys %$const;
+ };
+ foreach my $key (@const_keys) {
+ if (length $key) {
+ my $target = $info;
+ my @parts = split(/\./, $key);
+ my $cur = '';
+ my $data = $const->{$key};
+ foreach my $p (@parts) {
+ $target = $target->[1]->{$p} ||= [];
+ $cur .= ".${p}";
+ if ($cur eq ".${key}" && (my @ckey = @{$collapse{$cur}||[]})) {
+ # collapsing at this point and on final part
+ my $pos = $collapse_pos{$cur};
+ CK: foreach my $ck (@ckey) {
+ if (!defined $pos->{$ck} || $pos->{$ck} ne $data->{$ck}) {
+ $collapse_pos{$cur} = $data;
+ delete @collapse_pos{ # clear all positioning for sub-entries
+ grep { m/^\Q${cur}.\E/ } keys %collapse_pos
+ };
+ push(@$target, []);
+ last CK;
+ }
+ }
+ }
+ if (exists $collapse{$cur}) {
+ $target = $target->[-1];
+ }
+ }
+ $target->[0] = $data;
+ } else {
+ $info->[0] = $const->{$key};
+ }
+ }
}
- return $rows;
+ return $info;
}
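# The until() condition above boils down to "keep pulling rows while the
# primary-key columns stay the same". A stripped-down, standalone illustration
# of that grouping idea (a hypothetical helper, not part of the DBIC API):
sub _group_adjacent_rows_by_pk {
  my ($rows, @pk_idx) = @_;        # $rows is an arrayref of arrayrefs
  my (@groups, $current, @cur_key);

  for my $row (@$rows) {
    my @key = @{$row}[@pk_idx];

    # start a new group whenever any PK column differs (NULL-aware comparison)
    if ( !$current
      or grep { (defined $cur_key[$_] xor defined $key[$_])
             or (defined $key[$_] and $cur_key[$_] ne $key[$_]) } 0 .. $#key
    ) {
      push @groups, ($current = []);
      @cur_key = @key;
    }

    push @$current, $row;
  }

  return \@groups;
}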
=head2 result_source
# this is a little optimization - it is faster to do the limit
# adjustments in software, instead of a subquery
- my ($rows, $offset) = delete @{$attrs}{qw/rows offset/};
+ my $rows = delete $attrs->{rows};
+ my $offset = delete $attrs->{offset};
my $crs;
if ($self->_has_resolved_attr (qw/collapse group_by/)) {
# overwrite the selector (supplied by the storage)
$tmp_attrs->{select} = $rsrc->storage->_count_select ($rsrc, $attrs);
$tmp_attrs->{as} = 'count';
+ delete @{$tmp_attrs}{qw/columns/};
my $tmp_rs = $rsrc->resultset_class->new($rsrc, $tmp_attrs)->get_column ('count');
# if we multi-prefetch we group_by something unique, as this is what we would
# get out of the rs via ->next/->all. We *DO WANT* to clobber old group_by regardless
- if ( $attrs->{collapse} ) {
+ if ( keys %{$attrs->{collapse}} ) {
$sub_attrs->{group_by} = [ map { "$attrs->{alias}.$_" } @{
$rsrc->_identifying_column_set || $self->throw_exception(
'Unable to construct a unique group_by criteria properly collapsing the '
sub all {
my $self = shift;
if(@_) {
- $self->throw_exception("all() doesn't take any arguments, you probably wanted ->search(...)->all()");
+ $self->throw_exception("all() doesn't take any arguments, you probably wanted ->search(...)->all()");
}
- delete @{$self}{qw/stashed_rows stashed_objects/};
-
- if (my $c = $self->get_cache) {
- return @$c;
+ return @{ $self->get_cache } if $self->get_cache;
+
+ my @obj;
+
+ if (keys %{$self->_resolved_attrs->{collapse}}) {
+ # Using $self->cursor->all is really just an optimisation.
+ # If we're collapsing has_many prefetches it probably makes
+ # very little difference, and this is cleaner than hacking
+ # _construct_object to survive the approach
+ $self->cursor->reset;
+ my @row = $self->cursor->next;
+ while (@row) {
+ push(@obj, $self->_construct_object(@row));
+ @row = (exists $self->{stashed_row}
+ ? @{delete $self->{stashed_row}}
+ : $self->cursor->next);
+ }
+ } else {
+ @obj = map { $self->_construct_object(@$_) } $self->cursor->all;
}
- $self->cursor->reset;
-
- my $objs = $self->_construct_objects('fetch_all') || [];
-
- $self->set_cache($objs) if $self->{attrs}{cache};
+ $self->set_cache(\@obj) if $self->{attrs}{cache};
- return @$objs;
+ return @obj;
}
=head2 reset
sub reset {
my ($self) = @_;
-
- delete @{$self}{qw/_attrs stashed_rows stashed_objects/};
-
+ delete $self->{_attrs} if exists $self->{_attrs};
$self->{all_cache_position} = 0;
$self->cursor->reset;
return $self;
my $existing_group_by = delete $attrs->{group_by};
# make a new $rs selecting only the PKs (that's all we really need for the subq)
- delete @{$attrs}{qw/collapse select _prefetch_selector_range as/};
+ delete $attrs->{$_} for qw/collapse _collapse_order_by select _prefetch_selector_range as/;
$attrs->{columns} = [ map { "$attrs->{alias}.$_" } @$idcols ];
$attrs->{group_by} = \ ''; # FIXME - this is an evil hack, it causes the optimiser to kick in and throw away the LEFT joins
my $subrs = (ref $self)->new($rsrc, $attrs);
# throw away the paging flags and re-run the count (possibly
# with a subselect) to get the real total count
my $count_attrs = { %$attrs };
- delete @{$count_attrs}{qw/rows offset page pager/};
+ delete $count_attrs->{$_} for qw/rows offset page pager/;
my $total_rs = (ref $self)->new($self->result_source, $count_attrs);
if (my $cache = $self->get_cache) {
if ($cache->[0] && $cache->[0]->related_resultset($rel)->get_cache) {
- $new_cache = [ map { @{$_->related_resultset($rel)->get_cache||[]} }
+ $new_cache = [ map { @{$_->related_resultset($rel)->get_cache} }
@$cache ];
}
}
if $attrs->{select};
# assume all unqualified selectors to apply to the current alias (legacy stuff)
- $_ = (ref $_ or $_ =~ /\./) ? $_ : "$alias.$_" for @sel;
+ for (@sel) {
+ $_ = (ref $_ or $_ =~ /\./) ? $_ : "$alias.$_";
+ }
- # disqualify all $alias.col as-bits (inflate-map mandated)
- $_ = ($_ =~ /^\Q$alias.\E(.+)$/) ? $1 : $_ for @as;
+ # disqualify all $alias.col as-bits (collapser mandated)
+ for (@as) {
+ $_ = ($_ =~ /^\Q$alias.\E(.+)$/) ? $1 : $_;
+ }
# de-duplicate the result (remove *identical* select/as pairs)
# and also die on duplicate {as} pointing to different {select}s
}
}
- # generate selections based on the prefetch helper
- my $prefetch;
- $prefetch = $self->_merge_joinpref_attr( {}, delete $attrs->{prefetch} )
- if defined $attrs->{prefetch};
-
- if ($prefetch) {
+ $attrs->{collapse} ||= {};
+ if ($attrs->{prefetch}) {
$self->throw_exception("Unable to prefetch, resultset contains an unnamed selector $attrs->{_dark_selector}{string}")
if $attrs->{_dark_selector};
- $attrs->{collapse} = 1;
+ my $prefetch = $self->_merge_joinpref_attr( {}, delete $attrs->{prefetch} );
+
+ my $prefetch_ordering = [];
# this is a separate structure (we don't look in {from} directly)
# as the resolver needs to shift things off the lists to work
}
}
- my @prefetch = $source->_resolve_prefetch( $prefetch, $alias, $join_map );
+ my @prefetch =
+ $source->_resolve_prefetch( $prefetch, $alias, $join_map, $prefetch_ordering, $attrs->{collapse} );
# we need to somehow mark which columns came from prefetch
if (@prefetch) {
push @{ $attrs->{select} }, (map { $_->[0] } @prefetch);
push @{ $attrs->{as} }, (map { $_->[1] } @prefetch);
- }
-
- $attrs->{_single_object_inflation} = ! List::Util::first { $_ =~ /\./ } @{$attrs->{as}};
-
- # run through the resulting joinstructure (starting from our current slot)
- # and unset collapse if proven unnecessary
- if ($attrs->{collapse} && ref $attrs->{from} eq 'ARRAY') {
-
- if (@{$attrs->{from}} > 1) {
-
- # find where our table-spec starts and consider only things after us
- my @fromlist = @{$attrs->{from}};
- while (@fromlist) {
- my $t = shift @fromlist;
- $t = $t->[0] if ref $t eq 'ARRAY'; #me vs join from-spec mismatch
- last if ($t->{-alias} && $t->{-alias} eq $alias);
- }
-
- for (@fromlist) {
- $attrs->{collapse} = ! $_->[0]{-is_single}
- and last;
- }
- }
- else {
- # no joins - no collapse
- $attrs->{collapse} = 0;
- }
- }
- if (! $attrs->{order_by} and $attrs->{collapse}) {
- # default order for collapsing unless the user asked for something
- $attrs->{order_by} = [ map { "$alias.$_" } $source->primary_columns ];
- $attrs->{_ordered_for_collapse} = 1;
- $attrs->{_order_is_artificial} = 1;
+ push( @{$attrs->{order_by}}, @$prefetch_ordering );
+ $attrs->{_collapse_order_by} = \@$prefetch_ordering;
}
# if both page and offset are specified, produce a combined offset
my $to_serialize = { %$self };
# A cursor in progress can't be serialized (and would make little sense anyway)
- # the parser can be regenerated (and can't be serialized)
- delete @{$to_serialize}{qw/cursor _row_parser/};
+ delete $to_serialize->{cursor};
# nor is it sensical to store a not-yet-fired-count pager
if ($to_serialize->{pager} and ref $to_serialize->{pager}{total_entries} eq 'CODE') {
# {collapse} would mean a has_many join was injected, which in turn means
# we need to group *IF WE CAN* (only if the column in question is unique)
- if (!$orig_attrs->{group_by} && $orig_attrs->{collapse}) {
+ if (!$orig_attrs->{group_by} && keys %{$orig_attrs->{collapse}}) {
if ($colmap->{$select} and $rsrc->_identifying_column_set([$colmap->{$select}])) {
$new_attrs->{group_by} = [ $select ];
use strict;
use warnings;
-use base qw/DBIx::Class::ResultSource::RowParser DBIx::Class/;
-
use DBIx::Class::ResultSet;
use DBIx::Class::ResultSourceHandle;
use Try::Tiny;
use List::Util 'first';
use Scalar::Util qw/blessed weaken isweak/;
-
use namespace::clean;
+use base qw/DBIx::Class/;
+
__PACKAGE__->mk_group_accessors(simple => qw/
source_name name source_info
_ordered_columns _columns _primaries _unique_constraints
,
-join_path => [@$jpath, { $join => $as } ],
-is_single => (
- (! $rel_info->{attrs}{accessor})
- or
+ $rel_info->{attrs}{accessor}
+ &&
first { $rel_info->{attrs}{accessor} eq $_ } (qw/single filter/)
),
-alias => $as,
}
}
+# Accepts one or more relationships for the current source and returns an
+# array of column names for each of those relationships. Column names are
+# prefixed relative to the current source, in accordance with where they appear
+# in the supplied relationships.
+sub _resolve_prefetch {
+ my ($self, $pre, $alias, $alias_map, $order, $collapse, $pref_path) = @_;
+ $pref_path ||= [];
+
+ if (not defined $pre or not length $pre) {
+ return ();
+ }
+ elsif( ref $pre eq 'ARRAY' ) {
+ return
+ map { $self->_resolve_prefetch( $_, $alias, $alias_map, $order, $collapse, [ @$pref_path ] ) }
+ @$pre;
+ }
+ elsif( ref $pre eq 'HASH' ) {
+ my @ret =
+ map {
+ $self->_resolve_prefetch($_, $alias, $alias_map, $order, $collapse, [ @$pref_path ] ),
+ $self->related_source($_)->_resolve_prefetch(
+ $pre->{$_}, "${alias}.$_", $alias_map, $order, $collapse, [ @$pref_path, $_] )
+ } keys %$pre;
+ return @ret;
+ }
+ elsif( ref $pre ) {
+ $self->throw_exception(
+ "don't know how to resolve prefetch reftype ".ref($pre));
+ }
+ else {
+ my $p = $alias_map;
+ $p = $p->{$_} for (@$pref_path, $pre);
+
+ $self->throw_exception (
+ "Unable to resolve prefetch '$pre' - join alias map does not contain an entry for path: "
+ . join (' -> ', @$pref_path, $pre)
+ ) if (ref $p->{-join_aliases} ne 'ARRAY' or not @{$p->{-join_aliases}} );
+
+ my $as = shift @{$p->{-join_aliases}};
+
+ my $rel_info = $self->relationship_info( $pre );
+ $self->throw_exception( $self->source_name . " has no such relationship '$pre'" )
+ unless $rel_info;
+ my $as_prefix = ($alias =~ /^.*?\.(.+)$/ ? $1.'.' : '');
+ my $rel_source = $self->related_source($pre);
+
+ if ($rel_info->{attrs}{accessor} && $rel_info->{attrs}{accessor} eq 'multi') {
+ $self->throw_exception(
+ "Can't prefetch has_many ${pre} (join cond too complex)")
+ unless ref($rel_info->{cond}) eq 'HASH';
+ my $dots = @{[$as_prefix =~ m/\./g]} + 1; # +1 to match the ".${as_prefix}"
+
+ if (my ($fail) = grep { @{[$_ =~ m/\./g]} == $dots }
+ keys %{$collapse}) {
+ my ($last) = ($fail =~ /([^\.]+)$/);
+ carp (
+ "Prefetching multiple has_many rels ${last} and ${pre} "
+ .(length($as_prefix)
+ ? "at the same level (${as_prefix}) "
+ : "at top level "
+ )
+ . 'will explode the number of row objects retrievable via ->next or ->all. '
+ . 'Use at your own risk.'
+ );
+ }
+
+ #my @col = map { (/^self\.(.+)$/ ? ("${as_prefix}.$1") : ()); }
+ # values %{$rel_info->{cond}};
+ $collapse->{".${as_prefix}${pre}"} = [ $rel_source->_pri_cols ];
+ # action at a distance. prepending the '.' allows simpler code
+ # in ResultSet->_collapse_result
+ my @key = map { (/^foreign\.(.+)$/ ? ($1) : ()); }
+ keys %{$rel_info->{cond}};
+ push @$order, map { "${as}.$_" } @key;
+
+ if (my $rel_order = $rel_info->{attrs}{order_by}) {
+ # this is kludgy and incomplete, I am well aware
+ # but the parent method is going away entirely anyway
+ # so sod it
+ my $sql_maker = $self->storage->sql_maker;
+ my ($orig_ql, $orig_qr) = $sql_maker->_quote_chars;
+ my $sep = $sql_maker->name_sep;
+
+ # install our own quoter, so we can catch unqualified stuff
+ local $sql_maker->{quote_char} = ["\x00", "\xFF"];
+
+ my $quoted_prefix = "\x00${as}\xFF";
+
+ for my $chunk ( $sql_maker->_order_by_chunks ($rel_order) ) {
+ my @bind;
+ ($chunk, @bind) = @$chunk if ref $chunk;
+
+ $chunk = "${quoted_prefix}${sep}${chunk}"
+ unless $chunk =~ /\Q$sep/;
+
+ $chunk =~ s/\x00/$orig_ql/g;
+ $chunk =~ s/\xFF/$orig_qr/g;
+ push @$order, \[$chunk, @bind];
+ }
+ }
+ }
+
+ return map { [ "${as}.$_", "${as_prefix}${pre}.$_", ] }
+ $rel_source->columns;
+ }
+}
+
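# For context, the user-facing feature served by the resolver above is a
# has_many prefetch such as the one below (assuming a connected $schema;
# the 'Artist', 'cds' and 'tracks' names are illustrative only):
my $artists = $schema->resultset('Artist')->search(
  { 'cds.year' => { '>=' => 2000 } },
  {
    prefetch => { cds => 'tracks' },  # joins and selects Artist -> CD -> Track in one query
    order_by => 'me.artistid',
  },
);

while (my $artist = $artists->next) {
  for my $cd ($artist->cds->all) {    # no further queries - the rows were prefetched
    printf "%s - %s (%d tracks)\n",
      $artist->name, $cd->title, $cd->tracks->count;
  }
}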
=head2 related_source
=over 4
+++ /dev/null
-package # hide from the pauses
- DBIx::Class::ResultSource::RowParser;
-
-use strict;
-use warnings;
-
-use Try::Tiny;
-use List::Util 'first';
-use B 'perlstring';
-
-use namespace::clean;
-
-use base 'DBIx::Class';
-
-# Accepts one or more relationships for the current source and returns an
-# array of column names for each of those relationships. Column names are
-# prefixed relative to the current source, in accordance with where they appear
-# in the supplied relationships.
-sub _resolve_prefetch {
- my ($self, $pre, $alias, $alias_map, $order, $pref_path) = @_;
- $pref_path ||= [];
-
- if (not defined $pre or not length $pre) {
- return ();
- }
- elsif( ref $pre eq 'ARRAY' ) {
- return
- map { $self->_resolve_prefetch( $_, $alias, $alias_map, $order, [ @$pref_path ] ) }
- @$pre;
- }
- elsif( ref $pre eq 'HASH' ) {
- my @ret =
- map {
- $self->_resolve_prefetch($_, $alias, $alias_map, $order, [ @$pref_path ] ),
- $self->related_source($_)->_resolve_prefetch(
- $pre->{$_}, "${alias}.$_", $alias_map, $order, [ @$pref_path, $_] )
- } keys %$pre;
- return @ret;
- }
- elsif( ref $pre ) {
- $self->throw_exception(
- "don't know how to resolve prefetch reftype ".ref($pre));
- }
- else {
- my $p = $alias_map;
- $p = $p->{$_} for (@$pref_path, $pre);
-
- $self->throw_exception (
- "Unable to resolve prefetch '$pre' - join alias map does not contain an entry for path: "
- . join (' -> ', @$pref_path, $pre)
- ) if (ref $p->{-join_aliases} ne 'ARRAY' or not @{$p->{-join_aliases}} );
-
- my $as = shift @{$p->{-join_aliases}};
-
- my $rel_info = $self->relationship_info( $pre );
- $self->throw_exception( $self->source_name . " has no such relationship '$pre'" )
- unless $rel_info;
-
- my $as_prefix = ($alias =~ /^.*?\.(.+)$/ ? $1.'.' : '');
-
- return map { [ "${as}.$_", "${as_prefix}${pre}.$_", ] }
- $self->related_source($pre)->columns;
- }
-}
-
-# Takes an arrayref selection list and generates a collapse-map representing
-# row-object fold-points. Every relationship is assigned a set of unique,
-# non-nullable columns (which may *not even be* from the same resultset)
-# and the collapser will use this information to correctly distinguish
-# data of individual to-be-row-objects. See t/resultset/rowparser_internals.t
-# for extensive RV examples
-sub _resolve_collapse {
- my ($self, $as, $as_fq_idx, $rel_chain, $parent_info, $node_idx_ref) = @_;
-
- # for comprehensible error messages put ourselves at the head of the relationship chain
- $rel_chain ||= [ $self->source_name ];
-
- # record top-level fully-qualified column index
- $as_fq_idx ||= { %$as };
-
- my ($my_cols, $rel_cols);
- for (keys %$as) {
- if ($_ =~ /^ ([^\.]+) \. (.+) /x) {
- $rel_cols->{$1}{$2} = 1;
- }
- else {
- $my_cols->{$_} = {}; # important for ||= below
- }
- }
-
- my $relinfo;
- # run through relationships, collect metadata, inject non-left fk-bridges from
- # *INNER-JOINED* children (if any)
- for my $rel (keys %$rel_cols) {
- my $rel_src = __get_related_source($self, $rel, $rel_cols->{$rel});
-
- my $inf = $self->relationship_info ($rel);
-
- $relinfo->{$rel}{is_single} = $inf->{attrs}{accessor} && $inf->{attrs}{accessor} ne 'multi';
- $relinfo->{$rel}{is_inner} = ( $inf->{attrs}{join_type} || '' ) !~ /^left/i;
- $relinfo->{$rel}{rsrc} = $rel_src;
-
- my $cond = $inf->{cond};
-
- if (
- ref $cond eq 'HASH'
- and
- keys %$cond
- and
- ! first { $_ !~ /^foreign\./ } (keys %$cond)
- and
- ! first { $_ !~ /^self\./ } (values %$cond)
- ) {
- for my $f (keys %$cond) {
- my $s = $cond->{$f};
- $_ =~ s/^ (?: foreign | self ) \.//x for ($f, $s);
- $relinfo->{$rel}{fk_map}{$s} = $f;
-
- # need to know source from *our* pov, hence $rel.
- $my_cols->{$s} ||= { via_fk => "$rel.$f" } if (
- defined $rel_cols->{$rel}{$f} # in fact selected
- and
- $relinfo->{$rel}{is_inner}
- );
- }
- }
- }
-
- # if the parent is already defined, assume all of its related FKs are selected
- # (even if they in fact are NOT in the select list). Keep a record of what we
- # assumed, and if any such phantom-column becomes part of our own collapser,
- # throw everything assumed-from-parent away and replace with the collapser of
- # the parent (whatever it may be)
- my $assumed_from_parent;
- unless ($parent_info->{underdefined}) {
- $assumed_from_parent->{columns} = { map
- # only add to the list if we do not already select said columns
- { ! exists $my_cols->{$_} ? ( $_ => 1 ) : () }
- values %{$parent_info->{rel_condition} || {}}
- };
-
- $my_cols->{$_} = { via_collapse => $parent_info->{collapse_on} }
- for keys %{$assumed_from_parent->{columns}};
- }
-
- # get colinfo for everything
- if ($my_cols) {
- my $ci = $self->columns_info;
- $my_cols->{$_}{colinfo} = $ci->{$_} for keys %$my_cols;
- }
-
- my $collapse_map;
-
- # try to resolve based on our columns (plus already inserted FK bridges)
- if (
- $my_cols
- and
- my $idset = $self->_identifying_column_set ({map { $_ => $my_cols->{$_}{colinfo} } keys %$my_cols})
- ) {
- # see if the resulting collapser relies on any implied columns,
- # and fix stuff up if this is the case
- my @reduced_set = grep { ! $assumed_from_parent->{columns}{$_} } @$idset;
-
- $collapse_map->{-node_id} = __unique_numlist(
- (@reduced_set != @$idset) ? @{$parent_info->{collapse_on}} : (),
- (map
- {
- my $fqc = join ('.',
- @{$rel_chain}[1 .. $#$rel_chain],
- ( $my_cols->{$_}{via_fk} || $_ ),
- );
-
- $as_fq_idx->{$fqc};
- }
- @reduced_set
- ),
- );
- }
-
- # Still don't know how to collapse - keep descending down 1:1 chains - if
- # a related non-LEFT 1:1 is resolvable - its condition will collapse us
- # too
- unless ($collapse_map->{-node_id}) {
- my @candidates;
-
- for my $rel (keys %$relinfo) {
- next unless ($relinfo->{$rel}{is_single} && $relinfo->{$rel}{is_inner});
-
- if ( my $rel_collapse = $relinfo->{$rel}{rsrc}->_resolve_collapse (
- $rel_cols->{$rel},
- $as_fq_idx,
- [ @$rel_chain, $rel ],
- { underdefined => 1 }
- )) {
- push @candidates, $rel_collapse->{-node_id};
- }
- }
-
- # get the set with least amount of columns
- # FIXME - maybe need to implement a data type order as well (i.e. prefer several ints
- # to a single varchar)
- if (@candidates) {
- ($collapse_map->{-node_id}) = sort { scalar @$a <=> scalar @$b } (@candidates);
- }
- }
-
- # Still don't know how to collapse - see if the parent passed us anything
- # (i.e. reuse collapser over 1:1)
- unless ($collapse_map->{-node_id}) {
- $collapse_map->{-node_id} = $parent_info->{collapse_on}
- if $parent_info->{collapser_reusable};
- }
-
- # stop descending into children if we were called by a parent for first-pass
- # and don't despair if nothing was found (there may be other parallel branches
- # to dive into)
- if ($parent_info->{underdefined}) {
- return $collapse_map->{-node_id} ? $collapse_map : undef
- }
- # nothing down the chain resolved - can't calculate a collapse-map
- elsif (! $collapse_map->{-node_id}) {
- $self->throw_exception ( sprintf
- "Unable to calculate a definitive collapse column set for %s%s: fetch more unique non-nullable columns",
- $self->source_name,
- @$rel_chain > 1
- ? sprintf (' (last member of the %s chain)', join ' -> ', @$rel_chain )
- : ''
- ,
- );
- }
-
- # If we got that far - we are collapsable - GREAT! Now go down all children
- # a second time, and fill in the rest
-
- $collapse_map->{-is_optional} = 1 if $parent_info->{is_optional};
- $collapse_map->{-node_index} = ${ $node_idx_ref ||= \do { my $x = 1 } }++; # this is *deliberately* not 0-based
-
- my (@id_sets, $multis_in_chain);
- for my $rel (sort keys %$relinfo) {
-
- $collapse_map->{$rel} = $relinfo->{$rel}{rsrc}->_resolve_collapse (
- { map { $_ => 1 } ( keys %{$rel_cols->{$rel}} ) },
-
- $as_fq_idx,
-
- [ @$rel_chain, $rel],
-
- {
- collapse_on => [ @{$collapse_map->{-node_id}} ],
-
- rel_condition => $relinfo->{$rel}{fk_map},
-
- is_optional => $collapse_map->{-is_optional},
-
- # if this is a 1:1 our own collapser can be used as a collapse-map
- # (regardless of left or not)
- collapser_reusable => $relinfo->{$rel}{is_single},
- },
-
- $node_idx_ref,
- );
-
- $collapse_map->{$rel}{-is_single} = 1 if $relinfo->{$rel}{is_single};
- $collapse_map->{$rel}{-is_optional} ||= 1 unless $relinfo->{$rel}{is_inner};
- push @id_sets, @{ $collapse_map->{$rel}{-branch_id} };
- }
-
- $collapse_map->{-branch_id} = __unique_numlist( @id_sets, @{$collapse_map->{-node_id}} );
-
- return $collapse_map;
-}
-
-# Takes an arrayref of {as} dbic column aliases and the collapse and select
-# attributes from the same $rs (the selector requirement is a temporary
-# workaround... I hope), and returns a coderef capable of:
-# my $me_pref_clps = $coderef->([$rs->cursor->next/all])
-# Where the $me_pref_clps arrayref is the future argument to inflate_result()
-#
-# For an example of this coderef in action (and to see its guts) look at
-# t/resultset/rowparser_internals.t
-#
-# This is a huge performance win, as we call the same code for every row
-# returned from the db, thus avoiding repeated method lookups when traversing
-# relationships
-#
-# Also since the coderef is completely stateless (the returned structure is
-# always fresh on every new invocation) this is a very good opportunity for
-# memoization if further speed improvements are needed
-#
-# The way we construct this coderef is somewhat fugly, although the result is
-# really worth it. The final coderef does not perform any kind of recursion -
-# the entire nested structure constructor is rolled out into a single scope.
-#
-# In any case - the output of this thing is meticulously micro-tested, so
-# any sort of adjustment/rewrite should be relatively easy (fsvo relatively)
-#
-sub _mk_row_parser {
- my ($self, $args) = @_;
-
- my $inflate_index = { map
- { $args->{inflate_map}[$_] => $_ }
- ( 0 .. $#{$args->{inflate_map}} )
- };
-
- my $parser_src;
-
- # the non-collapsing assembler is easy
- # FIXME SUBOPTIMAL there could be a yet faster way to do things here, but
- # need to try an actual implementation and benchmark it:
- #
- # <timbunce_> First setup the nested data structure you want for each row
- # Then call bind_col() to alias the row fields into the right place in
- # the data structure, then to fetch the data do:
- # push @rows, dclone($row_data_struct) while ($sth->fetchrow);
- #
- if (!$args->{collapse}) {
- $parser_src = sprintf('$_ = %s for @{$_[0]}', __visit_infmap_simple(
- $inflate_index,
- { rsrc => $self }, # need the $rsrc to sanity-check inflation map once
- ));
-
- # change the quoted placeholders to unquoted alias-references
- $parser_src =~ s/ \' \xFF__VALPOS__(\d+)__\xFF \' /"\$_->[$1]"/gex;
- }
-
- # the collapsing parser is more complicated - it needs to keep a lot of state
- #
- else {
-
- my $collapse_map = $self->_resolve_collapse (
- # FIXME
- # only consider real columns (not functions) during collapse resolution
- # this check shouldn't really be here, as fucktards are not supposed to
- # alias random crap to existing column names anyway, but still - just in
- # case
- # FIXME !!!! - this does not yet deal with unbalanced selectors correctly
- # (it is now trivial as the attrs specify where things go out of sync
- # needs MOAR tests)
- { map
- { ref $args->{selection}[$inflate_index->{$_}] ? () : ( $_ => $inflate_index->{$_} ) }
- keys %$inflate_index
- }
- );
-
- my $top_branch_idx_list = join (', ', @{$collapse_map->{-branch_id}});
-
- my $top_node_id_path = join ('', map
- { "{'\xFF__IDVALPOS__${_}__\xFF'}" }
- @{$collapse_map->{-node_id}}
- );
-
- my $rel_assemblers = __visit_infmap_collapse (
- $inflate_index, $collapse_map
- );
-
- $parser_src = sprintf (<<'EOS', $top_branch_idx_list, $top_node_id_path, $rel_assemblers);
-### BEGIN STRING EVAL
-
- my ($rows_pos, $result_pos, $cur_row, @cur_row_ids, @collapse_idx, $is_new_res) = (0,0);
-
- # this loop is a bit arcane - the rationale is that the passed in
- # $_[0] will either have only one row (->next) or will have all
- # rows already pulled in (->all and/or unordered). Given that the
- # result can be rather large - we reuse the same already allocated
- # array, since the collapsed prefetch is smaller by definition.
- # At the end we cut the leftovers away and move on.
- while ($cur_row =
- ( ( $rows_pos >= 0 and $_[0][$rows_pos++] ) or do { $rows_pos = -1; undef } )
- ||
- ($_[1] and $_[1]->())
- ) {
-
- $cur_row_ids[$_] = defined $cur_row->[$_] ? $cur_row->[$_] : "\xFF\xFFN\xFFU\xFFL\xFFL\xFF\xFF"
- for (%1$s); # the top branch_id includes all id values
-
- $is_new_res = ! $collapse_idx[1]%2$s and (
- $_[1] and $result_pos and (unshift @{$_[2]}, $cur_row) and last
- );
-
- %3$s
-
- $_[0][$result_pos++] = $collapse_idx[1]%2$s
- if $is_new_res;
- }
-
- splice @{$_[0]}, $result_pos; # truncate the passed in array for cases of collapsing ->all()
-### END STRING EVAL
-EOS
-
- # !!! note - different var than the one above
- # change the quoted placeholders to unquoted alias-references
- $parser_src =~ s/ \' \xFF__VALPOS__(\d+)__\xFF \' /"\$cur_row->[$1]"/gex;
- $parser_src =~ s/ \' \xFF__IDVALPOS__(\d+)__\xFF \' /"\$cur_row_ids[$1]"/gex;
- }
-
- $parser_src;
-}
-
-# the simple non-collapsing nested structure recursor
-sub __visit_infmap_simple {
- my ($val_idx, $args) = @_;
-
- my $my_cols = {};
- my $rel_cols;
- for (keys %$val_idx) {
- if ($_ =~ /^ ([^\.]+) \. (.+) /x) {
- $rel_cols->{$1}{$2} = $val_idx->{$_};
- }
- else {
- $my_cols->{$_} = $val_idx->{$_};
- }
- }
- my @relperl;
- for my $rel (sort keys %$rel_cols) {
-
- # DISABLEPRUNE
- #my $optional = $args->{is_optional};
- #$optional ||= ($args->{rsrc}->relationship_info($rel)->{attrs}{join_type} || '') =~ /^left/i;
-
- push @relperl, join ' => ', perlstring($rel), __visit_infmap_simple($rel_cols->{$rel}, {
- rsrc => __get_related_source($args->{rsrc}, $rel, $rel_cols->{$rel}),
- # DISABLEPRUNE
- #non_top => 1,
- #is_optional => $optional,
- });
-
- # FIXME SUBOPTIMAL DISABLEPRUNE - disabled to satisfy t/resultset/inflate_result_api.t
- #if ($optional and my @branch_null_checks = map
- # { "(! defined '\xFF__VALPOS__${_}__\xFF')" }
- # sort { $a <=> $b } values %{$rel_cols->{$rel}}
- #) {
- # $relperl[-1] = sprintf ( '(%s) ? ( %s => [] ) : ( %s )',
- # join (' && ', @branch_null_checks ),
- # perlstring($rel),
- # $relperl[-1],
- # );
- #}
- }
-
- my $me_struct = keys %$my_cols
- ? __visit_dump({ map { $_ => "\xFF__VALPOS__$my_cols->{$_}__\xFF" } (keys %$my_cols) })
- : 'undef'
- ;
-
- return sprintf '[%s]', join (',',
- $me_struct,
- @relperl ? sprintf ('{ %s }', join (',', @relperl)) : (),
- );
-}
-
-# the collapsing nested structure recursor
-sub __visit_infmap_collapse {
-
- my ($val_idx, $collapse_map, $parent_info) = @_;
-
- my $my_cols = {};
- my $rel_cols;
- for (keys %$val_idx) {
- if ($_ =~ /^ ([^\.]+) \. (.+) /x) {
- $rel_cols->{$1}{$2} = $val_idx->{$_};
- }
- else {
- $my_cols->{$_} = $val_idx->{$_};
- }
- }
-
- my $sequenced_node_id = join ('', map
- { "{'\xFF__IDVALPOS__${_}__\xFF'}" }
- @{$collapse_map->{-node_id}}
- );
-
- my $me_struct = keys %$my_cols
- ? __visit_dump([{ map { $_ => "\xFF__VALPOS__$my_cols->{$_}__\xFF" } (keys %$my_cols) }])
- : undef
- ;
- my $node_idx_ref = sprintf '$collapse_idx[%d]%s', $collapse_map->{-node_index}, $sequenced_node_id;
-
- my $parent_idx_ref = sprintf( '$collapse_idx[%d]%s[1]{%s}',
- @{$parent_info}{qw/node_idx sequenced_node_id/},
- perlstring($parent_info->{relname}),
- ) if $parent_info;
-
- my @src;
- if ($collapse_map->{-node_index} == 1) {
- push @src, sprintf( '%s ||= %s;',
- $node_idx_ref,
- $me_struct,
- ) if $me_struct;
- }
- elsif ($collapse_map->{-is_single}) {
- push @src, sprintf ( '%s ||= %s%s;',
- $parent_idx_ref,
- $node_idx_ref,
- $me_struct ? " ||= $me_struct" : '',
- );
- }
- else {
- push @src, sprintf('push @{%s}, %s%s unless %s;',
- $parent_idx_ref,
- $node_idx_ref,
- $me_struct ? " ||= $me_struct" : '',
- $node_idx_ref,
- );
- }
-
- # DISABLEPRUNE
- #my $known_defined = { %{ $parent_info->{known_defined} || {} } };
- #$known_defined->{$_}++ for @{$collapse_map->{-node_id}};
-
- for my $rel (sort keys %$rel_cols) {
-
- push @src, sprintf( '%s[1]{%s} ||= [];', $node_idx_ref, perlstring($rel) )
- unless $collapse_map->{$rel}{-is_single};
-
- push @src, __visit_infmap_collapse($rel_cols->{$rel}, $collapse_map->{$rel}, {
- node_idx => $collapse_map->{-node_index},
- sequenced_node_id => $sequenced_node_id,
- relname => $rel,
- # DISABLEPRUNE
- #known_defined => $known_defined,
- });
-
- # FIXME SUBOPTIMAL DISABLEPRUNE - disabled to satisfy t/resultset/inflate_result_api.t
- #if ($collapse_map->{$rel}{-is_optional} and my @null_checks = map
- # { "(! defined '\xFF__IDVALPOS__${_}__\xFF')" }
- # sort { $a <=> $b } grep
- # { ! $known_defined->{$_} }
- # @{$collapse_map->{$rel}{-node_id}}
- #) {
- # $src[-1] = sprintf( '(%s) or %s',
- # join (' || ', @null_checks ),
- # $src[-1],
- # );
- #}
- }
-
- join "\n", @src;
-}
-
-# adding a dep on MoreUtils *just* for this is retarded
-sub __unique_numlist {
- [ sort { $a <=> $b } keys %{ {map { $_ => 1 } @_ }} ]
-}
-
-# This error must be thrown from two distinct codepaths, joining them is
-# rather hard. Go for this hack instead.
-sub __get_related_source {
- my ($rsrc, $rel, $relcols) = @_;
- try {
- $rsrc->related_source ($rel)
- } catch {
- $rsrc->throw_exception(sprintf(
- "Can't inflate prefetch into non-existent relationship '%s' from '%s', "
- . "check the inflation specification (columns/as) ending in '...%s.%s'.",
- $rel,
- $rsrc->source_name,
- $rel,
- (sort { length($a) <=> length ($b) } keys %$relcols)[0],
- ))};
-}
-
-# keep our own DD object around so we don't have to fitz with quoting
-my $dumper_obj;
-sub __visit_dump {
- # we actually will be producing functional perl code here,
- # thus no second-guessing of what these globals might have
- # been set to. DO NOT CHANGE!
- ($dumper_obj ||= do {
- require Data::Dumper;
- Data::Dumper->new([])
- ->Useperl (0)
- ->Purity (1)
- ->Pad ('')
- ->Useqq (0)
- ->Terse (1)
- ->Quotekeys (1)
- ->Deepcopy (0)
- ->Deparse (0)
- ->Maxdepth (0)
- ->Indent (0) # faster but harder to read, perhaps leave at 1 ?
- })->Values ([$_[0]])->Dump;
-}
-
-1;
foreach my $pre (keys %{$prefetch||{}}) {
- my @pre_vals;
- @pre_vals = (ref $prefetch->{$pre}[0] eq 'ARRAY')
- ? @{$prefetch->{$pre}} : $prefetch->{$pre}
- if @{$prefetch->{$pre}};
+ my (@pre_vals, $is_multi);
+ if (ref $prefetch->{$pre}[0] eq 'ARRAY') {
+ $is_multi = 1;
+ @pre_vals = @{$prefetch->{$pre}};
+ }
+ else {
+ @pre_vals = $prefetch->{$pre};
+ }
- my $pre_source = $source->related_source($pre);
+ my $pre_source = try {
+ $source->related_source($pre)
+ }
+ catch {
+ $class->throw_exception(sprintf
+
+ "Can't inflate manual prefetch into non-existent relationship '%s' from '%s', "
+ . "check the inflation specification (columns/as) ending in '%s.%s'.",
+
+ $pre,
+ $source->source_name,
+ $pre,
+ (keys %{$pre_vals[0][0]})[0] || 'something.something...',
+ );
+ };
my $accessor = $source->relationship_info($pre)->{attrs}{accessor}
- or $class->throw_exception("No accessor type declared for prefetched relationship '$pre'");
+ or $class->throw_exception("No accessor type declared for prefetched $pre");
+
+ if (! $is_multi and $accessor eq 'multi') {
+ $class->throw_exception("Manual prefetch (via select/columns) not supported with accessor 'multi'");
+ }
my @pre_objects;
for my $me_pref (@pre_vals) {
- # FIXME SUBOPTIMAL - the new row parsers can very well optimize
- # this away entirely, and *never* return such empty rows.
- # For now we maintain inflate_result API backcompat, see
- # t/resultset/inflate_result_api.t
- next unless first { defined $_ } values %{$me_pref->[0]};
+ # FIXME - this should not be necessary
+ # the collapser currently *could* return bogus elements with all
+ # columns set to undef
+ my $has_def;
+ for (values %{$me_pref->[0]}) {
+ if (defined $_) {
+ $has_def++;
+ last;
+ }
+ }
+ next unless $has_def;
- push @pre_objects, $pre_source->result_class->inflate_result(
- $pre_source, @$me_pref
- );
+ push @pre_objects, $pre_source->result_class->inflate_result(
+ $pre_source, @$me_pref
+ );
}
if ($accessor eq 'single') {
# see if we need to tear the prefetch apart otherwise delegate the limiting to the
# storage, unless software limit was requested
if (
- # limited collapsing has_many
- ( $attrs->{rows} && $attrs->{collapse} )
+ #limited has_many
+ ( $attrs->{rows} && keys %{$attrs->{collapse}} )
||
# grouped prefetch (to satisfy group_by == select)
( $attrs->{group_by}
delete $outer_attrs->{$_} for qw/where bind rows offset group_by having/;
my $inner_attrs = { %$attrs };
- delete $inner_attrs->{$_} for qw/for collapse _prefetch_selector_range select as/;
-
- # if the user did not request it, there is no point using it inside
- delete $inner_attrs->{order_by} if delete $inner_attrs->{_order_is_artificial};
+ delete $inner_attrs->{$_} for qw/for collapse _prefetch_selector_range _collapse_order_by select as/;
+
+ # bring over all non-collapse-induced order_by into the inner query (if any)
+ # the outer one will have to keep them all
+ delete $inner_attrs->{order_by};
+ if (my $ord_cnt = @{$outer_attrs->{order_by}} - @{$outer_attrs->{_collapse_order_by}} ) {
+ $inner_attrs->{order_by} = [
+ @{$outer_attrs->{order_by}}[ 0 .. $ord_cnt - 1]
+ ];
+ }
# generate the inner/outer select lists
# for inside we consider only stuff *not* brought in by the prefetch
delete $weak_registry->{$slot}
unless $cleared->{hash_merge_singleton}{$weak_registry->{$slot}{weakref}{behavior}}++;
}
- elsif (
- $slot =~ /^Data::Dumper/
- and
- $weak_registry->{$slot}{stacktrace} =~ /\QDBIx::Class::ResultSource::RowParser::_mk_row_parser/
- ) {
- # there should be only one D::D object (used to construct the rowparser)
- # more would indicate trouble
- delete $weak_registry->{$slot}
- unless $cleared->{mk_row_parser_dd_singleton}++;
- }
elsif (DBIx::Class::_ENV_::INVISIBLE_DOLLAR_AT and $slot =~ /^__TxnScopeGuard__FIXUP__/) {
delete $weak_registry->{$slot}
}
push @objs, $tag->id; #warn "tag: ", $tag->ID;
}
-is_deeply( \@objs, [ 2, 5, 8 ], 'third cd has correct tags' );
+is_deeply( \@objs, [ 1 ], 'second cd has correct tags' );
$tags = $cds->next->tags;
@objs = ();
push @objs, $tag->id; #warn "tag: ", $tag->ID;
}
-is_deeply( \@objs, [ 1 ], 'second cd has correct tags' );
+is_deeply( \@objs, [ 2, 5, 8 ], 'third cd has correct tags' );
is( $queries, 0, 'no additional SQL statements while checking nested data' );
'prefetch properly collapses amount of rows from get_column',
);
-$rs->reset;
-my $pob_rs = $rs->search({}, {
- select => ['me.title', 'tracks.title'],
- prefetch => 'tracks',
- order_by => [{-asc => ['position']}],
- group_by => ['me.title', 'tracks.title'],
-});
-is_same_sql_bind (
- $pob_rs->get_column("me.title")->as_query,
- '(SELECT me.title FROM (SELECT me.title, tracks.title FROM cd me LEFT JOIN track tracks ON tracks.cd = me.cdid GROUP BY me.title, tracks.title ORDER BY position ASC) me)',
- [],
- 'Correct SQL for prefetch/order_by/group_by'
-);
-
done_testing;
use Test::More;
use Test::Exception;
-
use lib qw(t/lib);
use DBICTest;
use DBIC::SqlMakerTest;
my $schema = DBICTest->init_schema();
-lives_ok (sub {
- my $rs = $schema->resultset( 'CD' )->search(
- {
- 'producer.name' => 'blah',
- 'producer_2.name' => 'foo',
- },
- {
- 'join' => [
- { cd_to_producer => 'producer' },
- { cd_to_producer => 'producer' },
- ],
- 'prefetch' => [
- 'artist',
- { cd_to_producer => { producer => 'producer_to_cd' } },
- ],
- }
- );
-
- my @executed = $rs->all();
-
- is_same_sql_bind (
- $rs->as_query,
- '(
- SELECT me.cdid, me.artist, me.title, me.year, me.genreid, me.single_track,
- artist.artistid, artist.name, artist.rank, artist.charfield,
- cd_to_producer.cd, cd_to_producer.producer, cd_to_producer.attribute,
- producer.producerid, producer.name,
- producer_to_cd.cd, producer_to_cd.producer, producer_to_cd.attribute
- FROM cd me
- LEFT JOIN cd_to_producer cd_to_producer
- ON cd_to_producer.cd = me.cdid
- LEFT JOIN producer producer
- ON producer.producerid = cd_to_producer.producer
- LEFT JOIN cd_to_producer producer_to_cd
- ON producer_to_cd.producer = producer.producerid
- LEFT JOIN cd_to_producer cd_to_producer_2
- ON cd_to_producer_2.cd = me.cdid
- LEFT JOIN producer producer_2
- ON producer_2.producerid = cd_to_producer_2.producer
- JOIN artist artist ON artist.artistid = me.artist
- WHERE ( ( producer.name = ? AND producer_2.name = ? ) )
- ORDER BY me.cdid
- )',
- [
- [ { sqlt_datatype => 'varchar', dbic_colname => 'producer.name', sqlt_size => 100 }
- => 'blah' ],
- [ { sqlt_datatype => 'varchar', dbic_colname => 'producer_2.name', sqlt_size => 100 }
- => 'foo' ],
- ],
- );
+ {
+ my $rs = $schema->resultset( 'CD' )->search(
+ {
+ 'producer.name' => 'blah',
+ 'producer_2.name' => 'foo',
+ },
+ {
+ 'join' => [
+ { cd_to_producer => 'producer' },
+ { cd_to_producer => 'producer' },
+ ],
+ 'prefetch' => [
+ 'artist',
+ { cd_to_producer => 'producer' },
+ ],
+ }
+ );
+
+ lives_ok {
+ my @rows = $rs->all();
+ };
+ }
-}, 'Complex join parsed/executed properly');
my @rs1a_results = $schema->resultset("Artist")->search_related('cds', {title => 'Forkful of bees'}, {order_by => 'title'});
is($rs1a_results[0]->title, 'Forkful of bees', "bare field conditions okay after search related");
throws_ok {
$artist_rs->first
- } qr/\QInflator IWillExplode does not provide an inflate_result() method/,
+ } qr/Can't locate object method "inflate_result" via package "IWillExplode"/,
'IWillExplode explodes on inflate';
my $cd_rs = $artist_rs->related_resultset('cds');
my @dbic_reltable = $dbic_obj->$col;
my @hashref_reltable = @{$datahashref->{$col}};
- is (scalar @hashref_reltable, scalar @dbic_reltable, 'number of related entries');
+ is (scalar @dbic_reltable, scalar @hashref_reltable, 'number of related entries');
# for my $index (0..scalar @hashref_reltable) {
for my $index (0..scalar @dbic_reltable) {
{ join_type => 'left'}
);
-# add a non-left single relationship for the complex prefetch tests
-__PACKAGE__->belongs_to( existing_single_track => 'DBICTest::Schema::Track', 'single_track');
-
__PACKAGE__->has_many( tracks => 'DBICTest::Schema::Track' );
__PACKAGE__->has_many(
tags => 'DBICTest::Schema::Tag', undef,
},
);
__PACKAGE__->set_primary_key('id');
-__PACKAGE__->add_unique_constraint ([qw/lyric_id text/]);
__PACKAGE__->belongs_to('lyric', 'DBICTest::Schema::Lyrics', 'lyric_id');
1;
use lib qw(t/lib);
use DBICTest;
+plan tests => 2;
+
my $schema = DBICTest->init_schema();
-my $link = $schema->resultset ('Link')->create ({
- url => 'loldogs!',
- bookmarks => [
- { link => 'Mein Hund ist schwul'},
- { link => 'Mein Hund ist schwul'},
- ]
+my $track_no_lyrics = $schema->resultset ('Track')
+ ->search ({ 'lyrics.lyric_id' => undef }, { join => 'lyrics' })
+ ->first;
+
+my $lyric = $track_no_lyrics->create_related ('lyrics', {
+ lyric_versions => [
+ { text => 'english doubled' },
+ { text => 'english doubled' },
+ ],
});
-is ($link->bookmarks->count, 2, "Two identical default-insert has_many's created");
+is ($lyric->lyric_versions->count, 2, "Two identical has_many's created");
-$link = $schema->resultset ('Link')->create ({
+my $link = $schema->resultset ('Link')->create ({
url => 'lolcats!',
bookmarks => [
{},
]
});
is ($link->bookmarks->count, 2, "Two identical default-insert has_many's created");
-
-done_testing;
LEFT JOIN track tracks
ON tracks.cd = me.cdid
WHERE me.artist != ?
- ORDER BY me.cdid
+ ORDER BY tracks.cd
)',
[
LEFT JOIN track tracks
ON tracks.cd = me.cdid
WHERE me.artist != ?
- ORDER BY me.cdid
+ ORDER BY tracks.cd
)',
[
LEFT JOIN track tracks ON tracks.cd = me.cdid
LEFT JOIN liner_notes liner_notes ON liner_notes.liner_id = me.cdid
WHERE ( me.cdid IS NOT NULL )
- ORDER BY track_count DESC, maxtr ASC
+ ORDER BY track_count DESC, maxtr ASC, tracks.cd
)',
[[$ROWS => 2]],
'next() query generated expected SQL',
ORDER BY cdid
) me
LEFT JOIN tags tags ON tags.cd = me.cdid
- ORDER BY cdid
+ ORDER BY cdid, tags.cd, tags.tag
)',
[],
'Prefetch + distinct resulted in correct group_by',
GROUP BY me.cdid, me.artist, me.title, me.year, me.genreid, me.single_track
) me
JOIN artist artist ON artist.artistid = me.artist
- ORDER BY me.cdid
)',
[],
);
WHERE ( tracks.title != ? )
GROUP BY me.cdid, me.artist, me.title, me.year, me.genreid, me.single_track,
artist.artistid, artist.name, artist.rank, artist.charfield
- ORDER BY me.cdid
)',
[ map { [ { sqlt_datatype => 'varchar', sqlt_size => 100, dbic_colname => 'tracks.title' }
=> 'ugabuganoexist' ] } (1,2)
ORDER BY tags.tag ASC LIMIT ?)
me
LEFT JOIN tags tags ON tags.cd = me.cdid
- ORDER BY tags.tag ASC
+ ORDER BY tags.tag ASC, tags.cd, tags.tag
)
}, [[$ROWS => 1]]);
}
lives_ok(sub {
# while cds.* will be selected anyway (prefetch currently forces the result of _resolve_prefetch)
- # only the requested me.name/me.artistid columns will be fetched.
+ # only the requested me.name column will be fetched.
# reference sql with select => [...]
- # SELECT me.name, cds.title, me.artistid, cds.cdid, cds.artist, cds.title, cds.year, cds.genreid, cds.single_track FROM ...
+ # SELECT me.name, cds.title, cds.cdid, cds.artist, cds.title, cds.year, cds.genreid, cds.single_track FROM ...
my $rs = $schema->resultset('Artist')->search(
{ 'cds.title' => { '!=', 'Generic Manufactured Singles' } },
{
prefetch => [ qw/ cds / ],
order_by => [ { -desc => 'me.name' }, 'cds.title' ],
- select => [qw/ me.name cds.title me.artistid / ],
- },
+ select => [qw/ me.name cds.title / ],
+ }
);
is ($rs->count, 2, 'Correct number of collapsed artists');
is ($we_are_goth->cds->first->title, 'Come Be Depressed With Us', 'Correct cd for artist');
}, 'explicit prefetch on a keyless object works');
-lives_ok ( sub {
-
- my $rs = $schema->resultset('CD')->search(
- {},
- {
- order_by => [ { -desc => 'me.year' } ],
- }
- );
- my $years = [qw/ 2001 2001 1999 1998 1997/];
-
- is_deeply (
- [ $rs->search->get_column('me.year')->all ],
- $years,
- 'Expected years (at least one duplicate)',
- );
-
- my @cds_and_tracks;
- for my $cd ($rs->all) {
- my $data = { year => $cd->year, cdid => $cd->cdid };
- for my $tr ($cd->tracks->all) {
- push @{$data->{tracks}}, { $tr->get_columns };
- }
- push @cds_and_tracks, $data;
- }
-
- my $pref_rs = $rs->search ({}, { columns => [qw/year cdid/], prefetch => 'tracks' });
-
- my @pref_cds_and_tracks;
- for my $cd ($pref_rs->all) {
- my $data = { $cd->get_columns };
- for my $tr ($cd->tracks->all) {
- push @{$data->{tracks}}, { $tr->get_columns };
- }
- push @pref_cds_and_tracks, $data;
- }
-
- is_deeply (
- \@pref_cds_and_tracks,
- \@cds_and_tracks,
- 'Correct collapsing on non-unique primary object'
- );
-
- is_deeply (
- [ $pref_rs->search ({}, { result_class => 'DBIx::Class::ResultClass::HashRefInflator' })->all ],
- \@cds_and_tracks,
- 'Correct HRI collapsing on non-unique primary object'
- );
-
-}, 'weird collapse lives');
-
lives_ok(sub {
# test implicit prefetch as well
sub {
$schema->resultset('Track')->search({}, { join => { cd => 'artist' }, '+columns' => 'artist.name' } )->next;
},
- qr|\QCan't inflate prefetch into non-existent relationship 'artist' from 'Track', check the inflation specification (columns/as) ending in '...artist.name'|,
+ qr|\QCan't inflate manual prefetch into non-existent relationship 'artist' from 'Track', check the inflation specification (columns/as) ending in 'artist.name'|,
'Sensible error message on mis-specified "as"',
);
JOIN artist artist ON artist.artistid = me.artist
LEFT JOIN cd cds ON cds.artist = artist.artistid
LEFT JOIN artist artist_2 ON artist_2.artistid = cds.artist
- ORDER BY me.cdid
+ ORDER BY cds.artist, cds.year ASC
)',
[],
);
+++ /dev/null
-use strict;
-use warnings;
-
-use Test::More;
-use Test::Exception;
-use lib qw(t/lib);
-use DBICTest;
-
-my $schema = DBICTest->init_schema();
-
-my $rs = $schema->resultset('Artist')->search({}, {
- select => 'artistid',
- prefetch => { cds => 'tracks' },
-});
-
-my $initial_artists_cnt = $rs->count;
-
-# create one extra artist with just one cd with just one track
-# and then an artist with nothing at all
-# the implicit order by me.artistid will get them back in correct order
-$rs->create({
- name => 'foo',
- cds => [{
- year => 2012,
- title => 'foocd',
- tracks => [{
- title => 'footrack',
- }]
- }],
-});
-$rs->create({ name => 'bar' });
-$rs->create({ name => 'baz' });
-
-# make sure we are reentrant, and also check with explicit order_by
-for (undef, undef, 'me.artistid') {
- $rs = $rs->search({}, { order_by => $_ }) if $_;
-
- for (1 .. $initial_artists_cnt) {
- is ($rs->next->artistid, $_, 'Default fixture artists in order') || exit;
- }
-
- my $foo_artist = $rs->next;
- is ($foo_artist->cds->next->tracks->next->title, 'footrack', 'Right track');
-
- is (
- [$rs->cursor->next]->[0],
- $initial_artists_cnt + 3,
- 'Very last artist still on the cursor'
- );
-
- is_deeply ([$rs->cursor->next], [], 'Nothing else left');
-
- is ($rs->next->artistid, $initial_artists_cnt + 2, 'Row stashed in resultset still accessible');
- is ($rs->next, undef, 'Nothing left in resultset either');
-
- $rs->reset;
-}
-
-$rs->next;
-
-my @objs = $rs->all;
-is (@objs, $initial_artists_cnt + 3, '->all resets everything correctly');
-is ( ($rs->cursor->next)[0], 1, 'Cursor auto-rewound after all()');
-is ($rs->{stashed_rows}, undef, 'Nothing else left in $rs stash');
-
-my $unordered_rs = $rs->search({}, { order_by => 'cds.title' });
-ok ($unordered_rs->next, 'got row 1');
-is_deeply ([$unordered_rs->cursor->next], [], 'Nothing left on cursor, eager slurp');
-ok ($unordered_rs->next, "got row $_") for (2 .. $initial_artists_cnt + 3);
-is ($unordered_rs->next, undef, 'End of RS reached');
-is ($unordered_rs->next, undef, 'End of RS not lost');
-
-done_testing;
+++ /dev/null
-use strict;
-use warnings;
-
-use Test::More;
-use Test::Exception;
-use lib qw(t/lib);
-use DBICTest;
-
-my $schema = DBICTest->init_schema(no_populate => 1);
-
-$schema->resultset('CD')->create({
- title => 'Equinoxe',
- year => 1978,
- artist => { name => 'JMJ' },
- genre => { name => 'electro' },
- tracks => [
- { title => 'e1' },
- { title => 'e2' },
- { title => 'e3' },
- ],
- single_track => {
- title => 'o1',
- cd => {
- title => 'Oxygene',
- year => 1976,
- artist => {
- name => 'JMJ',
- cds => [
- {
- title => 'Magnetic Fields',
- year => 1981,
- genre => { name => 'electro' },
- tracks => [
- { title => 'm1' },
- { title => 'm2' },
- { title => 'm3' },
- { title => 'm4' },
- ],
- },
- ],
- },
- tracks => [
- { title => 'o2', position => 2}, # the position should not be here, bug in MC
- ],
- },
- },
-});
-
-my $rs = $schema->resultset ('CD')->search ({}, {
- join => [ 'tracks', { single_track => { cd => { artist => { cds => 'tracks' } } } } ],
- collapse => 1,
- columns => [
- { 'year' => 'me.year' }, # non-unique
- { 'genreid' => 'me.genreid' }, # nullable
- { 'tracks.title' => 'tracks.title' }, # non-unique (no me.id)
- { 'single_track.cd.artist.cds.cdid' => 'cds.cdid' }, # to give uniquiness to ...tracks.title below
- { 'single_track.cd.artist.artistid' => 'artist.artistid' }, # uniqufies entire parental chain
- { 'single_track.cd.artist.cds.year' => 'cds.year' }, # non-unique
- { 'single_track.cd.artist.cds.genreid' => 'cds.genreid' }, # nullable
- { 'single_track.cd.artist.cds.tracks.title' => 'tracks_2.title' }, # unique when combined with ...cds.cdid above
- { 'latest_cd' => \ "(SELECT MAX(year) FROM cd)" }, # random function
- { 'title' => 'me.title' }, # uniquiness for me
- { 'artist' => 'me.artist' }, # uniquiness for me
- ],
- order_by => [{ -desc => 'cds.year' }, { -desc => 'me.title'} ],
-});
-
-my $hri_rs = $rs->search({}, { result_class => 'DBIx::Class::ResultClass::HashRefInflator' });
-
-is_deeply (
- [$hri_rs->all],
- [
- {
- artist => 1,
- genreid => 1,
- latest_cd => 1981,
- single_track => {
- cd => {
- artist => {
- artistid => 1,
- cds => [
- {
- cdid => 1,
- genreid => 1,
- tracks => [
- {
- title => "m1"
- },
- {
- title => "m2"
- },
- {
- title => "m3"
- },
- {
- title => "m4"
- }
- ],
- year => 1981
- },
- {
- cdid => 3,
- genreid => 1,
- tracks => [
- {
- title => "e1"
- },
- {
- title => "e2"
- },
- {
- title => "e3"
- }
- ],
- year => 1978
- },
- {
- cdid => 2,
- genreid => undef,
- tracks => [
- {
- title => "o1"
- },
- {
- title => "o2"
- }
- ],
- year => 1976
- }
- ]
- }
- }
- },
- title => "Equinoxe",
- tracks => [
- {
- title => "e1"
- },
- {
- title => "e2"
- },
- {
- title => "e3"
- }
- ],
- year => 1978
- },
- {
- artist => 1,
- genreid => undef,
- latest_cd => 1981,
- single_track => undef,
- title => "Oxygene",
- tracks => [
- {
- title => "o1"
- },
- {
- title => "o2"
- }
- ],
- year => 1976
- },
- {
- artist => 1,
- genreid => 1,
- latest_cd => 1981,
- single_track => undef,
- title => "Magnetic Fields",
- tracks => [
- {
- title => "m1"
- },
- {
- title => "m2"
- },
- {
- title => "m3"
- },
- {
- title => "m4"
- }
- ],
- year => 1981
- },
- ],
- 'W00T, manual prefetch with collapse works'
-);
-
-my $row = $rs->next;
-
-TODO: {
- local $TODO = 'Something is wrong with filter type rels, they throw on incomplete objects >.<';
-
- lives_ok {
- is_deeply (
- { $row->single_track->get_columns },
- {},
- 'empty intermediate object ok',
- )
- } 'no exception';
-}
-
-is ($rs->cursor->next, undef, 'cursor exhausted');
-
-TODO: {
-local $TODO = 'this does not work at all, need to promote rsattrs to an object on its own';
-# make sure has_many column redirection does not do weird stuff when collapse is requested
-for my $pref_args (
- { prefetch => 'cds'},
- { collapse => 1 }
-) {
- for my $col_and_join_args (
- { '+columns' => { 'cd_title' => 'cds_2.title' }, join => [ 'cds', 'cds' ] },
- { '+columns' => { 'cd_title' => 'cds.title' }, join => 'cds', }
- ) {
-
- my $weird_rs = $schema->resultset('Artist')->search({}, {
- %$col_and_join_args, %$pref_args,
- });
-
- for (qw/next all first/) {
- throws_ok { $weird_rs->$_ } qr/not yet determined exception text/;
- }
- }
-}
-}
-
-done_testing;
use Test::More;
use lib qw(t/lib);
use DBICTest;
+use IO::File;
my $schema = DBICTest->init_schema();
my $sdebug = $schema->storage->debug;
-#( 1 -> M + M )
-my $cd_rs = $schema->resultset('CD')->search( { 'me.title' => 'Forkful of bees' } );
-my $pr_cd_rs = $cd_rs->search( {}, { prefetch => [qw/tracks tags/], } );
+# once the following TODO is complete, remove the 2 warning tests immediately
+# after the TODO block
+# (the TODO block itself contains tests ensuring that the warnings are no longer emitted)
+TODO: {
+ local $TODO = 'Prefetch of multiple has_many rels at the same level (currently warn to protect the clueless git)';
-my $tracks_rs = $cd_rs->first->tracks;
-my $tracks_count = $tracks_rs->count;
+ #( 1 -> M + M )
+ my $cd_rs = $schema->resultset('CD')->search ({ 'me.title' => 'Forkful of bees' });
+ my $pr_cd_rs = $cd_rs->search ({}, {
+ prefetch => [qw/tracks tags/],
+ });
-my ( $pr_tracks_rs, $pr_tracks_count );
+ my $tracks_rs = $cd_rs->first->tracks;
+ my $tracks_count = $tracks_rs->count;
-my $queries = 0;
-$schema->storage->debugcb( sub { $queries++ } );
-$schema->storage->debug(1);
+ my ($pr_tracks_rs, $pr_tracks_count);
-my $o_mm_warn;
-{
- local $SIG{__WARN__} = sub { $o_mm_warn = shift };
- $pr_tracks_rs = $pr_cd_rs->first->tracks;
-};
-$pr_tracks_count = $pr_tracks_rs->count;
-
-ok( !$o_mm_warn,
-'no warning on attempt to prefetch several same level has_many\'s (1 -> M + M)'
-);
-
-is( $queries, 1, 'prefetch one->(has_many,has_many) ran exactly 1 query' );
-$schema->storage->debugcb(undef);
-$schema->storage->debug($sdebug);
-
-is( $pr_tracks_count, $tracks_count,
-'equal count of prefetched relations over several same level has_many\'s (1 -> M + M)'
-);
-is( $pr_tracks_rs->all, $tracks_rs->all,
-'equal amount of objects returned with and without prefetch over several same level has_many\'s (1 -> M + M)'
-);
-
-#( M -> 1 -> M + M )
-my $note_rs =
- $schema->resultset('LinerNotes')->search( { notes => 'Buy Whiskey!' } );
-my $pr_note_rs =
- $note_rs->search( {}, { prefetch => { cd => [qw/tracks tags/] }, } );
-
-my $tags_rs = $note_rs->first->cd->tags;
-my $tags_count = $tags_rs->count;
-
-my ( $pr_tags_rs, $pr_tags_count );
-
-$queries = 0;
-$schema->storage->debugcb( sub { $queries++ } );
-$schema->storage->debug(1);
-
-my $m_o_mm_warn;
+ my $queries = 0;
+ $schema->storage->debugcb(sub { $queries++ });
+ $schema->storage->debug(1);
+
+ my $o_mm_warn;
+ {
+ local $SIG{__WARN__} = sub { $o_mm_warn = shift };
+ $pr_tracks_rs = $pr_cd_rs->first->tracks;
+ };
+ $pr_tracks_count = $pr_tracks_rs->count;
+
+ ok(! $o_mm_warn, 'no warning on attempt to prefetch several same level has_many\'s (1 -> M + M)');
+
+ is($queries, 1, 'prefetch one->(has_many,has_many) ran exactly 1 query');
+ $schema->storage->debugcb (undef);
+ $schema->storage->debug ($sdebug);
+
+ is($pr_tracks_count, $tracks_count, 'equal count of prefetched relations over several same level has_many\'s (1 -> M + M)');
+ is ($pr_tracks_rs->all, $tracks_rs->all, 'equal amount of objects returned with and without prefetch over several same level has_many\'s (1 -> M + M)');
+
+ #( M -> 1 -> M + M )
+ my $note_rs = $schema->resultset('LinerNotes')->search ({ notes => 'Buy Whiskey!' });
+ my $pr_note_rs = $note_rs->search ({}, {
+ prefetch => {
+ cd => [qw/tracks tags/]
+ },
+ });
+
+ my $tags_rs = $note_rs->first->cd->tags;
+ my $tags_count = $tags_rs->count;
+
+ my ($pr_tags_rs, $pr_tags_count);
+
+ $queries = 0;
+ $schema->storage->debugcb(sub { $queries++ });
+ $schema->storage->debug(1);
+
+ my $m_o_mm_warn;
+ {
+ local $SIG{__WARN__} = sub { $m_o_mm_warn = shift };
+ $pr_tags_rs = $pr_note_rs->first->cd->tags;
+ };
+ $pr_tags_count = $pr_tags_rs->count;
+
+ ok(! $m_o_mm_warn, 'no warning on attempt to prefetch several same level has_many\'s (M -> 1 -> M + M)');
+
+ is($queries, 1, 'prefetch one->(has_many,has_many) ran exactly 1 query');
+ $schema->storage->debugcb (undef);
+ $schema->storage->debug ($sdebug);
+
+ is($pr_tags_count, $tags_count, 'equal count of prefetched relations over several same level has_many\'s (M -> 1 -> M + M)');
+ is($pr_tags_rs->all, $tags_rs->all, 'equal amount of objects with and without prefetch over several same level has_many\'s (M -> 1 -> M + M)');
+}
+
+# remove this closure once the TODO above is working
{
- local $SIG{__WARN__} = sub { $m_o_mm_warn = shift };
- $pr_tags_rs = $pr_note_rs->first->cd->tags;
-};
-$pr_tags_count = $pr_tags_rs->count;
-
-ok( !$m_o_mm_warn,
-'no warning on attempt to prefetch several same level has_many\'s (M -> 1 -> M + M)'
-);
-
-is( $queries, 1, 'prefetch one->(has_many,has_many) ran exactly 1 query' );
-$schema->storage->debugcb(undef);
-$schema->storage->debug($sdebug);
-
-is( $pr_tags_count, $tags_count,
-'equal count of prefetched relations over several same level has_many\'s (M -> 1 -> M + M)'
-);
-is( $pr_tags_rs->all, $tags_rs->all,
-'equal amount of objects with and without prefetch over several same level has_many\'s (M -> 1 -> M + M)'
-);
+ my $warn_re = qr/will explode the number of row objects retrievable via/;
+
+ my (@w, @dummy);
+ local $SIG{__WARN__} = sub { $_[0] =~ $warn_re ? push @w, @_ : warn @_ };
+
+ my $rs = $schema->resultset('CD')->search ({ 'me.title' => 'Forkful of bees' }, { prefetch => [qw/tracks tags/] });
+ @w = ();
+ @dummy = $rs->first;
+  is (@w, 1, 'warning on attempt to prefetch several same level has_manys (1 -> M + M)');
+
+ my $rs2 = $schema->resultset('LinerNotes')->search ({ notes => 'Buy Whiskey!' }, { prefetch => { cd => [qw/tags tracks/] } });
+ @w = ();
+ @dummy = $rs2->first;
+  is (@w, 1, 'warning on attempt to prefetch several same level has_manys (M -> 1 -> M + M)');
+}
done_testing;
+++ /dev/null
-use strict;
-use warnings;
-
-use Test::More;
-use Test::Exception;
-use lib qw(t/lib);
-use DBICTest;
-
-my $schema = DBICTest->init_schema();
-
-my $mo_rs = $schema->resultset('Artist')->search(
- { 'me.artistid' => 4 },
- {
- prefetch => [
- {
- cds => [
- { tracks => { cd_single => 'tracks' } },
- { cd_to_producer => 'producer' }
- ]
- },
- { artwork_to_artist => 'artwork' }
- ],
-
- result_class => 'DBIx::Class::ResultClass::HashRefInflator',
-
- order_by => [qw/tracks.position tracks.trackid producer.producerid/],
- }
-);
-
-$schema->resultset('Artist')->create(
- {
- name => 'mo',
- rank => '1337',
- cds => [
- {
- title => 'Song of a Foo',
- year => '1999',
- tracks => [
- {
- title => 'Foo Me Baby One More Time',
- },
- {
- title => 'Foo Me Baby One More Time II',
- },
- {
- title => 'Foo Me Baby One More Time III',
- },
- {
- title => 'Foo Me Baby One More Time IV',
- cd_single =>
- { artist => 1, title => 'MO! Single', year => 2021, tracks => [
- { title => 'singled out' }, { title => 'still alone' },
- ] },
- }
- ],
- cd_to_producer => [
- { producer => { name => 'riba' } },
- { producer => { name => 'sushi' } },
- ]
- },
- {
- title => 'Song of a Foo II',
- year => '2002',
- tracks => [
- {
- title => 'Quit Playing Games With My Heart',
- },
- {
- title => 'Bar Foo',
- },
- {
- title => 'Foo Bar',
- cd_single =>
- { artist => 2, title => 'MO! Single', year => 2020, tracks => [
- { title => 'singled out' }, { title => 'still alone' },
- ] },
- }
- ],
- cd_to_producer => [
- { producer => { name => 'riba' } },
- { producer => { name => 'sushi' } },
- ],
- }
- ],
- artwork_to_artist =>
- [ { artwork => { cd_id => 1 } }, { artwork => { cd_id => 2 } } ]
- }
-);
-
-my $mo = $mo_rs->next;
-
-is( @{$mo->{cds}}, 2, 'two CDs' );
-
-is_deeply(
- $mo,
- {
- 'cds' => [
- {
- 'single_track' => undef,
- 'tracks' => [
- {
- 'cd' => '6',
- 'position' => '1',
- 'trackid' => '19',
- 'title' => 'Foo Me Baby One More Time',
- 'cd_single' => undef,
- 'last_updated_on' => undef,
- 'last_updated_at' => undef
- },
- {
- 'cd' => '6',
- 'position' => '2',
- 'trackid' => '20',
- 'title' => 'Foo Me Baby One More Time II',
- 'cd_single' => undef,
- 'last_updated_on' => undef,
- 'last_updated_at' => undef
- },
- {
- 'cd' => '6',
- 'position' => '3',
- 'trackid' => '21',
- 'title' => 'Foo Me Baby One More Time III',
- 'cd_single' => undef,
- 'last_updated_on' => undef,
- 'last_updated_at' => undef
- },
- {
- 'cd' => '6',
- 'position' => '4',
- 'trackid' => '22',
- 'title' => 'Foo Me Baby One More Time IV',
- 'last_updated_on' => undef,
- 'last_updated_at' => undef,
- 'cd_single' => {
- 'single_track' => '22',
- 'artist' => '1',
- 'cdid' => '7',
- 'title' => 'MO! Single',
- 'genreid' => undef,
- 'year' => '2021',
- 'tracks' => [
- {
- 'cd' => '7',
- 'position' => '1',
- 'title' => 'singled out',
- 'trackid' => '23',
- 'last_updated_at' => undef,
- 'last_updated_on' => undef
- },
- {
- 'cd' => '7',
- 'position' => '2',
- 'title' => 'still alone',
- 'trackid' => '24',
- 'last_updated_at' => undef,
- 'last_updated_on' => undef
- },
- ],
- },
- }
- ],
- 'artist' => '4',
- 'cdid' => '6',
- 'cd_to_producer' => [
- {
- 'attribute' => undef,
- 'cd' => '6',
- 'producer' => {
- 'name' => 'riba',
- 'producerid' => '4'
- }
- },
- {
- 'attribute' => undef,
- 'cd' => '6',
- 'producer' => {
- 'name' => 'sushi',
- 'producerid' => '5'
- }
- }
- ],
- 'title' => 'Song of a Foo',
- 'genreid' => undef,
- 'year' => '1999'
- },
- {
- 'single_track' => undef,
- 'tracks' => [
- {
- 'cd' => '8',
- 'position' => '1',
- 'trackid' => '25',
- 'title' => 'Quit Playing Games With My Heart',
- 'last_updated_on' => undef,
- 'last_updated_at' => undef,
- 'cd_single' => undef,
- },
- {
- 'cd' => '8',
- 'position' => '2',
- 'trackid' => '26',
- 'title' => 'Bar Foo',
- 'cd_single' => undef,
- 'last_updated_on' => undef,
- 'last_updated_at' => undef
- },
- {
- 'cd' => '8',
- 'position' => '3',
- 'trackid' => '27',
- 'title' => 'Foo Bar',
- 'last_updated_on' => undef,
- 'last_updated_at' => undef,
- 'cd_single' => {
- 'single_track' => '27',
- 'artist' => '2',
- 'cdid' => '9',
- 'title' => 'MO! Single',
- 'genreid' => undef,
- 'year' => '2020',
- 'tracks' => [
- {
- 'cd' => '9',
- 'position' => '1',
- 'title' => 'singled out',
- 'trackid' => '28',
- 'last_updated_at' => undef,
- 'last_updated_on' => undef
- },
- {
- 'cd' => '9',
- 'position' => '2',
- 'title' => 'still alone',
- 'trackid' => '29',
- 'last_updated_at' => undef,
- 'last_updated_on' => undef
- },
- ],
-
- },
- },
- ],
- 'artist' => '4',
- 'cdid' => '8',
- 'cd_to_producer' => [
- {
- 'attribute' => undef,
- 'cd' => '8',
- 'producer' => {
- 'name' => 'riba',
- 'producerid' => '4'
- }
- },
- {
- 'attribute' => undef,
- 'cd' => '8',
- 'producer' => {
- 'name' => 'sushi',
- 'producerid' => '5'
- }
- }
- ],
- 'title' => 'Song of a Foo II',
- 'genreid' => undef,
- 'year' => '2002'
- }
- ],
- 'artistid' => '4',
- 'charfield' => undef,
- 'name' => 'mo',
- 'artwork_to_artist' => [
- {
- 'artwork' => { 'cd_id' => '1' },
- 'artist_id' => '4',
- 'artwork_cd_id' => '1'
- },
- {
- 'artwork' => { 'cd_id' => '2' },
- 'artist_id' => '4',
- 'artwork_cd_id' => '2'
- }
- ],
- 'rank' => '1337'
- }
-);
-
-done_testing;
{ "$ar.rank" => 13 },
{
prefetch => [ 'tracks' ],
- order_by => [ 'tracks.position DESC', { -asc => "$ar.name" }, "$ar.artistid DESC" ],
- offset => 13,
+ order_by => [ { -asc => "$ar.name" }, "$ar.artistid DESC" ],
+ offset => 3,
rows => 3,
},
);
FROM artist me
JOIN cd cds_unordered
ON cds_unordered.artist = me.artistid
- LEFT JOIN track tracks
- ON tracks.cd = cds_unordered.cdid
WHERE ( me.rank = ? )
- ORDER BY tracks.position DESC, me.name ASC, me.artistid DESC
+ ORDER BY me.name ASC, me.artistid DESC
LIMIT ?
OFFSET ?
) cds_unordered
LEFT JOIN track tracks
ON tracks.cd = cds_unordered.cdid
WHERE ( me.rank = ? )
- ORDER BY tracks.position DESC, me.name ASC, me.artistid DESC
+ ORDER BY me.name ASC, me.artistid DESC, tracks.cd
)},
[
[ { sqlt_datatype => 'integer', dbic_colname => 'me.rank' } => 13 ],
[ $ROWS => 3 ],
- [ $OFFSET => 13 ],
+ [ $OFFSET => 3 ],
[ { sqlt_datatype => 'integer', dbic_colname => 'me.rank' } => 13 ],
],
'correct SQL on limited prefetch over search_related ordered by root',
'cd' => '4',
'last_updated_at' => undef,
'last_updated_on' => undef,
- 'position' => '3',
- 'title' => 'No More Ideas',
- 'trackid' => '12'
+ 'position' => '1',
+ 'title' => 'Boring Name',
+ 'trackid' => '10'
},
{
'cd' => '4',
'cd' => '4',
'last_updated_at' => undef,
'last_updated_on' => undef,
- 'position' => '1',
- 'title' => 'Boring Name',
- 'trackid' => '10'
+ 'position' => '3',
+ 'title' => 'No More Ideas',
+ 'trackid' => '12'
}
],
'year' => '2001'
'cd' => '5',
'last_updated_at' => undef,
'last_updated_on' => undef,
+ 'position' => '1',
+ 'title' => 'Sad',
+ 'trackid' => '13'
+ },
+ {
+ 'cd' => '5',
+ 'last_updated_at' => undef,
+ 'last_updated_on' => undef,
'position' => '3',
'title' => 'Suicidal',
'trackid' => '15'
'position' => '2',
'title' => 'Under The Weather',
'trackid' => '14'
- },
- {
- 'cd' => '5',
- 'last_updated_at' => undef,
- 'last_updated_on' => undef,
- 'position' => '1',
- 'title' => 'Sad',
- 'trackid' => '13'
}
],
'year' => '1998'
$schema->storage->debugcb(sub { $queries++ });
$schema->storage->debug(1);
+
my $pref = $schema->resultset ('Artist')
->search ({ 'me.artistid' => $artist->id }, { prefetch => { cds => 'genre' } })
->next;
is ($pref->cds->count, 3, 'Correct number of CDs prefetched');
is ($pref->cds->search_related ('genre')->count, 1, 'Only one of the prefetched cds has a prefetched genre');
+
is ($queries, 1, 'All happened within one query only');
$schema->storage->debugcb($orig_cb);
$schema->storage->debug(0);
+
done_testing;
my $rs = shift;
my $struc = {};
- # all of these ought to work, but do not for some reason
- # a noop cloning search() pollution?
- #foreach my $art ( $rs->search({}, { order_by => 'me.artistid' })->all ) {
- #foreach my $art ( $rs->search({}, {})->all ) {
- #foreach my $art ( $rs->search()->all ) {
foreach my $art ( $rs->all ) {
foreach my $cd ( $art->cds ) {
foreach my $track ( $cd->tracks ) {
WHERE artwork.cd_id IS NULL
OR tracks.title != ?
GROUP BY me.artistid + ?, me.artistid, me.name, cds.cdid, cds.artist, cds.title, cds.year, cds.genreid, cds.single_track
- ORDER BY name DESC
+ ORDER BY name DESC, cds.artist, cds.year ASC
)',
[
$bind_int_resolved->(), # outer select
JOIN artist artist
ON artist.artistid = me.artist
WHERE ( ( artist.name = ? AND me.year = ? ) )
- ORDER BY me.cdid
+ ORDER BY tracks.cd
)',
[
[ { sqlt_datatype => 'varchar', sqlt_size => 100, dbic_colname => 'artist.name' } => 'foo' ],
'16 correct cds found'
);
+TODO: {
+local $TODO = 'Prefetch on custom rels cannot work until the collapse rewrite is finished '
+ . '(currently the collapser requires a right-side (which is non-deterministic) order-by)';
lives_ok {
my @all_artists_with_80_cds_pref = $schema->resultset("Artist")->search
);
} 'prefetchy-fetchy-fetch';
+} # end of TODO
# try to create_related a 80s cd
+++ /dev/null
-use strict;
-use warnings;
-
-use Test::More;
-use lib qw(t/lib);
-use DBICTest;
-
-my $schema = DBICTest->init_schema(no_populate => 1);
-
-$schema->resultset('CD')->create({
- title => 'Equinoxe',
- year => 1978,
- artist => { name => 'JMJ' },
- genre => { name => 'electro' },
- tracks => [
- { title => 'e1' },
- { title => 'e2' },
- { title => 'e3' },
- ],
- single_track => {
- title => 'o1',
- cd => {
- title => 'Oxygene',
- year => 1976,
- artist => {
- name => 'JMJ',
- cds => [
- {
- title => 'Magnetic Fields',
- year => 1981,
- genre => { name => 'electro' },
- tracks => [
- { title => 'm1' },
- { title => 'm2' },
- { title => 'm3' },
- { title => 'm4' },
- ],
- },
- ],
- },
- tracks => [
- { title => 'o2', position => 2}, # the position should not be needed here, bug in MC
- ],
- },
- },
-});
-
-{
- package DBICTest::_IRCapture;
- sub inflate_result { [@_[2,3]] };
-}
-
-is_deeply(
- ([$schema->resultset ('CD')->search ({}, {
- result_class => 'DBICTest::_IRCapture',
- prefetch => { single_track => { cd => 'artist' } },
- order_by => 'me.cdid',
- })->all]),
- [
- [
- { cdid => 1, single_track => undef, artist => 1, genreid => 1, year => 1981, title => "Magnetic Fields" },
- { single_track => [
- { trackid => undef, title => undef, position => undef, cd => undef, last_updated_at => undef, last_updated_on => undef },
- { cd => [
- { cdid => undef, single_track => undef, artist => undef, genreid => undef, year => undef, title => undef },
- {
- artist => [
- { artistid => undef, name => undef, charfield => undef, rank => undef }
- ]
- }
- ] }
- ] }
- ],
- [
- { cdid => 2, single_track => undef, artist => 1, genreid => undef, year => 1976, title => "Oxygene" },
- { single_track => [
- { trackid => undef, title => undef, position => undef, cd => undef, last_updated_at => undef, last_updated_on => undef },
- { cd => [
- { cdid => undef, single_track => undef, artist => undef, genreid => undef, year => undef, title => undef },
- {
- artist => [
- { artistid => undef, name => undef, charfield => undef, rank => undef }
- ]
- }
- ] }
- ] }
- ],
- [
- { cdid => 3, single_track => 6, artist => 1, genreid => 1, year => 1978, title => "Equinoxe" },
- { single_track => [
- { trackid => 6, title => 'o1', position => 1, cd => 2, last_updated_at => undef, last_updated_on => undef },
- { cd => [
- { cdid => 2, single_track => undef, artist => 1, genreid => undef, year => 1976, title => "Oxygene" },
- {
- artist => [
- { artistid => 1, name => 'JMJ', charfield => undef, rank => 13 }
- ]
- }
- ] }
- ] }
- ],
- ],
- 'Simple 1:1 descend with classic prefetch ok'
-);
-
-is_deeply(
- [$schema->resultset ('CD')->search ({}, {
- result_class => 'DBICTest::_IRCapture',
- join => { single_track => { cd => 'artist' } },
- columns => [
- { 'year' => 'me.year' },
- { 'genreid' => 'me.genreid' },
- { 'single_track.cd.artist.artistid' => 'artist.artistid' },
- { 'title' => 'me.title' },
- { 'artist' => 'me.artist' },
- ],
- order_by => 'me.cdid',
- })->all],
- [
- [
- { artist => 1, genreid => 1, year => 1981, title => "Magnetic Fields" },
- { single_track => [
- undef,
- { cd => [
- undef,
- {
- artist => [
- { artistid => undef }
- ]
- }
- ] }
- ] }
- ],
- [
- { artist => 1, genreid => undef, year => 1976, title => "Oxygene" },
- { single_track => [
- undef,
- { cd => [
- undef,
- {
- artist => [
- { artistid => undef }
- ]
- }
- ] }
- ] }
- ],
- [
- { artist => 1, genreid => 1, year => 1978, title => "Equinoxe" },
- { single_track => [
- undef,
- { cd => [
- undef,
- {
- artist => [
- { artistid => 1 }
- ]
- }
- ] }
- ] }
- ],
- ],
- 'Simple 1:1 descend with missing selectors ok'
-);
-
-is_deeply(
- ([$schema->resultset ('CD')->search ({}, {
- result_class => 'DBICTest::_IRCapture',
- prefetch => [ { single_track => { cd => { artist => { cds => 'tracks' } } } } ],
- order_by => [qw/me.cdid tracks.trackid/],
- })->all]),
- [
- [
- { cdid => 1, single_track => undef, artist => 1, genreid => 1, year => 1981, title => "Magnetic Fields" },
- { single_track => [
- { trackid => undef, title => undef, position => undef, cd => undef, last_updated_at => undef, last_updated_on => undef },
- { cd => [
- { cdid => undef, single_track => undef, artist => undef, genreid => undef, year => undef, title => undef },
- {
- artist => [
- { artistid => undef, name => undef, charfield => undef, rank => undef },
- { cds => [ [
- { cdid => undef, single_track => undef, artist => undef, genreid => undef, year => undef, title => undef },
- { tracks => [ [
- { trackid => undef, title => undef, position => undef, cd => undef, last_updated_at => undef, last_updated_on => undef },
- ] ] },
- ]]},
- ],
- },
- ] },
- ] },
- ],
- [
- { cdid => 2, single_track => undef, artist => 1, genreid => undef, year => 1976, title => "Oxygene" },
- { single_track => [
- { trackid => undef, title => undef, position => undef, cd => undef, last_updated_at => undef, last_updated_on => undef },
- { cd => [
- { cdid => undef, single_track => undef, artist => undef, genreid => undef, year => undef, title => undef },
- {
- artist => [
- { artistid => undef, name => undef, charfield => undef, rank => undef },
- { cds => [ [
- { cdid => undef, single_track => undef, artist => undef, genreid => undef, year => undef, title => undef },
- { tracks => [ [
- { trackid => undef, title => undef, position => undef, cd => undef, last_updated_at => undef, last_updated_on => undef },
- ] ] },
- ]]},
- ]
- }
- ] }
- ] }
- ],
- [
- { cdid => 3, single_track => 6, artist => 1, genreid => 1, year => 1978, title => "Equinoxe" },
- { single_track => [
- { trackid => 6, title => 'o1', position => 1, cd => 2, last_updated_at => undef, last_updated_on => undef },
- { cd => [
- { cdid => 2, single_track => undef, artist => 1, genreid => undef, year => 1976, title => "Oxygene" },
- {
- artist => [
- { artistid => 1, name => 'JMJ', charfield => undef, rank => 13 },
- { cds => [
- [
- { cdid => 1, single_track => undef, artist => 1, genreid => 1, year => 1981, title => "Magnetic Fields" },
- { tracks => [
- [ { trackid => 1, title => 'm1', position => 1, cd => 1, last_updated_at => undef, last_updated_on => undef } ],
- [ { trackid => 2, title => 'm2', position => 2, cd => 1, last_updated_at => undef, last_updated_on => undef } ],
- [ { trackid => 3, title => 'm3', position => 3, cd => 1, last_updated_at => undef, last_updated_on => undef } ],
- [ { trackid => 4, title => 'm4', position => 4, cd => 1, last_updated_at => undef, last_updated_on => undef } ],
- ]},
- ],
- [
- { cdid => 2, single_track => undef, artist => 1, genreid => undef, year => 1976, title => "Oxygene" },
- { tracks => [
- [ { trackid => 5, title => 'o2', position => 2, cd => 2, last_updated_at => undef, last_updated_on => undef } ],
- [ { trackid => 6, title => 'o1', position => 1, cd => 2, last_updated_at => undef, last_updated_on => undef } ],
- ]},
- ],
- [
- { cdid => 3, single_track => 6, artist => 1, genreid => 1, year => 1978, title => "Equinoxe" },
- { tracks => [
- [ { trackid => 7, title => 'e1', position => 1, cd => 3, last_updated_at => undef, last_updated_on => undef } ],
- [ { trackid => 8, title => 'e2', position => 2, cd => 3, last_updated_at => undef, last_updated_on => undef } ],
- [ { trackid => 9, title => 'e3', position => 3, cd => 3, last_updated_at => undef, last_updated_on => undef } ],
- ]},
- ],
- ]},
- ]
- }
- ] }
- ] }
- ],
- ],
- 'Collapsing 1:1 ending in chained has_many with classic prefetch ok'
-);
-
-is_deeply (
- ([$schema->resultset ('Artist')->search ({}, {
- result_class => 'DBICTest::_IRCapture',
- join => { cds => 'tracks' },
- '+columns' => [
- (map { "cds.$_" } $schema->source('CD')->columns),
- (map { +{ "cds.tracks.$_" => "tracks.$_" } } $schema->source('Track')->columns),
- ],
- order_by => [qw/cds.cdid tracks.trackid/],
- })->all]),
- [
- [
- { artistid => 1, name => 'JMJ', charfield => undef, rank => 13 },
- { cds => [
- { cdid => 1, single_track => undef, artist => 1, genreid => 1, year => 1981, title => "Magnetic Fields" },
- { tracks => [
- { trackid => 1, title => 'm1', position => 1, cd => 1, last_updated_at => undef, last_updated_on => undef },
- ]},
- ]},
- ],
- [
- { artistid => 1, name => 'JMJ', charfield => undef, rank => 13 },
- { cds => [
- { cdid => 1, single_track => undef, artist => 1, genreid => 1, year => 1981, title => "Magnetic Fields" },
- { tracks => [
- { trackid => 2, title => 'm2', position => 2, cd => 1, last_updated_at => undef, last_updated_on => undef },
- ]},
- ]},
- ],
- [
- { artistid => 1, name => 'JMJ', charfield => undef, rank => 13 },
- { cds => [
- { cdid => 1, single_track => undef, artist => 1, genreid => 1, year => 1981, title => "Magnetic Fields" },
- { tracks => [
- { trackid => 3, title => 'm3', position => 3, cd => 1, last_updated_at => undef, last_updated_on => undef },
- ]},
- ]},
- ],
- [
- { artistid => 1, name => 'JMJ', charfield => undef, rank => 13 },
- { cds => [
- { cdid => 1, single_track => undef, artist => 1, genreid => 1, year => 1981, title => "Magnetic Fields" },
- { tracks => [
- { trackid => 4, title => 'm4', position => 4, cd => 1, last_updated_at => undef, last_updated_on => undef },
- ]},
- ]},
- ],
- [
- { artistid => 1, name => 'JMJ', charfield => undef, rank => 13 },
- { cds => [
- { cdid => 2, single_track => undef, artist => 1, genreid => undef, year => 1976, title => "Oxygene" },
- { tracks => [
- { trackid => 5, title => 'o2', position => 2, cd => 2, last_updated_at => undef, last_updated_on => undef },
- ]},
- ]},
- ],
- [
- { artistid => 1, name => 'JMJ', charfield => undef, rank => 13 },
- { cds => [
- { cdid => 2, single_track => undef, artist => 1, genreid => undef, year => 1976, title => "Oxygene" },
- { tracks => [
- { trackid => 6, title => 'o1', position => 1, cd => 2, last_updated_at => undef, last_updated_on => undef },
- ]},
- ]},
- ],
- [
- { artistid => 1, name => 'JMJ', charfield => undef, rank => 13 },
- { cds => [
- { cdid => 3, single_track => 6, artist => 1, genreid => 1, year => 1978, title => "Equinoxe" },
- { tracks => [
- { trackid => 7, title => 'e1', position => 1, cd => 3, last_updated_at => undef, last_updated_on => undef },
- ]},
- ]},
- ],
- [
- { artistid => 1, name => 'JMJ', charfield => undef, rank => 13 },
- { cds => [
- { cdid => 3, single_track => 6, artist => 1, genreid => 1, year => 1978, title => "Equinoxe" },
- { tracks => [
- { trackid => 8, title => 'e2', position => 2, cd => 3, last_updated_at => undef, last_updated_on => undef },
- ]},
- ]},
- ],
- [
- { artistid => 1, name => 'JMJ', charfield => undef, rank => 13 },
- { cds => [
- { cdid => 3, single_track => 6, artist => 1, genreid => 1, year => 1978, title => "Equinoxe" },
- { tracks => [
- { trackid => 9, title => 'e3', position => 3, cd => 3, last_updated_at => undef, last_updated_on => undef },
- ]},
- ]},
- ],
- ],
- 'Non-Collapsing chained has_many ok'
-);
-
-done_testing;
+++ /dev/null
-use strict;
-use warnings;
-
-use Test::More;
-use lib qw(t/lib);
-use DBICTest;
-use B::Deparse;
-
-my $schema = DBICTest->init_schema(no_deploy => 1);
-my $infmap = [qw/single_track.cd.artist.name year/];
-
-is_same_src (
- $schema->source ('CD')->_mk_row_parser({
- inflate_map => $infmap,
- }),
- '$_ = [
- { year => $_->[1] },
- { single_track => [
- undef,
- { cd => [
- undef,
- { artist => [
- { name => $_->[0] },
- ] },
- ]},
- ]},
- ] for @{$_[0]}',
- 'Simple 1:1 descending non-collapsing parser',
-);
-
-$infmap = [qw/
- single_track.cd.artist.artistid
- year
- single_track.cd.artist.cds.tracks.title
- single_track.cd.artist.cds.cdid
- title
- artist
-/];
-is_same_src (
- $schema->source ('CD')->_mk_row_parser({
- inflate_map => $infmap,
- }),
- '$_ = [
- { artist => $_->[5], title => $_->[4], year => $_->[1] },
- { single_track => [
- undef,
- { cd => [
- undef,
- { artist => [
- { artistid => $_->[0] },
- { cds => [
- { cdid => $_->[3] },
- { tracks => [
- { title => $_->[2] }
- ] },
- ] },
- ] },
- ] },
- ] },
- ] for @{$_[0]}',
- '1:1 descending non-collapsing parser terminating with chained 1:M:M',
-);
-
-is_deeply (
- $schema->source('CD')->_resolve_collapse({map { $infmap->[$_] => $_ } 0 .. $#$infmap}),
- {
- -node_index => 1,
- -node_id => [ 4, 5 ],
- -branch_id => [ 0, 2, 3, 4, 5 ],
-
- single_track => {
- -node_index => 2,
- -node_id => [ 4, 5],
- -branch_id => [ 0, 2, 3, 4, 5],
- -is_optional => 1,
- -is_single => 1,
-
- cd => {
- -node_index => 3,
- -node_id => [ 4, 5 ],
- -branch_id => [ 0, 2, 3, 4, 5 ],
- -is_single => 1,
-
- artist => {
- -node_index => 4,
- -node_id => [ 0 ],
- -branch_id => [ 0, 2, 3 ],
- -is_single => 1,
-
- cds => {
- -node_index => 5,
- -node_id => [ 3 ],
- -branch_id => [ 2, 3 ],
- -is_optional => 1,
-
- tracks => {
- -node_index => 6,
- -node_id => [ 2, 3 ],
- -branch_id => [ 2, 3 ],
- -is_optional => 1,
- },
- },
- },
- },
- },
- },
- 'Correct collapse map for 1:1 descending chain terminating with chained 1:M:M'
-);
-
-is_same_src (
- $schema->source ('CD')->_mk_row_parser({
- inflate_map => $infmap,
- collapse => 1,
- }),
- ' my($rows_pos, $result_pos, $cur_row, @cur_row_ids, @collapse_idx, $is_new_res) = (0, 0);
-
- while ($cur_row = (
- ( $rows_pos >= 0 and $_[0][$rows_pos++] ) or do { $rows_pos = -1; undef } )
- ||
- ( $_[1] and $_[1]->() )
- ) {
-
- $cur_row_ids[$_] = defined $cur_row->[$_] ? $cur_row->[$_] : "\xFF\xFFN\xFFU\xFFL\xFFL\xFF\xFF"
- for (0, 2, 3, 4, 5);
-
- # a present cref implies lazy prefetch, implies a supplied stash in $_[2]
- $_[1] and $result_pos and unshift(@{$_[2]}, $cur_row) and last
- if $is_new_res = ! $collapse_idx[1]{$cur_row_ids[4]}{$cur_row_ids[5]};
-
- $collapse_idx[1]{$cur_row_ids[4]}{$cur_row_ids[5]} ||= [{ artist => $cur_row->[5], title => $cur_row->[4], year => $cur_row->[1] }];
- $collapse_idx[1]{$cur_row_ids[4]}{$cur_row_ids[5]}[1]{single_track} ||= $collapse_idx[2]{$cur_row_ids[4]}{$cur_row_ids[5]};
- $collapse_idx[2]{$cur_row_ids[4]}{$cur_row_ids[5]}[1]{cd} ||= $collapse_idx[3]{$cur_row_ids[4]}{$cur_row_ids[5]};
- $collapse_idx[3]{$cur_row_ids[4]}{$cur_row_ids[5]}[1]{artist} ||= $collapse_idx[4]{$cur_row_ids[0]} ||= [{ artistid => $cur_row->[0] }];
-
- $collapse_idx[4]{$cur_row_ids[0]}[1]{cds} ||= [];
- push @{$collapse_idx[4]{$cur_row_ids[0]}[1]{cds}}, $collapse_idx[5]{$cur_row_ids[3]} ||= [{ cdid => $cur_row->[3] }]
- unless $collapse_idx[5]{$cur_row_ids[3]};
-
- $collapse_idx[5]{$cur_row_ids[3]}[1]{tracks} ||= [];
- push @{$collapse_idx[5]{$cur_row_ids[3]}[1]{tracks}}, $collapse_idx[6]{$cur_row_ids[2]}{$cur_row_ids[3]} ||= [{ title => $cur_row->[2] }]
- unless $collapse_idx[6]{$cur_row_ids[2]}{$cur_row_ids[3]};
-
- $_[0][$result_pos++] = $collapse_idx[1]{$cur_row_ids[4]}{$cur_row_ids[5]}
- if $is_new_res;
- }
- splice @{$_[0]}, $result_pos;
- ',
- 'Same 1:1 descending terminating with chained 1:M:M but with collapse',
-);
-
-$infmap = [qw/
- tracks.lyrics.lyric_versions.text
- existing_single_track.cd.artist.artistid
- existing_single_track.cd.artist.cds.year
- year
- genreid
- tracks.title
- existing_single_track.cd.artist.cds.cdid
- latest_cd
- existing_single_track.cd.artist.cds.tracks.title
- existing_single_track.cd.artist.cds.genreid
-/];
-
-is_deeply (
- $schema->source('CD')->_resolve_collapse({map { $infmap->[$_] => $_ } 0 .. $#$infmap}),
- {
- -node_index => 1,
- -node_id => [ 1 ], # existing_single_track.cd.artist.artistid
- -branch_id => [ 0, 1, 5, 6, 8 ],
-
- existing_single_track => {
- -node_index => 2,
- -node_id => [ 1 ], # existing_single_track.cd.artist.artistid
- -branch_id => [ 1, 6, 8 ],
- -is_single => 1,
-
- cd => {
- -node_index => 3,
- -node_id => [ 1 ], # existing_single_track.cd.artist.artistid
- -branch_id => [ 1, 6, 8 ],
- -is_single => 1,
-
- artist => {
- -node_index => 4,
- -node_id => [ 1 ], # existing_single_track.cd.artist.artistid
- -branch_id => [ 1, 6, 8 ],
- -is_single => 1,
-
- cds => {
- -node_index => 5,
- -node_id => [ 6 ], # existing_single_track.cd.artist.cds.cdid
- -branch_id => [ 6, 8 ],
- -is_optional => 1,
-
- tracks => {
- -node_index => 6,
- -node_id => [ 6, 8 ], # existing_single_track.cd.artist.cds.cdid, existing_single_track.cd.artist.cds.tracks.title
- -branch_id => [ 6, 8 ],
- -is_optional => 1,
- }
- }
- }
- }
- },
- tracks => {
- -node_index => 7,
- -node_id => [ 1, 5 ], # existing_single_track.cd.artist.artistid, tracks.title
- -branch_id => [ 0, 1, 5 ],
- -is_optional => 1,
-
- lyrics => {
- -node_index => 8,
- -node_id => [ 1, 5 ], # existing_single_track.cd.artist.artistid, tracks.title
- -branch_id => [ 0, 1, 5 ],
- -is_single => 1,
- -is_optional => 1,
-
- lyric_versions => {
- -node_index => 9,
- -node_id => [ 0, 1, 5 ], # tracks.lyrics.lyric_versions.text, existing_single_track.cd.artist.artistid, tracks.title
- -branch_id => [ 0, 1, 5 ],
- -is_optional => 1,
- },
- },
- }
- },
- 'Correct collapse map constructed',
-);
-
-is_same_src (
- $schema->source ('CD')->_mk_row_parser({
- inflate_map => $infmap,
- collapse => 1,
- }),
- ' my ($rows_pos, $result_pos, $cur_row, @cur_row_ids, @collapse_idx, $is_new_res) = (0,0);
-
- while ($cur_row = (
- ( $rows_pos >= 0 and $_[0][$rows_pos++] ) or do { $rows_pos = -1; undef } )
- ||
- ( $_[1] and $_[1]->() )
- ) {
-
- $cur_row_ids[$_] = defined $cur_row->[$_] ? $cur_row->[$_] : "\xFF\xFFN\xFFU\xFFL\xFFL\xFF\xFF"
- for (0, 1, 5, 6, 8);
-
- $is_new_res = ! $collapse_idx[1]{$cur_row_ids[1]} and (
- $_[1] and $result_pos and (unshift @{$_[2]}, $cur_row) and last
- );
-
- $collapse_idx[1]{$cur_row_ids[1]} ||= [{ latest_cd => $cur_row->[7], year => $cur_row->[3], genreid => $cur_row->[4] }];
-
- $collapse_idx[1]{$cur_row_ids[1]}[1]{existing_single_track} ||= $collapse_idx[2]{$cur_row_ids[1]};
- $collapse_idx[2]{$cur_row_ids[1]}[1]{cd} ||= $collapse_idx[3]{$cur_row_ids[1]};
- $collapse_idx[3]{$cur_row_ids[1]}[1]{artist} ||= $collapse_idx[4]{$cur_row_ids[1]} ||= [{ artistid => $cur_row->[1] }];
-
- $collapse_idx[4]{$cur_row_ids[1]}[1]{cds} ||= [];
- push @{ $collapse_idx[4]{$cur_row_ids[1]}[1]{cds} }, $collapse_idx[5]{$cur_row_ids[6]} ||= [{ cdid => $cur_row->[6], genreid => $cur_row->[9], year => $cur_row->[2] }]
- unless $collapse_idx[5]{$cur_row_ids[6]};
-
- $collapse_idx[5]{$cur_row_ids[6]}[1]{tracks} ||= [];
- push @{ $collapse_idx[5]{$cur_row_ids[6]}[1]{tracks} }, $collapse_idx[6]{$cur_row_ids[6]}{$cur_row_ids[8]} ||= [{ title => $cur_row->[8] }]
- unless $collapse_idx[6]{$cur_row_ids[6]}{$cur_row_ids[8]};
-
- $collapse_idx[1]{$cur_row_ids[1]}[1]{tracks} ||= [];
- push @{ $collapse_idx[1]{$cur_row_ids[1]}[1]{tracks} }, $collapse_idx[7]{$cur_row_ids[1]}{$cur_row_ids[5]} ||= [{ title => $cur_row->[5] }]
- unless $collapse_idx[7]{$cur_row_ids[1]}{$cur_row_ids[5]};
-
- $collapse_idx[7]{$cur_row_ids[1]}{$cur_row_ids[5]}[1]{lyrics} ||= $collapse_idx[8]{$cur_row_ids[1]}{$cur_row_ids[5] };
-
- $collapse_idx[8]{$cur_row_ids[1]}{$cur_row_ids[5]}[1]{lyric_versions} ||= [];
- push @{ $collapse_idx[8]{$cur_row_ids[1]}{$cur_row_ids[5]}[1]{lyric_versions} }, $collapse_idx[9]{$cur_row_ids[0]}{$cur_row_ids[1]}{$cur_row_ids[5]} ||= [{ text => $cur_row->[0] }]
- unless $collapse_idx[9]{$cur_row_ids[0]}{$cur_row_ids[1]}{$cur_row_ids[5]};
-
- $_[0][$result_pos++] = $collapse_idx[1]{$cur_row_ids[1]}
- if $is_new_res;
- }
-
- splice @{$_[0]}, $result_pos;
- ',
- 'Multiple has_many on multiple branches torture test',
-);
-
-done_testing;
-
-my $deparser;
-sub is_same_src {
- $deparser ||= B::Deparse->new;
- local $Test::Builder::Level = $Test::Builder::Level + 1;
-
- my ($got, $expect) = map {
- my $cref = eval "sub { $_ }" or do {
- fail "Coderef does not compile!\n\n$@\n\n$_";
- return undef;
- };
- $deparser->coderef2text($cref);
- } @_[0,1];
-
- is ($got, $expect, $_[2]||() )
- or note ("Originals source:\n\n$_[0]\n\n$_[1]\n");
-}
-
{ id => 'foo.id' },
{ 'ends_with_me.id' => 'ends_with_me.id' },
],
- order_by => [qw( year artist title )],
+ order_by => [qw( artist title )],
}),
sql => '(
SELECT id, ends_with_me__id
SELECT foo.id AS id, ends_with_me.id AS ends_with_me__id
FROM cd me
WHERE id = ?
- ORDER BY year, artist, title
+ ORDER BY artist, title
) me
WHERE ROWNUM <= ?
) me
) me
LEFT JOIN books books
ON books.owner = me.id
- ORDER BY me.id
+ ORDER BY books.owner
)',
[
[ { sqlt_datatype => 'integer' } => 3 ],
) me
LEFT JOIN books books
ON books.owner = me.id
- ORDER BY me.id
+ ORDER BY books.owner
)',
[
[ { sqlt_datatype => 'integer' } => 1 ],
) me
LEFT JOIN books books
ON books.owner = me.id
- ORDER BY me.id
+ ORDER BY books.owner
)',
[
[ { sqlt_datatype => 'integer' } => 1 ],
) me
LEFT JOIN books books
ON books.owner = me.id
- ORDER BY me.id
+ ORDER BY books.owner
)',
[
[ { sqlt_datatype => 'integer' } => 3 ],
) me
LEFT JOIN books books
ON books.owner = me.id
- ORDER BY me.id
+ ORDER BY books.owner
)',
[
[ { sqlt_datatype => 'integer' } => 2 ],
) me
LEFT JOIN books books
ON books.owner = me.id
- ORDER BY me.id
+ ORDER BY books.owner
)',
[
[ { sqlt_datatype => 'integer' } => 2 ],
) me
LEFT JOIN books books
ON books.owner = me.id
- ORDER BY me.id
+ ORDER BY books.owner
)',
[],
],
) me
LEFT JOIN books books
ON books.owner = me.id
- ORDER BY me.id
+ ORDER BY books.owner
)',
[],
],
) me
LEFT JOIN books books
ON books.owner = me.id
- ORDER BY me.id
+ ORDER BY me.id, books.owner
)',
[
[ { sqlt_datatype => 'integer' } => 1 ],