Revision history for DBIx::Class
+ * New Features / Changes
+ - Rewrite from scratch the result constructor codepath - many bugfixes
+ and performance improvements (the current codebase is now capable of
+ outperforming both DBIx::DataModel and Rose::DB::Object on some
+ workloads). Some notable benefits:
+ - Multiple has_many prefetch
+ - Partial prefetch - you now can select only columns you are
+ interested in, while preserving the collapse functionality
+ (collapse is now exposed as a first-class API attribute)
+ - Prefetch of resultsets with arbitrary order
+ (RT#54949, RT#74024, RT#74584)
+ - Prefetch no longer inserts right-side table order_by clauses
+ (massively helps the deficient MySQL optimizer)
+ - Prefetch with limit on right-side ordered resultsets now works
+ correctly (via aggregated grouping)
+ - No longer order the insides of a complex prefetch subquery,
+ unless required to satisfy a limit
+ - Stop erroneously considering order_by criteria from a join under
+ distinct => 1 (the distinct should apply to the main source only)
+ - Massively optimize codepath around ->cursor(), over 10x speedup
+ on some iterating workloads.
+ - Changing the result_class of a ResultSet in progress is now
+ explicitly forbidden. The behavior was undefined before, and
+ would result in wildly differing outcomes depending on $rs
+ attributes.
+ - Deprecate returning of prefetched 'filter' rels as part of
+ get_columns() and get_inflated_columns() data
+ - Invoking get_inflated_columns() no longer fires get_columns() but
+ instead retrieves data from individual non-inflatable columns via
+ get_column()
+ - Emit a warning on incorrect use of nullable columns within a
+ primary key
+ - Limited checks are performed on whether columns without declared
+ is_nullable => 1 metadata do in fact sometimes fetch NULLs from
+ the database (the check is currently very limited and is performed
+ only on resultset collapse when the alternative is rather worse)
+
* Fixes
+ - Fix _dbi_attrs_for_bind() being called before DBI has been loaded
+ (regression in 0.08210)
- Fix update/delete operations on resultsets *joining* the updated
table failing on MySQL. Resolves oversights in the fixes for
RT#81378 and RT#81897
+ - Fix open cursors silently resetting when inherited across a fork
+ or a thread
+ - Properly support "MySQL-style" left-side group_by with prefetch
+ - Fix $grouped_rs->get_column($col)->func($func) producing incorrect
+ SQL (RT#81127)
- Stop Sybase ASE storage from generating invalid SQL in subselects
when a limit without offset is encountered
+ - Even more robust behavior of GenericSubQuery limit dialect
0.08210 2013-04-04 15:30 (UTC)
* New Features / Changes
my $test_requires = {
'File::Temp' => '0.22',
+ 'Test::Deep' => '0.101',
'Test::Exception' => '0.31',
'Test::Warn' => '0.21',
'Test::More' => '0.94',
+++ /dev/null
-* a48693f4 adds 5 files for a test that may even be the same as that from
-571df676 - please rewrite using the existing schema and delete the rest
$schema->deploy;
my $rs = $schema->resultset ('Artist');
-$rs->populate ([ map { { name => "Art_$_"} } (1 .. 10000) ]);
+
+my $hri_rs = $rs->search ({}, { result_class => 'DBIx::Class::ResultClass::HashRefInflator' } );
+
+#DB::enable_profile();
+#my @foo = $hri_rs->all;
+#DB::disable_profile();
+#exit;
my $dbh = $schema->storage->dbh;
my $sql = sprintf ('SELECT %s FROM %s %s',
$rs->_resolved_attrs->{alias},
);
-my $compdbi = sub {
- my @r = $schema->storage->dbh->selectall_arrayref ('SELECT * FROM ' . ${$rs->as_query}->[0] )
-} if $rs->can ('as_query');
-
-cmpthese(-3, {
- Cursor => sub { $rs->reset; my @r = $rs->cursor->all },
- HRI => sub { $rs->reset; my @r = $rs->search ({}, { result_class => 'DBIx::Class::ResultClass::HashRefInflator' } )->all },
- RowObj => sub { $rs->reset; my @r = $rs->all },
- RawDBI => sub { my @r = $dbh->selectall_arrayref ($sql) },
- $compdbi ? (CompDBI => $compdbi) : (),
-});
+for (1,10,20,50,200,2500,10000) {
+ $rs->delete;
+ $rs->populate ([ map { { name => "Art_$_"} } (1 .. $_) ]);
+ print "\nRetrieval of $_ rows\n";
+ bench();
+}
+
+sub bench {
+ cmpthese(-3, {
+ Cursor => sub { my @r = $rs->cursor->all },
+ HRI => sub { my @r = $hri_rs->all },
+ RowObj => sub { my @r = $rs->all },
+ DBI_AoH => sub { my @r = @{ $dbh->selectall_arrayref ($sql, { Slice => {} }) } },
+ DBI_AoA=> sub { my @r = @{ $dbh->selectall_arrayref ($sql) } },
+ });
+}
# $VERSION declaration must stay up here, ahead of any other package
# declarations, as to not confuse various modules attempting to determine
# this ones version, whether that be s.c.o. or Module::Metadata, etc
-$VERSION = '0.08210';
+$VERSION = '0.08242';
$VERSION = eval $VERSION if $VERSION =~ /_/; # numify for warning-free dev releases
return $_[0]->count;
}
+sub _construct_results {
+ my $self = shift;
+
+ my $rows = $self->next::method(@_);
+
+ if (my $f = $self->_resolved_attrs->{record_filter}) {
+ $_ = $f->($_) for @$rows;
+ }
+
+ return $rows;
+}
+
1;
use strict;
use warnings;
-use Sub::Name ();
+use Sub::Name;
+use DBIx::Class::Carp;
+use namespace::clean;
our %_pod_inherit_config =
(
deflate => sub {
my ($val, $self) = @_;
$self->throw_exception("'$val' isn't a $f_class") unless $val->isa($f_class);
- return ($val->_ident_values)[0];
- # WARNING: probably breaks for multi-pri sometimes. FIXME
+
+ # MASSIVE FIXME - this code assumes we pointed at the PK, but the belongs_to
+ # helper does not check any of this
+ # fixup the code a bit to make things saner, but ideally 'filter' needs to
+ # be deprecated ASAP and removed shortly after
+ # Not doing so before 0.08250 however, too many things in motion already
+ my ($pk_col, @rest) = $val->_pri_cols;
+ $self->throw_exception(
+ "Relationship '$rel' of type 'filter' can not work with a multicolumn primary key on source '$f_class'"
+ ) if @rest;
+
+ my $v = $val->$pk_col;
+ carp_unique (
+ "Unable to deflate 'filter'-type relationship '$rel' (related object "
+ . "primary key not retrieved), assuming undef instead"
+ ) if ( ! defined $v and $val->in_storage );
+
+ return $v;
}
}
);
no warnings 'redefine';
foreach my $meth (keys %meth) {
my $name = join '::', $class, $meth;
- *$name = Sub::Name::subname($name, $meth{$meth});
+ *$name = subname($name, $meth{$meth});
}
}
}
sub related_resultset {
my $self = shift;
+
$self->throw_exception("Can't call *_related as class methods")
unless ref $self;
+
my $rel = shift;
- my $rel_info = $self->relationship_info($rel);
- $self->throw_exception( "No such relationship '$rel'" )
- unless $rel_info;
- return $self->{related_resultsets}{$rel} ||= do {
+ return $self->{related_resultsets}{$rel}
+ if defined $self->{related_resultsets}{$rel};
+
+ return $self->{related_resultsets}{$rel} = do {
+
+ my $rel_info = $self->relationship_info($rel)
+ or $self->throw_exception( "No such relationship '$rel'" );
+
my $attrs = (@_ > 1 && ref $_[$#_] eq 'HASH' ? pop(@_) : {});
$attrs = { %{$rel_info->{attrs} || {}}, %$attrs };
if (@_ > 1 && (@_ % 2 == 1));
my $query = ((@_ > 1) ? {@_} : shift);
- my $source = $self->result_source;
+ my $rsrc = $self->result_source;
# condition resolution may fail if an incomplete master-object prefetch
# is encountered - that is ok during prefetch construction (not yet in_storage)
my ($cond, $is_crosstable) = try {
- $source->_resolve_condition( $rel_info->{cond}, $rel, $self, $rel )
+ $rsrc->_resolve_condition( $rel_info->{cond}, $rel, $self, $rel )
}
catch {
if ($self->in_storage) {
# root alias as 'me', instead of $rel (as opposed to invoking
# $rs->search_related)
- local $source->{_relationships}{me} = $source->{_relationships}{$rel}; # make the fake 'me' rel
- my $obj_table_alias = lc($source->source_name) . '__row';
+ local $rsrc->{_relationships}{me} = $rsrc->{_relationships}{$rel}; # make the fake 'me' rel
+ my $obj_table_alias = lc($rsrc->source_name) . '__row';
$obj_table_alias =~ s/\W+/_/g;
- $source->resultset->search(
+ $rsrc->resultset->search(
$self->ident_condition($obj_table_alias),
{ alias => $obj_table_alias },
)->search_related('me', $query, $attrs)
# at some point what it does. Also the entire UNRESOLVABLE_CONDITION
# business seems shady - we could simply not query *at all*
if ($cond eq $DBIx::Class::ResultSource::UNRESOLVABLE_CONDITION) {
- my $reverse = $source->reverse_relationship_info($rel);
+ my $reverse = $rsrc->reverse_relationship_info($rel);
foreach my $rev_rel (keys %$reverse) {
if ($reverse->{$rev_rel}{attrs}{accessor} && $reverse->{$rev_rel}{attrs}{accessor} eq 'multi') {
weaken($attrs->{related_objects}{$rev_rel}[0] = $self);
}
$query = ($query ? { '-and' => [ $cond, $query ] } : $cond);
- $self->result_source->related_source($rel)->resultset->search(
+ $rsrc->related_source($rel)->resultset->search(
$query, $attrs
);
}
if (ref $self) { # cdbi calls this as a class method, /me vomits
my $rsrc = $self->result_source;
- my (undef, $crosstable, $relcols) = $rsrc->_resolve_condition (
+ my (undef, $crosstable, $cond_targets) = $rsrc->_resolve_condition (
$rsrc->relationship_info($rel)->{cond}, $rel, $self, $rel
);
$self->throw_exception("Custom relationship '$rel' does not resolve to a join-free condition fragment")
if $crosstable;
- if (@{$relcols || []} and @$relcols = grep { ! exists $values->{$_} } @$relcols) {
+ if (my @unspecified_rel_condition_chunks = grep { ! exists $values->{$_} } @{$cond_targets||[]} ) {
$self->throw_exception(sprintf (
"Custom relationship '%s' not definitive - returns conditions instead of values for column(s): %s",
$rel,
- map { "'$_'" } @$relcols
+ map { "'$_'" } @unspecified_rel_condition_chunks
));
}
}
#
# sanity check - currently throw when a complex coderef rel is encountered
# FIXME - should THROW MOAR!
- my ($cond, $crosstable, $relcols) = $rsrc->_resolve_condition (
+ my ($cond, $crosstable, $cond_targets) = $rsrc->_resolve_condition (
$rel_info->{cond}, $f_obj, $rel, $rel
);
$self->throw_exception("Custom relationship '$rel' does not resolve to a join-free condition fragment")
$self->throw_exception(sprintf (
"Custom relationship '%s' not definitive - returns conditions instead of values for column(s): %s",
$rel,
- map { "'$_'" } @$relcols
- )) if @{$relcols || []};
+ map { "'$_'" } @$cond_targets
+ )) if $cond_targets;
$self->set_columns($cond);
and
keys %$cond == 1
and
+ (keys %$cond)[0] =~ /^foreign\./
+ and
$class->has_column($rel)
) ? 'filter' : 'single';
$mk_hash = sub {
my $hash = {
+
# the main hash could be an undef if we are processing a skipped-over join
$_[0] ? %{$_[0]} : (),
# the second arg is a hash of arrays for each prefetched relation
- map {
- ref $_[1]->{$_}[0] eq 'ARRAY' # multi rel or not?
- ? ( $_ => [ map
- { $mk_hash->(@$_) || () }
- @{$_[1]->{$_}}
- ] )
- : ( $_ => $mk_hash->( @{$_[1]->{$_}} ) )
-
- } ( $_[1] ? ( keys %{$_[1]} ) : () )
+ map { $_ => (
+
+ # null-branch or not
+ ref $_[1]->{$_} eq $DBIx::Class::ResultSource::RowParser::Util::null_branch_class
+
+ ? ref $_[1]->{$_}[0] eq 'ARRAY' ? [] : undef
+
+ : ref $_[1]->{$_}[0] eq 'ARRAY'
+ ? [ map { $mk_hash->( @$_ ) || () } @{$_[1]->{$_}} ]
+ : $mk_hash->( @{$_[1]->{$_}} )
+
+ ) } ($_[1] ? keys %{$_[1]} : ())
};
- # if there is at least one defined column *OR* we are at the root of
- # the resultset - consider the result real (and not an emtpy has_many
- # rel containing one empty hashref)
- # an empty arrayref is an empty multi-sub-prefetch - don't consider
- # those either
- return $hash if $_[2];
-
- for (values %$hash) {
- return $hash if (
- defined $_
- and
- (ref $_ ne 'ARRAY' or scalar @$_)
- );
- }
-
- return undef;
+ ($_[2] || keys %$hash) ? $hash : undef;
};
=head1 METHODS
=head3 Resolving conditions and attributes
-When a resultset is chained from another resultset, conditions and
-attributes with the same keys need resolving.
+When a resultset is chained from another resultset (ie:
+C<my $new_rs = $old_rs->search(\%extra_cond, \%attrs)>), conditions
+and attributes with the same keys need resolving.
-L</join>, L</prefetch>, L</+select>, L</+as> attributes are merged
-into the existing ones from the original resultset.
+If any of L</columns>, L</select>, L</as> are present, they reset the
+original selection, and start the selection "clean".
+
+The L</join>, L</prefetch>, L</+columns>, L</+select>, L</+as> attributes
+are merged into the existing ones from the original resultset.
The L</where> and L</having> attributes, and any search conditions, are
merged with an SQL C<AND> to the existing condition from the original
my ($source, $attrs) = @_;
$source = $source->resolve
if $source->isa('DBIx::Class::ResultSourceHandle');
+
$attrs = { %{$attrs||{}} };
+ delete @{$attrs}{qw(_sqlmaker_select_args _related_results_construction)};
if ($attrs->{page}) {
$attrs->{rows} ||= 10;
}
my $old_attrs = { %{$self->{attrs}} };
- my $old_having = delete $old_attrs->{having};
- my $old_where = delete $old_attrs->{where};
+ my ($old_having, $old_where) = delete @{$old_attrs}{qw(having where)};
my $new_attrs = { %$old_attrs };
# Run the query, passing the result_class since it should propagate for find
my $rs = $self->search ($final_cond, {result_class => $self->result_class, %$attrs});
- if (keys %{$rs->_resolved_attrs->{collapse}}) {
+ if ($rs->_resolved_attrs->{collapse}) {
my $row = $rs->next;
carp "Query returned more than one row" if $rs->next;
return $row;
my $self = shift;
return $self->{cursor} ||= do {
- my $attrs = { %{$self->_resolved_attrs } };
+ my $attrs = $self->_resolved_attrs;
$self->result_source->storage->select(
$attrs->{from}, $attrs->{select}, $attrs->{where}, $attrs
);
my $attrs = { %{$self->_resolved_attrs} };
- if (keys %{$attrs->{collapse}}) {
- $self->throw_exception(
- 'single() can not be used on resultsets prefetching has_many. Use find( \%cond ) or next() instead'
- );
- }
+ $self->throw_exception(
+ 'single() can not be used on resultsets collapsing a has_many. Use find( \%cond ) or next() instead'
+ ) if $attrs->{collapse};
if ($where) {
if (defined $attrs->{where}) {
}
}
- my @data = $self->result_source->storage->select_single(
+ my $data = [ $self->result_source->storage->select_single(
$attrs->{from}, $attrs->{select},
$attrs->{where}, $attrs
- );
-
- return (@data ? ($self->_construct_object(@data))[0] : undef);
+ )];
+ $self->{_attrs}{_sqlmaker_select_args} = $attrs->{_sqlmaker_select_args};
+ return undef unless @$data;
+ $self->{_stashed_rows} = [ $data ];
+ $self->_construct_results->[0];
}
sub next {
my ($self) = @_;
+
if (my $cache = $self->get_cache) {
$self->{all_cache_position} ||= 0;
return $cache->[$self->{all_cache_position}++];
}
+
if ($self->{attrs}{cache}) {
delete $self->{pager};
$self->{all_cache_position} = 1;
return ($self->all)[0];
}
- if ($self->{stashed_objects}) {
- my $obj = shift(@{$self->{stashed_objects}});
- delete $self->{stashed_objects} unless @{$self->{stashed_objects}};
- return $obj;
- }
- my @row = (
- exists $self->{stashed_row}
- ? @{delete $self->{stashed_row}}
- : $self->cursor->next
- );
- return undef unless (@row);
- my ($row, @more) = $self->_construct_object(@row);
- $self->{stashed_objects} = \@more if @more;
- return $row;
-}
-sub _construct_object {
- my ($self, @row) = @_;
+ return shift(@{$self->{_stashed_results}}) if @{ $self->{_stashed_results}||[] };
- my $info = $self->_collapse_result($self->{_attrs}{as}, \@row)
- or return ();
- my @new = $self->result_class->inflate_result($self->result_source, @$info);
- @new = $self->{_attrs}{record_filter}->(@new)
- if exists $self->{_attrs}{record_filter};
- return @new;
-}
+ $self->{_stashed_results} = $self->_construct_results
+ or return undef;
-sub _collapse_result {
- my ($self, $as_proto, $row) = @_;
-
- my @copy = @$row;
+ return shift @{$self->{_stashed_results}};
+}
- # 'foo' => [ undef, 'foo' ]
- # 'foo.bar' => [ 'foo', 'bar' ]
- # 'foo.bar.baz' => [ 'foo.bar', 'baz' ]
+# Constructs as many results as it can in one pass while respecting
+# cursor laziness. Several modes of operation:
+#
+# * Always builds everything present in @{$self->{_stashed_rows}}
+# * If called with $fetch_all true - pulls everything off the cursor and
+# builds all result structures (or objects) in one pass
+# * If $self->_resolved_attrs->{collapse} is true, checks the order_by
+# and if the resultset is ordered properly by the left side:
+# * Fetches stuff off the cursor until the "master object" changes,
+# and saves the last extra row (if any) in @{$self->{_stashed_rows}}
+# OR
+# * Just fetches, and collapses/constructs everything as if $fetch_all
+# was requested (there is no other way to collapse except for an
+# eager cursor)
+# * If no collapse is requested - just get the next row, construct and
+# return
+sub _construct_results {
+ my ($self, $fetch_all) = @_;
- my @construct_as = map { [ (/^(?:(.*)\.)?([^.]+)$/) ] } @$as_proto;
+ my $rsrc = $self->result_source;
+ my $attrs = $self->_resolved_attrs;
- my %collapse = %{$self->{_attrs}{collapse}||{}};
+ if (
+ ! $fetch_all
+ and
+ ! $attrs->{order_by}
+ and
+ $attrs->{collapse}
+ and
+ my @pcols = $rsrc->primary_columns
+ ) {
+ # default order for collapsing unless the user asked for something
+ $attrs->{order_by} = [ map { join '.', $attrs->{alias}, $_} @pcols ];
+ $attrs->{_ordered_for_collapse} = 1;
+ $attrs->{_order_is_artificial} = 1;
+ }
- my @pri_index;
+ my $cursor = $self->cursor;
- # if we're doing collapsing (has_many prefetch) we need to grab records
- # until the PK changes, so fill @pri_index. if not, we leave it empty so
- # we know we don't have to bother.
+ # this will be used as both initial raw-row collector AND as a RV of
+ # _construct_results. Not regrowing the array twice matters a lot...
+ # a surprising amount actually
+ my $rows = delete $self->{_stashed_rows};
- # the reason for not using the collapse stuff directly is because if you
- # had for e.g. two artists in a row with no cds, the collapse info for
- # both would be NULL (undef) so you'd lose the second artist
+ my $did_fetch_all = $fetch_all;
- # store just the index so we can check the array positions from the row
- # without having to contruct the full hash
+ if ($fetch_all) {
+ # FIXME SUBOPTIMAL - we can do better, cursor->next/all (well diff. methods) should return a ref
+ $rows = [ ($rows ? @$rows : ()), $cursor->all ];
+ }
+ elsif( $attrs->{collapse} ) {
- if (keys %collapse) {
- my %pri = map { ($_ => 1) } $self->result_source->_pri_cols;
- foreach my $i (0 .. $#construct_as) {
- next if defined($construct_as[$i][0]); # only self table
- if (delete $pri{$construct_as[$i][1]}) {
- push(@pri_index, $i);
+ $attrs->{_ordered_for_collapse} = (
+ (
+ $attrs->{order_by}
+ and
+ $rsrc->schema
+ ->storage
+ ->_main_source_order_by_portion_is_stable($rsrc, $attrs->{order_by}, $attrs->{where})
+ ) ? 1 : 0
+ ) unless defined $attrs->{_ordered_for_collapse};
+
+ if (! $attrs->{_ordered_for_collapse}) {
+ $did_fetch_all = 1;
+
+ # instead of looping over ->next, use ->all in stealth mode
+ # *without* calling a ->reset afterwards
+ # FIXME ENCAPSULATION - encapsulation breach, cursor method additions pending
+ if (! $cursor->{_done}) {
+ $rows = [ ($rows ? @$rows : ()), $cursor->all ];
+ $cursor->{_done} = 1;
}
- last unless keys %pri; # short circuit (Johnny Five Is Alive!)
}
}
- # no need to do an if, it'll be empty if @pri_index is empty anyway
+ if (! $did_fetch_all and ! @{$rows||[]} ) {
+ # FIXME SUBOPTIMAL - we can do better, cursor->next/all (well diff. methods) should return a ref
+ if (scalar (my @r = $cursor->next) ) {
+ $rows = [ \@r ];
+ }
+ }
- my %pri_vals = map { ($_ => $copy[$_]) } @pri_index;
+ return undef unless @{$rows||[]};
- my @const_rows;
+ # sanity check - people are too clever for their own good
+ if ($attrs->{collapse} and my $aliastypes = $attrs->{_sqlmaker_select_args}[3]{_aliastypes} ) {
- do { # no need to check anything at the front, we always want the first row
+ my $multiplied_selectors;
+ for my $sel_alias ( grep { $_ ne $attrs->{alias} } keys %{ $aliastypes->{selecting} } ) {
+ if (
+ $aliastypes->{multiplying}{$sel_alias}
+ or
+ scalar grep { $aliastypes->{multiplying}{(values %$_)[0]} } @{ $aliastypes->{selecting}{$sel_alias}{-parents} }
+ ) {
+ $multiplied_selectors->{$_} = 1 for values %{$aliastypes->{selecting}{$sel_alias}{-seen_columns}}
+ }
+ }
- my %const;
+ for my $i (0 .. $#{$attrs->{as}} ) {
+ my $sel = $attrs->{select}[$i];
- foreach my $this_as (@construct_as) {
- $const{$this_as->[0]||''}{$this_as->[1]} = shift(@copy);
+ if (ref $sel eq 'SCALAR') {
+ $sel = $$sel;
+ }
+ elsif( ref $sel eq 'REF' and ref $$sel eq 'ARRAY' ) {
+ $sel = $$sel->[0];
+ }
+
+ $self->throw_exception(
+ 'Result collapse not possible - selection from a has_many source redirected to the main object'
+ ) if ($multiplied_selectors->{$sel} and $attrs->{as}[$i] !~ /\./);
}
+ }
- push(@const_rows, \%const);
+ # hotspot - skip the setter
+ my $res_class = $self->_result_class;
- } until ( # no pri_index => no collapse => drop straight out
- !@pri_index
- or
- do { # get another row, stash it, drop out if different PK
+ my $inflator_cref = $self->{_result_inflator}{cref} ||= do {
+ $res_class->can ('inflate_result')
+ or $self->throw_exception("Inflator $res_class does not provide an inflate_result() method");
+ };
- @copy = $self->cursor->next;
- $self->{stashed_row} = \@copy;
+ my $infmap = $attrs->{as};
- # last thing in do block, counts as true if anything doesn't match
+ $self->{_result_inflator}{is_core_row} = ( (
+ $inflator_cref
+ ==
+ ( \&DBIx::Class::Row::inflate_result || die "No ::Row::inflate_result() - can't happen" )
+ ) ? 1 : 0 ) unless defined $self->{_result_inflator}{is_core_row};
+
+ $self->{_result_inflator}{is_hri} = ( (
+ ! $self->{_result_inflator}{is_core_row}
+ and
+ $inflator_cref == (
+ require DBIx::Class::ResultClass::HashRefInflator
+ &&
+ DBIx::Class::ResultClass::HashRefInflator->can('inflate_result')
+ )
+ ) ? 1 : 0 ) unless defined $self->{_result_inflator}{is_hri};
- # check xor defined first for NULL vs. NOT NULL then if one is
- # defined the other must be so check string equality
- grep {
- (defined $pri_vals{$_} ^ defined $copy[$_])
- || (defined $pri_vals{$_} && ($pri_vals{$_} ne $copy[$_]))
- } @pri_index;
+ if (! $attrs->{_related_results_construction}) {
+ # construct a much simpler array->hash folder for the one-table cases right here
+ if ($self->{_result_inflator}{is_hri}) {
+ for my $r (@$rows) {
+ $r = { map { $infmap->[$_] => $r->[$_] } 0..$#$infmap };
}
- );
+ }
+ # FIXME SUBOPTIMAL this is a very very very hot spot
+ # while rather optimal we can *still* do much better, by
+ # building a smarter Row::inflate_result(), and
+ # switch to feeding it data via a much leaner interface
+ #
+ # crude unscientific benchmarking indicated the shortcut eval is not worth it for
+ # this particular resultset size
+ elsif (@$rows < 60) {
+ for my $r (@$rows) {
+ $r = $inflator_cref->($res_class, $rsrc, { map { $infmap->[$_] => $r->[$_] } (0..$#$infmap) } );
+ }
+ }
+ else {
+ eval sprintf (
+ '$_ = $inflator_cref->($res_class, $rsrc, { %s }) for @$rows',
+ join (', ', map { "\$infmap->[$_] => \$_->[$_]" } 0..$#$infmap )
+ );
+ }
+ }
+ else {
+ my $parser_type =
+ $self->{_result_inflator}{is_hri} ? 'hri'
+ : $self->{_result_inflator}{is_core_row} ? 'classic_pruning'
+ : 'classic_nonpruning'
+ ;
- my $alias = $self->{attrs}{alias};
- my $info = [];
+ # $args and $attrs to _mk_row_parser are separated to delineate what is
+ # core collapser stuff and what is dbic $rs specific
+ @{$self->{_row_parser}{$parser_type}}{qw(cref nullcheck)} = $rsrc->_mk_row_parser({
+ eval => 1,
+ inflate_map => $infmap,
+ collapse => $attrs->{collapse},
+ premultiplied => $attrs->{_main_source_premultiplied},
+ hri_style => $self->{_result_inflator}{is_hri},
+ prune_null_branches => $self->{_result_inflator}{is_hri} || $self->{_result_inflator}{is_core_row},
+ }, $attrs) unless $self->{_row_parser}{$parser_type}{cref};
+
+ # column_info metadata historically hasn't been too reliable.
+ # We need to start fixing this somehow (the collapse resolver
+ # can't work without it). Add an explicit check for the *main*
+ # result, hopefully this will gradually weed out such errors
+ #
+ # FIXME - this is a temporary kludge that reduces performance
+ # It is however necessary for the time being
+ my ($unrolled_non_null_cols_to_check, $err);
+
+ if (my $check_non_null_cols = $self->{_row_parser}{$parser_type}{nullcheck} ) {
+
+ $err =
+ 'Collapse aborted due to invalid ResultSource metadata - the following '
+ . 'selections are declared non-nullable but NULLs were retrieved: '
+ ;
- my %collapse_pos;
+ my @violating_idx;
+ COL: for my $i (@$check_non_null_cols) {
+ ! defined $_->[$i] and push @violating_idx, $i and next COL for @$rows;
+ }
- my @const_keys;
+ $self->throw_exception( $err . join (', ', map { "'$infmap->[$_]'" } @violating_idx ) )
+ if @violating_idx;
- foreach my $const (@const_rows) {
- scalar @const_keys or do {
- @const_keys = sort { length($a) <=> length($b) } keys %$const;
- };
- foreach my $key (@const_keys) {
- if (length $key) {
- my $target = $info;
- my @parts = split(/\./, $key);
- my $cur = '';
- my $data = $const->{$key};
- foreach my $p (@parts) {
- $target = $target->[1]->{$p} ||= [];
- $cur .= ".${p}";
- if ($cur eq ".${key}" && (my @ckey = @{$collapse{$cur}||[]})) {
- # collapsing at this point and on final part
- my $pos = $collapse_pos{$cur};
- CK: foreach my $ck (@ckey) {
- if (!defined $pos->{$ck} || $pos->{$ck} ne $data->{$ck}) {
- $collapse_pos{$cur} = $data;
- delete @collapse_pos{ # clear all positioning for sub-entries
- grep { m/^\Q${cur}.\E/ } keys %collapse_pos
- };
- push(@$target, []);
- last CK;
- }
- }
- }
- if (exists $collapse{$cur}) {
- $target = $target->[-1];
- }
- }
- $target->[0] = $data;
- } else {
- $info->[0] = $const->{$key};
+ $unrolled_non_null_cols_to_check = join (',', @$check_non_null_cols);
+ }
+
+ my $next_cref =
+ ($did_fetch_all or ! $attrs->{collapse}) ? undef
+ : defined $unrolled_non_null_cols_to_check ? eval sprintf <<'EOS', $unrolled_non_null_cols_to_check
+sub {
+ # FIXME SUBOPTIMAL - we can do better, cursor->next/all (well diff. methods) should return a ref
+ my @r = $cursor->next or return;
+ if (my @violating_idx = grep { ! defined $r[$_] } (%s) ) {
+ $self->throw_exception( $err . join (', ', map { "'$infmap->[$_]'" } @violating_idx ) )
+ }
+ \@r
+}
+EOS
+ : sub {
+ # FIXME SUBOPTIMAL - we can do better, cursor->next/all (well diff. methods) should return a ref
+ my @r = $cursor->next or return;
+ \@r
}
+ ;
+
+ $self->{_row_parser}{$parser_type}{cref}->(
+ $rows,
+ $next_cref ? ( $next_cref, $self->{_stashed_rows} = [] ) : (),
+ );
+
+ # Special-case multi-object HRI - there is no $inflator_cref pass
+ unless ($self->{_result_inflator}{is_hri}) {
+ $_ = $inflator_cref->($res_class, $rsrc, @$_) for @$rows
}
}
- return $info;
+ # The @$rows check seems odd at first - why wouldn't we want to warn
+ # regardless? The issue is things like find() etc, where the user
+ # *knows* only one result will come back. In these cases the ->all
+ # is not a pessimization, but rather something we actually want
+ carp_unique(
+ 'Unable to properly collapse has_many results in iterator mode due '
+ . 'to order criteria - performed an eager cursor slurp underneath. '
+ . 'Consider using ->all() instead'
+ ) if ( ! $fetch_all and @$rows > 1 );
+
+ return $rows;
}
=head2 result_source
sub result_class {
my ($self, $result_class) = @_;
if ($result_class) {
- unless (ref $result_class) { # don't fire this for an object
- $self->ensure_class_loaded($result_class);
+
+ # don't fire this for an object
+ $self->ensure_class_loaded($result_class)
+ unless ref($result_class);
+
+ if ($self->get_cache) {
+ carp_unique('Changing the result_class of a ResultSet instance with cached results is a noop - the cache contents will not be altered');
}
+ # FIXME ENCAPSULATION - encapsulation breach, cursor method additions pending
+ elsif ($self->{cursor} && $self->{cursor}{_pos}) {
+ $self->throw_exception('Changing the result_class of a ResultSet instance with an active cursor is not supported');
+ }
+
$self->_result_class($result_class);
- # THIS LINE WOULD BE A BUG - this accessor specifically exists to
- # permit the user to set result class on one result set only; it only
- # chains if provided to search()
- #$self->{attrs}{result_class} = $result_class if ref $self;
+
+ delete $self->{_result_inflator};
}
$self->_result_class;
}
# this is a little optimization - it is faster to do the limit
# adjustments in software, instead of a subquery
- my $rows = delete $attrs->{rows};
- my $offset = delete $attrs->{offset};
+ my ($rows, $offset) = delete @{$attrs}{qw/rows offset/};
my $crs;
if ($self->_has_resolved_attr (qw/collapse group_by/)) {
# software based limiting can not be ported if this $rs is to be used
# in a subquery itself (i.e. ->as_query)
if ($self->_has_resolved_attr (qw/collapse group_by offset rows/)) {
- return $self->_count_subq_rs;
+ return $self->_count_subq_rs($self->{_attrs});
}
else {
- return $self->_count_rs;
+ return $self->_count_rs($self->{_attrs});
}
}
my ($self, $attrs) = @_;
my $rsrc = $self->result_source;
- $attrs ||= $self->_resolved_attrs;
my $tmp_attrs = { %$attrs };
# take off any limits, record_filter is cdbi, and no point of ordering nor locking a count
delete @{$tmp_attrs}{qw/rows offset order_by record_filter for/};
# overwrite the selector (supplied by the storage)
- $tmp_attrs->{select} = $rsrc->storage->_count_select ($rsrc, $attrs);
- $tmp_attrs->{as} = 'count';
- delete @{$tmp_attrs}{qw/columns/};
-
- my $tmp_rs = $rsrc->resultset_class->new($rsrc, $tmp_attrs)->get_column ('count');
-
- return $tmp_rs;
+ $rsrc->resultset_class->new($rsrc, {
+ %$tmp_attrs,
+ select => $rsrc->storage->_count_select ($rsrc, $attrs),
+ as => 'count',
+ })->get_column ('count');
}
#
my ($self, $attrs) = @_;
my $rsrc = $self->result_source;
- $attrs ||= $self->_resolved_attrs;
my $sub_attrs = { %$attrs };
# extra selectors do not go in the subquery and there is no point of ordering it, nor locking it
- delete @{$sub_attrs}{qw/collapse columns as select _prefetch_selector_range order_by for/};
+ delete @{$sub_attrs}{qw/collapse columns as select order_by for/};
# if we multi-prefetch we group_by something unique, as this is what we would
# get out of the rs via ->next/->all. We *DO WANT* to clobber old group_by regardless
- if ( keys %{$attrs->{collapse}} ) {
+ if ( $attrs->{collapse} ) {
$sub_attrs->{group_by} = [ map { "$attrs->{alias}.$_" } @{
$rsrc->_identifying_column_set || $self->throw_exception(
'Unable to construct a unique group_by criteria properly collapsing the '
sub all {
my $self = shift;
if(@_) {
- $self->throw_exception("all() doesn't take any arguments, you probably wanted ->search(...)->all()");
+ $self->throw_exception("all() doesn't take any arguments, you probably wanted ->search(...)->all()");
}
- return @{ $self->get_cache } if $self->get_cache;
-
- my @obj;
-
- if (keys %{$self->_resolved_attrs->{collapse}}) {
- # Using $self->cursor->all is really just an optimisation.
- # If we're collapsing has_many prefetches it probably makes
- # very little difference, and this is cleaner than hacking
- # _construct_object to survive the approach
- $self->cursor->reset;
- my @row = $self->cursor->next;
- while (@row) {
- push(@obj, $self->_construct_object(@row));
- @row = (exists $self->{stashed_row}
- ? @{delete $self->{stashed_row}}
- : $self->cursor->next);
- }
- } else {
- @obj = map { $self->_construct_object(@$_) } $self->cursor->all;
+ delete @{$self}{qw/_stashed_rows _stashed_results/};
+
+ if (my $c = $self->get_cache) {
+ return @$c;
}
- $self->set_cache(\@obj) if $self->{attrs}{cache};
+ $self->cursor->reset;
+
+ my $objs = $self->_construct_results('fetch_all') || [];
+
+ $self->set_cache($objs) if $self->{attrs}{cache};
- return @obj;
+ return @$objs;
}
=head2 reset
sub reset {
my ($self) = @_;
+
+ delete @{$self}{qw/_stashed_rows _stashed_results/};
$self->{all_cache_position} = 0;
$self->cursor->reset;
return $self;
my $attrs = { %{$self->_resolved_attrs} };
my $join_classifications;
- my $existing_group_by = delete $attrs->{group_by};
+ my ($existing_group_by) = delete @{$attrs}{qw(group_by _grouped_by_distinct)};
# do we need a subquery for any reason?
my $needs_subq = (
# simplify the joinmap, so we can further decide if a subq is necessary
if (!$needs_subq and @{$attrs->{from}} > 1) {
- $attrs->{from} = $storage->_prune_unused_joins ($attrs->{from}, $attrs->{select}, $self->{cond}, $attrs);
-
- # check if there are any joins left after the prune
- if ( @{$attrs->{from}} > 1 ) {
- $join_classifications = $storage->_resolve_aliastypes_from_select_args (
- [ @{$attrs->{from}}[1 .. $#{$attrs->{from}}] ],
- $attrs->{select},
- $self->{cond},
- $attrs
- );
- # any non-pruneable joins imply subq
- $needs_subq = scalar keys %{ $join_classifications->{restricting} || {} };
- }
+ ($attrs->{from}, $join_classifications) =
+ $storage->_prune_unused_joins ($attrs->{from}, $attrs->{select}, $self->{cond}, $attrs);
+
+ # any non-pruneable non-local restricting joins imply subq
+ $needs_subq = defined List::Util::first { $_ ne $attrs->{alias} } keys %{ $join_classifications->{restricting} || {} };
}
# check if the head is composite (by now all joins are thrown out unless $needs_subq)
);
# make a new $rs selecting only the PKs (that's all we really need for the subq)
- delete $attrs->{$_} for qw/collapse _collapse_order_by select _prefetch_selector_range as/;
+ delete $attrs->{$_} for qw/select as collapse/;
$attrs->{columns} = [ map { "$attrs->{alias}.$_" } @$idcols ];
- $attrs->{group_by} = \ ''; # FIXME - this is an evil hack, it causes the optimiser to kick in and throw away the LEFT joins
+
+ # this will be consumed by the pruner waaaaay down the stack
+ $attrs->{_force_prune_multiplying_joins} = 1;
+
my $subrs = (ref $self)->new($rsrc, $attrs);
if (@$idcols == 1) {
# throw away the paging flags and re-run the count (possibly
# with a subselect) to get the real total count
my $count_attrs = { %$attrs };
- delete $count_attrs->{$_} for qw/rows offset page pager/;
+ delete @{$count_attrs}{qw/rows offset page pager/};
my $total_rs = (ref $self)->new($self->result_source, $count_attrs);
my $attrs = { %{ $self->_resolved_attrs } };
- $self->result_source->storage->_select_args_to_query (
+ my $aq = $self->result_source->storage->_select_args_to_query (
$attrs->{from}, $attrs->{select}, $attrs->{where}, $attrs
);
+
+ $self->{_attrs}{_sqlmaker_select_args} = $attrs->{_sqlmaker_select_args};
+
+ $aq;
}
=head2 find_or_new
=cut
sub create {
- my ($self, $attrs) = @_;
+ my ($self, $col_data) = @_;
$self->throw_exception( "create needs a hashref" )
- unless ref $attrs eq 'HASH';
- return $self->new_result($attrs)->insert;
+ unless ref $col_data eq 'HASH';
+ return $self->new_result($col_data)->insert;
}
=head2 find_or_create
sub related_resultset {
my ($self, $rel) = @_;
- $self->{related_resultsets} ||= {};
- return $self->{related_resultsets}{$rel} ||= do {
+ return $self->{related_resultsets}{$rel}
+ if defined $self->{related_resultsets}{$rel};
+
+ return $self->{related_resultsets}{$rel} = do {
my $rsrc = $self->result_source;
my $rel_info = $rsrc->relationship_info($rel);
#XXX - temp fix for result_class bug. There likely is a more elegant fix -groditi
delete @{$attrs}{qw(result_class alias)};
- my $new_cache;
+ my $related_cache;
if (my $cache = $self->get_cache) {
- if ($cache->[0] && $cache->[0]->related_resultset($rel)->get_cache) {
- $new_cache = [ map { @{$_->related_resultset($rel)->get_cache} }
- @$cache ];
- }
+ $related_cache = [ map
+ { @{$_->related_resultset($rel)->get_cache||[]} }
+ @$cache
+ ];
}
my $rel_source = $rsrc->related_source($rel);
where => $attrs->{where},
});
};
- $new->set_cache($new_cache) if $new_cache;
+ $new->set_cache($related_cache) if $related_cache;
$new;
};
}
# ->_resolve_join as otherwise they get lost - captainL
my $join = $self->_merge_joinpref_attr( $attrs->{join}, $attrs->{prefetch} );
- delete @{$attrs}{qw/join prefetch collapse group_by distinct select as columns +select +as +columns/};
+ delete @{$attrs}{qw/join prefetch collapse group_by distinct _grouped_by_distinct select as columns +select +as +columns/};
my $seen = { %{ (delete $attrs->{seen_join}) || {} } };
if $attrs->{select};
# assume all unqualified selectors to apply to the current alias (legacy stuff)
- for (@sel) {
- $_ = (ref $_ or $_ =~ /\./) ? $_ : "$alias.$_";
- }
+ $_ = (ref $_ or $_ =~ /\./) ? $_ : "$alias.$_" for @sel;
- # disqualify all $alias.col as-bits (collapser mandated)
- for (@as) {
- $_ = ($_ =~ /^\Q$alias.\E(.+)$/) ? $1 : $_;
- }
+ # disqualify all $alias.col as-bits (inflate-map mandated)
+ $_ = ($_ =~ /^\Q$alias.\E(.+)$/) ? $1 : $_ for @as;
# de-duplicate the result (remove *identical* select/as pairs)
# and also die on duplicate {as} pointing to different {select}s
carp_unique ("Useless use of distinct on a grouped resultset ('distinct' is ignored when a 'group_by' is present)");
}
else {
+ $attrs->{_grouped_by_distinct} = 1;
# distinct affects only the main selection part, not what prefetch may
# add below.
- $attrs->{group_by} = $source->storage->_group_over_selection (
- $attrs->{from},
- $attrs->{select},
- $attrs->{order_by},
- );
+ $attrs->{group_by} = $source->storage->_group_over_selection($attrs);
}
}
- $attrs->{collapse} ||= {};
- if ($attrs->{prefetch}) {
+ # generate selections based on the prefetch helper
+ my $prefetch;
+ $prefetch = $self->_merge_joinpref_attr( {}, delete $attrs->{prefetch} )
+ if defined $attrs->{prefetch};
+
+ if ($prefetch) {
$self->throw_exception("Unable to prefetch, resultset contains an unnamed selector $attrs->{_dark_selector}{string}")
if $attrs->{_dark_selector};
- my $prefetch = $self->_merge_joinpref_attr( {}, delete $attrs->{prefetch} );
-
- my $prefetch_ordering = [];
+ $attrs->{collapse} = 1;
# this is a separate structure (we don't look in {from} directly)
# as the resolver needs to shift things off the lists to work
}
}
- my @prefetch =
- $source->_resolve_prefetch( $prefetch, $alias, $join_map, $prefetch_ordering, $attrs->{collapse} );
-
- # we need to somehow mark which columns came from prefetch
- if (@prefetch) {
- my $sel_end = $#{$attrs->{select}};
- $attrs->{_prefetch_selector_range} = [ $sel_end + 1, $sel_end + @prefetch ];
- }
+ my @prefetch = $source->_resolve_prefetch( $prefetch, $alias, $join_map );
push @{ $attrs->{select} }, (map { $_->[0] } @prefetch);
push @{ $attrs->{as} }, (map { $_->[1] } @prefetch);
+ }
+
+ if ( List::Util::first { $_ =~ /\./ } @{$attrs->{as}} ) {
+ $attrs->{_related_results_construction} = 1;
+ }
+
+ # run through the resulting joinstructure (starting from our current slot)
+  # and unset collapse if proven unnecessary
+ #
+ # also while we are at it find out if the current root source has
+ # been premultiplied by previous related_source chaining
+ #
+ # this allows to predict whether a root object with all other relation
+ # data set to NULL is in fact unique
+ if ($attrs->{collapse}) {
+
+ if (ref $attrs->{from} eq 'ARRAY') {
+
+ if (@{$attrs->{from}} == 1) {
+ # no joins - no collapse
+ $attrs->{collapse} = 0;
+ }
+ else {
+ # find where our table-spec starts
+ my @fromlist = @{$attrs->{from}};
+ while (@fromlist) {
+ my $t = shift @fromlist;
+
+ my $is_multi;
+ # me vs join from-spec distinction - a ref means non-root
+ if (ref $t eq 'ARRAY') {
+ $t = $t->[0];
+ $is_multi ||= ! $t->{-is_single};
+ }
+ last if ($t->{-alias} && $t->{-alias} eq $alias);
+ $attrs->{_main_source_premultiplied} ||= $is_multi;
+ }
+
+ # no non-singles remaining, nor any premultiplication - nothing to collapse
+ if (
+ ! $attrs->{_main_source_premultiplied}
+ and
+ ! List::Util::first { ! $_->[0]{-is_single} } @fromlist
+ ) {
+ $attrs->{collapse} = 0;
+ }
+ }
+ }
- push( @{$attrs->{order_by}}, @$prefetch_ordering );
- $attrs->{_collapse_order_by} = \@$prefetch_ordering;
+ else {
+ # if we can not analyze the from - err on the side of safety
+ $attrs->{_main_source_premultiplied} = 1;
+ }
}
# if both page and offset are specified, produce a combined offset
$seen_keys->{$import_key} = 1; # don't merge the same key twice
}
- return $orig;
+ return @$orig ? $orig : ();
}
{
my $to_serialize = { %$self };
# A cursor in progress can't be serialized (and would make little sense anyway)
- delete $to_serialize->{cursor};
+ # the parser can be regenerated (and can't be serialized)
+ delete @{$to_serialize}{qw/cursor _row_parser _result_inflator/};
# nor is it sensical to store a not-yet-fired-count pager
if ($to_serialize->{pager} and ref $to_serialize->{pager}{total_entries} eq 'CODE') {
}
}
+1;
+
+__END__
+
# XXX: FIXME: Attributes docs need clearing up
=head1 ATTRIBUTES
=over 4
-=item Value: \@columns
+=item Value: \@columns | \%columns | $column
=back
=back
-=head2 +as
-
-=over 4
-
-Indicates additional column names for those added via L</+select>. See L</as>.
-
-=back
-
=head2 as
=over 4
You can create your own accessors if required - see
L<DBIx::Class::Manual::Cookbook> for details.
+=head2 +as
+
+=over 4
+
+Indicates additional column names for those added via L</+select>. See L</as>.
+
+=back
+
=head2 join
=over 4
will return a set of all artists that have both a cd with title 'Down
to Earth' and a cd with title 'Popular'.
-If you want to fetch related objects from other tables as well, see C<prefetch>
+If you want to fetch related objects from other tables as well, see L</prefetch>
below.
NOTE: An internal join-chain pruner will discard certain joins while
For more help on using joins with search, see L<DBIx::Class::Manual::Joining>.
-=head2 prefetch
+=head2 collapse
=over 4
-=item Value: ($rel_name | \@rel_names | \%rel_names)
+=item Value: (0 | 1)
=back
-Contains one or more relationships that should be fetched along with
-the main query (when they are accessed afterwards the data will
-already be available, without extra queries to the database). This is
-useful for when you know you will need the related objects, because it
-saves at least one query:
-
- my $rs = $schema->resultset('Tag')->search(
- undef,
- {
- prefetch => {
- cd => 'artist'
- }
- }
- );
-
-The initial search results in SQL like the following:
-
- SELECT tag.*, cd.*, artist.* FROM tag
- JOIN cd ON tag.cd = cd.cdid
- JOIN artist ON cd.artist = artist.artistid
-
-L<DBIx::Class> has no need to go back to the database when we access the
-C<cd> or C<artist> relationships, which saves us two SQL statements in this
-case.
-
-Simple prefetches will be joined automatically, so there is no need
-for a C<join> attribute in the above search.
-
-L</prefetch> can be used with the any of the relationship types and
-multiple prefetches can be specified together. Below is a more complex
-example that prefetches a CD's artist, its liner notes (if present),
-the cover image, the tracks on that cd, and the guests on those
-tracks.
-
- # Assuming:
- My::Schema::CD->belongs_to( artist => 'My::Schema::Artist' );
- My::Schema::CD->might_have( liner_note => 'My::Schema::LinerNotes' );
- My::Schema::CD->has_one( cover_image => 'My::Schema::Artwork' );
- My::Schema::CD->has_many( tracks => 'My::Schema::Track' );
-
- My::Schema::Artist->belongs_to( record_label => 'My::Schema::RecordLabel' );
-
- My::Schema::Track->has_many( guests => 'My::Schema::Guest' );
-
-
- my $rs = $schema->resultset('CD')->search(
- undef,
- {
- prefetch => [
- { artist => 'record_label'}, # belongs_to => belongs_to
- 'liner_note', # might_have
- 'cover_image', # has_one
- { tracks => 'guests' }, # has_many => has_many
- ]
- }
- );
+When set to a true value, indicates that any rows fetched from joined has_many
+relationships are to be aggregated into the corresponding "parent" object. For
+example, the resultset:
-This will produce SQL like the following:
-
- SELECT cd.*, artist.*, record_label.*, liner_note.*, cover_image.*,
- tracks.*, guests.*
- FROM cd me
- JOIN artist artist
- ON artist.artistid = me.artistid
- JOIN record_label record_label
- ON record_label.labelid = artist.labelid
- LEFT JOIN track tracks
- ON tracks.cdid = me.cdid
- LEFT JOIN guest guests
- ON guests.trackid = track.trackid
- LEFT JOIN liner_notes liner_note
- ON liner_note.cdid = me.cdid
- JOIN cd_artwork cover_image
- ON cover_image.cdid = me.cdid
- ORDER BY tracks.cd
-
-Now the C<artist>, C<record_label>, C<liner_note>, C<cover_image>,
-C<tracks>, and C<guests> of the CD will all be available through the
-relationship accessors without the need for additional queries to the
-database.
-
-However, there is one caveat to be observed: it can be dangerous to
-prefetch more than one L<has_many|DBIx::Class::Relationship/has_many>
-relationship on a given level. e.g.:
-
- my $rs = $schema->resultset('CD')->search(
- undef,
- {
- prefetch => [
- 'tracks', # has_many
- { cd_to_producer => 'producer' }, # has_many => belongs_to (i.e. m2m)
- ]
- }
- );
-
-The collapser currently can't identify duplicate tuples for multiple
-L<has_many|DBIx::Class::Relationship/has_many> relationships and as a
-result the second L<has_many|DBIx::Class::Relationship/has_many>
-relation could contain redundant objects.
+ my $rs = $schema->resultset('CD')->search({}, {
+ '+columns' => [ qw/ tracks.title tracks.position / ],
+ join => 'tracks',
+ collapse => 1,
+ });
-=head3 Using L</prefetch> with L</join>
+While executing the following query:
-L</prefetch> implies a L</join> with the equivalent argument, and is
-properly merged with any existing L</join> specification. So the
-following:
+ SELECT me.*, tracks.title, tracks.position
+ FROM cd me
+ LEFT JOIN track tracks
+ ON tracks.cdid = me.cdid
- my $rs = $schema->resultset('CD')->search(
- {'record_label.name' => 'Music Product Ltd.'},
- {
- join => {artist => 'record_label'},
- prefetch => 'artist',
- }
- );
+Will return only as many objects as there are rows in the CD source, even
+though the result of the query may span many rows. Each of these CD objects
+will in turn have multiple "Track" objects hidden behind the has_many
+generated accessor C<tracks>. Without C<< collapse => 1 >>, the return values
+of this resultset would be as many CD objects as there are tracks (a "Cartesian
+product"), with each CD object containing exactly one of all fetched Track data.
-... will work, searching on the record label's name, but only
-prefetching the C<artist>.
+When a collapse is requested on a non-ordered resultset, an order by some
+unique part of the main source (the left-most table) is inserted automatically.
+This is done so that the resultset is allowed to be "lazy" - calling
+L<< $rs->next|/next >> will fetch only as many rows as it needs to build the next
+object with all of its related data.
-=head3 Using L</prefetch> with L</select> / L</+select> / L</as> / L</+as>
+If an L</order_by> is already declared, and orders the resultset in a way that
+makes collapsing as described above impossible (e.g. C<< ORDER BY
+has_many_rel.column >> or C<ORDER BY RANDOM()>), DBIC will automatically
+switch to "eager" mode and slurp the entire resultset before constructing the
+first object returned by L</next>.
-L</prefetch> implies a L</+select>/L</+as> with the fields of the
-prefetched relations. So given:
+Setting this attribute on a resultset that does not join any has_many
+relations is a no-op.
- my $rs = $schema->resultset('CD')->search(
- undef,
- {
- select => ['cd.title'],
- as => ['cd_title'],
- prefetch => 'artist',
- }
- );
+For a more in-depth discussion, see L</PREFETCHING>.
-The L</select> becomes: C<'cd.title', 'artist.*'> and the L</as>
-becomes: C<'cd_title', 'artist.*'>.
-
-=head3 CAVEATS
-
-Prefetch does a lot of deep magic. As such, it may not behave exactly
-as you might expect.
+=head2 prefetch
=over 4
-=item *
+=item Value: ($rel_name | \@rel_names | \%rel_names)
-Prefetch uses the L</cache> to populate the prefetched relationships. This
-may or may not be what you want.
+=back
-=item *
+This attribute is a shorthand for specifying a L</join> spec, adding all
+columns from the joined related sources as L</+columns> and setting
+L</collapse> to a true value. For example, the following two queries are
+equivalent:
-If you specify a condition on a prefetched relationship, ONLY those
-rows that match the prefetched condition will be fetched into that relationship.
-This means that adding prefetch to a search() B<may alter> what is returned by
-traversing a relationship. So, if you have C<< Artist->has_many(CDs) >> and you do
-
- my $artist_rs = $schema->resultset('Artist')->search({
- 'cds.year' => 2008,
- }, {
- join => 'cds',
+ my $rs = $schema->resultset('Artist')->search({}, {
+ prefetch => { cds => ['genre', 'tracks' ] },
});
- my $count = $artist_rs->first->cds->count;
+and
- my $artist_rs_prefetch = $artist_rs->search( {}, { prefetch => 'cds' } );
+ my $rs = $schema->resultset('Artist')->search({}, {
+ join => { cds => ['genre', 'tracks' ] },
+ collapse => 1,
+ '+columns' => [
+ (map
+ { +{ "cds.$_" => "cds.$_" } }
+ $schema->source('Artist')->related_source('cds')->columns
+ ),
+ (map
+ { +{ "cds.genre.$_" => "genre.$_" } }
+ $schema->source('Artist')->related_source('cds')->related_source('genre')->columns
+ ),
+ (map
+ { +{ "cds.tracks.$_" => "tracks.$_" } }
+ $schema->source('Artist')->related_source('cds')->related_source('tracks')->columns
+ ),
+ ],
+ });
- my $prefetch_count = $artist_rs_prefetch->first->cds->count;
+Both producing the following SQL:
+
+ SELECT me.artistid, me.name, me.rank, me.charfield,
+ cds.cdid, cds.artist, cds.title, cds.year, cds.genreid, cds.single_track,
+ genre.genreid, genre.name,
+ tracks.trackid, tracks.cd, tracks.position, tracks.title, tracks.last_updated_on, tracks.last_updated_at
+ FROM artist me
+ LEFT JOIN cd cds
+ ON cds.artist = me.artistid
+ LEFT JOIN genre genre
+ ON genre.genreid = cds.genreid
+ LEFT JOIN track tracks
+ ON tracks.cd = cds.cdid
+ ORDER BY me.artistid
+
+While L</prefetch> implies a L</join>, it is ok to mix the two together, as
+the arguments are properly merged and generally do the right thing. For
+example, you may want to do the following:
+
+ my $artists_and_cds_without_genre = $schema->resultset('Artist')->search(
+ { 'genre.genreid' => undef },
+ {
+ join => { cds => 'genre' },
+ prefetch => 'cds',
+ }
+ );
- cmp_ok( $count, '==', $prefetch_count, "Counts should be the same" );
+Which generates the following SQL:
-that cmp_ok() may or may not pass depending on the datasets involved. This
-behavior may or may not survive the 0.09 transition.
+ SELECT me.artistid, me.name, me.rank, me.charfield,
+ cds.cdid, cds.artist, cds.title, cds.year, cds.genreid, cds.single_track
+ FROM artist me
+ LEFT JOIN cd cds
+ ON cds.artist = me.artistid
+ LEFT JOIN genre genre
+ ON genre.genreid = cds.genreid
+ WHERE genre.genreid IS NULL
+ ORDER BY me.artistid
-=back
+For a more in-depth discussion, see L</PREFETCHING>.
=head2 alias
... FOR SHARED. If \$scalar is passed, this is taken directly and embedded in the
query.
+=head1 PREFETCHING
+
+DBIx::Class supports arbitrary related data prefetching from multiple related
+sources. Any combination of relationship types and column sets is supported.
+If L<collapsing|/collapse> is requested, there is an additional requirement of
+selecting enough data to make every individual object uniquely identifiable.
+
+Here are some more involved examples, based on the following relationship map:
+
+ # Assuming:
+ My::Schema::CD->belongs_to( artist => 'My::Schema::Artist' );
+ My::Schema::CD->might_have( liner_note => 'My::Schema::LinerNotes' );
+ My::Schema::CD->has_many( tracks => 'My::Schema::Track' );
+
+ My::Schema::Artist->belongs_to( record_label => 'My::Schema::RecordLabel' );
+
+ My::Schema::Track->has_many( guests => 'My::Schema::Guest' );
+
+
+
+ my $rs = $schema->resultset('Tag')->search(
+ undef,
+ {
+ prefetch => {
+ cd => 'artist'
+ }
+ }
+ );
+
+The initial search results in SQL like the following:
+
+ SELECT tag.*, cd.*, artist.* FROM tag
+ JOIN cd ON tag.cd = cd.cdid
+ JOIN artist ON cd.artist = artist.artistid
+
+L<DBIx::Class> has no need to go back to the database when we access the
+C<cd> or C<artist> relationships, which saves us two SQL statements in this
+case.
+
+Simple prefetches will be joined automatically, so there is no need
+for a C<join> attribute in the above search.
+
+The L</prefetch> attribute can be used with any of the relationship types
+and multiple prefetches can be specified together. Below is a more complex
+example that prefetches a CD's artist, its liner notes (if present),
+the cover image, the tracks on that CD, and the guests on those
+tracks.
+
+ my $rs = $schema->resultset('CD')->search(
+ undef,
+ {
+ prefetch => [
+ { artist => 'record_label'}, # belongs_to => belongs_to
+ 'liner_note', # might_have
+ 'cover_image', # has_one
+ { tracks => 'guests' }, # has_many => has_many
+ ]
+ }
+ );
+
+This will produce SQL like the following:
+
+ SELECT cd.*, artist.*, record_label.*, liner_note.*, cover_image.*,
+ tracks.*, guests.*
+ FROM cd me
+ JOIN artist artist
+ ON artist.artistid = me.artistid
+ JOIN record_label record_label
+ ON record_label.labelid = artist.labelid
+ LEFT JOIN track tracks
+ ON tracks.cdid = me.cdid
+ LEFT JOIN guest guests
+ ON guests.trackid = track.trackid
+ LEFT JOIN liner_notes liner_note
+ ON liner_note.cdid = me.cdid
+ JOIN cd_artwork cover_image
+ ON cover_image.cdid = me.cdid
+ ORDER BY tracks.cd
+
+Now the C<artist>, C<record_label>, C<liner_note>, C<cover_image>,
+C<tracks>, and C<guests> of the CD will all be available through the
+relationship accessors without the need for additional queries to the
+database.
+
+=head3 CAVEATS
+
+Prefetch does a lot of deep magic. As such, it may not behave exactly
+as you might expect.
+
+=over 4
+
+=item *
+
+Prefetch uses the L</cache> to populate the prefetched relationships. This
+may or may not be what you want.
+
+=item *
+
+If you specify a condition on a prefetched relationship, ONLY those
+rows that match the prefetched condition will be fetched into that relationship.
+This means that adding prefetch to a search() B<may alter> what is returned by
+traversing a relationship. So, if you have C<< Artist->has_many(CDs) >> and you do
+
+ my $artist_rs = $schema->resultset('Artist')->search({
+ 'cds.year' => 2008,
+ }, {
+ join => 'cds',
+ });
+
+ my $count = $artist_rs->first->cds->count;
+
+ my $artist_rs_prefetch = $artist_rs->search( {}, { prefetch => 'cds' } );
+
+ my $prefetch_count = $artist_rs_prefetch->first->cds->count;
+
+ cmp_ok( $count, '==', $prefetch_count, "Counts should be the same" );
+
+That cmp_ok() may or may not pass depending on the datasets involved. In other
+words the C<WHERE> condition would apply to the entire dataset, just like
+it would in regular SQL. If you want to add a condition only to the "right side"
+of a C<LEFT JOIN> - consider declaring and using a L<relationship with a custom
+condition|DBIx::Class::Relationship::Base/condition>
+
+=back
+
=head1 DBIC BIND VALUES
Because DBIC may need more information to bind values than just the column name
You may distribute this code under the same terms as Perl itself.
-=cut
-
-1;
# {collapse} would mean a has_many join was injected, which in turn means
# we need to group *IF WE CAN* (only if the column in question is unique)
- if (!$orig_attrs->{group_by} && keys %{$orig_attrs->{collapse}}) {
+ if (!$orig_attrs->{group_by} && $orig_attrs->{collapse}) {
if ($colmap->{$select} and $rsrc->_identifying_column_set([$colmap->{$select}])) {
$new_attrs->{group_by} = [ $select ];
- delete $new_attrs->{distinct}; # it is ignored when group_by is present
+ delete @{$new_attrs}{qw(distinct _grouped_by_distinct)}; # it is ignored when group_by is present
}
else {
carp (
sub func_rs {
my ($self,$function) = @_;
- return $self->{_parent_resultset}->search(
- undef, {
- select => {$function => $self->{_select}},
- as => [$self->{_as}],
- },
- );
+
+ my $rs = $self->{_parent_resultset};
+ my $select = $self->{_select};
+
+ # wrap a grouped rs
+ if ($rs->_resolved_attrs->{group_by}) {
+ $select = $self->{_as};
+ $rs = $rs->as_subselect_rs;
+ }
+
+ $rs->search( undef, {
+ columns => { $self->{_as} => { $function => $select } }
+ } );
}
=head2 throw_exception
use strict;
use warnings;
+use base qw/DBIx::Class::ResultSource::RowParser DBIx::Class/;
+
use DBIx::Class::ResultSet;
use DBIx::Class::ResultSourceHandle;
use Try::Tiny;
use List::Util 'first';
use Scalar::Util qw/blessed weaken isweak/;
-use namespace::clean;
-use base qw/DBIx::Class/;
+use namespace::clean;
__PACKAGE__->mk_group_accessors(simple => qw/
source_name name source_info
}
else {
$self->throw_exception( sprintf (
- "No such column '%s' on source %s",
+ "No such column '%s' on source '%s'",
$_,
- $self->source_name,
+ $self->source_name || $self->name || 'Unknown source...?',
));
}
}
sub set_primary_key {
my ($self, @cols) = @_;
- # check if primary key columns are valid columns
- foreach my $col (@cols) {
- $self->throw_exception("No such column $col on table " . $self->name)
- unless $self->has_column($col);
+
+ my $colinfo = $self->columns_info(\@cols);
+ for my $col (@cols) {
+ carp_unique(sprintf (
+ "Primary key of source '%s' includes the column '%s' which has its "
+ . "'is_nullable' attribute set to true. This is a mistake and will cause "
+ . 'various Result-object operations to fail',
+ $self->source_name || $self->name || 'Unknown source...?',
+ $col,
+ )) if $colinfo->{$col}{is_nullable};
}
+
$self->_primaries(\@cols);
$self->add_unique_constraint(primary => \@cols);
my $stripped_cond = $self->__strip_relcond ($rel_info->{cond});
- my $rsrc_schema_moniker = $self->source_name
- if try { $self->schema };
+ my $registered_source_name = $self->source_name;
# this may be a partial schema or something else equally esoteric
- my $other_rsrc = try { $self->related_source($rel) }
- or return $ret;
+ my $other_rsrc = $self->related_source($rel);
# Get all the relationships for that source that related to this source
# whose foreign column set are our self columns on $rel and whose self
my $roundtrip_rsrc = try { $other_rsrc->related_source($other_rel) }
or next;
- if ($rsrc_schema_moniker and try { $roundtrip_rsrc->schema } ) {
- next unless $rsrc_schema_moniker eq $roundtrip_rsrc->source_name;
+ if ($registered_source_name) {
+ next if $registered_source_name ne ($roundtrip_rsrc->source_name || '')
}
else {
- next unless $self->result_class eq $roundtrip_rsrc->result_class;
+ next if $self->result_class ne $roundtrip_rsrc->result_class;
}
my $other_rel_info = $other_rsrc->relationship_info($other_rel);
,
-join_path => [@$jpath, { $join => $as } ],
-is_single => (
- $rel_info->{attrs}{accessor}
- &&
+ (! $rel_info->{attrs}{accessor})
+ or
first { $rel_info->{attrs}{accessor} eq $_ } (qw/single filter/)
),
-alias => $as,
- -relation_chain_depth => $seen->{-relation_chain_depth} || 0,
+ -relation_chain_depth => ( $seen->{-relation_chain_depth} || 0 ) + 1,
},
scalar $self->_resolve_condition($rel_info->{cond}, $as, $alias, $join)
];
sub _resolve_condition {
my ($self, $cond, $as, $for, $rel_name) = @_;
- my $obj_rel = !!blessed $for;
+ my $obj_rel = defined blessed $for;
if (ref $cond eq 'CODE') {
my $relalias = $obj_rel ? 'me' : $as;
}
}
-# Accepts one or more relationships for the current source and returns an
-# array of column names for each of those relationships. Column names are
-# prefixed relative to the current source, in accordance with where they appear
-# in the supplied relationships.
-sub _resolve_prefetch {
- my ($self, $pre, $alias, $alias_map, $order, $collapse, $pref_path) = @_;
- $pref_path ||= [];
-
- if (not defined $pre or not length $pre) {
- return ();
- }
- elsif( ref $pre eq 'ARRAY' ) {
- return
- map { $self->_resolve_prefetch( $_, $alias, $alias_map, $order, $collapse, [ @$pref_path ] ) }
- @$pre;
- }
- elsif( ref $pre eq 'HASH' ) {
- my @ret =
- map {
- $self->_resolve_prefetch($_, $alias, $alias_map, $order, $collapse, [ @$pref_path ] ),
- $self->related_source($_)->_resolve_prefetch(
- $pre->{$_}, "${alias}.$_", $alias_map, $order, $collapse, [ @$pref_path, $_] )
- } keys %$pre;
- return @ret;
- }
- elsif( ref $pre ) {
- $self->throw_exception(
- "don't know how to resolve prefetch reftype ".ref($pre));
- }
- else {
- my $p = $alias_map;
- $p = $p->{$_} for (@$pref_path, $pre);
-
- $self->throw_exception (
- "Unable to resolve prefetch '$pre' - join alias map does not contain an entry for path: "
- . join (' -> ', @$pref_path, $pre)
- ) if (ref $p->{-join_aliases} ne 'ARRAY' or not @{$p->{-join_aliases}} );
-
- my $as = shift @{$p->{-join_aliases}};
-
- my $rel_info = $self->relationship_info( $pre );
- $self->throw_exception( $self->source_name . " has no such relationship '$pre'" )
- unless $rel_info;
- my $as_prefix = ($alias =~ /^.*?\.(.+)$/ ? $1.'.' : '');
- my $rel_source = $self->related_source($pre);
-
- if ($rel_info->{attrs}{accessor} && $rel_info->{attrs}{accessor} eq 'multi') {
- $self->throw_exception(
- "Can't prefetch has_many ${pre} (join cond too complex)")
- unless ref($rel_info->{cond}) eq 'HASH';
- my $dots = @{[$as_prefix =~ m/\./g]} + 1; # +1 to match the ".${as_prefix}"
-
- if (my ($fail) = grep { @{[$_ =~ m/\./g]} == $dots }
- keys %{$collapse}) {
- my ($last) = ($fail =~ /([^\.]+)$/);
- carp (
- "Prefetching multiple has_many rels ${last} and ${pre} "
- .(length($as_prefix)
- ? "at the same level (${as_prefix}) "
- : "at top level "
- )
- . 'will explode the number of row objects retrievable via ->next or ->all. '
- . 'Use at your own risk.'
- );
- }
-
- #my @col = map { (/^self\.(.+)$/ ? ("${as_prefix}.$1") : ()); }
- # values %{$rel_info->{cond}};
- $collapse->{".${as_prefix}${pre}"} = [ $rel_source->_pri_cols ];
- # action at a distance. prepending the '.' allows simpler code
- # in ResultSet->_collapse_result
- my @key = map { (/^foreign\.(.+)$/ ? ($1) : ()); }
- keys %{$rel_info->{cond}};
- push @$order, map { "${as}.$_" } @key;
-
- if (my $rel_order = $rel_info->{attrs}{order_by}) {
- # this is kludgy and incomplete, I am well aware
- # but the parent method is going away entirely anyway
- # so sod it
- my $sql_maker = $self->storage->sql_maker;
- my ($orig_ql, $orig_qr) = $sql_maker->_quote_chars;
- my $sep = $sql_maker->name_sep;
-
- # install our own quoter, so we can catch unqualified stuff
- local $sql_maker->{quote_char} = ["\x00", "\xFF"];
-
- my $quoted_prefix = "\x00${as}\xFF";
-
- for my $chunk ( $sql_maker->_order_by_chunks ($rel_order) ) {
- my @bind;
- ($chunk, @bind) = @$chunk if ref $chunk;
-
- $chunk = "${quoted_prefix}${sep}${chunk}"
- unless $chunk =~ /\Q$sep/;
-
- $chunk =~ s/\x00/$orig_ql/g;
- $chunk =~ s/\xFF/$orig_qr/g;
- push @$order, \[$chunk, @bind];
- }
- }
- }
-
- return map { [ "${as}.$_", "${as_prefix}${pre}.$_", ] }
- $rel_source->columns;
- }
-}
-
=head2 related_source
=over 4
--- /dev/null
+package # hide from the pauses
+ DBIx::Class::ResultSource::RowParser;
+
+use strict;
+use warnings;
+
+use base 'DBIx::Class';
+
+use Try::Tiny;
+use List::Util qw(first max);
+use B 'perlstring';
+
+use DBIx::Class::ResultSource::RowParser::Util qw(
+ assemble_simple_parser
+ assemble_collapsing_parser
+);
+
+use namespace::clean;
+
+# Accepts one or more relationships for the current source and returns an
+# array of column names for each of those relationships. Column names are
+# prefixed relative to the current source, in accordance with where they appear
+# in the supplied relationships.
+sub _resolve_prefetch {
+ my ($self, $pre, $alias, $alias_map, $order, $pref_path) = @_;
+ $pref_path ||= [];
+
+ if (not defined $pre or not length $pre) {
+ return ();
+ }
+ elsif( ref $pre eq 'ARRAY' ) {
+ return
+ map { $self->_resolve_prefetch( $_, $alias, $alias_map, $order, [ @$pref_path ] ) }
+ @$pre;
+ }
+ elsif( ref $pre eq 'HASH' ) {
+ my @ret =
+ map {
+ $self->_resolve_prefetch($_, $alias, $alias_map, $order, [ @$pref_path ] ),
+ $self->related_source($_)->_resolve_prefetch(
+ $pre->{$_}, "${alias}.$_", $alias_map, $order, [ @$pref_path, $_] )
+ } keys %$pre;
+ return @ret;
+ }
+ elsif( ref $pre ) {
+ $self->throw_exception(
+ "don't know how to resolve prefetch reftype ".ref($pre));
+ }
+ else {
+ my $p = $alias_map;
+ $p = $p->{$_} for (@$pref_path, $pre);
+
+ $self->throw_exception (
+ "Unable to resolve prefetch '$pre' - join alias map does not contain an entry for path: "
+ . join (' -> ', @$pref_path, $pre)
+ ) if (ref $p->{-join_aliases} ne 'ARRAY' or not @{$p->{-join_aliases}} );
+
+ my $as = shift @{$p->{-join_aliases}};
+
+ my $rel_info = $self->relationship_info( $pre );
+ $self->throw_exception( $self->source_name . " has no such relationship '$pre'" )
+ unless $rel_info;
+
+ my $as_prefix = ($alias =~ /^.*?\.(.+)$/ ? $1.'.' : '');
+
+ return map { [ "${as}.$_", "${as_prefix}${pre}.$_", ] }
+ $self->related_source($pre)->columns;
+ }
+}
+
+# Takes an arrayref of {as} dbic column aliases and the collapse and select
+# attributes from the same $rs (the selector requirement is a temporary
+# workaround... I hope), and returns a coderef capable of:
+# my $me_pref_clps = $coderef->([$rs->cursor->next/all])
+# Where the $me_pref_clps arrayref is the future argument to inflate_result()
+#
+# For an example of this coderef in action (and to see its guts) look at
+# t/resultset/rowparser_internals.t
+#
+# This is a huge performance win, as we call the same code for every row
+# returned from the db, thus avoiding repeated method lookups when traversing
+# relationships
+#
+# Also since the coderef is completely stateless (the returned structure is
+# always fresh on every new invocation) this is a very good opportunity for
+# memoization if further speed improvements are needed
+#
+# The way we construct this coderef is somewhat fugly, although the result is
+# really worth it. The final coderef does not perform any kind of recursion -
+# the entire nested structure constructor is rolled out into a single scope.
+#
+# In any case - the output of this thing is meticulously micro-tested, so
+# any sort of adjustment/rewrite should be relatively easy (fsvo relatively)
+#
+sub _mk_row_parser {
+  # $args and $attrs are separated to delineate what is core collapser stuff and
+ # what is dbic $rs specific
+ my ($self, $args, $attrs) = @_;
+
+ die "HRI without pruning makes zero sense"
+ if ( $args->{hri_style} && ! $args->{prune_null_branches} );
+
+ my %common = (
+ hri_style => $args->{hri_style},
+ prune_null_branches => $args->{prune_null_branches},
+ val_index => { map
+ { $args->{inflate_map}[$_] => $_ }
+ ( 0 .. $#{$args->{inflate_map}} )
+ },
+ );
+
+ my $check_null_columns;
+
+ my $src = (! $args->{collapse} ) ? assemble_simple_parser(\%common) : do {
+ my $collapse_map = $self->_resolve_collapse ({
+ # FIXME
+ # only consider real columns (not functions) during collapse resolution
+ # this check shouldn't really be here, as fucktards are not supposed to
+ # alias random crap to existing column names anyway, but still - just in
+ # case
+ # FIXME !!!! - this does not yet deal with unbalanced selectors correctly
+ # (it is now trivial as the attrs specify where things go out of sync
+ # needs MOAR tests)
+ as => { map
+ { ref $attrs->{select}[$common{val_index}{$_}] ? () : ( $_ => $common{val_index}{$_} ) }
+ keys %{$common{val_index}}
+ },
+ premultiplied => $args->{premultiplied},
+ });
+
+ $check_null_columns = $collapse_map->{-identifying_columns}
+ if @{$collapse_map->{-identifying_columns}};
+
+ assemble_collapsing_parser({
+ %common,
+ collapse_map => $collapse_map,
+ });
+ };
+
+ return (
+ $args->{eval} ? ( eval "sub $src" || die $@ ) : $src,
+ $check_null_columns,
+ );
+}
+
+
+# Takes an arrayref selection list and generates a collapse-map representing
+# row-object fold-points. Every relationship is assigned a set of unique,
+# non-nullable columns (which may *not even be* from the same resultset)
+# and the collapser will use this information to correctly distinguish
+# data of individual to-be-row-objects. See t/resultset/rowparser_internals.t
+# for extensive RV examples
+sub _resolve_collapse {
+ my ($self, $args, $common_args) = @_;
+
+ # for comprehensible error messages put ourselves at the head of the relationship chain
+ $args->{_rel_chain} ||= [ $self->source_name ];
+
+ # record top-level fully-qualified column index, signify toplevelness
+ unless ($common_args->{_as_fq_idx}) {
+ $common_args->{_as_fq_idx} = { %{$args->{as}} };
+ $args->{_is_top_level} = 1;
+ };
+
+ my ($my_cols, $rel_cols);
+ for (keys %{$args->{as}}) {
+ if ($_ =~ /^ ([^\.]+) \. (.+) /x) {
+ $rel_cols->{$1}{$2} = 1;
+ }
+ else {
+ $my_cols->{$_} = {}; # important for ||='s below
+ }
+ }
+
+ my $relinfo;
+ # run through relationships, collect metadata
+ for my $rel (keys %$rel_cols) {
+ my $inf = $self->relationship_info ($rel);
+
+ $relinfo->{$rel} = {
+ is_single => ( $inf->{attrs}{accessor} && $inf->{attrs}{accessor} ne 'multi' ),
+ is_inner => ( ( $inf->{attrs}{join_type} || '' ) !~ /^left/i),
+ rsrc => $self->related_source($rel),
+ };
+
+ # FIXME - need to use _resolve_cond here instead
+ my $cond = $inf->{cond};
+
+ if (
+ ref $cond eq 'HASH'
+ and
+ keys %$cond
+ and
+ ! defined first { $_ !~ /^foreign\./ } (keys %$cond)
+ and
+ ! defined first { $_ !~ /^self\./ } (values %$cond)
+ ) {
+ for my $f (keys %$cond) {
+ my $s = $cond->{$f};
+ $_ =~ s/^ (?: foreign | self ) \.//x for ($f, $s);
+ $relinfo->{$rel}{fk_map}{$s} = $f;
+ }
+ }
+ }
+
+ # inject non-left fk-bridges from *INNER-JOINED* children (if any)
+ for my $rel (grep { $relinfo->{$_}{is_inner} } keys %$relinfo) {
+ my $ri = $relinfo->{$rel};
+ for (keys %{$ri->{fk_map}} ) {
+ # need to know source from *our* pov, hence $rel.col
+ $my_cols->{$_} ||= { via_fk => "$rel.$ri->{fk_map}{$_}" }
+ if defined $rel_cols->{$rel}{$ri->{fk_map}{$_}} # in fact selected
+ }
+ }
+
+ # if the parent is already defined *AND* we have an inner reverse relationship
+ # (i.e. do not exist without it) , assume all of its related FKs are selected
+ # (even if they in fact are NOT in the select list). Keep a record of what we
+ # assumed, and if any such phantom-column becomes part of our own collapser,
+ # throw everything assumed-from-parent away and replace with the collapser of
+ # the parent (whatever it may be)
+ my $assumed_from_parent;
+ if ( ! $args->{_parent_info}{underdefined} and ! $args->{_parent_info}{rev_rel_is_optional} ) {
+ for my $col ( values %{$args->{_parent_info}{rel_condition} || {}} ) {
+ next if exists $my_cols->{$col};
+ $my_cols->{$col} = { via_collapse => $args->{_parent_info}{collapse_on_idcols} };
+ $assumed_from_parent->{columns}{$col}++;
+ }
+ }
+
+ # get colinfo for everything
+ if ($my_cols) {
+ my $ci = $self->columns_info;
+ $my_cols->{$_}{colinfo} = $ci->{$_} for keys %$my_cols;
+ }
+
+ my $collapse_map;
+
+ # first try to reuse the parent's collapser (i.e. reuse collapser over 1:1)
+ # (makes for a leaner coderef later)
+ unless ($collapse_map->{-identifying_columns}) {
+ $collapse_map->{-identifying_columns} = $args->{_parent_info}{collapse_on_idcols}
+ if $args->{_parent_info}{collapser_reusable};
+ }
+
+ # Still don't know how to collapse - try to resolve based on our columns (plus already inserted FK bridges)
+ if (
+ ! $collapse_map->{-identifying_columns}
+ and
+ $my_cols
+ and
+ my $idset = $self->_identifying_column_set ({map { $_ => $my_cols->{$_}{colinfo} } keys %$my_cols})
+ ) {
+ # see if the resulting collapser relies on any implied columns,
+ # and fix stuff up if this is the case
+ my @reduced_set = grep { ! $assumed_from_parent->{columns}{$_} } @$idset;
+
+ $collapse_map->{-identifying_columns} = [ __unique_numlist(
+ @{ $args->{_parent_info}{collapse_on_idcols}||[] },
+
+ (map
+ {
+ my $fqc = join ('.',
+ @{$args->{_rel_chain}}[1 .. $#{$args->{_rel_chain}}],
+ ( $my_cols->{$_}{via_fk} || $_ ),
+ );
+
+ $common_args->{_as_fq_idx}->{$fqc};
+ }
+ @reduced_set
+ ),
+ )];
+ }
+
+# Still don't know how to collapse - keep descending down 1:1 chains - if
+ # a related non-LEFT 1:1 is resolvable - its condition will collapse us
+ # too
+ unless ($collapse_map->{-identifying_columns}) {
+ my @candidates;
+
+ for my $rel (keys %$relinfo) {
+ next unless ($relinfo->{$rel}{is_single} && $relinfo->{$rel}{is_inner});
+
+ if ( my $rel_collapse = $relinfo->{$rel}{rsrc}->_resolve_collapse ({
+ as => $rel_cols->{$rel},
+ _rel_chain => [ @{$args->{_rel_chain}}, $rel ],
+ _parent_info => { underdefined => 1 },
+ }, $common_args)) {
+ push @candidates, $rel_collapse->{-identifying_columns};
+ }
+ }
+
+ # get the set with least amount of columns
+ # FIXME - maybe need to implement a data type order as well (i.e. prefer several ints
+ # to a single varchar)
+ if (@candidates) {
+ ($collapse_map->{-identifying_columns}) = sort { scalar @$a <=> scalar @$b } (@candidates);
+ }
+ }
+
+ # Still don't know how to collapse, and we are the root node. Last ditch
+ # effort in case we are *NOT* premultiplied.
+ # Run through *each multi* all the way down, left or not, and all
+ # *left* singles (a single may become a multi underneath) . When everything
+ # gets back see if all the rels link to us definitively. If this is the
+ # case we are good - either one of them will define us, or if all are NULLs
+ # we know we are "unique" due to the "non-premultiplied" check
+ if (
+ ! $collapse_map->{-identifying_columns}
+ and
+ ! $args->{premultiplied}
+ and
+ $args->{_is_top_level}
+ ) {
+ my (@collapse_sets, $uncollapsible_chain);
+
+ for my $rel (keys %$relinfo) {
+
+ # we already looked at these higher up
+ next if ($relinfo->{$rel}{is_single} && $relinfo->{$rel}{is_inner});
+
+ if (my $clps = $relinfo->{$rel}{rsrc}->_resolve_collapse ({
+ as => $rel_cols->{$rel},
+ _rel_chain => [ @{$args->{_rel_chain}}, $rel ],
+ _parent_info => { underdefined => 1 },
+ }, $common_args) ) {
+
+ # for singles use the idcols wholesale (either there or not)
+ if ($relinfo->{$rel}{is_single}) {
+ push @collapse_sets, $clps->{-identifying_columns};
+ }
+ elsif (! $relinfo->{$rel}{fk_map}) {
+ $uncollapsible_chain = 1;
+ last;
+ }
+ else {
+ my $defined_cols_parent_side;
+
+ for my $fq_col ( grep { /^$rel\.[^\.]+$/ } keys %{$args->{as}} ) {
+ my ($col) = $fq_col =~ /([^\.]+)$/;
+
+ $defined_cols_parent_side->{$_} = $args->{as}{$fq_col} for grep
+ { $relinfo->{$rel}{fk_map}{$_} eq $col }
+ keys %{$relinfo->{$rel}{fk_map}}
+ ;
+ }
+
+ if (my $set = $self->_identifying_column_set([ keys %$defined_cols_parent_side ]) ) {
+ push @collapse_sets, [ sort map { $defined_cols_parent_side->{$_} } @$set ];
+ }
+ else {
+ $uncollapsible_chain = 1;
+ last;
+ }
+ }
+ }
+ else {
+ $uncollapsible_chain = 1;
+ last;
+ }
+ }
+
+ unless ($uncollapsible_chain) {
+ # if we got here - we are good to go, but the construction is tricky
+ # since our children will want to include our collapse criteria - we
+ # don't give them anything (safe, since they are all collapsible on their own)
+ # in addition we record the individual collapse possibilities
+ # of all left children node collapsers, and merge them in the rowparser
+ # coderef later
+ $collapse_map->{-identifying_columns} = [];
+ $collapse_map->{-identifying_columns_variants} = [ sort {
+ (scalar @$a) <=> (scalar @$b) or max(@$a) <=> max(@$b)
+ } @collapse_sets ];
+ }
+ }
+
+ # stop descending into children if we were called by a parent for first-pass
+ # and don't despair if nothing was found (there may be other parallel branches
+ # to dive into)
+ if ($args->{_parent_info}{underdefined}) {
+ return $collapse_map->{-identifying_columns} ? $collapse_map : undef
+ }
+ # nothing down the chain resolved - can't calculate a collapse-map
+ elsif (! $collapse_map->{-identifying_columns}) {
+ $self->throw_exception ( sprintf
+ "Unable to calculate a definitive collapse column set for %s%s: fetch more unique non-nullable columns",
+ $self->source_name,
+ @{$args->{_rel_chain}} > 1
+ ? sprintf (' (last member of the %s chain)', join ' -> ', @{$args->{_rel_chain}} )
+ : ''
+ ,
+ );
+ }
+
+ # If we got that far - we are collapsible - GREAT! Now go down all children
+ # a second time, and fill in the rest
+
+ $collapse_map->{-identifying_columns} = [ __unique_numlist(
+ @{ $args->{_parent_info}{collapse_on_idcols}||[] },
+ @{ $collapse_map->{-identifying_columns} },
+ )];
+
+ my @id_sets;
+ for my $rel (sort keys %$relinfo) {
+
+ $collapse_map->{$rel} = $relinfo->{$rel}{rsrc}->_resolve_collapse ({
+ as => { map { $_ => 1 } ( keys %{$rel_cols->{$rel}} ) },
+ _rel_chain => [ @{$args->{_rel_chain}}, $rel],
+ _parent_info => {
+ # shallow copy
+ collapse_on_idcols => [ @{$collapse_map->{-identifying_columns}} ],
+
+ rel_condition => $relinfo->{$rel}{fk_map},
+
+ is_optional => ! $relinfo->{$rel}{is_inner},
+
+ # if there is at least one *inner* reverse relationship which is HASH-based (equality only)
+ # we can safely assume that the child can not exist without us
+ rev_rel_is_optional => ( first
+ { ref $_->{cond} eq 'HASH' and ($_->{attrs}{join_type}||'') !~ /^left/i }
+ values %{ $self->reverse_relationship_info($rel) },
+ ) ? 0 : 1,
+
+ # if this is a 1:1 our own collapser can be used as a collapse-map
+ # (regardless of left or not)
+ collapser_reusable => (
+ $relinfo->{$rel}{is_single}
+ &&
+ $relinfo->{$rel}{is_inner}
+ &&
+ @{$collapse_map->{-identifying_columns}}
+ ) ? 1 : 0,
+ },
+ }, $common_args );
+
+ $collapse_map->{$rel}{-is_single} = 1 if $relinfo->{$rel}{is_single};
+ $collapse_map->{$rel}{-is_optional} ||= 1 unless $relinfo->{$rel}{is_inner};
+ }
+
+ return $collapse_map;
+}
+
+# adding a dep on MoreUtils *just* for this is retarded
+sub __unique_numlist {
+ sort { $a <=> $b } keys %{ {map { $_ => 1 } @_ }}
+}
+
+1;
--- /dev/null
+package # hide from the pauses
+ DBIx::Class::ResultSource::RowParser::Util;
+
+use strict;
+use warnings;
+
+use List::Util 'first';
+use B 'perlstring';
+
+use constant HAS_DOR => ( $] < 5.010 ? 0 : 1 );
+
+use base 'Exporter';
+our @EXPORT_OK = qw(
+ assemble_simple_parser
+ assemble_collapsing_parser
+);
+
+# working title - we are hoping to extract this eventually...
+our $null_branch_class = 'DBIx::ResultParser::RelatedNullBranch';
+
+sub assemble_simple_parser {
+ #my ($args) = @_;
+
+ # the non-collapsing assembler is easy
+ # FIXME SUBOPTIMAL there could be a yet faster way to do things here, but
+ # need to try an actual implementation and benchmark it:
+ #
+ # <timbunce_> First setup the nested data structure you want for each row
+ # Then call bind_col() to alias the row fields into the right place in
+ # the data structure, then to fetch the data do:
+ # push @rows, dclone($row_data_struct) while ($sth->fetchrow);
+ #
+ my $parser_src = sprintf('$_ = %s for @{$_[0]}', __visit_infmap_simple($_[0]) );
+
+ # change the quoted placeholders to unquoted alias-references
+ $parser_src =~ s/ \' \xFF__VALPOS__(\d+)__\xFF \' /"\$_->[$1]"/gex;
+
+ $parser_src = " { use strict; use warnings FATAL => 'all';\n$parser_src\n }";
+}
+
+# the simple non-collapsing nested structure recursor
+sub __visit_infmap_simple {
+ my $args = shift;
+
+ my $my_cols = {};
+ my $rel_cols;
+ for (keys %{$args->{val_index}}) {
+ if ($_ =~ /^ ([^\.]+) \. (.+) /x) {
+ $rel_cols->{$1}{$2} = $args->{val_index}{$_};
+ }
+ else {
+ $my_cols->{$_} = $args->{val_index}{$_};
+ }
+ }
+
+ my @relperl;
+ for my $rel (sort keys %$rel_cols) {
+
+ my $rel_struct = __visit_infmap_simple({ %$args,
+ val_index => $rel_cols->{$rel},
+ });
+
+ if (keys %$my_cols) {
+
+ my $branch_null_checks = join ' && ', map
+ { "( ! defined '\xFF__VALPOS__${_}__\xFF' )" }
+ sort { $a <=> $b } values %{$rel_cols->{$rel}}
+ ;
+
+ if ($args->{prune_null_branches}) {
+ $rel_struct = sprintf ( '( (%s) ? undef : %s )',
+ $branch_null_checks,
+ $rel_struct,
+ );
+ }
+ else {
+ $rel_struct = sprintf ( '( (%s) ? bless( (%s), %s ) : %s )',
+ $branch_null_checks,
+ $rel_struct,
+ perlstring($null_branch_class),
+ $rel_struct,
+ );
+ }
+ }
+
+ push @relperl, sprintf '( %s => %s )',
+ perlstring($rel),
+ $rel_struct,
+ ;
+
+ }
+
+ my $me_struct;
+ $me_struct = __result_struct_to_source($my_cols) if keys %$my_cols;
+
+ if ($args->{hri_style}) {
+ $me_struct =~ s/^ \s* \{ | \} \s* $//gx
+ if $me_struct;
+
+ return sprintf '{ %s }', join (', ', $me_struct||(), @relperl);
+ }
+ else {
+ return sprintf '[%s]', join (',',
+ $me_struct || 'undef',
+ @relperl ? sprintf ('{ %s }', join (',', @relperl)) : (),
+ );
+ }
+}
+
+sub assemble_collapsing_parser {
+ my $args = shift;
+
+ # it may get unset further down
+ my $no_rowid_container = $args->{prune_null_branches};
+
+ my ($top_node_key, $top_node_key_assembler);
+
+ if (scalar @{$args->{collapse_map}{-identifying_columns}}) {
+ $top_node_key = join ('', map
+ { "{'\xFF__IDVALPOS__${_}__\xFF'}" }
+ @{$args->{collapse_map}{-identifying_columns}}
+ );
+ }
+ elsif( my @variants = @{$args->{collapse_map}{-identifying_columns_variants}} ) {
+
+ my @path_parts = map { sprintf
+ "( ( defined '\xFF__VALPOS__%d__\xFF' ) && (join qq(\xFF), '', %s, '') )",
+ $_->[0], # checking just first is enough - one ID defined, all defined
+ ( join ', ', map { "'\xFF__VALPOS__${_}__\xFF'" } @$_ ),
+ } @variants;
+
+ my $virtual_column_idx = (scalar keys %{$args->{val_index}} ) + 1;
+
+ $top_node_key = "{'\xFF__IDVALPOS__${virtual_column_idx}__\xFF'}";
+
+ $top_node_key_assembler = sprintf "'\xFF__IDVALPOS__%d__\xFF' = (%s);",
+ $virtual_column_idx,
+ "\n" . join( "\n or\n", @path_parts, qq{"\0\$rows_pos\0"} )
+ ;
+
+ $args->{collapse_map} = {
+ %{$args->{collapse_map}},
+ -custom_node_key => $top_node_key,
+ };
+
+ $no_rowid_container = 0;
+ }
+ else {
+ die('Unexpected collapse map contents');
+ }
+
+ my ($data_assemblers, $stats) = __visit_infmap_collapse ($args);
+
+ my @idcol_args = $no_rowid_container ? ('', '') : (
+ ', %cur_row_ids', # only declare the variable if we'll use it
+ join ("\n", map { qq(\$cur_row_ids{$_} = ) . (
+ # in case we prune - we will never hit these undefs
+ $args->{prune_null_branches} ? qq(\$cur_row_data->[$_];)
+ : HAS_DOR ? qq(\$cur_row_data->[$_] // "\0NULL\xFF\$rows_pos\xFF$_\0";)
+ : qq(defined(\$cur_row_data->[$_]) ? \$cur_row_data->[$_] : "\0NULL\xFF\$rows_pos\xFF$_\0";)
+ ) } sort { $a <=> $b } keys %{ $stats->{idcols_seen} } ),
+ );
+
+ my $parser_src = sprintf (<<'EOS', @idcol_args, $top_node_key_assembler||'', $top_node_key, join( "\n", @{$data_assemblers||[]} ) );
+### BEGIN LITERAL STRING EVAL
+ my $rows_pos = 0;
+ my ($result_pos, @collapse_idx, $cur_row_data %1$s);
+
+ # this loop is a bit arcane - the rationale is that the passed in
+ # $_[0] will either have only one row (->next) or will have all
+ # rows already pulled in (->all and/or unordered). Given that the
+ # result can be rather large - we reuse the same already allocated
+ # array, since the collapsed prefetch is smaller by definition.
+ # At the end we cut the leftovers away and move on.
+ while ($cur_row_data = (
+ ( $rows_pos >= 0 and $_[0][$rows_pos++] )
+ or
+ ( $_[1] and $rows_pos = -1 and $_[1]->() )
+ ) ) {
+
+ # this code exists only when we are using a cur_row_ids
+ # furthermore the undef checks may or may not be there
+ # depending on whether we prune or not
+ #
+ # due to left joins some of the ids may be NULL/undef, and
+ # won't play well when used as hash lookups
+ # we also need to differentiate NULLs on per-row/per-col basis
+ # (otherwise folding of optional 1:1s will be greatly confused
+%2$s
+
+ # in the case of an underdefined root - calculate the virtual id (otherwise no code at all)
+%3$s
+
+ # if we were supplied a coderef - we are collapsing lazily (the set
+ # is ordered properly)
+ # as long as we have a result already and the next result is new we
+ # return the pre-read data and bail
+$_[1] and $result_pos and ! $collapse_idx[0]%4$s and (unshift @{$_[2]}, $cur_row_data) and last;
+
+ # the rel assemblers
+%5$s
+
+ }
+
+ $#{$_[0]} = $result_pos - 1; # truncate the passed in array to where we filled it with results
+### END LITERAL STRING EVAL
+EOS
+
+ # !!! note - different var than the one above
+ # change the quoted placeholders to unquoted alias-references
+ $parser_src =~ s/ \' \xFF__VALPOS__(\d+)__\xFF \' /"\$cur_row_data->[$1]"/gex;
+ $parser_src =~ s/
+ \' \xFF__IDVALPOS__(\d+)__\xFF \'
+ /
+ $no_rowid_container ? "\$cur_row_data->[$1]" : "\$cur_row_ids{$1}"
+ /gex;
+
+ $parser_src = " { use strict; use warnings FATAL => 'all';\n$parser_src\n }";
+}
+
+
+# the collapsing nested structure recursor
+sub __visit_infmap_collapse {
+ my $args = {%{ shift() }};
+
+ my $cur_node_idx = ${ $args->{-node_idx_counter} ||= \do { my $x = 0} }++;
+
+ my ($my_cols, $rel_cols) = {};
+ for ( keys %{$args->{val_index}} ) {
+ if ($_ =~ /^ ([^\.]+) \. (.+) /x) {
+ $rel_cols->{$1}{$2} = $args->{val_index}{$_};
+ }
+ else {
+ $my_cols->{$_} = $args->{val_index}{$_};
+ }
+ }
+
+
+ if ($args->{hri_style}) {
+ delete $my_cols->{$_} for grep { $rel_cols->{$_} } keys %$my_cols;
+ }
+
+ my $me_struct;
+ $me_struct = __result_struct_to_source($my_cols) if keys %$my_cols;
+
+ $me_struct = sprintf( '[ %s ]', $me_struct||'' )
+ unless $args->{hri_style};
+
+
+ my $node_key = $args->{collapse_map}->{-custom_node_key} || join ('', map
+ { "{'\xFF__IDVALPOS__${_}__\xFF'}" }
+ @{$args->{collapse_map}->{-identifying_columns}}
+ );
+ my $node_idx_slot = sprintf '$collapse_idx[%d]%s', $cur_node_idx, $node_key;
+
+
+ my @src;
+
+ if ($cur_node_idx == 0) {
+ push @src, sprintf( '%s %s $_[0][$result_pos++] = %s;',
+ $node_idx_slot,
+ (HAS_DOR ? '//=' : '||='),
+ $me_struct || '{}',
+ );
+ }
+ else {
+ my $parent_attach_slot = sprintf( '$collapse_idx[%d]%s%s{%s}',
+ @{$args}{qw/-parent_node_idx -parent_node_key/},
+ $args->{hri_style} ? '' : '[1]',
+ perlstring($args->{-node_relname}),
+ );
+
+ if ($args->{collapse_map}->{-is_single}) {
+ push @src, sprintf ( '%s %s %s%s;',
+ $parent_attach_slot,
+ (HAS_DOR ? '//=' : '||='),
+ $node_idx_slot,
+ $me_struct ? " = $me_struct" : '',
+ );
+ }
+ else {
+ push @src, sprintf('(! %s) and push @{%s}, %s%s;',
+ $node_idx_slot,
+ $parent_attach_slot,
+ $node_idx_slot,
+ $me_struct ? " = $me_struct" : '',
+ );
+ }
+ }
+
+ my $known_present_ids = { map { $_ => 1 } @{$args->{collapse_map}{-identifying_columns}} };
+ my ($stats, $rel_src);
+
+ for my $rel (sort keys %$rel_cols) {
+
+ my $relinfo = $args->{collapse_map}{$rel};
+
+ ($rel_src, $stats->{$rel}) = __visit_infmap_collapse({ %$args,
+ val_index => $rel_cols->{$rel},
+ collapse_map => $relinfo,
+ -parent_node_idx => $cur_node_idx,
+ -parent_node_key => $node_key,
+ -node_relname => $rel,
+ });
+
+ my $rel_src_pos = $#src + 1;
+ push @src, @$rel_src;
+
+ if (
+ $relinfo->{-is_optional}
+ and
+ defined ( my $first_distinct_child_idcol = first
+ { ! $known_present_ids->{$_} }
+ @{$relinfo->{-identifying_columns}}
+ )
+ ) {
+
+ if ($args->{prune_null_branches}) {
+
+ # start of wrap of the entire chain in a conditional
+ splice @src, $rel_src_pos, 0, sprintf "( ! defined %s )\n ? %s%s{%s} = %s\n : do {",
+ "'\xFF__VALPOS__${first_distinct_child_idcol}__\xFF'",
+ $node_idx_slot,
+ $args->{hri_style} ? '' : '[1]',
+ perlstring($rel),
+ ($args->{hri_style} && $relinfo->{-is_single}) ? 'undef' : '[]'
+ ;
+
+ # end of wrap
+ push @src, '};'
+ }
+ else {
+
+ splice @src, $rel_src_pos + 1, 0, sprintf ( '(defined %s) or bless (%s[1]{%s}, %s);',
+ "'\xFF__VALPOS__${first_distinct_child_idcol}__\xFF'",
+ $node_idx_slot,
+ perlstring($rel),
+ perlstring($null_branch_class),
+ );
+ }
+ }
+ }
+
+ return (
+ \@src,
+ {
+ idcols_seen => {
+ ( map { %{ $_->{idcols_seen} } } values %$stats ),
+ ( map { $_ => 1 } @{$args->{collapse_map}->{-identifying_columns}} ),
+ }
+ }
+ );
+}
+
+sub __result_struct_to_source {
+ sprintf( '{ %s }', join (', ', map
+ { sprintf "%s => '\xFF__VALPOS__%d__\xFF'", perlstring($_), $_[0]{$_} }
+ sort keys %{$_[0]}
+ ));
+}
+
+1;
use Scalar::Util 'blessed';
use List::Util 'first';
use Try::Tiny;
+use DBIx::Class::Carp;
###
### Internal method
use namespace::clean;
+__PACKAGE__->mk_group_accessors ( simple => [ in_storage => '_in_storage' ] );
+
=head1 NAME
DBIx::Class::Row - Basic row methods
## tests!
sub __new_related_find_or_new_helper {
- my ($self, $relname, $data) = @_;
+ my ($self, $relname, $values) = @_;
my $rsrc = $self->result_source;
# create a mock-object so all new/set_column component overrides will run:
my $rel_rs = $rsrc->related_source($relname)->resultset;
- my $new_rel_obj = $rel_rs->new_result($data);
+ my $new_rel_obj = $rel_rs->new_result($values);
my $proc_data = { $new_rel_obj->get_columns };
if ($self->__their_pk_needs_us($relname)) {
sub __their_pk_needs_us { # this should maybe be in resultsource.
my ($self, $relname) = @_;
- my $source = $self->result_source;
- my $reverse = $source->reverse_relationship_info($relname);
- my $rel_source = $source->related_source($relname);
+ my $rsrc = $self->result_source;
+ my $reverse = $rsrc->reverse_relationship_info($relname);
+ my $rel_source = $rsrc->related_source($relname);
my $us = { $self->get_columns };
foreach my $key (keys %$reverse) {
# if their primary key depends on us, then we have to
my ($class, $attrs) = @_;
$class = ref $class if ref $class;
- my $new = bless { _column_data => {} }, $class;
+ my $new = bless { _column_data => {}, _in_storage => 0 }, $class;
if ($attrs) {
$new->throw_exception("attrs must be a hashref")
unless ref($attrs) eq 'HASH';
- my $source = delete $attrs->{-result_source};
+ my $rsrc = delete $attrs->{-result_source};
if ( my $h = delete $attrs->{-source_handle} ) {
- $source ||= $h->resolve;
+ $rsrc ||= $h->resolve;
}
- $new->result_source($source) if $source;
+ $new->result_source($rsrc) if $rsrc;
if (my $col_from_rel = delete $attrs->{-cols_from_relations}) {
@{$new->{_ignore_at_insert}={}}{@$col_from_rel} = ();
if (ref $attrs->{$key}) {
## Can we extract this lot to use with update(_or .. ) ?
$new->throw_exception("Can't do multi-create without result source")
- unless $source;
- my $info = $source->relationship_info($key);
+ unless $rsrc;
+ my $info = $rsrc->relationship_info($key);
my $acc_type = $info->{attrs}{accessor} || '';
if ($acc_type eq 'single') {
my $rel_obj = delete $attrs->{$key};
sub insert {
my ($self) = @_;
return $self if $self->in_storage;
- my $source = $self->result_source;
+ my $rsrc = $self->result_source;
$self->throw_exception("No result_source set on this object; can't insert")
- unless $source;
+ unless $rsrc;
- my $storage = $source->storage;
+ my $storage = $rsrc->storage;
my $rollback_guard;
if (! $self->{_rel_in_storage}{$relname}) {
next unless (blessed $rel_obj && $rel_obj->isa('DBIx::Class::Row'));
- next unless $source->_pk_depends_on(
+ next unless $rsrc->_pk_depends_on(
$relname, { $rel_obj->get_columns }
);
# (autoinc primary columns and any retrieve_on_insert columns)
my %current_rowdata = $self->get_columns;
my $returned_cols = $storage->insert(
- $source,
+ $rsrc,
{ %current_rowdata }, # what to insert, copy because the storage *will* change it
);
$self->{related_resultsets} = {};
foreach my $relname (keys %related_stuff) {
- next unless $source->has_relationship ($relname);
+ next unless $rsrc->has_relationship ($relname);
my @cands = ref $related_stuff{$relname} eq 'ARRAY'
? @{$related_stuff{$relname}}
if (@cands && blessed $cands[0] && $cands[0]->isa('DBIx::Class::Row')
) {
- my $reverse = $source->reverse_relationship_info($relname);
+ my $reverse = $rsrc->reverse_relationship_info($relname);
foreach my $obj (@cands) {
$obj->set_from_related($_, $self) for keys %$reverse;
if ($self->__their_pk_needs_us($relname)) {
Creating a result object using L<DBIx::Class::ResultSet/new_result>, or
calling L</delete> on one, sets it to false.
-=cut
-
-sub in_storage {
- my ($self, $val) = @_;
- $self->{_in_storage} = $val if @_ > 1;
- return $self->{_in_storage} ? 1 : 0;
-}
=head2 update
);
delete $self->{_column_data_in_storage};
- $self->in_storage(undef);
+ $self->in_storage(0);
}
else {
my $rsrc = try { $self->result_source_instance }
my $self = shift;
if (exists $self->{_inflated_column}) {
foreach my $col (keys %{$self->{_inflated_column}}) {
- $self->store_column($col, $self->_deflated_column($col, $self->{_inflated_column}{$col}))
- unless exists $self->{_column_data}{$col};
+ unless (exists $self->{_column_data}{$col}) {
+
+ # if cached related_resultset is present assume this was a prefetch
+ carp_unique(
+ "Returning primary keys of prefetched 'filter' rels as part of get_columns() is deprecated and will "
+ . 'eventually be removed entirely (set DBIC_COLUMNS_INCLUDE_FILTER_RELS to disable this warning)'
+ ) if (
+ ! $ENV{DBIC_COLUMNS_INCLUDE_FILTER_RELS}
+ and
+ defined $self->{related_resultsets}{$col}
+ and
+ defined $self->{related_resultsets}{$col}->get_cache
+ );
+
+ $self->store_column($col, $self->_deflated_column($col, $self->{_inflated_column}{$col}));
+ }
}
}
return %{$self->{_column_data}};
really changed.
=cut
+
sub make_column_dirty {
my ($self, $column) = @_;
grep { $self->has_column_loaded($_) } $self->columns
]);
- my %inflated;
- for my $col (keys %$loaded_colinfo) {
- if (exists $loaded_colinfo->{$col}{accessor}) {
- my $acc = $loaded_colinfo->{$col}{accessor};
- $inflated{$col} = $self->$acc if defined $acc;
- }
- else {
- $inflated{$col} = $self->$col;
+ my %cols_to_return = ( %{$self->{_column_data}}, %$loaded_colinfo );
+
+ unless ($ENV{DBIC_COLUMNS_INCLUDE_FILTER_RELS}) {
+ for (keys %$loaded_colinfo) {
+ # if cached related_resultset is present assume this was a prefetch
+ if (
+ $loaded_colinfo->{$_}{_inflate_info}
+ and
+ defined $self->{related_resultsets}{$_}
+ and
+ defined $self->{related_resultsets}{$_}->get_cache
+ ) {
+ carp_unique(
+ "Returning prefetched 'filter' rels as part of get_inflated_columns() is deprecated and will "
+ . 'eventually be removed entirely (set DBIC_COLUMNS_INCLUDE_FILTER_RELS to disable this warning)'
+ );
+ last;
+ }
}
}
- # return all loaded columns with the inflations overlayed on top
- return %{ { $self->get_columns, %inflated } };
+ map { $_ => (
+ (
+ ! exists $loaded_colinfo->{$_}
+ or
+ (
+ exists $loaded_colinfo->{$_}{accessor}
+ and
+ ! defined $loaded_colinfo->{$_}{accessor}
+ )
+ ) ? $self->get_column($_)
+ : $self->${ \(
+ defined $loaded_colinfo->{$_}{accessor}
+ ? $loaded_colinfo->{$_}{accessor}
+ : $_
+ )}
+ )} keys %cols_to_return;
}
sub _is_column_numeric {
#
# FIXME - this is a quick *largely incorrect* hack, pending a more
# serious rework during the merge of single and filter rels
- my $rels = $self->result_source->{_relationships};
- for my $rel (keys %$rels) {
+ my $relnames = $self->result_source->{_relationships};
+ for my $relname (keys %$relnames) {
- my $acc = $rels->{$rel}{attrs}{accessor} || '';
+ my $acc = $relnames->{$relname}{attrs}{accessor} || '';
- if ( $acc eq 'single' and $rels->{$rel}{attrs}{fk_columns}{$column} ) {
- delete $self->{related_resultsets}{$rel};
- delete $self->{_relationship_data}{$rel};
- #delete $self->{_inflated_column}{$rel};
+ if ( $acc eq 'single' and $relnames->{$relname}{attrs}{fk_columns}{$column} ) {
+ delete $self->{related_resultsets}{$relname};
+ delete $self->{_relationship_data}{$relname};
+ #delete $self->{_inflated_column}{$relname};
}
- elsif ( $acc eq 'filter' and $rel eq $column) {
- delete $self->{related_resultsets}{$rel};
- #delete $self->{_relationship_data}{$rel};
- delete $self->{_inflated_column}{$rel};
+ elsif ( $acc eq 'filter' and $relname eq $column) {
+ delete $self->{related_resultsets}{$relname};
+ #delete $self->{_relationship_data}{$relname};
+ delete $self->{_inflated_column}{$relname};
}
}
=cut
sub set_columns {
- my ($self,$data) = @_;
- foreach my $col (keys %$data) {
- $self->set_column($col,$data->{$col});
- }
+ my ($self, $values) = @_;
+ $self->set_column( $_, $values->{$_} ) for keys %$values;
return $self;
}
my $info = $self->relationship_info($key);
my $acc_type = $info->{attrs}{accessor} || '';
if ($acc_type eq 'single') {
- my $rel = delete $upd->{$key};
- $self->set_from_related($key => $rel);
- $self->{_relationship_data}{$key} = $rel;
+ my $rel_obj = delete $upd->{$key};
+ $self->set_from_related($key => $rel_obj);
+ $self->{_relationship_data}{$key} = $rel_obj;
}
elsif ($acc_type eq 'multi') {
$self->throw_exception(
# Its possible we'll have 2 relations to the same Source. We need to make
# sure we don't try to insert the same row twice else we'll violate unique
# constraints
- my $rels_copied = {};
+ my $relnames_copied = {};
- foreach my $rel ($self->result_source->relationships) {
- my $rel_info = $self->result_source->relationship_info($rel);
+ foreach my $relname ($self->result_source->relationships) {
+ my $rel_info = $self->result_source->relationship_info($relname);
next unless $rel_info->{attrs}{cascade_copy};
my $resolved = $self->result_source->_resolve_condition(
- $rel_info->{cond}, $rel, $new, $rel
+ $rel_info->{cond}, $relname, $new, $relname
);
- my $copied = $rels_copied->{ $rel_info->{source} } ||= {};
- foreach my $related ($self->search_related($rel)) {
+ my $copied = $relnames_copied->{ $rel_info->{source} } ||= {};
+ foreach my $related ($self->search_related($relname)) {
my $id_str = join("\0", $related->id);
next if $copied->{$id_str};
$copied->{$id_str} = 1;
=cut
sub inflate_result {
- my ($class, $source, $me, $prefetch) = @_;
-
- $source = $source->resolve
- if $source->isa('DBIx::Class::ResultSourceHandle');
+ my ($class, $rsrc, $me, $prefetch) = @_;
my $new = bless
- { _column_data => $me, _result_source => $source },
+ { _column_data => $me, _result_source => $rsrc },
ref $class || $class
;
- foreach my $pre (keys %{$prefetch||{}}) {
+ if ($prefetch) {
+ for my $relname ( keys %$prefetch ) {
- my (@pre_vals, $is_multi);
- if (ref $prefetch->{$pre}[0] eq 'ARRAY') {
- $is_multi = 1;
- @pre_vals = @{$prefetch->{$pre}};
- }
- else {
- @pre_vals = $prefetch->{$pre};
- }
-
- my $pre_source = try {
- $source->related_source($pre)
- }
- catch {
- $class->throw_exception(sprintf
+ my $relinfo = $rsrc->relationship_info($relname) or do {
+ my $err = sprintf
+ "Inflation into non-existent relationship '%s' of '%s' requested",
+ $relname,
+ $rsrc->source_name,
+ ;
+ if (my ($colname) = sort { length($a) <=> length ($b) } keys %{$prefetch->{$relname}[0] || {}} ) {
+ $err .= sprintf ", check the inflation specification (columns/as) ending in '...%s.%s'",
+ $relname,
+ $colname,
+ }
- "Can't inflate manual prefetch into non-existent relationship '%s' from '%s', "
- . "check the inflation specification (columns/as) ending in '%s.%s'.",
+ $rsrc->throw_exception($err);
+ };
- $pre,
- $source->source_name,
- $pre,
- (keys %{$pre_vals[0][0]})[0] || 'something.something...',
- );
- };
+ $class->throw_exception("No accessor type declared for prefetched relationship '$relname'")
+ unless $relinfo->{attrs}{accessor};
- my $accessor = $source->relationship_info($pre)->{attrs}{accessor}
- or $class->throw_exception("No accessor type declared for prefetched $pre");
+ my @rel_objects;
+ if (
+ $prefetch->{$relname}
+ and
+ @{$prefetch->{$relname}}
+ and
+ ref($prefetch->{$relname}) ne $DBIx::Class::ResultSource::RowParser::Util::null_branch_class
+ ) {
- if (! $is_multi and $accessor eq 'multi') {
- $class->throw_exception("Manual prefetch (via select/columns) not supported with accessor 'multi'");
- }
+ my $rel_rs = $new->related_resultset($relname);
- my @pre_objects;
- for my $me_pref (@pre_vals) {
-
- # FIXME - this should not be necessary
- # the collapser currently *could* return bogus elements with all
- # columns set to undef
- my $has_def;
- for (values %{$me_pref->[0]}) {
- if (defined $_) {
- $has_def++;
- last;
- }
+ if (ref $prefetch->{$relname}[0] eq 'ARRAY') {
+ my $rel_rsrc = $rel_rs->result_source;
+ my $rel_class = $rel_rs->result_class;
+ my $rel_inflator = $rel_class->can('inflate_result');
+ @rel_objects = map
+ { $rel_class->$rel_inflator ( $rel_rsrc, @$_ ) }
+ @{$prefetch->{$relname}}
+ ;
+ }
+ else {
+ @rel_objects = $rel_rs->result_class->inflate_result(
+ $rel_rs->result_source, @{$prefetch->{$relname}}
+ );
}
- next unless $has_def;
+ }
- push @pre_objects, $pre_source->result_class->inflate_result(
- $pre_source, @$me_pref
- );
- }
+ if ($relinfo->{attrs}{accessor} eq 'single') {
+ $new->{_relationship_data}{$relname} = $rel_objects[0];
+ }
+ elsif ($relinfo->{attrs}{accessor} eq 'filter') {
+ $new->{_inflated_column}{$relname} = $rel_objects[0];
+ }
- if ($accessor eq 'single') {
- $new->{_relationship_data}{$pre} = $pre_objects[0];
+ $new->related_resultset($relname)->set_cache(\@rel_objects);
}
- elsif ($accessor eq 'filter') {
- $new->{_inflated_column}{$pre} = $pre_objects[0];
- }
-
- $new->related_resultset($pre)->set_cache(\@pre_objects);
}
$new->in_storage (1);
}
}
+sub _split_order_chunk {
+ my ($self, $chunk) = @_;
+
+ # strip off sort modifiers, but always succeed, so $1 gets reset
+ $chunk =~ s/ (?: \s+ (ASC|DESC) )? \s* $//ix;
+
+ return (
+ $chunk,
+ ( $1 and uc($1) eq 'DESC' ) ? 1 : 0,
+ );
+}
+
sub _table {
# optimized due to hotttnesss
# my ($self, $from) = @_;
sub _recurse_from {
my $self = shift;
-
return join (' ', $self->_gen_from_blocks(@_) );
}
for my $ch ($self->_order_by_chunks ($inner_order)) {
$ch = $ch->[0] if ref $ch eq 'ARRAY';
- my $is_desc = (
- $ch =~ s/\s+ ( ASC|DESC ) \s* $//ix
- and
- uc($1) eq 'DESC'
- ) ? 1 : 0;
- push @out_chunks, \join (' ', $ch, $is_desc ? 'ASC' : 'DESC' );
+ ($ch, my $is_desc) = $self->_split_order_chunk($ch);
+
+ # !NOTE! outside chunks come in reverse order ( !$is_desc )
+ push @out_chunks, { ($is_desc ? '-asc' : '-desc') => \$ch };
}
$sq_attrs->{order_by_middle} = $self->_order_by (\@out_chunks);
my ($self, $sql, $rs_attrs, $rows, $offset) = @_;
my $root_rsrc = $rs_attrs->{_rsroot_rsrc};
- my $root_tbl_name = $root_rsrc->name;
- my ($first_order_by) = do {
+ # Explicitly require an order_by
+ # GenSubQ is slow enough as it is, just emulating things
+ # like in other cases is not wise - make the user work
+ # to shoot their DBA in the foot
+ my $supplied_order = delete $rs_attrs->{order_by} or $self->throw_exception (
+ 'Generic Subquery Limit does not work on resultsets without an order. Provide a stable, '
+ . 'root-table-based order criteria.'
+ );
+
+ my $usable_order_ci = $root_rsrc->storage->_main_source_order_by_portion_is_stable(
+ $root_rsrc,
+ $supplied_order,
+ $rs_attrs->{where},
+ ) or $self->throw_exception(
+ 'Generic Subquery Limit can not work with order criteria based on sources other than the current one'
+ );
+
+###
+###
+### we need to know the directions after we figured out the above - reextract *again*
+### this is eyebleed - trying to get it to work at first
+ my @order_bits = do {
local $self->{quote_char};
local $self->{order_bind};
- map { ref $_ ? $_->[0] : $_ } $self->_order_by_chunks ($rs_attrs->{order_by})
- } or $self->throw_exception (
- 'Generic Subquery Limit does not work on resultsets without an order. Provide a single, '
- . 'unique-column order criteria.'
- );
+ map { ref $_ ? $_->[0] : $_ } $self->_order_by_chunks ($supplied_order)
+ };
- my $direction = (
- $first_order_by =~ s/\s+ ( ASC|DESC ) \s* $//ix
- ) ? lc($1) : 'asc';
+ # truncate to what we'll use
+ $#order_bits = ( (keys %$usable_order_ci) - 1 );
- my ($first_ord_alias, $first_ord_col) = $first_order_by =~ /^ (?: ([^\.]+) \. )? ([^\.]+) $/x;
+ # @order_bits likely will come back quoted (due to how the prefetch
+ # rewriter operates)
+ # Hence supplement the column_info lookup table with quoted versions
+ if ($self->quote_char) {
+ $usable_order_ci->{$self->_quote($_)} = $usable_order_ci->{$_}
+ for keys %$usable_order_ci;
+ }
- $self->throw_exception(sprintf
- "Generic Subquery Limit order criteria can be only based on the root-source '%s'"
- . " (aliased as '%s')", $root_rsrc->source_name, $rs_attrs->{alias},
- ) if ($first_ord_alias and $first_ord_alias ne $rs_attrs->{alias});
+# calculate the condition
+ my $count_tbl_alias = 'rownum__emulation';
+ my $root_alias = $rs_attrs->{alias};
+ my $root_tbl_name = $root_rsrc->name;
- $first_ord_alias ||= $rs_attrs->{alias};
+ my (@unqualified_names, @qualified_names, @is_desc, @new_order_by);
- $self->throw_exception(
- "Generic Subquery Limit first order criteria '$first_ord_col' must be unique"
- ) unless $root_rsrc->_identifying_column_set([$first_ord_col]);
-
- my $sq_attrs = do {
- # perform the mangling only using the very first order crietria
- # (the one we care about)
- local $rs_attrs->{order_by} = $first_order_by;
- $self->_subqueried_limit_attrs ($sql, $rs_attrs);
- };
+ for my $bit (@order_bits) {
- my $cmp_op = $direction eq 'desc' ? '>' : '<';
- my $count_tbl_alias = 'rownum__emulation';
+ ($bit, my $is_desc) = $self->_split_order_chunk($bit);
- my ($order_sql, @order_bind) = do {
- local $self->{order_bind};
- my $s = $self->_order_by (delete $rs_attrs->{order_by});
- ($s, @{$self->{order_bind}});
+ push @is_desc, $is_desc;
+ push @unqualified_names, $usable_order_ci->{$bit}{-colname};
+ push @qualified_names, $usable_order_ci->{$bit}{-fq_colname};
+
+ push @new_order_by, { ($is_desc ? '-desc' : '-asc') => $usable_order_ci->{$bit}{-fq_colname} };
};
- my $group_having_sql = $self->_parse_rs_attrs($rs_attrs);
- my $in_sel = $sq_attrs->{selection_inner};
+ my (@where_cond, @skip_colpair_stack);
+ for my $i (0 .. $#order_bits) {
+ my $ci = $usable_order_ci->{$order_bits[$i]};
+
+ my ($subq_col, $main_col) = map { "$_.$ci->{-colname}" } ($count_tbl_alias, $root_alias);
+ my $cur_cond = { $subq_col => { ($is_desc[$i] ? '>' : '<') => { -ident => $main_col } } };
+
+ push @skip_colpair_stack, [
+ { $main_col => { -ident => $subq_col } },
+ ];
+
+ # we can trust the nullability flag because
+ # we already used it during _id_col_set resolution
+ #
+ if ($ci->{is_nullable}) {
+ push @{$skip_colpair_stack[-1]}, { $main_col => undef, $subq_col=> undef };
+
+ $cur_cond = [
+ {
+ ($is_desc[$i] ? $subq_col : $main_col) => { '!=', undef },
+ ($is_desc[$i] ? $main_col : $subq_col) => undef,
+ },
+ {
+ $subq_col => { '!=', undef },
+ $main_col => { '!=', undef },
+ -and => $cur_cond,
+ },
+ ];
+ }
- # add the order supplement (if any) as this is what will be used for the outer WHERE
- $in_sel .= ", $_" for keys %{$sq_attrs->{order_supplement}};
+ push @where_cond, { '-and', => [ @skip_colpair_stack[0..$i-1], $cur_cond ] };
+ }
+
+# reuse the sqlmaker WHERE, this will not be returning binds
+ my $counted_where = do {
+ local $self->{where_bind};
+ $self->where(\@where_cond);
+ };
+# construct the rownum condition by hand
my $rownum_cond;
if ($offset) {
$rownum_cond = 'BETWEEN ? AND ?';
-
push @{$self->{limit_bind}},
[ $self->__offset_bindtype => $offset ],
[ $self->__total_bindtype => $offset + $rows - 1]
}
else {
$rownum_cond = '< ?';
-
push @{$self->{limit_bind}},
[ $self->__rows_bindtype => $rows ]
;
}
- # even though binds in order_by make no sense here (the rs needs to be
- # ordered by a unique column first) - pass whatever there may be through
- # anyway
- push @{$self->{limit_bind}}, @order_bind;
+# and what we will order by inside
+ my $inner_order_sql = do {
+ local $self->{order_bind};
+
+ my $s = $self->_order_by (\@new_order_by);
+
+ $self->throw_exception('Inner gensubq order may not contain binds... something went wrong')
+ if @{$self->{order_bind}};
+
+ $s;
+ };
+
+### resume originally scheduled programming
+###
+###
+
+ # we need to supply the order for the supplements to be properly calculated
+ my $sq_attrs = $self->_subqueried_limit_attrs (
+ $sql, { %$rs_attrs, order_by => \@new_order_by }
+ );
+
+ my $in_sel = $sq_attrs->{selection_inner};
+
+ # add the order supplement (if any) as this is what will be used for the outer WHERE
+ $in_sel .= ", $_" for sort keys %{$sq_attrs->{order_supplement}};
+
+ my $group_having_sql = $self->_parse_rs_attrs($rs_attrs);
+
return sprintf ("
SELECT $sq_attrs->{selection_outer}
FROM (
SELECT $in_sel $sq_attrs->{query_leftover}${group_having_sql}
) %s
-WHERE ( SELECT COUNT(*) FROM %s %s WHERE %s $cmp_op %s ) $rownum_cond
-$order_sql
+WHERE ( SELECT COUNT(*) FROM %s %s $counted_where ) $rownum_cond
+$inner_order_sql
", map { $self->_quote ($_) } (
$rs_attrs->{alias},
$root_tbl_name,
$count_tbl_alias,
- "$count_tbl_alias.$first_ord_col",
- "$first_ord_alias.$first_ord_col",
));
}
for my $chunk ($self->_order_by_chunks ($rs_attrs->{order_by})) {
# order with bind
$chunk = $chunk->[0] if (ref $chunk) eq 'ARRAY';
- $chunk =~ s/\s+ (?: ASC|DESC ) \s* $//ix;
+ ($chunk) = $self->_split_order_chunk($chunk);
next if $in_sel_index->{$chunk};
$new->_sql_maker_opts({});
$new->_dbh_details({});
$new->{_in_do_block} = 0;
- $new->{_dbh_gen} = 0;
# read below to see what this does
$new->_arm_global_destructor;
# soon as possible (DBIC will reconnect only on demand from within
# the thread)
my @instances = grep { defined $_ } values %seek_and_destroy;
+ %seek_and_destroy = ();
+
for (@instances) {
- $_->{_dbh_gen}++; # so that existing cursors will drop as well
$_->_dbh(undef);
$_->transaction_depth(0);
$_->savepoints([]);
- }
- # properly renumber all existing refs
- %seek_and_destroy = ();
- $_->_arm_global_destructor for @instances;
+ # properly renumber existing refs
+ $_->_arm_global_destructor
+ }
}
}
my $pid = $self->_conn_pid;
if( defined $pid and $pid != $$ and my $dbh = $self->_dbh ) {
$dbh->{InactiveDestroy} = 1;
- $self->{_dbh_gen}++;
$self->_dbh(undef);
$self->transaction_depth(0);
$self->savepoints([]);
%{ $self->_dbh->{CachedKids} } = ();
$self->_dbh->disconnect;
$self->_dbh(undef);
- $self->{_dbh_gen}++;
}
}
my ($sql, $bind) = $self->_prep_for_execute($op, $ident, \@args);
- shift->dbh_do( _dbh_execute => # retry over disconnects
+ # not even a PID check - we do not care about the state of the _dbh.
+ # All we need is to get the appropriate drivers loaded if they aren't
+ # already so that the assumption in ad7c50fc26e holds
+ $self->_populate_dbh unless $self->_dbh;
+
+ $self->dbh_do( _dbh_execute => # retry over disconnects
$sql,
$bind,
$self->_dbi_attrs_for_bind($ident, $bind),
unless (@pri_values == @missing_pri);
@returned_cols{@missing_pri} = @pri_values;
- delete $retrieve_cols{$_} for @missing_pri;
+ delete @retrieve_cols{@missing_pri};
}
# if there is more left to pull
}
sub _select_args {
- my ($self, $ident, $select, $where, $attrs) = @_;
+ my ($self, $ident, $select, $where, $orig_attrs) = @_;
+
+ return (
+ 'select', @{$orig_attrs->{_sqlmaker_select_args}}
+ ) if $orig_attrs->{_sqlmaker_select_args};
my $sql_maker = $self->sql_maker;
- my ($alias2source, $rs_alias) = $self->_resolve_ident_sources ($ident);
+ my $alias2source = $self->_resolve_ident_sources ($ident);
- $attrs = {
- %$attrs,
+ my $attrs = {
+ %$orig_attrs,
select => $select,
from => $ident,
where => $where,
- $rs_alias && $alias2source->{$rs_alias}
- ? ( _rsroot_rsrc => $alias2source->{$rs_alias} )
+
+ # limit dialects use this stuff
+ # yes, some CDBICompat crap does not supply an {alias} >.<
+ ( $orig_attrs->{alias} and $alias2source->{$orig_attrs->{alias}} )
+ ? ( _rsroot_rsrc => $alias2source->{$orig_attrs->{alias}} )
: ()
,
};
$attrs->{rows} = $sql_maker->__max_int;
}
- my @limit;
-
- # see if we need to tear the prefetch apart otherwise delegate the limiting to the
- # storage, unless software limit was requested
- if (
- #limited has_many
- ( $attrs->{rows} && keys %{$attrs->{collapse}} )
- ||
- # grouped prefetch (to satisfy group_by == select)
- ( $attrs->{group_by}
- &&
- @{$attrs->{group_by}}
- &&
- $attrs->{_prefetch_selector_range}
- )
+ # see if we will need to tear the prefetch apart to satisfy group_by == select
+ # this is *extremely tricky* to get right, I am still not sure I did
+ #
+ my ($prefetch_needs_subquery, @limit_args);
+
+ if ( $attrs->{_grouped_by_distinct} and $attrs->{collapse} ) {
+ # we already know there is a valid group_by and we know it is intended
+ # to be based *only* on the main result columns
+ # short circuit the group_by parsing below
+ $prefetch_needs_subquery = 1;
+ }
+ elsif (
+ # The rationale is that even if we do *not* have collapse, we still
+ # need to wrap the core grouped select/group_by in a subquery
+ # so that databases that care about group_by/select equivalence
+ # are happy (this includes MySQL in strict_mode)
+ # If any of the other joined tables are referenced in the group_by
+ # however - the user is on their own
+ ( $prefetch_needs_subquery or $attrs->{_related_results_construction} )
+ and
+ $attrs->{group_by}
+ and
+ @{$attrs->{group_by}}
+ and
+ my $grp_aliases = try { # try{} because $attrs->{from} may be unreadable
+ $self->_resolve_aliastypes_from_select_args( $attrs->{from}, undef, undef, { group_by => $attrs->{group_by} } )
+ }
) {
- ($ident, $select, $where, $attrs)
- = $self->_adjust_select_args_for_complex_prefetch ($ident, $select, $where, $attrs);
+ # no aliases other than our own in group_by
+ # if there are - do not allow subquery even if limit is present
+ $prefetch_needs_subquery = ! scalar grep { $_ ne $attrs->{alias} } keys %{ $grp_aliases->{grouping} || {} };
+ }
+ elsif ( $attrs->{rows} && $attrs->{collapse} ) {
+ # active collapse with a limit - that one is a no-brainer unless
+ # overruled by a group_by above
+ $prefetch_needs_subquery = 1;
+ }
+
+ if ($prefetch_needs_subquery) {
+ ($ident, $select, $where, $attrs) =
+ $self->_adjust_select_args_for_complex_prefetch ($ident, $select, $where, $attrs);
}
elsif (! $attrs->{software_limit} ) {
- push @limit, (
+ push @limit_args, (
$attrs->{rows} || (),
$attrs->{offset} || (),
);
# try to simplify the joinmap further (prune unreferenced type-single joins)
if (
+ ! $prefetch_needs_subquery # already pruned
+ and
ref $ident
and
reftype $ident eq 'ARRAY'
and
@$ident != 1
) {
- $ident = $self->_prune_unused_joins ($ident, $select, $where, $attrs);
+ ($ident, $attrs->{_aliastypes}) = $self->_prune_unused_joins ($ident, $select, $where, $attrs);
}
###
# invoked, and that's just bad...
###
- return ('select', $ident, $select, $where, $attrs, @limit);
+ return ( 'select', @{ $orig_attrs->{_sqlmaker_select_args} = [
+ $ident, $select, $where, $attrs, @limit_args
+ ]} );
}
# Returns a counting SELECT for a simple count
=cut
-sub _dbh_next {
- my ($storage, $dbh, $self) = @_;
+sub next {
+ my $self = shift;
- my $next = $self->next::can;
+ my @row = $self->next::method(@_);
- my @row = $next->(@_);
-
- my $col_infos = $storage->_resolve_column_info($self->args->[0]);
-
- my $select = $self->args->[1];
-
- _normalize_guids($select, $col_infos, \@row, $storage);
+ _normalize_guids(
+ $self->args->[1],
+ $self->{_colinfos} ||= $self->storage->_resolve_column_info($self->args->[0]),
+ \@row,
+ $self->storage
+ );
return @row;
}
-sub _dbh_all {
- my ($storage, $dbh, $self) = @_;
-
- my $next = $self->next::can;
-
- my @rows = $next->(@_);
-
- my $col_infos = $storage->_resolve_column_info($self->args->[0]);
+sub all {
+ my $self = shift;
- my $select = $self->args->[1];
+ my @rows = $self->next::method(@_);
- _normalize_guids($select, $col_infos, $_, $storage) for @rows;
+ _normalize_guids(
+ $self->args->[1],
+ $self->{_colinfos} ||= $self->storage->_resolve_column_info($self->args->[0]),
+ $_,
+ $self->storage
+ ) for @rows;
return @rows;
}
=cut
-sub _dbh_next {
- my ($storage, $dbh, $self) = @_;
+sub next {
+ my $self = shift;
- my $next = $self->next::can;
+ my @row = $self->next::method(@_);
- my @row = $next->(@_);
+ $self->{_colinfos} ||= $self->storage->_resolve_column_info($self->args->[0]);
- my $col_infos = $storage->_resolve_column_info($self->args->[0]);
+ _normalize_guids(
+ $self->args->[1],
+ $self->{_colinfos},
+ \@row,
+ $self->storage
+ );
- my $select = $self->args->[1];
-
- _normalize_guids($select, $col_infos, \@row, $storage);
- _strip_trailing_binary_nulls($select, $col_infos, \@row, $storage);
+ _strip_trailing_binary_nulls(
+ $self->args->[1],
+ $self->{_colinfos},
+ \@row,
+ $self->storage
+ );
return @row;
}
-sub _dbh_all {
- my ($storage, $dbh, $self) = @_;
-
- my $next = $self->next::can;
-
- my @rows = $next->(@_);
+sub all {
+ my $self = shift;
- my $col_infos = $storage->_resolve_column_info($self->args->[0]);
+ my @rows = $self->next::method(@_);
- my $select = $self->args->[1];
+ $self->{_colinfos} ||= $self->storage->_resolve_column_info($self->args->[0]);
for (@rows) {
- _normalize_guids($select, $col_infos, $_, $storage);
- _strip_trailing_binary_nulls($select, $col_infos, $_, $storage);
+ _normalize_guids(
+ $self->args->[1],
+ $self->{_colinfos},
+ $_,
+ $self->storage
+ );
+
+ _strip_trailing_binary_nulls(
+ $self->args->[1],
+ $self->{_colinfos},
+ $_,
+ $self->storage
+ );
}
return @rows;
use base qw/DBIx::Class::Cursor/;
use Try::Tiny;
+use Scalar::Util qw/refaddr weaken/;
use namespace::clean;
__PACKAGE__->mk_group_accessors('simple' =>
- qw/sth storage args attrs/
+ qw/storage args attrs/
);
=head1 NAME
=cut
-sub new {
- my ($class, $storage, $args, $attrs) = @_;
- $class = ref $class if ref $class;
+{
+ my %cursor_registry;
- my $new = {
- storage => $storage,
- args => $args,
- attrs => $attrs,
- _dbh_gen => $storage->{_dbh_gen},
- _pos => 0,
- _done => 0,
- };
+ sub new {
+ my ($class, $storage, $args, $attrs) = @_;
- return bless ($new, $class);
+ my $self = bless {
+ storage => $storage,
+ args => $args,
+ attrs => $attrs,
+ }, ref $class || $class;
+
+ weaken( $cursor_registry{ refaddr($self) } = $self )
+ if DBIx::Class::_ENV_::HAS_ITHREADS;
+
+ return $self;
+ }
+
+ sub CLONE {
+ for (keys %cursor_registry) {
+ # once marked we no longer care about them, hence no
+ # need to keep in the registry, let alone renumbering the
+ # keys (all addresses are now different)
+ my $self = delete $cursor_registry{$_}
+ or next;
+
+ $self->{_intra_thread} = 1;
+ }
+ }
}
=head2 next
=cut
-sub _dbh_next {
- my ($storage, $dbh, $self) = @_;
+sub next {
+ my $self = shift;
+
+ return if $self->{_done};
+
+ my $sth;
- $self->_check_dbh_gen;
if (
$self->{attrs}{software_limit}
&& $self->{attrs}{rows}
- && $self->{_pos} >= $self->{attrs}{rows}
+ && ($self->{_pos}||0) >= $self->{attrs}{rows}
) {
- $self->sth->finish if $self->sth->{Active};
- $self->sth(undef);
+ if ($sth = $self->sth) {
+ # explicit finish will issue warnings, unlike the DESTROY below
+ $sth->finish if $sth->FETCH('Active');
+ }
$self->{_done} = 1;
+ return;
}
- return if $self->{_done};
+ unless ($sth = $self->sth) {
+ (undef, $sth, undef) = $self->storage->_select( @{$self->{args}} );
+
+ $self->{_results} = [ (undef) x $sth->FETCH('NUM_OF_FIELDS') ];
+ $sth->bind_columns( \( @{$self->{_results}} ) );
- unless ($self->sth) {
- $self->sth(($storage->_select(@{$self->{args}}))[1]);
- if ($self->{attrs}{software_limit}) {
- if (my $offset = $self->{attrs}{offset}) {
- $self->sth->fetch for 1 .. $offset;
- }
+ if ( $self->{attrs}{software_limit} and $self->{attrs}{offset} ) {
+ $sth->fetch for 1 .. $self->{attrs}{offset};
}
+
+ $self->sth($sth);
}
- my @row = $self->sth->fetchrow_array;
- if (@row) {
+
+ if ($sth->fetch) {
$self->{_pos}++;
+ return @{$self->{_results}};
} else {
- $self->sth(undef);
$self->{_done} = 1;
+ return ();
}
- return @row;
}
-sub next {
- my ($self) = @_;
- $self->{storage}->dbh_do($self->can('_dbh_next'), $self);
-}
=head2 all
=cut
-sub _dbh_all {
- my ($storage, $dbh, $self) = @_;
+sub all {
+ my $self = shift;
+
+ # delegate to DBIC::Cursor which will delegate back to next()
+ if ($self->{attrs}{software_limit}
+ && ($self->{attrs}{offset} || $self->{attrs}{rows})) {
+ return $self->next::method(@_);
+ }
+
+ my $sth;
+
+ if ($sth = $self->sth) {
+ # explicit finish will issue warnings, unlike the DESTROY below
+ $sth->finish if ( ! $self->{_done} and $sth->FETCH('Active') );
+ $self->sth(undef);
+ }
+
+ (undef, $sth) = $self->storage->_select( @{$self->{args}} );
- $self->_check_dbh_gen;
- $self->sth->finish if $self->sth && $self->sth->{Active};
- $self->sth(undef);
- my ($rv, $sth) = $storage->_select(@{$self->{args}});
return @{$sth->fetchall_arrayref};
}
-sub all {
- my ($self) = @_;
- if ($self->{attrs}{software_limit}
- && ($self->{attrs}{offset} || $self->{attrs}{rows})) {
- return $self->next::method;
+sub sth {
+ my $self = shift;
+
+ if (@_) {
+ delete @{$self}{qw/_pos _done _pid _intra_thread/};
+
+ $self->{sth} = $_[0];
+ $self->{_pid} = $$ if ! DBIx::Class::_ENV_::BROKEN_FORK and $_[0];
}
+ elsif ($self->{sth} and ! $self->{_done}) {
+
+ my $invalidate_handle_reason;
+
+ if (DBIx::Class::_ENV_::HAS_ITHREADS and $self->{_intra_thread} ) {
+ $invalidate_handle_reason = 'Multi-thread';
+ }
+ elsif (!DBIx::Class::_ENV_::BROKEN_FORK and $self->{_pid} != $$ ) {
+ $invalidate_handle_reason = 'Multi-process';
+ }
- $self->{storage}->dbh_do($self->can('_dbh_all'), $self);
+ if ($invalidate_handle_reason) {
+ $self->storage->throw_exception("$invalidate_handle_reason access attempted while cursor in progress (position $self->{_pos})")
+ if $self->{_pos};
+
+ # reinvokes the reset logic above
+ $self->sth(undef);
+ }
+ }
+
+ return $self->{sth};
}
=head2 reset
=cut
sub reset {
- my ($self) = @_;
-
- # No need to care about failures here
- try { $self->sth->finish }
- if $self->sth && $self->sth->{Active};
- $self->_soft_reset;
- return undef;
+ $_[0]->__finish_sth if $_[0]->{sth};
+ $_[0]->sth(undef);
}
-sub _soft_reset {
- my ($self) = @_;
- $self->sth(undef);
- $self->{_done} = 0;
- $self->{_pos} = 0;
+sub DESTROY {
+ $_[0]->__finish_sth if $_[0]->{sth};
}
-sub _check_dbh_gen {
- my ($self) = @_;
+sub __finish_sth {
+ # It is (sadly) extremely important to finish() handles we are about
+ # to lose (due to reset() or a DESTROY() ). $rs->reset is the closest
+ # thing the user has to getting to the underlying finish() API and some
+ # DBDs mandate this (e.g. DBD::InterBase will segfault, DBD::Sybase
+ # won't start a transaction sanely, etc)
+ # We also can't use the accessor here, as it will trigger a fork/thread
+ # check, and resetting a cursor in a child is perfectly valid
- if($self->{_dbh_gen} != $self->{storage}->{_dbh_gen}) {
- $self->{_dbh_gen} = $self->{storage}->{_dbh_gen};
- $self->_soft_reset;
- }
-}
+ my $self = shift;
-sub DESTROY {
- # None of the reasons this would die matter if we're in DESTROY anyways
- if (my $sth = $_[0]->sth) {
- local $SIG{__WARN__} = sub {};
- try { $sth->finish } if $sth->FETCH('Active');
- }
+ # No need to care about failures here
+ try { local $SIG{__WARN__} = sub {}; $self->{sth}->finish } if (
+ $self->{sth} and ! try { ! $self->{sth}->FETCH('Active') }
+ );
}
1;
=cut
-sub _dbh_next {
- my ($storage, $dbh, $self) = @_;
-
- my $next = $self->next::can;
-
- my @row = $next->(@_);
-
- my $col_info = $storage->_resolve_column_info($self->args->[0]);
-
- my $select = $self->args->[1];
+my $unpack_guids = sub {
+ my ($select, $col_infos, $data, $storage) = @_;
for my $select_idx (0..$#$select) {
- my $selected = $select->[$select_idx];
-
- next if ref $selected;
+ next unless (
+ defined $data->[$select_idx]
+ and
+ length($data->[$select_idx]) == 16
+ );
- my $data_type = $col_info->{$selected}{data_type};
+ my $selected = $select->[$select_idx];
- if ($storage->_is_guid_type($data_type)) {
- my $returned = $row[$select_idx];
+ my $data_type = $col_infos->{$select->[$select_idx]}{data_type}
+ or next;
- if (length $returned == 16) {
- $row[$select_idx] = $storage->_uuid_to_str($returned);
- }
- }
+ $data->[$select_idx] = $storage->_uuid_to_str($data->[$select_idx])
+ if $storage->_is_guid_type($data_type);
}
+};
- return @row;
-}
-
-sub _dbh_all {
- my ($storage, $dbh, $self) = @_;
-
- my $next = $self->next::can;
- my @rows = $next->(@_);
+sub next {
+ my $self = shift;
- my $col_info = $storage->_resolve_column_info($self->args->[0]);
+ my @row = $self->next::method(@_);
- my $select = $self->args->[1];
+ $unpack_guids->(
+ $self->args->[1],
+ $self->{_colinfos} ||= $self->storage->_resolve_column_info($self->args->[0]),
+ \@row,
+ $self->storage
+ );
- for my $row (@rows) {
- for my $select_idx (0..$#$select) {
- my $selected = $select->[$select_idx];
+ return @row;
+}
- next if ref $selected;
+sub all {
+ my $self = shift;
- my $data_type = $col_info->{$selected}{data_type};
+ my @rows = $self->next::method(@_);
- if ($storage->_is_guid_type($data_type)) {
- my $returned = $row->[$select_idx];
+ $unpack_guids->(
+ $self->args->[1],
+ $self->{_colinfos} ||= $self->storage->_resolve_column_info($self->args->[0]),
+ $_,
+ $self->storage
+ ) for @rows;
- if (length $returned == 16) {
- $row->[$select_idx] = $storage->_uuid_to_str($returned);
- }
- }
- }
- }
return @rows;
}
my $aliastypes = $self->_resolve_aliastypes_from_select_args(@_);
+ my $orig_joins = delete $aliastypes->{joining};
+ my $orig_multiplying = $aliastypes->{multiplying};
+
# a grouped set will not be affected by amount of rows. Thus any
# {multiplying} joins can go
- delete $aliastypes->{multiplying} if $attrs->{group_by};
+ delete $aliastypes->{multiplying}
+ if $attrs->{_force_prune_multiplying_joins} or $attrs->{group_by};
my @newfrom = $from->[0]; # FROM head is always present
my %need_joins;
+
for (values %$aliastypes) {
# add all requested aliases
$need_joins{$_} = 1 for keys %$_;
# add all their parents (as per joinpath which is an AoH { table => alias })
- $need_joins{$_} = 1 for map { values %$_ } map { @$_ } values %$_;
+ $need_joins{$_} = 1 for map { values %$_ } map { @{$_->{-parents}} } values %$_;
}
+
for my $j (@{$from}[1..$#$from]) {
push @newfrom, $j if (
- (! $j->[0]{-alias}) # legacy crap
+ (! defined $j->[0]{-alias}) # legacy crap
||
$need_joins{$j->[0]{-alias}}
);
}
- return \@newfrom;
+ return ( \@newfrom, {
+ multiplying => { map { $need_joins{$_} ? ($_ => $orig_multiplying->{$_}) : () } keys %$orig_multiplying },
+ %$aliastypes,
+ joining => { map { $_ => $orig_joins->{$_} } keys %need_joins },
+ } );
}
#
sub _adjust_select_args_for_complex_prefetch {
my ($self, $from, $select, $where, $attrs) = @_;
- $self->throw_exception ('Nothing to prefetch... how did we get here?!')
- if not @{$attrs->{_prefetch_selector_range}};
-
$self->throw_exception ('Complex prefetches are not supported on resultsets with a custom from attribute')
if (ref $from ne 'ARRAY' || ref $from->[0] ne 'HASH' || ref $from->[1] ne 'ARRAY');
+ my $root_alias = $attrs->{alias};
+
# generate inner/outer attribute lists, remove stuff that doesn't apply
my $outer_attrs = { %$attrs };
- delete $outer_attrs->{$_} for qw/where bind rows offset group_by having/;
+ delete @{$outer_attrs}{qw(where bind rows offset group_by _grouped_by_distinct having)};
my $inner_attrs = { %$attrs };
- delete $inner_attrs->{$_} for qw/for collapse _prefetch_selector_range _collapse_order_by select as/;
-
- # bring over all non-collapse-induced order_by into the inner query (if any)
- # the outer one will have to keep them all
- delete $inner_attrs->{order_by};
- if (my $ord_cnt = @{$outer_attrs->{order_by}} - @{$outer_attrs->{_collapse_order_by}} ) {
- $inner_attrs->{order_by} = [
- @{$outer_attrs->{order_by}}[ 0 .. $ord_cnt - 1]
- ];
- }
+ delete @{$inner_attrs}{qw(from for collapse select as _related_results_construction)};
+
+ # there is no point of ordering the insides if there is no limit
+ delete $inner_attrs->{order_by} if (
+ delete $inner_attrs->{_order_is_artificial}
+ or
+ ! $inner_attrs->{rows}
+ );
# generate the inner/outer select lists
# for inside we consider only stuff *not* brought in by the prefetch
# on the outside we substitute any function for its alias
my $outer_select = [ @$select ];
- my $inner_select = [];
+ my $inner_select;
- my ($root_source, $root_source_offset);
+ my ($root_node, $root_node_offset);
for my $i (0 .. $#$from) {
my $node = $from->[$i];
: next
;
- if ( ($h->{-alias}||'') eq $attrs->{alias} and $root_source = $h->{-rsrc} ) {
- $root_source_offset = $i;
+ if ( ($h->{-alias}||'') eq $root_alias and $h->{-rsrc} ) {
+ $root_node = $h;
+ $root_node_offset = $i;
last;
}
}
$self->throw_exception ('Complex prefetches are not supported on resultsets with a custom from attribute')
- unless $root_source;
+ unless $root_node;
# use the heavy duty resolver to take care of aliased/nonaliased naming
my $colinfo = $self->_resolve_column_info($from);
my $selected_root_columns;
- my ($p_start, $p_end) = @{$outer_attrs->{_prefetch_selector_range}};
- for my $i (0 .. $p_start - 1, $p_end + 1 .. $#$outer_select) {
+ for my $i (0 .. $#$outer_select) {
my $sel = $outer_select->[$i];
+ next if (
+ $colinfo->{$sel} and $colinfo->{$sel}{-source_alias} ne $root_alias
+ );
+
if (ref $sel eq 'HASH' ) {
$sel->{-as} ||= $attrs->{as}[$i];
- $outer_select->[$i] = join ('.', $attrs->{alias}, ($sel->{-as} || "inner_column_$i") );
+ $outer_select->[$i] = join ('.', $root_alias, ($sel->{-as} || "inner_column_$i") );
}
elsif (! ref $sel and my $ci = $colinfo->{$sel}) {
$selected_root_columns->{$ci->{-colname}} = 1;
push @{$inner_attrs->{as}}, $attrs->{as}[$i];
}
- # We will need to fetch all native columns in the inner subquery, which may be a part
- # of an *outer* join condition. We can not just fetch everything because a potential
- # has_many restricting join collapse *will not work* on heavy data types.
- # Time for more horrible SQL parsing, aughhhh
-
- # MASSIVE FIXME - in fact when we are fully transitioned to DQ and the support is
- # is sane - we will need to trim the select list to *only* fetch stuff that is
- # necessary to build joins. In the current implementation if I am selecting a blob
- # and the group_by kicks in - we are fucked, and all the user can do is not select
- # that column. This is silly!
-
- my $retardo_sqla_cache = {};
- for my $cond ( map { $_->[1] } @{$from}[$root_source_offset + 1 .. $#$from] ) {
- for my $col (@{$self->_extract_condition_columns($cond, $retardo_sqla_cache)}) {
- my $ci = $colinfo->{$col};
- if (
- $ci
- and
- $ci->{-source_alias} eq $attrs->{alias}
- and
- ! $selected_root_columns->{$ci->{-colname}}++
- ) {
- # adding it to both to keep limits not supporting dark selectors happy
- push @$inner_select, $ci->{-fq_colname};
- push @{$inner_attrs->{as}}, $ci->{-fq_colname};
- }
+ # We will need to fetch all native columns in the inner subquery, which may
+ # be a part of an *outer* join condition, or an order_by (which needs to be
+ # preserved outside)
+ # We can not just fetch everything because a potential has_many restricting
+ # join collapse *will not work* on heavy data types.
+ my $connecting_aliastypes = $self->_resolve_aliastypes_from_select_args(
+ $from,
+ [],
+ $where,
+ $inner_attrs
+ );
+
+ for (sort map { keys %{$_->{-seen_columns}||{}} } map { values %$_ } values %$connecting_aliastypes) {
+ my $ci = $colinfo->{$_} or next;
+ if (
+ $ci->{-source_alias} eq $root_alias
+ and
+ ! $selected_root_columns->{$ci->{-colname}}++
+ ) {
+ # adding it to both to keep limits not supporting dark selectors happy
+ push @$inner_select, $ci->{-fq_colname};
+ push @{$inner_attrs->{as}}, $ci->{-fq_colname};
}
}
# construct the inner $from and lock it in a subquery
# we need to prune first, because this will determine if we need a group_by below
- # the fake group_by is so that the pruner throws away all non-selecting, non-restricting
- # multijoins (since we def. do not care about those inside the subquery)
-
+ # throw away all non-selecting, non-restricting multijoins
+ # (since we def. do not care about multiplication inside the subquery)
my $inner_subq = do {
# must use it here regardless of user requests
local $self->{_use_join_optimizer} = 1;
- my $inner_from = $self->_prune_unused_joins ($from, $inner_select, $where, {
- group_by => ['dummy'], %$inner_attrs,
+ # throw away multijoins since we def. do not care about those inside the subquery
+ my ($inner_from, $inner_aliastypes) = $self->_prune_unused_joins ($from, $inner_select, $where, {
+ %$inner_attrs, _force_prune_multiplying_joins => 1
});
- my $inner_aliastypes =
- $self->_resolve_aliastypes_from_select_args( $inner_from, $inner_select, $where, $inner_attrs );
-
- # we need to simulate collapse in the subq if a multiplying join is pulled
- # by being a non-selecting restrictor
+ # uh-oh a multiplier (which is not us) left in, this is a problem
if (
- ! $inner_attrs->{group_by}
+ $inner_aliastypes->{multiplying}
+ and
+ # if there are user-supplied groups - assume user knows wtf they are up to
+ ( ! $inner_aliastypes->{grouping} or $inner_attrs->{_grouped_by_distinct} )
and
- first {
- $inner_aliastypes->{restricting}{$_}
- and
- ! $inner_aliastypes->{selecting}{$_}
- } ( keys %{$inner_aliastypes->{multiplying}||{}} )
+ my @multipliers = grep { $_ ne $root_alias } keys %{$inner_aliastypes->{multiplying}}
) {
- my $unprocessed_order_chunks;
- ($inner_attrs->{group_by}, $unprocessed_order_chunks) = $self->_group_over_selection (
- $inner_from, $inner_select, $inner_attrs->{order_by}
- );
-
- $self->throw_exception (
- 'A required group_by clause could not be constructed automatically due to a complex '
- . 'order_by criteria. Either order_by columns only (no functions) or construct a suitable '
- . 'group_by by hand'
- ) if $unprocessed_order_chunks;
+
+ # if none of the multipliers came from an order_by (guaranteed to have been combined
+ # with a limit) - easy - just slap a group_by to simulate a collapse and be on our way
+ if (
+ ! $inner_aliastypes->{ordering}
+ or
+ ! first { $inner_aliastypes->{ordering}{$_} } @multipliers
+ ) {
+
+ my $unprocessed_order_chunks;
+ ($inner_attrs->{group_by}, $unprocessed_order_chunks) = $self->_group_over_selection ({
+ %$inner_attrs,
+ from => $inner_from,
+ select => $inner_select,
+ });
+
+ $self->throw_exception (
+ 'A required group_by clause could not be constructed automatically due to a complex '
+ . 'order_by criteria. Either order_by columns only (no functions) or construct a suitable '
+ . 'group_by by hand'
+ ) if $unprocessed_order_chunks;
+ }
+ else {
+ # We need to order by external columns and group at the same time
+ # so we can calculate the proper limit
+ # This doesn't really make sense in SQL, however from DBICs point
+ # of view is rather valid (order the leftmost objects by whatever
+ # criteria and get the offset/rows many). There is a way around
+ # this however in SQL - we simply tae the direction of each piece
+ # of the foreign order and convert them to MIN(X) for ASC or MAX(X)
+ # for DESC, and group_by the root columns. The end result should be
+ # exactly what we expect
+
+ # supplement the main selection with pks if not already there,
+ # as they will have to be a part of the group_by to collapse
+ # things properly
+ my $cur_sel = { map { $_ => 1 } @$inner_select };
+
+ my @pks = map { "$root_alias.$_" } $root_node->{-rsrc}->primary_columns
+ or $self->throw_exception( sprintf
+ 'Unable to perform complex limited prefetch off %s without declared primary key',
+ $root_node->{-rsrc}->source_name,
+ );
+ for my $col (@pks) {
+ push @$inner_select, $col
+ unless $cur_sel->{$col}++;
+ }
+
+ # wrap any part of the order_by that "responds" to an ordering alias
+ # into a MIN/MAX
+ # FIXME - this code is a joke, will need to be completely rewritten in
+ # the DQ branch. But I need to push a POC here, otherwise the
+ # pesky tests won't pass
+ my $sql_maker = $self->sql_maker;
+ my ($lquote, $rquote, $sep) = map { quotemeta $_ } ($sql_maker->_quote_chars, $sql_maker->name_sep);
+ my $own_re = qr/ $lquote \Q$root_alias\E $rquote $sep | \b \Q$root_alias\E $sep /x;
+ my @order_chunks = map { ref $_ eq 'ARRAY' ? $_ : [ $_ ] } $sql_maker->_order_by_chunks($attrs->{order_by});
+ my @new_order = map { \$_ } @order_chunks;
+ my $inner_columns_info = $self->_resolve_column_info($inner_from);
+
+ # loop through and replace stuff that is not "ours" with a min/max func
+ # everything is a literal at this point, since we are likely properly
+ # quoted and stuff
+ for my $i (0 .. $#new_order) {
+ my $chunk = $order_chunks[$i][0];
+
+ # skip ourselves
+ next if $chunk =~ $own_re;
+
+ ($chunk, my $is_desc) = $sql_maker->_split_order_chunk($chunk);
+
+ # maybe our own unqualified column
+ my $ord_bit = (
+ $lquote and $sep and $chunk =~ /^ $lquote ([^$sep]+) $rquote $/x
+ ) ? $1 : $chunk;
+
+ next if (
+ $ord_bit
+ and
+ $inner_columns_info->{$ord_bit}
+ and
+ $inner_columns_info->{$ord_bit}{-source_alias} eq $root_alias
+ );
+
+ $new_order[$i] = \[
+ sprintf(
+ '%s(%s)%s',
+ ($is_desc ? 'MAX' : 'MIN'),
+ $chunk,
+ ($is_desc ? ' DESC' : ''),
+ ),
+ @ {$order_chunks[$i]} [ 1 .. $#{$order_chunks[$i]} ]
+ ];
+ }
+
+ $inner_attrs->{order_by} = \@new_order;
+
+ # do not care about leftovers here - it will be all the functions
+ # we just created
+ ($inner_attrs->{group_by}) = $self->_group_over_selection ({
+ %$inner_attrs,
+ from => $inner_from,
+ select => $inner_select,
+ });
+ }
}
# we already optimized $inner_from above
- local $self->{_use_join_optimizer} = 0;
+ # and already local()ized
+ $self->{_use_join_optimizer} = 0;
# generate the subquery
$self->_select_args_to_query (
my @outer_from;
# we may not be the head
- if ($root_source_offset) {
- # first generate the outer_from, up to the substitution point
- @outer_from = splice @$from, 0, $root_source_offset;
-
- my $root_node = shift @$from;
+ if ($root_node_offset) {
+ # first generate the outer_from, up and including the substitution point
+ @outer_from = splice @$from, 0, $root_node_offset;
push @outer_from, [
{
- -alias => $attrs->{alias},
- -rsrc => $root_node->[0]{-rsrc},
- $attrs->{alias} => $inner_subq,
+ -alias => $root_alias,
+ -rsrc => $root_node->{-rsrc},
+ $root_alias => $inner_subq,
},
- @{$root_node}[1 .. $#$root_node],
+ @{$from->[0]}[1 .. $#{$from->[0]}],
];
}
else {
- my $root_node = shift @$from;
-
@outer_from = {
- -alias => $attrs->{alias},
+ -alias => $root_alias,
-rsrc => $root_node->{-rsrc},
- $attrs->{alias} => $inner_subq,
+ $root_alias => $inner_subq,
};
}
+ shift @$from; # what we just replaced above
+
# scan the *remaining* from spec against different attributes, and see which joins are needed
# in what role
- my $outer_aliastypes =
+ my $outer_aliastypes = $outer_attrs->{_aliastypes} =
$self->_resolve_aliastypes_from_select_args( $from, $outer_select, $where, $outer_attrs );
# unroll parents
- my ($outer_select_chain, $outer_restrict_chain) = map { +{
- map { $_ => 1 } map { values %$_} map { @$_ } values %{ $outer_aliastypes->{$_} || {} }
- } } qw/selecting restricting/;
+ my ($outer_select_chain, @outer_nonselecting_chains) = map { +{
+ map { $_ => 1 } map { values %$_} map { @{$_->{-parents}} } values %{ $outer_aliastypes->{$_} || {} }
+ } } qw/selecting restricting grouping ordering/;
# see what's left - throw away if not selecting/restricting
# also throw in a group_by if a non-selecting multiplier,
) {
push @outer_from, $j
}
- elsif ($outer_restrict_chain->{$alias}) {
+ elsif (first { $_->{$alias} } @outer_nonselecting_chains ) {
push @outer_from, $j;
$need_outer_group_by ||= $outer_aliastypes->{multiplying}{$alias} ? 1 : 0;
}
}
- if ($need_outer_group_by and ! $outer_attrs->{group_by}) {
-
+ if ( $need_outer_group_by and $attrs->{_grouped_by_distinct} ) {
my $unprocessed_order_chunks;
- ($outer_attrs->{group_by}, $unprocessed_order_chunks) = $self->_group_over_selection (
- \@outer_from, $outer_select, $outer_attrs->{order_by}
- );
+ ($outer_attrs->{group_by}, $unprocessed_order_chunks) = $self->_group_over_selection ({
+ %$outer_attrs,
+ from => \@outer_from,
+ select => $outer_select,
+ });
$self->throw_exception (
'A required group_by clause could not be constructed automatically due to a complex '
or next;
$alias_list->{$al} = $j;
- $aliases_by_type->{multiplying}{$al} ||= $j->{-join_path}||[] if (
+ $aliases_by_type->{multiplying}{$al} ||= { -parents => $j->{-join_path}||[] } if (
# not array == {from} head == can't be multiplying
( ref($_) eq 'ARRAY' and ! $j->{-is_single} )
or
);
}
- # get a column to source/alias map (including unqualified ones)
+ # get a column to source/alias map (including unambiguous unqualified ones)
my $colinfo = $self->_resolve_column_info ($from);
# set up a botched SQLA
local $sql_maker->{where_bind};
local $sql_maker->{group_bind};
local $sql_maker->{having_bind};
+ local $sql_maker->{from_bind};
# we can't scan properly without any quoting (\b doesn't cut it
# everywhere), so unless there is proper quoting set - use our
my $to_scan = {
restricting => [
$sql_maker->_recurse_where ($where),
- $sql_maker->_parse_rs_attrs ({
- map { $_ => $attrs->{$_} } (qw/group_by having/)
- }),
+ $sql_maker->_parse_rs_attrs ({ having => $attrs->{having} }),
+ ],
+ grouping => [
+ $sql_maker->_parse_rs_attrs ({ group_by => $attrs->{group_by} }),
+ ],
+ joining => [
+ $sql_maker->_recurse_from (
+ ref $from->[0] eq 'ARRAY' ? $from->[0][0] : $from->[0],
+ @{$from}[1 .. $#$from],
+ ),
],
selecting => [
$sql_maker->_recurse_fields ($select),
- ( map { $_->[0] } $self->_extract_order_criteria ($attrs->{order_by}, $sql_maker) ),
+ ],
+ ordering => [
+ map { $_->[0] } $self->_extract_order_criteria ($attrs->{order_by}, $sql_maker),
],
};
# throw away empty chunks
$_ = [ map { $_ || () } @$_ ] for values %$to_scan;
- # first loop through all fully qualified columns and get the corresponding
+ # first see if we have any exact matches (qualified or unqualified)
+ for my $type (keys %$to_scan) {
+ for my $piece (@{$to_scan->{$type}}) {
+ if ($colinfo->{$piece} and my $alias = $colinfo->{$piece}{-source_alias}) {
+ $aliases_by_type->{$type}{$alias} ||= { -parents => $alias_list->{$alias}{-join_path}||[] };
+ $aliases_by_type->{$type}{$alias}{-seen_columns}{$colinfo->{$piece}{-fq_colname}} = $piece;
+ }
+ }
+ }
+
+ # now loop through all fully qualified columns and get the corresponding
# alias (should work even if they are in scalarrefs)
for my $alias (keys %$alias_list) {
my $al_re = qr/
- $lquote $alias $rquote $sep
+ $lquote $alias $rquote $sep (?: $lquote ([^$rquote]+) $rquote )?
|
- \b $alias \.
+ \b $alias \. ([^\s\)\($rquote]+)?
/x;
for my $type (keys %$to_scan) {
for my $piece (@{$to_scan->{$type}}) {
- $aliases_by_type->{$type}{$alias} ||= $alias_list->{$alias}{-join_path}||[]
- if ($piece =~ $al_re);
+ if (my @matches = $piece =~ /$al_re/g) {
+ $aliases_by_type->{$type}{$alias} ||= { -parents => $alias_list->{$alias}{-join_path}||[] };
+ $aliases_by_type->{$type}{$alias}{-seen_columns}{"$alias.$_"} = "$alias.$_"
+ for grep { defined $_ } @matches;
+ }
}
}
}
for my $col (keys %$colinfo) {
next if $col =~ / \. /x; # if column is qualified it was caught by the above
- my $col_re = qr/ $lquote $col $rquote /x;
+ my $col_re = qr/ $lquote ($col) $rquote /x;
for my $type (keys %$to_scan) {
for my $piece (@{$to_scan->{$type}}) {
- if ($piece =~ $col_re) {
+ if ( my @matches = $piece =~ /$col_re/g) {
my $alias = $colinfo->{$col}{-source_alias};
- $aliases_by_type->{$type}{$alias} ||= $alias_list->{$alias}{-join_path}||[];
+ $aliases_by_type->{$type}{$alias} ||= { -parents => $alias_list->{$alias}{-join_path}||[] };
+ $aliases_by_type->{$type}{$alias}{-seen_columns}{"$alias.$_"} = $_
+ for grep { defined $_ } @matches;
}
}
}
# Add any non-left joins to the restriction list (such joins are indeed restrictions)
for my $j (values %$alias_list) {
my $alias = $j->{-alias} or next;
- $aliases_by_type->{restricting}{$alias} ||= $j->{-join_path}||[] if (
+ $aliases_by_type->{restricting}{$alias} ||= { -parents => $j->{-join_path}||[] } if (
(not $j->{-join_type})
or
($j->{-join_type} !~ /^left (?: \s+ outer)? $/xi)
);
}
+ for (keys %$aliases_by_type) {
+ delete $aliases_by_type->{$_} unless keys %{$aliases_by_type->{$_}};
+ }
+
return $aliases_by_type;
}
# This is the engine behind { distinct => 1 }
sub _group_over_selection {
- my ($self, $from, $select, $order_by) = @_;
+ my ($self, $attrs) = @_;
- my $rs_column_list = $self->_resolve_column_info ($from);
+ my $colinfos = $self->_resolve_column_info ($attrs->{from});
my (@group_by, %group_index);
# the logic is: if it is a { func => val } we assume an aggregate,
# otherwise if \'...' or \[...] we assume the user knows what is
# going on thus group over it
- for (@$select) {
+ for (@{$attrs->{select}}) {
if (! ref($_) or ref ($_) ne 'HASH' ) {
push @group_by, $_;
$group_index{$_}++;
- if ($rs_column_list->{$_} and $_ !~ /\./ ) {
+ if ($colinfos->{$_} and $_ !~ /\./ ) {
# add a fully qualified version as well
- $group_index{"$rs_column_list->{$_}{-source_alias}.$_"}++;
+ $group_index{"$colinfos->{$_}{-source_alias}.$_"}++;
}
}
}
- # add any order_by parts that are not already present in the group_by
+ # add any order_by parts *from the main source* that are not already
+ # present in the group_by
# we need to be careful not to add any named functions/aggregates
# i.e. order_by => [ ... { count => 'foo' } ... ]
my @leftovers;
- for ($self->_extract_order_criteria($order_by)) {
+ for ($self->_extract_order_criteria($attrs->{order_by})) {
# only consider real columns (for functions the user got to do an explicit group_by)
if (@$_ != 1) {
push @leftovers, $_;
next;
}
my $chunk = $_->[0];
- my $colinfo = $rs_column_list->{$chunk} or do {
+
+ if (
+ !$colinfos->{$chunk}
+ or
+ $colinfos->{$chunk}{-source_alias} ne $attrs->{alias}
+ ) {
push @leftovers, $_;
next;
- };
+ }
- $chunk = "$colinfo->{-source_alias}.$chunk" if $chunk !~ /\./;
+ $chunk = $colinfos->{$chunk}{-fq_colname};
push @group_by, $chunk unless $group_index{$chunk}++;
}
my ($self, $ident) = @_;
my $alias2source = {};
- my $rs_alias;
# the reason this is so contrived is that $ident may be a {from}
# structure, specifying multiple tables to join
if ( blessed $ident && $ident->isa("DBIx::Class::ResultSource") ) {
# this is compat mode for insert/update/delete which do not deal with aliases
$alias2source->{me} = $ident;
- $rs_alias = 'me';
}
elsif (ref $ident eq 'ARRAY') {
my $tabinfo;
if (ref $_ eq 'HASH') {
$tabinfo = $_;
- $rs_alias = $tabinfo->{-alias};
}
if (ref $_ eq 'ARRAY' and ref $_->[0] eq 'HASH') {
$tabinfo = $_->[0];
}
}
- return ($alias2source, $rs_alias);
+ return $alias2source;
}
# Takes $ident, \@column_names
# for all sources
sub _resolve_column_info {
my ($self, $ident, $colnames) = @_;
- my ($alias2src, $root_alias) = $self->_resolve_ident_sources($ident);
+ my $alias2src = $self->_resolve_ident_sources($ident);
my (%seen_cols, @auto_colnames);
return \@new_from;
}
-# yet another atrocity: attempt to extract all columns from a
-# where condition by hooking _quote
-sub _extract_condition_columns {
- my ($self, $cond, $sql_maker_cache) = @_;
-
- return [] unless $cond;
-
- my $sm = $sql_maker_cache->{condparser} ||= $self->{_sql_ident_capturer} ||= do {
- # FIXME - replace with a Moo trait
- my $orig_sm_class = ref $self->sql_maker;
- my $smic_class = "${orig_sm_class}::_IdentCapture_";
-
- unless ($smic_class->isa('SQL::Abstract')) {
-
- no strict 'refs';
- *{"${smic_class}::_quote"} = subname "${smic_class}::_quote" => sub {
- my ($self, $ident) = @_;
- if (ref $ident eq 'SCALAR') {
- $ident = $$ident;
- my $storage_quotes = $self->sql_quote_char || '"';
- my ($ql, $qr) = map
- { quotemeta $_ }
- (ref $storage_quotes eq 'ARRAY' ? @$storage_quotes : ($storage_quotes) x 2 )
- ;
-
- while ($ident =~ /
- $ql (\w+) $qr
- |
- ([\w\.]+)
- /xg) {
- $self->{_captured_idents}{$1||$2}++;
- }
- }
- else {
- $self->{_captured_idents}{$ident}++;
- }
- return $ident;
- };
-
- *{"${smic_class}::_get_captured_idents"} = subname "${smic_class}::_get_captures" => sub {
- (delete shift->{_captured_idents}) || {};
- };
-
- $self->inject_base ($smic_class, $orig_sm_class);
-
- }
-
- $smic_class->new();
- };
-
- $sm->_recurse_where($cond);
-
- return [ sort keys %{$sm->_get_captured_idents} ];
-}
-
sub _extract_order_criteria {
my ($self, $order_by, $sql_maker) = @_;
my $parser = sub {
- my ($sql_maker, $order_by) = @_;
+ my ($sql_maker, $order_by, $orig_quote_chars) = @_;
return scalar $sql_maker->_order_by_chunks ($order_by)
unless wantarray;
+ my ($lq, $rq, $sep) = map { quotemeta($_) } (
+ ($orig_quote_chars ? @$orig_quote_chars : $sql_maker->_quote_chars),
+ $sql_maker->name_sep
+ );
+
my @chunks;
for ($sql_maker->_order_by_chunks ($order_by) ) {
- my $chunk = ref $_ ? $_ : [ $_ ];
- $chunk->[0] =~ s/\s+ (?: ASC|DESC ) \s* $//ix;
+ my $chunk = ref $_ ? [ @$_ ] : [ $_ ];
+ ($chunk->[0]) = $sql_maker->_split_order_chunk($chunk->[0]);
+
+ # order criteria may have come back pre-quoted (literals and whatnot)
+ # this is fragile, but the best we can currently do
+ $chunk->[0] =~ s/^ $lq (.+?) $rq $sep $lq (.+?) $rq $/"$1.$2"/xe
+ or $chunk->[0] =~ s/^ $lq (.+) $rq $/$1/x;
+
push @chunks, $chunk;
}
}
else {
$sql_maker = $self->sql_maker;
+
+ # pass these in to deal with literals coming from
+ # the user or the deep guts of prefetch
+ my $orig_quote_chars = [$sql_maker->_quote_chars];
+
local $sql_maker->{quote_char};
- return $parser->($sql_maker, $order_by);
+ return $parser->($sql_maker, $order_by, $orig_quote_chars);
}
}
return undef;
}
+# this is almost identical to the above, except it accepts only
+# a single rsrc, and will succeed only if the first portion of the order
+# by is stable.
+# returns that portion as a colinfo hashref on success
+sub _main_source_order_by_portion_is_stable {
+ my ($self, $main_rsrc, $order_by, $where) = @_;
+
+ die "Huh... I expect a blessed result_source..."
+ if ref($main_rsrc) eq 'ARRAY';
+
+ my @ord_cols = map
+ { $_->[0] }
+ ( $self->_extract_order_criteria($order_by) )
+ ;
+ return unless @ord_cols;
+
+ my $colinfos = $self->_resolve_column_info($main_rsrc);
+
+ for (0 .. $#ord_cols) {
+ if (
+ ! $colinfos->{$ord_cols[$_]}
+ or
+ $colinfos->{$ord_cols[$_]}{-result_source} != $main_rsrc
+ ) {
+ $#ord_cols = $_ - 1;
+ last;
+ }
+ }
+
+ # we just truncated it above
+ return unless @ord_cols;
+
+ my $order_portion_ci = { map {
+ $colinfos->{$_}{-colname} => $colinfos->{$_},
+ $colinfos->{$_}{-fq_colname} => $colinfos->{$_},
+ } @ord_cols };
+
+ # since all we check here are the start of the order_by belonging to the
+ # top level $rsrc, a present identifying set will mean that the resultset
+ # is ordered by its leftmost table in a stable manner
+ #
+ # RV of _identifying_column_set contains unqualified names only
+ my $unqualified_idset = $main_rsrc->_identifying_column_set({
+ ( $where ? %{
+ $self->_resolve_column_info(
+ $main_rsrc, $self->_extract_fixed_condition_columns($where)
+ )
+ } : () ),
+ %$order_portion_ci
+ }) or return;
+
+ my $ret_info;
+ my %unqualified_idcols_from_order = map {
+ $order_portion_ci->{$_} ? ( $_ => $order_portion_ci->{$_} ) : ()
+ } @$unqualified_idset;
+
+ # extra optimization - cut the order_by at the end of the identifying set
+ # (just in case the user was stupid and overlooked the obvious)
+ for my $i (0 .. $#ord_cols) {
+ my $col = $ord_cols[$i];
+ my $unqualified_colname = $order_portion_ci->{$col}{-colname};
+ $ret_info->{$col} = { %{$order_portion_ci->{$col}}, -idx_in_order_subset => $i };
+ delete $unqualified_idcols_from_order{$ret_info->{$col}{-colname}};
+
+ # we didn't reach the end of the identifying portion yet
+ return $ret_info unless keys %unqualified_idcols_from_order;
+ }
+
+ die 'How did we get here...';
+}
+
# returns an arrayref of column names which *definitely* have some
# sort of non-nullable equality requested in the given condition
# specification. This is used to figure out if a resultset is
die "Illegal version $version_string - we are still in the 0.08 cycle\n"
}
+if ($v_point >= 300) {
+ die "Illegal version $version_string - we are still in the 0.082xx cycle\n"
+}
-# all odd releases *after* 0.08200 generate a -TRIAL, no exceptions
-Meta->makemaker_args->{DISTVNAME} = Meta->name . "-$version_string-TRIAL"
- if ( $v_point > 200 and int($v_point / 100) % 2 );
+Meta->makemaker_args->{DISTVNAME} = Meta->name . "-$version_string-TRIAL" if (
+ # 0.08240 ~ 0.08249 shall be TRIALs for the collapser rewrite
+ ( $v_point >= 240 and $v_point <= 249 )
+ or
+ # all odd releases *after* 0.08200 generate a -TRIAL, no exceptions
+ ( $v_point > 200 and int($v_point / 100) % 2 )
+);
my $tags = { map { chomp $_; $_ => 1} `git tag` };
use strict;
use warnings;
use Test::More;
+use Test::Exception;
use lib qw(t/lib);
use DBICTest;
$schema->resultset('CD')->create({ title => 'vacation in antarctica part 2', artist => 456, year => 1901 });
$parent_rs = $schema->resultset('CD')->search({ year => 1901 });
- $parent_rs->next;
+ is ($parent_rs->count, 2);
};
ok(!$@) or diag "Creation eval failed: $@";
+# basic tests
{
- my $pid = fork;
- if(!defined $pid) {
- die "fork failed: $!";
+ ok ($schema->storage->connected(), 'Parent is connected');
+ is ($parent_rs->next->id, 1, 'Cursor advanced');
+
+ my ($parent_in, $child_out);
+ pipe( $parent_in, $child_out ) or die "Pipe open failed: $!";
+
+ my $pid = fork;
+ if(!defined $pid) {
+ die "fork failed: $!";
+ }
+
+ if (!$pid) {
+ close $parent_in;
+
+ #simulate a subtest to not confuse the parent TAP emission
+ my $tb = Test::More->builder;
+ $tb->reset;
+ for (qw/output failure_output todo_output/) {
+ close $tb->$_;
+ open ($tb->$_, '>&', $child_out);
}
- if (!$pid) {
- exit $schema->storage->connected ? 1 : 0;
+ ok(!$schema->storage->connected, "storage->connected() false in child");
+ for (1,2) {
+ throws_ok { $parent_rs->next } qr/\QMulti-process access attempted while cursor in progress (position 1)/;
}
- if (waitpid($pid, 0) == $pid) {
- my $ex = $? >> 8;
- ok($ex == 0, "storage->connected() returns false in child");
- exit $ex if $ex; # skip remaining tests
- }
+ $parent_rs->reset;
+ is($parent_rs->next->id, 1, 'Resetting cursor reprepares it within child environment');
+
+ done_testing;
+ exit 0;
+ }
+
+ close $child_out;
+ while (my $ln = <$parent_in>) {
+ print " $ln";
+ }
+ waitpid( $pid, 0 );
+ ok(!$?, 'Child subtests passed');
+
+ is ($parent_rs->next->id, 2, 'Cursor still intact in parent');
+ is ($parent_rs->next, undef, 'Cursor exhausted');
}
+$parent_rs->reset;
my @pids;
while(@pids < $num_children) {
$schema->resultset('CD')->create({ title => 'vacation in antarctica part 2', artist => 456, year => 1901 });
$parent_rs = $schema->resultset('CD')->search({ year => 1901 });
- $parent_rs->next;
+ is ($parent_rs->count, 2);
}, 'populate successfull');
+# basic tests
+{
+ ok ($schema->storage->connected(), 'Parent is connected');
+ is ($parent_rs->next->id, 1, 'Cursor advanced');
+ my $ct_num = Test::More->builder->current_test;
+
+ my $newthread = async {
+ my $out = '';
+
+ #simulate a subtest to not confuse the parent TAP emission
+ my $tb = Test::More->builder;
+ $tb->reset;
+ for (qw/output failure_output todo_output/) {
+ close $tb->$_;
+ open ($tb->$_, '>', \$out);
+ }
+
+ ok(!$schema->storage->connected, "storage->connected() false in child");
+ for (1,2) {
+ throws_ok { $parent_rs->next } qr/\QMulti-thread access attempted while cursor in progress (position 1)/;
+ }
+
+ $parent_rs->reset;
+ is($parent_rs->next->id, 1, 'Resetting cursor reprepares it within child environment');
+
+ done_testing;
+
+ close $tb->$_ for (qw/output failure_output todo_output/);
+ sleep(1); # tasty crashes without this
+
+ $out;
+ };
+ die "Thread creation failed: $! $@" if !defined $newthread;
+
+ my $out = $newthread->join;
+ $out =~ s/^/ /gm;
+ print $out;
+
+ # workaround for older Test::More confusing the plan under threads
+ Test::More->builder->current_test($ct_num);
+
+ is ($parent_rs->next->id, 2, 'Cursor still intact in parent');
+ is ($parent_rs->next, undef, 'Cursor exhausted');
+}
+
+$parent_rs->reset;
my @children;
while(@children < $num_children) {
}
ok(1, "Made it to the end");
+undef $parent_rs;
$schema->storage->dbh->do("DROP TABLE cd");
$schema->resultset('CD')->create({ title => 'vacation in antarctica part 2', artist => 456, year => 1901 });
$parent_rs = $schema->resultset('CD')->search({ year => 1901 });
- $parent_rs->next;
+ is ($parent_rs->count, 2);
};
ok(!$@) or diag "Creation eval failed: $@";
# this subclass is expected to inherit whatever crap comes
# from the parent
'DBIx::Class::ResultSet::Pager',
+
+ # a utility class, not part of the inheritance chain
+ 'DBIx::Class::ResultSource::RowParser::Util',
) };
my $has_cmop = eval { require Class::MOP };
is ($collapsed_or_rs->count, 4, 'Collapsed search count with OR ok');
# make sure sure distinct on a grouped rs is warned about
-my $cd_rs = $schema->resultset ('CD')
- ->search ({}, { distinct => 1, group_by => 'title' });
-warnings_exist (sub {
- $cd_rs->next;
-}, qr/Useless use of distinct/, 'UUoD warning');
+{
+ my $cd_rs = $schema->resultset ('CD')
+ ->search ({}, { distinct => 1, group_by => 'title' });
+ warnings_exist (sub {
+ $cd_rs->next;
+ }, qr/Useless use of distinct/, 'UUoD warning');
+}
{
my $tcount = $schema->resultset('Track')->search(
$or_rs->reset;
$rel_rs->reset;
+# at this point there should be no active statements
+# (finish() was called everywhere, either explicitly via
+# reset() or on DESTROY)
+for (keys %{$schema->storage->dbh->{CachedKids}}) {
+ fail("Unreachable cached statement still active: $_")
+ if $schema->storage->dbh->{CachedKids}{$_}->FETCH('Active');
+}
+
my $tag = $schema->resultset('Tag')->search(
[ { 'me.tag' => 'Blue' } ],
{ columns => 'tagid' }
}
catch {
if ($opts{$opts_name}{required}) {
- BAIL_OUT "on_connect_call option '$opts_name' is not functional: $_";
+ die "on_connect_call option '$opts_name' is not functional: $_";
}
else {
skip
} 'inferring generator from trigger source works';
}
+ # at this point there should be no active statements
+ # (finish() was called everywhere, either explicitly via
+ # reset() or on DESTROY)
+ for (keys %{$schema->storage->dbh->{CachedKids}}) {
+ fail("Unreachable cached statement still active: $_")
+ if $schema->storage->dbh->{CachedKids}{$_}->FETCH('Active');
+ }
+
# test blobs (stolen from 73oracle.t)
eval { $dbh->do('DROP TABLE "bindtype_test"') };
$dbh->do(q[
push @objs, $tag->id; #warn "tag: ", $tag->ID;
}
-is_deeply( \@objs, [ 1 ], 'second cd has correct tags' );
+is_deeply( \@objs, [ 2, 5, 8 ], 'third cd has correct tags' );
$tags = $cds->next->tags;
@objs = ();
push @objs, $tag->id; #warn "tag: ", $tag->ID;
}
-is_deeply( \@objs, [ 2, 5, 8 ], 'third cd has correct tags' );
+is_deeply( \@objs, [ 1 ], 'second cd has correct tags' );
is( $queries, 0, 'no additional SQL statements while checking nested data' );
'prefetch properly collapses amount of rows from get_column',
);
+$rs->reset;
+my $pob_rs = $rs->search({}, {
+ select => ['me.title', 'tracks.title'],
+ prefetch => 'tracks',
+ order_by => [{-asc => ['position']}],
+ group_by => ['me.title', 'tracks.title'],
+});
+is_same_sql_bind (
+ $pob_rs->get_column("me.title")->as_query,
+ '(SELECT me.title FROM (SELECT me.title, tracks.title FROM cd me LEFT JOIN track tracks ON tracks.cd = me.cdid GROUP BY me.title, tracks.title ORDER BY position ASC) me)',
+ [],
+ 'Correct SQL for prefetch/order_by/group_by'
+);
+
+# test aggregate on a function
+{
+ my $tr_rs = $schema->resultset("Track");
+ $tr_rs->create({ cd => 2, title => 'dealbreaker' });
+
+ is(
+ $tr_rs->get_column('cd')->max,
+ 5,
+ "Correct: Max cd in Track is 5"
+ );
+
+ my $track_counts_per_cd_via_group_by = $tr_rs->search({}, {
+ columns => [ 'cd', { cnt => { count => 'trackid', -as => 'cnt' } } ],
+ group_by => 'cd',
+ })->get_column('cnt');
+
+ is ($track_counts_per_cd_via_group_by->max, 4, 'Correct max tracks per cd');
+ is ($track_counts_per_cd_via_group_by->min, 3, 'Correct min tracks per cd');
+ is (
+ sprintf('%0.1f', $track_counts_per_cd_via_group_by->func('avg') ),
+ '3.2',
+ 'Correct avg tracks per cd'
+ );
+}
+
done_testing;
use Test::More;
use Test::Exception;
+
use lib qw(t/lib);
use DBICTest;
use DBIC::SqlMakerTest;
my $schema = DBICTest->init_schema();
- {
- my $rs = $schema->resultset( 'CD' )->search(
- {
- 'producer.name' => 'blah',
- 'producer_2.name' => 'foo',
- },
- {
- 'join' => [
- { cd_to_producer => 'producer' },
- { cd_to_producer => 'producer' },
- ],
- 'prefetch' => [
- 'artist',
- { cd_to_producer => 'producer' },
- ],
- }
- );
-
- lives_ok {
- my @rows = $rs->all();
- };
- }
+lives_ok (sub {
+ my $rs = $schema->resultset( 'CD' )->search(
+ {
+ 'producer.name' => 'blah',
+ 'producer_2.name' => 'foo',
+ },
+ {
+ 'join' => [
+ { cd_to_producer => 'producer' },
+ { cd_to_producer => 'producer' },
+ ],
+ 'prefetch' => [
+ 'artist',
+ { cd_to_producer => { producer => 'producer_to_cd' } },
+ ],
+ }
+ );
+
+ my @executed = $rs->all();
+
+ is_same_sql_bind (
+ $rs->as_query,
+ '(
+ SELECT me.cdid, me.artist, me.title, me.year, me.genreid, me.single_track,
+ artist.artistid, artist.name, artist.rank, artist.charfield,
+ cd_to_producer.cd, cd_to_producer.producer, cd_to_producer.attribute,
+ producer.producerid, producer.name,
+ producer_to_cd.cd, producer_to_cd.producer, producer_to_cd.attribute
+ FROM cd me
+ LEFT JOIN cd_to_producer cd_to_producer
+ ON cd_to_producer.cd = me.cdid
+ LEFT JOIN producer producer
+ ON producer.producerid = cd_to_producer.producer
+ LEFT JOIN cd_to_producer producer_to_cd
+ ON producer_to_cd.producer = producer.producerid
+ LEFT JOIN cd_to_producer cd_to_producer_2
+ ON cd_to_producer_2.cd = me.cdid
+ LEFT JOIN producer producer_2
+ ON producer_2.producerid = cd_to_producer_2.producer
+ JOIN artist artist ON artist.artistid = me.artist
+ WHERE ( ( producer.name = ? AND producer_2.name = ? ) )
+ )',
+ [
+ [ { sqlt_datatype => 'varchar', dbic_colname => 'producer.name', sqlt_size => 100 }
+ => 'blah' ],
+ [ { sqlt_datatype => 'varchar', dbic_colname => 'producer_2.name', sqlt_size => 100 }
+ => 'foo' ],
+ ],
+ );
+}, 'Complex join parsed/executed properly');
my @rs1a_results = $schema->resultset("Artist")->search_related('cds', {title => 'Forkful of bees'}, {order_by => 'title'});
is($rs1a_results[0]->title, 'Forkful of bees', "bare field conditions okay after search related");
use warnings;
use Test::More;
+use Test::Warn;
use Test::Exception;
use lib qw(t/lib);
use DBICTest;
my $schema = DBICTest->init_schema();
-plan tests => 12;
-
{
my $cd_rc = $schema->resultset("CD")->result_class;
throws_ok {
$artist_rs->first
- } qr/Can't locate object method "inflate_result" via package "IWillExplode"/,
+ } qr/\QInflator IWillExplode does not provide an inflate_result() method/,
'IWillExplode explodes on inflate';
my $cd_rs = $artist_rs->related_resultset('cds');
isa_ok(eval{ $cd_rs->find(1) }, $cd_rc, 'Inflated into correct cd result_class');
isa_ok(eval{ $cd_rs->search({ cdid => 1 })->first }, $cd_rc, 'Inflated into correct cd result_class');
}
+
+{
+ my $rs = $schema->resultset('Artist')->search(
+ { 'cds.title' => 'Spoonful of bees' },
+ { prefetch => 'cds', result_class => 'DBIx::Class::ResultClass::HashRefInflator' },
+ );
+
+ is ($rs->result_class, 'DBIx::Class::ResultClass::HashRefInflator', 'starting with correct resultclass');
+
+ $rs->result_class('DBICTest::Artist');
+ is ($rs->result_class, 'DBICTest::Artist', 'resultclass changed');
+
+ my $art = $rs->next;
+ is (ref $art, 'DBICTest::Artist', 'Correcty blessed output');
+
+ throws_ok
+ { $rs->result_class('IWillExplode') }
+ qr/\QChanging the result_class of a ResultSet instance with an active cursor is not supported/,
+ 'Throws on result class change in progress'
+ ;
+
+ my $cds = $art->cds;
+
+ warnings_exist
+ { $cds->result_class('IWillExplode') }
+ qr/\QChanging the result_class of a ResultSet instance with cached results is a noop/,
+ 'Warning on noop result_class change'
+ ;
+
+ is ($cds->result_class, 'IWillExplode', 'class changed anyway');
+
+ # even though the original was HRI (at $rs definition time above)
+ # we lost the control over the *prefetched* object result class
+ # when we handed the root object creation to ::Row::inflate_result
+ is( ref $cds->next, 'DBICTest::CD', 'Correctly inflated prefetched result');
+}
+
+done_testing;
is ($rs->result_class, 'DBICTest::CDSubclass', 'original class unchanged');
is ($hri_rs->result_class, 'DBIx::Class::ResultClass::HashRefInflator', 'result_class accessor pre-set via attribute');
-
my $datahashref1 = $hri_rs->next;
is_deeply(
[ sort keys %$datahashref1 ],
[ sort $rs->result_source->columns ],
'returned correct columns',
);
+ $hri_rs->reset;
$cd = $hri_rs->find ({cdid => 1});
is_deeply ( $cd, $datahashref1, 'first/find return the same thing (result_class attr propagates)');
my @dbic_reltable = $dbic_obj->$col;
my @hashref_reltable = @{$datahashref->{$col}};
- is (scalar @dbic_reltable, scalar @hashref_reltable, 'number of related entries');
+ is (scalar @hashref_reltable, scalar @dbic_reltable, 'number of related entries');
# for my $index (0..scalar @hashref_reltable) {
for my $index (0..scalar @dbic_reltable) {
--- /dev/null
+use strict;
+use warnings;
+
+use Test::More;
+use Test::Deep;
+use lib qw(t/lib);
+use DBICTest;
+
+# More tests like this in t/prefetch/manual.t
+
+my $schema = DBICTest->init_schema(no_populate => 1, quote_names => 1);
+$schema->resultset('Artist')->create({ name => 'JMJ', cds => [{
+ title => 'Magnetic Fields',
+ year => 1981,
+ genre => { name => 'electro' },
+ tracks => [
+ { title => 'm1' },
+ { title => 'm2' },
+ { title => 'm3' },
+ { title => 'm4' },
+ ],
+} ] });
+
+
+$schema->resultset('CD')->create({
+ title => 'Equinoxe',
+ year => 1978,
+ artist => { name => 'JMJ' },
+ genre => { name => 'electro' },
+ tracks => [
+ { title => 'e1' },
+ { title => 'e2' },
+ { title => 'e3' },
+ ],
+ single_track => {
+ title => 'o1',
+ cd => {
+ title => 'Oxygene',
+ year => 1976,
+ artist => { name => 'JMJ' },
+ tracks => [
+ { title => 'o2', position => 2}, # the position should not be needed here, bug in MC
+ ],
+ },
+ },
+});
+
+for (1,2) {
+ $schema->resultset('CD')->create({ artist => 1, year => 1977, title => "fuzzy_$_" });
+}
+
+{
+ package DBICTest::HRI::Subclass;
+ use base 'DBIx::Class::ResultClass::HashRefInflator';
+}
+
+{
+ package DBICTest::HRI::Around;
+ use base 'DBIx::Class::ResultClass::HashRefInflator';
+
+ sub inflate_result { shift->next::method(@_) }
+}
+
+for my $rs (
+ $schema->resultset('CD')->search_rs({}, { result_class => 'DBIx::Class::ResultClass::HashRefInflator' }),
+ $schema->resultset('CD')->search_rs({}, { result_class => 'DBICTest::HRI::Subclass' }),
+ $schema->resultset('CD')->search_rs({}, { result_class => 'DBICTest::HRI::Around' }),
+) {
+
+cmp_deeply
+ [ $rs->search({}, {
+ columns => {
+ year => 'me.year',
+ 'single_track.cd.artist.name' => 'artist.name',
+ },
+ join => { single_track => { cd => 'artist' } },
+ order_by => [qw/me.cdid artist.artistid/],
+ })->all ],
+ [
+ { year => 1981, single_track => undef },
+ { year => 1976, single_track => undef },
+ { year => 1978, single_track => {
+ cd => {
+ artist => { name => "JMJ" }
+ },
+ }},
+ { year => 1977, single_track => undef },
+ { year => 1977, single_track => undef },
+
+ ],
+ 'plain 1:1 descending chain ' . $rs->result_class
+;
+
+cmp_deeply
+ [ $rs->search({}, {
+ columns => {
+ 'artist' => 'me.artist',
+ 'title' => 'me.title',
+ 'year' => 'me.year',
+ 'single_track.cd.artist.artistid' => 'artist.artistid',
+ 'single_track.cd.artist.cds.cdid' => 'cds.cdid',
+ 'single_track.cd.artist.cds.tracks.title' => 'tracks.title',
+ },
+ join => { single_track => { cd => { artist => { cds => 'tracks' } } } },
+ order_by => [qw/me.cdid artist.artistid cds.cdid tracks.trackid/],
+ })->all ],
+ [
+ {
+ artist => 1, title => "Magnetic Fields", year => 1981, single_track => undef,
+ },
+ {
+ artist => 1, title => "Oxygene", year => 1976, single_track => undef,
+ },
+ {
+ artist => 1, title => "Equinoxe", year => 1978, single_track => {
+ cd => {
+ artist => {
+ artistid => 1, cds => {
+ cdid => 1, tracks => {
+ title => "m1"
+ }
+ }
+ }
+ }
+ },
+ },
+ {
+ artist => 1, title => "Equinoxe", year => 1978, single_track => {
+ cd => {
+ artist => {
+ artistid => 1, cds => {
+ cdid => 1, tracks => {
+ title => "m2"
+ }
+ }
+ }
+ }
+ },
+ },
+ {
+ artist => 1, title => "Equinoxe", year => 1978, single_track => {
+ cd => {
+ artist => {
+ artistid => 1, cds => {
+ cdid => 1, tracks => {
+ title => "m3"
+ }
+ }
+ }
+ }
+ },
+ },
+ {
+ artist => 1, title => "Equinoxe", year => 1978, single_track => {
+ cd => {
+ artist => {
+ artistid => 1, cds => {
+ cdid => 1, tracks => {
+ title => "m4"
+ }
+ }
+ }
+ }
+ },
+ },
+ {
+ artist => 1, title => "Equinoxe", year => 1978, single_track => {
+ cd => {
+ artist => {
+ artistid => 1, cds => {
+ cdid => 2, tracks => {
+ title => "o2"
+ }
+ }
+ }
+ }
+ },
+ },
+ {
+ artist => 1, title => "Equinoxe", year => 1978, single_track => {
+ cd => {
+ artist => {
+ artistid => 1, cds => {
+ cdid => 2, tracks => {
+ title => "o1"
+ }
+ }
+ }
+ }
+ },
+ },
+ {
+ artist => 1, title => "Equinoxe", year => 1978, single_track => {
+ cd => {
+ artist => {
+ artistid => 1, cds => {
+ cdid => 3, tracks => {
+ title => "e1"
+ }
+ }
+ }
+ }
+ },
+ },
+ {
+ artist => 1, title => "Equinoxe", year => 1978, single_track => {
+ cd => {
+ artist => {
+ artistid => 1, cds => {
+ cdid => 3, tracks => {
+ title => "e2"
+ }
+ }
+ }
+ }
+ },
+ },
+ {
+ artist => 1, title => "Equinoxe", year => 1978, single_track => {
+ cd => {
+ artist => {
+ artistid => 1, cds => {
+ cdid => 3, tracks => {
+ title => "e3"
+ }
+ }
+ }
+ }
+ },
+ },
+ {
+ artist => 1, title => "Equinoxe", year => 1978, single_track => {
+ cd => {
+ artist => {
+ artistid => 1, cds => {
+ cdid => 4, tracks => undef
+ }
+ }
+ }
+ },
+ },
+ {
+ artist => 1, title => "Equinoxe", year => 1978, single_track => {
+ cd => {
+ artist => {
+ artistid => 1, cds => {
+ cdid => 5, tracks => undef
+ }
+ }
+ }
+ },
+ },
+ {
+ artist => 1, title => "fuzzy_1", year => 1977, single_track => undef,
+ },
+ {
+ artist => 1, title => "fuzzy_2", year => 1977, single_track => undef,
+ }
+ ],
+ 'non-collapsing 1:1:1:M:M chain ' . $rs->result_class,
+;
+
+cmp_deeply
+ [ $rs->search({}, {
+ columns => {
+ 'artist' => 'me.artist',
+ 'title' => 'me.title',
+ 'year' => 'me.year',
+ 'single_track.cd.artist.artistid' => 'artist.artistid',
+ 'single_track.cd.artist.cds.cdid' => 'cds.cdid',
+ 'single_track.cd.artist.cds.tracks.title' => 'tracks.title',
+ },
+ join => { single_track => { cd => { artist => { cds => 'tracks' } } } },
+ order_by => [qw/me.cdid artist.artistid cds.cdid tracks.trackid/],
+ collapse => 1,
+ })->all ],
+ [
+ {
+ artist => 1, title => "Magnetic Fields", year => 1981, single_track => undef,
+ },
+ {
+ artist => 1, title => "Oxygene", year => 1976, single_track => undef,
+ },
+ {
+ artist => 1, title => "Equinoxe", year => 1978, single_track => {
+ cd => {
+ artist => {
+ artistid => 1, cds => [
+ {
+ cdid => 1, tracks => [
+ { title => "m1" },
+ { title => "m2" },
+ { title => "m3" },
+ { title => "m4" },
+ ]
+ },
+ {
+ cdid => 2, tracks => [
+ { title => "o2" },
+ { title => "o1" },
+ ]
+ },
+ {
+ cdid => 3, tracks => [
+ { title => "e1" },
+ { title => "e2" },
+ { title => "e3" },
+ ]
+ },
+ {
+ cdid => 4, tracks => [],
+ },
+ {
+ cdid => 5, tracks => [],
+ }
+ ]
+ }
+ }
+ },
+ },
+ {
+ artist => 1, title => "fuzzy_1", year => 1977, single_track => undef,
+ },
+ {
+ artist => 1, title => "fuzzy_2", year => 1977, single_track => undef,
+ }
+ ],
+ 'collapsing 1:1:1:M:M chain ' . $rs->result_class,
+;
+
+}
+
+done_testing;
{ join_type => 'left'}
);
+# add a non-left single relationship for the complex prefetch tests
+__PACKAGE__->belongs_to( existing_single_track => 'DBICTest::Schema::Track', 'single_track');
+
__PACKAGE__->has_many( tracks => 'DBICTest::Schema::Track' );
__PACKAGE__->has_many(
tags => 'DBICTest::Schema::Tag', undef,
},
);
__PACKAGE__->set_primary_key('id');
+__PACKAGE__->add_unique_constraint ([qw/lyric_id text/]);
__PACKAGE__->belongs_to('lyric', 'DBICTest::Schema::Lyrics', 'lyric_id');
1;
__PACKAGE__->belongs_to('track', 'DBICTest::Schema::Track', 'track_id');
__PACKAGE__->has_many('lyric_versions', 'DBICTest::Schema::LyricVersion', 'lyric_id');
+__PACKAGE__->has_many('existing_lyric_versions', 'DBICTest::Schema::LyricVersion', 'lyric_id', {
+ join_type => 'inner',
+});
+
1;
+++ /dev/null
-package PrefetchBug::Left;
-
-use strict;
-use warnings;
-
-use base 'DBIx::Class::Core';
-
-__PACKAGE__->table('prefetchbug_left');
-__PACKAGE__->add_columns(
- id => { data_type => 'integer', is_auto_increment => 1 },
-);
-
-__PACKAGE__->set_primary_key('id');
-
-__PACKAGE__->has_many(
- prefetch_leftright => 'PrefetchBug::LeftRight',
- 'left_id'
-);
-
-1;
+++ /dev/null
-package
- PrefetchBug::LeftRight;
-
-use strict;
-use warnings;
-
-use base 'DBIx::Class::Core';
-
-__PACKAGE__->table('prefetchbug_left_right');
-__PACKAGE__->add_columns(
- left_id => { data_type => 'integer' },
- right_id => { data_type => 'integer' },
- value => {});
-
-__PACKAGE__->set_primary_key('left_id', 'right_id');
-__PACKAGE__->belongs_to(left => 'PrefetchBug::Left', 'left_id');
-__PACKAGE__->belongs_to(
- right => 'PrefetchBug::Right',
- 'right_id',
-# {join_type => 'left'}
-);
-
-
-1;
+++ /dev/null
-package
- PrefetchBug::Right;
-
-use strict;
-use warnings;
-
-use base 'DBIx::Class::Core';
-
-__PACKAGE__->table('prefetchbug_right');
-__PACKAGE__->add_columns(qw/ id name category description propagates locked/);
-__PACKAGE__->set_primary_key('id');
-
-__PACKAGE__->has_many('prefetch_leftright', 'PrefetchBug::LeftRight', 'right_id');
-1;
"genreid" integer,
"single_track" integer,
FOREIGN KEY ("artist") REFERENCES "artist"("artistid") ON DELETE CASCADE ON UPDATE CASCADE,
- FOREIGN KEY ("genreid") REFERENCES "genre"("genreid") ON DELETE SET NULL ON UPDATE CASCADE,
- FOREIGN KEY ("single_track") REFERENCES "track"("trackid") ON DELETE CASCADE
+ FOREIGN KEY ("single_track") REFERENCES "track"("trackid") ON DELETE CASCADE,
+ FOREIGN KEY ("genreid") REFERENCES "genre"("genreid") ON DELETE SET NULL ON UPDATE CASCADE
);
CREATE INDEX "cd_idx_artist" ON "cd" ("artist");
-CREATE INDEX "cd_idx_genreid" ON "cd" ("genreid");
-
CREATE INDEX "cd_idx_single_track" ON "cd" ("single_track");
+CREATE INDEX "cd_idx_genreid" ON "cd" ("genreid");
+
CREATE UNIQUE INDEX "cd_artist_title" ON "cd" ("artist", "title");
CREATE TABLE "collection_object" (
CREATE INDEX "lyric_versions_idx_lyric_id" ON "lyric_versions" ("lyric_id");
+CREATE UNIQUE INDEX "lyric_versions_lyric_id_text" ON "lyric_versions" ("lyric_id", "text");
+
CREATE TABLE "tags" (
"tagid" INTEGER PRIMARY KEY NOT NULL,
"cd" integer NOT NULL,
use lib qw(t/lib);
use DBICTest;
-plan tests => 2;
-
my $schema = DBICTest->init_schema();
-my $track_no_lyrics = $schema->resultset ('Track')
- ->search ({ 'lyrics.lyric_id' => undef }, { join => 'lyrics' })
- ->first;
-
-my $lyric = $track_no_lyrics->create_related ('lyrics', {
- lyric_versions => [
- { text => 'english doubled' },
- { text => 'english doubled' },
- ],
+my $link = $schema->resultset ('Link')->create ({
+ url => 'loldogs!',
+ bookmarks => [
+ { link => 'Mein Hund ist schwul'},
+ { link => 'Mein Hund ist schwul'},
+ ]
});
-is ($lyric->lyric_versions->count, 2, "Two identical has_many's created");
+is ($link->bookmarks->count, 2, "Two identical default-insert has_many's created");
-my $link = $schema->resultset ('Link')->create ({
+$link = $schema->resultset ('Link')->create ({
url => 'lolcats!',
bookmarks => [
{},
]
});
is ($link->bookmarks->count, 2, "Two identical default-insert has_many's created");
+
+done_testing;
use warnings;
use Test::More;
+use Test::Deep;
use lib qw(t/lib);
use DBICTest;
use DBIC::SqlMakerTest;
LEFT JOIN track tracks
ON tracks.cd = me.cdid
WHERE me.artist != ?
- ORDER BY tracks.cd
)',
[
$schema->storage->debugcb(sub { $queries++; });
$schema->storage->debug(1);
-is_deeply (
+cmp_deeply (
{ map
{ $_->cdid => {
track_titles => [ map { $_->title } ($_->tracks->all) ],
LEFT JOIN track tracks
ON tracks.cd = me.cdid
WHERE me.artist != ?
- ORDER BY tracks.cd
)',
[
my $schema = DBICTest->init_schema();
-plan tests => 1;
-
# While this is a rather GIGO case, make sure it behaves as pre-103,
# as it may result in hard-to-track bugs
my $cds = $schema->resultset('Artist')
LEFT JOIN cd cd ON cd.cdid = single_track_2.cd
)',
);
+
+done_testing;
use strict;
use Test::More;
+use Test::Deep;
use lib qw(t/lib);
use DBICTest;
-my $schema = DBICTest->init_schema(
- no_populate => 1,
-);
+my $schema = DBICTest->init_schema( no_populate => 1 );
$schema->resultset('CD')->create({
- cdid => 0,
- artist => {
- artistid => 0,
- name => '',
- rank => 0,
- charfield => 0,
- },
- title => '',
- year => 0,
- genreid => 0,
- single_track => 0,
+ cdid => 0, title => '', year => 0, genreid => 0, single_track => 0, artist => {
+ artistid => 0, name => '', rank => 0, charfield => 0,
+ },
});
my $orig_debug = $schema->storage->debug;
my $cd = $schema->resultset('CD')->search( {}, { prefetch => 'artist' })->next;
-is_deeply
+cmp_deeply
{ $cd->get_columns },
- {
- artist => 0,
- cdid => 0,
- genreid => 0,
- single_track => 0,
- title => '',
- year => 0,
- },
+ { artist => 0, cdid => 0, genreid => 0, single_track => 0, title => '', year => 0 },
'Expected CD columns present',
;
-is_deeply
+cmp_deeply
{ $cd->artist->get_columns },
- {
- artistid => 0,
- charfield => 0,
- name => "",
- rank => 0,
- },
+ { artistid => 0, charfield => 0, name => "", rank => 0 },
'Expected Artist columns present',
;
use DBIx::Class::SQLMaker::LimitDialects;
my $ROWS = DBIx::Class::SQLMaker::LimitDialects->__rows_bindtype;
+my $OFFSET = DBIx::Class::SQLMaker::LimitDialects->__offset_bindtype;
my $schema = DBICTest->init_schema();
my $sdebug = $schema->storage->debug;
LEFT JOIN track tracks ON tracks.cd = me.cdid
LEFT JOIN liner_notes liner_notes ON liner_notes.liner_id = me.cdid
WHERE ( me.cdid IS NOT NULL )
- ORDER BY track_count DESC, maxtr ASC, tracks.cd
+ ORDER BY track_count DESC, maxtr ASC
)',
[[$ROWS => 2]],
'next() query generated expected SQL',
);
is ($most_tracks_rs->count, 2, 'Limit works');
- my $top_cd = $most_tracks_rs->first;
+ my ($top_cd) = $most_tracks_rs->all;
is ($top_cd->id, 2, 'Correct cd fetched on top'); # 2 because of the slice(1,1) earlier
my $query_cnt = 0;
$schema->storage->debug ($sdebug);
}
+{
+ # test lifted from soulchild
+
+ my $most_tracks_rs = $schema->resultset ('CD')->search (
+ {
+ 'me.cdid' => { '!=' => undef }, # this is just to test WHERE
+ 'tracks.trackid' => { '!=' => undef },
+ },
+ {
+ join => 'tracks',
+ prefetch => 'liner_notes',
+ select => ['me.cdid', 'liner_notes.notes', { count => 'tracks.trackid', -as => 'tr_count' }, { max => 'tracks.trackid', -as => 'tr_maxid'} ],
+ as => [qw/cdid notes track_count max_track_id/],
+ order_by => [ { -desc => 'tr_count' }, { -asc => 'tr_maxid' } ],
+ group_by => 'me.cdid',
+ rows => 2,
+ }
+ );
+
+ is_same_sql_bind(
+ $most_tracks_rs->as_query,
+ '(SELECT me.cdid, liner_notes.notes, me.tr_count, me.tr_maxid,
+ liner_notes.liner_id, liner_notes.notes
+ FROM (
+ SELECT me.cdid, COUNT(tracks.trackid) AS tr_count, MAX(tracks.trackid) AS tr_maxid
+ FROM cd me
+ LEFT JOIN track tracks
+ ON tracks.cd = me.cdid
+ WHERE me.cdid IS NOT NULL AND tracks.trackid IS NOT NULL
+ GROUP BY me.cdid
+ ORDER BY tr_count DESC, tr_maxid ASC
+ LIMIT ?
+ ) me
+ LEFT JOIN track tracks
+ ON tracks.cd = me.cdid
+ LEFT JOIN liner_notes liner_notes
+ ON liner_notes.liner_id = me.cdid
+ WHERE me.cdid IS NOT NULL AND tracks.trackid IS NOT NULL
+ ORDER BY tr_count DESC, tr_maxid ASC
+ )',
+ [[$ROWS => 2]],
+ 'Oddball mysql-ish group_by usage yields valid SQL',
+ );
+
+ is ($most_tracks_rs->count, 2, 'Limit works');
+ my ($top_cd) = $most_tracks_rs->all;
+ is ($top_cd->id, 2, 'Correct cd fetched on top'); # 2 because of the slice(1,1) earlier
+
+ my $query_cnt = 0;
+ $schema->storage->debugcb ( sub { $query_cnt++ } );
+ $schema->storage->debug (1);
+
+ is ($top_cd->get_column ('track_count'), 4, 'Track count fetched correctly');
+ is (
+ $top_cd->liner_notes->notes,
+ 'Buy Whiskey!',
+ 'Correct liner pre-fetched with top cd',
+ );
+
+ is ($query_cnt, 0, 'No queries executed during prefetched data access');
+ $schema->storage->debugcb (undef);
+ $schema->storage->debug ($sdebug);
+}
+
+
# make sure that distinct still works
{
my $rs = $schema->resultset("CD")->search({}, {
SELECT me.cdid, me.artist, me.title, me.year, me.genreid, me.single_track
FROM cd me
GROUP BY me.cdid, me.artist, me.title, me.year, me.genreid, me.single_track
- ORDER BY cdid
) me
LEFT JOIN tags tags ON tags.cd = me.cdid
- ORDER BY cdid, tags.cd, tags.tag
+ ORDER BY cdid
)',
[],
'Prefetch + distinct resulted in correct group_by',
);
}
+# make sure distinct applies to the CD part only, not to the order_by part
{
- my $rs = $schema->resultset('CD')->search({},
- {
- '+select' => [{ count => 'tags.tag' }],
- '+as' => ['test_count'],
- prefetch => ['tags'],
- distinct => 1,
- order_by => {'-asc' => 'tags.tag'},
- rows => 1
- }
+ my $rs = $schema->resultset('CD')->search({}, {
+ columns => [qw( cdid title )],
+ '+select' => [{ count => 'tags.tag' }],
+ '+as' => ['test_count'],
+ prefetch => ['tags'],
+ distinct => 1,
+ order_by => {'-desc' => 'tags.tag'},
+ offset => 1,
+ rows => 3,
+ });
+
+ is_same_sql_bind($rs->as_query,
+ '(
+ SELECT me.cdid, me.title, me.test_count,
+ tags.tagid, tags.cd, tags.tag
+ FROM (
+ SELECT me.cdid, me.title,
+ COUNT( tags.tag ) AS test_count
+ FROM cd me
+ LEFT JOIN tags tags
+ ON tags.cd = me.cdid
+ GROUP BY me.cdid, me.title
+ ORDER BY MAX( tags.tag ) DESC
+ LIMIT ?
+ OFFSET ?
+ ) me
+ LEFT JOIN tags tags
+ ON tags.cd = me.cdid
+ ORDER BY tags.tag DESC
+ )',
+ [ [$ROWS => 3], [$OFFSET => 1] ],
+ 'Expected limited prefetch with distinct SQL',
+ );
+
+ my $expected_hri = [
+ { cdid => 4, test_count => 2, title => "Generic Manufactured Singles", tags => [
+ { cd => 4, tag => "Shiny", tagid => 9 },
+ { cd => 4, tag => "Cheesy", tagid => 6 },
+ ]},
+ {
+ cdid => 5, test_count => 2, title => "Come Be Depressed With Us", tags => [
+ { cd => 5, tag => "Cheesy", tagid => 7 },
+ { cd => 5, tag => "Blue", tagid => 4 },
+ ]},
+ {
+ cdid => 1, test_count => 1, title => "Spoonful of bees", tags => [
+ { cd => 1, tag => "Blue", tagid => 1 },
+ ]},
+ ];
+
+ is_deeply (
+ $rs->all_hri,
+ $expected_hri,
+ 'HRI dump of limited prefetch with distinct as expected'
+ );
+
+ # pre-multiplied main source also should work
+ $rs = $schema->resultset('CD')->search_related('artist')->search_related('cds', {}, {
+ columns => [qw( cdid title )],
+ '+select' => [{ count => 'tags.tag' }],
+ '+as' => ['test_count'],
+ prefetch => ['tags'],
+ distinct => 1,
+ order_by => {'-desc' => 'tags.tag'},
+ offset => 1,
+ rows => 3,
+ });
+
+ is_same_sql_bind($rs->as_query,
+ '(
+ SELECT cds.cdid, cds.title, cds.test_count,
+ tags.tagid, tags.cd, tags.tag
+ FROM cd me
+ JOIN artist artist
+ ON artist.artistid = me.artist
+ JOIN (
+ SELECT cds.cdid, cds.title,
+ COUNT( tags.tag ) AS test_count,
+ cds.artist
+ FROM cd me
+ JOIN artist artist
+ ON artist.artistid = me.artist
+ JOIN cd cds
+ ON cds.artist = artist.artistid
+ LEFT JOIN tags tags
+ ON tags.cd = cds.cdid
+ GROUP BY cds.cdid, cds.title, cds.artist
+ ORDER BY MAX( tags.tag ) DESC
+ LIMIT ?
+ OFFSET ?
+ ) cds
+ ON cds.artist = artist.artistid
+ LEFT JOIN tags tags
+ ON tags.cd = cds.cdid
+ ORDER BY tags.tag DESC
+ )',
+ [ [$ROWS => 3], [$OFFSET => 1] ],
+ 'Expected limited prefetch with distinct SQL on premultiplied head',
+ );
+
+ # Tag counts are multiplied by the cd->artist->cds multiplication
+ # I would *almost* call this "expected" without wraping an as_subselect_rs
+ {
+ local $TODO = 'Not sure if we can stop the count/group of premultiplication abstraction leak';
+ is_deeply (
+ $rs->all_hri,
+ $expected_hri,
+ 'HRI dump of limited prefetch with distinct as expected on premultiplid head'
);
- is_same_sql_bind($rs->as_query, q{
- (SELECT me.cdid, me.artist, me.title, me.year, me.genreid, me.single_track, me.test_count, tags.tagid, tags.cd, tags.tag
- FROM (SELECT me.cdid, me.artist, me.title, me.year, me.genreid, me.single_track, COUNT( tags.tag ) AS test_count
- FROM cd me LEFT JOIN tags tags ON tags.cd = me.cdid
- GROUP BY me.cdid, me.artist, me.title, me.year, me.genreid, me.single_track, tags.tag
- ORDER BY tags.tag ASC LIMIT ?)
- me
- LEFT JOIN tags tags ON tags.cd = me.cdid
- ORDER BY tags.tag ASC, tags.cd, tags.tag
- )
- }, [[$ROWS => 1]]);
+ }
}
done_testing;
use warnings;
use Test::More;
+use Test::Deep;
use Test::Exception;
use lib qw(t/lib);
use DBICTest;
+use DBIC::SqlMakerTest;
my $schema = DBICTest->init_schema();
lives_ok(sub {
- # while cds.* will be selected anyway (prefetch currently forces the result of _resolve_prefetch)
+ # while cds.* will be selected anyway (prefetch implies it)
# only the requested me.name column will be fetched.
# reference sql with select => [...]
{
prefetch => [ qw/ cds / ],
order_by => [ { -desc => 'me.name' }, 'cds.title' ],
- select => [qw/ me.name cds.title / ],
- }
+ select => [qw/ me.name cds.title / ],
+ },
);
is ($rs->count, 2, 'Correct number of collapsed artists');
- my $we_are_goth = $rs->first;
+ my ($we_are_goth) = $rs->all;
is ($we_are_goth->name, 'We Are Goth', 'Correct first artist');
is ($we_are_goth->cds->count, 1, 'Correct number of CDs for first artist');
is ($we_are_goth->cds->first->title, 'Come Be Depressed With Us', 'Correct cd for artist');
}, 'explicit prefetch on a keyless object works');
+lives_ok ( sub {
+
+ my $rs = $schema->resultset('CD')->search(
+ {},
+ {
+ order_by => [ { -desc => 'me.year' } ],
+ }
+ );
+ my $years = [qw/ 2001 2001 1999 1998 1997/];
+
+ cmp_deeply (
+ [ $rs->search->get_column('me.year')->all ],
+ $years,
+ 'Expected years (at least one duplicate)',
+ );
+
+ my @cds_and_tracks;
+ for my $cd ($rs->all) {
+ my $data = { year => $cd->year, cdid => $cd->cdid };
+ for my $tr ($cd->tracks->all) {
+ push @{$data->{tracks}}, { $tr->get_columns };
+ }
+ push @cds_and_tracks, $data;
+ }
+
+ my $pref_rs = $rs->search ({}, { columns => [qw/year cdid/], prefetch => 'tracks' });
+
+ my @pref_cds_and_tracks;
+ for my $cd ($pref_rs->all) {
+ my $data = { $cd->get_columns };
+ for my $tr ($cd->tracks->all) {
+ push @{$data->{tracks}}, { $tr->get_columns };
+ }
+ push @pref_cds_and_tracks, $data;
+ }
+
+ cmp_deeply (
+ \@pref_cds_and_tracks,
+ \@cds_and_tracks,
+ 'Correct collapsing on non-unique primary object'
+ );
+
+ cmp_deeply (
+ [ $pref_rs->search ({}, { result_class => 'DBIx::Class::ResultClass::HashRefInflator' })->all ],
+ \@cds_and_tracks,
+ 'Correct HRI collapsing on non-unique primary object'
+ );
+
+}, 'weird collapse lives');
+
lives_ok(sub {
# test implicit prefetch as well
sub {
$schema->resultset('Track')->search({}, { join => { cd => 'artist' }, '+columns' => 'artist.name' } )->next;
},
- qr|\QCan't inflate manual prefetch into non-existent relationship 'artist' from 'Track', check the inflation specification (columns/as) ending in 'artist.name'|,
+ qr|\QInflation into non-existent relationship 'artist' of 'Track' requested, check the inflation specification (columns/as) ending in '...artist.name'|,
'Sensible error message on mis-specified "as"',
);
prefetch => 'books',
});
- lives_ok {
- is ($pref_rs->all, 1, 'Expected count of objects on limtied prefetch')
- } "Complex limited prefetch works with non-selected join condition";
+ is_same_sql_bind(
+ $pref_rs->as_query,
+ '(
+ SELECT me.name, books.id, books.source, books.owner, books.title, books.price
+ FROM (
+ SELECT me.name, me.id
+ FROM owners me
+ LIMIT ?
+ OFFSET ?
+ ) me
+ LEFT JOIN books books
+ ON books.owner = me.id
+ )',
+ [ [ { sqlt_datatype => "integer" } => 3 ], [ { sqlt_datatype => "integer" } => 1 ] ],
+ 'Expected SQL on complex limited prefetch with non-selected join condition',
+ );
+
+ is_deeply (
+ $pref_rs->all_hri,
+ [ {
+ name => "Waltham",
+ books => [ {
+ id => 3,
+ owner => 2,
+ price => 65,
+ source => "Library",
+ title => "Best Recipe Cookbook",
+ } ],
+ } ],
+ 'Expected result on complex limited prefetch with non-selected join condition'
+ );
+
+ my $empty_ordered_pref_rs = $pref_rs->search({}, {
+ columns => [], # nothing, we only prefetch the book data
+ order_by => 'me.name',
+ });
+ my $empty_ordered_pref_hri = [ {
+ books => [ {
+ id => 3,
+ owner => 2,
+ price => 65,
+ source => "Library",
+ title => "Best Recipe Cookbook",
+ } ],
+ } ];
+
+ is_same_sql_bind(
+ $empty_ordered_pref_rs->as_query,
+ '(
+ SELECT books.id, books.source, books.owner, books.title, books.price
+ FROM (
+ SELECT me.id, me.name
+ FROM owners me
+ ORDER BY me.name
+ LIMIT ?
+ OFFSET ?
+ ) me
+ LEFT JOIN books books
+ ON books.owner = me.id
+ ORDER BY me.name
+ )',
+ [ [ { sqlt_datatype => "integer" } => 3 ], [ { sqlt_datatype => "integer" } => 1 ] ],
+ 'Expected SQL on *ordered* complex limited prefetch with non-selected root data',
+ );
+
+ is_deeply (
+ $empty_ordered_pref_rs->all_hri,
+ $empty_ordered_pref_hri,
+ 'Expected result on *ordered* complex limited prefetch with non-selected root data'
+ );
+
+ $empty_ordered_pref_rs = $empty_ordered_pref_rs->search({}, {
+ order_by => [ \ 'LENGTH(me.name)', \ 'RANDOM()' ],
+ });
+
+ is_same_sql_bind(
+ $empty_ordered_pref_rs->as_query,
+ '(
+ SELECT books.id, books.source, books.owner, books.title, books.price
+ FROM (
+ SELECT me.id, me.name
+ FROM owners me
+ ORDER BY LENGTH(me.name), RANDOM()
+ LIMIT ?
+ OFFSET ?
+ ) me
+ LEFT JOIN books books
+ ON books.owner = me.id
+ ORDER BY LENGTH(me.name), RANDOM()
+ )',
+ [ [ { sqlt_datatype => "integer" } => 3 ], [ { sqlt_datatype => "integer" } => 1 ] ],
+ 'Expected SQL on *function-ordered* complex limited prefetch with non-selected root data',
+ );
+
+ is_deeply (
+ $empty_ordered_pref_rs->all_hri,
+ $empty_ordered_pref_hri,
+ 'Expected result on *function-ordered* complex limited prefetch with non-selected root data'
+ );
}
JOIN artist artist ON artist.artistid = me.artist
LEFT JOIN cd cds ON cds.artist = artist.artistid
LEFT JOIN artist artist_2 ON artist_2.artistid = cds.artist
- ORDER BY cds.artist, cds.year ASC
)',
[],
);
--- /dev/null
+use strict;
+use warnings;
+
+use Test::More;
+use Test::Warn;
+use Test::Exception;
+use lib qw(t/lib);
+use DBICTest;
+
+my $schema = DBICTest->init_schema();
+
+my $rs = $schema->resultset('Artist')->search({}, {
+ select => 'artistid',
+ prefetch => { cds => 'tracks' },
+});
+
+my $initial_artists_cnt = $rs->count;
+
+# create one extra artist with just one cd with just one track
+# and then an artist with nothing at all
+# the implicit order by me.artistid will get them back in correct order
+$rs->create({
+ name => 'foo',
+ cds => [{
+ year => 2012,
+ title => 'foocd',
+ tracks => [{
+ title => 'footrack',
+ }]
+ }],
+});
+$rs->create({ name => 'bar' });
+$rs->create({ name => 'baz' });
+
+# make sure we are reentrant, and also check with explicit order_by
+for (undef, undef, 'me.artistid') {
+ $rs = $rs->search({}, { order_by => $_ }) if $_;
+
+ for (1 .. $initial_artists_cnt) {
+ is ($rs->next->artistid, $_, 'Default fixture artists in order') || exit;
+ }
+
+ my $foo_artist = $rs->next;
+ is ($foo_artist->cds->next->tracks->next->title, 'footrack', 'Right track');
+
+ is (
+ [$rs->cursor->next]->[0],
+ $initial_artists_cnt + 3,
+ 'Very last artist still on the cursor'
+ );
+
+ is_deeply ([$rs->cursor->next], [], 'Nothing else left');
+
+ is ($rs->next->artistid, $initial_artists_cnt + 2, 'Row stashed in resultset still accessible');
+ is ($rs->next, undef, 'Nothing left in resultset either');
+
+ $rs->reset;
+}
+
+$rs->next;
+
+my @objs = $rs->all;
+is (@objs, $initial_artists_cnt + 3, '->all resets everything correctly');
+is ( ($rs->cursor->next)[0], 1, 'Cursor auto-rewound after all()');
+is ($rs->{_stashed_rows}, undef, 'Nothing else left in $rs stash');
+
+my $unordered_rs = $rs->search({}, { order_by => 'cds.title' });
+
+warnings_exist {
+ ok ($unordered_rs->next, 'got row 1');
+} qr/performed an eager cursor slurp underneath/, 'Warned on auto-eager cursor';
+
+is_deeply ([$unordered_rs->cursor->next], [], 'Nothing left on cursor, eager slurp');
+ok ($unordered_rs->next, "got row $_") for (2 .. $initial_artists_cnt + 3);
+is ($unordered_rs->next, undef, 'End of RS reached');
+is ($unordered_rs->next, undef, 'End of RS not lost');
+
+{
+ my $non_uniquely_ordered_constrained = $schema->resultset('CD')->search(
+ { artist => 1 },
+ { order_by => [qw( me.genreid me.title me.year )], prefetch => 'tracks' },
+ );
+
+ isa_ok ($non_uniquely_ordered_constrained->next, 'DBICTest::CD' );
+
+ ok( defined $non_uniquely_ordered_constrained->cursor->next, 'Cursor not exhausted' );
+}
+
+done_testing;
--- /dev/null
+use strict;
+use warnings;
+
+use Test::More;
+use Test::Deep;
+use Test::Warn;
+use Test::Exception;
+use lib qw(t/lib);
+use DBICTest;
+
+delete $ENV{DBIC_COLUMNS_INCLUDE_FILTER_RELS};
+
+my $schema = DBICTest->init_schema(no_populate => 1);
+
+$schema->resultset('Artist')->create({ name => 'JMJ', cds => [{
+ title => 'Magnetic Fields',
+ year => 1981,
+ genre => { name => 'electro' },
+ tracks => [
+ { title => 'm1' },
+ { title => 'm2' },
+ { title => 'm3' },
+ { title => 'm4' },
+ ],
+} ] });
+
+$schema->resultset('CD')->create({
+ title => 'Equinoxe',
+ year => 1978,
+ artist => { name => 'JMJ' },
+ genre => { name => 'electro' },
+ tracks => [
+ { title => 'e1' },
+ { title => 'e2' },
+ { title => 'e3' },
+ ],
+ single_track => {
+ title => 'o1',
+ cd => {
+ title => 'Oxygene',
+ year => 1976,
+ artist => { name => 'JMJ' },
+ tracks => [
+ { title => 'o2', position => 2}, # the position should not be here, bug in MC
+ ],
+ },
+ },
+});
+
+my $rs = $schema->resultset ('CD')->search ({}, {
+ join => [ 'tracks', { single_track => { cd => { artist => { cds => 'tracks' } } } } ],
+ collapse => 1,
+ columns => [
+ { 'year' => 'me.year' }, # non-unique
+ { 'genreid' => 'me.genreid' }, # nullable
+ { 'tracks.title' => 'tracks.title' }, # non-unique (no me.id)
+ { 'single_track.cd.artist.cds.cdid' => 'cds.cdid' }, # to give uniquiness to ...tracks.title below
+ { 'single_track.cd.artist.artistid' => 'artist.artistid' }, # uniqufies entire parental chain
+ { 'single_track.cd.artist.cds.year' => 'cds.year' }, # non-unique
+ { 'single_track.cd.artist.cds.genreid' => 'cds.genreid' }, # nullable
+ { 'single_track.cd.artist.cds.tracks.title' => 'tracks_2.title' }, # unique when combined with ...cds.cdid above
+ { 'latest_cd' => \ "(SELECT MAX(year) FROM cd)" }, # random function
+    { 'title' => 'me.title' },                                    # uniqueness for me
+    { 'artist' => 'me.artist' },                                  # uniqueness for me
+ ],
+ order_by => [{ -desc => 'cds.year' }, { -desc => 'me.title'} ],
+});
+
+my $hri_rs = $rs->search({}, { result_class => 'DBIx::Class::ResultClass::HashRefInflator' });
+
+cmp_deeply (
+ [$hri_rs->all],
+ [
+ { artist => 1, genreid => 1, latest_cd => 1981, title => "Equinoxe", year => 1978,
+ single_track => {
+ cd => {
+ artist => { artistid => 1, cds => [
+ { cdid => 1, genreid => 1, year => 1981, tracks => [
+ { title => "m1" },
+ { title => "m2" },
+ { title => "m3" },
+ { title => "m4" },
+ ]},
+ { cdid => 3, genreid => 1, year => 1978, tracks => [
+ { title => "e1" },
+ { title => "e2" },
+ { title => "e3" },
+ ]},
+ { cdid => 2, genreid => undef, year => 1976, tracks => [
+ { title => "o1" },
+ { title => "o2" },
+ ]},
+ ]},
+ },
+ },
+ tracks => [
+ { title => "e1" },
+ { title => "e2" },
+ { title => "e3" },
+ ],
+ },
+ {
+ artist => 1, genreid => undef, latest_cd => 1981, title => "Oxygene", year => 1976, single_track => undef,
+ tracks => [
+ { title => "o1" },
+ { title => "o2" },
+ ],
+ },
+ {
+ artist => 1, genreid => 1, latest_cd => 1981, title => "Magnetic Fields", year => 1981, single_track => undef,
+ tracks => [
+ { title => "m1" },
+ { title => "m2" },
+ { title => "m3" },
+ { title => "m4" },
+ ],
+ },
+ ],
+ 'W00T, manual prefetch with collapse works'
+);
+
+lives_ok { my $dummy = $rs; warnings_exist {
+
+##############
+### This is a bunch of workarounds for deprecated behavior - delete entire block when fixed
+ my $cd_obj = ($rs->all)[0]->single_track->cd;
+ my $art_obj = $cd_obj->artist;
+
+ my $empty_single_columns = {
+ cd => undef
+ };
+ my $empty_single_inflated_columns = {
+ cd => $cd_obj
+ };
+ my $empty_cd_columns = {
+ artist => $art_obj->artistid
+ };
+ my $empty_cd_inflated_columns = {
+ artist => $art_obj
+ };
+
+ {
+ local $TODO = "Returning prefetched 'filter' rels as part of get_columns/get_inflated_columns is deprecated";
+ is_deeply($_, {}) for (
+ $empty_single_columns, $empty_single_inflated_columns, $empty_cd_columns, $empty_cd_inflated_columns
+ );
+ }
+##############
+
+
+### this tests the standard root -> single -> filter ->filter
+ my ($row) = $rs->all; # don't trigger order warnings
+
+ is_deeply(
+ { $row->single_track->get_columns },
+ $empty_single_columns,
+ "No unexpected columns available on intermediate 'single' rel with a chained 'filter' prefetch",
+ );
+
+ is_deeply(
+ { $row->single_track->get_inflated_columns },
+ $empty_single_inflated_columns,
+ "No unexpected inflated columns available on intermediate 'single' rel with a chained 'filter' prefetch",
+ );
+
+ is_deeply(
+ { $row->single_track->cd->get_columns },
+ $empty_cd_columns,
+ "No unexpected columns available on intermediate 'single' rel with 2x chained 'filter' prefetch",
+ );
+
+ is_deeply(
+ { $row->single_track->cd->get_inflated_columns },
+ $empty_cd_inflated_columns,
+ "No unexpected inflated columns available on intermediate 'single' rel with 2x chained 'filter' prefetch",
+ );
+
+### also try a different arrangement root -> single -> single ->filter
+ ($row) = $rs->result_source->resultset->search({ 'artist.artistid' => 1 }, {
+ join => { single_track => { disc => { artist => 'cds' } } },
+ '+columns' => {
+ 'single_track.disc.artist.artistid' => 'artist.artistid',
+ 'single_track.disc.artist.cds.cdid' => 'cds.cdid',
+ },
+ collapse => 1,
+ })->all;
+
+ is_deeply(
+ { $row->single_track->get_columns },
+ {},
+ "No unexpected columns available on intermediate 'single' rel with a chained 'single' prefetch",
+ );
+
+ is_deeply(
+ { $row->single_track->get_inflated_columns },
+ {},
+ "No unexpected inflated columns available on intermediate 'single' rel with a chained 'single' prefetch",
+ );
+
+ is_deeply(
+ { $row->single_track->disc->get_columns },
+ $empty_cd_columns,
+ "No unexpected columns available on intermediate 'single' rel with chained 'single' and chained 'filter' prefetch",
+ );
+
+ is_deeply(
+ { $row->single_track->disc->get_inflated_columns },
+ $empty_cd_inflated_columns,
+ "No unexpected inflated columns available on intermediate 'single' rel with chained 'single' and chained 'filter' prefetch",
+ );
+
+} [
+ qr/\QReturning primary keys of prefetched 'filter' rels as part of get_columns()/,
+ qr/\QUnable to deflate 'filter'-type relationship 'cd' (related object primary key not retrieved)/,
+ qr/\QReturning prefetched 'filter' rels as part of get_inflated_columns()/,
+ qr/\QReturning primary keys of prefetched 'filter' rels as part of get_columns()/,
+ qr/\QReturning prefetched 'filter' rels as part of get_inflated_columns()/,
+ qr/\QReturning primary keys of prefetched 'filter' rels as part of get_columns()/,
+ qr/\QReturning prefetched 'filter' rels as part of get_inflated_columns()/,
+], 'expected_warnings'
+} 'traversing prefetch chain with empty intermediates works';
+
+# multi-has_many with underdefined root, with rather random order
+$rs = $schema->resultset ('CD')->search ({}, {
+ join => [ 'tracks', { single_track => { cd => { artist => { cds => 'tracks' } } } } ],
+ collapse => 1,
+ columns => [
+ { 'single_track.trackid' => 'single_track.trackid' }, # definitive link to root from 1:1:1:1:M:M chain
+ { 'year' => 'me.year' }, # non-unique
+ { 'tracks.cd' => 'tracks.cd' }, # \ together both uniqueness for second multirel
+ { 'tracks.title' => 'tracks.title' }, # / and definitive link back to root
+    { 'single_track.cd.artist.cds.cdid' => 'cds.cdid' },                # to give uniqueness to ...tracks.title below
+ { 'single_track.cd.artist.cds.year' => 'cds.year' }, # non-unique
+    { 'single_track.cd.artist.artistid' => 'artist.artistid' },         # uniquifies entire parental chain
+ { 'single_track.cd.artist.cds.genreid' => 'cds.genreid' }, # nullable
+ { 'single_track.cd.artist.cds.tracks.title' => 'tracks_2.title' }, # unique when combined with ...cds.cdid above
+ ],
+});
+
+for (1..3) {
+ $rs->create({ artist => 1, year => 1977, title => "fuzzy_$_" });
+}
+
+my $rs_random = $rs->search({}, { order_by => \ 'RANDOM()' });
+is ($rs_random->count, 6, 'row count matches');
+
+if ($ENV{TEST_VERBOSE}) {
+ my @lines = (
+ [ "What are we actually trying to collapse (Select/As, tests below will see a *DIFFERENT* random order):" ],
+ [ map { my $s = $_; $s =~ s/single_track\./sngl_tr./; $s } @{$rs_random->{_attrs}{select} } ],
+ $rs_random->{_attrs}{as},
+ [ "-" x 159 ],
+ $rs_random->cursor->all,
+ );
+
+ diag join ' # ', map { sprintf '% 15s', (defined $_ ? $_ : 'NULL') } @$_
+ for @lines;
+}
+
+{
+ my $queries = 0;
+ $schema->storage->debugcb(sub { $queries++ });
+ my $orig_debug = $schema->storage->debug;
+ $schema->storage->debug (1);
+
+ for my $use_next (0, 1) {
+ my @random_cds;
+ if ($use_next) {
+ warnings_exist {
+ while (my $o = $rs_random->next) {
+ push @random_cds, $o;
+ }
+ } qr/performed an eager cursor slurp underneath/,
+ 'Warned on auto-eager cursor';
+ }
+ else {
+ @random_cds = $rs_random->all;
+ }
+
+ is (@random_cds, 6, 'object count matches');
+
+ for my $cd (@random_cds) {
+ if ($cd->year == 1977) {
+ is( scalar $cd->tracks, 0, 'no tracks on 1977 cd' );
+ is( $cd->single_track, undef, 'no single_track on 1977 cd' );
+ }
+ elsif ($cd->year == 1976) {
+ is( scalar $cd->tracks, 2, 'Two tracks on 1976 cd' );
+ like( $_->title, qr/^o\d/, "correct title" )
+ for $cd->tracks;
+ is( $cd->single_track, undef, 'no single_track on 1976 cd' );
+ }
+ elsif ($cd->year == 1981) {
+ is( scalar $cd->tracks, 4, 'Four tracks on 1981 cd' );
+ like( $_->title, qr/^m\d/, "correct title" )
+ for $cd->tracks;
+ is( $cd->single_track, undef, 'no single_track on 1981 cd' );
+ }
+ elsif ($cd->year == 1978) {
+ is( scalar $cd->tracks, 3, 'Three tracks on 1978 cd' );
+ like( $_->title, qr/^e\d/, "correct title" )
+ for $cd->tracks;
+        ok( defined $cd->single_track, 'single track prefetched on 1978 cd' );
+ is( $cd->single_track->cd->artist->id, 1, 'Single_track->cd->artist prefetched on 1978 cd' );
+ is( scalar $cd->single_track->cd->artist->cds, 6, '6 cds prefetched on artist' );
+ }
+ }
+ }
+
+ $schema->storage->debugcb(undef);
+ $schema->storage->debug($orig_debug);
+ is ($queries, 2, "Only two queries for two prefetch calls total");
+}
+
+# can't cmp_deeply a random set - need *some* order
+my $ord_rs = $rs->search({}, {
+ order_by => [ 'tracks_2.title', 'tracks.title', 'cds.cdid', \ 'RANDOM()' ],
+ result_class => 'DBIx::Class::ResultClass::HashRefInflator',
+});
+my @hris_all = sort { $a->{year} cmp $b->{year} } $ord_rs->all;
+is (@hris_all, 6, 'hri count matches' );
+
+my $iter_rs = $rs->search({}, {
+ order_by => [ 'me.year', 'me.cdid', 'tracks_2.title', 'tracks.title', 'cds.cdid', \ 'RANDOM()' ],
+ result_class => 'DBIx::Class::ResultClass::HashRefInflator',
+});
+my @hris_iter;
+while (my $r = $iter_rs->next) {
+ push @hris_iter, $r;
+}
+
+cmp_deeply(
+ \@hris_iter,
+ \@hris_all,
+ 'Iteration works correctly',
+);
+
+my @hri_contents = (
+ { year => 1976, single_track => undef, tracks => [
+ { cd => 2, title => "o1" },
+ { cd => 2, title => "o2" },
+ ]},
+ { year => 1977, single_track => undef, tracks => [] },
+ { year => 1977, single_track => undef, tracks => [] },
+ { year => 1977, single_track => undef, tracks => [] },
+ {
+ year => 1978,
+ single_track => {
+ trackid => 6,
+ cd => {
+ artist => {
+ artistid => 1, cds => [
+ { cdid => 4, genreid => undef, year => 1977, tracks => [] },
+ { cdid => 5, genreid => undef, year => 1977, tracks => [] },
+ { cdid => 6, genreid => undef, year => 1977, tracks => [] },
+ { cdid => 3, genreid => 1, year => 1978, tracks => [
+ { title => "e1" },
+ { title => "e2" },
+ { title => "e3" },
+ ]},
+ { cdid => 1, genreid => 1, year => 1981, tracks => [
+ { title => "m1" },
+ { title => "m2" },
+ { title => "m3" },
+ { title => "m4" },
+ ]},
+ { cdid => 2, genreid => undef, year => 1976, tracks => [
+ { title => "o1" },
+ { title => "o2" },
+ ]},
+ ]
+ },
+ },
+ },
+ tracks => [
+ { cd => 3, title => "e1" },
+ { cd => 3, title => "e2" },
+ { cd => 3, title => "e3" },
+ ],
+ },
+ { year => 1981, single_track => undef, tracks => [
+ { cd => 1, title => "m1" },
+ { cd => 1, title => "m2" },
+ { cd => 1, title => "m3" },
+ { cd => 1, title => "m4" },
+ ]},
+);
+
+cmp_deeply (\@hris_all, \@hri_contents, 'W00T, multi-has_many manual underdefined root prefetch with collapse works');
+
+cmp_deeply(
+ $rs->search({}, {
+ order_by => [ 'me.year', 'tracks_2.title', 'tracks.title', 'cds.cdid', { -desc => 'name' } ],
+ rows => 4,
+ offset => 2,
+ })->all_hri,
+ [ @hri_contents[2..5] ],
+ 'multi-has_many prefetch with limit works too',
+);
+
+# left-ordered real iterator
+$rs = $rs->search({}, { order_by => [ 'me.year', 'me.cdid', \ 'RANDOM()' ] });
+my @objs_iter;
+while (my $r = $rs->next) {
+ push @objs_iter, $r;
+}
+
+for my $i (0 .. $#objs_iter) {
+ is ($objs_iter[$i]->year, $hris_all[$i]{year}, "Expected year on object $i" );
+ is (
+ (defined $objs_iter[$i]->single_track),
+ (defined $hris_all[$i]{single_track}),
+ "Expected single relation on object $i"
+ );
+}
+
+$rs = $schema->resultset('Artist')->search({}, {
+ join => 'cds',
+ columns => ['cds.title', 'cds.artist' ],
+ collapse => 1,
+ order_by => [qw( me.name cds.title )],
+});
+
+$rs->create({ name => "${_}_cdless" })
+ for (qw( Z A ));
+
+cmp_deeply (
+ $rs->all_hri,
+ [
+ { cds => [] },
+ { cds => [
+ { artist => 1, title => "Equinoxe" },
+ { artist => 1, title => "Magnetic Fields" },
+ { artist => 1, title => "Oxygene" },
+ { artist => 1, title => "fuzzy_1" },
+ { artist => 1, title => "fuzzy_2" },
+ { artist => 1, title => "fuzzy_3" },
+ ] },
+ { cds => [] },
+ ],
+ 'Expected HRI of 1:M with empty root selection',
+);
+
+done_testing;
my $schema = DBICTest->init_schema();
my $sdebug = $schema->storage->debug;
-# once the following TODO is complete, remove the 2 warning tests immediately
-# after the TODO block
-# (the TODO block itself contains tests ensuring that the warns are removed)
-TODO: {
- local $TODO = 'Prefetch of multiple has_many rels at the same level (currently warn to protect the clueless git)';
-
- #( 1 -> M + M )
- my $cd_rs = $schema->resultset('CD')->search ({ 'me.title' => 'Forkful of bees' });
- my $pr_cd_rs = $cd_rs->search ({}, {
- prefetch => [qw/tracks tags/],
- });
-
- my $tracks_rs = $cd_rs->first->tracks;
- my $tracks_count = $tracks_rs->count;
-
- my ($pr_tracks_rs, $pr_tracks_count);
-
- my $queries = 0;
- $schema->storage->debugcb(sub { $queries++ });
- $schema->storage->debug(1);
-
- my $o_mm_warn;
- {
- local $SIG{__WARN__} = sub { $o_mm_warn = shift };
- $pr_tracks_rs = $pr_cd_rs->first->tracks;
- };
- $pr_tracks_count = $pr_tracks_rs->count;
-
- ok(! $o_mm_warn, 'no warning on attempt to prefetch several same level has_many\'s (1 -> M + M)');
-
- {
- local $TODO;
- is($queries, 1, 'prefetch one->(has_many,has_many) ran exactly 1 query');
- }
- $schema->storage->debugcb (undef);
- $schema->storage->debug ($sdebug);
-
- is($pr_tracks_count, $tracks_count, 'equal count of prefetched relations over several same level has_many\'s (1 -> M + M)');
- is ($pr_tracks_rs->all, $tracks_rs->all, 'equal amount of objects returned with and without prefetch over several same level has_many\'s (1 -> M + M)');
-
- #( M -> 1 -> M + M )
- my $note_rs = $schema->resultset('LinerNotes')->search ({ notes => 'Buy Whiskey!' });
- my $pr_note_rs = $note_rs->search ({}, {
- prefetch => {
- cd => [qw/tracks tags/]
- },
- });
-
- my $tags_rs = $note_rs->first->cd->tags;
- my $tags_count = $tags_rs->count;
-
- my ($pr_tags_rs, $pr_tags_count);
-
- $queries = 0;
- $schema->storage->debugcb(sub { $queries++ });
- $schema->storage->debug(1);
-
- my $m_o_mm_warn;
- {
- local $SIG{__WARN__} = sub { $m_o_mm_warn = shift };
- $pr_tags_rs = $pr_note_rs->first->cd->tags;
- };
- $pr_tags_count = $pr_tags_rs->count;
-
- ok(! $m_o_mm_warn, 'no warning on attempt to prefetch several same level has_many\'s (M -> 1 -> M + M)');
-
- {
- local $TODO;
-
- is($queries, 1, 'prefetch one->(has_many,has_many) ran exactly 1 query');
- $schema->storage->debugcb (undef);
- $schema->storage->debug ($sdebug);
-
- is($pr_tags_count, $tags_count, 'equal count of prefetched relations over several same level has_many\'s (M -> 1 -> M + M)');
- is($pr_tags_rs->all, $tags_rs->all, 'equal amount of objects with and without prefetch over several same level has_many\'s (M -> 1 -> M + M)');
- }
-}
-
-# remove this closure once the TODO above is working
-{
- my $warn_re = qr/will explode the number of row objects retrievable via/;
+#( 1 -> M + M )
+my $cd_rs = $schema->resultset('CD')->search( { 'me.title' => 'Forkful of bees' } );
+my $pr_cd_rs = $cd_rs->search( {}, { prefetch => [qw/tracks tags/], } );
+
+my $tracks_rs = $cd_rs->first->tracks;
+my $tracks_count = $tracks_rs->count;
- my (@w, @dummy);
- local $SIG{__WARN__} = sub { $_[0] =~ $warn_re ? push @w, @_ : warn @_ };
+my ( $pr_tracks_rs, $pr_tracks_count );
- my $rs = $schema->resultset('CD')->search ({ 'me.title' => 'Forkful of bees' }, { prefetch => [qw/tracks tags/] });
- @w = ();
- @dummy = $rs->first;
- is (@w, 1, 'warning on attempt prefetching several same level has_manys (1 -> M + M)');
+my $queries = 0;
+$schema->storage->debugcb( sub { $queries++ } );
+$schema->storage->debug(1);
- my $rs2 = $schema->resultset('LinerNotes')->search ({ notes => 'Buy Whiskey!' }, { prefetch => { cd => [qw/tags tracks/] } });
- @w = ();
- @dummy = $rs2->first;
- is (@w, 1, 'warning on attempt prefetching several same level has_manys (M -> 1 -> M + M)');
-}
+my $o_mm_warn;
+{
+ local $SIG{__WARN__} = sub { $o_mm_warn = shift };
+ $pr_tracks_rs = $pr_cd_rs->first->tracks;
+};
+$pr_tracks_count = $pr_tracks_rs->count;
+
+ok( !$o_mm_warn,
+'no warning on attempt to prefetch several same level has_many\'s (1 -> M + M)'
+);
+
+is( $queries, 1, 'prefetch one->(has_many,has_many) ran exactly 1 query' );
+$schema->storage->debugcb(undef);
+$schema->storage->debug($sdebug);
+
+is( $pr_tracks_count, $tracks_count,
+'equal count of prefetched relations over several same level has_many\'s (1 -> M + M)'
+);
+is( $pr_tracks_rs->all, $tracks_rs->all,
+'equal amount of objects returned with and without prefetch over several same level has_many\'s (1 -> M + M)'
+);
+
+#( M -> 1 -> M + M )
+my $note_rs =
+ $schema->resultset('LinerNotes')->search( { notes => 'Buy Whiskey!' } );
+my $pr_note_rs =
+ $note_rs->search( {}, { prefetch => { cd => [qw/tracks tags/] }, } );
+
+my $tags_rs = $note_rs->first->cd->tags;
+my $tags_count = $tags_rs->count;
+
+my ( $pr_tags_rs, $pr_tags_count );
+
+$queries = 0;
+$schema->storage->debugcb( sub { $queries++ } );
+$schema->storage->debug(1);
+
+my $m_o_mm_warn;
+{
+ local $SIG{__WARN__} = sub { $m_o_mm_warn = shift };
+ $pr_tags_rs = $pr_note_rs->first->cd->tags;
+};
+$pr_tags_count = $pr_tags_rs->count;
+
+ok( !$m_o_mm_warn,
+'no warning on attempt to prefetch several same level has_many\'s (M -> 1 -> M + M)'
+);
+
+is( $queries, 1, 'prefetch one->(has_many,has_many) ran exactly 1 query' );
+$schema->storage->debugcb(undef);
+$schema->storage->debug($sdebug);
+
+is( $pr_tags_count, $tags_count,
+'equal count of prefetched relations over several same level has_many\'s (M -> 1 -> M + M)'
+);
+is( $pr_tags_rs->all, $tags_rs->all,
+'equal amount of objects with and without prefetch over several same level has_many\'s (M -> 1 -> M + M)'
+);
done_testing;
--- /dev/null
+use strict;
+use warnings;
+
+use Test::More;
+use Test::Deep;
+use Test::Exception;
+use lib qw(t/lib);
+use DBICTest;
+
+my $schema = DBICTest->init_schema();
+
+my $mo_rs = $schema->resultset('Artist')->search(
+ { 'me.artistid' => 4 },
+ {
+ prefetch => [
+ {
+ cds => [
+ { tracks => { cd_single => 'tracks' } },
+ { cd_to_producer => 'producer' }
+ ]
+ },
+ { artwork_to_artist => 'artwork' }
+ ],
+
+ result_class => 'DBIx::Class::ResultClass::HashRefInflator',
+
+ order_by => [qw/tracks.position tracks.trackid producer.producerid/],
+ }
+);
+
+$schema->resultset('Artist')->create(
+ {
+ name => 'mo',
+ rank => '1337',
+ cds => [
+ {
+ title => 'Song of a Foo',
+ year => '1999',
+ tracks => [
+ { title => 'Foo Me Baby One More Time' },
+ { title => 'Foo Me Baby One More Time II' },
+ { title => 'Foo Me Baby One More Time III' },
+ { title => 'Foo Me Baby One More Time IV', cd_single => {
+ artist => 1, title => 'MO! Single', year => 2021, tracks => [
+ { title => 'singled out' },
+ { title => 'still alone' },
+ ]
+ } }
+ ],
+ cd_to_producer => [
+ { producer => { name => 'riba' } },
+ { producer => { name => 'sushi' } },
+ ]
+ },
+ {
+ title => 'Song of a Foo II',
+ year => '2002',
+ tracks => [
+ { title => 'Quit Playing Games With My Heart' },
+ { title => 'Bar Foo' },
+ { title => 'Foo Bar', cd_single => {
+ artist => 2, title => 'MO! Single', year => 2020, tracks => [
+ { title => 'singled out' },
+ { title => 'still alone' },
+ ]
+ } }
+ ],
+ cd_to_producer => [
+ { producer => { name => 'riba' } },
+ { producer => { name => 'sushi' } },
+ ],
+ }
+ ],
+ artwork_to_artist => [
+ { artwork => {cd_id => 1 } },
+ { artwork => { cd_id => 2 } }
+ ]
+ }
+);
+
+my $mo = $mo_rs->next;
+
+is( @{$mo->{cds}}, 2, 'two CDs' );
+
+cmp_deeply( $mo, {
+ artistid => 4, charfield => undef, name => 'mo', rank => 1337,
+ artwork_to_artist => [
+ { artist_id => 4, artwork_cd_id => 1, artwork => { cd_id => 1 } },
+ { artist_id => 4, artwork_cd_id => 2, artwork => { cd_id => 2 } },
+ ],
+ cds => [
+ {
+ artist => 4, cdid => 6, title => 'Song of a Foo', genreid => undef, year => 1999, single_track => undef,
+ cd_to_producer => [
+ { attribute => undef, cd => 6, producer => { name => 'riba', producerid => 4 } },
+ { attribute => undef, cd => 6, producer => { name => 'sushi', producerid => 5 } },
+ ],
+ tracks => [
+ { cd => 6, position => 1, trackid => 19, title => 'Foo Me Baby One More Time', last_updated_on => undef, last_updated_at => undef, cd_single => undef },
+ { cd => 6, position => 2, trackid => 20, title => 'Foo Me Baby One More Time II', last_updated_on => undef, last_updated_at => undef, cd_single => undef },
+ { cd => 6, position => 3, trackid => 21, title => 'Foo Me Baby One More Time III', last_updated_on => undef, last_updated_at => undef, cd_single => undef },
+ { cd => 6, position => 4, trackid => 22, title => 'Foo Me Baby One More Time IV', last_updated_on => undef, last_updated_at => undef, cd_single => {
+ single_track => 22, artist => 1, cdid => 7, title => 'MO! Single', genreid => undef, year => 2021, tracks => [
+ { cd => 7, position => 1, title => 'singled out', trackid => '23', last_updated_at => undef, last_updated_on => undef },
+ { cd => 7, position => 2, title => 'still alone', trackid => '24', last_updated_at => undef, last_updated_on => undef },
+ ],
+ } }
+ ],
+ },
+ {
+ artist => 4, cdid => 8, title => 'Song of a Foo II', genreid => undef, year => 2002, single_track => undef,
+ cd_to_producer => [
+ { attribute => undef, cd => 8, producer => { name => 'riba', producerid => 4 } },
+ { attribute => undef, cd => 8, producer => { name => 'sushi', producerid => 5 } },
+ ],
+ tracks => [
+ { cd => 8, position => 1, trackid => 25, title => 'Quit Playing Games With My Heart', last_updated_on => undef, last_updated_at => undef, cd_single => undef },
+ { cd => 8, position => 2, trackid => 26, title => 'Bar Foo', last_updated_on => undef, last_updated_at => undef, cd_single => undef },
+ { cd => 8, position => 3, trackid => 27, title => 'Foo Bar', last_updated_on => undef, last_updated_at => undef, cd_single => {
+ single_track => 27, artist => 2, cdid => 9, title => 'MO! Single', genreid => undef, year => 2020, tracks => [
+ { cd => 9, position => 1, title => 'singled out', trackid => '28', last_updated_at => undef, last_updated_on => undef },
+ { cd => 9, position => 2, title => 'still alone', trackid => '29', last_updated_at => undef, last_updated_on => undef },
+ ],
+ } }
+ ],
+ }
+ ],
+});
+
+done_testing;
DBIx::Class::SQLMaker::LimitDialects->__offset_bindtype,
);
-my $schema = DBICTest->init_schema();
+my $schema = DBICTest->init_schema(quote_names => 1);
my $artist_rs = $schema->resultset('Artist');
-my $ar = $artist_rs->current_source_alias;
my $filtered_cd_rs = $artist_rs->search_related('cds_unordered',
- { "$ar.rank" => 13 },
+ { "me.rank" => 13 },
{
- prefetch => [ 'tracks' ],
- order_by => [ { -asc => "$ar.name" }, "$ar.artistid DESC" ],
- offset => 3,
- rows => 3,
+ prefetch => 'tracks',
+ join => 'genre',
+ order_by => [ { -desc => 'genre.name' }, { -desc => \ 'tracks.title' }, { -asc => "me.name" }, { -desc => [qw(year cds_unordered.title)] } ], # me. is the artist, *NOT* the cd
},
);
-is_same_sql_bind(
- $filtered_cd_rs->as_query,
- q{(
- SELECT cds_unordered.cdid, cds_unordered.artist, cds_unordered.title, cds_unordered.year, cds_unordered.genreid, cds_unordered.single_track,
- tracks.trackid, tracks.cd, tracks.position, tracks.title, tracks.last_updated_on, tracks.last_updated_at
- FROM artist me
- JOIN (
- SELECT cds_unordered.cdid, cds_unordered.artist, cds_unordered.title, cds_unordered.year, cds_unordered.genreid, cds_unordered.single_track
- FROM artist me
- JOIN cd cds_unordered
- ON cds_unordered.artist = me.artistid
- WHERE ( me.rank = ? )
- ORDER BY me.name ASC, me.artistid DESC
- LIMIT ?
- OFFSET ?
- ) cds_unordered
- ON cds_unordered.artist = me.artistid
- LEFT JOIN track tracks
- ON tracks.cd = cds_unordered.cdid
- WHERE ( me.rank = ? )
- ORDER BY me.name ASC, me.artistid DESC, tracks.cd
- )},
- [
- [ { sqlt_datatype => 'integer', dbic_colname => 'me.rank' } => 13 ],
- [ $ROWS => 3 ],
- [ $OFFSET => 3 ],
- [ { sqlt_datatype => 'integer', dbic_colname => 'me.rank' } => 13 ],
- ],
- 'correct SQL on limited prefetch over search_related ordered by root',
-);
+my $hri_contents = [
+ {
+ artist => 1, cdid => 1, genreid => 1, single_track => undef, title => "Spoonful of bees", year => 1999, tracks => [
+ { cd => 1, last_updated_at => undef, last_updated_on => undef, position => 1, title => "The Bees Knees", trackid => 16 },
+ { cd => 1, last_updated_at => undef, last_updated_on => undef, position => 3, title => "Beehind You", trackid => 18 },
+ { cd => 1, last_updated_at => undef, last_updated_on => undef, position => 2, title => "Apiary", trackid => 17 },
+ ],
+ },
+ {
+ artist => 1, cdid => 3, genreid => undef, single_track => undef, title => "Caterwaulin' Blues", year => 1997, tracks => [
+ { cd => 3, last_updated_at => undef, last_updated_on => undef, position => 1, title => "Yowlin", trackid => 7 },
+ { cd => 3, last_updated_at => undef, last_updated_on => undef, position => 2, title => "Howlin", trackid => 8 },
+ { cd => 3, last_updated_at => undef, last_updated_on => undef, position => 3, title => "Fowlin", trackid => 9 },
+ ],
+ },
+ {
+ artist => 3, cdid => 5, genreid => undef, single_track => undef, title => "Come Be Depressed With Us", year => 1998, tracks => [
+ { cd => 5, last_updated_at => undef, last_updated_on => undef, position => 2, title => "Under The Weather", trackid => 14 },
+ { cd => 5, last_updated_at => undef, last_updated_on => undef, position => 3, title => "Suicidal", trackid => 15 },
+ { cd => 5, last_updated_at => undef, last_updated_on => undef, position => 1, title => "Sad", trackid => 13 },
+ ],
+ },
+ {
+ artist => 1, cdid => 2, genreid => undef, single_track => undef, title => "Forkful of bees", year => 2001, tracks => [
+ { cd => 2, last_updated_at => undef, last_updated_on => undef, position => 1, title => "Stung with Success", trackid => 4 },
+ { cd => 2, last_updated_at => undef, last_updated_on => undef, position => 2, title => "Stripy", trackid => 5 },
+ { cd => 2, last_updated_at => undef, last_updated_on => undef, position => 3, title => "Sticky Honey", trackid => 6 },
+ ],
+ },
+ {
+ artist => 2, cdid => 4, genreid => undef, single_track => undef, title => "Generic Manufactured Singles", year => 2001, tracks => [
+ { cd => 4, last_updated_at => undef, last_updated_on => undef, position => 3, title => "No More Ideas", trackid => 12 },
+ { cd => 4, last_updated_at => undef, last_updated_on => undef, position => 2, title => "Boring Song", trackid => 11 },
+ { cd => 4, last_updated_at => undef, last_updated_on => undef, position => 1, title => "Boring Name", trackid => 10},
+ ],
+ },
+];
-# note: we only requested "get all cds of all artists with rank 13 then order
-# by the artist name and give me the fourth, fifth and sixth", consequently the
-# cds that belong to the same artist are unordered; fortunately we know that
-# the first artist have 3 cds and the second and third artist both have only
-# one, so the first 3 cds belong to the first artist and the fourth and fifth
-# cds belong to the second and third artist, respectively, and there's no sixth
-# row
-is_deeply (
+is_deeply(
$filtered_cd_rs->all_hri,
- [
- {
- 'artist' => '2',
- 'cdid' => '4',
- 'genreid' => undef,
- 'single_track' => undef,
- 'title' => 'Generic Manufactured Singles',
- 'tracks' => [
- {
- 'cd' => '4',
- 'last_updated_at' => undef,
- 'last_updated_on' => undef,
- 'position' => '1',
- 'title' => 'Boring Name',
- 'trackid' => '10'
- },
- {
- 'cd' => '4',
- 'last_updated_at' => undef,
- 'last_updated_on' => undef,
- 'position' => '2',
- 'title' => 'Boring Song',
- 'trackid' => '11'
- },
- {
- 'cd' => '4',
- 'last_updated_at' => undef,
- 'last_updated_on' => undef,
- 'position' => '3',
- 'title' => 'No More Ideas',
- 'trackid' => '12'
- }
- ],
- 'year' => '2001'
- },
- {
- 'artist' => '3',
- 'cdid' => '5',
- 'genreid' => undef,
- 'single_track' => undef,
- 'title' => 'Come Be Depressed With Us',
- 'tracks' => [
- {
- 'cd' => '5',
- 'last_updated_at' => undef,
- 'last_updated_on' => undef,
- 'position' => '1',
- 'title' => 'Sad',
- 'trackid' => '13'
- },
- {
- 'cd' => '5',
- 'last_updated_at' => undef,
- 'last_updated_on' => undef,
- 'position' => '3',
- 'title' => 'Suicidal',
- 'trackid' => '15'
- },
- {
- 'cd' => '5',
- 'last_updated_at' => undef,
- 'last_updated_on' => undef,
- 'position' => '2',
- 'title' => 'Under The Weather',
- 'trackid' => '14'
- }
- ],
- 'year' => '1998'
- }
- ],
- 'Correctly ordered result',
+ $hri_contents,
+ 'Expected ordered unlimited contents',
);
+for (
+ [ 0, 1 ],
+ [ 2, 0 ],
+ [ 20, 2 ],
+ [ 1, 3 ],
+ [ 2, 4 ],
+) {
+ my ($limit, $offset) = @$_;
+
+ my $rs = $filtered_cd_rs->search({}, { $limit ? (rows => $limit) : (), offset => $offset });
+
+ my $used_limit = $limit || DBIx::Class::SQLMaker->__max_int;
+ my $offset_str = $offset ? 'OFFSET ?' : '';
+
+ is_same_sql_bind(
+ $rs->as_query,
+ qq{(
+ SELECT "cds_unordered"."cdid", "cds_unordered"."artist", "cds_unordered"."title", "cds_unordered"."year", "cds_unordered"."genreid", "cds_unordered"."single_track",
+ "tracks"."trackid", "tracks"."cd", "tracks"."position", "tracks"."title", "tracks"."last_updated_on", "tracks"."last_updated_at"
+ FROM "artist" "me"
+ JOIN (
+ SELECT "cds_unordered"."cdid", "cds_unordered"."artist", "cds_unordered"."title", "cds_unordered"."year", "cds_unordered"."genreid", "cds_unordered"."single_track"
+ FROM "artist" "me"
+ JOIN cd "cds_unordered"
+ ON "cds_unordered"."artist" = "me"."artistid"
+ LEFT JOIN "genre" "genre"
+ ON "genre"."genreid" = "cds_unordered"."genreid"
+ LEFT JOIN "track" "tracks"
+ ON "tracks"."cd" = "cds_unordered"."cdid"
+ WHERE "me"."rank" = ?
+ GROUP BY "cds_unordered"."cdid", "cds_unordered"."artist", "cds_unordered"."title", "cds_unordered"."year", "cds_unordered"."genreid", "cds_unordered"."single_track"
+ ORDER BY MAX("genre"."name") DESC,
+ MAX( tracks.title ) DESC,
+ MIN("me"."name"),
+ "year" DESC,
+ "cds_unordered"."title" DESC
+ LIMIT ?
+ $offset_str
+ ) "cds_unordered"
+ ON "cds_unordered"."artist" = "me"."artistid"
+ LEFT JOIN "genre" "genre"
+ ON "genre"."genreid" = "cds_unordered"."genreid"
+ LEFT JOIN "track" "tracks"
+ ON "tracks"."cd" = "cds_unordered"."cdid"
+ WHERE "me"."rank" = ?
+ ORDER BY "genre"."name" DESC,
+ tracks.title DESC,
+ "me"."name" ASC,
+ "year" DESC,
+ "cds_unordered"."title" DESC
+ )},
+ [
+ [ { sqlt_datatype => 'integer', dbic_colname => 'me.rank' } => 13 ],
+ [ $ROWS => $used_limit ],
+ $offset ? [ $OFFSET => $offset ] : (),
+ [ { sqlt_datatype => 'integer', dbic_colname => 'me.rank' } => 13 ],
+ ],
+ "correct SQL on prefetch over search_related ordered by external joins with limit '$limit', offset '$offset'",
+ );
+
+ is_deeply(
+ $rs->all_hri,
+ [ @{$hri_contents}[$offset .. List::Util::min( $used_limit+$offset-1, $#$hri_contents)] ],
+ "Correct slice of the resultset returned with limit '$limit', offset '$offset'",
+ );
+}
+
done_testing;
$schema->storage->debugcb(sub { $queries++ });
$schema->storage->debug(1);
-
my $pref = $schema->resultset ('Artist')
->search ({ 'me.artistid' => $artist->id }, { prefetch => { cds => 'genre' } })
->next;
is ($pref->cds->count, 3, 'Correct number of CDs prefetched');
is ($pref->cds->search_related ('genre')->count, 1, 'Only one of the prefetched cds has a prefetched genre');
-
is ($queries, 1, 'All happened within one query only');
$schema->storage->debugcb($orig_cb);
$schema->storage->debug(0);
-
done_testing;
--- /dev/null
+use strict;
+use warnings;
+
+use Test::More;
+use lib qw(t/lib);
+use DBICTest;
+
+my $schema = DBICTest->init_schema();
+
+my $cds_rs = $schema->resultset('CD')->search(
+ [
+ {
+ 'me.title' => "Caterwaulin' Blues",
+ 'cds.title' => { '!=' => 'Forkful of bees' }
+ },
+ {
+ 'me.title' => { '!=', => "Caterwaulin' Blues" },
+ 'cds.title' => 'Forkful of bees'
+ },
+ ],
+ {
+ order_by => 'me.cdid',
+ prefetch => { artist => 'cds' },
+ result_class => 'DBIx::Class::ResultClass::HashRefInflator',
+ },
+);
+
+is_deeply [ $cds_rs->all ], [
+ {
+ 'single_track' => undef,
+ 'cdid' => '1',
+ 'artist' => {
+ 'cds' => [
+ {
+ 'single_track' => undef,
+ 'artist' => '1',
+ 'cdid' => '2',
+ 'title' => 'Forkful of bees',
+ 'genreid' => undef,
+ 'year' => '2001'
+ },
+ ],
+ 'artistid' => '1',
+ 'charfield' => undef,
+ 'name' => 'Caterwauler McCrae',
+ 'rank' => '13'
+ },
+ 'title' => 'Spoonful of bees',
+ 'year' => '1999',
+ 'genreid' => '1'
+ },
+ {
+ 'single_track' => undef,
+ 'cdid' => '2',
+ 'artist' => {
+ 'cds' => [
+ {
+ 'single_track' => undef,
+ 'artist' => '1',
+ 'cdid' => '2',
+ 'title' => 'Forkful of bees',
+ 'genreid' => undef,
+ 'year' => '2001'
+ },
+ ],
+ 'artistid' => '1',
+ 'charfield' => undef,
+ 'name' => 'Caterwauler McCrae',
+ 'rank' => '13'
+ },
+ 'title' => 'Forkful of bees',
+ 'year' => '2001',
+ 'genreid' => undef
+ },
+ {
+ 'single_track' => undef,
+ 'cdid' => '3',
+ 'artist' => {
+ 'cds' => [
+ {
+ 'single_track' => undef,
+ 'artist' => '1',
+ 'cdid' => '3',
+ 'title' => 'Caterwaulin\' Blues',
+ 'genreid' => undef,
+ 'year' => '1997'
+ },
+ {
+ 'single_track' => undef,
+ 'artist' => '1',
+ 'cdid' => '1',
+ 'title' => 'Spoonful of bees',
+ 'genreid' => '1',
+ 'year' => '1999'
+ }
+ ],
+ 'artistid' => '1',
+ 'charfield' => undef,
+ 'name' => 'Caterwauler McCrae',
+ 'rank' => '13'
+ },
+ 'title' => 'Caterwaulin\' Blues',
+ 'year' => '1997',
+ 'genreid' => undef
+ }
+], 'multi-level prefetch with restrictions ok';
+
+done_testing;
use warnings;
use Test::More;
+use Test::Exception;
use lib qw(t/lib);
use DBICTest;
my $schema = DBICTest->init_schema();
my $orig_debug = $schema->storage->debug;
-plan tests => 44;
-
my $queries = 0;
$schema->storage->debugcb(sub { $queries++; });
$schema->storage->debug(1);
@artists = $rs->search(undef, { prefetch => 'cds', order_by => 'artistid' });
is(scalar @artists, 5, 'has_many prefetch with adjacent empty rows ok');
+lives_ok { @artists = $rs->search(undef, {
+ join => ['cds'],
+ prefetch => [],
+ rows => 20,
+ });
+} 'join and empty prefetch ok';
+
# -------------
#
# Tests for multilevel has_many prefetch
my $rs = shift;
my $struc = {};
+ # all of these ought to work, but do not for some reason
+ # a noop cloning search() pollution?
+ #foreach my $art ( $rs->search({}, { order_by => 'me.artistid' })->all ) {
+ #foreach my $art ( $rs->search({}, {})->all ) {
+ #foreach my $art ( $rs->search()->all ) {
foreach my $art ( $rs->all ) {
foreach my $cd ( $art->cds ) {
foreach my $track ( $cd->tracks ) {
$schema->storage->debug($orig_debug);
$schema->storage->debugobj->callback(undef);
+
+done_testing;
+++ /dev/null
-use strict;
-use warnings;
-
-use Test::More;
-use lib qw(t/lib);
-use DBICTest;
-use PrefetchBug;
-
-my $schema = PrefetchBug->connect( DBICTest->_database (quote_char => '"') );
-ok( $schema, 'Connected to PrefetchBug schema OK' );
-
-$schema->storage->dbh->do(<<"EOF");
-CREATE TABLE prefetchbug_left (
- id INTEGER PRIMARY KEY
-)
-EOF
-
-$schema->storage->dbh->do(<<"EOF");
-CREATE TABLE prefetchbug_right (
- id INTEGER PRIMARY KEY,
- name TEXT,
- category TEXT,
- description TEXT,
- propagates INT,
- locked INT
-)
-EOF
-
-$schema->storage->dbh->do(<<"EOF");
-CREATE TABLE prefetchbug_left_right (
- left_id INTEGER REFERENCES prefetchbug_left(id),
- right_id INTEGER REFERENCES prefetchbug_right(id),
- value TEXT,
- PRIMARY KEY (left_id, right_id)
-)
-EOF
-
-# Test simple has_many prefetch:
-
-my $leftc = $schema->resultset('Left')->create({});
-
-my $rightc = $schema->resultset('Right')->create({ id => 60, name => 'Johnny', category => 'something', description=> 'blah', propagates => 0, locked => 1 });
-$rightc->create_related('prefetch_leftright', { left => $leftc, value => 'lr' });
-
-# start with fresh whatsit
-my $left = $schema->resultset('Left')->find({ id => $leftc->id });
-
-my @left_rights = $left->search_related('prefetch_leftright', {}, { prefetch => 'right' });
-ok(defined $left_rights[0]->right, 'Prefetched Right side correctly');
-
-done_testing;
$schema->storage->debug ($orig_debug);
}, 'distinct generally works with prefetch on deep search_related chains');
+# pathological "user knows what they're doing" case
+# lifted from production somewhere
+{
+ $schema->resultset('CD')
+ ->search({ cdid => [1,2] })
+ ->search_related('tracks', { position => [3,1] })
+ ->delete_all;
+
+ my $rs = $schema->resultset('CD')->search_related('tracks', {}, {
+ group_by => 'me.title',
+ columns => { title => 'me.title', max_trk => \ 'MAX(tracks.position)' },
+ });
+
+ is_deeply(
+ $rs->all_hri,
+ [
+ { title => "Caterwaulin' Blues", max_trk => 3 },
+ { title => "Come Be Depressed With Us", max_trk => 3 },
+ { title => "Forkful of bees", max_trk => 1 },
+ { title => "Generic Manufactured Singles", max_trk => 3 },
+ { title => "Spoonful of bees", max_trk => 1 },
+ ],
+ 'Expected nonsense',
+ );
+}
+
done_testing;
ON tracks.cd = cds.cdid
WHERE artwork.cd_id IS NULL
OR tracks.title != ?
- GROUP BY me.artistid + ?, me.artistid, me.name, cds.cdid, cds.artist, cds.title, cds.year, cds.genreid, cds.single_track
- ORDER BY name DESC, cds.artist, cds.year ASC
+ ORDER BY name DESC
)',
[
$bind_int_resolved->(), # outer select
$bind_int_resolved->(), # inner group_by
[ $ROWS => 3 ],
$bind_vc_resolved->(), # outer where
- $bind_int_resolved->(), # outer group_by
],
'Expected SQL on complex limited prefetch'
);
throws_ok (
sub { $use_prefetch->single },
- qr/resultsets prefetching has_many/,
+ qr/\Qsingle() can not be used on resultsets collapsing a has_many/,
'single() with multiprefetch is illegal',
);
JOIN artist artist
ON artist.artistid = me.artist
WHERE ( ( artist.name = ? AND me.year = ? ) )
- ORDER BY tracks.cd
)',
[
[ { sqlt_datatype => 'varchar', sqlt_size => 100, dbic_colname => 'artist.name' } => 'foo' ],
'16 correct cds found'
);
-TODO: {
-local $TODO = 'Prefetch on custom rels can not work until the collapse rewrite is finished '
- . '(currently collapser requires a right-side (which is indeterministic) order-by)';
lives_ok {
my @all_artists_with_80_cds_pref = $schema->resultset("Artist")->search
);
} 'prefetchy-fetchy-fetch';
-} # end of TODO
# try to create_related a 80s cd
--- /dev/null
+use strict;
+use warnings;
+no warnings 'exiting';
+
+use Test::More;
+use Test::Deep;
+use lib qw(t/lib);
+use DBICTest;
+
+my $schema = DBICTest->init_schema(no_populate => 1);
+
+$schema->resultset('Artist')->create({ name => 'JMJ', cds => [{
+ title => 'Magnetic Fields',
+ year => 1981,
+ genre => { name => 'electro' },
+ tracks => [
+ { title => 'm1' },
+ { title => 'm2' },
+ { title => 'm3' },
+ { title => 'm4' },
+ ],
+} ] });
+
+$schema->resultset('CD')->create({
+ title => 'Equinoxe',
+ year => 1978,
+ artist => { name => 'JMJ' },
+ genre => { name => 'electro' },
+ tracks => [
+ { title => 'e1' },
+ { title => 'e2' },
+ { title => 'e3' },
+ ],
+ single_track => {
+ title => 'o1',
+ cd => {
+ title => 'Oxygene',
+ year => 1976,
+ artist => { name => 'JMJ' },
+ tracks => [
+ { title => 'o2', position => 2}, # the position should not be needed here, bug in MC
+ ],
+ },
+ },
+});
+
+$schema->resultset('CD')->create({ artist => 1, year => 1977, title => "fuzzy_1" });
+
+$schema->resultset('Artist')->create({ name => "${_}_cdless" })
+ for (qw( Z A ));
+
+# subs at the end of the test refer to this
+my $native_inflator;
+
+### TESTS START
+# run entire test twice - with and without "native inflator"
+INFTYPE: for ('', '(native inflator)') {
+
+ $native_inflator = $_;
+
+ cmp_structures(
+ rs_contents( $schema->resultset ('CD')->search_rs ({}, {
+ prefetch => { single_track => { cd => 'artist' } },
+ order_by => 'me.cdid',
+ }) ),
+ [
+ [
+ { cdid => 1, single_track => undef, artist => 1, genreid => 1, year => 1981, title => "Magnetic Fields" },
+ { single_track => code(sub { null_branch ( \@_, [
+ { trackid => undef, title => undef, position => undef, cd => undef, last_updated_at => undef, last_updated_on => undef },
+ { cd => code(sub { null_branch ( \@_, [
+ { cdid => undef, single_track => undef, artist => undef, genreid => undef, year => undef, title => undef },
+ {
+ artist => code(sub { null_branch ( \@_, [
+ { artistid => undef, name => undef, charfield => undef, rank => undef }
+ ] ) } )
+ }
+ ] ) } ) }
+ ] ) } ) }
+ ],
+ [
+ { cdid => 2, single_track => undef, artist => 1, genreid => undef, year => 1976, title => "Oxygene" },
+ { single_track => code(sub { null_branch ( \@_, [
+ { trackid => undef, title => undef, position => undef, cd => undef, last_updated_at => undef, last_updated_on => undef },
+ { cd => code(sub { null_branch ( \@_, [
+ { cdid => undef, single_track => undef, artist => undef, genreid => undef, year => undef, title => undef },
+ {
+ artist => code(sub { null_branch ( \@_, [
+ { artistid => undef, name => undef, charfield => undef, rank => undef }
+ ] ) } )
+ }
+ ] ) } ) }
+ ] ) } ) }
+ ],
+ [
+ { cdid => 3, single_track => 6, artist => 1, genreid => 1, year => 1978, title => "Equinoxe" },
+ { single_track => [
+ { trackid => 6, title => 'o1', position => 1, cd => 2, last_updated_at => undef, last_updated_on => undef },
+ { cd => [
+ { cdid => 2, single_track => undef, artist => 1, genreid => undef, year => 1976, title => "Oxygene" },
+ {
+ artist => [
+ { artistid => 1, name => 'JMJ', charfield => undef, rank => 13 }
+ ]
+ }
+ ] }
+ ] }
+ ],
+ [
+ { cdid => 4, single_track => undef, artist => 1, genreid => undef, year => 1977, title => "fuzzy_1" },
+ { single_track => code(sub { null_branch ( \@_, [
+ { trackid => undef, title => undef, position => undef, cd => undef, last_updated_at => undef, last_updated_on => undef },
+ { cd => code(sub { null_branch ( \@_, [
+ { cdid => undef, single_track => undef, artist => undef, genreid => undef, year => undef, title => undef },
+ {
+ artist => code(sub { null_branch ( \@_, [
+ { artistid => undef, name => undef, charfield => undef, rank => undef }
+ ] ) } )
+ }
+ ] ) } ) }
+ ] ) } ) }
+ ],
+ ],
+ "Simple 1:1 descend with classic prefetch $native_inflator"
+ );
+
+ cmp_structures(
+ rs_contents( $schema->resultset ('CD')->search_rs ({}, {
+ join => { single_track => { cd => 'artist' } },
+ columns => [
+ { 'year' => 'me.year' },
+ { 'genreid' => 'me.genreid' },
+ { 'single_track.cd.artist.artistid' => 'artist.artistid' },
+ { 'title' => 'me.title' },
+ { 'artist' => 'me.artist' },
+ ],
+ order_by => 'me.cdid',
+ }) ),
+ [
+ [
+ { artist => 1, genreid => 1, year => 1981, title => "Magnetic Fields" },
+ { single_track => code(sub { null_branch ( \@_, [
+ undef,
+ { cd => [
+ undef,
+ {
+ artist => [
+ { artistid => undef }
+ ]
+ }
+ ] }
+ ] ) } ) }
+ ],
+ [
+ { artist => 1, genreid => undef, year => 1976, title => "Oxygene" },
+ { single_track => code(sub { null_branch ( \@_, [
+ undef,
+ { cd => [
+ undef,
+ {
+ artist => [
+ { artistid => undef }
+ ]
+ }
+ ] }
+ ] ) } ) }
+ ],
+ [
+ { artist => 1, genreid => 1, year => 1978, title => "Equinoxe" },
+ { single_track => [
+ undef,
+ { cd => [
+ undef,
+ {
+ artist => [
+ { artistid => 1 }
+ ]
+ }
+ ] }
+ ] }
+ ],
+ [
+ { artist => 1, genreid => undef, year => 1977, title => "fuzzy_1" },
+ { single_track => code(sub { null_branch ( \@_, [
+ undef,
+ { cd => [
+ undef,
+ {
+ artist => [
+ { artistid => undef }
+ ]
+ }
+ ] }
+ ] ) } ) }
+ ],
+ ],
+ "Simple 1:1 descend with missing selectors $native_inflator",
+ );
+
+ cmp_structures(
+ rs_contents( $schema->resultset ('CD')->search_rs ({}, {
+ prefetch => [ { single_track => { cd => { artist => { cds => 'tracks' } } } } ],
+ order_by => [qw/me.cdid tracks.trackid/],
+ }) ),
+ [
+ [
+ { cdid => 1, single_track => undef, artist => 1, genreid => 1, year => 1981, title => "Magnetic Fields" },
+ { single_track => code(sub { null_collapsed_branch ( \@_, [
+ { trackid => undef, title => undef, position => undef, cd => undef, last_updated_at => undef, last_updated_on => undef },
+ { cd => [
+ { cdid => undef, single_track => undef, artist => undef, genreid => undef, year => undef, title => undef },
+ {
+ artist => [
+ { artistid => undef, name => undef, charfield => undef, rank => undef },
+ { cds => code(sub { null_collapsed_branch ( \@_, [ [
+ { cdid => undef, single_track => undef, artist => undef, genreid => undef, year => undef, title => undef },
+ { tracks => code(sub { null_collapsed_branch ( \@_, [ [
+ { trackid => undef, title => undef, position => undef, cd => undef, last_updated_at => undef, last_updated_on => undef },
+ ] ] ) } ) },
+ ] ] ) } ) },
+ ],
+ },
+ ] },
+ ] ) } ) },
+ ],
+ [
+ { cdid => 2, single_track => undef, artist => 1, genreid => undef, year => 1976, title => "Oxygene" },
+ { single_track => code(sub { null_collapsed_branch ( \@_, [
+ { trackid => undef, title => undef, position => undef, cd => undef, last_updated_at => undef, last_updated_on => undef },
+ { cd => [
+ { cdid => undef, single_track => undef, artist => undef, genreid => undef, year => undef, title => undef },
+ {
+ artist => [
+ { artistid => undef, name => undef, charfield => undef, rank => undef },
+ { cds => code(sub { null_collapsed_branch ( \@_, [ [
+ { cdid => undef, single_track => undef, artist => undef, genreid => undef, year => undef, title => undef },
+ { tracks => code(sub { null_collapsed_branch ( \@_, [ [
+ { trackid => undef, title => undef, position => undef, cd => undef, last_updated_at => undef, last_updated_on => undef },
+ ] ] ) } ) },
+ ] ] ) } ) },
+ ],
+ },
+ ] },
+ ] ) } ) },
+ ],
+ [
+ { cdid => 3, single_track => 6, artist => 1, genreid => 1, year => 1978, title => "Equinoxe" },
+ { single_track => [
+ { trackid => 6, title => 'o1', position => 1, cd => 2, last_updated_at => undef, last_updated_on => undef },
+ { cd => [
+ { cdid => 2, single_track => undef, artist => 1, genreid => undef, year => 1976, title => "Oxygene" },
+ {
+ artist => [
+ { artistid => 1, name => 'JMJ', charfield => undef, rank => 13 },
+ { cds => [
+ [
+ { cdid => 4, single_track => undef, artist => 1, genreid => undef, year => 1977, title => "fuzzy_1" },
+ { tracks => code(sub { null_collapsed_branch ( \@_, [
+ [ { trackid => undef, title => undef, position => undef, cd => undef, last_updated_at => undef, last_updated_on => undef } ],
+ ] ) } ) },
+ ],
+ [
+ { cdid => 1, single_track => undef, artist => 1, genreid => 1, year => 1981, title => "Magnetic Fields" },
+ { tracks => [
+ [ { trackid => 1, title => 'm1', position => 1, cd => 1, last_updated_at => undef, last_updated_on => undef } ],
+ [ { trackid => 2, title => 'm2', position => 2, cd => 1, last_updated_at => undef, last_updated_on => undef } ],
+ [ { trackid => 3, title => 'm3', position => 3, cd => 1, last_updated_at => undef, last_updated_on => undef } ],
+ [ { trackid => 4, title => 'm4', position => 4, cd => 1, last_updated_at => undef, last_updated_on => undef } ],
+ ]},
+ ],
+ [
+ { cdid => 2, single_track => undef, artist => 1, genreid => undef, year => 1976, title => "Oxygene" },
+ { tracks => [
+ [ { trackid => 5, title => 'o2', position => 2, cd => 2, last_updated_at => undef, last_updated_on => undef } ],
+ [ { trackid => 6, title => 'o1', position => 1, cd => 2, last_updated_at => undef, last_updated_on => undef } ],
+ ]},
+ ],
+ [
+ { cdid => 3, single_track => 6, artist => 1, genreid => 1, year => 1978, title => "Equinoxe" },
+ { tracks => [
+ [ { trackid => 7, title => 'e1', position => 1, cd => 3, last_updated_at => undef, last_updated_on => undef } ],
+ [ { trackid => 8, title => 'e2', position => 2, cd => 3, last_updated_at => undef, last_updated_on => undef } ],
+ [ { trackid => 9, title => 'e3', position => 3, cd => 3, last_updated_at => undef, last_updated_on => undef } ],
+ ]},
+ ],
+ ]},
+ ]
+ }
+ ] }
+ ] }
+ ],
+ [
+ { cdid => 4, single_track => undef, artist => 1, genreid => undef, year => 1977, title => "fuzzy_1" },
+ { single_track => code(sub { null_collapsed_branch ( \@_, [
+ { trackid => undef, title => undef, position => undef, cd => undef, last_updated_at => undef, last_updated_on => undef },
+ { cd => [
+ { cdid => undef, single_track => undef, artist => undef, genreid => undef, year => undef, title => undef },
+ {
+ artist => [
+ { artistid => undef, name => undef, charfield => undef, rank => undef },
+ { cds => code(sub { null_collapsed_branch ( \@_, [ [
+ { cdid => undef, single_track => undef, artist => undef, genreid => undef, year => undef, title => undef },
+ { tracks => code(sub { null_collapsed_branch ( \@_, [ [
+ { trackid => undef, title => undef, position => undef, cd => undef, last_updated_at => undef, last_updated_on => undef },
+ ] ] ) } ) },
+ ] ] ) } ) },
+ ],
+ },
+ ] },
+ ] ) } ) },
+ ],
+ ],
+ "Collapsing 1:1 ending in chained has_many with classic prefetch $native_inflator",
+ );
+
+ cmp_structures (
+ rs_contents( $schema->resultset ('Artist')->search_rs ({}, {
+ join => { cds => 'tracks' },
+ '+columns' => [
+ (map { "cds.$_" } $schema->source('CD')->columns),
+ (map { +{ "cds.tracks.$_" => "tracks.$_" } } $schema->source('Track')->columns),
+ ],
+ order_by => [qw/cds.cdid tracks.trackid me.name/],
+ }) ),
+ [
+ [
+ { artistid => 3, name => 'A_cdless', charfield => undef, rank => 13 },
+ { cds => code(sub { null_branch ( \@_, [
+ { cdid => undef, single_track => undef, artist => undef, genreid => undef, year => undef, title => undef },
+ { tracks => code(sub { null_branch ( \@_, [
+ { trackid => undef, title => undef, position => undef, cd => undef, last_updated_at => undef, last_updated_on => undef },
+ ] ) } ) },
+ ] ) } ) },
+ ],
+ [
+ { artistid => 2, name => 'Z_cdless', charfield => undef, rank => 13 },
+ { cds => code(sub { null_branch ( \@_, [
+ { cdid => undef, single_track => undef, artist => undef, genreid => undef, year => undef, title => undef },
+ { tracks => code(sub { null_branch ( \@_, [
+ { trackid => undef, title => undef, position => undef, cd => undef, last_updated_at => undef, last_updated_on => undef },
+ ] ) } ) },
+ ] ) } ) },
+ ],
+ [
+ { artistid => 1, name => 'JMJ', charfield => undef, rank => 13 },
+ { cds => [
+ { cdid => 1, single_track => undef, artist => 1, genreid => 1, year => 1981, title => "Magnetic Fields" },
+ { tracks => [
+ { trackid => 1, title => 'm1', position => 1, cd => 1, last_updated_at => undef, last_updated_on => undef },
+ ]},
+ ]},
+ ],
+ [
+ { artistid => 1, name => 'JMJ', charfield => undef, rank => 13 },
+ { cds => [
+ { cdid => 1, single_track => undef, artist => 1, genreid => 1, year => 1981, title => "Magnetic Fields" },
+ { tracks => [
+ { trackid => 2, title => 'm2', position => 2, cd => 1, last_updated_at => undef, last_updated_on => undef },
+ ]},
+ ]},
+ ],
+ [
+ { artistid => 1, name => 'JMJ', charfield => undef, rank => 13 },
+ { cds => [
+ { cdid => 1, single_track => undef, artist => 1, genreid => 1, year => 1981, title => "Magnetic Fields" },
+ { tracks => [
+ { trackid => 3, title => 'm3', position => 3, cd => 1, last_updated_at => undef, last_updated_on => undef },
+ ]},
+ ]},
+ ],
+ [
+ { artistid => 1, name => 'JMJ', charfield => undef, rank => 13 },
+ { cds => [
+ { cdid => 1, single_track => undef, artist => 1, genreid => 1, year => 1981, title => "Magnetic Fields" },
+ { tracks => [
+ { trackid => 4, title => 'm4', position => 4, cd => 1, last_updated_at => undef, last_updated_on => undef },
+ ]},
+ ]},
+ ],
+ [
+ { artistid => 1, name => 'JMJ', charfield => undef, rank => 13 },
+ { cds => [
+ { cdid => 2, single_track => undef, artist => 1, genreid => undef, year => 1976, title => "Oxygene" },
+ { tracks => [
+ { trackid => 5, title => 'o2', position => 2, cd => 2, last_updated_at => undef, last_updated_on => undef },
+ ]},
+ ]},
+ ],
+ [
+ { artistid => 1, name => 'JMJ', charfield => undef, rank => 13 },
+ { cds => [
+ { cdid => 2, single_track => undef, artist => 1, genreid => undef, year => 1976, title => "Oxygene" },
+ { tracks => [
+ { trackid => 6, title => 'o1', position => 1, cd => 2, last_updated_at => undef, last_updated_on => undef },
+ ]},
+ ]},
+ ],
+ [
+ { artistid => 1, name => 'JMJ', charfield => undef, rank => 13 },
+ { cds => [
+ { cdid => 3, single_track => 6, artist => 1, genreid => 1, year => 1978, title => "Equinoxe" },
+ { tracks => [
+ { trackid => 7, title => 'e1', position => 1, cd => 3, last_updated_at => undef, last_updated_on => undef },
+ ]},
+ ]},
+ ],
+ [
+ { artistid => 1, name => 'JMJ', charfield => undef, rank => 13 },
+ { cds => [
+ { cdid => 3, single_track => 6, artist => 1, genreid => 1, year => 1978, title => "Equinoxe" },
+ { tracks => [
+ { trackid => 8, title => 'e2', position => 2, cd => 3, last_updated_at => undef, last_updated_on => undef },
+ ]},
+ ]},
+ ],
+ [
+ { artistid => 1, name => 'JMJ', charfield => undef, rank => 13 },
+ { cds => [
+ { cdid => 3, single_track => 6, artist => 1, genreid => 1, year => 1978, title => "Equinoxe" },
+ { tracks => [
+ { trackid => 9, title => 'e3', position => 3, cd => 3, last_updated_at => undef, last_updated_on => undef },
+ ]},
+ ]},
+ ],
+ [
+ { artistid => 1, name => 'JMJ', charfield => undef, rank => 13 },
+ { cds => [
+ { cdid => 4, single_track => undef, artist => 1, genreid => undef, year => 1977, title => "fuzzy_1" },
+ { tracks => code(sub { null_branch ( \@_, [
+ { trackid => undef, title => undef, position => undef, cd => undef, last_updated_at => undef, last_updated_on => undef },
+ ] ) } ) },
+ ]},
+ ],
+ ],
+ "Non-Collapsing chained has_many $native_inflator",
+ );
+
+ cmp_structures (
+ rs_contents( $schema->resultset ('Artist')->search_rs ({}, {
+ collapse => 1,
+ join => 'cds',
+ columns => [qw( cds.title cds.artist )],
+ order_by => [qw( me.name cds.title )],
+ }) ),
+ [
+ [
+ undef,
+ { cds => code(sub { null_collapsed_branch ( \@_, [
+ [ { artist => undef, title => undef } ]
+ ] ) } ) },
+ ],
+ [
+ undef,
+ { cds => [
+ [ { artist => 1, title => "Equinoxe" } ],
+ [ { artist => 1, title => "Magnetic Fields" } ],
+ [ { artist => 1, title => "Oxygene" } ],
+ [ { artist => 1, title => "fuzzy_1" } ],
+ ] }
+ ],
+ [
+ undef,
+ { cds => code(sub { null_collapsed_branch ( \@_, [
+ [ { artist => undef, title => undef } ]
+ ] ) } ) },
+ ],
+ ],
+ "Expected output of collapsing 1:M with empty root selection $native_inflator",
+ );
+}
+
+sub null_branch {
+ cmp_deeply(
+ $_[0][0],
+ $native_inflator ? undef : bless( $_[1], $DBIx::Class::ResultSource::RowParser::Util::null_branch_class ),
+ );
+}
+sub null_collapsed_branch {
+ cmp_deeply(
+ $_[0][0],
+ $native_inflator ? [] : bless( $_[1], $DBIx::Class::ResultSource::RowParser::Util::null_branch_class ),
+ );
+}
+
+{
+ package DBICTest::_IRCapture;
+ sub inflate_result { [@_[2,3]] };
+}
+
+sub rs_contents {
+ my $rs = shift;
+ $rs->result_class('DBICTest::_IRCapture');
+ die 'eeeeek - preprocessed $rs' if defined $rs->{_result_inflator}{is_core_row};
+ $rs->{_result_inflator}{is_core_row} = 1 if $native_inflator;
+ [$rs->all],
+}
+
+sub cmp_structures {
+ my ($left, $right, $msg) = @_;
+
+ local $Test::Builder::Level = $Test::Builder::Level + 1;
+ cmp_deeply($left, $right, $msg||()) or next INFTYPE;
+}
+
+done_testing;
--- /dev/null
+use strict;
+use warnings;
+
+use Test::More;
+use Test::Exception;
+use lib qw(t/lib);
+use DBICTest;
+
+# From http://lists.scsys.co.uk/pipermail/dbix-class/2013-February/011119.html
+#
+# > Right, at this point we have an "undefined situation turned into an
+# > unplanned feature", therefore 0.08242 will downgrade the exception to a
+# > single-warning-per-process. This seems like a sane middle ground for
+# > "you gave me an 'as' that worked by accident before - fix it at your
+# > convenience".
+#
+# When the things were reshuffled it became apparent implementing a warning
+# for the HRI case *only* is going to complicate the code a lot, without
+# adding much benefit at this point. So just make sure everything works the
+# way it used to and move on
+
+
+my $s = DBICTest->init_schema;
+
+my $rs_2nd_track = $s->resultset('Track')->search(
+ { 'me.position' => 2 },
+ {
+ join => { cd => 'artist' },
+ 'columns' => [ 'me.title', { 'artist.cdtitle' => 'cd.title' }, 'artist.name' ],
+ order_by => 'artist.name',
+ }
+);
+
+is_deeply (
+ [ map { $_->[-1] } $rs_2nd_track->cursor->all ],
+ [ ('Caterwauler McCrae') x 3, 'Random Boy Band', 'We Are Goth' ],
+ 'Artist name cartesian product correct off cursor',
+);
+
+is_deeply (
+ $rs_2nd_track->all_hri,
+ [
+ {
+ artist => { cdtitle => "Caterwaulin' Blues", name => "Caterwauler McCrae" },
+ title => "Howlin"
+ },
+ {
+ artist => { cdtitle => "Forkful of bees", name => "Caterwauler McCrae" },
+ title => "Stripy"
+ },
+ {
+ artist => { cdtitle => "Spoonful of bees", name => "Caterwauler McCrae" },
+ title => "Apiary"
+ },
+ {
+ artist => { cdtitle => "Generic Manufactured Singles", name => "Random Boy Band" },
+ title => "Boring Song"
+ },
+ {
+ artist => { cdtitle => "Come Be Depressed With Us", name => "We Are Goth" },
+ title => "Under The Weather"
+ }
+ ],
+ 'HRI with invalid inflate map works'
+);
+
+throws_ok
+ { $rs_2nd_track->next }
+ qr!\QInflation into non-existent relationship 'artist' of 'Track' requested, check the inflation specification (columns/as) ending in '...artist.name'!,
+ 'Correct exception on illegal ::Row inflation attempt'
+;
+
+# make sure has_many column redirection does not do weird stuff when collapse is requested
+for my $pref_args (
+ { prefetch => 'cds'},
+ { collapse => 1 }
+) {
+ for my $col_and_join_args (
+ { '+columns' => { 'cd_title' => 'cds_2.title' }, join => [ 'cds', 'cds' ] },
+ { '+columns' => { 'cd_title' => 'cds.title' }, join => 'cds' },
+ { '+columns' => { 'cd_gr_name' => 'genre.name' }, join => { cds => 'genre' } },
+ ) {
+ for my $call (qw(next all first)) {
+
+ my $weird_rs = $s->resultset('Artist')->search({}, {
+ %$col_and_join_args, %$pref_args,
+ });
+
+ throws_ok
+ { $weird_rs->$call }
+ qr/\QResult collapse not possible - selection from a has_many source redirected to the main object/
+ for (1,2);
+ }
+ }
+}
+
+done_testing;
--- /dev/null
+use strict;
+use warnings;
+
+use Test::More;
+use lib qw(t/lib);
+use DBICTest;
+use B::Deparse;
+
+# globally set for the rest of test
+# the rowparser maker does not order its hashes by default for the minuscule
+# speed gain. But it does not disable sorting either - for this test
+# everything will be ordered nicely, and the hash randomization of 5.18
+# will not trip up anything
+use Data::Dumper;
+$Data::Dumper::Sortkeys = 1;
+
+my $schema = DBICTest->init_schema(no_deploy => 1);
+my $infmap = [qw/
+ single_track.cd.artist.name
+ year
+/];
+
+is_same_src (
+ ($schema->source ('CD')->_mk_row_parser({
+ inflate_map => $infmap,
+ }))[0],
+ '$_ = [
+ { year => $_->[1] },
+ { single_track => ( ! defined( $_->[0]) )
+ ? bless( [
+ undef,
+ { cd => [
+ undef,
+ { artist => [
+ { name => $_->[0] },
+ ] },
+ ] },
+ ], __NBC__ )
+ : [
+ undef,
+ { cd => [
+ undef,
+ { artist => [
+ { name => $_->[0] },
+ ] },
+ ] },
+ ]
+ },
+ ] for @{$_[0]}',
+ 'Simple 1:1 descending non-collapsing parser',
+);
+
+$infmap = [qw/
+ single_track.cd.artist.cds.tracks.title
+ single_track.cd.artist.artistid
+ year
+ single_track.cd.artist.cds.cdid
+ title
+ artist
+/];
+
+is_same_src (
+ ($schema->source ('CD')->_mk_row_parser({
+ inflate_map => $infmap,
+ }))[0],
+ '$_ = [
+ { artist => $_->[5], title => $_->[4], year => $_->[2] },
+ {
+ single_track => ( (! defined $_->[0] ) && (! defined $_->[1]) && (! defined $_->[3] ) )
+ ? bless( [
+ undef,
+ {
+ cd => [
+ undef,
+ {
+ artist => [
+ { artistid => $_->[1] },
+ {
+ cds => ( (! defined $_->[0] ) && ( ! defined $_->[3] ) )
+ ? bless ([
+ { cdid => $_->[3] },
+ {
+ tracks => ( ! defined $_->[0] )
+ ? bless ( [{ title => $_->[0] }], __NBC__ )
+ : [{ title => $_->[0] }]
+ }
+ ], __NBC__)
+ : [
+ { cdid => $_->[3] },
+ {
+ tracks => ( ! defined $_->[0] )
+ ? bless ( [{ title => $_->[0] }], __NBC__ )
+ : [{ title => $_->[0] }]
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ ], __NBC__)
+ : [
+ undef,
+ {
+ cd => [
+ undef,
+ {
+ artist => [
+ { artistid => $_->[1] },
+ {
+ cds => ( (! defined $_->[0] ) && ( ! defined $_->[3] ) )
+ ? bless ([
+ { cdid => $_->[3] },
+ {
+ tracks => ( ! defined $_->[0] )
+ ? bless ( [{ title => $_->[0] }], __NBC__ )
+ : [{ title => $_->[0] }]
+ }
+ ], __NBC__)
+ : [
+ { cdid => $_->[3] },
+ {
+ tracks => ( ! defined $_->[0] )
+ ? bless ( [{ title => $_->[0] }], __NBC__ )
+ : [{ title => $_->[0] }]
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ ] for @{$_[0]}',
+ '1:1 descending non-collapsing parser terminating with chained 1:M:M',
+);
+
+is_same_src (
+ ($schema->source ('CD')->_mk_row_parser({
+ prune_null_branches => 1,
+ inflate_map => $infmap,
+ }))[0],
+ '$_ = [
+ { artist => $_->[5], title => $_->[4], year => $_->[2] },
+ {
+ single_track => ( (! defined $_->[0] ) && (! defined $_->[1]) && (! defined $_->[3] ) ) ? undef : [
+ undef,
+ {
+ cd => [
+ undef,
+ {
+ artist => [
+ { artistid => $_->[1] },
+ {
+ cds => ( (! defined $_->[0] ) && ( ! defined $_->[3] ) ) ? undef : [
+ { cdid => $_->[3] },
+ {
+ tracks => ( ! defined $_->[0] ) ? undef : [
+ { title => $_->[0] },
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ ] for @{$_[0]}',
+ '1:1 descending non-collapsing pruning parser terminating with chained 1:M:M',
+);
+
+is_same_src (
+ ($schema->source ('CD')->_mk_row_parser({
+ hri_style => 1,
+ prune_null_branches => 1,
+ inflate_map => $infmap,
+ }))[0],
+ '$_ = {
+ artist => $_->[5], title => $_->[4], year => $_->[2],
+
+ ( single_track => ( (! defined $_->[0] ) && (! defined $_->[1]) && (! defined $_->[3] ) )
+ ? undef
+ : {
+ cd =>
+ {
+ artist => {
+ artistid => $_->[1],
+ ( cds => ( (! defined $_->[0] ) && ( ! defined $_->[3] ) )
+ ? undef
+ : {
+ cdid => $_->[3],
+ ( tracks => ( ! defined $_->[0] )
+ ? undef
+ : { title => $_->[0] }
+ )
+ }
+ )
+ }
+ }
+ }
+ )
+ } for @{$_[0]}',
+ '1:1 descending non-collapsing HRI-direct parser terminating with chained 1:M:M',
+);
+
+
+
+is_deeply (
+ ($schema->source('CD')->_resolve_collapse({ as => {map { $infmap->[$_] => $_ } 0 .. $#$infmap} })),
+ {
+ -identifying_columns => [ 4, 5 ],
+
+ single_track => {
+ -identifying_columns => [ 1, 4, 5 ],
+ -is_optional => 1,
+ -is_single => 1,
+
+ cd => {
+ -identifying_columns => [ 1, 4, 5 ],
+ -is_single => 1,
+
+ artist => {
+ -identifying_columns => [ 1, 4, 5 ],
+ -is_single => 1,
+
+ cds => {
+ -identifying_columns => [ 1, 3, 4, 5 ],
+ -is_optional => 1,
+
+ tracks => {
+ -identifying_columns => [ 0, 1, 3, 4, 5 ],
+ -is_optional => 1,
+ },
+ },
+ },
+ },
+ },
+ },
+ 'Correct collapse map for 1:1 descending chain terminating with chained 1:M:M'
+);
+
+is_same_src (
+ ($schema->source ('CD')->_mk_row_parser({
+ inflate_map => $infmap,
+ collapse => 1,
+ }))[0],
+ ' my $rows_pos = 0;
+ my ($result_pos, @collapse_idx, $cur_row_data, %cur_row_ids);
+
+ while ($cur_row_data = (
+ ( $rows_pos >= 0 and $_[0][$rows_pos++] )
+ ||
+ ( $_[1] and $rows_pos = -1 and $_[1]->() )
+ ) ) {
+
+ $cur_row_ids{0} = $cur_row_data->[0] // "\0NULL\xFF$rows_pos\xFF0\0";
+ $cur_row_ids{1} = $cur_row_data->[1] // "\0NULL\xFF$rows_pos\xFF1\0";
+ $cur_row_ids{3} = $cur_row_data->[3] // "\0NULL\xFF$rows_pos\xFF3\0";
+ $cur_row_ids{4} = $cur_row_data->[4] // "\0NULL\xFF$rows_pos\xFF4\0";
+ $cur_row_ids{5} = $cur_row_data->[5] // "\0NULL\xFF$rows_pos\xFF5\0";
+
+ # a present cref in $_[1] implies lazy prefetch, implies a supplied stash in $_[2]
+ $_[1] and $result_pos and ! $collapse_idx[0]{$cur_row_ids{4}}{$cur_row_ids{5}} and (unshift @{$_[2]}, $cur_row_data) and last;
+
+ # the rowdata itself for root node
+ $collapse_idx[0]{$cur_row_ids{4}}{$cur_row_ids{5}} //= $_[0][$result_pos++] = [{ artist => $cur_row_data->[5], title => $cur_row_data->[4], year => $cur_row_data->[2] }];
+
+ # prefetch data of single_track (placed in root)
+ $collapse_idx[0]{$cur_row_ids{4}}{$cur_row_ids{5}}[1]{single_track} //= $collapse_idx[1]{$cur_row_ids{1}}{$cur_row_ids{4}}{$cur_row_ids{5}} = [];
+ defined($cur_row_data->[1]) or bless( $collapse_idx[0]{$cur_row_ids{4}}{$cur_row_ids{5}}[1]{single_track}, __NBC__ );
+
+ # prefetch data of cd (placed in single_track)
+ $collapse_idx[1]{$cur_row_ids{1}}{$cur_row_ids{4}}{$cur_row_ids{5}}[1]{cd} //= $collapse_idx[2]{$cur_row_ids{1}}{$cur_row_ids{4}}{$cur_row_ids{5}} = [];
+
+ # prefetch data of artist ( placed in single_track->cd)
+ $collapse_idx[2]{$cur_row_ids{1}}{$cur_row_ids{4}}{$cur_row_ids{5}}[1]{artist} //= $collapse_idx[3]{$cur_row_ids{1}}{$cur_row_ids{4}}{$cur_row_ids{5}} = [{ artistid => $cur_row_data->[1] }];
+
+ # prefetch data of cds (if available)
+ (! $collapse_idx[4]{$cur_row_ids{1}}{$cur_row_ids{3}}{$cur_row_ids{4}}{$cur_row_ids{5}} )
+ and
+ push @{$collapse_idx[3]{$cur_row_ids{1}}{$cur_row_ids{4}}{$cur_row_ids{5}}[1]{cds}}, (
+ $collapse_idx[4]{$cur_row_ids{1}}{$cur_row_ids{3}}{$cur_row_ids{4}}{$cur_row_ids{5}} = [{ cdid => $cur_row_data->[3] }]
+ );
+ defined($cur_row_data->[3]) or bless( $collapse_idx[3]{$cur_row_ids{1}}{$cur_row_ids{4}}{$cur_row_ids{5}}[1]{cds}, __NBC__ );
+
+ # prefetch data of tracks (if available)
+ (! $collapse_idx[5]{$cur_row_ids{0}}{$cur_row_ids{1}}{$cur_row_ids{3}}{$cur_row_ids{4}}{$cur_row_ids{5}} )
+ and
+ push @{$collapse_idx[4]{$cur_row_ids{1}}{$cur_row_ids{3}}{$cur_row_ids{4}}{$cur_row_ids{5}}[1]{tracks}}, (
+ $collapse_idx[5]{$cur_row_ids{0}}{$cur_row_ids{1}}{$cur_row_ids{3}}{$cur_row_ids{4}}{$cur_row_ids{5}} = [{ title => $cur_row_data->[0] }]
+ );
+ defined($cur_row_data->[0]) or bless( $collapse_idx[4]{$cur_row_ids{1}}{$cur_row_ids{3}}{$cur_row_ids{4}}{$cur_row_ids{5}}[1]{tracks}, __NBC__ );
+
+ }
+ $#{$_[0]} = $result_pos - 1;
+ ',
+ 'Same 1:1 descending terminating with chained 1:M:M but with collapse',
+);
+
+is_same_src (
+ ($schema->source ('CD')->_mk_row_parser({
+ inflate_map => $infmap,
+ collapse => 1,
+ hri_style => 1,
+ prune_null_branches => 1,
+ }))[0],
+ ' my $rows_pos = 0;
+ my ($result_pos, @collapse_idx, $cur_row_data);
+
+ while ($cur_row_data = (
+ ( $rows_pos >= 0 and $_[0][$rows_pos++] )
+ ||
+ ( $_[1] and $rows_pos = -1 and $_[1]->() )
+ ) ) {
+
+ # a present cref in $_[1] implies lazy prefetch, implies a supplied stash in $_[2]
+ $_[1] and $result_pos and ! $collapse_idx[0]{$cur_row_data->[4]}{$cur_row_data->[5]} and (unshift @{$_[2]}, $cur_row_data) and last;
+
+ # the rowdata itself for root node
+ $collapse_idx[0]{$cur_row_data->[4]}{$cur_row_data->[5]} //= $_[0][$result_pos++] = { artist => $cur_row_data->[5], title => $cur_row_data->[4], year => $cur_row_data->[2] };
+
+ # prefetch data of single_track (placed in root)
+ (! defined($cur_row_data->[1]) ) ? $collapse_idx[0]{$cur_row_data->[4]}{$cur_row_data->[5]}{single_track} = undef : do {
+ $collapse_idx[0]{$cur_row_data->[4]}{$cur_row_data->[5]}{single_track} //= $collapse_idx[1]{$cur_row_data->[1]}{$cur_row_data->[4]}{$cur_row_data->[5]};
+
+ # prefetch data of cd (placed in single_track)
+ $collapse_idx[1]{$cur_row_data->[1]}{$cur_row_data->[4]}{$cur_row_data->[5]}{cd} //= $collapse_idx[2]{$cur_row_data->[1]}{$cur_row_data->[4]}{$cur_row_data->[5]};
+
+ # prefetch data of artist ( placed in single_track->cd)
+ $collapse_idx[2]{$cur_row_data->[1]}{$cur_row_data->[4]}{$cur_row_data->[5]}{artist} //= $collapse_idx[3]{$cur_row_data->[1]}{$cur_row_data->[4]}{$cur_row_data->[5]} = { artistid => $cur_row_data->[1] };
+
+ # prefetch data of cds (if available)
+ (! defined $cur_row_data->[3] ) ? $collapse_idx[3]{$cur_row_data->[1]}{$cur_row_data->[4]}{$cur_row_data->[5]}{cds} = [] : do {
+
+ (! $collapse_idx[4]{$cur_row_data->[1]}{$cur_row_data->[3]}{$cur_row_data->[4]}{$cur_row_data->[5]} )
+ and
+ push @{$collapse_idx[3]{$cur_row_data->[1]}{$cur_row_data->[4]}{$cur_row_data->[5]}{cds}}, (
+ $collapse_idx[4]{$cur_row_data->[1]}{$cur_row_data->[3]}{$cur_row_data->[4]}{$cur_row_data->[5]} = { cdid => $cur_row_data->[3] }
+ );
+
+ # prefetch data of tracks (if available)
+ ( ! defined $cur_row_data->[0] ) ? $collapse_idx[4]{$cur_row_data->[1]}{$cur_row_data->[3]}{$cur_row_data->[4]}{$cur_row_data->[5]}{tracks} = [] : do {
+
+ (! $collapse_idx[5]{$cur_row_data->[0]}{$cur_row_data->[1]}{$cur_row_data->[3]}{$cur_row_data->[4]}{$cur_row_data->[5]} )
+ and
+ push @{$collapse_idx[4]{$cur_row_data->[1]}{$cur_row_data->[3]}{$cur_row_data->[4]}{$cur_row_data->[5]}{tracks}}, (
+ $collapse_idx[5]{$cur_row_data->[0]}{$cur_row_data->[1]}{$cur_row_data->[3]}{$cur_row_data->[4]}{$cur_row_data->[5]} = { title => $cur_row_data->[0] }
+ );
+ };
+ };
+ };
+ }
+ $#{$_[0]} = $result_pos - 1;
+ ',
+ 'Same 1:1 descending terminating with chained 1:M:M but with collapse, HRI-direct',
+);
+
+$infmap = [qw/
+ tracks.lyrics.existing_lyric_versions.text
+ existing_single_track.cd.artist.artistid
+ existing_single_track.cd.artist.cds.year
+ year
+ genreid
+ tracks.title
+ existing_single_track.cd.artist.cds.cdid
+ latest_cd
+ existing_single_track.cd.artist.cds.tracks.title
+ existing_single_track.cd.artist.cds.genreid
+ tracks.lyrics.existing_lyric_versions.lyric_id
+/];
+
+is_deeply (
+ $schema->source('CD')->_resolve_collapse({ as => {map { $infmap->[$_] => $_ } 0 .. $#$infmap} }),
+ {
+ -identifying_columns => [ 1 ], # existing_single_track.cd.artist.artistid
+
+ existing_single_track => {
+ -identifying_columns => [ 1 ], # existing_single_track.cd.artist.artistid
+ -is_single => 1,
+
+ cd => {
+ -identifying_columns => [ 1 ], # existing_single_track.cd.artist.artistid
+ -is_single => 1,
+
+ artist => {
+ -identifying_columns => [ 1 ], # existing_single_track.cd.artist.artistid
+ -is_single => 1,
+
+ cds => {
+          -identifying_columns => [ 1, 6 ], # existing_single_track.cd.artist.artistid, existing_single_track.cd.artist.cds.cdid
+ -is_optional => 1,
+
+ tracks => {
+            -identifying_columns => [ 1, 6, 8 ], # existing_single_track.cd.artist.artistid, existing_single_track.cd.artist.cds.cdid, existing_single_track.cd.artist.cds.tracks.title
+ -is_optional => 1,
+ }
+ }
+ }
+ }
+ },
+ tracks => {
+ -identifying_columns => [ 1, 5 ], # existing_single_track.cd.artist.artistid, tracks.title
+ -is_optional => 1,
+
+ lyrics => {
+ -identifying_columns => [ 1, 5, 10 ], # existing_single_track.cd.artist.artistid, tracks.title, tracks.lyrics.existing_lyric_versions.lyric_id
+ -is_single => 1,
+ -is_optional => 1,
+
+ existing_lyric_versions => {
+ -identifying_columns => [ 0, 1, 5, 10 ], # tracks.lyrics.existing_lyric_versions.text, existing_single_track.cd.artist.artistid, tracks.title, tracks.lyrics.existing_lyric_versions.lyric_id
+ },
+ },
+ }
+ },
+ 'Correct collapse map constructed',
+);
+
+is_same_src (
+ ($schema->source ('CD')->_mk_row_parser({
+ inflate_map => $infmap,
+ collapse => 1,
+ }))[0],
+ ' my $rows_pos = 0;
+ my ($result_pos, @collapse_idx, $cur_row_data, %cur_row_ids);
+
+ while ($cur_row_data = (
+ ( $rows_pos >= 0 and $_[0][$rows_pos++] )
+ ||
+ ( $_[1] and $rows_pos = -1 and $_[1]->() )
+ ) ) {
+
+ $cur_row_ids{0} = $cur_row_data->[0] // "\0NULL\xFF$rows_pos\xFF0\0";
+ $cur_row_ids{1} = $cur_row_data->[1] // "\0NULL\xFF$rows_pos\xFF1\0";
+ $cur_row_ids{5} = $cur_row_data->[5] // "\0NULL\xFF$rows_pos\xFF5\0";
+ $cur_row_ids{6} = $cur_row_data->[6] // "\0NULL\xFF$rows_pos\xFF6\0";
+ $cur_row_ids{8} = $cur_row_data->[8] // "\0NULL\xFF$rows_pos\xFF8\0";
+ $cur_row_ids{10} = $cur_row_data->[10] // "\0NULL\xFF$rows_pos\xFF10\0";
+
+ # a present cref in $_[1] implies lazy prefetch, implies a supplied stash in $_[2]
+ $_[1] and $result_pos and ! $collapse_idx[0]{$cur_row_ids{1}} and (unshift @{$_[2]}, $cur_row_data) and last;
+
+ $collapse_idx[0]{$cur_row_ids{1}} //= $_[0][$result_pos++] = [{ genreid => $cur_row_data->[4], latest_cd => $cur_row_data->[7], year => $cur_row_data->[3] }];
+
+ $collapse_idx[0]{$cur_row_ids{1}}[1]{existing_single_track} //= $collapse_idx[1]{$cur_row_ids{1}} = [];
+ $collapse_idx[1]{$cur_row_ids{1}}[1]{cd} //= $collapse_idx[2]{$cur_row_ids{1}} = [];
+ $collapse_idx[2]{$cur_row_ids{1}}[1]{artist} //= $collapse_idx[3]{$cur_row_ids{1}} = [{ artistid => $cur_row_data->[1] }];
+
+ (! $collapse_idx[4]{$cur_row_ids{1}}{$cur_row_ids{6}} )
+ and
+ push @{ $collapse_idx[3]{$cur_row_ids{1}}[1]{cds} }, (
+ $collapse_idx[4]{$cur_row_ids{1}}{$cur_row_ids{6}} = [{ cdid => $cur_row_data->[6], genreid => $cur_row_data->[9], year => $cur_row_data->[2] }]
+ );
+ defined($cur_row_data->[6]) or bless( $collapse_idx[3]{$cur_row_ids{1}}[1]{cds}, __NBC__ );
+
+ (! $collapse_idx[5]{$cur_row_ids{1}}{$cur_row_ids{6}}{$cur_row_ids{8}} )
+ and
+ push @{ $collapse_idx[4]{$cur_row_ids{1}}{$cur_row_ids{6}}[1]{tracks} }, (
+ $collapse_idx[5]{$cur_row_ids{1}}{$cur_row_ids{6}}{$cur_row_ids{8}} = [{ title => $cur_row_data->[8] }]
+ );
+ defined($cur_row_data->[8]) or bless( $collapse_idx[4]{$cur_row_ids{1}}{$cur_row_ids{6}}[1]{tracks}, __NBC__ );
+
+ (! $collapse_idx[6]{$cur_row_ids{1}}{$cur_row_ids{5}} )
+ and
+ push @{ $collapse_idx[0]{$cur_row_ids{1}}[1]{tracks} }, (
+ $collapse_idx[6]{$cur_row_ids{1}}{$cur_row_ids{5}} = [{ title => $cur_row_data->[5] }]
+ );
+ defined($cur_row_data->[5]) or bless( $collapse_idx[0]{$cur_row_ids{1}}[1]{tracks}, __NBC__ );
+
+ $collapse_idx[6]{$cur_row_ids{1}}{$cur_row_ids{5}}[1]{lyrics} //= $collapse_idx[7]{$cur_row_ids{1}}{$cur_row_ids{5}}{$cur_row_ids{10}} = [];
+ defined($cur_row_data->[10]) or bless( $collapse_idx[6]{$cur_row_ids{1}}{$cur_row_ids{5}}[1]{lyrics}, __NBC__ );
+
+ (! $collapse_idx[8]{$cur_row_ids{0}}{$cur_row_ids{1}}{$cur_row_ids{5}}{$cur_row_ids{10}} )
+ and
+ push @{ $collapse_idx[7]{$cur_row_ids{1}}{$cur_row_ids{5}}{$cur_row_ids{10}}[1]{existing_lyric_versions} }, (
+ $collapse_idx[8]{$cur_row_ids{0}}{$cur_row_ids{1}}{$cur_row_ids{5}}{$cur_row_ids{10}} = [{ lyric_id => $cur_row_data->[10], text => $cur_row_data->[0] }]
+ );
+ }
+
+ $#{$_[0]} = $result_pos - 1;
+ ',
+ 'Multiple has_many on multiple branches torture test',
+);
+
+is_same_src (
+ ($schema->source ('CD')->_mk_row_parser({
+ inflate_map => $infmap,
+ collapse => 1,
+ prune_null_branches => 1,
+ }))[0],
+ ' my $rows_pos = 0;
+ my ($result_pos, @collapse_idx, $cur_row_data);
+
+ while ($cur_row_data = (
+ ( $rows_pos >= 0 and $_[0][$rows_pos++] )
+ ||
+ ( $_[1] and $rows_pos = -1 and $_[1]->() )
+ ) ) {
+
+ # a present cref in $_[1] implies lazy prefetch, implies a supplied stash in $_[2]
+ $_[1] and $result_pos and ! $collapse_idx[0]{$cur_row_data->[1]} and (unshift @{$_[2]}, $cur_row_data) and last;
+
+ $collapse_idx[0]{$cur_row_data->[1]} //= $_[0][$result_pos++] = [{ genreid => $cur_row_data->[4], latest_cd => $cur_row_data->[7], year => $cur_row_data->[3] }];
+
+ $collapse_idx[0]{$cur_row_data->[1]}[1]{existing_single_track} //= $collapse_idx[1]{$cur_row_data->[1]} = [];
+ $collapse_idx[1]{$cur_row_data->[1]}[1]{cd} //= $collapse_idx[2]{$cur_row_data->[1]} = [];
+ $collapse_idx[2]{$cur_row_data->[1]}[1]{artist} //= $collapse_idx[3]{$cur_row_data->[1]} = [{ artistid => $cur_row_data->[1] }];
+
+ (! defined($cur_row_data->[6])) ? $collapse_idx[3]{$cur_row_data->[1]}[1]{cds} = [] : do {
+ (! $collapse_idx[4]{$cur_row_data->[1]}{$cur_row_data->[6]} )
+ and
+ push @{ $collapse_idx[3]{$cur_row_data->[1]}[1]{cds} }, (
+ $collapse_idx[4]{$cur_row_data->[1]}{$cur_row_data->[6]} = [{ cdid => $cur_row_data->[6], genreid => $cur_row_data->[9], year => $cur_row_data->[2] }]
+ );
+
+ (! defined($cur_row_data->[8]) ) ? $collapse_idx[4]{$cur_row_data->[1]}{$cur_row_data->[6]}[1]{tracks} = [] : do {
+
+ (! $collapse_idx[5]{$cur_row_data->[1]}{$cur_row_data->[6]}{$cur_row_data->[8]} )
+ and
+ push @{ $collapse_idx[4]{$cur_row_data->[1]}{$cur_row_data->[6]}[1]{tracks} }, (
+ $collapse_idx[5]{$cur_row_data->[1]}{$cur_row_data->[6]}{$cur_row_data->[8]} = [{ title => $cur_row_data->[8] }]
+ );
+ };
+ };
+
+ (! defined($cur_row_data->[5]) ) ? $collapse_idx[0]{$cur_row_data->[1]}[1]{tracks} = [] : do {
+
+ (! $collapse_idx[6]{$cur_row_data->[1]}{$cur_row_data->[5]} )
+ and
+ push @{ $collapse_idx[0]{$cur_row_data->[1]}[1]{tracks} }, (
+ $collapse_idx[6]{$cur_row_data->[1]}{$cur_row_data->[5]} = [{ title => $cur_row_data->[5] }]
+ );
+
+ (! defined($cur_row_data->[10]) ) ? $collapse_idx[6]{$cur_row_data->[1]}{$cur_row_data->[5]}[1]{lyrics} = [] : do {
+
+ $collapse_idx[6]{$cur_row_data->[1]}{$cur_row_data->[5]}[1]{lyrics} //= $collapse_idx[7]{$cur_row_data->[1]}{$cur_row_data->[5]}{$cur_row_data->[10]} = [];
+
+ (! $collapse_idx[8]{$cur_row_data->[0]}{$cur_row_data->[1]}{$cur_row_data->[5]}{$cur_row_data->[10]} )
+ and
+ push @{ $collapse_idx[7]{$cur_row_data->[1]}{$cur_row_data->[5]}{$cur_row_data->[10]}[1]{existing_lyric_versions} }, (
+ $collapse_idx[8]{$cur_row_data->[0]}{$cur_row_data->[1]}{$cur_row_data->[5]}{$cur_row_data->[10]} = [{ lyric_id => $cur_row_data->[10], text => $cur_row_data->[0] }]
+ );
+ };
+ };
+ }
+
+ $#{$_[0]} = $result_pos - 1;
+ ',
+ 'Multiple has_many on multiple branches with branch pruning torture test',
+);
+
+$infmap = [
+ 'single_track.trackid', # (0) definitive link to root from 1:1:1:1:M:M chain
+ 'year', # (1) non-unique
+ 'tracks.cd', # (2) \ together both uniqueness for second multirel
+ 'tracks.title', # (3) / and definitive link back to root
+  'single_track.cd.artist.cds.cdid',        # (4) to give uniqueness to ...tracks.title below
+ 'single_track.cd.artist.cds.year', # (5) non-unique
+  'single_track.cd.artist.artistid',        # (6) uniquifies entire parental chain
+ 'single_track.cd.artist.cds.genreid', # (7) nullable
+ 'single_track.cd.artist.cds.tracks.title',# (8) unique when combined with ...cds.cdid above
+];
+
+is_deeply (
+ $schema->source('CD')->_resolve_collapse({ as => {map { $infmap->[$_] => $_ } 0 .. $#$infmap} }),
+ {
+ -identifying_columns => [],
+ -identifying_columns_variants => [
+ [ 0 ], [ 2 ],
+ ],
+ single_track => {
+ -identifying_columns => [ 0 ],
+ -is_optional => 1,
+ -is_single => 1,
+ cd => {
+ -identifying_columns => [ 0 ],
+ -is_single => 1,
+ artist => {
+ -identifying_columns => [ 0 ],
+ -is_single => 1,
+ cds => {
+ -identifying_columns => [ 0, 4 ],
+ -is_optional => 1,
+ tracks => {
+ -identifying_columns => [ 0, 4, 8 ],
+ -is_optional => 1,
+ }
+ }
+ }
+ }
+ },
+ tracks => {
+ -identifying_columns => [ 2, 3 ],
+ -is_optional => 1,
+ }
+ },
+ 'Correct underdefined root collapse map constructed'
+);
+
+is_same_src (
+ ($schema->source ('CD')->_mk_row_parser({
+ inflate_map => $infmap,
+ collapse => 1,
+ }))[0],
+ ' my $rows_pos = 0;
+ my ($result_pos, @collapse_idx, $cur_row_data, %cur_row_ids);
+
+ while ($cur_row_data = (
+ ( $rows_pos >= 0 and $_[0][$rows_pos++] )
+ ||
+ ( $_[1] and $rows_pos = -1 and $_[1]->() )
+ ) ) {
+
+ $cur_row_ids{0} = $cur_row_data->[0] // "\0NULL\xFF$rows_pos\xFF0\0";
+ $cur_row_ids{2} = $cur_row_data->[2] // "\0NULL\xFF$rows_pos\xFF2\0";
+ $cur_row_ids{3} = $cur_row_data->[3] // "\0NULL\xFF$rows_pos\xFF3\0";
+ $cur_row_ids{4} = $cur_row_data->[4] // "\0NULL\xFF$rows_pos\xFF4\0";
+ $cur_row_ids{8} = $cur_row_data->[8] // "\0NULL\xFF$rows_pos\xFF8\0";
+
+ # cache expensive set of ops in a non-existent rowid slot
+ $cur_row_ids{10} = (
+ ( ( defined $cur_row_data->[0] ) && (join "\xFF", q{}, $cur_row_data->[0], q{} ))
+ or
+ ( ( defined $cur_row_data->[2] ) && (join "\xFF", q{}, $cur_row_data->[2], q{} ))
+ or
+ "\0$rows_pos\0"
+ );
+
+ # a present cref in $_[1] implies lazy prefetch, implies a supplied stash in $_[2]
+ $_[1] and $result_pos and ! $collapse_idx[0]{$cur_row_ids{10}} and (unshift @{$_[2]}, $cur_row_data) and last;
+
+ $collapse_idx[0]{$cur_row_ids{10}} //= $_[0][$result_pos++] = [{ year => $$cur_row_data[1] }];
+
+ $collapse_idx[0]{$cur_row_ids{10}}[1]{single_track} //= ($collapse_idx[1]{$cur_row_ids{0}} = [{ trackid => $cur_row_data->[0] }]);
+ defined($cur_row_data->[0]) or bless ( $collapse_idx[0]{$cur_row_ids{10}}[1]{single_track}, __NBC__ );
+
+ $collapse_idx[1]{$cur_row_ids{0}}[1]{cd} //= $collapse_idx[2]{$cur_row_ids{0}} = [];
+
+ $collapse_idx[2]{$cur_row_ids{0}}[1]{artist} //= ($collapse_idx[3]{$cur_row_ids{0}} = [{ artistid => $cur_row_data->[6] }]);
+
+ (! $collapse_idx[4]{$cur_row_ids{0}}{$cur_row_ids{4}} )
+ and
+ push @{$collapse_idx[3]{$cur_row_ids{0}}[1]{cds}}, (
+ $collapse_idx[4]{$cur_row_ids{0}}{$cur_row_ids{4}} = [{ cdid => $cur_row_data->[4], genreid => $cur_row_data->[7], year => $cur_row_data->[5] }]
+ );
+ defined($cur_row_data->[4]) or bless ( $collapse_idx[3]{$cur_row_ids{0}}[1]{cds}, __NBC__ );
+
+ (! $collapse_idx[5]{$cur_row_ids{0}}{$cur_row_ids{4}}{$cur_row_ids{8}} )
+ and
+ push @{$collapse_idx[4]{$cur_row_ids{0}}{$cur_row_ids{4}}[1]{tracks}}, (
+ $collapse_idx[5]{$cur_row_ids{0}}{$cur_row_ids{4}}{$cur_row_ids{8}} = [{ title => $cur_row_data->[8] }]
+ );
+ defined($cur_row_data->[8]) or bless ( $collapse_idx[4]{$cur_row_ids{0}}{$cur_row_ids{4}}[1]{tracks}, __NBC__ );
+
+ (! $collapse_idx[6]{$cur_row_ids{2}}{$cur_row_ids{3}} )
+ and
+ push @{$collapse_idx[0]{$cur_row_ids{10}}[1]{tracks}}, (
+ $collapse_idx[6]{$cur_row_ids{2}}{$cur_row_ids{3}} = [{ cd => $$cur_row_data[2], title => $cur_row_data->[3] }]
+ );
+ defined($cur_row_data->[2]) or bless ( $collapse_idx[0]{$cur_row_ids{10}}[1]{tracks}, __NBC__ );
+ }
+
+ $#{$_[0]} = $result_pos - 1;
+ ',
+ 'Multiple has_many on multiple branches with underdefined root torture test',
+);
+
+is_same_src (
+ ($schema->source ('CD')->_mk_row_parser({
+ inflate_map => $infmap,
+ collapse => 1,
+ hri_style => 1,
+ prune_null_branches => 1,
+ }))[0],
+ ' my $rows_pos = 0;
+ my ($result_pos, @collapse_idx, $cur_row_data, %cur_row_ids);
+
+ while ($cur_row_data = (
+ ( $rows_pos >= 0 and $_[0][$rows_pos++] )
+ ||
+ ( $_[1] and $rows_pos = -1 and $_[1]->() )
+ ) ) {
+
+ # do not care about nullability here
+ $cur_row_ids{0} = $cur_row_data->[0];
+ $cur_row_ids{2} = $cur_row_data->[2];
+ $cur_row_ids{3} = $cur_row_data->[3];
+ $cur_row_ids{4} = $cur_row_data->[4];
+ $cur_row_ids{8} = $cur_row_data->[8];
+
+ # cache expensive set of ops in a non-existent rowid slot
+ $cur_row_ids{10} = (
+ ( ( defined $cur_row_data->[0] ) && (join "\xFF", q{}, $cur_row_data->[0], q{} ))
+ or
+ ( ( defined $cur_row_data->[2] ) && (join "\xFF", q{}, $cur_row_data->[2], q{} ))
+ or
+ "\0$rows_pos\0"
+ );
+
+ # a present cref in $_[1] implies lazy prefetch, implies a supplied stash in $_[2]
+ $_[1] and $result_pos and ! $collapse_idx[0]{$cur_row_ids{10}} and (unshift @{$_[2]}, $cur_row_data) and last;
+
+ $collapse_idx[0]{$cur_row_ids{10}} //= $_[0][$result_pos++] = { year => $$cur_row_data[1] };
+
+ (! defined $cur_row_data->[0] ) ? $collapse_idx[0]{$cur_row_ids{10}}{single_track} = undef : do {
+
+ $collapse_idx[0]{$cur_row_ids{10}}{single_track} //= ($collapse_idx[1]{$cur_row_ids{0}} = { trackid => $$cur_row_data[0] });
+
+ $collapse_idx[1]{$cur_row_ids{0}}{cd} //= $collapse_idx[2]{$cur_row_ids{0}};
+
+ $collapse_idx[2]{$cur_row_ids{0}}{artist} //= ($collapse_idx[3]{$cur_row_ids{0}} = { artistid => $$cur_row_data[6] });
+
+ (! defined $cur_row_data->[4] ) ? $collapse_idx[3]{$cur_row_ids{0}}{cds} = [] : do {
+
+ (! $collapse_idx[4]{$cur_row_ids{0}}{$cur_row_ids{4}} )
+ and
+ push @{$collapse_idx[3]{$cur_row_ids{0}}{cds}}, (
+ $collapse_idx[4]{$cur_row_ids{0}}{$cur_row_ids{4}} = { cdid => $$cur_row_data[4], genreid => $$cur_row_data[7], year => $$cur_row_data[5] }
+ );
+
+ (! defined $cur_row_data->[8] ) ? $collapse_idx[4]{$cur_row_ids{0}}{$cur_row_ids{4}}{tracks} = [] : do {
+
+ (! $collapse_idx[5]{$cur_row_ids{0}}{$cur_row_ids{4}}{$cur_row_ids{8}} )
+ and
+ push @{$collapse_idx[4]{$cur_row_ids{0}}{$cur_row_ids{4}}{tracks}}, (
+ $collapse_idx[5]{$cur_row_ids{0}}{$cur_row_ids{4}}{$cur_row_ids{8}} = { title => $$cur_row_data[8] }
+ );
+ };
+ };
+ };
+
+ (! defined $cur_row_data->[2] ) ? $collapse_idx[0]{$cur_row_ids{10}}{tracks} = [] : do {
+ (! $collapse_idx[6]{$cur_row_ids{2}}{$cur_row_ids{3}} )
+ and
+ push @{$collapse_idx[0]{$cur_row_ids{10}}{tracks}}, (
+ $collapse_idx[6]{$cur_row_ids{2}}{$cur_row_ids{3}} = { cd => $$cur_row_data[2], title => $$cur_row_data[3] }
+ );
+ };
+ }
+
+ $#{$_[0]} = $result_pos - 1;
+ ',
+ 'Multiple has_many on multiple branches with underdefined root, HRI-direct torture test',
+);
+
+done_testing;
+
+my $deparser;
+sub is_same_src { SKIP: {
+ $deparser ||= B::Deparse->new;
+ local $Test::Builder::Level = $Test::Builder::Level + 1;
+
+ my ($got, $expect) = @_;
+
+ skip "Not testing equality of source containing defined-or operator on this perl $]", 1
+    if ($] < 5.010 and $expect =~ m!\Q//=!);
+
+ $expect =~ s/__NBC__/B::perlstring($DBIx::Class::ResultSource::RowParser::Util::null_branch_class)/ge;
+
+ $expect = " { use strict; use warnings FATAL => 'all';\n$expect\n }";
+
+ my @normalized = map {
+ my $cref = eval "sub { $_ }" or do {
+ fail "Coderef does not compile!\n\n$@\n\n$_";
+ return undef;
+ };
+ $deparser->coderef2text($cref);
+ } ($got, $expect);
+
+ &is (@normalized, $_[2]||() ) or do {
+ eval { require Test::Differences }
+ ? &Test::Differences::eq_or_diff( @normalized, $_[2]||() )
+ : note ("Original sources:\n\n$got\n\n$expect\n")
+ ;
+ exit 1;
+ };
+} }
--- /dev/null
+use strict;
+use warnings;
+use Test::More;
+use Test::Exception;
+use Test::Warn;
+
+use lib 't/lib';
+use DBICTest;
+
+throws_ok {
+ package Foo;
+ use base 'DBIx::Class::Core';
+ __PACKAGE__->table('foo');
+ __PACKAGE__->set_primary_key('bar')
+} qr/No such column 'bar' on source 'foo' /,
+'proper exception on non-existing column as PK';
+
+warnings_exist {
+ package Foo2;
+ use base 'DBIx::Class::Core';
+ __PACKAGE__->table('foo');
+ __PACKAGE__->add_columns(
+ foo => {},
+ bar => { is_nullable => 1 },
+ );
+ __PACKAGE__->set_primary_key(qw(foo bar))
+} qr/Primary key of source 'foo' includes the column 'bar' which has its 'is_nullable' attribute set to true/,
+'proper warning on is_nullable column as PK';
+
+done_testing;
warnings_exist { is_same_sql_bind (
$rs->search ({}, { rows => 1, offset => 3,columns => [
{ id => 'foo.id' },
- { 'bar.id' => 'bar.id' },
+ { 'artist.id' => 'bar.id' },
{ bleh => \ 'TO_CHAR (foo.womble, "blah")' },
]})->as_query,
'(
$books_45_and_owners->search ({}, { group_by => 'title', order_by => 'title' })->as_query,
'(SELECT me.id, me.source, me.owner, me.price, owner.id, owner.name
FROM (
- SELECT me.id, me.source, me.owner, me.price
+ SELECT me.id, me.source, me.owner, me.price, me.title
FROM (
- SELECT me.id, me.source, me.owner, me.price, ORDER__BY__001
+ SELECT me.id, me.source, me.owner, me.price, me.title
FROM (
- SELECT me.id, me.source, me.owner, me.price, title AS ORDER__BY__001
+ SELECT me.id, me.source, me.owner, me.price, me.title
FROM books me
JOIN owners owner ON owner.id = me.owner
WHERE ( source = ? )
ORDER BY title
FETCH FIRST 5 ROWS ONLY
) me
- ORDER BY ORDER__BY__001 DESC
+ ORDER BY title DESC
FETCH FIRST 2 ROWS ONLY
) me
- ORDER BY ORDER__BY__001
+ ORDER BY title
) me
JOIN owners owner ON owner.id = me.owner
WHERE ( source = ? )
use Test::More;
use lib qw(t/lib);
+use List::Util 'min';
use DBICTest;
use DBIC::SqlMakerTest;
use DBIx::Class::SQLMaker::LimitDialects;
FROM books rownum__emulation
WHERE rownum__emulation.title < me.title
) < ?
- ORDER BY me.title
+ ORDER BY me.title ASC
)',
[
[ { sqlt_datatype => 'varchar', sqlt_size => 100, dbic_colname => 'source' } => 'Library' ],
FROM "books" "rownum__emulation"
WHERE "rownum__emulation"."title" > "me"."title"
) BETWEEN ? AND ?
- ORDER BY "title" DESC
+ ORDER BY "me"."title" DESC
)',
[
[ { sqlt_datatype => 'varchar', sqlt_size => 100, dbic_colname => 'source' } => 'Library' ],
'(
SELECT "owner_name"
FROM (
- SELECT "owner"."name" AS "owner_name", "title"
+ SELECT "owner"."name" AS "owner_name", "me"."title"
FROM "books" "me"
JOIN "owners" "owner" ON "owner"."id" = "me"."owner"
WHERE ( "source" = ? )
FROM "books" "rownum__emulation"
WHERE "rownum__emulation"."title" < "me"."title"
) BETWEEN ? AND ?
- ORDER BY "title"
+ ORDER BY "me"."title" ASC
)',
[
[ { sqlt_datatype => 'varchar', sqlt_size => 100, dbic_colname => 'source' } => 'Library' ],
'Correct columns selected with rows',
);
+$rs = $schema->resultset('CD')->search({}, {
+ columns => [qw( me.cdid me.title me.genreid me.year tracks.position tracks.title )],
+ join => 'tracks',
+ collapse => 1,
+ order_by => [ { -asc => 'me.genreid' }, { -desc => 'year' }, 'me.title', \ 'single_track DESC', { -desc => [qw( me.cdid tracks.position )] } ],
+});
+
+my @full_res = @{$rs->all_hri};
+
+is (@full_res, 5, 'Expected amount of CDs');
+
+is_deeply (
+ \@full_res,
+ [
+ { cdid => 2, genreid => undef, title => "Forkful of bees", year => 2001, tracks => [
+ { position => 3, title => "Sticky Honey" },
+ { position => 2, title => "Stripy" },
+ { position => 1, title => "Stung with Success" },
+ ] },
+ { cdid => 4, genreid => undef, title => "Generic Manufactured Singles", year => 2001, tracks => [
+ { position => 3, title => "No More Ideas" },
+ { position => 2, title => "Boring Song" },
+ { position => 1, title => "Boring Name" },
+ ] },
+ { cdid => 5, genreid => undef, title => "Come Be Depressed With Us", year => 1998, tracks => [
+ { position => 3, title => "Suicidal" },
+ { position => 2, title => "Under The Weather" },
+ { position => 1, title => "Sad" },
+ ] },
+ { cdid => 3, genreid => undef, title => "Caterwaulin' Blues", year => 1997, tracks => [
+ { position => 3, title => "Fowlin" },
+ { position => 2, title => "Howlin" },
+ { position => 1, title => "Yowlin" },
+ ] },
+ { cdid => 1, genreid => 1, title => "Spoonful of bees", year => 1999, tracks => [
+ { position => 3, title => "Beehind You" },
+ { position => 2, title => "Apiary" },
+ { position => 1, title => "The Bees Knees" },
+ ] },
+ ],
+ 'Complex ordered gensubq limited cds and tracks in expected sqlite order'
+);
+
+for my $slice (
+ [0, 10],
+ [3, 5 ],
+ [4, 6 ],
+ [0, 2 ],
+ [1, 3 ],
+) {
+
+ my $rownum_cmp_op = $slice->[0]
+ ? 'BETWEEN ? AND ?'
+ : ' < ?'
+ ;
+
+ is_deeply(
+ $rs->slice(@$slice)->all_hri,
+ [ @full_res[ $slice->[0] .. min($#full_res, $slice->[1]) ] ],
+ "Expected array slice on complex ordered limited gensubq ($slice->[0] : $slice->[1])",
+ );
+
+ is_same_sql_bind(
+ $rs->slice(@$slice)->as_query,
+ qq{(
+ SELECT "me"."cdid", "me"."title", "me"."genreid", "me"."year",
+ "tracks"."position", "tracks"."title"
+ FROM (
+ SELECT "me"."cdid", "me"."title", "me"."genreid", "me"."year", "me"."single_track"
+ FROM (
+ SELECT "me"."cdid", "me"."title", "me"."genreid", "me"."year", "me"."single_track"
+ FROM cd "me"
+ LEFT JOIN "track" "tracks"
+ ON "tracks"."cd" = "me"."cdid"
+ GROUP BY "me"."cdid", "me"."title", "me"."genreid", "me"."year", "me"."single_track"
+ ) "me"
+ WHERE (
+ SELECT COUNT( * )
+ FROM cd "rownum__emulation"
+ WHERE (
+ ( "me"."genreid" IS NOT NULL AND "rownum__emulation"."genreid" IS NULL )
+ OR
+ (
+ "rownum__emulation"."genreid" < "me"."genreid"
+ AND
+ "me"."genreid" IS NOT NULL
+ AND
+ "rownum__emulation"."genreid" IS NOT NULL
+ )
+ OR
+ (
+ (
+ "me"."genreid" = "rownum__emulation"."genreid"
+ OR
+ ( "me"."genreid" IS NULL AND "rownum__emulation"."genreid" IS NULL )
+ )
+ AND
+ "rownum__emulation"."year" > "me"."year"
+ )
+ OR
+ (
+ (
+ "me"."genreid" = "rownum__emulation"."genreid"
+ OR
+ ( "me"."genreid" IS NULL AND "rownum__emulation"."genreid" IS NULL )
+ )
+ AND
+ "me"."year" = "rownum__emulation"."year"
+ AND
+ "rownum__emulation"."title" < "me"."title"
+ )
+ OR
+ (
+ (
+ "me"."genreid" = "rownum__emulation"."genreid"
+ OR
+ ( "me"."genreid" IS NULL AND "rownum__emulation"."genreid" IS NULL )
+ )
+ AND
+ "me"."year" = "rownum__emulation"."year"
+ AND
+ "me"."title" = "rownum__emulation"."title"
+ AND
+ (
+ ("me"."single_track" IS NULL AND "rownum__emulation"."single_track" IS NOT NULL )
+ OR
+ (
+ "rownum__emulation"."single_track" > "me"."single_track"
+ AND
+ "me"."single_track" IS NOT NULL
+ AND
+ "rownum__emulation"."single_track" IS NOT NULL
+ )
+ )
+ )
+ OR
+ (
+ (
+ "me"."genreid" = "rownum__emulation"."genreid"
+ OR
+ ( "me"."genreid" IS NULL AND "rownum__emulation"."genreid" IS NULL )
+ )
+ AND
+ "me"."year" = "rownum__emulation"."year"
+ AND
+ "me"."title" = "rownum__emulation"."title"
+ AND
+ (
+ ( "me"."single_track" = "rownum__emulation"."single_track" )
+ OR
+ ( "me"."single_track" IS NULL AND "rownum__emulation"."single_track" IS NULL )
+ )
+ AND
+ "rownum__emulation"."cdid" > "me"."cdid"
+ )
+ )
+ ) $rownum_cmp_op
+ ORDER BY "me"."genreid" ASC, "me"."year" DESC, "me"."title" ASC, "me"."single_track" DESC, "me"."cdid" DESC
+ ) "me"
+ LEFT JOIN "track" "tracks"
+ ON "tracks"."cd" = "me"."cdid"
+ ORDER BY "me"."genreid" ASC, "year" DESC, "me"."title", single_track DESC, "me"."cdid" DESC, "tracks"."position" DESC
+ )},
+ [
+ ( $slice->[0] ? [ $OFFSET => $slice->[0] ] : () ),
+ [ $TOTAL => $slice->[1] + ($slice->[0] ? 0 : 1 ) ],
+ ],
+ "Expected sql on complex ordered limited gensubq ($slice->[0] : $slice->[1])",
+ );
+}
+
{
$rs = $schema->resultset('Artist')->search({}, {
columns => 'artistid',
);
}
-# this is a nonsensical order_by, we are just making sure the bind-transport is correct
-# (not that it'll be useful anywhere in the near future)
-my $attr = {};
-my $rs_selectas_rel = $schema->resultset('BooksInLibrary')->search(undef, {
- columns => 'me.id',
- offset => 3,
- rows => 4,
- '+columns' => { bar => \['? * ?', [ $attr => 11 ], [ $attr => 12 ]], baz => \[ '?', [ $attr => 13 ]] },
- order_by => [ 'id', \['? / ?', [ $attr => 1 ], [ $attr => 2 ]], \[ '?', [ $attr => 3 ]] ],
- having => \[ '?', [ $attr => 21 ] ],
-});
-
-is_same_sql_bind(
- $rs_selectas_rel->as_query,
- '(
- SELECT "me"."id", "bar", "baz"
- FROM (
- SELECT "me"."id", ? * ? AS "bar", ? AS "baz"
- FROM "books" "me"
- WHERE ( "source" = ? )
- HAVING ?
- ) "me"
- WHERE ( SELECT COUNT(*) FROM "books" "rownum__emulation" WHERE "rownum__emulation"."id" < "me"."id" ) BETWEEN ? AND ?
- ORDER BY "id", ? / ?, ?
- )',
- [
- [ $attr => 11 ], [ $attr => 12 ], [ $attr => 13 ],
- [ { sqlt_datatype => 'varchar', sqlt_size => 100, dbic_colname => 'source' } => 'Library' ],
- [ $attr => 21 ],
- [ {%$OFFSET} => 3 ],
- [ {%$TOTAL} => 6 ],
- [ $attr => 1 ], [ $attr => 2 ], [ $attr => 3 ],
- ],
- 'Pagination with sub-query in ORDER BY works'
-);
-
done_testing;
my $rs = $s->resultset ('CD')->search({ id => 1 });
+# important for a test below, never traversed
+$rs->result_source->add_relationship(
+ ends_with_me => 'DBICTest::Schema::Artist', sub {}
+);
+
+
my $where_bind = [ { dbic_colname => 'id' }, 1 ];
for my $test_set (
offset => 3,
columns => [
{ id => 'foo.id' },
- { 'bar.id' => 'bar.id' },
+ { 'artist.id' => 'bar.id' },
{ bleh => \'TO_CHAR (foo.womble, "blah")' },
]
}),
sql => '(
- SELECT id, bar__id, bleh
+ SELECT id, artist__id, bleh
FROM (
- SELECT id, bar__id, bleh, ROWNUM rownum__index
+ SELECT id, artist__id, bleh, ROWNUM rownum__index
FROM (
- SELECT foo.id AS id, bar.id AS bar__id, TO_CHAR (foo.womble, "blah") AS bleh
+ SELECT foo.id AS id, bar.id AS artist__id, TO_CHAR (foo.womble, "blah") AS bleh
FROM cd me
WHERE id = ?
) me
offset => 3,
columns => [
{ id => 'foo.id' },
- { 'bar.id' => 'bar.id' },
+ { 'artist.id' => 'bar.id' },
{ bleh => \'TO_CHAR (foo.womble, "blah")' },
],
order_by => [qw( artist title )],
}),
sql => '(
- SELECT id, bar__id, bleh
+ SELECT id, artist__id, bleh
FROM (
- SELECT id, bar__id, bleh, ROWNUM rownum__index
+ SELECT id, artist__id, bleh, ROWNUM rownum__index
FROM (
- SELECT foo.id AS id, bar.id AS bar__id, TO_CHAR(foo.womble, "blah") AS bleh
+ SELECT foo.id AS id, bar.id AS artist__id, TO_CHAR(foo.womble, "blah") AS bleh
FROM cd me
WHERE id = ?
ORDER BY artist, title
offset => 3,
columns => [
{ id => 'foo.id' },
- { 'bar.id' => 'bar.id' },
+ { 'artist.id' => 'bar.id' },
{ bleh => \'TO_CHAR (foo.womble, "blah")' },
],
order_by => 'artist',
}),
sql => '(
- SELECT id, bar__id, bleh
+ SELECT id, artist__id, bleh
FROM (
- SELECT id, bar__id, bleh, ROWNUM rownum__index
+ SELECT id, artist__id, bleh, ROWNUM rownum__index
FROM (
- SELECT foo.id AS id, bar.id AS bar__id, TO_CHAR(foo.womble, "blah") AS bleh
+ SELECT foo.id AS id, bar.id AS artist__id, TO_CHAR(foo.womble, "blah") AS bleh
FROM cd me
WHERE id = ?
ORDER BY artist
{ id => 'foo.id' },
{ 'ends_with_me.id' => 'ends_with_me.id' },
],
- order_by => [qw( artist title )],
+ order_by => [qw( year artist title )],
}),
sql => '(
SELECT id, ends_with_me__id
SELECT foo.id AS id, ends_with_me.id AS ends_with_me__id
FROM cd me
WHERE id = ?
- ORDER BY artist, title
+ ORDER BY year, artist, title
) me
WHERE ROWNUM <= ?
) me
$books_45_and_owners->search ({}, { group_by => 'title', order_by => 'title' })->as_query,
'(SELECT me.id, me.source, me.owner, me.price, owner.id, owner.name
FROM (
- SELECT me.id, me.source, me.owner, me.price
+ SELECT me.id, me.source, me.owner, me.price, me.title
FROM (
SELECT TOP 2
- me.id, me.source, me.owner, me.price, ORDER__BY__001
+ me.id, me.source, me.owner, me.price, me.title
FROM (
SELECT TOP 5
- me.id, me.source, me.owner, me.price, title AS ORDER__BY__001
+ me.id, me.source, me.owner, me.price, me.title
FROM books me
JOIN owners owner ON owner.id = me.owner
WHERE ( source = ? )
GROUP BY title
ORDER BY title
) me
- ORDER BY ORDER__BY__001 DESC
+ ORDER BY title DESC
) me
- ORDER BY ORDER__BY__001
+ ORDER BY title
) me
JOIN owners owner ON owner.id = me.owner
WHERE ( source = ? )
my $tests = {
LimitOffset => {
+ limit => [
+ '(
+ SELECT me.id, owner.id, owner.name, ? * ?, ?
+ FROM books me
+ JOIN owners owner
+ ON owner.id = me.owner
+ WHERE source != ? AND me.title = ? AND source = ?
+ GROUP BY (me.id / ?), owner.id
+ HAVING ?
+ LIMIT ?
+ )',
+ [
+ @select_bind,
+ @where_bind,
+ @group_bind,
+ @having_bind,
+ [ { sqlt_datatype => 'integer' } => 4 ],
+ ],
+ ],
+ limit_offset => [
+ '(
+ SELECT me.id, owner.id, owner.name, ? * ?, ?
+ FROM books me
+ JOIN owners owner
+ ON owner.id = me.owner
+ WHERE source != ? AND me.title = ? AND source = ?
+ GROUP BY (me.id / ?), owner.id
+ HAVING ?
+ LIMIT ?
+ OFFSET ?
+ )',
+ [
+ @select_bind,
+ @where_bind,
+ @group_bind,
+ @having_bind,
+ [ { sqlt_datatype => 'integer' } => 4 ],
+ [ { sqlt_datatype => 'integer' } => 3 ],
+ ],
+ ],
+ ordered_limit => [
+ '(
+ SELECT me.id, owner.id, owner.name, ? * ?, ?
+ FROM books me
+ JOIN owners owner
+ ON owner.id = me.owner
+ WHERE source != ? AND me.title = ? AND source = ?
+ GROUP BY (me.id / ?), owner.id
+ HAVING ?
+ ORDER BY ? / ?, ?
+ LIMIT ?
+ )',
+ [
+ @select_bind,
+ @where_bind,
+ @group_bind,
+ @having_bind,
+ @order_bind,
+ [ { sqlt_datatype => 'integer' } => 4 ],
+ ]
+ ],
ordered_limit_offset => [
'(
SELECT me.id, owner.id, owner.name, ? * ?, ?
JOIN owners owner
ON owner.id = me.owner
WHERE source != ? AND me.title = ? AND source = ?
- GROUP BY avg(me.id / ?)
+ GROUP BY (me.id / ?), owner.id
HAVING ?
ORDER BY ? / ?, ?
LIMIT ?
) me
LEFT JOIN books books
ON books.owner = me.id
- ORDER BY books.owner
)',
[
[ { sqlt_datatype => 'integer' } => 3 ],
JOIN owners owner
ON owner.id = me.owner
WHERE source != ? AND me.title = ? AND source = ?
- GROUP BY avg(me.id / ?)
+ GROUP BY (me.id / ?), owner.id
HAVING ?
ORDER BY ? / ?, ?
LIMIT ?, ?
) me
LEFT JOIN books books
ON books.owner = me.id
- ORDER BY books.owner
)',
[
[ { sqlt_datatype => 'integer' } => 1 ],
JOIN owners owner
ON owner.id = me.owner
WHERE source != ? AND me.title = ? AND source = ?
- GROUP BY avg(me.id / ?)
+ GROUP BY (me.id / ?), owner.id
HAVING ?
ORDER BY ? / ?, ?
)',
) me
LEFT JOIN books books
ON books.owner = me.id
- ORDER BY books.owner
)',
[
[ { sqlt_datatype => 'integer' } => 1 ],
JOIN owners owner
ON owner.id = me.owner
WHERE source != ? AND me.title = ? AND source = ?
- GROUP BY avg(me.id / ?)
+ GROUP BY (me.id / ?), owner.id
HAVING ?
ORDER BY ? / ?, ?
)',
) me
LEFT JOIN books books
ON books.owner = me.id
- ORDER BY books.owner
)',
[
[ { sqlt_datatype => 'integer' } => 3 ],
JOIN owners owner
ON owner.id = me.owner
WHERE source != ? AND me.title = ? AND source = ?
- GROUP BY avg(me.id / ?)
+ GROUP BY (me.id / ?), owner.id
HAVING ?
) me
) me
JOIN owners owner
ON owner.id = me.owner
WHERE source != ? AND me.title = ? AND source = ?
- GROUP BY avg(me.id / ?)
+ GROUP BY (me.id / ?), owner.id
HAVING ?
) me
) me
) me
LEFT JOIN books books
ON books.owner = me.id
- ORDER BY books.owner
)',
[
[ { sqlt_datatype => 'integer' } => 2 ],
JOIN owners owner
ON owner.id = me.owner
WHERE source != ? AND me.title = ? AND source = ?
- GROUP BY avg(me.id / ?)
+ GROUP BY (me.id / ?), owner.id
HAVING ?
%s
) me
JOIN owners owner
ON owner.id = me.owner
WHERE source != ? AND me.title = ? AND source = ?
- GROUP BY avg(me.id / ?)
+ GROUP BY (me.id / ?), owner.id
HAVING ?
) me
) me
JOIN owners owner
ON owner.id = me.owner
WHERE source != ? AND me.title = ? AND source = ?
- GROUP BY avg(me.id / ?)
+ GROUP BY (me.id / ?), owner.id
HAVING ?
ORDER BY ? / ?, ?
) me
) me
LEFT JOIN books books
ON books.owner = me.id
- ORDER BY books.owner
)',
[
[ { sqlt_datatype => 'integer' } => 2 ],
JOIN owners owner
ON owner.id = me.owner
WHERE source != ? AND me.title = ? AND source = ?
- GROUP BY avg(me.id / ?)
+ GROUP BY (me.id / ?), owner.id
HAVING ?
FETCH FIRST 4 ROWS ONLY
)',
JOIN owners owner
ON owner.id = me.owner
WHERE source != ? AND me.title = ? AND source = ?
- GROUP BY avg(me.id / ?)
+ GROUP BY (me.id / ?), owner.id
HAVING ?
ORDER BY me.id
FETCH FIRST 7 ROWS ONLY
JOIN owners owner
ON owner.id = me.owner
WHERE source != ? AND me.title = ? AND source = ?
- GROUP BY avg(me.id / ?)
+ GROUP BY (me.id / ?), owner.id
HAVING ?
ORDER BY ? / ?, ?
FETCH FIRST 4 ROWS ONLY
JOIN owners owner
ON owner.id = me.owner
WHERE source != ? AND me.title = ? AND source = ?
- GROUP BY avg(me.id / ?)
+ GROUP BY (me.id / ?), owner.id
HAVING ?
ORDER BY ? / ?, ?
FETCH FIRST 7 ROWS ONLY
) me
LEFT JOIN books books
ON books.owner = me.id
- ORDER BY books.owner
)',
[],
],
JOIN owners owner
ON owner.id = me.owner
WHERE source != ? AND me.title = ? AND source = ?
- GROUP BY avg(me.id / ?)
+ GROUP BY (me.id / ?), owner.id
HAVING ?
)',
[
JOIN owners owner
ON owner.id = me.owner
WHERE source != ? AND me.title = ? AND source = ?
- GROUP BY avg(me.id / ?)
+ GROUP BY (me.id / ?), owner.id
HAVING ?
ORDER BY me.id
) me
JOIN owners owner
ON owner.id = me.owner
WHERE source != ? AND me.title = ? AND source = ?
- GROUP BY avg(me.id / ?)
+ GROUP BY (me.id / ?), owner.id
HAVING ?
ORDER BY ? / ?, ?
)',
JOIN owners owner
ON owner.id = me.owner
WHERE source != ? AND me.title = ? AND source = ?
- GROUP BY avg(me.id / ?)
+ GROUP BY (me.id / ?), owner.id
HAVING ?
ORDER BY ? / ?, ?
) me
) me
LEFT JOIN books books
ON books.owner = me.id
- ORDER BY books.owner
)',
[],
],
},
GenericSubQ => {
- limit => [
+ ordered_limit => [
'(
SELECT me.id, owner__id, owner__name, bar, baz
FROM (
- SELECT me.id, owner.id AS owner__id, owner.name AS owner__name, ? * ? AS bar, ? AS baz
+ SELECT me.id, owner.id AS owner__id, owner.name AS owner__name, ? * ? AS bar, ? AS baz, me.price
FROM books me
JOIN owners owner
ON owner.id = me.owner
WHERE source != ? AND me.title = ? AND source = ?
- GROUP BY avg( me.id / ? )
+ GROUP BY (me.id / ?), owner.id
HAVING ?
) me
WHERE (
SELECT COUNT( * )
FROM books rownum__emulation
- WHERE rownum__emulation.id < me.id
- ) < ?
- ORDER BY me.id
+ WHERE
+ ( me.price IS NULL AND rownum__emulation.price IS NOT NULL )
+ OR
+ (
+ rownum__emulation.price > me.price
+ AND
+ me.price IS NOT NULL
+ AND
+ rownum__emulation.price IS NOT NULL
+ )
+ OR
+ (
+ (
+ me.price = rownum__emulation.price
+ OR
+ ( me.price IS NULL AND rownum__emulation.price IS NULL )
+ )
+ AND
+ rownum__emulation.id < me.id
+ )
+ ) < ?
+ ORDER BY me.price DESC, me.id ASC
)',
[
@select_bind,
[ { sqlt_datatype => 'integer' } => 4 ],
],
],
- limit_offset => [
+ ordered_limit_offset => [
'(
SELECT me.id, owner__id, owner__name, bar, baz
FROM (
- SELECT me.id, owner.id AS owner__id, owner.name AS owner__name, ? * ? AS bar, ? AS baz
+ SELECT me.id, owner.id AS owner__id, owner.name AS owner__name, ? * ? AS bar, ? AS baz, me.price
FROM books me
JOIN owners owner
ON owner.id = me.owner
WHERE source != ? AND me.title = ? AND source = ?
- GROUP BY avg( me.id / ? )
+ GROUP BY (me.id / ?), owner.id
HAVING ?
) me
WHERE (
SELECT COUNT( * )
FROM books rownum__emulation
- WHERE rownum__emulation.id < me.id
- ) BETWEEN ? AND ?
- ORDER BY me.id
+ WHERE
+ ( me.price IS NULL AND rownum__emulation.price IS NOT NULL )
+ OR
+ (
+ rownum__emulation.price > me.price
+ AND
+ me.price IS NOT NULL
+ AND
+ rownum__emulation.price IS NOT NULL
+ )
+ OR
+ (
+ (
+ me.price = rownum__emulation.price
+ OR
+ ( me.price IS NULL AND rownum__emulation.price IS NULL )
+ )
+ AND
+ rownum__emulation.id < me.id
+ )
+ ) BETWEEN ? AND ?
+ ORDER BY me.price DESC, me.id ASC
)',
[
@select_bind,
FROM (
SELECT me.name, me.id
FROM (
- SELECT me.name, me.id FROM owners me
+ SELECT me.name, me.id
+ FROM owners me
) me
- WHERE (
- SELECT COUNT(*)
- FROM owners rownum__emulation
- WHERE rownum__emulation.id < me.id
- ) BETWEEN ? AND ?
- ORDER BY me.id
+ WHERE
+ (
+ SELECT COUNT(*)
+ FROM owners rownum__emulation
+ WHERE (
+ rownum__emulation.name < me.name
+ OR
+ (
+ me.name = rownum__emulation.name
+ AND
+ rownum__emulation.id > me.id
+ )
+ )
+ ) BETWEEN ? AND ?
+ ORDER BY me.name ASC, me.id DESC
) me
LEFT JOIN books books
ON books.owner = me.id
- ORDER BY me.id, books.owner
+ ORDER BY me.name ASC, me.id DESC
)',
[
[ { sqlt_datatype => 'integer' } => 1 ],
delete $schema->storage->_sql_maker->{_cached_syntax};
$schema->storage->_sql_maker->limit_dialect ($limtype);
+ my $can_run = ($limtype eq $native_limit_dialect or $limtype eq 'GenericSubQ');
+
# chained search is necessary to exercise the recursive {where} parser
my $rs = $schema->resultset('BooksInLibrary')->search({ 'me.title' => { '=' => 'kama sutra' } })->search({ source => { '!=', 'Study' } }, {
columns => [ { identifier => 'me.id' }, 'owner.id', 'owner.name' ], # people actually do that. BLEH!!! :)
join => 'owner', # single-rel manual prefetch
rows => 4,
'+columns' => { bar => \['? * ?', [ $attr => 11 ], [ $attr => 12 ]], baz => \[ '?', [ $attr => 13 ]] },
- group_by => \[ 'avg(me.id / ?)', [ $attr => 21 ] ],
+ group_by => \[ '(me.id / ?), owner.id', [ $attr => 21 ] ],
having => \[ '?', [ $attr => 31 ] ],
- ($limtype =~ /GenericSubQ/ ? ( order_by => 'me.id' ) : () ), # needs a simple-column stable order to be happy
});
#
#
# only limit, no offset, no order
- is_same_sql_bind(
- $rs->as_query,
- @{$tests->{$limtype}{limit}},
- "$limtype: Unordered limit with select/group/having",
- ) if $tests->{$limtype}{limit};
+ if ($tests->{$limtype}{limit}) {
+ is_same_sql_bind(
+ $rs->as_query,
+ @{$tests->{$limtype}{limit}},
+ "$limtype: Unordered limit with select/group/having",
+ );
+
+ lives_ok { $rs->all } "Grouped limit runs under $limtype"
+ if $can_run;
+ }
# limit + offset, no order
- is_same_sql_bind(
- $rs->search({}, { offset => 3 })->as_query,
- @{$tests->{$limtype}{limit_offset}},
- "$limtype: Unordered limit+offset with select/group/having",
- ) if $tests->{$limtype}{limit_offset};
+ if ($tests->{$limtype}{limit_offset}) {
+ my $subrs = $rs->search({}, { offset => 3 });
+ is_same_sql_bind(
+ $subrs->as_query,
+ @{$tests->{$limtype}{limit_offset}},
+ "$limtype: Unordered limit+offset with select/group/having",
+ );
+
+ lives_ok { $subrs->all } "Grouped limit+offset runs under $limtype"
+ if $can_run;
+ }
# order + limit, no offset
$rs = $rs->search(undef, {
- order_by => [ \['? / ?', [ $attr => 1 ], [ $attr => 2 ]], \[ '?', [ $attr => 3 ]] ],
+ order_by => ( $limtype =~ /GenericSubQ/
+ ? [ { -desc => 'price' }, 'me.id', \[ 'owner.name + ?', [ {} => 'bah' ] ] ] # needs a same-table stable order to be happy
+ : [ \['? / ?', [ $attr => 1 ], [ $attr => 2 ]], \[ '?', [ $attr => 3 ]] ]
+ ),
});
- is_same_sql_bind(
- $rs->as_query,
- @{$tests->{$limtype}{ordered_limit}},
- "$limtype: Ordered limit with select/group/having",
- ) if $tests->{$limtype}{ordered_limit};
+ if ($tests->{$limtype}{ordered_limit}) {
+ is_same_sql_bind(
+ $rs->as_query,
+ @{$tests->{$limtype}{ordered_limit}},
+ "$limtype: Ordered limit with select/group/having",
+ );
+
+ lives_ok { $rs->all } "Grouped ordered limit runs under $limtype"
+ if $can_run;
+ }
# order + limit + offset
- is_same_sql_bind(
- $rs->search({}, { offset => 3 })->as_query,
- @{$tests->{$limtype}{ordered_limit_offset}},
- "$limtype: Ordered limit+offset with select/group/having",
- ) if $tests->{$limtype}{ordered_limit_offset};
+ if ($tests->{$limtype}{ordered_limit_offset}) {
+ my $subrs = $rs->search({}, { offset => 3 });
+ is_same_sql_bind(
+ $subrs->as_query,
+ @{$tests->{$limtype}{ordered_limit_offset}},
+ "$limtype: Ordered limit+offset with select/group/having",
+ );
+
+ lives_ok { $subrs->all } "Grouped ordered limit+offset runs under $limtype"
+ if $can_run;
+ }
# complex prefetch on partial-fetch root with limit
my $pref_rs = $schema->resultset('Owners')->search({}, {
offset => 1,
columns => 'name', # only the owner name, still prefetch all the books
prefetch => 'books',
- ($limtype =~ /GenericSubQ/ ? ( order_by => 'me.id' ) : () ), # needs a simple-column stable order to be happy
+ ($limtype !~ /GenericSubQ/ ? () : (
+ # needs a same-table stable order to be happy
+ order_by => [ { -asc => 'me.name' }, \'me.id DESC' ]
+ )),
});
is_same_sql_bind (
"$limtype: Prefetch with limit+offset",
) if $tests->{$limtype}{limit_offset_prefetch};
- # we can actually run the query
- if ($limtype eq $native_limit_dialect or $limtype eq 'GenericSubQ') {
- lives_ok { is ($pref_rs->all, 1, 'Expected count of objects on limtied prefetch') }
- "Complex limited prefetch works with supported limit $limtype"
+ if ($can_run) {
+ lives_ok { is ($pref_rs->all, 1, 'Expected count of objects on limited prefetch') }
+ "Complex limited prefetch runs under $limtype"
}
}