tarball contents (implicitly fixes RT#83084)
- Added strict and warnings tests for all lib and test files
+0.08241-TRIAL (EXPERIMENTAL BETA RELEASE) 2013-02-20 11:97 (UTC)
+ * New Features / Changes
+ - Revert to passing the original (pre-0.08240) arguments to
+ inflate_result() and remove the warning about ResultClass
+ inheritance.
+ - Optimize the generated rowparsers even more - no user-visible
+ changes.
+ - Emit a warning on incorrect use of nullable columns within a
+ primary key
+
+0.08240-TRIAL (EXPERIMENTAL BETA RELEASE) 2013-02-14 05:56 (UTC)
+ * New Features / Changes
+ - Rewrite from scratch the result constructor codepath - many bugfixes
+ and performance improvements (the current codebase is now capable of
+ outperforming both DBIx::DataModel and Rose::DB::Object on some
+ workloads). Some notable benefits:
+ - Multiple has_many prefetch
+ - Partial prefetch - you now can select only columns you are
+ interested in, while preserving the collapse functionality
+ (collapse is now exposed as a first-class API attribute)
+ - Prefetch of resultsets with arbitrary order
+ (RT#54949, RT#74024, RT#74584)
+ - Prefetch no longer inserts right-side table order_by clauses
+ (massively helps the deficient MySQL optimizer)
+ - Massively optimize codepath around ->cursor(), over 10x speedup
+ on some iterating workloads.
+
+ * Fixes
+ - Fix open cursors silently resetting when inherited across a fork
+ or a thread
+ - Fix duplicated selected columns when calling 'count' when a same
+ aggregate function is used more than once in a 'having' clause
+ (RT#83305)
+
+ * Misc
+ - Fixup our distbuilding process to stop creating world-writable
+ tarball contents (implicitly fixes RT#83084)
+ - Added strict and warnings tests for all lib and test files
+
0.08206 2013-02-08
* Fixes
- Fix dbh_do() failing to properly reconnect (regression in 0.08205)
my $test_requires = {
'File::Temp' => '0.22',
+ 'Test::Deep' => '0.101',
'Test::Exception' => '0.31',
'Test::Warn' => '0.21',
'Test::More' => '0.94',
+++ /dev/null
-* a48693f4 adds 5 files for a test that may even be the same as that from
-571df676 - please rewrite using the existing schema and delete the rest
$schema->deploy;
my $rs = $schema->resultset ('Artist');
-$rs->populate ([ map { { name => "Art_$_"} } (1 .. 10000) ]);
+
+my $hri_rs = $rs->search ({}, { result_class => 'DBIx::Class::ResultClass::HashRefInflator' } );
+
+#DB::enable_profile();
+#my @foo = $hri_rs->all;
+#DB::disable_profile();
+#exit;
my $dbh = $schema->storage->dbh;
my $sql = sprintf ('SELECT %s FROM %s %s',
$rs->_resolved_attrs->{alias},
);
-my $compdbi = sub {
- my @r = $schema->storage->dbh->selectall_arrayref ('SELECT * FROM ' . ${$rs->as_query}->[0] )
-} if $rs->can ('as_query');
-
-cmpthese(-3, {
- Cursor => sub { $rs->reset; my @r = $rs->cursor->all },
- HRI => sub { $rs->reset; my @r = $rs->search ({}, { result_class => 'DBIx::Class::ResultClass::HashRefInflator' } )->all },
- RowObj => sub { $rs->reset; my @r = $rs->all },
- RawDBI => sub { my @r = $dbh->selectall_arrayref ($sql) },
- $compdbi ? (CompDBI => $compdbi) : (),
-});
+for (1,10,20,50,200,2500,10000) {
+ $rs->delete;
+ $rs->populate ([ map { { name => "Art_$_"} } (1 .. $_) ]);
+ print "\nRetrieval of $_ rows\n";
+ bench();
+}
+
+sub bench {
+ cmpthese(-3, {
+ Cursor => sub { my @r = $rs->cursor->all },
+ HRI => sub { my @r = $hri_rs->all },
+ RowObj => sub { my @r = $rs->all },
+ DBI_AoH => sub { my @r = @{ $dbh->selectall_arrayref ($sql, { Slice => {} }) } },
+ DBI_AoA=> sub { my @r = @{ $dbh->selectall_arrayref ($sql) } },
+ });
+}
# $VERSION declaration must stay up here, ahead of any other package
# declarations, as to not confuse various modules attempting to determine
# this ones version, whether that be s.c.o. or Module::Metadata, etc
-$VERSION = '0.08209';
+$VERSION = '0.08241';
$VERSION = eval $VERSION if $VERSION =~ /_/; # numify for warning-free dev releases
$mk_hash = sub {
my $hash = {
+
# the main hash could be an undef if we are processing a skipped-over join
$_[0] ? %{$_[0]} : (),
# the second arg is a hash of arrays for each prefetched relation
- map {
- ref $_[1]->{$_}[0] eq 'ARRAY' # multi rel or not?
- ? ( $_ => [ map
- { $mk_hash->(@$_) || () }
- @{$_[1]->{$_}}
- ] )
- : ( $_ => $mk_hash->( @{$_[1]->{$_}} ) )
-
- } ( $_[1] ? ( keys %{$_[1]} ) : () )
+ map { $_ => (
+
+ # null-branch or not
+ ref $_[1]->{$_} eq $DBIx::Class::ResultSource::RowParser::Util::null_branch_class
+
+ ? ref $_[1]->{$_}[0] eq 'ARRAY' ? [] : undef
+
+ : ref $_[1]->{$_}[0] eq 'ARRAY'
+ ? [ map { $mk_hash->( @$_ ) || () } @{$_[1]->{$_}} ]
+ : $mk_hash->( @{$_[1]->{$_}} )
+
+ ) } ($_[1] ? keys %{$_[1]} : ())
};
- # if there is at least one defined column *OR* we are at the root of
- # the resultset - consider the result real (and not an emtpy has_many
- # rel containing one empty hashref)
- # an empty arrayref is an empty multi-sub-prefetch - don't consider
- # those either
- return $hash if $_[2];
-
- for (values %$hash) {
- return $hash if (
- defined $_
- and
- (ref $_ ne 'ARRAY' or scalar @$_)
- );
- }
-
- return undef;
+ ($_[2] || keys %$hash) ? $hash : undef;
};
=head1 METHODS
=head3 Resolving conditions and attributes
-When a resultset is chained from another resultset, conditions and
-attributes with the same keys need resolving.
+When a resultset is chained from another resultset (ie:
+C<my $new_rs = $old_rs->search(\%extra_cond, \%attrs)>), conditions
+and attributes with the same keys need resolving.
-L</join>, L</prefetch>, L</+select>, L</+as> attributes are merged
-into the existing ones from the original resultset.
+If any of L</columns>, L</select>, L</as> are present, they reset the
+original selection, and start the selection "clean".
+
+The L</join>, L</prefetch>, L</+columns>, L</+select>, L</+as> attributes
+are merged into the existing ones from the original resultset.
The L</where> and L</having> attributes, and any search conditions, are
merged with an SQL C<AND> to the existing condition from the original
# Run the query, passing the result_class since it should propagate for find
my $rs = $self->search ($final_cond, {result_class => $self->result_class, %$attrs});
- if (keys %{$rs->_resolved_attrs->{collapse}}) {
+ if ($rs->_resolved_attrs->{collapse}) {
my $row = $rs->next;
carp "Query returned more than one row" if $rs->next;
return $row;
my $attrs = { %{$self->_resolved_attrs} };
- if (keys %{$attrs->{collapse}}) {
- $self->throw_exception(
- 'single() can not be used on resultsets prefetching has_many. Use find( \%cond ) or next() instead'
- );
- }
+ $self->throw_exception(
+ 'single() can not be used on resultsets prefetching has_many. Use find( \%cond ) or next() instead'
+ ) if $attrs->{collapse};
if ($where) {
if (defined $attrs->{where}) {
}
}
- my @data = $self->result_source->storage->select_single(
+ my $data = [ $self->result_source->storage->select_single(
$attrs->{from}, $attrs->{select},
$attrs->{where}, $attrs
- );
-
- return (@data ? ($self->_construct_object(@data))[0] : undef);
+ )];
+ return undef unless @$data;
+ $self->{stashed_rows} = [ $data ];
+ $self->_construct_objects->[0];
}
sub next {
my ($self) = @_;
+
if (my $cache = $self->get_cache) {
$self->{all_cache_position} ||= 0;
return $cache->[$self->{all_cache_position}++];
}
+
if ($self->{attrs}{cache}) {
delete $self->{pager};
$self->{all_cache_position} = 1;
return ($self->all)[0];
}
- if ($self->{stashed_objects}) {
- my $obj = shift(@{$self->{stashed_objects}});
- delete $self->{stashed_objects} unless @{$self->{stashed_objects}};
- return $obj;
- }
- my @row = (
- exists $self->{stashed_row}
- ? @{delete $self->{stashed_row}}
- : $self->cursor->next
- );
- return undef unless (@row);
- my ($row, @more) = $self->_construct_object(@row);
- $self->{stashed_objects} = \@more if @more;
- return $row;
-}
-sub _construct_object {
- my ($self, @row) = @_;
+ return shift(@{$self->{stashed_objects}}) if @{ $self->{stashed_objects}||[] };
+
+ $self->{stashed_objects} = $self->_construct_objects
+ or return undef;
- my $info = $self->_collapse_result($self->{_attrs}{as}, \@row)
- or return ();
- my @new = $self->result_class->inflate_result($self->result_source, @$info);
- @new = $self->{_attrs}{record_filter}->(@new)
- if exists $self->{_attrs}{record_filter};
- return @new;
+ return shift @{$self->{stashed_objects}};
}
-sub _collapse_result {
- my ($self, $as_proto, $row) = @_;
+# Constructs as many objects as it can in one pass while respecting
+# cursor laziness. Several modes of operation:
+#
+# * Always builds everything present in @{$self->{stashed_rows}}
+# * If called with $fetch_all true - pulls everything off the cursor and
+# builds all objects in one pass
+# * If $self->_resolved_attrs->{collapse} is true, checks the order_by
+# and if the resultset is ordered properly by the left side:
+# * Fetches stuff off the cursor until the "master object" changes,
+# and saves the last extra row (if any) in @{$self->{stashed_rows}}
+# OR
+# * Just fetches, and collapses/constructs everything as if $fetch_all
+# was requested (there is no other way to collapse except for an
+# eager cursor)
+# * If no collapse is requested - just get the next row, construct and
+# return
+sub _construct_objects {
+ my ($self, $fetch_all) = @_;
- my @copy = @$row;
+ my $rsrc = $self->result_source;
+ my $attrs = $self->_resolved_attrs;
- # 'foo' => [ undef, 'foo' ]
- # 'foo.bar' => [ 'foo', 'bar' ]
- # 'foo.bar.baz' => [ 'foo.bar', 'baz' ]
+ if (!$fetch_all and ! $attrs->{order_by} and $attrs->{collapse}) {
+ # default order for collapsing unless the user asked for something
+ $attrs->{order_by} = [ map { join '.', $attrs->{alias}, $_} $rsrc->primary_columns ];
+ $attrs->{_ordered_for_collapse} = 1;
+ $attrs->{_order_is_artificial} = 1;
+ }
- my @construct_as = map { [ (/^(?:(.*)\.)?([^.]+)$/) ] } @$as_proto;
+ my $cursor = $self->cursor;
- my %collapse = %{$self->{_attrs}{collapse}||{}};
+ # this will be used as both initial raw-row collector AND as a RV of
+ # _construct_objects. Not regrowing the array twice matters a lot...
+  # a surprising amount actually
+ my $rows = delete $self->{stashed_rows};
- my @pri_index;
+ if ($fetch_all) {
+ # FIXME SUBOPTIMAL - we can do better, cursor->next/all (well diff. methods) should return a ref
+ $rows = [ ($rows ? @$rows : ()), $cursor->all ];
+ }
+ elsif( $attrs->{collapse} ) {
- # if we're doing collapsing (has_many prefetch) we need to grab records
- # until the PK changes, so fill @pri_index. if not, we leave it empty so
- # we know we don't have to bother.
+ $attrs->{_ordered_for_collapse} = (!$attrs->{order_by}) ? 0 : do {
+ my $st = $rsrc->schema->storage;
+ my @ord_cols = map
+ { $_->[0] }
+ ( $st->_extract_order_criteria($attrs->{order_by}) )
+ ;
- # the reason for not using the collapse stuff directly is because if you
- # had for e.g. two artists in a row with no cds, the collapse info for
- # both would be NULL (undef) so you'd lose the second artist
+ my $colinfos = $st->_resolve_column_info($attrs->{from}, \@ord_cols);
- # store just the index so we can check the array positions from the row
- # without having to contruct the full hash
+ for (0 .. $#ord_cols) {
+ if (
+ ! $colinfos->{$ord_cols[$_]}
+ or
+ $colinfos->{$ord_cols[$_]}{-result_source} != $rsrc
+ ) {
+ splice @ord_cols, $_;
+ last;
+ }
+ }
- if (keys %collapse) {
- my %pri = map { ($_ => 1) } $self->result_source->_pri_cols;
- foreach my $i (0 .. $#construct_as) {
- next if defined($construct_as[$i][0]); # only self table
- if (delete $pri{$construct_as[$i][1]}) {
- push(@pri_index, $i);
+ # since all we check here are the start of the order_by belonging to the
+ # top level $rsrc, a present identifying set will mean that the resultset
+  # is ordered by its leftmost table in a stable manner
+ (@ord_cols and $rsrc->_identifying_column_set({ map
+ { $colinfos->{$_}{-colname} => $colinfos->{$_} }
+ @ord_cols
+ })) ? 1 : 0;
+ } unless defined $attrs->{_ordered_for_collapse};
+
+ if (! $attrs->{_ordered_for_collapse}) {
+ $fetch_all = 1;
+
+ # instead of looping over ->next, use ->all in stealth mode
+ # *without* calling a ->reset afterwards
+ # FIXME - encapsulation breach, got to be a better way
+ if (! $cursor->{_done}) {
+ $rows = [ ($rows ? @$rows : ()), $cursor->all ];
+ $cursor->{_done} = 1;
}
- last unless keys %pri; # short circuit (Johnny Five Is Alive!)
}
}
- # no need to do an if, it'll be empty if @pri_index is empty anyway
-
- my %pri_vals = map { ($_ => $copy[$_]) } @pri_index;
-
- my @const_rows;
-
- do { # no need to check anything at the front, we always want the first row
+ if (! $fetch_all and ! @{$rows||[]} ) {
+ # FIXME SUBOPTIMAL - we can do better, cursor->next/all (well diff. methods) should return a ref
+ if (scalar (my @r = $cursor->next) ) {
+ $rows = [ \@r ];
+ }
+ }
- my %const;
+ return undef unless @{$rows||[]};
- foreach my $this_as (@construct_as) {
- $const{$this_as->[0]||''}{$this_as->[1]} = shift(@copy);
- }
+ my @extra_collapser_args;
+ if ($attrs->{collapse} and ! $fetch_all ) {
- push(@const_rows, \%const);
+ @extra_collapser_args = (
+ # FIXME SUBOPTIMAL - we can do better, cursor->next/all (well diff. methods) should return a ref
+ sub { my @r = $cursor->next or return; \@r }, # how the collapser gets more rows
+ ($self->{stashed_rows} = []), # where does it stuff excess
+ );
+ }
- } until ( # no pri_index => no collapse => drop straight out
- !@pri_index
- or
- do { # get another row, stash it, drop out if different PK
+ # hotspot - skip the setter
+ my $res_class = $self->_result_class;
- @copy = $self->cursor->next;
- $self->{stashed_row} = \@copy;
+ my $inflator_cref = $self->{_result_inflator}{cref} ||= do {
+ $res_class->can ('inflate_result')
+ or $self->throw_exception("Inflator $res_class does not provide an inflate_result() method");
+ };
- # last thing in do block, counts as true if anything doesn't match
+ my $infmap = $attrs->{as};
- # check xor defined first for NULL vs. NOT NULL then if one is
- # defined the other must be so check string equality
+ $self->{_result_inflator}{is_hri} = do { ( $inflator_cref == (
+ require DBIx::Class::ResultClass::HashRefInflator
+ &&
+ DBIx::Class::ResultClass::HashRefInflator->can('inflate_result')
+ ) ) ? 1 : 0
+ } unless defined $self->{_result_inflator}{is_hri};
- grep {
- (defined $pri_vals{$_} ^ defined $copy[$_])
- || (defined $pri_vals{$_} && ($pri_vals{$_} ne $copy[$_]))
- } @pri_index;
+ if ($attrs->{_single_resultclass_inflation}) {
+ # construct a much simpler array->hash folder for the one-table cases right here
+ if ($self->{_result_inflator}{is_hri}) {
+ for my $r (@$rows) {
+ $r = { map { $infmap->[$_] => $r->[$_] } 0..$#$infmap };
}
- );
-
- my $alias = $self->{attrs}{alias};
- my $info = [];
+ }
+ # FIXME SUBOPTIMAL this is a very very very hot spot
+ # while rather optimal we can *still* do much better, by
+ # building a smarter Row::inflate_result(), and
+ # switch to feeding it data via a much leaner interface
+ #
+ # crude unscientific benchmarking indicated the shortcut eval is not worth it for
+ # this particular resultset size
+ elsif (@$rows < 60) {
+ for my $r (@$rows) {
+ $r = $inflator_cref->($res_class, $rsrc, { map { $infmap->[$_] => $r->[$_] } (0..$#$infmap) } );
+ }
+ }
+ else {
+ eval sprintf (
+ '$_ = $inflator_cref->($res_class, $rsrc, { %s }) for @$rows',
+ join (', ', map { "\$infmap->[$_] => \$_->[$_]" } 0..$#$infmap )
+ );
+ }
+ }
+ # Special-case multi-object HRI (we always prune)
+ elsif ($self->{_result_inflator}{is_hri}) {
+ ( $self->{_row_parser}{hri} ||= $rsrc->_mk_row_parser({
+ eval => 1,
+ inflate_map => $infmap,
+ selection => $attrs->{select},
+ collapse => $attrs->{collapse},
+ premultiplied => $attrs->{_main_source_premultiplied},
+ hri_style => 1,
+ }) )->($rows, @extra_collapser_args);
+ }
+ # Regular multi-object
+ else {
- my %collapse_pos;
+ ( $self->{_row_parser}{classic} ||= $rsrc->_mk_row_parser({
+ eval => 1,
+ inflate_map => $infmap,
+ selection => $attrs->{select},
+ collapse => $attrs->{collapse},
+ premultiplied => $attrs->{_main_source_premultiplied},
+ }) )->($rows, @extra_collapser_args);
- my @const_keys;
+ $_ = $inflator_cref->($res_class, $rsrc, @$_) for @$rows;
+ }
- foreach my $const (@const_rows) {
- scalar @const_keys or do {
- @const_keys = sort { length($a) <=> length($b) } keys %$const;
- };
- foreach my $key (@const_keys) {
- if (length $key) {
- my $target = $info;
- my @parts = split(/\./, $key);
- my $cur = '';
- my $data = $const->{$key};
- foreach my $p (@parts) {
- $target = $target->[1]->{$p} ||= [];
- $cur .= ".${p}";
- if ($cur eq ".${key}" && (my @ckey = @{$collapse{$cur}||[]})) {
- # collapsing at this point and on final part
- my $pos = $collapse_pos{$cur};
- CK: foreach my $ck (@ckey) {
- if (!defined $pos->{$ck} || $pos->{$ck} ne $data->{$ck}) {
- $collapse_pos{$cur} = $data;
- delete @collapse_pos{ # clear all positioning for sub-entries
- grep { m/^\Q${cur}.\E/ } keys %collapse_pos
- };
- push(@$target, []);
- last CK;
- }
- }
- }
- if (exists $collapse{$cur}) {
- $target = $target->[-1];
- }
- }
- $target->[0] = $data;
- } else {
- $info->[0] = $const->{$key};
- }
- }
+ # CDBI compat stuff
+ if ($attrs->{record_filter}) {
+ $_ = $attrs->{record_filter}->($_) for @$rows;
}
- return $info;
+ return $rows;
}
=head2 result_source
sub result_class {
my ($self, $result_class) = @_;
if ($result_class) {
+
unless (ref $result_class) { # don't fire this for an object
$self->ensure_class_loaded($result_class);
}
# permit the user to set result class on one result set only; it only
# chains if provided to search()
#$self->{attrs}{result_class} = $result_class if ref $self;
+
+ delete $self->{_result_inflator};
}
$self->_result_class;
}
# this is a little optimization - it is faster to do the limit
# adjustments in software, instead of a subquery
- my $rows = delete $attrs->{rows};
- my $offset = delete $attrs->{offset};
+ my ($rows, $offset) = delete @{$attrs}{qw/rows offset/};
my $crs;
if ($self->_has_resolved_attr (qw/collapse group_by/)) {
# overwrite the selector (supplied by the storage)
$tmp_attrs->{select} = $rsrc->storage->_count_select ($rsrc, $attrs);
$tmp_attrs->{as} = 'count';
- delete @{$tmp_attrs}{qw/columns/};
my $tmp_rs = $rsrc->resultset_class->new($rsrc, $tmp_attrs)->get_column ('count');
# if we multi-prefetch we group_by something unique, as this is what we would
# get out of the rs via ->next/->all. We *DO WANT* to clobber old group_by regardless
- if ( keys %{$attrs->{collapse}} ) {
+ if ( $attrs->{collapse} ) {
$sub_attrs->{group_by} = [ map { "$attrs->{alias}.$_" } @{
$rsrc->_identifying_column_set || $self->throw_exception(
'Unable to construct a unique group_by criteria properly collapsing the '
sub all {
my $self = shift;
if(@_) {
- $self->throw_exception("all() doesn't take any arguments, you probably wanted ->search(...)->all()");
+ $self->throw_exception("all() doesn't take any arguments, you probably wanted ->search(...)->all()");
}
- return @{ $self->get_cache } if $self->get_cache;
-
- my @obj;
-
- if (keys %{$self->_resolved_attrs->{collapse}}) {
- # Using $self->cursor->all is really just an optimisation.
- # If we're collapsing has_many prefetches it probably makes
- # very little difference, and this is cleaner than hacking
- # _construct_object to survive the approach
- $self->cursor->reset;
- my @row = $self->cursor->next;
- while (@row) {
- push(@obj, $self->_construct_object(@row));
- @row = (exists $self->{stashed_row}
- ? @{delete $self->{stashed_row}}
- : $self->cursor->next);
- }
- } else {
- @obj = map { $self->_construct_object(@$_) } $self->cursor->all;
+ delete @{$self}{qw/stashed_rows stashed_objects/};
+
+ if (my $c = $self->get_cache) {
+ return @$c;
}
- $self->set_cache(\@obj) if $self->{attrs}{cache};
+ $self->cursor->reset;
+
+ my $objs = $self->_construct_objects('fetch_all') || [];
- return @obj;
+ $self->set_cache($objs) if $self->{attrs}{cache};
+
+ return @$objs;
}
=head2 reset
sub reset {
my ($self) = @_;
+
+ delete @{$self}{qw/stashed_rows stashed_objects/};
$self->{all_cache_position} = 0;
$self->cursor->reset;
return $self;
);
# make a new $rs selecting only the PKs (that's all we really need for the subq)
- delete $attrs->{$_} for qw/collapse _collapse_order_by select _prefetch_selector_range as/;
+ delete $attrs->{$_} for qw/collapse select _prefetch_selector_range as/;
$attrs->{columns} = [ map { "$attrs->{alias}.$_" } @$idcols ];
$attrs->{group_by} = \ ''; # FIXME - this is an evil hack, it causes the optimiser to kick in and throw away the LEFT joins
my $subrs = (ref $self)->new($rsrc, $attrs);
# throw away the paging flags and re-run the count (possibly
# with a subselect) to get the real total count
my $count_attrs = { %$attrs };
- delete $count_attrs->{$_} for qw/rows offset page pager/;
+ delete @{$count_attrs}{qw/rows offset page pager/};
my $total_rs = (ref $self)->new($self->result_source, $count_attrs);
sub related_resultset {
my ($self, $rel) = @_;
- $self->{related_resultsets} ||= {};
return $self->{related_resultsets}{$rel} ||= do {
my $rsrc = $self->result_source;
my $rel_info = $rsrc->relationship_info($rel);
#XXX - temp fix for result_class bug. There likely is a more elegant fix -groditi
delete @{$attrs}{qw(result_class alias)};
- my $new_cache;
+ my $related_cache;
if (my $cache = $self->get_cache) {
- if ($cache->[0] && $cache->[0]->related_resultset($rel)->get_cache) {
- $new_cache = [ map { @{$_->related_resultset($rel)->get_cache} }
- @$cache ];
- }
+ $related_cache = [ map
+ { @{$_->related_resultset($rel)->get_cache||[]} }
+ @$cache
+ ];
}
my $rel_source = $rsrc->related_source($rel);
where => $attrs->{where},
});
};
- $new->set_cache($new_cache) if $new_cache;
+ $new->set_cache($related_cache) if $related_cache;
$new;
};
}
return {%$attrs, from => $from, seen_join => $seen};
}
+# FIXME - this needs to go live in Schema with the tree walker... or
+# something
+my $inflatemap_checker;
+$inflatemap_checker = sub {
+ my ($rsrc, $relpaths) = @_;
+
+ my $rels;
+
+ for (@$relpaths) {
+ $_ =~ /^ ( [^\.]+ ) \. (.+) $/x
+ or next;
+
+ push @{$rels->{$1}}, $2;
+ }
+
+ for my $rel (keys %$rels) {
+ my $rel_rsrc = try {
+ $rsrc->related_source ($rel)
+ } catch {
+ $rsrc->throw_exception(sprintf(
+ "Inflation into non-existent relationship '%s' of '%s' requested, "
+ . "check the inflation specification (columns/as) ending in '...%s.%s'",
+ $rel,
+ $rsrc->source_name,
+ $rel,
+ ( sort { length($a) <=> length ($b) } @{$rels->{$rel}} )[0],
+ ))};
+
+ $inflatemap_checker->($rel_rsrc, $rels->{$rel});
+ }
+
+ return;
+};
+
sub _resolved_attrs {
my $self = shift;
return $self->{_attrs} if $self->{_attrs};
if $attrs->{select};
# assume all unqualified selectors to apply to the current alias (legacy stuff)
- for (@sel) {
- $_ = (ref $_ or $_ =~ /\./) ? $_ : "$alias.$_";
- }
+ $_ = (ref $_ or $_ =~ /\./) ? $_ : "$alias.$_" for @sel;
- # disqualify all $alias.col as-bits (collapser mandated)
- for (@as) {
- $_ = ($_ =~ /^\Q$alias.\E(.+)$/) ? $1 : $_;
- }
+ # disqualify all $alias.col as-bits (inflate-map mandated)
+ $_ = ($_ =~ /^\Q$alias.\E(.+)$/) ? $1 : $_ for @as;
# de-duplicate the result (remove *identical* select/as pairs)
# and also die on duplicate {as} pointing to different {select}s
}
}
+ # validate the user-supplied 'as' chain
+ # folks get too confused by the (logical) exception message, need to
+ # go to some lengths to clarify the text
+ #
+ # FIXME - this needs to go live in Schema with the tree walker... or
+ # something
+ $inflatemap_checker->($source, \@as);
+
$attrs->{select} = \@sel;
$attrs->{as} = \@as;
}
}
- $attrs->{collapse} ||= {};
- if ($attrs->{prefetch}) {
+ # generate selections based on the prefetch helper
+ my $prefetch;
+ $prefetch = $self->_merge_joinpref_attr( {}, delete $attrs->{prefetch} )
+ if defined $attrs->{prefetch};
+
+ if ($prefetch) {
$self->throw_exception("Unable to prefetch, resultset contains an unnamed selector $attrs->{_dark_selector}{string}")
if $attrs->{_dark_selector};
- my $prefetch = $self->_merge_joinpref_attr( {}, delete $attrs->{prefetch} );
-
- my $prefetch_ordering = [];
+ $attrs->{collapse} = 1;
# this is a separate structure (we don't look in {from} directly)
# as the resolver needs to shift things off the lists to work
}
}
- my @prefetch =
- $source->_resolve_prefetch( $prefetch, $alias, $join_map, $prefetch_ordering, $attrs->{collapse} );
+ my @prefetch = $source->_resolve_prefetch( $prefetch, $alias, $join_map );
# we need to somehow mark which columns came from prefetch
if (@prefetch) {
push @{ $attrs->{select} }, (map { $_->[0] } @prefetch);
push @{ $attrs->{as} }, (map { $_->[1] } @prefetch);
+ }
+
+ if ( ! List::Util::first { $_ =~ /\./ } @{$attrs->{as}} ) {
+ $attrs->{_single_resultclass_inflation} = 1;
+ $attrs->{collapse} = 0;
+ }
+
+ # run through the resulting joinstructure (starting from our current slot)
+  # and unset collapse if proven unnecessary
+ #
+ # also while we are at it find out if the current root source has
+ # been premultiplied by previous related_source chaining
+ #
+ # this allows to predict whether a root object with all other relation
+ # data set to NULL is in fact unique
+ if ($attrs->{collapse}) {
+
+ if (ref $attrs->{from} eq 'ARRAY') {
+
+ if (@{$attrs->{from}} <= 1) {
+ # no joins - no collapse
+ $attrs->{collapse} = 0;
+ }
+ else {
+ # find where our table-spec starts
+ my @fromlist = @{$attrs->{from}};
+ while (@fromlist) {
+ my $t = shift @fromlist;
+
+ my $is_multi;
+ # me vs join from-spec distinction - a ref means non-root
+ if (ref $t eq 'ARRAY') {
+ $t = $t->[0];
+ $is_multi ||= ! $t->{-is_single};
+ }
+ last if ($t->{-alias} && $t->{-alias} eq $alias);
+ $attrs->{_main_source_premultiplied} ||= $is_multi;
+ }
- push( @{$attrs->{order_by}}, @$prefetch_ordering );
- $attrs->{_collapse_order_by} = \@$prefetch_ordering;
+ # no non-singles remaining, nor any premultiplication - nothing to collapse
+ if (
+ ! $attrs->{_main_source_premultiplied}
+ and
+ ! List::Util::first { ! $_->[0]{-is_single} } @fromlist
+ ) {
+ $attrs->{collapse} = 0;
+ }
+ }
+ }
+
+ else {
+ # if we can not analyze the from - err on the side of safety
+ $attrs->{_main_source_premultiplied} = 1;
+ }
}
# if both page and offset are specified, produce a combined offset
$seen_keys->{$import_key} = 1; # don't merge the same key twice
}
- return $orig;
+ return @$orig ? $orig : ();
}
{
my $to_serialize = { %$self };
# A cursor in progress can't be serialized (and would make little sense anyway)
- delete $to_serialize->{cursor};
+ # the parser can be regenerated (and can't be serialized)
+ delete @{$to_serialize}{qw/cursor _row_parser _result_inflator/};
# nor is it sensical to store a not-yet-fired-count pager
if ($to_serialize->{pager} and ref $to_serialize->{pager}{total_entries} eq 'CODE') {
}
}
+1;
+
+__END__
+
# XXX: FIXME: Attributes docs need clearing up
=head1 ATTRIBUTES
=over 4
-=item Value: \@columns
+=item Value: \@columns | \%columns | $column
=back
=back
-=head2 +as
-
-=over 4
-
-Indicates additional column names for those added via L</+select>. See L</as>.
-
-=back
-
=head2 as
=over 4
You can create your own accessors if required - see
L<DBIx::Class::Manual::Cookbook> for details.
+=head2 +as
+
+=over 4
+
+Indicates additional column names for those added via L</+select>. See L</as>.
+
+=back
+
=head2 join
=over 4
will return a set of all artists that have both a cd with title 'Down
to Earth' and a cd with title 'Popular'.
-If you want to fetch related objects from other tables as well, see C<prefetch>
+If you want to fetch related objects from other tables as well, see L</prefetch>
below.
NOTE: An internal join-chain pruner will discard certain joins while
For more help on using joins with search, see L<DBIx::Class::Manual::Joining>.
-=head2 prefetch
+=head2 collapse
=over 4
-=item Value: ($rel_name | \@rel_names | \%rel_names)
+=item Value: (0 | 1)
=back
-Contains one or more relationships that should be fetched along with
-the main query (when they are accessed afterwards the data will
-already be available, without extra queries to the database). This is
-useful for when you know you will need the related objects, because it
-saves at least one query:
-
- my $rs = $schema->resultset('Tag')->search(
- undef,
- {
- prefetch => {
- cd => 'artist'
- }
- }
- );
-
-The initial search results in SQL like the following:
-
- SELECT tag.*, cd.*, artist.* FROM tag
- JOIN cd ON tag.cd = cd.cdid
- JOIN artist ON cd.artist = artist.artistid
-
-L<DBIx::Class> has no need to go back to the database when we access the
-C<cd> or C<artist> relationships, which saves us two SQL statements in this
-case.
-
-Simple prefetches will be joined automatically, so there is no need
-for a C<join> attribute in the above search.
+When set to a true value, indicates that any rows fetched from joined has_many
+relationships are to be aggregated into the corresponding "parent" object. For
+example, the resultset:
-L</prefetch> can be used with the any of the relationship types and
-multiple prefetches can be specified together. Below is a more complex
-example that prefetches a CD's artist, its liner notes (if present),
-the cover image, the tracks on that cd, and the guests on those
-tracks.
-
- # Assuming:
- My::Schema::CD->belongs_to( artist => 'My::Schema::Artist' );
- My::Schema::CD->might_have( liner_note => 'My::Schema::LinerNotes' );
- My::Schema::CD->has_one( cover_image => 'My::Schema::Artwork' );
- My::Schema::CD->has_many( tracks => 'My::Schema::Track' );
-
- My::Schema::Artist->belongs_to( record_label => 'My::Schema::RecordLabel' );
-
- My::Schema::Track->has_many( guests => 'My::Schema::Guest' );
-
-
- my $rs = $schema->resultset('CD')->search(
- undef,
- {
- prefetch => [
- { artist => 'record_label'}, # belongs_to => belongs_to
- 'liner_note', # might_have
- 'cover_image', # has_one
- { tracks => 'guests' }, # has_many => has_many
- ]
- }
- );
-
-This will produce SQL like the following:
-
- SELECT cd.*, artist.*, record_label.*, liner_note.*, cover_image.*,
- tracks.*, guests.*
- FROM cd me
- JOIN artist artist
- ON artist.artistid = me.artistid
- JOIN record_label record_label
- ON record_label.labelid = artist.labelid
- LEFT JOIN track tracks
- ON tracks.cdid = me.cdid
- LEFT JOIN guest guests
- ON guests.trackid = track.trackid
- LEFT JOIN liner_notes liner_note
- ON liner_note.cdid = me.cdid
- JOIN cd_artwork cover_image
- ON cover_image.cdid = me.cdid
- ORDER BY tracks.cd
-
-Now the C<artist>, C<record_label>, C<liner_note>, C<cover_image>,
-C<tracks>, and C<guests> of the CD will all be available through the
-relationship accessors without the need for additional queries to the
-database.
-
-However, there is one caveat to be observed: it can be dangerous to
-prefetch more than one L<has_many|DBIx::Class::Relationship/has_many>
-relationship on a given level. e.g.:
-
- my $rs = $schema->resultset('CD')->search(
- undef,
- {
- prefetch => [
- 'tracks', # has_many
- { cd_to_producer => 'producer' }, # has_many => belongs_to (i.e. m2m)
- ]
- }
- );
-
-The collapser currently can't identify duplicate tuples for multiple
-L<has_many|DBIx::Class::Relationship/has_many> relationships and as a
-result the second L<has_many|DBIx::Class::Relationship/has_many>
-relation could contain redundant objects.
+ my $rs = $schema->resultset('CD')->search({}, {
+ '+columns' => [ qw/ tracks.title tracks.position / ],
+ join => 'tracks',
+ collapse => 1,
+ });
-=head3 Using L</prefetch> with L</join>
+While executing the following query:
-L</prefetch> implies a L</join> with the equivalent argument, and is
-properly merged with any existing L</join> specification. So the
-following:
+ SELECT me.*, tracks.title, tracks.position
+ FROM cd me
+ LEFT JOIN track tracks
+ ON tracks.cdid = me.cdid
- my $rs = $schema->resultset('CD')->search(
- {'record_label.name' => 'Music Product Ltd.'},
- {
- join => {artist => 'record_label'},
- prefetch => 'artist',
- }
- );
+Will return only as many objects as there are rows in the CD source, even
+though the result of the query may span many rows. Each of these CD objects
+will in turn have multiple "Track" objects hidden behind the has_many
+generated accessor C<tracks>. Without C<< collapse => 1 >>, the return values
+of this resultset would be as many CD objects as there are tracks (a "Cartesian
+product"), with each CD object containing exactly one row of the fetched Track data.
-... will work, searching on the record label's name, but only
-prefetching the C<artist>.
+When a collapse is requested on a non-ordered resultset, an order by some
+unique part of the main source (the left-most table) is inserted automatically.
+This is done so that the resultset is allowed to be "lazy" - calling
+L<< $rs->next|/next >> will fetch only as many rows as it needs to build the next
+object with all of its related data.
-=head3 Using L</prefetch> with L</select> / L</+select> / L</as> / L</+as>
+If an L</order_by> is already declared, and orders the resultset in a way that
+makes collapsing as described above impossible (e.g. C<< ORDER BY
+has_many_rel.column >> or C<ORDER BY RANDOM()>), DBIC will automatically
+switch to "eager" mode and slurp the entire resultset before constructing the
+first object returned by L</next>.
-L</prefetch> implies a L</+select>/L</+as> with the fields of the
-prefetched relations. So given:
+Setting this attribute on a resultset that does not join any has_many
+relations is a no-op.
- my $rs = $schema->resultset('CD')->search(
- undef,
- {
- select => ['cd.title'],
- as => ['cd_title'],
- prefetch => 'artist',
- }
- );
+For a more in-depth discussion, see L</PREFETCHING>.
-The L</select> becomes: C<'cd.title', 'artist.*'> and the L</as>
-becomes: C<'cd_title', 'artist.*'>.
-
-=head3 CAVEATS
-
-Prefetch does a lot of deep magic. As such, it may not behave exactly
-as you might expect.
+=head2 prefetch
=over 4
-=item *
+=item Value: ($rel_name | \@rel_names | \%rel_names)
-Prefetch uses the L</cache> to populate the prefetched relationships. This
-may or may not be what you want.
+=back
-=item *
+This attribute is a shorthand for specifying a L</join> spec, adding all
+columns from the joined related sources as L</+columns> and setting
+L</collapse> to a true value. For example, the following two queries are
+equivalent:
-If you specify a condition on a prefetched relationship, ONLY those
-rows that match the prefetched condition will be fetched into that relationship.
-This means that adding prefetch to a search() B<may alter> what is returned by
-traversing a relationship. So, if you have C<< Artist->has_many(CDs) >> and you do
-
- my $artist_rs = $schema->resultset('Artist')->search({
- 'cds.year' => 2008,
- }, {
- join => 'cds',
+ my $rs = $schema->resultset('Artist')->search({}, {
+ prefetch => { cds => ['genre', 'tracks' ] },
});
- my $count = $artist_rs->first->cds->count;
+and
- my $artist_rs_prefetch = $artist_rs->search( {}, { prefetch => 'cds' } );
+ my $rs = $schema->resultset('Artist')->search({}, {
+ join => { cds => ['genre', 'tracks' ] },
+ collapse => 1,
+ '+columns' => [
+ (map
+ { +{ "cds.$_" => "cds.$_" } }
+ $schema->source('Artist')->related_source('cds')->columns
+ ),
+ (map
+ { +{ "cds.genre.$_" => "genre.$_" } }
+ $schema->source('Artist')->related_source('cds')->related_source('genre')->columns
+ ),
+ (map
+ { +{ "cds.tracks.$_" => "tracks.$_" } }
+ $schema->source('Artist')->related_source('cds')->related_source('tracks')->columns
+ ),
+ ],
+ });
- my $prefetch_count = $artist_rs_prefetch->first->cds->count;
+Both producing the following SQL:
+
+ SELECT me.artistid, me.name, me.rank, me.charfield,
+ cds.cdid, cds.artist, cds.title, cds.year, cds.genreid, cds.single_track,
+ genre.genreid, genre.name,
+ tracks.trackid, tracks.cd, tracks.position, tracks.title, tracks.last_updated_on, tracks.last_updated_at
+ FROM artist me
+ LEFT JOIN cd cds
+ ON cds.artist = me.artistid
+ LEFT JOIN genre genre
+ ON genre.genreid = cds.genreid
+ LEFT JOIN track tracks
+ ON tracks.cd = cds.cdid
+ ORDER BY me.artistid
+
+While L</prefetch> implies a L</join>, it is ok to mix the two together, as
+the arguments are properly merged and generally do the right thing. For
+example, you may want to do the following:
+
+ my $artists_and_cds_without_genre = $schema->resultset('Artist')->search(
+ { 'genre.genreid' => undef },
+ {
+ join => { cds => 'genre' },
+ prefetch => 'cds',
+ }
+ );
- cmp_ok( $count, '==', $prefetch_count, "Counts should be the same" );
+Which generates the following SQL:
-that cmp_ok() may or may not pass depending on the datasets involved. This
-behavior may or may not survive the 0.09 transition.
+ SELECT me.artistid, me.name, me.rank, me.charfield,
+ cds.cdid, cds.artist, cds.title, cds.year, cds.genreid, cds.single_track
+ FROM artist me
+ LEFT JOIN cd cds
+ ON cds.artist = me.artistid
+ LEFT JOIN genre genre
+ ON genre.genreid = cds.genreid
+ WHERE genre.genreid IS NULL
+ ORDER BY me.artistid
-=back
+For a more in-depth discussion, see L</PREFETCHING>.
=head2 alias
... FOR SHARED. If \$scalar is passed, this is taken directly and embedded in the
query.
+=head1 PREFETCHING
+
+DBIx::Class supports arbitrary related data prefetching from multiple related
+sources. Any combination of relationship types and column sets are supported.
+If L<collapsing|/collapse> is requested, there is an additional requirement of
+selecting enough data to make every individual object uniquely identifiable.
+
+Here are some more involved examples, based on the following relationship map:
+
+ # Assuming:
+ My::Schema::CD->belongs_to( artist => 'My::Schema::Artist' );
+ My::Schema::CD->might_have( liner_note => 'My::Schema::LinerNotes' );
+ My::Schema::CD->has_many( tracks => 'My::Schema::Track' );
+
+ My::Schema::Artist->belongs_to( record_label => 'My::Schema::RecordLabel' );
+
+ My::Schema::Track->has_many( guests => 'My::Schema::Guest' );
+
+
+
+ my $rs = $schema->resultset('Tag')->search(
+ undef,
+ {
+ prefetch => {
+ cd => 'artist'
+ }
+ }
+ );
+
+The initial search results in SQL like the following:
+
+ SELECT tag.*, cd.*, artist.* FROM tag
+ JOIN cd ON tag.cd = cd.cdid
+ JOIN artist ON cd.artist = artist.artistid
+
+L<DBIx::Class> has no need to go back to the database when we access the
+C<cd> or C<artist> relationships, which saves us two SQL statements in this
+case.
+
+Simple prefetches will be joined automatically, so there is no need
+for a C<join> attribute in the above search.
+
+The L</prefetch> attribute can be used with any of the relationship types
+and multiple prefetches can be specified together. Below is a more complex
+example that prefetches a CD's artist, its liner notes (if present),
+the cover image, the tracks on that CD, and the guests on those
+tracks.
+
+ my $rs = $schema->resultset('CD')->search(
+ undef,
+ {
+ prefetch => [
+ { artist => 'record_label'}, # belongs_to => belongs_to
+ 'liner_note', # might_have
+ 'cover_image', # has_one
+ { tracks => 'guests' }, # has_many => has_many
+ ]
+ }
+ );
+
+This will produce SQL like the following:
+
+ SELECT cd.*, artist.*, record_label.*, liner_note.*, cover_image.*,
+ tracks.*, guests.*
+ FROM cd me
+ JOIN artist artist
+ ON artist.artistid = me.artistid
+ JOIN record_label record_label
+ ON record_label.labelid = artist.labelid
+ LEFT JOIN track tracks
+ ON tracks.cdid = me.cdid
+ LEFT JOIN guest guests
+ ON guests.trackid = tracks.trackid
+ LEFT JOIN liner_notes liner_note
+ ON liner_note.cdid = me.cdid
+ JOIN cd_artwork cover_image
+ ON cover_image.cdid = me.cdid
+ ORDER BY tracks.cd
+
+Now the C<artist>, C<record_label>, C<liner_note>, C<cover_image>,
+C<tracks>, and C<guests> of the CD will all be available through the
+relationship accessors without the need for additional queries to the
+database.
+
+=head3 CAVEATS
+
+Prefetch does a lot of deep magic. As such, it may not behave exactly
+as you might expect.
+
+=over 4
+
+=item *
+
+Prefetch uses the L</cache> to populate the prefetched relationships. This
+may or may not be what you want.
+
+=item *
+
+If you specify a condition on a prefetched relationship, ONLY those
+rows that match the prefetched condition will be fetched into that relationship.
+This means that adding prefetch to a search() B<may alter> what is returned by
+traversing a relationship. So, if you have C<< Artist->has_many(CDs) >> and you do
+
+ my $artist_rs = $schema->resultset('Artist')->search({
+ 'cds.year' => 2008,
+ }, {
+ join => 'cds',
+ });
+
+ my $count = $artist_rs->first->cds->count;
+
+ my $artist_rs_prefetch = $artist_rs->search( {}, { prefetch => 'cds' } );
+
+ my $prefetch_count = $artist_rs_prefetch->first->cds->count;
+
+ cmp_ok( $count, '==', $prefetch_count, "Counts should be the same" );
+
+That cmp_ok() may or may not pass depending on the datasets involved. In other
+words the C<WHERE> condition would apply to the entire dataset, just like
+it would in regular SQL. If you want to add a condition only to the "right side"
+of a C<LEFT JOIN> - consider declaring and using a L<relationship with a custom
+condition|DBIx::Class::Relationship::Base/condition>
+
+=back
+
=head1 DBIC BIND VALUES
Because DBIC may need more information to bind values than just the column name
You may distribute this code under the same terms as Perl itself.
-=cut
-
-1;
# {collapse} would mean a has_many join was injected, which in turn means
# we need to group *IF WE CAN* (only if the column in question is unique)
- if (!$orig_attrs->{group_by} && keys %{$orig_attrs->{collapse}}) {
+ if (!$orig_attrs->{group_by} && $orig_attrs->{collapse}) {
if ($colmap->{$select} and $rsrc->_identifying_column_set([$colmap->{$select}])) {
$new_attrs->{group_by} = [ $select ];
use strict;
use warnings;
+use base qw/DBIx::Class::ResultSource::RowParser DBIx::Class/;
+
use DBIx::Class::ResultSet;
use DBIx::Class::ResultSourceHandle;
use Try::Tiny;
use List::Util 'first';
use Scalar::Util qw/blessed weaken isweak/;
-use namespace::clean;
-use base qw/DBIx::Class/;
+use namespace::clean;
__PACKAGE__->mk_group_accessors(simple => qw/
source_name name source_info
}
else {
$self->throw_exception( sprintf (
- "No such column '%s' on source %s",
+ "No such column '%s' on source '%s'",
$_,
- $self->source_name,
+ $self->source_name || $self->name || 'Unknown source...?',
));
}
}
sub set_primary_key {
my ($self, @cols) = @_;
- # check if primary key columns are valid columns
- foreach my $col (@cols) {
- $self->throw_exception("No such column $col on table " . $self->name)
- unless $self->has_column($col);
+
+ my $colinfo = $self->columns_info(\@cols);
+ for my $col (@cols) {
+ carp_unique(sprintf (
+ "Primary key of source '%s' includes the column '%s' which has its "
+ . "'is_nullable' attribute set to true. This is a mistake and will cause "
+ . 'various Result-object operations to fail',
+ $self->source_name || $self->name || 'Unknown source...?',
+ $col,
+ )) if $colinfo->{$col}{is_nullable};
}
+
$self->_primaries(\@cols);
$self->add_unique_constraint(primary => \@cols);
,
-join_path => [@$jpath, { $join => $as } ],
-is_single => (
- $rel_info->{attrs}{accessor}
- &&
+ (! $rel_info->{attrs}{accessor})
+ or
first { $rel_info->{attrs}{accessor} eq $_ } (qw/single filter/)
),
-alias => $as,
- -relation_chain_depth => $seen->{-relation_chain_depth} || 0,
+ -relation_chain_depth => ( $seen->{-relation_chain_depth} || 0 ) + 1,
},
scalar $self->_resolve_condition($rel_info->{cond}, $as, $alias, $join)
];
}
}
-# Accepts one or more relationships for the current source and returns an
-# array of column names for each of those relationships. Column names are
-# prefixed relative to the current source, in accordance with where they appear
-# in the supplied relationships.
-sub _resolve_prefetch {
- my ($self, $pre, $alias, $alias_map, $order, $collapse, $pref_path) = @_;
- $pref_path ||= [];
-
- if (not defined $pre or not length $pre) {
- return ();
- }
- elsif( ref $pre eq 'ARRAY' ) {
- return
- map { $self->_resolve_prefetch( $_, $alias, $alias_map, $order, $collapse, [ @$pref_path ] ) }
- @$pre;
- }
- elsif( ref $pre eq 'HASH' ) {
- my @ret =
- map {
- $self->_resolve_prefetch($_, $alias, $alias_map, $order, $collapse, [ @$pref_path ] ),
- $self->related_source($_)->_resolve_prefetch(
- $pre->{$_}, "${alias}.$_", $alias_map, $order, $collapse, [ @$pref_path, $_] )
- } keys %$pre;
- return @ret;
- }
- elsif( ref $pre ) {
- $self->throw_exception(
- "don't know how to resolve prefetch reftype ".ref($pre));
- }
- else {
- my $p = $alias_map;
- $p = $p->{$_} for (@$pref_path, $pre);
-
- $self->throw_exception (
- "Unable to resolve prefetch '$pre' - join alias map does not contain an entry for path: "
- . join (' -> ', @$pref_path, $pre)
- ) if (ref $p->{-join_aliases} ne 'ARRAY' or not @{$p->{-join_aliases}} );
-
- my $as = shift @{$p->{-join_aliases}};
-
- my $rel_info = $self->relationship_info( $pre );
- $self->throw_exception( $self->source_name . " has no such relationship '$pre'" )
- unless $rel_info;
- my $as_prefix = ($alias =~ /^.*?\.(.+)$/ ? $1.'.' : '');
- my $rel_source = $self->related_source($pre);
-
- if ($rel_info->{attrs}{accessor} && $rel_info->{attrs}{accessor} eq 'multi') {
- $self->throw_exception(
- "Can't prefetch has_many ${pre} (join cond too complex)")
- unless ref($rel_info->{cond}) eq 'HASH';
- my $dots = @{[$as_prefix =~ m/\./g]} + 1; # +1 to match the ".${as_prefix}"
-
- if (my ($fail) = grep { @{[$_ =~ m/\./g]} == $dots }
- keys %{$collapse}) {
- my ($last) = ($fail =~ /([^\.]+)$/);
- carp (
- "Prefetching multiple has_many rels ${last} and ${pre} "
- .(length($as_prefix)
- ? "at the same level (${as_prefix}) "
- : "at top level "
- )
- . 'will explode the number of row objects retrievable via ->next or ->all. '
- . 'Use at your own risk.'
- );
- }
-
- #my @col = map { (/^self\.(.+)$/ ? ("${as_prefix}.$1") : ()); }
- # values %{$rel_info->{cond}};
- $collapse->{".${as_prefix}${pre}"} = [ $rel_source->_pri_cols ];
- # action at a distance. prepending the '.' allows simpler code
- # in ResultSet->_collapse_result
- my @key = map { (/^foreign\.(.+)$/ ? ($1) : ()); }
- keys %{$rel_info->{cond}};
- push @$order, map { "${as}.$_" } @key;
-
- if (my $rel_order = $rel_info->{attrs}{order_by}) {
- # this is kludgy and incomplete, I am well aware
- # but the parent method is going away entirely anyway
- # so sod it
- my $sql_maker = $self->storage->sql_maker;
- my ($orig_ql, $orig_qr) = $sql_maker->_quote_chars;
- my $sep = $sql_maker->name_sep;
-
- # install our own quoter, so we can catch unqualified stuff
- local $sql_maker->{quote_char} = ["\x00", "\xFF"];
-
- my $quoted_prefix = "\x00${as}\xFF";
-
- for my $chunk ( $sql_maker->_order_by_chunks ($rel_order) ) {
- my @bind;
- ($chunk, @bind) = @$chunk if ref $chunk;
-
- $chunk = "${quoted_prefix}${sep}${chunk}"
- unless $chunk =~ /\Q$sep/;
-
- $chunk =~ s/\x00/$orig_ql/g;
- $chunk =~ s/\xFF/$orig_qr/g;
- push @$order, \[$chunk, @bind];
- }
- }
- }
-
- return map { [ "${as}.$_", "${as_prefix}${pre}.$_", ] }
- $rel_source->columns;
- }
-}
-
=head2 related_source
=over 4
--- /dev/null
+package # hide from the pauses
+ DBIx::Class::ResultSource::RowParser;
+
+use strict;
+use warnings;
+
+use base 'DBIx::Class';
+
+use Try::Tiny;
+use List::Util qw(first max);
+use B 'perlstring';
+
+use DBIx::Class::ResultSource::RowParser::Util qw(
+ assemble_simple_parser
+ assemble_collapsing_parser
+);
+
+use namespace::clean;
+
+# Accepts one or more relationships for the current source and returns an
+# array of column names for each of those relationships. Column names are
+# prefixed relative to the current source, in accordance with where they appear
+# in the supplied relationships.
+sub _resolve_prefetch {
+ my ($self, $pre, $alias, $alias_map, $order, $pref_path) = @_;
+ $pref_path ||= [];
+
+ if (not defined $pre or not length $pre) {
+ return ();
+ }
+ elsif( ref $pre eq 'ARRAY' ) {
+ return
+ map { $self->_resolve_prefetch( $_, $alias, $alias_map, $order, [ @$pref_path ] ) }
+ @$pre;
+ }
+ elsif( ref $pre eq 'HASH' ) {
+ my @ret =
+ map {
+ $self->_resolve_prefetch($_, $alias, $alias_map, $order, [ @$pref_path ] ),
+ $self->related_source($_)->_resolve_prefetch(
+ $pre->{$_}, "${alias}.$_", $alias_map, $order, [ @$pref_path, $_] )
+ } keys %$pre;
+ return @ret;
+ }
+ elsif( ref $pre ) {
+ $self->throw_exception(
+ "don't know how to resolve prefetch reftype ".ref($pre));
+ }
+ else {
+ my $p = $alias_map;
+ $p = $p->{$_} for (@$pref_path, $pre);
+
+ $self->throw_exception (
+ "Unable to resolve prefetch '$pre' - join alias map does not contain an entry for path: "
+ . join (' -> ', @$pref_path, $pre)
+ ) if (ref $p->{-join_aliases} ne 'ARRAY' or not @{$p->{-join_aliases}} );
+
+ my $as = shift @{$p->{-join_aliases}};
+
+ my $rel_info = $self->relationship_info( $pre );
+ $self->throw_exception( $self->source_name . " has no such relationship '$pre'" )
+ unless $rel_info;
+
+ my $as_prefix = ($alias =~ /^.*?\.(.+)$/ ? $1.'.' : '');
+
+ return map { [ "${as}.$_", "${as_prefix}${pre}.$_", ] }
+ $self->related_source($pre)->columns;
+ }
+}
+
+# Takes an arrayref of {as} dbic column aliases and the collapse and select
+# attributes from the same $rs (the selector requirement is a temporary
+# workaround... I hope), and returns a coderef capable of:
+# my $me_pref_clps = $coderef->([$rs->cursor->next/all])
+# Where the $me_pref_clps arrayref is the future argument to inflate_result()
+#
+# For an example of this coderef in action (and to see its guts) look at
+# t/resultset/rowparser_internals.t
+#
+# This is a huge performance win, as we call the same code for every row
+# returned from the db, thus avoiding repeated method lookups when traversing
+# relationships
+#
+# Also since the coderef is completely stateless (the returned structure is
+# always fresh on every new invocation) this is a very good opportunity for
+# memoization if further speed improvements are needed
+#
+# The way we construct this coderef is somewhat fugly, although the result is
+# really worth it. The final coderef does not perform any kind of recursion -
+# the entire nested structure constructor is rolled out into a single scope.
+#
+# In any case - the output of this thing is meticulously micro-tested, so
+# any sort of adjustment/rewrite should be relatively easy (fsvo relatively)
+#
+sub _mk_row_parser {
+ my ($self, $args) = @_;
+
+ my $val_index = { map
+ { $args->{inflate_map}[$_] => $_ }
+ ( 0 .. $#{$args->{inflate_map}} )
+ };
+
+ my $src;
+
+ if (! $args->{collapse} ) {
+ $src = assemble_simple_parser({
+ val_index => $val_index,
+ hri_style => $args->{hri_style},
+ });
+ }
+ else {
+ my $collapse_map = $self->_resolve_collapse ({
+ premultiplied => $args->{premultiplied},
+ # FIXME
+ # only consider real columns (not functions) during collapse resolution
+ # this check shouldn't really be here, as users are not supposed to
+ # alias random crap to existing column names anyway, but still - just in
+ # case
+ # FIXME !!!! - this does not yet deal with unbalanced selectors correctly
+ # (it is now trivial as the attrs specify where things go out of sync
+ # needs MOAR tests)
+ as => { map
+ { ref $args->{selection}[$val_index->{$_}] ? () : ( $_ => $val_index->{$_} ) }
+ keys %$val_index
+ }
+ });
+
+ $src = assemble_collapsing_parser({
+ val_index => $val_index,
+ collapse_map => $collapse_map,
+ hri_style => $args->{hri_style},
+ });
+ }
+
+ return $args->{eval}
+ ? ( eval "sub $src" || die $@ )
+ : $src
+ ;
+}
+
+
+# Takes an arrayref selection list and generates a collapse-map representing
+# row-object fold-points. Every relationship is assigned a set of unique,
+# non-nullable columns (which may *not even be* from the same resultset)
+# and the collapser will use this information to correctly distinguish
+# data of individual to-be-row-objects. See t/resultset/rowparser_internals.t
+# for extensive RV examples
+sub _resolve_collapse {
+ my ($self, $args, $common_args) = @_;
+
+ # for comprehensible error messages put ourselves at the head of the relationship chain
+ $args->{_rel_chain} ||= [ $self->source_name ];
+
+ # record top-level fully-qualified column index, signify toplevelness
+ unless ($common_args->{_as_fq_idx}) {
+ $common_args->{_as_fq_idx} = { %{$args->{as}} };
+ $args->{_is_top_level} = 1;
+ };
+
+ my ($my_cols, $rel_cols);
+ for (keys %{$args->{as}}) {
+ if ($_ =~ /^ ([^\.]+) \. (.+) /x) {
+ $rel_cols->{$1}{$2} = 1;
+ }
+ else {
+ $my_cols->{$_} = {}; # important for ||='s below
+ }
+ }
+
+ my $relinfo;
+ # run through relationships, collect metadata
+ for my $rel (keys %$rel_cols) {
+ my $inf = $self->relationship_info ($rel);
+
+ $relinfo->{$rel} = {
+ is_single => ( $inf->{attrs}{accessor} && $inf->{attrs}{accessor} ne 'multi' ),
+ is_inner => ( ( $inf->{attrs}{join_type} || '' ) !~ /^left/i),
+ rsrc => $self->related_source($rel),
+ };
+
+ # FIXME - need to use _resolve_cond here instead
+ my $cond = $inf->{cond};
+
+ if (
+ ref $cond eq 'HASH'
+ and
+ keys %$cond
+ and
+ ! defined first { $_ !~ /^foreign\./ } (keys %$cond)
+ and
+ ! defined first { $_ !~ /^self\./ } (values %$cond)
+ ) {
+ for my $f (keys %$cond) {
+ my $s = $cond->{$f};
+ $_ =~ s/^ (?: foreign | self ) \.//x for ($f, $s);
+ $relinfo->{$rel}{fk_map}{$s} = $f;
+ }
+ }
+ }
+
+ # inject non-left fk-bridges from *INNER-JOINED* children (if any)
+ for my $rel (grep { $relinfo->{$_}{is_inner} } keys %$relinfo) {
+ my $ri = $relinfo->{$rel};
+ for (keys %{$ri->{fk_map}} ) {
+ # need to know source from *our* pov, hence $rel.col
+ $my_cols->{$_} ||= { via_fk => "$rel.$ri->{fk_map}{$_}" }
+ if defined $rel_cols->{$rel}{$ri->{fk_map}{$_}} # in fact selected
+ }
+ }
+
+ # if the parent is already defined *AND* we have an inner reverse relationship
+ # (i.e. do not exist without it), assume all of its related FKs are selected
+ # (even if they in fact are NOT in the select list). Keep a record of what we
+ # assumed, and if any such phantom-column becomes part of our own collapser,
+ # throw everything assumed-from-parent away and replace with the collapser of
+ # the parent (whatever it may be)
+ my $assumed_from_parent;
+ if ( ! $args->{_parent_info}{underdefined} and ! $args->{_parent_info}{rev_rel_is_optional} ) {
+ for my $col ( values %{$args->{_parent_info}{rel_condition} || {}} ) {
+ next if exists $my_cols->{$col};
+ $my_cols->{$col} = { via_collapse => $args->{_parent_info}{collapse_on_idcols} };
+ $assumed_from_parent->{columns}{$col}++;
+ }
+ }
+
+ # get colinfo for everything
+ if ($my_cols) {
+ my $ci = $self->columns_info;
+ $my_cols->{$_}{colinfo} = $ci->{$_} for keys %$my_cols;
+ }
+
+ my $collapse_map;
+
+ # first try to reuse the parent's collapser (i.e. reuse collapser over 1:1)
+ # (makes for a leaner coderef later)
+ unless ($collapse_map->{-identifying_columns}) {
+ $collapse_map->{-identifying_columns} = $args->{_parent_info}{collapse_on_idcols}
+ if $args->{_parent_info}{collapser_reusable};
+ }
+
+ # Still don't know how to collapse - try to resolve based on our columns (plus already inserted FK bridges)
+ if (
+ ! $collapse_map->{-identifying_columns}
+ and
+ $my_cols
+ and
+ my $idset = $self->_identifying_column_set ({map { $_ => $my_cols->{$_}{colinfo} } keys %$my_cols})
+ ) {
+ # see if the resulting collapser relies on any implied columns,
+ # and fix stuff up if this is the case
+ my @reduced_set = grep { ! $assumed_from_parent->{columns}{$_} } @$idset;
+
+ $collapse_map->{-identifying_columns} = [ __unique_numlist(
+ @{ $args->{_parent_info}{collapse_on_idcols}||[] },
+
+ (map
+ {
+ my $fqc = join ('.',
+ @{$args->{_rel_chain}}[1 .. $#{$args->{_rel_chain}}],
+ ( $my_cols->{$_}{via_fk} || $_ ),
+ );
+
+ $common_args->{_as_fq_idx}->{$fqc};
+ }
+ @reduced_set
+ ),
+ )];
+ }
+
+ # Still don't know how to collapse - keep descending down 1:1 chains - if
+ # a related non-LEFT 1:1 is resolvable - its condition will collapse us
+ # too
+ unless ($collapse_map->{-identifying_columns}) {
+ my @candidates;
+
+ for my $rel (keys %$relinfo) {
+ next unless ($relinfo->{$rel}{is_single} && $relinfo->{$rel}{is_inner});
+
+ if ( my $rel_collapse = $relinfo->{$rel}{rsrc}->_resolve_collapse ({
+ as => $rel_cols->{$rel},
+ _rel_chain => [ @{$args->{_rel_chain}}, $rel ],
+ _parent_info => { underdefined => 1 },
+ }, $common_args)) {
+ push @candidates, $rel_collapse->{-identifying_columns};
+ }
+ }
+
+ # get the set with least amount of columns
+ # FIXME - maybe need to implement a data type order as well (i.e. prefer several ints
+ # to a single varchar)
+ if (@candidates) {
+ ($collapse_map->{-identifying_columns}) = sort { scalar @$a <=> scalar @$b } (@candidates);
+ }
+ }
+
+ # Still don't know how to collapse, and we are the root node. Last ditch
+ # effort in case we are *NOT* premultiplied.
+ # Run through *each multi* all the way down, left or not, and all
+ # *left* singles (a single may become a multi underneath) . When everything
+ # gets back see if all the rels link to us definitively. If this is the
+ # case we are good - either one of them will define us, or if all are NULLs
+ # we know we are "unique" due to the "non-premultiplied" check
+ if (
+ ! $collapse_map->{-identifying_columns}
+ and
+ ! $args->{premultiplied}
+ and
+ $args->{_is_top_level}
+ ) {
+ my (@collapse_sets, $uncollapsible_chain);
+
+ for my $rel (keys %$relinfo) {
+
+ # we already looked at these higher up
+ next if ($relinfo->{$rel}{is_single} && $relinfo->{$rel}{is_inner});
+
+ if (my $clps = $relinfo->{$rel}{rsrc}->_resolve_collapse ({
+ as => $rel_cols->{$rel},
+ _rel_chain => [ @{$args->{_rel_chain}}, $rel ],
+ _parent_info => { underdefined => 1 },
+ }, $common_args) ) {
+
+ # for singles use the idcols wholesale (either there or not)
+ if ($relinfo->{$rel}{is_single}) {
+ push @collapse_sets, $clps->{-identifying_columns};
+ }
+ elsif (! $relinfo->{$rel}{fk_map}) {
+ $uncollapsible_chain = 1;
+ last;
+ }
+ else {
+ my $defined_cols_parent_side;
+
+ for my $fq_col ( grep { /^$rel\.[^\.]+$/ } keys %{$args->{as}} ) {
+ my ($col) = $fq_col =~ /([^\.]+)$/;
+
+ $defined_cols_parent_side->{$_} = $args->{as}{$fq_col} for grep
+ { $relinfo->{$rel}{fk_map}{$_} eq $col }
+ keys %{$relinfo->{$rel}{fk_map}}
+ ;
+ }
+
+ if (my $set = $self->_identifying_column_set([ keys %$defined_cols_parent_side ]) ) {
+ push @collapse_sets, [ sort map { $defined_cols_parent_side->{$_} } @$set ];
+ }
+ else {
+ $uncollapsible_chain = 1;
+ last;
+ }
+ }
+ }
+ else {
+ $uncollapsible_chain = 1;
+ last;
+ }
+ }
+
+ unless ($uncollapsible_chain) {
+ # if we got here - we are good to go, but the construction is tricky
+ # since our children will want to include our collapse criteria - we
+ # don't give them anything (safe, since they are all collapsible on their own)
+ # in addition we record the individual collapse possibilities
+ # of all left children node collapsers, and merge them in the rowparser
+ # coderef later
+ $collapse_map->{-identifying_columns} = [];
+ $collapse_map->{-identifying_columns_variants} = [ sort {
+ (scalar @$a) <=> (scalar @$b) or max(@$a) <=> max(@$b)
+ } @collapse_sets ];
+ }
+ }
+
+ # stop descending into children if we were called by a parent for first-pass
+ # and don't despair if nothing was found (there may be other parallel branches
+ # to dive into)
+ if ($args->{_parent_info}{underdefined}) {
+ return $collapse_map->{-identifying_columns} ? $collapse_map : undef
+ }
+ # nothing down the chain resolved - can't calculate a collapse-map
+ elsif (! $collapse_map->{-identifying_columns}) {
+ $self->throw_exception ( sprintf
+ "Unable to calculate a definitive collapse column set for %s%s: fetch more unique non-nullable columns",
+ $self->source_name,
+ @{$args->{_rel_chain}} > 1
+ ? sprintf (' (last member of the %s chain)', join ' -> ', @{$args->{_rel_chain}} )
+ : ''
+ ,
+ );
+ }
+
+ # If we got that far - we are collapsible - GREAT! Now go down all children
+ # a second time, and fill in the rest
+
+ $collapse_map->{-identifying_columns} = [ __unique_numlist(
+ @{ $args->{_parent_info}{collapse_on_idcols}||[] },
+ @{ $collapse_map->{-identifying_columns} },
+ )];
+
+ my @id_sets;
+ for my $rel (sort keys %$relinfo) {
+
+ $collapse_map->{$rel} = $relinfo->{$rel}{rsrc}->_resolve_collapse ({
+ as => { map { $_ => 1 } ( keys %{$rel_cols->{$rel}} ) },
+ _rel_chain => [ @{$args->{_rel_chain}}, $rel],
+ _parent_info => {
+ # shallow copy
+ collapse_on_idcols => [ @{$collapse_map->{-identifying_columns}} ],
+
+ rel_condition => $relinfo->{$rel}{fk_map},
+
+ is_optional => ! $relinfo->{$rel}{is_inner},
+
+ # if there is at least one *inner* reverse relationship which is HASH-based (equality only)
+ # we can safely assume that the child can not exist without us
+ rev_rel_is_optional => ( first
+ { ref $_->{cond} eq 'HASH' and ($_->{attrs}{join_type}||'') !~ /^left/i }
+ values %{ $self->reverse_relationship_info($rel) },
+ ) ? 0 : 1,
+
+ # if this is a 1:1 our own collapser can be used as a collapse-map
+ # (regardless of left or not)
+ collapser_reusable => (
+ $relinfo->{$rel}{is_single}
+ &&
+ $relinfo->{$rel}{is_inner}
+ &&
+ @{$collapse_map->{-identifying_columns}}
+ ) ? 1 : 0,
+ },
+ }, $common_args );
+
+ $collapse_map->{$rel}{-is_single} = 1 if $relinfo->{$rel}{is_single};
+ $collapse_map->{$rel}{-is_optional} ||= 1 unless $relinfo->{$rel}{is_inner};
+ }
+
+ return $collapse_map;
+}
+
+# adding a dep on MoreUtils *just* for this is overkill
+sub __unique_numlist {
+ sort { $a <=> $b } keys %{ {map { $_ => 1 } @_ }}
+}
+
+1;
--- /dev/null
+package # hide from the pauses
+ DBIx::Class::ResultSource::RowParser::Util;
+
+use strict;
+use warnings;
+
+use List::Util 'first';
+use B 'perlstring';
+
+use base 'Exporter';
+our @EXPORT_OK = qw(
+ assemble_simple_parser
+ assemble_collapsing_parser
+);
+
+# working title - we are hoping to extract this eventually...
+our $null_branch_class = 'DBIx::ResultParser::RelatedNullBranch';
+
+sub assemble_simple_parser {
+ #my ($args) = @_;
+
+ # the non-collapsing assembler is easy
+ # FIXME SUBOPTIMAL there could be a yet faster way to do things here, but
+ # need to try an actual implementation and benchmark it:
+ #
+ # <timbunce_> First setup the nested data structure you want for each row
+ # Then call bind_col() to alias the row fields into the right place in
+ # the data structure, then to fetch the data do:
+ # push @rows, dclone($row_data_struct) while ($sth->fetchrow);
+ #
+ my $parser_src = sprintf('$_ = %s for @{$_[0]}', __visit_infmap_simple($_[0]) );
+
+ # change the quoted placeholders to unquoted alias-references
+ $parser_src =~ s/ \' \xFF__VALPOS__(\d+)__\xFF \' /"\$_->[$1]"/gex;
+
+ $parser_src = " { use strict; use warnings FATAL => 'all';\n$parser_src\n }";
+}
+
+# the simple non-collapsing nested structure recursor
+sub __visit_infmap_simple {
+ my $args = shift;
+
+ my $my_cols = {};
+ my $rel_cols;
+ for (keys %{$args->{val_index}}) {
+ if ($_ =~ /^ ([^\.]+) \. (.+) /x) {
+ $rel_cols->{$1}{$2} = $args->{val_index}{$_};
+ }
+ else {
+ $my_cols->{$_} = $args->{val_index}{$_};
+ }
+ }
+
+ my @relperl;
+ for my $rel (sort keys %$rel_cols) {
+
+ my $rel_struct = __visit_infmap_simple({ %$args,
+ val_index => $rel_cols->{$rel},
+ });
+
+ if (keys %$my_cols) {
+
+ my $branch_null_checks = join ' && ', map
+ { "( ! defined '\xFF__VALPOS__${_}__\xFF' )" }
+ sort { $a <=> $b } values %{$rel_cols->{$rel}}
+ ;
+
+ if ($args->{hri_style}) {
+ $rel_struct = sprintf ( '( (%s) ? undef : %s )',
+ $branch_null_checks,
+ $rel_struct,
+ );
+ }
+ else {
+ $rel_struct = sprintf ( '( (%s) ? bless( (%s), %s ) : %s )',
+ $branch_null_checks,
+ $rel_struct,
+ perlstring($null_branch_class),
+ $rel_struct,
+ );
+ }
+ }
+
+ push @relperl, sprintf '( %s => %s )',
+ perlstring($rel),
+ $rel_struct,
+ ;
+
+ }
+
+ my $me_struct;
+ $me_struct = __visit_dump({ map { $_ => "\xFF__VALPOS__$my_cols->{$_}__\xFF" } (keys %$my_cols) })
+ if keys %$my_cols;
+
+ if ($args->{hri_style}) {
+ $me_struct =~ s/^ \s* \{ | \} \s* $//gx
+ if $me_struct;
+
+ return sprintf '{ %s }', join (', ', $me_struct||(), @relperl);
+ }
+ else {
+ return sprintf '[%s]', join (',',
+ $me_struct || 'undef',
+ @relperl ? sprintf ('{ %s }', join (',', @relperl)) : (),
+ );
+ }
+}
+
+sub assemble_collapsing_parser {
+ my $args = shift;
+
+ my ($top_node_key, $top_node_key_assembler);
+
+ if (scalar @{$args->{collapse_map}{-identifying_columns}}) {
+ $top_node_key = join ('', map
+ { "{'\xFF__IDVALPOS__${_}__\xFF'}" }
+ @{$args->{collapse_map}{-identifying_columns}}
+ );
+ }
+ elsif( my @variants = @{$args->{collapse_map}{-identifying_columns_variants}} ) {
+
+ my @path_parts = map { sprintf
+ "( ( defined '\xFF__VALPOS__%d__\xFF' ) && (join qq(\xFF), '', %s, '') )",
+ $_->[0], # checking just first is enough - one ID defined, all defined
+ ( join ', ', map { "'\xFF__VALPOS__${_}__\xFF'" } @$_ ),
+ } @variants;
+
+ my $virtual_column_idx = (scalar keys %{$args->{val_index}} ) + 1;
+
+ $top_node_key = "{'\xFF__IDVALPOS__${virtual_column_idx}__\xFF'}";
+
+ $top_node_key_assembler = sprintf "'\xFF__IDVALPOS__%d__\xFF' = (%s);",
+ $virtual_column_idx,
+ "\n" . join( "\n or\n", @path_parts, qq{"\0\$rows_pos\0"} )
+ ;
+
+ $args->{collapse_map} = {
+ %{$args->{collapse_map}},
+ -custom_node_key => $top_node_key,
+ };
+
+ }
+ else {
+ die('Unexpected collapse map contents');
+ }
+
+ my ($data_assemblers, $stats) = __visit_infmap_collapse ($args);
+
+ my @idcol_args = $args->{hri_style} ? ('', '') : (
+ '%cur_row_ids, ', # only declare the variable if we'll use it
+
+ sprintf( <<'EOS', join ', ', sort { $a <=> $b } keys %{ $stats->{idcols_seen} } ),
+ $cur_row_ids{$_} = defined($cur_row_data->[$_]) ? $cur_row_data->[$_] : "\0NULL\xFF$rows_pos\xFF$_\0"
+ for (%s);
+EOS
+ );
+
+ my $parser_src = sprintf (<<'EOS', @idcol_args, $top_node_key_assembler||'', $top_node_key, join( "\n", @{$data_assemblers||[]} ) );
+### BEGIN LITERAL STRING EVAL
+ my ($rows_pos, $result_pos, $cur_row_data,%1$s @collapse_idx, $is_new_res) = (0,0);
+ # this loop is a bit arcane - the rationale is that the passed in
+ # $_[0] will either have only one row (->next) or will have all
+ # rows already pulled in (->all and/or unordered). Given that the
+ # result can be rather large - we reuse the same already allocated
+ # array, since the collapsed prefetch is smaller by definition.
+ # At the end we cut the leftovers away and move on.
+ while ($cur_row_data =
+ ( ( $rows_pos >= 0 and $_[0][$rows_pos++] ) or do { $rows_pos = -1; undef } )
+ ||
+ ($_[1] and $_[1]->())
+ ) {
+ # this code exists only when we are *not* assembling direct to HRI
+ #
+ # due to left joins some of the ids may be NULL/undef, and
+ # won't play well when used as hash lookups
+ # we also need to differentiate NULLs on per-row/per-col basis
+      # (otherwise folding of optional 1:1s will be greatly confused)
+ %2$s
+
+ # in the case of an underdefined root - calculate the virtual id (otherwise no code at all)
+ %3$s
+
+ $is_new_res = ! $collapse_idx[0]%4$s and (
+ $_[1] and $result_pos and (unshift @{$_[2]}, $cur_row_data) and last
+ );
+
+ # the rel assemblers
+%5$s
+
+ $_[0][$result_pos++] = $collapse_idx[0]%4$s
+ if $is_new_res;
+ }
+
+ splice @{$_[0]}, $result_pos; # truncate the passed in array for cases of collapsing ->all()
+### END LITERAL STRING EVAL
+EOS
+
+ # !!! note - different var than the one above
+ # change the quoted placeholders to unquoted alias-references
+ $parser_src =~ s/ \' \xFF__VALPOS__(\d+)__\xFF \' /"\$cur_row_data->[$1]"/gex;
+ $parser_src =~ s/ \' \xFF__IDVALPOS__(\d+)__\xFF \' /$args->{hri_style} ? "\$cur_row_data->[$1]" : "\$cur_row_ids{$1}" /gex;
+
+ $parser_src = " { use strict; use warnings FATAL => 'all';\n$parser_src\n }";
+}
+
+
+# the collapsing nested structure recursor
+sub __visit_infmap_collapse {
+ my $args = {%{ shift() }};
+
+ my $cur_node_idx = ${ $args->{-node_idx_counter} ||= \do { my $x = 0} }++;
+
+ my ($my_cols, $rel_cols) = {};
+ for ( keys %{$args->{val_index}} ) {
+ if ($_ =~ /^ ([^\.]+) \. (.+) /x) {
+ $rel_cols->{$1}{$2} = $args->{val_index}{$_};
+ }
+ else {
+ $my_cols->{$_} = $args->{val_index}{$_};
+ }
+ }
+
+
+ if ($args->{hri_style}) {
+ delete $my_cols->{$_} for grep { $rel_cols->{$_} } keys %$my_cols;
+ }
+
+ my $me_struct;
+ $me_struct = __visit_dump({ map { $_ => "\xFF__VALPOS__$my_cols->{$_}__\xFF" } (keys %$my_cols) })
+ if keys %$my_cols;
+
+ $me_struct = sprintf( '[ %s ]', $me_struct||'' )
+ unless $args->{hri_style};
+
+
+ my $node_key = $args->{collapse_map}->{-custom_node_key} || join ('', map
+ { "{'\xFF__IDVALPOS__${_}__\xFF'}" }
+ @{$args->{collapse_map}->{-identifying_columns}}
+ );
+ my $node_idx_slot = sprintf '$collapse_idx[%d]%s', $cur_node_idx, $node_key;
+
+
+ my @src;
+
+ if ($cur_node_idx == 0) {
+ push @src, sprintf( '%s ||= %s;',
+ $node_idx_slot,
+ $me_struct,
+ ) if $me_struct;
+ }
+ else {
+ my $parent_attach_slot = sprintf( '$collapse_idx[%d]%s%s{%s}',
+ @{$args}{qw/-parent_node_idx -parent_node_key/},
+ $args->{hri_style} ? '' : '[1]',
+ perlstring($args->{-node_relname}),
+ );
+
+ if ($args->{collapse_map}->{-is_single}) {
+ push @src, sprintf ( '%s ||= %s%s;',
+ $parent_attach_slot,
+ $node_idx_slot,
+ $me_struct ? " ||= $me_struct" : '',
+ );
+ }
+ else {
+ push @src, sprintf('(! %s) and push @{%s}, %s%s;',
+ $node_idx_slot,
+ $parent_attach_slot,
+ $node_idx_slot,
+ $me_struct ? " = $me_struct" : '',
+ );
+ }
+ }
+
+ my $known_present_ids = { map { $_ => 1 } @{$args->{collapse_map}{-identifying_columns}} };
+ my ($stats, $rel_src);
+
+ for my $rel (sort keys %$rel_cols) {
+
+ my $relinfo = $args->{collapse_map}{$rel};
+
+ ($rel_src, $stats->{$rel}) = __visit_infmap_collapse({ %$args,
+ val_index => $rel_cols->{$rel},
+ collapse_map => $relinfo,
+ -parent_node_idx => $cur_node_idx,
+ -parent_node_key => $node_key,
+ -node_relname => $rel,
+ });
+
+ my $rel_src_pos = $#src + 1;
+ push @src, @$rel_src;
+
+ if (
+ $relinfo->{-is_optional}
+ and
+ defined ( my $first_distinct_child_idcol = first
+ { ! $known_present_ids->{$_} }
+ @{$relinfo->{-identifying_columns}}
+ )
+ ) {
+
+ if ($args->{hri_style}) {
+
+ # start of wrap of the entire chain in a conditional
+ splice @src, $rel_src_pos, 0, sprintf "( ! defined %s )\n ? %s{%s} = %s\n : do {",
+ "'\xFF__VALPOS__${first_distinct_child_idcol}__\xFF'",
+ $node_idx_slot,
+ perlstring($rel),
+ $relinfo->{-is_single} ? 'undef' : '[]'
+ ;
+
+ # end of wrap
+ push @src, '};'
+ }
+ else {
+
+ splice @src, $rel_src_pos + 1, 0, sprintf ( '(defined %s) or bless (%s[1]{%s}, %s);',
+ "'\xFF__VALPOS__${first_distinct_child_idcol}__\xFF'",
+ $node_idx_slot,
+ perlstring($rel),
+ perlstring($null_branch_class),
+ );
+ }
+ }
+ }
+
+ return (
+ \@src,
+ {
+ idcols_seen => {
+ ( map { %{ $_->{idcols_seen} } } values %$stats ),
+ ( map { $_ => 1 } @{$args->{collapse_map}->{-identifying_columns}} ),
+ }
+ }
+ );
+}
+
+# keep our own DD object around so we don't have to fitz with quoting
+my $dumper_obj;
+sub __visit_dump {
+
+ # we actually will be producing functional perl code here,
+ # thus no second-guessing of what these globals might have
+ # been set to. DO NOT CHANGE!
+ ($dumper_obj ||= do {
+ require Data::Dumper;
+ Data::Dumper->new([])
+ ->Useperl (0)
+ ->Purity (1)
+ ->Pad ('')
+ ->Useqq (0)
+ ->Terse (1)
+ ->Quotekeys (1)
+ ->Deepcopy (0)
+ ->Deparse (0)
+ ->Maxdepth (0)
+ ->Indent (0) # faster but harder to read, perhaps leave at 1 ?
+ })->Values ([$_[0]])->Dump;
+}
+
+1;
use namespace::clean;
+__PACKAGE__->mk_group_accessors ( simple => [ in_storage => '_in_storage' ] );
+
=head1 NAME
DBIx::Class::Row - Basic row methods
my ($class, $attrs) = @_;
$class = ref $class if ref $class;
- my $new = bless { _column_data => {} }, $class;
+ my $new = bless { _column_data => {}, _in_storage => 0 }, $class;
if ($attrs) {
$new->throw_exception("attrs must be a hashref")
Creating a result object using L<DBIx::Class::ResultSet/new_result>, or
calling L</delete> on one, sets it to false.
-=cut
-
-sub in_storage {
- my ($self, $val) = @_;
- $self->{_in_storage} = $val if @_ > 1;
- return $self->{_in_storage} ? 1 : 0;
-}
=head2 update
);
delete $self->{_column_data_in_storage};
- $self->in_storage(undef);
+ $self->in_storage(0);
}
else {
my $rsrc = try { $self->result_source_instance }
really changed.
=cut
+
sub make_column_dirty {
my ($self, $column) = @_;
sub inflate_result {
my ($class, $source, $me, $prefetch) = @_;
- $source = $source->resolve
- if $source->isa('DBIx::Class::ResultSourceHandle');
-
my $new = bless
{ _column_data => $me, _result_source => $source },
ref $class || $class
;
- foreach my $pre (keys %{$prefetch||{}}) {
-
- my (@pre_vals, $is_multi);
- if (ref $prefetch->{$pre}[0] eq 'ARRAY') {
- $is_multi = 1;
- @pre_vals = @{$prefetch->{$pre}};
- }
- else {
- @pre_vals = $prefetch->{$pre};
- }
-
- my $pre_source = try {
- $source->related_source($pre)
- }
- catch {
- $class->throw_exception(sprintf
-
- "Can't inflate manual prefetch into non-existent relationship '%s' from '%s', "
- . "check the inflation specification (columns/as) ending in '%s.%s'.",
+ if ($prefetch) {
+ for my $pre ( keys %$prefetch ) {
- $pre,
- $source->source_name,
- $pre,
- (keys %{$pre_vals[0][0]})[0] || 'something.something...',
- );
- };
-
- my $accessor = $source->relationship_info($pre)->{attrs}{accessor}
- or $class->throw_exception("No accessor type declared for prefetched $pre");
+ my @pre_objects;
+ if (
+ @{$prefetch->{$pre}||[]}
+ and
+ ref($prefetch->{$pre}) ne $DBIx::Class::ResultSource::RowParser::Util::null_branch_class
+ ) {
+ my $pre_source = $source->related_source($pre);
- if (! $is_multi and $accessor eq 'multi') {
- $class->throw_exception("Manual prefetch (via select/columns) not supported with accessor 'multi'");
- }
+ @pre_objects = map {
+ $pre_source->result_class->inflate_result( $pre_source, @$_ )
+ } ( ref $prefetch->{$pre}[0] eq 'ARRAY' ? @{$prefetch->{$pre}} : $prefetch->{$pre} );
+ }
- my @pre_objects;
- for my $me_pref (@pre_vals) {
-
- # FIXME - this should not be necessary
- # the collapser currently *could* return bogus elements with all
- # columns set to undef
- my $has_def;
- for (values %{$me_pref->[0]}) {
- if (defined $_) {
- $has_def++;
- last;
- }
- }
- next unless $has_def;
+ my $accessor = $source->relationship_info($pre)->{attrs}{accessor}
+ or $class->throw_exception("No accessor type declared for prefetched relationship '$pre'");
- push @pre_objects, $pre_source->result_class->inflate_result(
- $pre_source, @$me_pref
- );
- }
+ if ($accessor eq 'single') {
+ $new->{_relationship_data}{$pre} = $pre_objects[0];
+ }
+ elsif ($accessor eq 'filter') {
+ $new->{_inflated_column}{$pre} = $pre_objects[0];
+ }
- if ($accessor eq 'single') {
- $new->{_relationship_data}{$pre} = $pre_objects[0];
- }
- elsif ($accessor eq 'filter') {
- $new->{_inflated_column}{$pre} = $pre_objects[0];
+ $new->related_resultset($pre)->set_cache(\@pre_objects);
}
-
- $new->related_resultset($pre)->set_cache(\@pre_objects);
}
$new->in_storage (1);
$new->_sql_maker_opts({});
$new->_dbh_details({});
$new->{_in_do_block} = 0;
- $new->{_dbh_gen} = 0;
# read below to see what this does
$new->_arm_global_destructor;
# soon as possible (DBIC will reconnect only on demand from within
# the thread)
my @instances = grep { defined $_ } values %seek_and_destroy;
+ %seek_and_destroy = ();
+
for (@instances) {
- $_->{_dbh_gen}++; # so that existing cursors will drop as well
$_->_dbh(undef);
$_->transaction_depth(0);
$_->savepoints([]);
- }
- # properly renumber all existing refs
- %seek_and_destroy = ();
- $_->_arm_global_destructor for @instances;
+ # properly renumber existing refs
+ $_->_arm_global_destructor
+ }
}
}
my $pid = $self->_conn_pid;
if( defined $pid and $pid != $$ and my $dbh = $self->_dbh ) {
$dbh->{InactiveDestroy} = 1;
- $self->{_dbh_gen}++;
$self->_dbh(undef);
$self->transaction_depth(0);
$self->savepoints([]);
%{ $self->_dbh->{CachedKids} } = ();
$self->_dbh->disconnect;
$self->_dbh(undef);
- $self->{_dbh_gen}++;
}
}
# see if we need to tear the prefetch apart otherwise delegate the limiting to the
# storage, unless software limit was requested
if (
- #limited has_many
- ( $attrs->{rows} && keys %{$attrs->{collapse}} )
+ # limited collapsing has_many
+ ( $attrs->{rows} && $attrs->{collapse} )
||
# grouped prefetch (to satisfy group_by == select)
( $attrs->{group_by}
=cut
-sub _dbh_next {
- my ($storage, $dbh, $self) = @_;
+sub next {
+ my $self = shift;
- my $next = $self->next::can;
+ my @row = $self->next::method(@_);
- my @row = $next->(@_);
-
- my $col_infos = $storage->_resolve_column_info($self->args->[0]);
-
- my $select = $self->args->[1];
-
- _normalize_guids($select, $col_infos, \@row, $storage);
+ _normalize_guids(
+ $self->args->[1],
+ $self->{_colinfos} ||= $self->storage->_resolve_column_info($self->args->[0]),
+ \@row,
+ $self->storage
+ );
return @row;
}
-sub _dbh_all {
- my ($storage, $dbh, $self) = @_;
-
- my $next = $self->next::can;
-
- my @rows = $next->(@_);
-
- my $col_infos = $storage->_resolve_column_info($self->args->[0]);
+sub all {
+ my $self = shift;
- my $select = $self->args->[1];
+ my @rows = $self->next::method(@_);
- _normalize_guids($select, $col_infos, $_, $storage) for @rows;
+ _normalize_guids(
+ $self->args->[1],
+ $self->{_colinfos} ||= $self->storage->_resolve_column_info($self->args->[0]),
+ $_,
+ $self->storage
+ ) for @rows;
return @rows;
}
=cut
-sub _dbh_next {
- my ($storage, $dbh, $self) = @_;
+sub next {
+ my $self = shift;
- my $next = $self->next::can;
+ my @row = $self->next::method(@_);
- my @row = $next->(@_);
+ $self->{_colinfos} ||= $self->storage->_resolve_column_info($self->args->[0]);
- my $col_infos = $storage->_resolve_column_info($self->args->[0]);
+ _normalize_guids(
+ $self->args->[1],
+ $self->{_colinfos},
+ \@row,
+ $self->storage
+ );
- my $select = $self->args->[1];
-
- _normalize_guids($select, $col_infos, \@row, $storage);
- _strip_trailing_binary_nulls($select, $col_infos, \@row, $storage);
+ _strip_trailing_binary_nulls(
+ $self->args->[1],
+ $self->{_colinfos},
+ \@row,
+ $self->storage
+ );
return @row;
}
-sub _dbh_all {
- my ($storage, $dbh, $self) = @_;
-
- my $next = $self->next::can;
-
- my @rows = $next->(@_);
+sub all {
+ my $self = shift;
- my $col_infos = $storage->_resolve_column_info($self->args->[0]);
+ my @rows = $self->next::method(@_);
- my $select = $self->args->[1];
+ $self->{_colinfos} ||= $self->storage->_resolve_column_info($self->args->[0]);
for (@rows) {
- _normalize_guids($select, $col_infos, $_, $storage);
- _strip_trailing_binary_nulls($select, $col_infos, $_, $storage);
+ _normalize_guids(
+ $self->args->[1],
+ $self->{_colinfos},
+ $_,
+ $self->storage
+ );
+
+ _strip_trailing_binary_nulls(
+ $self->args->[1],
+ $self->{_colinfos},
+ $_,
+ $self->storage
+ );
}
return @rows;
use base qw/DBIx::Class::Cursor/;
use Try::Tiny;
+use Scalar::Util qw/refaddr weaken/;
use namespace::clean;
__PACKAGE__->mk_group_accessors('simple' =>
- qw/sth storage args attrs/
+ qw/storage args attrs/
);
=head1 NAME
=cut
-sub new {
- my ($class, $storage, $args, $attrs) = @_;
- $class = ref $class if ref $class;
+{
+ my %cursor_registry;
- my $new = {
- storage => $storage,
- args => $args,
- attrs => $attrs,
- _dbh_gen => $storage->{_dbh_gen},
- _pos => 0,
- _done => 0,
- };
+ sub new {
+ my ($class, $storage, $args, $attrs) = @_;
- return bless ($new, $class);
+ my $self = bless {
+ storage => $storage,
+ args => $args,
+ attrs => $attrs,
+ }, ref $class || $class;
+
+ weaken( $cursor_registry{ refaddr($self) } = $self )
+ if DBIx::Class::_ENV_::HAS_ITHREADS;
+
+ return $self;
+ }
+
+ sub CLONE {
+ for (keys %cursor_registry) {
+ # once marked we no longer care about them, hence no
+      # need to keep in the registry, let alone renumber the
+ # keys (all addresses are now different)
+ my $self = delete $cursor_registry{$_}
+ or next;
+
+ $self->{_intra_thread} = 1;
+ }
+ }
}
=head2 next
=cut
-sub _dbh_next {
- my ($storage, $dbh, $self) = @_;
+sub next {
+ my $self = shift;
+
+ return if $self->{_done};
+
+ my $sth;
- $self->_check_dbh_gen;
if (
$self->{attrs}{software_limit}
&& $self->{attrs}{rows}
- && $self->{_pos} >= $self->{attrs}{rows}
+ && ($self->{_pos}||0) >= $self->{attrs}{rows}
) {
- $self->sth->finish if $self->sth->{Active};
- $self->sth(undef);
+ if ($sth = $self->sth) {
+ # explicit finish will issue warnings, unlike the DESTROY below
+ $sth->finish if $sth->FETCH('Active');
+ }
$self->{_done} = 1;
+ return;
}
- return if $self->{_done};
+ unless ($sth = $self->sth) {
+ (undef, $sth, undef) = $self->storage->_select( @{$self->{args}} );
+
+ $self->{_results} = [ (undef) x $sth->FETCH('NUM_OF_FIELDS') ];
+ $sth->bind_columns( \( @{$self->{_results}} ) );
- unless ($self->sth) {
- $self->sth(($storage->_select(@{$self->{args}}))[1]);
- if ($self->{attrs}{software_limit}) {
- if (my $offset = $self->{attrs}{offset}) {
- $self->sth->fetch for 1 .. $offset;
- }
+ if ( $self->{attrs}{software_limit} and $self->{attrs}{offset} ) {
+ $sth->fetch for 1 .. $self->{attrs}{offset};
}
+
+ $self->sth($sth);
}
- my @row = $self->sth->fetchrow_array;
- if (@row) {
+
+ if ($sth->fetch) {
$self->{_pos}++;
+ return @{$self->{_results}};
} else {
- $self->sth(undef);
$self->{_done} = 1;
+ return ();
}
- return @row;
}
-sub next {
- my ($self) = @_;
- $self->{storage}->dbh_do($self->can('_dbh_next'), $self);
-}
=head2 all
=cut
-sub _dbh_all {
- my ($storage, $dbh, $self) = @_;
+sub all {
+ my $self = shift;
+
+ # delegate to DBIC::Cursor which will delegate back to next()
+ if ($self->{attrs}{software_limit}
+ && ($self->{attrs}{offset} || $self->{attrs}{rows})) {
+ return $self->next::method(@_);
+ }
+
+ my $sth;
+
+ if ($sth = $self->sth) {
+ # explicit finish will issue warnings, unlike the DESTROY below
+ $sth->finish if ( ! $self->{_done} and $sth->FETCH('Active') );
+ $self->sth(undef);
+ }
+
+ (undef, $sth) = $self->storage->_select( @{$self->{args}} );
- $self->_check_dbh_gen;
- $self->sth->finish if $self->sth && $self->sth->{Active};
- $self->sth(undef);
- my ($rv, $sth) = $storage->_select(@{$self->{args}});
return @{$sth->fetchall_arrayref};
}
-sub all {
- my ($self) = @_;
- if ($self->{attrs}{software_limit}
- && ($self->{attrs}{offset} || $self->{attrs}{rows})) {
- return $self->next::method;
+sub sth {
+ my $self = shift;
+
+ if (@_) {
+ delete @{$self}{qw/_pos _done _pid _intra_thread/};
+
+ $self->{sth} = $_[0];
+ $self->{_pid} = $$ if ! DBIx::Class::_ENV_::BROKEN_FORK and $_[0];
}
+ elsif ($self->{sth} and ! $self->{_done}) {
+
+ my $invalidate_handle_reason;
+
+ if (DBIx::Class::_ENV_::HAS_ITHREADS and $self->{_intra_thread} ) {
+ $invalidate_handle_reason = 'Multi-thread';
+ }
+ elsif (!DBIx::Class::_ENV_::BROKEN_FORK and $self->{_pid} != $$ ) {
+ $invalidate_handle_reason = 'Multi-process';
+ }
- $self->{storage}->dbh_do($self->can('_dbh_all'), $self);
+ if ($invalidate_handle_reason) {
+ $self->storage->throw_exception("$invalidate_handle_reason access attempted while cursor in progress (position $self->{_pos})")
+ if $self->{_pos};
+
+ # reinvokes the reset logic above
+ $self->sth(undef);
+ }
+ }
+
+ return $self->{sth};
}
=head2 reset
=cut
sub reset {
- my ($self) = @_;
-
- # No need to care about failures here
- try { $self->sth->finish }
- if $self->sth && $self->sth->{Active};
- $self->_soft_reset;
- return undef;
+ $_[0]->__finish_sth if $_[0]->{sth};
+ $_[0]->sth(undef);
}
-sub _soft_reset {
- my ($self) = @_;
- $self->sth(undef);
- $self->{_done} = 0;
- $self->{_pos} = 0;
+sub DESTROY {
+ $_[0]->__finish_sth if $_[0]->{sth};
}
-sub _check_dbh_gen {
- my ($self) = @_;
+sub __finish_sth {
+ # It is (sadly) extremely important to finish() handles we are about
+ # to lose (due to reset() or a DESTROY() ). $rs->reset is the closest
+ # thing the user has to getting to the underlying finish() API and some
+ # DBDs mandate this (e.g. DBD::InterBase will segfault, DBD::Sybase
+ # won't start a transaction sanely, etc)
+ # We also can't use the accessor here, as it will trigger a fork/thread
+ # check, and resetting a cursor in a child is perfectly valid
- if($self->{_dbh_gen} != $self->{storage}->{_dbh_gen}) {
- $self->{_dbh_gen} = $self->{storage}->{_dbh_gen};
- $self->_soft_reset;
- }
-}
+ my $self = shift;
-sub DESTROY {
- # None of the reasons this would die matter if we're in DESTROY anyways
- if (my $sth = $_[0]->sth) {
- local $SIG{__WARN__} = sub {};
- try { $sth->finish } if $sth->FETCH('Active');
- }
+ # No need to care about failures here
+ try { local $SIG{__WARN__} = sub {}; $self->{sth}->finish } if (
+ $self->{sth} and ! try { ! $self->{sth}->FETCH('Active') }
+ );
}
1;
=cut
-sub _dbh_next {
- my ($storage, $dbh, $self) = @_;
-
- my $next = $self->next::can;
-
- my @row = $next->(@_);
-
- my $col_info = $storage->_resolve_column_info($self->args->[0]);
-
- my $select = $self->args->[1];
+my $unpack_guids = sub {
+ my ($select, $col_infos, $data, $storage) = @_;
for my $select_idx (0..$#$select) {
- my $selected = $select->[$select_idx];
-
- next if ref $selected;
+ next unless (
+ defined $data->[$select_idx]
+ and
+ length($data->[$select_idx]) == 16
+ );
- my $data_type = $col_info->{$selected}{data_type};
+ my $selected = $select->[$select_idx];
- if ($storage->_is_guid_type($data_type)) {
- my $returned = $row[$select_idx];
+ my $data_type = $col_infos->{$select->[$select_idx]}{data_type}
+ or next;
- if (length $returned == 16) {
- $row[$select_idx] = $storage->_uuid_to_str($returned);
- }
- }
+ $data->[$select_idx] = $storage->_uuid_to_str($data->[$select_idx])
+ if $storage->_is_guid_type($data_type);
}
+};
- return @row;
-}
-
-sub _dbh_all {
- my ($storage, $dbh, $self) = @_;
-
- my $next = $self->next::can;
- my @rows = $next->(@_);
+sub next {
+ my $self = shift;
- my $col_info = $storage->_resolve_column_info($self->args->[0]);
+ my @row = $self->next::method(@_);
- my $select = $self->args->[1];
+ $unpack_guids->(
+ $self->args->[1],
+ $self->{_colinfos} ||= $self->storage->_resolve_column_info($self->args->[0]),
+ \@row,
+ $self->storage
+ );
- for my $row (@rows) {
- for my $select_idx (0..$#$select) {
- my $selected = $select->[$select_idx];
+ return @row;
+}
- next if ref $selected;
+sub all {
+ my $self = shift;
- my $data_type = $col_info->{$selected}{data_type};
+ my @rows = $self->next::method(@_);
- if ($storage->_is_guid_type($data_type)) {
- my $returned = $row->[$select_idx];
+ $unpack_guids->(
+ $self->args->[1],
+ $self->{_colinfos} ||= $self->storage->_resolve_column_info($self->args->[0]),
+ $_,
+ $self->storage
+ ) for @rows;
- if (length $returned == 16) {
- $row->[$select_idx] = $storage->_uuid_to_str($returned);
- }
- }
- }
- }
return @rows;
}
my ($self, $from, $select, $where, $attrs) = @_;
$self->throw_exception ('Nothing to prefetch... how did we get here?!')
- if not @{$attrs->{_prefetch_selector_range}};
+ if not @{$attrs->{_prefetch_selector_range}||[]};
$self->throw_exception ('Complex prefetches are not supported on resultsets with a custom from attribute')
if (ref $from ne 'ARRAY' || ref $from->[0] ne 'HASH' || ref $from->[1] ne 'ARRAY');
delete $outer_attrs->{$_} for qw/where bind rows offset group_by having/;
my $inner_attrs = { %$attrs };
- delete $inner_attrs->{$_} for qw/for collapse _prefetch_selector_range _collapse_order_by select as/;
-
- # bring over all non-collapse-induced order_by into the inner query (if any)
- # the outer one will have to keep them all
- delete $inner_attrs->{order_by};
- if (my $ord_cnt = @{$outer_attrs->{order_by}} - @{$outer_attrs->{_collapse_order_by}} ) {
- $inner_attrs->{order_by} = [
- @{$outer_attrs->{order_by}}[ 0 .. $ord_cnt - 1]
- ];
- }
+ delete $inner_attrs->{$_} for qw/for collapse _prefetch_selector_range select as/;
+
+ # if the user did not request it, there is no point using it inside
+ delete $inner_attrs->{order_by} if delete $inner_attrs->{_order_is_artificial};
# generate the inner/outer select lists
# for inside we consider only stuff *not* brought in by the prefetch
die "Illegal version $version_string - we are still in the 0.08 cycle\n"
}
+if ($v_point >= 300) {
+ die "Illegal version $version_string - we are still in the 0.082xx cycle\n"
+}
-# all odd releases *after* 0.08200 generate a -TRIAL, no exceptions
-Meta->makemaker_args->{DISTVNAME} = Meta->name . "-$version_string-TRIAL"
- if ( $v_point > 200 and int($v_point / 100) % 2 );
+Meta->makemaker_args->{DISTVNAME} = Meta->name . "-$version_string-TRIAL" if (
+ # 0.08240 ~ 0.08249 shall be TRIALs for the collapser rewrite
+ ( $v_point >= 240 and $v_point <= 249 )
+ or
+ # all odd releases *after* 0.08200 generate a -TRIAL, no exceptions
+ ( $v_point > 200 and int($v_point / 100) % 2 )
+);
my $tags = { map { chomp $_; $_ => 1} `git tag` };
use strict;
use warnings;
use Test::More;
+use Test::Exception;
use lib qw(t/lib);
use DBICTest;
$schema->resultset('CD')->create({ title => 'vacation in antarctica part 2', artist => 456, year => 1901 });
$parent_rs = $schema->resultset('CD')->search({ year => 1901 });
- $parent_rs->next;
+ is ($parent_rs->count, 2);
};
ok(!$@) or diag "Creation eval failed: $@";
+# basic tests
{
- my $pid = fork;
- if(!defined $pid) {
- die "fork failed: $!";
+ ok ($schema->storage->connected(), 'Parent is connected');
+ is ($parent_rs->next->id, 1, 'Cursor advanced');
+
+ my ($parent_in, $child_out);
+ pipe( $parent_in, $child_out ) or die "Pipe open failed: $!";
+
+ my $pid = fork;
+ if(!defined $pid) {
+ die "fork failed: $!";
+ }
+
+ if (!$pid) {
+ close $parent_in;
+
+ #simulate a subtest to not confuse the parent TAP emission
+ my $tb = Test::More->builder;
+ $tb->reset;
+ for (qw/output failure_output todo_output/) {
+ close $tb->$_;
+ open ($tb->$_, '>&', $child_out);
}
- if (!$pid) {
- exit $schema->storage->connected ? 1 : 0;
+ ok(!$schema->storage->connected, "storage->connected() false in child");
+ for (1,2) {
+ throws_ok { $parent_rs->next } qr/\QMulti-process access attempted while cursor in progress (position 1)/;
}
- if (waitpid($pid, 0) == $pid) {
- my $ex = $? >> 8;
- ok($ex == 0, "storage->connected() returns false in child");
- exit $ex if $ex; # skip remaining tests
- }
+ $parent_rs->reset;
+ is($parent_rs->next->id, 1, 'Resetting cursor reprepares it within child environment');
+
+ done_testing;
+ exit 0;
+ }
+
+ close $child_out;
+ while (my $ln = <$parent_in>) {
+ print " $ln";
+ }
+ waitpid( $pid, 0 );
+ ok(!$?, 'Child subtests passed');
+
+ is ($parent_rs->next->id, 2, 'Cursor still intact in parent');
+ is ($parent_rs->next, undef, 'Cursor exhausted');
}
+$parent_rs->reset;
my @pids;
while(@pids < $num_children) {
$schema->resultset('CD')->create({ title => 'vacation in antarctica part 2', artist => 456, year => 1901 });
$parent_rs = $schema->resultset('CD')->search({ year => 1901 });
- $parent_rs->next;
+ is ($parent_rs->count, 2);
}, 'populate successfull');
+# basic tests
+{
+ ok ($schema->storage->connected(), 'Parent is connected');
+ is ($parent_rs->next->id, 1, 'Cursor advanced');
+ my $ct_num = Test::More->builder->current_test;
+
+ my $newthread = async {
+ my $out = '';
+
+ #simulate a subtest to not confuse the parent TAP emission
+ my $tb = Test::More->builder;
+ $tb->reset;
+ for (qw/output failure_output todo_output/) {
+ close $tb->$_;
+ open ($tb->$_, '>', \$out);
+ }
+
+ ok(!$schema->storage->connected, "storage->connected() false in child");
+ for (1,2) {
+ throws_ok { $parent_rs->next } qr/\QMulti-thread access attempted while cursor in progress (position 1)/;
+ }
+
+ $parent_rs->reset;
+ is($parent_rs->next->id, 1, 'Resetting cursor reprepares it within child environment');
+
+ done_testing;
+
+ close $tb->$_ for (qw/output failure_output todo_output/);
+ sleep(1); # tasty crashes without this
+
+ $out;
+ };
+ die "Thread creation failed: $! $@" if !defined $newthread;
+
+ my $out = $newthread->join;
+ $out =~ s/^/ /gm;
+ print $out;
+
+ # workaround for older Test::More confusing the plan under threads
+ Test::More->builder->current_test($ct_num);
+
+ is ($parent_rs->next->id, 2, 'Cursor still intact in parent');
+ is ($parent_rs->next, undef, 'Cursor exhausted');
+}
+
+$parent_rs->reset;
my @children;
while(@children < $num_children) {
}
ok(1, "Made it to the end");
+undef $parent_rs;
$schema->storage->dbh->do("DROP TABLE cd");
$schema->resultset('CD')->create({ title => 'vacation in antarctica part 2', artist => 456, year => 1901 });
$parent_rs = $schema->resultset('CD')->search({ year => 1901 });
- $parent_rs->next;
+ is ($parent_rs->count, 2);
};
ok(!$@) or diag "Creation eval failed: $@";
delete $weak_registry->{$slot}
unless $cleared->{hash_merge_singleton}{$weak_registry->{$slot}{weakref}{behavior}}++;
}
+ elsif (
+ $slot =~ /^Data::Dumper/
+ and
+ $weak_registry->{$slot}{stacktrace} =~ /\QDBIx::Class::ResultSource::RowParser::_mk_row_parser/
+ ) {
+ # there should be only one D::D object (used to construct the rowparser)
+ # more would indicate trouble
+ delete $weak_registry->{$slot}
+ unless $cleared->{mk_row_parser_dd_singleton}++;
+ }
elsif ($slot =~ /^DateTime::TimeZone/) {
# DT is going through a refactor it seems - let it leak zones for now
delete $weak_registry->{$slot};
# this subclass is expected to inherit whatever crap comes
# from the parent
'DBIx::Class::ResultSet::Pager',
+
+ # a utility class, not part of the inheritance chain
+ 'DBIx::Class::ResultSource::RowParser::Util',
) };
my $has_cmop = eval { require Class::MOP };
is ($collapsed_or_rs->count, 4, 'Collapsed search count with OR ok');
# make sure sure distinct on a grouped rs is warned about
-my $cd_rs = $schema->resultset ('CD')
- ->search ({}, { distinct => 1, group_by => 'title' });
-warnings_exist (sub {
- $cd_rs->next;
-}, qr/Useless use of distinct/, 'UUoD warning');
+{
+ my $cd_rs = $schema->resultset ('CD')
+ ->search ({}, { distinct => 1, group_by => 'title' });
+ warnings_exist (sub {
+ $cd_rs->next;
+ }, qr/Useless use of distinct/, 'UUoD warning');
+}
{
my $tcount = $schema->resultset('Track')->search(
$or_rs->reset;
$rel_rs->reset;
+# at this point there should be no active statements
+# (finish() was called everywhere, either explicitly via
+# reset() or on DESTROY)
+for (keys %{$schema->storage->dbh->{CachedKids}}) {
+ fail("Unreachable cached statement still active: $_")
+ if $schema->storage->dbh->{CachedKids}{$_}->FETCH('Active');
+}
+
my $tag = $schema->resultset('Tag')->search(
[ { 'me.tag' => 'Blue' } ], { cols=>[qw/tagid/] } )->next;
} 'inferring generator from trigger source works';
}
+ # at this point there should be no active statements
+ # (finish() was called everywhere, either explicitly via
+ # reset() or on DESTROY)
+ for (keys %{$schema->storage->dbh->{CachedKids}}) {
+ fail("Unreachable cached statement still active: $_")
+ if $schema->storage->dbh->{CachedKids}{$_}->FETCH('Active');
+ }
+
# test blobs (stolen from 73oracle.t)
eval { $dbh->do('DROP TABLE "bindtype_test"') };
$dbh->do(q[
push @objs, $tag->id; #warn "tag: ", $tag->ID;
}
-is_deeply( \@objs, [ 1 ], 'second cd has correct tags' );
+is_deeply( \@objs, [ 2, 5, 8 ], 'third cd has correct tags' );
$tags = $cds->next->tags;
@objs = ();
push @objs, $tag->id; #warn "tag: ", $tag->ID;
}
-is_deeply( \@objs, [ 2, 5, 8 ], 'third cd has correct tags' );
+is_deeply( \@objs, [ 1 ], 'second cd has correct tags' );
is( $queries, 0, 'no additional SQL statements while checking nested data' );
'prefetch properly collapses amount of rows from get_column',
);
+$rs->reset;
+my $pob_rs = $rs->search({}, {
+ select => ['me.title', 'tracks.title'],
+ prefetch => 'tracks',
+ order_by => [{-asc => ['position']}],
+ group_by => ['me.title', 'tracks.title'],
+});
+is_same_sql_bind (
+ $pob_rs->get_column("me.title")->as_query,
+ '(SELECT me.title FROM (SELECT me.title, tracks.title FROM cd me LEFT JOIN track tracks ON tracks.cd = me.cdid GROUP BY me.title, tracks.title ORDER BY position ASC) me)',
+ [],
+ 'Correct SQL for prefetch/order_by/group_by'
+);
+
done_testing;
use Test::More;
use Test::Exception;
+
use lib qw(t/lib);
use DBICTest;
use DBIC::SqlMakerTest;
my $schema = DBICTest->init_schema();
- {
- my $rs = $schema->resultset( 'CD' )->search(
- {
- 'producer.name' => 'blah',
- 'producer_2.name' => 'foo',
- },
- {
- 'join' => [
- { cd_to_producer => 'producer' },
- { cd_to_producer => 'producer' },
- ],
- 'prefetch' => [
- 'artist',
- { cd_to_producer => 'producer' },
- ],
- }
- );
-
- lives_ok {
- my @rows = $rs->all();
- };
- }
+lives_ok (sub {
+ my $rs = $schema->resultset( 'CD' )->search(
+ {
+ 'producer.name' => 'blah',
+ 'producer_2.name' => 'foo',
+ },
+ {
+ 'join' => [
+ { cd_to_producer => 'producer' },
+ { cd_to_producer => 'producer' },
+ ],
+ 'prefetch' => [
+ 'artist',
+ { cd_to_producer => { producer => 'producer_to_cd' } },
+ ],
+ }
+ );
+
+ my @executed = $rs->all();
+
+ is_same_sql_bind (
+ $rs->as_query,
+ '(
+ SELECT me.cdid, me.artist, me.title, me.year, me.genreid, me.single_track,
+ artist.artistid, artist.name, artist.rank, artist.charfield,
+ cd_to_producer.cd, cd_to_producer.producer, cd_to_producer.attribute,
+ producer.producerid, producer.name,
+ producer_to_cd.cd, producer_to_cd.producer, producer_to_cd.attribute
+ FROM cd me
+ LEFT JOIN cd_to_producer cd_to_producer
+ ON cd_to_producer.cd = me.cdid
+ LEFT JOIN producer producer
+ ON producer.producerid = cd_to_producer.producer
+ LEFT JOIN cd_to_producer producer_to_cd
+ ON producer_to_cd.producer = producer.producerid
+ LEFT JOIN cd_to_producer cd_to_producer_2
+ ON cd_to_producer_2.cd = me.cdid
+ LEFT JOIN producer producer_2
+ ON producer_2.producerid = cd_to_producer_2.producer
+ JOIN artist artist ON artist.artistid = me.artist
+ WHERE ( ( producer.name = ? AND producer_2.name = ? ) )
+ )',
+ [
+ [ { sqlt_datatype => 'varchar', dbic_colname => 'producer.name', sqlt_size => 100 }
+ => 'blah' ],
+ [ { sqlt_datatype => 'varchar', dbic_colname => 'producer_2.name', sqlt_size => 100 }
+ => 'foo' ],
+ ],
+ );
+}, 'Complex join parsed/executed properly');
my @rs1a_results = $schema->resultset("Artist")->search_related('cds', {title => 'Forkful of bees'}, {order_by => 'title'});
is($rs1a_results[0]->title, 'Forkful of bees', "bare field conditions okay after search related");
throws_ok {
$artist_rs->first
- } qr/Can't locate object method "inflate_result" via package "IWillExplode"/,
+ } qr/\QInflator IWillExplode does not provide an inflate_result() method/,
'IWillExplode explodes on inflate';
my $cd_rs = $artist_rs->related_resultset('cds');
is ($rs->result_class, 'DBICTest::CDSubclass', 'original class unchanged');
is ($hri_rs->result_class, 'DBIx::Class::ResultClass::HashRefInflator', 'result_class accessor pre-set via attribute');
-
my $datahashref1 = $hri_rs->next;
is_deeply(
[ sort keys %$datahashref1 ],
my @dbic_reltable = $dbic_obj->$col;
my @hashref_reltable = @{$datahashref->{$col}};
- is (scalar @dbic_reltable, scalar @hashref_reltable, 'number of related entries');
+ is (scalar @hashref_reltable, scalar @dbic_reltable, 'number of related entries');
# for my $index (0..scalar @hashref_reltable) {
for my $index (0..scalar @dbic_reltable) {
--- /dev/null
+use strict;
+use warnings;
+
+use Test::More;
+use Test::Deep;
+use lib qw(t/lib);
+use DBICTest;
+
+# More tests like this in t/prefetch/manual.t
+
+my $schema = DBICTest->init_schema(no_populate => 1, quote_names => 1);
+$schema->resultset('Artist')->create({ name => 'JMJ', cds => [{
+ title => 'Magnetic Fields',
+ year => 1981,
+ genre => { name => 'electro' },
+ tracks => [
+ { title => 'm1' },
+ { title => 'm2' },
+ { title => 'm3' },
+ { title => 'm4' },
+ ],
+} ] });
+
+
+$schema->resultset('CD')->create({
+ title => 'Equinoxe',
+ year => 1978,
+ artist => { name => 'JMJ' },
+ genre => { name => 'electro' },
+ tracks => [
+ { title => 'e1' },
+ { title => 'e2' },
+ { title => 'e3' },
+ ],
+ single_track => {
+ title => 'o1',
+ cd => {
+ title => 'Oxygene',
+ year => 1976,
+ artist => { name => 'JMJ' },
+ tracks => [
+ { title => 'o2', position => 2}, # the position should not be needed here, bug in MC
+ ],
+ },
+ },
+});
+
+for (1,2) {
+ $schema->resultset('CD')->create({ artist => 1, year => 1977, title => "fuzzy_$_" });
+}
+
+{
+ package DBICTest::HRI::Subclass;
+ use base 'DBIx::Class::ResultClass::HashRefInflator';
+}
+
+{
+ package DBICTest::HRI::Around;
+ use base 'DBIx::Class::ResultClass::HashRefInflator';
+
+ sub inflate_result { shift->next::method(@_) }
+}
+
+for my $rs (
+ $schema->resultset('CD')->search_rs({}, { result_class => 'DBIx::Class::ResultClass::HashRefInflator' }),
+ $schema->resultset('CD')->search_rs({}, { result_class => 'DBICTest::HRI::Subclass' }),
+ $schema->resultset('CD')->search_rs({}, { result_class => 'DBICTest::HRI::Around' }),
+) {
+
+cmp_deeply
+ [ $rs->search({}, {
+ columns => {
+ year => 'me.year',
+ 'single_track.cd.artist.name' => 'artist.name',
+ },
+ join => { single_track => { cd => 'artist' } },
+ order_by => [qw/me.cdid artist.artistid/],
+ })->all ],
+ [
+ {
+ single_track => undef,
+ year => 1981
+ },
+ {
+ single_track => undef,
+ year => 1976
+ },
+ {
+ single_track => {
+ cd => {
+ artist => {
+ name => "JMJ"
+ }
+ }
+ },
+ year => 1978
+ },
+ {
+ single_track => undef,
+ year => 1977
+ },
+ {
+ single_track => undef,
+ year => 1977
+ },
+ ],
+ 'plain 1:1 descending chain ' . $rs->result_class
+;
+
+cmp_deeply
+ [ $rs->search({}, {
+ columns => {
+ 'artist' => 'me.artist',
+ 'title' => 'me.title',
+ 'year' => 'me.year',
+ 'single_track.cd.artist.artistid' => 'artist.artistid',
+ 'single_track.cd.artist.cds.cdid' => 'cds.cdid',
+ 'single_track.cd.artist.cds.tracks.title' => 'tracks.title',
+ },
+ join => { single_track => { cd => { artist => { cds => 'tracks' } } } },
+ order_by => [qw/me.cdid artist.artistid cds.cdid tracks.trackid/],
+ })->all ],
+ [
+ {
+ artist => 1,
+ single_track => undef,
+ title => "Magnetic Fields",
+ year => 1981
+ },
+ {
+ artist => 1,
+ single_track => undef,
+ title => "Oxygene",
+ year => 1976
+ },
+ {
+ artist => 1,
+ single_track => {
+ cd => {
+ artist => {
+ artistid => 1,
+ cds => {
+ cdid => 1,
+ tracks => {
+ title => "m1"
+ }
+ }
+ }
+ }
+ },
+ title => "Equinoxe",
+ year => 1978
+ },
+ {
+ artist => 1,
+ single_track => {
+ cd => {
+ artist => {
+ artistid => 1,
+ cds => {
+ cdid => 1,
+ tracks => {
+ title => "m2"
+ }
+ }
+ }
+ }
+ },
+ title => "Equinoxe",
+ year => 1978
+ },
+ {
+ artist => 1,
+ single_track => {
+ cd => {
+ artist => {
+ artistid => 1,
+ cds => {
+ cdid => 1,
+ tracks => {
+ title => "m3"
+ }
+ }
+ }
+ }
+ },
+ title => "Equinoxe",
+ year => 1978
+ },
+ {
+ artist => 1,
+ single_track => {
+ cd => {
+ artist => {
+ artistid => 1,
+ cds => {
+ cdid => 1,
+ tracks => {
+ title => "m4"
+ }
+ }
+ }
+ }
+ },
+ title => "Equinoxe",
+ year => 1978
+ },
+ {
+ artist => 1,
+ single_track => {
+ cd => {
+ artist => {
+ artistid => 1,
+ cds => {
+ cdid => 2,
+ tracks => {
+ title => "o2"
+ }
+ }
+ }
+ }
+ },
+ title => "Equinoxe",
+ year => 1978
+ },
+ {
+ artist => 1,
+ single_track => {
+ cd => {
+ artist => {
+ artistid => 1,
+ cds => {
+ cdid => 2,
+ tracks => {
+ title => "o1"
+ }
+ }
+ }
+ }
+ },
+ title => "Equinoxe",
+ year => 1978
+ },
+ {
+ artist => 1,
+ single_track => {
+ cd => {
+ artist => {
+ artistid => 1,
+ cds => {
+ cdid => 3,
+ tracks => {
+ title => "e1"
+ }
+ }
+ }
+ }
+ },
+ title => "Equinoxe",
+ year => 1978
+ },
+ {
+ artist => 1,
+ single_track => {
+ cd => {
+ artist => {
+ artistid => 1,
+ cds => {
+ cdid => 3,
+ tracks => {
+ title => "e2"
+ }
+ }
+ }
+ }
+ },
+ title => "Equinoxe",
+ year => 1978
+ },
+ {
+ artist => 1,
+ single_track => {
+ cd => {
+ artist => {
+ artistid => 1,
+ cds => {
+ cdid => 3,
+ tracks => {
+ title => "e3"
+ }
+ }
+ }
+ }
+ },
+ title => "Equinoxe",
+ year => 1978
+ },
+ {
+ artist => 1,
+ single_track => {
+ cd => {
+ artist => {
+ artistid => 1,
+ cds => {
+ cdid => 4,
+ tracks => undef
+ }
+ }
+ }
+ },
+ title => "Equinoxe",
+ year => 1978
+ },
+ {
+ artist => 1,
+ single_track => {
+ cd => {
+ artist => {
+ artistid => 1,
+ cds => {
+ cdid => 5,
+ tracks => undef
+ }
+ }
+ }
+ },
+ title => "Equinoxe",
+ year => 1978
+ },
+ {
+ artist => 1,
+ single_track => undef,
+ title => "fuzzy_1",
+ year => 1977
+ },
+ {
+ artist => 1,
+ single_track => undef,
+ title => "fuzzy_2",
+ year => 1977
+ }
+ ],
+ 'non-collapsing 1:1:1:M:M chain ' . $rs->result_class,
+;
+
+cmp_deeply
+ [ $rs->search({}, {
+ columns => {
+ 'artist' => 'me.artist',
+ 'title' => 'me.title',
+ 'year' => 'me.year',
+ 'single_track.cd.artist.artistid' => 'artist.artistid',
+ 'single_track.cd.artist.cds.cdid' => 'cds.cdid',
+ 'single_track.cd.artist.cds.tracks.title' => 'tracks.title',
+ },
+ join => { single_track => { cd => { artist => { cds => 'tracks' } } } },
+ order_by => [qw/me.cdid artist.artistid cds.cdid tracks.trackid/],
+ collapse => {}, #hashref to keep older DBIC versions happy (doesn't actually work)
+ })->all ],
+ [
+ {
+ artist => 1,
+ single_track => undef,
+ title => "Magnetic Fields",
+ year => 1981
+ },
+ {
+ artist => 1,
+ single_track => undef,
+ title => "Oxygene",
+ year => 1976
+ },
+ {
+ artist => 1,
+ single_track => {
+ cd => {
+ artist => {
+ artistid => 1,
+ cds => [
+ {
+ cdid => 1,
+ tracks => [
+ {
+ title => "m1"
+ },
+ {
+ title => "m2"
+ },
+ {
+ title => "m3"
+ },
+ {
+ title => "m4"
+ }
+ ]
+ },
+ {
+ cdid => 2,
+ tracks => [
+ {
+ title => "o2"
+ },
+ {
+ title => "o1"
+ }
+ ]
+ },
+ {
+ cdid => 3,
+ tracks => [
+ {
+ title => "e1"
+ },
+ {
+ title => "e2"
+ },
+ {
+ title => "e3"
+ }
+ ]
+ },
+ {
+ cdid => 4,
+ tracks => []
+ },
+ {
+ cdid => 5,
+ tracks => []
+ }
+ ]
+ }
+ }
+ },
+ title => "Equinoxe",
+ year => 1978
+ },
+ {
+ artist => 1,
+ single_track => undef,
+ title => "fuzzy_1",
+ year => 1977
+ },
+ {
+ artist => 1,
+ single_track => undef,
+ title => "fuzzy_2",
+ year => 1977
+ }
+ ],
+ 'collapsing 1:1:1:M:M chain ' . $rs->result_class,
+;
+
+}
+
+done_testing;
{ join_type => 'left'}
);
+# add a non-left single relationship for the complex prefetch tests
+__PACKAGE__->belongs_to( existing_single_track => 'DBICTest::Schema::Track', 'single_track');
+
__PACKAGE__->has_many( tracks => 'DBICTest::Schema::Track' );
__PACKAGE__->has_many(
tags => 'DBICTest::Schema::Tag', undef,
},
);
__PACKAGE__->set_primary_key('id');
+__PACKAGE__->add_unique_constraint ([qw/lyric_id text/]);
__PACKAGE__->belongs_to('lyric', 'DBICTest::Schema::Lyrics', 'lyric_id');
1;
__PACKAGE__->belongs_to('track', 'DBICTest::Schema::Track', 'track_id');
__PACKAGE__->has_many('lyric_versions', 'DBICTest::Schema::LyricVersion', 'lyric_id');
+__PACKAGE__->has_many('existing_lyric_versions', 'DBICTest::Schema::LyricVersion', 'lyric_id', {
+ join_type => 'inner',
+});
+
1;
+++ /dev/null
-package PrefetchBug::Left;
-
-use strict;
-use warnings;
-
-use base 'DBIx::Class::Core';
-
-__PACKAGE__->table('prefetchbug_left');
-__PACKAGE__->add_columns(
- id => { data_type => 'integer', is_auto_increment => 1 },
-);
-
-__PACKAGE__->set_primary_key('id');
-
-__PACKAGE__->has_many(
- prefetch_leftright => 'PrefetchBug::LeftRight',
- 'left_id'
-);
-
-1;
+++ /dev/null
-package
- PrefetchBug::LeftRight;
-
-use strict;
-use warnings;
-
-use base 'DBIx::Class::Core';
-
-__PACKAGE__->table('prefetchbug_left_right');
-__PACKAGE__->add_columns(
- left_id => { data_type => 'integer' },
- right_id => { data_type => 'integer' },
- value => {});
-
-__PACKAGE__->set_primary_key('left_id', 'right_id');
-__PACKAGE__->belongs_to(left => 'PrefetchBug::Left', 'left_id');
-__PACKAGE__->belongs_to(
- right => 'PrefetchBug::Right',
- 'right_id',
-# {join_type => 'left'}
-);
-
-
-1;
+++ /dev/null
-package
- PrefetchBug::Right;
-
-use strict;
-use warnings;
-
-use base 'DBIx::Class::Core';
-
-__PACKAGE__->table('prefetchbug_right');
-__PACKAGE__->add_columns(qw/ id name category description propagates locked/);
-__PACKAGE__->set_primary_key('id');
-
-__PACKAGE__->has_many('prefetch_leftright', 'PrefetchBug::LeftRight', 'right_id');
-1;
"genreid" integer,
"single_track" integer,
FOREIGN KEY ("artist") REFERENCES "artist"("artistid") ON DELETE CASCADE ON UPDATE CASCADE,
- FOREIGN KEY ("genreid") REFERENCES "genre"("genreid") ON DELETE SET NULL ON UPDATE CASCADE,
- FOREIGN KEY ("single_track") REFERENCES "track"("trackid") ON DELETE CASCADE
+ FOREIGN KEY ("single_track") REFERENCES "track"("trackid") ON DELETE CASCADE,
+ FOREIGN KEY ("genreid") REFERENCES "genre"("genreid") ON DELETE SET NULL ON UPDATE CASCADE
);
CREATE INDEX "cd_idx_artist" ON "cd" ("artist");
-CREATE INDEX "cd_idx_genreid" ON "cd" ("genreid");
-
CREATE INDEX "cd_idx_single_track" ON "cd" ("single_track");
+CREATE INDEX "cd_idx_genreid" ON "cd" ("genreid");
+
CREATE UNIQUE INDEX "cd_artist_title" ON "cd" ("artist", "title");
CREATE TABLE "collection_object" (
CREATE INDEX "lyric_versions_idx_lyric_id" ON "lyric_versions" ("lyric_id");
+CREATE UNIQUE INDEX "lyric_versions_lyric_id_text" ON "lyric_versions" ("lyric_id", "text");
+
CREATE TABLE "tags" (
"tagid" INTEGER PRIMARY KEY NOT NULL,
"cd" integer NOT NULL,
use lib qw(t/lib);
use DBICTest;
-plan tests => 2;
-
my $schema = DBICTest->init_schema();
-my $track_no_lyrics = $schema->resultset ('Track')
- ->search ({ 'lyrics.lyric_id' => undef }, { join => 'lyrics' })
- ->first;
-
-my $lyric = $track_no_lyrics->create_related ('lyrics', {
- lyric_versions => [
- { text => 'english doubled' },
- { text => 'english doubled' },
- ],
+my $link = $schema->resultset ('Link')->create ({
+ url => 'loldogs!',
+ bookmarks => [
+ { link => 'Mein Hund ist schwul'},
+ { link => 'Mein Hund ist schwul'},
+ ]
});
-is ($lyric->lyric_versions->count, 2, "Two identical has_many's created");
+is ($link->bookmarks->count, 2, "Two identical default-insert has_many's created");
-my $link = $schema->resultset ('Link')->create ({
+$link = $schema->resultset ('Link')->create ({
url => 'lolcats!',
bookmarks => [
{},
]
});
is ($link->bookmarks->count, 2, "Two identical default-insert has_many's created");
+
+done_testing;
use warnings;
use Test::More;
+use Test::Deep;
use lib qw(t/lib);
use DBICTest;
use DBIC::SqlMakerTest;
LEFT JOIN track tracks
ON tracks.cd = me.cdid
WHERE me.artist != ?
- ORDER BY tracks.cd
)',
[
$schema->storage->debugcb(sub { $queries++; });
$schema->storage->debug(1);
-is_deeply (
+cmp_deeply (
{ map
{ $_->cdid => {
track_titles => [ map { $_->title } ($_->tracks->all) ],
LEFT JOIN track tracks
ON tracks.cd = me.cdid
WHERE me.artist != ?
- ORDER BY tracks.cd
)',
[
my $schema = DBICTest->init_schema();
-plan tests => 1;
-
# While this is a rather GIGO case, make sure it behaves as pre-103,
# as it may result in hard-to-track bugs
my $cds = $schema->resultset('Artist')
LEFT JOIN cd cd ON cd.cdid = single_track_2.cd
)',
);
+
+done_testing;
use strict;
use Test::More;
+use Test::Deep;
use lib qw(t/lib);
use DBICTest;
my $cd = $schema->resultset('CD')->search( {}, { prefetch => 'artist' })->next;
-is_deeply
+cmp_deeply
{ $cd->get_columns },
{
artist => 0,
'Expected CD columns present',
;
-is_deeply
+cmp_deeply
{ $cd->artist->get_columns },
{
artistid => 0,
LEFT JOIN track tracks ON tracks.cd = me.cdid
LEFT JOIN liner_notes liner_notes ON liner_notes.liner_id = me.cdid
WHERE ( me.cdid IS NOT NULL )
- ORDER BY track_count DESC, maxtr ASC, tracks.cd
+ ORDER BY track_count DESC, maxtr ASC
)',
[[$ROWS => 2]],
'next() query generated expected SQL',
ORDER BY cdid
) me
LEFT JOIN tags tags ON tags.cd = me.cdid
- ORDER BY cdid, tags.cd, tags.tag
+ ORDER BY cdid
)',
[],
'Prefetch + distinct resulted in correct group_by',
ORDER BY tags.tag ASC LIMIT ?)
me
LEFT JOIN tags tags ON tags.cd = me.cdid
- ORDER BY tags.tag ASC, tags.cd, tags.tag
+ ORDER BY tags.tag ASC
)
}, [[$ROWS => 1]]);
}
use warnings;
use Test::More;
+use Test::Deep;
use Test::Exception;
use lib qw(t/lib);
use DBICTest;
my $schema = DBICTest->init_schema();
lives_ok(sub {
- # while cds.* will be selected anyway (prefetch currently forces the result of _resolve_prefetch)
+ # while cds.* will be selected anyway (prefetch implies it)
# only the requested me.name column will be fetched.
# reference sql with select => [...]
{
prefetch => [ qw/ cds / ],
order_by => [ { -desc => 'me.name' }, 'cds.title' ],
- select => [qw/ me.name cds.title / ],
- }
+ select => [qw/ me.name cds.title / ],
+ },
);
is ($rs->count, 2, 'Correct number of collapsed artists');
is ($we_are_goth->cds->first->title, 'Come Be Depressed With Us', 'Correct cd for artist');
}, 'explicit prefetch on a keyless object works');
+lives_ok ( sub {
+
+ my $rs = $schema->resultset('CD')->search(
+ {},
+ {
+ order_by => [ { -desc => 'me.year' } ],
+ }
+ );
+ my $years = [qw/ 2001 2001 1999 1998 1997/];
+
+ cmp_deeply (
+ [ $rs->search->get_column('me.year')->all ],
+ $years,
+ 'Expected years (at least one duplicate)',
+ );
+
+ my @cds_and_tracks;
+ for my $cd ($rs->all) {
+ my $data = { year => $cd->year, cdid => $cd->cdid };
+ for my $tr ($cd->tracks->all) {
+ push @{$data->{tracks}}, { $tr->get_columns };
+ }
+ push @cds_and_tracks, $data;
+ }
+
+ my $pref_rs = $rs->search ({}, { columns => [qw/year cdid/], prefetch => 'tracks' });
+
+ my @pref_cds_and_tracks;
+ for my $cd ($pref_rs->all) {
+ my $data = { $cd->get_columns };
+ for my $tr ($cd->tracks->all) {
+ push @{$data->{tracks}}, { $tr->get_columns };
+ }
+ push @pref_cds_and_tracks, $data;
+ }
+
+ cmp_deeply (
+ \@pref_cds_and_tracks,
+ \@cds_and_tracks,
+ 'Correct collapsing on non-unique primary object'
+ );
+
+ cmp_deeply (
+ [ $pref_rs->search ({}, { result_class => 'DBIx::Class::ResultClass::HashRefInflator' })->all ],
+ \@cds_and_tracks,
+ 'Correct HRI collapsing on non-unique primary object'
+ );
+
+}, 'weird collapse lives');
+
lives_ok(sub {
# test implicit prefetch as well
sub {
$schema->resultset('Track')->search({}, { join => { cd => 'artist' }, '+columns' => 'artist.name' } )->next;
},
- qr|\QCan't inflate manual prefetch into non-existent relationship 'artist' from 'Track', check the inflation specification (columns/as) ending in 'artist.name'|,
+ qr|\QInflation into non-existent relationship 'artist' of 'Track' requested, check the inflation specification (columns/as) ending in '...artist.name'|,
'Sensible error message on mis-specified "as"',
);
JOIN artist artist ON artist.artistid = me.artist
LEFT JOIN cd cds ON cds.artist = artist.artistid
LEFT JOIN artist artist_2 ON artist_2.artistid = cds.artist
- ORDER BY cds.artist, cds.year ASC
)',
[],
);
--- /dev/null
+use strict;
+use warnings;
+
+use Test::More;
+use Test::Exception;
+use lib qw(t/lib);
+use DBICTest;
+
+my $schema = DBICTest->init_schema();
+
+my $rs = $schema->resultset('Artist')->search({}, {
+ select => 'artistid',
+ prefetch => { cds => 'tracks' },
+});
+
+my $initial_artists_cnt = $rs->count;
+
+# create one extra artist with just one cd with just one track
+# and then an artist with nothing at all
+# the implicit order by me.artistid will get them back in correct order
+$rs->create({
+ name => 'foo',
+ cds => [{
+ year => 2012,
+ title => 'foocd',
+ tracks => [{
+ title => 'footrack',
+ }]
+ }],
+});
+$rs->create({ name => 'bar' });
+$rs->create({ name => 'baz' });
+
+# make sure we are reentrant, and also check with explicit order_by
+for (undef, undef, 'me.artistid') {
+ $rs = $rs->search({}, { order_by => $_ }) if $_;
+
+ for (1 .. $initial_artists_cnt) {
+ is ($rs->next->artistid, $_, 'Default fixture artists in order') || exit;
+ }
+
+ my $foo_artist = $rs->next;
+ is ($foo_artist->cds->next->tracks->next->title, 'footrack', 'Right track');
+
+ is (
+ [$rs->cursor->next]->[0],
+ $initial_artists_cnt + 3,
+ 'Very last artist still on the cursor'
+ );
+
+ is_deeply ([$rs->cursor->next], [], 'Nothing else left');
+
+ is ($rs->next->artistid, $initial_artists_cnt + 2, 'Row stashed in resultset still accessible');
+ is ($rs->next, undef, 'Nothing left in resultset either');
+
+ $rs->reset;
+}
+
+$rs->next;
+
+my @objs = $rs->all;
+is (@objs, $initial_artists_cnt + 3, '->all resets everything correctly');
+is ( ($rs->cursor->next)[0], 1, 'Cursor auto-rewound after all()');
+is ($rs->{stashed_rows}, undef, 'Nothing else left in $rs stash');
+
+my $unordered_rs = $rs->search({}, { order_by => 'cds.title' });
+ok ($unordered_rs->next, 'got row 1');
+is_deeply ([$unordered_rs->cursor->next], [], 'Nothing left on cursor, eager slurp');
+ok ($unordered_rs->next, "got row $_") for (2 .. $initial_artists_cnt + 3);
+is ($unordered_rs->next, undef, 'End of RS reached');
+is ($unordered_rs->next, undef, 'End of RS not lost');
+
+done_testing;
--- /dev/null
+use strict;
+use warnings;
+
+use Test::More;
+use Test::Deep;
+use Test::Exception;
+use lib qw(t/lib);
+use DBICTest;
+
+my $schema = DBICTest->init_schema(no_populate => 1);
+
+$schema->resultset('Artist')->create({ name => 'JMJ', cds => [{
+ title => 'Magnetic Fields',
+ year => 1981,
+ genre => { name => 'electro' },
+ tracks => [
+ { title => 'm1' },
+ { title => 'm2' },
+ { title => 'm3' },
+ { title => 'm4' },
+ ],
+} ] });
+
+$schema->resultset('CD')->create({
+ title => 'Equinoxe',
+ year => 1978,
+ artist => { name => 'JMJ' },
+ genre => { name => 'electro' },
+ tracks => [
+ { title => 'e1' },
+ { title => 'e2' },
+ { title => 'e3' },
+ ],
+ single_track => {
+ title => 'o1',
+ cd => {
+ title => 'Oxygene',
+ year => 1976,
+ artist => { name => 'JMJ' },
+ tracks => [
+ { title => 'o2', position => 2}, # the position should not be here, bug in MC
+ ],
+ },
+ },
+});
+
+my $rs = $schema->resultset ('CD')->search ({}, {
+ join => [ 'tracks', { single_track => { cd => { artist => { cds => 'tracks' } } } } ],
+ collapse => 1,
+ columns => [
+ { 'year' => 'me.year' }, # non-unique
+ { 'genreid' => 'me.genreid' }, # nullable
+ { 'tracks.title' => 'tracks.title' }, # non-unique (no me.id)
+    { 'single_track.cd.artist.cds.cdid' => 'cds.cdid' }, # to give uniqueness to ...tracks.title below
+    { 'single_track.cd.artist.artistid' => 'artist.artistid' }, # uniquifies entire parental chain
+ { 'single_track.cd.artist.cds.year' => 'cds.year' }, # non-unique
+ { 'single_track.cd.artist.cds.genreid' => 'cds.genreid' }, # nullable
+ { 'single_track.cd.artist.cds.tracks.title' => 'tracks_2.title' }, # unique when combined with ...cds.cdid above
+ { 'latest_cd' => \ "(SELECT MAX(year) FROM cd)" }, # random function
+    { 'title' => 'me.title' }, # uniqueness for me
+    { 'artist' => 'me.artist' }, # uniqueness for me
+ ],
+ order_by => [{ -desc => 'cds.year' }, { -desc => 'me.title'} ],
+});
+
+my $hri_rs = $rs->search({}, { result_class => 'DBIx::Class::ResultClass::HashRefInflator' });
+
+cmp_deeply (
+ [$hri_rs->all],
+ [
+ {
+ artist => 1,
+ genreid => 1,
+ latest_cd => 1981,
+ single_track => {
+ cd => {
+ artist => {
+ artistid => 1,
+ cds => [
+ {
+ cdid => 1,
+ genreid => 1,
+ tracks => [
+ {
+ title => "m1"
+ },
+ {
+ title => "m2"
+ },
+ {
+ title => "m3"
+ },
+ {
+ title => "m4"
+ }
+ ],
+ year => 1981
+ },
+ {
+ cdid => 3,
+ genreid => 1,
+ tracks => [
+ {
+ title => "e1"
+ },
+ {
+ title => "e2"
+ },
+ {
+ title => "e3"
+ }
+ ],
+ year => 1978
+ },
+ {
+ cdid => 2,
+ genreid => undef,
+ tracks => [
+ {
+ title => "o1"
+ },
+ {
+ title => "o2"
+ }
+ ],
+ year => 1976
+ }
+ ]
+ }
+ }
+ },
+ title => "Equinoxe",
+ tracks => [
+ {
+ title => "e1"
+ },
+ {
+ title => "e2"
+ },
+ {
+ title => "e3"
+ }
+ ],
+ year => 1978
+ },
+ {
+ artist => 1,
+ genreid => undef,
+ latest_cd => 1981,
+ single_track => undef,
+ title => "Oxygene",
+ tracks => [
+ {
+ title => "o1"
+ },
+ {
+ title => "o2"
+ }
+ ],
+ year => 1976
+ },
+ {
+ artist => 1,
+ genreid => 1,
+ latest_cd => 1981,
+ single_track => undef,
+ title => "Magnetic Fields",
+ tracks => [
+ {
+ title => "m1"
+ },
+ {
+ title => "m2"
+ },
+ {
+ title => "m3"
+ },
+ {
+ title => "m4"
+ }
+ ],
+ year => 1981
+ },
+ ],
+ 'W00T, manual prefetch with collapse works'
+);
+
+TODO: {
+ my $row = $rs->next;
+ local $TODO = 'Something is wrong with filter type rels, they throw on incomplete objects >.<';
+
+ lives_ok {
+ cmp_deeply (
+ { $row->single_track->get_columns },
+ {},
+ 'empty intermediate object ok',
+ )
+ } 'no exception';
+}
+
+is ($rs->cursor->next, undef, 'cursor exhausted');
+
+
+TODO: {
+local $TODO = 'this does not work at all, need to promote rsattrs to an object on its own';
+# make sure has_many column redirection does not do weird stuff when collapse is requested
+for my $pref_args (
+ { prefetch => 'cds'},
+ { collapse => 1 }
+) {
+ for my $col_and_join_args (
+ { '+columns' => { 'cd_title' => 'cds_2.title' }, join => [ 'cds', 'cds' ] },
+ { '+columns' => { 'cd_title' => 'cds.title' }, join => 'cds', }
+ ) {
+
+ my $weird_rs = $schema->resultset('Artist')->search({}, {
+ %$col_and_join_args, %$pref_args,
+ });
+
+ for (qw/next all first/) {
+ throws_ok { $weird_rs->$_ } qr/not yet determined exception text/;
+ }
+ }
+}
+}
+
+# multi-has_many with underdefined root, with rather random order
+$rs = $schema->resultset ('CD')->search ({}, {
+ join => [ 'tracks', { single_track => { cd => { artist => { cds => 'tracks' } } } } ],
+ collapse => 1,
+ columns => [
+ { 'single_track.trackid' => 'single_track.trackid' }, # definitive link to root from 1:1:1:1:M:M chain
+ { 'year' => 'me.year' }, # non-unique
+ { 'tracks.cd' => 'tracks.cd' }, # \ together both uniqueness for second multirel
+ { 'tracks.title' => 'tracks.title' }, # / and definitive link back to root
+    { 'single_track.cd.artist.cds.cdid' => 'cds.cdid' }, # to give uniqueness to ...tracks.title below
+    { 'single_track.cd.artist.cds.year' => 'cds.year' }, # non-unique
+    { 'single_track.cd.artist.artistid' => 'artist.artistid' }, # uniquifies entire parental chain
+ { 'single_track.cd.artist.cds.genreid' => 'cds.genreid' }, # nullable
+ { 'single_track.cd.artist.cds.tracks.title' => 'tracks_2.title' }, # unique when combined with ...cds.cdid above
+ ],
+});
+
+for (1..3) {
+ $rs->create({ artist => 1, year => 1977, title => "fuzzy_$_" });
+}
+
+my $rs_random = $rs->search({}, { order_by => \ 'RANDOM()' });
+is ($rs_random->count, 6, 'row count matches');
+
+if ($ENV{TEST_VERBOSE}) {
+ my @lines = (
+ [ "What are we actually trying to collapse (Select/As, tests below will see a *DIFFERENT* random order):" ],
+ [ map { my $s = $_; $s =~ s/single_track\./sngl_tr./; $s } @{$rs_random->{_attrs}{select} } ],
+ $rs_random->{_attrs}{as},
+ [ "-" x 159 ],
+ $rs_random->cursor->all,
+ );
+
+ diag join ' # ', map { sprintf '% 15s', (defined $_ ? $_ : 'NULL') } @$_
+ for @lines;
+}
+
+my $queries = 0;
+$schema->storage->debugcb(sub { $queries++ });
+my $orig_debug = $schema->storage->debug;
+$schema->storage->debug (1);
+
+for my $use_next (0, 1) {
+ my @random_cds;
+ if ($use_next) {
+ while (my $o = $rs_random->next) {
+ push @random_cds, $o;
+ }
+ }
+ else {
+ @random_cds = $rs_random->all;
+ }
+
+ is (@random_cds, 6, 'object count matches');
+
+ for my $cd (@random_cds) {
+ if ($cd->year == 1977) {
+ is( scalar $cd->tracks, 0, 'no tracks on 1977 cd' );
+ is( $cd->single_track, undef, 'no single_track on 1977 cd' );
+ }
+ elsif ($cd->year == 1976) {
+ is( scalar $cd->tracks, 2, 'Two tracks on 1976 cd' );
+ like( $_->title, qr/^o\d/, "correct title" )
+ for $cd->tracks;
+ is( $cd->single_track, undef, 'no single_track on 1976 cd' );
+ }
+ elsif ($cd->year == 1981) {
+ is( scalar $cd->tracks, 4, 'Four tracks on 1981 cd' );
+ like( $_->title, qr/^m\d/, "correct title" )
+ for $cd->tracks;
+ is( $cd->single_track, undef, 'no single_track on 1981 cd' );
+ }
+ elsif ($cd->year == 1978) {
+ is( scalar $cd->tracks, 3, 'Three tracks on 1978 cd' );
+ like( $_->title, qr/^e\d/, "correct title" )
+ for $cd->tracks;
+      ok( defined $cd->single_track, 'single track prefetched on 1978 cd' );
+ is( $cd->single_track->cd->artist->id, 1, 'Single_track->cd->artist prefetched on 1978 cd' );
+ is( scalar $cd->single_track->cd->artist->cds, 6, '6 cds prefetched on artist' );
+ }
+ }
+}
+
+$schema->storage->debugcb(undef);
+$schema->storage->debug($orig_debug);
+is ($queries, 2, "Only two queries for two prefetch calls total");
+
+# can't cmp_deeply a random set - need *some* order
+my @hris = sort { $a->{year} cmp $b->{year} } @{$rs->search({}, {
+ order_by => [ 'tracks_2.title', 'tracks.title', 'cds.cdid', \ 'RANDOM()' ],
+})->all_hri};
+is (@hris, 6, 'hri count matches' );
+
+cmp_deeply (\@hris, [
+ {
+ single_track => undef,
+ tracks => [
+ {
+ cd => 2,
+ title => "o1"
+ },
+ {
+ cd => 2,
+ title => "o2"
+ }
+ ],
+ year => 1976
+ },
+ {
+ single_track => undef,
+ tracks => [],
+ year => 1977
+ },
+ {
+ single_track => undef,
+ tracks => [],
+ year => 1977
+ },
+ {
+ single_track => undef,
+ tracks => [],
+ year => 1977
+ },
+ {
+ single_track => {
+ cd => {
+ artist => {
+ artistid => 1,
+ cds => [
+ {
+ cdid => 4,
+ genreid => undef,
+ tracks => [],
+ year => 1977
+ },
+ {
+ cdid => 5,
+ genreid => undef,
+ tracks => [],
+ year => 1977
+ },
+ {
+ cdid => 6,
+ genreid => undef,
+ tracks => [],
+ year => 1977
+ },
+ {
+ cdid => 3,
+ genreid => 1,
+ tracks => [
+ {
+ title => "e1"
+ },
+ {
+ title => "e2"
+ },
+ {
+ title => "e3"
+ }
+ ],
+ year => 1978
+ },
+ {
+ cdid => 1,
+ genreid => 1,
+ tracks => [
+ {
+ title => "m1"
+ },
+ {
+ title => "m2"
+ },
+ {
+ title => "m3"
+ },
+ {
+ title => "m4"
+ }
+ ],
+ year => 1981
+ },
+ {
+ cdid => 2,
+ genreid => undef,
+ tracks => [
+ {
+ title => "o1"
+ },
+ {
+ title => "o2"
+ }
+ ],
+ year => 1976
+ }
+ ]
+ }
+ },
+ trackid => 6
+ },
+ tracks => [
+ {
+ cd => 3,
+ title => "e1"
+ },
+ {
+ cd => 3,
+ title => "e2"
+ },
+ {
+ cd => 3,
+ title => "e3"
+ },
+ ],
+ year => 1978
+ },
+ {
+ single_track => undef,
+ tracks => [
+ {
+ cd => 1,
+ title => "m1"
+ },
+ {
+ cd => 1,
+ title => "m2"
+ },
+ {
+ cd => 1,
+ title => "m3"
+ },
+ {
+ cd => 1,
+ title => "m4"
+ },
+ ],
+ year => 1981
+ },
+], 'W00T, multi-has_many manual underdefined root prefetch with collapse works');
+
+done_testing;
my $schema = DBICTest->init_schema();
my $sdebug = $schema->storage->debug;
-# once the following TODO is complete, remove the 2 warning tests immediately
-# after the TODO block
-# (the TODO block itself contains tests ensuring that the warns are removed)
-TODO: {
- local $TODO = 'Prefetch of multiple has_many rels at the same level (currently warn to protect the clueless git)';
-
- #( 1 -> M + M )
- my $cd_rs = $schema->resultset('CD')->search ({ 'me.title' => 'Forkful of bees' });
- my $pr_cd_rs = $cd_rs->search ({}, {
- prefetch => [qw/tracks tags/],
- });
-
- my $tracks_rs = $cd_rs->first->tracks;
- my $tracks_count = $tracks_rs->count;
-
- my ($pr_tracks_rs, $pr_tracks_count);
-
- my $queries = 0;
- $schema->storage->debugcb(sub { $queries++ });
- $schema->storage->debug(1);
-
- my $o_mm_warn;
- {
- local $SIG{__WARN__} = sub { $o_mm_warn = shift };
- $pr_tracks_rs = $pr_cd_rs->first->tracks;
- };
- $pr_tracks_count = $pr_tracks_rs->count;
-
- ok(! $o_mm_warn, 'no warning on attempt to prefetch several same level has_many\'s (1 -> M + M)');
-
- {
- local $TODO;
- is($queries, 1, 'prefetch one->(has_many,has_many) ran exactly 1 query');
- }
- $schema->storage->debugcb (undef);
- $schema->storage->debug ($sdebug);
-
- is($pr_tracks_count, $tracks_count, 'equal count of prefetched relations over several same level has_many\'s (1 -> M + M)');
- is ($pr_tracks_rs->all, $tracks_rs->all, 'equal amount of objects returned with and without prefetch over several same level has_many\'s (1 -> M + M)');
-
- #( M -> 1 -> M + M )
- my $note_rs = $schema->resultset('LinerNotes')->search ({ notes => 'Buy Whiskey!' });
- my $pr_note_rs = $note_rs->search ({}, {
- prefetch => {
- cd => [qw/tracks tags/]
- },
- });
-
- my $tags_rs = $note_rs->first->cd->tags;
- my $tags_count = $tags_rs->count;
-
- my ($pr_tags_rs, $pr_tags_count);
-
- $queries = 0;
- $schema->storage->debugcb(sub { $queries++ });
- $schema->storage->debug(1);
-
- my $m_o_mm_warn;
- {
- local $SIG{__WARN__} = sub { $m_o_mm_warn = shift };
- $pr_tags_rs = $pr_note_rs->first->cd->tags;
- };
- $pr_tags_count = $pr_tags_rs->count;
-
- ok(! $m_o_mm_warn, 'no warning on attempt to prefetch several same level has_many\'s (M -> 1 -> M + M)');
-
- {
- local $TODO;
-
- is($queries, 1, 'prefetch one->(has_many,has_many) ran exactly 1 query');
- $schema->storage->debugcb (undef);
- $schema->storage->debug ($sdebug);
-
- is($pr_tags_count, $tags_count, 'equal count of prefetched relations over several same level has_many\'s (M -> 1 -> M + M)');
- is($pr_tags_rs->all, $tags_rs->all, 'equal amount of objects with and without prefetch over several same level has_many\'s (M -> 1 -> M + M)');
- }
-}
-
-# remove this closure once the TODO above is working
-{
- my $warn_re = qr/will explode the number of row objects retrievable via/;
+#( 1 -> M + M )
+my $cd_rs = $schema->resultset('CD')->search( { 'me.title' => 'Forkful of bees' } );
+my $pr_cd_rs = $cd_rs->search( {}, { prefetch => [qw/tracks tags/], } );
+
+my $tracks_rs = $cd_rs->first->tracks;
+my $tracks_count = $tracks_rs->count;
- my (@w, @dummy);
- local $SIG{__WARN__} = sub { $_[0] =~ $warn_re ? push @w, @_ : warn @_ };
+my ( $pr_tracks_rs, $pr_tracks_count );
- my $rs = $schema->resultset('CD')->search ({ 'me.title' => 'Forkful of bees' }, { prefetch => [qw/tracks tags/] });
- @w = ();
- @dummy = $rs->first;
- is (@w, 1, 'warning on attempt prefetching several same level has_manys (1 -> M + M)');
+my $queries = 0;
+$schema->storage->debugcb( sub { $queries++ } );
+$schema->storage->debug(1);
- my $rs2 = $schema->resultset('LinerNotes')->search ({ notes => 'Buy Whiskey!' }, { prefetch => { cd => [qw/tags tracks/] } });
- @w = ();
- @dummy = $rs2->first;
- is (@w, 1, 'warning on attempt prefetching several same level has_manys (M -> 1 -> M + M)');
-}
+my $o_mm_warn;
+{
+ local $SIG{__WARN__} = sub { $o_mm_warn = shift };
+ $pr_tracks_rs = $pr_cd_rs->first->tracks;
+};
+$pr_tracks_count = $pr_tracks_rs->count;
+
+ok( !$o_mm_warn,
+'no warning on attempt to prefetch several same level has_many\'s (1 -> M + M)'
+);
+
+is( $queries, 1, 'prefetch one->(has_many,has_many) ran exactly 1 query' );
+$schema->storage->debugcb(undef);
+$schema->storage->debug($sdebug);
+
+is( $pr_tracks_count, $tracks_count,
+'equal count of prefetched relations over several same level has_many\'s (1 -> M + M)'
+);
+is( $pr_tracks_rs->all, $tracks_rs->all,
+'equal amount of objects returned with and without prefetch over several same level has_many\'s (1 -> M + M)'
+);
+
+#( M -> 1 -> M + M )
+my $note_rs =
+ $schema->resultset('LinerNotes')->search( { notes => 'Buy Whiskey!' } );
+my $pr_note_rs =
+ $note_rs->search( {}, { prefetch => { cd => [qw/tracks tags/] }, } );
+
+my $tags_rs = $note_rs->first->cd->tags;
+my $tags_count = $tags_rs->count;
+
+my ( $pr_tags_rs, $pr_tags_count );
+
+$queries = 0;
+$schema->storage->debugcb( sub { $queries++ } );
+$schema->storage->debug(1);
+
+my $m_o_mm_warn;
+{
+ local $SIG{__WARN__} = sub { $m_o_mm_warn = shift };
+ $pr_tags_rs = $pr_note_rs->first->cd->tags;
+};
+$pr_tags_count = $pr_tags_rs->count;
+
+ok( !$m_o_mm_warn,
+'no warning on attempt to prefetch several same level has_many\'s (M -> 1 -> M + M)'
+);
+
+is( $queries, 1, 'prefetch one->(has_many,has_many) ran exactly 1 query' );
+$schema->storage->debugcb(undef);
+$schema->storage->debug($sdebug);
+
+is( $pr_tags_count, $tags_count,
+'equal count of prefetched relations over several same level has_many\'s (M -> 1 -> M + M)'
+);
+is( $pr_tags_rs->all, $tags_rs->all,
+'equal amount of objects with and without prefetch over several same level has_many\'s (M -> 1 -> M + M)'
+);
done_testing;
--- /dev/null
+use strict;
+use warnings;
+
+use Test::More;
+use Test::Deep;
+use Test::Exception;
+use lib qw(t/lib);
+use DBICTest;
+
+my $schema = DBICTest->init_schema();
+
+my $mo_rs = $schema->resultset('Artist')->search(
+ { 'me.artistid' => 4 },
+ {
+ prefetch => [
+ {
+ cds => [
+ { tracks => { cd_single => 'tracks' } },
+ { cd_to_producer => 'producer' }
+ ]
+ },
+ { artwork_to_artist => 'artwork' }
+ ],
+
+ result_class => 'DBIx::Class::ResultClass::HashRefInflator',
+
+ order_by => [qw/tracks.position tracks.trackid producer.producerid/],
+ }
+);
+
+$schema->resultset('Artist')->create(
+ {
+ name => 'mo',
+ rank => '1337',
+ cds => [
+ {
+ title => 'Song of a Foo',
+ year => '1999',
+ tracks => [
+ {
+ title => 'Foo Me Baby One More Time',
+ },
+ {
+ title => 'Foo Me Baby One More Time II',
+ },
+ {
+ title => 'Foo Me Baby One More Time III',
+ },
+ {
+ title => 'Foo Me Baby One More Time IV',
+ cd_single =>
+ { artist => 1, title => 'MO! Single', year => 2021, tracks => [
+ { title => 'singled out' }, { title => 'still alone' },
+ ] },
+ }
+ ],
+ cd_to_producer => [
+ { producer => { name => 'riba' } },
+ { producer => { name => 'sushi' } },
+ ]
+ },
+ {
+ title => 'Song of a Foo II',
+ year => '2002',
+ tracks => [
+ {
+ title => 'Quit Playing Games With My Heart',
+ },
+ {
+ title => 'Bar Foo',
+ },
+ {
+ title => 'Foo Bar',
+ cd_single =>
+ { artist => 2, title => 'MO! Single', year => 2020, tracks => [
+ { title => 'singled out' }, { title => 'still alone' },
+ ] },
+ }
+ ],
+ cd_to_producer => [
+ { producer => { name => 'riba' } },
+ { producer => { name => 'sushi' } },
+ ],
+ }
+ ],
+ artwork_to_artist =>
+ [ { artwork => { cd_id => 1 } }, { artwork => { cd_id => 2 } } ]
+ }
+);
+
+my $mo = $mo_rs->next;
+
+is( @{$mo->{cds}}, 2, 'two CDs' );
+
+cmp_deeply(
+ $mo,
+ {
+ 'cds' => [
+ {
+ 'single_track' => undef,
+ 'tracks' => [
+ {
+ 'cd' => '6',
+ 'position' => '1',
+ 'trackid' => '19',
+ 'title' => 'Foo Me Baby One More Time',
+ 'cd_single' => undef,
+ 'last_updated_on' => undef,
+ 'last_updated_at' => undef
+ },
+ {
+ 'cd' => '6',
+ 'position' => '2',
+ 'trackid' => '20',
+ 'title' => 'Foo Me Baby One More Time II',
+ 'cd_single' => undef,
+ 'last_updated_on' => undef,
+ 'last_updated_at' => undef
+ },
+ {
+ 'cd' => '6',
+ 'position' => '3',
+ 'trackid' => '21',
+ 'title' => 'Foo Me Baby One More Time III',
+ 'cd_single' => undef,
+ 'last_updated_on' => undef,
+ 'last_updated_at' => undef
+ },
+ {
+ 'cd' => '6',
+ 'position' => '4',
+ 'trackid' => '22',
+ 'title' => 'Foo Me Baby One More Time IV',
+ 'last_updated_on' => undef,
+ 'last_updated_at' => undef,
+ 'cd_single' => {
+ 'single_track' => '22',
+ 'artist' => '1',
+ 'cdid' => '7',
+ 'title' => 'MO! Single',
+ 'genreid' => undef,
+ 'year' => '2021',
+ 'tracks' => [
+ {
+ 'cd' => '7',
+ 'position' => '1',
+ 'title' => 'singled out',
+ 'trackid' => '23',
+ 'last_updated_at' => undef,
+ 'last_updated_on' => undef
+ },
+ {
+ 'cd' => '7',
+ 'position' => '2',
+ 'title' => 'still alone',
+ 'trackid' => '24',
+ 'last_updated_at' => undef,
+ 'last_updated_on' => undef
+ },
+ ],
+ },
+ }
+ ],
+ 'artist' => '4',
+ 'cdid' => '6',
+ 'cd_to_producer' => [
+ {
+ 'attribute' => undef,
+ 'cd' => '6',
+ 'producer' => {
+ 'name' => 'riba',
+ 'producerid' => '4'
+ }
+ },
+ {
+ 'attribute' => undef,
+ 'cd' => '6',
+ 'producer' => {
+ 'name' => 'sushi',
+ 'producerid' => '5'
+ }
+ }
+ ],
+ 'title' => 'Song of a Foo',
+ 'genreid' => undef,
+ 'year' => '1999'
+ },
+ {
+ 'single_track' => undef,
+ 'tracks' => [
+ {
+ 'cd' => '8',
+ 'position' => '1',
+ 'trackid' => '25',
+ 'title' => 'Quit Playing Games With My Heart',
+ 'last_updated_on' => undef,
+ 'last_updated_at' => undef,
+ 'cd_single' => undef,
+ },
+ {
+ 'cd' => '8',
+ 'position' => '2',
+ 'trackid' => '26',
+ 'title' => 'Bar Foo',
+ 'cd_single' => undef,
+ 'last_updated_on' => undef,
+ 'last_updated_at' => undef
+ },
+ {
+ 'cd' => '8',
+ 'position' => '3',
+ 'trackid' => '27',
+ 'title' => 'Foo Bar',
+ 'last_updated_on' => undef,
+ 'last_updated_at' => undef,
+ 'cd_single' => {
+ 'single_track' => '27',
+ 'artist' => '2',
+ 'cdid' => '9',
+ 'title' => 'MO! Single',
+ 'genreid' => undef,
+ 'year' => '2020',
+ 'tracks' => [
+ {
+ 'cd' => '9',
+ 'position' => '1',
+ 'title' => 'singled out',
+ 'trackid' => '28',
+ 'last_updated_at' => undef,
+ 'last_updated_on' => undef
+ },
+ {
+ 'cd' => '9',
+ 'position' => '2',
+ 'title' => 'still alone',
+ 'trackid' => '29',
+ 'last_updated_at' => undef,
+ 'last_updated_on' => undef
+ },
+ ],
+
+ },
+ },
+ ],
+ 'artist' => '4',
+ 'cdid' => '8',
+ 'cd_to_producer' => [
+ {
+ 'attribute' => undef,
+ 'cd' => '8',
+ 'producer' => {
+ 'name' => 'riba',
+ 'producerid' => '4'
+ }
+ },
+ {
+ 'attribute' => undef,
+ 'cd' => '8',
+ 'producer' => {
+ 'name' => 'sushi',
+ 'producerid' => '5'
+ }
+ }
+ ],
+ 'title' => 'Song of a Foo II',
+ 'genreid' => undef,
+ 'year' => '2002'
+ }
+ ],
+ 'artistid' => '4',
+ 'charfield' => undef,
+ 'name' => 'mo',
+ 'artwork_to_artist' => [
+ {
+ 'artwork' => { 'cd_id' => '1' },
+ 'artist_id' => '4',
+ 'artwork_cd_id' => '1'
+ },
+ {
+ 'artwork' => { 'cd_id' => '2' },
+ 'artist_id' => '4',
+ 'artwork_cd_id' => '2'
+ }
+ ],
+ 'rank' => '1337'
+ }
+);
+
+done_testing;
{ "$ar.rank" => 13 },
{
prefetch => [ 'tracks' ],
- order_by => [ { -asc => "$ar.name" }, "$ar.artistid DESC" ],
- offset => 3,
+ order_by => [ 'tracks.position DESC', { -asc => "$ar.name" }, "$ar.artistid DESC" ],
+ offset => 13,
rows => 3,
},
);
FROM artist me
JOIN cd cds_unordered
ON cds_unordered.artist = me.artistid
+ LEFT JOIN track tracks
+ ON tracks.cd = cds_unordered.cdid
WHERE ( me.rank = ? )
- ORDER BY me.name ASC, me.artistid DESC
+ ORDER BY tracks.position DESC, me.name ASC, me.artistid DESC
LIMIT ?
OFFSET ?
) cds_unordered
LEFT JOIN track tracks
ON tracks.cd = cds_unordered.cdid
WHERE ( me.rank = ? )
- ORDER BY me.name ASC, me.artistid DESC, tracks.cd
+ ORDER BY tracks.position DESC, me.name ASC, me.artistid DESC
)},
[
[ { sqlt_datatype => 'integer', dbic_colname => 'me.rank' } => 13 ],
[ $ROWS => 3 ],
- [ $OFFSET => 3 ],
+ [ $OFFSET => 13 ],
[ { sqlt_datatype => 'integer', dbic_colname => 'me.rank' } => 13 ],
],
'correct SQL on limited prefetch over search_related ordered by root',
'cd' => '4',
'last_updated_at' => undef,
'last_updated_on' => undef,
- 'position' => '1',
- 'title' => 'Boring Name',
- 'trackid' => '10'
+ 'position' => '3',
+ 'title' => 'No More Ideas',
+ 'trackid' => '12'
},
{
'cd' => '4',
'cd' => '4',
'last_updated_at' => undef,
'last_updated_on' => undef,
- 'position' => '3',
- 'title' => 'No More Ideas',
- 'trackid' => '12'
+ 'position' => '1',
+ 'title' => 'Boring Name',
+ 'trackid' => '10'
}
],
'year' => '2001'
'cd' => '5',
'last_updated_at' => undef,
'last_updated_on' => undef,
- 'position' => '1',
- 'title' => 'Sad',
- 'trackid' => '13'
- },
- {
- 'cd' => '5',
- 'last_updated_at' => undef,
- 'last_updated_on' => undef,
'position' => '3',
'title' => 'Suicidal',
'trackid' => '15'
'position' => '2',
'title' => 'Under The Weather',
'trackid' => '14'
+ },
+ {
+ 'cd' => '5',
+ 'last_updated_at' => undef,
+ 'last_updated_on' => undef,
+ 'position' => '1',
+ 'title' => 'Sad',
+ 'trackid' => '13'
}
],
'year' => '1998'
$schema->storage->debugcb(sub { $queries++ });
$schema->storage->debug(1);
-
my $pref = $schema->resultset ('Artist')
->search ({ 'me.artistid' => $artist->id }, { prefetch => { cds => 'genre' } })
->next;
is ($pref->cds->count, 3, 'Correct number of CDs prefetched');
is ($pref->cds->search_related ('genre')->count, 1, 'Only one of the prefetched cds has a prefetched genre');
-
is ($queries, 1, 'All happened within one query only');
$schema->storage->debugcb($orig_cb);
$schema->storage->debug(0);
-
done_testing;
--- /dev/null
+use strict;
+use warnings;
+
+use Test::More;
+use lib qw(t/lib);
+use DBICTest;
+
+my $schema = DBICTest->init_schema();
+
+my $cds_rs = $schema->resultset('CD')->search(
+ [
+ {
+ 'me.title' => "Caterwaulin' Blues",
+ 'cds.title' => { '!=' => 'Forkful of bees' }
+ },
+ {
+      'me.title' => { '!=' => "Caterwaulin' Blues" },
+ 'cds.title' => 'Forkful of bees'
+ },
+ ],
+ {
+ order_by => 'me.cdid',
+ prefetch => { artist => 'cds' },
+ result_class => 'DBIx::Class::ResultClass::HashRefInflator',
+ },
+);
+
+is_deeply [ $cds_rs->all ], [
+ {
+ 'single_track' => undef,
+ 'cdid' => '1',
+ 'artist' => {
+ 'cds' => [
+ {
+ 'single_track' => undef,
+ 'artist' => '1',
+ 'cdid' => '2',
+ 'title' => 'Forkful of bees',
+ 'genreid' => undef,
+ 'year' => '2001'
+ },
+ ],
+ 'artistid' => '1',
+ 'charfield' => undef,
+ 'name' => 'Caterwauler McCrae',
+ 'rank' => '13'
+ },
+ 'title' => 'Spoonful of bees',
+ 'year' => '1999',
+ 'genreid' => '1'
+ },
+ {
+ 'single_track' => undef,
+ 'cdid' => '2',
+ 'artist' => {
+ 'cds' => [
+ {
+ 'single_track' => undef,
+ 'artist' => '1',
+ 'cdid' => '2',
+ 'title' => 'Forkful of bees',
+ 'genreid' => undef,
+ 'year' => '2001'
+ },
+ ],
+ 'artistid' => '1',
+ 'charfield' => undef,
+ 'name' => 'Caterwauler McCrae',
+ 'rank' => '13'
+ },
+ 'title' => 'Forkful of bees',
+ 'year' => '2001',
+ 'genreid' => undef
+ },
+ {
+ 'single_track' => undef,
+ 'cdid' => '3',
+ 'artist' => {
+ 'cds' => [
+ {
+ 'single_track' => undef,
+ 'artist' => '1',
+ 'cdid' => '3',
+ 'title' => 'Caterwaulin\' Blues',
+ 'genreid' => undef,
+ 'year' => '1997'
+ },
+ {
+ 'single_track' => undef,
+ 'artist' => '1',
+ 'cdid' => '1',
+ 'title' => 'Spoonful of bees',
+ 'genreid' => '1',
+ 'year' => '1999'
+ }
+ ],
+ 'artistid' => '1',
+ 'charfield' => undef,
+ 'name' => 'Caterwauler McCrae',
+ 'rank' => '13'
+ },
+ 'title' => 'Caterwaulin\' Blues',
+ 'year' => '1997',
+ 'genreid' => undef
+ }
+], 'multi-level prefetch with restrictions ok';
+
+done_testing;
use warnings;
use Test::More;
+use Test::Exception;
use lib qw(t/lib);
use DBICTest;
my $schema = DBICTest->init_schema();
my $orig_debug = $schema->storage->debug;
-plan tests => 44;
-
my $queries = 0;
$schema->storage->debugcb(sub { $queries++; });
$schema->storage->debug(1);
@artists = $rs->search(undef, { prefetch => 'cds', order_by => 'artistid' });
is(scalar @artists, 5, 'has_many prefetch with adjacent empty rows ok');
+lives_ok { @artists = $rs->search(undef, {
+ join => ['cds'],
+ prefetch => [],
+ rows => 20,
+ });
+} 'join and empty prefetch ok';
+
# -------------
#
# Tests for multilevel has_many prefetch
my $rs = shift;
my $struc = {};
+ # all of these ought to work, but do not for some reason
+  # perhaps a supposedly-noop clone via search() is polluting state?
+ #foreach my $art ( $rs->search({}, { order_by => 'me.artistid' })->all ) {
+ #foreach my $art ( $rs->search({}, {})->all ) {
+ #foreach my $art ( $rs->search()->all ) {
foreach my $art ( $rs->all ) {
foreach my $cd ( $art->cds ) {
foreach my $track ( $cd->tracks ) {
$schema->storage->debug($orig_debug);
$schema->storage->debugobj->callback(undef);
+
+done_testing;
+++ /dev/null
-use strict;
-use warnings;
-
-use Test::More;
-use lib qw(t/lib);
-use DBICTest;
-use PrefetchBug;
-
-my $schema = PrefetchBug->connect( DBICTest->_database (quote_char => '"') );
-ok( $schema, 'Connected to PrefetchBug schema OK' );
-
-$schema->storage->dbh->do(<<"EOF");
-CREATE TABLE prefetchbug_left (
- id INTEGER PRIMARY KEY
-)
-EOF
-
-$schema->storage->dbh->do(<<"EOF");
-CREATE TABLE prefetchbug_right (
- id INTEGER PRIMARY KEY,
- name TEXT,
- category TEXT,
- description TEXT,
- propagates INT,
- locked INT
-)
-EOF
-
-$schema->storage->dbh->do(<<"EOF");
-CREATE TABLE prefetchbug_left_right (
- left_id INTEGER REFERENCES prefetchbug_left(id),
- right_id INTEGER REFERENCES prefetchbug_right(id),
- value TEXT,
- PRIMARY KEY (left_id, right_id)
-)
-EOF
-
-# Test simple has_many prefetch:
-
-my $leftc = $schema->resultset('Left')->create({});
-
-my $rightc = $schema->resultset('Right')->create({ id => 60, name => 'Johnny', category => 'something', description=> 'blah', propagates => 0, locked => 1 });
-$rightc->create_related('prefetch_leftright', { left => $leftc, value => 'lr' });
-
-# start with fresh whatsit
-my $left = $schema->resultset('Left')->find({ id => $leftc->id });
-
-my @left_rights = $left->search_related('prefetch_leftright', {}, { prefetch => 'right' });
-ok(defined $left_rights[0]->right, 'Prefetched Right side correctly');
-
-done_testing;
WHERE artwork.cd_id IS NULL
OR tracks.title != ?
GROUP BY me.artistid + ?, me.artistid, me.name, cds.cdid, cds.artist, cds.title, cds.year, cds.genreid, cds.single_track
- ORDER BY name DESC, cds.artist, cds.year ASC
+ ORDER BY name DESC
)',
[
$bind_int_resolved->(), # outer select
JOIN artist artist
ON artist.artistid = me.artist
WHERE ( ( artist.name = ? AND me.year = ? ) )
- ORDER BY tracks.cd
)',
[
[ { sqlt_datatype => 'varchar', sqlt_size => 100, dbic_colname => 'artist.name' } => 'foo' ],
'16 correct cds found'
);
-TODO: {
-local $TODO = 'Prefetch on custom rels can not work until the collapse rewrite is finished '
- . '(currently collapser requires a right-side (which is indeterministic) order-by)';
lives_ok {
my @all_artists_with_80_cds_pref = $schema->resultset("Artist")->search
);
} 'prefetchy-fetchy-fetch';
-} # end of TODO
# try to create_related a 80s cd
--- /dev/null
+use strict;
+use warnings;
+
+use Test::More;
+use Test::Deep;
+use lib qw(t/lib);
+use DBICTest;
+
+my $schema = DBICTest->init_schema(no_populate => 1);
+
+$schema->resultset('Artist')->create({ name => 'JMJ', cds => [{
+ title => 'Magnetic Fields',
+ year => 1981,
+ genre => { name => 'electro' },
+ tracks => [
+ { title => 'm1' },
+ { title => 'm2' },
+ { title => 'm3' },
+ { title => 'm4' },
+ ],
+} ] });
+
+$schema->resultset('CD')->create({
+ title => 'Equinoxe',
+ year => 1978,
+ artist => { name => 'JMJ' },
+ genre => { name => 'electro' },
+ tracks => [
+ { title => 'e1' },
+ { title => 'e2' },
+ { title => 'e3' },
+ ],
+ single_track => {
+ title => 'o1',
+ cd => {
+ title => 'Oxygene',
+ year => 1976,
+ artist => { name => 'JMJ' },
+ tracks => [
+ { title => 'o2', position => 2}, # the position should not be needed here, bug in MC
+ ],
+ },
+ },
+});
+
+$schema->resultset('CD')->create({ artist => 1, year => 1977, title => "fuzzy_1" });
+
+{
+ package DBICTest::_IRCapture;
+ sub inflate_result { [@_[2,3]] };
+}
+
+{
+ package DBICTest::_IRCaptureAround;
+ use base 'DBIx::Class::Row';
+ sub inflate_result { [@_[2,3]] };
+}
+
+cmp_structures(
+ ([$schema->resultset ('CD')->search ({}, {
+ result_class => 'DBICTest::_IRCapture',
+ prefetch => { single_track => { cd => 'artist' } },
+ order_by => 'me.cdid',
+ })->all]),
+ [
+ [
+ { cdid => 1, single_track => undef, artist => 1, genreid => 1, year => 1981, title => "Magnetic Fields" },
+ { single_track => bless( [
+ { trackid => undef, title => undef, position => undef, cd => undef, last_updated_at => undef, last_updated_on => undef },
+ { cd => bless ( [
+ { cdid => undef, single_track => undef, artist => undef, genreid => undef, year => undef, title => undef },
+ {
+ artist => bless ( [
+ { artistid => undef, name => undef, charfield => undef, rank => undef }
+ ], $DBIx::Class::ResultSource::RowParser::Util::null_branch_class )
+ }
+ ], $DBIx::Class::ResultSource::RowParser::Util::null_branch_class ) }
+ ], $DBIx::Class::ResultSource::RowParser::Util::null_branch_class ) }
+ ],
+ [
+ { cdid => 2, single_track => undef, artist => 1, genreid => undef, year => 1976, title => "Oxygene" },
+ { single_track => bless( [
+ { trackid => undef, title => undef, position => undef, cd => undef, last_updated_at => undef, last_updated_on => undef },
+ { cd => bless ( [
+ { cdid => undef, single_track => undef, artist => undef, genreid => undef, year => undef, title => undef },
+ {
+ artist => bless ( [
+ { artistid => undef, name => undef, charfield => undef, rank => undef }
+ ], $DBIx::Class::ResultSource::RowParser::Util::null_branch_class )
+ }
+ ], $DBIx::Class::ResultSource::RowParser::Util::null_branch_class ) }
+ ], $DBIx::Class::ResultSource::RowParser::Util::null_branch_class ) }
+ ],
+ [
+ { cdid => 3, single_track => 6, artist => 1, genreid => 1, year => 1978, title => "Equinoxe" },
+ { single_track => [
+ { trackid => 6, title => 'o1', position => 1, cd => 2, last_updated_at => undef, last_updated_on => undef },
+ { cd => [
+ { cdid => 2, single_track => undef, artist => 1, genreid => undef, year => 1976, title => "Oxygene" },
+ {
+ artist => [
+ { artistid => 1, name => 'JMJ', charfield => undef, rank => 13 }
+ ]
+ }
+ ] }
+ ] }
+ ],
+ [
+ { cdid => 4, single_track => undef, artist => 1, genreid => undef, year => 1977, title => "fuzzy_1" },
+ { single_track => bless( [
+ { trackid => undef, title => undef, position => undef, cd => undef, last_updated_at => undef, last_updated_on => undef },
+ { cd => bless ( [
+ { cdid => undef, single_track => undef, artist => undef, genreid => undef, year => undef, title => undef },
+ {
+ artist => bless ( [
+ { artistid => undef, name => undef, charfield => undef, rank => undef }
+ ], $DBIx::Class::ResultSource::RowParser::Util::null_branch_class )
+ }
+ ], $DBIx::Class::ResultSource::RowParser::Util::null_branch_class ) }
+ ], $DBIx::Class::ResultSource::RowParser::Util::null_branch_class ) }
+ ],
+ ],
+ 'Simple 1:1 descend with classic prefetch'
+);
+
+cmp_structures(
+ [$schema->resultset ('CD')->search ({}, {
+ result_class => 'DBICTest::_IRCapture',
+ join => { single_track => { cd => 'artist' } },
+ columns => [
+ { 'year' => 'me.year' },
+ { 'genreid' => 'me.genreid' },
+ { 'single_track.cd.artist.artistid' => 'artist.artistid' },
+ { 'title' => 'me.title' },
+ { 'artist' => 'me.artist' },
+ ],
+ order_by => 'me.cdid',
+ })->all],
+ [
+ [
+ { artist => 1, genreid => 1, year => 1981, title => "Magnetic Fields" },
+ { single_track => bless( [
+ undef,
+ { cd => [
+ undef,
+ {
+ artist => [
+ { artistid => undef }
+ ]
+ }
+ ] }
+ ], $DBIx::Class::ResultSource::RowParser::Util::null_branch_class ) }
+ ],
+ [
+ { artist => 1, genreid => undef, year => 1976, title => "Oxygene" },
+ { single_track => bless( [
+ undef,
+ { cd => [
+ undef,
+ {
+ artist => [
+ { artistid => undef }
+ ]
+ }
+ ] }
+ ], $DBIx::Class::ResultSource::RowParser::Util::null_branch_class ) }
+ ],
+ [
+ { artist => 1, genreid => 1, year => 1978, title => "Equinoxe" },
+ { single_track => [
+ undef,
+ { cd => [
+ undef,
+ {
+ artist => [
+ { artistid => 1 }
+ ]
+ }
+ ] }
+ ] }
+ ],
+ [
+ { artist => 1, genreid => undef, year => 1977, title => "fuzzy_1" },
+ { single_track => bless( [
+ undef,
+ { cd => [
+ undef,
+ {
+ artist => [
+ { artistid => undef }
+ ]
+ }
+ ] }
+ ], $DBIx::Class::ResultSource::RowParser::Util::null_branch_class ) }
+ ],
+ ],
+ 'Simple 1:1 descend with missing selectors'
+);
+
+cmp_structures(
+ ([$schema->resultset ('CD')->search ({}, {
+ result_class => 'DBICTest::_IRCapture',
+ prefetch => [ { single_track => { cd => { artist => { cds => 'tracks' } } } } ],
+ order_by => [qw/me.cdid tracks.trackid/],
+ })->all]),
+ [
+ [
+ { cdid => 1, single_track => undef, artist => 1, genreid => 1, year => 1981, title => "Magnetic Fields" },
+ { single_track => bless( [
+ { trackid => undef, title => undef, position => undef, cd => undef, last_updated_at => undef, last_updated_on => undef },
+ { cd => [
+ { cdid => undef, single_track => undef, artist => undef, genreid => undef, year => undef, title => undef },
+ {
+ artist => [
+ { artistid => undef, name => undef, charfield => undef, rank => undef },
+ { cds => bless( [ [
+ { cdid => undef, single_track => undef, artist => undef, genreid => undef, year => undef, title => undef },
+ { tracks => bless( [ [
+ { trackid => undef, title => undef, position => undef, cd => undef, last_updated_at => undef, last_updated_on => undef },
+ ] ], $DBIx::Class::ResultSource::RowParser::Util::null_branch_class ) },
+ ] ], $DBIx::Class::ResultSource::RowParser::Util::null_branch_class ) },
+ ],
+ },
+ ] },
+ ], $DBIx::Class::ResultSource::RowParser::Util::null_branch_class ) },
+ ],
+ [
+ { cdid => 2, single_track => undef, artist => 1, genreid => undef, year => 1976, title => "Oxygene" },
+ { single_track => bless( [
+ { trackid => undef, title => undef, position => undef, cd => undef, last_updated_at => undef, last_updated_on => undef },
+ { cd => [
+ { cdid => undef, single_track => undef, artist => undef, genreid => undef, year => undef, title => undef },
+ {
+ artist => [
+ { artistid => undef, name => undef, charfield => undef, rank => undef },
+ { cds => bless( [ [
+ { cdid => undef, single_track => undef, artist => undef, genreid => undef, year => undef, title => undef },
+ { tracks => bless( [ [
+ { trackid => undef, title => undef, position => undef, cd => undef, last_updated_at => undef, last_updated_on => undef },
+ ] ], $DBIx::Class::ResultSource::RowParser::Util::null_branch_class ) },
+ ] ], $DBIx::Class::ResultSource::RowParser::Util::null_branch_class ) },
+ ],
+ },
+ ] },
+ ], $DBIx::Class::ResultSource::RowParser::Util::null_branch_class ) },
+ ],
+ [
+ { cdid => 3, single_track => 6, artist => 1, genreid => 1, year => 1978, title => "Equinoxe" },
+ { single_track => [
+ { trackid => 6, title => 'o1', position => 1, cd => 2, last_updated_at => undef, last_updated_on => undef },
+ { cd => [
+ { cdid => 2, single_track => undef, artist => 1, genreid => undef, year => 1976, title => "Oxygene" },
+ {
+ artist => [
+ { artistid => 1, name => 'JMJ', charfield => undef, rank => 13 },
+ { cds => [
+ [
+ { cdid => 4, single_track => undef, artist => 1, genreid => undef, year => 1977, title => "fuzzy_1" },
+ { tracks => bless( [
+ [ { trackid => undef, title => undef, position => undef, cd => undef, last_updated_at => undef, last_updated_on => undef } ],
+ ], $DBIx::Class::ResultSource::RowParser::Util::null_branch_class ) },
+ ],
+ [
+ { cdid => 1, single_track => undef, artist => 1, genreid => 1, year => 1981, title => "Magnetic Fields" },
+ { tracks => [
+ [ { trackid => 1, title => 'm1', position => 1, cd => 1, last_updated_at => undef, last_updated_on => undef } ],
+ [ { trackid => 2, title => 'm2', position => 2, cd => 1, last_updated_at => undef, last_updated_on => undef } ],
+ [ { trackid => 3, title => 'm3', position => 3, cd => 1, last_updated_at => undef, last_updated_on => undef } ],
+ [ { trackid => 4, title => 'm4', position => 4, cd => 1, last_updated_at => undef, last_updated_on => undef } ],
+ ]},
+ ],
+ [
+ { cdid => 2, single_track => undef, artist => 1, genreid => undef, year => 1976, title => "Oxygene" },
+ { tracks => [
+ [ { trackid => 5, title => 'o2', position => 2, cd => 2, last_updated_at => undef, last_updated_on => undef } ],
+ [ { trackid => 6, title => 'o1', position => 1, cd => 2, last_updated_at => undef, last_updated_on => undef } ],
+ ]},
+ ],
+ [
+ { cdid => 3, single_track => 6, artist => 1, genreid => 1, year => 1978, title => "Equinoxe" },
+ { tracks => [
+ [ { trackid => 7, title => 'e1', position => 1, cd => 3, last_updated_at => undef, last_updated_on => undef } ],
+ [ { trackid => 8, title => 'e2', position => 2, cd => 3, last_updated_at => undef, last_updated_on => undef } ],
+ [ { trackid => 9, title => 'e3', position => 3, cd => 3, last_updated_at => undef, last_updated_on => undef } ],
+ ]},
+ ],
+ ]},
+ ]
+ }
+ ] }
+ ] }
+ ],
+ [
+ { cdid => 4, single_track => undef, artist => 1, genreid => undef, year => 1977, title => "fuzzy_1" },
+ { single_track => bless( [
+ { trackid => undef, title => undef, position => undef, cd => undef, last_updated_at => undef, last_updated_on => undef },
+ { cd => [
+ { cdid => undef, single_track => undef, artist => undef, genreid => undef, year => undef, title => undef },
+ {
+ artist => [
+ { artistid => undef, name => undef, charfield => undef, rank => undef },
+ { cds => bless( [ [
+ { cdid => undef, single_track => undef, artist => undef, genreid => undef, year => undef, title => undef },
+ { tracks => bless( [ [
+ { trackid => undef, title => undef, position => undef, cd => undef, last_updated_at => undef, last_updated_on => undef },
+ ] ], $DBIx::Class::ResultSource::RowParser::Util::null_branch_class ) },
+ ] ], $DBIx::Class::ResultSource::RowParser::Util::null_branch_class ) },
+ ],
+ },
+ ] },
+ ], $DBIx::Class::ResultSource::RowParser::Util::null_branch_class ) },
+ ],
+ ],
+ 'Collapsing 1:1 ending in chained has_many with classic prefetch'
+);
+
+cmp_structures (
+ ([$schema->resultset ('Artist')->search ({}, {
+ result_class => 'DBICTest::_IRCapture',
+ join => { cds => 'tracks' },
+ '+columns' => [
+ (map { "cds.$_" } $schema->source('CD')->columns),
+ (map { +{ "cds.tracks.$_" => "tracks.$_" } } $schema->source('Track')->columns),
+ ],
+ order_by => [qw/cds.cdid tracks.trackid/],
+ })->all]),
+ [
+ [
+ { artistid => 1, name => 'JMJ', charfield => undef, rank => 13 },
+ { cds => [
+ { cdid => 1, single_track => undef, artist => 1, genreid => 1, year => 1981, title => "Magnetic Fields" },
+ { tracks => [
+ { trackid => 1, title => 'm1', position => 1, cd => 1, last_updated_at => undef, last_updated_on => undef },
+ ]},
+ ]},
+ ],
+ [
+ { artistid => 1, name => 'JMJ', charfield => undef, rank => 13 },
+ { cds => [
+ { cdid => 1, single_track => undef, artist => 1, genreid => 1, year => 1981, title => "Magnetic Fields" },
+ { tracks => [
+ { trackid => 2, title => 'm2', position => 2, cd => 1, last_updated_at => undef, last_updated_on => undef },
+ ]},
+ ]},
+ ],
+ [
+ { artistid => 1, name => 'JMJ', charfield => undef, rank => 13 },
+ { cds => [
+ { cdid => 1, single_track => undef, artist => 1, genreid => 1, year => 1981, title => "Magnetic Fields" },
+ { tracks => [
+ { trackid => 3, title => 'm3', position => 3, cd => 1, last_updated_at => undef, last_updated_on => undef },
+ ]},
+ ]},
+ ],
+ [
+ { artistid => 1, name => 'JMJ', charfield => undef, rank => 13 },
+ { cds => [
+ { cdid => 1, single_track => undef, artist => 1, genreid => 1, year => 1981, title => "Magnetic Fields" },
+ { tracks => [
+ { trackid => 4, title => 'm4', position => 4, cd => 1, last_updated_at => undef, last_updated_on => undef },
+ ]},
+ ]},
+ ],
+ [
+ { artistid => 1, name => 'JMJ', charfield => undef, rank => 13 },
+ { cds => [
+ { cdid => 2, single_track => undef, artist => 1, genreid => undef, year => 1976, title => "Oxygene" },
+ { tracks => [
+ { trackid => 5, title => 'o2', position => 2, cd => 2, last_updated_at => undef, last_updated_on => undef },
+ ]},
+ ]},
+ ],
+ [
+ { artistid => 1, name => 'JMJ', charfield => undef, rank => 13 },
+ { cds => [
+ { cdid => 2, single_track => undef, artist => 1, genreid => undef, year => 1976, title => "Oxygene" },
+ { tracks => [
+ { trackid => 6, title => 'o1', position => 1, cd => 2, last_updated_at => undef, last_updated_on => undef },
+ ]},
+ ]},
+ ],
+ [
+ { artistid => 1, name => 'JMJ', charfield => undef, rank => 13 },
+ { cds => [
+ { cdid => 3, single_track => 6, artist => 1, genreid => 1, year => 1978, title => "Equinoxe" },
+ { tracks => [
+ { trackid => 7, title => 'e1', position => 1, cd => 3, last_updated_at => undef, last_updated_on => undef },
+ ]},
+ ]},
+ ],
+ [
+ { artistid => 1, name => 'JMJ', charfield => undef, rank => 13 },
+ { cds => [
+ { cdid => 3, single_track => 6, artist => 1, genreid => 1, year => 1978, title => "Equinoxe" },
+ { tracks => [
+ { trackid => 8, title => 'e2', position => 2, cd => 3, last_updated_at => undef, last_updated_on => undef },
+ ]},
+ ]},
+ ],
+ [
+ { artistid => 1, name => 'JMJ', charfield => undef, rank => 13 },
+ { cds => [
+ { cdid => 3, single_track => 6, artist => 1, genreid => 1, year => 1978, title => "Equinoxe" },
+ { tracks => [
+ { trackid => 9, title => 'e3', position => 3, cd => 3, last_updated_at => undef, last_updated_on => undef },
+ ]},
+ ]},
+ ],
+ [
+ { artistid => 1, name => 'JMJ', charfield => undef, rank => 13 },
+ { cds => [
+ { cdid => 4, single_track => undef, artist => 1, genreid => undef, year => 1977, title => "fuzzy_1" },
+ { tracks => bless( [
+ { trackid => undef, title => undef, position => undef, cd => undef, last_updated_at => undef, last_updated_on => undef },
+ ], $DBIx::Class::ResultSource::RowParser::Util::null_branch_class ) },
+ ]},
+ ],
+ ],
+ 'Non-Collapsing chained has_many'
+);
+
+sub cmp_structures {
+ my ($left, $right, $msg) = @_;
+
+ local $Test::Builder::Level = $Test::Builder::Level + 1;
+ cmp_deeply($left, $right, $msg||());
+}
+
+done_testing;
--- /dev/null
+use strict;
+use warnings;
+
+use Test::More;
+use lib qw(t/lib);
+use DBICTest;
+use B::Deparse;
+
+# globally set for the rest of test
+# the rowparser maker does not order its hashes by default for the miniscule
+# speed gain. But it does not disable sorting either - for this test
+# everything will be ordered nicely, and the hash randomization of 5.18
+# will not trip up anything
+use Data::Dumper;
+$Data::Dumper::Sortkeys = 1;
+
+my $schema = DBICTest->init_schema(no_deploy => 1);
+my $infmap = [qw/
+ single_track.cd.artist.name
+ year
+/];
+
+is_same_src (
+ $schema->source ('CD')->_mk_row_parser({
+ inflate_map => $infmap,
+ }),
+ '$_ = [
+ { year => $_->[1] },
+ { single_track => ( ! defined( $_->[0]) )
+ ? bless( [
+ undef,
+ { cd => [
+ undef,
+ { artist => [
+ { name => $_->[0] },
+ ] },
+ ] },
+ ], __NBC__ )
+ : [
+ undef,
+ { cd => [
+ undef,
+ { artist => [
+ { name => $_->[0] },
+ ] },
+ ] },
+ ]
+ },
+ ] for @{$_[0]}',
+ 'Simple 1:1 descending non-collapsing parser',
+);
+
+$infmap = [qw/
+ single_track.cd.artist.cds.tracks.title
+ single_track.cd.artist.artistid
+ year
+ single_track.cd.artist.cds.cdid
+ title
+ artist
+/];
+
+is_same_src (
+ $schema->source ('CD')->_mk_row_parser({
+ inflate_map => $infmap,
+ }),
+ '$_ = [
+ { artist => $_->[5], title => $_->[4], year => $_->[2] },
+ {
+ single_track => ( (! defined $_->[0] ) && (! defined $_->[1]) && (! defined $_->[3] ) )
+ ? bless( [
+ undef,
+ {
+ cd => [
+ undef,
+ {
+ artist => [
+ { artistid => $_->[1] },
+ {
+ cds => ( (! defined $_->[0] ) && ( ! defined $_->[3] ) )
+ ? bless ([
+ { cdid => $_->[3] },
+ {
+ tracks => ( ! defined $_->[0] )
+ ? bless ( [{ title => $_->[0] }], __NBC__ )
+ : [{ title => $_->[0] }]
+ }
+ ], __NBC__)
+ : [
+ { cdid => $_->[3] },
+ {
+ tracks => ( ! defined $_->[0] )
+ ? bless ( [{ title => $_->[0] }], __NBC__ )
+ : [{ title => $_->[0] }]
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ ], __NBC__)
+ : [
+ undef,
+ {
+ cd => [
+ undef,
+ {
+ artist => [
+ { artistid => $_->[1] },
+ {
+ cds => ( (! defined $_->[0] ) && ( ! defined $_->[3] ) )
+ ? bless ([
+ { cdid => $_->[3] },
+ {
+ tracks => ( ! defined $_->[0] )
+ ? bless ( [{ title => $_->[0] }], __NBC__ )
+ : [{ title => $_->[0] }]
+ }
+ ], __NBC__)
+ : [
+ { cdid => $_->[3] },
+ {
+ tracks => ( ! defined $_->[0] )
+ ? bless ( [{ title => $_->[0] }], __NBC__ )
+ : [{ title => $_->[0] }]
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ ] for @{$_[0]}',
+ '1:1 descending non-collapsing parser terminating with chained 1:M:M',
+);
+
+is_same_src (
+ $schema->source ('CD')->_mk_row_parser({
+ hri_style => 1,
+ inflate_map => $infmap,
+ }),
+ '$_ = {
+ artist => $_->[5], title => $_->[4], year => $_->[2],
+
+ ( single_track => ( (! defined $_->[0] ) && (! defined $_->[1]) && (! defined $_->[3] ) )
+ ? undef
+ : {
+ cd =>
+ {
+ artist => {
+ artistid => $_->[1],
+ ( cds => ( (! defined $_->[0] ) && ( ! defined $_->[3] ) )
+ ? undef
+ : {
+ cdid => $_->[3],
+ ( tracks => ( ! defined $_->[0] )
+ ? undef
+ : { title => $_->[0] }
+ )
+ }
+ )
+ }
+ }
+ }
+ )
+ } for @{$_[0]}',
+ '1:1 descending non-collapsing HRI-direct parser terminating with chained 1:M:M',
+);
+
+
+
+is_deeply (
+ ($schema->source('CD')->_resolve_collapse({ as => {map { $infmap->[$_] => $_ } 0 .. $#$infmap} })),
+ {
+ -identifying_columns => [ 4, 5 ],
+
+ single_track => {
+ -identifying_columns => [ 1, 4, 5 ],
+ -is_optional => 1,
+ -is_single => 1,
+
+ cd => {
+ -identifying_columns => [ 1, 4, 5 ],
+ -is_single => 1,
+
+ artist => {
+ -identifying_columns => [ 1, 4, 5 ],
+ -is_single => 1,
+
+ cds => {
+ -identifying_columns => [ 1, 3, 4, 5 ],
+ -is_optional => 1,
+
+ tracks => {
+ -identifying_columns => [ 0, 1, 3, 4, 5 ],
+ -is_optional => 1,
+ },
+ },
+ },
+ },
+ },
+ },
+ 'Correct collapse map for 1:1 descending chain terminating with chained 1:M:M'
+);
+
+is_same_src (
+ $schema->source ('CD')->_mk_row_parser({
+ inflate_map => $infmap,
+ collapse => 1,
+ }),
+ ' my($rows_pos, $result_pos, $cur_row_data, %cur_row_ids, @collapse_idx, $is_new_res) = (0, 0);
+
+ while ($cur_row_data = (
+ ( $rows_pos >= 0 and $_[0][$rows_pos++] ) or do { $rows_pos = -1; undef } )
+ ||
+ ( $_[1] and $_[1]->() )
+ ) {
+
+ $cur_row_ids{$_} = defined $cur_row_data->[$_] ? $cur_row_data->[$_] : "\0NULL\xFF$rows_pos\xFF$_\0"
+ for (0, 1, 3, 4, 5);
+
+ # a present cref in $_[1] implies lazy prefetch, implies a supplied stash in $_[2]
+ $_[1] and $result_pos and unshift(@{$_[2]}, $cur_row_data) and last
+ if ( $is_new_res = ! $collapse_idx[0]{$cur_row_ids{4}}{$cur_row_ids{5}} );
+
+ # the rowdata itself for root node
+ $collapse_idx[0]{$cur_row_ids{4}}{$cur_row_ids{5}} ||= [{ artist => $cur_row_data->[5], title => $cur_row_data->[4], year => $cur_row_data->[2] }];
+
+ # prefetch data of single_track (placed in root)
+ $collapse_idx[0]{$cur_row_ids{4}}{$cur_row_ids{5}}[1]{single_track} ||= $collapse_idx[1]{$cur_row_ids{1}}{$cur_row_ids{4}}{$cur_row_ids{5}} ||= [];
+ defined($cur_row_data->[1]) or bless( $collapse_idx[0]{$cur_row_ids{4}}{$cur_row_ids{5}}[1]{single_track}, __NBC__ );
+
+ # prefetch data of cd (placed in single_track)
+ $collapse_idx[1]{$cur_row_ids{1}}{$cur_row_ids{4}}{$cur_row_ids{5}}[1]{cd} ||= $collapse_idx[2]{$cur_row_ids{1}}{$cur_row_ids{4}}{$cur_row_ids{5}} ||= [];
+
+    # prefetch data of artist (placed in single_track->cd)
+ $collapse_idx[2]{$cur_row_ids{1}}{$cur_row_ids{4}}{$cur_row_ids{5}}[1]{artist} ||= $collapse_idx[3]{$cur_row_ids{1}}{$cur_row_ids{4}}{$cur_row_ids{5}} ||= [{ artistid => $cur_row_data->[1] }];
+
+ # prefetch data of cds (if available)
+ (! $collapse_idx[4]{$cur_row_ids{1}}{$cur_row_ids{3}}{$cur_row_ids{4}}{$cur_row_ids{5}} )
+ and
+ push @{$collapse_idx[3]{$cur_row_ids{1}}{$cur_row_ids{4}}{$cur_row_ids{5}}[1]{cds}}, (
+ $collapse_idx[4]{$cur_row_ids{1}}{$cur_row_ids{3}}{$cur_row_ids{4}}{$cur_row_ids{5}} = [{ cdid => $cur_row_data->[3] }]
+ );
+ defined($cur_row_data->[3]) or bless( $collapse_idx[3]{$cur_row_ids{1}}{$cur_row_ids{4}}{$cur_row_ids{5}}[1]{cds}, __NBC__ );
+
+ # prefetch data of tracks (if available)
+ (! $collapse_idx[5]{$cur_row_ids{0}}{$cur_row_ids{1}}{$cur_row_ids{3}}{$cur_row_ids{4}}{$cur_row_ids{5}} )
+ and
+ push @{$collapse_idx[4]{$cur_row_ids{1}}{$cur_row_ids{3}}{$cur_row_ids{4}}{$cur_row_ids{5}}[1]{tracks}}, (
+ $collapse_idx[5]{$cur_row_ids{0}}{$cur_row_ids{1}}{$cur_row_ids{3}}{$cur_row_ids{4}}{$cur_row_ids{5}} = [{ title => $cur_row_data->[0] }]
+ );
+ defined($cur_row_data->[0]) or bless( $collapse_idx[4]{$cur_row_ids{1}}{$cur_row_ids{3}}{$cur_row_ids{4}}{$cur_row_ids{5}}[1]{tracks}, __NBC__ );
+
+ $_[0][$result_pos++] = $collapse_idx[0]{$cur_row_ids{4}}{$cur_row_ids{5}}
+ if $is_new_res;
+ }
+ splice @{$_[0]}, $result_pos;
+ ',
+ 'Same 1:1 descending terminating with chained 1:M:M but with collapse',
+);
+
+is_same_src (
+ $schema->source ('CD')->_mk_row_parser({
+ inflate_map => $infmap,
+ collapse => 1,
+ hri_style => 1,
+ }),
+ ' my($rows_pos, $result_pos, $cur_row_data, @collapse_idx, $is_new_res) = (0, 0);
+
+ while ($cur_row_data = (
+ ( $rows_pos >= 0 and $_[0][$rows_pos++] ) or do { $rows_pos = -1; undef } )
+ ||
+ ( $_[1] and $_[1]->() )
+ ) {
+
+ # a present cref in $_[1] implies lazy prefetch, implies a supplied stash in $_[2]
+ $_[1] and $result_pos and unshift(@{$_[2]}, $cur_row_data) and last
+ if ( $is_new_res = ! $collapse_idx[0]{$cur_row_data->[4]}{$cur_row_data->[5]} );
+
+ # the rowdata itself for root node
+ $collapse_idx[0]{$cur_row_data->[4]}{$cur_row_data->[5]} ||= { artist => $cur_row_data->[5], title => $cur_row_data->[4], year => $cur_row_data->[2] };
+
+ # prefetch data of single_track (placed in root)
+ (! defined($cur_row_data->[1]) ) ? $collapse_idx[0]{$cur_row_data->[4]}{$cur_row_data->[5]}{single_track} = undef : do {
+ $collapse_idx[0]{$cur_row_data->[4]}{$cur_row_data->[5]}{single_track} ||= $collapse_idx[1]{$cur_row_data->[1]}{$cur_row_data->[4]}{$cur_row_data->[5]};
+
+ # prefetch data of cd (placed in single_track)
+ $collapse_idx[1]{$cur_row_data->[1]}{$cur_row_data->[4]}{$cur_row_data->[5]}{cd} ||= $collapse_idx[2]{$cur_row_data->[1]}{$cur_row_data->[4]}{$cur_row_data->[5]};
+
+ # prefetch data of artist ( placed in single_track->cd)
+ $collapse_idx[2]{$cur_row_data->[1]}{$cur_row_data->[4]}{$cur_row_data->[5]}{artist} ||= $collapse_idx[3]{$cur_row_data->[1]}{$cur_row_data->[4]}{$cur_row_data->[5]} ||= { artistid => $cur_row_data->[1] };
+
+ # prefetch data of cds (if available)
+ (! defined $cur_row_data->[3] ) ? $collapse_idx[3]{$cur_row_data->[1]}{$cur_row_data->[4]}{$cur_row_data->[5]}{cds} = [] : do {
+
+ (! $collapse_idx[4]{$cur_row_data->[1]}{$cur_row_data->[3]}{$cur_row_data->[4]}{$cur_row_data->[5]} )
+ and
+ push @{$collapse_idx[3]{$cur_row_data->[1]}{$cur_row_data->[4]}{$cur_row_data->[5]}{cds}}, (
+ $collapse_idx[4]{$cur_row_data->[1]}{$cur_row_data->[3]}{$cur_row_data->[4]}{$cur_row_data->[5]} = { cdid => $cur_row_data->[3] }
+ );
+
+ # prefetch data of tracks (if available)
+ ( ! defined $cur_row_data->[0] ) ? $collapse_idx[4]{$cur_row_data->[1]}{$cur_row_data->[3]}{$cur_row_data->[4]}{$cur_row_data->[5]}{tracks} = [] : do {
+
+ (! $collapse_idx[5]{$cur_row_data->[0]}{$cur_row_data->[1]}{$cur_row_data->[3]}{$cur_row_data->[4]}{$cur_row_data->[5]} )
+ and
+ push @{$collapse_idx[4]{$cur_row_data->[1]}{$cur_row_data->[3]}{$cur_row_data->[4]}{$cur_row_data->[5]}{tracks}}, (
+ $collapse_idx[5]{$cur_row_data->[0]}{$cur_row_data->[1]}{$cur_row_data->[3]}{$cur_row_data->[4]}{$cur_row_data->[5]} = { title => $cur_row_data->[0] }
+ );
+ };
+ };
+ };
+
+ $_[0][$result_pos++] = $collapse_idx[0]{$cur_row_data->[4]}{$cur_row_data->[5]}
+ if $is_new_res;
+ }
+ splice @{$_[0]}, $result_pos;
+ ',
+ 'Same 1:1 descending terminating with chained 1:M:M but with collapse, HRI-direct',
+);
+
+$infmap = [qw/
+ tracks.lyrics.existing_lyric_versions.text
+ existing_single_track.cd.artist.artistid
+ existing_single_track.cd.artist.cds.year
+ year
+ genreid
+ tracks.title
+ existing_single_track.cd.artist.cds.cdid
+ latest_cd
+ existing_single_track.cd.artist.cds.tracks.title
+ existing_single_track.cd.artist.cds.genreid
+ tracks.lyrics.existing_lyric_versions.lyric_id
+/];
+
+is_deeply (
+ $schema->source('CD')->_resolve_collapse({ as => {map { $infmap->[$_] => $_ } 0 .. $#$infmap} }),
+ {
+ -identifying_columns => [ 1 ], # existing_single_track.cd.artist.artistid
+
+ existing_single_track => {
+ -identifying_columns => [ 1 ], # existing_single_track.cd.artist.artistid
+ -is_single => 1,
+
+ cd => {
+ -identifying_columns => [ 1 ], # existing_single_track.cd.artist.artistid
+ -is_single => 1,
+
+ artist => {
+ -identifying_columns => [ 1 ], # existing_single_track.cd.artist.artistid
+ -is_single => 1,
+
+ cds => {
+ -identifying_columns => [ 1, 6 ], # existing_single_track.cd.artist.cds.cdid
+ -is_optional => 1,
+
+ tracks => {
+ -identifying_columns => [ 1, 6, 8 ], # existing_single_track.cd.artist.cds.cdid, existing_single_track.cd.artist.cds.tracks.title
+ -is_optional => 1,
+ }
+ }
+ }
+ }
+ },
+ tracks => {
+ -identifying_columns => [ 1, 5 ], # existing_single_track.cd.artist.artistid, tracks.title
+ -is_optional => 1,
+
+ lyrics => {
+ -identifying_columns => [ 1, 5, 10 ], # existing_single_track.cd.artist.artistid, tracks.title, tracks.lyrics.existing_lyric_versions.lyric_id
+ -is_single => 1,
+ -is_optional => 1,
+
+ existing_lyric_versions => {
+ -identifying_columns => [ 0, 1, 5, 10 ], # tracks.lyrics.existing_lyric_versions.text, existing_single_track.cd.artist.artistid, tracks.title, tracks.lyrics.existing_lyric_versions.lyric_id
+ },
+ },
+ }
+ },
+ 'Correct collapse map constructed',
+);
+
+is_same_src (
+ $schema->source ('CD')->_mk_row_parser({
+ inflate_map => $infmap,
+ collapse => 1,
+ }),
+ ' my ($rows_pos, $result_pos, $cur_row_data, %cur_row_ids, @collapse_idx, $is_new_res) = (0,0);
+
+ while ($cur_row_data = (
+ ( $rows_pos >= 0 and $_[0][$rows_pos++] ) or do { $rows_pos = -1; undef } )
+ ||
+ ( $_[1] and $_[1]->() )
+ ) {
+
+ $cur_row_ids{$_} = defined $cur_row_data->[$_] ? $cur_row_data->[$_] : "\0NULL\xFF$rows_pos\xFF$_\0"
+ for (0, 1, 5, 6, 8, 10);
+
+ # a present cref in $_[1] implies lazy prefetch, implies a supplied stash in $_[2]
+ $_[1] and $result_pos and unshift(@{$_[2]}, $cur_row_data) and last
+ if ( $is_new_res = ! $collapse_idx[0]{$cur_row_ids{1}} );
+
+ $collapse_idx[0]{$cur_row_ids{1}} ||= [{ genreid => $cur_row_data->[4], latest_cd => $cur_row_data->[7], year => $cur_row_data->[3] }];
+
+ $collapse_idx[0]{$cur_row_ids{1}}[1]{existing_single_track} ||= $collapse_idx[1]{$cur_row_ids{1}} ||= [];
+ $collapse_idx[1]{$cur_row_ids{1}}[1]{cd} ||= $collapse_idx[2]{$cur_row_ids{1}} ||= [];
+ $collapse_idx[2]{$cur_row_ids{1}}[1]{artist} ||= $collapse_idx[3]{$cur_row_ids{1}} ||= [{ artistid => $cur_row_data->[1] }];
+
+ (! $collapse_idx[4]{$cur_row_ids{1}}{$cur_row_ids{6}} )
+ and
+ push @{ $collapse_idx[3]{$cur_row_ids{1}}[1]{cds} }, (
+ $collapse_idx[4]{$cur_row_ids{1}}{$cur_row_ids{6}} = [{ cdid => $cur_row_data->[6], genreid => $cur_row_data->[9], year => $cur_row_data->[2] }]
+ );
+ defined($cur_row_data->[6]) or bless( $collapse_idx[3]{$cur_row_ids{1}}[1]{cds}, __NBC__ );
+
+ (! $collapse_idx[5]{$cur_row_ids{1}}{$cur_row_ids{6}}{$cur_row_ids{8}} )
+ and
+ push @{ $collapse_idx[4]{$cur_row_ids{1}}{$cur_row_ids{6}}[1]{tracks} }, (
+ $collapse_idx[5]{$cur_row_ids{1}}{$cur_row_ids{6}}{$cur_row_ids{8}} = [{ title => $cur_row_data->[8] }]
+ );
+ defined($cur_row_data->[8]) or bless( $collapse_idx[4]{$cur_row_ids{1}}{$cur_row_ids{6}}[1]{tracks}, __NBC__ );
+
+ (! $collapse_idx[6]{$cur_row_ids{1}}{$cur_row_ids{5}} )
+ and
+ push @{ $collapse_idx[0]{$cur_row_ids{1}}[1]{tracks} }, (
+ $collapse_idx[6]{$cur_row_ids{1}}{$cur_row_ids{5}} = [{ title => $cur_row_data->[5] }]
+ );
+ defined($cur_row_data->[5]) or bless( $collapse_idx[0]{$cur_row_ids{1}}[1]{tracks}, __NBC__ );
+
+ $collapse_idx[6]{$cur_row_ids{1}}{$cur_row_ids{5}}[1]{lyrics} ||= $collapse_idx[7]{$cur_row_ids{1}}{$cur_row_ids{5}}{$cur_row_ids{10}} ||= [];
+ defined($cur_row_data->[10]) or bless( $collapse_idx[6]{$cur_row_ids{1}}{$cur_row_ids{5}}[1]{lyrics}, __NBC__ );
+
+ (! $collapse_idx[8]{$cur_row_ids{0}}{$cur_row_ids{1}}{$cur_row_ids{5}}{$cur_row_ids{10}} )
+ and
+ push @{ $collapse_idx[7]{$cur_row_ids{1}}{$cur_row_ids{5}}{$cur_row_ids{10}}[1]{existing_lyric_versions} }, (
+ $collapse_idx[8]{$cur_row_ids{0}}{$cur_row_ids{1}}{$cur_row_ids{5}}{$cur_row_ids{10}} = [{ lyric_id => $cur_row_data->[10], text => $cur_row_data->[0] }]
+ );
+
+ $_[0][$result_pos++] = $collapse_idx[0]{$cur_row_ids{1}}
+ if $is_new_res;
+ }
+
+ splice @{$_[0]}, $result_pos;
+ ',
+ 'Multiple has_many on multiple branches torture test',
+);
+
+is_same_src (
+ $schema->source ('CD')->_mk_row_parser({
+ inflate_map => $infmap,
+ collapse => 1,
+ }),
+ ' my ($rows_pos, $result_pos, $cur_row_data, %cur_row_ids, @collapse_idx, $is_new_res) = (0,0);
+
+ while ($cur_row_data = (
+ ( $rows_pos >= 0 and $_[0][$rows_pos++] ) or do { $rows_pos = -1; undef } )
+ ||
+ ( $_[1] and $_[1]->() )
+ ) {
+
+ $cur_row_ids{$_} = defined $cur_row_data->[$_] ? $cur_row_data->[$_] : "\0NULL\xFF$rows_pos\xFF$_\0"
+ for (0, 1, 5, 6, 8, 10);
+
+ # a present cref in $_[1] implies lazy prefetch, implies a supplied stash in $_[2]
+ $_[1] and $result_pos and unshift(@{$_[2]}, $cur_row_data) and last
+ if ( $is_new_res = ! $collapse_idx[0]{$cur_row_ids{1}} );
+
+ $collapse_idx[0]{$cur_row_ids{1}} ||= [{ genreid => $cur_row_data->[4], latest_cd => $cur_row_data->[7], year => $cur_row_data->[3] }];
+
+ $collapse_idx[0]{$cur_row_ids{1}}[1]{existing_single_track} ||= $collapse_idx[1]{$cur_row_ids{1}} ||= [];
+ $collapse_idx[1]{$cur_row_ids{1}}[1]{cd} ||= $collapse_idx[2]{$cur_row_ids{1}} ||= [];
+ $collapse_idx[2]{$cur_row_ids{1}}[1]{artist} ||= $collapse_idx[3]{$cur_row_ids{1}} ||= [{ artistid => $cur_row_data->[1] }];
+
+ (! $collapse_idx[4]{$cur_row_ids{1}}{$cur_row_ids{6}} )
+ and
+ push @{ $collapse_idx[3]{$cur_row_ids{1}}[1]{cds} }, (
+ $collapse_idx[4]{$cur_row_ids{1}}{$cur_row_ids{6}} = [{ cdid => $cur_row_data->[6], genreid => $cur_row_data->[9], year => $cur_row_data->[2] }]
+ );
+ defined($cur_row_data->[6]) or bless( $collapse_idx[3]{$cur_row_ids{1}}[1]{cds}, __NBC__ );
+
+ (! $collapse_idx[5]{$cur_row_ids{1}}{$cur_row_ids{6}}{$cur_row_ids{8}} )
+ and
+ push @{ $collapse_idx[4]{$cur_row_ids{1}}{$cur_row_ids{6}}[1]{tracks} }, (
+ $collapse_idx[5]{$cur_row_ids{1}}{$cur_row_ids{6}}{$cur_row_ids{8}} = [{ title => $cur_row_data->[8] }]
+ );
+ defined($cur_row_data->[8]) or bless( $collapse_idx[4]{$cur_row_ids{1}}{$cur_row_ids{6}}[1]{tracks}, __NBC__ );
+
+ (! $collapse_idx[6]{$cur_row_ids{1}}{$cur_row_ids{5}} )
+ and
+ push @{ $collapse_idx[0]{$cur_row_ids{1}}[1]{tracks} }, (
+ $collapse_idx[6]{$cur_row_ids{1}}{$cur_row_ids{5}} = [{ title => $cur_row_data->[5] }]
+ );
+ defined($cur_row_data->[5]) or bless( $collapse_idx[0]{$cur_row_ids{1}}[1]{tracks}, __NBC__ );
+
+ $collapse_idx[6]{$cur_row_ids{1}}{$cur_row_ids{5}}[1]{lyrics} ||= $collapse_idx[7]{$cur_row_ids{1}}{$cur_row_ids{5}}{$cur_row_ids{10}} ||= [];
+ defined($cur_row_data->[10]) or bless( $collapse_idx[6]{$cur_row_ids{1}}{$cur_row_ids{5}}[1]{lyrics}, __NBC__ );
+
+ (! $collapse_idx[8]{$cur_row_ids{0}}{$cur_row_ids{1}}{$cur_row_ids{5}}{$cur_row_ids{10}} )
+ and
+ push @{ $collapse_idx[7]{$cur_row_ids{1}}{$cur_row_ids{5}}{$cur_row_ids{10}}[1]{existing_lyric_versions} }, (
+ $collapse_idx[8]{$cur_row_ids{0}}{$cur_row_ids{1}}{$cur_row_ids{5}}{$cur_row_ids{10}} = [{ lyric_id => $cur_row_data->[10], text => $cur_row_data->[0] }]
+ );
+
+ $_[0][$result_pos++] = $collapse_idx[0]{$cur_row_ids{1}}
+ if $is_new_res;
+ }
+
+ splice @{$_[0]}, $result_pos;
+ ',
+ 'Multiple has_many on multiple branches with branch torture test',
+);
+
+$infmap = [
+ 'single_track.trackid', # (0) definitive link to root from 1:1:1:1:M:M chain
+ 'year', # (1) non-unique
+ 'tracks.cd', # (2) \ together both uniqueness for second multirel
+ 'tracks.title', # (3) / and definitive link back to root
+  'single_track.cd.artist.cds.cdid',        # (4) to give uniqueness to ...tracks.title below
+ 'single_track.cd.artist.cds.year', # (5) non-unique
+  'single_track.cd.artist.artistid',        # (6) uniquifies entire parental chain
+ 'single_track.cd.artist.cds.genreid', # (7) nullable
+ 'single_track.cd.artist.cds.tracks.title',# (8) unique when combined with ...cds.cdid above
+];
+
+is_deeply (
+ $schema->source('CD')->_resolve_collapse({ as => {map { $infmap->[$_] => $_ } 0 .. $#$infmap} }),
+ {
+ -identifying_columns => [],
+ -identifying_columns_variants => [
+ [ 0 ], [ 2 ],
+ ],
+ single_track => {
+ -identifying_columns => [ 0 ],
+ -is_optional => 1,
+ -is_single => 1,
+ cd => {
+ -identifying_columns => [ 0 ],
+ -is_single => 1,
+ artist => {
+ -identifying_columns => [ 0 ],
+ -is_single => 1,
+ cds => {
+ -identifying_columns => [ 0, 4 ],
+ -is_optional => 1,
+ tracks => {
+ -identifying_columns => [ 0, 4, 8 ],
+ -is_optional => 1,
+ }
+ }
+ }
+ }
+ },
+ tracks => {
+ -identifying_columns => [ 2, 3 ],
+ -is_optional => 1,
+ }
+ },
+ 'Correct underdefined root collapse map constructed'
+);
+
+is_same_src (
+ $schema->source ('CD')->_mk_row_parser({
+ inflate_map => $infmap,
+ collapse => 1,
+ }),
+ ' my($rows_pos, $result_pos, $cur_row_data, %cur_row_ids, @collapse_idx, $is_new_res) = (0, 0);
+
+ while ($cur_row_data = (
+ ( $rows_pos >= 0 and $_[0][$rows_pos++] ) or do { $rows_pos = -1; undef } )
+ ||
+ ( $_[1] and $_[1]->() )
+ ) {
+
+ $cur_row_ids{$_} = defined $$cur_row_data[$_] ? $$cur_row_data[$_] : "\0NULL\xFF$rows_pos\xFF$_\0"
+ for (0, 2, 3, 4, 8);
+
+ # cache expensive set of ops in a non-existent rowid slot
+ $cur_row_ids{10} = (
+ ( ( defined $cur_row_data->[0] ) && (join "\xFF", q{}, $cur_row_data->[0], q{} ))
+ or
+ ( ( defined $cur_row_data->[2] ) && (join "\xFF", q{}, $cur_row_data->[2], q{} ))
+ or
+ "\0$rows_pos\0"
+ );
+
+ # a present cref in $_[1] implies lazy prefetch, implies a supplied stash in $_[2]
+ $_[1] and $result_pos and unshift(@{$_[2]}, $cur_row_data) and last
+ if ( $is_new_res = ! $collapse_idx[0]{$cur_row_ids{10}} );
+
+ $collapse_idx[0]{$cur_row_ids{10}} ||= [{ year => $$cur_row_data[1] }];
+
+ $collapse_idx[0]{$cur_row_ids{10}}[1]{single_track} ||= ($collapse_idx[1]{$cur_row_ids{0}} ||= [{ trackid => $cur_row_data->[0] }]);
+ defined($cur_row_data->[0]) or bless ( $collapse_idx[0]{$cur_row_ids{10}}[1]{single_track}, __NBC__ );
+
+ $collapse_idx[1]{$cur_row_ids{0}}[1]{cd} ||= $collapse_idx[2]{$cur_row_ids{0}} ||= [];
+
+ $collapse_idx[2]{$cur_row_ids{0}}[1]{artist} ||= ($collapse_idx[3]{$cur_row_ids{0}} ||= [{ artistid => $cur_row_data->[6] }]);
+
+ (! $collapse_idx[4]{$cur_row_ids{0}}{$cur_row_ids{4}} )
+ and
+ push @{$collapse_idx[3]{$cur_row_ids{0}}[1]{cds}}, (
+ $collapse_idx[4]{$cur_row_ids{0}}{$cur_row_ids{4}} = [{ cdid => $cur_row_data->[4], genreid => $cur_row_data->[7], year => $cur_row_data->[5] }]
+ );
+ defined($cur_row_data->[4]) or bless ( $collapse_idx[3]{$cur_row_ids{0}}[1]{cds}, __NBC__ );
+
+ (! $collapse_idx[5]{$cur_row_ids{0}}{$cur_row_ids{4}}{$cur_row_ids{8}} )
+ and
+ push @{$collapse_idx[4]{$cur_row_ids{0}}{$cur_row_ids{4}}[1]{tracks}}, (
+ $collapse_idx[5]{$cur_row_ids{0}}{$cur_row_ids{4}}{$cur_row_ids{8}} = [{ title => $cur_row_data->[8] }]
+ );
+ defined($cur_row_data->[8]) or bless ( $collapse_idx[4]{$cur_row_ids{0}}{$cur_row_ids{4}}[1]{tracks}, __NBC__ );
+
+ (! $collapse_idx[6]{$cur_row_ids{2}}{$cur_row_ids{3}} )
+ and
+ push @{$collapse_idx[0]{$cur_row_ids{10}}[1]{tracks}}, (
+ $collapse_idx[6]{$cur_row_ids{2}}{$cur_row_ids{3}} = [{ cd => $$cur_row_data[2], title => $cur_row_data->[3] }]
+ );
+ defined($cur_row_data->[2]) or bless ( $collapse_idx[0]{$cur_row_ids{10}}[1]{tracks}, __NBC__ );
+
+ $_[0][$result_pos++] = $collapse_idx[0]{$cur_row_ids{10}}
+ if $is_new_res;
+ }
+
+ splice @{$_[0]}, $result_pos;
+ ',
+ 'Multiple has_many on multiple branches with underdefined root torture test',
+);
+
+is_same_src (
+ $schema->source ('CD')->_mk_row_parser({
+ inflate_map => $infmap,
+ collapse => 1,
+ hri_style => 1,
+ }),
+ ' my($rows_pos, $result_pos, $cur_row_data, @collapse_idx, $is_new_res) = (0, 0);
+
+ while ($cur_row_data = (
+ ( $rows_pos >= 0 and $_[0][$rows_pos++] ) or do { $rows_pos = -1; undef } )
+ ||
+ ( $_[1] and $_[1]->() )
+ ) {
+
+ # cache expensive set of ops in a non-existent rowid slot
+ $cur_row_data->[10] = (
+ ( ( defined $cur_row_data->[0] ) && (join "\xFF", q{}, $cur_row_data->[0], q{} ))
+ or
+ ( ( defined $cur_row_data->[2] ) && (join "\xFF", q{}, $cur_row_data->[2], q{} ))
+ or
+ "\0$rows_pos\0"
+ );
+
+ # a present cref in $_[1] implies lazy prefetch, implies a supplied stash in $_[2]
+ $_[1] and $result_pos and unshift(@{$_[2]}, $cur_row_data) and last
+ if ( $is_new_res = ! $collapse_idx[0]{$cur_row_data->[10]} );
+
+ $collapse_idx[0]{$cur_row_data->[10]} ||= { year => $$cur_row_data[1] };
+
+ (! defined $cur_row_data->[0] ) ? $collapse_idx[0]{$cur_row_data->[10]}{single_track} = undef : do {
+
+ $collapse_idx[0]{$cur_row_data->[10]}{single_track} ||= ($collapse_idx[1]{$cur_row_data->[0]} ||= { trackid => $$cur_row_data[0] });
+
+ $collapse_idx[1]{$cur_row_data->[0]}{cd} ||= $collapse_idx[2]{$cur_row_data->[0]};
+
+ $collapse_idx[2]{$cur_row_data->[0]}{artist} ||= ($collapse_idx[3]{$cur_row_data->[0]} ||= { artistid => $$cur_row_data[6] });
+
+ (! defined $cur_row_data->[4] ) ? $collapse_idx[3]{$cur_row_data->[0]}{cds} = [] : do {
+
+ (! $collapse_idx[4]{$cur_row_data->[0]}{$cur_row_data->[4]} )
+ and
+ push @{$collapse_idx[3]{$cur_row_data->[0]}{cds}}, (
+ $collapse_idx[4]{$cur_row_data->[0]}{$cur_row_data->[4]} = { cdid => $$cur_row_data[4], genreid => $$cur_row_data[7], year => $$cur_row_data[5] }
+ );
+
+ (! defined $cur_row_data->[8] ) ? $collapse_idx[4]{$cur_row_data->[0]}{$cur_row_data->[4]}{tracks} = [] : do {
+
+ (! $collapse_idx[5]{$cur_row_data->[0]}{$cur_row_data->[4]}{$cur_row_data->[8]} )
+ and
+ push @{$collapse_idx[4]{$cur_row_data->[0]}{$cur_row_data->[4]}{tracks}}, (
+ $collapse_idx[5]{$cur_row_data->[0]}{$cur_row_data->[4]}{$cur_row_data->[8]} = { title => $$cur_row_data[8] }
+ );
+ };
+ };
+ };
+
+ (! defined $cur_row_data->[2] ) ? $collapse_idx[0]{$cur_row_data->[10]}{tracks} = [] : do {
+ (! $collapse_idx[6]{$cur_row_data->[2]}{$cur_row_data->[3]} )
+ and
+ push @{$collapse_idx[0]{$cur_row_data->[10]}{tracks}}, (
+ $collapse_idx[6]{$cur_row_data->[2]}{$cur_row_data->[3]} = { cd => $$cur_row_data[2], title => $$cur_row_data[3] }
+ );
+ };
+
+ $_[0][$result_pos++] = $collapse_idx[0]{$cur_row_data->[10]}
+ if $is_new_res;
+ }
+
+ splice @{$_[0]}, $result_pos;
+ ',
+ 'Multiple has_many on multiple branches with underdefined root, HRI-direct torture test',
+);
+
+done_testing;
+
+my $deparser;
+sub is_same_src {
+ $deparser ||= B::Deparse->new;
+ local $Test::Builder::Level = $Test::Builder::Level + 1;
+
+ my ($got, $expect) = @_;
+
+ $expect =~ s/__NBC__/B::perlstring($DBIx::Class::ResultSource::RowParser::Util::null_branch_class)/ge;
+
+ $expect = " { use strict; use warnings FATAL => 'all';\n$expect\n }";
+
+ my @normalized = map {
+ my $cref = eval "sub { $_ }" or do {
+ fail "Coderef does not compile!\n\n$@\n\n$_";
+ return undef;
+ };
+ $deparser->coderef2text($cref);
+ } ($got, $expect);
+
+ &is (@normalized, $_[2]||() ) or do {
+ eval { require Test::Differences }
+ ? &Test::Differences::eq_or_diff( @normalized, $_[2]||() )
+ : note ("Original sources:\n\n$got\n\n$expect\n");
+ BAIL_OUT('');
+ };
+}
--- /dev/null
+use strict;
+use warnings;
+use Test::More;
+use Test::Exception;
+use Test::Warn;
+
+use lib 't/lib';
+use DBICTest;
+
+throws_ok {
+ package Foo;
+ use base 'DBIx::Class::Core';
+ __PACKAGE__->table('foo');
+ __PACKAGE__->set_primary_key('bar')
+} qr/No such column 'bar' on source 'foo' /,
+'proper exception on non-existing column as PK';
+
+warnings_exist {
+ package Foo2;
+ use base 'DBIx::Class::Core';
+ __PACKAGE__->table('foo');
+ __PACKAGE__->add_columns(
+ foo => {},
+ bar => { is_nullable => 1 },
+ );
+ __PACKAGE__->set_primary_key(qw(foo bar))
+} qr/Primary key of source 'foo' includes the column 'bar' which has its 'is_nullable' attribute set to true/,
+'proper warning on nullable column in PK';
+
+done_testing;
warnings_exist { is_same_sql_bind (
$rs->search ({}, { rows => 1, offset => 3,columns => [
{ id => 'foo.id' },
- { 'bar.id' => 'bar.id' },
+ { 'artist.id' => 'bar.id' },
{ bleh => \ 'TO_CHAR (foo.womble, "blah")' },
]})->as_query,
'(
my $rs = $s->resultset ('CD')->search({ id => 1 });
+# important for a test below, never traversed
+$rs->result_source->add_relationship(
+ ends_with_me => 'DBICTest::Schema::Artist', sub {}
+);
+
+
my $where_bind = [ { dbic_colname => 'id' }, 1 ];
for my $test_set (
offset => 3,
columns => [
{ id => 'foo.id' },
- { 'bar.id' => 'bar.id' },
+ { 'artist.id' => 'bar.id' },
{ bleh => \'TO_CHAR (foo.womble, "blah")' },
]
}),
sql => '(
- SELECT id, bar__id, bleh
+ SELECT id, artist__id, bleh
FROM (
- SELECT id, bar__id, bleh, ROWNUM rownum__index
+ SELECT id, artist__id, bleh, ROWNUM rownum__index
FROM (
- SELECT foo.id AS id, bar.id AS bar__id, TO_CHAR (foo.womble, "blah") AS bleh
+ SELECT foo.id AS id, bar.id AS artist__id, TO_CHAR (foo.womble, "blah") AS bleh
FROM cd me
WHERE id = ?
) me
offset => 3,
columns => [
{ id => 'foo.id' },
- { 'bar.id' => 'bar.id' },
+ { 'artist.id' => 'bar.id' },
{ bleh => \'TO_CHAR (foo.womble, "blah")' },
],
order_by => [qw( artist title )],
}),
sql => '(
- SELECT id, bar__id, bleh
+ SELECT id, artist__id, bleh
FROM (
- SELECT id, bar__id, bleh, ROWNUM rownum__index
+ SELECT id, artist__id, bleh, ROWNUM rownum__index
FROM (
- SELECT foo.id AS id, bar.id AS bar__id, TO_CHAR(foo.womble, "blah") AS bleh
+ SELECT foo.id AS id, bar.id AS artist__id, TO_CHAR(foo.womble, "blah") AS bleh
FROM cd me
WHERE id = ?
ORDER BY artist, title
offset => 3,
columns => [
{ id => 'foo.id' },
- { 'bar.id' => 'bar.id' },
+ { 'artist.id' => 'bar.id' },
{ bleh => \'TO_CHAR (foo.womble, "blah")' },
],
order_by => 'artist',
}),
sql => '(
- SELECT id, bar__id, bleh
+ SELECT id, artist__id, bleh
FROM (
- SELECT id, bar__id, bleh, ROWNUM rownum__index
+ SELECT id, artist__id, bleh, ROWNUM rownum__index
FROM (
- SELECT foo.id AS id, bar.id AS bar__id, TO_CHAR(foo.womble, "blah") AS bleh
+ SELECT foo.id AS id, bar.id AS artist__id, TO_CHAR(foo.womble, "blah") AS bleh
FROM cd me
WHERE id = ?
ORDER BY artist
{ id => 'foo.id' },
{ 'ends_with_me.id' => 'ends_with_me.id' },
],
- order_by => [qw( artist title )],
+ order_by => [qw( year artist title )],
}),
sql => '(
SELECT id, ends_with_me__id
SELECT foo.id AS id, ends_with_me.id AS ends_with_me__id
FROM cd me
WHERE id = ?
- ORDER BY artist, title
+ ORDER BY year, artist, title
) me
WHERE ROWNUM <= ?
) me
) me
LEFT JOIN books books
ON books.owner = me.id
- ORDER BY books.owner
)',
[
[ { sqlt_datatype => 'integer' } => 3 ],
) me
LEFT JOIN books books
ON books.owner = me.id
- ORDER BY books.owner
)',
[
[ { sqlt_datatype => 'integer' } => 1 ],
) me
LEFT JOIN books books
ON books.owner = me.id
- ORDER BY books.owner
)',
[
[ { sqlt_datatype => 'integer' } => 1 ],
) me
LEFT JOIN books books
ON books.owner = me.id
- ORDER BY books.owner
)',
[
[ { sqlt_datatype => 'integer' } => 3 ],
) me
LEFT JOIN books books
ON books.owner = me.id
- ORDER BY books.owner
)',
[
[ { sqlt_datatype => 'integer' } => 2 ],
) me
LEFT JOIN books books
ON books.owner = me.id
- ORDER BY books.owner
)',
[
[ { sqlt_datatype => 'integer' } => 2 ],
) me
LEFT JOIN books books
ON books.owner = me.id
- ORDER BY books.owner
)',
[],
],
) me
LEFT JOIN books books
ON books.owner = me.id
- ORDER BY books.owner
)',
[],
],
) me
LEFT JOIN books books
ON books.owner = me.id
- ORDER BY me.id, books.owner
+ ORDER BY me.id
)',
[
[ { sqlt_datatype => 'integer' } => 1 ],