X-Git-Url: http://git.shadowcat.co.uk/gitweb/gitweb.cgi?a=blobdiff_plain;f=lib%2FDBIx%2FClass%2FResultSource.pm;h=2874611cd3f2d229b8be4d289de4b7712442d23a;hb=5c008e0fcb98b42197723e3d84342c95b97b6fc0;hp=b4dc288dedd8922f6b95217e5d546f07f03238a0;hpb=908aa1bb761ec1da5c061fe9f687598e3f1934bc;p=dbsrgits%2FDBIx-Class.git diff --git a/lib/DBIx/Class/ResultSource.pm b/lib/DBIx/Class/ResultSource.pm index b4dc288..2874611 100644 --- a/lib/DBIx/Class/ResultSource.pm +++ b/lib/DBIx/Class/ResultSource.pm @@ -3,21 +3,18 @@ package DBIx::Class::ResultSource; use strict; use warnings; -use base 'DBIx::Class'; - use DBIx::Class::ResultSet; use DBIx::Class::ResultSourceHandle; -use DBIx::Class::Exception; use DBIx::Class::Carp; -use DBIx::Class::GlobalDestruction; +use Devel::GlobalDestruction; use Try::Tiny; use List::Util 'first'; use Scalar::Util qw/blessed weaken isweak/; -use B 'perlstring'; - use namespace::clean; +use base qw/DBIx::Class/; + __PACKAGE__->mk_group_accessors(simple => qw/ source_name name source_info _ordered_columns _columns _primaries _unique_constraints @@ -96,7 +93,7 @@ You can retrieve the result source at runtime in the following ways: $schema->source($source_name); -=item From a Row object: +=item From a Result object: $row->result_source; @@ -135,7 +132,7 @@ sub new { =item Arguments: @columns -=item Return value: The ResultSource object +=item Return Value: L<$result_source|/new> =back @@ -148,7 +145,7 @@ pairs, uses the hashref as the L for that column. Repeated calls of this method will add more columns, not replace them. The column names given will be created as accessor methods on your -L objects. You can change the name of the accessor +L objects. You can change the name of the accessor by supplying an L in the column_info hash. If a column name beginning with a plus sign ('+col1') is provided, the @@ -301,7 +298,7 @@ L. =item Arguments: $colname, \%columninfo? -=item Return value: 1/0 (true/false) +=item Return Value: 1/0 (true/false) =back @@ -345,7 +342,7 @@ sub add_column { shift->add_columns(@_); } # DO NOT CHANGE THIS TO GLOB =item Arguments: $colname -=item Return value: 1/0 (true/false) +=item Return Value: 1/0 (true/false) =back @@ -366,7 +363,7 @@ sub has_column { =item Arguments: $colname -=item Return value: Hashref of info +=item Return Value: Hashref of info =back @@ -414,9 +411,9 @@ sub column_info { =over -=item Arguments: None +=item Arguments: none -=item Return value: Ordered list of column names +=item Return Value: Ordered list of column names =back @@ -440,7 +437,7 @@ sub columns { =item Arguments: \@colnames ? -=item Return value: Hashref of column name/info pairs +=item Return Value: Hashref of column name/info pairs =back @@ -514,7 +511,7 @@ sub columns_info { =item Arguments: @colnames -=item Return value: undefined +=item Return Value: not defined =back @@ -532,7 +529,7 @@ broken result source. 
=item Arguments: $colname -=item Return value: undefined +=item Return Value: not defined =back @@ -570,7 +567,7 @@ sub remove_column { shift->remove_columns(@_); } # DO NOT CHANGE THIS TO GLOB =item Arguments: @cols -=item Return value: undefined +=item Return Value: not defined =back @@ -604,9 +601,9 @@ sub set_primary_key { =over 4 -=item Arguments: None +=item Arguments: none -=item Return value: Ordered list of primary column names +=item Return Value: Ordered list of primary column names =back @@ -643,7 +640,7 @@ will be applied to the L of each L =item Arguments: $sequence_name -=item Return value: undefined +=item Return Value: not defined =back @@ -666,7 +663,7 @@ sub sequence { =item Arguments: $name?, \@colnames -=item Return value: undefined +=item Return Value: not defined =back @@ -732,7 +729,7 @@ sub add_unique_constraint { =item Arguments: @constraints -=item Return value: undefined +=item Return Value: not defined =back @@ -784,7 +781,7 @@ sub add_unique_constraints { =item Arguments: \@colnames -=item Return value: Constraint name +=item Return Value: Constraint name =back @@ -818,9 +815,9 @@ sub name_unique_constraint { =over 4 -=item Arguments: None +=item Arguments: none -=item Return value: Hash of unique constraint data +=item Return Value: Hash of unique constraint data =back @@ -842,9 +839,9 @@ sub unique_constraints { =over 4 -=item Arguments: None +=item Arguments: none -=item Return value: Unique constraint names +=item Return Value: Unique constraint names =back @@ -868,7 +865,7 @@ sub unique_constraint_names { =item Arguments: $constraintname -=item Return value: List of constraint columns +=item Return Value: List of constraint columns =back @@ -896,7 +893,7 @@ sub unique_constraint_columns { =item Arguments: $callback_name | \&callback_code -=item Return value: $callback_name | \&callback_code +=item Return Value: $callback_name | \&callback_code =back @@ -963,13 +960,39 @@ sub _invoke_sqlt_deploy_hook { } } +=head2 result_class + +=over 4 + +=item Arguments: $classname + +=item Return Value: $classname + +=back + + use My::Schema::ResultClass::Inflator; + ... + + use My::Schema::Artist; + ... + __PACKAGE__->result_class('My::Schema::ResultClass::Inflator'); + +Set the default result class for this source. You can use this to create +and use your own result inflator. See L +for more details. + +Please note that setting this to something like +L will make every result unblessed +and make life more difficult. Inflators like those are better suited to +temporary usage via L. + =head2 resultset =over 4 -=item Arguments: None +=item Arguments: none -=item Return value: $resultset +=item Return Value: L<$resultset|DBIx::Class::ResultSet> =back @@ -986,7 +1009,7 @@ but is cached from then on unless resultset_class changes. =item Arguments: $classname -=item Return value: $classname +=item Return Value: $classname =back @@ -1010,9 +1033,9 @@ exists. =over 4 -=item Arguments: \%attrs +=item Arguments: L<\%attrs|DBIx::Class::ResultSet/ATTRIBUTES> -=item Return value: \%attrs +=item Return Value: L<\%attrs|DBIx::Class::ResultSet/ATTRIBUTES> =back @@ -1023,8 +1046,35 @@ exists. $source->resultset_attributes({ order_by => [ 'id' ] }); Store a collection of resultset attributes, that will be set on every -L produced from this result source. For a full -list see L. +L produced from this result source. + +B: C comes with its own set of issues and +bugs! While C isn't deprecated per se, its usage is +not recommended! 
+ +Since relationships use attributes to link tables together, the "default" +attributes you set may cause unpredictable and undesired behavior. Furthermore, +the defaults cannot be turned off, so you are stuck with them. + +In most cases, what you should actually be using are project-specific methods: + + package My::Schema::ResultSet::Artist; + use base 'DBIx::Class::ResultSet'; + ... + + # BAD IDEA! + #__PACKAGE__->resultset_attributes({ prefetch => 'tracks' }); + + # GOOD IDEA! + sub with_tracks { shift->search({}, { prefetch => 'tracks' }) } + + # in your code + $schema->resultset('Artist')->with_tracks->... + +This gives you the flexibility of not using it when you don't need it. + +For more complex situations, another solution would be to use a virtual view +via L. =cut @@ -1048,7 +1098,7 @@ sub resultset { =over 4 -=item Arguments: None +=item Arguments: none =item Result value: $name @@ -1084,9 +1134,9 @@ its class name. =over 4 -=item Arguments: None +=item Arguments: none -=item Return value: FROM clause +=item Return Value: FROM clause =back @@ -1104,9 +1154,9 @@ sub from { die 'Virtual method!' } =over 4 -=item Arguments: $schema +=item Arguments: L<$schema?|DBIx::Class::Schema> -=item Return value: A schema object +=item Return Value: L<$schema|DBIx::Class::Schema> =back @@ -1140,17 +1190,15 @@ sub schema { =over 4 -=item Arguments: None +=item Arguments: none -=item Return value: A Storage object +=item Return Value: L<$storage|DBIx::Class::Storage> =back $source->storage->debug(1); -Returns the storage handle for the current schema. - -See also: L +Returns the L for the current schema. =cut @@ -1160,13 +1208,13 @@ sub storage { shift->schema->storage; } =over 4 -=item Arguments: $relname, $related_source_name, \%cond, [ \%attrs ] +=item Arguments: $rel_name, $related_source_name, \%cond, \%attrs? -=item Return value: 1/true if it succeeded +=item Return Value: 1/true if it succeeded =back - $source->add_relationship('relname', 'related_source', $cond, $attrs); + $source->add_relationship('rel_name', 'related_source', $cond, $attrs); L describes a series of methods which create pre-defined useful types of relationships. Look there first @@ -1286,9 +1334,9 @@ sub add_relationship { =over 4 -=item Arguments: None +=item Arguments: none -=item Return value: List of relationship names +=item Return Value: L<@rel_names|DBIx::Class::Relationship> =back @@ -1306,29 +1354,29 @@ sub relationships { =over 4 -=item Arguments: $relname +=item Arguments: L<$rel_name|DBIx::Class::Relationship> -=item Return value: Hashref of relation data, +=item Return Value: L<\%rel_data|DBIx::Class::Relationship::Base/add_relationship> =back Returns a hash of relationship information for the specified relationship -name. The keys/values are as specified for L. +name. The keys/values are as specified for L. =cut sub relationship_info { - my ($self, $rel) = @_; - return $self->_relationships->{$rel}; + #my ($self, $rel) = @_; + return shift->_relationships->{+shift}; } =head2 has_relationship =over 4 -=item Arguments: $rel +=item Arguments: L<$rel_name|DBIx::Class::Relationship> -=item Return value: 1/0 (true/false) +=item Return Value: 1/0 (true/false) =back @@ -1337,17 +1385,17 @@ Returns true if the source has a relationship of this name, false otherwise. 
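
A minimal sketch of how these introspection methods can be combined at
runtime. The schema, source and relationship names used here (My::Schema,
Artist, cds) are assumptions for the sake of the example, not part of this
patch:

  my $source = $schema->source('Artist');

  if ($source->has_relationship('cds')) {
    my $rel_info = $source->relationship_info('cds');

    # the returned hashref carries whatever was registered via
    # add_relationship (related class, join condition, attrs, ...)
    print "$_\n" for keys %$rel_info;
  }

  # every relationship name declared on this source
  my @rel_names = $source->relationships;
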
=cut sub has_relationship { - my ($self, $rel) = @_; - return exists $self->_relationships->{$rel}; + #my ($self, $rel) = @_; + return exists shift->_relationships->{+shift}; } =head2 reverse_relationship_info =over 4 -=item Arguments: $relname +=item Arguments: L<$rel_name|DBIx::Class::Relationship> -=item Return value: Hashref of relationship data +=item Return Value: L<\%rel_data|DBIx::Class::Relationship::Base/add_relationship> =back @@ -1546,8 +1594,8 @@ sub _resolve_join { , -join_path => [@$jpath, { $join => $as } ], -is_single => ( - (! $rel_info->{attrs}{accessor}) - or + $rel_info->{attrs}{accessor} + && first { $rel_info->{attrs}{accessor} eq $_ } (qw/single filter/) ), -alias => $as, @@ -1568,9 +1616,9 @@ sub pk_depends_on { # having already been inserted. Takes the name of the relationship and a # hashref of columns of the related object. sub _pk_depends_on { - my ($self, $relname, $rel_data) = @_; + my ($self, $rel_name, $rel_data) = @_; - my $relinfo = $self->relationship_info($relname); + my $relinfo = $self->relationship_info($rel_name); # don't assume things if the relationship direction is specified return $relinfo->{attrs}{is_foreign_key_constraint} @@ -1585,7 +1633,7 @@ sub _pk_depends_on { # assume anything that references our PK probably is dependent on us # rather than vice versa, unless the far side is (a) defined or (b) # auto-increment - my $rel_source = $self->related_source($relname); + my $rel_source = $self->related_source($rel_name); foreach my $p ($self->primary_columns) { if (exists $keyhash->{$p}) { @@ -1613,7 +1661,7 @@ our $UNRESOLVABLE_CONDITION = \ '1 = 0'; # list of non-triviail values (notmally conditions) returned as a part # of a joinfree condition hash sub _resolve_condition { - my ($self, $cond, $as, $for, $relname) = @_; + my ($self, $cond, $as, $for, $rel_name) = @_; my $obj_rel = !!blessed $for; @@ -1624,7 +1672,7 @@ sub _resolve_condition { self_alias => $obj_rel ? $as : $for, foreign_alias => $relalias, self_resultsource => $self, - foreign_relname => $relname || ($obj_rel ? $as : $for), + foreign_relname => $rel_name || ($obj_rel ? $as : $for), self_rowobj => $obj_rel ? $for : undef }); @@ -1633,7 +1681,7 @@ sub _resolve_condition { # FIXME sanity check until things stabilize, remove at some point $self->throw_exception ( - "A join-free condition returned for relationship '$relname' without a row-object to chain from" + "A join-free condition returned for relationship '$rel_name' without a row-object to chain from" ) unless $obj_rel; # FIXME another sanity check @@ -1643,7 +1691,7 @@ sub _resolve_condition { first { $_ !~ /^\Q$relalias.\E.+/ } keys %$joinfree_cond ) { $self->throw_exception ( - "The join-free condition returned for relationship '$relname' must be a hash " + "The join-free condition returned for relationship '$rel_name' must be a hash " .'reference with all keys being valid columns on the related result source' ); } @@ -1660,7 +1708,7 @@ sub _resolve_condition { } # see which parts of the joinfree cond are conditionals - my $relcol_list = { map { $_ => 1 } $self->related_source($relname)->columns }; + my $relcol_list = { map { $_ => 1 } $self->related_source($rel_name)->columns }; for my $c (keys %$joinfree_cond) { my ($colname) = $c =~ /^ (?: \Q$relalias.\E )? 
(.+)/x; @@ -1737,14 +1785,14 @@ sub _resolve_condition { elsif (ref $cond eq 'ARRAY') { my (@ret, $crosstable); for (@$cond) { - my ($cond, $crosstab) = $self->_resolve_condition($_, $as, $for, $relname); + my ($cond, $crosstab) = $self->_resolve_condition($_, $as, $for, $rel_name); push @ret, $cond; $crosstable ||= $crosstab; } return wantarray ? (\@ret, $crosstable) : \@ret; } else { - $self->throw_exception ("Can't handle condition $cond for relationship '$relname' yet :("); + $self->throw_exception ("Can't handle condition $cond for relationship '$rel_name' yet :("); } } @@ -1753,7 +1801,7 @@ sub _resolve_condition { # prefixed relative to the current source, in accordance with where they appear # in the supplied relationships. sub _resolve_prefetch { - my ($self, $pre, $alias, $alias_map, $order, $pref_path) = @_; + my ($self, $pre, $alias, $alias_map, $order, $collapse, $pref_path) = @_; $pref_path ||= []; if (not defined $pre or not length $pre) { @@ -1761,15 +1809,15 @@ sub _resolve_prefetch { } elsif( ref $pre eq 'ARRAY' ) { return - map { $self->_resolve_prefetch( $_, $alias, $alias_map, $order, [ @$pref_path ] ) } + map { $self->_resolve_prefetch( $_, $alias, $alias_map, $order, $collapse, [ @$pref_path ] ) } @$pre; } elsif( ref $pre eq 'HASH' ) { my @ret = map { - $self->_resolve_prefetch($_, $alias, $alias_map, $order, [ @$pref_path ] ), + $self->_resolve_prefetch($_, $alias, $alias_map, $order, $collapse, [ @$pref_path ] ), $self->related_source($_)->_resolve_prefetch( - $pre->{$_}, "${alias}.$_", $alias_map, $order, [ @$pref_path, $_] ) + $pre->{$_}, "${alias}.$_", $alias_map, $order, $collapse, [ @$pref_path, $_] ) } keys %$pre; return @ret; } @@ -1791,7 +1839,6 @@ sub _resolve_prefetch { my $rel_info = $self->relationship_info( $pre ); $self->throw_exception( $self->source_name . " has no such relationship '$pre'" ) unless $rel_info; - my $as_prefix = ($alias =~ /^.*?\.(.+)$/ ? $1.'.' : ''); my $rel_source = $self->related_source($pre); @@ -1801,11 +1848,27 @@ sub _resolve_prefetch { unless ref($rel_info->{cond}) eq 'HASH'; my $dots = @{[$as_prefix =~ m/\./g]} + 1; # +1 to match the ".${as_prefix}" + if (my ($fail) = grep { @{[$_ =~ m/\./g]} == $dots } + keys %{$collapse}) { + my ($last) = ($fail =~ /([^\.]+)$/); + carp ( + "Prefetching multiple has_many rels ${last} and ${pre} " + .(length($as_prefix) + ? "at the same level (${as_prefix}) " + : "at top level " + ) + . 'will explode the number of row objects retrievable via ->next or ->all. ' + . 'Use at your own risk.' + ); + } + #my @col = map { (/^self\.(.+)$/ ? ("${as_prefix}.$1") : ()); } # values %{$rel_info->{cond}}; + $collapse->{".${as_prefix}${pre}"} = [ $rel_source->_pri_cols ]; + # action at a distance. prepending the '.' allows simpler code + # in ResultSet->_collapse_result my @key = map { (/^foreign\.(.+)$/ ? ($1) : ()); } keys %{$rel_info->{cond}}; - push @$order, map { "${as}.$_" } @key; if (my $rel_order = $rel_info->{attrs}{order_by}) { @@ -1840,574 +1903,13 @@ sub _resolve_prefetch { } } -# adding a dep on MoreUtils *just* for this is retarded -my $unique_numlist = sub { [ sort { $a <=> $b } keys %{ {map { $_ => 1 } @_ }} ] }; - -# This error must be thrown from two distinct codepaths, joining them is -# rather hard. Go for this hack instead. -my $get_related_source = sub { - my ($rsrc, $rel, $relcols) = @_; - try { - $rsrc->related_source ($rel) - } catch { - $rsrc->throw_exception(sprintf( - "Can't inflate prefetch into non-existent relationship '%s' from '%s', " - . 
"check the inflation specification (columns/as) ending in '...%s.%s'.", - $rel, - $rsrc->source_name, - $rel, - (sort { length($a) <=> length ($b) } keys %$relcols)[0], - ))}; -}; - -# Takes a selection list and generates a collapse-map representing -# row-object fold-points. Every relationship is assigned a set of unique, -# non-nullable columns (which may *not even be* from the same resultset) -# and the collapser will use this information to correctly distinguish -# data of individual to-be-row-objects. -sub _resolve_collapse { - my ($self, $as, $as_fq_idx, $rel_chain, $parent_info, $node_idx_ref) = @_; - - # for comprehensible error messages put ourselves at the head of the relationship chain - $rel_chain ||= [ $self->source_name ]; - - # record top-level fully-qualified column index - $as_fq_idx ||= { %$as }; - - my ($my_cols, $rel_cols); - for (keys %$as) { - if ($_ =~ /^ ([^\.]+) \. (.+) /x) { - $rel_cols->{$1}{$2} = 1; - } - else { - $my_cols->{$_} = {}; # important for ||= below - } - } - - my $relinfo; - # run through relationships, collect metadata, inject non-left fk-bridges from - # *INNER-JOINED* children (if any) - for my $rel (keys %$rel_cols) { - my $rel_src = $get_related_source->($self, $rel, $rel_cols->{$rel}); - - my $inf = $self->relationship_info ($rel); - - $relinfo->{$rel}{is_single} = $inf->{attrs}{accessor} && $inf->{attrs}{accessor} ne 'multi'; - $relinfo->{$rel}{is_inner} = ( $inf->{attrs}{join_type} || '' ) !~ /^left/i; - $relinfo->{$rel}{rsrc} = $rel_src; - - my $cond = $inf->{cond}; - - if ( - ref $cond eq 'HASH' - and - keys %$cond - and - ! first { $_ !~ /^foreign\./ } (keys %$cond) - and - ! first { $_ !~ /^self\./ } (values %$cond) - ) { - for my $f (keys %$cond) { - my $s = $cond->{$f}; - $_ =~ s/^ (?: foreign | self ) \.//x for ($f, $s); - $relinfo->{$rel}{fk_map}{$s} = $f; - - # need to know source from *our* pov, hnce $rel. - $my_cols->{$s} ||= { via_fk => "$rel.$f" } if ( - defined $rel_cols->{$rel}{$f} # in fact selected - and - (! $node_idx_ref or $relinfo->{$rel}{is_inner}) # either top-level or an inner join - ); - } - } - } - - # if the parent is already defined, assume all of its related FKs are selected - # (even if they in fact are NOT in the select list). Keep a record of what we - # assumed, and if any such phantom-column becomes part of our own collapser, - # throw everything assumed-from-parent away and replace with the collapser of - # the parent (whatever it may be) - my $assumed_from_parent; - unless ($parent_info->{underdefined}) { - $assumed_from_parent->{columns} = { map - # only add to the list if we do not already select said columns - { ! exists $my_cols->{$_} ? ( $_ => 1 ) : () } - values %{$parent_info->{rel_condition} || {}} - }; - - $my_cols->{$_} = { via_collapse => $parent_info->{collapse_on} } - for keys %{$assumed_from_parent->{columns}}; - } - - # get colinfo for everything - if ($my_cols) { - my $ci = $self->columns_info; - $my_cols->{$_}{colinfo} = $ci->{$_} for keys %$my_cols; - } - - my $collapse_map; - - # try to resolve based on our columns (plus already inserted FK bridges) - if ( - $my_cols - and - my $uset = $self->_unique_column_set ($my_cols) - ) { - # see if the resulting collapser relies on any implied columns, - # and fix stuff up if this is the case - - my $parent_collapser_used = defined delete @{$uset}{keys %{$assumed_from_parent->{columns}}}; - $collapse_map->{-node_id} = $unique_numlist->( - $parent_collapser_used ? 
@{$parent_info->{collapse_on}} : (), - (map - { - my $fqc = join ('.', - @{$rel_chain}[1 .. $#$rel_chain], - ( $my_cols->{$_}{via_fk} || $_ ), - ); - - $as_fq_idx->{$fqc}; - } - keys %$uset - ), - ); - } - - # Stil don't know how to collapse - keep descending down 1:1 chains - if - # a related non-LEFT 1:1 is resolvable - its condition will collapse us - # too - unless ($collapse_map->{-node_id}) { - my @candidates; - - for my $rel (keys %$relinfo) { - next unless ($relinfo->{$rel}{is_single} && $relinfo->{$rel}{is_inner}); - - if ( my $rel_collapse = $relinfo->{$rel}{rsrc}->_resolve_collapse ( - $rel_cols->{$rel}, - $as_fq_idx, - [ @$rel_chain, $rel ], - { underdefined => 1 } - )) { - push @candidates, $rel_collapse->{-node_id}; - } - } - - # get the set with least amount of columns - # FIXME - maybe need to implement a data type order as well (i.e. prefer several ints - # to a single varchar) - if (@candidates) { - ($collapse_map->{-node_id}) = sort { scalar @$a <=> scalar @$b } (@candidates); - } - } - - # Still dont know how to collapse - see if the parent passed us anything - # (i.e. reuse collapser over 1:1) - unless ($collapse_map->{-node_id}) { - $collapse_map->{-node_id} = $parent_info->{collapse_on} - if $parent_info->{collapser_reusable}; - } - - # stop descending into children if we were called by a parent for first-pass - # and don't despair if nothing was found (there may be other parallel branches - # to dive into) - if ($parent_info->{underdefined}) { - return $collapse_map->{-node_id} ? $collapse_map : undef - } - # nothing down the chain resolved - can't calculate a collapse-map - elsif (! $collapse_map->{-node_id}) { - $self->throw_exception ( sprintf - "Unable to calculate a definitive collapse column set for %s%s: fetch more unique non-nullable columns", - $self->source_name, - @$rel_chain > 1 - ? sprintf (' (last member of the %s chain)', join ' -> ', @$rel_chain ) - : '' - , - ); - } - - # If we got that far - we are collapsable - GREAT! 
Now go down all children - # a second time, and fill in the rest - - $collapse_map->{-is_optional} = 1 if $parent_info->{is_optional}; - $collapse_map->{-node_index} = ${ $node_idx_ref ||= \do { my $x = 1 } }++; # this is *deliberately* not 0-based - - my (@id_sets, $multis_in_chain); - for my $rel (sort keys %$relinfo) { - - $collapse_map->{$rel} = $relinfo->{$rel}{rsrc}->_resolve_collapse ( - { map { $_ => 1 } ( keys %{$rel_cols->{$rel}} ) }, - - $as_fq_idx, - - [ @$rel_chain, $rel], - - { - collapse_on => [ @{$collapse_map->{-node_id}} ], - - rel_condition => $relinfo->{$rel}{fk_map}, - - is_optional => $collapse_map->{-is_optional}, - - # if this is a 1:1 our own collapser can be used as a collapse-map - # (regardless of left or not) - collapser_reusable => $relinfo->{$rel}{is_single}, - }, - - $node_idx_ref, - ); - - $collapse_map->{$rel}{-is_single} = 1 if $relinfo->{$rel}{is_single}; - $collapse_map->{$rel}{-is_optional} ||= 1 unless $relinfo->{$rel}{is_inner}; - push @id_sets, @{ $collapse_map->{$rel}{-branch_id} }; - } - - $collapse_map->{-branch_id} = $unique_numlist->( @id_sets, @{$collapse_map->{-node_id}} ); - - return $collapse_map; -} - -sub _unique_column_set { - my ($self, $cols) = @_; - - my %unique = $self->unique_constraints; - - # always prefer the PK first, and then shortest constraints first - USET: - for my $set (delete $unique{primary}, sort { @$a <=> @$b } (values %unique) ) { - next unless $set && @$set; - - for (@$set) { - next USET unless ($cols->{$_} && $cols->{$_}{colinfo} && !$cols->{$_}{colinfo}{is_nullable} ); - } - - return { map { $_ => 1 } @$set }; - } - - return undef; -} - -# Takes an arrayref of {as} dbic column aliases and the collapse and select -# attributes from the same $rs (the slector requirement is a temporary -# workaround), and returns a coderef capable of: -# my $me_pref_clps = $coderef->([$rs->cursor->next]) -# Where the $me_pref_clps arrayref is the future argument to -# ::ResultSet::_collapse_result. -# -# $me_pref_clps->[0] is always returned (even if as an empty hash with no -# rowdata), however branches of related data in $me_pref_clps->[1] may be -# pruned short of what was originally requested based on {as}, depending -# on: -# -# * If collapse is requested, a definitive collapse map is calculated for -# every relationship "fold-point", consisting of a set of values (which -# may not even be contained in the future 'me' of said relationship -# (for example a cd.artist_id defines the related inner-joined artist)). -# Thus a definedness check is carried on all collapse-condition values -# and if at least one is undef it is assumed that we are dealing with a -# NULLed right-side of a left-join, so we don't return a related data -# container at all, which implies no related objects -# -# * If we are not collapsing, there is no constraint on having a selector -# uniquely identifying all possible objects, and the user might have very -# well requested a column that just *happens* to be all NULLs. What we do -# in this case is fallback to the old behavior (which is a potential FIXME) -# by always returning a data container, but only filling it with columns -# IFF at least one of them is defined. This way we do not get an object -# with a bunch of has_column_loaded to undef, but at the same time do not -# further relationships based off this "null" object (e.g. in case the user -# deliberately skipped link-table values). I am pretty sure there are some -# tests that codify this behavior, need to find the exact testname. 
-# -# For an example of this coderef in action (and to see its guts) look at -# t/prefetch/_internals.t -# -# This is a huge performance win, as we call the same code for -# every row returned from the db, thus avoiding repeated method -# lookups when traversing relationships -# -# Also since the coderef is completely stateless (the returned structure is -# always fresh on every new invocation) this is a very good opportunity for -# memoization if further speed improvements are needed -# -# The way we construct this coderef is somewhat fugly, although I am not -# sure if the string eval is *that* bad of an idea. The alternative is to -# have a *very* large number of anon coderefs calling each other in a twisty -# maze, whereas the current result is a nice, smooth, single-pass function. -# In any case - the output of this thing is meticulously micro-tested, so -# any sort of rewrite should be relatively easy -# -sub _mk_row_parser { - my ($self, $args) = @_; - - my $inflate_index = { map - { $args->{inflate_map}[$_] => $_ } - ( 0 .. $#{$args->{inflate_map}} ) - }; - - my ($parser_src); - if ($args->{collapse}) { - # FIXME - deal with unorderedness - # unordered => $unordered - - my $collapse_map = $self->_resolve_collapse ( - # FIXME - # only consider real columns (not functions) during collapse resolution - # this check shouldn't really be here, as fucktards are not supposed to - # alias random crap to existing column names anyway, but still - just in - # case - # FIXME !!!! - this does not yet deal with unbalanced selectors correctly - # (it is now trivial as the attrs specify where things go out of sync) - { map - { ref $args->{selection}[$inflate_index->{$_}] ? () : ( $_ => $inflate_index->{$_} ) } - keys %$inflate_index - } - ); - - my $unrolled_top_branch_id_indexes = join (', ', @{$collapse_map->{-branch_id}}); - - my ($sequenced_top_branch_id, $sequenced_top_node_id) = map - { join ('', map { "{'\xFF__IDVALPOS__${_}__\xFF'}" } @$_ ) } - $collapse_map->{-branch_id}, $collapse_map->{-node_id} - ; - - my $rolled_out_assemblers = __visit_infmap_collapse ( - $inflate_index, $collapse_map - ); - - my @sprintf_args = ( - $unrolled_top_branch_id_indexes, - $sequenced_top_branch_id, - $sequenced_top_node_id, - $rolled_out_assemblers, - $sequenced_top_node_id, - ); - $parser_src = sprintf (<<'EOS', @sprintf_args); - -### BEGIN STRING EVAL - my ($rows_pos, $result_pos, $cur_row, @cur_row_id_values, $is_new_res, @collapse_idx) = (0,0); - - # this loop is a bit arcane - the rationale is that the passed in - # $_[0] will either have only one row (->next) or will have all - # rows already pulled in (->all and/or unordered). Given that the - # result can be rather large - we reuse the same already allocated - # array, since the collapsed prefetch is smaller by definition. - # At the end we cut the leftovers away and move on. - while ($cur_row = - ($rows_pos >= 0 and $_[0][$rows_pos++] or do { $rows_pos = -1; 0 } ) - || - ($_[1] and $_[1]->()) - ) { - - # FIXME - # optimize this away when we know we have no undefs in the collapse map - $cur_row_id_values[$_] = defined $cur_row->[$_] ? $cur_row->[$_] : "\xFF\xFFN\xFFU\xFFL\xFFL\xFF\xFF" - for (%s); # the top branch_id includes all id values - - # check top branch for doubling via a has_many non-selecting join or something - # 0 is reserved for this (node indexes start from 1) - next if $collapse_idx[0]%s++; - - $is_new_res = ! $collapse_idx[1]%s; - - # lazify - # fire on ordered only -# if ($is_new_res = ! 
$collapse_idx[1]{$cur_row_id_values[2]}) { -# } - - %s - - $_[0][$result_pos++] = $collapse_idx[1]%s - if $is_new_res; - } - - splice @{$_[0]}, $result_pos; # truncate the passed in array for cases of collapsing ->all() - -### END STRING EVAL -EOS - - # change the quoted placeholders to unquoted alias-references - $parser_src =~ s/ \' \xFF__VALPOS__(\d+)__\xFF \' /sprintf ('$cur_row->[%d]', $1)/gex; - $parser_src =~ s/ \' \xFF__IDVALPOS__(\d+)__\xFF \' /sprintf ('$cur_row_id_values[%d]', $1)/gex; - } - - else { - $parser_src = sprintf( - '$_ = %s for @{$_[0]}', - __visit_infmap_simple($inflate_index, { rsrc => $self }), # need the $rsrc to determine left-ness - ); - - # change the quoted placeholders to unquoted alias-references - $parser_src =~ s/ \' \xFF__VALPOS__(\d+)__\xFF \' /sprintf ('$_->[%d]', $1)/gex; - } - - eval "sub { no strict; no warnings; $parser_src }" or die "$@\n\n$parser_src"; -} - -{ - # keep our own DD object around so we don't have to fitz with quoting - my $dumper_obj; - my $visit_dump = sub { - # we actually will be producing functional perl code here, - # thus no second-guessing of what these globals might have - # been set to. DO NOT CHANGE! - ($dumper_obj ||= do { - require Data::Dumper; - Data::Dumper->new([]) - ->Purity (1) - ->Pad ('') - ->Useqq (0) - ->Terse (1) - ->Quotekeys (1) - ->Deepcopy (1) - ->Deparse (0) - ->Maxdepth (0) - ->Indent (0) - })->Values ([shift])->Dump, - }; - - sub __visit_infmap_simple { - my ($val_idx, $args) = @_; - - my $my_cols = {}; - my $rel_cols; - for (keys %$val_idx) { - if ($_ =~ /^ ([^\.]+) \. (.+) /x) { - $rel_cols->{$1}{$2} = $val_idx->{$_}; - } - else { - $my_cols->{$_} = $val_idx->{$_}; - } - } - my @relperl; - for my $rel (sort keys %$rel_cols) { - - my $rel_rsrc = $get_related_source->($args->{rsrc}, $rel, $rel_cols->{$rel}); - - #my $optional = $args->{is_optional}; - #$optional ||= ($args->{rsrc}->relationship_info($rel)->{attrs}{join_type} || '') =~ /^left/i; - - push @relperl, join ' => ', perlstring($rel), __visit_infmap_simple($rel_cols->{$rel}, { - non_top => 1, - #is_optional => $optional, - rsrc => $rel_rsrc, - }); - - # FIXME SUBOPTIMAL - disabled to satisfy t/resultset/inflate_result_api.t - #if ($optional and my @branch_null_checks = map - # { "(! defined '\xFF__VALPOS__${_}__\xFF')" } - # sort { $a <=> $b } values %{$rel_cols->{$rel}} - #) { - # $relperl[-1] = sprintf ( '(%s) ? ( %s => [] ) : ( %s )', - # join (' && ', @branch_null_checks ), - # perlstring($rel), - # $relperl[-1], - # ); - #} - } - - my $me_struct = keys %$my_cols - ? $visit_dump->({ map { $_ => "\xFF__VALPOS__$my_cols->{$_}__\xFF" } (keys %$my_cols) }) - : 'undef' - ; - - return sprintf '[%s]', join (',', - $me_struct, - @relperl ? sprintf ('{ %s }', join (',', @relperl)) : (), - ); - } - - sub __visit_infmap_collapse { - my ($val_idx, $collapse_map, $parent_info) = @_; - - my $my_cols = {}; - my $rel_cols; - for (keys %$val_idx) { - if ($_ =~ /^ ([^\.]+) \. (.+) /x) { - $rel_cols->{$1}{$2} = $val_idx->{$_}; - } - else { - $my_cols->{$_} = $val_idx->{$_}; - } - } - - my $sequenced_node_id = join ('', map - { "{'\xFF__IDVALPOS__${_}__\xFF'}" } - @{$collapse_map->{-node_id}} - ); - - my $me_struct = keys %$my_cols - ? 
$visit_dump->([{ map { $_ => "\xFF__VALPOS__$my_cols->{$_}__\xFF" } (keys %$my_cols) }]) - : 'undef' - ; - my $node_idx_ref = sprintf '$collapse_idx[%d]%s', $collapse_map->{-node_index}, $sequenced_node_id; - - my $parent_idx_ref = sprintf( '$collapse_idx[%d]%s[1]{%s}', - @{$parent_info}{qw/node_idx sequenced_node_id/}, - perlstring($parent_info->{relname}), - ) if $parent_info; - - my @src; - if ($collapse_map->{-node_index} == 1) { - push @src, sprintf( '%s ||= %s;', - $node_idx_ref, - $me_struct, - ); - } - elsif ($collapse_map->{-is_single}) { - push @src, sprintf ( '%s = %s ||= %s;', - $parent_idx_ref, - $node_idx_ref, - $me_struct, - ); - } - else { - push @src, sprintf('push @{%s}, %s = %s if !%s;', - $parent_idx_ref, - $node_idx_ref, - $me_struct, - $node_idx_ref, - ); - } - - #my $known_defined = { %{ $parent_info->{known_defined} || {} } }; - #$known_defined->{$_}++ for @{$collapse_map->{-node_id}}; - - for my $rel (sort keys %$rel_cols) { - - push @src, sprintf( '%s[1]{%s} ||= [];', $node_idx_ref, perlstring($rel) ); - - push @src, __visit_infmap_collapse($rel_cols->{$rel}, $collapse_map->{$rel}, { - node_idx => $collapse_map->{-node_index}, - sequenced_node_id => $sequenced_node_id, - relname => $rel, - #known_defined => $known_defined, - }); - - # FIXME SUBOPTIMAL - disabled to satisfy t/resultset/inflate_result_api.t - #if ($collapse_map->{$rel}{-is_optional} and my @null_checks = map - # { "(! defined '\xFF__VALPOS__${_}__\xFF')" } - # sort { $a <=> $b } grep - # { ! $known_defined->{$_} } - # @{$collapse_map->{$rel}{-node_id}} - #) { - # $src[-1] = sprintf( '(%s) or %s', - # join (' || ', @null_checks ), - # $src[-1], - # ); - #} - } - - join "\n", @src; - } -} - =head2 related_source =over 4 -=item Arguments: $relname +=item Arguments: $rel_name -=item Return value: $source +=item Return Value: $source =back @@ -2438,9 +1940,9 @@ sub related_source { =over 4 -=item Arguments: $relname +=item Arguments: $rel_name -=item Return value: $classname +=item Return Value: $classname =back @@ -2460,9 +1962,9 @@ sub related_class { =over 4 -=item Arguments: None +=item Arguments: none -=item Return value: $source_handle +=item Return Value: L<$source_handle|DBIx::Class::ResultSourceHandle> =back @@ -2579,7 +2081,7 @@ Creates a new ResultSource object. Not normally called directly by end users. =item Arguments: 1/0 (default: 0) -=item Return value: 1/0 +=item Return Value: 1/0 =back @@ -2590,9 +2092,9 @@ metadata from storage as necessary. This is *deprecated*, and should not be used. It will be removed before 1.0. -=head1 AUTHORS +=head1 AUTHOR AND CONTRIBUTORS -Matt S. Trout +See L and L in DBIx::Class =head1 LICENSE
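
Tying back to the result_class notes earlier in this patch: inflators such
as DBIx::Class::ResultClass::HashRefInflator are better applied per
resultset than per source. A rough sketch, assuming an Artist resultset
with a name column (both names are illustrative):

  my $rs = $schema->resultset('Artist');

  # switch just this resultset to plain hashrefs instead of row objects
  $rs->result_class('DBIx::Class::ResultClass::HashRefInflator');

  for my $artist ($rs->all) {
    # $artist is an unblessed hashref of column values here
    print $artist->{name}, "\n";
  }
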