use strict;
use warnings;
-use base qw/DBIx::Class/;
+use base qw/DBIx::Class::ResultSource::RowParser DBIx::Class/;
use DBIx::Class::ResultSet;
use DBIx::Class::ResultSourceHandle;
-use DBIx::Class::Exception;
use DBIx::Class::Carp;
-use DBIx::Class::GlobalDestruction;
+use Devel::GlobalDestruction;
use Try::Tiny;
use List::Util 'first';
use Scalar::Util qw/blessed weaken isweak/;
+
use namespace::clean;
__PACKAGE__->mk_group_accessors(simple => qw/
$schema->source($source_name);
-=item From a Row object:
+=item From a Result object:
- $row->result_source;
+ $result->result_source;
=item From a ResultSet object:
=item Arguments: @columns
-=item Return value: The ResultSource object
+=item Return Value: L<$result_source|/new>
=back
calls of this method will add more columns, not replace them.
The column names given will be created as accessor methods on your
-L<DBIx::Class::Row> objects. You can change the name of the accessor
+L<Result|DBIx::Class::Manual::ResultClass> objects. You can change the name of the accessor
by supplying an L</accessor> in the column_info hash.
If a column name beginning with a plus sign ('+col1') is provided, the
=item Arguments: $colname, \%columninfo?
-=item Return value: 1/0 (true/false)
+=item Return Value: 1/0 (true/false)
=back
=item Arguments: $colname
-=item Return value: 1/0 (true/false)
+=item Return Value: 1/0 (true/false)
=back
=item Arguments: $colname
-=item Return value: Hashref of info
+=item Return Value: Hashref of info
=back
=over
-=item Arguments: None
+=item Arguments: none
-=item Return value: Ordered list of column names
+=item Return Value: Ordered list of column names
=back
=item Arguments: \@colnames ?
-=item Return value: Hashref of column name/info pairs
+=item Return Value: Hashref of column name/info pairs
=back
}
else {
$self->throw_exception( sprintf (
- "No such column '%s' on source %s",
+ "No such column '%s' on source '%s'",
$_,
- $self->source_name,
+ $self->source_name || $self->name || 'Unknown source...?',
));
}
}
=item Arguments: @colnames
-=item Return value: undefined
+=item Return Value: not defined
=back
=item Arguments: $colname
-=item Return value: undefined
+=item Return Value: not defined
=back
=item Arguments: @cols
-=item Return value: undefined
+=item Return Value: not defined
=back
sub set_primary_key {
my ($self, @cols) = @_;
- # check if primary key columns are valid columns
- foreach my $col (@cols) {
- $self->throw_exception("No such column $col on table " . $self->name)
- unless $self->has_column($col);
+
+ my $colinfo = $self->columns_info(\@cols);
+ for my $col (@cols) {
+ carp_unique(sprintf (
+ "Primary key of source '%s' includes the column '%s' which has its "
+ . "'is_nullable' attribute set to true. This is a mistake and will cause "
+ . 'various Result-object operations to fail',
+ $self->source_name || $self->name || 'Unknown source...?',
+ $col,
+ )) if $colinfo->{$col}{is_nullable};
}
+
$self->_primaries(\@cols);
$self->add_unique_constraint(primary => \@cols);
=over 4
-=item Arguments: None
+=item Arguments: none
-=item Return value: Ordered list of primary column names
+=item Return Value: Ordered list of primary column names
=back
=item Arguments: $sequence_name
-=item Return value: undefined
+=item Return Value: not defined
=back
=item Arguments: $name?, \@colnames
-=item Return value: undefined
+=item Return Value: not defined
=back
=item Arguments: @constraints
-=item Return value: undefined
+=item Return Value: not defined
=back
=item Arguments: \@colnames
-=item Return value: Constraint name
+=item Return Value: Constraint name
=back
=over 4
-=item Arguments: None
+=item Arguments: none
-=item Return value: Hash of unique constraint data
+=item Return Value: Hash of unique constraint data
=back
=over 4
-=item Arguments: None
+=item Arguments: none
-=item Return value: Unique constraint names
+=item Return Value: Unique constraint names
=back
=item Arguments: $constraintname
-=item Return value: List of constraint columns
+=item Return Value: List of constraint columns
=back
=item Arguments: $callback_name | \&callback_code
-=item Return value: $callback_name | \&callback_code
+=item Return Value: $callback_name | \&callback_code
=back
}
}
+=head2 result_class
+
+=over 4
+
+=item Arguments: $classname
+
+=item Return Value: $classname
+
+=back
+
+ use My::Schema::ResultClass::Inflator;
+ ...
+
+ use My::Schema::Artist;
+ ...
+ __PACKAGE__->result_class('My::Schema::ResultClass::Inflator');
+
+Set the default result class for this source. You can use this to create
+and use your own result inflator. See L<DBIx::Class::ResultSet/result_class>
+for more details.
+
+Please note that setting this to something like
+L<DBIx::Class::ResultClass::HashRefInflator> will make every result unblessed
+and make life more difficult. Inflators like those are better suited to
+temporary usage via L<DBIx::Class::ResultSet/result_class>.
+
=head2 resultset
=over 4
-=item Arguments: None
+=item Arguments: none
-=item Return value: $resultset
+=item Return Value: L<$resultset|DBIx::Class::ResultSet>
=back
=item Arguments: $classname
-=item Return value: $classname
+=item Return Value: $classname
=back
=over 4
-=item Arguments: \%attrs
+=item Arguments: L<\%attrs|DBIx::Class::ResultSet/ATTRIBUTES>
-=item Return value: \%attrs
+=item Return Value: L<\%attrs|DBIx::Class::ResultSet/ATTRIBUTES>
=back
$source->resultset_attributes({ order_by => [ 'id' ] });
Store a collection of resultset attributes, that will be set on every
-L<DBIx::Class::ResultSet> produced from this result source. For a full
-list see L<DBIx::Class::ResultSet/ATTRIBUTES>.
+L<DBIx::Class::ResultSet> produced from this result source.
+
+B<CAVEAT>: C<resultset_attributes> comes with its own set of issues and
+bugs! While C<resultset_attributes> isn't deprecated per se, its usage is
+not recommended!
+
+Since relationships use attributes to link tables together, the "default"
+attributes you set may cause unpredictable and undesired behavior. Furthermore,
+the defaults cannot be turned off, so you are stuck with them.
+
+In most cases, what you should actually be using are project-specific methods:
+
+ package My::Schema::ResultSet::Artist;
+ use base 'DBIx::Class::ResultSet';
+ ...
+
+ # BAD IDEA!
+ #__PACKAGE__->resultset_attributes({ prefetch => 'tracks' });
+
+ # GOOD IDEA!
+ sub with_tracks { shift->search({}, { prefetch => 'tracks' }) }
+
+ # in your code
+ $schema->resultset('Artist')->with_tracks->...
+
+This gives you the flexibility of not using it when you don't need it.
+
+For more complex situations, another solution would be to use a virtual view
+via L<DBIx::Class::ResultSource::View>.
=cut
=over 4
-=item Arguments: None
+=item Arguments: none
-=item Result value: $name
+=item Return Value: $name
=over 4
-=item Arguments: None
+=item Arguments: none
-=item Return value: FROM clause
+=item Return Value: FROM clause
=back
=over 4
-=item Arguments: $schema
+=item Arguments: L<$schema?|DBIx::Class::Schema>
-=item Return value: A schema object
+=item Return Value: L<$schema|DBIx::Class::Schema>
=back
=over 4
-=item Arguments: None
+=item Arguments: none
-=item Return value: A Storage object
+=item Return Value: L<$storage|DBIx::Class::Storage>
=back
$source->storage->debug(1);
-Returns the storage handle for the current schema.
-
-See also: L<DBIx::Class::Storage>
+Returns the L<storage handle|DBIx::Class::Storage> for the current schema.
=cut
=over 4
-=item Arguments: $relname, $related_source_name, \%cond, [ \%attrs ]
+=item Arguments: $rel_name, $related_source_name, \%cond, \%attrs?
-=item Return value: 1/true if it succeeded
+=item Return Value: 1/true if it succeeded
=back
- $source->add_relationship('relname', 'related_source', $cond, $attrs);
+ $source->add_relationship('rel_name', 'related_source', $cond, $attrs);
L<DBIx::Class::Relationship> describes a series of methods which
create pre-defined useful types of relationships. Look there first
=over 4
-=item Arguments: None
+=item Arguments: none
-=item Return value: List of relationship names
+=item Return Value: L<@rel_names|DBIx::Class::Relationship>
=back
=over 4
-=item Arguments: $relname
+=item Arguments: L<$rel_name|DBIx::Class::Relationship>
-=item Return value: Hashref of relation data,
+=item Return Value: L<\%rel_data|DBIx::Class::Relationship::Base/add_relationship>
=back
Returns a hash of relationship information for the specified relationship
-name. The keys/values are as specified for L</add_relationship>.
+name. The keys/values are as specified for L<DBIx::Class::Relationship::Base/add_relationship>.
=cut
sub relationship_info {
- my ($self, $rel) = @_;
- return $self->_relationships->{$rel};
+ #my ($self, $rel) = @_;
+ return shift->_relationships->{+shift};
}
=head2 has_relationship
=over 4
-=item Arguments: $rel
+=item Arguments: L<$rel_name|DBIx::Class::Relationship>
-=item Return value: 1/0 (true/false)
+=item Return Value: 1/0 (true/false)
=back
=cut
sub has_relationship {
- my ($self, $rel) = @_;
- return exists $self->_relationships->{$rel};
+ #my ($self, $rel) = @_;
+ return exists shift->_relationships->{+shift};
}
=head2 reverse_relationship_info
=over 4
-=item Arguments: $relname
+=item Arguments: L<$rel_name|DBIx::Class::Relationship>
-=item Return value: Hashref of relationship data
+=item Return Value: L<\%rel_data|DBIx::Class::Relationship::Base/add_relationship>
=back
my $stripped_cond = $self->__strip_relcond ($rel_info->{cond});
- my $rsrc_schema_moniker = $self->source_name
- if try { $self->schema };
+ my $registered_source_name = $self->source_name;
# this may be a partial schema or something else equally esoteric
- my $other_rsrc = try { $self->related_source($rel) }
- or return $ret;
+ my $other_rsrc = $self->related_source($rel);
# Get all the relationships for that source that related to this source
# whose foreign column set are our self columns on $rel and whose self
my $roundtrip_rsrc = try { $other_rsrc->related_source($other_rel) }
or next;
- if ($rsrc_schema_moniker and try { $roundtrip_rsrc->schema } ) {
- next unless $rsrc_schema_moniker eq $roundtrip_rsrc->source_name;
+ if ($registered_source_name) {
+    next if $registered_source_name ne ($roundtrip_rsrc->source_name || '');
}
else {
- next unless $self->result_class eq $roundtrip_rsrc->result_class;
+ next if $self->result_class ne $roundtrip_rsrc->result_class;
}
my $other_rel_info = $other_rsrc->relationship_info($other_rel);
,
-join_path => [@$jpath, { $join => $as } ],
-is_single => (
- $rel_info->{attrs}{accessor}
- &&
+ (! $rel_info->{attrs}{accessor})
+ or
first { $rel_info->{attrs}{accessor} eq $_ } (qw/single filter/)
),
-alias => $as,
- -relation_chain_depth => $seen->{-relation_chain_depth} || 0,
+ -relation_chain_depth => ( $seen->{-relation_chain_depth} || 0 ) + 1,
},
scalar $self->_resolve_condition($rel_info->{cond}, $as, $alias, $join)
];
# having already been inserted. Takes the name of the relationship and a
# hashref of columns of the related object.
sub _pk_depends_on {
- my ($self, $relname, $rel_data) = @_;
+ my ($self, $rel_name, $rel_data) = @_;
- my $relinfo = $self->relationship_info($relname);
+ my $relinfo = $self->relationship_info($rel_name);
# don't assume things if the relationship direction is specified
return $relinfo->{attrs}{is_foreign_key_constraint}
# assume anything that references our PK probably is dependent on us
# rather than vice versa, unless the far side is (a) defined or (b)
# auto-increment
- my $rel_source = $self->related_source($relname);
+ my $rel_source = $self->related_source($rel_name);
foreach my $p ($self->primary_columns) {
if (exists $keyhash->{$p}) {
-# list of non-triviail values (notmally conditions) returned as a part
+# list of non-trivial values (normally conditions) returned as a part
# of a joinfree condition hash
sub _resolve_condition {
- my ($self, $cond, $as, $for, $relname) = @_;
+ my ($self, $cond, $as, $for, $rel_name) = @_;
- my $obj_rel = !!blessed $for;
+ my $obj_rel = defined blessed $for;
if (ref $cond eq 'CODE') {
my $relalias = $obj_rel ? 'me' : $as;
self_alias => $obj_rel ? $as : $for,
foreign_alias => $relalias,
self_resultsource => $self,
- foreign_relname => $relname || ($obj_rel ? $as : $for),
+ foreign_relname => $rel_name || ($obj_rel ? $as : $for),
self_rowobj => $obj_rel ? $for : undef
});
# FIXME sanity check until things stabilize, remove at some point
$self->throw_exception (
- "A join-free condition returned for relationship '$relname' without a row-object to chain from"
+ "A join-free condition returned for relationship '$rel_name' without a row-object to chain from"
) unless $obj_rel;
# FIXME another sanity check
first { $_ !~ /^\Q$relalias.\E.+/ } keys %$joinfree_cond
) {
$self->throw_exception (
- "The join-free condition returned for relationship '$relname' must be a hash "
+ "The join-free condition returned for relationship '$rel_name' must be a hash "
.'reference with all keys being valid columns on the related result source'
);
}
}
# see which parts of the joinfree cond are conditionals
- my $relcol_list = { map { $_ => 1 } $self->related_source($relname)->columns };
+ my $relcol_list = { map { $_ => 1 } $self->related_source($rel_name)->columns };
for my $c (keys %$joinfree_cond) {
my ($colname) = $c =~ /^ (?: \Q$relalias.\E )? (.+)/x;
elsif (ref $cond eq 'ARRAY') {
my (@ret, $crosstable);
for (@$cond) {
- my ($cond, $crosstab) = $self->_resolve_condition($_, $as, $for, $relname);
+ my ($cond, $crosstab) = $self->_resolve_condition($_, $as, $for, $rel_name);
push @ret, $cond;
$crosstable ||= $crosstab;
}
return wantarray ? (\@ret, $crosstable) : \@ret;
}
else {
- $self->throw_exception ("Can't handle condition $cond for relationship '$relname' yet :(");
- }
-}
-
-# Accepts one or more relationships for the current source and returns an
-# array of column names for each of those relationships. Column names are
-# prefixed relative to the current source, in accordance with where they appear
-# in the supplied relationships.
-sub _resolve_prefetch {
- my ($self, $pre, $alias, $alias_map, $order, $pref_path) = @_;
- $pref_path ||= [];
-
- if (not defined $pre or not length $pre) {
- return ();
- }
- elsif( ref $pre eq 'ARRAY' ) {
- return
- map { $self->_resolve_prefetch( $_, $alias, $alias_map, $order, [ @$pref_path ] ) }
- @$pre;
- }
- elsif( ref $pre eq 'HASH' ) {
- my @ret =
- map {
- $self->_resolve_prefetch($_, $alias, $alias_map, $order, [ @$pref_path ] ),
- $self->related_source($_)->_resolve_prefetch(
- $pre->{$_}, "${alias}.$_", $alias_map, $order, [ @$pref_path, $_] )
- } keys %$pre;
- return @ret;
- }
- elsif( ref $pre ) {
- $self->throw_exception(
- "don't know how to resolve prefetch reftype ".ref($pre));
- }
- else {
- my $p = $alias_map;
- $p = $p->{$_} for (@$pref_path, $pre);
-
- $self->throw_exception (
- "Unable to resolve prefetch '$pre' - join alias map does not contain an entry for path: "
- . join (' -> ', @$pref_path, $pre)
- ) if (ref $p->{-join_aliases} ne 'ARRAY' or not @{$p->{-join_aliases}} );
-
- my $as = shift @{$p->{-join_aliases}};
-
- my $rel_info = $self->relationship_info( $pre );
- $self->throw_exception( $self->source_name . " has no such relationship '$pre'" )
- unless $rel_info;
- my $as_prefix = ($alias =~ /^.*?\.(.+)$/ ? $1.'.' : '');
- my $rel_source = $self->related_source($pre);
-
- if ($rel_info->{attrs}{accessor} && $rel_info->{attrs}{accessor} eq 'multi') {
- $self->throw_exception(
- "Can't prefetch has_many ${pre} (join cond too complex)")
- unless ref($rel_info->{cond}) eq 'HASH';
- my $dots = @{[$as_prefix =~ m/\./g]} + 1; # +1 to match the ".${as_prefix}"
-
- #my @col = map { (/^self\.(.+)$/ ? ("${as_prefix}.$1") : ()); }
- # values %{$rel_info->{cond}};
- my @key = map { (/^foreign\.(.+)$/ ? ($1) : ()); }
- keys %{$rel_info->{cond}};
-
- push @$order, map { "${as}.$_" } @key;
-
- if (my $rel_order = $rel_info->{attrs}{order_by}) {
- # this is kludgy and incomplete, I am well aware
- # but the parent method is going away entirely anyway
- # so sod it
- my $sql_maker = $self->storage->sql_maker;
- my ($orig_ql, $orig_qr) = $sql_maker->_quote_chars;
- my $sep = $sql_maker->name_sep;
-
- # install our own quoter, so we can catch unqualified stuff
- local $sql_maker->{quote_char} = ["\x00", "\xFF"];
-
- my $quoted_prefix = "\x00${as}\xFF";
-
- for my $chunk ( $sql_maker->_order_by_chunks ($rel_order) ) {
- my @bind;
- ($chunk, @bind) = @$chunk if ref $chunk;
-
- $chunk = "${quoted_prefix}${sep}${chunk}"
- unless $chunk =~ /\Q$sep/;
-
- $chunk =~ s/\x00/$orig_ql/g;
- $chunk =~ s/\xFF/$orig_qr/g;
- push @$order, \[$chunk, @bind];
- }
- }
- }
-
- return map { [ "${as}.$_", "${as_prefix}${pre}.$_", ] }
- $rel_source->columns;
- }
-}
-
-# Takes a selection list and generates a collapse-map representing
-# row-object fold-points. Every relationship is assigned a set of unique,
-# non-nullable columns (which may *not even be* from the same resultset)
-# and the collapser will use this information to correctly distinguish
-# data of individual to-be-row-objects.
-sub _resolve_collapse {
- my ($self, $as, $as_fq_idx, $rel_chain, $parent_info) = @_;
-
- # for comprehensible error messages put ourselves at the head of the relationship chain
- $rel_chain ||= [ $self->source_name ];
-
- # record top-level fully-qualified column index
- $as_fq_idx ||= { %$as };
-
- my ($my_cols, $rel_cols);
- for (keys %$as) {
- if ($_ =~ /^ ([^\.]+) \. (.+) /x) {
- $rel_cols->{$1}{$2} = 1;
- }
- else {
- $my_cols->{$_} = {}; # important for ||= below
- }
- }
-
- my $relinfo;
- # run through relationships, collect metadata, inject non-left fk-bridges from
- # *INNER-JOINED* children (if any)
- for my $rel (keys %$rel_cols) {
- my $rel_src = $self->related_source ($rel);
- my $inf = $self->relationship_info ($rel);
-
- $relinfo->{$rel}{is_single} = $inf->{attrs}{accessor} && $inf->{attrs}{accessor} ne 'multi';
- $relinfo->{$rel}{is_inner} = ( $inf->{attrs}{join_type} || '' ) !~ /^left/i;
- $relinfo->{$rel}{rsrc} = $rel_src;
-
- my $cond = $inf->{cond};
-
- if (
- ref $cond eq 'HASH'
- and
- keys %$cond
- and
- ! List::Util::first { $_ !~ /^foreign\./ } (keys %$cond)
- and
- ! List::Util::first { $_ !~ /^self\./ } (values %$cond)
- ) {
- for my $f (keys %$cond) {
- my $s = $cond->{$f};
- $_ =~ s/^ (?: foreign | self ) \.//x for ($f, $s);
- $relinfo->{$rel}{fk_map}{$s} = $f;
-
- $my_cols->{$s} ||= { via_fk => "$rel.$f" } # need to know source from *our* pov
- if ($relinfo->{$rel}{is_inner} && defined $rel_cols->{$rel}{$f}); # only if it is inner and in fact selected of course
- }
- }
- }
-
- # if the parent is already defined, assume all of its related FKs are selected
- # (even if they in fact are NOT in the select list). Keep a record of what we
- # assumed, and if any such phantom-column becomes part of our own collapser,
- # throw everything assumed-from-parent away and replace with the collapser of
- # the parent (whatever it may be)
- my $assumed_from_parent;
- unless ($parent_info->{underdefined}) {
- $assumed_from_parent->{columns} = { map
- # only add to the list if we do not already select said columns
- { ! exists $my_cols->{$_} ? ( $_ => 1 ) : () }
- values %{$parent_info->{rel_condition} || {}}
- };
-
- $my_cols->{$_} = { via_collapse => $parent_info->{collapse_on} }
- for keys %{$assumed_from_parent->{columns}};
- }
-
- # get colinfo for everything
- if ($my_cols) {
- $my_cols->{$_}{colinfo} = (
- $self->has_column ($_) ? $self->column_info ($_) : undef
- ) for keys %$my_cols;
- }
-
- my $collapse_map;
-
- # try to resolve based on our columns (plus already inserted FK bridges)
- if (
- $my_cols
- and
- my $uset = $self->_unique_column_set ($my_cols)
- ) {
- # see if the resulting collapser relies on any implied columns,
- # and fix stuff up if this is the case
-
- my $parent_collapser_used;
-
- if (List::Util::first
- { exists $assumed_from_parent->{columns}{$_} }
- keys %$uset
- ) {
- # remove implied stuff from the uset, we will inject the equivalent collapser a bit below
- delete @{$uset}{keys %{$assumed_from_parent->{columns}}};
- $parent_collapser_used = 1;
- }
-
- $collapse_map->{-collapse_on} = {
- %{ $parent_collapser_used ? $parent_info->{collapse_on} : {} },
- (map
- {
- my $fqc = join ('.',
- @{$rel_chain}[1 .. $#$rel_chain],
- ( $my_cols->{$_}{via_fk} || $_ ),
- );
-
- $fqc => $as_fq_idx->{$fqc};
- }
- keys %$uset
- ),
- };
- }
-
- # don't know how to collapse - keep descending down 1:1 chains - if
- # a related non-LEFT 1:1 is resolvable - its condition will collapse us
- # too
- unless ($collapse_map->{-collapse_on}) {
- my @candidates;
-
- for my $rel (keys %$relinfo) {
- next unless ($relinfo->{$rel}{is_single} && $relinfo->{$rel}{is_inner});
-
- if ( my $rel_collapse = $relinfo->{$rel}{rsrc}->_resolve_collapse (
- $rel_cols->{$rel},
- $as_fq_idx,
- [ @$rel_chain, $rel ],
- { underdefined => 1 }
- )) {
- push @candidates, $rel_collapse->{-collapse_on};
- }
- }
-
- # get the set with least amount of columns
- # FIXME - maybe need to implement a data type order as well (i.e. prefer several ints
- # to a single varchar)
- if (@candidates) {
- ($collapse_map->{-collapse_on}) = sort { keys %$a <=> keys %$b } (@candidates);
- }
- }
-
- # Still dont know how to collapse - see if the parent passed us anything
- # (i.e. reuse collapser over 1:1)
- unless ($collapse_map->{-collapse_on}) {
- $collapse_map->{-collapse_on} = $parent_info->{collapse_on}
- if $parent_info->{collapser_reusable};
- }
-
-
- # stop descending into children if we were called by a parent for first-pass
- # and don't despair if nothing was found (there may be other parallel branches
- # to dive into)
- if ($parent_info->{underdefined}) {
- return $collapse_map->{-collapse_on} ? $collapse_map : undef
- }
- # nothing down the chain resolved - can't calculate a collapse-map
- elsif (! $collapse_map->{-collapse_on}) {
- $self->throw_exception ( sprintf
- "Unable to calculate a definitive collapse column set for %s%s: fetch more unique non-nullable columns",
- $self->source_name,
- @$rel_chain > 1
- ? sprintf (' (last member of the %s chain)', join ' -> ', @$rel_chain )
- : ''
- ,
- );
- }
-
-
- # If we got that far - we are collapsable - GREAT! Now go down all children
- # a second time, and fill in the rest
-
- for my $rel (keys %$relinfo) {
-
- $collapse_map->{$rel} = $relinfo->{$rel}{rsrc}->_resolve_collapse (
- { map { $_ => 1 } ( keys %{$rel_cols->{$rel}} ) },
-
- $as_fq_idx,
-
- [ @$rel_chain, $rel],
-
- {
- collapse_on => { %{$collapse_map->{-collapse_on}} },
-
- rel_condition => $relinfo->{$rel}{fk_map},
-
- # if this is a 1:1 our own collapser can be used as a collapse-map
- # (regardless of left or not)
- collapser_reusable => $relinfo->{$rel}{is_single},
- },
- );
- }
-
- return $collapse_map;
-}
-
-sub _unique_column_set {
- my ($self, $cols) = @_;
-
- my %unique = $self->unique_constraints;
-
- # always prefer the PK first, and then shortest constraints first
- USET:
- for my $set (delete $unique{primary}, sort { @$a <=> @$b } (values %unique) ) {
- next unless $set && @$set;
-
- for (@$set) {
- next USET unless ($cols->{$_} && $cols->{$_}{colinfo} && !$cols->{$_}{colinfo}{is_nullable} );
- }
-
- return { map { $_ => 1 } @$set };
- }
-
- return undef;
-}
-
-# Takes an arrayref of {as} dbic column aliases and the collapse and select
-# attributes from the same $rs (the slector requirement is a temporary
-# workaround), and returns a coderef capable of:
-# my $me_pref_clps = $coderef->([$rs->cursor->next])
-# Where the $me_pref_clps arrayref is the future argument to
-# ::ResultSet::_collapse_result.
-#
-# $me_pref_clps->[0] is always returned (even if as an empty hash with no
-# rowdata), however branches of related data in $me_pref_clps->[1] may be
-# pruned short of what was originally requested based on {as}, depending
-# on:
-#
-# * If collapse is requested, a definitive collapse map is calculated for
-# every relationship "fold-point", consisting of a set of values (which
-# may not even be contained in the future 'me' of said relationship
-# (for example a cd.artist_id defines the related inner-joined artist)).
-# Thus a definedness check is carried on all collapse-condition values
-# and if at least one is undef it is assumed that we are dealing with a
-# NULLed right-side of a left-join, so we don't return a related data
-# container at all, which implies no related objects
-#
-# * If we are not collapsing, there is no constraint on having a selector
-# uniquely identifying all possible objects, and the user might have very
-# well requested a column that just *happens* to be all NULLs. What we do
-# in this case is fallback to the old behavior (which is a potential FIXME)
-# by always returning a data container, but only filling it with columns
-# IFF at least one of them is defined. This way we do not get an object
-# with a bunch of has_column_loaded to undef, but at the same time do not
-# further relationships based off this "null" object (e.g. in case the user
-# deliberately skipped link-table values). I am pretty sure there are some
-# tests that codify this behavior, need to find the exact testname.
-#
-# For an example of this coderef in action (and to see its guts) look at
-# t/prefetch/_internals.t
-#
-# This is a huge performance win, as we call the same code for
-# every row returned from the db, thus avoiding repeated method
-# lookups when traversing relationships
-#
-# Also since the coderef is completely stateless (the returned structure is
-# always fresh on every new invocation) this is a very good opportunity for
-# memoization if further speed improvements are needed
-#
-# The way we construct this coderef is somewhat fugly, although I am not
-# sure if the string eval is *that* bad of an idea. The alternative is to
-# have a *very* large number of anon coderefs calling each other in a twisty
-# maze, whereas the current result is a nice, smooth, single-pass function.
-# In any case - the output of this thing is meticulously micro-tested, so
-# any sort of rewrite should be relatively easy
-#
-sub _mk_row_parser {
- my ($self, $as, $with_collapse, $select) = @_;
-
- my $as_indexed = { map
- { $as->[$_] => $_ }
- ( 0 .. $#$as )
- };
-
- # calculate collapse fold-points if needed
- my $collapse_on = do {
- # FIXME
- # only consider real columns (not functions) during collapse resolution
- # this check shouldn't really be here, as fucktards are not supposed to
- # alias random crap to existing column names anyway, but still - just in
- # case (also saves us from select/as mismatches which need fixing as well...)
-
- my $plain_as = { %$as_indexed };
- for (keys %$plain_as) {
- delete $plain_as->{$_} if ref $select->[$plain_as->{$_}];
- }
- $self->_resolve_collapse ($plain_as);
-
- } if $with_collapse;
-
- my $perl = $self->__visit_as ($as_indexed, $collapse_on);
- my $cref = eval "sub { $perl }"
- or die "Oops! _mk_row_parser generated invalid perl:\n$@\n\n$perl\n";
- return $cref;
-}
-
-{
- my $visit_as_dumper; # keep our own DD object around so we don't have to fitz with quoting
-
- sub __visit_as {
- my ($self, $as, $collapse_on, $known_defined) = @_;
- $known_defined ||= {};
-
- # prepopulate the known defined map with our own collapse value positions
- # the rationale is that if an Artist needs column 0 to be uniquely
- # identified, and related CDs need columns 0 and 1, by the time we get to
- # CDs we already know that column 0 is defined (otherwise there would be
- # no related CDs as there is no Artist in the 1st place). So we use this
- # index to cut on repetitive defined() checks.
- $known_defined->{$_}++ for ( values %{$collapse_on->{-collapse_on} || {}} );
-
- my $my_cols = {};
- my $rel_cols;
- for (keys %$as) {
- if ($_ =~ /^ ([^\.]+) \. (.+) /x) {
- $rel_cols->{$1}{$2} = $as->{$_};
- }
- else {
- $my_cols->{$_} = $as->{$_};
- }
- }
-
- my @relperl;
- for my $rel (sort keys %$rel_cols) {
- my $rel_node = $self->__visit_as($rel_cols->{$rel}, $collapse_on->{$rel}, {%$known_defined} );
-
- my @null_checks;
- if ($collapse_on->{$rel}{-collapse_on}) {
- @null_checks = map
- { "(! defined '__VALPOS__${_}__')" }
- ( grep
- { ! $known_defined->{$_} }
- ( sort
- { $a <=> $b }
- values %{$collapse_on->{$rel}{-collapse_on}}
- )
- )
- ;
- }
-
- if (@null_checks) {
- push @relperl, sprintf ( '(%s) ? () : ( %s => %s )',
- join (' || ', @null_checks ),
- $rel,
- $rel_node,
- );
- }
- else {
- push @relperl, "$rel => $rel_node";
- }
- }
- my $rels = @relperl
- ? sprintf ('{ %s }', join (',', @relperl))
- : 'undef'
- ;
-
- my $me = {
- map { $_ => "__VALPOS__$my_cols->{$_}__" } (keys %$my_cols)
- };
-
- my $clps = undef; # funny thing, but this prevents a memory leak, I guess it's Data::Dumper#s fault (mo)
- $clps = [
- map { "__VALPOS__${_}__" } ( sort { $a <=> $b } (values %{$collapse_on->{-collapse_on}}) )
- ] if $collapse_on->{-collapse_on};
-
- # we actually will be producing functional perl code here,
- # thus no second-guessing of what these globals might have
- # been set to. DO NOT CHANGE!
- $visit_as_dumper ||= do {
- require Data::Dumper;
- Data::Dumper->new([])
- ->Purity (1)
- ->Pad ('')
- ->Useqq (0)
- ->Terse (1)
- ->Quotekeys (1)
- ->Deepcopy (1)
- ->Deparse (0)
- ->Maxdepth (0)
- ->Indent (0)
- };
- for ($me, $clps) {
- $_ = $visit_as_dumper->Values ([$_])->Dump;
- }
-
- unless ($collapse_on->{-collapse_on}) { # we are not collapsing, insert a definedness check on 'me'
- $me = sprintf ( '(%s) ? %s : {}',
- join (' || ', map { "( defined '__VALPOS__${_}__')" } (sort { $a <=> $b } values %$my_cols) ),
- $me,
- );
- }
-
- my @rv_list = ($me, $rels, $clps);
- pop @rv_list while ($rv_list[-1] eq 'undef'); # strip trailing undefs
-
- # change the quoted placeholders to unquoted alias-references
- $_ =~ s/ \' __VALPOS__(\d+)__ \' /sprintf ('$_[0][%d]', $1)/gex
- for grep { defined $_ } @rv_list;
- return sprintf '[%s]', join (',', @rv_list);
+ $self->throw_exception ("Can't handle condition $cond for relationship '$rel_name' yet :(");
}
}
=over 4
-=item Arguments: $relname
+=item Arguments: $rel_name
-=item Return value: $source
+=item Return Value: $source
=back
=over 4
-=item Arguments: $relname
+=item Arguments: $rel_name
-=item Return value: $classname
+=item Return Value: $classname
=back
=over 4
-=item Arguments: None
+=item Arguments: none
-=item Return value: $source_handle
+=item Return Value: L<$source_handle|DBIx::Class::ResultSourceHandle>
=back
=item Arguments: 1/0 (default: 0)
-=item Return value: 1/0
+=item Return Value: 1/0
=back
should not be used. It will be removed before 1.0.
-=head1 AUTHORS
+=head1 AUTHOR AND CONTRIBUTORS
-Matt S. Trout <mst@shadowcatsystems.co.uk>
+See L<AUTHOR|DBIx::Class/AUTHOR> and L<CONTRIBUTORS|DBIx::Class/CONTRIBUTORS> in DBIx::Class
=head1 LICENSE