use strict;
use warnings;
- use overload
- '0+' => "count",
- 'bool' => "_bool",
- fallback => 1;
- use Carp::Clan qw/^DBIx::Class/;
+ use base qw/DBIx::Class/;
+ use DBIx::Class::Carp;
use DBIx::Class::Exception;
- use Data::Page;
- use Storable;
use DBIx::Class::ResultSetColumn;
- use DBIx::Class::ResultSourceHandle;
+ use Scalar::Util qw/blessed weaken/;
+ use Try::Tiny;
+ use Data::Compare (); # no imports!!! guard against insane architecture
+
+ # not importing first() as it will clash with our own method
use List::Util ();
- use Scalar::Util ();
- use base qw/DBIx::Class/;
+ BEGIN {
+ # De-duplication in _merge_attr() is disabled, but left in for reference
+ # (the merger is used for other things that ought not to be de-duped)
+ *__HM_DEDUP = sub () { 0 };
+ }
- #use Test::Deep::NoTest (qw/eq_deeply/);
- use Data::Dumper::Concise;
+ use namespace::clean;
- __PACKAGE__->mk_group_accessors('simple' => qw/_result_class _source_handle/);
+ use overload
+ '0+' => "count",
+ 'bool' => "_bool",
+ fallback => 1;
+
+ __PACKAGE__->mk_group_accessors('simple' => qw/_result_class result_source/);
=head1 NAME
=head1 SYNOPSIS
my $users_rs = $schema->resultset('User');
+ while( $user = $users_rs->next) {
+ print $user->username;
+ }
+
my $registered_users_rs = $schema->resultset('User')->search({ registered => 1 });
my @cds_in_2005 = $schema->resultset('CD')->search({ year => 2005 })->all();
The query that the ResultSet represents is B<only> executed against
the database when these methods are called:
- L</find> L</next> L</all> L</first> L</single> L</count>
+ L</find>, L</next>, L</all>, L</first>, L</single>, L</count>.
+
+ If a resultset is used in a numeric context it returns the L</count>.
+ However, if it is used in a boolean context it is B<always> true. So if
+ you want to check if a resultset has any results, you must use C<if $rs
+ != 0>.
+
+ =head1 CUSTOM ResultSet CLASSES THAT USE Moose
+
+ If you want to make your custom ResultSet classes with L<Moose>, use a template
+ similar to:
+
+ package MyApp::Schema::ResultSet::User;
+
+ use Moose;
+ use namespace::autoclean;
+ use MooseX::NonMoose;
+ extends 'DBIx::Class::ResultSet';
+
+ sub BUILDARGS { $_[2] }
+
+ ...your code...
+
+ __PACKAGE__->meta->make_immutable;
+
+ 1;
+
+ The L<MooseX::NonMoose> is necessary so that the L<Moose> constructor does not
+ clash with the regular ResultSet constructor. Alternatively, you can use:
+
+ __PACKAGE__->meta->make_immutable(inline_constructor => 0);
+
+ The L<BUILDARGS|Moose::Manual::Construction/BUILDARGS> is necessary because the
+ signature of the ResultSet C<new> is C<< ->new($source, \%args) >>.
=head1 EXAMPLES
sub get_data {
my $self = shift;
my $request = $self->get_request; # Get a request object somehow.
- my $schema = $self->get_schema; # Get the DBIC schema object somehow.
+ my $schema = $self->result_source->schema;
my $cd_rs = $schema->resultset('CD')->search({
title => $request->param('title'),
year => $request->param('year'),
});
- $self->apply_security_policy( $cd_rs );
+ $cd_rs = $self->apply_security_policy( $cd_rs );
return $cd_rs->all();
}
L</join>, L</prefetch>, L</+select>, L</+as> attributes are merged
into the existing ones from the original resultset.
- The L</where>, L</having> attribute, and any search conditions are
+ The L</where> and L</having> attributes, and any search conditions, are
merged with an SQL C<AND> to the existing condition from the original
resultset.
See: L</search>, L</count>, L</get_column>, L</all>, L</create>.
- =head1 OVERLOADING
-
- If a resultset is used in a numeric context it returns the L</count>.
- However, if it is used in a boolean context it is always true. So if
- you want to check if a resultset has any results use C<if $rs != 0>.
- C<if $rs> will always be true.
-
=head1 METHODS
=head2 new
return $class->new_result(@_) if ref $class;
my ($source, $attrs) = @_;
- $source = $source->handle
- unless $source->isa('DBIx::Class::ResultSourceHandle');
+ $source = $source->resolve
+ if $source->isa('DBIx::Class::ResultSourceHandle');
$attrs = { %{$attrs||{}} };
if ($attrs->{page}) {
$attrs->{alias} ||= 'me';
- # Creation of {} and bless separated to mitigate RH perl bug
- # see https://bugzilla.redhat.com/show_bug.cgi?id=196836
- my $self = {
- _source_handle => $source,
+ my $self = bless {
+ result_source => $source,
cond => $attrs->{where},
- count => undef,
pager => undef,
- attrs => $attrs
- };
+ attrs => $attrs,
+ }, $class;
- bless $self, $class;
+ # if there is a dark selector, this means we are already in a
+ # chain and the cleanup/sanification was taken care of by
+ # _search_rs already
+ $self->_normalize_selection($attrs)
+ unless $attrs->{_dark_selector};
$self->result_class(
- $attrs->{result_class} || $source->resolve->result_class
+ $attrs->{result_class} || $source->result_class
);
- return $self;
+ $self;
}
=head2 search
=item Arguments: $cond, \%attrs?
- =item Return Value: $resultset (scalar context), @row_objs (list context)
+ =item Return Value: $resultset (scalar context) || @row_objs (list context)
=back
my $new_rs = $cd_rs->search([ { year => 2005 }, { year => 2004 } ]);
# year = 2005 OR year = 2004
+ In list context, C<< ->all() >> is called implicitly on the resultset, thus
+ returning a list of row objects instead. To avoid that, use L</search_rs>.
+
If you need to pass in additional attributes but no additional condition,
call it as C<search(undef, \%attrs)>.
For a list of attributes that can be passed to C<search>, see
L</ATTRIBUTES>. For more examples of using this function, see
L<Searching|DBIx::Class::Manual::Cookbook/Searching>. For a complete
- documentation for the first argument, see L<SQL::Abstract>.
+ documentation for the first argument, see L<SQL::Abstract>
+ and its extension L<DBIx::Class::SQLMaker>.
For more help on using joins with search, see L<DBIx::Class::Manual::Joining>.
+ =head3 CAVEAT
+
+ Note that L</search> does not process/deflate any of the values passed in the
+ L<SQL::Abstract>-compatible search condition structure. This is unlike other
+ condition-bound methods L</new>, L</create> and L</find>. The user must ensure
+ manually that any value passed to this method will stringify to something the
+ RDBMS knows how to deal with. A notable example is the handling of L<DateTime>
+ objects, for more info see:
+ L<DBIx::Class::Manual::Cookbook/Formatting DateTime objects in queries>.
+
=cut
# Chainable search. All real work is delegated to search_rs(); this wrapper
# only adapts the return value to the caller's context:
#   list context   => implicit ->all (list of row objects)
#   scalar context => the new resultset
#   void context   => an exception (for non-DBIC callers), since search()
#                     is not a mutator and discarding its result is a bug
sub search {
  my $self = shift;
  my $rs = $self->search_rs( @_ );

  if (wantarray) {
    return $rs->all;
  }
  elsif (defined wantarray) {
    return $rs;
  }
  else {
    # we can be called by a relationship helper, which in
    # turn may be called in void context due to some braindead
    # overload or whatever else the user decided to be clever
    # at this particular day. Thus limit the exception to
    # external code calls only
    $self->throw_exception ('->search is *not* a mutator, calling it in void context makes no sense')
      if (caller)[0] !~ /^\QDBIx::Class::/;

    return ();
  }
}
=head2 search_rs
# Special-case handling for (undef, undef).
if ( @_ == 2 && !defined $_[1] && !defined $_[0] ) {
- pop(@_); pop(@_);
+ @_ = ();
}
- my $attrs = {};
- $attrs = pop(@_) if @_ > 1 and ref $_[$#_] eq 'HASH';
- my $our_attrs = { %{$self->{attrs}} };
- my $having = delete $our_attrs->{having};
- my $where = delete $our_attrs->{where};
-
- my $rows;
+ my $call_attrs = {};
+ if (@_ > 1) {
+ if (ref $_[-1] eq 'HASH') {
+ # copy for _normalize_selection
+ $call_attrs = { %{ pop @_ } };
+ }
+ elsif (! defined $_[-1] ) {
+ pop @_; # search({}, undef)
+ }
+ }
+ # see if we can keep the cache (no $rs changes)
+ my $cache;
my %safe = (alias => 1, cache => 1);
+ if ( ! List::Util::first { !$safe{$_} } keys %$call_attrs and (
+ ! defined $_[0]
+ or
+ ref $_[0] eq 'HASH' && ! keys %{$_[0]}
+ or
+ ref $_[0] eq 'ARRAY' && ! @{$_[0]}
+ )) {
+ $cache = $self->get_cache;
+ }
- unless (
- (@_ && defined($_[0])) # @_ == () or (undef)
- ||
- (keys %$attrs # empty attrs or only 'safe' attrs
- && List::Util::first { !$safe{$_} } keys %$attrs)
- ) {
- # no search, effectively just a clone
- $rows = $self->get_cache;
- }
-
- # reset the selector list
- if (List::Util::first { exists $attrs->{$_} } qw{columns select as}) {
- delete @{$our_attrs}{qw{select as columns +select +as +columns include_columns}};
- }
-
- my $new_attrs = { %{$our_attrs}, %{$attrs} };
-
- # merge new attrs into inherited
- foreach my $key (qw/join prefetch +select +as +columns include_columns bind/) {
- next unless exists $attrs->{$key};
- $new_attrs->{$key} = $self->_merge_attr($our_attrs->{$key}, $attrs->{$key});
- }
-
- my $cond = (@_
- ? (
- (@_ == 1 || ref $_[0] eq "HASH")
- ? (
- (ref $_[0] eq 'HASH')
- ? (
- (keys %{ $_[0] } > 0)
- ? shift
- : undef
- )
- : shift
- )
- : (
- (@_ % 2)
- ? $self->throw_exception("Odd number of arguments to search")
- : {@_}
- )
- )
- : undef
- );
+ my $rsrc = $self->result_source;
- if (defined $where) {
- $new_attrs->{where} = (
- defined $new_attrs->{where}
- ? { '-and' => [
- map {
- ref $_ eq 'ARRAY' ? [ -or => $_ ] : $_
- } $where, $new_attrs->{where}
- ]
- }
- : $where);
+ my $old_attrs = { %{$self->{attrs}} };
+ my $old_having = delete $old_attrs->{having};
+ my $old_where = delete $old_attrs->{where};
+
+ my $new_attrs = { %$old_attrs };
+
+ # take care of call attrs (only if anything is changing)
+ if (keys %$call_attrs) {
+
+ my @selector_attrs = qw/select as columns cols +select +as +columns include_columns/;
+
+ # reset the current selector list if new selectors are supplied
+ if (List::Util::first { exists $call_attrs->{$_} } qw/columns cols select as/) {
+ delete @{$old_attrs}{(@selector_attrs, '_dark_selector')};
+ }
+
+ # Normalize the new selector list (operates on the passed-in attr structure)
+ # Need to do it on every chain instead of only once on _resolved_attrs, in
+ # order to allow detection of empty vs partial 'as'
+ $call_attrs->{_dark_selector} = $old_attrs->{_dark_selector}
+ if $old_attrs->{_dark_selector};
+ $self->_normalize_selection ($call_attrs);
+
+ # start with blind overwriting merge, exclude selector attrs
+ $new_attrs = { %{$old_attrs}, %{$call_attrs} };
+ delete @{$new_attrs}{@selector_attrs};
+
+ for (@selector_attrs) {
+ $new_attrs->{$_} = $self->_merge_attr($old_attrs->{$_}, $call_attrs->{$_})
+ if ( exists $old_attrs->{$_} or exists $call_attrs->{$_} );
+ }
+
+ # older deprecated name, use only if {columns} is not there
+ if (my $c = delete $new_attrs->{cols}) {
+ if ($new_attrs->{columns}) {
+ carp "Resultset specifies both the 'columns' and the legacy 'cols' attributes - ignoring 'cols'";
+ }
+ else {
+ $new_attrs->{columns} = $c;
+ }
+ }
+
+
+ # join/prefetch use their own crazy merging heuristics
+ foreach my $key (qw/join prefetch/) {
+ $new_attrs->{$key} = $self->_merge_joinpref_attr($old_attrs->{$key}, $call_attrs->{$key})
+ if exists $call_attrs->{$key};
+ }
+
+ # stack binds together
+ $new_attrs->{bind} = [ @{ $old_attrs->{bind} || [] }, @{ $call_attrs->{bind} || [] } ];
}
- if (defined $cond) {
- $new_attrs->{where} = (
- defined $new_attrs->{where}
- ? { '-and' => [
- map {
- ref $_ eq 'ARRAY' ? [ -or => $_ ] : $_
- } $cond, $new_attrs->{where}
- ]
- }
- : $cond);
+
+ # rip apart the rest of @_, parse a condition
+ my $call_cond = do {
+
+ if (ref $_[0] eq 'HASH') {
+ (keys %{$_[0]}) ? $_[0] : undef
+ }
+ elsif (@_ == 1) {
+ $_[0]
+ }
+ elsif (@_ % 2) {
+ $self->throw_exception('Odd number of arguments to search')
+ }
+ else {
+ +{ @_ }
+ }
+
+ } if @_;
+
+ if( @_ > 1 and ! $rsrc->result_class->isa('DBIx::Class::CDBICompat') ) {
+ carp_unique 'search( %condition ) is deprecated, use search( \%condition ) instead';
}
- if (defined $having) {
- $new_attrs->{having} = (
- defined $new_attrs->{having}
- ? { '-and' => [
- map {
- ref $_ eq 'ARRAY' ? [ -or => $_ ] : $_
- } $having, $new_attrs->{having}
- ]
- }
- : $having);
+ for ($old_where, $call_cond) {
+ if (defined $_) {
+ $new_attrs->{where} = $self->_stack_cond (
+ $_, $new_attrs->{where}
+ );
+ }
}
- my $rs = (ref $self)->new($self->result_source, $new_attrs);
+ if (defined $old_having) {
+ $new_attrs->{having} = $self->_stack_cond (
+ $old_having, $new_attrs->{having}
+ )
+ }
- $rs->set_cache($rows) if ($rows);
+ my $rs = (ref $self)->new($rsrc, $new_attrs);
+
+ $rs->set_cache($cache) if ($cache);
return $rs;
}
# lazily-built Data::Dumper::Concise dumper, shared across calls for
# stringifying un-aliasable selectors into {_dark_selector}{string}
my $dark_sel_dumper;

# Normalizes the selector attributes ({+}select / {+}as / {+}columns /
# include_columns) in-place on the supplied attr hashref: pairs up each
# select entry with an 'as' alias where one can be inferred, and raises
# the {_dark_selector} flag when a selector cannot be aliased (after
# which no further named additions to the 'as' chain are permitted).
sub _normalize_selection {
  my ($self, $attrs) = @_;

  # legacy syntax
  $attrs->{'+columns'} = $self->_merge_attr($attrs->{'+columns'}, delete $attrs->{include_columns})
    if exists $attrs->{include_columns};

  # columns are always placed first, however

  # Keep the X vs +X separation until _resolved_attrs time - this allows to
  # delay the decision on whether to use a default select list ($rsrc->columns)
  # allowing stuff like the remove_columns helper to work
  #
  # select/as +select/+as pairs need special handling - the amount of select/as
  # elements in each pair does *not* have to be equal (think multicolumn
  # selectors like distinct(foo, bar) ). If the selector is bare (no 'as'
  # supplied at all) - try to infer the alias, either from the -as parameter
  # of the selector spec, or use the parameter whole if it looks like a column
  # name (ugly legacy heuristic). If all fails - leave the selector bare (which
  # is ok as well), but make sure no more additions to the 'as' chain take place
  for my $pref ('', '+') {

    # pull out the (possibly scalar) select/as values as arrayrefs,
    # removing them from $attrs so they can be re-merged below
    my ($sel, $as) = map {
      my $key = "${pref}${_}";

      my $val = [ ref $attrs->{$key} eq 'ARRAY'
        ? @{$attrs->{$key}}
        : $attrs->{$key} || ()
      ];
      delete $attrs->{$key};
      $val;
    } qw/select as/;

    if (! @$as and ! @$sel ) {
      next;
    }
    elsif (@$as and ! @$sel) {
      $self->throw_exception(
        "Unable to handle ${pref}as specification (@$as) without a corresponding ${pref}select"
      );
    }
    elsif( ! @$as ) {
      # no as part supplied at all - try to deduce (unless explicit end of named selection is declared)
      # if any @$as has been supplied we assume the user knows what (s)he is doing
      # and blindly keep stacking up pieces
      unless ($attrs->{_dark_selector}) {
        SELECTOR:
        for (@$sel) {
          if ( ref $_ eq 'HASH' and exists $_->{-as} ) {
            push @$as, $_->{-as};
          }
          # assume any plain no-space, no-parenthesis string to be a column spec
          # FIXME - this heuristic is ugly, but is necessary to support legacy
          # selectors like 'count(foo)'
          elsif ( ! ref $_ and $_ =~ /^ [^\s\(\)]+ $/x) {
            push @$as, $_;
          }
          # if all else fails - raise a flag that no more aliasing will be allowed
          else {
            $attrs->{_dark_selector} = {
              plus_stage => $pref,
              string => ($dark_sel_dumper ||= do {
                require Data::Dumper::Concise;
                Data::Dumper::Concise::DumperObject()->Indent(0);
              })->Values([$_])->Dump
              ,
            };
            last SELECTOR;
          }
        }
      }
    }
    elsif (@$as < @$sel) {
      $self->throw_exception(
        "Unable to handle an ${pref}as specification (@$as) with less elements than the corresponding ${pref}select"
      );
    }
    elsif ($pref and $attrs->{_dark_selector}) {
      $self->throw_exception(
        "Unable to process named '+select', resultset contains an unnamed selector $attrs->{_dark_selector}{string}"
      );
    }

    # merge result
    $attrs->{"${pref}select"} = $self->_merge_attr($attrs->{"${pref}select"}, $sel);
    $attrs->{"${pref}as"} = $self->_merge_attr($attrs->{"${pref}as"}, $as);
  }
}
+
# Combines two SQL::Abstract-style conditions ($left from the existing
# resultset, $right newly supplied) into a single condition. Simplifies
# trivially-collapsible structures first, weeds out exact duplicates when
# both sides are plain hashes, then ANDs whatever remains. Returns the
# surviving side when only one is defined, undef when neither is.
sub _stack_cond {
  my ($self, $left, $right) = @_;

  # collapse single element top-level conditions
  # (single pass only, unlikely to need recursion)
  # NOTE: assigning to $_ here rewrites $left/$right via aliasing
  for ($left, $right) {
    if (ref $_ eq 'ARRAY') {
      if (@$_ == 0) {
        $_ = undef;
      }
      elsif (@$_ == 1) {
        $_ = $_->[0];
      }
    }
    elsif (ref $_ eq 'HASH') {
      my ($first, $more) = keys %$_;

      # empty hash
      if (! defined $first) {
        $_ = undef;
      }
      # one element hash
      elsif (! defined $more) {
        if ($first eq '-and' and ref $_->{'-and'} eq 'HASH') {
          $_ = $_->{'-and'};
        }
        elsif ($first eq '-or' and ref $_->{'-or'} eq 'ARRAY') {
          $_ = $_->{'-or'};
        }
      }
    }
  }

  # merge hashes with weeding out of duplicates (simple cases only)
  if (ref $left eq 'HASH' and ref $right eq 'HASH') {

    # shallow copy to destroy
    $right = { %$right };
    for (grep { exists $right->{$_} } keys %$left) {
      # the use of a deep comparison (Data::Compare) here is justified -
      # the rhs of an expression can contain a lot of twisted weird stuff
      delete $right->{$_} if Data::Compare::Compare( $left->{$_}, $right->{$_} );
    }

    $right = undef unless keys %$right;
  }

  if (defined $left xor defined $right) {
    return defined $left ? $left : $right;
  }
  elsif (! defined $left) {
    return undef;
  }
  else {
    return { -and => [ $left, $right ] };
  }
}
+
=head2 search_literal
=over 4
=item Arguments: $sql_fragment, @bind_values
- =item Return Value: $resultset (scalar context), @row_objs (list context)
+ =item Return Value: $resultset (scalar context) || @row_objs (list context)
=back
=over 4
- =item Arguments: @values | \%cols, \%attrs?
+ =item Arguments: \%columns_values | @pk_values, \%attrs?
=item Return Value: $row_object | undef
=back
- Finds a row based on its primary key or unique constraint. For example, to find
- a row by its primary key:
+ Finds and returns a single row based on supplied criteria. Takes either a
+ hashref with the same format as L</create> (including inference of foreign
+ keys from related objects), or a list of primary key values in the same
+ order as the L<primary columns|DBIx::Class::ResultSource/primary_columns>
+ declaration on the L</result_source>.
+
+ In either case an attempt is made to combine conditions already existing on
+ the resultset with the condition passed to this method.
+
+ To aid with preparing the correct query for the storage you may supply the
+ C<key> attribute, which is the name of a
+ L<unique constraint|DBIx::Class::ResultSource/add_unique_constraint> (the
+ unique constraint corresponding to the
+ L<primary columns|DBIx::Class::ResultSource/primary_columns> is always named
+ C<primary>). If the C<key> attribute has been supplied, and DBIC is unable
+ to construct a query that satisfies the named unique constraint fully (
+ non-NULL values for each column member of the constraint) an exception is
+ thrown.
+
+ If no C<key> is specified, the search is carried over all unique constraints
+ which are fully defined by the available condition.
+
+ If no such constraint is found, C<find> currently defaults to a simple
+ C<< search->(\%column_values) >> which may or may not do what you expect.
+ Note that this fallback behavior may be deprecated in further versions. If
+ you need to search with arbitrary conditions - use L</search>. If the query
+ resulting from this fallback produces more than one row, a warning to the
+ effect is issued, though only the first row is constructed and returned as
+ C<$row_object>.
- my $cd = $schema->resultset('CD')->find(5);
+ In addition to C<key>, L</find> recognizes and applies standard
+ L<resultset attributes|/ATTRIBUTES> in the same way as L</search> does.
- You can also find a row by a specific unique constraint using the C<key>
- attribute. For example:
+ Note that if you have extra concerns about the correctness of the resulting
+ query you need to specify the C<key> attribute and supply the entire condition
+ as an argument to find (since it is not always possible to perform the
+ combination of the resultset condition with the supplied one, especially if
+ the resultset condition contains literal sql).
- my $cd = $schema->resultset('CD')->find('Massive Attack', 'Mezzanine', {
- key => 'cd_artist_title'
- });
+ For example, to find a row by its primary key:
+
+ my $cd = $schema->resultset('CD')->find(5);
- Additionally, you can specify the columns explicitly by name:
+ You can also find a row by a specific unique constraint:
my $cd = $schema->resultset('CD')->find(
{
{ key => 'cd_artist_title' }
);
- If the C<key> is specified as C<primary>, it searches only on the primary key.
-
- If no C<key> is specified, it searches on all unique constraints defined on the
- source for which column data is provided, including the primary key.
-
- If your table does not have a primary key, you B<must> provide a value for the
- C<key> attribute matching one of the unique constraints on the source.
-
- In addition to C<key>, L</find> recognizes and applies standard
- L<resultset attributes|/ATTRIBUTES> in the same way as L</search> does.
-
- Note: If your query does not return only one row, a warning is generated:
-
- Query returned more than one row
-
- See also L</find_or_create> and L</update_or_create>. For information on how to
- declare unique constraints, see
- L<DBIx::Class::ResultSource/add_unique_constraint>.
+ See also L</find_or_create> and L</update_or_create>.
=cut
my $self = shift;
my $attrs = (@_ > 1 && ref $_[$#_] eq 'HASH' ? pop(@_) : {});
- # Default to the primary key, but allow a specific key
- my @cols = exists $attrs->{key}
- ? $self->result_source->unique_constraint_columns($attrs->{key})
- : $self->result_source->primary_columns;
- $self->throw_exception(
- "Can't find unless a primary key is defined or unique constraint is specified"
- ) unless @cols;
+ my $rsrc = $self->result_source;
+
+ my $constraint_name;
+ if (exists $attrs->{key}) {
+ $constraint_name = defined $attrs->{key}
+ ? $attrs->{key}
+ : $self->throw_exception("An undefined 'key' resultset attribute makes no sense")
+ ;
+ }
+
+ # Parse out the condition from input
+ my $call_cond;
- # Parse out a hashref from input
- my $input_query;
if (ref $_[0] eq 'HASH') {
- $input_query = { %{$_[0]} };
- }
- elsif (@_ == @cols) {
- $input_query = {};
- @{$input_query}{@cols} = @_;
+ $call_cond = { %{$_[0]} };
}
else {
- # Compatibility: Allow e.g. find(id => $value)
- carp "Find by key => value deprecated; please use a hashref instead";
- $input_query = {@_};
- }
-
- my (%related, $info);
-
- KEY: foreach my $key (keys %$input_query) {
- if (ref($input_query->{$key})
- && ($info = $self->result_source->relationship_info($key))) {
- my $val = delete $input_query->{$key};
- next KEY if (ref($val) eq 'ARRAY'); # has_many for multi_create
- my $rel_q = $self->result_source->_resolve_condition(
- $info->{cond}, $val, $key
- );
- die "Can't handle OR join condition in find" if ref($rel_q) eq 'ARRAY';
+ # if only values are supplied we need to default to 'primary'
+ $constraint_name = 'primary' unless defined $constraint_name;
+
+ my @c_cols = $rsrc->unique_constraint_columns($constraint_name);
+
+ $self->throw_exception(
+ "No constraint columns, maybe a malformed '$constraint_name' constraint?"
+ ) unless @c_cols;
+
+ $self->throw_exception (
+ 'find() expects either a column/value hashref, or a list of values '
+ . "corresponding to the columns of the specified unique constraint '$constraint_name'"
+ ) unless @c_cols == @_;
+
+ $call_cond = {};
+ @{$call_cond}{@c_cols} = @_;
+ }
+
+ my %related;
+ for my $key (keys %$call_cond) {
+ if (
+ my $keyref = ref($call_cond->{$key})
+ and
+ my $relinfo = $rsrc->relationship_info($key)
+ ) {
+ my $val = delete $call_cond->{$key};
+
+ next if $keyref eq 'ARRAY'; # has_many for multi_create
+
+ my $rel_q = $rsrc->_resolve_condition(
+ $relinfo->{cond}, $val, $key, $key
+ );
+ die "Can't handle complex relationship conditions in find" if ref($rel_q) ne 'HASH';
@related{keys %$rel_q} = values %$rel_q;
}
}
- if (my @keys = keys %related) {
- @{$input_query}{@keys} = values %related;
- }
+ # relationship conditions take precedence (?)
+ @{$call_cond}{keys %related} = values %related;
- # Build the final query: Default to the disjunction of the unique queries,
- # but allow the input query in case the ResultSet defines the query or the
- # user is abusing find
my $alias = exists $attrs->{alias} ? $attrs->{alias} : $self->{attrs}{alias};
- my $query;
- if (exists $attrs->{key}) {
- my @unique_cols = $self->result_source->unique_constraint_columns($attrs->{key});
- my $unique_query = $self->_build_unique_query($input_query, \@unique_cols);
- $query = $self->_add_alias($unique_query, $alias);
+ my $final_cond;
+ if (defined $constraint_name) {
+ $final_cond = $self->_qualify_cond_columns (
+
+ $self->_build_unique_cond (
+ $constraint_name,
+ $call_cond,
+ ),
+
+ $alias,
+ );
}
elsif ($self->{attrs}{accessor} and $self->{attrs}{accessor} eq 'single') {
# This means that we got here after a merger of relationship conditions
# relationship
}
else {
- my @unique_queries = $self->_unique_queries($input_query, $attrs);
- $query = @unique_queries
- ? [ map { $self->_add_alias($_, $alias) } @unique_queries ]
- : $self->_add_alias($input_query, $alias);
+ # no key was specified - fall down to heuristics mode:
+ # run through all unique queries registered on the resultset, and
+ # 'OR' all qualifying queries together
+ my (@unique_queries, %seen_column_combinations);
+ for my $c_name ($rsrc->unique_constraint_names) {
+ next if $seen_column_combinations{
+ join "\x00", sort $rsrc->unique_constraint_columns($c_name)
+ }++;
+
+ push @unique_queries, try {
+ $self->_build_unique_cond ($c_name, $call_cond, 'croak_on_nulls')
+ } || ();
+ }
+
+ $final_cond = @unique_queries
+ ? [ map { $self->_qualify_cond_columns($_, $alias) } @unique_queries ]
+ : $self->_non_unique_find_fallback ($call_cond, $attrs)
+ ;
}
- # Run the query
- my $rs = $self->search ($query, {result_class => $self->result_class, %$attrs});
+ # Run the query, passing the result_class since it should propagate for find
+ my $rs = $self->search ($final_cond, {result_class => $self->result_class, %$attrs});
- if (keys %{$rs->_resolved_attrs->{collapse}}) {
+ if ($rs->_resolved_attrs->{collapse}) {
my $row = $rs->next;
carp "Query returned more than one row" if $rs->next;
return $row;
}
}
- # _add_alias
+ # This is a stop-gap method as agreed during the discussion on find() cleanup:
+ # http://lists.scsys.co.uk/pipermail/dbix-class/2010-October/009535.html
+ #
+ # It is invoked when find() is called in legacy-mode with insufficiently-unique
+ # condition. It is provided for overrides until a saner way forward is devised
#
- # Add the specified alias to the specified query hash. A copy is made so the
- # original query is not modified.
+ # *NOTE* This is not a public method, and it's *GUARANTEED* to disappear down
+ # the road. Please adjust your tests accordingly to catch this situation early
+ # DBIx::Class::ResultSet->can('_non_unique_find_fallback') is reasonable
+ #
+ # The method will not be removed without an adequately complete replacement
+ # for strict-mode enforcement
# Legacy fallback used by find() when no unique constraint matches the
# supplied condition: qualifies the raw condition's columns with the
# current (or explicitly supplied) alias and returns it for a plain
# search. See the stop-gap note above - NOT a public method.
sub _non_unique_find_fallback {
  my ($self, $cond, $attrs) = @_;

  return $self->_qualify_cond_columns(
    $cond,
    exists $attrs->{alias}
      ? $attrs->{alias}
      : $self->{attrs}{alias}
  );
}
# Returns a shallow copy of the condition hashref with every column name
# that is not already qualified (contains no '.') prefixed with the given
# alias. The input $cond is not modified.
sub _qualify_cond_columns {
  my ($self, $cond, $alias) = @_;

  my %aliased = %$cond;
  for (keys %aliased) {
    # keys() was flattened before the loop, so the delete/insert is safe
    $aliased{"$alias.$_"} = delete $aliased{$_}
      if $_ !~ /\./;
  }

  return \%aliased;
}
# Builds the find() condition for a single named unique constraint:
# merges $extra_cond with the resultset's own condition (best-effort -
# falls back to a copy of $extra_cond if the merge fails), trims it down
# to the constraint's columns, and throws unless every constraint column
# is covered. With $croak_on_null set, NULL (undef) values also count as
# missing; without it, NULLs merely trigger a (suppressible) warning.
sub _build_unique_cond {
  my ($self, $constraint_name, $extra_cond, $croak_on_null) = @_;

  my @c_cols = $self->result_source->unique_constraint_columns($constraint_name);

  # combination may fail if $self->{cond} is non-trivial
  my ($final_cond) = try {
    $self->_merge_with_rscond ($extra_cond)
  } catch {
    +{ %$extra_cond }
  };

  # trim out everything not in $columns
  $final_cond = { map {
    exists $final_cond->{$_}
      ? ( $_ => $final_cond->{$_} )
      : ()
  } @c_cols };

  if (my @missing = grep
    { ! ($croak_on_null ? defined $final_cond->{$_} : exists $final_cond->{$_}) }
    (@c_cols)
  ) {
    $self->throw_exception( sprintf ( "Unable to satisfy requested constraint '%s', no values for column(s): %s",
      $constraint_name,
      join (', ', map { "'$_'" } @missing),
    ) );
  }

  if (
    !$croak_on_null
      and
    !$ENV{DBIC_NULLABLE_KEY_NOWARN}
      and
    my @undefs = grep { ! defined $final_cond->{$_} } (keys %$final_cond)
  ) {
    carp_unique ( sprintf (
      "NULL/undef values supplied for requested unique constraint '%s' (NULL "
    . 'values in column(s): %s). This is almost certainly not what you wanted, '
    . 'though you can set DBIC_NULLABLE_KEY_NOWARN to disable this warning.',
      $constraint_name,
      join (', ', map { "'$_'" } @undefs),
    ));
  }

  return $final_cond;
}
=head2 search_related
=over 4
- =item Arguments: $rel, $cond, \%attrs?
+ =item Arguments: $rel, $cond?, \%attrs?
- =item Return Value: $new_resultset
+ =item Return Value: $new_resultset (scalar context) || @row_objs (list context)
=back
Searches the specified relationship, optionally specifying a condition and
attributes for matching records. See L</ATTRIBUTES> for more information.
+ In list context, C<< ->all() >> is called implicitly on the resultset, thus
+ returning a list of row objects instead. To avoid that, use L</search_related_rs>.
+
+ See also L</search_related_rs>.
+
=cut
sub search_related {
=item Arguments: $cond?
- =item Return Value: $row_object?
+ =item Return Value: $row_object | undef
=back
my $cd = $schema->resultset('CD')->single({ year => 2001 });
Inflates the first result without creating a cursor if the resultset has
- any records in it; if not returns nothing. Used by L</find> as a lean version of
- L</search>.
+ any records in it; if not returns C<undef>. Used by L</find> as a lean version
+ of L</search>.
While this method can take an optional search condition (just like L</search>)
being a fast-code-path it does not recognize search attributes. If you need to
my $attrs = $self->_resolved_attrs_copy;
- if (keys %{$attrs->{collapse}}) {
+ if ($attrs->{collapse}) {
$self->throw_exception(
'single() can not be used on resultsets prefetching has_many. Use find( \%cond ) or next() instead'
);
}
}
- # XXX: Disabled since it doesn't infer uniqueness in all cases
- # unless ($self->_is_unique_query($attrs->{where})) {
- # carp "Query not guaranteed to return a single row"
- # . "; please declare your unique constraints or use search instead";
- # }
-
my @data = $self->result_source->storage->select_single(
$attrs->{from}, $attrs->{select},
$attrs->{where}, $attrs
);
- return (@data ? ($self->_construct_object(@data))[0] : undef);
+ return @data
+ ? ($self->_construct_objects(@data))[0]
+ : undef
+ ;
}
- # _is_unique_query
- #
- # Try to determine if the specified query is guaranteed to be unique, based on
- # the declared unique constraints.
-
- sub _is_unique_query {
- my ($self, $query) = @_;
-
- my $collapsed = $self->_collapse_query($query);
- my $alias = $self->{attrs}{alias};
-
- foreach my $name ($self->result_source->unique_constraint_names) {
- my @unique_cols = map {
- "$alias.$_"
- } $self->result_source->unique_constraint_columns($name);
-
- # Count the values for each unique column
- my %seen = map { $_ => 0 } @unique_cols;
-
- foreach my $key (keys %$collapsed) {
- my $aliased = $key =~ /\./ ? $key : "$alias.$key";
- next unless exists $seen{$aliased}; # Additional constraints are okay
- $seen{$aliased} = scalar keys %{ $collapsed->{$key} };
- }
-
- # If we get 0 or more than 1 value for a column, it's not necessarily unique
- return 1 unless grep { $_ != 1 } values %seen;
- }
-
- return 0;
- }
-
# _collapse_query
#
# Recursively collapse the query, accumulating values for each column.
=item Arguments: $cond, \%attrs?
- =item Return Value: $resultset (scalar context), @row_objs (list context)
+ =item Return Value: $resultset (scalar context) || @row_objs (list context)
=back
sub search_like {
my $class = shift;
- carp (
+ carp_unique (
'search_like() is deprecated and will be removed in DBIC version 0.09.'
.' Instead use ->search({ x => { -like => "y%" } })'
.' (note the outer pair of {}s - they are important!)'
=item Arguments: $first, $last
- =item Return Value: $resultset (scalar context), @row_objs (list context)
+ =item Return Value: $resultset (scalar context) || @row_objs (list context)
=back
$attrs->{offset} = $self->{attrs}{offset} || 0;
$attrs->{offset} += $min;
$attrs->{rows} = ($max ? ($max - $min + 1) : 1);
- return $self->search(undef(), $attrs);
+ return $self->search(undef, $attrs);
#my $slice = (ref $self)->new($self->result_source, $attrs);
#return (wantarray ? $slice->all : $slice);
}
=item Arguments: none
- =item Return Value: $result?
+ =item Return Value: $result | undef
=back
return $cache->[$self->{all_cache_position}++];
}
if ($self->{attrs}{cache}) {
+ delete $self->{pager};
$self->{all_cache_position} = 1;
return ($self->all)[0];
}
: $self->cursor->next
);
return undef unless (@row);
- my ($row, @more) = $self->_construct_object(@row);
+ my ($row, @more) = $self->_construct_objects(@row);
$self->{stashed_objects} = \@more if @more;
return $row;
}
-sub _construct_object {
+# takes a single DBI-row of data and constructs as many objects
+# as the resultset attributes call for.
+# This can be a bit of an action at a distance - it takes as an argument
+# the *current* cursor-row (already taken off the $sth), but if
+# collapsing is requested it will keep advancing the cursor either
+# until the current row-object is assembled (the collapser was able to
+# order the result sensibly) OR until the cursor is exhausted (an
+# unordered collapsing resultset effectively triggers ->all)
+
+# FIXME: why the *FUCK* do we pass around DBI data by copy?! Sadly needs
+# assessment before changing...
+#
+sub _construct_objects {
my ($self, @row) = @_;
+ my $attrs = $self->_resolved_attrs;
+ my $keep_collapsing = $attrs->{collapse};
+
+ my $res_index;
+=begin
+ do {
+ my $me_pref_col = $attrs->{_row_parser}->($row_ref);
+
+ my $container;
+ if ($keep_collapsing) {
+
+ # FIXME - we should be able to remove these 2 checks after the design validates
+ $self->throw_exception ('Collapsing without a top-level collapse-set... can not happen')
+ unless @{$me_pref_col->[2]};
+ $self->throw_exception ('Top-level collapse-set contains a NULL-value... can not happen')
+ if grep { ! defined $_ } @{$me_pref_col->[2]};
+
+ my $main_ident = join "\x00", @{$me_pref_col->[2]};
+
+ if (! $res_index->{$main_ident}) {
+ # this is where we bail out IFF we are ordered, and the $main_ident changes
+
+ $res_index->{$main_ident} = {
+ all_me_pref => [,
+ index => scalar keys %$res_index,
+ };
+ }
+ }
+
+
- my $info = $self->_collapse_result($self->{_attrs}{as}, \@row)
+ $container = $res_index->{$main_ident}{container};
+ };
+
+ push @$container, [ @{$me_pref_col}[0,1] ];
+
+
+
+ } while (
+ $keep_collapsing
+ &&
+ do { $row_ref = [$self->cursor->next]; $self->{stashed_row} = $row_ref if @$row_ref; scalar @$row_ref }
+ );
+
+ # attempt collapse all rows with same collapse identity
+ if (@to_collapse > 1) {
+ my @collapsed;
+ while (@to_collapse) {
+ $self->_merge_result(\@collapsed, shift @to_collapse);
+ }
+ }
+=cut
+
+ my $mepref_structs = $self->_collapse_result($attrs->{as}, \@row, $keep_collapsing)
or return ();
- my @new = $self->result_class->inflate_result($self->result_source, @$info);
- @new = $self->{_attrs}{record_filter}->(@new)
- if exists $self->{_attrs}{record_filter};
- return @new;
+
+ my $rsrc = $self->result_source;
+ my $res_class = $self->result_class;
+ my $inflator = $res_class->can ('inflate_result');
+
+ my @objs =
+ $res_class->$inflator ($rsrc, @$mepref_structs);
+
+ if (my $f = $attrs->{record_filter}) {
+ @objs = map { $f->($_) } @objs;
+ }
+
+ return @objs;
}
+
sub _collapse_result {
- my ($self, $as_proto, $row) = @_;
+ my ( $self, $as_proto, $row_ref, $keep_collapsing ) = @_;
+ my $collapse = $self->_resolved_attrs->{collapse};
+ my $parser = $self->result_source->_mk_row_parser( $as_proto, $collapse );
+ my $result = [];
+ my $register = {};
+ my $rel_register = {};
- my @copy = @$row;
+ my @row = @$row_ref;
+ do {
+ my $row = $parser->( \@row );
- # 'foo' => [ undef, 'foo' ]
- # 'foo.bar' => [ 'foo', 'bar' ]
- # 'foo.bar.baz' => [ 'foo.bar', 'baz' ]
+ # init register
+ $self->_check_register( $register, $row ) unless ( keys %$register );
- my @construct_as = map { [ (/^(?:(.*)\.)?([^.]+)$/) ] } @$as_proto;
+ $self->_merge_result( $result, $row, $rel_register )
+ if ( !$collapse
+ || ( $collapse = $self->_check_register( $register, $row ) ) );
- my %collapse = %{$self->{_attrs}{collapse}||{}};
+ } while (
+ $collapse
+ && do { @row = $self->cursor->next; $self->{stashed_row} = \@row if @row; }
+
+ # run this as long as there is a next row and we are not yet done collapsing
+ );
+ return $result;
+}
- my @pri_index;
- # if we're doing collapsing (has_many prefetch) we need to grab records
- # until the PK changes, so fill @pri_index. if not, we leave it empty so
- # we know we don't have to bother.
- # the reason for not using the collapse stuff directly is because if you
- # had for e.g. two artists in a row with no cds, the collapse info for
- # both would be NULL (undef) so you'd lose the second artist
+# Taubenschlag
+sub _check_register {
+ my ( $self, $register, $obj ) = @_;
+ return undef unless ( ref $obj eq 'ARRAY' && ref $obj->[2] eq 'ARRAY' );
+ my @ids = @{ $obj->[2] };
+ while ( defined( my $id = shift @ids ) ) {
+ return $register->{$id} if ( exists $register->{$id} && !@ids );
+ $register->{$id} = @ids ? {} : $obj unless ( exists $register->{$id} );
+ $register = $register->{$id};
+ }
+ return undef;
+}
-
- # store just the index so we can check the array positions from the row
- # without having to contruct the full hash
+sub _merge_result {
+ my ( $self, $result, $row, $register ) = @_;
+ return @$result = @$row if ( @$result == 0 ); # initialize with $row
- if (keys %collapse) {
- my %pri = map { ($_ => 1) } $self->result_source->_pri_cols;
- foreach my $i (0 .. $#construct_as) {
- next if defined($construct_as[$i][0]); # only self table
- if (delete $pri{$construct_as[$i][1]}) {
- push(@pri_index, $i);
- }
- last unless keys %pri; # short circuit (Johnny Five Is Alive!)
+ my ( undef, $rels, $ids ) = @$result;
+ my ( undef, $new_rels, $new_ids ) = @$row;
+
- use List::MoreUtils;
- my @rels = List::MoreUtils::uniq( keys %$rels, keys %$new_rels );
++ my @rels = keys %{ { %{$rels||{} }, %{ $new_rels||{} } } };
+ foreach my $rel (@rels) {
+ $register = $register->{$rel} ||= {};
+
+ my $new_data = $new_rels->{$rel};
+ my $data = $rels->{$rel};
+ @$data = [@$data] unless ( ref $data->[0] eq 'ARRAY' );
+
+ $self->_check_register( $register, $data->[0] )
+ unless ( keys %$register );
+
+ if ( my $found = $self->_check_register( $register, $new_data ) ) {
+ $self->_merge_result( $found, $new_data, $register );
+ }
+ else {
+ push( @$data, $new_data );
}
}
+ return 1;
+}
+
-
-
-
+=begin
+
+# two arguments: $as_proto is an arrayref of column names,
+# $row_ref is an arrayref of the data. If none of the row data
+# is defined we return undef (that's copied from the old
+# _collapse_result). Next we decide whether we need to collapse
+# the resultset (i.e. we prefetch something) or not. $collapse
+# indicates that. The do-while loop will run once if we do not need
+# to collapse the result and will run as long as _merge_result returns
+# a true value. It will return undef if the current added row does not
+# match the previous row. A bit of stashing and cursor magic is
+# required so that the cursor is not mixed up.
+
+# "$rows" is a bit misleading. In the end, there should only be one
+# element in this arrayref.
- # no need to do an if, it'll be empty if @pri_index is empty anyway
+sub _collapse_result {
+ my ( $self, $as_proto, $row_ref ) = @_;
+ my $has_def;
+ for (@$row_ref) {
+ if ( defined $_ ) {
+ $has_def++;
+ last;
+ }
+ }
+ return undef unless $has_def;
+
+ my $collapse = $self->_resolved_attrs->{collapse};
+ my $rows = [];
+ my @row = @$row_ref;
+ do {
+ my $i = 0;
+ my $row = { map { $_ => $row[ $i++ ] } @$as_proto };
+ $row = $self->result_source->_parse_row($row, $collapse);
+ unless ( scalar @$rows ) {
+ push( @$rows, $row );
+ }
+ $collapse = undef unless ( $self->_merge_result( $rows, $row ) );
+ } while (
+ $collapse
+ && do { @row = $self->cursor->next; $self->{stashed_row} = \@row if @row; }
+ );
+
+ return $rows->[0];
- my %pri_vals = map { ($_ => $copy[$_]) } @pri_index;
+}
+
+# _merge_result accepts an arrayref of rows objects (again, an arrayref of two elements)
+# and a row object which should be merged into the first object.
+# First we try to find out whether $row is already in $rows. If this is the case
+# we try to merge them by iteration through their relationship data. We call
+# _merge_result again on them, so they get merged.
+
+# If we don't find the $row in $rows, we append it to $rows and return undef.
+# _merge_result returns 1 otherwise (i.e. $row has been found in $rows).
+
+sub _merge_result {
+ my ( $self, $rows, $row ) = @_;
+ my ( $columns, $rels ) = @$row;
+ my $found = undef;
+ foreach my $seen (@$rows) {
+ my $match = 1;
+ foreach my $column ( keys %$columns ) {
+ if ( defined $seen->[0]->{$column} ^ defined $columns->{$column}
+ or defined $columns->{$column}
+ && $seen->[0]->{$column} ne $columns->{$column} )
+ {
+
+ $match = 0;
+ last;
+ }
+ }
+ if ($match) {
+ $found = $seen;
+ last;
+ }
+ }
+ if ($found) {
+ foreach my $rel ( keys %$rels ) {
+ my $old_rows = $found->[1]->{$rel};
+ $self->_merge_result(
+ ref $found->[1]->{$rel}->[0] eq 'HASH' ? [ $found->[1]->{$rel} ]
+ : $found->[1]->{$rel},
+ ref $rels->{$rel}->[0] eq 'HASH' ? [ $rels->{$rel}->[0], $rels->{$rel}->[1] ]
+ : $rels->{$rel}->[0]
+ );
- my @const_rows;
+ my $attrs = $self->_resolved_attrs;
+ my ($keep_collapsing, $set_ident) = @{$attrs}{qw/collapse _collapse_ident/};
+
+ # FIXME this is temporary, need to calculate in _resolved_attrs
+ $set_ident ||= { me => [ $self->result_source->_pri_cols ], pref => {} };
+
+ my @cur_row = @$row_ref;
+ my (@to_collapse, $last_ident);
+
+ do {
+ my $row_hr = { map { $as_proto->[$_] => $cur_row[$_] } (0 .. $#$as_proto) };
+
+ # see if we are switching to another object
+ # this can be turned off and things will still work
+ # since _merge_prefetch knows about _collapse_ident
+# my $cur_ident = [ @{$row_hr}{@$set_ident} ];
+ my $cur_ident = [];
+ $last_ident ||= $cur_ident;
+
+# if ($keep_collapsing = Test::Deep::eq_deeply ($cur_ident, $last_ident)) {
+# push @to_collapse, $self->result_source->_parse_row (
+# $row_hr,
+# );
+# }
+ } while (
+ $keep_collapsing
+ &&
+ do { @cur_row = $self->cursor->next; $self->{stashed_row} = \@cur_row if @cur_row; }
+ );
- do { # no need to check anything at the front, we always want the first row
+ die Dumper \@to_collapse;
- my %const;
- foreach my $this_as (@construct_as) {
- $const{$this_as->[0]||''}{$this_as->[1]} = shift(@copy);
+ # attempt collapse all rows with same collapse identity
+ if (@to_collapse > 1) {
+ my @collapsed;
+ while (@to_collapse) {
+ $self->_merge_result(\@collapsed, shift @to_collapse);
}
+ @to_collapse = @collapsed;
+ }
- push(@const_rows, \%const);
+ # still didn't fully collapse
+ $self->throw_exception ('Resultset collapse failed (theoretically impossible). Maybe a wrong collapse_ident...?')
+ if (@to_collapse > 1);
- } until ( # no pri_index => no collapse => drop straight out
- !@pri_index
- or
- do { # get another row, stash it, drop out if different PK
+ return $to_collapse[0];
+}
- @copy = $self->cursor->next;
- $self->{stashed_row} = \@copy;
- # last thing in do block, counts as true if anything doesn't match
+# two arguments: $as_proto is an arrayref of 'as' column names,
+# $row_ref is an arrayref of the data. The do-while loop will run
+# once if we do not need to collapse the result and will run as long as
+# _merge_result returns a true value. It will return undef if the
+# current added row does not match the previous row, which in turn
+# means we need to stash the row for the subsequent ->next call
+sub _collapse_result {
+ my ( $self, $as_proto, $row_ref ) = @_;
- # check xor defined first for NULL vs. NOT NULL then if one is
- # defined the other must be so check string equality
+ my $attrs = $self->_resolved_attrs;
+ my ($keep_collapsing, $set_ident) = @{$attrs}{qw/collapse _collapse_ident/};
- grep {
- (defined $pri_vals{$_} ^ defined $copy[$_])
- || (defined $pri_vals{$_} && ($pri_vals{$_} ne $copy[$_]))
- } @pri_index;
- }
- );
+ die Dumper [$as_proto, $row_ref, $keep_collapsing, $set_ident ];
- my $alias = $self->{attrs}{alias};
- my $info = [];
- my %collapse_pos;
+ my @cur_row = @$row_ref;
+ my (@to_collapse, $last_ident);
- my @const_keys;
+ do {
+ my $row_hr = { map { $as_proto->[$_] => $cur_row[$_] } (0 .. $#$as_proto) };
- foreach my $const (@const_rows) {
- scalar @const_keys or do {
- @const_keys = sort { length($a) <=> length($b) } keys %$const;
- };
- foreach my $key (@const_keys) {
- if (length $key) {
- my $target = $info;
- my @parts = split(/\./, $key);
- my $cur = '';
- my $data = $const->{$key};
- foreach my $p (@parts) {
- $target = $target->[1]->{$p} ||= [];
- $cur .= ".${p}";
- if ($cur eq ".${key}" && (my @ckey = @{$collapse{$cur}||[]})) {
- # collapsing at this point and on final part
- my $pos = $collapse_pos{$cur};
- CK: foreach my $ck (@ckey) {
- if (!defined $pos->{$ck} || $pos->{$ck} ne $data->{$ck}) {
- $collapse_pos{$cur} = $data;
- delete @collapse_pos{ # clear all positioning for sub-entries
- grep { m/^\Q${cur}.\E/ } keys %collapse_pos
- };
- push(@$target, []);
- last CK;
- }
- }
- }
- if (exists $collapse{$cur}) {
- $target = $target->[-1];
- }
- }
- $target->[0] = $data;
- } else {
- $info->[0] = $const->{$key};
- }
- }
+ # see if we are switching to another object
+ # this can be turned off and things will still work
+ # since _merge_prefetch knows about _collapse_ident
+# my $cur_ident = [ @{$row_hr}{@$set_ident} ];
+ my $cur_ident = [];
+ $last_ident ||= $cur_ident;
+
+# if ($keep_collapsing = eq_deeply ($cur_ident, $last_ident)) {
+# push @to_collapse, $self->result_source->_parse_row (
+# $row_hr,
+# );
+# }
+ } while (
+ $keep_collapsing
+ &&
+ do { @cur_row = $self->cursor->next; $self->{stashed_row} = \@cur_row if @cur_row; }
+ );
+
+ # attempt collapse all rows with same collapse identity
+}
+=cut
+
+# Takes an arrayref of me/pref pairs and a new me/pref pair that should
+# be merged on a preexisting matching me (or should be pushed into $merged
+# as a new me/pref pair for further invocations). It should be possible to
+# use this function to collapse complete ->all results, provided _collapse_result() is adjusted
+# to provide everything to this sub not to barf when $merged contains more than one
+# arrayref)
+sub _merge_prefetch {
+ my ($self, $merged, $next_row) = @_;
+
+ unless (@$merged) {
+ push @$merged, $next_row;
+ return;
}
- return $info;
}
=head2 result_source
sub result_class {
my ($self, $result_class) = @_;
if ($result_class) {
- $self->ensure_class_loaded($result_class);
+ unless (ref $result_class) { # don't fire this for an object
+ $self->ensure_class_loaded($result_class);
+ }
$self->_result_class($result_class);
- $self->{attrs}{result_class} = $result_class if ref $self;
+ # THIS LINE WOULD BE A BUG - this accessor specifically exists to
+ # permit the user to set result class on one result set only; it only
+ # chains if provided to search()
+ #$self->{attrs}{result_class} = $result_class if ref $self;
}
$self->_result_class;
}
$attrs ||= $self->_resolved_attrs;
my $tmp_attrs = { %$attrs };
-
- # take off any limits, record_filter is cdbi, and no point of ordering a count
- delete $tmp_attrs->{$_} for (qw/select as rows offset order_by record_filter/);
+ # take off any limits, record_filter is cdbi, and no point of ordering nor locking a count
+ delete @{$tmp_attrs}{qw/rows offset order_by record_filter for/};
# overwrite the selector (supplied by the storage)
- $tmp_attrs->{select} = $rsrc->storage->_count_select ($rsrc, $tmp_attrs);
+ $tmp_attrs->{select} = $rsrc->storage->_count_select ($rsrc, $attrs);
$tmp_attrs->{as} = 'count';
+ delete @{$tmp_attrs}{qw/columns/};
my $tmp_rs = $rsrc->resultset_class->new($rsrc, $tmp_attrs)->get_column ('count');
my ($self, $attrs) = @_;
my $rsrc = $self->result_source;
- $attrs ||= $self->_resolved_attrs_copy;
+ $attrs ||= $self->_resolved_attrs;
my $sub_attrs = { %$attrs };
+ # extra selectors do not go in the subquery and there is no point of ordering it, nor locking it
+ delete @{$sub_attrs}{qw/collapse columns as select _prefetch_selector_range order_by for/};
- # extra selectors do not go in the subquery and there is no point of ordering it
- delete $sub_attrs->{$_} for qw/collapse select _prefetch_select as order_by/;
-
- # if we multi-prefetch we group_by primary keys only as this is what we would
+ # if we multi-prefetch we group_by something unique, as this is what we would
# get out of the rs via ->next/->all. We *DO WANT* to clobber old group_by regardless
- if ( keys %{$attrs->{collapse}} ) {
+ if ( $attrs->{collapse} ) {
- $sub_attrs->{group_by} = [ map { "$attrs->{alias}.$_" } ($rsrc->_pri_cols) ]
+ $sub_attrs->{group_by} = [ map { "$attrs->{alias}.$_" } @{
+ $rsrc->_identifying_column_set || $self->throw_exception(
+ 'Unable to construct a unique group_by criteria properly collapsing the '
+ . 'has_many prefetch before count()'
+ );
+ } ]
}
- $sub_attrs->{select} = $rsrc->storage->_subq_count_select ($rsrc, $attrs);
+ # Calculate subquery selector
+ if (my $g = $sub_attrs->{group_by}) {
- # this is so that the query can be simplified e.g.
- # * ordering can be thrown away in things like Top limit
- $sub_attrs->{-for_count_only} = 1;
+ my $sql_maker = $rsrc->storage->sql_maker;
- my $sub_rs = $rsrc->resultset_class->new ($rsrc, $sub_attrs);
+ # necessary as the group_by may refer to aliased functions
+ my $sel_index;
+ for my $sel (@{$attrs->{select}}) {
+ $sel_index->{$sel->{-as}} = $sel
+ if (ref $sel eq 'HASH' and $sel->{-as});
+ }
- $attrs->{from} = [{
- -alias => 'count_subq',
- -source_handle => $rsrc->handle,
- count_subq => $sub_rs->as_query,
- }];
+ # anything from the original select mentioned on the group-by needs to make it to the inner selector
+ # also look for named aggregates referred in the having clause
+ # having often contains scalarrefs - thus parse it out entirely
+ my @parts = @$g;
+ if ($attrs->{having}) {
+ local $sql_maker->{having_bind};
+ local $sql_maker->{quote_char} = $sql_maker->{quote_char};
+ local $sql_maker->{name_sep} = $sql_maker->{name_sep};
+ unless (defined $sql_maker->{quote_char} and length $sql_maker->{quote_char}) {
+ $sql_maker->{quote_char} = [ "\x00", "\xFF" ];
+ # if we don't unset it we screw up retarded but unfortunately working
+ # 'MAX(foo.bar)' => { '>', 3 }
+ $sql_maker->{name_sep} = '';
+ }
- # the subquery replaces this
- delete $attrs->{$_} for qw/where bind collapse group_by having having_bind rows offset/;
+ my ($lquote, $rquote, $sep) = map { quotemeta $_ } ($sql_maker->_quote_chars, $sql_maker->name_sep);
- return $self->_count_rs ($attrs);
+ my $sql = $sql_maker->_parse_rs_attrs ({ having => $attrs->{having} });
+
+ # search for both a proper quoted qualified string, for a naive unquoted scalarref
+ # and if all fails for an utterly naive quoted scalar-with-function
+ while ($sql =~ /
+ $rquote $sep $lquote (.+?) $rquote
+ |
+ [\s,] \w+ \. (\w+) [\s,]
+ |
+ [\s,] $lquote (.+?) $rquote [\s,]
+ /gx) {
+ push @parts, ($1 || $2 || $3); # one of them matched if we got here
+ }
+ }
+
+ for (@parts) {
+ my $colpiece = $sel_index->{$_} || $_;
+
+ # unqualify join-based group_by's. Arcane but possible query
+ # also horrible horrible hack to alias a column (not a func.)
+ # (probably need to introduce SQLA syntax)
+ if ($colpiece =~ /\./ && $colpiece !~ /^$attrs->{alias}\./) {
+ my $as = $colpiece;
+ $as =~ s/\./__/;
+ $colpiece = \ sprintf ('%s AS %s', map { $sql_maker->_quote ($_) } ($colpiece, $as) );
+ }
+ push @{$sub_attrs->{select}}, $colpiece;
+ }
+ }
+ else {
+ my @pcols = map { "$attrs->{alias}.$_" } ($rsrc->primary_columns);
+ $sub_attrs->{select} = @pcols ? \@pcols : [ 1 ];
+ }
+
+ return $rsrc->resultset_class
+ ->new ($rsrc, $sub_attrs)
+ ->as_subselect_rs
+ ->search ({}, { columns => { count => $rsrc->storage->_count_select ($rsrc, $attrs) } })
+ ->get_column ('count');
}
sub _bool {
=back
- Returns all elements in the resultset. Called implicitly if the resultset
- is returned in list context.
+ Returns all elements in the resultset.
=cut
$self->throw_exception("all() doesn't take any arguments, you probably wanted ->search(...)->all()");
}
- return @{ $self->get_cache } if $self->get_cache;
+ if (my $c = $self->get_cache) {
+ return @$c;
+ }
- my @obj;
+ my @objects;
- if (keys %{$self->_resolved_attrs->{collapse}}) {
+ if ($self->_resolved_attrs->{collapse}) {
# Using $self->cursor->all is really just an optimisation.
# If we're collapsing has_many prefetches it probably makes
# very little difference, and this is cleaner than hacking
- # _construct_object to survive the approach
+ # _construct_objects to survive the approach
$self->cursor->reset;
my @row = $self->cursor->next;
while (@row) {
- push(@obj, $self->_construct_object(@row));
+ push(@objects, $self->_construct_objects(@row));
@row = (exists $self->{stashed_row}
? @{delete $self->{stashed_row}}
: $self->cursor->next);
}
} else {
- @obj = map { $self->_construct_object(@$_) } $self->cursor->all;
+ @objects = map { $self->_construct_objects(@$_) } $self->cursor->all;
}
- $self->set_cache(\@obj) if $self->{attrs}{cache};
+ $self->set_cache(\@objects) if $self->{attrs}{cache};
- return @obj;
+ return @objects;
}
=head2 reset
=item Arguments: none
- =item Return Value: $object?
+ =item Return Value: $object | undef
=back
- Resets the resultset and returns an object for the first result (if the
- resultset returns anything).
+ Resets the resultset and returns an object for the first result (or C<undef>
+ if the resultset is empty).
=cut
sub _rs_update_delete {
my ($self, $op, $values) = @_;
+ my $cond = $self->{cond};
my $rsrc = $self->result_source;
+ my $storage = $rsrc->schema->storage;
- # if a condition exists we need to strip all table qualifiers
- # if this is not possible we'll force a subquery below
- my $cond = $rsrc->schema->storage->_strip_cond_qualifiers ($self->{cond});
+ my $attrs = { %{$self->_resolved_attrs} };
- my $needs_group_by_subq = $self->_has_resolved_attr (qw/collapse group_by -join/);
- my $needs_subq = $needs_group_by_subq || (not defined $cond) || $self->_has_resolved_attr(qw/row offset/);
+ # "needs" is a strong word here - if the subquery is part of an IN clause - no point of
+ # even adding the group_by. It will really be used only when composing a poor-man's
+ # multicolumn-IN equivalent OR set
+ my $needs_group_by_subq = defined $attrs->{group_by};
- if ($needs_group_by_subq or $needs_subq) {
+ # simplify the joinmap and maybe decide if a grouping (and thus subquery) is necessary
+ my $relation_classifications;
+ if (ref($attrs->{from}) eq 'ARRAY') {
+ $attrs->{from} = $storage->_prune_unused_joins ($attrs->{from}, $attrs->{select}, $cond, $attrs);
- # make a new $rs selecting only the PKs (that's all we really need)
- my $attrs = $self->_resolved_attrs_copy;
+ $relation_classifications = $storage->_resolve_aliastypes_from_select_args (
+ [ @{$attrs->{from}}[1 .. $#{$attrs->{from}}] ],
+ $attrs->{select},
+ $cond,
+ $attrs
+ ) unless $needs_group_by_subq; # we already know we need a group, no point of resolving them
+ }
+ else {
+ $needs_group_by_subq ||= 1; # if {from} is unparseable assume the worst
+ }
- delete $attrs->{$_} for qw/collapse select as/;
- $attrs->{columns} = [ map { "$attrs->{alias}.$_" } ($self->result_source->_pri_cols) ];
+ $needs_group_by_subq ||= exists $relation_classifications->{multiplying};
+ # if no subquery - life is easy-ish
+ unless (
+ $needs_group_by_subq
+ or
+ keys %$relation_classifications # if any joins at all - need to wrap a subq
+ or
+ $self->_has_resolved_attr(qw/rows offset/) # limits call for a subq
+ ) {
+ # Most databases do not allow aliasing of tables in UPDATE/DELETE. Thus
+ # a condition containing 'me' or other table prefixes will not work
+ # at all. What this code tries to do (badly) is to generate a condition
+ # with the qualifiers removed, by exploiting the quote mechanism of sqla
+ #
+ # this is atrocious and should be replaced by normal sqla introspection
+ # one sunny day
+ my ($sql, @bind) = do {
+ my $sqla = $rsrc->storage->sql_maker;
+ local $sqla->{_dequalify_idents} = 1;
+ $sqla->_recurse_where($self->{cond});
+ } if $self->{cond};
+
+ return $rsrc->storage->$op(
+ $rsrc,
+ $op eq 'update' ? $values : (),
+ $self->{cond} ? \[$sql, @bind] : (),
+ );
+ }
+
+ # we got this far - means it is time to wrap a subquery
+ my $idcols = $rsrc->_identifying_column_set || $self->throw_exception(
+ sprintf(
+ "Unable to perform complex resultset %s() without an identifying set of columns on source '%s'",
+ $op,
+ $rsrc->source_name,
+ )
+ );
+ my $existing_group_by = delete $attrs->{group_by};
+
+ # make a new $rs selecting only the PKs (that's all we really need for the subq)
+ delete $attrs->{$_} for qw/collapse _collapse_order_by select _prefetch_selector_range as/;
+ $attrs->{columns} = [ map { "$attrs->{alias}.$_" } @$idcols ];
+ $attrs->{group_by} = \ ''; # FIXME - this is an evil hack, it causes the optimiser to kick in and throw away the LEFT joins
+ my $subrs = (ref $self)->new($rsrc, $attrs);
+
+ if (@$idcols == 1) {
+ return $storage->$op (
+ $rsrc,
+ $op eq 'update' ? $values : (),
+ { $idcols->[0] => { -in => $subrs->as_query } },
+ );
+ }
+ elsif ($storage->_use_multicolumn_in) {
+ # This is hideously ugly, but SQLA does not understand multicol IN expressions
+ my $sql_maker = $storage->sql_maker;
+ my ($sql, @bind) = @${$subrs->as_query};
+ $sql = sprintf ('(%s) IN %s', # the as_query already comes with a set of parenthesis
+ join (', ', map { $sql_maker->_quote ($_) } @$idcols),
+ $sql,
+ );
+
+ return $storage->$op (
+ $rsrc,
+ $op eq 'update' ? $values : (),
+ \[$sql, @bind],
+ );
+ }
+ else {
+ # if all else fails - get all primary keys and operate over a ORed set
+ # wrap in a transaction for consistency
+ # this is where the group_by starts to matter
+ my $subq_group_by;
if ($needs_group_by_subq) {
- # make sure no group_by was supplied, or if there is one - make sure it matches
- # the columns compiled above perfectly. Anything else can not be sanely executed
- # on most databases so croak right then and there
+ $subq_group_by = $attrs->{columns};
- if (my $g = $attrs->{group_by}) {
+ # make sure if there is a supplied group_by it matches the columns compiled above
+ # perfectly. Anything else can not be sanely executed on most databases so croak
+ # right then and there
+ if ($existing_group_by) {
my @current_group_by = map
{ $_ =~ /\./ ? $_ : "$attrs->{alias}.$_" }
- @$g
+ @$existing_group_by
;
if (
join ("\x00", sort @current_group_by)
ne
- join ("\x00", sort @{$attrs->{columns}} )
+ join ("\x00", sort @$subq_group_by )
) {
$self->throw_exception (
"You have just attempted a $op operation on a resultset which does group_by"
);
}
}
- else {
- $attrs->{group_by} = $attrs->{columns};
- }
}
- my $subrs = (ref $self)->new($rsrc, $attrs);
+ my $guard = $storage->txn_scope_guard;
- return $self->result_source->storage->_subq_update_delete($subrs, $op, $values);
- }
- else {
- return $rsrc->storage->$op(
+ my @op_condition;
+ for my $row ($subrs->search({}, { group_by => $subq_group_by })->cursor->all) {
+ push @op_condition, { map
+ { $idcols->[$_] => $row->[$_] }
+ (0 .. $#$idcols)
+ };
+ }
+
+ my $res = $storage->$op (
$rsrc,
$op eq 'update' ? $values : (),
- $cond,
+ \@op_condition,
);
+
+ $guard->commit;
+
+ return $res;
}
}
=back
Sets the specified columns in the resultset to the supplied values in a
- single query. Return value will be true if the update succeeded or false
- if no records were updated; exact type of success value is storage-dependent.
+ single query. Note that this will not run any accessor/set_column/update
+ triggers, nor will it update any row object instances derived from this
+ resultset (this includes the contents of the L<resultset cache|/set_cache>
+ if any). See L</update_all> if you need to execute any on-update
+ triggers or cascades defined either by you or a
+ L<result component|DBIx::Class::Manual::Component/WHAT IS A COMPONENT>.
+
+ The return value is a pass through of what the underlying
+ storage backend returned, and may vary. See L<DBI/execute> for the most
+ common case.
+
+ =head3 CAVEAT
+
+ Note that L</update> does not process/deflate any of the values passed in.
+ This is unlike the corresponding L<DBIx::Class::Row/update>. The user must
+ ensure manually that any value passed to this method will stringify to
+ something the RDBMS knows how to deal with. A notable example is the
+ handling of L<DateTime> objects, for more info see:
+ L<DBIx::Class::Manual::Cookbook/Formatting DateTime objects in queries>.
=cut
=back
- Fetches all objects and updates them one at a time. Note that C<update_all>
- will run DBIC cascade triggers, while L</update> will not.
+ Fetches all objects and updates them one at a time via
+ L<DBIx::Class::Row/update>. Note that C<update_all> will run DBIC defined
+ triggers, while L</update> will not.
=cut
my ($self, $values) = @_;
$self->throw_exception('Values for update_all must be a hash')
unless ref $values eq 'HASH';
- foreach my $obj ($self->all) {
- $obj->set_columns($values)->update;
- }
+
+ my $guard = $self->result_source->schema->txn_scope_guard;
+ $_->update({%$values}) for $self->all; # shallow copy - update will mangle it
+ $guard->commit;
return 1;
}
=back
- Deletes the contents of the resultset from its result source. Note that this
- will not run DBIC cascade triggers. See L</delete_all> if you need triggers
- to run. See also L<DBIx::Class::Row/delete>.
+ Deletes the rows matching this resultset in a single query. Note that this
+ will not run any delete triggers, nor will it alter the
+ L<in_storage|DBIx::Class::Row/in_storage> status of any row object instances
+ derived from this resultset (this includes the contents of the
+ L<resultset cache|/set_cache> if any). See L</delete_all> if you need to
+ execute any on-delete triggers or cascades defined either by you or a
+ L<result component|DBIx::Class::Manual::Component/WHAT IS A COMPONENT>.
- Return value will be the amount of rows deleted; exact type of return value
- is storage-dependent.
+ The return value is a pass through of what the underlying storage backend
+ returned, and may vary. See L<DBI/execute> for the most common case.
=cut
=back
- Fetches all objects and deletes them one at a time. Note that C<delete_all>
- will run DBIC cascade triggers, while L</delete> will not.
+ Fetches all objects and deletes them one at a time via
+ L<DBIx::Class::Row/delete>. Note that C<delete_all> will run DBIC defined
+ triggers, while L</delete> will not.
=cut
$self->throw_exception('delete_all does not accept any arguments')
if @_;
+ my $guard = $self->result_source->schema->txn_scope_guard;
$_->delete for $self->all;
+ $guard->commit;
return 1;
}
Accepts either an arrayref of hashrefs or alternatively an arrayref of arrayrefs.
For the arrayref of hashrefs style each hashref should be a structure suitable
- forsubmitting to a $resultset->create(...) method.
+ for submitting to a $resultset->create(...) method.
In void context, C<insert_bulk> in L<DBIx::Class::Storage::DBI> is used
to insert the data, as this is a faster method.
# cruft placed in standalone method
my $data = $self->_normalize_populate_args(@_);
+ return unless @$data;
+
if(defined wantarray) {
my @created;
foreach my $item (@$data) {
push(@created, $self->create($item));
}
return wantarray ? @created : \@created;
- } else {
+ }
+ else {
my $first = $data->[0];
# if a column is a registered relationship, and is a non-blessed hash/array, consider
# it relationship data
my (@rels, @columns);
+ my $rsrc = $self->result_source;
+ my $rels = { map { $_ => $rsrc->relationship_info($_) } $rsrc->relationships };
for (keys %$first) {
my $ref = ref $first->{$_};
- $self->result_source->has_relationship($_) && ($ref eq 'ARRAY' or $ref eq 'HASH')
+ $rels->{$_} && ($ref eq 'ARRAY' or $ref eq 'HASH')
? push @rels, $_
: push @columns, $_
;
}
- my @pks = $self->result_source->primary_columns;
+ my @pks = $rsrc->primary_columns;
## do the belongs_to relationships
foreach my $index (0..$#$data) {
foreach my $rel (@rels) {
next unless ref $data->[$index]->{$rel} eq "HASH";
my $result = $self->related_resultset($rel)->create($data->[$index]->{$rel});
- my ($reverse) = keys %{$self->result_source->reverse_relationship_info($rel)};
+ my ($reverse_relname, $reverse_relinfo) = %{$rsrc->reverse_relationship_info($rel)};
my $related = $result->result_source->_resolve_condition(
- $result->result_source->relationship_info($reverse)->{cond},
+ $reverse_relinfo->{cond},
$self,
$result,
+ $rel,
);
delete $data->[$index]->{$rel};
}
## inherit the data locked in the conditions of the resultset
- my ($rs_data) = $self->_merge_cond_with_data({});
+ my ($rs_data) = $self->_merge_with_rscond({});
delete @{$rs_data}{@columns};
my @inherit_cols = keys %$rs_data;
my @inherit_data = values %$rs_data;
## do bulk insert on current row
- $self->result_source->storage->insert_bulk(
- $self->result_source,
+ $rsrc->storage->insert_bulk(
+ $rsrc,
[@columns, @inherit_cols],
[ map { [ @$_{@columns}, @inherit_data ] } @$data ],
);
## do the has_many relationships
foreach my $item (@$data) {
+ my $main_row;
+
foreach my $rel (@rels) {
- next unless $item->{$rel} && ref $item->{$rel} eq "ARRAY";
+ next unless ref $item->{$rel} eq "ARRAY" && @{ $item->{$rel} };
- my $parent = $self->find({map { $_ => $item->{$_} } @pks})
- || $self->throw_exception('Cannot find the relating object.');
+ $main_row ||= $self->new_result({map { $_ => $item->{$_} } @pks});
- my $child = $parent->$rel;
+ my $child = $main_row->$rel;
my $related = $child->result_source->_resolve_condition(
- $parent->result_source->relationship_info($rel)->{cond},
+ $rels->{$rel}{cond},
$child,
- $parent,
+ $main_row,
+ $rel,
);
my @rows_to_add = ref $item->{$rel} eq 'ARRAY' ? @{$item->{$rel}} : ($item->{$rel});
my ($self, $arg) = @_;
if (ref $arg eq 'ARRAY') {
- if (ref $arg->[0] eq 'HASH') {
+ if (!@$arg) {
+ return [];
+ }
+ elsif (ref $arg->[0] eq 'HASH') {
return $arg;
}
elsif (ref $arg->[0] eq 'ARRAY') {
return $self->{pager} if $self->{pager};
my $attrs = $self->{attrs};
- $self->throw_exception("Can't create pager for non-paged rs")
- unless $self->{attrs}{page};
+ if (!defined $attrs->{page}) {
+ $self->throw_exception("Can't create pager for non-paged rs");
+ }
+ elsif ($attrs->{page} <= 0) {
+ $self->throw_exception('Invalid page number (page-numbers are 1-based)');
+ }
$attrs->{rows} ||= 10;
# throw away the paging flags and re-run the count (possibly
# with a subselect) to get the real total count
my $count_attrs = { %$attrs };
delete $count_attrs->{$_} for qw/rows offset page pager/;
- my $total_count = (ref $self)->new($self->result_source, $count_attrs)->count;
- return $self->{pager} = Data::Page->new(
- $total_count,
+ my $total_rs = (ref $self)->new($self->result_source, $count_attrs);
+
+ require DBIx::Class::ResultSet::Pager;
+ return $self->{pager} = DBIx::Class::ResultSet::Pager->new(
+ sub { $total_rs->count }, #lazy-get the total
$attrs->{rows},
- $self->{attrs}{page}
+ $self->{attrs}{page},
);
}
$self->throw_exception( "new_result needs a hash" )
unless (ref $values eq 'HASH');
- my ($merged_cond, $cols_from_relations) = $self->_merge_cond_with_data($values);
+ my ($merged_cond, $cols_from_relations) = $self->_merge_with_rscond($values);
my %new = (
%$merged_cond,
@$cols_from_relations
? (-cols_from_relations => $cols_from_relations)
: (),
- -source_handle => $self->_source_handle,
-result_source => $self->result_source, # DO NOT REMOVE THIS, REQUIRED
);
return $self->result_class->new(\%new);
}
- # _merge_cond_with_data
+ # _merge_with_rscond
#
# Takes a simple hash of K/V data and returns its copy merged with the
# condition already present on the resultset. Additionally returns an
# arrayref of value/condition names, which were inferred from related
# objects (this is needed for in-memory related objects)
- sub _merge_cond_with_data {
+ sub _merge_with_rscond {
my ($self, $data) = @_;
my (%new_data, @cols_from_relations);
my %implied = %{$self->_remove_alias($collapsed_cond, $alias)};
while ( my($col, $value) = each %implied ) {
- if (ref($value) eq 'HASH' && keys(%$value) && (keys %$value)[0] eq '=') {
+ my $vref = ref $value;
+ if (
+ $vref eq 'HASH'
+ and
+ keys(%$value) == 1
+ and
+ (keys %$value)[0] eq '='
+ ) {
$new_data{$col} = $value->{'='};
- next;
}
- $new_data{$col} = $value if $self->_is_deterministic_value($value);
+ elsif( !$vref or $vref eq 'SCALAR' or blessed($value) ) {
+ $new_data{$col} = $value;
+ }
}
}
return (\%new_data, \@cols_from_relations);
}
- # _is_deterministic_value
- #
- # Make an effor to strip non-deterministic values from the condition,
- # to make sure new_result chokes less
-
- sub _is_deterministic_value {
- my $self = shift;
- my $value = shift;
- my $ref_type = ref $value;
- return 1 if $ref_type eq '' || $ref_type eq 'SCALAR';
- return 1 if Scalar::Util::blessed($value);
- return 0;
- }
-
# _has_resolved_attr
#
# determines if the resultset defines at least one
$cd->cd_to_producer->find_or_new({ producer => $producer },
{ key => 'primary' });
- Find an existing record from this resultset, based on its primary
- key, or a unique constraint. If none exists, instantiate a new result
- object and return it. The object will not be saved into your storage
- until you call L<DBIx::Class::Row/insert> on it.
+ Find an existing record from this resultset using L</find>. If none exists,
+ instantiate a new result object and return it. The object will not be saved
+ into your storage until you call L<DBIx::Class::Row/insert> on it.
- You most likely want this method when looking for existing rows using
- a unique constraint that is not the primary key, or looking for
- related rows.
+ You most likely want this method when looking for existing rows using a unique
+ constraint that is not the primary key, or looking for related rows.
- If you want objects to be saved immediately, use L</find_or_create>
- instead.
+ If you want objects to be saved immediately, use L</find_or_create> instead.
+
+ B<Note>: Make sure to read the documentation of L</find> and understand the
+ significance of the C<key> attribute, as its lack may skew your search, and
+ subsequently result in spurious new objects.
B<Note>: Take care when using C<find_or_new> with a table having
columns with default values that you intend to be automatically
);
Example of creating a new row and also creating a row in a related
- C<belongs_to>resultset. Note Hashref.
+ C<belongs_to> resultset. Note Hashref.
$cd_rs->create({
title=>"Music for Silly Walks",
{ key => 'cd_artist_title' }
);
+ B<Note>: Make sure to read the documentation of L</find> and understand the
+ significance of the C<key> attribute, as its lack may skew your search, and
+ subsequently result in spurious row creation.
+
B<Note>: Because find_or_create() reads from the database and then
possibly inserts based on the result, this method is subject to a race
condition. Another process could create a record in the table after
See also L</find> and L</update_or_create>. For information on how to declare
unique constraints, see L<DBIx::Class::ResultSource/add_unique_constraint>.
+ If you need to know if an existing row was found or a new one created use
+ L</find_or_new> and L<DBIx::Class::Row/in_storage> instead. Don't forget
+ to call L<DBIx::Class::Row/insert> to save the newly created row to the
+ database!
+
+ my $cd = $schema->resultset('CD')->find_or_new({
+ cdid => 5,
+ artist => 'Massive Attack',
+ title => 'Mezzanine',
+ year => 2005,
+ });
+
+ if( $cd->in_storage ) {
+ # do some stuff
+ $cd->insert;
+ }
+
=cut
sub find_or_create {
=item Arguments: \%col_values, { key => $unique_constraint }?
- =item Return Value: $rowobject
+ =item Return Value: $row_object
=back
$resultset->update_or_create({ col => $val, ... });
- First, searches for an existing row matching one of the unique constraints
- (including the primary key) on the source of this resultset. If a row is
- found, updates it with the other given column values. Otherwise, creates a new
- row.
+ Like L</find_or_create>, but if a row is found it is immediately updated via
+ C<< $found_row->update (\%col_values) >>.
+
Takes an optional C<key> attribute to search on a specific unique constraint.
For example:
producer => $producer,
name => 'harry',
}, {
- key => 'primary,
+ key => 'primary',
});
-
- If no C<key> is specified, it searches on all unique constraints defined on the
- source, including the primary key.
-
- If the C<key> is specified as C<primary>, it searches only on the primary key.
-
- See also L</find> and L</find_or_create>. For information on how to declare
- unique constraints, see L<DBIx::Class::ResultSource/add_unique_constraint>.
+ B<Note>: Make sure to read the documentation of L</find> and understand the
+ significance of the C<key> attribute, as its lack may skew your search, and
+ subsequently result in spurious row creation.
B<Note>: Take care when using C<update_or_create> with a table having
columns with default values that you intend to be automatically
In normal usage, the value of such columns should NOT be included at
all in the call to C<update_or_create>, even when set to C<undef>.
+ See also L</find> and L</find_or_create>. For information on how to declare
+ unique constraints, see L<DBIx::Class::ResultSource/add_unique_constraint>.
+
+ If you need to know if an existing row was updated or a new one created use
+ L</update_or_new> and L<DBIx::Class::Row/in_storage> instead. Don't forget
+ to call L<DBIx::Class::Row/insert> to save the newly created row to the
+ database!
+
+ my $cd = $schema->resultset('CD')->update_or_new(
+ {
+ artist => 'Massive Attack',
+ title => 'Mezzanine',
+ year => 1998,
+ },
+ { key => 'cd_artist_title' }
+ );
+
+ if( $cd->in_storage ) {
+ # do some stuff
+ $cd->insert;
+ }
+
=cut
sub update_or_create {
$resultset->update_or_new({ col => $val, ... });
- First, searches for an existing row matching one of the unique constraints
- (including the primary key) on the source of this resultset. If a row is
- found, updates it with the other given column values. Otherwise, instantiate
- a new result object and return it. The object will not be saved into your storage
- until you call L<DBIx::Class::Row/insert> on it.
+ Like L</find_or_new> but if a row is found it is immediately updated via
+ C<< $found_row->update (\%col_values) >>.
- Takes an optional C<key> attribute to search on a specific unique constraint.
For example:
# In your application
$cd->insert;
}
+ B<Note>: Make sure to read the documentation of L</find> and understand the
+ significance of the C<key> attribute, as its lack may skew your search, and
+ subsequently result in spurious new objects.
+
B<Note>: Take care when using C<update_or_new> with a table having
columns with default values that you intend to be automatically
supplied by the database (e.g. an auto_increment primary key column).
=item Arguments: none
- =item Return Value: \@cache_objects?
+ =item Return Value: \@cache_objects | undef
=back
=item Arguments: none
- =item Return Value: []
+ =item Return Value: undef
=back
sub is_ordered {
my ($self) = @_;
- return scalar $self->result_source->storage->_parse_order_by($self->{attrs}{order_by});
+ return scalar $self->result_source->storage->_extract_order_criteria($self->{attrs}{order_by});
}
=head2 related_resultset
# (the select/as attrs were deleted in the beginning), we need to flip all
# left joins to inner, so we get the expected results
# read the comment on top of the actual function to see what this does
- $attrs->{from} = $rsrc->schema->storage->_straight_join_to_node ($attrs->{from}, $alias);
+ $attrs->{from} = $rsrc->schema->storage->_inner_join_to_node ($attrs->{from}, $alias);
#XXX - temp fix for result_class bug. There likely is a more elegant fix -groditi
my $me = $self->current_source_alias;
- return $self->search(
+ return $self->search({
"$me.modified" => $user->id,
- );
+ });
}
=cut
=cut
sub as_subselect_rs {
- my $self = shift;
+ my $self = shift;
+
+ my $attrs = $self->_resolved_attrs;
- return $self->result_source->resultset->search( undef, {
- alias => $self->current_source_alias,
- from => [{
- $self->current_source_alias => $self->as_query,
- -alias => $self->current_source_alias,
- -source_handle => $self->result_source->handle,
- }]
- });
+ my $fresh_rs = (ref $self)->new (
+ $self->result_source
+ );
+
+ # these pieces will be locked in the subquery
+ delete $fresh_rs->{cond};
+ delete @{$fresh_rs->{attrs}}{qw/where bind/};
+
+ return $fresh_rs->search( {}, {
+ from => [{
+ $attrs->{alias} => $self->as_query,
+ -alias => $attrs->{alias},
+ -rsrc => $self->result_source,
+ }],
+ alias => $attrs->{alias},
+ });
}
# This code is called by search_related, and makes sure there
# we need to take the prefetch the attrs into account before we
# ->_resolve_join as otherwise they get lost - captainL
- my $join = $self->_merge_attr( $attrs->{join}, $attrs->{prefetch} );
+ my $join = $self->_merge_joinpref_attr( $attrs->{join}, $attrs->{prefetch} );
- delete @{$attrs}{qw/join prefetch collapse distinct select as columns +select +as +columns/};
+ delete @{$attrs}{qw/join prefetch collapse group_by distinct select as columns +select +as +columns/};
my $seen = { %{ (delete $attrs->{seen_join}) || {} } };
# are resolved (prefetch is useless - we are wrapping
# a subquery anyway).
my $rs_copy = $self->search;
- $rs_copy->{attrs}{join} = $self->_merge_attr (
+ $rs_copy->{attrs}{join} = $self->_merge_joinpref_attr (
$rs_copy->{attrs}{join},
delete $rs_copy->{attrs}{prefetch},
);
$from = [{
- -source_handle => $source->handle,
- -alias => $attrs->{alias},
+ -rsrc => $source,
+ -alias => $attrs->{alias},
$attrs->{alias} => $rs_copy->as_query,
}];
- delete @{$attrs}{@force_subq_attrs, 'where'};
+ delete @{$attrs}{@force_subq_attrs, qw/where bind/};
$seen->{-relation_chain_depth} = 0;
}
elsif ($attrs->{from}) { #shallow copy suffices
}
else {
$from = [{
- -source_handle => $source->handle,
+ -rsrc => $source,
-alias => $attrs->{alias},
$attrs->{alias} => $source->from,
}];
my $source = $self->result_source;
my $alias = $attrs->{alias};
- $attrs->{columns} ||= delete $attrs->{cols} if exists $attrs->{cols};
- my @colbits;
-
- # build columns (as long as select isn't set) into a set of as/select hashes
- unless ( $attrs->{select} ) {
-
- my @cols;
- if ( ref $attrs->{columns} eq 'ARRAY' ) {
- @cols = @{ delete $attrs->{columns}}
- } elsif ( defined $attrs->{columns} ) {
- @cols = delete $attrs->{columns}
- } else {
- @cols = $source->columns
- }
+ # default selection list
+ $attrs->{columns} = [ $source->columns ]
+ unless List::Util::first { exists $attrs->{$_} } qw/columns cols select as/;
- for (@cols) {
- if ( ref $_ eq 'HASH' ) {
- push @colbits, $_
- } else {
- my $key = /^\Q${alias}.\E(.+)$/
- ? "$1"
- : "$_";
- my $value = /\./
- ? "$_"
- : "${alias}.$_";
- push @colbits, { $key => $value };
- }
- }
+ # merge selectors together
+ for (qw/columns select as/) {
+ $attrs->{$_} = $self->_merge_attr($attrs->{$_}, delete $attrs->{"+$_"})
+ if $attrs->{$_} or $attrs->{"+$_"};
}
- # add the additional columns on
- foreach (qw{include_columns +columns}) {
- if ( $attrs->{$_} ) {
- my @list = ( ref($attrs->{$_}) eq 'ARRAY' )
- ? @{ delete $attrs->{$_} }
- : delete $attrs->{$_};
- for (@list) {
- if ( ref($_) eq 'HASH' ) {
- push @colbits, $_
- } else {
- my $key = ( split /\./, $_ )[-1];
- my $value = ( /\./ ? $_ : "$alias.$_" );
- push @colbits, { $key => $value };
+ # disassemble columns
+ my (@sel, @as);
+ if (my $cols = delete $attrs->{columns}) {
+ for my $c (ref $cols eq 'ARRAY' ? @$cols : $cols) {
+ if (ref $c eq 'HASH') {
+ for my $as (keys %$c) {
+ push @sel, $c->{$as};
+ push @as, $as;
}
}
+ else {
+ push @sel, $c;
+ push @as, $c;
+ }
}
}
- # start with initial select items
- if ( $attrs->{select} ) {
- $attrs->{select} =
- ( ref $attrs->{select} eq 'ARRAY' )
- ? [ @{ $attrs->{select} } ]
- : [ $attrs->{select} ];
+ # when trying to weed off duplicates later do not go past this point -
+ # everything added from here on is unbalanced "anyone's guess" stuff
+ my $dedup_stop_idx = $#as;
- if ( $attrs->{as} ) {
- $attrs->{as} =
- (
- ref $attrs->{as} eq 'ARRAY'
- ? [ @{ $attrs->{as} } ]
- : [ $attrs->{as} ]
- )
- } else {
- $attrs->{as} = [ map {
- m/^\Q${alias}.\E(.+)$/
- ? $1
- : $_
- } @{ $attrs->{select} }
- ]
- }
- }
- else {
- # otherwise we intialise select & as to empty
- $attrs->{select} = [];
- $attrs->{as} = [];
- }
+ push @as, @{ ref $attrs->{as} eq 'ARRAY' ? $attrs->{as} : [ $attrs->{as} ] }
+ if $attrs->{as};
+ push @sel, @{ ref $attrs->{select} eq 'ARRAY' ? $attrs->{select} : [ $attrs->{select} ] }
+ if $attrs->{select};
- # now add colbits to select/as
- push @{ $attrs->{select} }, map values %{$_}, @colbits;
- push @{ $attrs->{as} }, map keys %{$_}, @colbits;
+ # assume all unqualified selectors to apply to the current alias (legacy stuff)
+ for (@sel) {
+ $_ = (ref $_ or $_ =~ /\./) ? $_ : "$alias.$_";
+ }
- if ( my $adds = delete $attrs->{'+select'} ) {
- $adds = [$adds] unless ref $adds eq 'ARRAY';
- push @{ $attrs->{select} },
- map { /\./ || ref $_ ? $_ : "$alias.$_" } @$adds;
+ # disqualify all $alias.col as-bits (collapser mandated)
+ for (@as) {
+ $_ = ($_ =~ /^\Q$alias.\E(.+)$/) ? $1 : $_;
}
- if ( my $adds = delete $attrs->{'+as'} ) {
- $adds = [$adds] unless ref $adds eq 'ARRAY';
- push @{ $attrs->{as} }, @$adds;
+
+ # de-duplicate the result (remove *identical* select/as pairs)
+ # and also die on duplicate {as} pointing to different {select}s
+ # not using a c-style for as the condition is prone to shrinkage
+ my $seen;
+ my $i = 0;
+ while ($i <= $dedup_stop_idx) {
+ if ($seen->{"$sel[$i] \x00\x00 $as[$i]"}++) {
+ splice @sel, $i, 1;
+ splice @as, $i, 1;
+ $dedup_stop_idx--;
+ }
+ elsif ($seen->{$as[$i]}++) {
+ $self->throw_exception(
+ "inflate_result() alias '$as[$i]' specified twice with different SQL-side {select}-ors"
+ );
+ }
+ else {
+ $i++;
+ }
}
+ $attrs->{select} = \@sel;
+ $attrs->{as} = \@as;
+
$attrs->{from} ||= [{
- -source_handle => $source->handle,
- -alias => $self->{attrs}{alias},
+ -rsrc => $source,
+ -alias => $self->{attrs}{alias},
$self->{attrs}{alias} => $source->from,
}];
$self->throw_exception ('join/prefetch can not be used with a custom {from}')
if ref $attrs->{from} ne 'ARRAY';
- my $join = delete $attrs->{join} || {};
+ my $join = (delete $attrs->{join}) || {};
if ( defined $attrs->{prefetch} ) {
- $join = $self->_merge_attr( $join, $attrs->{prefetch} );
+ $join = $self->_merge_joinpref_attr( $join, $attrs->{prefetch} );
}
$attrs->{from} = # have to copy here to avoid corrupting the original
# subquery (since a group_by is present)
if (delete $attrs->{distinct}) {
if ($attrs->{group_by}) {
- carp ("Useless use of distinct on a grouped resultset ('distinct' is ignored when a 'group_by' is present)");
+ carp_unique ("Useless use of distinct on a grouped resultset ('distinct' is ignored when a 'group_by' is present)");
}
else {
- $attrs->{group_by} = [ grep { !ref($_) || (ref($_) ne 'HASH') } @{$attrs->{select}} ];
-
- # add any order_by parts that are not already present in the group_by
- # we need to be careful not to add any named functions/aggregates
- # i.e. select => [ ... { count => 'foo', -as 'foocount' } ... ]
- my %already_grouped = map { $_ => 1 } (@{$attrs->{group_by}});
-
- my $storage = $self->result_source->schema->storage;
-
- my $rs_column_list = $storage->_resolve_column_info ($attrs->{from});
-
- for my $chunk ($storage->_parse_order_by($attrs->{order_by})) {
- if ($rs_column_list->{$chunk} && not $already_grouped{$chunk}++) {
- push @{$attrs->{group_by}}, $chunk;
- }
- }
+ # distinct affects only the main selection part, not what prefetch may
+ # add below.
+ $attrs->{group_by} = $source->storage->_group_over_selection (
+ $attrs->{from},
+ $attrs->{select},
+ $attrs->{order_by},
+ );
}
}
- $attrs->{collapse} ||= {};
- if ($attrs->{prefetch}) {
+ # generate selections based on the prefetch helper
- if ( my $prefetch = delete $attrs->{prefetch} ) {
++ my $prefetch;
++ $prefetch = $self->_merge_joinpref_attr( {}, delete $attrs->{prefetch} )
++ if defined $attrs->{prefetch};
++
++ if ($prefetch) {
+
+ $self->throw_exception("Unable to prefetch, resultset contains an unnamed selector $attrs->{_dark_selector}{string}")
+ if $attrs->{_dark_selector};
+
- my $prefetch = $self->_merge_joinpref_attr( {}, delete $attrs->{prefetch} );
-
- my $prefetch_ordering = [];
+ $attrs->{collapse} = 1;
# this is a separate structure (we don't look in {from} directly)
# as the resolver needs to shift things off the lists to work
}
}
- my @prefetch =
- $source->_resolve_prefetch( $prefetch, $alias, $join_map, $prefetch_ordering, $attrs->{collapse} );
+ my @prefetch = $source->_resolve_prefetch( $prefetch, $alias, $join_map );
# we need to somehow mark which columns came from prefetch
- $attrs->{_prefetch_select} = [ map { $_->[0] } @prefetch ];
+ if (@prefetch) {
+ my $sel_end = $#{$attrs->{select}};
+ $attrs->{_prefetch_selector_range} = [ $sel_end + 1, $sel_end + @prefetch ];
+ }
- push @{ $attrs->{select} }, @{$attrs->{_prefetch_select}};
+ push @{ $attrs->{select} }, (map { $_->[0] } @prefetch);
push @{ $attrs->{as} }, (map { $_->[1] } @prefetch);
+ }
- push( @{$attrs->{order_by}}, @$prefetch_ordering );
- $attrs->{_collapse_order_by} = \@$prefetch_ordering;
+ # run through the resulting join structure (starting from our current slot)
+ # and unset collapse if proven unnecessary
+ if ($attrs->{collapse} && ref $attrs->{from} eq 'ARRAY') {
+
+ if (@{$attrs->{from}} > 1) {
+
+ # find where our table-spec starts and consider only things after us
+ my @fromlist = @{$attrs->{from}};
+ while (@fromlist) {
+ my $t = shift @fromlist;
+ $t = $t->[0] if ref $t eq 'ARRAY'; #me vs join from-spec mismatch
+ last if ($t->{-alias} && $t->{-alias} eq $alias);
+ }
+
+ for (@fromlist) {
+ $attrs->{collapse} = ! $_->[0]{-is_single}
+ and last;
+ }
+ }
+ else {
+ # no joins - no collapse
+ $attrs->{collapse} = 0;
+ }
}
+
# if both page and offset are specified, produce a combined offset
# even though it doesn't make much sense, this is what pre 081xx has
# been doing
}
}
- sub _merge_attr {
+ sub _merge_joinpref_attr {
my ($self, $orig, $import) = @_;
return $import unless defined($orig);
$position++;
}
my ($import_key) = ( ref $import_element eq 'HASH' ) ? keys %{$import_element} : ($import_element);
+ $import_key = '' if not defined $import_key;
if ($best_candidate->{score} == 0 || exists $seen_keys->{$import_key}) {
push( @{$orig}, $import_element );
$orig->[$best_candidate->{position}] = $import_element;
} elsif (ref $import_element eq 'HASH') {
my ($key) = keys %{$orig_best};
- $orig->[$best_candidate->{position}] = { $key => $self->_merge_attr($orig_best->{$key}, $import_element->{$key}) };
+ $orig->[$best_candidate->{position}] = { $key => $self->_merge_joinpref_attr($orig_best->{$key}, $import_element->{$key}) };
}
}
$seen_keys->{$import_key} = 1; # don't merge the same key twice
return $orig;
}
- sub result_source {
- my $self = shift;
+ {
+ my $hm;
- if (@_) {
- $self->_source_handle($_[0]->handle);
- } else {
- $self->_source_handle->resolve;
- }
+ sub _merge_attr {
+ $hm ||= do {
+ require Hash::Merge;
+ my $hm = Hash::Merge->new;
+
+ $hm->specify_behavior({
+ SCALAR => {
+ SCALAR => sub {
+ my ($defl, $defr) = map { defined $_ } (@_[0,1]);
+
+ if ($defl xor $defr) {
+ return [ $defl ? $_[0] : $_[1] ];
+ }
+ elsif (! $defl) {
+ return [];
+ }
+ elsif (__HM_DEDUP and $_[0] eq $_[1]) {
+ return [ $_[0] ];
+ }
+ else {
+ return [$_[0], $_[1]];
+ }
+ },
+ ARRAY => sub {
+ return $_[1] if !defined $_[0];
+ return $_[1] if __HM_DEDUP and List::Util::first { $_ eq $_[0] } @{$_[1]};
+ return [$_[0], @{$_[1]}]
+ },
+ HASH => sub {
+ return [] if !defined $_[0] and !keys %{$_[1]};
+ return [ $_[1] ] if !defined $_[0];
+ return [ $_[0] ] if !keys %{$_[1]};
+ return [$_[0], $_[1]]
+ },
+ },
+ ARRAY => {
+ SCALAR => sub {
+ return $_[0] if !defined $_[1];
+ return $_[0] if __HM_DEDUP and List::Util::first { $_ eq $_[1] } @{$_[0]};
+ return [@{$_[0]}, $_[1]]
+ },
+ ARRAY => sub {
+ my @ret = @{$_[0]} or return $_[1];
+ return [ @ret, @{$_[1]} ] unless __HM_DEDUP;
+ my %idx = map { $_ => 1 } @ret;
+ push @ret, grep { ! defined $idx{$_} } (@{$_[1]});
+ \@ret;
+ },
+ HASH => sub {
+ return [ $_[1] ] if ! @{$_[0]};
+ return $_[0] if !keys %{$_[1]};
+ return $_[0] if __HM_DEDUP and List::Util::first { $_ eq $_[1] } @{$_[0]};
+ return [ @{$_[0]}, $_[1] ];
+ },
+ },
+ HASH => {
+ SCALAR => sub {
+ return [] if !keys %{$_[0]} and !defined $_[1];
+ return [ $_[0] ] if !defined $_[1];
+ return [ $_[1] ] if !keys %{$_[0]};
+ return [$_[0], $_[1]]
+ },
+ ARRAY => sub {
+ return [] if !keys %{$_[0]} and !@{$_[1]};
+ return [ $_[0] ] if !@{$_[1]};
+ return $_[1] if !keys %{$_[0]};
+ return $_[1] if __HM_DEDUP and List::Util::first { $_ eq $_[0] } @{$_[1]};
+ return [ $_[0], @{$_[1]} ];
+ },
+ HASH => sub {
+ return [] if !keys %{$_[0]} and !keys %{$_[1]};
+ return [ $_[0] ] if !keys %{$_[1]};
+ return [ $_[1] ] if !keys %{$_[0]};
+ return [ $_[0] ] if $_[0] eq $_[1];
+ return [ $_[0], $_[1] ];
+ },
+ }
+ } => 'DBIC_RS_ATTR_MERGER');
+ $hm;
+ };
+
+ return $hm->merge ($_[1], $_[2]);
+ }
+ }
+
+ sub STORABLE_freeze {
+ my ($self, $cloning) = @_;
+ my $to_serialize = { %$self };
+
+ # A cursor in progress can't be serialized (and would make little sense anyway)
+ delete $to_serialize->{cursor};
+
+ # nor is it sensical to store a not-yet-fired-count pager
+ if ($to_serialize->{pager} and ref $to_serialize->{pager}{total_entries} eq 'CODE') {
+ delete $to_serialize->{pager};
+ }
+
+ Storable::nfreeze($to_serialize);
+ }
+
+ # need this hook for symmetry
+ sub STORABLE_thaw {
+ my ($self, $cloning, $serialized) = @_;
+
+ %$self = %{ Storable::thaw($serialized) };
+
+ $self;
}
+
=head2 throw_exception
See L<DBIx::Class::Schema/throw_exception> for details.
sub throw_exception {
my $self=shift;
- if (ref $self && $self->_source_handle->schema) {
- $self->_source_handle->schema->throw_exception(@_)
+ if (ref $self and my $rsrc = $self->result_source) {
+ $rsrc->throw_exception(@_)
}
else {
DBIx::Class::Exception->throw(@_);
C<select> as normal. (You may also use the C<cols> attribute, as in
earlier versions of DBIC.)
+ Essentially C<columns> does the same as L</select> and L</as>.
+
+ columns => [ 'foo', { bar => 'baz' } ]
+
+ is the same as
+
+ select => [qw/foo baz/],
+ as => [qw/foo bar/]
+
=head2 +columns
=over 4
column (or relationship) accessor, and 'name' is the name of the column
accessor in the related table.
+ B<NOTE:> You need to explicitly quote '+columns' when defining the attribute.
+ Not doing so causes Perl to incorrectly interpret +columns as a bareword with a
+ unary plus operator before it.
+
=head2 include_columns
=over 4
select => [
'name',
{ count => 'employeeid' },
- { sum => 'salary' }
+ { max => { length => 'name' }, -as => 'longest_name' }
]
});
- When you use function/stored procedure names and do not supply an C<as>
- attribute, the column names returned are storage-dependent. E.g. MySQL would
- return a column named C<count(employeeid)> in the above example.
+ # Equivalent SQL
+ SELECT name, COUNT( employeeid ), MAX( LENGTH( name ) ) AS longest_name FROM employee
- B<NOTE:> You will almost always need a corresponding 'as' entry when you use
- 'select'.
+ B<NOTE:> You will almost always need a corresponding L</as> attribute when you
+ use L</select>, to instruct DBIx::Class how to store the result of the column.
+ Also note that the L</as> attribute has nothing to do with the SQL-side 'AS'
+ identifier aliasing. You can however alias a function, so you can use it in
+ e.g. an C<ORDER BY> clause. This is done via the C<-as> B<select function
+ attribute> supplied as shown in the example above.
+
+ B<NOTE:> You need to explicitly quote '+select'/'+as' when defining the attributes.
+ Not doing so causes Perl to incorrectly interpret them as barewords with a
+ unary plus operator before them.
=head2 +select
=over 4
Indicates additional columns to be selected from storage. Works the same as
- L</select> but adds columns to the selection.
+ L</select> but adds columns to the default selection, instead of specifying
+ an explicit list.
=back
=back
- Indicates column names for object inflation. That is, C<as>
- indicates the name that the column can be accessed as via the
- C<get_column> method (or via the object accessor, B<if one already
- exists>). It has nothing to do with the SQL code C<SELECT foo AS bar>.
-
- The C<as> attribute is used in conjunction with C<select>,
- usually when C<select> contains one or more function or stored
- procedure names:
+ Indicates column names for object inflation. That is L</as> indicates the
+ slot name in which the column value will be stored within the
+ L<Row|DBIx::Class::Row> object. The value will then be accessible via this
+ identifier by the C<get_column> method (or via the object accessor B<if one
+ with the same name already exists>) as shown below. The L</as> attribute has
+ B<nothing to do> with the SQL-side C<AS>. See L</select> for details.
$rs = $schema->resultset('Employee')->search(undef, {
select => [
'name',
- { count => 'employeeid' }
+ { count => 'employeeid' },
+ { max => { length => 'name' }, -as => 'longest_name' }
],
- as => ['name', 'employee_count'],
+ as => [qw/
+ name
+ employee_count
+ max_name_length
+ /],
});
- my $employee = $rs->first(); # get the first Employee
-
If the object against which the search is performed already has an accessor
matching a column name specified in C<as>, the value can be retrieved using
the accessor as normal:
You can create your own accessors if required - see
L<DBIx::Class::Manual::Cookbook> for details.
- Please note: This will NOT insert an C<AS employee_count> into the SQL
- statement produced, it is used for internal access only. Thus
- attempting to use the accessor in an C<order_by> clause or similar
- will fail miserably.
-
- To get around this limitation, you can supply literal SQL to your
- C<select> attribute that contains the C<AS alias> text, e.g.
-
- select => [\'myfield AS alias']
-
=head2 join
=over 4
Simple prefetches will be joined automatically, so there is no need
for a C<join> attribute in the above search.
- C<prefetch> can be used with the following relationship types: C<belongs_to>,
- C<has_one> (or if you're using C<add_relationship>, any relationship declared
- with an accessor type of 'single' or 'filter'). A more complex example that
- prefetches an artists cds, the tracks on those cds, and the tags associated
- with that artist is given below (assuming many-to-many from artists to tags):
+ L</prefetch> can be used with any of the relationship types and
+ multiple prefetches can be specified together. Below is a more complex
+ example that prefetches a CD's artist, its liner notes (if present),
+ the cover image, the tracks on that cd, and the guests on those
+ tracks.
- my $rs = $schema->resultset('Artist')->search(
+ # Assuming:
+ My::Schema::CD->belongs_to( artist => 'My::Schema::Artist' );
+ My::Schema::CD->might_have( liner_note => 'My::Schema::LinerNotes' );
+ My::Schema::CD->has_one( cover_image => 'My::Schema::Artwork' );
+ My::Schema::CD->has_many( tracks => 'My::Schema::Track' );
+
+ My::Schema::Artist->belongs_to( record_label => 'My::Schema::RecordLabel' );
+
+ My::Schema::Track->has_many( guests => 'My::Schema::Guest' );
+
+
+ my $rs = $schema->resultset('CD')->search(
+ undef,
+ {
+ prefetch => [
+ { artist => 'record_label'}, # belongs_to => belongs_to
+ 'liner_note', # might_have
+ 'cover_image', # has_one
+ { tracks => 'guests' }, # has_many => has_many
+ ]
+ }
+ );
+
+ This will produce SQL like the following:
+
+ SELECT cd.*, artist.*, record_label.*, liner_note.*, cover_image.*,
+ tracks.*, guests.*
+ FROM cd me
+ JOIN artist artist
+ ON artist.artistid = me.artistid
+ JOIN record_label record_label
+ ON record_label.labelid = artist.labelid
+ LEFT JOIN track tracks
+ ON tracks.cdid = me.cdid
+ LEFT JOIN guest guests
+ ON guests.trackid = tracks.trackid
+ LEFT JOIN liner_notes liner_note
+ ON liner_note.cdid = me.cdid
+ JOIN cd_artwork cover_image
+ ON cover_image.cdid = me.cdid
+ ORDER BY tracks.cd
+
+ Now the C<artist>, C<record_label>, C<liner_note>, C<cover_image>,
+ C<tracks>, and C<guests> of the CD will all be available through the
+ relationship accessors without the need for additional queries to the
+ database.
+
+ However, there is one caveat to be observed: it can be dangerous to
+ prefetch more than one L<has_many|DBIx::Class::Relationship/has_many>
+ relationship on a given level. e.g.:
+
+ my $rs = $schema->resultset('CD')->search(
undef,
{
prefetch => [
- { cds => 'tracks' },
- { artist_tags => 'tags' }
+ 'tracks', # has_many
+ { cd_to_producer => 'producer' }, # has_many => belongs_to (i.e. m2m)
]
}
);
+ In fact, C<DBIx::Class> will emit the following warning:
- B<NOTE:> If you specify a C<prefetch> attribute, the C<join> and C<select>
- attributes will be ignored.
+ Prefetching multiple has_many rels tracks and cd_to_producer at top
+ level will explode the number of row objects retrievable via ->next
+ or ->all. Use at your own risk.
- B<CAVEATs>: Prefetch does a lot of deep magic. As such, it may not behave
- exactly as you might expect.
+ The collapser currently can't identify duplicate tuples for multiple
+ L<has_many|DBIx::Class::Relationship/has_many> relationships and as a
+ result the second L<has_many|DBIx::Class::Relationship/has_many>
+ relation could contain redundant objects.
+
+ =head3 Using L</prefetch> with L</join>
+
+ L</prefetch> implies a L</join> with the equivalent argument, and is
+ properly merged with any existing L</join> specification. So the
+ following:
+
+ my $rs = $schema->resultset('CD')->search(
+ {'record_label.name' => 'Music Product Ltd.'},
+ {
+ join => {artist => 'record_label'},
+ prefetch => 'artist',
+ }
+ );
+
+ ... will work, searching on the record label's name, but only
+ prefetching the C<artist>.
+
+ =head3 Using L</prefetch> with L</select> / L</+select> / L</as> / L</+as>
+
+ L</prefetch> implies a L</+select>/L</+as> with the fields of the
+ prefetched relations. So given:
+
+ my $rs = $schema->resultset('CD')->search(
+ undef,
+ {
+ select => ['cd.title'],
+ as => ['cd_title'],
+ prefetch => 'artist',
+ }
+ );
+
+ The L</select> becomes: C<'cd.title', 'artist.*'> and the L</as>
+ becomes: C<'cd_title', 'artist.*'>.
+
+ =head3 CAVEATS
+
+ Prefetch does a lot of deep magic. As such, it may not behave exactly
+ as you might expect.
=over 4
identical to creating a non-paged resultset and then calling ->page($page)
on it.
- If L<rows> attribute is not specified it defaults to 10 rows per page.
+ If L</rows> attribute is not specified it defaults to 10 rows per page.
When you have a paged resultset, L</count> will only return the number
of rows in the page. To get the total, use the L</pager> and call
Specifies the (zero-based) row number for the first row to be returned, or the
of the first row of the first page if paging is used.
+ =head2 software_limit
+
+ =over 4
+
+ =item Value: (0 | 1)
+
+ =back
+
+ When combined with L</rows> and/or L</offset> the generated SQL will not
+ include any limit dialect stanzas. Instead the entire result will be selected
+ as if no limits were specified, and DBIC will perform the limit locally, by
+ artificially advancing and finishing the resulting L</cursor>.
+
+ This is the recommended way of performing resultset limiting when no sane RDBMS
+ implementation is available (e.g.
+ L<Sybase ASE|DBIx::Class::Storage::DBI::Sybase::ASE> using the
+ L<Generic Sub Query|DBIx::Class::SQLMaker::LimitDialects/GenericSubQ> hack)
+
=head2 group_by
=over 4
ORDER BY. It is applied after the grouping calculations have been
done.
- having => { 'count(employee)' => { '>=', 100 } }
+ having => { 'count_employee' => { '>=', 100 } }
+
+ or with an in-place function in which case literal SQL is required:
+
+ having => \[ 'count(employee) >= ?', [ count => 100 ] ]
=head2 distinct
Can be overridden by passing C<< { where => undef } >> as an attribute
to a resultset.
+ For more complicated where clauses see L<SQL::Abstract/WHERE CLAUSES>.
+
=back
=head2 cache
use warnings;
use base 'DBIx::Class';
-
- use Carp::Clan qw/^DBIx::Class/;
+ use DBIx::Class::Carp;
use DBIx::Class::Exception;
- use List::Util;
+
+ # not importing first() as it will clash with our own method
+ use List::Util ();
=head1 NAME
my $orig_attrs = $rs->_resolved_attrs;
my $alias = $rs->current_source_alias;
+ my $rsrc = $rs->result_source;
# If $column can be found in the 'as' list of the parent resultset, use the
# corresponding element of its 'select' list (to keep any custom column
my $as_index = List::Util::first { ($as_list->[$_] || "") eq $column } 0..$#$as_list;
my $select = defined $as_index ? $select_list->[$as_index] : $column;
- my $new_parent_rs;
+ my ($new_parent_rs, $colmap);
+ for ($rsrc->columns, $column) {
+ if ($_ =~ /^ \Q$alias\E \. ([^\.]+) $ /x) {
+ $colmap->{$_} = $1;
+ }
+ elsif ($_ !~ /\./) {
+ $colmap->{"$alias.$_"} = $_;
+ $colmap->{$_} = $_;
+ }
+ }
+
# analyze the order_by, and see if it is done over a function/nonexistent column
# if this is the case we will need to wrap a subquery since the result of RSC
# *must* be a single column select
- my %collist = map
- { $_ => 1, ($_ =~ /\./) ? () : ( "$alias.$_" => 1 ) }
- ($rs->result_source->columns, $column)
- ;
if (
scalar grep
- { ! $collist{$_} }
- ( $rs->result_source->schema->storage->_parse_order_by ($orig_attrs->{order_by} ) )
+ { ! exists $colmap->{$_->[0]} }
+ ( $rsrc->schema->storage->_extract_order_criteria ($orig_attrs->{order_by} ) )
) {
# nuke the prefetch before collapsing to sql
my $subq_rs = $rs->search;
- $subq_rs->{attrs}{join} = $subq_rs->_merge_attr( $subq_rs->{attrs}{join}, delete $subq_rs->{attrs}{prefetch} );
+ $subq_rs->{attrs}{join} = $subq_rs->_merge_joinpref_attr( $subq_rs->{attrs}{join}, delete $subq_rs->{attrs}{prefetch} );
$new_parent_rs = $subq_rs->as_subselect_rs;
}
# rs via the _resolved_attrs trick - we need to retain the separation between
# +select/+as and select/as. At the same time we want to preserve any joins that the
# prefetch would otherwise generate.
- $new_attrs->{join} = $rs->_merge_attr( $new_attrs->{join}, delete $new_attrs->{prefetch} );
+ $new_attrs->{join} = $rs->_merge_joinpref_attr( $new_attrs->{join}, delete $new_attrs->{prefetch} );
# {collapse} would mean a has_many join was injected, which in turn means
# we need to group *IF WE CAN* (only if the column in question is unique)
- if (!$orig_attrs->{group_by} && keys %{$orig_attrs->{collapse}}) {
+ if (!$orig_attrs->{group_by} && $orig_attrs->{collapse}) {
- # scan for a constraint that would contain our column only - that'd be proof
- # enough it is unique
- my $constraints = { $rs->result_source->unique_constraints };
- for my $constraint_columns ( values %$constraints ) {
-
- next unless @$constraint_columns == 1;
-
- my $col = $constraint_columns->[0];
- my $fqcol = join ('.', $new_attrs->{alias}, $col);
-
- if ($col eq $select or $fqcol eq $select) {
- $new_attrs->{group_by} = [ $select ];
- delete $new_attrs->{distinct}; # it is ignored when group_by is present
- last;
- }
+ if ($colmap->{$select} and $rsrc->_identifying_column_set([$colmap->{$select}])) {
+ $new_attrs->{group_by} = [ $select ];
+ delete $new_attrs->{distinct}; # it is ignored when group_by is present
}
-
- if (!$new_attrs->{group_by}) {
+ else {
carp (
"Attempting to retrieve non-unique column '$column' on a resultset containing "
. 'one-to-many joins will return duplicate results.'
Returns the next value of the column in the resultset (or C<undef> if
there is none).
- Much like L<DBIx::Class::ResultSet/next> but just returning the
+ Much like L<DBIx::Class::ResultSet/next> but just returning the
one value.
=cut
See L<DBIx::Class::Schema/throw_exception> for details.
- =cut
+ =cut
sub throw_exception {
my $self=shift;
use strict;
use warnings;
+use base qw/DBIx::Class/;
+
use DBIx::Class::ResultSet;
use DBIx::Class::ResultSourceHandle;
use DBIx::Class::Exception;
- use Carp::Clan qw/^DBIx::Class/;
-
- __PACKAGE__->mk_group_accessors('simple' => qw/_ordered_columns
- _columns _primaries _unique_constraints name resultset_attributes
- schema from _relationships column_info_from_storage source_info
- source_name sqlt_deploy_callback/);
-
- __PACKAGE__->mk_group_accessors('component_class' => qw/resultset_class
- result_class/);
+ use DBIx::Class::Carp;
+ use DBIx::Class::GlobalDestruction;
+ use Try::Tiny;
+ use List::Util 'first';
+ use Scalar::Util qw/blessed weaken isweak/;
+ use namespace::clean;
+
-use base qw/DBIx::Class/;
-
+ __PACKAGE__->mk_group_accessors(simple => qw/
+ source_name name source_info
+ _ordered_columns _columns _primaries _unique_constraints
+ _relationships resultset_attributes
+ column_info_from_storage
+ /);
+
+ __PACKAGE__->mk_group_accessors(component_class => qw/
+ resultset_class
+ result_class
+ /);
+
+ __PACKAGE__->mk_classdata( sqlt_deploy_callback => 'default_sqlt_deploy_hook' );
=head1 NAME
# Create a table based result source, in a result class.
- package MyDB::Schema::Result::Artist;
+ package MyApp::Schema::Result::Artist;
use base qw/DBIx::Class::Core/;
__PACKAGE__->table('artist');
__PACKAGE__->add_columns(qw/ artistid name /);
__PACKAGE__->set_primary_key('artistid');
- __PACKAGE__->has_many(cds => 'MyDB::Schema::Result::CD');
+ __PACKAGE__->has_many(cds => 'MyApp::Schema::Result::CD');
1;
# Create a query (view) based result source, in a result class
- package MyDB::Schema::Result::Year2000CDs;
+ package MyApp::Schema::Result::Year2000CDs;
use base qw/DBIx::Class::Core/;
__PACKAGE__->load_components('InflateColumn::DateTime');
$new->{_relationships} = { %{$new->{_relationships}||{}} };
$new->{name} ||= "!!NAME NOT SET!!";
$new->{_columns_info_loaded} ||= 0;
- $new->{sqlt_deploy_callback} ||= "default_sqlt_deploy_hook";
return $new;
}
L<DBIx::Class::Row> objects. You can change the name of the accessor
by supplying an L</accessor> in the column_info hash.
+ If a column name beginning with a plus sign ('+col1') is provided, the
+ attributes provided will be merged with any existing attributes for the
+ column, with the new attributes taking precedence in the case that an
+ attribute already exists. Using this without a hashref
+ (C<< $source->add_columns(qw/+col1 +col2/) >>) is legal, but useless --
+ it does the same thing it would do without the plus.
+
The contents of the column_info are not set in stone. The following
keys are currently recognised/used by DBIx::Class:
This contains the column type. It is automatically filled if you use the
L<SQL::Translator::Producer::DBIx::Class::File> producer, or the
- L<DBIx::Class::Schema::Loader> module.
+ L<DBIx::Class::Schema::Loader> module.
Currently there is no standard set of values for the data_type. Use
whatever your database supports.
will attempt to retrieve the name of the sequence from the database
automatically.
+ =item retrieve_on_insert
+
+ { retrieve_on_insert => 1 }
+
+ For every column where this is set to true, DBIC will retrieve the RDBMS-side
+ value upon a new row insertion (normally only the autoincrement PK is
+ retrieved on insert). C<INSERT ... RETURNING> is used automatically if
+ supported by the underlying storage, otherwise an extra SELECT statement is
+ executed to retrieve the missing data.
+
=item auto_nextval
+ { auto_nextval => 1 }
+
Set this to a true value for a column whose value is retrieved automatically
from a sequence or function (if supported by your Storage driver.) For a
sequence, if you do not use a trigger to get the nextval, you have to set the
L</sequence> value as well.
Also set this for MSSQL columns with the 'uniqueidentifier'
- L<DBIx::Class::ResultSource/data_type> whose values you want to automatically
- generate using C<NEWID()>, unless they are a primary key in which case this will
- be done anyway.
+ L<data_type|DBIx::Class::ResultSource/data_type> whose values you want to
+ automatically generate using C<NEWID()>, unless they are a primary key in which
+ case this will be done anyway.
=item extra
my @added;
my $columns = $self->_columns;
while (my $col = shift @cols) {
+ my $column_info = {};
+ if ($col =~ s/^\+//) {
+ $column_info = $self->column_info($col);
+ }
+
# If next entry is { ... } use that for the column info, if not
# use an empty hashref
- my $column_info = ref $cols[0] ? shift(@cols) : {};
+ if (ref $cols[0]) {
+ my $new_info = shift(@cols);
+ %$column_info = (%$column_info, %$new_info);
+ }
push(@added, $col) unless exists $columns->{$col};
$columns->{$col} = $column_info;
}
my ($self, $column) = @_;
$self->throw_exception("No such column $column")
unless exists $self->_columns->{$column};
- #warn $self->{_columns_info_loaded}, "\n";
+
if ( ! $self->_columns->{$column}{data_type}
- and $self->column_info_from_storage
and ! $self->{_columns_info_loaded}
- and $self->schema and $self->storage )
+ and $self->column_info_from_storage
+ and my $stor = try { $self->storage } )
{
$self->{_columns_info_loaded}++;
- my $info = {};
- my $lc_info = {};
- # eval for the case of storage without table
- eval { $info = $self->storage->columns_info_for( $self->from ) };
- unless ($@) {
- for my $realcol ( keys %{$info} ) {
- $lc_info->{lc $realcol} = $info->{$realcol};
- }
+
+ # try for the case of storage without table
+ try {
+ my $info = $stor->columns_info_for( $self->from );
+ my $lc_info = { map
+ { (lc $_) => $info->{$_} }
+ ( keys %$info )
+ };
+
foreach my $col ( keys %{$self->_columns} ) {
$self->_columns->{$col} = {
%{ $self->_columns->{$col} },
%{ $info->{$col} || $lc_info->{lc $col} || {} }
};
}
- }
+ };
}
+
return $self->_columns->{$column};
}
return @{$self->{_ordered_columns}||[]};
}
+ =head2 columns_info
+
+ =over
+
+ =item Arguments: \@colnames ?
+
+ =item Return value: Hashref of column name/info pairs
+
+ =back
+
+ my $columns_info = $source->columns_info;
+
+ Like L</column_info> but returns information for the requested columns. If
+ the optional column-list arrayref is omitted it returns info on all columns
+ currently defined on the ResultSource via L</add_columns>.
+
+ =cut
+
+ sub columns_info {
+ my ($self, $columns) = @_;
+
+ my $colinfo = $self->_columns;
+
+ if (
+ first { ! $_->{data_type} } values %$colinfo
+ and
+ ! $self->{_columns_info_loaded}
+ and
+ $self->column_info_from_storage
+ and
+ my $stor = try { $self->storage }
+ ) {
+ $self->{_columns_info_loaded}++;
+
+ # try for the case of storage without table
+ try {
+ my $info = $stor->columns_info_for( $self->from );
+ my $lc_info = { map
+ { (lc $_) => $info->{$_} }
+ ( keys %$info )
+ };
+
+ foreach my $col ( keys %$colinfo ) {
+ $colinfo->{$col} = {
+ %{ $colinfo->{$col} },
+ %{ $info->{$col} || $lc_info->{lc $col} || {} }
+ };
+ }
+ };
+ }
+
+ my %ret;
+
+ if ($columns) {
+ for (@$columns) {
+ if (my $inf = $colinfo->{$_}) {
+ $ret{$_} = $inf;
+ }
+ else {
+ $self->throw_exception( sprintf (
+ "No such column '%s' on source %s",
+ $_,
+ $self->source_name,
+ ));
+ }
+ }
+ }
+ else {
+ %ret = %$colinfo;
+ }
+
+ return \%ret;
+ }
+
=head2 remove_columns
=over
Additionally, defines a L<unique constraint|add_unique_constraint>
named C<primary>.
- The primary key columns are used by L<DBIx::Class::PK::Auto> to
- retrieve automatically created values from the database. They are also
- used as default joining columns when specifying relationships, see
- L<DBIx::Class::Relationship>.
+ Note: you normally do want to define a primary key on your sources
+ B<even if the underlying database table does not have a primary key>.
+ See
+ L<DBIx::Class::Manual::Intro/The Significance and Importance of Primary Keys>
+ for more info.
=cut
return @{shift->_primaries||[]};
}
+ # a helper method that will automatically die with a descriptive message if
+ # no pk is defined on the source in question. For internal use to save
+ # on if @pks... boilerplate
sub _pri_cols {
my $self = shift;
my @pcols = $self->primary_columns
or $self->throw_exception (sprintf(
- 'Operation requires a primary key to be declared on %s via set_primary_key',
- $self->source_name,
+ "Operation requires a primary key to be declared on '%s' via set_primary_key",
+ # source_name is set only after schema-registration
+ $self->source_name || $self->result_class || $self->name || 'Unknown source...?',
));
return @pcols;
}
+ =head2 sequence
+
+ Manually define the correct sequence for your table, to avoid the overhead
+ associated with looking up the sequence automatically. The supplied sequence
+ will be applied to the L</column_info> of each L<primary_key|/set_primary_key>
+
+ =over 4
+
+ =item Arguments: $sequence_name
+
+ =item Return value: undefined
+
+ =back
+
+ =cut
+
+ sub sequence {
+ my ($self,$seq) = @_;
+
+ my @pks = $self->primary_columns
+ or return;
+
+ $_->{sequence} = $seq
+ for values %{ $self->columns_info (\@pks) };
+ }
+
+
=head2 add_unique_constraint
=over 4
sub add_unique_constraint {
my $self = shift;
+
+ if (@_ > 2) {
+ $self->throw_exception(
+ 'add_unique_constraint() does not accept multiple constraints, use '
+ . 'add_unique_constraints() instead'
+ );
+ }
+
my $cols = pop @_;
- my $name = shift;
+ if (ref $cols ne 'ARRAY') {
+ $self->throw_exception (
+ 'Expecting an arrayref of constraint columns, got ' . ($cols||'NOTHING')
+ );
+ }
+
+ my $name = shift @_;
$name ||= $self->name_unique_constraint($cols);
$self->_unique_constraints(\%unique_constraints);
}
+ =head2 add_unique_constraints
+
+ =over 4
+
+ =item Arguments: @constraints
+
+ =item Return value: undefined
+
+ =back
+
+ Declare multiple unique constraints on this source.
+
+ __PACKAGE__->add_unique_constraints(
+ constraint_name1 => [ qw/column1 column2/ ],
+ constraint_name2 => [ qw/column2 column3/ ],
+ );
+
+ Alternatively, you can specify only the columns:
+
+ __PACKAGE__->add_unique_constraints(
+ [ qw/column1 column2/ ],
+ [ qw/column3 column4/ ]
+ );
+
+ This will result in unique constraints named C<table_column1_column2> and
+ C<table_column3_column4>, where C<table> is replaced with the table name.
+
+ Throws an error if any of the given column names do not yet exist on
+ the result source.
+
+ See also L</add_unique_constraint>.
+
+ =cut
+
+ sub add_unique_constraints {
+ my $self = shift;
+ my @constraints = @_;
+
+ if ( !(@constraints % 2) && first { ref $_ ne 'ARRAY' } @constraints ) {
+ # with constraint name
+ while (my ($name, $constraint) = splice @constraints, 0, 2) {
+ $self->add_unique_constraint($name => $constraint);
+ }
+ }
+ else {
+ # no constraint name
+ foreach my $constraint (@constraints) {
+ $self->add_unique_constraint($constraint);
+ }
+ }
+ }
+
=head2 name_unique_constraint
=over 4
- =item Arguments: @colnames
+ =item Arguments: \@colnames
=item Return value: Constraint name
=back
$source->table('mytable');
- $source->name_unique_constraint('col1', 'col2');
+ $source->name_unique_constraint(['col1', 'col2']);
# returns
'mytable_col1_col2'
=over
- =item Arguments: $callback
+ =item Arguments: $callback_name | \&callback_code
+
+ =item Return value: $callback_name | \&callback_code
=back
__PACKAGE__->sqlt_deploy_callback('mycallbackmethod');
+ or
+
+ __PACKAGE__->sqlt_deploy_callback(sub {
+ my ($source_instance, $sqlt_table) = @_;
+ ...
+ } );
+
An accessor to set a callback to be called during deployment of
the schema via L<DBIx::Class::Schema/create_ddl_dir> or
L<DBIx::Class::Schema/deploy>.
The callback can be set as either a code reference or the name of a
method in the current result class.
- If not set, the L</default_sqlt_deploy_hook> is called.
+ Defaults to L</default_sqlt_deploy_hook>.
Your callback will be passed the $source object representing the
ResultSource instance being deployed, and the
=head2 default_sqlt_deploy_hook
- =over
-
- =item Arguments: $source, $sqlt_table
-
- =item Return value: undefined
-
- =back
-
- This is the sensible default for L</sqlt_deploy_callback>.
-
- If a method named C<sqlt_deploy_hook> exists in your Result class, it
- will be called and passed the current C<$source> and the
- C<$sqlt_table> being deployed.
+ This is the default deploy hook implementation which checks if your
+ current Result class has a C<sqlt_deploy_hook> method, and if present
+ invokes it B<on the Result class directly>. This is to preserve the
+ semantics of C<sqlt_deploy_hook> which was originally designed to expect
+ the Result class name and the
+ L<$sqlt_table instance|SQL::Translator::Schema::Table> of the table being
+ deployed.
=cut
'call it on the schema instead.'
) if scalar @_;
- return $self->resultset_class->new(
+ $self->resultset_class->new(
$self,
{
+ try { %{$self->schema->default_resultset_attributes} },
%{$self->{resultset_attributes}},
- %{$self->schema->default_resultset_attributes}
},
);
}
+ =head2 name
+
+ =over 4
+
+ =item Arguments: None
+
+ =item Return value: $name
+
+ =back
+
+ Returns the name of the result source, which will typically be the table
+ name. This may be a scalar reference if the result source has a non-standard
+ name.
+
=head2 source_name
=over 4
retrieval from this source. In the case of a database, the required FROM
clause contents.
+ =cut
+
+ sub from { die 'Virtual method!' }
+
=head2 schema
=over 4
- =item Arguments: None
+ =item Arguments: $schema
=item Return value: A schema object
my $schema = $source->schema();
- Returns the L<DBIx::Class::Schema> object that this result source
- belongs to.
+ Sets and/or returns the L<DBIx::Class::Schema> object to which this
+ result source instance has been attached to.
+
+ =cut
+
+ sub schema {
+ if (@_ > 1) {
+ $_[0]->{schema} = $_[1];
+ }
+ else {
+ $_[0]->{schema} || do {
+ my $name = $_[0]->{source_name} || '_unnamed_';
+ my $err = 'Unable to perform storage-dependent operations with a detached result source '
+ . "(source '$name' is not associated with a schema).";
+
+ $err .= ' You need to use $schema->thaw() or manually set'
+ . ' $DBIx::Class::ResultSourceHandle::thaw_schema while thawing.'
+ if $_[0]->{_detached_thaw};
+
+ DBIx::Class::Exception->throw($err);
+ };
+ }
+ }
=head2 storage
return $self;
- # XXX disabled. doesn't work properly currently. skip in tests.
+ # XXX disabled. doesn't work properly currently. skip in tests.
my $f_source = $self->schema->source($f_source_name);
unless ($f_source) {
}
return unless $f_source; # Can't test rel without f_source
- eval { $self->_resolve_join($rel, 'me', {}, []) };
-
- if ($@) { # If the resolve failed, back out and re-throw the error
- delete $rels{$rel}; #
+ try { $self->_resolve_join($rel, 'me', {}, []) }
+ catch {
+ # If the resolve failed, back out and re-throw the error
+ delete $rels{$rel};
$self->_relationships(\%rels);
- $self->throw_exception("Error creating relationship $rel: $@");
- }
+ $self->throw_exception("Error creating relationship $rel: $_");
+ };
+
1;
}
sub reverse_relationship_info {
my ($self, $rel) = @_;
- my $rel_info = $self->relationship_info($rel);
+
+ my $rel_info = $self->relationship_info($rel)
+ or $self->throw_exception("No such relationship '$rel'");
+
my $ret = {};
return $ret unless ((ref $rel_info->{cond}) eq 'HASH');
- my @cond = keys(%{$rel_info->{cond}});
- my @refkeys = map {/^\w+\.(\w+)$/} @cond;
- my @keys = map {$rel_info->{cond}->{$_} =~ /^\w+\.(\w+)$/} @cond;
+ my $stripped_cond = $self->__strip_relcond ($rel_info->{cond});
- # Get the related result source for this relationship
- my $othertable = $self->related_source($rel);
+ my $rsrc_schema_moniker = $self->source_name
+ if try { $self->schema };
+
+ # this may be a partial schema or something else equally esoteric
+ my $other_rsrc = try { $self->related_source($rel) }
+ or return $ret;
# Get all the relationships for that source that related to this source
# whose foreign column set are our self columns on $rel and whose self
- # columns are our foreign columns on $rel.
- my @otherrels = $othertable->relationships();
- my $otherrelationship;
- foreach my $otherrel (@otherrels) {
- my $otherrel_info = $othertable->relationship_info($otherrel);
+ # columns are our foreign columns on $rel
+ foreach my $other_rel ($other_rsrc->relationships) {
- my $back = $othertable->related_source($otherrel);
- next unless $back->source_name eq $self->source_name;
+ # only consider stuff that points back to us
+ # "us" here is tricky - if we are in a schema registration, we want
+ # to use the source_names, otherwise we will use the actual classes
- my @othertestconds;
+ # the schema may be partial
+ my $roundtrip_rsrc = try { $other_rsrc->related_source($other_rel) }
+ or next;
- if (ref $otherrel_info->{cond} eq 'HASH') {
- @othertestconds = ($otherrel_info->{cond});
- }
- elsif (ref $otherrel_info->{cond} eq 'ARRAY') {
- @othertestconds = @{$otherrel_info->{cond}};
+ if ($rsrc_schema_moniker and try { $roundtrip_rsrc->schema } ) {
+ next unless $rsrc_schema_moniker eq $roundtrip_rsrc->source_name;
}
else {
- next;
+ next unless $self->result_class eq $roundtrip_rsrc->result_class;
}
- foreach my $othercond (@othertestconds) {
- my @other_cond = keys(%$othercond);
- my @other_refkeys = map {/^\w+\.(\w+)$/} @other_cond;
- my @other_keys = map {$othercond->{$_} =~ /^\w+\.(\w+)$/} @other_cond;
- next if (!$self->_compare_relationship_keys(\@refkeys, \@other_keys) ||
- !$self->_compare_relationship_keys(\@other_refkeys, \@keys));
- $ret->{$otherrel} = $otherrel_info;
- }
+ my $other_rel_info = $other_rsrc->relationship_info($other_rel);
+
+ # this can happen when we have a self-referential class
+ next if $other_rel_info eq $rel_info;
+
+ next unless ref $other_rel_info->{cond} eq 'HASH';
+ my $other_stripped_cond = $self->__strip_relcond($other_rel_info->{cond});
+
+ $ret->{$other_rel} = $other_rel_info if (
+ $self->_compare_relationship_keys (
+ [ keys %$stripped_cond ], [ values %$other_stripped_cond ]
+ )
+ and
+ $self->_compare_relationship_keys (
+ [ values %$stripped_cond ], [ keys %$other_stripped_cond ]
+ )
+ );
}
+
return $ret;
}
+ # all this does is remove the foreign/self prefix from a condition
+ sub __strip_relcond {
+ +{
+ map
+ { map { /^ (?:foreign|self) \. (\w+) $/x } ($_, $_[1]{$_}) }
+ keys %{$_[1]}
+ }
+ }
+
sub compare_relationship_keys {
carp 'compare_relationship_keys is a private method, stop calling it';
my $self = shift;
# Returns true if both sets of keynames are the same, false otherwise.
sub _compare_relationship_keys {
- my ($self, $keys1, $keys2) = @_;
-
- # Make sure every keys1 is in keys2
- my $found;
- foreach my $key (@$keys1) {
- $found = 0;
- foreach my $prim (@$keys2) {
- if ($prim eq $key) {
- $found = 1;
- last;
- }
- }
- last unless $found;
- }
+ # my ($self, $keys1, $keys2) = @_;
+ return
+ join ("\x00", sort @{$_[1]})
+ eq
+ join ("\x00", sort @{$_[2]})
+ ;
+ }
- # Make sure every key2 is in key1
- if ($found) {
- foreach my $prim (@$keys2) {
- $found = 0;
- foreach my $key (@$keys1) {
- if ($prim eq $key) {
- $found = 1;
- last;
- }
- }
- last unless $found;
+ # optionally takes either an arrayref of column names, or a hashref of already
+ # retrieved colinfos
+ # returns an arrayref of column names of the shortest unique constraint
+ # (matching some of the input if any), giving preference to the PK
+ sub _identifying_column_set {
+ my ($self, $cols) = @_;
+
+ my %unique = $self->unique_constraints;
+ my $colinfos = ref $cols eq 'HASH' ? $cols : $self->columns_info($cols||());
+
+ # always prefer the PK first, and then shortest constraints first
+ USET:
+ for my $set (delete $unique{primary}, sort { @$a <=> @$b } (values %unique) ) {
+ next unless $set && @$set;
+
+ for (@$set) {
+ next USET unless ($colinfos->{$_} && !$colinfos->{$_}{is_nullable} );
}
+
+ # copy so we can mangle it at will
+ return [ @$set ];
}
- return $found;
+ return undef;
}
# Returns the {from} structure used to express JOIN conditions
$jpath = [@$jpath]; # copy
- if (not defined $join) {
+ if (not defined $join or not length $join) {
return ();
}
elsif (ref $join eq 'ARRAY') {
for my $rel (keys %$join) {
my $rel_info = $self->relationship_info($rel)
- or $self->throw_exception("No such relationship ${rel}");
+ or $self->throw_exception("No such relationship '$rel' on " . $self->source_name);
my $force_left = $parent_force_left;
$force_left ||= lc($rel_info->{attrs}{join_type}||'') eq 'left';
);
my $rel_info = $self->relationship_info($join)
- or $self->throw_exception("No such relationship ${join}");
+ or $self->throw_exception("No such relationship $join on " . $self->source_name);
my $rel_src = $self->related_source($join);
return [ { $as => $rel_src->from,
- -source_handle => $rel_src->handle,
+ -rsrc => $rel_src,
-join_type => $parent_force_left
? 'left'
: $rel_info->{attrs}{join_type}
-is_single => (
$rel_info->{attrs}{accessor}
&&
- List::Util::first { $rel_info->{attrs}{accessor} eq $_ } (qw/single filter/)
+ first { $rel_info->{attrs}{accessor} eq $_ } (qw/single filter/)
),
-alias => $as,
-relation_chain_depth => $seen->{-relation_chain_depth} || 0,
},
- $self->_resolve_condition($rel_info->{cond}, $as, $alias) ];
+ scalar $self->_resolve_condition($rel_info->{cond}, $as, $alias, $join)
+ ];
}
}
$self->_resolve_condition (@_);
}
- # Resolves the passed condition to a concrete query fragment. If given an alias,
- # returns a join condition; if given an object, inverts that object to produce
- # a related conditional from that object.
our $UNRESOLVABLE_CONDITION = \ '1 = 0';
+ # Resolves the passed condition to a concrete query fragment and a flag
+ # indicating whether this is a cross-table condition. Also an optional
+ # list of non-trivial values (normally conditions) returned as a part
+ # of a joinfree condition hash
sub _resolve_condition {
- my ($self, $cond, $as, $for) = @_;
- if (ref $cond eq 'HASH') {
+ my ($self, $cond, $as, $for, $relname) = @_;
+
+ my $obj_rel = !!blessed $for;
+
+ if (ref $cond eq 'CODE') {
+ my $relalias = $obj_rel ? 'me' : $as;
+
+ my ($crosstable_cond, $joinfree_cond) = $cond->({
+ self_alias => $obj_rel ? $as : $for,
+ foreign_alias => $relalias,
+ self_resultsource => $self,
+ foreign_relname => $relname || ($obj_rel ? $as : $for),
+ self_rowobj => $obj_rel ? $for : undef
+ });
+
+ my $cond_cols;
+ if ($joinfree_cond) {
+
+ # FIXME sanity check until things stabilize, remove at some point
+ $self->throw_exception (
+ "A join-free condition returned for relationship '$relname' without a row-object to chain from"
+ ) unless $obj_rel;
+
+ # FIXME another sanity check
+ if (
+ ref $joinfree_cond ne 'HASH'
+ or
+ first { $_ !~ /^\Q$relalias.\E.+/ } keys %$joinfree_cond
+ ) {
+ $self->throw_exception (
+ "The join-free condition returned for relationship '$relname' must be a hash "
+ .'reference with all keys being valid columns on the related result source'
+ );
+ }
+
+ # normalize
+ for (values %$joinfree_cond) {
+ $_ = $_->{'='} if (
+ ref $_ eq 'HASH'
+ and
+ keys %$_ == 1
+ and
+ exists $_->{'='}
+ );
+ }
+
+ # see which parts of the joinfree cond are conditionals
+ my $relcol_list = { map { $_ => 1 } $self->related_source($relname)->columns };
+
+ for my $c (keys %$joinfree_cond) {
+ my ($colname) = $c =~ /^ (?: \Q$relalias.\E )? (.+)/x;
+
+ unless ($relcol_list->{$colname}) {
+ push @$cond_cols, $colname;
+ next;
+ }
+
+ if (
+ ref $joinfree_cond->{$c}
+ and
+ ref $joinfree_cond->{$c} ne 'SCALAR'
+ and
+ ref $joinfree_cond->{$c} ne 'REF'
+ ) {
+ push @$cond_cols, $colname;
+ next;
+ }
+ }
+
+ return wantarray ? ($joinfree_cond, 0, $cond_cols) : $joinfree_cond;
+ }
+ else {
+ return wantarray ? ($crosstable_cond, 1) : $crosstable_cond;
+ }
+ }
+ elsif (ref $cond eq 'HASH') {
my %ret;
foreach my $k (keys %{$cond}) {
my $v = $cond->{$k};
} elsif (!defined $as) { # undef, i.e. "no reverse object"
$ret{$v} = undef;
} else {
- $ret{"${as}.${k}"} = "${for}.${v}";
+ $ret{"${as}.${k}"} = { -ident => "${for}.${v}" };
}
}
- return \%ret;
- } elsif (ref $cond eq 'ARRAY') {
- return [ map { $self->_resolve_condition($_, $as, $for) } @$cond ];
- } else {
- die("Can't handle condition $cond yet :(");
+
+ return wantarray
+ ? ( \%ret, ($obj_rel || !defined $as || ref $as) ? 0 : 1 )
+ : \%ret
+ ;
+ }
+ elsif (ref $cond eq 'ARRAY') {
+ my (@ret, $crosstable);
+ for (@$cond) {
+ my ($cond, $crosstab) = $self->_resolve_condition($_, $as, $for, $relname);
+ push @ret, $cond;
+ $crosstable ||= $crosstab;
+ }
+ return wantarray ? (\@ret, $crosstable) : \@ret;
+ }
+ else {
+ $self->throw_exception ("Can't handle condition $cond for relationship '$relname' yet :(");
}
}
-
# Accepts one or more relationships for the current source and returns an
# array of column names for each of those relationships. Column names are
# prefixed relative to the current source, in accordance with where they appear
# in the supplied relationships.
-
sub _resolve_prefetch {
- my ($self, $pre, $alias, $alias_map, $order, $collapse, $pref_path) = @_;
+ my ($self, $pre, $alias, $alias_map, $order, $pref_path) = @_;
$pref_path ||= [];
- if (not defined $pre) {
+ if (not defined $pre or not length $pre) {
return ();
}
elsif( ref $pre eq 'ARRAY' ) {
return
- map { $self->_resolve_prefetch( $_, $alias, $alias_map, $order, $collapse, [ @$pref_path ] ) }
+ map { $self->_resolve_prefetch( $_, $alias, $alias_map, $order, [ @$pref_path ] ) }
@$pre;
}
elsif( ref $pre eq 'HASH' ) {
my @ret =
map {
- $self->_resolve_prefetch($_, $alias, $alias_map, $order, $collapse, [ @$pref_path ] ),
+ $self->_resolve_prefetch($_, $alias, $alias_map, $order, [ @$pref_path ] ),
$self->related_source($_)->_resolve_prefetch(
- $pre->{$_}, "${alias}.$_", $alias_map, $order, $collapse, [ @$pref_path, $_] )
+ $pre->{$_}, "${alias}.$_", $alias_map, $order, [ @$pref_path, $_] )
} keys %$pre;
return @ret;
}
my $as = shift @{$p->{-join_aliases}};
my $rel_info = $self->relationship_info( $pre );
- $self->throw_exception( $self->name . " has no such relationship '$pre'" )
+ $self->throw_exception( $self->source_name . " has no such relationship '$pre'" )
unless $rel_info;
my $as_prefix = ($alias =~ /^.*?\.(.+)$/ ? $1.'.' : '');
my $rel_source = $self->related_source($pre);
unless ref($rel_info->{cond}) eq 'HASH';
my $dots = @{[$as_prefix =~ m/\./g]} + 1; # +1 to match the ".${as_prefix}"
- if (my ($fail) = grep { @{[$_ =~ m/\./g]} == $dots }
- keys %{$collapse}) {
- my ($last) = ($fail =~ /([^\.]+)$/);
- carp (
- "Prefetching multiple has_many rels ${last} and ${pre} "
- .(length($as_prefix)
- ? "at the same level (${as_prefix}) "
- : "at top level "
- )
- . 'will explode the number of row objects retrievable via ->next or ->all. '
- . 'Use at your own risk.'
- );
- }
-
#my @col = map { (/^self\.(.+)$/ ? ("${as_prefix}.$1") : ()); }
# values %{$rel_info->{cond}};
- $collapse->{".${as_prefix}${pre}"} = [ $rel_source->_pri_cols ];
- # action at a distance. prepending the '.' allows simpler code
- # in ResultSet->_collapse_result
my @key = map { (/^foreign\.(.+)$/ ? ($1) : ()); }
keys %{$rel_info->{cond}};
- my @ord = (ref($rel_info->{attrs}{order_by}) eq 'ARRAY'
- ? @{$rel_info->{attrs}{order_by}}
-
- : (defined $rel_info->{attrs}{order_by}
- ? ($rel_info->{attrs}{order_by})
- : ()
- ));
- push(@$order, map { "${as}.$_" } (@key, @ord));
++
+ push @$order, map { "${as}.$_" } @key;
+
+ if (my $rel_order = $rel_info->{attrs}{order_by}) {
+ # this is kludgy and incomplete, I am well aware
+ # but the parent method is going away entirely anyway
+ # so sod it
+ my $sql_maker = $self->storage->sql_maker;
+ my ($orig_ql, $orig_qr) = $sql_maker->_quote_chars;
+ my $sep = $sql_maker->name_sep;
+
+ # install our own quoter, so we can catch unqualified stuff
+ local $sql_maker->{quote_char} = ["\x00", "\xFF"];
+
+ my $quoted_prefix = "\x00${as}\xFF";
+
+ for my $chunk ( $sql_maker->_order_by_chunks ($rel_order) ) {
+ my @bind;
+ ($chunk, @bind) = @$chunk if ref $chunk;
+
+ $chunk = "${quoted_prefix}${sep}${chunk}"
+ unless $chunk =~ /\Q$sep/;
+
+ $chunk =~ s/\x00/$orig_ql/g;
+ $chunk =~ s/\xFF/$orig_qr/g;
+ push @$order, \[$chunk, @bind];
+ }
+ }
}
return map { [ "${as}.$_", "${as_prefix}${pre}.$_", ] }
}
}
+# Takes a selection list and generates a collapse-map representing
+# row-object fold-points. Every relationship is assigned a set of unique,
+# non-nullable columns (which may *not even be* from the same resultset)
+# and the collapser will use this information to correctly distinguish
+# data of individual to-be-row-objects.
+sub _resolve_collapse {
+ my ($self, $as, $as_fq_idx, $rel_chain, $parent_info) = @_;
+
+ # for comprehensible error messages put ourselves at the head of the relationship chain
+ $rel_chain ||= [ $self->source_name ];
+
+ # record top-level fully-qualified column index
+ $as_fq_idx ||= { %$as };
+
+ my ($my_cols, $rel_cols);
+ for (keys %$as) {
+ if ($_ =~ /^ ([^\.]+) \. (.+) /x) {
+ $rel_cols->{$1}{$2} = 1;
+ }
+ else {
+ $my_cols->{$_} = {}; # important for ||= below
+ }
+ }
+
+ my $relinfo;
+ # run through relationships, collect metadata, inject non-left fk-bridges from
+ # *INNER-JOINED* children (if any)
+ for my $rel (keys %$rel_cols) {
+ my $rel_src = $self->related_source ($rel);
+ my $inf = $self->relationship_info ($rel);
+
+ $relinfo->{$rel}{is_single} = $inf->{attrs}{accessor} && $inf->{attrs}{accessor} ne 'multi';
+ $relinfo->{$rel}{is_inner} = ( $inf->{attrs}{join_type} || '' ) !~ /^left/i;
+ $relinfo->{$rel}{rsrc} = $rel_src;
+
+ my $cond = $inf->{cond};
+
+ if (
+ ref $cond eq 'HASH'
+ and
+ keys %$cond
+ and
+ ! List::Util::first { $_ !~ /^foreign\./ } (keys %$cond)
+ and
+ ! List::Util::first { $_ !~ /^self\./ } (values %$cond)
+ ) {
+ for my $f (keys %$cond) {
+ my $s = $cond->{$f};
+ $_ =~ s/^ (?: foreign | self ) \.//x for ($f, $s);
+ $relinfo->{$rel}{fk_map}{$s} = $f;
+
+ $my_cols->{$s} ||= { via_fk => "$rel.$f" } # need to know source from *our* pov
+ if ($relinfo->{$rel}{is_inner} && defined $rel_cols->{$rel}{$f}); # only if it is inner and in fact selected of course
+ }
+ }
+ }
+
+ # if the parent is already defined, assume all of its related FKs are selected
+ # (even if they in fact are NOT in the select list). Keep a record of what we
+ # assumed, and if any such phantom-column becomes part of our own collapser,
+ # throw everything assumed-from-parent away and replace with the collapser of
+ # the parent (whatever it may be)
+ my $assumed_from_parent;
+ unless ($parent_info->{underdefined}) {
+ $assumed_from_parent->{columns} = { map
+ # only add to the list if we do not already select said columns
+ { ! exists $my_cols->{$_} ? ( $_ => 1 ) : () }
+ values %{$parent_info->{rel_condition} || {}}
+ };
+
+ $my_cols->{$_} = { via_collapse => $parent_info->{collapse_on} }
+ for keys %{$assumed_from_parent->{columns}};
+ }
+
+ # get colinfo for everything
+ if ($my_cols) {
+ $my_cols->{$_}{colinfo} = (
+ $self->has_column ($_) ? $self->column_info ($_) : undef
+ ) for keys %$my_cols;
+ }
+
+ my $collapse_map;
+
+ # try to resolve based on our columns (plus already inserted FK bridges)
+ if (
+ $my_cols
+ and
+ my $uset = $self->_unique_column_set ($my_cols)
+ ) {
+ # see if the resulting collapser relies on any implied columns,
+ # and fix stuff up if this is the case
+
+ my $parent_collapser_used;
+
+ if (List::Util::first
+ { exists $assumed_from_parent->{columns}{$_} }
+ keys %$uset
+ ) {
+ # remove implied stuff from the uset, we will inject the equivalent collapser a bit below
+ delete @{$uset}{keys %{$assumed_from_parent->{columns}}};
+ $parent_collapser_used = 1;
+ }
+
+ $collapse_map->{-collapse_on} = {
+ %{ $parent_collapser_used ? $parent_info->{collapse_on} : {} },
+ (map
+ {
+ my $fqc = join ('.',
+ @{$rel_chain}[1 .. $#$rel_chain],
+ ( $my_cols->{$_}{via_fk} || $_ ),
+ );
+
+ $fqc => $as_fq_idx->{$fqc};
+ }
+ keys %$uset
+ ),
+ };
+ }
+
+ # don't know how to collapse - keep descending down 1:1 chains - if
+ # a related non-LEFT 1:1 is resolvable - its condition will collapse us
+ # too
+ unless ($collapse_map->{-collapse_on}) {
+ my @candidates;
+
+ for my $rel (keys %$relinfo) {
+ next unless ($relinfo->{$rel}{is_single} && $relinfo->{$rel}{is_inner});
+
+ if ( my $rel_collapse = $relinfo->{$rel}{rsrc}->_resolve_collapse (
+ $rel_cols->{$rel},
+ $as_fq_idx,
+ [ @$rel_chain, $rel ],
+ { underdefined => 1 }
+ )) {
+ push @candidates, $rel_collapse->{-collapse_on};
+ }
+ }
+
+ # get the set with least amount of columns
+ # FIXME - maybe need to implement a data type order as well (i.e. prefer several ints
+ # to a single varchar)
+ if (@candidates) {
+ ($collapse_map->{-collapse_on}) = sort { keys %$a <=> keys %$b } (@candidates);
+ }
+ }
+
+  # Still don't know how to collapse - see if the parent passed us anything
+ # (i.e. reuse collapser over 1:1)
+ unless ($collapse_map->{-collapse_on}) {
+ $collapse_map->{-collapse_on} = $parent_info->{collapse_on}
+ if $parent_info->{collapser_reusable};
+ }
+
+
+ # stop descending into children if we were called by a parent for first-pass
+ # and don't despair if nothing was found (there may be other parallel branches
+ # to dive into)
+ if ($parent_info->{underdefined}) {
+ return $collapse_map->{-collapse_on} ? $collapse_map : undef
+ }
+ # nothing down the chain resolved - can't calculate a collapse-map
+ elsif (! $collapse_map->{-collapse_on}) {
+ $self->throw_exception ( sprintf
+ "Unable to calculate a definitive collapse column set for %s%s: fetch more unique non-nullable columns",
+ $self->source_name,
+ @$rel_chain > 1
+ ? sprintf (' (last member of the %s chain)', join ' -> ', @$rel_chain )
+ : ''
+ ,
+ );
+ }
+
+
+ # If we got that far - we are collapsable - GREAT! Now go down all children
+ # a second time, and fill in the rest
+
+ for my $rel (keys %$relinfo) {
+
+ $collapse_map->{$rel} = $relinfo->{$rel}{rsrc}->_resolve_collapse (
+ { map { $_ => 1 } ( keys %{$rel_cols->{$rel}} ) },
+
+ $as_fq_idx,
+
+ [ @$rel_chain, $rel],
+
+ {
+ collapse_on => { %{$collapse_map->{-collapse_on}} },
+
+ rel_condition => $relinfo->{$rel}{fk_map},
+
+ # if this is a 1:1 our own collapser can be used as a collapse-map
+ # (regardless of left or not)
+ collapser_reusable => $relinfo->{$rel}{is_single},
+ },
+ );
+ }
+
+ return $collapse_map;
+}
+
+sub _unique_column_set {
+ my ($self, $cols) = @_;
+
+ my %unique = $self->unique_constraints;
+
+ # always prefer the PK first, and then shortest constraints first
+ USET:
+ for my $set (delete $unique{primary}, sort { @$a <=> @$b } (values %unique) ) {
+ next unless $set && @$set;
+
+ for (@$set) {
+ next USET unless ($cols->{$_} && $cols->{$_}{colinfo} && !$cols->{$_}{colinfo}{is_nullable} );
+ }
+
+ return { map { $_ => 1 } @$set };
+ }
+
+ return undef;
+}
+
+# Takes an arrayref of {as} dbic column aliases and the collapse and select
+# attributes from the same $rs (the selector requirement is a temporary
+# workaround), and returns a coderef capable of:
+# my $me_pref_clps = $coderef->([$rs->cursor->next])
+# Where the $me_pref_clps arrayref is the future argument to
+# ::ResultSet::_collapse_result.
+#
+# $me_pref_clps->[0] is always returned (even if as an empty hash with no
+# rowdata), however branches of related data in $me_pref_clps->[1] may be
+# pruned short of what was originally requested based on {as}, depending
+# on:
+#
+# * If collapse is requested, a definitive collapse map is calculated for
+# every relationship "fold-point", consisting of a set of values (which
+# may not even be contained in the future 'me' of said relationship
+# (for example a cd.artist_id defines the related inner-joined artist)).
+# Thus a definedness check is carried on all collapse-condition values
+# and if at least one is undef it is assumed that we are dealing with a
+# NULLed right-side of a left-join, so we don't return a related data
+# container at all, which implies no related objects
+#
+# * If we are not collapsing, there is no constraint on having a selector
+# uniquely identifying all possible objects, and the user might have very
+# well requested a column that just *happens* to be all NULLs. What we do
+# in this case is fallback to the old behavior (which is a potential FIXME)
+# by always returning a data container, but only filling it with columns
+# IFF at least one of them is defined. This way we do not get an object
+# with a bunch of has_column_loaded to undef, but at the same time do not
+# further relationships based off this "null" object (e.g. in case the user
+# deliberately skipped link-table values). I am pretty sure there are some
+# tests that codify this behavior, need to find the exact testname.
+#
+# For an example of this coderef in action (and to see its guts) look at
+# t/prefetch/_internals.t
+#
+# This is a huge performance win, as we call the same code for
+# every row returned from the db, thus avoiding repeated method
+# lookups when traversing relationships
+#
+# Also since the coderef is completely stateless (the returned structure is
+# always fresh on every new invocation) this is a very good opportunity for
+# memoization if further speed improvements are needed
+#
+# The way we construct this coderef is somewhat fugly, although I am not
+# sure if the string eval is *that* bad of an idea. The alternative is to
+# have a *very* large number of anon coderefs calling each other in a twisty
+# maze, whereas the current result is a nice, smooth, single-pass function.
+# In any case - the output of this thing is meticulously micro-tested, so
+# any sort of rewrite should be relatively easy
+#
+sub _mk_row_parser {
+ my ($self, $as, $with_collapse, $select) = @_;
+
+ my $as_indexed = { map
+ { $as->[$_] => $_ }
+ ( 0 .. $#$as )
+ };
+
+ # calculate collapse fold-points if needed
+ my $collapse_on = do {
+ # FIXME
+ # only consider real columns (not functions) during collapse resolution
+    # this check shouldn't really be here, as users are not supposed to
+    # alias arbitrary expressions to existing column names anyway, but still - just in
+ # case (also saves us from select/as mismatches which need fixing as well...)
+
+ my $plain_as = { %$as_indexed };
+ for (keys %$plain_as) {
+ delete $plain_as->{$_} if ref $select->[$plain_as->{$_}];
+ }
+ $self->_resolve_collapse ($plain_as);
+
+ } if $with_collapse;
+
+ my $perl = $self->__visit_as ($as_indexed, $collapse_on);
+ my $cref = eval "sub { $perl }"
+ or die "Oops! _mk_row_parser generated invalid perl:\n$@\n\n$perl\n";
+ return $cref;
+}
+
+{
+  my $visit_as_dumper; # keep our own DD object around so we don't have to futz with quoting
+
+ sub __visit_as {
+ my ($self, $as, $collapse_on, $known_defined) = @_;
+ $known_defined ||= {};
+
+ # prepopulate the known defined map with our own collapse value positions
+ # the rationale is that if an Artist needs column 0 to be uniquely
+ # identified, and related CDs need columns 0 and 1, by the time we get to
+ # CDs we already know that column 0 is defined (otherwise there would be
+ # no related CDs as there is no Artist in the 1st place). So we use this
+ # index to cut on repetitive defined() checks.
+ $known_defined->{$_}++ for ( values %{$collapse_on->{-collapse_on} || {}} );
+
+ my $my_cols = {};
+ my $rel_cols;
+ for (keys %$as) {
+ if ($_ =~ /^ ([^\.]+) \. (.+) /x) {
+ $rel_cols->{$1}{$2} = $as->{$_};
+ }
+ else {
+ $my_cols->{$_} = $as->{$_};
+ }
+ }
+
+ my @relperl;
+ for my $rel (sort keys %$rel_cols) {
+ my $rel_node = $self->__visit_as($rel_cols->{$rel}, $collapse_on->{$rel}, {%$known_defined} );
+
+ my @null_checks;
+ if ($collapse_on->{$rel}{-collapse_on}) {
+ @null_checks = map
+ { "(! defined '__VALPOS__${_}__')" }
+ ( grep
+ { ! $known_defined->{$_} }
+ ( sort
+ { $a <=> $b }
+ values %{$collapse_on->{$rel}{-collapse_on}}
+ )
+ )
+ ;
+ }
+
+ if (@null_checks) {
+ push @relperl, sprintf ( '(%s) ? () : ( %s => %s )',
+ join (' || ', @null_checks ),
+ $rel,
+ $rel_node,
+ );
+ }
+ else {
+ push @relperl, "$rel => $rel_node";
+ }
+ }
+ my $rels = @relperl
+ ? sprintf ('{ %s }', join (',', @relperl))
+ : 'undef'
+ ;
+
+ my $me = {
+ map { $_ => "__VALPOS__$my_cols->{$_}__" } (keys %$my_cols)
+ };
+
+    my $clps = undef; # funny thing, but this prevents a memory leak, I guess it's Data::Dumper's fault (mo)
+ $clps = [
+ map { "__VALPOS__${_}__" } ( sort { $a <=> $b } (values %{$collapse_on->{-collapse_on}}) )
+ ] if $collapse_on->{-collapse_on};
+
+ # we actually will be producing functional perl code here,
+ # thus no second-guessing of what these globals might have
+ # been set to. DO NOT CHANGE!
+ $visit_as_dumper ||= do {
+ require Data::Dumper;
+ Data::Dumper->new([])
+ ->Purity (1)
+ ->Pad ('')
+ ->Useqq (0)
+ ->Terse (1)
+ ->Quotekeys (1)
+ ->Deepcopy (1)
+ ->Deparse (0)
+ ->Maxdepth (0)
+ ->Indent (0)
+ };
+ for ($me, $clps) {
+ $_ = $visit_as_dumper->Values ([$_])->Dump;
+ }
+
+ unless ($collapse_on->{-collapse_on}) { # we are not collapsing, insert a definedness check on 'me'
+ $me = sprintf ( '(%s) ? %s : {}',
+ join (' || ', map { "( defined '__VALPOS__${_}__')" } (sort { $a <=> $b } values %$my_cols) ),
+ $me,
+ );
+ }
+
+ my @rv_list = ($me, $rels, $clps);
+ pop @rv_list while ($rv_list[-1] eq 'undef'); # strip trailing undefs
+
+ # change the quoted placeholders to unquoted alias-references
+ $_ =~ s/ \' __VALPOS__(\d+)__ \' /sprintf ('$_[0][%d]', $1)/gex
+ for grep { defined $_ } @rv_list;
+ return sprintf '[%s]', join (',', @rv_list);
+ }
+}
+
=head2 related_source
=over 4
if( !$self->has_relationship( $rel ) ) {
$self->throw_exception("No such relationship '$rel' on " . $self->source_name);
}
- return $self->schema->source($self->relationship_info($rel)->{source});
+
+ # if we are not registered with a schema - just use the prototype
+ # however if we do have a schema - ask for the source by name (and
+ # throw in the process if all fails)
+ if (my $schema = try { $self->schema }) {
+ $schema->source($self->relationship_info($rel)->{source});
+ }
+ else {
+ my $class = $self->relationship_info($rel)->{class};
+ $self->ensure_class_loaded($class);
+ $class->result_source_instance;
+ }
}
=head2 related_class
sub related_class {
my ($self, $rel) = @_;
if( !$self->has_relationship( $rel ) ) {
- $self->throw_exception("No such relationship '$rel'");
+ $self->throw_exception("No such relationship '$rel' on " . $self->source_name);
}
return $self->schema->class($self->relationship_info($rel)->{source});
}
=head2 handle
- Obtain a new handle to this source. Returns an instance of a
- L<DBIx::Class::ResultSourceHandle>.
+ =over 4
+
+ =item Arguments: None
+
+ =item Return value: $source_handle
+
+ =back
+
+ Obtain a new L<result source handle instance|DBIx::Class::ResultSourceHandle>
+ for this source. Used as a serializable pointer to this resultsource, as it is not
+ easy (nor advisable) to serialize CODErefs which may very well be present in e.g.
+ relationship definitions.
=cut
sub handle {
- return DBIx::Class::ResultSourceHandle->new({
- schema => $_[0]->schema,
- source_moniker => $_[0]->source_name
- });
+ return DBIx::Class::ResultSourceHandle->new({
+ source_moniker => $_[0]->source_name,
+
+ # so that a detached thaw can be re-frozen
+ $_[0]->{_detached_thaw}
+ ? ( _detached_source => $_[0] )
+ : ( schema => $_[0]->schema )
+ ,
+ });
+ }
+
+ my $global_phase_destroy;
+ sub DESTROY {
+ return if $global_phase_destroy ||= in_global_destruction;
+
+ ######
+ # !!! ACHTUNG !!!!
+ ######
+ #
+ # Under no circumstances shall $_[0] be stored anywhere else (like copied to
+ # a lexical variable, or shifted, or anything else). Doing so will mess up
+ # the refcount of this particular result source, and will allow the $schema
+ # we are trying to save to reattach back to the source we are destroying.
+ # The relevant code checking refcounts is in ::Schema::DESTROY()
+
+ # if we are not a schema instance holder - we don't matter
+ return if(
+ ! ref $_[0]->{schema}
+ or
+ isweak $_[0]->{schema}
+ );
+
+ # weaken our schema hold forcing the schema to find somewhere else to live
+ # during global destruction (if we have not yet bailed out) this will throw
+ # which will serve as a signal to not try doing anything else
+ # however beware - on older perls the exception seems randomly untrappable
+ # due to some weird race condition during thread joining :(((
+ local $@;
+ eval {
+ weaken $_[0]->{schema};
+
+ # if schema is still there reintroduce ourselves with strong refs back to us
+ if ($_[0]->{schema}) {
+ my $srcregs = $_[0]->{schema}->source_registrations;
+ for (keys %$srcregs) {
+ next unless $srcregs->{$_};
+ $srcregs->{$_} = $_[0] if $srcregs->{$_} == $_[0];
+ }
+ }
+
+ 1;
+ } or do {
+ $global_phase_destroy = 1;
+ };
+
+ return;
+ }
+
+ sub STORABLE_freeze { Storable::nfreeze($_[0]->handle) }
+
+ sub STORABLE_thaw {
+ my ($self, $cloning, $ice) = @_;
+ %$self = %{ (Storable::thaw($ice))->resolve };
}
=head2 throw_exception
sub throw_exception {
my $self = shift;
- if (defined $self->schema) {
- $self->schema->throw_exception(@_);
- }
- else {
- DBIx::Class::Exception->throw(@_);
- }
+ $self->{schema}
+ ? $self->{schema}->throw_exception(@_)
+ : DBIx::Class::Exception->throw(@_)
+ ;
}
=head2 source_info
use base qw/DBIx::Class/;
use DBIx::Class::Exception;
- use Scalar::Util ();
+ use Scalar::Util 'blessed';
+ use List::Util 'first';
+ use Try::Tiny;
###
### Internal method
: sub () { 0 };
}
- __PACKAGE__->mk_group_accessors('simple' => qw/_source_handle/);
+ use namespace::clean;
=head1 NAME
L<DBIx::Class::ResultSet> object.
When calling it directly, you will not get a complete, usable row
- object until you pass or set the C<source_handle> attribute, to a
+ object until you pass or set the C<result_source> attribute, to a
L<DBIx::Class::ResultSource> instance that is attached to a
L<DBIx::Class::Schema> with a valid connection.
C<$attrs> is a hashref of column name, value data. It can also contain
- some other attributes such as the C<source_handle>.
+ some other attributes such as the C<result_source>.
Passing an object, or an arrayref of objects as a value will call
L<DBIx::Class::Relationship::Base/set_from_related> for you. When
sub __new_related_find_or_new_helper {
my ($self, $relname, $data) = @_;
- if ($self->__their_pk_needs_us($relname, $data)) {
+
+ my $rsrc = $self->result_source;
+
+ # create a mock-object so all new/set_column component overrides will run:
+ my $rel_rs = $rsrc->related_source($relname)->resultset;
+ my $new_rel_obj = $rel_rs->new_result($data);
+ my $proc_data = { $new_rel_obj->get_columns };
+
+ if ($self->__their_pk_needs_us($relname)) {
MULTICREATE_DEBUG and warn "MC $self constructing $relname via new_result";
- return $self->result_source
- ->related_source($relname)
- ->resultset
- ->new_result($data);
+ return $new_rel_obj;
}
- if ($self->result_source->_pk_depends_on($relname, $data)) {
- MULTICREATE_DEBUG and warn "MC $self constructing $relname via find_or_new";
- return $self->result_source
- ->related_source($relname)
- ->resultset
- ->find_or_new($data);
+ elsif ($rsrc->_pk_depends_on($relname, $proc_data )) {
+ if (! keys %$proc_data) {
+ # there is nothing to search for - blind create
+ MULTICREATE_DEBUG and warn "MC $self constructing default-insert $relname";
+ }
+ else {
+ MULTICREATE_DEBUG and warn "MC $self constructing $relname via find_or_new";
+ # this is not *really* find or new, as we don't want to double-new the
+ # data (thus potentially double encoding or whatever)
+ my $exists = $rel_rs->find ($proc_data);
+ return $exists if $exists;
+ }
+ return $new_rel_obj;
+ }
+ else {
+ my $us = $rsrc->source_name;
+ $self->throw_exception (
+ "Unable to determine relationship '$relname' direction from '$us', "
+ . "possibly due to a missing reverse-relationship on '$relname' to '$us'."
+ );
}
}
sub __their_pk_needs_us { # this should maybe be in resultsource.
- my ($self, $relname, $data) = @_;
+ my ($self, $relname) = @_;
my $source = $self->result_source;
my $reverse = $source->reverse_relationship_info($relname);
my $rel_source = $source->related_source($relname);
my ($class, $attrs) = @_;
$class = ref $class if ref $class;
- my $new = {
- _column_data => {},
- };
- bless $new, $class;
-
- if (my $handle = delete $attrs->{-source_handle}) {
- $new->_source_handle($handle);
- }
-
- my $source;
- if ($source = delete $attrs->{-result_source}) {
- $new->result_source($source);
- }
-
- if (my $related = delete $attrs->{-cols_from_relations}) {
- @{$new->{_ignore_at_insert}={}}{@$related} = ();
- }
+ my $new = bless { _column_data => {} }, $class;
if ($attrs) {
$new->throw_exception("attrs must be a hashref")
unless ref($attrs) eq 'HASH';
+ my $source = delete $attrs->{-result_source};
+ if ( my $h = delete $attrs->{-source_handle} ) {
+ $source ||= $h->resolve;
+ }
+
+ $new->result_source($source) if $source;
+
+ if (my $col_from_rel = delete $attrs->{-cols_from_relations}) {
+ @{$new->{_ignore_at_insert}={}}{@$col_from_rel} = ();
+ }
+
my ($related,$inflated);
foreach my $key (keys %$attrs) {
my $acc_type = $info->{attrs}{accessor} || '';
if ($acc_type eq 'single') {
my $rel_obj = delete $attrs->{$key};
- if(!Scalar::Util::blessed($rel_obj)) {
+ if(!blessed $rel_obj) {
$rel_obj = $new->__new_related_find_or_new_helper($key, $rel_obj);
}
my @objects;
foreach my $idx (0 .. $#$others) {
my $rel_obj = $others->[$idx];
- if(!Scalar::Util::blessed($rel_obj)) {
+ if(!blessed $rel_obj) {
$rel_obj = $new->__new_related_find_or_new_helper($key, $rel_obj);
}
elsif ($acc_type eq 'filter') {
## 'filter' should disappear and get merged in with 'single' above!
my $rel_obj = delete $attrs->{$key};
- if(!Scalar::Util::blessed($rel_obj)) {
+ if(!blessed $rel_obj) {
$rel_obj = $new->__new_related_find_or_new_helper($key, $rel_obj);
}
if ($rel_obj->in_storage) {
=back
Inserts an object previously created by L</new> into the database if
- it isn't already in there. Returns the object itself. Requires the
- object's result source to be set, or the class to have a
- result_source_instance method. To insert an entirely new row into
- the database, use C<create> (see L<DBIx::Class::ResultSet/create>).
+ it isn't already in there. Returns the object itself. To insert an
+ entirely new row into the database, use L<DBIx::Class::ResultSet/create>.
To fetch an uninserted row object, call
L<new|DBIx::Class::ResultSet/new> on a resultset.
my ($self) = @_;
return $self if $self->in_storage;
my $source = $self->result_source;
$self->throw_exception("No result_source set on this object; can't insert")
unless $source;
+ my $storage = $source->storage;
+
my $rollback_guard;
# Check if we stored uninserted relobjs here in new()
my $rel_obj = $related_stuff{$relname};
if (! $self->{_rel_in_storage}{$relname}) {
- next unless (Scalar::Util::blessed($rel_obj)
- && $rel_obj->isa('DBIx::Class::Row'));
+ next unless (blessed $rel_obj && $rel_obj->isa('DBIx::Class::Row'));
next unless $source->_pk_depends_on(
$relname, { $rel_obj->get_columns }
);
# The guard will save us if we blow out of this scope via die
- $rollback_guard ||= $source->storage->txn_scope_guard;
+ $rollback_guard ||= $storage->txn_scope_guard;
MULTICREATE_DEBUG and warn "MC $self pre-reconstructing $relname $rel_obj\n";
- my $them = { %{$rel_obj->{_relationship_data} || {} }, $rel_obj->get_inflated_columns };
- my $re = $self->result_source
- ->related_source($relname)
- ->resultset
- ->find_or_create($them);
+ my $them = { %{$rel_obj->{_relationship_data} || {} }, $rel_obj->get_columns };
+ my $existing;
+
+ # if there are no keys - nothing to search for
+ if (keys %$them and $existing = $self->result_source
+ ->related_source($relname)
+ ->resultset
+ ->find($them)
+ ) {
+ %{$rel_obj} = %{$existing};
+ }
+ else {
+ $rel_obj->insert;
+ }
- %{$rel_obj} = %{$re};
$self->{_rel_in_storage}{$relname} = 1;
}
# start a transaction here if not started yet and there is more stuff
# to insert after us
if (keys %related_stuff) {
- $rollback_guard ||= $source->storage->txn_scope_guard
+ $rollback_guard ||= $storage->txn_scope_guard
}
MULTICREATE_DEBUG and do {
no warnings 'uninitialized';
warn "MC $self inserting (".join(', ', $self->get_columns).")\n";
};
- my $updated_cols = $source->storage->insert($source, { $self->get_columns });
- foreach my $col (keys %$updated_cols) {
- $self->store_column($col, $updated_cols->{$col});
- }
- ## PK::Auto
- my @auto_pri = grep {
- (not defined $self->get_column($_))
- ||
- (ref($self->get_column($_)) eq 'SCALAR')
- } $self->primary_columns;
-
- if (@auto_pri) {
- MULTICREATE_DEBUG and warn "MC $self fetching missing PKs ".join(', ', @auto_pri)."\n";
- my $storage = $self->result_source->storage;
- $self->throw_exception( "Missing primary key but Storage doesn't support last_insert_id" )
- unless $storage->can('last_insert_id');
- my @ids = $storage->last_insert_id($self->result_source,@auto_pri);
- $self->throw_exception( "Can't get last insert id" )
- unless (@ids == @auto_pri);
- $self->store_column($auto_pri[$_] => $ids[$_]) for 0 .. $#ids;
+ # perform the insert - the storage will return everything it is asked to
+ # (autoinc primary columns and any retrieve_on_insert columns)
+ my %current_rowdata = $self->get_columns;
+ my $returned_cols = $storage->insert(
+ $source,
+ { %current_rowdata }, # what to insert, copy because the storage *will* change it
+ );
+
+ for (keys %$returned_cols) {
+ $self->store_column($_, $returned_cols->{$_})
+ # this ensures we fire store_column only once
+      # (some subclasses insist on overriding it)
+ if (
+ (!exists $current_rowdata{$_})
+ or
+ (defined $current_rowdata{$_} xor defined $returned_cols->{$_})
+ or
+ (defined $current_rowdata{$_} and $current_rowdata{$_} ne $returned_cols->{$_})
+ );
}
+ delete $self->{_column_data_in_storage};
+ $self->in_storage(1);
$self->{_dirty_columns} = {};
$self->{related_resultsets} = {};
: $related_stuff{$relname}
;
- if (@cands
- && Scalar::Util::blessed($cands[0])
- && $cands[0]->isa('DBIx::Class::Row')
+ if (@cands && blessed $cands[0] && $cands[0]->isa('DBIx::Class::Row')
) {
my $reverse = $source->reverse_relationship_info($relname);
foreach my $obj (@cands) {
$obj->set_from_related($_, $self) for keys %$reverse;
- my $them = { %{$obj->{_relationship_data} || {} }, $obj->get_inflated_columns };
- if ($self->__their_pk_needs_us($relname, $them)) {
+ if ($self->__their_pk_needs_us($relname)) {
if (exists $self->{_ignore_at_insert}{$relname}) {
MULTICREATE_DEBUG and warn "MC $self skipping post-insert on $relname";
- } else {
- MULTICREATE_DEBUG and warn "MC $self re-creating $relname $obj";
- my $re = $self->result_source
- ->related_source($relname)
- ->resultset
- ->create($them);
- %{$obj} = %{$re};
- MULTICREATE_DEBUG and warn "MC $self new $relname $obj";
+ }
+ else {
+ MULTICREATE_DEBUG and warn "MC $self inserting $relname $obj";
+ $obj->insert;
}
} else {
MULTICREATE_DEBUG and warn "MC $self post-inserting $obj";
}
}
- $self->in_storage(1);
- delete $self->{_orig_ident};
delete $self->{_ignore_at_insert};
+
$rollback_guard->commit if $rollback_guard;
return $self;
according to L</in_storage>.
This method issues an SQL UPDATE query to commit any changes to the
- object to the database if required.
+ object to the database if required (see L</get_dirty_columns>).
+ It throws an exception if a proper WHERE clause uniquely identifying
+ the database row can not be constructed (see
+ L<significance of primary keys|DBIx::Class::Manual::Intro/The Significance and Importance of Primary Keys>
+ for more details).
- Also takes an optional hashref of C<< column_name => value> >> pairs
+ Also takes an optional hashref of C<< column_name => value >> pairs
to update on the object first. Be aware that the hashref will be
passed to C<set_inflated_columns>, which might edit it in place, so
don't rely on it being the same after a call to C<update>. If you
If the values passed or any of the column values set on the object
contain scalar references, e.g.:
- $row->last_modified(\'NOW()');
+ $row->last_modified(\'NOW()')->update();
# OR
$row->update({ last_modified => \'NOW()' });
sub update {
my ($self, $upd) = @_;
- $self->throw_exception( "Not in database" ) unless $self->in_storage;
- my $ident_cond = $self->ident_condition;
- $self->throw_exception("Cannot safely update a row in a PK-less table")
- if ! keys %$ident_cond;
$self->set_inflated_columns($upd) if $upd;
- my %to_update = $self->get_dirty_columns;
- return $self unless keys %to_update;
+
+ my %to_update = $self->get_dirty_columns
+ or return $self;
+
+ $self->throw_exception( "Not in database" ) unless $self->in_storage;
+
my $rows = $self->result_source->storage->update(
- $self->result_source, \%to_update,
- $self->{_orig_ident} || $ident_cond
- );
+ $self->result_source, \%to_update, $self->_storage_ident_condition
+ );
if ($rows == 0) {
$self->throw_exception( "Can't update ${self}: row not found" );
} elsif ($rows > 1) {
}
$self->{_dirty_columns} = {};
$self->{related_resultsets} = {};
- undef $self->{_orig_ident};
+ delete $self->{_column_data_in_storage};
return $self;
}
=back
Throws an exception if the object is not in the database according to
- L</in_storage>. Runs an SQL DELETE statement using the primary key
- values to locate the row.
+ L</in_storage>. Also throws an exception if a proper WHERE clause
+ uniquely identifying the database row can not be constructed (see
+ L<significance of primary keys|DBIx::Class::Manual::Intro/The Significance and Importance of Primary Keys>
+ for more details).
The object is still perfectly usable, but L</in_storage> will
now return 0 and the object must be reinserted using L</insert>
my $self = shift;
if (ref $self) {
$self->throw_exception( "Not in database" ) unless $self->in_storage;
- my $ident_cond = $self->{_orig_ident} || $self->ident_condition;
- $self->throw_exception("Cannot safely delete a row in a PK-less table")
- if ! keys %$ident_cond;
- foreach my $column (keys %$ident_cond) {
- $self->throw_exception("Can't delete the object unless it has loaded the primary keys")
- unless exists $self->{_column_data}{$column};
- }
+
$self->result_source->storage->delete(
- $self->result_source, $ident_cond);
+ $self->result_source, $self->_storage_ident_condition
+ );
+
+ delete $self->{_column_data_in_storage};
$self->in_storage(undef);
- } else {
- $self->throw_exception("Can't do class delete without a ResultSource instance")
- unless $self->can('result_source_instance');
+ }
+ else {
+ my $rsrc = try { $self->result_source_instance }
+ or $self->throw_exception("Can't do class delete without a ResultSource instance");
+
my $attrs = @_ > 1 && ref $_[$#_] eq 'HASH' ? { %{pop(@_)} } : {};
my $query = ref $_[0] eq 'HASH' ? $_[0] : {@_};
- $self->result_source_instance->resultset->search(@_)->delete;
+ $rsrc->resultset->search(@_)->delete;
}
return $self;
}
=back
Throws an exception if the column name given doesn't exist according
- to L</has_column>.
+ to L<has_column|DBIx::Class::ResultSource/has_column>.
Returns a raw column value from the row object, if it has already
been fetched from the database or set by an accessor.
sub get_inflated_columns {
my $self = shift;
- my %loaded_colinfo = (map
- { $_ => $self->column_info($_) }
- (grep { $self->has_column_loaded($_) } $self->columns)
- );
+ my $loaded_colinfo = $self->columns_info ([
+ grep { $self->has_column_loaded($_) } $self->columns
+ ]);
my %inflated;
- for my $col (keys %loaded_colinfo) {
- if (exists $loaded_colinfo{$col}{accessor}) {
- my $acc = $loaded_colinfo{$col}{accessor};
+ for my $col (keys %$loaded_colinfo) {
+ if (exists $loaded_colinfo->{$col}{accessor}) {
+ my $acc = $loaded_colinfo->{$col}{accessor};
$inflated{$col} = $self->$acc if defined $acc;
}
else {
}
# return all loaded columns with the inflations overlayed on top
- return ($self->get_columns, %inflated);
+ return %{ { $self->get_columns, %inflated } };
}
sub _is_column_numeric {
my $colinfo = $self->column_info ($column);
# cache for speed (the object may *not* have a resultsource instance)
- if (not defined $colinfo->{is_numeric} && $self->_source_handle) {
+ if (
+ ! defined $colinfo->{is_numeric}
+ and
+ my $storage = try { $self->result_source->schema->storage }
+ ) {
$colinfo->{is_numeric} =
- $self->result_source->schema->storage->is_datatype_numeric ($colinfo->{data_type})
+ $storage->is_datatype_numeric ($colinfo->{data_type})
? 1
: 0
;
sub set_column {
my ($self, $column, $new_value) = @_;
- $self->{_orig_ident} ||= $self->ident_condition;
- my $old_value = $self->get_column($column);
+ my $had_value = $self->has_column_loaded($column);
+ my ($old_value, $in_storage) = ($self->get_column($column), $self->in_storage)
+ if $had_value;
$new_value = $self->store_column($column, $new_value);
- my $dirty;
- if (!$self->in_storage) { # no point tracking dirtyness on uninserted data
- $dirty = 1;
- }
- elsif (defined $old_value xor defined $new_value) {
- $dirty = 1;
- }
- elsif (not defined $old_value) { # both undef
- $dirty = 0;
- }
- elsif ($old_value eq $new_value) {
- $dirty = 0;
- }
- else { # do a numeric comparison if datatype allows it
- if ($self->_is_column_numeric($column)) {
- $dirty = $old_value != $new_value;
+ my $dirty =
+ $self->{_dirty_columns}{$column}
+ ||
+ $in_storage # no point tracking dirtyness on uninserted data
+ ? ! $self->_eq_column_values ($column, $old_value, $new_value)
+ : 1
+ ;
+
+ if ($dirty) {
+ # FIXME sadly the update code just checks for keys, not for their value
+ $self->{_dirty_columns}{$column} = 1;
+
+ # Clear out the relation/inflation cache related to this column
+ #
+ # FIXME - this is a quick *largely incorrect* hack, pending a more
+ # serious rework during the merge of single and filter rels
+ my $rels = $self->result_source->{_relationships};
+ for my $rel (keys %$rels) {
+
+ my $acc = $rels->{$rel}{attrs}{accessor} || '';
+
+ if ( $acc eq 'single' and $rels->{$rel}{attrs}{fk_columns}{$column} ) {
+ delete $self->{related_resultsets}{$rel};
+ delete $self->{_relationship_data}{$rel};
+ #delete $self->{_inflated_column}{$rel};
+ }
+ elsif ( $acc eq 'filter' and $rel eq $column) {
+ delete $self->{related_resultsets}{$rel};
+ #delete $self->{_relationship_data}{$rel};
+ delete $self->{_inflated_column}{$rel};
+ }
}
- else {
- $dirty = 1;
+
+ if (
+ # value change from something (even if NULL)
+ $had_value
+ and
+ # no storage - no storage-value
+ $in_storage
+ and
+ # no value already stored (multiple changes before commit to storage)
+ ! exists $self->{_column_data_in_storage}{$column}
+ and
+ $self->_track_storage_value($column)
+ ) {
+ $self->{_column_data_in_storage}{$column} = $old_value;
}
}
- # sadly the update code just checks for keys, not for their value
- $self->{_dirty_columns}{$column} = 1 if $dirty;
+ return $new_value;
+ }
- # XXX clear out the relation cache for this column
- delete $self->{related_resultsets}{$column};
+ sub _eq_column_values {
+ my ($self, $col, $old, $new) = @_;
- return $new_value;
+ if (defined $old xor defined $new) {
+ return 0;
+ }
+ elsif (not defined $old) { # both undef
+ return 1;
+ }
+ elsif ($old eq $new) {
+ return 1;
+ }
+ elsif ($self->_is_column_numeric($col)) { # do a numeric comparison if datatype allows it
+ return $old == $new;
+ }
+ else {
+ return 0;
+ }
+ }
+
+ # returns a boolean indicating if the passed column should have its original
+ # value tracked between column changes and commitment to storage
+ sub _track_storage_value {
+ my ($self, $col) = @_;
+ return defined first { $col eq $_ } ($self->primary_columns);
}
=head2 set_columns
my ($self, $changes) = @_;
$changes ||= {};
my $col_data = { %{$self->{_column_data}} };
+
+ my $colinfo = $self->columns_info([ keys %$col_data ]);
foreach my $col (keys %$col_data) {
delete $col_data->{$col}
- if $self->result_source->column_info($col)->{is_auto_increment};
+ if $colinfo->{$col}{is_auto_increment};
}
my $new = { _column_data => $col_data };
next unless $rel_info->{attrs}{cascade_copy};
my $resolved = $self->result_source->_resolve_condition(
- $rel_info->{cond}, $rel, $new
+ $rel_info->{cond}, $rel, $new, $rel
);
my $copied = $rels_copied->{ $rel_info->{source} } ||= {};
sub inflate_result {
my ($class, $source, $me, $prefetch) = @_;
- my ($source_handle) = $source;
+ $source = $source->resolve
+ if $source->isa('DBIx::Class::ResultSourceHandle');
- if ($source->isa('DBIx::Class::ResultSourceHandle')) {
- $source = $source_handle->resolve
- }
- else {
- $source_handle = $source->handle
- }
-
- my $new = {
- _source_handle => $source_handle,
- _column_data => $me,
- };
- bless $new, (ref $class || $class);
+ my $new = bless
+ { _column_data => $me, _result_source => $source },
+ ref $class || $class
+ ;
foreach my $pre (keys %{$prefetch||{}}) {
- my $pre_source = $source->related_source($pre)
- or $class->throw_exception("Can't prefetch non-existent relationship ${pre}");
-
- my $accessor = $source->relationship_info($pre)->{attrs}{accessor}
- or $class->throw_exception("No accessor for prefetched $pre");
-
- my @pre_vals;
+ my (@pre_vals, $is_multi);
if (ref $prefetch->{$pre}[0] eq 'ARRAY') {
+ $is_multi = 1;
@pre_vals = @{$prefetch->{$pre}};
}
else {
@pre_vals = $prefetch->{$pre};
}
+ my $pre_source = try {
+ $source->related_source($pre)
+ }
+ catch {
+ $class->throw_exception(sprintf
+
+ "Can't inflate manual prefetch into non-existent relationship '%s' from '%s', "
+ . "check the inflation specification (columns/as) ending in '%s.%s'.",
+
+ $pre,
+ $source->source_name,
+ $pre,
+ (keys %{$pre_vals[0][0]})[0] || 'something.something...',
+ );
+ };
+
+ my $accessor = $source->relationship_info($pre)->{attrs}{accessor}
+ or $class->throw_exception("No accessor type declared for prefetched $pre");
+
+ if (! $is_multi and $accessor eq 'multi') {
+ $class->throw_exception("Manual prefetch (via select/columns) not supported with accessor 'multi'");
+ }
+
my @pre_objects;
for my $me_pref (@pre_vals) {
-
- # FIXME - this should not be necessary
- # the collapser currently *could* return bogus elements with all
- # columns set to undef
- my $has_def;
- for (values %{$me_pref->[0]}) {
- if (defined $_) {
- $has_def++;
- last;
- }
- }
- next unless $has_def;
-
push @pre_objects, $pre_source->result_class->inflate_result(
$pre_source, @$me_pref
);
=over
- =item Arguments: none
+ =item Arguments: $result_source_instance
=item Returns: a ResultSource instance
=cut
sub result_source {
- my $self = shift;
-
- if (@_) {
- $self->_source_handle($_[0]->handle);
- } else {
- $self->_source_handle->resolve;
- }
+ $_[0]->throw_exception( 'result_source can be called on instances only' )
+ unless ref $_[0];
+
+ @_ > 1
+ ? $_[0]->{_result_source} = $_[1]
+
+ # note this is a || not a ||=, the difference is important
+ : $_[0]->{_result_source} || do {
+ my $class = ref $_[0];
+ $_[0]->can('result_source_instance')
+ ? $_[0]->result_source_instance
+ : $_[0]->throw_exception(
+ "No result source instance registered for $class, did you forget to call $class->table(...) ?"
+ )
+ }
+ ;
}
=head2 register_column
=back
Fetches a fresh copy of the Row object from the database and returns it.
-
- If passed the \%attrs argument, will first apply these attributes to
+ Throws an exception if a proper WHERE clause identifying the database row
+ can not be constructed (i.e. if the original object does not contain its
+ entire
+ L<primary key|DBIx::Class::Manual::Intro/The Significance and Importance of Primary Keys>
+ ). If passed the \%attrs argument, will first apply these attributes to
the resultset used to find the row.
This copy can then be used to compare to an existing row object, to
$resultset = $resultset->search(undef, $attrs);
}
- return $resultset->find($self->{_orig_ident} || $self->ident_condition);
+ return $resultset->find($self->_storage_ident_condition);
}
- =head2 discard_changes ($attrs)
+ =head2 discard_changes ($attrs?)
+
+ $row->discard_changes
+
+ =over
+
+ =item Arguments: none or $attrs
+
+ =item Returns: self (updates object in-place)
+
+ =back
Re-selects the row from the database, losing any changes that had
- been made.
+ been made. Throws an exception if a proper C<WHERE> clause identifying
+ the database row can not be constructed (i.e. if the original object
+ does not contain its entire
+ L<primary key|DBIx::Class::Manual::Intro/The Significance and Importance of Primary Keys>).
This method can also be used to refresh from storage, retrieving any
changes made since the row was last read from storage.
- $attrs is expected to be a hashref of attributes suitable for passing as the
- second argument to $resultset->search($cond, $attrs);
+ $attrs, if supplied, is expected to be a hashref of attributes suitable for passing as the
+ second argument to C<< $resultset->search($cond, $attrs) >>;
+
+ Note: If you are using L<DBIx::Class::Storage::DBI::Replicated> as your
+ storage, please keep in mind that if you L</discard_changes> on a row that you
+ just updated or created, you should wrap the entire operation inside a transaction.
+ Otherwise you run the risk that you insert or update to the master database
+ but read from a replicant database that has not yet been updated from the
+ master. This can produce unexpected results.
=cut
sub discard_changes {
my ($self, $attrs) = @_;
- delete $self->{_dirty_columns};
return unless $self->in_storage; # Don't reload if we aren't real!
# add a replication default to read from the master only
}
}
-
=head2 throw_exception
See L<DBIx::Class::Schema/throw_exception>.
sub throw_exception {
my $self=shift;
- if (ref $self && ref $self->result_source && $self->result_source->schema) {
- $self->result_source->schema->throw_exception(@_)
+ if (ref $self && ref $self->result_source ) {
+ $self->result_source->throw_exception(@_)
}
else {
DBIx::Class::Exception->throw(@_);
Returns the primary key(s) for a row. Can't be called as a class method.
Actually implemented in L<DBIx::Class::PK>
- =head2 discard_changes
-
- $row->discard_changes
-
- =over
-
- =item Arguments: none
-
- =item Returns: nothing (updates object in-place)
-
- =back
-
- Retrieves and sets the row object data from the database, losing any
- local changes made.
-
- This method can also be used to refresh from storage, retrieving any
- changes made since the row was last read from storage. Actually
- implemented in L<DBIx::Class::PK>
-
- Note: If you are using L<DBIx::Class::Storage::DBI::Replicated> as your
- storage, please kept in mind that if you L</discard_changes> on a row that you
- just updated or created, you should wrap the entire bit inside a transaction.
- Otherwise you run the risk that you insert or update to the master database
- but read from a replicant database that has not yet been updated from the
- master. This will result in unexpected results.
-
- =cut
-
- 1;
-
=head1 AUTHORS
Matt S. Trout <mst@shadowcatsystems.co.uk>
You may distribute this code under the same terms as Perl itself.
=cut
+
+ 1;
use base qw/DBIx::Class::Storage::DBIHacks DBIx::Class::Storage/;
use mro 'c3';
- use Carp::Clan qw/^DBIx::Class/;
- use DBI;
- use DBIx::Class::Storage::DBI::Cursor;
- use DBIx::Class::Storage::Statistics;
- use Scalar::Util();
- use List::Util();
- use Data::Dumper::Concise();
- use Sub::Name ();
-
- __PACKAGE__->mk_group_accessors('simple' =>
- qw/_connect_info _dbi_connect_info _dbh _sql_maker _sql_maker_opts _conn_pid
- _conn_tid transaction_depth _dbh_autocommit _driver_determined savepoints/
- );
+ use DBIx::Class::Carp;
+ use DBIx::Class::Exception;
+ use Scalar::Util qw/refaddr weaken reftype blessed/;
+ use List::Util qw/first/;
+ use Sub::Name 'subname';
+ use Context::Preserve 'preserve_context';
+ use Try::Tiny;
+ use overload ();
+ use Data::Compare (); # no imports!!! guard against unexpected architecture interactions
+ use DBI::Const::GetInfoType (); # no imports - keep the global type hash out of our namespace
+ use namespace::clean;
+
+ # default cursor class, overridable in connect_info attributes
+ __PACKAGE__->cursor_class('DBIx::Class::Storage::DBI::Cursor');
+
+ __PACKAGE__->mk_group_accessors('inherited' => qw/
+ sql_limit_dialect sql_quote_char sql_name_sep
+ /);
+
+ __PACKAGE__->mk_group_accessors('component_class' => qw/sql_maker_class datetime_parser_type/);
+
+ __PACKAGE__->sql_maker_class('DBIx::Class::SQLMaker');
+ __PACKAGE__->datetime_parser_type('DateTime::Format::MySQL'); # historic default
+
+ __PACKAGE__->sql_name_sep('.');
+
+ __PACKAGE__->mk_group_accessors('simple' => qw/
+ _connect_info _dbi_connect_info _dbic_connect_attributes _driver_determined
+ _dbh _dbh_details _conn_pid _sql_maker _sql_maker_opts _dbh_autocommit
+ _perform_autoinc_retrieval _autoinc_supplied_for_op
+ /);
# the values for these accessors are picked out (and deleted) from
# the attribute hashref passed to connect_info
__PACKAGE__->mk_group_accessors('simple' => @storage_options);
- # default cursor class, overridable in connect_info attributes
- __PACKAGE__->cursor_class('DBIx::Class::Storage::DBI::Cursor');
+ # capability definitions, using a 2-tiered accessor system
+ # The rationale is:
+ #
+ # A driver/user may define _use_X, which blindly without any checks says:
+ # "(do not) use this capability", (use_dbms_capability is an "inherited"
+ # type accessor)
+ #
+ # If _use_X is undef, _supports_X is then queried. This is a "simple" style
+ # accessor, which in turn calls _determine_supports_X, and stores the return
+ # in a special slot on the storage object, which is wiped every time a $dbh
+ # reconnection takes place (it is not guaranteed that upon reconnection we
+ # will get the same rdbms version). _determine_supports_X does not need to
+ # exist on a driver, as we ->can for it before calling.
- __PACKAGE__->mk_group_accessors('inherited' => qw/sql_maker_class/);
- __PACKAGE__->sql_maker_class('DBIx::Class::SQLAHacks');
+ my @capabilities = (qw/
+ insert_returning
+ insert_returning_bound
+ multicolumn_in
+
+ placeholders
+ typeless_placeholders
+
+ join_optimizer
+ /);
+ __PACKAGE__->mk_group_accessors( dbms_capability => map { "_supports_$_" } @capabilities );
+ __PACKAGE__->mk_group_accessors( use_dbms_capability => map { "_use_$_" } (@capabilities ) );
+
+ # on by default, not strictly a capability (pending rewrite)
+ __PACKAGE__->_use_join_optimizer (1);
+ sub _determine_supports_join_optimizer { 1 };
# Each of these methods need _determine_driver called before itself
# in order to function reliably. This is a purely DRY optimization
+ #
+ # get_(use)_dbms_capability need to be called on the correct Storage
+ # class, as _use_X may be hardcoded class-wide, and _supports_X calls
+ # _determine_supports_X which obv. needs a correct driver as well
my @rdbms_specific_methods = qw/
deployment_statements
sqlt_type
+ sql_maker
build_datetime_parser
datetime_parser_type
+ txn_begin
insert
insert_bulk
update
delete
select
select_single
+ with_deferred_fk_checks
+
+ get_use_dbms_capability
+ get_dbms_capability
+
+ _server_info
+ _get_server_version
/;
for my $meth (@rdbms_specific_methods) {
my $orig = __PACKAGE__->can ($meth)
- or next;
+ or die "$meth is not a ::Storage::DBI method!";
no strict qw/refs/;
no warnings qw/redefine/;
- *{__PACKAGE__ ."::$meth"} = Sub::Name::subname $meth => sub {
- if (not $_[0]->_driver_determined) {
+ *{__PACKAGE__ ."::$meth"} = subname $meth => sub {
+ if (
+ # only fire when invoked on an instance, a valid class-based invocation
+ # would e.g. be setting a default for an inherited accessor
+ ref $_[0]
+ and
+ ! $_[0]->_driver_determined
+ and
+ ! $_[0]->{_in_determine_driver}
+ ) {
$_[0]->_determine_driver;
- goto $_[0]->can($meth);
+
+ # This for some reason crashes and burns on perl 5.8.1
+ # IFF the method ends up throwing an exception
+ #goto $_[0]->can ($meth);
+
+ my $cref = $_[0]->can ($meth);
+ goto $cref;
}
- $orig->(@_);
+
+ goto $orig;
};
}
-
=head1 NAME
DBIx::Class::Storage::DBI - DBI storage handler
);
$schema->resultset('Book')->search({
- written_on => $schema->storage->datetime_parser(DateTime->now)
+ written_on => $schema->storage->datetime_parser->format_datetime(DateTime->now)
});
=head1 DESCRIPTION
sub new {
my $new = shift->next::method(@_);
- $new->transaction_depth(0);
$new->_sql_maker_opts({});
- $new->{savepoints} = [];
- $new->{_in_dbh_do} = 0;
+ $new->_dbh_details({});
+ $new->{_in_do_block} = 0;
$new->{_dbh_gen} = 0;
+ # read below to see what this does
+ $new->_arm_global_destructor;
+
$new;
}
+ # This is hack to work around perl shooting stuff in random
+ # order on exit(). If we do not walk the remaining storage
+ # objects in an END block, there is a *small but real* chance
+ # of a fork()ed child to kill the parent's shared DBI handle,
+ # *before perl reaches the DESTROY in this package*
+ # Yes, it is ugly and effective.
+ # Additionally this registry is used by the CLONE method to
+ # make sure no handles are shared between threads
+ {
+ my %seek_and_destroy;
+
+ sub _arm_global_destructor {
+ my $self = shift;
+ my $key = refaddr ($self);
+ $seek_and_destroy{$key} = $self;
+ weaken ($seek_and_destroy{$key});
+ }
+
+ END {
+ local $?; # just in case the DBI destructor changes it somehow
+
+ # destroy just the object if not native to this process/thread
+ $_->_verify_pid for (grep
+ { defined $_ }
+ values %seek_and_destroy
+ );
+ }
+
+ sub CLONE {
+ # As per DBI's recommendation, DBIC disconnects all handles as
+ # soon as possible (DBIC will reconnect only on demand from within
+ # the thread)
+ for (values %seek_and_destroy) {
+ next unless $_;
+ $_->{_dbh_gen}++; # so that existing cursors will drop as well
+ $_->_dbh(undef);
+
+ $_->transaction_depth(0);
+ $_->savepoints([]);
+ }
+ }
+ }
+
+ sub DESTROY {
+ my $self = shift;
+
+ # some databases spew warnings on implicit disconnect
+ $self->_verify_pid;
+ local $SIG{__WARN__} = sub {};
+ $self->_dbh(undef);
+
+ # this op is necessary, since the very last perl runtime statement
+ # triggers a global destruction shootout, and the $SIG localization
+ # may very well be destroyed before perl actually gets to do the
+ # $dbh undef
+ 1;
+ }
+
+ # handle pid changes correctly - do not destroy parent's connection
+ sub _verify_pid {
+ my $self = shift;
+
+ my $pid = $self->_conn_pid;
+ if( defined $pid and $pid != $$ and my $dbh = $self->_dbh ) {
+ $dbh->{InactiveDestroy} = 1;
+ $self->{_dbh_gen}++;
+ $self->_dbh(undef);
+ $self->transaction_depth(0);
+ $self->savepoints([]);
+ }
+
+ return;
+ }
+
=head2 connect_info
This method is normally called by L<DBIx::Class::Schema/connection>, which
=head3 DBIx::Class specific connection attributes
- In addition to the standard L<DBI|DBI/ATTRIBUTES_COMMON_TO_ALL_HANDLES>
- L<connection|DBI/Database_Handle_Attributes> attributes, DBIx::Class recognizes
+ In addition to the standard L<DBI|DBI/ATTRIBUTES COMMON TO ALL HANDLES>
+ L<connection|DBI/Database Handle Attributes> attributes, DBIx::Class recognizes
the following connection options. These options can be mixed in with your other
L<DBI> connection attributes, or placed in a separate hashref
(C<\%extra_attributes>) as shown above.
=item limit_dialect
- Sets the limit dialect. This is useful for JDBC-bridge among others
- where the remote SQL-dialect cannot be determined by the name of the
- driver alone. See also L<SQL::Abstract::Limit>.
+ Sets a specific SQL::Abstract::Limit-style limit dialect, overriding the
+ default L</sql_limit_dialect> setting of the storage (if any). For a list
+ of available limit dialects see L<DBIx::Class::SQLMaker::LimitDialects>.
+
+ =item quote_names
+
+ When true automatically sets L</quote_char> and L</name_sep> to the characters
+ appropriate for your particular RDBMS. This option is preferred over specifying
+ L</quote_char> directly.
=item quote_char
- Specifies what characters to use to quote table and column names. If
- you use this you will want to specify L</name_sep> as well.
+ Specifies what characters to use to quote table and column names.
C<quote_char> expects either a single character, in which case is it
is placed on either side of the table/column name, or an arrayref of length
=item name_sep
- This only needs to be used in conjunction with C<quote_char>, and is used to
+ This parameter is only useful in conjunction with C<quote_char>, and is used to
specify the character that separates elements (schemas, tables, columns) from
- each other. In most cases this is simply a C<.>.
-
- The consequences of not supplying this value is that L<SQL::Abstract>
- will assume DBIx::Class' uses of aliases to be complete column
- names. The output will look like I<"me.name"> when it should actually
- be I<"me"."name">.
+ each other. If unspecified it defaults to the most commonly used C<.>.
=item unsafe
'postgres',
'my_pg_password',
{ AutoCommit => 1 },
- { quote_char => q{"}, name_sep => q{.} },
+ { quote_char => q{"} },
]
);
my @args = @{ $info->{arguments} };
- $self->_dbi_connect_info([@args,
- %attrs && !(ref $args[0] eq 'CODE') ? \%attrs : ()]);
+ if (keys %attrs and ref $args[0] ne 'CODE') {
+ carp
+ 'You provided explicit AutoCommit => 0 in your connection_info. '
+ . 'This is almost universally a bad idea (see the footnotes of '
+ . 'DBIx::Class::Storage::DBI for more info). If you still want to '
+ . 'do this you can set $ENV{DBIC_UNSAFE_AUTOCOMMIT_OK} to disable '
+ . 'this warning.'
+ if ! $attrs{AutoCommit} and ! $ENV{DBIC_UNSAFE_AUTOCOMMIT_OK};
+
+ push @args, \%attrs if keys %attrs;
+ }
+ $self->_dbi_connect_info(\@args);
+
+ # FIXME - dirty:
+ # save the attributes in a separate accessor so they are always
+ # introspectable, even in case of a CODE $dbhmaker
+ $self->_dbic_connect_attributes (\%attrs);
return $self->_connect_info;
}
delete @attrs{@storage_opts} if @storage_opts;
my @sql_maker_opts = grep exists $attrs{$_},
- qw/limit_dialect quote_char name_sep/;
+ qw/limit_dialect quote_char name_sep quote_names/;
@{ $info{sql_maker_options} }{@sql_maker_opts} =
delete @attrs{@sql_maker_opts} if @sql_maker_opts;
return \%info;
}
- sub _default_dbi_connect_attributes {
- return {
+ sub _default_dbi_connect_attributes () {
+ +{
AutoCommit => 1,
- RaiseError => 1,
PrintError => 0,
+ RaiseError => 1,
+ ShowErrorStatement => 1,
};
}
sub dbh_do {
my $self = shift;
- my $code = shift;
-
- my $dbh = $self->_get_dbh;
-
- return $self->$code($dbh, @_) if $self->{_in_dbh_do}
- || $self->{transaction_depth};
-
- local $self->{_in_dbh_do} = 1;
-
- my @result;
- my $want_array = wantarray;
+ my $run_target = shift;
- eval {
-
- if($want_array) {
- @result = $self->$code($dbh, @_);
- }
- elsif(defined $want_array) {
- $result[0] = $self->$code($dbh, @_);
- }
- else {
- $self->$code($dbh, @_);
- }
- };
-
- # ->connected might unset $@ - copy
- my $exception = $@;
- if(!$exception) { return $want_array ? @result : $result[0] }
+ # short circuit when we know there is no need for a runner
+ #
+ # FIXME - assumption may be wrong
+ # the rationale for the txn_depth check is that if this block is a part
+ # of a larger transaction, everything up to that point is screwed anyway
+ return $self->$run_target($self->_get_dbh, @_)
+ if $self->{_in_do_block} or $self->transaction_depth;
- $self->throw_exception($exception) if $self->connected;
+ my $args = \@_;
- # We were not connected - reconnect and retry, but let any
- # exception fall right through this time
- carp "Retrying $code after catching disconnected exception: $exception"
- if $ENV{DBIC_DBIRETRY_DEBUG};
- $self->_populate_dbh;
- $self->$code($self->_dbh, @_);
+ DBIx::Class::Storage::BlockRunner->new(
+ storage => $self,
+ run_code => sub { $self->$run_target ($self->_get_dbh, @$args ) },
+ wrap_txn => 0,
+ retry_handler => sub { ! ( $_[0]->retried_count or $_[0]->storage->connected ) },
+ )->run;
}
- # This is basically a blend of dbh_do above and DBIx::Class::Storage::txn_do.
- # It also informs dbh_do to bypass itself while under the direction of txn_do,
- # via $self->{_in_dbh_do} (this saves some redundant eval and errorcheck, etc)
sub txn_do {
- my $self = shift;
- my $coderef = shift;
-
- ref $coderef eq 'CODE' or $self->throw_exception
- ('$coderef must be a CODE reference');
-
- return $coderef->(@_) if $self->{transaction_depth} && ! $self->auto_savepoint;
-
- local $self->{_in_dbh_do} = 1;
-
- my @result;
- my $want_array = wantarray;
-
- my $tried = 0;
- while(1) {
- eval {
- $self->_get_dbh;
-
- $self->txn_begin;
- if($want_array) {
- @result = $coderef->(@_);
- }
- elsif(defined $want_array) {
- $result[0] = $coderef->(@_);
- }
- else {
- $coderef->(@_);
- }
- $self->txn_commit;
- };
-
- # ->connected might unset $@ - copy
- my $exception = $@;
- if(!$exception) { return $want_array ? @result : $result[0] }
-
- if($tried++ || $self->connected) {
- eval { $self->txn_rollback };
- my $rollback_exception = $@;
- if($rollback_exception) {
- my $exception_class = "DBIx::Class::Storage::NESTED_ROLLBACK_EXCEPTION";
- $self->throw_exception($exception) # propagate nested rollback
- if $rollback_exception =~ /$exception_class/;
-
- $self->throw_exception(
- "Transaction aborted: ${exception}. "
- . "Rollback failed: ${rollback_exception}"
- );
- }
- $self->throw_exception($exception)
- }
-
- # We were not connected, and was first try - reconnect and retry
- # via the while loop
- carp "Retrying $coderef after catching disconnected exception: $exception"
- if $ENV{DBIC_DBIRETRY_DEBUG};
- $self->_populate_dbh;
- }
+ $_[0]->_get_dbh; # connects or reconnects on pid change, necessary to grab correct txn_depth
+ shift->next::method(@_);
}
=head2 disconnect
$self->_do_connection_actions(disconnect_call_ => $_) for @actions;
- $self->_dbh_rollback unless $self->_dbh_autocommit;
+ # stops the "implicit rollback on disconnect" warning
+ $self->_exec_txn_rollback unless $self->_dbh_autocommit;
+ %{ $self->_dbh->{CachedKids} } = ();
$self->_dbh->disconnect;
$self->_dbh(undef);
$self->{_dbh_gen}++;
sub _seems_connected {
my $self = shift;
+ $self->_verify_pid;
+
my $dbh = $self->_dbh
or return 0;
- if(defined $self->_conn_tid && $self->_conn_tid != threads->tid) {
- $self->_dbh(undef);
- $self->{_dbh_gen}++;
- return 0;
- }
- else {
- $self->_verify_pid;
- return 0 if !$self->_dbh;
- }
-
return $dbh->FETCH('Active');
}
return $dbh->ping;
}
- # handle pid changes correctly
- # NOTE: assumes $self->_dbh is a valid $dbh
- sub _verify_pid {
- my ($self) = @_;
-
- return if defined $self->_conn_pid && $self->_conn_pid == $$;
-
- $self->_dbh->{InactiveDestroy} = 1;
- $self->_dbh(undef);
- $self->{_dbh_gen}++;
-
- return;
- }
-
sub ensure_connected {
my ($self) = @_;
Returns a C<$dbh> - a data base handle of class L<DBI>. The returned handle
is guaranteed to be healthy by implicitly calling L</connected>, and if
necessary performing a reconnection before returning. Keep in mind that this
- is very B<expensive> on some database engines. Consider using L<dbh_do>
+ is very B<expensive> on some database engines. Consider using L</dbh_do>
instead.
=cut
# this is the internal "get dbh or connect (don't check)" method
sub _get_dbh {
my $self = shift;
- $self->_verify_pid if $self->_dbh;
+ $self->_verify_pid;
$self->_populate_dbh unless $self->_dbh;
return $self->_dbh;
}
- sub _sql_maker_args {
- my ($self) = @_;
-
- return (
- bindtype=>'columns',
- array_datatypes => 1,
- limit_dialect => $self->_get_dbh,
- %{$self->_sql_maker_opts}
- );
- }
-
sub sql_maker {
my ($self) = @_;
unless ($self->_sql_maker) {
my $sql_maker_class = $self->sql_maker_class;
- $self->ensure_class_loaded ($sql_maker_class);
- $self->_sql_maker($sql_maker_class->new( $self->_sql_maker_args ));
+
+ my %opts = %{$self->_sql_maker_opts||{}};
+ my $dialect =
+ $opts{limit_dialect}
+ ||
+ $self->sql_limit_dialect
+ ||
+ do {
+ my $s_class = (ref $self) || $self;
+ carp (
+ "Your storage class ($s_class) does not set sql_limit_dialect and you "
+ . 'have not supplied an explicit limit_dialect in your connection_info. '
+ . 'DBIC will attempt to use the GenericSubQ dialect, which works on most '
+ . 'databases but can be (and often is) painfully slow. '
+ . "Please file an RT ticket against '$s_class' ."
+ );
+
+ 'GenericSubQ';
+ }
+ ;
+
+ my ($quote_char, $name_sep);
+
+ if ($opts{quote_names}) {
+ $quote_char = (delete $opts{quote_char}) || $self->sql_quote_char || do {
+ my $s_class = (ref $self) || $self;
+ carp (
+ "You requested 'quote_names' but your storage class ($s_class) does "
+ . 'not explicitly define a default sql_quote_char and you have not '
+ . 'supplied a quote_char as part of your connection_info. DBIC will '
+ .q{default to the ANSI SQL standard quote '"', which works most of }
+ . "the time. Please file an RT ticket against '$s_class'."
+ );
+
+ '"'; # RV
+ };
+
+ $name_sep = (delete $opts{name_sep}) || $self->sql_name_sep;
+ }
+
+ $self->_sql_maker($sql_maker_class->new(
+ bindtype=>'columns',
+ array_datatypes => 1,
+ limit_dialect => $dialect,
+ ($quote_char ? (quote_char => $quote_char) : ()),
+ name_sep => ($name_sep || '.'),
+ %opts,
+ ));
}
return $self->_sql_maker;
}
my @info = @{$self->_dbi_connect_info || []};
$self->_dbh(undef); # in case ->connected failed we might get sent here
+ $self->_dbh_details({}); # reset everything we know
+
$self->_dbh($self->_connect(@info));
- $self->_conn_pid($$);
- $self->_conn_tid(threads->tid) if $INC{'threads.pm'};
+ $self->_conn_pid($$) if $^O ne 'MSWin32'; # on win32 these are in fact threads
$self->_determine_driver;
$self->_do_connection_actions(connect_call_ => $_) for @actions;
}
+
+
+ sub set_use_dbms_capability {
+ $_[0]->set_inherited ($_[1], $_[2]);
+ }
+
+ sub get_use_dbms_capability {
+ my ($self, $capname) = @_;
+
+ my $use = $self->get_inherited ($capname);
+ return defined $use
+ ? $use
+ : do { $capname =~ s/^_use_/_supports_/; $self->get_dbms_capability ($capname) }
+ ;
+ }
+
+ sub set_dbms_capability {
+ $_[0]->_dbh_details->{capability}{$_[1]} = $_[2];
+ }
+
+ sub get_dbms_capability {
+ my ($self, $capname) = @_;
+
+ my $cap = $self->_dbh_details->{capability}{$capname};
+
+ unless (defined $cap) {
+ if (my $meth = $self->can ("_determine$capname")) {
+ $cap = $self->$meth ? 1 : 0;
+ }
+ else {
+ $cap = 0;
+ }
+
+ $self->set_dbms_capability ($capname, $cap);
+ }
+
+ return $cap;
+ }
+
+ sub _server_info {
+ my $self = shift;
+
+ my $info;
+ unless ($info = $self->_dbh_details->{info}) {
+
+ $info = {};
+
+ my $server_version = try { $self->_get_server_version };
+
+ if (defined $server_version) {
+ $info->{dbms_version} = $server_version;
+
+ my ($numeric_version) = $server_version =~ /^([\d\.]+)/;
+ my @verparts = split (/\./, $numeric_version);
+ if (
+ @verparts
+ &&
+ $verparts[0] <= 999
+ ) {
+ # consider only up to 3 version parts, iff not more than 3 digits
+ my @use_parts;
+ while (@verparts && @use_parts < 3) {
+ my $p = shift @verparts;
+ last if $p > 999;
+ push @use_parts, $p;
+ }
+ push @use_parts, 0 while @use_parts < 3;
+
+ $info->{normalized_dbms_version} = sprintf "%d.%03d%03d", @use_parts;
+ }
+ }
+
+ $self->_dbh_details->{info} = $info;
+ }
+
+ return $info;
+ }
+
+ sub _get_server_version {
+ shift->_dbh_get_info('SQL_DBMS_VER');
+ }
+
+ sub _dbh_get_info {
+ my ($self, $info) = @_;
+
+ if ($info =~ /[^0-9]/) {
+ $info = $DBI::Const::GetInfoType::GetInfoType{$info};
+ $self->throw_exception("Info type '$_[1]' not provided by DBI::Const::GetInfoType")
+ unless defined $info;
+ }
+
+ return try { $self->_get_dbh->get_info($info) } || undef;
+ }
+
sub _determine_driver {
my ($self) = @_;
} else {
# if connect_info is a CODEREF, we have no choice but to connect
if (ref $self->_dbi_connect_info->[0] &&
- Scalar::Util::reftype($self->_dbi_connect_info->[0]) eq 'CODE') {
+ reftype $self->_dbi_connect_info->[0] eq 'CODE') {
$self->_populate_dbh;
$driver = $self->_dbh->{Driver}{Name};
}
else {
# try to use dsn to not require being connected, the driver may still
# force a connection in _rebless to determine version
- ($driver) = $self->_dbi_connect_info->[0] =~ /dbi:([^:]+):/i;
+ # (dsn may not be supplied at all if all we do is make a mock-schema)
+ my $dsn = $self->_dbi_connect_info->[0] || $ENV{DBI_DSN} || '';
+ ($driver) = $dsn =~ /dbi:([^:]+):/i;
+ $driver ||= $ENV{DBI_DRIVER};
}
}
- my $storage_class = "DBIx::Class::Storage::DBI::${driver}";
- if ($self->load_optional_class($storage_class)) {
- mro::set_mro($storage_class, 'c3');
- bless $self, $storage_class;
- $self->_rebless();
+ if ($driver) {
+ my $storage_class = "DBIx::Class::Storage::DBI::${driver}";
+ if ($self->load_optional_class($storage_class)) {
+ mro::set_mro($storage_class, 'c3');
+ bless $self, $storage_class;
+ $self->_rebless();
+ }
}
}
$self->_driver_determined(1);
+ Class::C3->reinitialize() if DBIx::Class::_ENV_::OLD_MRO;
+
$self->_init; # run driver-specific initializations
$self->_run_connection_actions
my $attrs = shift @do_args;
my @bind = map { [ undef, $_ ] } @do_args;
- $self->_query_start($sql, @bind);
- $self->_get_dbh->do($sql, $attrs, @do_args);
- $self->_query_end($sql, @bind);
+ $self->dbh_do(sub {
+ $_[0]->_query_start($sql, \@bind);
+ $_[1]->do($sql, $attrs, @do_args);
+ $_[0]->_query_end($sql, \@bind);
+ });
}
return $self;
my ($old_connect_via, $dbh);
- if ($INC{'Apache/DBI.pm'} && $ENV{MOD_PERL}) {
- $old_connect_via = $DBI::connect_via;
- $DBI::connect_via = 'connect';
- }
+ local $DBI::connect_via = 'connect' if $INC{'Apache/DBI.pm'} && $ENV{MOD_PERL};
- eval {
+ try {
if(ref $info[0] eq 'CODE') {
- $dbh = $info[0]->();
+ $dbh = $info[0]->();
}
else {
- $dbh = DBI->connect(@info);
+ require DBI;
+ $dbh = DBI->connect(@info);
+ }
+
+ if (!$dbh) {
+ die $DBI::errstr;
}
- if($dbh && !$self->unsafe) {
- my $weak_self = $self;
- Scalar::Util::weaken($weak_self);
- $dbh->{HandleError} = sub {
+ unless ($self->unsafe) {
+
+ $self->throw_exception(
+ 'Refusing clobbering of {HandleError} installed on externally supplied '
+ ."DBI handle $dbh. Either remove the handler or use the 'unsafe' attribute."
+ ) if $dbh->{HandleError} and ref $dbh->{HandleError} ne '__DBIC__DBH__ERROR__HANDLER__';
+
+ # Default via _default_dbi_connect_attributes is 1, hence it was an explicit
+ # request, or an external handle. Complain and set anyway
+ unless ($dbh->{RaiseError}) {
+ carp( ref $info[0] eq 'CODE'
+
+ ? "The 'RaiseError' of the externally supplied DBI handle is set to false. "
+ ."DBIx::Class will toggle it back to true, unless the 'unsafe' connect "
+ .'attribute has been supplied'
+
+ : 'RaiseError => 0 supplied in your connection_info, without an explicit '
+ .'unsafe => 1. Toggling RaiseError back to true'
+ );
+
+ $dbh->{RaiseError} = 1;
+ }
+
+ # this odd anonymous coderef dereference is in fact really
+ # necessary to avoid the unwanted effect described in perl5
+ # RT#75792
+ sub {
+ my $weak_self = $_[0];
+ weaken $weak_self;
+
+ # the coderef is blessed so we can distinguish it from externally
+ # supplied handles (which must be preserved)
+ $_[1]->{HandleError} = bless sub {
if ($weak_self) {
$weak_self->throw_exception("DBI Exception: $_[0]");
}
else {
# the handler may be invoked by something totally out of
# the scope of DBIC
- croak ("DBI Exception: $_[0]");
+ DBIx::Class::Exception->throw("DBI Exception (unhandled by DBIC, ::Schema GCed): $_[0]");
}
- };
- $dbh->{ShowErrorStatement} = 1;
- $dbh->{RaiseError} = 1;
- $dbh->{PrintError} = 0;
+ }, '__DBIC__DBH__ERROR__HANDLER__';
+ }->($self, $dbh);
}
+ }
+ catch {
+ $self->throw_exception("DBI Connection failed: $_")
};
- $DBI::connect_via = $old_connect_via if $old_connect_via;
-
- $self->throw_exception("DBI Connection failed: " . ($@||$DBI::errstr))
- if !$dbh || $@;
-
$self->_dbh_autocommit($dbh->{AutoCommit});
$dbh;
}
- sub svp_begin {
- my ($self, $name) = @_;
-
- $name = $self->_svp_generate_name
- unless defined $name;
-
- $self->throw_exception ("You can't use savepoints outside a transaction")
- if $self->{transaction_depth} == 0;
-
- $self->throw_exception ("Your Storage implementation doesn't support savepoints")
- unless $self->can('_svp_begin');
-
- push @{ $self->{savepoints} }, $name;
-
- $self->debugobj->svp_begin($name) if $self->debug;
-
- return $self->_svp_begin($name);
- }
-
- sub svp_release {
- my ($self, $name) = @_;
-
- $self->throw_exception ("You can't use savepoints outside a transaction")
- if $self->{transaction_depth} == 0;
-
- $self->throw_exception ("Your Storage implementation doesn't support savepoints")
- unless $self->can('_svp_release');
-
- if (defined $name) {
- $self->throw_exception ("Savepoint '$name' does not exist")
- unless grep { $_ eq $name } @{ $self->{savepoints} };
-
- # Dig through the stack until we find the one we are releasing. This keeps
- # the stack up to date.
- my $svp;
-
- do { $svp = pop @{ $self->{savepoints} } } while $svp ne $name;
- } else {
- $name = pop @{ $self->{savepoints} };
- }
-
- $self->debugobj->svp_release($name) if $self->debug;
-
- return $self->_svp_release($name);
- }
-
- sub svp_rollback {
- my ($self, $name) = @_;
-
- $self->throw_exception ("You can't use savepoints outside a transaction")
- if $self->{transaction_depth} == 0;
-
- $self->throw_exception ("Your Storage implementation doesn't support savepoints")
- unless $self->can('_svp_rollback');
-
- if (defined $name) {
- # If they passed us a name, verify that it exists in the stack
- unless(grep({ $_ eq $name } @{ $self->{savepoints} })) {
- $self->throw_exception("Savepoint '$name' does not exist!");
- }
-
- # Dig through the stack until we find the one we are releasing. This keeps
- # the stack up to date.
- while(my $s = pop(@{ $self->{savepoints} })) {
- last if($s eq $name);
- }
- # Add the savepoint back to the stack, as a rollback doesn't remove the
- # named savepoint, only everything after it.
- push(@{ $self->{savepoints} }, $name);
- } else {
- # We'll assume they want to rollback to the last savepoint
- $name = $self->{savepoints}->[-1];
- }
-
- $self->debugobj->svp_rollback($name) if $self->debug;
-
- return $self->_svp_rollback($name);
- }
-
- sub _svp_generate_name {
- my ($self) = @_;
-
- return 'savepoint_'.scalar(@{ $self->{'savepoints'} });
- }
-
sub txn_begin {
my $self = shift;
# this means we have not yet connected and do not know the AC status
- # (e.g. coderef $dbh)
- $self->ensure_connected if (! defined $self->_dbh_autocommit);
-
- if($self->{transaction_depth} == 0) {
- $self->debugobj->txn_begin()
- if $self->debug;
- $self->_dbh_begin_work;
+ # (e.g. coderef $dbh), need a full-fledged connection check
+ if (! defined $self->_dbh_autocommit) {
+ $self->ensure_connected;
}
- elsif ($self->auto_savepoint) {
- $self->svp_begin;
+ # Otherwise simply connect or re-connect on pid changes
+ else {
+ $self->_get_dbh;
}
- $self->{transaction_depth}++;
+
+ $self->next::method(@_);
}
- sub _dbh_begin_work {
+ sub _exec_txn_begin {
my $self = shift;
# if the user is utilizing txn_do - good for him, otherwise we need to
# We do this via ->dbh_do instead of ->dbh, so that the ->dbh "ping"
# will be replaced by a failure of begin_work itself (which will be
# then retried on reconnect)
- if ($self->{_in_dbh_do}) {
+ if ($self->{_in_do_block}) {
$self->_dbh->begin_work;
} else {
$self->dbh_do(sub { $_[1]->begin_work });
sub txn_commit {
my $self = shift;
- if ($self->{transaction_depth} == 1) {
- $self->debugobj->txn_commit()
- if ($self->debug);
- $self->_dbh_commit;
- $self->{transaction_depth} = 0
- if $self->_dbh_autocommit;
- }
- elsif($self->{transaction_depth} > 1) {
- $self->{transaction_depth}--;
- $self->svp_release
- if $self->auto_savepoint;
+
+ $self->_verify_pid if $self->_dbh;
+ $self->throw_exception("Unable to txn_commit() on a disconnected storage")
+ unless $self->_dbh;
+
+ # esoteric case for folks using external $dbh handles
+ if (! $self->transaction_depth and ! $self->_dbh->FETCH('AutoCommit') ) {
+ carp "Storage transaction_depth 0 does not match "
+ ."false AutoCommit of $self->{_dbh}, attempting COMMIT anyway";
+ $self->transaction_depth(1);
}
+
+ $self->next::method(@_);
+
+ # if AutoCommit is disabled txn_depth never goes to 0
+ # as a new txn is started immediately on commit
+ $self->transaction_depth(1) if (
+ !$self->transaction_depth
+ and
+ defined $self->_dbh_autocommit
+ and
+ ! $self->_dbh_autocommit
+ );
}
- sub _dbh_commit {
- my $self = shift;
- my $dbh = $self->_dbh
- or $self->throw_exception('cannot COMMIT on a disconnected handle');
- $dbh->commit;
+ sub _exec_txn_commit {
+ shift->_dbh->commit;
}
sub txn_rollback {
my $self = shift;
- my $dbh = $self->_dbh;
- eval {
- if ($self->{transaction_depth} == 1) {
- $self->debugobj->txn_rollback()
- if ($self->debug);
- $self->{transaction_depth} = 0
- if $self->_dbh_autocommit;
- $self->_dbh_rollback;
- }
- elsif($self->{transaction_depth} > 1) {
- $self->{transaction_depth}--;
- if ($self->auto_savepoint) {
- $self->svp_rollback;
- $self->svp_release;
- }
- }
- else {
- die DBIx::Class::Storage::NESTED_ROLLBACK_EXCEPTION->new;
- }
- };
- if ($@) {
- my $error = $@;
- my $exception_class = "DBIx::Class::Storage::NESTED_ROLLBACK_EXCEPTION";
- $error =~ /$exception_class/ and $self->throw_exception($error);
- # ensure that a failed rollback resets the transaction depth
- $self->{transaction_depth} = $self->_dbh_autocommit ? 0 : 1;
- $self->throw_exception($error);
+
+ $self->_verify_pid if $self->_dbh;
+ $self->throw_exception("Unable to txn_rollback() on a disconnected storage")
+ unless $self->_dbh;
+
+ # esoteric case for folks using external $dbh handles
+ if (! $self->transaction_depth and ! $self->_dbh->FETCH('AutoCommit') ) {
+ carp "Storage transaction_depth 0 does not match "
+ ."false AutoCommit of $self->{_dbh}, attempting ROLLBACK anyway";
+ $self->transaction_depth(1);
}
+
+ $self->next::method(@_);
+
+ # if AutoCommit is disabled txn_depth never goes to 0
+ # as a new txn is started immediately on commit
+ $self->transaction_depth(1) if (
+ !$self->transaction_depth
+ and
+ defined $self->_dbh_autocommit
+ and
+ ! $self->_dbh_autocommit
+ );
}
- sub _dbh_rollback {
- my $self = shift;
- my $dbh = $self->_dbh
- or $self->throw_exception('cannot ROLLBACK on a disconnected handle');
- $dbh->rollback;
+ sub _exec_txn_rollback {
+ shift->_dbh->rollback;
+ }
+
+ # generate some identical methods
+ for my $meth (qw/svp_begin svp_release svp_rollback/) {
+ no strict qw/refs/;
+ *{__PACKAGE__ ."::$meth"} = subname $meth => sub {
+ my $self = shift;
+ $self->_verify_pid if $self->_dbh;
+ $self->throw_exception("Unable to $meth() on a disconnected storage")
+ unless $self->_dbh;
+ $self->next::method(@_);
+ };
}
# This used to be the top-half of _execute. It was split out to make it
# easier to override in NoBindVars without duping the rest. It takes up
# all of _execute's args, and emits $sql, @bind.
sub _prep_for_execute {
- my ($self, $op, $extra_bind, $ident, $args) = @_;
+ #my ($self, $op, $ident, $args) = @_;
+ return shift->_gen_sql_bind(@_)
+ }
- if( Scalar::Util::blessed($ident) && $ident->isa("DBIx::Class::ResultSource") ) {
- $ident = $ident->from();
- }
+ sub _gen_sql_bind {
+ my ($self, $op, $ident, $args) = @_;
+
+ my ($sql, @bind) = $self->sql_maker->$op(
+ blessed($ident) ? $ident->from : $ident,
+ @$args,
+ );
- my ($sql, @bind) = $self->sql_maker->$op($ident, @$args);
+ if (
+ ! $ENV{DBIC_DT_SEARCH_OK}
+ and
+ $op eq 'select'
+ and
+ first { blessed($_->[1]) && $_->[1]->isa('DateTime') } @bind
+ ) {
+ carp_unique 'DateTime objects passed to search() are not supported '
+ . 'properly (InflateColumn::DateTime formats and settings are not '
+ . 'respected.) See "Formatting DateTime objects in queries" in '
+ . 'DBIx::Class::Manual::Cookbook. To disable this warning for good '
+ . 'set $ENV{DBIC_DT_SEARCH_OK} to true'
+ }
- unshift(@bind,
- map { ref $_ eq 'ARRAY' ? $_ : [ '!!dummy', $_ ] } @$extra_bind)
- if $extra_bind;
- return ($sql, \@bind);
+ return( $sql, $self->_resolve_bindattrs(
+ $ident, [ @{$args->[2]{bind}||[]}, @bind ]
+ ));
}
+ sub _resolve_bindattrs {
+ my ($self, $ident, $bind, $colinfos) = @_;
- sub _fix_bind_params {
- my ($self, @bind) = @_;
+ $colinfos ||= {};
- ### Turn @bind from something like this:
- ### ( [ "artist", 1 ], [ "cdid", 1, 3 ] )
- ### to this:
- ### ( "'1'", "'1'", "'3'" )
- return
- map {
- if ( defined( $_ && $_->[1] ) ) {
- map { qq{'$_'}; } @{$_}[ 1 .. $#$_ ];
- }
- else { q{'NULL'}; }
- } @bind;
- }
+ my $resolve_bindinfo = sub {
+ #my $infohash = shift;
- sub _query_start {
- my ( $self, $sql, @bind ) = @_;
+ %$colinfos = %{ $self->_resolve_column_info($ident) }
+ unless keys %$colinfos;
+
+ my $ret;
+ if (my $col = $_[0]->{dbic_colname}) {
+ $ret = { %{$_[0]} };
- if ( $self->debug ) {
- @bind = $self->_fix_bind_params(@bind);
+ $ret->{sqlt_datatype} ||= $colinfos->{$col}{data_type}
+ if $colinfos->{$col}{data_type};
- $self->debugobj->query_start( $sql, @bind );
+ $ret->{sqlt_size} ||= $colinfos->{$col}{size}
+ if $colinfos->{$col}{size};
}
- }
- sub _query_end {
- my ( $self, $sql, @bind ) = @_;
+ $ret || $_[0];
+ };
- if ( $self->debug ) {
- @bind = $self->_fix_bind_params(@bind);
- $self->debugobj->query_end( $sql, @bind );
+ return [ map {
+ if (ref $_ ne 'ARRAY') {
+ [{}, $_]
+ }
+ elsif (! defined $_->[0]) {
+ [{}, $_->[1]]
+ }
+ elsif (ref $_->[0] eq 'HASH') {
+ [
+ ($_->[0]{dbd_attrs} or $_->[0]{sqlt_datatype}) ? $_->[0] : $resolve_bindinfo->($_->[0]),
+ $_->[1]
+ ]
+ }
+ elsif (ref $_->[0] eq 'SCALAR') {
+ [ { sqlt_datatype => ${$_->[0]} }, $_->[1] ]
+ }
+ else {
+ [ $resolve_bindinfo->({ dbic_colname => $_->[0] }), $_->[1] ]
}
+ } @$bind ];
}
- sub _dbh_execute {
- my ($self, $dbh, $op, $extra_bind, $ident, $bind_attributes, @args) = @_;
+ sub _format_for_trace {
+ #my ($self, $bind) = @_;
- my ($sql, $bind) = $self->_prep_for_execute($op, $extra_bind, $ident, \@args);
+ ### Turn @bind from something like this:
+ ### ( [ "artist", 1 ], [ \%attrs, 3 ] )
+ ### to this:
+ ### ( "'1'", "'3'" )
- $self->_query_start( $sql, @$bind );
+ map {
+ defined( $_ && $_->[1] )
+ ? qq{'$_->[1]'}
+ : q{NULL}
+ } @{$_[1] || []};
+ }
- my $sth = $self->sth($sql,$op);
+ sub _query_start {
+ my ( $self, $sql, $bind ) = @_;
- my $placeholder_index = 1;
+ $self->debugobj->query_start( $sql, $self->_format_for_trace($bind) )
+ if $self->debug;
+ }
+
+ sub _query_end {
+ my ( $self, $sql, $bind ) = @_;
+
+ $self->debugobj->query_end( $sql, $self->_format_for_trace($bind) )
+ if $self->debug;
+ }
- foreach my $bound (@$bind) {
- my $attributes = {};
- my($column_name, @data) = @$bound;
+ my $sba_compat;
+ sub _dbi_attrs_for_bind {
+ my ($self, $ident, $bind) = @_;
- if ($bind_attributes) {
- $attributes = $bind_attributes->{$column_name}
- if defined $bind_attributes->{$column_name};
+ if (! defined $sba_compat) {
+ $self->_determine_driver;
+ $sba_compat = $self->can('source_bind_attributes') == \&source_bind_attributes
+ ? 0
+ : 1
+ ;
+ }
+
+ my $sba_attrs;
+ if ($sba_compat) {
+ my $class = ref $self;
+ carp_unique (
+ "The source_bind_attributes() override in $class relies on a deprecated codepath. "
+ .'You are strongly advised to switch your code to override bind_attribute_by_datatype() '
+ .'instead. This legacy compat shim will also disappear some time before DBIC 0.09'
+ );
+
+ $sba_attrs = $self->source_bind_attributes($ident);
+ }
+
+ my @attrs;
+
+ for (map { $_->[0] } @$bind) {
+ push @attrs, do {
+ if (exists $_->{dbd_attrs}) {
+ $_->{dbd_attrs}
+ }
+ elsif($_->{sqlt_datatype}) {
+ # cache the result in the dbh_details hash, as it can not change unless
+ # we connect to something else
+ my $cache = $self->_dbh_details->{_datatype_map_cache} ||= {};
+ if (not exists $cache->{$_->{sqlt_datatype}}) {
+ $cache->{$_->{sqlt_datatype}} = $self->bind_attribute_by_data_type($_->{sqlt_datatype}) || undef;
+ }
+ $cache->{$_->{sqlt_datatype}};
+ }
+ elsif ($sba_attrs and $_->{dbic_colname}) {
+ $sba_attrs->{$_->{dbic_colname}} || undef;
+ }
+ else {
+ undef; # always push something at this position
+ }
}
+ }
- foreach my $data (@data) {
- my $ref = ref $data;
- $data = $ref && $ref ne 'ARRAY' ? ''.$data : $data; # stringify args (except arrayrefs)
+ return \@attrs;
+ }
- $sth->bind_param($placeholder_index, $data, $attributes);
- $placeholder_index++;
+ sub _execute {
+ my ($self, $op, $ident, @args) = @_;
+
+ my ($sql, $bind) = $self->_prep_for_execute($op, $ident, \@args);
+
+ shift->dbh_do( # retry over disconnects
+ '_dbh_execute',
+ $sql,
+ $bind,
+ $self->_dbi_attrs_for_bind($ident, $bind)
+ );
+ }
+
+ sub _dbh_execute {
+ my ($self, undef, $sql, $bind, $bind_attrs) = @_;
+
+ $self->_query_start( $sql, $bind );
+ my $sth = $self->_sth($sql);
+
+ for my $i (0 .. $#$bind) {
+ if (ref $bind->[$i][1] eq 'SCALAR') { # any scalarrefs are assumed to be bind_inouts
+ $sth->bind_param_inout(
+ $i + 1, # bind params counts are 1-based
+ $bind->[$i][1],
+ $bind->[$i][0]{dbd_size} || $self->_max_column_bytesize($bind->[$i][0]), # size
+ $bind_attrs->[$i],
+ );
+ }
+ else {
+ $sth->bind_param(
+ $i + 1,
+ (ref $bind->[$i][1] and overload::Method($bind->[$i][1], '""'))
+ ? "$bind->[$i][1]"
+ : $bind->[$i][1]
+ ,
+ $bind_attrs->[$i],
+ );
}
}
# Can this fail without throwing an exception anyways???
my $rv = $sth->execute();
- $self->throw_exception($sth->errstr) if !$rv;
+ $self->throw_exception(
+ $sth->errstr || $sth->err || 'Unknown error: execute() returned false, but error flags were not set...'
+ ) if !$rv;
- $self->_query_end( $sql, @$bind );
+ $self->_query_end( $sql, $bind );
return (wantarray ? ($rv, $sth, @$bind) : $rv);
}
- sub _execute {
- my $self = shift;
- $self->dbh_do('_dbh_execute', @_); # retry over disconnects
+ sub _prefetch_autovalues {
+ my ($self, $source, $to_insert) = @_;
+
+ my $colinfo = $source->columns_info;
+
+ my %values;
+ for my $col (keys %$colinfo) {
+ if (
+ $colinfo->{$col}{auto_nextval}
+ and
+ (
+ ! exists $to_insert->{$col}
+ or
+ ref $to_insert->{$col} eq 'SCALAR'
+ or
+ (ref $to_insert->{$col} eq 'REF' and ref ${$to_insert->{$col}} eq 'ARRAY')
+ )
+ ) {
+ $values{$col} = $self->_sequence_fetch(
+ 'NEXTVAL',
+ ( $colinfo->{$col}{sequence} ||=
+ $self->_dbh_get_autoinc_seq($self->_get_dbh, $source, $col)
+ ),
+ );
+ }
+ }
+
+ \%values;
}
sub insert {
my ($self, $source, $to_insert) = @_;
- my $ident = $source->from;
- my $bind_attributes = $self->source_bind_attributes($source);
+ my $prefetched_values = $self->_prefetch_autovalues($source, $to_insert);
+
+ # fuse the values, but keep a separate list of prefetched_values so that
+ # they can be fused once again with the final return
+ $to_insert = { %$to_insert, %$prefetched_values };
+
+ # FIXME - we seem to assume undef values as non-supplied. This is wrong.
+ # Investigate what does it take to s/defined/exists/
+ my $col_infos = $source->columns_info;
+ my %pcols = map { $_ => 1 } $source->primary_columns;
+ my (%retrieve_cols, $autoinc_supplied, $retrieve_autoinc_col);
+ for my $col ($source->columns) {
+ if ($col_infos->{$col}{is_auto_increment}) {
+ $autoinc_supplied ||= 1 if defined $to_insert->{$col};
+ $retrieve_autoinc_col ||= $col unless $autoinc_supplied;
+ }
+
+ # nothing to retrieve when explicit values are supplied
+ next if (defined $to_insert->{$col} and ! (
+ ref $to_insert->{$col} eq 'SCALAR'
+ or
+ (ref $to_insert->{$col} eq 'REF' and ref ${$to_insert->{$col}} eq 'ARRAY')
+ ));
+
+ # the 'scalar keys' is a trick to preserve the ->columns declaration order
+ $retrieve_cols{$col} = scalar keys %retrieve_cols if (
+ $pcols{$col}
+ or
+ $col_infos->{$col}{retrieve_on_insert}
+ );
+ };
- my $updated_cols = {};
+ local $self->{_autoinc_supplied_for_op} = $autoinc_supplied;
+ local $self->{_perform_autoinc_retrieval} = $retrieve_autoinc_col;
- foreach my $col ( $source->columns ) {
- if ( !defined $to_insert->{$col} ) {
- my $col_info = $source->column_info($col);
+ my ($sqla_opts, @ir_container);
+ if (%retrieve_cols and $self->_use_insert_returning) {
+ $sqla_opts->{returning_container} = \@ir_container
+ if $self->_use_insert_returning_bound;
- if ( $col_info->{auto_nextval} ) {
- $updated_cols->{$col} = $to_insert->{$col} = $self->_sequence_fetch(
- 'nextval',
- $col_info->{sequence} ||
- $self->_dbh_get_autoinc_seq($self->_get_dbh, $source)
- );
- }
- }
+ $sqla_opts->{returning} = [
+ sort { $retrieve_cols{$a} <=> $retrieve_cols{$b} } keys %retrieve_cols
+ ];
+ }
+
+ my ($rv, $sth) = $self->_execute('insert', $source, $to_insert, $sqla_opts);
+
+ my %returned_cols = %$to_insert;
+ if (my $retlist = $sqla_opts->{returning}) { # if IR is supported - we will get everything in one set
+ @ir_container = try {
+ local $SIG{__WARN__} = sub {};
+ my @r = $sth->fetchrow_array;
+ $sth->finish;
+ @r;
+ } unless @ir_container;
+
+ @returned_cols{@$retlist} = @ir_container if @ir_container;
}
+ else {
+ # pull in PK if needed and then everything else
+ if (my @missing_pri = grep { $pcols{$_} } keys %retrieve_cols) {
+
+ $self->throw_exception( "Missing primary key but Storage doesn't support last_insert_id" )
+ unless $self->can('last_insert_id');
+
+ my @pri_values = $self->last_insert_id($source, @missing_pri);
+
+ $self->throw_exception( "Can't get last insert id" )
+ unless (@pri_values == @missing_pri);
+
+ @returned_cols{@missing_pri} = @pri_values;
+ delete $retrieve_cols{$_} for @missing_pri;
+ }
+
+ # if there is more left to pull
+ if (%retrieve_cols) {
+ $self->throw_exception(
+ 'Unable to retrieve additional columns without a Primary Key on ' . $source->source_name
+ ) unless %pcols;
+
+ my @left_to_fetch = sort { $retrieve_cols{$a} <=> $retrieve_cols{$b} } keys %retrieve_cols;
- $self->_execute('insert' => [], $source, $bind_attributes, $to_insert);
+ my $cur = DBIx::Class::ResultSet->new($source, {
+ where => { map { $_ => $returned_cols{$_} } (keys %pcols) },
+ select => \@left_to_fetch,
+ })->cursor;
- return $updated_cols;
+ @returned_cols{@left_to_fetch} = $cur->next;
+
+ $self->throw_exception('Duplicate row returned for PK-search after fresh insert')
+ if scalar $cur->next;
+ }
+ }
+
+ return { %$prefetched_values, %returned_cols };
}
sub insert_bulk {
my ($self, $source, $cols, $data) = @_;
- my %colvalues;
- @colvalues{@$cols} = (0..$#$cols);
+ my @col_range = (0..$#$cols);
+
+ # FIXME - perhaps this is not even needed? does DBI stringify?
+ #
+ # forcibly stringify whatever is stringifiable
+ # ResultSet::populate() hands us a copy - safe to mangle
+ for my $r (0 .. $#$data) {
+ for my $c (0 .. $#{$data->[$r]}) {
+ $data->[$r][$c] = "$data->[$r][$c]"
+ if ( ref $data->[$r][$c] and overload::Method($data->[$r][$c], '""') );
+ }
+ }
+
+ my $colinfos = $source->columns_info($cols);
+
+ local $self->{_autoinc_supplied_for_op} =
+ (first { $_->{is_auto_increment} } values %$colinfos)
+ ? 1
+ : 0
+ ;
+
+ # get a slice type index based on first row of data
+ # a "column" in this context may refer to more than one bind value
+ # e.g. \[ '?, ?', [...], [...] ]
+ #
+ # construct the value type index - a description of values types for every
+ # per-column slice of $data:
+ #
+ # nonexistent - nonbind literal
+ # 0 - regular value
+ # [] of bindattrs - resolved attribute(s) of bind(s) passed via literal+bind \[] combo
+ #
+ # also construct the column hash to pass to the SQL generator. For plain
+ # (non literal) values - convert the members of the first row into a
+ # literal+bind combo, with extra positional info in the bind attr hashref.
+ # This will allow us to match the order properly, and is so contrived
+ # because a user-supplied literal/bind (or something else specific to a
+ # resultsource and/or storage driver) can inject extra binds along the
+ # way, so one can't rely on "shift positions" ordering at all. Also we
+ # can't just hand SQLA a set of some known "values" (e.g. hashrefs that
+ # can be later matched up by address), because we want to supply a real
+ # value on which perhaps e.g. datatype checks will be performed
+ my ($proto_data, $value_type_idx);
+ for my $i (@col_range) {
+ my $colname = $cols->[$i];
+ if (ref $data->[0][$i] eq 'SCALAR') {
+ # no bind value at all - no type
+
+ $proto_data->{$colname} = $data->[0][$i];
+ }
+ elsif (ref $data->[0][$i] eq 'REF' and ref ${$data->[0][$i]} eq 'ARRAY' ) {
+ # repack, so we don't end up mangling the original \[]
+ my ($sql, @bind) = @${$data->[0][$i]};
+
+ # normalization of user supplied stuff
+ my $resolved_bind = $self->_resolve_bindattrs(
+ $source, \@bind, $colinfos,
+ );
+
+ # store value-less (attrs only) bind info - we will be comparing all
+ # supplied binds against this for sanity
+ $value_type_idx->{$i} = [ map { $_->[0] } @$resolved_bind ];
+
+ $proto_data->{$colname} = \[ $sql, map { [
+ # inject slice order to use for $proto_bind construction
+ { %{$resolved_bind->[$_][0]}, _bind_data_slice_idx => $i }
+ =>
+ $resolved_bind->[$_][1]
+ ] } (0 .. $#bind)
+ ];
+ }
+ else {
+ $value_type_idx->{$i} = 0;
+
+ $proto_data->{$colname} = \[ '?', [
+ { dbic_colname => $colname, _bind_data_slice_idx => $i }
+ =>
+ $data->[0][$i]
+ ] ];
+ }
+ }
- for my $i (0..$#$cols) {
- my $first_val = $data->[0][$i];
- next unless ref $first_val eq 'SCALAR';
+ my ($sql, $proto_bind) = $self->_prep_for_execute (
+ 'insert',
+ $source,
+ [ $proto_data ],
+ );
- $colvalues{ $cols->[$i] } = $first_val;
+ if (! @$proto_bind and keys %$value_type_idx) {
+ # if the bindlist is empty and we had some dynamic binds, this means the
+ # storage ate them away (e.g. the NoBindVars component) and interpolated
+ # them directly into the SQL. This obviously can't be good for multi-inserts
+ $self->throw_exception('Cannot insert_bulk without support for placeholders');
}
- # check for bad data and stringify stringifiable objects
- my $bad_slice = sub {
- my ($msg, $col_idx, $slice_idx) = @_;
+ # sanity checks
+ # FIXME - devise a flag "no babysitting" or somesuch to shut this off
+ #
+ # use an error reporting closure for convenience (less to pass)
+ my $bad_slice_report_cref = sub {
+ my ($msg, $r_idx, $c_idx) = @_;
$self->throw_exception(sprintf "%s for column '%s' in populate slice:\n%s",
$msg,
- $cols->[$col_idx],
+ $cols->[$c_idx],
do {
- local $Data::Dumper::Maxdepth = 1; # don't dump objects, if any
- Data::Dumper::Concise::Dumper({
- map { $cols->[$_] => $data->[$slice_idx][$_] } (0 .. $#$cols)
+ require Data::Dumper::Concise;
+ local $Data::Dumper::Maxdepth = 5;
+ Data::Dumper::Concise::Dumper ({
+ map { $cols->[$_] =>
+ $data->[$r_idx][$_]
+ } @col_range
}),
}
);
};
- for my $datum_idx (0..$#$data) {
- my $datum = $data->[$datum_idx];
+ for my $col_idx (@col_range) {
+ my $reference_val = $data->[0][$col_idx];
- for my $col_idx (0..$#$cols) {
- my $val = $datum->[$col_idx];
- my $sqla_bind = $colvalues{ $cols->[$col_idx] };
- my $is_literal_sql = (ref $sqla_bind) eq 'SCALAR';
+ for my $row_idx (1..$#$data) { # we are comparing against what we got from [0] above, hence start from 1
+ my $val = $data->[$row_idx][$col_idx];
- if ($is_literal_sql) {
- if (not ref $val) {
- $bad_slice->('bind found where literal SQL expected', $col_idx, $datum_idx);
+ if (! exists $value_type_idx->{$col_idx}) { # literal no binds
+ if (ref $val ne 'SCALAR') {
+ $bad_slice_report_cref->(
+ "Incorrect value (expecting SCALAR-ref \\'$$reference_val')",
+ $row_idx,
+ $col_idx,
+ );
}
- elsif ((my $reftype = ref $val) ne 'SCALAR') {
- $bad_slice->("$reftype reference found where literal SQL expected",
- $col_idx, $datum_idx);
+ elsif ($$val ne $$reference_val) {
+ $bad_slice_report_cref->(
+ "Inconsistent literal SQL value (expecting \\'$$reference_val')",
+ $row_idx,
+ $col_idx,
+ );
}
- elsif ($$val ne $$sqla_bind){
- $bad_slice->("inconsistent literal SQL value, expecting: '$$sqla_bind'",
- $col_idx, $datum_idx);
+ }
+ elsif (! $value_type_idx->{$col_idx} ) { # regular non-literal value
+ if (ref $val eq 'SCALAR' or (ref $val eq 'REF' and ref $$val eq 'ARRAY') ) {
+ $bad_slice_report_cref->("Literal SQL found where a plain bind value is expected", $row_idx, $col_idx);
}
}
- elsif (my $reftype = ref $val) {
- require overload;
- if (overload::Method($val, '""')) {
- $datum->[$col_idx] = "".$val;
+ else { # binds from a \[], compare type and attrs
+ if (ref $val ne 'REF' or ref $$val ne 'ARRAY') {
+ $bad_slice_report_cref->(
+ "Incorrect value (expecting ARRAYREF-ref \\['${$reference_val}->[0]', ... ])",
+ $row_idx,
+ $col_idx,
+ );
}
- else {
- $bad_slice->("$reftype reference found where bind expected",
- $col_idx, $datum_idx);
+ # start drilling down and bail out early on identical refs
+ elsif (
+ $reference_val != $val
+ or
+ $$reference_val != $$val
+ ) {
+ if (${$val}->[0] ne ${$reference_val}->[0]) {
+ $bad_slice_report_cref->(
+ "Inconsistent literal/bind SQL (expecting \\['${$reference_val}->[0]', ... ])",
+ $row_idx,
+ $col_idx,
+ );
+ }
+ # need to check the bind attrs - a bind will happen only once for
+ # the entire dataset, so any changes further down will be ignored.
+ elsif (! Data::Compare::Compare(
+ $value_type_idx->{$col_idx},
+ [
+ map
+ { $_->[0] }
+ @{$self->_resolve_bindattrs(
+ $source, [ @{$$val}[1 .. $#$$val] ], $colinfos,
+ )}
+ ],
+ )) {
+ $bad_slice_report_cref->(
+ 'Differing bind attributes on literal/bind values not supported',
+ $row_idx,
+ $col_idx,
+ );
+ }
}
}
}
}
- my ($sql, $bind) = $self->_prep_for_execute (
- 'insert', undef, $source, [\%colvalues]
- );
- my @bind = @$bind;
-
- my $empty_bind = 1 if (not @bind) &&
- (grep { ref $_ eq 'SCALAR' } values %colvalues) == @$cols;
-
- if ((not @bind) && (not $empty_bind)) {
- $self->throw_exception(
- 'Cannot insert_bulk without support for placeholders'
- );
- }
-
- # neither _execute_array, nor _execute_inserts_with_no_binds are
- # atomic (even if _execute _array is a single call). Thus a safety
+ # neither _dbh_execute_for_fetch, nor _dbh_execute_inserts_with_no_binds
+ # are atomic (even if execute_for_fetch is a single call). Thus a safety
# scope guard
- my $guard = $self->txn_scope_guard unless $self->{transaction_depth} != 0;
+ my $guard = $self->txn_scope_guard;
- $self->_query_start( $sql, ['__BULK__'] );
- my $sth = $self->sth($sql);
+ $self->_query_start( $sql, @$proto_bind ? [[undef => '__BULK_INSERT__' ]] : () );
+ my $sth = $self->_sth($sql);
my $rv = do {
- if ($empty_bind) {
- # bind_param_array doesn't work if there are no binds
- $self->_dbh_execute_inserts_with_no_binds( $sth, scalar @$data );
+ if (@$proto_bind) {
+ # proto bind contains the information on which pieces of $data to pull
+ # $cols is passed in only for prettier error-reporting
+ $self->_dbh_execute_for_fetch( $source, $sth, $proto_bind, $cols, $data );
}
else {
- # @bind = map { ref $_ ? ''.$_ : $_ } @bind; # stringify args
- $self->_execute_array( $source, $sth, \@bind, $cols, $data );
+ # bind_param_array doesn't work if there are no binds
+ $self->_dbh_execute_inserts_with_no_binds( $sth, scalar @$data );
}
};
- $self->_query_end( $sql, ['__BULK__'] );
-
+ $self->_query_end( $sql, @$proto_bind ? [[ undef => '__BULK_INSERT__' ]] : () );
- $guard->commit if $guard;
+ $guard->commit;
- return (wantarray ? ($rv, $sth, @bind) : $rv);
+ return wantarray ? ($rv, $sth, @$proto_bind) : $rv;
}
- sub _execute_array {
- my ($self, $source, $sth, $bind, $cols, $data, @extra) = @_;
+ # execute_for_fetch is capable of returning data just fine (it means it
+ # can be used for INSERT...RETURNING and UPDATE...RETURNING. Since this
+ # is the void-populate fast-path we will just ignore this altogether
+ # for the time being.
+ sub _dbh_execute_for_fetch {
+ my ($self, $source, $sth, $proto_bind, $cols, $data) = @_;
- ## This must be an arrayref, else nothing works!
- my $tuple_status = [];
+ my @idx_range = ( 0 .. $#$proto_bind );
- ## Get the bind_attributes, if any exist
- my $bind_attributes = $self->source_bind_attributes($source);
+ # If we have any bind attributes to take care of, we will bind the
+ # proto-bind data (which will never be used by execute_for_fetch)
+ # However since column bindtypes are "sticky", this is sufficient
+ # to get the DBD to apply the bindtype to all values later on
- ## Bind the values and execute
- my $placeholder_index = 1;
+ my $bind_attrs = $self->_dbi_attrs_for_bind($source, $proto_bind);
- foreach my $bound (@$bind) {
+ for my $i (@idx_range) {
+ $sth->bind_param (
+ $i+1, # DBI bind indexes are 1-based
+ $proto_bind->[$i][1],
+ $bind_attrs->[$i],
+ ) if defined $bind_attrs->[$i];
+ }
- my $attributes = {};
- my ($column_name, $data_index) = @$bound;
+ # At this point $data slots named in the _bind_data_slice_idx of
+ # each piece of $proto_bind are either \[]s or plain values to be
+ # passed in. Construct the dispensing coderef. *NOTE* the order
+ # of $data will differ from that of the ?s in the SQL (due to
+ # alphabetical ordering by colname). We actually do want to
+ # preserve this behavior so that prepare_cached has a better
+ # chance of matching on unrelated calls
+ my %data_reorder = map { $proto_bind->[$_][0]{_bind_data_slice_idx} => $_ } @idx_range;
+
+ my $fetch_row_idx = -1; # saner loop this way
+ my $fetch_tuple = sub {
+ return undef if ++$fetch_row_idx > $#$data;
+
+ return [ map
+ { (ref $_ eq 'REF' and ref $$_ eq 'ARRAY')
+ ? map { $_->[-1] } @{$$_}[1 .. $#$$_]
+ : $_
+ }
+ map
+ { $data->[$fetch_row_idx][$_]}
+ sort
+ { $data_reorder{$a} <=> $data_reorder{$b} }
+ keys %data_reorder
+ ];
+ };
- if( $bind_attributes ) {
- $attributes = $bind_attributes->{$column_name}
- if defined $bind_attributes->{$column_name};
- }
+ my $tuple_status = [];
+ my ($rv, $err);
+ try {
+ $rv = $sth->execute_for_fetch(
+ $fetch_tuple,
+ $tuple_status,
+ );
+ }
+ catch {
+ $err = shift;
+ };
- my @data = map { $_->[$data_index] } @$data;
+ # Not all DBDs are created equal. Some throw on error, some return
+ # an undef $rv, and some set $sth->err - try whatever we can
+ $err = ($sth->errstr || 'UNKNOWN ERROR ($sth->errstr is unset)') if (
+ ! defined $err
+ and
+ ( !defined $rv or $sth->err )
+ );
- $sth->bind_param_array( $placeholder_index, [@data], $attributes );
- $placeholder_index++;
+ # Statement must finish even if there was an exception.
+ try {
+ $sth->finish
}
-
- my $rv = eval {
- $self->_dbh_execute_array($sth, $tuple_status, @extra);
+ catch {
+ $err = shift unless defined $err
};
- my $err = $@ || $sth->errstr;
- # Statement must finish even if there was an exception.
- eval { $sth->finish };
- $err = $@ unless $err;
-
- if ($err) {
+ if (defined $err) {
my $i = 0;
++$i while $i <= $#$tuple_status && !ref $tuple_status->[$i];
$self->throw_exception("Unexpected populate error: $err")
if ($i > $#$tuple_status);
- $self->throw_exception(sprintf "%s for populate slice:\n%s",
+ require Data::Dumper::Concise;
+ $self->throw_exception(sprintf "execute_for_fetch() aborted with '%s' at populate slice:\n%s",
($tuple_status->[$i][1] || $err),
- Data::Dumper::Concise::Dumper({
- map { $cols->[$_] => $data->[$i][$_] } (0 .. $#$cols)
- }),
+ Data::Dumper::Concise::Dumper( { map { $cols->[$_] => $data->[$i][$_] } (0 .. $#$cols) } ),
);
}
- return $rv;
- }
- sub _dbh_execute_array {
- my ($self, $sth, $tuple_status, @extra) = @_;
-
- return $sth->execute_array({ArrayTupleStatus => $tuple_status});
+ return $rv;
}
sub _dbh_execute_inserts_with_no_binds {
my ($self, $sth, $count) = @_;
- eval {
+ my $err;
+ try {
my $dbh = $self->_get_dbh;
local $dbh->{RaiseError} = 1;
local $dbh->{PrintError} = 0;
$sth->execute foreach 1..$count;
+ }
+ catch {
+ $err = shift;
};
- my $exception = $@;
- # Make sure statement is finished even if there was an exception.
- eval { $sth->finish };
- $exception = $@ unless $exception;
+ # Make sure statement is finished even if there was an exception.
+ try {
+ $sth->finish
+ }
+ catch {
+ $err = shift unless defined $err;
+ };
- $self->throw_exception($exception) if $exception;
+ $self->throw_exception($err) if defined $err;
return $count;
}
sub update {
- my ($self, $source, @args) = @_;
-
- my $bind_attrs = $self->source_bind_attributes($source);
-
- return $self->_execute('update' => [], $source, $bind_attrs, @args);
+ #my ($self, $source, @args) = @_;
+ shift->_execute('update', @_);
}
sub delete {
- my ($self, $source, @args) = @_;
-
- my $bind_attrs = $self->source_bind_attributes($source);
-
- return $self->_execute('delete' => [], $source, $bind_attrs, @args);
- }
-
- # We were sent here because the $rs contains a complex search
- # which will require a subquery to select the correct rows
- # (i.e. joined or limited resultsets, or non-introspectable conditions)
- #
- # Generating a single PK column subquery is trivial and supported
- # by all RDBMS. However if we have a multicolumn PK, things get ugly.
- # Look at _multipk_update_delete()
- sub _subq_update_delete {
- my $self = shift;
- my ($rs, $op, $values) = @_;
-
- my $rsrc = $rs->result_source;
-
- # quick check if we got a sane rs on our hands
- my @pcols = $rsrc->_pri_cols;
-
- my $sel = $rs->_resolved_attrs->{select};
- $sel = [ $sel ] unless ref $sel eq 'ARRAY';
-
- if (
- join ("\x00", map { join '.', $rs->{attrs}{alias}, $_ } sort @pcols)
- ne
- join ("\x00", sort @$sel )
- ) {
- $self->throw_exception (
- '_subq_update_delete can not be called on resultsets selecting columns other than the primary keys'
- );
- }
-
- if (@pcols == 1) {
- return $self->$op (
- $rsrc,
- $op eq 'update' ? $values : (),
- { $pcols[0] => { -in => $rs->as_query } },
- );
- }
-
- else {
- return $self->_multipk_update_delete (@_);
- }
- }
-
- # ANSI SQL does not provide a reliable way to perform a multicol-PK
- # resultset update/delete involving subqueries. So by default resort
- # to simple (and inefficient) delete_all style per-row opearations,
- # while allowing specific storages to override this with a faster
- # implementation.
- #
- sub _multipk_update_delete {
- return shift->_per_row_update_delete (@_);
- }
-
- # This is the default loop used to delete/update rows for multi PK
- # resultsets, and used by mysql exclusively (because it can't do anything
- # else).
- #
- # We do not use $row->$op style queries, because resultset update/delete
- # is not expected to cascade (this is what delete_all/update_all is for).
- #
- # There should be no race conditions as the entire operation is rolled
- # in a transaction.
- #
- sub _per_row_update_delete {
- my $self = shift;
- my ($rs, $op, $values) = @_;
-
- my $rsrc = $rs->result_source;
- my @pcols = $rsrc->_pri_cols;
-
- my $guard = $self->txn_scope_guard;
-
- # emulate the return value of $sth->execute for non-selects
- my $row_cnt = '0E0';
-
- my $subrs_cur = $rs->cursor;
- my @all_pk = $subrs_cur->all;
- for my $pks ( @all_pk) {
-
- my $cond;
- for my $i (0.. $#pcols) {
- $cond->{$pcols[$i]} = $pks->[$i];
- }
-
- $self->$op (
- $rsrc,
- $op eq 'update' ? $values : (),
- $cond,
- );
-
- $row_cnt++;
- }
-
- $guard->commit;
-
- return $row_cnt;
+ #my ($self, $source, @args) = @_;
+ shift->_execute('delete', @_);
}
sub _select {
my $self = shift;
-
- # localization is neccessary as
- # 1) there is no infrastructure to pass this around before SQLA2
- # 2) _select_args sets it and _prep_for_execute consumes it
- my $sql_maker = $self->sql_maker;
- local $sql_maker->{_dbic_rs_attrs};
-
- return $self->_execute($self->_select_args(@_));
+ $self->_execute($self->_select_args(@_));
}
sub _select_args_to_query {
my $self = shift;
- # localization is neccessary as
- # 1) there is no infrastructure to pass this around before SQLA2
- # 2) _select_args sets it and _prep_for_execute consumes it
- my $sql_maker = $self->sql_maker;
- local $sql_maker->{_dbic_rs_attrs};
+ $self->throw_exception(
+ "Unable to generate limited query representation with 'software_limit' enabled"
+ ) if ($_[3]->{software_limit} and ($_[3]->{offset} or $_[3]->{rows}) );
- # my ($op, $bind, $ident, $bind_attrs, $select, $cond, $order, $rows, $offset)
+ # my ($op, $ident, $select, $cond, $rs_attrs, $rows, $offset)
# = $self->_select_args($ident, $select, $cond, $attrs);
- my ($op, $bind, $ident, $bind_attrs, @args) =
+ my ($op, $ident, @args) =
$self->_select_args(@_);
- # my ($sql, $prepared_bind) = $self->_prep_for_execute($op, $bind, $ident, [ $select, $cond, $order, $rows, $offset ]);
- my ($sql, $prepared_bind) = $self->_prep_for_execute($op, $bind, $ident, \@args);
+ # my ($sql, $prepared_bind) = $self->_gen_sql_bind($op, $ident, [ $select, $cond, $rs_attrs, $rows, $offset ]);
+ my ($sql, $prepared_bind) = $self->_gen_sql_bind($op, $ident, \@args);
$prepared_bind ||= [];
return wantarray
- ? ($sql, $prepared_bind, $bind_attrs)
+ ? ($sql, $prepared_bind)
: \[ "($sql)", @$prepared_bind ]
;
}
sub _select_args {
my ($self, $ident, $select, $where, $attrs) = @_;
+ my $sql_maker = $self->sql_maker;
my ($alias2source, $rs_alias) = $self->_resolve_ident_sources ($ident);
- my $sql_maker = $self->sql_maker;
- $sql_maker->{_dbic_rs_attrs} = {
+ $attrs = {
%$attrs,
select => $select,
from => $ident,
where => $where,
$rs_alias && $alias2source->{$rs_alias}
- ? ( _source_handle => $alias2source->{$rs_alias}->handle )
+ ? ( _rsroot_rsrc => $alias2source->{$rs_alias} )
: ()
,
};
- # calculate bind_attrs before possible $ident mangling
- my $bind_attrs = {};
- for my $alias (keys %$alias2source) {
- my $bindtypes = $self->source_bind_attributes ($alias2source->{$alias}) || {};
- for my $col (keys %$bindtypes) {
-
- my $fqcn = join ('.', $alias, $col);
- $bind_attrs->{$fqcn} = $bindtypes->{$col} if $bindtypes->{$col};
-
- # Unqialified column names are nice, but at the same time can be
- # rather ambiguous. What we do here is basically go along with
- # the loop, adding an unqualified column slot to $bind_attrs,
- # alongside the fully qualified name. As soon as we encounter
- # another column by that name (which would imply another table)
- # we unset the unqualified slot and never add any info to it
- # to avoid erroneous type binding. If this happens the users
- # only choice will be to fully qualify his column name
-
- if (exists $bind_attrs->{$col}) {
- $bind_attrs->{$col} = {};
- }
- else {
- $bind_attrs->{$col} = $bind_attrs->{$fqcn};
- }
- }
+ # Sanity check the attributes (SQLMaker does it too, but
+ # in case of a software_limit we'll never reach there)
+ if (defined $attrs->{offset}) {
+ $self->throw_exception('A supplied offset attribute must be a non-negative integer')
+ if ( $attrs->{offset} =~ /\D/ or $attrs->{offset} < 0 );
}
- # adjust limits
- if (
- $attrs->{software_limit}
- ||
- $sql_maker->_default_limit_syntax eq "GenericSubQ"
- ) {
- $attrs->{software_limit} = 1;
+ if (defined $attrs->{rows}) {
+ $self->throw_exception("The rows attribute must be a positive integer if present")
+ if ( $attrs->{rows} =~ /\D/ or $attrs->{rows} <= 0 );
}
- else {
- $self->throw_exception("rows attribute must be positive if present")
- if (defined($attrs->{rows}) && !($attrs->{rows} > 0));
-
+ elsif ($attrs->{offset}) {
# MySQL actually recommends this approach. I cringe.
- $attrs->{rows} = 2**48 if not defined $attrs->{rows} and defined $attrs->{offset};
+ $attrs->{rows} = $sql_maker->__max_int;
}
my @limit;
# see if we need to tear the prefetch apart otherwise delegate the limiting to the
# storage, unless software limit was requested
if (
- #limited has_many
- ( $attrs->{rows} && keys %{$attrs->{collapse}} )
+ # limited collapsing has_many
+ ( $attrs->{rows} && $attrs->{collapse} )
||
- # limited prefetch with RNO subqueries
- (
- $attrs->{rows}
- &&
- $sql_maker->limit_dialect eq 'RowNumberOver'
- &&
- $attrs->{_prefetch_select}
- &&
- @{$attrs->{_prefetch_select}}
- )
- ||
- # grouped prefetch
+ # grouped prefetch (to satisfy group_by == select)
( $attrs->{group_by}
&&
@{$attrs->{group_by}}
&&
- $attrs->{_prefetch_select}
- &&
- @{$attrs->{_prefetch_select}}
+ $attrs->{_prefetch_selector_range}
)
) {
($ident, $select, $where, $attrs)
= $self->_adjust_select_args_for_complex_prefetch ($ident, $select, $where, $attrs);
}
-
- elsif (
- ($attrs->{rows} || $attrs->{offset})
- &&
- $sql_maker->limit_dialect eq 'RowNumberOver'
- &&
- (ref $ident eq 'ARRAY' && @$ident > 1) # indicates a join
- &&
- scalar $self->_parse_order_by ($attrs->{order_by})
- ) {
- # the RNO limit dialect above mangles the SQL such that the join gets lost
- # wrap a subquery here
-
- push @limit, delete @{$attrs}{qw/rows offset/};
-
- my $subq = $self->_select_args_to_query (
- $ident,
- $select,
- $where,
- $attrs,
- );
-
- $ident = {
- -alias => $attrs->{alias},
- -source_handle => $ident->[0]{-source_handle},
- $attrs->{alias} => $subq,
- };
-
- # all part of the subquery now
- delete @{$attrs}{qw/order_by group_by having/};
- $where = undef;
- }
-
elsif (! $attrs->{software_limit} ) {
- push @limit, $attrs->{rows}, $attrs->{offset};
+ push @limit, (
+ $attrs->{rows} || (),
+ $attrs->{offset} || (),
+ );
}
# try to simplify the joinmap further (prune unreferenced type-single joins)
# invoked, and that's just bad...
###
- my $order = { map
- { $attrs->{$_} ? ( $_ => $attrs->{$_} ) : () }
- (qw/order_by group_by having/ )
- };
-
- return ('select', $attrs->{bind}, $ident, $bind_attrs, $select, $where, $order, @limit);
+ return ('select', $ident, $select, $where, $attrs, @limit);
}
# Returns a counting SELECT for a simple count
return { count => '*' };
}
- # Returns a SELECT which will end up in the subselect
- # There may or may not be a group_by, as the subquery
- # might have been called to accomodate a limit
- #
- # Most databases would be happy with whatever ends up
- # here, but some choke in various ways.
- #
- sub _subq_count_select {
- my ($self, $source, $rs_attrs) = @_;
-
- if (my $groupby = $rs_attrs->{group_by}) {
-
- my $avail_columns = $self->_resolve_column_info ($rs_attrs->{from});
-
- my $sel_index;
- for my $sel (@{$rs_attrs->{select}}) {
- if (ref $sel eq 'HASH' and $sel->{-as}) {
- $sel_index->{$sel->{-as}} = $sel;
- }
- }
-
- my @selection;
- for my $g_part (@$groupby) {
- if (ref $g_part or $avail_columns->{$g_part}) {
- push @selection, $g_part;
- }
- elsif ($sel_index->{$g_part}) {
- push @selection, $sel_index->{$g_part};
- }
- else {
- $self->throw_exception ("group_by criteria '$g_part' not contained within current resultset source(s)");
- }
- }
-
- return \@selection;
- }
-
- my @pcols = map { join '.', $rs_attrs->{alias}, $_ } ($source->primary_columns);
- return @pcols ? \@pcols : [ 1 ];
- }
-
sub source_bind_attributes {
- my ($self, $source) = @_;
-
- my $bind_attributes;
- foreach my $column ($source->columns) {
-
- my $data_type = $source->column_info($column)->{data_type} || '';
- $bind_attributes->{$column} = $self->bind_attribute_by_data_type($data_type)
- if $data_type;
- }
-
- return $bind_attributes;
+ shift->throw_exception(
+ 'source_bind_attributes() was never meant to be a callable public method - '
+ .'please contact the DBIC dev-team and describe your use case so that a reasonable '
+ .'solution can be provided'
+ ."\nhttp://search.cpan.org/dist/DBIx-Class/lib/DBIx/Class.pm#GETTING_HELP/SUPPORT"
+ );
}
=head2 select
return @row;
}
- =head2 sth
-
- =over 4
+ =head2 sql_limit_dialect
- =item Arguments: $sql
-
- =back
-
- Returns a L<DBI> sth (statement handle) for the supplied SQL.
+ This is an accessor for the default SQL limit dialect used by a particular
+ storage driver. Can be overridden by supplying an explicit L</limit_dialect>
+ to L<DBIx::Class::Schema/connect>. For a list of available limit dialects
+ see L<DBIx::Class::SQLMaker::LimitDialects>.
=cut
# XXX You would think RaiseError would make this impossible,
# but apparently that's not true :(
- $self->throw_exception($dbh->errstr) if !$sth;
+ $self->throw_exception(
+ $dbh->errstr
+ ||
+ sprintf( "\$dbh->prepare() of '%s' through %s failed *silently* without "
+ .'an exception and/or setting $dbh->errstr',
+ length ($sql) > 20
+ ? substr($sql, 0, 20) . '...'
+ : $sql
+ ,
+ 'DBD::' . $dbh->{Driver}{Name},
+ )
+ ) if !$sth;
$sth;
}
sub sth {
+ carp_unique 'sth was mistakenly marked/documented as public, stop calling it (will be removed before DBIC v0.09)';
+ shift->_sth(@_);
+ }
+
+ sub _sth {
my ($self, $sql) = @_;
$self->dbh_do('_dbh_sth', $sql); # retry over disconnects
}
if ($dbh->can('column_info')) {
my %result;
- eval {
+ my $caught;
+ try {
my ($schema,$tab) = $table =~ /^(.+?)\.(.+)$/ ? ($1,$2) : (undef,$table);
my $sth = $dbh->column_info( undef,$schema, $tab, '%' );
$sth->execute();
$result{$col_name} = \%column_info;
}
+ } catch {
+ $caught = 1;
};
- return \%result if !$@ && scalar keys %result;
+ return \%result if !$caught && scalar keys %result;
}
my %result;
sub _dbh_last_insert_id {
my ($self, $dbh, $source, $col) = @_;
- my $id = eval { $dbh->last_insert_id (undef, undef, $source->name, $col) };
+ my $id = try { $dbh->last_insert_id (undef, undef, $source->name, $col) };
return $id if defined $id;
}
# Check if placeholders are supported at all
- sub _placeholders_supported {
+ sub _determine_supports_placeholders {
my $self = shift;
my $dbh = $self->_get_dbh;
# some drivers provide a $dbh attribute (e.g. Sybase and $dbh->{syb_dynamic_supported})
# but it is inaccurate more often than not
- eval {
+ return try {
local $dbh->{PrintError} = 0;
local $dbh->{RaiseError} = 1;
$dbh->do('select ?', {}, 1);
+ 1;
+ }
+ catch {
+ 0;
};
- return $@ ? 0 : 1;
}
# Check if placeholders bound to non-string types throw exceptions
#
- sub _typeless_placeholders_supported {
+ sub _determine_supports_typeless_placeholders {
my $self = shift;
my $dbh = $self->_get_dbh;
- eval {
+ return try {
local $dbh->{PrintError} = 0;
local $dbh->{RaiseError} = 1;
# this specifically tests a bind that is NOT a string
$dbh->do('select 1 where 1 = ?', {}, 1);
+ 1;
+ }
+ catch {
+ 0;
};
- return $@ ? 0 : 1;
}
=head2 sqlt_type
=cut
sub is_datatype_numeric {
- my ($self, $dt) = @_;
+ #my ($self, $dt) = @_;
- return 0 unless $dt;
+ return 0 unless $_[1];
- return $dt =~ /^ (?:
+ $_[1] =~ /^ (?:
numeric | int(?:eger)? | (?:tiny|small|medium|big)int | dec(?:imal)? | real | float | double (?: \s+ precision)? | (?:big)?serial
) $/ix;
}
sub create_ddl_dir {
my ($self, $schema, $databases, $version, $dir, $preversion, $sqltargs) = @_;
- if(!$dir || !-d $dir) {
+ unless ($dir) {
carp "No directory given, using ./\n";
- $dir = "./";
+ $dir = './';
+ } else {
+ -d $dir
+ or
+ (require File::Path and File::Path::make_path ("$dir")) # make_path does not like objects (i.e. Path::Class::Dir)
+ or
+ $self->throw_exception(
+ "Failed to create '$dir': " . ($! || $@ || 'error unknown')
+ );
}
+
+ $self->throw_exception ("Directory '$dir' does not exist\n") unless(-d $dir);
+
$databases ||= ['MySQL', 'SQLite', 'PostgreSQL'];
$databases = [ $databases ] if(ref($databases) ne 'ARRAY');
my $filename = $schema->ddl_filename($type, $version, $dir);
if(-f $filename)
{
+ # FIXME replace this block when a proper sane sql parser is available
my $file;
open($file, "<$filename")
or $self->throw_exception("Can't open $filename ($!)");
data => $schema,
);
- my @ret;
- my $wa = wantarray;
- if ($wa) {
- @ret = $tr->translate;
- }
- else {
- $ret[0] = $tr->translate;
- }
-
- $self->throw_exception( 'Unable to produce deployment statements: ' . $tr->error)
- unless (@ret && defined $ret[0]);
-
- return $wa ? @ret : $ret[0];
+ return preserve_context {
+ $tr->translate
+ } after => sub {
+ $self->throw_exception( 'Unable to produce deployment statements: ' . $tr->error)
+ unless defined $_[0];
+ };
}
+ # FIXME deploy() currently does not accurately report sql errors
+ # Will always return true while errors are warned
sub deploy {
my ($self, $schema, $type, $sqltargs, $dir) = @_;
my $deploy = sub {
my $line = shift;
- return if($line =~ /^--/);
return if(!$line);
+ return if($line =~ /^--/);
# next if($line =~ /^DROP/m);
return if($line =~ /^BEGIN TRANSACTION/m);
return if($line =~ /^COMMIT/m);
return if $line =~ /^\s+$/; # skip whitespace only
$self->_query_start($line);
- eval {
+ try {
# do a dbh_do cycle here, as we need some error checking in
# place (even though we will ignore errors)
$self->dbh_do (sub { $_[1]->do($line) });
+ } catch {
+ carp qq{$_ (running "${line}")};
};
- if ($@) {
- carp qq{$@ (running "${line}")};
- }
$self->_query_end($line);
};
my @statements = $schema->deployment_statements($type, undef, $dir, { %{ $sqltargs || {} }, no_comments => 1 } );
}
}
elsif (@statements == 1) {
- foreach my $line ( split(";\n", $statements[0])) {
+ # split on single line comments and end of statements
+ foreach my $line ( split(/\s*--.*\n|;\n/, $statements[0])) {
$deploy->( $line );
}
}
=head2 datetime_parser_type
- Defines (returns) the datetime parser class - currently hardwired to
- L<DateTime::Format::MySQL>
-
- =cut
-
- sub datetime_parser_type { "DateTime::Format::MySQL"; }
+ Defines the datetime parser class - currently defaults to L<DateTime::Format::MySQL>
=head2 build_datetime_parser
sub build_datetime_parser {
my $self = shift;
my $type = $self->datetime_parser_type(@_);
- $self->ensure_class_loaded ($type);
return $type;
}
return $alias;
}
- sub DESTROY {
- my $self = shift;
+ # The size in bytes to use for DBI's ->bind_param_inout, this is the generic
+ # version and it may be necessary to amend or override it for a specific storage
+ # if such binds are necessary.
+ sub _max_column_bytesize {
+ my ($self, $attr) = @_;
- $self->_verify_pid if $self->_dbh;
+ my $max_size;
- # some databases need this to stop spewing warnings
- if (my $dbh = $self->_dbh) {
- local $@;
- eval {
- %{ $dbh->{CachedKids} } = ();
- $dbh->disconnect;
- };
+ if ($attr->{sqlt_datatype}) {
+ my $data_type = lc($attr->{sqlt_datatype});
+
+ if ($attr->{sqlt_size}) {
+
+ # String/sized-binary types
+ if ($data_type =~ /^(?:
+ l? (?:var)? char(?:acter)? (?:\s*varying)?
+ |
+ (?:var)? binary (?:\s*varying)?
+ |
+ raw
+ )\b/x
+ ) {
+ $max_size = $attr->{sqlt_size};
+ }
+ # Other charset/unicode types, assume scale of 4
+ elsif ($data_type =~ /^(?:
+ national \s* character (?:\s*varying)?
+ |
+ nchar
+ |
+ univarchar
+ |
+ nvarchar
+ )\b/x
+ ) {
+ $max_size = $attr->{sqlt_size} * 4;
+ }
+ }
+
+ if (!$max_size and !$self->_is_lob_type($data_type)) {
+ $max_size = 100 # for all other (numeric?) datatypes
+ }
}
- $self->_dbh(undef);
+ $max_size || $self->_dbic_connect_attributes->{LongReadLen} || $self->_get_dbh->{LongReadLen} || 8000;
+ }
+
+ # Determine if a data_type is some type of BLOB
+ sub _is_lob_type {
+ my ($self, $data_type) = @_;
+ $data_type && ($data_type =~ /lob|bfile|text|image|bytea|memo/i
+ || $data_type =~ /^long(?:\s+(?:raw|bit\s*varying|varbit|binary
+ |varchar|character\s*varying|nvarchar
+ |national\s*character\s*varying))?\z/xi);
+ }
+
+ sub _is_binary_lob_type {
+ my ($self, $data_type) = @_;
+ $data_type && ($data_type =~ /blob|bfile|image|bytea/i
+ || $data_type =~ /^long(?:\s+(?:raw|bit\s*varying|varbit|binary))?\z/xi);
+ }
+
+ sub _is_text_lob_type {
+ my ($self, $data_type) = @_;
+ $data_type && ($data_type =~ /^(?:clob|memo)\z/i
+ || $data_type =~ /^long(?:\s+(?:varchar|character\s*varying|nvarchar
+ |national\s*character\s*varying))\z/xi);
}
1;
DBIx::Class can do some wonderful magic with handling exceptions,
disconnections, and transactions when you use C<< AutoCommit => 1 >>
- (the default) combined with C<txn_do> for transaction support.
+ (the default) combined with L<txn_do|DBIx::Class::Storage/txn_do> for
+ transaction support.
If you set C<< AutoCommit => 0 >> in your connect info, then you are always
in an assumed transaction between commits, and you're telling us you'd
#
# This module contains code that should never have seen the light of day,
# does not belong in the Storage, or is otherwise unfit for public
- # display. The arrival of SQLA2 should immediately oboslete 90% of this
+ # display. The arrival of SQLA2 should immediately obsolete 90% of this
#
use strict;
use base 'DBIx::Class::Storage';
use mro 'c3';
- use Carp::Clan qw/^DBIx::Class/;
+ use List::Util 'first';
+ use Scalar::Util 'blessed';
+ use Sub::Name 'subname';
+ use namespace::clean;
#
# This code will remove non-selecting/non-restricting joins from
# {from} specs, aiding the RDBMS query optimizer
#
sub _prune_unused_joins {
- my ($self) = shift;
-
+ my $self = shift;
my ($from, $select, $where, $attrs) = @_;
+ return $from unless $self->_use_join_optimizer;
+
if (ref $from ne 'ARRAY' || ref $from->[0] ne 'HASH' || ref $from->[1] ne 'ARRAY') {
return $from; # only standard {from} specs are supported
}
# {multiplying} joins can go
delete $aliastypes->{multiplying} if $attrs->{group_by};
-
my @newfrom = $from->[0]; # FROM head is always present
- my %need_joins = (map { %{$_||{}} } (values %$aliastypes) );
+ my %need_joins;
+ for (values %$aliastypes) {
+ # add all requested aliases
+ $need_joins{$_} = 1 for keys %$_;
+
+ # add all their parents (as per joinpath which is an AoH { table => alias })
+ $need_joins{$_} = 1 for map { values %$_ } map { @$_ } values %$_;
+ }
for my $j (@{$from}[1..$#$from]) {
push @newfrom, $j if (
(! $j->[0]{-alias}) # legacy crap
#
# This is the code producing joined subqueries like:
- # SELECT me.*, other.* FROM ( SELECT me.* FROM ... ) JOIN other ON ...
+ # SELECT me.*, other.* FROM ( SELECT me.* FROM ... ) JOIN other ON ...
#
sub _adjust_select_args_for_complex_prefetch {
my ($self, $from, $select, $where, $attrs) = @_;
$self->throw_exception ('Nothing to prefetch... how did we get here?!')
- if not @{$attrs->{_prefetch_select}};
+ if not @{$attrs->{_prefetch_selector_range}};
$self->throw_exception ('Complex prefetches are not supported on resultsets with a custom from attribute')
if (ref $from ne 'ARRAY' || ref $from->[0] ne 'HASH' || ref $from->[1] ne 'ARRAY');
my $outer_attrs = { %$attrs };
delete $outer_attrs->{$_} for qw/where bind rows offset group_by having/;
- my $inner_attrs = { %$attrs };
- delete $inner_attrs->{$_} for qw/for collapse _prefetch_select _collapse_order_by select as/;
+ my $inner_attrs = { %$attrs, _is_internal_subuery => 1 };
+ delete $inner_attrs->{$_} for qw/for collapse _prefetch_selector_range _collapse_order_by select as/;
# bring over all non-collapse-induced order_by into the inner query (if any)
# the outer one will have to keep them all
delete $inner_attrs->{order_by};
-- if (my $ord_cnt = @{$outer_attrs->{order_by}} - @{$outer_attrs->{_collapse_order_by}} ) {
++ if (my $ord_cnt = @{$outer_attrs->{order_by}} - @{$outer_attrs->{_collapse_order_by}||[]} ) {
$inner_attrs->{order_by} = [
@{$outer_attrs->{order_by}}[ 0 .. $ord_cnt - 1]
];
# on the outside we substitute any function for its alias
my $outer_select = [ @$select ];
my $inner_select = [];
- for my $i (0 .. ( @$outer_select - @{$outer_attrs->{_prefetch_select}} - 1) ) {
+
+ my ($p_start, $p_end) = @{$outer_attrs->{_prefetch_selector_range}};
+ for my $i (0 .. $p_start - 1, $p_end + 1 .. $#$outer_select) {
my $sel = $outer_select->[$i];
if (ref $sel eq 'HASH' ) {
}
push @$inner_select, $sel;
+
+ push @{$inner_attrs->{as}}, $attrs->{as}[$i];
}
- # construct the inner $from for the subquery
+ # construct the inner $from and lock it in a subquery
# we need to prune first, because this will determine if we need a group_by below
- my $inner_from = $self->_prune_unused_joins ($from, $inner_select, $where, $inner_attrs);
-
- # if a multi-type join was needed in the subquery - add a group_by to simulate the
- # collapse in the subq
- $inner_attrs->{group_by} ||= $inner_select
- if List::Util::first
- { ! $_->[0]{-is_single} }
- (@{$inner_from}[1 .. $#$inner_from])
- ;
+ # the fake group_by is so that the pruner throws away all non-selecting, non-restricting
+ # multijoins (since we def. do not care about those inside the subquery)
+
+ my $inner_subq = do {
+
+ # must use it here regardless of user requests
+ local $self->{_use_join_optimizer} = 1;
+
+ my $inner_from = $self->_prune_unused_joins ($from, $inner_select, $where, {
+ group_by => ['dummy'], %$inner_attrs,
+ });
+
+ my $inner_aliastypes =
+ $self->_resolve_aliastypes_from_select_args( $inner_from, $inner_select, $where, $inner_attrs );
+
+ # we need to simulate collapse in the subq if a multiplying join is pulled
+ # by being a non-selecting restrictor
+ if (
+ ! $inner_attrs->{group_by}
+ and
+ first {
+ $inner_aliastypes->{restricting}{$_}
+ and
+ ! $inner_aliastypes->{selecting}{$_}
+ } ( keys %{$inner_aliastypes->{multiplying}||{}} )
+ ) {
+ my $unprocessed_order_chunks;
+ ($inner_attrs->{group_by}, $unprocessed_order_chunks) = $self->_group_over_selection (
+ $inner_from, $inner_select, $inner_attrs->{order_by}
+ );
+
+ $self->throw_exception (
+ 'A required group_by clause could not be constructed automatically due to a complex '
+ . 'order_by criteria. Either order_by columns only (no functions) or construct a suitable '
+ . 'group_by by hand'
+ ) if $unprocessed_order_chunks;
+ }
- # generate the subquery
- my $subq = $self->_select_args_to_query (
- $inner_from,
- $inner_select,
- $where,
- $inner_attrs,
- );
+ # we already optimized $inner_from above
+ local $self->{_use_join_optimizer} = 0;
- my $subq_joinspec = {
- -alias => $attrs->{alias},
- -source_handle => $inner_from->[0]{-source_handle},
- $attrs->{alias} => $subq,
+ # generate the subquery
+ $self->_select_args_to_query (
+ $inner_from,
+ $inner_select,
+ $where,
+ $inner_attrs,
+ );
};
# Generate the outer from - this is relatively easy (really just replace
# - it is part of the restrictions, in which case we need to collapse the outer
# result by tackling yet another group_by to the outside of the query
- # normalize a copy of $from, so it will be easier to work with further
- # down (i.e. promote the initial hashref to an AoH)
$from = [ @$from ];
- $from->[0] = [ $from->[0] ];
# so first generate the outer_from, up to the substitution point
my @outer_from;
while (my $j = shift @$from) {
+ $j = [ $j ] unless ref $j eq 'ARRAY'; # promote the head-from to an AoH
+
if ($j->[0]{-alias} eq $attrs->{alias}) { # time to swap
+
push @outer_from, [
- $subq_joinspec,
+ {
+ -alias => $attrs->{alias},
+ -rsrc => $j->[0]{-rsrc},
+ $attrs->{alias} => $inner_subq,
+ },
@{$j}[1 .. $#$j],
];
last; # we'll take care of what's left in $from below
}
}
- # scan the from spec against different attributes, and see which joins are needed
+ # scan the *remaining* from spec against different attributes, and see which joins are needed
# in what role
my $outer_aliastypes =
$self->_resolve_aliastypes_from_select_args( $from, $outer_select, $where, $outer_attrs );
+ # unroll parents
+ my ($outer_select_chain, $outer_restrict_chain) = map { +{
+ map { $_ => 1 } map { values %$_} map { @$_ } values %{ $outer_aliastypes->{$_} || {} }
+ } } qw/selecting restricting/;
+
# see what's left - throw away if not selecting/restricting
- # also throw in a group_by if restricting to guard against
- # cross-join explosions
- #
+ # also throw in a group_by if a non-selecting multiplier,
+ # to guard against cross-join explosions
+ my $need_outer_group_by;
while (my $j = shift @$from) {
my $alias = $j->[0]{-alias};
- if ($outer_aliastypes->{select}{$alias}) {
- push @outer_from, $j;
+ if (
+ $outer_select_chain->{$alias}
+ ) {
+ push @outer_from, $j
}
- elsif ($outer_aliastypes->{restrict}{$alias}) {
+ elsif ($outer_restrict_chain->{$alias}) {
push @outer_from, $j;
- $outer_attrs->{group_by} ||= $outer_select unless $j->[0]{-is_single};
+ $need_outer_group_by ||= $outer_aliastypes->{multiplying}{$alias} ? 1 : 0;
}
}
# demote the outer_from head
$outer_from[0] = $outer_from[0][0];
+ if ($need_outer_group_by and ! $outer_attrs->{group_by}) {
+
+ my $unprocessed_order_chunks;
+ ($outer_attrs->{group_by}, $unprocessed_order_chunks) = $self->_group_over_selection (
+ \@outer_from, $outer_select, $outer_attrs->{order_by}
+ );
+
+ $self->throw_exception (
+ 'A required group_by clause could not be constructed automatically due to a complex '
+ . 'order_by criteria. Either order_by columns only (no functions) or construct a suitable '
+ . 'group_by by hand'
+ ) if $unprocessed_order_chunks;
+
+ }
+
# This is totally horrific - the $where ends up in both the inner and outer query
# Unfortunately not much can be done until SQLA2 introspection arrives, and even
# then if where conditions apply to the *right* side of the prefetch, you may have
return (\@outer_from, $outer_select, $where, $outer_attrs);
}
+ #
+ # I KNOW THIS SUCKS! GET SQLA2 OUT THE DOOR SO THIS CAN DIE!
+ #
# Due to a lack of SQLA2 we fall back to crude scans of all the
# select/where/order/group attributes, in order to determine what
# aliases are needed to fulfill the query. This information is used
# throughout the code to prune unnecessary JOINs from the queries
# in an attempt to reduce the execution time.
# Although the method is pretty horrific, the worst thing that can
- # happen is for it to fail due to an unqualified column, which in
- # turn will result in a vocal exception. Qualifying the column will
- # invariably solve the problem.
+ # happen is for it to fail due to some scalar SQL, which in turn will
+ # result in a vocal exception.
sub _resolve_aliastypes_from_select_args {
my ( $self, $from, $select, $where, $attrs ) = @_;
or next;
$alias_list->{$al} = $j;
- $aliases_by_type->{multiplying}{$al} = 1
- unless $j->{-is_single};
+ $aliases_by_type->{multiplying}{$al} ||= $j->{-join_path}||[] if (
+ # not array == {from} head == can't be multiplying
+ ( ref($_) eq 'ARRAY' and ! $j->{-is_single} )
+ or
+ # a parent of ours is already a multiplier
+ ( grep { $aliases_by_type->{multiplying}{$_} } @{ $j->{-join_path}||[] } )
+ );
}
+ # get a column to source/alias map (including unqualified ones)
+ my $colinfo = $self->_resolve_column_info ($from);
+
# set up a botched SQLA
my $sql_maker = $self->sql_maker;
- my $sep = quotemeta ($self->_sql_maker_opts->{name_sep} || '.');
- local $sql_maker->{quote_char}; # so that we can regex away
+ # these are throw away results, do not pollute the bind stack
+ local $sql_maker->{select_bind};
+ local $sql_maker->{where_bind};
+ local $sql_maker->{group_bind};
+ local $sql_maker->{having_bind};
+
+ # we can't scan properly without any quoting (\b doesn't cut it
+ # everywhere), so unless there is proper quoting set - use our
+ # own weird impossible character.
+ # Also in the case of no quoting, we need to explicitly disable
+ # name_sep, otherwise sorry nasty legacy syntax like
+ # { 'count(foo.id)' => { '>' => 3 } } will stop working >:(
+ local $sql_maker->{quote_char} = $sql_maker->{quote_char};
+ local $sql_maker->{name_sep} = $sql_maker->{name_sep};
+
+ unless (defined $sql_maker->{quote_char} and length $sql_maker->{quote_char}) {
+ $sql_maker->{quote_char} = ["\x00", "\xFF"];
+ # if we don't unset it we screw up retarded but unfortunately working
+ # 'MAX(foo.bar)' => { '>', 3 }
+ $sql_maker->{name_sep} = '';
+ }
+
+ my ($lquote, $rquote, $sep) = map { quotemeta $_ } ($sql_maker->_quote_chars, $sql_maker->name_sep);
+
+ # generate sql chunks
+ my $to_scan = {
+ restricting => [
+ $sql_maker->_recurse_where ($where),
+ $sql_maker->_parse_rs_attrs ({
+ map { $_ => $attrs->{$_} } (qw/group_by having/)
+ }),
+ ],
+ selecting => [
+ $sql_maker->_recurse_fields ($select),
+ ( map { $_->[0] } $self->_extract_order_criteria ($attrs->{order_by}, $sql_maker) ),
+ ],
+ };
- my $select_sql = $sql_maker->_recurse_fields ($select);
- my $where_sql = $sql_maker->where ($where);
- my $group_by_sql = $sql_maker->_order_by({
- map { $_ => $attrs->{$_} } qw/group_by having/
- });
- my @order_by_chunks = ($self->_parse_order_by ($attrs->{order_by}) );
+ # throw away empty chunks
+ $_ = [ map { $_ || () } @$_ ] for values %$to_scan;
- # match every alias to the sql chunks above
+ # first loop through all fully qualified columns and get the corresponding
+ # alias (should work even if they are in scalarrefs)
for my $alias (keys %$alias_list) {
- my $al_re = qr/\b $alias $sep/x;
-
- for my $piece ($where_sql, $group_by_sql) {
- $aliases_by_type->{restrict}{$alias} = 1 if ($piece =~ $al_re);
+ my $al_re = qr/
+ $lquote $alias $rquote $sep
+ |
+ \b $alias \.
+ /x;
+
+ for my $type (keys %$to_scan) {
+ for my $piece (@{$to_scan->{$type}}) {
+ $aliases_by_type->{$type}{$alias} ||= $alias_list->{$alias}{-join_path}||[]
+ if ($piece =~ $al_re);
+ }
}
+ }
+
+ # now loop through unqualified column names, and try to locate them within
+ # the chunks
+ for my $col (keys %$colinfo) {
+ next if $col =~ / \. /x; # if column is qualified it was caught by the above
+
+ my $col_re = qr/ $lquote $col $rquote /x;
- for my $piece ($select_sql, @order_by_chunks ) {
- $aliases_by_type->{select}{$alias} = 1 if ($piece =~ $al_re);
+ for my $type (keys %$to_scan) {
+ for my $piece (@{$to_scan->{$type}}) {
+ if ($piece =~ $col_re) {
+ my $alias = $colinfo->{$col}{-source_alias};
+ $aliases_by_type->{$type}{$alias} ||= $alias_list->{$alias}{-join_path}||[];
+ }
+ }
}
}
# Add any non-left joins to the restriction list (such joins are indeed restrictions)
for my $j (values %$alias_list) {
my $alias = $j->{-alias} or next;
- $aliases_by_type->{restrict}{$alias} = 1 if (
+ $aliases_by_type->{restricting}{$alias} ||= $j->{-join_path}||[] if (
(not $j->{-join_type})
or
($j->{-join_type} !~ /^left (?: \s+ outer)? $/xi)
);
}
- # mark all join parents as mentioned
- # (e.g. join => { cds => 'tracks' } - tracks will need to bring cds too )
- for my $type (keys %$aliases_by_type) {
- for my $alias (keys %{$aliases_by_type->{$type}}) {
- $aliases_by_type->{$type}{$_} = 1
- for (map { keys %$_ } @{ $alias_list->{$alias}{-join_path} || [] });
+ return $aliases_by_type;
+ }
+
+ # This is the engine behind { distinct => 1 }
+ sub _group_over_selection {
+ my ($self, $from, $select, $order_by) = @_;
+
+ my $rs_column_list = $self->_resolve_column_info ($from);
+
+ my (@group_by, %group_index);
+
+ # the logic is: if it is a { func => val } we assume an aggregate,
+ # otherwise if \'...' or \[...] we assume the user knows what is
+ # going on thus group over it
+ for (@$select) {
+ if (! ref($_) or ref ($_) ne 'HASH' ) {
+ push @group_by, $_;
+ $group_index{$_}++;
+ if ($rs_column_list->{$_} and $_ !~ /\./ ) {
+ # add a fully qualified version as well
+ $group_index{"$rs_column_list->{$_}{-source_alias}.$_"}++;
+ }
}
}
- return $aliases_by_type;
+ # add any order_by parts that are not already present in the group_by
+ # we need to be careful not to add any named functions/aggregates
+ # i.e. order_by => [ ... { count => 'foo' } ... ]
+ my @leftovers;
+ for ($self->_extract_order_criteria($order_by)) {
+ # only consider real columns (for functions the user got to do an explicit group_by)
+ if (@$_ != 1) {
+ push @leftovers, $_;
+ next;
+ }
+ my $chunk = $_->[0];
+ my $colinfo = $rs_column_list->{$chunk} or do {
+ push @leftovers, $_;
+ next;
+ };
+
+ $chunk = "$colinfo->{-source_alias}.$chunk" if $chunk !~ /\./;
+ push @group_by, $chunk unless $group_index{$chunk}++;
+ }
+
+ return wantarray
+ ? (\@group_by, (@leftovers ? \@leftovers : undef) )
+ : \@group_by
+ ;
}
sub _resolve_ident_sources {
# the reason this is so contrived is that $ident may be a {from}
# structure, specifying multiple tables to join
- if ( Scalar::Util::blessed($ident) && $ident->isa("DBIx::Class::ResultSource") ) {
+ if ( blessed $ident && $ident->isa("DBIx::Class::ResultSource") ) {
# this is compat mode for insert/update/delete which do not deal with aliases
$alias2source->{me} = $ident;
$rs_alias = 'me';
$tabinfo = $_->[0];
}
- $alias2source->{$tabinfo->{-alias}} = $tabinfo->{-source_handle}->resolve
- if ($tabinfo->{-source_handle});
+ $alias2source->{$tabinfo->{-alias}} = $tabinfo->{-rsrc}
+ if ($tabinfo->{-rsrc});
}
}
my ($self, $ident, $colnames) = @_;
my ($alias2src, $root_alias) = $self->_resolve_ident_sources($ident);
- my $sep = $self->_sql_maker_opts->{name_sep} || '.';
- my $qsep = quotemeta $sep;
-
- my (%return, %seen_cols, @auto_colnames);
+ my (%seen_cols, @auto_colnames);
# compile a global list of column names, to be able to properly
# disambiguate unqualified column names (if at all possible)
my $rsrc = $alias2src->{$alias};
for my $colname ($rsrc->columns) {
push @{$seen_cols{$colname}}, $alias;
- push @auto_colnames, "$alias$sep$colname" unless $colnames;
+ push @auto_colnames, "$alias.$colname" unless $colnames;
}
}
grep { @{$seen_cols{$_}} == 1 } (keys %seen_cols),
];
- COLUMN:
+ my (%return, $colinfos);
foreach my $col (@$colnames) {
- my ($alias, $colname) = $col =~ m/^ (?: ([^$qsep]+) $qsep)? (.+) $/x;
+ my ($source_alias, $colname) = $col =~ m/^ (?: ([^\.]+) \. )? (.+) $/x;
- unless ($alias) {
- # see if the column was seen exactly once (so we know which rsrc it came from)
- if ($seen_cols{$colname} and @{$seen_cols{$colname}} == 1) {
- $alias = $seen_cols{$colname}[0];
- }
- else {
- next COLUMN;
- }
- }
+ # if the column was seen exactly once - we know which rsrc it came from
+ $source_alias ||= $seen_cols{$colname}[0]
+ if ($seen_cols{$colname} and @{$seen_cols{$colname}} == 1);
- my $rsrc = $alias2src->{$alias};
- $return{$col} = $rsrc && {
- %{$rsrc->column_info($colname)},
+ next unless $source_alias;
+
+ my $rsrc = $alias2src->{$source_alias}
+ or next;
+
+ $return{$col} = {
+ %{
+ ( $colinfos->{$source_alias} ||= $rsrc->columns_info )->{$colname}
+ ||
+ $self->throw_exception(
+ "No such column '$colname' on source " . $rsrc->source_name
+ );
+ },
-result_source => $rsrc,
- -source_alias => $alias,
+ -source_alias => $source_alias,
+ -fq_colname => $col eq $colname ? "$source_alias.$col" : $col,
+ -colname => $colname,
};
+
+ $return{"$source_alias.$colname"} = $return{$col} if $col eq $colname;
}
return \%return;
# the top of the stack, and if not - make sure the chain is inner-joined down
# to the root.
#
- sub _straight_join_to_node {
+ sub _inner_join_to_node {
my ($self, $from, $alias) = @_;
# subqueries and other oddness are naturally not supported
# So it looks like we will have to switch some stuff around.
# local() is useless here as we will be leaving the scope
# anyway, and deep cloning is just too fucking expensive
- # So replace the first hashref in the node arrayref manually
+ # So replace the first hashref in the node arrayref manually
my @new_from = ($from->[0]);
- my $sw_idx = { map { values %$_ => 1 } @$switch_branch };
+ my $sw_idx = { map { (values %$_), 1 } @$switch_branch }; #there's one k/v per join-path
for my $j (@{$from}[1 .. $#$from]) {
my $jalias = $j->[0]{-alias};
return \@new_from;
}
- # Most databases do not allow aliasing of tables in UPDATE/DELETE. Thus
- # a condition containing 'me' or other table prefixes will not work
- # at all. What this code tries to do (badly) is introspect the condition
- # and remove all column qualifiers. If it bails out early (returns undef)
- # the calling code should try another approach (e.g. a subquery)
- sub _strip_cond_qualifiers {
- my ($self, $where) = @_;
-
- my $cond = {};
-
- # No-op. No condition, we're updating/deleting everything
- return $cond unless $where;
-
- if (ref $where eq 'ARRAY') {
- $cond = [
- map {
- my %hash;
- foreach my $key (keys %{$_}) {
- $key =~ /([^.]+)$/;
- $hash{$1} = $_->{$key};
- }
- \%hash;
- } @$where
- ];
- }
- elsif (ref $where eq 'HASH') {
- if ( (keys %$where) == 1 && ( (keys %{$where})[0] eq '-and' )) {
- $cond->{-and} = [];
- my @cond = @{$where->{-and}};
- for (my $i = 0; $i < @cond; $i++) {
- my $entry = $cond[$i];
- my $hash;
- my $ref = ref $entry;
- if ($ref eq 'HASH' or $ref eq 'ARRAY') {
- $hash = $self->_strip_cond_qualifiers($entry);
- }
- elsif (! $ref) {
- $entry =~ /([^.]+)$/;
- $hash->{$1} = $cond[++$i];
+ # yet another atrocity: attempt to extract all columns from a
+ # where condition by hooking _quote
+ sub _extract_condition_columns {
+ my ($self, $cond, $sql_maker) = @_;
+
+ return [] unless $cond;
+
+ $sql_maker ||= $self->{_sql_ident_capturer} ||= do {
+ # FIXME - replace with a Moo trait
+ my $orig_sm_class = ref $self->sql_maker;
+ my $smic_class = "${orig_sm_class}::_IdentCapture_";
+
+ unless ($smic_class->isa('SQL::Abstract')) {
+
+ no strict 'refs';
+ *{"${smic_class}::_quote"} = subname "${smic_class}::_quote" => sub {
+ my ($self, $ident) = @_;
+ if (ref $ident eq 'SCALAR') {
+ $ident = $$ident;
+ my $storage_quotes = $self->sql_quote_char || '"';
+ my ($ql, $qr) = map
+ { quotemeta $_ }
+ (ref $storage_quotes eq 'ARRAY' ? @$storage_quotes : ($storage_quotes) x 2 )
+ ;
+
+ while ($ident =~ /
+ $ql (\w+) $qr
+ |
+ ([\w\.]+)
+ /xg) {
+ $self->{_captured_idents}{$1||$2}++;
+ }
}
else {
- $self->throw_exception ("_strip_cond_qualifiers() is unable to handle a condition reftype $ref");
+ $self->{_captured_idents}{$ident}++;
}
- push @{$cond->{-and}}, $hash;
- }
+ return $ident;
+ };
+
+ *{"${smic_class}::_get_captured_idents"} = subname "${smic_class}::_get_captures" => sub {
+ (delete shift->{_captured_idents}) || {};
+ };
+
+ $self->inject_base ($smic_class, $orig_sm_class);
+
}
- else {
- foreach my $key (keys %$where) {
- $key =~ /([^.]+)$/;
- $cond->{$1} = $where->{$key};
- }
+
+ $smic_class->new();
+ };
+
+ $sql_maker->_recurse_where($cond);
+
+ return [ sort keys %{$sql_maker->_get_captured_idents} ];
+ }
+
+ sub _extract_order_criteria {
+ my ($self, $order_by, $sql_maker) = @_;
+
+ my $parser = sub {
+ my ($sql_maker, $order_by) = @_;
+
+ return scalar $sql_maker->_order_by_chunks ($order_by)
+ unless wantarray;
+
+ my @chunks;
+ for ($sql_maker->_order_by_chunks ($order_by) ) {
+ my $chunk = ref $_ ? $_ : [ $_ ];
+ $chunk->[0] =~ s/\s+ (?: ASC|DESC ) \s* $//ix;
+ push @chunks, $chunk;
}
+
+ return @chunks;
+ };
+
+ if ($sql_maker) {
+ return $parser->($sql_maker, $order_by);
}
else {
- return undef;
+ $sql_maker = $self->sql_maker;
+ local $sql_maker->{quote_char};
+ return $parser->($sql_maker, $order_by);
}
-
- return $cond;
}
- sub _parse_order_by {
- my ($self, $order_by) = @_;
+ sub _order_by_is_stable {
+ my ($self, $ident, $order_by, $where) = @_;
- return scalar $self->sql_maker->_order_by_chunks ($order_by)
- unless wantarray;
+ my $colinfo = $self->_resolve_column_info($ident, [
+ (map { $_->[0] } $self->_extract_order_criteria($order_by)),
+ $where ? @{$self->_extract_fixed_condition_columns($where)} :(),
+ ]);
- my $sql_maker = $self->sql_maker;
- local $sql_maker->{quote_char}; #disable quoting
- my @chunks;
- for my $chunk (map { ref $_ ? @$_ : $_ } ($sql_maker->_order_by_chunks ($order_by) ) ) {
- $chunk =~ s/\s+ (?: ASC|DESC ) \s* $//ix;
- push @chunks, $chunk;
+ return undef unless keys %$colinfo;
+
+ my $cols_per_src;
+ $cols_per_src->{$_->{-source_alias}}{$_->{-colname}} = $_ for values %$colinfo;
+
+ for (values %$cols_per_src) {
+ my $src = (values %$_)[0]->{-result_source};
+ return 1 if $src->_identifying_column_set($_);
}
- return @chunks;
+ return undef;
+ }
+
+ # returns an arrayref of column names which *definitely* have some
+ # sort of non-nullable equality requested in the given condition
+ # specification. This is used to figure out if a resultset is
+ # constrained to a column which is part of a unique constraint,
+ # which in turn allows us to better predict how ordering will behave
+ # etc.
+ #
+ # this is a rudimentary, incomplete, and error-prone extractor
+ # however this is OK - it is conservative, and if we can not find
+ # something that is in fact there - the stack will recover gracefully
+ # Also - DQ and the mst it rode in on will save us all RSN!!!
+ sub _extract_fixed_condition_columns {
+ my ($self, $where, $nested) = @_;
+
+ return unless ref $where eq 'HASH';
+
+ my @cols;
+ for my $lhs (keys %$where) {
+ if ($lhs =~ /^\-and$/i) {
+ push @cols, ref $where->{$lhs} eq 'ARRAY'
+ ? ( map { $self->_extract_fixed_condition_columns($_, 1) } @{$where->{$lhs}} )
+ : $self->_extract_fixed_condition_columns($where->{$lhs}, 1)
+ ;
+ }
+ elsif ($lhs !~ /^\-/) {
+ my $val = $where->{$lhs};
+
+ push @cols, $lhs if (defined $val and (
+ ! ref $val
+ or
+ (ref $val eq 'HASH' and keys %$val == 1 and defined $val->{'='})
+ ));
+ }
+ }
+ return $nested ? @cols : \@cols;
}
1;
use Test::More;
use Test::Exception;
+
use lib qw(t/lib);
use DBICTest;
use DBIC::SqlMakerTest;
my $schema = DBICTest->init_schema();
- {
- my $rs = $schema->resultset( 'CD' )->search(
- {
- 'producer.name' => 'blah',
- 'producer_2.name' => 'foo',
- },
- {
- 'join' => [
- { cd_to_producer => 'producer' },
- { cd_to_producer => 'producer' },
- ],
- 'prefetch' => [
- 'artist',
- { cd_to_producer => 'producer' },
- ],
- }
- );
-
- lives_ok {
- my @rows = $rs->all();
- };
- }
+lives_ok (sub {
+ my $rs = $schema->resultset( 'CD' )->search(
+ {
+ 'producer.name' => 'blah',
+ 'producer_2.name' => 'foo',
+ },
+ {
+ 'join' => [
+ { cd_to_producer => 'producer' },
+ { cd_to_producer => 'producer' },
+ ],
+ 'prefetch' => [
+ 'artist',
+ { cd_to_producer => { producer => 'producer_to_cd' } },
+ ],
+ }
+ );
+
+ my @executed = $rs->all();
+
+ is_same_sql_bind (
+ $rs->as_query,
+ '(
+ SELECT me.cdid, me.artist, me.title, me.year, me.genreid, me.single_track,
+ artist.artistid, artist.name, artist.rank, artist.charfield,
+ cd_to_producer.cd, cd_to_producer.producer, cd_to_producer.attribute,
+ producer.producerid, producer.name,
+ producer_to_cd.cd, producer_to_cd.producer, producer_to_cd.attribute
+ FROM cd me
+ LEFT JOIN cd_to_producer cd_to_producer
+ ON cd_to_producer.cd = me.cdid
+ LEFT JOIN producer producer
+ ON producer.producerid = cd_to_producer.producer
+ LEFT JOIN cd_to_producer producer_to_cd
+ ON producer_to_cd.producer = producer.producerid
+ LEFT JOIN cd_to_producer cd_to_producer_2
+ ON cd_to_producer_2.cd = me.cdid
+ LEFT JOIN producer producer_2
+ ON producer_2.producerid = cd_to_producer_2.producer
+ JOIN artist artist ON artist.artistid = me.artist
+ WHERE ( ( producer.name = ? AND producer_2.name = ? ) )
+ ORDER BY cd_to_producer.cd, producer_to_cd.producer
+ )',
+ [
+ [ 'producer.name' => 'blah' ],
+ [ 'producer_2.name' => 'foo' ],
+ ],
+ );
+}, 'Complex join parsed/executed properly');
my @rs1a_results = $schema->resultset("Artist")->search_related('cds', {title => 'Forkful of bees'}, {order_by => 'title'});
is($rs1a_results[0]->title, 'Forkful of bees', "bare field conditions okay after search related");
is(scalar(@{$merge_rs_2->{attrs}->{join}}), 1, 'only one join kept when inherited');
my $merge_rs_2_cd = $merge_rs_2->next;
- eval {
+ lives_ok (sub {
my @rs_with_prefetch = $schema->resultset('TreeLike')
->search(
prefetch => [ 'parent', { 'children' => 'parent' } ],
});
- };
-
- ok(!$@, "pathological prefetch ok");
+ }, 'pathological prefetch ok');
my $rs = $schema->resultset("Artist")->search({}, { join => 'twokeys' });
my $second_search_rs = $rs->search({ 'cds_2.cdid' => '2' }, { join =>
is(scalar(@{$second_search_rs->{attrs}->{join}}), 3, 'both joins kept');
ok($second_search_rs->next, 'query on double joined rel runs okay');
+ # test joinmap pruner
+ lives_ok ( sub {
+ my $rs = $schema->resultset('Artwork')->search (
+ {
+ },
+ {
+ distinct => 1,
+ join => [
+ { artwork_to_artist => 'artist' },
+ { cd => 'artist' },
+ ],
+ },
+ );
+
+ is_same_sql_bind (
+ $rs->count_rs->as_query,
+ '(
+ SELECT COUNT( * )
+ FROM (
+ SELECT me.cd_id
+ FROM cd_artwork me
+ JOIN cd cd ON cd.cdid = me.cd_id
+ JOIN artist artist_2 ON artist_2.artistid = cd.artist
+ GROUP BY me.cd_id
+ ) me
+ )',
+ [],
+ );
+
+ ok (defined $rs->count);
+ });
+
+ # make sure multiplying endpoints do not lose their join-path
+ lives_ok (sub {
+ my $rs = $schema->resultset('CD')->search (
+ { },
+ { join => { artwork => 'images' } },
+ )->get_column('cdid');
+
+ is_same_sql_bind (
+ $rs->as_query,
+ '(
+ SELECT me.cdid
+ FROM cd me
+ LEFT JOIN cd_artwork artwork
+ ON artwork.cd_id = me.cdid
+ LEFT JOIN images images
+ ON images.artwork_id = artwork.cd_id
+ )',
+ [],
+ );
+
+ # execution
+ $rs->next;
+ });
+
done_testing;
use warnings;
use Test::More;
+ use Test::Exception;
use lib qw(t/lib);
use DBICTest;
my $schema = DBICTest->init_schema();
# Under some versions of SQLite if the $rs is left hanging around it will lock
# So we create a scope here cos I'm lazy
{
- my $rs = $schema->resultset('CD')->search ({}, { order_by => 'cdid' });
-
- # get the defined columns
- my @dbic_cols = sort $rs->result_source->columns;
-
- # use the hashref inflator class as result class
- $rs->result_class('DBIx::Class::ResultClass::HashRefInflator');
-
- # fetch first record
- my $datahashref1 = $rs->first;
-
- my @hashref_cols = sort keys %$datahashref1;
-
- is_deeply( \@dbic_cols, \@hashref_cols, 'returned columns' );
-
- my $cd1 = $rs->find ({cdid => 1});
- is_deeply ( $cd1, $datahashref1, 'first/find return the same thing');
-
- my $cd2 = $rs->search({ cdid => 1 })->single;
- is_deeply ( $cd2, $datahashref1, 'first/search+single return the same thing');
+ my $rs = $schema->resultset('CD')->search ({}, {
+ order_by => 'cdid',
+ });
+
+ my $orig_resclass = $rs->result_class;
+ eval "package DBICTest::CDSubclass; use base '$orig_resclass'";
+
+ # override on a specific $rs object, should not chain
+ $rs->result_class ('DBICTest::CDSubclass');
+
+ my $cd = $rs->find ({cdid => 1});
+ is (ref $cd, 'DBICTest::CDSubclass', 'result_class override propagates to find');
+
+ $cd = $rs->search({ cdid => 1 })->single;
+ is (ref $cd, $orig_resclass, 'result_class override does not propagate over seach+single');
+
+ $cd = $rs->search()->find ({ cdid => 1 });
+ is (ref $cd, $orig_resclass, 'result_class override does not propagate over seach+find');
+
+ # set as attr - should propagate
+ my $hri_rs = $rs->search ({}, { result_class => 'DBIx::Class::ResultClass::HashRefInflator' });
+ is ($rs->result_class, 'DBICTest::CDSubclass', 'original class unchanged');
+ is ($hri_rs->result_class, 'DBIx::Class::ResultClass::HashRefInflator', 'result_class accessor pre-set via attribute');
+
+
+ my $datahashref1 = $hri_rs->next;
+ is_deeply(
+ [ sort keys %$datahashref1 ],
+ [ sort $rs->result_source->columns ],
+ 'returned correct columns',
+ );
+
+ $cd = $hri_rs->find ({cdid => 1});
+ is_deeply ( $cd, $datahashref1, 'first/find return the same thing (result_class attr propagates)');
+
+ $cd = $hri_rs->search({ cdid => 1 })->single;
+ is_deeply ( $cd, $datahashref1, 'first/search+single return the same thing (result_class attr propagates)');
+
+ $hri_rs->result_class ('DBIx::Class::Row'); # something bogus
+ is(
+ $hri_rs->search->result_class, 'DBIx::Class::ResultClass::HashRefInflator',
+ 'result_class set using accessor does not propagate over unused search'
+ );
+
+ # test result class auto-loading
+ throws_ok (
+ sub { $rs->result_class ('nonexsitant_bogus_class') },
+ qr/Can't locate nonexsitant_bogus_class.pm/,
+ 'Attempt to load on accessor override',
+ );
+ is ($rs->result_class, 'DBICTest::CDSubclass', 'class unchanged');
+
+ throws_ok (
+ sub { $rs->search ({}, { result_class => 'nonexsitant_bogus_class' }) },
+ qr/Can't locate nonexsitant_bogus_class.pm/,
+ 'Attempt to load on accessor override',
+ );
+ is ($rs->result_class, 'DBICTest::CDSubclass', 'class unchanged');
}
sub check_cols_of {
my @dbic_reltable = $dbic_obj->$col;
my @hashref_reltable = @{$datahashref->{$col}};
- is (scalar @dbic_reltable, scalar @hashref_reltable, 'number of related entries');
+ is (scalar @hashref_reltable, scalar @dbic_reltable, 'number of related entries');
# for my $index (0..scalar @hashref_reltable) {
for my $index (0..scalar @dbic_reltable) {
}
# sometimes for ultra-mega-speed you want to fetch columns in esoteric ways
- # check the inflator over a non-fetching join
+ # check the inflator over a non-fetching join
$rs_dbic = $schema->resultset ('Artist')->search ({ 'me.artistid' => 1}, {
prefetch => { cds => 'tracks' },
order_by => [qw/cds.cdid tracks.trackid/],
- package # hide from PAUSE
+ package # hide from PAUSE
DBICTest::Schema::CD;
use base qw/DBICTest::BaseResult/;
data_type => 'varchar',
size => 100,
},
- 'genreid' => {
+ 'genreid' => {
data_type => 'integer',
is_nullable => 1,
accessor => undef,
__PACKAGE__->set_primary_key('cdid');
__PACKAGE__->add_unique_constraint([ qw/artist title/ ]);
- __PACKAGE__->belongs_to( artist => 'DBICTest::Schema::Artist', undef, {
- is_deferrable => 1,
+ __PACKAGE__->belongs_to( artist => 'DBICTest::Schema::Artist', undef, {
+ is_deferrable => 1,
+ proxy => { artist_name => 'name' },
+ });
+ __PACKAGE__->belongs_to( very_long_artist_relationship => 'DBICTest::Schema::Artist', 'artist', {
+ is_deferrable => 1,
});
# in case this is a single-cd it promotes a track from another cd
- __PACKAGE__->belongs_to( single_track => 'DBICTest::Schema::Track', 'single_track',
- { join_type => 'left'}
+ __PACKAGE__->belongs_to( single_track => 'DBICTest::Schema::Track', 'single_track',
+ { join_type => 'left'}
);
+# add a non-left single relationship for the complex prefetch tests
+__PACKAGE__->belongs_to( existing_single_track => 'DBICTest::Schema::Track', 'single_track');
+
__PACKAGE__->has_many( tracks => 'DBICTest::Schema::Track' );
__PACKAGE__->has_many(
tags => 'DBICTest::Schema::Tag', undef,
},
);
+
+ # This is insane. Don't ever do anything like that
+ # This is for testing purposes only!
+
+ # mst: mo: DBIC is an "object relational mapper"
+ # mst: mo: not an "object relational hider-because-mo-doesn't-understand-databases
+ # ribasushi: mo: try it with a subselect nevertheless, I'd love to be proven wrong
+ # ribasushi: mo: does sqlite actually take this?
+ # ribasushi: an order in a correlated subquery is insane - how long does it take you on real data?
+
+ __PACKAGE__->might_have(
+ 'last_track',
+ 'DBICTest::Schema::Track',
+ sub {
+ my $args = shift;
+ return (
+ {
+ "$args->{foreign_alias}.trackid" => { '=' =>
+ $args->{self_resultsource}->schema->resultset('Track')->search(
+ { 'correlated_tracks.cd' => { -ident => "$args->{self_alias}.cdid" } },
+ {
+ order_by => { -desc => 'position' },
+ rows => 1,
+ alias => 'correlated_tracks',
+ columns => ['trackid']
+ },
+ )->as_query
+ }
+ }
+ );
+ },
+ );
+
1;
use strict;
- use warnings;
+ use warnings;
use Test::More;
use Test::Exception;
use lib qw(t/lib);
use DBICTest;
- plan tests => 9;
-
my $schema = DBICTest->init_schema();
lives_ok(sub {
prefetch => [ qw/ cds / ],
order_by => [ { -desc => 'me.name' }, 'cds.title' ],
select => [qw/ me.name cds.title / ],
- }
+ },
);
is ($rs->count, 2, 'Correct number of collapsed artists');
}, 'explicit prefetch on a keyless object works');
+lives_ok ( sub {
+
+ my $rs = $schema->resultset('CD')->search(
+ {},
+ {
+ order_by => [ { -desc => 'me.year' } ],
+ }
+ );
+ my $years = [qw/ 2001 2001 1999 1998 1997/];
+
+ is_deeply (
+ [ $rs->search->get_column('me.year')->all ],
+ $years,
+ 'Expected years (at least one duplicate)',
+ );
+
+ my @cds_and_tracks;
+ for my $cd ($rs->all) {
+ my $data->{year} = $cd->year;
+ for my $tr ($cd->tracks->all) {
+ push @{$data->{tracks}}, { $tr->get_columns };
+ }
+ push @cds_and_tracks, $data;
+ }
+
+ my $pref_rs = $rs->search ({}, { columns => ['year'], prefetch => 'tracks' });
+
+ my @pref_cds_and_tracks;
+ for my $cd ($pref_rs->all) {
+ my $data = { $cd->get_columns };
+ for my $tr ($cd->tracks->all) {
+ push @{$data->{tracks}}, { $tr->get_columns };
+ }
+ push @pref_cds_and_tracks, $data;
+ }
+
+ is_deeply (
+ \@pref_cds_and_tracks,
+ \@cds_and_tracks,
+ 'Correct collapsing on non-unique primary object'
+ );
+
+ is_deeply (
+ [ $pref_rs->search ({}, { result_class => 'DBIx::Class::ResultClass::HashRefInflator' })->all ],
+ \@cds_and_tracks,
+ 'Correct HRI collapsing on non-unique primary object'
+ );
+
+}, 'weird collapse lives');
+
+
lives_ok(sub {
# test implicit prefetch as well
is ($cd->artist->name, 'Random Boy Band', 'Artist object has correct name');
}, 'implicit keyless prefetch works');
+
+ # sane error
+ throws_ok(
+ sub {
+ $schema->resultset('Track')->search({}, { join => { cd => 'artist' }, '+columns' => 'artist.name' } )->next;
+ },
+ qr|\QCan't inflate manual prefetch into non-existent relationship 'artist' from 'Track', check the inflation specification (columns/as) ending in 'artist.name'|,
+ 'Sensible error message on mis-specified "as"',
+ );
+
+ done_testing;
use warnings;
use Test::More;
- use Test::Exception;
use lib qw(t/lib);
use DBICTest;
-use IO::File;
my $schema = DBICTest->init_schema();
# remember the original debug setting so it can be restored after the
# query-counting sections below
my $sdebug = $schema->storage->debug;
-# once the following TODO is complete, remove the 2 warning tests immediately
-# after the TODO block
-# (the TODO block itself contains tests ensuring that the warns are removed)
-TODO: {
- local $TODO = 'Prefetch of multiple has_many rels at the same level (currently warn to protect the clueless git)';
+#( 1 -> M + M )
+# two has_many prefetches at the same level; previously wrapped in a TODO
+# block, now expected to work outright
+my $cd_rs = $schema->resultset('CD')->search( { 'me.title' => 'Forkful of bees' } );
+my $pr_cd_rs = $cd_rs->search( {}, { prefetch => [qw/tracks tags/], } );
- #( 1 -> M + M )
- my $cd_rs = $schema->resultset('CD')->search ({ 'me.title' => 'Forkful of bees' });
- my $pr_cd_rs = $cd_rs->search ({}, {
- prefetch => [qw/tracks tags/],
- });
+# unprefetched baseline to compare counts/objects against
+my $tracks_rs = $cd_rs->first->tracks;
+my $tracks_count = $tracks_rs->count;
- my $tracks_rs = $cd_rs->first->tracks;
- my $tracks_count = $tracks_rs->count;
+my ( $pr_tracks_rs, $pr_tracks_count );
- my ($pr_tracks_rs, $pr_tracks_count);
+# count every statement issued while the prefetched rs is exercised
+my $queries = 0;
+$schema->storage->debugcb( sub { $queries++ } );
+$schema->storage->debug(1);
# (removed) old TODO-guarded copies of the tests below, plus the closure
# that asserted the now-obsolete "will explode the number of row objects"
# warning - superseded by the unconditional tests that follow
- my $queries = 0;
- $schema->storage->debugcb(sub { $queries++ });
- $schema->storage->debug(1);
-
- my $o_mm_warn;
- {
- local $SIG{__WARN__} = sub { $o_mm_warn = shift };
- $pr_tracks_rs = $pr_cd_rs->first->tracks;
- };
- $pr_tracks_count = $pr_tracks_rs->count;
-
- ok(! $o_mm_warn, 'no warning on attempt to prefetch several same level has_many\'s (1 -> M + M)');
-
- is($queries, 1, 'prefetch one->(has_many,has_many) ran exactly 1 query');
- $schema->storage->debugcb (undef);
- $schema->storage->debug ($sdebug);
-
- is($pr_tracks_count, $tracks_count, 'equal count of prefetched relations over several same level has_many\'s (1 -> M + M)');
- is ($pr_tracks_rs->all, $tracks_rs->all, 'equal amount of objects returned with and without prefetch over several same level has_many\'s (1 -> M + M)');
-
- #( M -> 1 -> M + M )
- my $note_rs = $schema->resultset('LinerNotes')->search ({ notes => 'Buy Whiskey!' });
- my $pr_note_rs = $note_rs->search ({}, {
- prefetch => {
- cd => [qw/tracks tags/]
- },
- });
-
- my $tags_rs = $note_rs->first->cd->tags;
- my $tags_count = $tags_rs->count;
-
- my ($pr_tags_rs, $pr_tags_count);
-
- $queries = 0;
- $schema->storage->debugcb(sub { $queries++ });
- $schema->storage->debug(1);
-
- my $m_o_mm_warn;
- {
- local $SIG{__WARN__} = sub { $m_o_mm_warn = shift };
- $pr_tags_rs = $pr_note_rs->first->cd->tags;
- };
- $pr_tags_count = $pr_tags_rs->count;
-
- ok(! $m_o_mm_warn, 'no warning on attempt to prefetch several same level has_many\'s (M -> 1 -> M + M)');
-
- is($queries, 1, 'prefetch one->(has_many,has_many) ran exactly 1 query');
- $schema->storage->debugcb (undef);
- $schema->storage->debug ($sdebug);
-
- is($pr_tags_count, $tags_count, 'equal count of prefetched relations over several same level has_many\'s (M -> 1 -> M + M)');
- is($pr_tags_rs->all, $tags_rs->all, 'equal amount of objects with and without prefetch over several same level has_many\'s (M -> 1 -> M + M)');
-}
-
-# remove this closure once the TODO above is working
+# capture any warning emitted while inflating the doubly-prefetched rows;
+# none is expected now that same-level has_many prefetch is supported
+my $o_mm_warn;
{
- my $warn_re = qr/will explode the number of row objects retrievable via/;
-
- my (@w, @dummy);
- local $SIG{__WARN__} = sub { $_[0] =~ $warn_re ? push @w, @_ : warn @_ };
-
- my $rs = $schema->resultset('CD')->search ({ 'me.title' => 'Forkful of bees' }, { prefetch => [qw/tracks tags/] });
- @w = ();
- @dummy = $rs->first;
- is (@w, 1, 'warning on attempt prefetching several same level has_manys (1 -> M + M)');
-
- my $rs2 = $schema->resultset('LinerNotes')->search ({ notes => 'Buy Whiskey!' }, { prefetch => { cd => [qw/tags tracks/] } });
- @w = ();
- @dummy = $rs2->first;
- is (@w, 1, 'warning on attempt prefetching several same level has_manys (M -> 1 -> M + M)');
-}
+ local $SIG{__WARN__} = sub { $o_mm_warn = shift };
+ $pr_tracks_rs = $pr_cd_rs->first->tracks;
+};
+$pr_tracks_count = $pr_tracks_rs->count;
+
+ok( !$o_mm_warn,
+'no warning on attempt to prefetch several same level has_many\'s (1 -> M + M)'
+);
+
+# everything above (first + tracks + count) must collapse into one SELECT
+is( $queries, 1, 'prefetch one->(has_many,has_many) ran exactly 1 query' );
+$schema->storage->debugcb(undef);
+$schema->storage->debug($sdebug);
+
+is( $pr_tracks_count, $tracks_count,
+'equal count of prefetched relations over several same level has_many\'s (1 -> M + M)'
+);
+is( $pr_tracks_rs->all, $tracks_rs->all,
+'equal amount of objects returned with and without prefetch over several same level has_many\'s (1 -> M + M)'
+);
+
+#( M -> 1 -> M + M )
+# same check one level deeper: has_many prefetches hanging off a belongs_to
+my $note_rs =
+  $schema->resultset('LinerNotes')->search( { notes => 'Buy Whiskey!' } );
+my $pr_note_rs =
+  $note_rs->search( {}, { prefetch => { cd => [qw/tracks tags/] }, } );
+
+my $tags_rs = $note_rs->first->cd->tags;
+my $tags_count = $tags_rs->count;
+
+my ( $pr_tags_rs, $pr_tags_count );
+
+$queries = 0;
+$schema->storage->debugcb( sub { $queries++ } );
+$schema->storage->debug(1);
+
+my $m_o_mm_warn;
+{
+ local $SIG{__WARN__} = sub { $m_o_mm_warn = shift };
+ $pr_tags_rs = $pr_note_rs->first->cd->tags;
+};
+$pr_tags_count = $pr_tags_rs->count;
+
+ok( !$m_o_mm_warn,
+'no warning on attempt to prefetch several same level has_many\'s (M -> 1 -> M + M)'
+);
+
+is( $queries, 1, 'prefetch one->(has_many,has_many) ran exactly 1 query' );
+$schema->storage->debugcb(undef);
+$schema->storage->debug($sdebug);
+
+is( $pr_tags_count, $tags_count,
+'equal count of prefetched relations over several same level has_many\'s (M -> 1 -> M + M)'
+);
+is( $pr_tags_rs->all, $tags_rs->all,
+'equal amount of objects with and without prefetch over several same level has_many\'s (M -> 1 -> M + M)'
+);
done_testing;