X-Git-Url: http://git.shadowcat.co.uk/gitweb/gitweb.cgi?a=blobdiff_plain;f=lib%2FDBIx%2FClass%2FStorage%2FDBI.pm;h=3d9a200f41c55348cb4c4a88b5ca33699089e283;hb=526dc858fec4cfc63c8bc2de69c972fc6cbcccec;hp=530d5aab56f157d74271f8d7bc6127e344fade08;hpb=b16aca2fa8f737246f8afe18093efaf486ff0048;p=dbsrgits%2FDBIx-Class-Historic.git

diff --git a/lib/DBIx/Class/Storage/DBI.pm b/lib/DBIx/Class/Storage/DBI.pm
index 530d5aa..3d9a200 100644
--- a/lib/DBIx/Class/Storage/DBI.pm
+++ b/lib/DBIx/Class/Storage/DBI.pm
@@ -13,14 +13,15 @@ use Scalar::Util();
 use List::Util();
 
 __PACKAGE__->mk_group_accessors('simple' =>
-  qw/_connect_info _dbi_connect_info _dbh _sql_maker _sql_maker_opts
-     _conn_pid _conn_tid transaction_depth _dbh_autocommit savepoints/
+  qw/_connect_info _dbi_connect_info _dbh _sql_maker _sql_maker_opts
+       _conn_pid _conn_tid transaction_depth _dbh_autocommit savepoints/
 );
 
 # the values for these accessors are picked out (and deleted) from
 # the attribute hashref passed to connect_info
 my @storage_options = qw/
-  on_connect_do on_disconnect_do disable_sth_caching unsafe auto_savepoint
+  on_connect_call on_disconnect_call on_connect_do on_disconnect_do
+  disable_sth_caching unsafe auto_savepoint
 /;
 __PACKAGE__->mk_group_accessors('simple' => @storage_options);
 
@@ -177,6 +178,91 @@ immediately before disconnecting from the database.
 Note, this only runs if you explicitly call L</disconnect> on the
 storage object.
 
+=item on_connect_call
+
+A more generalized form of L</on_connect_do> that calls the specified
+C<connect_call_METHOD> methods in your storage driver.
+
+  on_connect_do => 'select 1'
+
+is equivalent to:
+
+  on_connect_call => [ [ do_sql => 'select 1' ] ]
+
+Its values may contain:
+
+=over
+
+=item a scalar
+
+Will call the C<connect_call_METHOD> method.
+
+=item a code reference
+
+Will execute C<< $code->($storage) >>
+
+=item an array reference
+
+Each value can be a method name or code reference.
+
+=item an array of arrays
+
+For each array, the first item is taken to be the C<connect_call_> method name
+or code reference, and the rest are parameters to it.
+
+=back
+
+Some predefined storage methods you may use:
+
+=over
+
+=item do_sql
+
+Executes a SQL string or a code reference that returns a SQL string. This is
+what L</on_connect_do> and L</on_disconnect_do> use.
+
+It can take:
+
+=over
+
+=item a scalar
+
+Will execute the scalar as SQL.
+
+=item an arrayref
+
+Taken to be arguments to L<DBI/do>, the SQL string optionally followed by the
+attributes hashref and bind values.
+
+=item a code reference
+
+Will execute C<< $code->($storage) >> and execute the return array refs as
+above.
+
+=back
+
+=item datetime_setup
+
+Execute any statements necessary to initialize the database session to return
+and accept datetime/timestamp values used with
+L<DBIx::Class::InflateColumn::DateTime>.
+
+Only necessary for some databases, see your specific storage driver for
+implementation details.
+
+=back
+
+=item on_disconnect_call
+
+Takes arguments in the same form as L</on_connect_call> and executes them
+immediately before disconnecting from the database.
+
+Calls the C<disconnect_call_METHOD> methods as opposed to the
+C<connect_call_METHOD> methods called by L</on_connect_call>.
+
+Note, this only runs if you explicitly call L</disconnect> on the
+storage object.
+
 =item disable_sth_caching
 
 If set to a true value, this option will disable the caching of
@@ -359,6 +445,34 @@ sub connect_info {
 
 This method is deprecated in favour of setting via L</connect_info>.
 
+=cut
+
+=head2 on_disconnect_do
+
+This method is deprecated in favour of setting via L</connect_info>.
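For orientation, a minimal sketch (not part of the patch) of what "setting via
connect_info" looks like with the options documented above; the schema class,
DSN and credentials are placeholders:

  # hypothetical usage - storage options ride along in the trailing
  # connect_info hashref rather than being set via the deprecated accessors
  my $schema = My::Schema->connect(
    $dsn, $user, $password,
    {
      on_connect_call => [
        'datetime_setup',              # calls connect_call_datetime_setup
        [ do_sql => 'select 1' ],      # calls connect_call_do_sql('select 1')
      ],
    },
  );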
+ +=cut + +sub _parse_connect_do { + my ($self, $type) = @_; + + my $val = $self->$type; + return () if not defined $val; + + my @res; + + if (not ref($val)) { + push @res, [ 'do_sql', $val ]; + } elsif (ref($val) eq 'CODE') { + push @res, $val; + } elsif (ref($val) eq 'ARRAY') { + push @res, map { [ 'do_sql', $_ ] } @$val; + } else { + $self->throw_exception("Invalid type for $type: ".ref($val)); + } + + return \@res; +} =head2 dbh_do @@ -506,8 +620,12 @@ sub disconnect { my ($self) = @_; if( $self->connected ) { - my $connection_do = $self->on_disconnect_do; - $self->_do_connection_actions($connection_do) if ref($connection_do); + my @actions; + + push @actions, ( $self->on_disconnect_call || () ); + push @actions, $self->_parse_connect_do ('on_disconnect_do'); + + $self->_do_connection_actions(disconnect_call_ => $_) for @actions; $self->_dbh->rollback unless $self->_dbh_autocommit; $self->_dbh->disconnect; @@ -594,7 +712,7 @@ sub dbh { sub _sql_maker_args { my ($self) = @_; - + return ( bindtype=>'columns', array_datatypes => 1, limit_dialect => $self->dbh, %{$self->_sql_maker_opts} ); } @@ -624,8 +742,12 @@ sub _populate_dbh { # there is no transaction in progress by definition $self->{transaction_depth} = $self->_dbh_autocommit ? 0 : 1; - my $connection_do = $self->on_connect_do; - $self->_do_connection_actions($connection_do) if $connection_do; + my @actions; + + push @actions, ( $self->on_connect_call || () ); + push @actions, $self->_parse_connect_do ('on_connect_do'); + + $self->_do_connection_actions(connect_call_ => $_) for @actions; } sub _determine_driver { @@ -650,25 +772,41 @@ sub _determine_driver { } sub _do_connection_actions { - my $self = shift; - my $connection_do = shift; - - if (!ref $connection_do) { - $self->_do_query($connection_do); - } - elsif (ref $connection_do eq 'ARRAY') { - $self->_do_query($_) foreach @$connection_do; - } - elsif (ref $connection_do eq 'CODE') { - $connection_do->($self); - } - else { - $self->throw_exception (sprintf ("Don't know how to process conection actions of type '%s'", ref $connection_do) ); + my $self = shift; + my $method_prefix = shift; + my $call = shift; + + if (not ref($call)) { + my $method = $method_prefix . $call; + $self->$method(@_); + } elsif (ref($call) eq 'CODE') { + $self->$call(@_); + } elsif (ref($call) eq 'ARRAY') { + if (ref($call->[0]) ne 'ARRAY') { + $self->_do_connection_actions($method_prefix, $_) for @$call; + } else { + $self->_do_connection_actions($method_prefix, @$_) for @$call; + } + } else { + $self->throw_exception (sprintf ("Don't know how to process conection actions of type '%s'", ref($call)) ); } return $self; } +sub connect_call_do_sql { + my $self = shift; + $self->_do_query(@_); +} + +sub disconnect_call_do_sql { + my $self = shift; + $self->_do_query(@_); +} + +# override in db-specific backend when necessary +sub connect_call_datetime_setup { 1 } + sub _do_query { my ($self, $action) = @_; @@ -1245,24 +1383,37 @@ sub _select_args { } } - my @limit; - if ($attrs->{software_limit} || - $sql_maker->_default_limit_syntax eq "GenericSubQ") { - $attrs->{software_limit} = 1; - } else { + # adjust limits + if ( + $attrs->{software_limit} + || + $sql_maker->_default_limit_syntax eq "GenericSubQ" + ) { + $attrs->{software_limit} = 1; + } + else { $self->throw_exception("rows attribute must be positive if present") if (defined($attrs->{rows}) && !($attrs->{rows} > 0)); # MySQL actually recommends this approach. I cringe. 
$attrs->{rows} = 2**48 if not defined $attrs->{rows} and defined $attrs->{offset}; + } - if ($attrs->{rows} && keys %{$attrs->{collapse}}) { - ($ident, $select, $where, $attrs) - = $self->_adjust_select_args_for_limited_prefetch ($ident, $select, $where, $attrs); - } - else { - push @limit, $attrs->{rows}, $attrs->{offset}; - } + my @limit; + + # see if we need to tear the prefetch apart (either limited has_many or grouped prefetch) + # otherwise delegate the limiting to the storage, unless software limit was requested + if ( + ( $attrs->{rows} && keys %{$attrs->{collapse}} ) + || + ( $attrs->{group_by} && @{$attrs->{group_by}} && + $attrs->{prefetch_select} && @{$attrs->{prefetch_select}} ) + ) { + ($ident, $select, $where, $attrs) + = $self->_adjust_select_args_for_complex_prefetch ($ident, $select, $where, $attrs); + } + elsif (! $attrs->{software_limit} ) { + push @limit, $attrs->{rows}, $attrs->{offset}; } ### @@ -1280,63 +1431,95 @@ sub _select_args { (qw/order_by group_by having _virtual_order_by/ ) }; - $sql_maker->{for} = delete $attrs->{for}; return ('select', $attrs->{bind}, $ident, $bind_attrs, $select, $where, $order, @limit); } -sub _adjust_select_args_for_limited_prefetch { +sub _adjust_select_args_for_complex_prefetch { my ($self, $from, $select, $where, $attrs) = @_; - if ($attrs->{group_by} and @{$attrs->{group_by}}) { - $self->throw_exception ('Prefetch with limit (rows/offset) is not supported on resultsets with a group_by attribute'); - } + # copies for mangling + $from = [ @$from ]; + $select = [ @$select ]; + $attrs = { %$attrs }; - $self->throw_exception ('Prefetch with limit (rows/offset) is not supported on resultsets with a custom from attribute') + $self->throw_exception ('Complex prefetches are not supported on resultsets with a custom from attribute') if (ref $from ne 'ARRAY'); # separate attributes my $sub_attrs = { %$attrs }; - delete $attrs->{$_} for qw/where bind rows offset/; - delete $sub_attrs->{$_} for qw/for collapse select order_by/; + delete $attrs->{$_} for qw/where bind rows offset group_by having/; + delete $sub_attrs->{$_} for qw/for collapse prefetch_select _collapse_order_by select as/; my $alias = $attrs->{alias}; - # create subquery select list - my $sub_select = [ grep { $_ =~ /^$alias\./ } @{$attrs->{select}} ]; + # create subquery select list - loop only over primary columns + my $sub_select = []; + for my $i (0 .. @{$attrs->{select}} - @{$attrs->{prefetch_select}} - 1) { + my $sel = $attrs->{select}[$i]; + + # alias any functions to the dbic-side 'as' label + # adjust the outer select accordingly + if (ref $sel eq 'HASH' && !$sel->{-select}) { + $sel = { -select => $sel, -as => $attrs->{as}[$i] }; + $select->[$i] = join ('.', $attrs->{alias}, $attrs->{as}[$i]); + } + + push @$sub_select, $sel; + } # bring over all non-collapse-induced order_by into the inner query (if any) # the outer one will have to keep them all + delete $sub_attrs->{order_by}; if (my $ord_cnt = @{$attrs->{order_by}} - @{$attrs->{_collapse_order_by}} ) { $sub_attrs->{order_by} = [ - @{$attrs->{order_by}}[ 0 .. ($#{$attrs->{order_by}} - $ord_cnt - 1) ] + @{$attrs->{order_by}}[ 0 .. $ord_cnt - 1] ]; } + # mangle {from} + my $select_root = shift @$from; + my @outer_from = @$from; - # mangle the from, separating it into an outer and inner part - my $self_ident = shift @$from; + my %inner_joins; + my %join_info = map { $_->[0]{-alias} => $_->[0] } (@$from); - # this map indicates which aliases need to be joined if we want - # to join a specific alias - # (e.g. 
join => { cds => 'tracks' } - tracks will need cds too ) - my %join_map = map { $_->[0]{-alias} => $_->[0]{-join_path} } (@$from); + # in complex search_related chains $alias may *not* be 'me' + # so always include it in the inner join, and also shift away + # from the outer stack, so that the two datasets actually do + # meet + if ($select_root->{-alias} ne $alias) { + $inner_joins{$alias} = 1; - my (%inner_joins, %outer_joins); + while (@outer_from && $outer_from[0][0]{-alias} ne $alias) { + shift @outer_from; + } + if (! @outer_from) { + $self->throw_exception ("Unable to find '$alias' in the {from} stack, something is wrong"); + } - # decide which parts of the join will remain + shift @outer_from; # the new subquery will represent this alias, so get rid of it + } + + + # decide which parts of the join will remain on the inside + # + # this is not a very viable optimisation, but it was written + # before I realised this, so might as well remain. We can throw + # away _any_ branches of the join tree that are: + # 1) not mentioned in the condition/order + # 2) left-join leaves (or left-join leaf chains) + # Most of the join conditions will not satisfy this, but for real + # complex queries some might, and we might make some RDBMS happy. # - # resolve the prefetch-needed joins here as well, as the $attr->{prefetch} - # is 1) resolved away 2) unreliable as it may be a result of search_related - # and whatnot # # since we do not have introspectable SQLA, we fall back to ugly # scanning of raw SQL for WHERE, and for pieces of ORDER BY # in order to determine what goes into %inner_joins # It may not be very efficient, but it's a reasonable stop-gap { - # produce stuff unquoted, so it's easier to scan + # produce stuff unquoted, so it can be scanned my $sql_maker = $self->sql_maker; local $sql_maker->{quote_char}; @@ -1348,47 +1531,50 @@ sub _adjust_select_args_for_limited_prefetch { my $where_sql = $sql_maker->where ($where); # sort needed joins - for my $alias (keys %join_map) { + for my $alias (keys %join_info) { # any table alias found on a column name in where or order_by # gets included in %inner_joins # Also any parent joins that are needed to reach this particular alias - # (e.g. join => { cds => 'tracks' } - tracks will bring cds too ) for my $piece ($where_sql, @order_by ) { if ($piece =~ /\b$alias\./) { $inner_joins{$alias} = 1; - $inner_joins{$_} = 1 for @{$join_map{$alias}}; } } + } + } - # any alias found in the select becomes %outer_joins - # the join parents are included in the same manner - for my $sel (@$select) { - if ($sel =~ /^$alias\./) { - $outer_joins{$alias} = 1; - $outer_joins{$_} = 1 for @{$join_map{$alias}}; - } - } + # scan for non-leaf/non-left joins and mark as needed + # also mark all ancestor joins that are needed to reach this particular alias + # (e.g. join => { cds => 'tracks' } - tracks will bring cds too ) + # + # traverse by the size of the -join_path i.e. reverse depth first + for my $alias (sort { @{$join_info{$b}{-join_path}} <=> @{$join_info{$a}{-join_path}} } (keys %join_info) ) { + + my $j = $join_info{$alias}; + $inner_joins{$alias} = 1 if (! 
$j->{-join_type} || ($j->{-join_type} !~ /^left$/i) ); + + if ($inner_joins{$alias}) { + $inner_joins{$_} = 1 for (@{$j->{-join_path}}); } } # construct the inner $from for the subquery - my $inner_from = [ $self_ident ]; - if (keys %inner_joins) { - for my $j (@$from) { - push @$inner_from, $j if $inner_joins{$j->[0]{-alias}}; - } + my $inner_from = [ $select_root ]; + for my $j (@$from) { + push @$inner_from, $j if $inner_joins{$j->[0]{-alias}}; + } - # if a multi-type join was needed in the subquery ("multi" is indicated by - # presence in {collapse}) - add a group_by to simulate the collapse in the subq - for my $alias (keys %inner_joins) { + # if a multi-type join was needed in the subquery ("multi" is indicated by + # presence in {collapse}) - add a group_by to simulate the collapse in the subq - # the dot comes from some weirdness in collapse - # remove after the rewrite - if ($attrs->{collapse}{".$alias"}) { - $sub_attrs->{group_by} = $sub_select; - last; - } + for my $alias (keys %inner_joins) { + + # the dot comes from some weirdness in collapse + # remove after the rewrite + if ($attrs->{collapse}{".$alias"}) { + $sub_attrs->{group_by} ||= $sub_select; + last; } } @@ -1400,17 +1586,14 @@ sub _adjust_select_args_for_limited_prefetch { $sub_attrs ); - # generate the outer $from - my $outer_from = [ { $alias => $subq } ]; - if (keys %outer_joins) { - for my $j (@$from) { - push @$outer_from, $j if $outer_joins{$j->[0]{-alias}}; - } - } + # put it in the new {from} + unshift @outer_from, { $alias => $subq }; - # now _select_args() will continue with the modified set of arguments - # where ended up in the subquery, thus {} - return ($outer_from, $select, {}, $attrs); + # This is totally horrific - the $where ends up in both the inner and outer query + # Unfortunately not much can be done until SQLA2 introspection arrives + # + # OTOH it can be seen as a plus: (notes that this query would make a DBA cry ;) + return (\@outer_from, $select, $where, $attrs); } sub _resolve_ident_sources { @@ -1443,75 +1626,37 @@ sub _resolve_ident_sources { return $alias2source; } -sub count { - my ($self, $source, $attrs) = @_; - - my $tmp_attrs = { %$attrs }; - - # take off any limits, record_filter is cdbi, and no point of ordering a count - delete $tmp_attrs->{$_} for (qw/select as rows offset order_by record_filter/); - - # overwrite the selector - $tmp_attrs->{select} = { count => '*' }; - - my $tmp_rs = $source->resultset_class->new($source, $tmp_attrs); - my ($count) = $tmp_rs->cursor->next; - - # if the offset/rows attributes are still present, we did not use - # a subquery, so we need to make the calculations in software - $count -= $attrs->{offset} if $attrs->{offset}; - $count = $attrs->{rows} if $attrs->{rows} and $attrs->{rows} < $count; - $count = 0 if ($count < 0); - - return $count; -} - -sub count_grouped { - my ($self, $source, $attrs) = @_; - - # copy for the subquery, we need to do some adjustments to it too - my $sub_attrs = { %$attrs }; - - # these can not go in the subquery, and there is no point of ordering it - delete $sub_attrs->{$_} for qw/collapse select as order_by/; - - # if we prefetch, we group_by primary keys only as this is what we would get out of the rs via ->next/->all - # simply deleting group_by suffices, as the code below will re-fill it - # Note: we check $attrs, as $sub_attrs has collapse deleted - if (ref $attrs->{collapse} and keys %{$attrs->{collapse}} ) { - delete $sub_attrs->{group_by}; - } - - $sub_attrs->{group_by} ||= [ map { "$attrs->{alias}.$_" } 
($source->primary_columns) ]; - $sub_attrs->{select} = $self->_grouped_count_select ($source, $sub_attrs); - - $attrs->{from} = [{ - count_subq => $source->resultset_class->new ($source, $sub_attrs )->as_query - }]; - - # the subquery replaces this - delete $attrs->{$_} for qw/where bind collapse group_by having having_bind rows offset/; - - return $self->count ($source, $attrs); +# Returns a counting SELECT for a simple count +# query. Abstracted so that a storage could override +# this to { count => 'firstcol' } or whatever makes +# sense as a performance optimization +sub _count_select { + #my ($self, $source, $rs_attrs) = @_; + return { count => '*' }; } +# Returns a SELECT which will end up in the subselect +# There may or may not be a group_by, as the subquery +# might have been called to accomodate a limit # -# Returns a SELECT to go with a supplied GROUP BY -# (caled by count_grouped so a group_by is present) -# Most databases expect them to match, but some -# choke in various ways. +# Most databases would be happy with whatever ends up +# here, but some choke in various ways. # -sub _grouped_count_select { - my ($self, $source, $rs_args) = @_; - return $rs_args->{group_by}; +sub _subq_count_select { + my ($self, $source, $rs_attrs) = @_; + return $rs_attrs->{group_by} if $rs_attrs->{group_by}; + + my @pcols = map { join '.', $rs_attrs->{alias}, $_ } ($source->primary_columns); + return @pcols ? \@pcols : [ 1 ]; } + sub source_bind_attributes { my ($self, $source) = @_; - + my $bind_attributes; foreach my $column ($source->columns) { - + my $data_type = $source->column_info($column)->{data_type} || ''; $bind_attributes->{$column} = $self->bind_attribute_by_data_type($data_type) if $data_type;
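A hypothetical storage subclass sketch (not part of this patch) of the kind of
override the C<_count_select> comment above has in mind; the package name and
the assumption that counting a single primary column is cheaper than C<*> are
illustrative only:

  package My::Storage::CheapCount;
  use strict;
  use warnings;
  use base 'DBIx::Class::Storage::DBI';

  # count a single primary column instead of '*', falling back to '*'
  # for sources without a primary key
  sub _count_select {
    my ($self, $source, $rs_attrs) = @_;
    my ($pk) = $source->primary_columns;
    return $pk ? { count => $pk } : { count => '*' };
  }

  1;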