diff --git a/lib/DBIx/Class/Storage/DBI.pm b/lib/DBIx/Class/Storage/DBI.pm
index 717177c..436f540 100644
--- a/lib/DBIx/Class/Storage/DBI.pm
+++ b/lib/DBIx/Class/Storage/DBI.pm
@@ -1,10 +1,12 @@
 package DBIx::Class::Storage::DBI;
 # -*- mode: cperl; cperl-indent-level: 2 -*-
 
+use strict;
+use warnings;
+
 use base 'DBIx::Class::Storage';
+use mro 'c3';
 
-use strict;
-use warnings;
 use Carp::Clan qw/^DBIx::Class/;
 use DBI;
 use DBIx::Class::Storage::DBI::Cursor;
@@ -13,14 +15,15 @@ use Scalar::Util();
 use List::Util();
 
 __PACKAGE__->mk_group_accessors('simple' =>
-  qw/_connect_info _dbi_connect_info _dbh _sql_maker _sql_maker_opts
-     _conn_pid _conn_tid transaction_depth _dbh_autocommit savepoints/
+  qw/_connect_info _dbi_connect_info _dbh _sql_maker _sql_maker_opts _conn_pid
+     _conn_tid transaction_depth _dbh_autocommit _driver_determined savepoints/
 );
 
 # the values for these accessors are picked out (and deleted) from
 # the attribute hashref passed to connect_info
 my @storage_options = qw/
-  on_connect_do on_disconnect_do disable_sth_caching unsafe auto_savepoint
+  on_connect_call on_disconnect_call on_connect_do on_disconnect_do
+  disable_sth_caching unsafe auto_savepoint
 /;
 __PACKAGE__->mk_group_accessors('simple' => @storage_options);
 
@@ -89,8 +92,8 @@ recognized by DBIx::Class:
 
 =item *
 
-A single code reference which returns a connected
-L<DBI database handle|DBI/connect> optionally followed by
+A single code reference which returns a connected
+L<DBI database handle|DBI/connect> optionally followed by
 L<extra attributes|/DBIx::Class specific connection attributes> recognized by DBIx::Class:
 
@@ -109,7 +112,13 @@ mixed together:
     %extra_attributes,
   }];
 
-This is particularly useful for L<Catalyst> based applications, allowing the
+  $connect_info_args = [{
+    dbh_maker => sub { DBI->connect (...) },
+    %dbi_attributes,
+    %extra_attributes,
+  }];
+
+This is particularly useful for L<Catalyst> based applications, allowing the
 following config (L<Config::General> style):
 
@@ -122,13 +131,17 @@ following config (L<Config::General> style):
 
   <Model::DB>
     schema_class   App::DB
    <connect_info>
      dsn          dbi:mysql:database=test
      user         testuser
      password     TestPass
      AutoCommit   1
    </connect_info>
   </Model::DB>
 
+The C<dsn>/C<user>/C<password> combination can be substituted by the
+C<dbh_maker> key whose value is a coderef that returns a connected
+L<DBI database handle|DBI/connect>
+
 =back
 
 Please note that the L<DBI> docs recommend that you always explicitly
 set C<AutoCommit> to either I<0> or I<1>. L<DBIx::Class> further
 recommends that it be set to I<1>, and that you perform transactions
 via our L<DBIx::Class::Schema/txn_do> method. L<DBIx::Class> will set it
-to I<1> if you do not do explicitly set it to zero. This is the default
+to I<1> if you do not explicitly set it to zero. This is the default
 for most DBDs. See L</DBIx::Class and AutoCommit> for details.
 
 =head3 DBIx::Class specific connection attributes
@@ -177,12 +190,97 @@ immediately before disconnecting from the database.
 Note, this only runs if you explicitly call L</disconnect> on the storage
 object.
 
+=item on_connect_call
+
+A more generalized form of L</on_connect_do> that calls the specified
+C<connect_call_METHOD> methods in your storage driver.
+
+  on_connect_do => 'select 1'
+
+is equivalent to:
+
+  on_connect_call => [ [ do_sql => 'select 1' ] ]
+
+Its values may contain:
+
+=over
+
+=item a scalar
+
+Will call the C<connect_call_METHOD> method.
+
+=item a code reference
+
+Will execute C<< $code->($storage) >>
+
+=item an array reference
+
+Each value can be a method name or code reference.
+
+=item an array of arrays
+
+For each array, the first item is taken to be the C<connect_call_METHOD>
+method name or code reference, and the rest are parameters to it (see the
+combined example right after this list).
+
+=back
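+
+For example, a sketch mixing the above forms (the SQL string and the coderef
+are hypothetical; C<do_sql> is one of the predefined methods documented
+below):
+
+  on_connect_call => [
+    [ do_sql => 'SET NAMES utf8' ],  # array of arrays: method + arguments
+    sub { my $storage = shift },     # coderef, invoked with the storage object
+  ]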
+
+Some predefined storage methods you may use:
+
+=over
+
+=item do_sql
+
+Executes a SQL string or a code reference that returns a SQL string. This is
+what L</on_connect_do> and L</on_disconnect_do> use.
+
+It can take:
+
+=over
+
+=item a scalar
+
+Will execute the scalar as SQL.
+
+=item an arrayref
+
+Taken to be arguments to L<DBI/do>, the SQL string optionally followed by the
+attributes hashref and bind values.
+
+=item a code reference
+
+Will execute C<< $code->($storage) >> and execute the return array refs as
+above.
+
+=back
+
+=item datetime_setup
+
+Execute any statements necessary to initialize the database session to return
+and accept datetime/timestamp values used with
+L<DBIx::Class::InflateColumn::DateTime>.
+
+Only necessary for some databases, see your specific storage driver for
+implementation details.
+
+=back
+
+=item on_disconnect_call
+
+Takes arguments in the same form as L</on_connect_call> and executes them
+immediately before disconnecting from the database.
+
+Calls the C<disconnect_call_METHOD> methods as opposed to the
+C<connect_call_METHOD> methods called by L</on_connect_call>.
+
+Note, this only runs if you explicitly call L</disconnect> on the
+storage object.
+
 =item disable_sth_caching
 
 If set to a true value, this option will disable the caching of
 statement handles via L<DBI/prepare_cached>.
 
-=item limit_dialect
+=item limit_dialect
 
 Sets the limit dialect. This is useful for JDBC-bridge among others
 where the remote SQL-dialect cannot be determined by the name of the
@@ -190,7 +288,7 @@ driver alone. See also L<SQL::Abstract::Limit>.
 
 =item quote_char
 
-Specifies what characters to use to quote table and column names. If
+Specifies what characters to use to quote table and column names. If
 you use this you will want to specify L</name_sep> as well.
 
 C<quote_char> expects either a single character, in which case it is
@@ -202,8 +300,8 @@ SQL Server you should use C<< quote_char => [qw/[ ]/] >>.
 
 =item name_sep
 
-This only needs to be used in conjunction with C<quote_char>, and is used to
-specify the charecter that seperates elements (schemas, tables, columns) from
+This only needs to be used in conjunction with C<quote_char>, and is used to
+specify the character that separates elements (schemas, tables, columns) from
 each other. In most cases this is simply a C<.>.
 
 The consequences of not supplying this value is that L<SQL::Abstract>
@@ -249,6 +347,12 @@ L<DBIx::Class::Schema/connect>
   # Connect via subref
   ->connect_info([ sub { DBI->connect(...) } ]);
 
+  # Connect via subref in hashref
+  ->connect_info([{
+    dbh_maker => sub { DBI->connect(...) },
+    on_connect_do => 'alter session ...',
+  }]);
+
   # A bit more complicated
   ->connect_info(
     [
@@ -319,8 +423,21 @@ sub connect_info {
   elsif (ref $args[0] eq 'HASH') { # single hashref (i.e. Catalyst config)
     %attrs = %{$args[0]};
     @args = ();
-    for (qw/password user dsn/) {
-      unshift @args, delete $attrs{$_};
+    if (my $code = delete $attrs{dbh_maker}) {
+      @args = $code;
+
+      my @ignored = grep { delete $attrs{$_} } (qw/dsn user password/);
+      if (@ignored) {
+        carp sprintf (
+          'Attribute(s) %s in connect_info were ignored, as they can not be applied '
+          . "to the result of 'dbh_maker'",
+
+          join (', ', map { "'$_'" } (@ignored) ),
+        );
+      }
+    }
+    else {
+      @args = delete @attrs{qw/dsn user password/};
     }
   }
   else { # otherwise assume dsn/user/password + \%attrs + \%extra_attrs
@@ -349,16 +466,60 @@ sub connect_info {
     }
   }
 
-  %attrs = () if (ref $args[0] eq 'CODE'); # _connect() never looks past $args[0] in this case
+  if (ref $args[0] eq 'CODE') {
+    # _connect() never looks past $args[0] in this case
+    %attrs = ()
+  } else {
+    %attrs = (
+      %{ $self->_default_dbi_connect_attributes || {} },
+      %attrs,
+    );
+  }
 
   $self->_dbi_connect_info([@args, keys %attrs ? \%attrs : ()]);
 
   $self->_connect_info;
 }
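 
+# Sketch (hypothetical subclass; with the c3 mro set above, a driver could
+# merge its own defaults over the ones returned below):
+#
+#   sub _default_dbi_connect_attributes {
+#     return { %{ shift->next::method(@_) }, PrintError => 1 };
+#   }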
+sub _default_dbi_connect_attributes {
+  return {
+    AutoCommit => 1,
+    RaiseError => 1,
+    PrintError => 0,
+  };
+}
+
 =head2 on_connect_do
 
 This method is deprecated in favour of setting via L</connect_info>.
 
+=cut
+
+=head2 on_disconnect_do
+
+This method is deprecated in favour of setting via L</connect_info>.
+
+=cut
+
+sub _parse_connect_do {
+  my ($self, $type) = @_;
+
+  my $val = $self->$type;
+  return () if not defined $val;
+
+  my @res;
+
+  if (not ref($val)) {
+    push @res, [ 'do_sql', $val ];
+  } elsif (ref($val) eq 'CODE') {
+    push @res, $val;
+  } elsif (ref($val) eq 'ARRAY') {
+    push @res, map { [ 'do_sql', $_ ] } @$val;
+  } else {
+    $self->throw_exception("Invalid type for $type: ".ref($val));
+  }
+
+  return \@res;
+}
 
 =head2 dbh_do
 
@@ -423,6 +584,7 @@ sub dbh_do {
     }
   };
 
+  # ->connected might unset $@ - copy
   my $exception = $@;
   if(!$exception) { return $want_array ? @result : $result[0] }
 
@@ -430,6 +592,8 @@ sub dbh_do {
 
   # We were not connected - reconnect and retry, but let any
   #  exception fall right through this time
+  carp "Retrying $code after catching disconnected exception: $exception"
+    if $ENV{DBIC_DBIRETRY_DEBUG};
   $self->_populate_dbh;
   $self->$code($self->_dbh, @_);
 }
@@ -470,10 +634,11 @@ sub txn_do {
     $self->txn_commit;
   };
 
+  # ->connected might unset $@ - copy
   my $exception = $@;
   if(!$exception) { return $want_array ? @result : $result[0] }
 
-  if($tried++ > 0 || $self->connected) {
+  if($tried++ || $self->connected) {
     eval { $self->txn_rollback };
     my $rollback_exception = $@;
     if($rollback_exception) {
@@ -491,6 +656,8 @@ sub txn_do {
 
     # We were not connected, and was first try - reconnect and retry
     # via the while loop
+    carp "Retrying $coderef after catching disconnected exception: $exception"
+      if $ENV{DBIC_DBIRETRY_DEBUG};
     $self->_populate_dbh;
   }
 }
@@ -505,9 +672,13 @@ database is not in C<AutoCommit> mode.
 
 sub disconnect {
   my ($self) = @_;
 
-  if( $self->connected ) {
-    my $connection_do = $self->on_disconnect_do;
-    $self->_do_connection_actions($connection_do) if ref($connection_do);
+  if( $self->_dbh ) {
+    my @actions;
+
+    push @actions, ( $self->on_disconnect_call || () );
+    push @actions, $self->_parse_connect_do ('on_disconnect_do');
+
+    $self->_do_connection_actions(disconnect_call_ => $_) for @actions;
 
     $self->_dbh->rollback unless $self->_dbh_autocommit;
     $self->_dbh->disconnect;
@@ -538,23 +709,57 @@ sub with_deferred_fk_checks {
   $sub->();
 }
 
+=head2 connected
+
+=over
+
+=item Arguments: none
+
+=item Return Value: 1|0
+
+=back
+
+Verifies that the current database handle is active and ready to execute
+an SQL statement (i.e. the connection did not get stale, server is still
+answering, etc.) This method is used internally by L</dbh>.
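+
+For example (a minimal usage sketch):
+
+  $schema->storage->ensure_connected
+    unless $schema->storage->connected;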
+
+=cut
+
 sub connected {
-  my ($self) = @_;
+  my $self = shift;
+  return 0 unless $self->_seems_connected;
 
-  if(my $dbh = $self->_dbh) {
-      if(defined $self->_conn_tid && $self->_conn_tid != threads->tid) {
-          $self->_dbh(undef);
-          $self->{_dbh_gen}++;
-          return;
-      }
-      else {
-          $self->_verify_pid;
-          return 0 if !$self->_dbh;
-      }
-      return ($dbh->FETCH('Active') && $dbh->ping);
+  #be on the safe side
+  local $self->_dbh->{RaiseError} = 1;
+
+  return $self->_ping;
+}
+
+sub _seems_connected {
+  my $self = shift;
+
+  my $dbh = $self->_dbh
+    or return 0;
+
+  if(defined $self->_conn_tid && $self->_conn_tid != threads->tid) {
+    $self->_dbh(undef);
+    $self->{_dbh_gen}++;
+    return 0;
   }
+  else {
+    $self->_verify_pid;
+    return 0 if !$self->_dbh;
+  }
+
+  return $dbh->FETCH('Active');
+}
+
+sub _ping {
+  my $self = shift;
+
+  my $dbh = $self->_dbh or return 0;
 
-  return 0;
+  return $dbh->ping;
 }
 
 # handle pid changes correctly
@@ -581,21 +786,41 @@ sub ensure_connected {
 
 =head2 dbh
 
-Returns the dbh - a database handle of class L<DBI>.
+Returns a C<$dbh> - a database handle of class L<DBI>. The returned handle
+is guaranteed to be healthy by implicitly calling L</connected>, and if
+necessary performing a reconnection before returning. Keep in mind that this
+is very B<expensive> on some database engines. Consider using L</dbh_do>
+instead.
 
 =cut
 
 sub dbh {
   my ($self) = @_;
 
-  $self->ensure_connected;
+  if (not $self->_dbh) {
+    $self->_populate_dbh;
+  } else {
+    $self->ensure_connected;
+  }
   return $self->_dbh;
 }
+
+# this is the internal "get dbh or connect (don't check)" method
+sub _get_dbh {
+  my $self = shift;
+  $self->_populate_dbh unless $self->_dbh;
+  return $self->_dbh;
+}
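+
+# Contrast sketch: both calls below yield a live $dbh, but only the public
+# one pays for the liveness check -
+#
+#   my $dbh = $storage->_get_dbh;  # connect if missing, no ping
+#   my $dbh = $storage->dbh;       # also pings and reconnects if stale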
 
 sub _sql_maker_args {
   my ($self) = @_;
 
-  return ( bindtype=>'columns', array_datatypes => 1, limit_dialect => $self->dbh, %{$self->_sql_maker_opts} );
+  return (
+    bindtype=>'columns',
+    array_datatypes => 1,
+    limit_dialect => $self->_get_dbh,
+    %{$self->_sql_maker_opts}
+  );
 }
 
 sub sql_maker {
@@ -612,7 +837,9 @@ sub _rebless {}
 
 sub _populate_dbh {
   my ($self) = @_;
+
   my @info = @{$self->_dbi_connect_info || []};
+  $self->_dbh(undef); # in case ->connected failed we might get sent here
   $self->_dbh($self->_connect(@info));
 
   $self->_conn_pid($$);
@@ -624,51 +851,96 @@ sub _populate_dbh {
 
   # there is no transaction in progress by definition
   $self->{transaction_depth} = $self->_dbh_autocommit ? 0 : 1;
 
-  my $connection_do = $self->on_connect_do;
-  $self->_do_connection_actions($connection_do) if $connection_do;
+  $self->_run_connection_actions unless $self->{_in_determine_driver};
+}
+
+sub _run_connection_actions {
+  my $self = shift;
+  my @actions;
+
+  push @actions, ( $self->on_connect_call || () );
+  push @actions, $self->_parse_connect_do ('on_connect_do');
+
+  $self->_do_connection_actions(connect_call_ => $_) for @actions;
 }
 
 sub _determine_driver {
   my ($self) = @_;
 
-  if (ref $self eq 'DBIx::Class::Storage::DBI') {
-    my $driver;
+  if ((not $self->_driver_determined) && (not $self->{_in_determine_driver})) {
+    my $started_unconnected = 0;
+    local $self->{_in_determine_driver} = 1;
+
+    if (ref($self) eq __PACKAGE__) {
+      my $driver;
+      if ($self->_dbh) { # we are connected
+        $driver = $self->_dbh->{Driver}{Name};
+      } else {
+        # if connect_info is a CODEREF, we have no choice but to connect
+        if (ref $self->_dbi_connect_info->[0] &&
+            Scalar::Util::reftype($self->_dbi_connect_info->[0]) eq 'CODE') {
+          $self->_populate_dbh;
+          $driver = $self->_dbh->{Driver}{Name};
+        }
+        else {
+          # try to use dsn to not require being connected, the driver may still
+          # force a connection in _rebless to determine version
+          ($driver) = $self->_dbi_connect_info->[0] =~ /dbi:([^:]+):/i;
+          $started_unconnected = 1;
+        }
+      }
 
-    if ($self->_dbh) { # we are connected
-      $driver = $self->_dbh->{Driver}{Name};
-    } else {
-      # try to use dsn to not require being connected, the driver may still
-      # force a connection in _rebless to determine version
-      ($driver) = $self->_dbi_connect_info->[0] =~ /dbi:([^:]+):/i;
+      my $storage_class = "DBIx::Class::Storage::DBI::${driver}";
+      if ($self->load_optional_class($storage_class)) {
+        mro::set_mro($storage_class, 'c3');
+        bless $self, $storage_class;
+        $self->_rebless();
+      }
     }
 
-    if ($self->load_optional_class("DBIx::Class::Storage::DBI::${driver}")) {
-      bless $self, "DBIx::Class::Storage::DBI::${driver}";
-      $self->_rebless();
-    }
+    $self->_driver_determined(1);
+
+    $self->_run_connection_actions
        if $started_unconnected && defined $self->_dbh;
   }
 }
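 
 # Dispatch sketch (assuming a MySQL DSN like 'dbi:mysql:test'): after the
 # call below the object is typically reblessed into the driver subclass -
 #
 #   $storage->_determine_driver;
 #   ref $storage;  # e.g. 'DBIx::Class::Storage::DBI::mysql'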
 
 sub _do_connection_actions {
-  my $self = shift;
-  my $connection_do = shift;
-
-  if (!ref $connection_do) {
-    $self->_do_query($connection_do);
-  }
-  elsif (ref $connection_do eq 'ARRAY') {
-    $self->_do_query($_) foreach @$connection_do;
-  }
-  elsif (ref $connection_do eq 'CODE') {
-    $connection_do->($self);
-  }
-  else {
-    $self->throw_exception (sprintf ("Don't know how to process conection actions of type '%s'", ref $connection_do) );
+  my $self          = shift;
+  my $method_prefix = shift;
+  my $call          = shift;
+
+  if (not ref($call)) {
+    my $method = $method_prefix . $call;
+    $self->$method(@_);
+  } elsif (ref($call) eq 'CODE') {
+    $self->$call(@_);
+  } elsif (ref($call) eq 'ARRAY') {
+    if (ref($call->[0]) ne 'ARRAY') {
+      $self->_do_connection_actions($method_prefix, $_) for @$call;
+    } else {
+      $self->_do_connection_actions($method_prefix, @$_) for @$call;
+    }
+  } else {
+    $self->throw_exception (sprintf ("Don't know how to process connection actions of type '%s'", ref($call)) );
   }
 
   return $self;
 }
 
+sub connect_call_do_sql {
+  my $self = shift;
+  $self->_do_query(@_);
+}
+
+sub disconnect_call_do_sql {
+  my $self = shift;
+  $self->_do_query(@_);
+}
+
+# override in db-specific backend when necessary
+sub connect_call_datetime_setup { 1 }
+
 sub _do_query {
   my ($self, $action) = @_;
 
@@ -753,11 +1025,11 @@ sub svp_begin {
 
   $self->throw_exception ("Your Storage implementation doesn't support savepoints")
     unless $self->can('_svp_begin');
-
+
   push @{ $self->{savepoints} }, $name;
 
   $self->debugobj->svp_begin($name) if $self->debug;
-
+
   return $self->_svp_begin($name);
 }
 
@@ -817,7 +1089,7 @@ sub svp_rollback {
   }
 
   $self->debugobj->svp_rollback($name) if $self->debug;
-
+
   return $self->_svp_rollback($name);
 }
 
@@ -829,14 +1101,17 @@ sub _svp_generate_name {
 
 sub txn_begin {
   my $self = shift;
-  $self->ensure_connected();
   if($self->{transaction_depth} == 0) {
     $self->debugobj->txn_begin()
       if $self->debug;
-    # this isn't ->_dbh-> because
-    #  we should reconnect on begin_work
-    #  for AutoCommit users
-    $self->dbh->begin_work;
+
+    # being here implies we have AutoCommit => 1
+    # if the user is utilizing txn_do - good for
+    # him, otherwise we need to ensure that the
+    # $dbh is healthy on BEGIN
+    my $dbh_method = $self->{_in_dbh_do} ? '_dbh' : 'dbh';
+    $self->$dbh_method->begin_work;
+
   } elsif ($self->auto_savepoint) {
     $self->svp_begin;
   }
@@ -955,7 +1230,7 @@ sub _dbh_execute {
 
   my $sth = $self->sth($sql,$op);
 
-  my $placeholder_index = 1;
+  my $placeholder_index = 1;
 
   foreach my $bound (@$bind) {
     my $attributes = {};
@@ -986,24 +1261,33 @@ sub _dbh_execute {
 
 sub _execute {
     my $self = shift;
-    $self->dbh_do('_dbh_execute', @_)
+    $self->dbh_do('_dbh_execute', @_);  # retry over disconnects
 }
 
 sub insert {
   my ($self, $source, $to_insert) = @_;
 
+# redispatch to insert method of storage we reblessed into, if necessary
+  if (not $self->_driver_determined) {
+    $self->_determine_driver;
+    goto $self->can('insert');
+  }
+
   my $ident = $source->from;
   my $bind_attributes = $self->source_bind_attributes($source);
 
   my $updated_cols = {};
 
-  $self->ensure_connected;
   foreach my $col ( $source->columns ) {
     if ( !defined $to_insert->{$col} ) {
      my $col_info = $source->column_info($col);
 
      if ( $col_info->{auto_nextval} ) {
-        $updated_cols->{$col} = $to_insert->{$col} = $self->_sequence_fetch( 'nextval', $col_info->{sequence} || $self->_dbh_get_autoinc_seq($self->dbh, $source) );
+        $updated_cols->{$col} = $to_insert->{$col} = $self->_sequence_fetch(
+          'nextval',
+          $col_info->{sequence} ||
+            $self->_dbh_get_autoinc_seq($self->_get_dbh, $source)
+        );
      }
    }
  }
@@ -1014,7 +1298,7 @@ sub insert {
 }
 
 ## Still not quite perfect, and EXPERIMENTAL
-## Currently it is assumed that all values passed will be "normal", i.e. not
+## Currently it is assumed that all values passed will be "normal", i.e. not
 ## scalar refs, or at least, all the same type as the first set, the statement is
 ## only prepped once.
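## Usage sketch (hypothetical resultset and data; populate() called in void
## context is the public path that ends up here):
##
##   $schema->resultset('Artist')->populate([
##     [ qw/artistid name/ ],
##     [ 1, 'Mars Tango' ],
##     [ 2, 'Red Shift'  ],
##   ]);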
 sub insert_bulk {
   my ($self, $source, $cols, $data) = @_;
   my %colvalues;
   my $table = $source->from;
   @colvalues{@$cols} = (0..$#$cols);
   my ($sql, @bind) = $self->sql_maker->insert($table, \%colvalues);
-
+
+  $self->_determine_driver;
+
   $self->_query_start( $sql, @bind );
   my $sth = $self->sth($sql);
 
@@ -1036,7 +1322,7 @@ sub insert_bulk {
   my $bind_attributes = $self->source_bind_attributes($source);
 
   ## Bind the values and execute
-  my $placeholder_index = 1;
+  my $placeholder_index = 1;
 
   foreach my $bound (@bind) {
 
@@ -1083,8 +1369,9 @@ sub insert_bulk {
 sub update {
   my $self = shift @_;
   my $source = shift @_;
+  $self->_determine_driver;
   my $bind_attributes = $self->source_bind_attributes($source);
-
+
   return $self->_execute('update' => [], $source, $bind_attributes, @_);
 }
 
@@ -1092,9 +1379,9 @@ sub update {
 sub delete {
   my $self = shift @_;
   my $source = shift @_;
-
+  $self->_determine_driver;
   my $bind_attrs = $self->source_bind_attributes($source);
-
+
   return $self->_execute('delete' => [], $source, $bind_attrs, @_);
 }
 
@@ -1191,45 +1478,59 @@ sub _per_row_update_delete {
 
 sub _select {
   my $self = shift;
+
+  # localization is necessary as
+  # 1) there is no infrastructure to pass this around before SQLA2
+  # 2) _select_args sets it and _prep_for_execute consumes it
   my $sql_maker = $self->sql_maker;
-  local $sql_maker->{for};
+  local $sql_maker->{_dbic_rs_attrs};
+
   return $self->_execute($self->_select_args(@_));
 }
 
 sub _select_args_to_query {
   my $self = shift;
 
+  # localization is necessary as
+  # 1) there is no infrastructure to pass this around before SQLA2
+  # 2) _select_args sets it and _prep_for_execute consumes it
   my $sql_maker = $self->sql_maker;
-  local $sql_maker->{for};
+  local $sql_maker->{_dbic_rs_attrs};
 
-  # my ($op, $bind, $ident, $bind_attrs, $select, $cond, $order, $rows, $offset)
+  # my ($op, $bind, $ident, $bind_attrs, $select, $cond, $order, $rows, $offset)
   #  = $self->_select_args($ident, $select, $cond, $attrs);
   my ($op, $bind, $ident, $bind_attrs, @args) =
     $self->_select_args(@_);
 
-  # my ($sql, $bind) = $self->_prep_for_execute($op, $bind, $ident, [ $select, $cond, $order, $rows, $offset ]);
+  # my ($sql, $prepared_bind) = $self->_prep_for_execute($op, $bind, $ident, [ $select, $cond, $order, $rows, $offset ]);
   my ($sql, $prepared_bind) = $self->_prep_for_execute($op, $bind, $ident, \@args);
+  $prepared_bind ||= [];
 
-  return \[ "($sql)", @{ $prepared_bind || [] }];
+  return wantarray
+    ? ($sql, $prepared_bind, $bind_attrs)
+    : \[ "($sql)", @$prepared_bind ]
+  ;
 }
 
 sub _select_args {
-  my ($self, $ident, $select, $condition, $attrs) = @_;
+  my ($self, $ident, $select, $where, $attrs) = @_;
 
-  my $for = delete $attrs->{for};
-  my $sql_maker = $self->sql_maker;
-  $sql_maker->{for} = $for;
+  my ($alias2source, $rs_alias) = $self->_resolve_ident_sources ($ident);
 
-  my $order = { map
-    { $attrs->{$_} ? ( $_ => $attrs->{$_} ) : ()  }
-    (qw/order_by group_by having _virtual_order_by/ )
+  my $sql_maker = $self->sql_maker;
+  $sql_maker->{_dbic_rs_attrs} = {
+    %$attrs,
+    select => $select,
+    from => $ident,
+    where => $where,
+    $rs_alias
+      ? ( _source_handle => $alias2source->{$rs_alias}->handle )
+      : ()
+    ,
+  };
 
+  # calculate bind_attrs before possible $ident mangling
   my $bind_attrs = {};
-
-  my $alias2source = $self->_resolve_ident_sources ($ident);
-
   for my $alias (keys %$alias2source) {
     my $bindtypes = $self->source_bind_attributes ($alias2source->{$alias}) || {};
     for my $col (keys %$bindtypes) {
@@ -1237,44 +1538,322 @@ sub _select_args {
       my $fqcn = join ('.', $alias, $col);
       $bind_attrs->{$fqcn} = $bindtypes->{$col} if $bindtypes->{$col};
 
-      # so that unqualified searches can be bound too
-      $bind_attrs->{$col} = $bind_attrs->{$fqcn} if $alias eq 'me';
+      # Unqualified column names are nice, but at the same time can be
+      # rather ambiguous. What we do here is basically go along with
+      # the loop, adding an unqualified column slot to $bind_attrs,
+      # alongside the fully qualified name. As soon as we encounter
+      # another column by that name (which would imply another table)
+      # we unset the unqualified slot and never add any info to it
+      # to avoid erroneous type binding. If this happens the user's
+      # only choice will be to fully qualify the column name
+
+      if (exists $bind_attrs->{$col}) {
+        $bind_attrs->{$col} = {};
+      }
+      else {
+        $bind_attrs->{$col} = $bind_attrs->{$fqcn};
+      }
     }
   }
 
+  # adjust limits
+  if (
+    $attrs->{software_limit}
+      ||
+    $sql_maker->_default_limit_syntax eq "GenericSubQ"
+  ) {
+    $attrs->{software_limit} = 1;
+  }
+  else {
+    $self->throw_exception("rows attribute must be positive if present")
+      if (defined($attrs->{rows}) && !($attrs->{rows} > 0));
+
+    # MySQL actually recommends this approach.  I cringe.
+    $attrs->{rows} = 2**48 if not defined $attrs->{rows} and defined $attrs->{offset};
+  }
+
+  my @limit;
+
+  # see if we need to tear the prefetch apart (either limited has_many or grouped prefetch)
+  # otherwise delegate the limiting to the storage, unless software limit was requested
+  if (
+    ( $attrs->{rows} && keys %{$attrs->{collapse}} )
+       ||
+    ( $attrs->{group_by} && @{$attrs->{group_by}} &&
+      $attrs->{_prefetch_select} && @{$attrs->{_prefetch_select}} )
+  ) {
+    ($ident, $select, $where, $attrs)
+      = $self->_adjust_select_args_for_complex_prefetch ($ident, $select, $where, $attrs);
+  }
+  elsif (! $attrs->{software_limit} ) {
+    push @limit, $attrs->{rows}, $attrs->{offset};
+  }
+
+###
   # This would be the point to deflate anything found in $where
   # (and leave $attrs->{bind} intact). Problem is - inflators historically
   # expect a row object. And all we have is a resultsource (it is trivial
   # to extract deflator coderefs via $alias2source above).
   #
   # I don't see a way forward other than changing the way deflators are
   # invoked, and that's just bad...
+###
 
-  my @args = ('select', $attrs->{bind}, $ident, $bind_attrs, $select, $condition, $order);
-  if ($attrs->{software_limit} ||
-      $sql_maker->_default_limit_syntax eq "GenericSubQ") {
-        $attrs->{software_limit} = 1;
-  } else {
+  my $order = { map
+    { $attrs->{$_} ? ( $_ => $attrs->{$_} ) : ()  }
+    (qw/order_by group_by having/ )
+  };
-    $self->throw_exception("rows attribute must be positive if present")
-      if (defined($attrs->{rows}) && !($attrs->{rows} > 0));
 
-    # MySQL actually recommends this approach.  I cringe.
-    $attrs->{rows} = 2**48 if not defined $attrs->{rows} and defined $attrs->{offset};
-    push @args, $attrs->{rows}, $attrs->{offset};
+  return ('select', $attrs->{bind}, $ident, $bind_attrs, $select, $where, $order, @limit);
+}
+
+#
+# This is the code producing joined subqueries like:
+# SELECT me.*, other.* FROM ( SELECT me.* FROM ... ) JOIN other ON ...
+#
+sub _adjust_select_args_for_complex_prefetch {
+  my ($self, $from, $select, $where, $attrs) = @_;
+
+  $self->throw_exception ('Nothing to prefetch... how did we get here?!')
+    if not @{$attrs->{_prefetch_select}};
+
+  $self->throw_exception ('Complex prefetches are not supported on resultsets with a custom from attribute')
+    if (ref $from ne 'ARRAY' || ref $from->[0] ne 'HASH' || ref $from->[1] ne 'ARRAY');
+
+
+  # generate inner/outer attribute lists, remove stuff that doesn't apply
+  my $outer_attrs = { %$attrs };
+  delete $outer_attrs->{$_} for qw/where bind rows offset group_by having/;
+
+  my $inner_attrs = { %$attrs };
+  delete $inner_attrs->{$_} for qw/for collapse _prefetch_select _collapse_order_by select as/;
+
+
+  # bring over all non-collapse-induced order_by into the inner query (if any)
+  # the outer one will have to keep them all
+  delete $inner_attrs->{order_by};
+  if (my $ord_cnt = @{$outer_attrs->{order_by}} - @{$outer_attrs->{_collapse_order_by}} ) {
+    $inner_attrs->{order_by} = [
+      @{$outer_attrs->{order_by}}[ 0 .. $ord_cnt - 1]
+    ];
+  }
+
+
+  # generate the inner/outer select lists
+  # for inside we consider only stuff *not* brought in by the prefetch
+  # on the outside we substitute any function for its alias
+  my $outer_select = [ @$select ];
+  my $inner_select = [];
+  for my $i (0 .. ( @$outer_select - @{$outer_attrs->{_prefetch_select}} - 1) ) {
+    my $sel = $outer_select->[$i];
+
+    if (ref $sel eq 'HASH' ) {
+      $sel->{-as} ||= $attrs->{as}[$i];
+      $outer_select->[$i] = join ('.', $attrs->{alias}, ($sel->{-as} || "inner_column_$i") );
+    }
+
+    push @$inner_select, $sel;
+  }
+
+  # normalize a copy of $from, so it will be easier to work with further
+  # down (i.e. promote the initial hashref to an AoH)
+  $from = [ @$from ];
+  $from->[0] = [ $from->[0] ];
+  my %original_join_info = map { $_->[0]{-alias} => $_->[0] } (@$from);
+
+
+  # decide which parts of the join will remain in either part of
+  # the outer/inner query
+
+  # First we compose a list of which aliases are used in restrictions
+  # (i.e. conditions/order/grouping/etc). Since we do not have
+  # introspectable SQLA, we fall back to ugly scanning of raw SQL for
+  # WHERE, and for pieces of ORDER BY in order to determine which aliases
+  # need to appear in the resulting sql.
+  # It may not be very efficient, but it's a reasonable stop-gap
+  # Also unqualified column names will not be considered, but more often
+  # than not this is actually ok
+  #
+  # In the same loop we enumerate part of the selection aliases, as
+  # it requires the same sqla hack for the time being
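+  #
+  # Scan sketch: with the default name_sep '.' and a hypothetical alias
+  # 'tracks', the loop below simply matches qr/\btracks\./ against the
+  # unquoted WHERE / GROUP BY / ORDER BY text rendered by sql_maker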
+  my ($restrict_aliases, $select_aliases, $prefetch_aliases);
+  {
+    # produce stuff unquoted, so it can be scanned
+    my $sql_maker = $self->sql_maker;
+    local $sql_maker->{quote_char};
+    my $sep = $self->_sql_maker_opts->{name_sep} || '.';
+    $sep = "\Q$sep\E";
+
+    my $non_prefetch_select_sql = $sql_maker->_recurse_fields ($inner_select);
+    my $prefetch_select_sql = $sql_maker->_recurse_fields ($outer_attrs->{_prefetch_select});
+    my $where_sql = $sql_maker->where ($where);
+    my $group_by_sql = $sql_maker->_order_by({
+      map { $_ => $inner_attrs->{$_} } qw/group_by having/
+    });
+    my @non_prefetch_order_by_chunks = (map
+      { ref $_ ? $_->[0] : $_ }
+      $sql_maker->_order_by_chunks ($inner_attrs->{order_by})
+    );
+
+
+    for my $alias (keys %original_join_info) {
+      my $seen_re = qr/\b $alias $sep/x;
+
+      for my $piece ($where_sql, $group_by_sql, @non_prefetch_order_by_chunks ) {
+        if ($piece =~ $seen_re) {
+          $restrict_aliases->{$alias} = 1;
+        }
+      }
+
+      if ($non_prefetch_select_sql =~ $seen_re) {
+          $select_aliases->{$alias} = 1;
+      }
+
+      if ($prefetch_select_sql =~ $seen_re) {
+          $prefetch_aliases->{$alias} = 1;
+      }
+
+    }
+  }
+
+  # Add any non-left joins to the restriction list (such joins are indeed restrictions)
+  for my $j (values %original_join_info) {
+    my $alias = $j->{-alias} or next;
+    $restrict_aliases->{$alias} = 1 if (
+      (not $j->{-join_type})
+        or
+      ($j->{-join_type} !~ /^left (?: \s+ outer)? $/xi)
+    );
+  }
+
+  # mark all join parents as mentioned
+  # (e.g. join => { cds => 'tracks' } - tracks will need to bring cds too )
+  for my $collection ($restrict_aliases, $select_aliases) {
+    for my $alias (keys %$collection) {
+      $collection->{$_} = 1
+        for (@{ $original_join_info{$alias}{-join_path} || [] });
+    }
+  }
+
+  # construct the inner $from for the subquery
+  my %inner_joins = (map { %{$_ || {}} } ($restrict_aliases, $select_aliases) );
+  my @inner_from;
+  for my $j (@$from) {
+    push @inner_from, $j if $inner_joins{$j->[0]{-alias}};
+  }
+
+  # if a multi-type join was needed in the subquery ("multi" is indicated by
+  # presence in {collapse}) - add a group_by to simulate the collapse in the subq
+  unless ($inner_attrs->{group_by}) {
+    for my $alias (keys %inner_joins) {
+
+      # the dot comes from some weirdness in collapse
+      # remove after the rewrite
+      if ($attrs->{collapse}{".$alias"}) {
+        $inner_attrs->{group_by} ||= $inner_select;
+        last;
+      }
+    }
+  }
+
+  # demote the inner_from head
+  $inner_from[0] = $inner_from[0][0];
+
+  # generate the subquery
+  my $subq = $self->_select_args_to_query (
+    \@inner_from,
+    $inner_select,
+    $where,
+    $inner_attrs,
+  );
+
+  my $subq_joinspec = {
+    -alias => $attrs->{alias},
+    -source_handle => $inner_from[0]{-source_handle},
+    $attrs->{alias} => $subq,
+  };
+
+  # Generate the outer from - this is relatively easy (really just replace
+  # the join slot with the subquery), with a major caveat - we can not
+  # join anything that is non-selecting (not part of the prefetch), but at
+  # the same time is a multi-type relationship, as it will explode the result.
+  #
+  # There are two possibilities here
+  # - either the join is non-restricting, in which case we simply throw it away
+  # - it is part of the restrictions, in which case we need to collapse the outer
+  #   result by tacking yet another group_by to the outside of the query
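+  #
+  # Failure sketch the group_by guards against: keeping a restricting
+  # has_many join (a hypothetical 'cds') in the outer query without a
+  # group_by would repeat each root row once per matching related row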
+
+  # so first generate the outer_from, up to the substitution point
+  my @outer_from;
+  while (my $j = shift @$from) {
+    if ($j->[0]{-alias} eq $attrs->{alias}) { # time to swap
+      push @outer_from, [
+        $subq_joinspec,
+        @{$j}[1 .. $#$j],
+      ];
+      last; # we'll take care of what's left in $from below
+    }
+    else {
+      push @outer_from, $j;
+    }
+  }
+
+  # see what's left - throw away if not selecting/restricting
+  # also throw in a group_by if restricting to guard against
+  # cross-join explosions
+  #
+  while (my $j = shift @$from) {
+    my $alias = $j->[0]{-alias};
+
+    if ($select_aliases->{$alias} || $prefetch_aliases->{$alias}) {
+      push @outer_from, $j;
+    }
+    elsif ($restrict_aliases->{$alias}) {
+      push @outer_from, $j;
+
+      # FIXME - this should be obviated by SQLA2, as I'll be able to
+      # have restrict_inner and restrict_outer... or something to that
+      # effect... I think...
+
+      # FIXME2 - I can't find a clean way to determine if a particular join
+      # is a multi - instead I am just treating everything as a potential
+      # explosive join (ribasushi)
+      #
+      # if (my $handle = $j->[0]{-source_handle}) {
+      #   my $rsrc = $handle->resolve;
+      #   ... need to bail out of the following if this is not a multi,
+      #       as it will be much easier on the db ...
+
+      $outer_attrs->{group_by} ||= $outer_select;
+      # }
+    }
+  }
+
+  # demote the outer_from head
+  $outer_from[0] = $outer_from[0][0];
+
+  # This is totally horrific - the $where ends up in both the inner and outer query
+  # Unfortunately not much can be done until SQLA2 introspection arrives, and even
+  # then if where conditions apply to the *right* side of the prefetch, you may have
+  # to both filter the inner select (e.g. to apply a limit) and then have to re-filter
+  # the outer select to exclude joins you didn't want in the first place
+  #
+  # OTOH it can be seen as a plus: (notes that this query would make a DBA cry ;)
+  return (\@outer_from, $outer_select, $where, $outer_attrs);
+}
 
 sub _resolve_ident_sources {
   my ($self, $ident) = @_;
 
   my $alias2source = {};
+  my $rs_alias;
 
   # the reason this is so contrived is that $ident may be a {from}
   # structure, specifying multiple tables to join
@@ -1282,88 +1861,101 @@ sub _resolve_ident_sources {
   if ( Scalar::Util::blessed($ident) && $ident->isa("DBIx::Class::ResultSource") ) {
     # this is compat mode for insert/update/delete which do not deal with aliases
     $alias2source->{me} = $ident;
+    $rs_alias = 'me';
   }
   elsif (ref $ident eq 'ARRAY') {
 
     for (@$ident) {
       my $tabinfo;
       if (ref $_ eq 'HASH') {
         $tabinfo = $_;
+        $rs_alias = $tabinfo->{-alias};
       }
       if (ref $_ eq 'ARRAY' and ref $_->[0] eq 'HASH') {
         $tabinfo = $_->[0];
       }
 
-      $alias2source->{$tabinfo->{-alias}} = $tabinfo->{-result_source}
-        if ($tabinfo->{-result_source});
+      $alias2source->{$tabinfo->{-alias}} = $tabinfo->{-source_handle}->resolve
+        if ($tabinfo->{-source_handle});
     }
   }
 
-  return $alias2source;
-}
-
-sub count {
-  my ($self, $source, $attrs) = @_;
-
-  my $tmp_attrs = { %$attrs };
-
-  # take off any pagers, record_filter is cdbi, and no point of ordering a count
-  delete $tmp_attrs->{$_} for (qw/select as rows offset page order_by record_filter/);
-
-  # overwrite the selector
-  $tmp_attrs->{select} = { count => '*' };
-
-  my $tmp_rs = $source->resultset_class->new($source, $tmp_attrs);
-  my ($count) = $tmp_rs->cursor->next;
-
-  # if the offset/rows attributes are still present, we did not use
-  # a subquery, so we need to make the calculations in software
-  $count -= $attrs->{offset} if $attrs->{offset};
-  $count = $attrs->{rows} if $attrs->{rows} and $attrs->{rows} < $count;
-  $count = 0 if ($count < 0);
-
-  return $count;
+  return ($alias2source, $rs_alias);
 }
 
-sub count_grouped {
-  my ($self, $source, $attrs) = @_;
+# Takes $ident, \@column_names
+#
+# returns { $column_name => \%column_info, ... }
+# also note: this adds -result_source => $rsrc to the column info
+#
+# usage:
+#   my $col_sources = $self->_resolve_column_info($ident, \@column_names);
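+#
+# return-shape sketch (hypothetical columns 'me.name' and an unambiguous
+# 'age'):
+#
+#   {
+#     'me.name' => { %column_info, -result_source => $rsrc, -source_alias => 'me' },
+#     'age'     => { ... },  # only resolvable because 'age' is seen exactly once
+#   }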
+sub _resolve_column_info {
+  my ($self, $ident, $colnames) = @_;
+  my ($alias2src, $root_alias) = $self->_resolve_ident_sources($ident);
+
+  my $sep = $self->_sql_maker_opts->{name_sep} || '.';
+  $sep = "\Q$sep\E";
+
+  my (%return, %seen_cols);
 
-  # copy for the subquery, we need to do some adjustments to it too
-  my $sub_attrs = { %$attrs };
+  # compile a global list of column names, to be able to properly
+  # disambiguate unqualified column names (if at all possible)
+  for my $alias (keys %$alias2src) {
+    my $rsrc = $alias2src->{$alias};
+    for my $colname ($rsrc->columns) {
+      push @{$seen_cols{$colname}}, $alias;
+    }
+  }
 
-  # these can not go in the subquery, and there is no point of ordering it
-  delete $sub_attrs->{$_} for qw/prefetch collapse select as order_by/;
+  COLUMN:
+  foreach my $col (@$colnames) {
+    my ($alias, $colname) = $col =~ m/^ (?: ([^$sep]+) $sep)? (.+) $/x;
 
+    unless ($alias) {
+      # see if the column was seen exactly once (so we know which rsrc it came from)
+      if ($seen_cols{$colname} and @{$seen_cols{$colname}} == 1) {
+        $alias = $seen_cols{$colname}[0];
+      }
+      else {
+        next COLUMN;
+      }
+    }
 
-  # if we prefetch, we group_by primary keys only as this is what we would get out of the rs via ->next/->all
-  # simply deleting group_by suffices, as the code below will re-fill it
-  # Note: we check $attrs, as $sub_attrs has collapse deleted
-  if (ref $attrs->{collapse} and keys %{$attrs->{collapse}} ) {
-    delete $sub_attrs->{group_by};
+    my $rsrc = $alias2src->{$alias};
+    $return{$col} = $rsrc && {
+      %{$rsrc->column_info($colname)},
+      -result_source => $rsrc,
+      -source_alias => $alias,
+    };
   }
 
-  $sub_attrs->{group_by} ||= [ map { "$attrs->{alias}.$_" } ($source->primary_columns) ];
-  $sub_attrs->{select} = $self->_grouped_count_select ($source, $sub_attrs);
-
-  $attrs->{from} = [{
-    count_subq => $source->resultset_class->new ($source, $sub_attrs )->as_query
-  }];
-
-  # the subquery replaces this
-  delete $attrs->{$_} for qw/where bind prefetch collapse group_by having having_bind rows offset page pager/;
+  return \%return;
+}
 
-  return $self->count ($source, $attrs);
+# Returns a counting SELECT for a simple count
+# query. Abstracted so that a storage could override
+# this to { count => 'firstcol' } or whatever makes
+# sense as a performance optimization
+sub _count_select {
+  #my ($self, $source, $rs_attrs) = @_;
+  return { count => '*' };
 }
 
+# Returns a SELECT which will end up in the subselect
+# There may or may not be a group_by, as the subquery
+# might have been called to accommodate a limit
 #
-# Returns a SELECT to go with a supplied GROUP BY
-# (caled by count_grouped so a group_by is present)
-# Most databases expect them to match, but some
-# choke in various ways.
+# Most databases would be happy with whatever ends up
+# here, but some choke in various ways.
 #
-sub _grouped_count_select {
-  my ($self, $source, $rs_args) = @_;
-  return $rs_args->{group_by};
+sub _subq_count_select {
+  my ($self, $source, $rs_attrs) = @_;
+  return $rs_attrs->{group_by} if $rs_attrs->{group_by};
+
+  my @pcols = map { join '.', $rs_attrs->{alias}, $_ } ($source->primary_columns);
+  return @pcols ? \@pcols : [ 1 ];
+}
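+
+# Override sketch (hypothetical storage subclass): a driver where COUNT(*)
+# is pessimal could narrow the simple-count selector -
+#
+#   sub _count_select { +{ count => 'me.id' } }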
+
 
 sub source_bind_attributes {
   my ($self, $source) = @_;
-
+
   my $bind_attributes;
   foreach my $column ($source->columns) {
-
+
     my $data_type = $source->column_info($column)->{data_type} || '';
     $bind_attributes->{$column} = $self->bind_attribute_by_data_type($data_type)
      if $data_type;
@@ -1432,7 +2024,7 @@ sub _dbh_sth {
 
 sub sth {
   my ($self, $sql) = @_;
-  $self->dbh_do('_dbh_sth', $sql);
+  $self->dbh_do('_dbh_sth', $sql);  # retry over disconnects
 }
 
 sub _dbh_columns_info_for {
@@ -1494,7 +2086,7 @@ sub _dbh_columns_info_for {
 
 sub columns_info_for {
   my ($self, $table) = @_;
-  $self->dbh_do('_dbh_columns_info_for', $table);
+  $self->_dbh_columns_info_for ($self->_get_dbh, $table);
 }
 
 =head2 last_insert_id
 
@@ -1520,7 +2112,38 @@ EOE
 
 sub last_insert_id {
   my $self = shift;
-  $self->dbh_do('_dbh_last_insert_id', @_);
+  $self->_dbh_last_insert_id ($self->_dbh, @_);
+}
+
+=head2 _native_data_type
+
+=over 4
+
+=item Arguments: $type_name
+
+=back
+
+This API is B<EXPERIMENTAL>, will almost definitely change in the future, and
+currently only used by L<::AutoCast|DBIx::Class::Storage::DBI::AutoCast> and
+L<::Sybase|DBIx::Class::Storage::DBI::Sybase>.
+
+The default implementation returns C<undef>, implement in your Storage driver if
+you need this functionality.
+
+Should map types from other databases to the native RDBMS type, for example
+C<VARCHAR2> to C<VARCHAR>.
+
+Types with modifiers should map to the underlying data type. For example,
+C<INTEGER AUTO_INCREMENT> should become C<INTEGER>.
+
+Composite types should map to the container type, for example
+C<ENUM(foo,bar,baz)> becomes C<ENUM>.
+
+=cut
+
+sub _native_data_type {
+  #my ($self, $data_type) = @_;
+  return undef
+}
 
 =head2 sqlt_type
 
@@ -1529,7 +2152,16 @@ Returns the database driver name.
 
 =cut
 
-sub sqlt_type { shift->dbh->{Driver}->{Name} }
+sub sqlt_type {
+  my ($self) = @_;
+
+  if (not $self->_driver_determined) {
+    $self->_determine_driver;
+    goto $self->can ('sqlt_type');
+  }
+
+  $self->_get_dbh->{Driver}->{Name};
+}
 
 =head2 bind_attribute_by_data_type
 
@@ -1612,13 +2244,13 @@ By default, C<\%sqlt_args> will have
 
 { add_drop_table => 1, ignore_constraint_names => 1, ignore_index_names => 1 }
 
-merged with the hash passed in. To disable any of those features, pass in a
+merged with the hash passed in. To disable any of those features, pass in a
 hashref like the following
 
 { ignore_constraint_names => 0, # ... other options }
 
 
-Note that this feature is currently EXPERIMENTAL and may not work correctly
+Note that this feature is currently EXPERIMENTAL and may not work correctly
 across all databases, or fully handle complex relationships.
 
 WARNING: Please check all SQL files created, before applying them.
 
@@ -1639,7 +2271,7 @@ sub create_ddl_dir {
   $version ||= $schema_version;
 
   $sqltargs = {
-    add_drop_table => 1,
+    add_drop_table => 1,
     ignore_constraint_names => 1,
     ignore_index_names => 1,
     %{$sqltargs || {}}
@@ -1679,7 +2311,7 @@ sub create_ddl_dir {
     }
     print $file $output;
     close($file);
-
+
     next unless ($preversion);
 
     require SQL::Translator::Diff;
@@ -1695,7 +2327,7 @@ sub create_ddl_dir {
      carp("Overwriting existing diff file - $difffile");
      unlink($difffile);
     }
-
+
    my $source_schema;
    {
      my $t = SQL::Translator->new($sqltargs);
@@ -1714,7 +2346,7 @@ sub create_ddl_dir {
        unless ( $source_schema->name );
    }
 
-    # The "new" style of producers have sane normalization and can support
+    # The "new" style of producers have sane normalization and can support
    # diffing a SQL file against a DBIC->SQLT schema. Old style ones don't
    # And we have to diff parsed SQL against parsed SQL.
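    #
    # Flow sketch (names taken from the surrounding code): $source_schema is
    # parsed from the previous version's DDL file, $dest_schema holds the
    # current schema, and the two are diffed via
    #
    #   SQL::Translator::Diff::schema_diff(
    #     $source_schema, $db, $dest_schema, $db, $sqltargs
    #   );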
    my $dest_schema = $sqlt_schema;
 
@@ -1735,12 +2367,12 @@ sub create_ddl_dir {
      $dest_schema->name( $filename )
        unless $dest_schema->name;
    }
-
+
    my $diff = SQL::Translator::Diff::schema_diff($source_schema, $db,
                                                  $dest_schema,   $db,
                                                  $sqltargs
                                                 );
-    if(!open $file, ">$difffile") {
+    if(!open $file, ">$difffile") {
      $self->throw_exception("Can't write to $difffile ($!)");
      next;
    }
@@ -1775,8 +2407,6 @@ See L for a list of values for C<$sqlt_args>.
 
 sub deployment_statements {
   my ($self, $schema, $type, $version, $dir, $sqltargs) = @_;
-  # Need to be connected to get the correct sqlt_type
-  $self->ensure_connected() unless $type;
   $type ||= $self->sqlt_type;
   $version ||= $schema->schema_version || '1.x';
   $dir ||= './';
@@ -1784,7 +2414,7 @@ sub deployment_statements {
 
   if(-f $filename)
   {
      my $file;
-      open($file, "<$filename")
+      open($file, "<$filename")
       or $self->throw_exception("Can't open $filename ($!)");
      my @rows = <$file>;
      close($file);
@@ -1795,18 +2425,18 @@ sub deployment_statements {
      . $self->_check_sqlt_message . q{'})
          if !$self->_check_sqlt_version;
 
-  require SQL::Translator::Parser::DBIx::Class;
-  eval qq{use SQL::Translator::Producer::${type}};
-  $self->throw_exception($@) if $@;
-
-  # sources needs to be a parser arg, but for simplicty allow at top level
+  # sources needs to be a parser arg, but for simplicity allow at top level
   # coming in
   $sqltargs->{parser_args}{sources} = delete $sqltargs->{sources}
      if exists $sqltargs->{sources};
 
-  my $tr = SQL::Translator->new(%$sqltargs);
-  SQL::Translator::Parser::DBIx::Class::parse( $tr, $schema );
-  return "SQL::Translator::Producer::${type}"->can('produce')->($tr);
+  my $tr = SQL::Translator->new(
+    producer => "SQL::Translator::Producer::${type}",
+    %$sqltargs,
+    parser => 'SQL::Translator::Parser::DBIx::Class',
+    data => $schema,
+  );
+  return $tr->translate;
 }
 
 sub deploy {
@@ -1821,7 +2451,9 @@ sub deploy {
      return if $line =~ /^\s+$/; # skip whitespace only
      $self->_query_start($line);
      eval {
-        $self->dbh->do($line); # shouldn't be using ->dbh ?
+        # do a dbh_do cycle here, as we need some error checking in
+        # place (even though we will ignore errors)
+        $self->dbh_do (sub { $_[1]->do($line) });
      };
      if ($@) {
        carp qq{$@ (running "${line}")};
@@ -1850,7 +2482,7 @@ Returns the datetime parser class
 
 sub datetime_parser {
   my $self = shift;
   return $self->{datetime_parser} ||= do {
-    $self->ensure_connected;
+    $self->_populate_dbh unless $self->_dbh;
    $self->build_datetime_parser(@_);
  };
 }
@@ -1904,7 +2536,7 @@ returned by databases that don't support replication.
 
 sub is_replicating {
    return;
-
+
 }
 
 =head2 lag_behind_master
 
@@ -1921,8 +2553,13 @@ sub lag_behind_master {
 
 sub DESTROY {
  my $self = shift;
-  return if !$self->_dbh;
-  $self->_verify_pid;
+
+  $self->_verify_pid if $self->_dbh;
+
+  # some databases need this to stop spewing warnings
+  if (my $dbh = $self->_dbh) {
+    eval { $dbh->disconnect };
+  }
+
  $self->_dbh(undef);
 }
 
@@ -1934,7 +2571,7 @@ sub DESTROY {
 
 DBIx::Class can do some wonderful magic with handling exceptions,
 disconnections, and transactions when you use C<< AutoCommit => 1 >>
-combined with C<txn_do> for transaction support.
+(the default) combined with C<txn_do> for transaction support.
 
 If you set C<< AutoCommit => 0 >> in your connect info, then you
 are always in an assumed transaction between commits, and you're telling
 us you'd
 cases if you choose the C<< AutoCommit => 0 >> path, just as you would
 be with raw DBI.
 
 
-
 =head1 AUTHORS
 
 Matt S. Trout