X-Git-Url: http://git.shadowcat.co.uk/gitweb/gitweb.cgi?a=blobdiff_plain;f=lib%2FDBIx%2FClass%2FStorage%2FDBI.pm;h=d32154517447779d07c9d442dff6733e81276ad9;hb=44e538d00c41e69899b48178c9dede95e2ef7e77;hp=a0a34a8b29836ce55eae9a4443356f780ff21430;hpb=b33697ef204738fc1bb8edd81bc6eb2e7bd2b5a2;p=dbsrgits%2FDBIx-Class-Historic.git diff --git a/lib/DBIx/Class/Storage/DBI.pm b/lib/DBIx/Class/Storage/DBI.pm index a0a34a8..d321545 100644 --- a/lib/DBIx/Class/Storage/DBI.pm +++ b/lib/DBIx/Class/Storage/DBI.pm @@ -1,387 +1,335 @@ package DBIx::Class::Storage::DBI; # -*- mode: cperl; cperl-indent-level: 2 -*- -use base 'DBIx::Class::Storage'; - use strict; use warnings; + +use base 'DBIx::Class::Storage'; +use mro 'c3'; + +use Carp::Clan qw/^DBIx::Class/; use DBI; -use SQL::Abstract::Limit; use DBIx::Class::Storage::DBI::Cursor; use DBIx::Class::Storage::Statistics; -use IO::File; +use Scalar::Util(); +use List::Util(); -__PACKAGE__->mk_group_accessors( - 'simple' => - qw/_connect_info _dbh _sql_maker _sql_maker_opts _conn_pid _conn_tid - disable_sth_caching cursor on_connect_do transaction_depth/ +__PACKAGE__->mk_group_accessors('simple' => + qw/_connect_info _dbi_connect_info _dbh _sql_maker _sql_maker_opts _conn_pid + _conn_tid transaction_depth _dbh_autocommit _driver_determined savepoints/ ); -BEGIN { +# the values for these accessors are picked out (and deleted) from +# the attribute hashref passed to connect_info +my @storage_options = qw/ + on_connect_call on_disconnect_call on_connect_do on_disconnect_do + disable_sth_caching unsafe auto_savepoint +/; +__PACKAGE__->mk_group_accessors('simple' => @storage_options); + + +# default cursor class, overridable in connect_info attributes +__PACKAGE__->cursor_class('DBIx::Class::Storage::DBI::Cursor'); + +__PACKAGE__->mk_group_accessors('inherited' => qw/sql_maker_class/); +__PACKAGE__->sql_maker_class('DBIx::Class::SQLAHacks'); + + +=head1 NAME + +DBIx::Class::Storage::DBI - DBI storage handler + +=head1 SYNOPSIS + + my $schema = MySchema->connect('dbi:SQLite:my.db'); -package DBIC::SQL::Abstract; # Would merge upstream, but nate doesn't reply :( + $schema->storage->debug(1); + $schema->dbh_do("DROP TABLE authors"); -use base qw/SQL::Abstract::Limit/; + $schema->resultset('Book')->search({ + written_on => $schema->storage->datetime_parser(DateTime->now) + }); + +=head1 DESCRIPTION + +This class represents the connection to an RDBMS via L. See +L for general information. This pod only +documents DBI-specific methods and behaviors. + +=head1 METHODS + +=cut -# This prevents the caching of $dbh in S::A::L, I believe sub new { - my $self = shift->SUPER::new(@_); + my $new = shift->next::method(@_); - # If limit_dialect is a ref (like a $dbh), go ahead and replace - # it with what it resolves to: - $self->{limit_dialect} = $self->_find_syntax($self->{limit_dialect}) - if ref $self->{limit_dialect}; + $new->transaction_depth(0); + $new->_sql_maker_opts({}); + $new->{savepoints} = []; + $new->{_in_dbh_do} = 0; + $new->{_dbh_gen} = 0; - $self; + $new; } -sub _RowNumberOver { - my ($self, $sql, $order, $rows, $offset ) = @_; +=head2 connect_info - $offset += 1; - my $last = $rows + $offset; - my ( $order_by ) = $self->_order_by( $order ); +This method is normally called by L, which +encapsulates its argument list in an arrayref before passing them here. 
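(Illustration, not part of this diff: a minimal sketch assuming the MySchema class from the SYNOPSIS above.) The arrayref wrapping means a plain Schema connect call and an explicit connect_info call receive the same structure:

  my $schema = MySchema->connect(
    'dbi:SQLite:my.db', undef, undef, { AutoCommit => 1 },
  );

  # ... which hands connect_info() the same data as:
  $schema->storage->connect_info(
    [ 'dbi:SQLite:my.db', undef, undef, { AutoCommit => 1 } ]
  );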
- $sql = <<""; -SELECT * FROM -( - SELECT Q1.*, ROW_NUMBER() OVER( ) AS ROW_NUM FROM ( - $sql - $order_by - ) Q1 -) Q2 -WHERE ROW_NUM BETWEEN $offset AND $last +The argument list may contain: - return $sql; -} +=over +=item * -# While we're at it, this should make LIMIT queries more efficient, -# without digging into things too deeply -use Scalar::Util 'blessed'; -sub _find_syntax { - my ($self, $syntax) = @_; - my $dbhname = blessed($syntax) ? $syntax->{Driver}{Name} : $syntax; -# print STDERR "Found DBH $syntax >$dbhname< ", $syntax->{Driver}->{Name}, "\n"; - if(ref($self) && $dbhname && $dbhname eq 'DB2') { - return 'RowNumberOver'; - } +The same 4-element argument set one would normally pass to +L, optionally followed by +L +recognized by DBIx::Class: - $self->{_cached_syntax} ||= $self->SUPER::_find_syntax($syntax); -} + $connect_info_args = [ $dsn, $user, $password, \%dbi_attributes?, \%extra_attributes? ]; -sub select { - my ($self, $table, $fields, $where, $order, @rest) = @_; - $table = $self->_quote($table) unless ref($table); - local $self->{rownum_hack_count} = 1 - if (defined $rest[0] && $self->{limit_dialect} eq 'RowNum'); - @rest = (-1) unless defined $rest[0]; - die "LIMIT 0 Does Not Compute" if $rest[0] == 0; - # and anyway, SQL::Abstract::Limit will cause a barf if we don't first - local $self->{having_bind} = []; - my ($sql, @ret) = $self->SUPER::select( - $table, $self->_recurse_fields($fields), $where, $order, @rest - ); - return wantarray ? ($sql, @ret, @{$self->{having_bind}}) : $sql; -} +=item * -sub insert { - my $self = shift; - my $table = shift; - $table = $self->_quote($table) unless ref($table); - $self->SUPER::insert($table, @_); -} +A single code reference which returns a connected +L optionally followed by +L recognized +by DBIx::Class: -sub update { - my $self = shift; - my $table = shift; - $table = $self->_quote($table) unless ref($table); - $self->SUPER::update($table, @_); -} + $connect_info_args = [ sub { DBI->connect (...) }, \%extra_attributes? ]; -sub delete { - my $self = shift; - my $table = shift; - $table = $self->_quote($table) unless ref($table); - $self->SUPER::delete($table, @_); -} +=item * -sub _emulate_limit { - my $self = shift; - if ($_[3] == -1) { - return $_[1].$self->_order_by($_[2]); - } else { - return $self->SUPER::_emulate_limit(@_); - } -} +A single hashref with all the attributes and the dsn/user/password +mixed together: -sub _recurse_fields { - my ($self, $fields) = @_; - my $ref = ref $fields; - return $self->_quote($fields) unless $ref; - return $$fields if $ref eq 'SCALAR'; - - if ($ref eq 'ARRAY') { - return join(', ', map { - $self->_recurse_fields($_) - .(exists $self->{rownum_hack_count} - ? ' AS col'.$self->{rownum_hack_count}++ - : '') - } @$fields); - } elsif ($ref eq 'HASH') { - foreach my $func (keys %$fields) { - return $self->_sqlcase($func) - .'( '.$self->_recurse_fields($fields->{$func}).' 
)'; - } - } -} + $connect_info_args = [{ + dsn => $dsn, + user => $user, + password => $pass, + %dbi_attributes, + %extra_attributes, + }]; -sub _order_by { - my $self = shift; - my $ret = ''; - my @extra; - if (ref $_[0] eq 'HASH') { - if (defined $_[0]->{group_by}) { - $ret = $self->_sqlcase(' group by ') - .$self->_recurse_fields($_[0]->{group_by}); - } - if (defined $_[0]->{having}) { - my $frag; - ($frag, @extra) = $self->_recurse_where($_[0]->{having}); - push(@{$self->{having_bind}}, @extra); - $ret .= $self->_sqlcase(' having ').$frag; - } - if (defined $_[0]->{order_by}) { - $ret .= $self->_order_by($_[0]->{order_by}); - } - } elsif (ref $_[0] eq 'SCALAR') { - $ret = $self->_sqlcase(' order by ').${ $_[0] }; - } elsif (ref $_[0] eq 'ARRAY' && @{$_[0]}) { - my @order = @{+shift}; - $ret = $self->_sqlcase(' order by ') - .join(', ', map { - my $r = $self->_order_by($_, @_); - $r =~ s/^ ?ORDER BY //i; - $r; - } @order); - } else { - $ret = $self->SUPER::_order_by(@_); - } - return $ret; -} +This is particularly useful for L based applications, allowing the +following config (L style): -sub _order_directions { - my ($self, $order) = @_; - $order = $order->{order_by} if ref $order eq 'HASH'; - return $self->SUPER::_order_directions($order); -} + + schema_class App::DB + + dsn dbi:mysql:database=test + user testuser + password TestPass + AutoCommit 1 + + -sub _table { - my ($self, $from) = @_; - if (ref $from eq 'ARRAY') { - return $self->_recurse_from(@$from); - } elsif (ref $from eq 'HASH') { - return $self->_make_as($from); - } else { - return $from; # would love to quote here but _table ends up getting called - # twice during an ->select without a limit clause due to - # the way S::A::Limit->select works. should maybe consider - # bypassing this and doing S::A::select($self, ...) in - # our select method above. meantime, quoting shims have - # been added to select/insert/update/delete here - } -} +=back -sub _recurse_from { - my ($self, $from, @join) = @_; - my @sqlf; - push(@sqlf, $self->_make_as($from)); - foreach my $j (@join) { - my ($to, $on) = @$j; +Please note that the L docs recommend that you always explicitly +set C to either I<0> or I<1>. L further +recommends that it be set to I<1>, and that you perform transactions +via our L method. L will set it +to I<1> if you do not do explicitly set it to zero. This is the default +for most DBDs. See L for details. - # check whether a join type exists - my $join_clause = ''; - my $to_jt = ref($to) eq 'ARRAY' ? $to->[0] : $to; - if (ref($to_jt) eq 'HASH' and exists($to_jt->{-join_type})) { - $join_clause = ' '.uc($to_jt->{-join_type}).' JOIN '; - } else { - $join_clause = ' JOIN '; - } - push(@sqlf, $join_clause); +=head3 DBIx::Class specific connection attributes - if (ref $to eq 'ARRAY') { - push(@sqlf, '(', $self->_recurse_from(@$to), ')'); - } else { - push(@sqlf, $self->_make_as($to)); - } - push(@sqlf, ' ON ', $self->_join_condition($on)); - } - return join('', @sqlf); -} +In addition to the standard L +L attributes, DBIx::Class recognizes +the following connection options. These options can be mixed in with your other +L connection attributes, or placed in a seperate hashref +(C<\%extra_attributes>) as shown above. -sub _make_as { - my ($self, $from) = @_; - return join(' ', map { (ref $_ eq 'SCALAR' ? 
$$_ : $self->_quote($_)) } - reverse each %{$self->_skip_options($from)}); -} +Every time C is invoked, any previous settings for +these options will be cleared before setting the new ones, regardless of +whether any options are specified in the new C. -sub _skip_options { - my ($self, $hash) = @_; - my $clean_hash = {}; - $clean_hash->{$_} = $hash->{$_} - for grep {!/^-/} keys %$hash; - return $clean_hash; -} -sub _join_condition { - my ($self, $cond) = @_; - if (ref $cond eq 'HASH') { - my %j; - for (keys %$cond) { - my $x = '= '.$self->_quote($cond->{$_}); $j{$_} = \$x; - }; - return $self->_recurse_where(\%j); - } elsif (ref $cond eq 'ARRAY') { - return join(' OR ', map { $self->_join_condition($_) } @$cond); - } else { - die "Can't handle this yet!"; - } -} +=over -sub _quote { - my ($self, $label) = @_; - return '' unless defined $label; - return "*" if $label eq '*'; - return $label unless $self->{quote_char}; - if(ref $self->{quote_char} eq "ARRAY"){ - return $self->{quote_char}->[0] . $label . $self->{quote_char}->[1] - if !defined $self->{name_sep}; - my $sep = $self->{name_sep}; - return join($self->{name_sep}, - map { $self->{quote_char}->[0] . $_ . $self->{quote_char}->[1] } - split(/\Q$sep\E/,$label)); - } - return $self->SUPER::_quote($label); -} +=item on_connect_do -sub limit_dialect { - my $self = shift; - $self->{limit_dialect} = shift if @_; - return $self->{limit_dialect}; -} +Specifies things to do immediately after connecting or re-connecting to +the database. Its value may contain: -sub quote_char { - my $self = shift; - $self->{quote_char} = shift if @_; - return $self->{quote_char}; -} +=over -sub name_sep { - my $self = shift; - $self->{name_sep} = shift if @_; - return $self->{name_sep}; -} +=item a scalar -} # End of BEGIN block +This contains one SQL statement to execute. -=head1 NAME +=item an array reference -DBIx::Class::Storage::DBI - DBI storage handler +This contains SQL statements to execute in order. Each element contains +a string or a code reference that returns a string. -=head1 SYNOPSIS +=item a code reference -=head1 DESCRIPTION +This contains some code to execute. Unlike code references within an +array reference, its return value is ignored. -This class represents the connection to an RDBMS via L. See -L for general information. This pod only -documents DBI-specific methods and behaviors. +=back -=head1 METHODS +=item on_disconnect_do -=cut +Takes arguments in the same form as L and executes them +immediately before disconnecting from the database. -sub new { - my $new = shift->next::method(@_); +Note, this only runs if you explicitly call L on the +storage object. - $new->cursor("DBIx::Class::Storage::DBI::Cursor"); - $new->transaction_depth(0); - $new->_sql_maker_opts({}); - $new->{_in_dbh_do} = 0; - $new->{_dbh_gen} = 0; +=item on_connect_call - $new; -} +A more generalized form of L that calls the specified +C methods in your storage driver. -=head2 connect_info + on_connect_do => 'select 1' -The arguments of C are always a single array reference. +is equivalent to: -This is normally accessed via L, which -encapsulates its argument list in an arrayref before calling -C here. + on_connect_call => [ [ do_sql => 'select 1' ] ] -The arrayref can either contain the same set of arguments one would -normally pass to L, or a lone code reference which returns -a connected database handle. 
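(Illustration, not part of this diff; the SQL here is a placeholder.) The array-of-arrays form described below can combine several actions, each inner arrayref holding a connect_call_ method name followed by its arguments:

  on_connect_call => [
    [ 'datetime_setup' ],        # calls connect_call_datetime_setup()
    [ do_sql => 'select 1' ],    # calls connect_call_do_sql('select 1')
  ]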
+Its values may contain: -In either case, if the final argument in your connect_info happens -to be a hashref, C will look there for several -connection-specific options: +=over -=over 4 +=item a scalar -=item on_connect_do +Will call the C method. + +=item a code reference + +Will execute C<< $code->($storage) >> + +=item an array reference + +Each value can be a method name or code reference. + +=item an array of arrays + +For each array, the first item is taken to be the C method name +or code reference, and the rest are parameters to it. + +=back + +Some predefined storage methods you may use: + +=over + +=item do_sql + +Executes a SQL string or a code reference that returns a SQL string. This is +what L and L use. + +It can take: + +=over + +=item a scalar + +Will execute the scalar as SQL. -This can be set to an arrayref of literal sql statements, which will -be executed immediately after making the connection to the database -every time we [re-]connect. +=item an arrayref + +Taken to be arguments to L, the SQL string optionally followed by the +attributes hashref and bind values. + +=item a code reference + +Will execute C<< $code->($storage) >> and execute the return array refs as +above. + +=back + +=item datetime_setup + +Execute any statements necessary to initialize the database session to return +and accept datetime/timestamp values used with +L. + +Only necessary for some databases, see your specific storage driver for +implementation details. + +=back + +=item on_disconnect_call + +Takes arguments in the same form as L and executes them +immediately before disconnecting from the database. + +Calls the C methods as opposed to the +C methods called by L. + +Note, this only runs if you explicitly call L on the +storage object. =item disable_sth_caching If set to a true value, this option will disable the caching of statement handles via L. -=item limit_dialect +=item limit_dialect Sets the limit dialect. This is useful for JDBC-bridge among others where the remote SQL-dialect cannot be determined by the name of the -driver alone. +driver alone. See also L. =item quote_char -Specifies what characters to use to quote table and column names. If -you use this you will want to specify L as well. +Specifies what characters to use to quote table and column names. If +you use this you will want to specify L as well. -quote_char expects either a single character, in which case is it is placed -on either side of the table/column, or an arrayref of length 2 in which case the -table/column name is placed between the elements. +C expects either a single character, in which case is it +is placed on either side of the table/column name, or an arrayref of length +2 in which case the table/column name is placed between the elements. -For example under MySQL you'd use C '`'>, and user SQL Server you'd -use C [qw/[ ]/]>. +For example under MySQL you should use C<< quote_char => '`' >>, and for +SQL Server you should use C<< quote_char => [qw/[ ]/] >>. =item name_sep -This only needs to be used in conjunction with L, and is used to -specify the charecter that seperates elements (schemas, tables, columns) from +This only needs to be used in conjunction with C, and is used to +specify the charecter that seperates elements (schemas, tables, columns) from each other. In most cases this is simply a C<.>. -=back +The consequences of not supplying this value is that L +will assume DBIx::Class' uses of aliases to be complete column +names. 
The output will look like I<"me.name"> when it should actually +be I<"me"."name">. -These options can be mixed in with your other L connection attributes, -or placed in a seperate hashref after all other normal L connection -arguments. +=item unsafe -Every time C is invoked, any previous settings for -these options will be cleared before setting the new ones, regardless of -whether any options are specified in the new C. +This Storage driver normally installs its own C, sets +C and C on, and sets C off on +all database handles, including those supplied by a coderef. It does this +so that it can have consistent and useful error behavior. + +If you set this option to a true value, Storage will not do its usual +modifications to the database handle's attributes, and instead relies on +the settings in your connect_info DBI options (or the values you set in +your connection coderef, in the case that you are connecting via coderef). + +Note that your custom settings can cause Storage to malfunction, +especially if you set a C handler that suppresses exceptions +and/or disable C. + +=item auto_savepoint + +If this option is true, L will use savepoints when nesting +transactions, making it possible to recover from failure in the inner +transaction without having to abort all outer transactions. + +=item cursor_class -Important note: DBIC expects the returned database handle provided by -a subref argument to have RaiseError set on it. If it doesn't, things -might not work very well, YMMV. If you don't use a subref, DBIC will -force this setting for you anyways. Setting HandleError to anything -other than simple exception object wrapper might cause problems too. +Use this argument to supply a cursor class other than the default +L. -Examples: +=back + +Some real-life examples of arguments to L and +L # Simple SQLite connection ->connect_info([ 'dbi:SQLite:./foo.db' ]); @@ -395,7 +343,7 @@ Examples: 'dbi:Pg:dbname=foo', 'postgres', 'my_pg_password', - { AutoCommit => 0 }, + { AutoCommit => 1 }, { quote_char => q{"}, name_sep => q{.} }, ] ); @@ -406,11 +354,24 @@ Examples: 'dbi:Pg:dbname=foo', 'postgres', 'my_pg_password', - { AutoCommit => 0, quote_char => q{"}, name_sep => q{.} }, + { AutoCommit => 1, quote_char => q{"}, name_sep => q{.} }, ] ); - # Subref + DBIC-specific connection options + # Same, but with hashref as argument + # See parse_connect_info for explanation + ->connect_info( + [{ + dsn => 'dbi:Pg:dbname=foo', + user => 'postgres', + password => 'my_pg_password', + AutoCommit => 1, + quote_char => q{"}, + name_sep => q{.}, + }] + ); + + # Subref + DBIx::Class-specific connection options ->connect_info( [ sub { DBI->connect(...) }, @@ -423,6 +384,8 @@ Examples: ] ); + + =cut sub connect_info { @@ -430,41 +393,107 @@ sub connect_info { return $self->_connect_info if !$info_arg; + my @args = @$info_arg; # take a shallow copy for further mutilation + $self->_connect_info([@args]); # copy for _connect_info + + + # combine/pre-parse arguments depending on invocation style + + my %attrs; + if (ref $args[0] eq 'CODE') { # coderef with optional \%extra_attributes + %attrs = %{ $args[1] || {} }; + @args = $args[0]; + } + elsif (ref $args[0] eq 'HASH') { # single hashref (i.e. 
Catalyst config) + %attrs = %{$args[0]}; + @args = (); + for (qw/password user dsn/) { + unshift @args, delete $attrs{$_}; + } + } + else { # otherwise assume dsn/user/password + \%attrs + \%extra_attrs + %attrs = ( + % { $args[3] || {} }, + % { $args[4] || {} }, + ); + @args = @args[0,1,2]; + } + # Kill sql_maker/_sql_maker_opts, so we get a fresh one with only # the new set of options $self->_sql_maker(undef); $self->_sql_maker_opts({}); - my $info = [ @$info_arg ]; # copy because we can alter it - my $last_info = $info->[-1]; - if(ref $last_info eq 'HASH') { - for my $storage_opt (qw/on_connect_do disable_sth_caching/) { - if(my $value = delete $last_info->{$storage_opt}) { + if(keys %attrs) { + for my $storage_opt (@storage_options, 'cursor_class') { # @storage_options is declared at the top of the module + if(my $value = delete $attrs{$storage_opt}) { $self->$storage_opt($value); } } for my $sql_maker_opt (qw/limit_dialect quote_char name_sep/) { - if(my $opt_val = delete $last_info->{$sql_maker_opt}) { + if(my $opt_val = delete $attrs{$sql_maker_opt}) { $self->_sql_maker_opts->{$sql_maker_opt} = $opt_val; } } + } - # Get rid of any trailing empty hashref - pop(@$info) if !keys %$last_info; + if (ref $args[0] eq 'CODE') { + # _connect() never looks past $args[0] in this case + %attrs = () + } else { + %attrs = ( + %{ $self->_default_dbi_connect_attributes || {} }, + %attrs, + ); } - $self->_connect_info($info); + $self->_dbi_connect_info([@args, keys %attrs ? \%attrs : ()]); + $self->_connect_info; +} + +sub _default_dbi_connect_attributes { + return { AutoCommit => 1 }; } =head2 on_connect_do -This method is deprecated in favor of setting via L. +This method is deprecated in favour of setting via L. + +=cut + +=head2 on_disconnect_do + +This method is deprecated in favour of setting via L. + +=cut + +sub _parse_connect_do { + my ($self, $type) = @_; + + my $val = $self->$type; + return () if not defined $val; + + my @res; + + if (not ref($val)) { + push @res, [ 'do_sql', $val ]; + } elsif (ref($val) eq 'CODE') { + push @res, $val; + } elsif (ref($val) eq 'ARRAY') { + push @res, map { [ 'do_sql', $_ ] } @$val; + } else { + $self->throw_exception("Invalid type for $type: ".ref($val)); + } + + return \@res; +} =head2 dbh_do -Arguments: $subref, @extra_coderef_args? +Arguments: ($subref | $method_name), @extra_coderef_args? -Execute the given subref using the new exception-based connection management. +Execute the given $subref or $method_name using the new exception-based +connection management. The first two arguments will be the storage object that C was called on and a database handle to use. 
Any additional arguments will be passed @@ -492,28 +521,33 @@ Example: sub dbh_do { my $self = shift; - my $coderef = shift; + my $code = shift; - ref $coderef eq 'CODE' or $self->throw_exception - ('$coderef must be a CODE reference'); + my $dbh = $self->_dbh; + + return $self->$code($dbh, @_) if $self->{_in_dbh_do} + || $self->{transaction_depth}; - return $coderef->($self, $self->_dbh, @_) if $self->{_in_dbh_do}; local $self->{_in_dbh_do} = 1; my @result; my $want_array = wantarray; eval { - $self->_verify_pid if $self->_dbh; - $self->_populate_dbh if !$self->_dbh; + $self->_verify_pid if $dbh; + if(!$self->_dbh) { + $self->_populate_dbh; + $dbh = $self->_dbh; + } + if($want_array) { - @result = $coderef->($self, $self->_dbh, @_); + @result = $self->$code($dbh, @_); } elsif(defined $want_array) { - $result[0] = $coderef->($self, $self->_dbh, @_); + $result[0] = $self->$code($dbh, @_); } else { - $coderef->($self, $self->_dbh, @_); + $self->$code($dbh, @_); } }; @@ -525,7 +559,7 @@ sub dbh_do { # We were not connected - reconnect and retry, but let any # exception fall right through this time $self->_populate_dbh; - $coderef->($self, $self->_dbh, @_); + $self->$code($self->_dbh, @_); } # This is basically a blend of dbh_do above and DBIx::Class::Storage::txn_do. @@ -538,6 +572,8 @@ sub txn_do { ref $coderef eq 'CODE' or $self->throw_exception ('$coderef must be a CODE reference'); + return $coderef->(@_) if $self->{transaction_depth} && ! $self->auto_savepoint; + local $self->{_in_dbh_do} = 1; my @result; @@ -565,7 +601,7 @@ sub txn_do { my $exception = $@; if(!$exception) { return $want_array ? @result : $result[0] } - if($tried++ > 0 || $self->connected) { + if($tried++ || $self->connected) { eval { $self->txn_rollback }; my $rollback_exception = $@; if($rollback_exception) { @@ -597,14 +633,60 @@ database is not in C mode. sub disconnect { my ($self) = @_; - if( $self->connected ) { - $self->_dbh->rollback unless $self->_dbh->{AutoCommit}; + if( $self->_dbh ) { + my @actions; + + push @actions, ( $self->on_disconnect_call || () ); + push @actions, $self->_parse_connect_do ('on_disconnect_do'); + + $self->_do_connection_actions(disconnect_call_ => $_) for @actions; + + $self->_dbh_rollback unless $self->_dbh_autocommit; + $self->_dbh->disconnect; $self->_dbh(undef); $self->{_dbh_gen}++; } } +=head2 with_deferred_fk_checks + +=over 4 + +=item Arguments: C<$coderef> + +=item Return Value: The return value of $coderef + +=back + +Storage specific method to run the code ref with FK checks deferred or +in MySQL's case disabled entirely. + +=cut + +# Storage subclasses should override this +sub with_deferred_fk_checks { + my ($self, $sub) = @_; + + $sub->(); +} + +=head2 connected + +=over + +=item Arguments: none + +=item Return Value: 1|0 + +=back + +Verifies that the the current database handle is active and ready to execute +an SQL statement (i.e. the connection did not get stale, server is still +answering, etc.) This method is used internally by L. 
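(Illustration, not part of this diff: a sketch reusing the SYNOPSIS $schema.) A long-lived process could verify its handle before reuse:

  unless ($schema->storage->connected) {
    # stale handle, dead server, or forked pid - reconnect
    $schema->storage->ensure_connected;
  }

In normal use this is rarely necessary, as the dbh method documented below performs the same check implicitly.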
+ +=cut + sub connected { my ($self) = @_; @@ -616,19 +698,28 @@ sub connected { } else { $self->_verify_pid; + return 0 if !$self->_dbh; } - return ($dbh->FETCH('Active') && $dbh->ping); + return ($dbh->FETCH('Active') && $self->_ping); } return 0; } +sub _ping { + my $self = shift; + + my $dbh = $self->_dbh or return 0; + + return $dbh->ping; +} + # handle pid changes correctly # NOTE: assumes $self->_dbh is a valid $dbh sub _verify_pid { my ($self) = @_; - return if $self->_conn_pid == $$; + return if defined $self->_conn_pid && $self->_conn_pid == $$; $self->_dbh->{InactiveDestroy} = 1; $self->_dbh(undef); @@ -647,293 +738,1149 @@ sub ensure_connected { =head2 dbh -Returns the dbh - a data base handle of class L. +Returns a C<$dbh> - a data base handle of class L. The returned handle +is guaranteed to be healthy by implicitly calling L, and if +necessary performing a reconnection before returning. =cut sub dbh { my ($self) = @_; - $self->ensure_connected; + if (not $self->_dbh) { + $self->_populate_dbh; + } else { + $self->ensure_connected; + } + return $self->_dbh; +} + +=head2 last_dbh + +This returns the B available C<$dbh> if any, or attempts to +connect and returns the resulting handle. This method differs from +L by not validating if a preexisting handle is still healthy +via L. Make sure you take appropriate precautions +when using this method, as the C<$dbh> may be useless at this point. + +=cut + +sub last_dbh { + my $self = shift; + $self->_populate_dbh unless $self->_dbh; return $self->_dbh; } sub _sql_maker_args { my ($self) = @_; - - return ( limit_dialect => $self->dbh, %{$self->_sql_maker_opts} ); + + return ( + bindtype=>'columns', + array_datatypes => 1, + limit_dialect => $self->last_dbh, + %{$self->_sql_maker_opts} + ); } sub sql_maker { my ($self) = @_; unless ($self->_sql_maker) { - $self->_sql_maker(new DBIC::SQL::Abstract( $self->_sql_maker_args )); + my $sql_maker_class = $self->sql_maker_class; + $self->ensure_class_loaded ($sql_maker_class); + $self->_sql_maker($sql_maker_class->new( $self->_sql_maker_args )); } return $self->_sql_maker; } +sub _rebless {} + sub _populate_dbh { my ($self) = @_; - my @info = @{$self->_connect_info || []}; - $self->_dbh($self->_connect(@info)); - - if(ref $self eq 'DBIx::Class::Storage::DBI') { - my $driver = $self->_dbh->{Driver}->{Name}; - if ($self->load_optional_class("DBIx::Class::Storage::DBI::${driver}")) { - bless $self, "DBIx::Class::Storage::DBI::${driver}"; - $self->_rebless() if $self->can('_rebless'); - } - } - # if on-connect sql statements are given execute them - foreach my $sql_statement (@{$self->on_connect_do || []}) { - $self->debugobj->query_start($sql_statement) if $self->debug(); - $self->_dbh->do($sql_statement); - $self->debugobj->query_end($sql_statement) if $self->debug(); - } + my @info = @{$self->_dbi_connect_info || []}; + $self->_dbh($self->_connect(@info)); $self->_conn_pid($$); $self->_conn_tid(threads->tid) if $INC{'threads.pm'}; + + $self->_determine_driver; + + # Always set the transaction depth on connect, since + # there is no transaction in progress by definition + $self->{transaction_depth} = $self->_dbh_autocommit ? 
0 : 1; + + $self->_run_connection_actions unless $self->{_in_determine_driver}; } -sub _connect { - my ($self, @info) = @_; +sub _run_connection_actions { + my $self = shift; + my @actions; - $self->throw_exception("You failed to provide any connection info") - if !@info; + push @actions, ( $self->on_connect_call || () ); + push @actions, $self->_parse_connect_do ('on_connect_do'); - my ($old_connect_via, $dbh); + $self->_do_connection_actions(connect_call_ => $_) for @actions; +} - if ($INC{'Apache/DBI.pm'} && $ENV{MOD_PERL}) { - $old_connect_via = $DBI::connect_via; - $DBI::connect_via = 'connect'; - } +sub _determine_driver { + my ($self) = @_; - eval { - if(ref $info[0] eq 'CODE') { + if (not $self->_driver_determined) { + my $started_unconnected = 0; + local $self->{_in_determine_driver} = 1; + + if (ref($self) eq __PACKAGE__) { + my $driver; + if ($self->_dbh) { # we are connected + $driver = $self->_dbh->{Driver}{Name}; + } else { + # try to use dsn to not require being connected, the driver may still + # force a connection in _rebless to determine version + ($driver) = $self->_dbi_connect_info->[0] =~ /dbi:([^:]+):/i; + $started_unconnected = 1; + } + + my $storage_class = "DBIx::Class::Storage::DBI::${driver}"; + if ($self->load_optional_class($storage_class)) { + mro::set_mro($storage_class, 'c3'); + bless $self, $storage_class; + $self->_rebless(); + } + } + + $self->_driver_determined(1); + + $self->_run_connection_actions + if $started_unconnected && defined $self->_dbh; + } +} + +sub _do_connection_actions { + my $self = shift; + my $method_prefix = shift; + my $call = shift; + + if (not ref($call)) { + my $method = $method_prefix . $call; + $self->$method(@_); + } elsif (ref($call) eq 'CODE') { + $self->$call(@_); + } elsif (ref($call) eq 'ARRAY') { + if (ref($call->[0]) ne 'ARRAY') { + $self->_do_connection_actions($method_prefix, $_) for @$call; + } else { + $self->_do_connection_actions($method_prefix, @$_) for @$call; + } + } else { + $self->throw_exception (sprintf ("Don't know how to process conection actions of type '%s'", ref($call)) ); + } + + return $self; +} + +sub connect_call_do_sql { + my $self = shift; + $self->_do_query(@_); +} + +sub disconnect_call_do_sql { + my $self = shift; + $self->_do_query(@_); +} + +# override in db-specific backend when necessary +sub connect_call_datetime_setup { 1 } + +sub _do_query { + my ($self, $action) = @_; + + if (ref $action eq 'CODE') { + $action = $action->($self); + $self->_do_query($_) foreach @$action; + } + else { + # Most debuggers expect ($sql, @bind), so we need to exclude + # the attribute hash which is the second argument to $dbh->do + # furthermore the bind values are usually to be presented + # as named arrayref pairs, so wrap those here too + my @do_args = (ref $action eq 'ARRAY') ? 
(@$action) : ($action); + my $sql = shift @do_args; + my $attrs = shift @do_args; + my @bind = map { [ undef, $_ ] } @do_args; + + $self->_query_start($sql, @bind); + $self->_dbh->do($sql, $attrs, @do_args); + $self->_query_end($sql, @bind); + } + + return $self; +} + +sub _connect { + my ($self, @info) = @_; + + $self->throw_exception("You failed to provide any connection info") + if !@info; + + my ($old_connect_via, $dbh); + + if ($INC{'Apache/DBI.pm'} && $ENV{MOD_PERL}) { + $old_connect_via = $DBI::connect_via; + $DBI::connect_via = 'connect'; + } + + eval { + if(ref $info[0] eq 'CODE') { $dbh = &{$info[0]} } else { $dbh = DBI->connect(@info); - $dbh->{RaiseError} = 1; - $dbh->{PrintError} = 0; - $dbh->{PrintWarn} = 0; + } + + if($dbh && !$self->unsafe) { + my $weak_self = $self; + Scalar::Util::weaken($weak_self); + $dbh->{HandleError} = sub { + if ($weak_self) { + $weak_self->throw_exception("DBI Exception: $_[0]"); + } + else { + croak ("DBI Exception: $_[0]"); + } + }; + $dbh->{ShowErrorStatement} = 1; + $dbh->{RaiseError} = 1; + $dbh->{PrintError} = 0; } }; $DBI::connect_via = $old_connect_via if $old_connect_via; - if (!$dbh || $@) { - $self->throw_exception("DBI Connection failed: " . ($@ || $DBI::errstr)); - } + $self->throw_exception("DBI Connection failed: " . ($@||$DBI::errstr)) + if !$dbh || $@; + + $self->_dbh_autocommit($dbh->{AutoCommit}); $dbh; } -sub _dbh_txn_begin { - my ($self, $dbh) = @_; - if ($dbh->{AutoCommit}) { - $self->debugobj->txn_begin() - if ($self->debug); - $dbh->begin_work; +sub svp_begin { + my ($self, $name) = @_; + + $name = $self->_svp_generate_name + unless defined $name; + + $self->throw_exception ("You can't use savepoints outside a transaction") + if $self->{transaction_depth} == 0; + + $self->throw_exception ("Your Storage implementation doesn't support savepoints") + unless $self->can('_svp_begin'); + + push @{ $self->{savepoints} }, $name; + + $self->debugobj->svp_begin($name) if $self->debug; + + return $self->_svp_begin($name); +} + +sub svp_release { + my ($self, $name) = @_; + + $self->throw_exception ("You can't use savepoints outside a transaction") + if $self->{transaction_depth} == 0; + + $self->throw_exception ("Your Storage implementation doesn't support savepoints") + unless $self->can('_svp_release'); + + if (defined $name) { + $self->throw_exception ("Savepoint '$name' does not exist") + unless grep { $_ eq $name } @{ $self->{savepoints} }; + + # Dig through the stack until we find the one we are releasing. This keeps + # the stack up to date. + my $svp; + + do { $svp = pop @{ $self->{savepoints} } } while $svp ne $name; + } else { + $name = pop @{ $self->{savepoints} }; } + + $self->debugobj->svp_release($name) if $self->debug; + + return $self->_svp_release($name); +} + +sub svp_rollback { + my ($self, $name) = @_; + + $self->throw_exception ("You can't use savepoints outside a transaction") + if $self->{transaction_depth} == 0; + + $self->throw_exception ("Your Storage implementation doesn't support savepoints") + unless $self->can('_svp_rollback'); + + if (defined $name) { + # If they passed us a name, verify that it exists in the stack + unless(grep({ $_ eq $name } @{ $self->{savepoints} })) { + $self->throw_exception("Savepoint '$name' does not exist!"); + } + + # Dig through the stack until we find the one we are releasing. This keeps + # the stack up to date. 
+ while(my $s = pop(@{ $self->{savepoints} })) { + last if($s eq $name); + } + # Add the savepoint back to the stack, as a rollback doesn't remove the + # named savepoint, only everything after it. + push(@{ $self->{savepoints} }, $name); + } else { + # We'll assume they want to rollback to the last savepoint + $name = $self->{savepoints}->[-1]; + } + + $self->debugobj->svp_rollback($name) if $self->debug; + + return $self->_svp_rollback($name); +} + +sub _svp_generate_name { + my ($self) = @_; + + return 'savepoint_'.scalar(@{ $self->{'savepoints'} }); } sub txn_begin { my $self = shift; - $self->dbh_do($self->can('_dbh_txn_begin')) - if $self->{transaction_depth}++ == 0; + if($self->{transaction_depth} == 0) { + $self->debugobj->txn_begin() + if $self->debug; + $self->_dbh_begin_work; + } + elsif ($self->auto_savepoint) { + $self->svp_begin; + } + $self->{transaction_depth}++; +} + +sub _dbh_begin_work { + my $self = shift; + # being here implies we have AutoCommit => 1 + # if the user is utilizing txn_do - good for + # him, otherwise we need to ensure that the + # $dbh is healthy on BEGIN + my $dbh_method = $self->{_in_dbh_do} ? '_dbh' : 'dbh'; + $self->$dbh_method->begin_work; } -sub _dbh_txn_commit { - my ($self, $dbh) = @_; - if ($self->{transaction_depth} == 0) { - unless ($dbh->{AutoCommit}) { - $self->debugobj->txn_commit() - if ($self->debug); - $dbh->commit; - } +sub txn_commit { + my $self = shift; + if ($self->{transaction_depth} == 1) { + my $dbh = $self->_dbh; + $self->debugobj->txn_commit() + if ($self->debug); + $self->_dbh_commit; + $self->{transaction_depth} = 0 + if $self->_dbh_autocommit; } - else { - if (--$self->{transaction_depth} == 0) { - $self->debugobj->txn_commit() - if ($self->debug); - $dbh->commit; - } + elsif($self->{transaction_depth} > 1) { + $self->{transaction_depth}--; + $self->svp_release + if $self->auto_savepoint; } } -sub txn_commit { +sub _dbh_commit { my $self = shift; - $self->dbh_do($self->can('_dbh_txn_commit')); + $self->_dbh->commit; } -sub _dbh_txn_rollback { - my ($self, $dbh) = @_; - if ($self->{transaction_depth} == 0) { - unless ($dbh->{AutoCommit}) { +sub txn_rollback { + my $self = shift; + my $dbh = $self->_dbh; + eval { + if ($self->{transaction_depth} == 1) { $self->debugobj->txn_rollback() if ($self->debug); - $dbh->rollback; + $self->{transaction_depth} = 0 + if $self->_dbh_autocommit; + $self->_dbh_rollback; } - } - else { - if (--$self->{transaction_depth} == 0) { - $self->debugobj->txn_rollback() - if ($self->debug); - $dbh->rollback; + elsif($self->{transaction_depth} > 1) { + $self->{transaction_depth}--; + if ($self->auto_savepoint) { + $self->svp_rollback; + $self->svp_release; + } } else { die DBIx::Class::Storage::NESTED_ROLLBACK_EXCEPTION->new; } - } -} - -sub txn_rollback { - my $self = shift; - - eval { $self->dbh_do($self->can('_dbh_txn_rollback')) }; + }; if ($@) { my $error = $@; my $exception_class = "DBIx::Class::Storage::NESTED_ROLLBACK_EXCEPTION"; $error =~ /$exception_class/ and $self->throw_exception($error); - $self->{transaction_depth} = 0; # ensure that a failed rollback - $self->throw_exception($error); # resets the transaction depth + # ensure that a failed rollback resets the transaction depth + $self->{transaction_depth} = $self->_dbh_autocommit ? 0 : 1; + $self->throw_exception($error); } } +sub _dbh_rollback { + my $self = shift; + $self->_dbh->rollback; +} + # This used to be the top-half of _execute. It was split out to make it # easier to override in NoBindVars without duping the rest. 
It takes up # all of _execute's args, and emits $sql, @bind. sub _prep_for_execute { - my ($self, $op, $extra_bind, $ident, @args) = @_; + my ($self, $op, $extra_bind, $ident, $args) = @_; + + if( Scalar::Util::blessed($ident) && $ident->isa("DBIx::Class::ResultSource") ) { + $ident = $ident->from(); + } - my ($sql, @bind) = $self->sql_maker->$op($ident, @args); - unshift(@bind, @$extra_bind) if $extra_bind; - @bind = map { ref $_ ? ''.$_ : $_ } @bind; # stringify args + my ($sql, @bind) = $self->sql_maker->$op($ident, @$args); - return ($sql, @bind); + unshift(@bind, + map { ref $_ eq 'ARRAY' ? $_ : [ '!!dummy', $_ ] } @$extra_bind) + if $extra_bind; + return ($sql, \@bind); } -sub _execute { - my $self = shift; - my ($sql, @bind) = $self->_prep_for_execute(@_); +sub _fix_bind_params { + my ($self, @bind) = @_; + + ### Turn @bind from something like this: + ### ( [ "artist", 1 ], [ "cdid", 1, 3 ] ) + ### to this: + ### ( "'1'", "'1'", "'3'" ) + return + map { + if ( defined( $_ && $_->[1] ) ) { + map { qq{'$_'}; } @{$_}[ 1 .. $#$_ ]; + } + else { q{'NULL'}; } + } @bind; +} - if ($self->debug) { - my @debug_bind = map { defined $_ ? qq{'$_'} : q{'NULL'} } @bind; - $self->debugobj->query_start($sql, @debug_bind); - } +sub _query_start { + my ( $self, $sql, @bind ) = @_; - my $sth = $self->sth($sql); + if ( $self->debug ) { + @bind = $self->_fix_bind_params(@bind); - my $rv; - if ($sth) { - my $time = time(); - $rv = eval { $sth->execute(@bind) }; + $self->debugobj->query_start( $sql, @bind ); + } +} - if ($@ || !$rv) { - $self->throw_exception("Error executing '$sql': ".($@ || $sth->errstr)); +sub _query_end { + my ( $self, $sql, @bind ) = @_; + + if ( $self->debug ) { + @bind = $self->_fix_bind_params(@bind); + $self->debugobj->query_end( $sql, @bind ); + } +} + +sub _dbh_execute { + my ($self, $dbh, $op, $extra_bind, $ident, $bind_attributes, @args) = @_; + + my ($sql, $bind) = $self->_prep_for_execute($op, $extra_bind, $ident, \@args); + + $self->_query_start( $sql, @$bind ); + + my $sth = $self->sth($sql,$op); + + my $placeholder_index = 1; + + foreach my $bound (@$bind) { + my $attributes = {}; + my($column_name, @data) = @$bound; + + if ($bind_attributes) { + $attributes = $bind_attributes->{$column_name} + if defined $bind_attributes->{$column_name}; + } + + foreach my $data (@data) { + my $ref = ref $data; + $data = $ref && $ref ne 'ARRAY' ? ''.$data : $data; # stringify args (except arrayrefs) + + $sth->bind_param($placeholder_index, $data, $attributes); + $placeholder_index++; } - } else { - $self->throw_exception("'$sql' did not generate a statement."); - } - if ($self->debug) { - my @debug_bind = map { defined $_ ? qq{`$_'} : q{`NULL'} } @bind; - $self->debugobj->query_end($sql, @debug_bind); } - return (wantarray ? ($rv, $sth, @bind) : $rv); + + # Can this fail without throwing an exception anyways??? + my $rv = $sth->execute(); + $self->throw_exception($sth->errstr) if !$rv; + + $self->_query_end( $sql, @$bind ); + + return (wantarray ? ($rv, $sth, @$bind) : $rv); +} + +sub _execute { + my $self = shift; + $self->dbh_do('_dbh_execute', @_) } sub insert { - my ($self, $ident, $to_insert) = @_; - $self->throw_exception( - "Couldn't insert ".join(', ', - map "$_ => $to_insert->{$_}", keys %$to_insert - )." 
into ${ident}" - ) unless ($self->_execute('insert' => [], $ident, $to_insert)); - return $to_insert; + my ($self, $source, $to_insert) = @_; + +# redispatch to insert method of storage we reblessed into, if necessary + if (not $self->_driver_determined) { + $self->_determine_driver; + goto $self->can('insert'); + } + + my $ident = $source->from; + my $bind_attributes = $self->source_bind_attributes($source); + + my $updated_cols = {}; + + foreach my $col ( $source->columns ) { + if ( !defined $to_insert->{$col} ) { + my $col_info = $source->column_info($col); + + if ( $col_info->{auto_nextval} ) { + $updated_cols->{$col} = $to_insert->{$col} = $self->_sequence_fetch( + 'nextval', + $col_info->{sequence} || + $self->_dbh_get_autoinc_seq($self->last_dbh, $source) + ); + } + } + } + + $self->_execute('insert' => [], $source, $bind_attributes, $to_insert); + + return $updated_cols; } ## Still not quite perfect, and EXPERIMENTAL -## Currently it is assumed that all values passed will be "normal", i.e. not +## Currently it is assumed that all values passed will be "normal", i.e. not ## scalar refs, or at least, all the same type as the first set, the statement is ## only prepped once. sub insert_bulk { - my ($self, $table, $cols, $data) = @_; + my ($self, $source, $cols, $data) = @_; my %colvalues; + my $table = $source->from; @colvalues{@$cols} = (0..$#$cols); my ($sql, @bind) = $self->sql_maker->insert($table, \%colvalues); -# print STDERR "BIND".Dumper(\@bind); - if ($self->debug) { - my @debug_bind = map { defined $_ ? qq{'$_'} : q{'NULL'} } @bind; - $self->debugobj->query_start($sql, @debug_bind); - } + $self->_determine_driver; + + $self->_query_start( $sql, @bind ); my $sth = $self->sth($sql); # @bind = map { ref $_ ? ''.$_ : $_ } @bind; # stringify args - my $rv; ## This must be an arrayref, else nothing works! my $tuple_status = []; -# use Data::Dumper; -# print STDERR Dumper($data); - if ($sth) { - my $time = time(); - $rv = eval { $sth->execute_array({ ArrayTupleFetch => sub { my $values = shift @$data; return if !$values; return [ @{$values}[@bind] ]}, - ArrayTupleStatus => $tuple_status }) }; -# print STDERR Dumper($tuple_status); -# print STDERR "RV: $rv\n"; - if ($@ || !defined $rv) { - my $errors = ''; - foreach my $tuple (@$tuple_status) - { - $errors .= "\n" . $tuple->[1] if(ref $tuple); - } - $self->throw_exception("Error executing '$sql': ".($@ || $errors)); + + ## Get the bind_attributes, if any exist + my $bind_attributes = $self->source_bind_attributes($source); + + ## Bind the values and execute + my $placeholder_index = 1; + + foreach my $bound (@bind) { + + my $attributes = {}; + my ($column_name, $data_index) = @$bound; + + if( $bind_attributes ) { + $attributes = $bind_attributes->{$column_name} + if defined $bind_attributes->{$column_name}; } - } else { - $self->throw_exception("'$sql' did not generate a statement."); + + my @data = map { $_->[$data_index] } @$data; + + $sth->bind_param_array( $placeholder_index, [@data], $attributes ); + $placeholder_index++; } - if ($self->debug) { - my @debug_bind = map { defined $_ ? 
qq{`$_'} : q{`NULL'} } @bind; - $self->debugobj->query_end($sql, @debug_bind); + my $rv = eval { $sth->execute_array({ArrayTupleStatus => $tuple_status}) }; + if (my $err = $@) { + my $i = 0; + ++$i while $i <= $#$tuple_status && !ref $tuple_status->[$i]; + + $self->throw_exception($sth->errstr || "Unexpected populate error: $err") + if ($i > $#$tuple_status); + + require Data::Dumper; + local $Data::Dumper::Terse = 1; + local $Data::Dumper::Indent = 1; + local $Data::Dumper::Useqq = 1; + local $Data::Dumper::Quotekeys = 0; + + $self->throw_exception(sprintf "%s for populate slice:\n%s", + $tuple_status->[$i][1], + Data::Dumper::Dumper( + { map { $cols->[$_] => $data->[$i][$_] } (0 .. $#$cols) } + ), + ); } + $self->throw_exception($sth->errstr) if !$rv; + + $self->_query_end( $sql, @bind ); return (wantarray ? ($rv, $sth, @bind) : $rv); } sub update { - return shift->_execute('update' => [], @_); + my $self = shift @_; + my $source = shift @_; + $self->_determine_driver; + my $bind_attributes = $self->source_bind_attributes($source); + + return $self->_execute('update' => [], $source, $bind_attributes, @_); } + sub delete { - return shift->_execute('delete' => [], @_); + my $self = shift @_; + my $source = shift @_; + $self->_determine_driver; + my $bind_attrs = $self->source_bind_attributes($source); + + return $self->_execute('delete' => [], $source, $bind_attrs, @_); +} + +# We were sent here because the $rs contains a complex search +# which will require a subquery to select the correct rows +# (i.e. joined or limited resultsets) +# +# Genarating a single PK column subquery is trivial and supported +# by all RDBMS. However if we have a multicolumn PK, things get ugly. +# Look at _multipk_update_delete() +sub _subq_update_delete { + my $self = shift; + my ($rs, $op, $values) = @_; + + my $rsrc = $rs->result_source; + + # we already check this, but double check naively just in case. Should be removed soon + my $sel = $rs->_resolved_attrs->{select}; + $sel = [ $sel ] unless ref $sel eq 'ARRAY'; + my @pcols = $rsrc->primary_columns; + if (@$sel != @pcols) { + $self->throw_exception ( + 'Subquery update/delete can not be called on resultsets selecting a' + .' number of columns different than the number of primary keys' + ); + } + + if (@pcols == 1) { + return $self->$op ( + $rsrc, + $op eq 'update' ? $values : (), + { $pcols[0] => { -in => $rs->as_query } }, + ); + } + + else { + return $self->_multipk_update_delete (@_); + } +} + +# ANSI SQL does not provide a reliable way to perform a multicol-PK +# resultset update/delete involving subqueries. So by default resort +# to simple (and inefficient) delete_all style per-row opearations, +# while allowing specific storages to override this with a faster +# implementation. +# +sub _multipk_update_delete { + return shift->_per_row_update_delete (@_); +} + +# This is the default loop used to delete/update rows for multi PK +# resultsets, and used by mysql exclusively (because it can't do anything +# else). +# +# We do not use $row->$op style queries, because resultset update/delete +# is not expected to cascade (this is what delete_all/update_all is for). +# +# There should be no race conditions as the entire operation is rolled +# in a transaction. 
+# +sub _per_row_update_delete { + my $self = shift; + my ($rs, $op, $values) = @_; + + my $rsrc = $rs->result_source; + my @pcols = $rsrc->primary_columns; + + my $guard = $self->txn_scope_guard; + + # emulate the return value of $sth->execute for non-selects + my $row_cnt = '0E0'; + + my $subrs_cur = $rs->cursor; + while (my @pks = $subrs_cur->next) { + + my $cond; + for my $i (0.. $#pcols) { + $cond->{$pcols[$i]} = $pks[$i]; + } + + $self->$op ( + $rsrc, + $op eq 'update' ? $values : (), + $cond, + ); + + $row_cnt++; + } + + $guard->commit; + + return $row_cnt; } sub _select { - my ($self, $ident, $select, $condition, $attrs) = @_; - my $order = $attrs->{order_by}; - if (ref $condition eq 'SCALAR') { - $order = $1 if $$condition =~ s/ORDER BY (.*)$//i; + my $self = shift; + + # localization is neccessary as + # 1) there is no infrastructure to pass this around before SQLA2 + # 2) _select_args sets it and _prep_for_execute consumes it + my $sql_maker = $self->sql_maker; + local $sql_maker->{_dbic_rs_attrs}; + + return $self->_execute($self->_select_args(@_)); +} + +sub _select_args_to_query { + my $self = shift; + + # localization is neccessary as + # 1) there is no infrastructure to pass this around before SQLA2 + # 2) _select_args sets it and _prep_for_execute consumes it + my $sql_maker = $self->sql_maker; + local $sql_maker->{_dbic_rs_attrs}; + + # my ($op, $bind, $ident, $bind_attrs, $select, $cond, $order, $rows, $offset) + # = $self->_select_args($ident, $select, $cond, $attrs); + my ($op, $bind, $ident, $bind_attrs, @args) = + $self->_select_args(@_); + + # my ($sql, $prepared_bind) = $self->_prep_for_execute($op, $bind, $ident, [ $select, $cond, $order, $rows, $offset ]); + my ($sql, $prepared_bind) = $self->_prep_for_execute($op, $bind, $ident, \@args); + $prepared_bind ||= []; + + return wantarray + ? ($sql, $prepared_bind, $bind_attrs) + : \[ "($sql)", @$prepared_bind ] + ; +} + +sub _select_args { + my ($self, $ident, $select, $where, $attrs) = @_; + + my ($alias2source, $rs_alias) = $self->_resolve_ident_sources ($ident); + + my $sql_maker = $self->sql_maker; + $sql_maker->{_dbic_rs_attrs} = { + %$attrs, + select => $select, + from => $ident, + where => $where, + $rs_alias + ? ( _source_handle => $alias2source->{$rs_alias}->handle ) + : () + , + }; + + # calculate bind_attrs before possible $ident mangling + my $bind_attrs = {}; + for my $alias (keys %$alias2source) { + my $bindtypes = $self->source_bind_attributes ($alias2source->{$alias}) || {}; + for my $col (keys %$bindtypes) { + + my $fqcn = join ('.', $alias, $col); + $bind_attrs->{$fqcn} = $bindtypes->{$col} if $bindtypes->{$col}; + + # Unqialified column names are nice, but at the same time can be + # rather ambiguous. What we do here is basically go along with + # the loop, adding an unqualified column slot to $bind_attrs, + # alongside the fully qualified name. As soon as we encounter + # another column by that name (which would imply another table) + # we unset the unqualified slot and never add any info to it + # to avoid erroneous type binding. If this happens the users + # only choice will be to fully qualify his column name + + if (exists $bind_attrs->{$col}) { + $bind_attrs->{$col} = {}; + } + else { + $bind_attrs->{$col} = $bind_attrs->{$fqcn}; + } + } } - if (exists $attrs->{group_by} || $attrs->{having}) { - $order = { - group_by => $attrs->{group_by}, - having => $attrs->{having}, - ($order ? 
(order_by => $order) : ()) - }; + + # adjust limits + if ( + $attrs->{software_limit} + || + $sql_maker->_default_limit_syntax eq "GenericSubQ" + ) { + $attrs->{software_limit} = 1; } - my @args = ('select', $attrs->{bind}, $ident, $select, $condition, $order); - if ($attrs->{software_limit} || - $self->sql_maker->_default_limit_syntax eq "GenericSubQ") { - $attrs->{software_limit} = 1; - } else { + else { $self->throw_exception("rows attribute must be positive if present") if (defined($attrs->{rows}) && !($attrs->{rows} > 0)); - push @args, $attrs->{rows}, $attrs->{offset}; + + # MySQL actually recommends this approach. I cringe. + $attrs->{rows} = 2**48 if not defined $attrs->{rows} and defined $attrs->{offset}; + } + + my @limit; + + # see if we need to tear the prefetch apart (either limited has_many or grouped prefetch) + # otherwise delegate the limiting to the storage, unless software limit was requested + if ( + ( $attrs->{rows} && keys %{$attrs->{collapse}} ) + || + ( $attrs->{group_by} && @{$attrs->{group_by}} && + $attrs->{_prefetch_select} && @{$attrs->{_prefetch_select}} ) + ) { + ($ident, $select, $where, $attrs) + = $self->_adjust_select_args_for_complex_prefetch ($ident, $select, $where, $attrs); + } + elsif (! $attrs->{software_limit} ) { + push @limit, $attrs->{rows}, $attrs->{offset}; + } + +### + # This would be the point to deflate anything found in $where + # (and leave $attrs->{bind} intact). Problem is - inflators historically + # expect a row object. And all we have is a resultsource (it is trivial + # to extract deflator coderefs via $alias2source above). + # + # I don't see a way forward other than changing the way deflators are + # invoked, and that's just bad... +### + + my $order = { map + { $attrs->{$_} ? ( $_ => $attrs->{$_} ) : () } + (qw/order_by group_by having/ ) + }; + + return ('select', $attrs->{bind}, $ident, $bind_attrs, $select, $where, $order, @limit); +} + +# +# This is the code producing joined subqueries like: +# SELECT me.*, other.* FROM ( SELECT me.* FROM ... ) JOIN other ON ... +# +sub _adjust_select_args_for_complex_prefetch { + my ($self, $from, $select, $where, $attrs) = @_; + + $self->throw_exception ('Complex prefetches are not supported on resultsets with a custom from attribute') + if (ref $from ne 'ARRAY'); + + # copies for mangling + $from = [ @$from ]; + $select = [ @$select ]; + $attrs = { %$attrs }; + + # separate attributes + my $sub_attrs = { %$attrs }; + delete $attrs->{$_} for qw/where bind rows offset group_by having/; + delete $sub_attrs->{$_} for qw/for collapse _prefetch_select _collapse_order_by select as/; + + my $select_root_alias = $attrs->{alias}; + my $sql_maker = $self->sql_maker; + + # create subquery select list - consider only stuff *not* brought in by the prefetch + my $sub_select = []; + my $sub_group_by; + for my $i (0 .. 
@{$attrs->{select}} - @{$attrs->{_prefetch_select}} - 1) { + my $sel = $attrs->{select}[$i]; + + # alias any functions to the dbic-side 'as' label + # adjust the outer select accordingly + if (ref $sel eq 'HASH' && !$sel->{-select}) { + $sel = { -select => $sel, -as => $attrs->{as}[$i] }; + $select->[$i] = join ('.', $attrs->{alias}, ($attrs->{as}[$i] || "select_$i") ); + } + + push @$sub_select, $sel; + } + + # bring over all non-collapse-induced order_by into the inner query (if any) + # the outer one will have to keep them all + delete $sub_attrs->{order_by}; + if (my $ord_cnt = @{$attrs->{order_by}} - @{$attrs->{_collapse_order_by}} ) { + $sub_attrs->{order_by} = [ + @{$attrs->{order_by}}[ 0 .. $ord_cnt - 1] + ]; + } + + # mangle {from}, keep in mind that $from is "headless" from here on + my $join_root = shift @$from; + + my %inner_joins; + my %join_info = map { $_->[0]{-alias} => $_->[0] } (@$from); + + # in complex search_related chains $select_root_alias may *not* be + # 'me' so always include it in the inner join + $inner_joins{$select_root_alias} = 1 if ($join_root->{-alias} ne $select_root_alias); + + + # decide which parts of the join will remain on the inside + # + # this is not a very viable optimisation, but it was written + # before I realised this, so might as well remain. We can throw + # away _any_ branches of the join tree that are: + # 1) not mentioned in the condition/order + # 2) left-join leaves (or left-join leaf chains) + # Most of the join conditions will not satisfy this, but for real + # complex queries some might, and we might make some RDBMS happy. + # + # + # since we do not have introspectable SQLA, we fall back to ugly + # scanning of raw SQL for WHERE, and for pieces of ORDER BY + # in order to determine what goes into %inner_joins + # It may not be very efficient, but it's a reasonable stop-gap + { + # produce stuff unquoted, so it can be scanned + local $sql_maker->{quote_char}; + my $sep = $self->_sql_maker_opts->{name_sep} || '.'; + $sep = "\Q$sep\E"; + + my @order_by = (map + { ref $_ ? $_->[0] : $_ } + $sql_maker->_order_by_chunks ($sub_attrs->{order_by}) + ); + + my $where_sql = $sql_maker->where ($where); + my $select_sql = $sql_maker->_recurse_fields ($sub_select); + + # sort needed joins + for my $alias (keys %join_info) { + + # any table alias found on a column name in where or order_by + # gets included in %inner_joins + # Also any parent joins that are needed to reach this particular alias + for my $piece ($select_sql, $where_sql, @order_by ) { + if ($piece =~ /\b $alias $sep/x) { + $inner_joins{$alias} = 1; + } + } + } + } + + # scan for non-leaf/non-left joins and mark as needed + # also mark all ancestor joins that are needed to reach this particular alias + # (e.g. join => { cds => 'tracks' } - tracks will bring cds too ) + # + # traverse by the size of the -join_path i.e. reverse depth first + for my $alias (sort { @{$join_info{$b}{-join_path}} <=> @{$join_info{$a}{-join_path}} } (keys %join_info) ) { + + my $j = $join_info{$alias}; + $inner_joins{$alias} = 1 if (! 
$j->{-join_type} || ($j->{-join_type} !~ /^left$/i) );
+
+    if ($inner_joins{$alias}) {
+      $inner_joins{$_} = 1 for (@{$j->{-join_path}});
+    }
+  }
+
+  # construct the inner $from for the subquery
+  my $inner_from = [ $join_root ];
+  for my $j (@$from) {
+    push @$inner_from, $j if $inner_joins{$j->[0]{-alias}};
+  }
+
+  # if a multi-type join was needed in the subquery ("multi" is indicated by
+  # presence in {collapse}) - add a group_by to simulate the collapse in the subq
+  unless ($sub_attrs->{group_by}) {
+    for my $alias (keys %inner_joins) {
+
+      # the dot comes from some weirdness in collapse
+      # remove after the rewrite
+      if ($attrs->{collapse}{".$alias"}) {
+        $sub_attrs->{group_by} ||= $sub_select;
+        last;
+      }
+    }
+  }
+
+  # generate the subquery
+  my $subq = $self->_select_args_to_query (
+    $inner_from,
+    $sub_select,
+    $where,
+    $sub_attrs
+  );
+  my $subq_joinspec = {
+    -alias => $select_root_alias,
+    -source_handle => $join_root->{-source_handle},
+    $select_root_alias => $subq,
+  };
+
+  # Generate a new from (really just replace the join slot with the subquery)
+  # Before we would start the outer chain from the subquery itself (i.e.
+  # SELECT ... FROM (SELECT ... ) alias JOIN ..., but this turned out to be
+  # a bad idea for search_related, as the root of the chain was effectively
+  # lost (i.e. $artist_rs->search_related ('cds'... ) would result in alias
+  # of 'cds', which would prevent doing things like order_by artist.*)
+  # See t/prefetch/via_search_related.t for a better idea
+  my @outer_from;
+  if ($join_root->{-alias} eq $select_root_alias) { # just swap the root part and we're done
+    @outer_from = (
+      $subq_joinspec,
+      @$from,
+    )
+  }
+  else { # this is trickier
+    @outer_from = ($join_root);
+
+    for my $j (@$from) {
+      if ($j->[0]{-alias} eq $select_root_alias) {
+        push @outer_from, [
+          $subq_joinspec,
+          @{$j}[1 .. $#$j],
+        ];
+      }
+      else {
+        push @outer_from, $j;
+      }
+    }
+  }
+
+  # This is totally horrific - the $where ends up in both the inner and outer query
+  # Unfortunately not much can be done until SQLA2 introspection arrives, and even
+  # then if where conditions apply to the *right* side of the prefetch, you may have
+  # to both filter the inner select (e.g. to apply a limit) and then have to re-filter
+  # the outer select to exclude joins you didn't want in the first place
+  #
+  # OTOH it can be seen as a plus: (note that this query would make a DBA cry ;)
+  return (\@outer_from, $select, $where, $attrs);
+}
+
+sub _resolve_ident_sources {
+  my ($self, $ident) = @_;
+
+  my $alias2source = {};
+  my $rs_alias;
+
+  # the reason this is so contrived is that $ident may be a {from}
+  # structure, specifying multiple tables to join
+  if ( Scalar::Util::blessed($ident) && $ident->isa("DBIx::Class::ResultSource") ) {
+    # this is compat mode for insert/update/delete which do not deal with aliases
+    $alias2source->{me} = $ident;
+    $rs_alias = 'me';
+  }
+  elsif (ref $ident eq 'ARRAY') {
+
+    for (@$ident) {
+      my $tabinfo;
+      if (ref $_ eq 'HASH') {
+        $tabinfo = $_;
+        $rs_alias = $tabinfo->{-alias};
+      }
+      if (ref $_ eq 'ARRAY' and ref $_->[0] eq 'HASH') {
+        $tabinfo = $_->[0];
+      }
+
+      $alias2source->{$tabinfo->{-alias}} = $tabinfo->{-source_handle}->resolve
+        if ($tabinfo->{-source_handle});
+    }
   }
-  return $self->_execute(@args);
+
+  return ($alias2source, $rs_alias);
+}
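
# Editor's sketch (hedged, not taken from the patch): the two $ident shapes
# _resolve_ident_sources() distinguishes. The handles and the join condition
# are invented placeholders; the -alias/-source_handle keys follow the code above.
#
#   # compat mode - a bare result source, aliased as 'me':
#   my ($alias2src, $root) = $self->_resolve_ident_sources($artist_source);
#
#   # {from} mode - a root hashref plus [ \%join_info, \%join_cond ] entries:
#   my ($map, $root_alias) = $self->_resolve_ident_sources([
#     { -alias => 'me',  -source_handle => $artist_handle },
#     [ { -alias => 'cds', -source_handle => $cd_handle }, $join_cond ],
#   ]);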
+
+# Takes $ident, \@column_names
+#
+# returns { $column_name => \%column_info, ... }
+# also note: this adds -result_source => $rsrc to the column info
+#
+# usage:
+#   my $col_sources = $self->_resolve_column_info($ident, @column_names);
+sub _resolve_column_info {
+  my ($self, $ident, $colnames) = @_;
+  my ($alias2src, $root_alias) = $self->_resolve_ident_sources($ident);
+
+  my $sep = $self->_sql_maker_opts->{name_sep} || '.';
+  $sep = "\Q$sep\E";
+
+  my (%return, %seen_cols);
+
+  # compile a global list of column names, to be able to properly
+  # disambiguate unqualified column names (if at all possible)
+  for my $alias (keys %$alias2src) {
+    my $rsrc = $alias2src->{$alias};
+    for my $colname ($rsrc->columns) {
+      push @{$seen_cols{$colname}}, $alias;
+    }
+  }
+
+  COLUMN:
+  foreach my $col (@$colnames) {
+    my ($alias, $colname) = $col =~ m/^ (?: ([^$sep]+) $sep)? (.+) $/x;
+
+    unless ($alias) {
+      # see if the column was seen exactly once (so we know which rsrc it came from)
+      if ($seen_cols{$colname} and @{$seen_cols{$colname}} == 1) {
+        $alias = $seen_cols{$colname}[0];
+      }
+      else {
+        next COLUMN;
+      }
+    }
+
+    my $rsrc = $alias2src->{$alias};
+    $return{$col} = $rsrc && {
+      %{$rsrc->column_info($colname)},
+      -result_source => $rsrc,
+      -source_alias => $alias,
+    };
+  }
+
+  return \%return;
+}
+
+# Returns a counting SELECT for a simple count
+# query. Abstracted so that a storage could override
+# this to { count => 'firstcol' } or whatever makes
+# sense as a performance optimization
+sub _count_select {
+  #my ($self, $source, $rs_attrs) = @_;
+  return { count => '*' };
+}
+
+# Returns a SELECT which will end up in the subselect
+# There may or may not be a group_by, as the subquery
+# might have been called to accommodate a limit
+#
+# Most databases would be happy with whatever ends up
+# here, but some choke in various ways.
+#
+sub _subq_count_select {
+  my ($self, $source, $rs_attrs) = @_;
+  return $rs_attrs->{group_by} if $rs_attrs->{group_by};
+
+  my @pcols = map { join '.', $rs_attrs->{alias}, $_ } ($source->primary_columns);
+  return @pcols ? \@pcols : [ 1 ];
+}
+
+
+sub source_bind_attributes {
+  my ($self, $source) = @_;
+
+  my $bind_attributes;
+  foreach my $column ($source->columns) {
+
+    my $data_type = $source->column_info($column)->{data_type} || '';
+    $bind_attributes->{$column} = $self->bind_attribute_by_data_type($data_type)
+      if $data_type;
+  }
+
+  return $bind_attributes;
+}
 
 =head2 select
 
@@ -951,13 +1898,17 @@
 Handle a SQL select statement.
 
 =cut
 
 sub select {
   my $self = shift;
   my ($ident, $select, $condition, $attrs) = @_;
-  return $self->cursor->new($self, \@_, $attrs);
+  return $self->cursor_class->new($self, \@_, $attrs);
 }
 
 sub select_single {
   my $self = shift;
   my ($rv, $sth, @bind) = $self->_select(@_);
   my @row = $sth->fetchrow_array;
+  my @nextrow = $sth->fetchrow_array if @row;
+  if(@row && @nextrow) {
+    carp "Query returned more than one row. SQL that returns multiple rows is DEPRECATED for ->find and ->single";
+  }
   # Need to call finish() to work round broken DBDs
   $sth->finish();
   return @row;
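
# Editor's illustration (hedged; $schema and the resultset are invented):
# ->single funnels through select_single() above and now carps on multi-row
# results, while ->first fetches through the cursor and is unaffected.
#
#   my $rs = $schema->resultset('Person')->search({ last_name => 'Smith' });
#   my $p  = $rs->single;   # warns if more than one Smith matches
#   my $q  = $rs->first;    # cursor-based, no warning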
"): $sql" - ) if !$sth; + # XXX You would think RaiseError would make this impossible, + # but apparently that's not true :( + $self->throw_exception($dbh->errstr) if !$sth; $sth; } sub sth { my ($self, $sql) = @_; - $self->dbh_do($self->can('_dbh_sth'), $sql); + $self->dbh_do('_dbh_sth', $sql); } sub _dbh_columns_info_for { @@ -1020,18 +1971,12 @@ sub _dbh_columns_info_for { } my %result; - my $sth = $dbh->prepare("SELECT * FROM $table WHERE 1=0"); + my $sth = $dbh->prepare($self->sql_maker->select($table, undef, \'1 = 0')); $sth->execute; my @columns = @{$sth->{NAME_lc}}; for my $i ( 0 .. $#columns ){ my %column_info; - my $type_num = $sth->{TYPE}->[$i]; - my $type_name; - if(defined $type_num && $dbh->can('type_info')) { - my $type_info = $dbh->type_info($type_num); - $type_name = $type_info->{TYPE_NAME} if $type_info; - } - $column_info{data_type} = $type_name ? $type_name : $type_num; + $column_info{data_type} = $sth->{TYPE}->[$i]; $column_info{size} = $sth->{PRECISION}->[$i]; $column_info{is_nullable} = $sth->{NULLABLE}->[$i] ? 1 : 0; @@ -1042,13 +1987,25 @@ sub _dbh_columns_info_for { $result{$columns[$i]} = \%column_info; } + $sth->finish; + + foreach my $col (keys %result) { + my $colinfo = $result{$col}; + my $type_num = $colinfo->{data_type}; + my $type_name; + if(defined $type_num && $dbh->can('type_info')) { + my $type_info = $dbh->type_info($type_num); + $type_name = $type_info->{TYPE_NAME} if $type_info; + $colinfo->{data_type} = $type_name if $type_name; + } + } return \%result; } sub columns_info_for { my ($self, $table) = @_; - $self->dbh_do($self->can('_dbh_columns_info_for'), $table); + $self->dbh_do('_dbh_columns_info_for', $table); } =head2 last_insert_id @@ -1058,14 +2015,23 @@ Return the row id of the last insert. =cut sub _dbh_last_insert_id { - my ($self, $dbh, $source, $col) = @_; - # XXX This is a SQLite-ism as a default... is there a DBI-generic way? - $dbh->func('last_insert_rowid'); + # All Storage's need to register their own _dbh_last_insert_id + # the old SQLite-based method was highly inappropriate + + my $self = shift; + my $class = ref $self; + $self->throw_exception (<dbh_do($self->can('_dbh_last_insert_id'), @_); + $self->dbh_do('_dbh_last_insert_id', @_); } =head2 sqlt_type @@ -1074,72 +2040,224 @@ Returns the database driver name. =cut -sub sqlt_type { shift->dbh->{Driver}->{Name} } +sub sqlt_type { shift->last_dbh->{Driver}->{Name} } + +=head2 bind_attribute_by_data_type + +Given a datatype from column info, returns a database specific bind +attribute for C<< $dbh->bind_param($val,$attribute) >> or nothing if we will +let the database planner just handle it. + +Generally only needed for special case column types, like bytea in postgres. + +=cut + +sub bind_attribute_by_data_type { + return; +} + +=head2 is_datatype_numeric + +Given a datatype from column_info, returns a boolean value indicating if +the current RDBMS considers it a numeric value. This controls how +L decides whether to mark the column as +dirty - when the datatype is deemed numeric a C<< != >> comparison will +be performed instead of the usual C. + +=cut + +sub is_datatype_numeric { + my ($self, $dt) = @_; + + return 0 unless $dt; + + return $dt =~ /^ (?: + numeric | int(?:eger)? | (?:tiny|small|medium|big)int | dec(?:imal)? | real | float | double (?: \s+ precision)? 
+
+
 =head2 create_ddl_dir (EXPERIMENTAL)
 
 =over 4
 
-=item Arguments: $schema \@databases, $version, $directory, $sqlt_args
+=item Arguments: $schema \@databases, $version, $directory, $preversion, \%sqlt_args
 
 =back
 
 Creates a SQL file based on the Schema, for each of the specified
-database types, in the given directory.
+database engines in C<\@databases> in the given directory.
+(note: specify L<SQL::Translator> names, not L<DBI> driver names).
+
+Given a previous version number, this will also create a file containing
+the ALTER TABLE statements to transform the previous schema into the
+current one. Note that these statements may contain C<DROP TABLE> or
+C<ALTER TABLE> statements that can potentially destroy data.
+
+The file names are created using the C<ddl_filename> method below; please
+override this method in your schema if you would like a different file
+name format. For the ALTER file, the same format is used, replacing
+$version in the name with "$preversion-$version".
+
+See L<SQL::Translator/METHODS> for a list of values for C<\%sqlt_args>.
+The most common value for this would be C<< { add_drop_table => 1 } >>
+to have the SQL produced include a C<DROP TABLE> statement for each table
+created. For quoting purposes supply C<quote_table_names> and
+C<quote_field_names>.
+
+If no arguments are passed, then the following default values are assumed:
+
+=over 4
+
+=item databases  - ['MySQL', 'SQLite', 'PostgreSQL']
+
+=item version    - $schema->schema_version
+
+=item directory  - './'
+
+=item preversion - 
+
+=back
+
+By default, C<\%sqlt_args> will have
+
+ { add_drop_table => 1, ignore_constraint_names => 1, ignore_index_names => 1 }
+
+merged with the hash passed in. To disable any of those features, pass in a
+hashref like the following
+
+ { ignore_constraint_names => 0, # ... other options }
+
 Note that this feature is currently EXPERIMENTAL and may not work correctly
 across all databases, or fully handle complex relationships.
 
+WARNING: Please check all SQL files created, before applying them.
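
For illustration, a typical call through the schema might look like this
(a sketch - the class name, versions and paths are invented):

  my $schema = MySchema->connect($dsn, $user, $pass);
  $schema->create_ddl_dir(
    ['MySQL', 'SQLite'],      # SQL::Translator producer names
    '0.2',                    # version being dumped
    './sql',                  # output directory
    '0.1',                    # preversion, to also get ALTER diff files
    { add_drop_table => 0 },  # merged over the defaults shown above
  );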
+
 =cut
 
-sub create_ddl_dir
-{
-  my ($self, $schema, $databases, $version, $dir, $sqltargs) = @_;
+sub create_ddl_dir {
+  my ($self, $schema, $databases, $version, $dir, $preversion, $sqltargs) = @_;
 
-  if(!$dir || !-d $dir)
-  {
-    warn "No directory given, using ./\n";
+  if(!$dir || !-d $dir) {
+    carp "No directory given, using ./\n";
     $dir = "./";
   }
   $databases ||= ['MySQL', 'SQLite', 'PostgreSQL'];
   $databases = [ $databases ] if(ref($databases) ne 'ARRAY');
-  $version ||= $schema->VERSION || '1.x';
-  $sqltargs = { ( add_drop_table => 1 ), %{$sqltargs || {}} };
 
-  eval "use SQL::Translator";
-  $self->throw_exception("Can't deploy without SQL::Translator: $@") if $@;
+  my $schema_version = $schema->schema_version || '1.x';
+  $version ||= $schema_version;
 
-  my $sqlt = SQL::Translator->new($sqltargs);
-  foreach my $db (@$databases)
-  {
+  $sqltargs = {
+    add_drop_table => 1,
+    ignore_constraint_names => 1,
+    ignore_index_names => 1,
+    %{$sqltargs || {}}
+  };
+
+  $self->throw_exception(q{Can't create a ddl file without SQL::Translator 0.09003: '}
+      . $self->_check_sqlt_message . q{'})
+      if !$self->_check_sqlt_version;
+
+  my $sqlt = SQL::Translator->new( $sqltargs );
+
+  $sqlt->parser('SQL::Translator::Parser::DBIx::Class');
+  my $sqlt_schema = $sqlt->translate({ data => $schema })
+    or $self->throw_exception ($sqlt->error);
+
+  foreach my $db (@$databases) {
     $sqlt->reset();
-    $sqlt->parser('SQL::Translator::Parser::DBIx::Class');
-#    $sqlt->parser_args({'DBIx::Class' => $schema);
-    $sqlt->data($schema);
+    $sqlt->{schema} = $sqlt_schema;
     $sqlt->producer($db);
 
     my $file;
-    my $filename = $schema->ddl_filename($db, $dir, $version);
-    if(-e $filename)
-    {
-      $self->throw_exception("$filename already exists, skipping $db");
-      next;
+    my $filename = $schema->ddl_filename($db, $version, $dir);
+    if (-e $filename && ($version eq $schema_version)) {
+      # if we are dumping the current version, overwrite the DDL
+      carp "Overwriting existing DDL file - $filename";
+      unlink($filename);
     }
-    open($file, ">$filename")
-      or $self->throw_exception("Can't open $filename for writing ($!)");
+
     my $output = $sqlt->translate;
-#use Data::Dumper;
-#    print join(":", keys %{$schema->source_registrations});
-#    print Dumper($sqlt->schema);
-    if(!$output)
-    {
-      $self->throw_exception("Failed to translate to $db. (" . $sqlt->error . ")");
+    if(!$output) {
+      carp("Failed to translate to $db, skipping. (" . $sqlt->error . ")");
+      next;
+    }
+    if(!open($file, ">$filename")) {
+      $self->throw_exception("Can't open $filename for writing ($!)");
      next;
    }
    print $file $output;
    close($file);
-  }
 
+    next unless ($preversion);
+
+    require SQL::Translator::Diff;
+
+    my $prefilename = $schema->ddl_filename($db, $preversion, $dir);
+    if(!-e $prefilename) {
+      carp("No previous schema file found ($prefilename)");
+      next;
+    }
+
+    my $difffile = $schema->ddl_filename($db, $version, $dir, $preversion);
+    if(-e $difffile) {
+      carp("Overwriting existing diff file - $difffile");
+      unlink($difffile);
+    }
+
+    my $source_schema;
+    {
+      my $t = SQL::Translator->new($sqltargs);
+      $t->debug( 0 );
+      $t->trace( 0 );
+
+      $t->parser( $db )
+        or $self->throw_exception ($t->error);
+
+      my $out = $t->translate( $prefilename )
+        or $self->throw_exception ($t->error);
+
+      $source_schema = $t->schema;
+
+      $source_schema->name( $prefilename )
+        unless ( $source_schema->name );
+    }
+
+    # The "new" style of producers has sane normalization and can support
+    # diffing a SQL file against a DBIC->SQLT schema. Old style ones don't,
+    # and we have to diff parsed SQL against parsed SQL.
+    my $dest_schema = $sqlt_schema;
+
+    unless ( "SQL::Translator::Producer::$db"->can('preprocess_schema') ) {
+      my $t = SQL::Translator->new($sqltargs);
+      $t->debug( 0 );
+      $t->trace( 0 );
+
+      $t->parser( $db )
+        or $self->throw_exception ($t->error);
+
+      my $out = $t->translate( $filename )
+        or $self->throw_exception ($t->error);
+
+      $dest_schema = $t->schema;
+
+      $dest_schema->name( $filename )
+        unless $dest_schema->name;
+    }
+
+    my $diff = SQL::Translator::Diff::schema_diff($source_schema, $db,
+                                                  $dest_schema,   $db,
+                                                  $sqltargs
+                                                 );
+    if(!open $file, ">$difffile") {
+      $self->throw_exception("Can't write to $difffile ($!)");
+      next;
+    }
+    print $file $diff;
+    close($file);
+  }
 }
 
 =head2 deployment_statements
 
@@ -1151,8 +2269,9 @@
 
 =back
 
 Returns the statements used by L</deploy> and L<DBIx::Class::Schema/deploy>.
-The database driver name is given by C<$type>, though the value from
-L</sqlt_type> is used if it is not specified.
+
+The L<SQL::Translator> (not L<DBI>) database driver name can be explicitly
+provided in C<$type>, otherwise the result of L</sqlt_type> is used as default.
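
For example (a sketch), the DDL for an engine other than the connected one
can be requested explicitly:

  my @ddl = $schema->storage->deployment_statements($schema, 'PostgreSQL');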
C<$directory> is used to return statements from files in a previously created
L</create_ddl_dir> directory and is optional. The filenames are constructed
@@ -1168,52 +2287,70 @@
 
 See L<SQL::Translator/METHODS> for a list of values for C<$sqlt_args>.
 
 =cut
 
 sub deployment_statements {
   my ($self, $schema, $type, $version, $dir, $sqltargs) = @_;
   # Need to be connected to get the correct sqlt_type
-  $self->ensure_connected() unless $type;
+  $self->last_dbh() unless $type;
   $type ||= $self->sqlt_type;
-  $version ||= $schema->VERSION || '1.x';
+  $version ||= $schema->schema_version || '1.x';
   $dir ||= './';
-  eval "use SQL::Translator";
-  if(!$@)
+  my $filename = $schema->ddl_filename($type, $version, $dir);
+  if(-f $filename)
   {
-    eval "use SQL::Translator::Parser::DBIx::Class;";
-    $self->throw_exception($@) if $@;
-    eval "use SQL::Translator::Producer::${type};";
-    $self->throw_exception($@) if $@;
-    my $tr = SQL::Translator->new(%$sqltargs);
-    SQL::Translator::Parser::DBIx::Class::parse( $tr, $schema );
-    return "SQL::Translator::Producer::${type}"->can('produce')->($tr);
+      my $file;
+      open($file, "<$filename")
+        or $self->throw_exception("Can't open $filename ($!)");
+      my @rows = <$file>;
+      close($file);
+      return join('', @rows);
   }
 
-  my $filename = $schema->ddl_filename($type, $dir, $version);
-  if(!-f $filename)
-  {
-#      $schema->create_ddl_dir([ $type ], $version, $dir, $sqltargs);
-    $self->throw_exception("No SQL::Translator, and no Schema file found, aborting deploy");
-    return;
-  }
-  my $file;
-  open($file, "<$filename")
-    or $self->throw_exception("Can't open $filename ($!)");
-  my @rows = <$file>;
-  close($file);
+  $self->throw_exception(q{Can't deploy without SQL::Translator 0.09003: '}
+      . $self->_check_sqlt_message . q{'})
+      if !$self->_check_sqlt_version;
+
+  require SQL::Translator::Parser::DBIx::Class;
+  eval qq{use SQL::Translator::Producer::${type}};
+  $self->throw_exception($@) if $@;
 
-  return join('', @rows);
-  
+  # sources needs to be a parser arg, but for simplicity allow at top level
+  # coming in
+  $sqltargs->{parser_args}{sources} = delete $sqltargs->{sources}
+      if exists $sqltargs->{sources};
+
+  my $tr = SQL::Translator->new(%$sqltargs);
+  SQL::Translator::Parser::DBIx::Class::parse( $tr, $schema );
+  return "SQL::Translator::Producer::${type}"->can('produce')->($tr);
 }
 
 sub deploy {
   my ($self, $schema, $type, $sqltargs, $dir) = @_;
-  foreach my $statement ( $self->deployment_statements($schema, $type, undef, $dir, { no_comments => 1, %{ $sqltargs || {} } } ) ) {
-    for ( split(";\n", $statement)) {
-      next if($_ =~ /^--/);
-      next if(!$_);
-#      next if($_ =~ /^DROP/m);
-      next if($_ =~ /^BEGIN TRANSACTION/m);
-      next if($_ =~ /^COMMIT/m);
-      next if $_ =~ /^\s+$/; # skip whitespace only
-      $self->debugobj->query_start($_) if $self->debug;
-      $self->dbh->do($_) or warn "SQL was:\n $_"; # XXX exceptions?
-      $self->debugobj->query_end($_) if $self->debug;
+  my $deploy = sub {
+    my $line = shift;
+    return if($line =~ /^--/);
+    return if(!$line);
+    # next if($line =~ /^DROP/m);
+    return if($line =~ /^BEGIN TRANSACTION/m);
+    return if($line =~ /^COMMIT/m);
+    return if $line =~ /^\s+$/; # skip whitespace only
+    $self->_query_start($line);
+    eval {
+      # a previous error may invalidate $dbh - thus we need to use dbh()
+      # to guarantee a healthy $dbh (this is temporary until we get
+      # proper error handling on deploy() )
+      $self->dbh->do($line);
+    };
+    if ($@) {
+      carp qq{$@ (running "${line}")};
+    }
+    $self->_query_end($line);
+  };
+  my @statements = $self->deployment_statements($schema, $type, undef, $dir, { %{ $sqltargs || {} }, no_comments => 1 } );
+  if (@statements > 1) {
+    foreach my $statement (@statements) {
+      $deploy->( $statement );
+    }
+  }
+  elsif (@statements == 1) {
+    foreach my $line ( split(";\n", $statements[0])) {
+      $deploy->( $line );
     }
   }
 }
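
Illustrating the dispatch above (a hedged sketch): a producer-generated list
is deployed statement by statement, while a single string - e.g. one slurped
from a create_ddl_dir file - is first split on C<;\n>:

  $schema->deploy({ add_drop_table => 1 }, './sql');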
@@ -1226,7 +2363,10 @@
 
 Returns the datetime parser class
 
 sub datetime_parser {
   my $self = shift;
-  return $self->{datetime_parser} ||= $self->build_datetime_parser(@_);
+  return $self->{datetime_parser} ||= do {
+    $self->last_dbh;
+    $self->build_datetime_parser(@_);
+  };
 }
 
 =head2 datetime_parser_type
 
@@ -1252,49 +2392,90 @@
 sub build_datetime_parser {
   return $type;
 }
 
-sub DESTROY {
-  my $self = shift;
-  return if !$self->_dbh;
-  $self->_verify_pid;
-  $self->_dbh(undef);
+{
+  my $_check_sqlt_version; # private
+  my $_check_sqlt_message; # private
+  sub _check_sqlt_version {
+    return $_check_sqlt_version if defined $_check_sqlt_version;
+    eval 'use SQL::Translator "0.09003"';
+    $_check_sqlt_message = $@ || '';
+    $_check_sqlt_version = !$@;
+  }
+
+  sub _check_sqlt_message {
+    _check_sqlt_version if !defined $_check_sqlt_message;
+    $_check_sqlt_message;
+  }
 }
 
-1;
+=head2 is_replicating
 
-=head1 SQL METHODS
+A boolean that reports if a particular L<DBIx::Class::Storage::DBI> is set to
+replicate from a master database. Default is undef, which is the result
+returned by databases that don't support replication.
 
-The module defines a set of methods within the DBIC::SQL::Abstract
-namespace. These build on L to provide the
-SQL query functions.
+=cut
 
-The following methods are extended:-
+sub is_replicating {
+    return;
 
-=over 4
+}
 
-=item delete
+=head2 lag_behind_master
 
-=item insert
+Returns a number that represents a certain amount of lag behind a master db
+when a given storage is replicating. The number is database dependent, but
+starts at zero and increases with the amount of lag. Default is undef.
 
-=item select
+=cut
 
-=item update
+sub lag_behind_master {
+    return;
+}
 
-=item limit_dialect
+=head2 order_columns_for_select
 
-See L for details.
-For setting, this method is deprecated in favor of L.
+Returns an ordered list of column names for use with a C