X-Git-Url: http://git.shadowcat.co.uk/gitweb/gitweb.cgi?a=blobdiff_plain;f=lib%2FDBIx%2FClass%2FStorage%2FDBI.pm;h=e14f234c3f607456081d7648a3c8747ac71fc95e;hb=630ba6e5b8cb098e3991e34f6d4ac7e9234639b1;hp=6126b3fcbe6a0dbb6bc38555240f00245828c10a;hpb=2304e326c2d4a4e3745f52941639cfea708a76b6;p=dbsrgits%2FDBIx-Class.git diff --git a/lib/DBIx/Class/Storage/DBI.pm b/lib/DBIx/Class/Storage/DBI.pm index 6126b3f..e14f234 100644 --- a/lib/DBIx/Class/Storage/DBI.pm +++ b/lib/DBIx/Class/Storage/DBI.pm @@ -1,299 +1,116 @@ package DBIx::Class::Storage::DBI; # -*- mode: cperl; cperl-indent-level: 2 -*- -use base 'DBIx::Class::Storage'; - -use strict; +use strict; use warnings; -use Carp::Clan qw/^DBIx::Class/; -use DBI; -use SQL::Abstract::Limit; -use DBIx::Class::Storage::DBI::Cursor; -use DBIx::Class::Storage::Statistics; -use Scalar::Util qw/blessed weaken/; -__PACKAGE__->mk_group_accessors('simple' => - qw/_connect_info _dbi_connect_info _dbh _sql_maker _sql_maker_opts - _conn_pid _conn_tid transaction_depth _dbh_autocommit savepoints/ -); +use base qw/DBIx::Class::Storage::DBIHacks DBIx::Class::Storage/; +use mro 'c3'; -# the values for these accessors are picked out (and deleted) from -# the attribute hashref passed to connect_info -my @storage_options = qw/ - on_connect_do on_disconnect_do disable_sth_caching unsafe auto_savepoint -/; -__PACKAGE__->mk_group_accessors('simple' => @storage_options); +use Carp::Clan qw/^DBIx::Class|^Try::Tiny/; +use DBI; +use DBIx::Class::Storage::DBI::Cursor; +use Scalar::Util qw/refaddr weaken reftype blessed/; +use List::Util qw/first/; +use Data::Dumper::Concise 'Dumper'; +use Sub::Name 'subname'; +use Try::Tiny; +use File::Path 'make_path'; +use namespace::clean; # default cursor class, overridable in connect_info attributes __PACKAGE__->cursor_class('DBIx::Class::Storage::DBI::Cursor'); -__PACKAGE__->mk_group_accessors('inherited' => qw/sql_maker_class/); -__PACKAGE__->sql_maker_class('DBIC::SQL::Abstract'); - -BEGIN { - -package # Hide from PAUSE - DBIC::SQL::Abstract; # Would merge upstream, but nate doesn't reply :( - -use base qw/SQL::Abstract::Limit/; - -# This prevents the caching of $dbh in S::A::L, I believe -sub new { - my $self = shift->SUPER::new(@_); - - # If limit_dialect is a ref (like a $dbh), go ahead and replace - # it with what it resolves to: - $self->{limit_dialect} = $self->_find_syntax($self->{limit_dialect}) - if ref $self->{limit_dialect}; +__PACKAGE__->mk_group_accessors('inherited' => qw/sql_maker_class sql_limit_dialect/); +__PACKAGE__->sql_maker_class('DBIx::Class::SQLMaker'); - $self; -} - -# While we're at it, this should make LIMIT queries more efficient, -# without digging into things too deeply -use Scalar::Util 'blessed'; -sub _find_syntax { - my ($self, $syntax) = @_; - my $dbhname = blessed($syntax) ? 
$syntax->{Driver}{Name} : $syntax; - if(ref($self) && $dbhname && $dbhname eq 'DB2') { - return 'RowNumberOver'; - } - - $self->{_cached_syntax} ||= $self->SUPER::_find_syntax($syntax); -} - -sub select { - my ($self, $table, $fields, $where, $order, @rest) = @_; - $table = $self->_quote($table) unless ref($table); - local $self->{rownum_hack_count} = 1 - if (defined $rest[0] && $self->{limit_dialect} eq 'RowNum'); - @rest = (-1) unless defined $rest[0]; - die "LIMIT 0 Does Not Compute" if $rest[0] == 0; - # and anyway, SQL::Abstract::Limit will cause a barf if we don't first - local $self->{having_bind} = []; - my ($sql, @ret) = $self->SUPER::select( - $table, $self->_recurse_fields($fields), $where, $order, @rest - ); - $sql .= - $self->{for} ? - ( - $self->{for} eq 'update' ? ' FOR UPDATE' : - $self->{for} eq 'shared' ? ' FOR SHARE' : - '' - ) : - '' - ; - return wantarray ? ($sql, @ret, @{$self->{having_bind}}) : $sql; -} +__PACKAGE__->mk_group_accessors('simple' => qw/ + _connect_info _dbi_connect_info _dbic_connect_attributes _driver_determined + _dbh _dbh_details _conn_pid _sql_maker _sql_maker_opts + transaction_depth _dbh_autocommit savepoints +/); -sub insert { - my $self = shift; - my $table = shift; - $table = $self->_quote($table) unless ref($table); - $self->SUPER::insert($table, @_); -} +# the values for these accessors are picked out (and deleted) from +# the attribute hashref passed to connect_info +my @storage_options = qw/ + on_connect_call on_disconnect_call on_connect_do on_disconnect_do + disable_sth_caching unsafe auto_savepoint +/; +__PACKAGE__->mk_group_accessors('simple' => @storage_options); -sub update { - my $self = shift; - my $table = shift; - $table = $self->_quote($table) unless ref($table); - $self->SUPER::update($table, @_); -} -sub delete { - my $self = shift; - my $table = shift; - $table = $self->_quote($table) unless ref($table); - $self->SUPER::delete($table, @_); -} +# capability definitions, using a 2-tiered accessor system +# The rationale is: +# +# A driver/user may define _use_X, which blindly without any checks says: +# "(do not) use this capability", (use_dbms_capability is an "inherited" +# type accessor) +# +# If _use_X is undef, _supports_X is then queried. This is a "simple" style +# accessor, which in turn calls _determine_supports_X, and stores the return +# in a special slot on the storage object, which is wiped every time a $dbh +# reconnection takes place (it is not guaranteed that upon reconnection we +# will get the same rdbms version). _determine_supports_X does not need to +# exist on a driver, as we ->can for it before calling. + +my @capabilities = (qw/insert_returning placeholders typeless_placeholders join_optimizer/); +__PACKAGE__->mk_group_accessors( dbms_capability => map { "_supports_$_" } @capabilities ); +__PACKAGE__->mk_group_accessors( use_dbms_capability => map { "_use_$_" } (@capabilities ) ); + +# on by default, not strictly a capability (pending rewrite) +__PACKAGE__->_use_join_optimizer (1); +sub _determine_supports_join_optimizer { 1 }; + +# Each of these methods need _determine_driver called before itself +# in order to function reliably. This is a purely DRY optimization +# +# get_(use)_dbms_capability need to be called on the correct Storage +# class, as _use_X may be hardcoded class-wide, and _supports_X calls +# _determine_supports_X which obv. 
needs a correct driver as well +my @rdbms_specific_methods = qw/ + deployment_statements + sqlt_type + sql_maker + build_datetime_parser + datetime_parser_type + + insert + insert_bulk + update + delete + select + select_single + + get_use_dbms_capability + get_dbms_capability + + _server_info + _get_server_version +/; -sub _emulate_limit { - my $self = shift; - if ($_[3] == -1) { - return $_[1].$self->_order_by($_[2]); - } else { - return $self->SUPER::_emulate_limit(@_); - } -} - -sub _recurse_fields { - my ($self, $fields, $params) = @_; - my $ref = ref $fields; - return $self->_quote($fields) unless $ref; - return $$fields if $ref eq 'SCALAR'; - - if ($ref eq 'ARRAY') { - return join(', ', map { - $self->_recurse_fields($_) - .(exists $self->{rownum_hack_count} && !($params && $params->{no_rownum_hack}) - ? ' AS col'.$self->{rownum_hack_count}++ - : '') - } @$fields); - } elsif ($ref eq 'HASH') { - foreach my $func (keys %$fields) { - return $self->_sqlcase($func) - .'( '.$self->_recurse_fields($fields->{$func}).' )'; - } - } -} +for my $meth (@rdbms_specific_methods) { -sub _order_by { - my $self = shift; - my $ret = ''; - my @extra; - if (ref $_[0] eq 'HASH') { - if (defined $_[0]->{group_by}) { - $ret = $self->_sqlcase(' group by ') - .$self->_recurse_fields($_[0]->{group_by}, { no_rownum_hack => 1 }); - } - if (defined $_[0]->{having}) { - my $frag; - ($frag, @extra) = $self->_recurse_where($_[0]->{having}); - push(@{$self->{having_bind}}, @extra); - $ret .= $self->_sqlcase(' having ').$frag; - } - if (defined $_[0]->{order_by}) { - $ret .= $self->_order_by($_[0]->{order_by}); - } - } elsif (ref $_[0] eq 'SCALAR') { - $ret = $self->_sqlcase(' order by ').${ $_[0] }; - } elsif (ref $_[0] eq 'ARRAY' && @{$_[0]}) { - my @order = @{+shift}; - $ret = $self->_sqlcase(' order by ') - .join(', ', map { - my $r = $self->_order_by($_, @_); - $r =~ s/^ ?ORDER BY //i; - $r; - } @order); - } else { - $ret = $self->SUPER::_order_by(@_); - } - return $ret; -} + my $orig = __PACKAGE__->can ($meth) + or die "$meth is not a ::Storage::DBI method!"; -sub _order_directions { - my ($self, $order) = @_; - $order = $order->{order_by} if ref $order eq 'HASH'; - return $self->SUPER::_order_directions($order); -} + no strict qw/refs/; + no warnings qw/redefine/; + *{__PACKAGE__ ."::$meth"} = subname $meth => sub { + if (not $_[0]->_driver_determined and not $_[0]->{_in_determine_driver}) { + $_[0]->_determine_driver; -sub _table { - my ($self, $from) = @_; - if (ref $from eq 'ARRAY') { - return $self->_recurse_from(@$from); - } elsif (ref $from eq 'HASH') { - return $self->_make_as($from); - } else { - return $from; # would love to quote here but _table ends up getting called - # twice during an ->select without a limit clause due to - # the way S::A::Limit->select works. should maybe consider - # bypassing this and doing S::A::select($self, ...) in - # our select method above. meantime, quoting shims have - # been added to select/insert/update/delete here - } -} - -sub _recurse_from { - my ($self, $from, @join) = @_; - my @sqlf; - push(@sqlf, $self->_make_as($from)); - foreach my $j (@join) { - my ($to, $on) = @$j; - - # check whether a join type exists - my $join_clause = ''; - my $to_jt = ref($to) eq 'ARRAY' ? $to->[0] : $to; - if (ref($to_jt) eq 'HASH' and exists($to_jt->{-join_type})) { - $join_clause = ' '.uc($to_jt->{-join_type}).' 
JOIN '; - } else { - $join_clause = ' JOIN '; - } - push(@sqlf, $join_clause); + # This for some reason crashes and burns on perl 5.8.1 + # IFF the method ends up throwing an exception + #goto $_[0]->can ($meth); - if (ref $to eq 'ARRAY') { - push(@sqlf, '(', $self->_recurse_from(@$to), ')'); - } else { - push(@sqlf, $self->_make_as($to)); + my $cref = $_[0]->can ($meth); + goto $cref; } - push(@sqlf, ' ON (', $self->_join_condition($on), ')'); - } - return join('', @sqlf); -} - -sub _make_as { - my ($self, $from) = @_; - return join(' ', map { (ref $_ eq 'SCALAR' ? $$_ : $self->_quote($_)) } - reverse each %{$self->_skip_options($from)}); -} - -sub _skip_options { - my ($self, $hash) = @_; - my $clean_hash = {}; - $clean_hash->{$_} = $hash->{$_} - for grep {!/^-/} keys %$hash; - return $clean_hash; -} - -sub _join_condition { - my ($self, $cond) = @_; - if (ref $cond eq 'HASH') { - my %j; - for (keys %$cond) { - my $v = $cond->{$_}; - if (ref $v) { - # XXX no throw_exception() in this package and croak() fails with strange results - Carp::croak(ref($v) . qq{ reference arguments are not supported in JOINS - try using \"..." instead'}) - if ref($v) ne 'SCALAR'; - $j{$_} = $v; - } - else { - my $x = '= '.$self->_quote($v); $j{$_} = \$x; - } - }; - return scalar($self->_recurse_where(\%j)); - } elsif (ref $cond eq 'ARRAY') { - return join(' OR ', map { $self->_join_condition($_) } @$cond); - } else { - die "Can't handle this yet!"; - } -} - -sub _quote { - my ($self, $label) = @_; - return '' unless defined $label; - return "*" if $label eq '*'; - return $label unless $self->{quote_char}; - if(ref $self->{quote_char} eq "ARRAY"){ - return $self->{quote_char}->[0] . $label . $self->{quote_char}->[1] - if !defined $self->{name_sep}; - my $sep = $self->{name_sep}; - return join($self->{name_sep}, - map { $self->{quote_char}->[0] . $_ . $self->{quote_char}->[1] } - split(/\Q$sep\E/,$label)); - } - return $self->SUPER::_quote($label); -} - -sub limit_dialect { - my $self = shift; - $self->{limit_dialect} = shift if @_; - return $self->{limit_dialect}; -} - -sub quote_char { - my $self = shift; - $self->{quote_char} = shift if @_; - return $self->{quote_char}; -} - -sub name_sep { - my $self = shift; - $self->{name_sep} = shift if @_; - return $self->{name_sep}; + goto $orig; + }; } -} # End of BEGIN block =head1 NAME @@ -304,10 +121,17 @@ DBIx::Class::Storage::DBI - DBI storage handler my $schema = MySchema->connect('dbi:SQLite:my.db'); $schema->storage->debug(1); - $schema->dbh_do("DROP TABLE authors"); + + my @stuff = $schema->storage->dbh_do( + sub { + my ($storage, $dbh, @args) = @_; + $dbh->do("DROP TABLE authors"); + }, + @column_list + ); $schema->resultset('Book')->search({ - written_on => $schema->storage->datetime_parser(DateTime->now) + written_on => $schema->storage->datetime_parser->format_datetime(DateTime->now) }); =head1 DESCRIPTION @@ -325,13 +149,85 @@ sub new { $new->transaction_depth(0); $new->_sql_maker_opts({}); + $new->_dbh_details({}); $new->{savepoints} = []; $new->{_in_dbh_do} = 0; $new->{_dbh_gen} = 0; + # read below to see what this does + $new->_arm_global_destructor; + $new; } +# This is hack to work around perl shooting stuff in random +# order on exit(). If we do not walk the remaining storage +# objects in an END block, there is a *small but real* chance +# of a fork()ed child to kill the parent's shared DBI handle, +# *before perl reaches the DESTROY in this package* +# Yes, it is ugly and effective. 
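+# (A concrete illustration of the above, not a prescription: the parent
+# creates $schema and its $dbh, the code fork()s, the child exit()s -
+# without the END-time walk below, the child's DESTROY would disconnect
+# the very handle the parent is still using.)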
+# Additionally this registry is used by the CLONE method to
+# make sure no handles are shared between threads
+{
+  my %seek_and_destroy;
+
+  sub _arm_global_destructor {
+    my $self = shift;
+    my $key = refaddr ($self);
+    $seek_and_destroy{$key} = $self;
+    weaken ($seek_and_destroy{$key});
+  }
+
+  END {
+    local $?; # just in case the DBI destructor changes it somehow
+
+    # destroy just the object if not native to this process/thread
+    $_->_verify_pid for (grep
+      { defined $_ }
+      values %seek_and_destroy
+    );
+  }
+
+  sub CLONE {
+    # As per DBI's recommendation, DBIC disconnects all handles as
+    # soon as possible (DBIC will reconnect only on demand from within
+    # the thread)
+    for (values %seek_and_destroy) {
+      next unless $_;
+      $_->{_dbh_gen}++;  # so that existing cursors will drop as well
+      $_->_dbh(undef);
+    }
+  }
+}
+
+sub DESTROY {
+  my $self = shift;
+
+  # some databases spew warnings on implicit disconnect
+  local $SIG{__WARN__} = sub {};
+  $self->_dbh(undef);
+
+  # this op is necessary, since the very last perl runtime statement
+  # triggers a global destruction shootout, and the $SIG localization
+  # may very well be destroyed before perl actually gets to do the
+  # $dbh undef
+  1;
+}
+
+# handle pid changes correctly - do not destroy parent's connection
+sub _verify_pid {
+  my $self = shift;
+
+  my $pid = $self->_conn_pid;
+  if( defined $pid and $pid != $$ and my $dbh = $self->_dbh ) {
+    $dbh->{InactiveDestroy} = 1;
+    $self->{_dbh_gen}++;
+    $self->_dbh(undef);
+  }
+
+  return;
+}
+
 =head2 connect_info
 
 This method is normally called by L<DBIx::Class::Schema/connection>, which
@@ -352,8 +248,8 @@ recognized by DBIx::Class:
 
 =item *
 
-A single code reference which returns a connected
-L<DBI database handle $dbh|DBI/connect> optionally followed by
+A single code reference which returns a connected
+L<DBI database handle $dbh|DBI/connect> optionally followed by
 L<extra attributes|/DBIx::Class specific connection attributes> recognized by DBIx::Class:
 
@@ -372,7 +268,13 @@ mixed together:
     %extra_attributes,
   }];
 
+  $connect_info_args = [{
+    dbh_maker => sub { DBI->connect (...) },
+    %dbi_attributes,
+    %extra_attributes,
+  }];
+
-This is particularly useful for L<Catalyst> based applications, allowing the
+This is particularly useful for L<Catalyst> based applications, allowing the
 following config (L<Config::General> style):
 
@@ -385,13 +287,17 @@ following config (L<Config::General> style):
 
+The C<dsn>/C<user>/C<password> combination can be substituted by the
+C<dbh_maker> key whose value is a coderef that returns a connected
+L<DBI database handle $dbh|DBI/connect>
+
 =back
 
 Please note that the L<DBI> docs recommend that you always explicitly
 set C<AutoCommit> to either I<0> or I<1>.  L<DBIx::Class> further
 recommends that it be set to I<1>, and that you perform transactions
 via our L<DBIx::Class::Schema/txn_do> method.  L<DBIx::Class> will set it
-to I<1> if you do not do explicitly set it to zero.  This is the default
+to I<1> if you do not explicitly set it to zero. This is the default
 for most DBDs. See L</DBIx::Class and AutoCommit> for details.
 
 =head3 DBIx::Class specific connection attributes
@@ -399,7 +305,7 @@ for most DBDs. See L</DBIx::Class and AutoCommit> for details.
 In addition to the standard L<DBI> connection attributes, DBIx::Class recognizes
 the following connection options. These options can be mixed in with your other
-L<DBI> connection attributes, or placed in a seperate hashref
+L<DBI> connection attributes, or placed in a separate hashref
 (C<\%extra_attributes>) as shown above.
 
 Every time C<connect_info> is invoked, any previous settings for
@@ -416,6 +322,10 @@ the database. Its value may contain:
 
 =over
 
+=item a scalar
+
+This contains one SQL statement to execute.
+
 =item an array reference
 
 This contains SQL statements to execute in order.  Each element contains
@@ -436,21 +346,105 @@ immediately before disconnecting from the database.
 
 Note, this only runs if you explicitly call L</disconnect> on the
 storage object.
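+
+For example, in any of the forms accepted by L</on_connect_do> (the SQL
+statement below is purely illustrative):
+
+  on_disconnect_do => [ 'DROP TABLE IF EXISTS my_session_scratch' ],
+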
+=item on_connect_call + +A more generalized form of L that calls the specified +C methods in your storage driver. + + on_connect_do => 'select 1' + +is equivalent to: + + on_connect_call => [ [ do_sql => 'select 1' ] ] + +Its values may contain: + +=over + +=item a scalar + +Will call the C method. + +=item a code reference + +Will execute C<< $code->($storage) >> + +=item an array reference + +Each value can be a method name or code reference. + +=item an array of arrays + +For each array, the first item is taken to be the C method name +or code reference, and the rest are parameters to it. + +=back + +Some predefined storage methods you may use: + +=over + +=item do_sql + +Executes a SQL string or a code reference that returns a SQL string. This is +what L and L use. + +It can take: + +=over + +=item a scalar + +Will execute the scalar as SQL. + +=item an arrayref + +Taken to be arguments to L, the SQL string optionally followed by the +attributes hashref and bind values. + +=item a code reference + +Will execute C<< $code->($storage) >> and execute the return array refs as +above. + +=back + +=item datetime_setup + +Execute any statements necessary to initialize the database session to return +and accept datetime/timestamp values used with +L. + +Only necessary for some databases, see your specific storage driver for +implementation details. + +=back + +=item on_disconnect_call + +Takes arguments in the same form as L and executes them +immediately before disconnecting from the database. + +Calls the C methods as opposed to the +C methods called by L. + +Note, this only runs if you explicitly call L on the +storage object. + =item disable_sth_caching If set to a true value, this option will disable the caching of statement handles via L. -=item limit_dialect +=item limit_dialect -Sets the limit dialect. This is useful for JDBC-bridge among others -where the remote SQL-dialect cannot be determined by the name of the -driver alone. See also L. +Sets a specific SQL::Abstract::Limit-style limit dialect, overriding the +default L setting of the storage (if any). For a list +of available limit dialects see L. =item quote_char -Specifies what characters to use to quote table and column names. If -you use this you will want to specify L as well. +Specifies what characters to use to quote table and column names. C expects either a single character, in which case is it is placed on either side of the table/column name, or an arrayref of length @@ -461,14 +455,9 @@ SQL Server you should use C<< quote_char => [qw/[ ]/] >>. =item name_sep -This only needs to be used in conjunction with C, and is used to -specify the charecter that seperates elements (schemas, tables, columns) from -each other. In most cases this is simply a C<.>. - -The consequences of not supplying this value is that L -will assume DBIx::Class' uses of aliases to be complete column -names. The output will look like I<"me.name"> when it should actually -be I<"me"."name">. +This parameter is only useful in conjunction with C, and is used to +specify the character that separates elements (schemas, tables, columns) from +each other. If unspecified it defaults to the most commonly used C<.>. =item unsafe @@ -508,6 +497,12 @@ L # Connect via subref ->connect_info([ sub { DBI->connect(...) } ]); + # Connect via subref in hashref + ->connect_info([{ + dbh_maker => sub { DBI->connect(...) 
}, + on_connect_do => 'alter session ...', + }]); + # A bit more complicated ->connect_info( [ @@ -515,7 +510,7 @@ L 'postgres', 'my_pg_password', { AutoCommit => 1 }, - { quote_char => q{"}, name_sep => q{.} }, + { quote_char => q{"} }, ] ); @@ -560,13 +555,55 @@ L =cut sub connect_info { - my ($self, $info_arg) = @_; + my ($self, $info) = @_; - return $self->_connect_info if !$info_arg; + return $self->_connect_info if !$info; - my @args = @$info_arg; # take a shallow copy for further mutilation - $self->_connect_info([@args]); # copy for _connect_info + $self->_connect_info($info); # copy for _connect_info + + $info = $self->_normalize_connect_info($info) + if ref $info eq 'ARRAY'; + + for my $storage_opt (keys %{ $info->{storage_options} }) { + my $value = $info->{storage_options}{$storage_opt}; + + $self->$storage_opt($value); + } + + # Kill sql_maker/_sql_maker_opts, so we get a fresh one with only + # the new set of options + $self->_sql_maker(undef); + $self->_sql_maker_opts({}); + for my $sql_maker_opt (keys %{ $info->{sql_maker_options} }) { + my $value = $info->{sql_maker_options}{$sql_maker_opt}; + + $self->_sql_maker_opts->{$sql_maker_opt} = $value; + } + + my %attrs = ( + %{ $self->_default_dbi_connect_attributes || {} }, + %{ $info->{attributes} || {} }, + ); + + my @args = @{ $info->{arguments} }; + + $self->_dbi_connect_info([@args, + %attrs && !(ref $args[0] eq 'CODE') ? \%attrs : ()]); + + # FIXME - dirty: + # save attributes them in a separate accessor so they are always + # introspectable, even in case of a CODE $dbhmaker + $self->_dbic_connect_attributes (\%attrs); + + return $self->_connect_info; +} + +sub _normalize_connect_info { + my ($self, $info_arg) = @_; + my %info; + + my @args = @$info_arg; # take a shallow copy for further mutilation # combine/pre-parse arguments depending on invocation style @@ -578,8 +615,21 @@ sub connect_info { elsif (ref $args[0] eq 'HASH') { # single hashref (i.e. Catalyst config) %attrs = %{$args[0]}; @args = (); - for (qw/password user dsn/) { - unshift @args, delete $attrs{$_}; + if (my $code = delete $attrs{dbh_maker}) { + @args = $code; + + my @ignored = grep { delete $attrs{$_} } (qw/dsn user password/); + if (@ignored) { + carp sprintf ( + 'Attribute(s) %s in connect_info were ignored, as they can not be applied ' + . "to the result of 'dbh_maker'", + + join (', ', map { "'$_'" } (@ignored) ), + ); + } + } + else { + @args = delete @attrs{qw/dsn user password/}; } } else { # otherwise assume dsn/user/password + \%attrs + \%extra_attrs @@ -590,34 +640,65 @@ sub connect_info { @args = @args[0,1,2]; } - # Kill sql_maker/_sql_maker_opts, so we get a fresh one with only - # the new set of options - $self->_sql_maker(undef); - $self->_sql_maker_opts({}); + $info{arguments} = \@args; - if(keys %attrs) { - for my $storage_opt (@storage_options, 'cursor_class') { # @storage_options is declared at the top of the module - if(my $value = delete $attrs{$storage_opt}) { - $self->$storage_opt($value); - } - } - for my $sql_maker_opt (qw/limit_dialect quote_char name_sep/) { - if(my $opt_val = delete $attrs{$sql_maker_opt}) { - $self->_sql_maker_opts->{$sql_maker_opt} = $opt_val; - } - } - } + my @storage_opts = grep exists $attrs{$_}, + @storage_options, 'cursor_class'; - %attrs = () if (ref $args[0] eq 'CODE'); # _connect() never looks past $args[0] in this case + @{ $info{storage_options} }{@storage_opts} = + delete @attrs{@storage_opts} if @storage_opts; - $self->_dbi_connect_info([@args, keys %attrs ? 
\%attrs : ()]); - $self->_connect_info; + my @sql_maker_opts = grep exists $attrs{$_}, + qw/limit_dialect quote_char name_sep/; + + @{ $info{sql_maker_options} }{@sql_maker_opts} = + delete @attrs{@sql_maker_opts} if @sql_maker_opts; + + $info{attributes} = \%attrs if %attrs; + + return \%info; +} + +sub _default_dbi_connect_attributes { + return { + AutoCommit => 1, + RaiseError => 1, + PrintError => 0, + }; } =head2 on_connect_do This method is deprecated in favour of setting via L. +=cut + +=head2 on_disconnect_do + +This method is deprecated in favour of setting via L. + +=cut + +sub _parse_connect_do { + my ($self, $type) = @_; + + my $val = $self->$type; + return () if not defined $val; + + my @res; + + if (not ref($val)) { + push @res, [ 'do_sql', $val ]; + } elsif (ref($val) eq 'CODE') { + push @res, $val; + } elsif (ref($val) eq 'ARRAY') { + push @res, map { [ 'do_sql', $_ ] } @$val; + } else { + $self->throw_exception("Invalid type for $type: ".ref($val)); + } + + return \@res; +} =head2 dbh_do @@ -654,43 +735,28 @@ sub dbh_do { my $self = shift; my $code = shift; - my $dbh = $self->_dbh; + my $dbh = $self->_get_dbh; - return $self->$code($dbh, @_) if $self->{_in_dbh_do} - || $self->{transaction_depth}; + return $self->$code($dbh, @_) + if ( $self->{_in_dbh_do} || $self->{transaction_depth} ); local $self->{_in_dbh_do} = 1; - my @result; - my $want_array = wantarray; + # take a ref instead of a copy, to preserve coderef @_ aliasing semantics + my $args = \@_; + return try { + $self->$code ($dbh, @$args); + } catch { + $self->throw_exception($_) if $self->connected; - eval { - $self->_verify_pid if $dbh; - if(!$self->_dbh) { - $self->_populate_dbh; - $dbh = $self->_dbh; - } + # We were not connected - reconnect and retry, but let any + # exception fall right through this time + carp "Retrying $code after catching disconnected exception: $_" + if $ENV{DBIC_DBIRETRY_DEBUG}; - if($want_array) { - @result = $self->$code($dbh, @_); - } - elsif(defined $want_array) { - $result[0] = $self->$code($dbh, @_); - } - else { - $self->$code($dbh, @_); - } + $self->_populate_dbh; + $self->$code($self->_dbh, @$args); }; - - my $exception = $@; - if(!$exception) { return $want_array ? @result : $result[0] } - - $self->throw_exception($exception) if $self->connected; - - # We were not connected - reconnect and retry, but let any - # exception fall right through this time - $self->_populate_dbh; - $self->$code($self->_dbh, @_); } # This is basically a blend of dbh_do above and DBIx::Class::Storage::txn_do. @@ -703,8 +769,6 @@ sub txn_do { ref $coderef eq 'CODE' or $self->throw_exception ('$coderef must be a CODE reference'); - return $coderef->(@_) if $self->{transaction_depth} && ! 
$self->auto_savepoint;
-
   local $self->{_in_dbh_do} = 1;
 
   my @result;
@@ -712,30 +776,42 @@
   my $tried = 0;
 
   while(1) {
-    eval {
-      $self->_verify_pid if $self->_dbh;
-      $self->_populate_dbh if !$self->_dbh;
+    my $exception;
+
+    # take a ref instead of a copy, to preserve coderef @_ aliasing semantics
+    my $args = \@_;
+    try {
       $self->txn_begin;
+      my $txn_start_depth = $self->transaction_depth;
       if($want_array) {
-        @result = $coderef->(@_);
+        @result = $coderef->(@$args);
       }
       elsif(defined $want_array) {
-        $result[0] = $coderef->(@_);
+        $result[0] = $coderef->(@$args);
       }
       else {
-        $coderef->(@_);
+        $coderef->(@$args);
      }
-      $self->txn_commit;
+
+      my $delta_txn = $txn_start_depth - $self->transaction_depth;
+      if ($delta_txn == 0) {
+        $self->txn_commit;
+      }
+      elsif ($delta_txn != 1) {
+        # an off-by-one would mean we fired a rollback
+        carp "Unexpected reduction of transaction depth by $delta_txn after execution of $coderef";
+      }
+    } catch {
+      $exception = $_;
     };
 
-    my $exception = $@;
-    if(!$exception) { return $want_array ? @result : $result[0] }
+    if(! defined $exception) { return $want_array ? @result : $result[0] }
 
-    if($tried++ > 0 || $self->connected) {
-      eval { $self->txn_rollback };
-      my $rollback_exception = $@;
-      if($rollback_exception) {
+    if($self->transaction_depth > 1 || $tried++ || $self->connected) {
+      my $rollback_exception;
+      try { $self->txn_rollback } catch { $rollback_exception = shift };
+      if(defined $rollback_exception) {
         my $exception_class = "DBIx::Class::Storage::NESTED_ROLLBACK_EXCEPTION";
         $self->throw_exception($exception)  # propagate nested rollback
           if $rollback_exception =~ /$exception_class/;
@@ -750,6 +826,8 @@
 
     # We were not connected, and this was the first try - reconnect and retry
     # via the while loop
+    carp "Retrying $coderef after catching disconnected exception: $exception"
+      if $ENV{DBIC_TXNRETRY_DEBUG};
     $self->_populate_dbh;
   }
 }
 
@@ -764,11 +842,17 @@ database is not in C<AutoCommit> mode.
 
 =cut
 
 sub disconnect {
   my ($self) = @_;
 
-  if( $self->connected ) {
-    my $connection_do = $self->on_disconnect_do;
-    $self->_do_connection_actions($connection_do) if ref($connection_do);
+  if( $self->_dbh ) {
+    my @actions;
+
+    push @actions, ( $self->on_disconnect_call || () );
+    push @actions, $self->_parse_connect_do ('on_disconnect_do');
+
+    $self->_do_connection_actions(disconnect_call_ => $_) for @actions;
 
-    $self->_dbh->rollback unless $self->_dbh_autocommit;
+    $self->_dbh_rollback unless $self->_dbh_autocommit;
+
+    %{ $self->_dbh->{CachedKids} } = ();
     $self->_dbh->disconnect;
     $self->_dbh(undef);
     $self->{_dbh_gen}++;
   }
@@ -793,41 +877,52 @@ in MySQL's case disabled entirely.
 
 # Storage subclasses should override this
 sub with_deferred_fk_checks {
   my ($self, $sub) = @_;
-
   $sub->();
 }
 
+=head2 connected
+
+=over
+
+=item Arguments: none
+
+=item Return Value: 1|0
+
+=back
+
+Verifies that the current database handle is active and ready to execute
+an SQL statement (e.g. the connection did not get stale, server is still
+answering, etc.) This method is used internally by L</dbh>.
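+
+For example (an illustrative guard only - C<$schema> is assumed to be an
+already-built L<DBIx::Class::Schema> instance):
+
+  $schema->storage->ensure_connected
+    unless $schema->storage->connected;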
+
+=cut
+
 sub connected {
-  my ($self) = @_;
+  my $self = shift;
+  return 0 unless $self->_seems_connected;
 
-  if(my $dbh = $self->_dbh) {
-      if(defined $self->_conn_tid && $self->_conn_tid != threads->tid) {
-          $self->_dbh(undef);
-          $self->{_dbh_gen}++;
-          return;
-      }
-      else {
-          $self->_verify_pid;
-          return 0 if !$self->_dbh;
-      }
-      return ($dbh->FETCH('Active') && $dbh->ping);
-  }
+  #be on the safe side
+  local $self->_dbh->{RaiseError} = 1;
 
-  return 0;
+  return $self->_ping;
 }
 
-# handle pid changes correctly
-# NOTE: assumes $self->_dbh is a valid $dbh
-sub _verify_pid {
-  my ($self) = @_;
+sub _seems_connected {
+  my $self = shift;
 
-  return if defined $self->_conn_pid && $self->_conn_pid == $$;
+  $self->_verify_pid;
 
-  $self->_dbh->{InactiveDestroy} = 1;
-  $self->_dbh(undef);
-  $self->{_dbh_gen}++;
+  my $dbh = $self->_dbh
+    or return 0;
 
-  return;
+  return $dbh->FETCH('Active');
+}
+
+sub _ping {
+  my $self = shift;
+
+  my $dbh = $self->_dbh or return 0;
+
+  return $dbh->ping;
 }
 
 sub ensure_connected {
@@ -840,72 +935,269 @@ sub ensure_connected {
 
 =head2 dbh
 
-Returns the dbh - a data base handle of class L<DBI>.
+Returns a C<$dbh> - a database handle of class L<DBI>. The returned handle
+is guaranteed to be healthy by implicitly calling L</connected>, and if
+necessary performing a reconnection before returning. Keep in mind that this
+is very B<expensive> on some database engines. Consider using L</dbh_do>
+instead.
 
 =cut
 
 sub dbh {
   my ($self) = @_;
 
-  $self->ensure_connected;
+  if (not $self->_dbh) {
+    $self->_populate_dbh;
+  } else {
+    $self->ensure_connected;
+  }
   return $self->_dbh;
 }
 
-sub _sql_maker_args {
-  my ($self) = @_;
-
-  return ( bindtype=>'columns', limit_dialect => $self->dbh, %{$self->_sql_maker_opts} );
+# this is the internal "get dbh or connect (don't check)" method
+sub _get_dbh {
+  my $self = shift;
+  $self->_verify_pid;
+  $self->_populate_dbh unless $self->_dbh;
+  return $self->_dbh;
 }
 
 sub sql_maker {
   my ($self) = @_;
 
   unless ($self->_sql_maker) {
     my $sql_maker_class = $self->sql_maker_class;
-    $self->_sql_maker($sql_maker_class->new( $self->_sql_maker_args ));
+    $self->ensure_class_loaded ($sql_maker_class);
+
+    my %opts = %{$self->_sql_maker_opts||{}};
+    my $dialect =
+      $opts{limit_dialect}
+        ||
+      $self->sql_limit_dialect
+        ||
+      do {
+        my $s_class = (ref $self) || $self;
+        carp (
+          "Your storage class ($s_class) does not set sql_limit_dialect and you "
+          . 'have not supplied an explicit limit_dialect in your connection_info. '
+          . 'DBIC will attempt to use the GenericSubQ dialect, which works on most '
+          . 'databases but can be (and often is) painfully slow.'
+        );
+
+        'GenericSubQ';
+      }
+    ;
+
+    $self->_sql_maker($sql_maker_class->new(
+      bindtype=>'columns',
+      array_datatypes => 1,
+      limit_dialect => $dialect,
+      name_sep => '.',
+      %opts,
+    ));
   }
   return $self->_sql_maker;
 }
 
+# nothing to do by default
 sub _rebless {}
+sub _init {}
 
 sub _populate_dbh {
   my ($self) = @_;
 
+  my @info = @{$self->_dbi_connect_info || []};
+  $self->_dbh(undef); # in case ->connected failed we might get sent here
+  $self->_dbh_details({}); # reset everything we know
+
+  $self->_dbh($self->_connect(@info));
+
+  $self->_conn_pid($$) if $^O ne 'MSWin32'; # on win32 these are in fact threads
+
+  $self->_determine_driver;
+
   # Always set the transaction depth on connect, since
   #  there is no transaction in progress by definition
   $self->{transaction_depth} = $self->_dbh_autocommit ? 
0 : 1; - if(ref $self eq 'DBIx::Class::Storage::DBI') { - my $driver = $self->_dbh->{Driver}->{Name}; - if ($self->load_optional_class("DBIx::Class::Storage::DBI::${driver}")) { - bless $self, "DBIx::Class::Storage::DBI::${driver}"; - $self->_rebless(); + $self->_run_connection_actions unless $self->{_in_determine_driver}; +} + +sub _run_connection_actions { + my $self = shift; + my @actions; + + push @actions, ( $self->on_connect_call || () ); + push @actions, $self->_parse_connect_do ('on_connect_do'); + + $self->_do_connection_actions(connect_call_ => $_) for @actions; +} + + + +sub set_use_dbms_capability { + $_[0]->set_inherited ($_[1], $_[2]); +} + +sub get_use_dbms_capability { + my ($self, $capname) = @_; + + my $use = $self->get_inherited ($capname); + return defined $use + ? $use + : do { $capname =~ s/^_use_/_supports_/; $self->get_dbms_capability ($capname) } + ; +} + +sub set_dbms_capability { + $_[0]->_dbh_details->{capability}{$_[1]} = $_[2]; +} + +sub get_dbms_capability { + my ($self, $capname) = @_; + + my $cap = $self->_dbh_details->{capability}{$capname}; + + unless (defined $cap) { + if (my $meth = $self->can ("_determine$capname")) { + $cap = $self->$meth ? 1 : 0; + } + else { + $cap = 0; } - } - my $connection_do = $self->on_connect_do; - $self->_do_connection_actions($connection_do) if ref($connection_do); + $self->set_dbms_capability ($capname, $cap); + } - $self->_conn_pid($$); - $self->_conn_tid(threads->tid) if $INC{'threads.pm'}; + return $cap; } -sub _do_connection_actions { +sub _server_info { my $self = shift; - my $connection_do = shift; - if (ref $connection_do eq 'ARRAY') { - $self->_do_query($_) foreach @$connection_do; + my $info; + unless ($info = $self->_dbh_details->{info}) { + + $info = {}; + + my $server_version = try { $self->_get_server_version }; + + if (defined $server_version) { + $info->{dbms_version} = $server_version; + + my ($numeric_version) = $server_version =~ /^([\d\.]+)/; + my @verparts = split (/\./, $numeric_version); + if ( + @verparts + && + $verparts[0] <= 999 + ) { + # consider only up to 3 version parts, iff not more than 3 digits + my @use_parts; + while (@verparts && @use_parts < 3) { + my $p = shift @verparts; + last if $p > 999; + push @use_parts, $p; + } + push @use_parts, 0 while @use_parts < 3; + + $info->{normalized_dbms_version} = sprintf "%d.%03d%03d", @use_parts; + } + } + + $self->_dbh_details->{info} = $info; + } + + return $info; +} + +sub _get_server_version { + shift->_get_dbh->get_info(18); +} + +sub _determine_driver { + my ($self) = @_; + + if ((not $self->_driver_determined) && (not $self->{_in_determine_driver})) { + my $started_connected = 0; + local $self->{_in_determine_driver} = 1; + + if (ref($self) eq __PACKAGE__) { + my $driver; + if ($self->_dbh) { # we are connected + $driver = $self->_dbh->{Driver}{Name}; + $started_connected = 1; + } else { + # if connect_info is a CODEREF, we have no choice but to connect + if (ref $self->_dbi_connect_info->[0] && + reftype $self->_dbi_connect_info->[0] eq 'CODE') { + $self->_populate_dbh; + $driver = $self->_dbh->{Driver}{Name}; + } + else { + # try to use dsn to not require being connected, the driver may still + # force a connection in _rebless to determine version + # (dsn may not be supplied at all if all we do is make a mock-schema) + my $dsn = $self->_dbi_connect_info->[0] || $ENV{DBI_DSN} || ''; + ($driver) = $dsn =~ /dbi:([^:]+):/i; + $driver ||= $ENV{DBI_DRIVER}; + } + } + + if ($driver) { + my $storage_class = "DBIx::Class::Storage::DBI::${driver}"; + 
if ($self->load_optional_class($storage_class)) { + mro::set_mro($storage_class, 'c3'); + bless $self, $storage_class; + $self->_rebless(); + } + } + } + + $self->_driver_determined(1); + + $self->_init; # run driver-specific initializations + + $self->_run_connection_actions + if !$started_connected && defined $self->_dbh; } - elsif (ref $connection_do eq 'CODE') { - $connection_do->(); +} + +sub _do_connection_actions { + my $self = shift; + my $method_prefix = shift; + my $call = shift; + + if (not ref($call)) { + my $method = $method_prefix . $call; + $self->$method(@_); + } elsif (ref($call) eq 'CODE') { + $self->$call(@_); + } elsif (ref($call) eq 'ARRAY') { + if (ref($call->[0]) ne 'ARRAY') { + $self->_do_connection_actions($method_prefix, $_) for @$call; + } else { + $self->_do_connection_actions($method_prefix, @$_) for @$call; + } + } else { + $self->throw_exception (sprintf ("Don't know how to process conection actions of type '%s'", ref($call)) ); } return $self; } +sub connect_call_do_sql { + my $self = shift; + $self->_do_query(@_); +} + +sub disconnect_call_do_sql { + my $self = shift; + $self->_do_query(@_); +} + +# override in db-specific backend when necessary +sub connect_call_datetime_setup { 1 } + sub _do_query { my ($self, $action) = @_; @@ -914,10 +1206,18 @@ sub _do_query { $self->_do_query($_) foreach @$action; } else { - my @to_run = (ref $action eq 'ARRAY') ? (@$action) : ($action); - $self->_query_start(@to_run); - $self->_dbh->do(@to_run); - $self->_query_end(@to_run); + # Most debuggers expect ($sql, @bind), so we need to exclude + # the attribute hash which is the second argument to $dbh->do + # furthermore the bind values are usually to be presented + # as named arrayref pairs, so wrap those here too + my @do_args = (ref $action eq 'ARRAY') ? (@$action) : ($action); + my $sql = shift @do_args; + my $attrs = shift @do_args; + my @bind = map { [ undef, $_ ] } @do_args; + + $self->_query_start($sql, @bind); + $self->_get_dbh->do($sql, $attrs, @do_args); + $self->_query_end($sql, @bind); } return $self; @@ -936,38 +1236,52 @@ sub _connect { $DBI::connect_via = 'connect'; } - eval { + try { if(ref $info[0] eq 'CODE') { - $dbh = &{$info[0]} + $dbh = $info[0]->(); } else { $dbh = DBI->connect(@info); } - if($dbh && !$self->unsafe) { - my $weak_self = $self; - weaken($weak_self); - $dbh->{HandleError} = sub { + if (!$dbh) { + die $DBI::errstr; + } + + unless ($self->unsafe) { + + # this odd anonymous coderef dereference is in fact really + # necessary to avoid the unwanted effect described in perl5 + # RT#75792 + sub { + my $weak_self = $_[0]; + weaken $weak_self; + + $_[1]->{HandleError} = sub { if ($weak_self) { $weak_self->throw_exception("DBI Exception: $_[0]"); } else { - croak ("DBI Exception: $_[0]"); + # the handler may be invoked by something totally out of + # the scope of DBIC + croak ("DBI Exception (unhandled by DBIC, ::Schema GCed): $_[0]"); } - }; + }; + }->($self, $dbh); + $dbh->{ShowErrorStatement} = 1; $dbh->{RaiseError} = 1; $dbh->{PrintError} = 0; } + } + catch { + $self->throw_exception("DBI Connection failed: $_") + } + finally { + $DBI::connect_via = $old_connect_via if $old_connect_via; }; - $DBI::connect_via = $old_connect_via if $old_connect_via; - - $self->throw_exception("DBI Connection failed: " . 
($@||$DBI::errstr)) - if !$dbh || $@; - $self->_dbh_autocommit($dbh->{AutoCommit}); - $dbh; } @@ -982,11 +1296,11 @@ sub svp_begin { $self->throw_exception ("Your Storage implementation doesn't support savepoints") unless $self->can('_svp_begin'); - + push @{ $self->{savepoints} }, $name; $self->debugobj->svp_begin($name) if $self->debug; - + return $self->_svp_begin($name); } @@ -1046,39 +1360,63 @@ sub svp_rollback { } $self->debugobj->svp_rollback($name) if $self->debug; - + return $self->_svp_rollback($name); } sub _svp_generate_name { - my ($self) = @_; - - return 'savepoint_'.scalar(@{ $self->{'savepoints'} }); + my ($self) = @_; + return 'savepoint_'.scalar(@{ $self->{'savepoints'} }); } sub txn_begin { my $self = shift; - $self->ensure_connected(); - if($self->{transaction_depth} == 0) { + + # this means we have not yet connected and do not know the AC status + # (e.g. coderef $dbh) + if (! defined $self->_dbh_autocommit) { + $self->ensure_connected; + } + # otherwise re-connect on pid changes, so + # that the txn_depth is adjusted properly + # the lightweight _get_dbh is good enoug here + # (only superficial handle check, no pings) + else { + $self->_get_dbh; + } + + if($self->transaction_depth == 0) { $self->debugobj->txn_begin() if $self->debug; - # this isn't ->_dbh-> because - # we should reconnect on begin_work - # for AutoCommit users - $self->dbh->begin_work; - } elsif ($self->auto_savepoint) { + $self->_dbh_begin_work; + } + elsif ($self->auto_savepoint) { $self->svp_begin; } $self->{transaction_depth}++; } +sub _dbh_begin_work { + my $self = shift; + + # if the user is utilizing txn_do - good for him, otherwise we need to + # ensure that the $dbh is healthy on BEGIN. + # We do this via ->dbh_do instead of ->dbh, so that the ->dbh "ping" + # will be replaced by a failure of begin_work itself (which will be + # then retried on reconnect) + if ($self->{_in_dbh_do}) { + $self->_dbh->begin_work; + } else { + $self->dbh_do(sub { $_[1]->begin_work }); + } +} + sub txn_commit { my $self = shift; if ($self->{transaction_depth} == 1) { - my $dbh = $self->_dbh; $self->debugobj->txn_commit() if ($self->debug); - $dbh->commit; + $self->_dbh_commit; $self->{transaction_depth} = 0 if $self->_dbh_autocommit; } @@ -1087,18 +1425,28 @@ sub txn_commit { $self->svp_release if $self->auto_savepoint; } + else { + $self->throw_exception( 'Refusing to commit without a started transaction' ); + } +} + +sub _dbh_commit { + my $self = shift; + my $dbh = $self->_dbh + or $self->throw_exception('cannot COMMIT on a disconnected handle'); + $dbh->commit; } sub txn_rollback { my $self = shift; my $dbh = $self->_dbh; - eval { + try { if ($self->{transaction_depth} == 1) { $self->debugobj->txn_rollback() if ($self->debug); $self->{transaction_depth} = 0 if $self->_dbh_autocommit; - $dbh->rollback; + $self->_dbh_rollback; } elsif($self->{transaction_depth} > 1) { $self->{transaction_depth}--; @@ -1110,15 +1458,24 @@ sub txn_rollback { else { die DBIx::Class::Storage::NESTED_ROLLBACK_EXCEPTION->new; } - }; - if ($@) { - my $error = $@; - my $exception_class = "DBIx::Class::Storage::NESTED_ROLLBACK_EXCEPTION"; - $error =~ /$exception_class/ and $self->throw_exception($error); - # ensure that a failed rollback resets the transaction depth - $self->{transaction_depth} = $self->_dbh_autocommit ? 
0 : 1; - $self->throw_exception($error); } + catch { + my $exception_class = "DBIx::Class::Storage::NESTED_ROLLBACK_EXCEPTION"; + + if ($_ !~ /$exception_class/) { + # ensure that a failed rollback resets the transaction depth + $self->{transaction_depth} = $self->_dbh_autocommit ? 0 : 1; + } + + $self->throw_exception($_) + }; +} + +sub _dbh_rollback { + my $self = shift; + my $dbh = $self->_dbh + or $self->throw_exception('cannot ROLLBACK on a disconnected handle'); + $dbh->rollback; } # This used to be the top-half of _execute. It was split out to make it @@ -1127,14 +1484,19 @@ sub txn_rollback { sub _prep_for_execute { my ($self, $op, $extra_bind, $ident, $args) = @_; + if( blessed $ident && $ident->isa("DBIx::Class::ResultSource") ) { + $ident = $ident->from(); + } + my ($sql, @bind) = $self->sql_maker->$op($ident, @$args); + unshift(@bind, map { ref $_ eq 'ARRAY' ? $_ : [ '!!dummy', $_ ] } @$extra_bind) if $extra_bind; - return ($sql, \@bind); } + sub _fix_bind_params { my ($self, @bind) = @_; @@ -1147,7 +1509,7 @@ sub _fix_bind_params { if ( defined( $_ && $_->[1] ) ) { map { qq{'$_'}; } @{$_}[ 1 .. $#$_ ]; } - else { q{'NULL'}; } + else { q{NULL}; } } @bind; } @@ -1156,7 +1518,7 @@ sub _query_start { if ( $self->debug ) { @bind = $self->_fix_bind_params(@bind); - + $self->debugobj->query_start( $sql, @bind ); } } @@ -1172,192 +1534,590 @@ sub _query_end { sub _dbh_execute { my ($self, $dbh, $op, $extra_bind, $ident, $bind_attributes, @args) = @_; - - if( blessed($ident) && $ident->isa("DBIx::Class::ResultSource") ) { - $ident = $ident->from(); - } my ($sql, $bind) = $self->_prep_for_execute($op, $extra_bind, $ident, \@args); $self->_query_start( $sql, @$bind ); - my $sth = $self->sth($sql,$op); + my $sth = $self->sth($sql,$op); + + my $placeholder_index = 1; + + foreach my $bound (@$bind) { + my $attributes = {}; + my($column_name, @data) = @$bound; + + if ($bind_attributes) { + $attributes = $bind_attributes->{$column_name} + if defined $bind_attributes->{$column_name}; + } + + foreach my $data (@data) { + my $ref = ref $data; + $data = $ref && $ref ne 'ARRAY' ? ''.$data : $data; # stringify args (except arrayrefs) + + $sth->bind_param($placeholder_index, $data, $attributes); + $placeholder_index++; + } + } + + # Can this fail without throwing an exception anyways??? + my $rv = $sth->execute(); + $self->throw_exception( + $sth->errstr || $sth->err || 'Unknown error: execute() returned false, but error flags were not set...' + ) if !$rv; + + $self->_query_end( $sql, @$bind ); + + return (wantarray ? ($rv, $sth, @$bind) : $rv); +} + +sub _execute { + my $self = shift; + $self->dbh_do('_dbh_execute', @_); # retry over disconnects +} + +sub _prefetch_autovalues { + my ($self, $source, $to_insert) = @_; + + my $colinfo = $source->columns_info; + + my %values; + for my $col (keys %$colinfo) { + if ( + $colinfo->{$col}{auto_nextval} + and + ( + ! 
exists $to_insert->{$col} + or + ref $to_insert->{$col} eq 'SCALAR' + ) + ) { + $values{$col} = $self->_sequence_fetch( + 'nextval', + ( $colinfo->{$col}{sequence} ||= + $self->_dbh_get_autoinc_seq($self->_get_dbh, $source, $col) + ), + ); + } + } + + \%values; +} + +sub insert { + my ($self, $source, $to_insert) = @_; + + my $prefetched_values = $self->_prefetch_autovalues($source, $to_insert); + + # fuse the values + $to_insert = { %$to_insert, %$prefetched_values }; + + # list of primary keys we try to fetch from the database + # both not-exsists and scalarrefs are considered + my %fetch_pks; + %fetch_pks = ( map + { $_ => scalar keys %fetch_pks } # so we can preserve order for prettyness + grep + { ! exists $to_insert->{$_} or ref $to_insert->{$_} eq 'SCALAR' } + $source->primary_columns + ); + + my $sqla_opts; + if ($self->_use_insert_returning) { + + # retain order as declared in the resultsource + for (sort { $fetch_pks{$a} <=> $fetch_pks{$b} } keys %fetch_pks ) { + push @{$sqla_opts->{returning}}, $_; + } + } + + my $bind_attributes = $self->source_bind_attributes($source); + + my ($rv, $sth) = $self->_execute('insert' => [], $source, $bind_attributes, $to_insert, $sqla_opts); + + my %returned_cols; + + if (my $retlist = $sqla_opts->{returning}) { + my @ret_vals = try { + local $SIG{__WARN__} = sub {}; + my @r = $sth->fetchrow_array; + $sth->finish; + @r; + }; + + @returned_cols{@$retlist} = @ret_vals if @ret_vals; + } + + return { %$prefetched_values, %returned_cols }; +} + + +## Currently it is assumed that all values passed will be "normal", i.e. not +## scalar refs, or at least, all the same type as the first set, the statement is +## only prepped once. +sub insert_bulk { + my ($self, $source, $cols, $data) = @_; + + my %colvalues; + @colvalues{@$cols} = (0..$#$cols); + + for my $i (0..$#$cols) { + my $first_val = $data->[0][$i]; + next unless ref $first_val eq 'SCALAR'; + + $colvalues{ $cols->[$i] } = $first_val; + } + + # check for bad data and stringify stringifiable objects + my $bad_slice = sub { + my ($msg, $col_idx, $slice_idx) = @_; + $self->throw_exception(sprintf "%s for column '%s' in populate slice:\n%s", + $msg, + $cols->[$col_idx], + do { + local $Data::Dumper::Maxdepth = 1; # don't dump objects, if any + Dumper { + map { $cols->[$_] => $data->[$slice_idx][$_] } (0 .. $#$cols) + }, + } + ); + }; + + for my $datum_idx (0..$#$data) { + my $datum = $data->[$datum_idx]; + + for my $col_idx (0..$#$cols) { + my $val = $datum->[$col_idx]; + my $sqla_bind = $colvalues{ $cols->[$col_idx] }; + my $is_literal_sql = (ref $sqla_bind) eq 'SCALAR'; + + if ($is_literal_sql) { + if (not ref $val) { + $bad_slice->('bind found where literal SQL expected', $col_idx, $datum_idx); + } + elsif ((my $reftype = ref $val) ne 'SCALAR') { + $bad_slice->("$reftype reference found where literal SQL expected", + $col_idx, $datum_idx); + } + elsif ($$val ne $$sqla_bind){ + $bad_slice->("inconsistent literal SQL value, expecting: '$$sqla_bind'", + $col_idx, $datum_idx); + } + } + elsif (my $reftype = ref $val) { + require overload; + if (overload::Method($val, '""')) { + $datum->[$col_idx] = "".$val; + } + else { + $bad_slice->("$reftype reference found where bind expected", + $col_idx, $datum_idx); + } + } + } + } + + my ($sql, $bind) = $self->_prep_for_execute ( + 'insert', undef, $source, [\%colvalues] + ); + + if (! @$bind) { + # if the bindlist is empty - make sure all "values" are in fact + # literal scalarrefs. If not the case this means the storage ate + # them away (e.g. 
the NoBindVars component) and interpolated them + # directly into the SQL. This obviosly can't be good for multi-inserts + + $self->throw_exception('Cannot insert_bulk without support for placeholders') + if first { ref $_ ne 'SCALAR' } values %colvalues; + } + + # neither _execute_array, nor _execute_inserts_with_no_binds are + # atomic (even if _execute _array is a single call). Thus a safety + # scope guard + my $guard = $self->txn_scope_guard; + + $self->_query_start( $sql, @$bind ? [ dummy => '__BULK_INSERT__' ] : () ); + my $sth = $self->sth($sql); + my $rv = do { + if (@$bind) { + #@bind = map { ref $_ ? ''.$_ : $_ } @bind; # stringify args + $self->_execute_array( $source, $sth, $bind, $cols, $data ); + } + else { + # bind_param_array doesn't work if there are no binds + $self->_dbh_execute_inserts_with_no_binds( $sth, scalar @$data ); + } + }; + + $self->_query_end( $sql, @$bind ? [ dummy => '__BULK_INSERT__' ] : () ); + + $guard->commit; + + return (wantarray ? ($rv, $sth, @$bind) : $rv); +} + +sub _execute_array { + my ($self, $source, $sth, $bind, $cols, $data, @extra) = @_; + + ## This must be an arrayref, else nothing works! + my $tuple_status = []; + + ## Get the bind_attributes, if any exist + my $bind_attributes = $self->source_bind_attributes($source); - my $placeholder_index = 1; + ## Bind the values and execute + my $placeholder_index = 1; foreach my $bound (@$bind) { + my $attributes = {}; - my($column_name, @data) = @$bound; + my ($column_name, $data_index) = @$bound; - if ($bind_attributes) { + if( $bind_attributes ) { $attributes = $bind_attributes->{$column_name} if defined $bind_attributes->{$column_name}; } - foreach my $data (@data) { - $data = ref $data ? ''.$data : $data; # stringify args + my @data = map { $_->[$data_index] } @$data; - $sth->bind_param($placeholder_index, $data, $attributes); - $placeholder_index++; - } + $sth->bind_param_array( + $placeholder_index, + [@data], + (%$attributes ? $attributes : ()), + ); + $placeholder_index++; } - # Can this fail without throwing an exception anyways??? - my $rv = $sth->execute(); - $self->throw_exception($sth->errstr) if !$rv; + my ($rv, $err); + try { + $rv = $self->_dbh_execute_array($sth, $tuple_status, @extra); + } + catch { + $err = shift; + }; - $self->_query_end( $sql, @$bind ); + # Statement must finish even if there was an exception. + try { + $sth->finish + } + catch { + $err = shift unless defined $err + }; - return (wantarray ? ($rv, $sth, @$bind) : $rv); + $err = $sth->errstr + if (! defined $err and $sth->err); + + if (defined $err) { + my $i = 0; + ++$i while $i <= $#$tuple_status && !ref $tuple_status->[$i]; + + $self->throw_exception("Unexpected populate error: $err") + if ($i > $#$tuple_status); + + $self->throw_exception(sprintf "%s for populate slice:\n%s", + ($tuple_status->[$i][1] || $err), + Dumper { map { $cols->[$_] => $data->[$i][$_] } (0 .. 
$#$cols) }, + ); + } + + return $rv; } -sub _execute { - my $self = shift; - $self->dbh_do('_dbh_execute', @_) +sub _dbh_execute_array { + my ($self, $sth, $tuple_status, @extra) = @_; + + return $sth->execute_array({ArrayTupleStatus => $tuple_status}); } -sub insert { - my ($self, $source, $to_insert) = @_; - - my $ident = $source->from; - my $bind_attributes = $self->source_bind_attributes($source); +sub _dbh_execute_inserts_with_no_binds { + my ($self, $sth, $count) = @_; - $self->ensure_connected; - foreach my $col ( $source->columns ) { - if ( !defined $to_insert->{$col} ) { - my $col_info = $source->column_info($col); + my $err; + try { + my $dbh = $self->_get_dbh; + local $dbh->{RaiseError} = 1; + local $dbh->{PrintError} = 0; - if ( $col_info->{auto_nextval} ) { - $to_insert->{$col} = $self->_sequence_fetch( 'nextval', $col_info->{sequence} || $self->_dbh_get_autoinc_seq($self->dbh, $source) ); - } - } + $sth->execute foreach 1..$count; + } + catch { + $err = shift; + }; + + # Make sure statement is finished even if there was an exception. + try { + $sth->finish } + catch { + $err = shift unless defined $err; + }; - $self->_execute('insert' => [], $source, $bind_attributes, $to_insert); + $self->throw_exception($err) if defined $err; - return $to_insert; + return $count; } -## Still not quite perfect, and EXPERIMENTAL -## Currently it is assumed that all values passed will be "normal", i.e. not -## scalar refs, or at least, all the same type as the first set, the statement is -## only prepped once. -sub insert_bulk { - my ($self, $source, $cols, $data) = @_; - my %colvalues; - my $table = $source->from; - @colvalues{@$cols} = (0..$#$cols); - my ($sql, @bind) = $self->sql_maker->insert($table, \%colvalues); - - $self->_query_start( $sql, @bind ); - my $sth = $self->sth($sql); +sub update { + my ($self, $source, @args) = @_; -# @bind = map { ref $_ ? ''.$_ : $_ } @bind; # stringify args + my $bind_attrs = $self->source_bind_attributes($source); - ## This must be an arrayref, else nothing works! - - my $tuple_status = []; - - ##use Data::Dumper; - ##print STDERR Dumper( $data, $sql, [@bind] ); + return $self->_execute('update' => [], $source, $bind_attrs, @args); +} - my $time = time(); - ## Get the bind_attributes, if any exist - my $bind_attributes = $self->source_bind_attributes($source); +sub delete { + my ($self, $source, @args) = @_; - ## Bind the values and execute - my $placeholder_index = 1; + my $bind_attrs = $self->source_bind_attributes($source); - foreach my $bound (@bind) { + return $self->_execute('delete' => [], $source, $bind_attrs, @args); +} - my $attributes = {}; - my ($column_name, $data_index) = @$bound; +# We were sent here because the $rs contains a complex search +# which will require a subquery to select the correct rows +# (i.e. joined or limited resultsets, or non-introspectable conditions) +# +# Generating a single PK column subquery is trivial and supported +# by all RDBMS. However if we have a multicolumn PK, things get ugly. 
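+# (For a single-column PK the subquery form built below is roughly:
+#    DELETE FROM tbl WHERE pk IN ( <subselect> )
+#  where 'tbl' and 'pk' merely stand in for the real source and column.)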
+# Look at _multipk_update_delete() +sub _subq_update_delete { + my $self = shift; + my ($rs, $op, $values) = @_; - if( $bind_attributes ) { - $attributes = $bind_attributes->{$column_name} - if defined $bind_attributes->{$column_name}; + my $rsrc = $rs->result_source; + + # quick check if we got a sane rs on our hands + my @pcols = $rsrc->_pri_cols; + + my $sel = $rs->_resolved_attrs->{select}; + $sel = [ $sel ] unless ref $sel eq 'ARRAY'; + + if ( + join ("\x00", map { join '.', $rs->{attrs}{alias}, $_ } sort @pcols) + ne + join ("\x00", sort @$sel ) + ) { + $self->throw_exception ( + '_subq_update_delete can not be called on resultsets selecting columns other than the primary keys' + ); + } + + if (@pcols == 1) { + return $self->$op ( + $rsrc, + $op eq 'update' ? $values : (), + { $pcols[0] => { -in => $rs->as_query } }, + ); + } + + else { + return $self->_multipk_update_delete (@_); + } +} + +# ANSI SQL does not provide a reliable way to perform a multicol-PK +# resultset update/delete involving subqueries. So by default resort +# to simple (and inefficient) delete_all style per-row opearations, +# while allowing specific storages to override this with a faster +# implementation. +# +sub _multipk_update_delete { + return shift->_per_row_update_delete (@_); +} + +# This is the default loop used to delete/update rows for multi PK +# resultsets, and used by mysql exclusively (because it can't do anything +# else). +# +# We do not use $row->$op style queries, because resultset update/delete +# is not expected to cascade (this is what delete_all/update_all is for). +# +# There should be no race conditions as the entire operation is rolled +# in a transaction. +# +sub _per_row_update_delete { + my $self = shift; + my ($rs, $op, $values) = @_; + + my $rsrc = $rs->result_source; + my @pcols = $rsrc->_pri_cols; + + my $guard = $self->txn_scope_guard; + + # emulate the return value of $sth->execute for non-selects + my $row_cnt = '0E0'; + + my $subrs_cur = $rs->cursor; + my @all_pk = $subrs_cur->all; + for my $pks ( @all_pk) { + + my $cond; + for my $i (0.. $#pcols) { + $cond->{$pcols[$i]} = $pks->[$i]; } - my @data = map { $_->[$data_index] } @$data; + $self->$op ( + $rsrc, + $op eq 'update' ? $values : (), + $cond, + ); - $sth->bind_param_array( $placeholder_index, [@data], $attributes ); - $placeholder_index++; + $row_cnt++; } - my $rv = $sth->execute_array({ArrayTupleStatus => $tuple_status}); - $self->throw_exception($sth->errstr) if !$rv; - $self->_query_end( $sql, @bind ); - return (wantarray ? ($rv, $sth, @bind) : $rv); + $guard->commit; + + return $row_cnt; } -sub update { - my $self = shift @_; - my $source = shift @_; - my $bind_attributes = $self->source_bind_attributes($source); - - return $self->_execute('update' => [], $source, $bind_attributes, @_); +sub _select { + my $self = shift; + $self->_execute($self->_select_args(@_)); } +sub _select_args_to_query { + my $self = shift; -sub delete { - my $self = shift @_; - my $source = shift @_; - - my $bind_attrs = {}; ## If ever it's needed... 
-
-  return $self->_execute('delete' => [], $source, $bind_attrs, @_);
+  # my ($op, $bind, $ident, $bind_attrs, $select, $cond, $rs_attrs, $rows, $offset)
+  #  = $self->_select_args($ident, $select, $cond, $attrs);
+  my ($op, $bind, $ident, $bind_attrs, @args) =
+    $self->_select_args(@_);
+
+  # my ($sql, $prepared_bind) = $self->_prep_for_execute($op, $bind, $ident, [ $select, $cond, $rs_attrs, $rows, $offset ]);
+  my ($sql, $prepared_bind) = $self->_prep_for_execute($op, $bind, $ident, \@args);
+  $prepared_bind ||= [];
+
+  return wantarray
+    ? ($sql, $prepared_bind, $bind_attrs)
+    : \[ "($sql)", @$prepared_bind ]
+  ;
 }

-sub _select {
-  my ($self, $ident, $select, $condition, $attrs) = @_;
-  my $order = $attrs->{order_by};
-
-  if (ref $condition eq 'SCALAR') {
-    my $unwrap = ${$condition};
-    if ($unwrap =~ s/ORDER BY (.*)$//i) {
-      $order = $1;
-      $condition = \$unwrap;
-    }
-  }
+sub _select_args {
+  my ($self, $ident, $select, $where, $attrs) = @_;

-  my $for = delete $attrs->{for};
   my $sql_maker = $self->sql_maker;
-  local $sql_maker->{for} = $for;
+  my ($alias2source, $rs_alias) = $self->_resolve_ident_sources ($ident);
+
+  $attrs = {
+    %$attrs,
+    select => $select,
+    from => $ident,
+    where => $where,
+    $rs_alias && $alias2source->{$rs_alias}
+      ? ( _rsroot_source_handle => $alias2source->{$rs_alias}->handle )
+      : ()
+    ,
+  };

-  if (exists $attrs->{group_by} || $attrs->{having}) {
-    $order = {
-      group_by => $attrs->{group_by},
-      having => $attrs->{having},
-      ($order ? (order_by => $order) : ())
-    };
+  # calculate bind_attrs before possible $ident mangling
+  my $bind_attrs = {};
+  for my $alias (keys %$alias2source) {
+    my $bindtypes = $self->source_bind_attributes ($alias2source->{$alias}) || {};
+    for my $col (keys %$bindtypes) {
+
+      my $fqcn = join ('.', $alias, $col);
+      $bind_attrs->{$fqcn} = $bindtypes->{$col} if $bindtypes->{$col};
+
+      # Unqualified column names are nice, but at the same time can be
+      # rather ambiguous. What we do here is basically go along with
+      # the loop, adding an unqualified column slot to $bind_attrs,
+      # alongside the fully qualified name. As soon as we encounter
+      # another column by that name (which would imply another table)
+      # we unset the unqualified slot and never add any info to it
+      # to avoid erroneous type binding. If this happens the user's
+      # only choice is to fully qualify the column name
+
+      if (exists $bind_attrs->{$col}) {
+        $bind_attrs->{$col} = {};
+      }
+      else {
+        $bind_attrs->{$col} = $bind_attrs->{$fqcn};
+      }
+    }
   }

-  my $bind_attrs = {}; ## Future support
-  my @args = ('select', $attrs->{bind}, $ident, $bind_attrs, $select, $condition, $order);
-  if ($attrs->{software_limit} ||
-      $self->sql_maker->_default_limit_syntax eq "GenericSubQ") {
-        $attrs->{software_limit} = 1;
-  } else {
-    $self->throw_exception("rows attribute must be positive if present")
-      if (defined($attrs->{rows}) && !($attrs->{rows} > 0));
+  # Sanity check the attributes (SQLMaker does it too, but
+  # in case of a software_limit we'll never reach it)
+  if (defined $attrs->{offset}) {
+    $self->throw_exception('A supplied offset attribute must be a non-negative integer')
+      if ( $attrs->{offset} =~ /\D/ or $attrs->{offset} < 0 );
+  }
+  $attrs->{offset} ||= 0;
+
+  if (defined $attrs->{rows}) {
+    $self->throw_exception("The rows attribute must be a positive integer if present")
+      if ( $attrs->{rows} =~ /\D/ or $attrs->{rows} <= 0 );
+  }
+  elsif ($attrs->{offset}) { # MySQL actually recommends this approach.  I cringe.
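+    # (e.g. a hypothetical OFFSET 20 with no LIMIT ends up as something like
+    #  "LIMIT 18446744073709551615 OFFSET 20" on MySQL - the dialect's
+    #  largest representable integer stands in for "no limit")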
-    $attrs->{rows} = 2**48 if not defined $attrs->{rows} and defined $attrs->{offset};
-    push @args, $attrs->{rows}, $attrs->{offset};
+    $attrs->{rows} = $sql_maker->__max_int;
+  }
+
+  my @limit;
+
+  # see if we need to tear the prefetch apart, otherwise delegate the
+  # limiting to the storage, unless a software limit was requested
+  if (
+    #limited has_many
+    ( $attrs->{rows} && keys %{$attrs->{collapse}} )
+       ||
+    # grouped prefetch (to satisfy group_by == select)
+    ( $attrs->{group_by}
+        &&
+      @{$attrs->{group_by}}
+        &&
+      $attrs->{_prefetch_select}
+        &&
+      @{$attrs->{_prefetch_select}}
+    )
+  ) {
+    ($ident, $select, $where, $attrs)
+      = $self->_adjust_select_args_for_complex_prefetch ($ident, $select, $where, $attrs);
+  }
+  elsif (! $attrs->{software_limit} ) {
+    push @limit, $attrs->{rows}, $attrs->{offset};
+  }
+
+  # try to simplify the joinmap further (prune unreferenced type-single joins)
+  $ident = $self->_prune_unused_joins ($ident, $select, $where, $attrs);
+
+###
+  # This would be the point to deflate anything found in $where
+  # (and leave $attrs->{bind} intact). Problem is - inflators historically
+  # expect a row object. And all we have is a resultsource (it is trivial
+  # to extract deflator coderefs via $alias2source above).
+  #
+  # I don't see a way forward other than changing the way deflators are
+  # invoked, and that's just bad...
+###
+
+  return ('select', $attrs->{bind}, $ident, $bind_attrs, $select, $where, $attrs, @limit);
+}

-  return $self->_execute(@args);
+# Returns a counting SELECT for a simple count
+# query. Abstracted so that a storage could override
+# this to { count => 'firstcol' } or whatever makes
+# sense as a performance optimization
+sub _count_select {
+  #my ($self, $source, $rs_attrs) = @_;
+  return { count => '*' };
 }
+

 sub source_bind_attributes {
   my ($self, $source) = @_;
-
+
   my $bind_attributes;
-  foreach my $column ($source->columns) {
-
-    my $data_type = $source->column_info($column)->{data_type} || '';
-    $bind_attributes->{$column} = $self->bind_attribute_by_data_type($data_type)
-     if $data_type;
+
+  my $colinfo = $source->columns_info;
+
+  for my $col (keys %$colinfo) {
+    if (my $dt = $colinfo->{$col}{data_type} ) {
+      $bind_attributes->{$col} = $self->bind_attribute_by_data_type($dt)
+    }
   }

   return $bind_attributes;
@@ -1394,6 +2154,13 @@ sub select_single {
   return @row;
 }

+=head2 sql_limit_dialect
+
+This is an accessor for the default SQL limit dialect used by a particular
+storage driver. Can be overridden by supplying an explicit L</connect_info>
+to L<DBIx::Class::Schema/connect>. For a list of available limit dialects
+see L<DBIx::Class::SQLMaker::LimitDialects>.
+
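+For example, a minimal sketch of forcing a specific dialect at connect time
+via the C<limit_dialect> attribute of L</connect_info> (the DSN and
+credentials are placeholders):
+
+  my $schema = My::Schema->connect(
+    $dsn, $user, $pass,
+    { limit_dialect => 'GenericSubQ' },
+  );
+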
 =head2 sth

 =over 4

@@ -1423,7 +2190,7 @@ sub _dbh_sth {
 sub sth {
   my ($self, $sql) = @_;
-  $self->dbh_do('_dbh_sth', $sql);
+  $self->dbh_do('_dbh_sth', $sql); # retry over disconnects
 }

 sub _dbh_columns_info_for {
@@ -1431,7 +2198,8 @@ sub _dbh_columns_info_for {
   if ($dbh->can('column_info')) {
     my %result;
-    eval {
+    my $caught;
+    try {
      my ($schema,$tab) = $table =~ /^(.+?)\.(.+)$/ ? ($1,$2) : (undef,$table);
      my $sth = $dbh->column_info( undef,$schema, $tab, '%' );
      $sth->execute();
@@ -1446,8 +2214,10 @@ sub _dbh_columns_info_for {
        $result{$col_name} = \%column_info;
      }
+    } catch {
+      $caught = 1;
     };
-    return \%result if !$@ && scalar keys %result;
+    return \%result if !$caught && scalar keys %result;
   }

   my %result;
@@ -1485,7 +2255,7 @@ sub _dbh_columns_info_for {
 sub columns_info_for {
   my ($self, $table) = @_;
-  $self->dbh_do('_dbh_columns_info_for', $table);
+  $self->_dbh_columns_info_for ($self->_get_dbh, $table);
 }

 =head2 last_insert_id
@@ -1496,13 +2266,85 @@ Return the row id of the last insert.
 sub _dbh_last_insert_id {
   my ($self, $dbh, $source, $col) = @_;
-  # XXX This is a SQLite-ism as a default... is there a DBI-generic way?
-  $dbh->func('last_insert_rowid');
+
+  my $id = try { $dbh->last_insert_id (undef, undef, $source->name, $col) };
+
+  return $id if defined $id;
+
+  my $class = ref $self;
+  $self->throw_exception ("No storage specific _dbh_last_insert_id() method implemented in $class, and the generic DBI::last_insert_id() failed");
 }

 sub last_insert_id {
   my $self = shift;
-  $self->dbh_do('_dbh_last_insert_id', @_);
+  $self->_dbh_last_insert_id ($self->_dbh, @_);
+}
+
+=head2 _native_data_type
+
+=over 4
+
+=item Arguments: $type_name
+
+=back
+
+This API is B<EXPERIMENTAL>, will almost definitely change in the future, and
+is currently only used by L<::AutoCast|DBIx::Class::Storage::DBI::AutoCast> and
+L<::Sybase::ASE|DBIx::Class::Storage::DBI::Sybase::ASE>.
+
+The default implementation returns C<undef>; implement it in your Storage
+driver if you need this functionality.
+
+Should map types from other databases to the native RDBMS type, for example
+C<VARCHAR2> to C<VARCHAR>.
+
+Types with modifiers should map to the underlying data type. For example,
+C<INTEGER AUTO_INCREMENT> should become C<INTEGER>.
+
+Composite types should map to the container type, for example
+C<ENUM(foo,bar,baz)> becomes C<ENUM>.
+
+=cut
+
+sub _native_data_type {
+  #my ($self, $data_type) = @_;
+  return undef
+}
+
+# Check if placeholders are supported at all
+sub _determine_supports_placeholders {
+  my $self = shift;
+  my $dbh = $self->_get_dbh;
+
+  # some drivers provide a $dbh attribute (e.g. Sybase and $dbh->{syb_dynamic_supported})
+  # but it is inaccurate more often than not
+  return try {
+    local $dbh->{PrintError} = 0;
+    local $dbh->{RaiseError} = 1;
+    $dbh->do('select ?', {}, 1);
+    1;
+  }
+  catch {
+    0;
+  };
+}
+
+# Check if placeholders bound to non-string types throw exceptions
+#
+sub _determine_supports_typeless_placeholders {
+  my $self = shift;
+  my $dbh = $self->_get_dbh;
+
+  return try {
+    local $dbh->{PrintError} = 0;
+    local $dbh->{RaiseError} = 1;
+    # this specifically tests a bind that is NOT a string
+    $dbh->do('select 1 where 1 = ?', {}, 1);
+    1;
+  }
+  catch {
+    0;
+  };
 }

 =head2 sqlt_type
@@ -1511,7 +2353,9 @@ Returns the database driver name.
 =cut

-sub sqlt_type { shift->dbh->{Driver}->{Name} }
+sub sqlt_type {
+  shift->_get_dbh->{Driver}->{Name};
+}

 =head2 bind_attribute_by_data_type
@@ -1527,6 +2371,27 @@ sub bind_attribute_by_data_type {
   return;
 }

+=head2 is_datatype_numeric
+
+Given a datatype from column_info, returns a boolean value indicating if
+the current RDBMS considers it a numeric value. This controls how
+L<DBIx::Class::Row/set_column> decides whether to mark the column as
+dirty - when the datatype is deemed numeric a C<< != >> comparison will
+be performed instead of the usual C<eq>.
+
+=cut
+
+sub is_datatype_numeric {
+  my ($self, $dt) = @_;
+
+  return 0 unless $dt;
+
+  return $dt =~ /^ (?:
+    numeric | int(?:eger)? | (?:tiny|small|medium|big)int | dec(?:imal)?
+    | real | float | double (?: \s+ precision)? | (?:big)?serial
+  ) $/ix;
+}
+
+
 =head2 create_ddl_dir

 =over 4
@@ -1536,26 +2401,72 @@ sub bind_attribute_by_data_type {
 =back

 Creates a SQL file based on the Schema, for each of the specified
-database types, in the given directory.
+database engines in C<\@databases> in the given directory.
+(note: specify L<SQL::Translator> names, not L<DBI> driver names).
+
+Given a previous version number, this will also create a file containing
+the ALTER TABLE statements to transform the previous schema into the
+current one. Note that these statements may contain C<DROP TABLE> or
+C<DROP COLUMN> statements that can potentially destroy data.
+
+The file names are created using the C<ddl_filename> method below; please
+override this method in your schema if you would like a different file
+name format. For the ALTER file, the same format is used, replacing
+$version in the name with "$preversion-$version".
+
+See L<SQL::Translator> for a list of values for C<\%sqlt_args>.
+The most common value for this would be C<< { add_drop_table => 1 } >>
+to have the SQL produced include a C<DROP TABLE> statement for each table
+created. For quoting purposes supply C<quote_table_names> and
+C<quote_field_names>.
+
+If no arguments are passed, then the following default values are assumed:
+
+=over 4
+
+=item databases  - ['MySQL', 'SQLite', 'PostgreSQL']
+
+=item version    - $schema->schema_version
+
+=item directory  - './'
+
+=item preversion -
+
+=back

 By default, C<\%sqlt_args> will have

  { add_drop_table => 1, ignore_constraint_names => 1, ignore_index_names => 1 }

-merged with the hash passed in. To disable any of those features, pass in a
+merged with the hash passed in. To disable any of those features, pass in a
 hashref like the following

  { ignore_constraint_names => 0, # ... other options }
+
+WARNING: You are strongly advised to check all SQL files created before
+applying them.
+
 =cut

 sub create_ddl_dir {
   my ($self, $schema, $databases, $version, $dir, $preversion, $sqltargs) = @_;

-  if(!$dir || !-d $dir) {
-    warn "No directory given, using ./\n";
-    $dir = "./";
+  unless ($dir) {
+    carp "No directory given, using ./\n";
+    $dir = './';
+  } else {
+    -d $dir
+      or
+    make_path ("$dir") # make_path does not like objects (i.e. Path::Class::Dir)
+      or
+    $self->throw_exception(
+      "Failed to create '$dir': " . ($! || $@ || 'unknown error')
+    );
   }
+
+  $self->throw_exception ("Directory '$dir' does not exist\n") unless(-d $dir);
+
   $databases ||= ['MySQL', 'SQLite', 'PostgreSQL'];
   $databases = [ $databases ] if(ref($databases) ne 'ARRAY');
@@ -1563,24 +2474,24 @@ sub create_ddl_dir {
   $version ||= $schema_version;

   $sqltargs = {
-    add_drop_table => 1, 
+    add_drop_table => 1,
     ignore_constraint_names => 1,
     ignore_index_names => 1,
     %{$sqltargs || {}}
   };

-  $self->throw_exception(q{Can't create a ddl file without SQL::Translator 0.09: '}
-      . $self->_check_sqlt_message . q{'})
-      if !$self->_check_sqlt_version;
+  unless (DBIx::Class::Optional::Dependencies->req_ok_for ('deploy')) {
+    $self->throw_exception("Can't create a ddl file without " .
DBIx::Class::Optional::Dependencies->req_missing_for ('deploy') ); + } my $sqlt = SQL::Translator->new( $sqltargs ); $sqlt->parser('SQL::Translator::Parser::DBIx::Class'); - my $sqlt_schema = $sqlt->translate({ data => $schema }) or die $sqlt->error; + my $sqlt_schema = $sqlt->translate({ data => $schema }) + or $self->throw_exception ($sqlt->error); foreach my $db (@$databases) { $sqlt->reset(); - $sqlt = $self->configure_sqlt($sqlt, $db); $sqlt->{schema} = $sqlt_schema; $sqlt->producer($db); @@ -1588,13 +2499,13 @@ sub create_ddl_dir { my $filename = $schema->ddl_filename($db, $version, $dir); if (-e $filename && ($version eq $schema_version )) { # if we are dumping the current version, overwrite the DDL - warn "Overwriting existing DDL file - $filename"; + carp "Overwriting existing DDL file - $filename"; unlink($filename); } my $output = $sqlt->translate; if(!$output) { - warn("Failed to translate to $db, skipping. (" . $sqlt->error . ")"); + carp("Failed to translate to $db, skipping. (" . $sqlt->error . ")"); next; } if(!open($file, ">$filename")) { @@ -1603,59 +2514,68 @@ sub create_ddl_dir { } print $file $output; close($file); - + next unless ($preversion); require SQL::Translator::Diff; my $prefilename = $schema->ddl_filename($db, $preversion, $dir); if(!-e $prefilename) { - warn("No previous schema file found ($prefilename)"); + carp("No previous schema file found ($prefilename)"); next; } my $difffile = $schema->ddl_filename($db, $version, $dir, $preversion); if(-e $difffile) { - warn("Overwriting existing diff file - $difffile"); + carp("Overwriting existing diff file - $difffile"); unlink($difffile); } - + my $source_schema; { my $t = SQL::Translator->new($sqltargs); $t->debug( 0 ); $t->trace( 0 ); - $t->parser( $db ) or die $t->error; - $t = $self->configure_sqlt($t, $db); - my $out = $t->translate( $prefilename ) or die $t->error; + + $t->parser( $db ) + or $self->throw_exception ($t->error); + + my $out = $t->translate( $prefilename ) + or $self->throw_exception ($t->error); + $source_schema = $t->schema; - unless ( $source_schema->name ) { - $source_schema->name( $prefilename ); - } + + $source_schema->name( $prefilename ) + unless ( $source_schema->name ); } - # The "new" style of producers have sane normalization and can support + # The "new" style of producers have sane normalization and can support # diffing a SQL file against a DBIC->SQLT schema. Old style ones don't # And we have to diff parsed SQL against parsed SQL. 
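    # (For reference: the diff a few lines below is performed as
    #   SQL::Translator::Diff::schema_diff($source_schema, $db, $dest_schema, $db, $sqltargs)
    # so both sides have to be SQL::Translator::Schema objects built for the
    # same producer - hence the re-parse fallbacks above and below.)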
    my $dest_schema = $sqlt_schema;
-
+
    unless ( "SQL::Translator::Producer::$db"->can('preprocess_schema') ) {
      my $t = SQL::Translator->new($sqltargs);
      $t->debug( 0 );
      $t->trace( 0 );
-      $t->parser( $db ) or die $t->error;
-      $t = $self->configure_sqlt($t, $db);
-      my $out = $t->translate( $filename ) or die $t->error;
+
+      $t->parser( $db )
+        or $self->throw_exception ($t->error);
+
+      my $out = $t->translate( $filename )
+        or $self->throw_exception ($t->error);
+
      $dest_schema = $t->schema;
+
      $dest_schema->name( $filename )
        unless $dest_schema->name;
    }
-
+
    my $diff = SQL::Translator::Diff::schema_diff($source_schema, $db, $dest_schema, $db, $sqltargs );
-    if(!open $file, ">$difffile") { 
+    if(!open $file, ">$difffile") {
      $self->throw_exception("Can't write to $difffile ($!)");
      next;
    }
@@ -1664,17 +2584,6 @@ sub create_ddl_dir {
  }
 }

-sub configure_sqlt() {
-  my $self = shift;
-  my $tr = shift;
-  my $db = shift || $self->sqlt_type;
-  if ($db eq 'PostgreSQL') {
-      $tr->quote_table_names(0);
-      $tr->quote_field_names(0);
-  }
-  return $tr;
-}
-
 =head2 deployment_statements

 =over 4
@@ -1684,8 +2593,9 @@ sub configure_sqlt() {
 =back

 Returns the statements used by L</deploy> and L<DBIx::Class::Schema/deploy>.
-The database driver name is given by C<$type>, though the value from
-L</sqlt_type> is used if it is not specified.
+
+The L<SQL::Translator> (not L<DBI>) database driver name can be explicitly
+provided in C<$type>, otherwise the result of L</sqlt_type> is used as default.

 C<$directory> is used to return statements from files in a previously created
 L</create_ddl_dir> directory and is optional. The filenames are constructed
@@ -1700,58 +2610,80 @@ See L<SQL::Translator> for a list of values for C<$sqlt_args>.
 sub deployment_statements {
   my ($self, $schema, $type, $version, $dir, $sqltargs) = @_;
-  # Need to be connected to get the correct sqlt_type
-  $self->ensure_connected() unless $type;
   $type ||= $self->sqlt_type;
   $version ||= $schema->schema_version || '1.x';
   $dir ||= './';
-  my $filename = $schema->ddl_filename($type, $dir, $version);
+  my $filename = $schema->ddl_filename($type, $version, $dir);
   if(-f $filename)
   {
      my $file;
-      open($file, "<$filename") 
+      open($file, "<$filename")
        or $self->throw_exception("Can't open $filename ($!)");
      my @rows = <$file>;
      close($file);
      return join('', @rows);
   }

-  $self->throw_exception(q{Can't deploy without SQL::Translator 0.09: '}
-      . $self->_check_sqlt_message . q{'})
-      if !$self->_check_sqlt_version;
-
-  require SQL::Translator::Parser::DBIx::Class;
-  eval qq{use SQL::Translator::Producer::${type}};
-  $self->throw_exception($@) if $@;
+  unless (DBIx::Class::Optional::Dependencies->req_ok_for ('deploy') ) {
+    $self->throw_exception("Can't deploy without a ddl_dir or " . DBIx::Class::Optional::Dependencies->req_missing_for ('deploy') );
+  }

-  # sources needs to be a parser arg, but for simplicty allow at top level
+  # sources needs to be a parser arg, but for simplicity allow it at top level
   # coming in
   $sqltargs->{parser_args}{sources} = delete $sqltargs->{sources}
      if exists $sqltargs->{sources};

-  my $tr = SQL::Translator->new(%$sqltargs);
-  SQL::Translator::Parser::DBIx::Class::parse( $tr, $schema );
-  return "SQL::Translator::Producer::${type}"->can('produce')->($tr);
+  my $tr = SQL::Translator->new(
+    producer => "SQL::Translator::Producer::${type}",
+    %$sqltargs,
+    parser => 'SQL::Translator::Parser::DBIx::Class',
+    data => $schema,
+  );
+
+  my @ret;
+  my $wa = wantarray;
+  if ($wa) {
+    @ret = $tr->translate;
+  }
+  else {
+    $ret[0] = $tr->translate;
+  }
+
+  $self->throw_exception( 'Unable to produce deployment statements: ' .
$tr->error) + unless (@ret && defined $ret[0]); + + return $wa ? @ret : $ret[0]; } sub deploy { my ($self, $schema, $type, $sqltargs, $dir) = @_; - foreach my $statement ( $self->deployment_statements($schema, $type, undef, $dir, { no_comments => 1, %{ $sqltargs || {} } } ) ) { - foreach my $line ( split(";\n", $statement)) { - next if($line =~ /^--/); - next if(!$line); -# next if($line =~ /^DROP/m); - next if($line =~ /^BEGIN TRANSACTION/m); - next if($line =~ /^COMMIT/m); - next if $line =~ /^\s+$/; # skip whitespace only - $self->_query_start($line); - eval { - $self->dbh->do($line); # shouldn't be using ->dbh ? - }; - if ($@) { - warn qq{$@ (running "${line}")}; - } - $self->_query_end($line); + my $deploy = sub { + my $line = shift; + return if($line =~ /^--/); + return if(!$line); + # next if($line =~ /^DROP/m); + return if($line =~ /^BEGIN TRANSACTION/m); + return if($line =~ /^COMMIT/m); + return if $line =~ /^\s+$/; # skip whitespace only + $self->_query_start($line); + try { + # do a dbh_do cycle here, as we need some error checking in + # place (even though we will ignore errors) + $self->dbh_do (sub { $_[1]->do($line) }); + } catch { + carp qq{$_ (running "${line}")}; + }; + $self->_query_end($line); + }; + my @statements = $schema->deployment_statements($type, undef, $dir, { %{ $sqltargs || {} }, no_comments => 1 } ); + if (@statements > 1) { + foreach my $statement (@statements) { + $deploy->( $statement ); + } + } + elsif (@statements == 1) { + foreach my $line ( split(";\n", $statements[0])) { + $deploy->( $line ); } } } @@ -1765,7 +2697,6 @@ Returns the datetime parser class sub datetime_parser { my $self = shift; return $self->{datetime_parser} ||= do { - $self->ensure_connected; $self->build_datetime_parser(@_); }; } @@ -1788,26 +2719,10 @@ See L sub build_datetime_parser { my $self = shift; my $type = $self->datetime_parser_type(@_); - eval "use ${type}"; - $self->throw_exception("Couldn't load ${type}: $@") if $@; + $self->ensure_class_loaded ($type); return $type; } -{ - my $_check_sqlt_version; # private - my $_check_sqlt_message; # private - sub _check_sqlt_version { - return $_check_sqlt_version if defined $_check_sqlt_version; - eval 'use SQL::Translator "0.09"'; - $_check_sqlt_message = $@ || ''; - $_check_sqlt_version = !$@; - } - - sub _check_sqlt_message { - _check_sqlt_version if !defined $_check_sqlt_message; - $_check_sqlt_message; - } -} =head2 is_replicating @@ -1819,7 +2734,7 @@ returned by databases that don't support replication. sub is_replicating { return; - + } =head2 lag_behind_master @@ -1834,11 +2749,32 @@ sub lag_behind_master { return; } -sub DESTROY { - my $self = shift; - return if !$self->_dbh; - $self->_verify_pid; - $self->_dbh(undef); +=head2 relname_to_table_alias + +=over 4 + +=item Arguments: $relname, $join_count + +=back + +L uses L names as table aliases in +queries. + +This hook is to allow specific L drivers to change the +way these aliases are named. + +The default behavior is C<< "$relname_$join_count" if $join_count > 1 >>, +otherwise C<"$relname">. + +=cut + +sub relname_to_table_alias { + my ($self, $relname, $join_count) = @_; + + my $alias = ($join_count && $join_count > 1 ? + join('_', $relname, $join_count) : $relname); + + return $alias; } 1; @@ -1849,7 +2785,7 @@ sub DESTROY { DBIx::Class can do some wonderful magic with handling exceptions, disconnections, and transactions when you use C<< AutoCommit => 1 >> -combined with C for transaction support. +(the default) combined with C for transaction support. 
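For example, a minimal sketch (the schema class, DSN and result sources are
hypothetical):

  my $schema = My::Schema->connect($dsn, $user, $pass, { AutoCommit => 1 });

  $schema->txn_do(sub {
    # both inserts commit together, or the whole block is rolled back
    my $artist = $schema->resultset('Artist')->create({ name => 'New Band' });
    $artist->create_related(cds => { title => 'First Album', year => 2012 });
  });
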
If you set C<< AutoCommit => 0 >> in your connect info, then you are always
in an assumed transaction between commits, and you're telling us you'd
@@ -1861,38 +2797,6 @@ cases if you choose the C<< AutoCommit => 0 >> path, just as you would
be with raw DBI.


-=head1 SQL METHODS
-
-The module defines a set of methods within the DBIC::SQL::Abstract
-namespace. These build on L<SQL::Abstract::Limit> to provide the
-SQL query functions.
-
-The following methods are extended:
-
-=over 4
-
-=item delete
-
-=item insert
-
-=item select
-
-=item update
-
-=item limit_dialect
-
-See L</connect_info> for details.
-
-=item quote_char
-
-See L</connect_info> for details.
-
-=item name_sep
-
-See L</connect_info> for details.
-
-=back
-
 =head1 AUTHORS

 Matt S. Trout