X-Git-Url: http://git.shadowcat.co.uk/gitweb/gitweb.cgi?a=blobdiff_plain;f=lib%2FDBIx%2FClass%2FStorage%2FDBI.pm;h=cdaac3097b262665917a9cd3a336ceb244ab0977;hb=0e773352a;hp=17cb7699219b44d657c598dda4b889d827065b27;hpb=e9657379908899b73ff92948a4cd19b6f875e10f;p=dbsrgits%2FDBIx-Class.git diff --git a/lib/DBIx/Class/Storage/DBI.pm b/lib/DBIx/Class/Storage/DBI.pm index 17cb769..cdaac30 100644 --- a/lib/DBIx/Class/Storage/DBI.pm +++ b/lib/DBIx/Class/Storage/DBI.pm @@ -7,20 +7,32 @@ use warnings; use base qw/DBIx::Class::Storage::DBIHacks DBIx::Class::Storage/; use mro 'c3'; -use Carp::Clan qw/^DBIx::Class/; -use DBI; -use DBIx::Class::Storage::DBI::Cursor; -use DBIx::Class::Storage::Statistics; +use DBIx::Class::Carp; +use DBIx::Class::Exception; use Scalar::Util qw/refaddr weaken reftype blessed/; -use Data::Dumper::Concise 'Dumper'; +use List::Util qw/first/; use Sub::Name 'subname'; use Try::Tiny; -use File::Path 'mkpath'; +use overload (); use namespace::clean; +# default cursor class, overridable in connect_info attributes +__PACKAGE__->cursor_class('DBIx::Class::Storage::DBI::Cursor'); + +__PACKAGE__->mk_group_accessors('inherited' => qw/ + sql_limit_dialect sql_quote_char sql_name_sep +/); + +__PACKAGE__->mk_group_accessors('component_class' => qw/sql_maker_class datetime_parser_type/); + +__PACKAGE__->sql_maker_class('DBIx::Class::SQLMaker'); +__PACKAGE__->datetime_parser_type('DateTime::Format::MySQL'); # historic default + +__PACKAGE__->sql_name_sep('.'); + __PACKAGE__->mk_group_accessors('simple' => qw/ _connect_info _dbi_connect_info _dbic_connect_attributes _driver_determined - _dbh _server_info_hash _conn_pid _conn_tid _sql_maker _sql_maker_opts + _dbh _dbh_details _conn_pid _sql_maker _sql_maker_opts transaction_depth _dbh_autocommit savepoints /); @@ -33,17 +45,40 @@ my @storage_options = qw/ __PACKAGE__->mk_group_accessors('simple' => @storage_options); -# default cursor class, overridable in connect_info attributes -__PACKAGE__->cursor_class('DBIx::Class::Storage::DBI::Cursor'); - -__PACKAGE__->mk_group_accessors('inherited' => qw/ - sql_maker_class - _supports_insert_returning +# capability definitions, using a 2-tiered accessor system +# The rationale is: +# +# A driver/user may define _use_X, which blindly without any checks says: +# "(do not) use this capability", (use_dbms_capability is an "inherited" +# type accessor) +# +# If _use_X is undef, _supports_X is then queried. This is a "simple" style +# accessor, which in turn calls _determine_supports_X, and stores the return +# in a special slot on the storage object, which is wiped every time a $dbh +# reconnection takes place (it is not guaranteed that upon reconnection we +# will get the same rdbms version). _determine_supports_X does not need to +# exist on a driver, as we ->can for it before calling. + +my @capabilities = (qw/ + insert_returning + insert_returning_bound + placeholders + typeless_placeholders + join_optimizer /); -__PACKAGE__->sql_maker_class('DBIx::Class::SQLAHacks'); +__PACKAGE__->mk_group_accessors( dbms_capability => map { "_supports_$_" } @capabilities ); +__PACKAGE__->mk_group_accessors( use_dbms_capability => map { "_use_$_" } (@capabilities ) ); + +# on by default, not strictly a capability (pending rewrite) +__PACKAGE__->_use_join_optimizer (1); +sub _determine_supports_join_optimizer { 1 }; # Each of these methods need _determine_driver called before itself # in order to function reliably. 
This is a purely DRY optimization +# +# get_(use)_dbms_capability need to be called on the correct Storage +# class, as _use_X may be hardcoded class-wide, and _supports_X calls +# _determine_supports_X which obv. needs a correct driver as well my @rdbms_specific_methods = qw/ deployment_statements sqlt_type @@ -57,21 +92,42 @@ my @rdbms_specific_methods = qw/ delete select select_single + + get_use_dbms_capability + get_dbms_capability + + _server_info + _get_server_version /; for my $meth (@rdbms_specific_methods) { my $orig = __PACKAGE__->can ($meth) - or next; + or die "$meth is not a ::Storage::DBI method!"; no strict qw/refs/; no warnings qw/redefine/; *{__PACKAGE__ ."::$meth"} = subname $meth => sub { - if (not $_[0]->_driver_determined) { + if ( + # only fire when invoked on an instance, a valid class-based invocation + # would e.g. be setting a default for an inherited accessor + ref $_[0] + and + ! $_[0]->_driver_determined + and + ! $_[0]->{_in_determine_driver} + ) { $_[0]->_determine_driver; - goto $_[0]->can($meth); + + # This for some reason crashes and burns on perl 5.8.1 + # IFF the method ends up throwing an exception + #goto $_[0]->can ($meth); + + my $cref = $_[0]->can ($meth); + goto $cref; } - $orig->(@_); + + goto $orig; }; } @@ -113,6 +169,7 @@ sub new { $new->transaction_depth(0); $new->_sql_maker_opts({}); + $new->_dbh_details({}); $new->{savepoints} = []; $new->{_in_dbh_do} = 0; $new->{_dbh_gen} = 0; @@ -129,89 +186,68 @@ sub new { # of a fork()ed child to kill the parent's shared DBI handle, # *before perl reaches the DESTROY in this package* # Yes, it is ugly and effective. +# Additionally this registry is used by the CLONE method to +# make sure no handles are shared between threads { my %seek_and_destroy; sub _arm_global_destructor { my $self = shift; - my $key = Scalar::Util::refaddr ($self); + my $key = refaddr ($self); $seek_and_destroy{$key} = $self; - Scalar::Util::weaken ($seek_and_destroy{$key}); + weaken ($seek_and_destroy{$key}); } END { local $?; # just in case the DBI destructor changes it somehow # destroy just the object if not native to this process/thread - $_->_preserve_foreign_dbh for (grep + $_->_verify_pid for (grep { defined $_ } values %seek_and_destroy ); } -} - -sub DESTROY { - my $self = shift; - - # destroy just the object if not native to this process/thread - $self->_preserve_foreign_dbh; - # some databases need this to stop spewing warnings - if (my $dbh = $self->_dbh) { - try { - %{ $dbh->{CachedKids} } = (); - $dbh->disconnect; - }; + sub CLONE { + # As per DBI's recommendation, DBIC disconnects all handles as + # soon as possible (DBIC will reconnect only on demand from within + # the thread) + for (values %seek_and_destroy) { + next unless $_; + $_->{_dbh_gen}++; # so that existing cursors will drop as well + $_->_dbh(undef); + } } - - $self->_dbh(undef); } -sub _preserve_foreign_dbh { +sub DESTROY { my $self = shift; - return unless $self->_dbh; - - $self->_verify_tid; - - return unless $self->_dbh; - - $self->_verify_pid; + # some databases spew warnings on implicit disconnect + local $SIG{__WARN__} = sub {}; + $self->_dbh(undef); + # this op is necessary, since the very last perl runtime statement + # triggers a global destruction shootout, and the $SIG localization + # may very well be destroyed before perl actually gets to do the + # $dbh undef + 1; } # handle pid changes correctly - do not destroy parent's connection sub _verify_pid { my $self = shift; - return if ( defined $self->_conn_pid and $self->_conn_pid == $$ ); - 
- $self->_dbh->{InactiveDestroy} = 1; - $self->_dbh(undef); - $self->{_dbh_gen}++; - - return; -} - -# very similar to above, but seems to FAIL if I set InactiveDestroy -sub _verify_tid { - my $self = shift; - - if ( ! defined $self->_conn_tid ) { - return; # no threads - } - elsif ( $self->_conn_tid == threads->tid ) { - return; # same thread + my $pid = $self->_conn_pid; + if( defined $pid and $pid != $$ and my $dbh = $self->_dbh ) { + $dbh->{InactiveDestroy} = 1; + $self->{_dbh_gen}++; + $self->_dbh(undef); } - #$self->_dbh->{InactiveDestroy} = 1; # why does t/51threads.t fail...? - $self->_dbh(undef); - $self->{_dbh_gen}++; - return; } - =head2 connect_info This method is normally called by L, which @@ -422,14 +458,19 @@ statement handles via L. =item limit_dialect -Sets the limit dialect. This is useful for JDBC-bridge among others -where the remote SQL-dialect cannot be determined by the name of the -driver alone. See also L. +Sets a specific SQL::Abstract::Limit-style limit dialect, overriding the +default L setting of the storage (if any). For a list +of available limit dialects see L. + +=item quote_names + +When true automatically sets L and L to the characters +appropriate for your particular RDBMS. This option is preferred over specifying +L directly. =item quote_char -Specifies what characters to use to quote table and column names. If -you use this you will want to specify L as well. +Specifies what characters to use to quote table and column names. C expects either a single character, in which case is it is placed on either side of the table/column name, or an arrayref of length @@ -440,14 +481,9 @@ SQL Server you should use C<< quote_char => [qw/[ ]/] >>. =item name_sep -This only needs to be used in conjunction with C, and is used to +This parameter is only useful in conjunction with C, and is used to specify the character that separates elements (schemas, tables, columns) from -each other. In most cases this is simply a C<.>. - -The consequences of not supplying this value is that L -will assume DBIx::Class' uses of aliases to be complete column -names. The output will look like I<"me.name"> when it should actually -be I<"me"."name">. +each other. If unspecified it defaults to the most commonly used C<.>. =item unsafe @@ -500,7 +536,7 @@ L 'postgres', 'my_pg_password', { AutoCommit => 1 }, - { quote_char => q{"}, name_sep => q{.} }, + { quote_char => q{"} }, ] ); @@ -578,8 +614,18 @@ sub connect_info { my @args = @{ $info->{arguments} }; - $self->_dbi_connect_info([@args, - %attrs && !(ref $args[0] eq 'CODE') ? \%attrs : ()]); + if (keys %attrs and ref $args[0] ne 'CODE') { + carp + 'You provided explicit AutoCommit => 0 in your connection_info. ' + . 'This is almost universally a bad idea (see the footnotes of ' + . 'DBIx::Class::Storage::DBI for more info). If you still want to ' + . 'do this you can set $ENV{DBIC_UNSAFE_AUTOCOMMIT_OK} to disable ' + . 'this warning.' + if ! $attrs{AutoCommit} and ! 
$ENV{DBIC_UNSAFE_AUTOCOMMIT_OK}; + + push @args, \%attrs if keys %attrs; + } + $self->_dbi_connect_info(\@args); # FIXME - dirty: # save attributes them in a separate accessor so they are always @@ -639,7 +685,7 @@ sub _normalize_connect_info { delete @attrs{@storage_opts} if @storage_opts; my @sql_maker_opts = grep exists $attrs{$_}, - qw/limit_dialect quote_char name_sep/; + qw/limit_dialect quote_char name_sep quote_names/; @{ $info{sql_maker_options} }{@sql_maker_opts} = delete @attrs{@sql_maker_opts} if @sql_maker_opts; @@ -649,11 +695,12 @@ sub _normalize_connect_info { return \%info; } -sub _default_dbi_connect_attributes { - return { +sub _default_dbi_connect_attributes () { + +{ AutoCommit => 1, - RaiseError => 1, PrintError => 0, + RaiseError => 1, + ShowErrorStatement => 1, }; } @@ -751,7 +798,7 @@ sub dbh_do { # This is basically a blend of dbh_do above and DBIx::Class::Storage::txn_do. # It also informs dbh_do to bypass itself while under the direction of txn_do, -# via $self->{_in_dbh_do} (this saves some redundant eval and errorcheck, etc) +# via $self->{_in_dbh_do} (this saves some redundant eval and errorcheck, etc) sub txn_do { my $self = shift; my $coderef = shift; @@ -759,12 +806,10 @@ sub txn_do { ref $coderef eq 'CODE' or $self->throw_exception ('$coderef must be a CODE reference'); - return $coderef->(@_) if $self->{transaction_depth} && ! $self->auto_savepoint; - local $self->{_in_dbh_do} = 1; my @result; - my $want_array = wantarray; + my $want = wantarray; my $tried = 0; while(1) { @@ -774,26 +819,33 @@ sub txn_do { my $args = \@_; try { - $self->_get_dbh; - $self->txn_begin; - if($want_array) { + my $txn_start_depth = $self->transaction_depth; + if($want) { @result = $coderef->(@$args); } - elsif(defined $want_array) { + elsif(defined $want) { $result[0] = $coderef->(@$args); } else { $coderef->(@$args); } - $self->txn_commit; + + my $delta_txn = $txn_start_depth - $self->transaction_depth; + if ($delta_txn == 0) { + $self->txn_commit; + } + elsif ($delta_txn != 1) { + # an off-by-one would mean we fired a rollback + carp "Unexpected reduction of transaction depth by $delta_txn after execution of $coderef"; + } } catch { $exception = $_; }; - if(! defined $exception) { return $want_array ? @result : $result[0] } + if(! defined $exception) { return wantarray ? 
@result : $result[0] } - if($tried++ || $self->connected) { + if($self->transaction_depth > 1 || $tried++ || $self->connected) { my $rollback_exception; try { $self->txn_rollback } catch { $rollback_exception = shift }; if(defined $rollback_exception) { @@ -812,7 +864,7 @@ sub txn_do { # We were not connected, and was first try - reconnect and retry # via the while loop carp "Retrying $coderef after catching disconnected exception: $exception" - if $ENV{DBIC_DBIRETRY_DEBUG}; + if $ENV{DBIC_TXNRETRY_DEBUG}; $self->_populate_dbh; } } @@ -894,7 +946,7 @@ sub connected { sub _seems_connected { my $self = shift; - $self->_preserve_foreign_dbh; + $self->_verify_pid; my $dbh = $self->_dbh or return 0; @@ -942,28 +994,63 @@ sub dbh { # this is the internal "get dbh or connect (don't check)" method sub _get_dbh { my $self = shift; - $self->_preserve_foreign_dbh; + $self->_verify_pid; $self->_populate_dbh unless $self->_dbh; return $self->_dbh; } -sub _sql_maker_args { - my ($self) = @_; - - return ( - bindtype=>'columns', - array_datatypes => 1, - limit_dialect => $self->_get_dbh, - %{$self->_sql_maker_opts} - ); -} - sub sql_maker { my ($self) = @_; unless ($self->_sql_maker) { my $sql_maker_class = $self->sql_maker_class; - $self->ensure_class_loaded ($sql_maker_class); - $self->_sql_maker($sql_maker_class->new( $self->_sql_maker_args )); + + my %opts = %{$self->_sql_maker_opts||{}}; + my $dialect = + $opts{limit_dialect} + || + $self->sql_limit_dialect + || + do { + my $s_class = (ref $self) || $self; + carp ( + "Your storage class ($s_class) does not set sql_limit_dialect and you " + . 'have not supplied an explicit limit_dialect in your connection_info. ' + . 'DBIC will attempt to use the GenericSubQ dialect, which works on most ' + . 'databases but can be (and often is) painfully slow. ' + . "Please file an RT ticket against '$s_class' ." + ); + + 'GenericSubQ'; + } + ; + + my ($quote_char, $name_sep); + + if ($opts{quote_names}) { + $quote_char = (delete $opts{quote_char}) || $self->sql_quote_char || do { + my $s_class = (ref $self) || $self; + carp ( + "You requested 'quote_names' but your storage class ($s_class) does " + . 'not explicitly define a default sql_quote_char and you have not ' + . 'supplied a quote_char as part of your connection_info. DBIC will ' + .q{default to the ANSI SQL standard quote '"', which works most of } + . "the time. Please file an RT ticket against '$s_class'." + ); + + '"'; # RV + }; + + $name_sep = (delete $opts{name_sep}) || $self->sql_name_sep; + } + + $self->_sql_maker($sql_maker_class->new( + bindtype=>'columns', + array_datatypes => 1, + limit_dialect => $dialect, + ($quote_char ? 
(quote_char => $quote_char) : ()), + name_sep => ($name_sep || '.'), + %opts, + )); } return $self->_sql_maker; } @@ -977,11 +1064,11 @@ sub _populate_dbh { my @info = @{$self->_dbi_connect_info || []}; $self->_dbh(undef); # in case ->connected failed we might get sent here - $self->_server_info_hash (undef); + $self->_dbh_details({}); # reset everything we know + $self->_dbh($self->_connect(@info)); - $self->_conn_pid($$); - $self->_conn_tid(threads->tid) if $INC{'threads.pm'}; + $self->_conn_pid($$) if $^O ne 'MSWin32'; # on win32 these are in fact threads $self->_determine_driver; @@ -1002,17 +1089,57 @@ sub _run_connection_actions { $self->_do_connection_actions(connect_call_ => $_) for @actions; } + + +sub set_use_dbms_capability { + $_[0]->set_inherited ($_[1], $_[2]); +} + +sub get_use_dbms_capability { + my ($self, $capname) = @_; + + my $use = $self->get_inherited ($capname); + return defined $use + ? $use + : do { $capname =~ s/^_use_/_supports_/; $self->get_dbms_capability ($capname) } + ; +} + +sub set_dbms_capability { + $_[0]->_dbh_details->{capability}{$_[1]} = $_[2]; +} + +sub get_dbms_capability { + my ($self, $capname) = @_; + + my $cap = $self->_dbh_details->{capability}{$capname}; + + unless (defined $cap) { + if (my $meth = $self->can ("_determine$capname")) { + $cap = $self->$meth ? 1 : 0; + } + else { + $cap = 0; + } + + $self->set_dbms_capability ($capname, $cap); + } + + return $cap; +} + sub _server_info { my $self = shift; - unless ($self->_server_info_hash) { + my $info; + unless ($info = $self->_dbh_details->{info}) { - my %info; + $info = {}; my $server_version = try { $self->_get_server_version }; if (defined $server_version) { - $info{dbms_version} = $server_version; + $info->{dbms_version} = $server_version; my ($numeric_version) = $server_version =~ /^([\d\.]+)/; my @verparts = split (/\./, $numeric_version); @@ -1030,18 +1157,24 @@ sub _server_info { } push @use_parts, 0 while @use_parts < 3; - $info{normalized_dbms_version} = sprintf "%d.%03d%03d", @use_parts; + $info->{normalized_dbms_version} = sprintf "%d.%03d%03d", @use_parts; } } - $self->_server_info_hash(\%info); + $self->_dbh_details->{info} = $info; } - return $self->_server_info_hash + return $info; } sub _get_server_version { - shift->_get_dbh->get_info(18); + shift->_dbh_get_info(18); +} + +sub _dbh_get_info { + my ($self, $info) = @_; + + return try { $self->_get_dbh->get_info($info) } || undef; } sub _determine_driver { @@ -1085,6 +1218,8 @@ sub _determine_driver { $self->_driver_determined(1); + Class::C3->reinitialize() if DBIx::Class::_ENV_::OLD_MRO; + $self->_init; # run driver-specific initializations $self->_run_connection_actions @@ -1145,9 +1280,9 @@ sub _do_query { my $attrs = shift @do_args; my @bind = map { [ undef, $_ ] } @do_args; - $self->_query_start($sql, @bind); + $self->_query_start($sql, \@bind); $self->_get_dbh->do($sql, $attrs, @do_args); - $self->_query_end($sql, @bind); + $self->_query_end($sql, \@bind); } return $self; @@ -1168,10 +1303,11 @@ sub _connect { try { if(ref $info[0] eq 'CODE') { - $dbh = $info[0]->(); + $dbh = $info[0]->(); } else { - $dbh = DBI->connect(@info); + require DBI; + $dbh = DBI->connect(@info); } if (!$dbh) { @@ -1179,21 +1315,48 @@ sub _connect { } unless ($self->unsafe) { - my $weak_self = $self; - weaken $weak_self; - $dbh->{HandleError} = sub { + + $self->throw_exception( + 'Refusing clobbering of {HandleError} installed on externally supplied ' + ."DBI handle $dbh. Either remove the handler or use the 'unsafe' attribute." 
+ ) if $dbh->{HandleError} and ref $dbh->{HandleError} ne '__DBIC__DBH__ERROR__HANDLER__'; + + # Default via _default_dbi_connect_attributes is 1, hence it was an explicit + # request, or an external handle. Complain and set anyway + unless ($dbh->{RaiseError}) { + carp( ref $info[0] eq 'CODE' + + ? "The 'RaiseError' of the externally supplied DBI handle is set to false. " + ."DBIx::Class will toggle it back to true, unless the 'unsafe' connect " + .'attribute has been supplied' + + : 'RaiseError => 0 supplied in your connection_info, without an explicit ' + .'unsafe => 1. Toggling RaiseError back to true' + ); + + $dbh->{RaiseError} = 1; + } + + # this odd anonymous coderef dereference is in fact really + # necessary to avoid the unwanted effect described in perl5 + # RT#75792 + sub { + my $weak_self = $_[0]; + weaken $weak_self; + + # the coderef is blessed so we can distinguish it from externally + # supplied handles (which must be preserved) + $_[1]->{HandleError} = bless sub { if ($weak_self) { $weak_self->throw_exception("DBI Exception: $_[0]"); } else { # the handler may be invoked by something totally out of # the scope of DBIC - croak ("DBI Exception: $_[0]"); + DBIx::Class::Exception->throw("DBI Exception (unhandled by DBIC, ::Schema GCed): $_[0]"); } - }; - $dbh->{ShowErrorStatement} = 1; - $dbh->{RaiseError} = 1; - $dbh->{PrintError} = 0; + }, '__DBIC__DBH__ERROR__HANDLER__'; + }->($self, $dbh); } } catch { @@ -1287,9 +1450,8 @@ sub svp_rollback { } sub _svp_generate_name { - my ($self) = @_; - - return 'savepoint_'.scalar(@{ $self->{'savepoints'} }); + my ($self) = @_; + return 'savepoint_'.scalar(@{ $self->{'savepoints'} }); } sub txn_begin { @@ -1297,9 +1459,18 @@ sub txn_begin { # this means we have not yet connected and do not know the AC status # (e.g. coderef $dbh) - $self->ensure_connected if (! defined $self->_dbh_autocommit); + if (! defined $self->_dbh_autocommit) { + $self->ensure_connected; + } + # otherwise re-connect on pid changes, so + # that the txn_depth is adjusted properly + # the lightweight _get_dbh is good enoug here + # (only superficial handle check, no pings) + else { + $self->_get_dbh; + } - if($self->{transaction_depth} == 0) { + if($self->transaction_depth == 0) { $self->debugobj->txn_begin() if $self->debug; $self->_dbh_begin_work; @@ -1327,7 +1498,10 @@ sub _dbh_begin_work { sub txn_commit { my $self = shift; - if ($self->{transaction_depth} == 1) { + if (! $self->_dbh) { + $self->throw_exception('cannot COMMIT on a disconnected handle'); + } + elsif ($self->{transaction_depth} == 1) { $self->debugobj->txn_commit() if ($self->debug); $self->_dbh_commit; @@ -1339,6 +1513,20 @@ sub txn_commit { $self->svp_release if $self->auto_savepoint; } + elsif (! $self->_dbh->FETCH('AutoCommit') ) { + + carp "Storage transaction_depth $self->{transaction_depth} does not match " + ."false AutoCommit of $self->{_dbh}, attempting COMMIT anyway"; + + $self->debugobj->txn_commit() + if ($self->debug); + $self->_dbh_commit; + $self->{transaction_depth} = 0 + if $self->_dbh_autocommit; + } + else { + $self->throw_exception( 'Refusing to commit without a started transaction' ); + } } sub _dbh_commit { @@ -1393,82 +1581,163 @@ sub _dbh_rollback { # easier to override in NoBindVars without duping the rest. It takes up # all of _execute's args, and emits $sql, @bind. 
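# The rewrite below normalizes every bind value into a [ \%attrs, $value ]
# pair before anything reaches DBI. A minimal, purely illustrative sketch of
# that shape (the column names, types and values are invented; the attribute
# keys dbic_colname, sqlt_datatype, sqlt_size and dbd_attrs are the ones
# consumed by _dbi_attrs_for_bind further down):
#
#   my $normalized_bind = [
#     # resolved from result source column metadata
#     [ { dbic_colname => 'name', sqlt_datatype => 'varchar', sqlt_size => 100 }, 'New Artist' ],
#     # explicit driver-level attributes always win over datatype lookups
#     [ { dbd_attrs => { some_dbd_specific_attr => 1 } }, $raw_value ],
#     # nothing known about this value - bound as-is
#     [ {}, 42 ],
#   ];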
sub _prep_for_execute { - my ($self, $op, $extra_bind, $ident, $args) = @_; + my ($self, $op, $ident, $args) = @_; - if( blessed $ident && $ident->isa("DBIx::Class::ResultSource") ) { - $ident = $ident->from(); - } + my ($sql, @bind) = $self->sql_maker->$op( + blessed($ident) ? $ident->from : $ident, + @$args, + ); - my ($sql, @bind) = $self->sql_maker->$op($ident, @$args); + my (@final_bind, $colinfos); + my $resolve_bindinfo = sub { + $colinfos ||= $self->_resolve_column_info($ident); + if (my $col = $_[1]->{dbic_colname}) { + $_[1]->{sqlt_datatype} ||= $colinfos->{$col}{data_type} + if $colinfos->{$col}{data_type}; + $_[1]->{sqlt_size} ||= $colinfos->{$col}{size} + if $colinfos->{$col}{size}; + } + $_[1]; + }; + + for my $e (@{$args->[2]{bind}||[]}, @bind) { + push @final_bind, [ do { + if (ref $e ne 'ARRAY') { + ({}, $e) + } + elsif (! defined $e->[0]) { + ({}, $e->[1]) + } + elsif (ref $e->[0] eq 'HASH') { + ( + (first { $e->[0]{$_} } qw/dbd_attrs sqlt_datatype/) ? $e->[0] : $self->$resolve_bindinfo($e->[0]), + $e->[1] + ) + } + elsif (ref $e->[0] eq 'SCALAR') { + ( { sqlt_datatype => ${$e->[0]} }, $e->[1] ) + } + else { + ( $self->$resolve_bindinfo({ dbic_colname => $e->[0] }), $e->[1] ) + } + }]; + } - unshift(@bind, - map { ref $_ eq 'ARRAY' ? $_ : [ '!!dummy', $_ ] } @$extra_bind) - if $extra_bind; - return ($sql, \@bind); + ($sql, \@final_bind); } +sub _format_for_trace { + #my ($self, $bind) = @_; -sub _fix_bind_params { - my ($self, @bind) = @_; + ### Turn @bind from something like this: + ### ( [ "artist", 1 ], [ \%attrs, 3 ] ) + ### to this: + ### ( "'1'", "'3'" ) - ### Turn @bind from something like this: - ### ( [ "artist", 1 ], [ "cdid", 1, 3 ] ) - ### to this: - ### ( "'1'", "'1'", "'3'" ) - return - map { - if ( defined( $_ && $_->[1] ) ) { - map { qq{'$_'}; } @{$_}[ 1 .. $#$_ ]; - } - else { q{'NULL'}; } - } @bind; + map { + defined( $_ && $_->[1] ) + ? qq{'$_->[1]'} + : q{NULL} + } @{$_[1] || []}; } sub _query_start { - my ( $self, $sql, @bind ) = @_; + my ( $self, $sql, $bind ) = @_; - if ( $self->debug ) { - @bind = $self->_fix_bind_params(@bind); - - $self->debugobj->query_start( $sql, @bind ); - } + $self->debugobj->query_start( $sql, $self->_format_for_trace($bind) ) + if $self->debug; } sub _query_end { - my ( $self, $sql, @bind ) = @_; + my ( $self, $sql, $bind ) = @_; - if ( $self->debug ) { - @bind = $self->_fix_bind_params(@bind); - $self->debugobj->query_end( $sql, @bind ); - } + $self->debugobj->query_end( $sql, $self->_format_for_trace($bind) ) + if $self->debug; } -sub _dbh_execute { - my ($self, $dbh, $op, $extra_bind, $ident, $bind_attributes, @args) = @_; +my $sba_compat; +sub _dbi_attrs_for_bind { + my ($self, $ident, $bind) = @_; - my ($sql, $bind) = $self->_prep_for_execute($op, $extra_bind, $ident, \@args); - - $self->_query_start( $sql, @$bind ); + if (! defined $sba_compat) { + $self->_determine_driver; + $sba_compat = $self->can('source_bind_attributes') == \&source_bind_attributes + ? 0 + : 1 + ; + } - my $sth = $self->sth($sql,$op); + my $sba_attrs; + if ($sba_compat) { + my $class = ref $self; + carp_unique ( + "The source_bind_attributes() override in $class relies on a deprecated codepath. " + .'You are strongly advised to switch your code to override bind_attribute_by_datatype() ' + .'instead. 
This legacy compat shim will also disappear some time before DBIC 0.09' + ); - my $placeholder_index = 1; + my $sba_attrs = $self->source_bind_attributes + } - foreach my $bound (@$bind) { - my $attributes = {}; - my($column_name, @data) = @$bound; + my @attrs; - if ($bind_attributes) { - $attributes = $bind_attributes->{$column_name} - if defined $bind_attributes->{$column_name}; + for (map { $_->[0] } @$bind) { + push @attrs, do { + if ($_->{dbd_attrs}) { + $_->{dbd_attrs} + } + elsif($_->{sqlt_datatype}) { + $self->bind_attribute_by_data_type($_->{sqlt_datatype}) || undef; + } + elsif ($sba_attrs and $_->{dbic_colname}) { + $sba_attrs->{$_->{dbic_colname}} || undef; + } + else { + undef; # always push something at this position + } } + } + + return \@attrs; +} - foreach my $data (@data) { - my $ref = ref $data; - $data = $ref && $ref ne 'ARRAY' ? ''.$data : $data; # stringify args (except arrayrefs) +sub _execute { + my ($self, $op, $ident, @args) = @_; + + my ($sql, $bind) = $self->_prep_for_execute($op, $ident, \@args); - $sth->bind_param($placeholder_index, $data, $attributes); - $placeholder_index++; + shift->dbh_do( # retry over disconnects + '_dbh_execute', + $sql, + $bind, + $self->_dbi_attrs_for_bind($ident, $bind) + ); +} + +sub _dbh_execute { + my ($self, undef, $sql, $bind, $bind_attrs) = @_; + + $self->_query_start( $sql, $bind ); + my $sth = $self->_sth($sql); + + for my $i (0 .. $#$bind) { + if (ref $bind->[$i][1] eq 'SCALAR') { # any scalarrefs are assumed to be bind_inouts + $sth->bind_param_inout( + $i + 1, # bind params counts are 1-based + $bind->[$i][1], + $bind->[$i][0]{dbd_size} || $self->_max_column_bytesize($bind->[$i][0]), # size + $bind_attrs->[$i], + ); + } + else { + $sth->bind_param( + $i + 1, + (ref $bind->[$i][1] and overload::Method($bind->[$i][1], '""')) + ? "$bind->[$i][1]" + : $bind->[$i][1] + , + $bind_attrs->[$i], + ); } } @@ -1478,70 +1747,85 @@ sub _dbh_execute { $sth->errstr || $sth->err || 'Unknown error: execute() returned false, but error flags were not set...' ) if !$rv; - $self->_query_end( $sql, @$bind ); + $self->_query_end( $sql, $bind ); return (wantarray ? ($rv, $sth, @$bind) : $rv); } -sub _execute { - my $self = shift; - $self->dbh_do('_dbh_execute', @_); # retry over disconnects -} - -sub _prefetch_insert_auto_nextvals { +sub _prefetch_autovalues { my ($self, $source, $to_insert) = @_; - my $upd = {}; - - foreach my $col ( $source->columns ) { - if ( !defined $to_insert->{$col} ) { - my $col_info = $source->column_info($col); - - if ( $col_info->{auto_nextval} ) { - $upd->{$col} = $to_insert->{$col} = $self->_sequence_fetch( - 'nextval', - $col_info->{sequence} ||= + my $colinfo = $source->columns_info; + + my %values; + for my $col (keys %$colinfo) { + if ( + $colinfo->{$col}{auto_nextval} + and + ( + ! 
exists $to_insert->{$col} + or + ref $to_insert->{$col} eq 'SCALAR' + ) + ) { + $values{$col} = $self->_sequence_fetch( + 'NEXTVAL', + ( $colinfo->{$col}{sequence} ||= $self->_dbh_get_autoinc_seq($self->_get_dbh, $source, $col) - ); - } + ), + ); } } - return $upd; + \%values; } sub insert { - my $self = shift; - my ($source, $to_insert, $opts) = @_; + my ($self, $source, $to_insert) = @_; + + my $prefetched_values = $self->_prefetch_autovalues($source, $to_insert); + + # fuse the values + $to_insert = { %$to_insert, %$prefetched_values }; + + # list of primary keys we try to fetch from the database + # both not-exsists and scalarrefs are considered + my %fetch_pks; + for ($source->primary_columns) { + $fetch_pks{$_} = scalar keys %fetch_pks # so we can preserve order for prettyness + if ! exists $to_insert->{$_} or ref $to_insert->{$_} eq 'SCALAR'; + } - my $updated_cols = $self->_prefetch_insert_auto_nextvals (@_); + my ($sqla_opts, @ir_container); + if ($self->_use_insert_returning) { - my $bind_attributes = $self->source_bind_attributes($source); + # retain order as declared in the resultsource + for (sort { $fetch_pks{$a} <=> $fetch_pks{$b} } keys %fetch_pks ) { + push @{$sqla_opts->{returning}}, $_; + $sqla_opts->{returning_container} = \@ir_container + if $self->_use_insert_returning_bound; + } + } - my ($rv, $sth) = $self->_execute('insert' => [], $source, $bind_attributes, $to_insert, $opts); + my ($rv, $sth) = $self->_execute('insert', $source, $to_insert, $sqla_opts); - if ($opts->{returning}) { - my @ret_cols = @{$opts->{returning}}; + my %returned_cols; - my @ret_vals = try { + if (my $retlist = $sqla_opts->{returning}) { + @ir_container = try { local $SIG{__WARN__} = sub {}; my @r = $sth->fetchrow_array; $sth->finish; @r; - }; - - my %ret; - @ret{@ret_cols} = @ret_vals if (@ret_vals); + } unless @ir_container; - $updated_cols = { - %$updated_cols, - %ret, - }; + @returned_cols{@$retlist} = @ir_container if @ir_container; } - return $updated_cols; + return { %$prefetched_values, %returned_cols }; } + ## Currently it is assumed that all values passed will be "normal", i.e. not ## scalar refs, or at least, all the same type as the first set, the statement is ## only prepped once. @@ -1565,10 +1849,11 @@ sub insert_bulk { $msg, $cols->[$col_idx], do { + require Data::Dumper::Concise; local $Data::Dumper::Maxdepth = 1; # don't dump objects, if any - Dumper { + Data::Dumper::Concise::Dumper ({ map { $cols->[$_] => $data->[$slice_idx][$_] } (0 .. $#$cols) - }, + }), } ); }; @@ -1608,17 +1893,17 @@ sub insert_bulk { } my ($sql, $bind) = $self->_prep_for_execute ( - 'insert', undef, $source, [\%colvalues] + 'insert', $source, [\%colvalues] ); - my @bind = @$bind; - my $empty_bind = 1 if (not @bind) && - (grep { ref $_ eq 'SCALAR' } values %colvalues) == @$cols; + if (! @$bind) { + # if the bindlist is empty - make sure all "values" are in fact + # literal scalarrefs. If not the case this means the storage ate + # them away (e.g. the NoBindVars component) and interpolated them + # directly into the SQL. 
This obviosly can't be good for multi-inserts - if ((not @bind) && (not $empty_bind)) { - $self->throw_exception( - 'Cannot insert_bulk without support for placeholders' - ); + $self->throw_exception('Cannot insert_bulk without support for placeholders') + if first { ref $_ ne 'SCALAR' } values %colvalues; } # neither _execute_array, nor _execute_inserts_with_no_binds are @@ -1626,24 +1911,24 @@ sub insert_bulk { # scope guard my $guard = $self->txn_scope_guard; - $self->_query_start( $sql, ['__BULK__'] ); - my $sth = $self->sth($sql); + $self->_query_start( $sql, @$bind ? [[undef => '__BULK_INSERT__' ]] : () ); + my $sth = $self->_sth($sql); my $rv = do { - if ($empty_bind) { - # bind_param_array doesn't work if there are no binds - $self->_dbh_execute_inserts_with_no_binds( $sth, scalar @$data ); + if (@$bind) { + #@bind = map { ref $_ ? ''.$_ : $_ } @bind; # stringify args + $self->_execute_array( $source, $sth, $bind, $cols, $data ); } else { -# @bind = map { ref $_ ? ''.$_ : $_ } @bind; # stringify args - $self->_execute_array( $source, $sth, \@bind, $cols, $data ); + # bind_param_array doesn't work if there are no binds + $self->_dbh_execute_inserts_with_no_binds( $sth, scalar @$data ); } }; - $self->_query_end( $sql, ['__BULK__'] ); + $self->_query_end( $sql, @$bind ? [[ undef => '__BULK_INSERT__' ]] : () ); $guard->commit; - return (wantarray ? ($rv, $sth, @bind) : $rv); + return (wantarray ? ($rv, $sth, @$bind) : $rv); } sub _execute_array { @@ -1652,30 +1937,18 @@ sub _execute_array { ## This must be an arrayref, else nothing works! my $tuple_status = []; - ## Get the bind_attributes, if any exist - my $bind_attributes = $self->source_bind_attributes($source); + # $bind contains colnames as keys and dbic-col-index as values + my $bind_attrs = $self->_dbi_attrs_for_bind($source, $bind); - ## Bind the values and execute - my $placeholder_index = 1; - - foreach my $bound (@$bind) { - - my $attributes = {}; - my ($column_name, $data_index) = @$bound; - - if( $bind_attributes ) { - $attributes = $bind_attributes->{$column_name} - if defined $bind_attributes->{$column_name}; - } - - my @data = map { $_->[$data_index] } @$data; + # Bind the values by column slices + for my $i (0 .. $#$bind) { + my $dbic_data_index = $bind->[$i][1]; $sth->bind_param_array( - $placeholder_index, - [@data], - (%$attributes ? $attributes : ()), + $i+1, # DBI bind indexes are 1-based + [ map { $_->[$dbic_data_index] } @$data ], + defined $bind_attrs->[$i] ? $bind_attrs->[$i] : (), # some DBDs throw up when given an undef ); - $placeholder_index++; } my ($rv, $err); @@ -1684,19 +1957,23 @@ sub _execute_array { } catch { $err = shift; - } - finally { - # Statement must finish even if there was an exception. - try { - $sth->finish - } - catch { - $err = shift unless defined $err - }; }; - $err = $sth->errstr - if (! defined $err and $sth->err); + # Not all DBDs are create equal. Some throw on error, some return + # an undef $rv, and some set $sth->err - try whatever we can + $err = ($sth->errstr || 'UNKNOWN ERROR ($sth->errstr is unset)') if ( + ! defined $err + and + ( !defined $rv or $sth->err ) + ); + + # Statement must finish even if there was an exception. 
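# For clarity, a purely illustrative example of the column-slice binding
# performed above (column names and values are invented): given
#
#   $cols = [ qw/artistid name/ ];
#   $data = [ [ 1, 'Artist One' ], [ 2, 'Artist Two' ] ];
#
# the $bind entry for 'name' carries that column's index into each @$data
# row, so the slice handed to bind_param_array() for the 'name' placeholder
# is [ 'Artist One', 'Artist Two' ]. Whether or not execute_array() then
# succeeds, the handle is finished below before any error is rethrown.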
+ try { + $sth->finish + } + catch { + $err = shift unless defined $err + }; if (defined $err) { my $i = 0; @@ -1705,9 +1982,10 @@ sub _execute_array { $self->throw_exception("Unexpected populate error: $err") if ($i > $#$tuple_status); + require Data::Dumper::Concise; $self->throw_exception(sprintf "%s for populate slice:\n%s", ($tuple_status->[$i][1] || $err), - Dumper { map { $cols->[$_] => $data->[$i][$_] } (0 .. $#$cols) }, + Data::Dumper::Concise::Dumper( { map { $cols->[$_] => $data->[$i][$_] } (0 .. $#$cols) } ), ); } @@ -1733,15 +2011,14 @@ sub _dbh_execute_inserts_with_no_binds { } catch { $err = shift; + }; + + # Make sure statement is finished even if there was an exception. + try { + $sth->finish } - finally { - # Make sure statement is finished even if there was an exception. - try { - $sth->finish - } - catch { - $err = shift unless defined $err; - }; + catch { + $err = shift unless defined $err; }; $self->throw_exception($err) if defined $err; @@ -1750,20 +2027,14 @@ sub _dbh_execute_inserts_with_no_binds { } sub update { - my ($self, $source, @args) = @_; - - my $bind_attrs = $self->source_bind_attributes($source); - - return $self->_execute('update' => [], $source, $bind_attrs, @args); + #my ($self, $source, @args) = @_; + shift->_execute('update', @_); } sub delete { - my ($self, $source, @args) = @_; - - my $bind_attrs = $self->source_bind_attributes($source); - - return $self->_execute('delete' => [], $source, $bind_attrs, @args); + #my ($self, $source, @args) = @_; + shift->_execute('delete', @_); } # We were sent here because the $rs contains a complex search @@ -1871,17 +2142,17 @@ sub _select { sub _select_args_to_query { my $self = shift; - # my ($op, $bind, $ident, $bind_attrs, $select, $cond, $rs_attrs, $rows, $offset) + # my ($op, $ident, $select, $cond, $rs_attrs, $rows, $offset) # = $self->_select_args($ident, $select, $cond, $attrs); - my ($op, $bind, $ident, $bind_attrs, @args) = + my ($op, $ident, @args) = $self->_select_args(@_); - # my ($sql, $prepared_bind) = $self->_prep_for_execute($op, $bind, $ident, [ $select, $cond, $rs_attrs, $rows, $offset ]); - my ($sql, $prepared_bind) = $self->_prep_for_execute($op, $bind, $ident, \@args); + # my ($sql, $prepared_bind) = $self->_prep_for_execute($op, $ident, [ $select, $cond, $rs_attrs, $rows, $offset ]); + my ($sql, $prepared_bind) = $self->_prep_for_execute($op, $ident, \@args); $prepared_bind ||= []; return wantarray - ? ($sql, $prepared_bind, $bind_attrs) + ? ($sql, $prepared_bind) : \[ "($sql)", @$prepared_bind ] ; } @@ -1898,44 +2169,23 @@ sub _select_args { from => $ident, where => $where, $rs_alias && $alias2source->{$rs_alias} - ? ( _rsroot_source_handle => $alias2source->{$rs_alias}->handle ) + ? ( _rsroot_rsrc => $alias2source->{$rs_alias} ) : () , }; - # calculate bind_attrs before possible $ident mangling - my $bind_attrs = {}; - for my $alias (keys %$alias2source) { - my $bindtypes = $self->source_bind_attributes ($alias2source->{$alias}) || {}; - for my $col (keys %$bindtypes) { - - my $fqcn = join ('.', $alias, $col); - $bind_attrs->{$fqcn} = $bindtypes->{$col} if $bindtypes->{$col}; - - # Unqialified column names are nice, but at the same time can be - # rather ambiguous. What we do here is basically go along with - # the loop, adding an unqualified column slot to $bind_attrs, - # alongside the fully qualified name. 
As soon as we encounter - # another column by that name (which would imply another table) - # we unset the unqualified slot and never add any info to it - # to avoid erroneous type binding. If this happens the users - # only choice will be to fully qualify his column name - - if (exists $bind_attrs->{$col}) { - $bind_attrs->{$col} = {}; - } - else { - $bind_attrs->{$col} = $bind_attrs->{$fqcn}; - } - } + # Sanity check the attributes (SQLMaker does it too, but + # in case of a software_limit we'll never reach there) + if (defined $attrs->{offset}) { + $self->throw_exception('A supplied offset attribute must be a non-negative integer') + if ( $attrs->{offset} =~ /\D/ or $attrs->{offset} < 0 ); } - # adjust limits if (defined $attrs->{rows}) { - $self->throw_exception("rows attribute must be positive if present") - unless $attrs->{rows} > 0; + $self->throw_exception("The rows attribute must be a positive integer if present") + if ( $attrs->{rows} =~ /\D/ or $attrs->{rows} <= 0 ); } - elsif (defined $attrs->{offset}) { + elsif ($attrs->{offset}) { # MySQL actually recommends this approach. I cringe. $attrs->{rows} = $sql_maker->__max_int; } @@ -1953,16 +2203,17 @@ sub _select_args { && @{$attrs->{group_by}} && - $attrs->{_prefetch_select} - && - @{$attrs->{_prefetch_select}} + $attrs->{_prefetch_selector_range} ) ) { ($ident, $select, $where, $attrs) = $self->_adjust_select_args_for_complex_prefetch ($ident, $select, $where, $attrs); } elsif (! $attrs->{software_limit} ) { - push @limit, $attrs->{rows}, $attrs->{offset}; + push @limit, ( + $attrs->{rows} || (), + $attrs->{offset} || (), + ); } # try to simplify the joinmap further (prune unreferenced type-single joins) @@ -1978,7 +2229,7 @@ sub _select_args { # invoked, and that's just bad... ### - return ('select', $attrs->{bind}, $ident, $bind_attrs, $select, $where, $attrs, @limit); + return ('select', $ident, $select, $where, $attrs, @limit); } # Returns a counting SELECT for a simple count @@ -1990,19 +2241,13 @@ sub _count_select { return { count => '*' }; } - sub source_bind_attributes { - my ($self, $source) = @_; - - my $bind_attributes; - foreach my $column ($source->columns) { - - my $data_type = $source->column_info($column)->{data_type} || ''; - $bind_attributes->{$column} = $self->bind_attribute_by_data_type($data_type) - if $data_type; - } - - return $bind_attributes; + shift->throw_exception( + 'source_bind_attributes() was never meant to be a callable public method - ' + .'please contact the DBIC dev-team and describe your use case so that a reasonable ' + .'solution can be provided' + ."\nhttp://search.cpan.org/dist/DBIx-Class/lib/DBIx/Class.pm#GETTING_HELP/SUPPORT" + ); } =head2 select @@ -2036,6 +2281,13 @@ sub select_single { return @row; } +=head2 sql_limit_dialect + +This is an accessor for the default SQL limit dialect used by a particular +storage driver. Can be overridden by supplying an explicit L +to L. For a list of available limit dialects +see L. + =head2 sth =over 4 @@ -2058,12 +2310,28 @@ sub _dbh_sth { # XXX You would think RaiseError would make this impossible, # but apparently that's not true :( - $self->throw_exception($dbh->errstr) if !$sth; + $self->throw_exception( + $dbh->errstr + || + sprintf( "\$dbh->prepare() of '%s' through %s failed *silently* without " + .'an exception and/or setting $dbh->errstr', + length ($sql) > 20 + ? substr($sql, 0, 20) . '...' + : $sql + , + 'DBD::' . 
$dbh->{Driver}{Name}, + ) + ) if !$sth; $sth; } sub sth { + carp_unique 'sth was mistakenly marked/documented as public, stop calling it (will be removed before DBIC v0.09)'; + shift->_sth(@_); +} + +sub _sth { my ($self, $sql) = @_; $self->dbh_do('_dbh_sth', $sql); # retry over disconnects } @@ -2187,7 +2455,7 @@ sub _native_data_type { } # Check if placeholders are supported at all -sub _placeholders_supported { +sub _determine_supports_placeholders { my $self = shift; my $dbh = $self->_get_dbh; @@ -2206,7 +2474,7 @@ sub _placeholders_supported { # Check if placeholders bound to non-string types throw exceptions # -sub _typeless_placeholders_supported { +sub _determine_supports_typeless_placeholders { my $self = shift; my $dbh = $self->_get_dbh; @@ -2257,11 +2525,11 @@ be performed instead of the usual C. =cut sub is_datatype_numeric { - my ($self, $dt) = @_; + #my ($self, $dt) = @_; - return 0 unless $dt; + return 0 unless $_[1]; - return $dt =~ /^ (?: + $_[1] =~ /^ (?: numeric | int(?:eger)? | (?:tiny|small|medium|big)int | dec(?:imal)? | real | float | double (?: \s+ precision)? | (?:big)?serial ) $/ix; } @@ -2331,8 +2599,13 @@ sub create_ddl_dir { carp "No directory given, using ./\n"; $dir = './'; } else { - -d $dir or mkpath $dir - or $self->throw_exception("create_ddl_dir: $! creating dir '$dir'"); + -d $dir + or + (require File::Path and File::Path::make_path ("$dir")) # make_path does not like objects (i.e. Path::Class::Dir) + or + $self->throw_exception( + "Failed to create '$dir': " . ($! || $@ || 'error unknown') + ); } $self->throw_exception ("Directory '$dir' does not exist\n") unless(-d $dir); @@ -2486,6 +2759,7 @@ sub deployment_statements { my $filename = $schema->ddl_filename($type, $version, $dir); if(-f $filename) { + # FIXME replace this block when a proper sane sql parser is available my $file; open($file, "<$filename") or $self->throw_exception("Can't open $filename ($!)"); @@ -2511,8 +2785,7 @@ sub deployment_statements { ); my @ret; - my $wa = wantarray; - if ($wa) { + if (wantarray) { @ret = $tr->translate; } else { @@ -2522,15 +2795,17 @@ sub deployment_statements { $self->throw_exception( 'Unable to produce deployment statements: ' . $tr->error) unless (@ret && defined $ret[0]); - return $wa ? @ret : $ret[0]; + return wantarray ? 
@ret : $ret[0]; } +# FIXME deploy() currently does not accurately report sql errors +# Will always return true while errors are warned sub deploy { my ($self, $schema, $type, $sqltargs, $dir) = @_; my $deploy = sub { my $line = shift; - return if($line =~ /^--/); return if(!$line); + return if($line =~ /^--/); # next if($line =~ /^DROP/m); return if($line =~ /^BEGIN TRANSACTION/m); return if($line =~ /^COMMIT/m); @@ -2552,7 +2827,8 @@ sub deploy { } } elsif (@statements == 1) { - foreach my $line ( split(";\n", $statements[0])) { + # split on single line comments and end of statements + foreach my $line ( split(/\s*--.*\n|;\n/, $statements[0])) { $deploy->( $line ); } } @@ -2573,12 +2849,7 @@ sub datetime_parser { =head2 datetime_parser_type -Defines (returns) the datetime parser class - currently hardwired to -L - -=cut - -sub datetime_parser_type { "DateTime::Format::MySQL"; } +Defines the datetime parser class - currently defaults to L =head2 build_datetime_parser @@ -2589,7 +2860,6 @@ See L sub build_datetime_parser { my $self = shift; my $type = $self->datetime_parser_type(@_); - $self->ensure_class_loaded ($type); return $type; } @@ -2647,6 +2917,75 @@ sub relname_to_table_alias { return $alias; } +# The size in bytes to use for DBI's ->bind_param_inout, this is the generic +# version and it may be necessary to amend or override it for a specific storage +# if such binds are necessary. +sub _max_column_bytesize { + my ($self, $attr) = @_; + + my $max_size; + + if ($attr->{sqlt_datatype}) { + my $data_type = lc($attr->{sqlt_datatype}); + + if ($attr->{sqlt_size}) { + + # String/sized-binary types + if ($data_type =~ /^(?: + l? (?:var)? char(?:acter)? (?:\s*varying)? + | + (?:var)? binary (?:\s*varying)? + | + raw + )\b/x + ) { + $max_size = $attr->{sqlt_size}; + } + # Other charset/unicode types, assume scale of 4 + elsif ($data_type =~ /^(?: + national \s* character (?:\s*varying)? + | + nchar + | + univarchar + | + nvarchar + )\b/x + ) { + $max_size = $attr->{sqlt_size} * 4; + } + } + + if (!$max_size and !$self->_is_lob_type($data_type)) { + $max_size = 100 # for all other (numeric?) datatypes + } + } + + $max_size || $self->_dbic_connect_attributes->{LongReadLen} || $self->_get_dbh->{LongReadLen} || 8000; +} + +# Determine if a data_type is some type of BLOB +sub _is_lob_type { + my ($self, $data_type) = @_; + $data_type && ($data_type =~ /lob|bfile|text|image|bytea|memo/i + || $data_type =~ /^long(?:\s+(?:raw|bit\s*varying|varbit|binary + |varchar|character\s*varying|nvarchar + |national\s*character\s*varying))?\z/xi); +} + +sub _is_binary_lob_type { + my ($self, $data_type) = @_; + $data_type && ($data_type =~ /blob|bfile|image|bytea/i + || $data_type =~ /^long(?:\s+(?:raw|bit\s*varying|varbit|binary))?\z/xi); +} + +sub _is_text_lob_type { + my ($self, $data_type) = @_; + $data_type && ($data_type =~ /^(?:clob|memo)\z/i + || $data_type =~ /^long(?:\s+(?:varchar|character\s*varying|nvarchar + |national\s*character\s*varying))\z/xi); +} + 1; =head1 USAGE NOTES