X-Git-Url: http://git.shadowcat.co.uk/gitweb/gitweb.cgi?a=blobdiff_plain;f=lib%2FDBIx%2FClass%2FStorage%2FDBI.pm;h=906ed9784b7f0b40a88d32ae0594b2447fa63508;hb=9361b05d319e60314bf2caff1e96ff3c388a50bb;hp=99befacd9312b852bd0e5f921707cc4e9c6e112e;hpb=87bf71d54c0e5bd50700d8e3be59b79e9b670012;p=dbsrgits%2FDBIx-Class.git diff --git a/lib/DBIx/Class/Storage/DBI.pm b/lib/DBIx/Class/Storage/DBI.pm index 99befac..906ed97 100644 --- a/lib/DBIx/Class/Storage/DBI.pm +++ b/lib/DBIx/Class/Storage/DBI.pm @@ -7,34 +7,32 @@ use warnings; use base qw/DBIx::Class::Storage::DBIHacks DBIx::Class::Storage/; use mro 'c3'; -use Carp::Clan qw/^DBIx::Class|^Try::Tiny/; -use DBI; -use DBIx::Class::Storage::DBI::Cursor; +use DBIx::Class::Carp; +use DBIx::Class::Exception; use Scalar::Util qw/refaddr weaken reftype blessed/; use List::Util qw/first/; -use Data::Dumper::Concise 'Dumper'; use Sub::Name 'subname'; use Try::Tiny; -use File::Path 'make_path'; use overload (); use namespace::clean; - # default cursor class, overridable in connect_info attributes __PACKAGE__->cursor_class('DBIx::Class::Storage::DBI::Cursor'); __PACKAGE__->mk_group_accessors('inherited' => qw/ - sql_maker_class sql_limit_dialect sql_quote_char sql_name_sep + sql_limit_dialect sql_quote_char sql_name_sep /); -__PACKAGE__->sql_name_sep('.'); +__PACKAGE__->mk_group_accessors('component_class' => qw/sql_maker_class datetime_parser_type/); __PACKAGE__->sql_maker_class('DBIx::Class::SQLMaker'); +__PACKAGE__->datetime_parser_type('DateTime::Format::MySQL'); # historic default + +__PACKAGE__->sql_name_sep('.'); __PACKAGE__->mk_group_accessors('simple' => qw/ _connect_info _dbi_connect_info _dbic_connect_attributes _driver_determined - _dbh _dbh_details _conn_pid _sql_maker _sql_maker_opts - transaction_depth _dbh_autocommit savepoints + _dbh _dbh_details _conn_pid _sql_maker _sql_maker_opts _dbh_autocommit /); # the values for these accessors are picked out (and deleted) from @@ -87,12 +85,14 @@ my @rdbms_specific_methods = qw/ build_datetime_parser datetime_parser_type + txn_begin insert insert_bulk update delete select select_single + with_deferred_fk_checks get_use_dbms_capability get_dbms_capability @@ -109,7 +109,15 @@ for my $meth (@rdbms_specific_methods) { no strict qw/refs/; no warnings qw/redefine/; *{__PACKAGE__ ."::$meth"} = subname $meth => sub { - if (not $_[0]->_driver_determined and not $_[0]->{_in_determine_driver}) { + if ( + # only fire when invoked on an instance, a valid class-based invocation + # would e.g. be setting a default for an inherited accessor + ref $_[0] + and + ! $_[0]->_driver_determined + and + ! $_[0]->{_in_determine_driver} + ) { $_[0]->_determine_driver; # This for some reason crashes and burns on perl 5.8.1 @@ -119,11 +127,11 @@ for my $meth (@rdbms_specific_methods) { my $cref = $_[0]->can ($meth); goto $cref; } + goto $orig; }; } - =head1 NAME DBIx::Class::Storage::DBI - DBI storage handler @@ -159,11 +167,9 @@ documents DBI-specific methods and behaviors. 
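# Editor's aside (not part of the patch): the wrapper loop above defers
# driver detection until one of the RDBMS-specific methods is first called
# on an *instance*. A hedged sketch of the observable effect -- the schema
# class and DSN here are hypothetical:
#
#   my $storage = MyApp::Schema->connect('dbi:Pg:dbname=app')->storage;
#   print ref $storage;          # DBIx::Class::Storage::DBI (no rebless yet)
#   $storage->ensure_connected;  # connects and runs _determine_driver
#   print ref $storage;          # e.g. DBIx::Class::Storage::DBI::Pg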
sub new { my $new = shift->next::method(@_); - $new->transaction_depth(0); $new->_sql_maker_opts({}); $new->_dbh_details({}); - $new->{savepoints} = []; - $new->{_in_dbh_do} = 0; + $new->{_in_do_block} = 0; $new->{_dbh_gen} = 0; # read below to see what this does @@ -208,6 +214,9 @@ sub new { next unless $_; $_->{_dbh_gen}++; # so that existing cursors will drop as well $_->_dbh(undef); + + $_->transaction_depth(0); + $_->savepoints([]); } } } @@ -235,6 +244,8 @@ sub _verify_pid { $dbh->{InactiveDestroy} = 1; $self->{_dbh_gen}++; $self->_dbh(undef); + $self->transaction_depth(0); + $self->savepoints([]); } return; @@ -314,8 +325,8 @@ for most DBDs. See L for details. =head3 DBIx::Class specific connection attributes -In addition to the standard L -L attributes, DBIx::Class recognizes +In addition to the standard L +L attributes, DBIx::Class recognizes the following connection options. These options can be mixed in with your other L connection attributes, or placed in a separate hashref (C<\%extra_attributes>) as shown above. @@ -767,98 +778,33 @@ sub dbh_do { my $dbh = $self->_get_dbh; return $self->$code($dbh, @_) - if ( $self->{_in_dbh_do} || $self->{transaction_depth} ); + if ( $self->{_in_do_block} || $self->{transaction_depth} ); - local $self->{_in_dbh_do} = 1; + local $self->{_in_do_block} = 1; # take a ref instead of a copy, to preserve coderef @_ aliasing semantics my $args = \@_; - return try { + + try { $self->$code ($dbh, @$args); } catch { $self->throw_exception($_) if $self->connected; # We were not connected - reconnect and retry, but let any # exception fall right through this time - carp "Retrying $code after catching disconnected exception: $_" - if $ENV{DBIC_DBIRETRY_DEBUG}; + carp "Retrying dbh_do($code) after catching disconnected exception: $_" + if $ENV{DBIC_STORAGE_RETRY_DEBUG}; $self->_populate_dbh; $self->$code($self->_dbh, @$args); }; } -# This is basically a blend of dbh_do above and DBIx::Class::Storage::txn_do. -# It also informs dbh_do to bypass itself while under the direction of txn_do, -# via $self->{_in_dbh_do} (this saves some redundant eval and errorcheck, etc) sub txn_do { - my $self = shift; - my $coderef = shift; - - ref $coderef eq 'CODE' or $self->throw_exception - ('$coderef must be a CODE reference'); - - local $self->{_in_dbh_do} = 1; - - my @result; - my $want = wantarray; - - my $tried = 0; - while(1) { - my $exception; - - # take a ref instead of a copy, to preserve coderef @_ aliasing semantics - my $args = \@_; - - try { - $self->txn_begin; - my $txn_start_depth = $self->transaction_depth; - if($want) { - @result = $coderef->(@$args); - } - elsif(defined $want) { - $result[0] = $coderef->(@$args); - } - else { - $coderef->(@$args); - } - - my $delta_txn = $txn_start_depth - $self->transaction_depth; - if ($delta_txn == 0) { - $self->txn_commit; - } - elsif ($delta_txn != 1) { - # an off-by-one would mean we fired a rollback - carp "Unexpected reduction of transaction depth by $delta_txn after execution of $coderef"; - } - } catch { - $exception = $_; - }; - - if(! defined $exception) { return wantarray ? 
@result : $result[0] } - - if($self->transaction_depth > 1 || $tried++ || $self->connected) { - my $rollback_exception; - try { $self->txn_rollback } catch { $rollback_exception = shift }; - if(defined $rollback_exception) { - my $exception_class = "DBIx::Class::Storage::NESTED_ROLLBACK_EXCEPTION"; - $self->throw_exception($exception) # propagate nested rollback - if $rollback_exception =~ /$exception_class/; - - $self->throw_exception( - "Transaction aborted: ${exception}. " - . "Rollback failed: ${rollback_exception}" - ); - } - $self->throw_exception($exception) - } - - # We were not connected, and was first try - reconnect and retry - # via the while loop - carp "Retrying $coderef after catching disconnected exception: $exception" - if $ENV{DBIC_TXNRETRY_DEBUG}; - $self->_populate_dbh; - } + # connects or reconnects on pid change, necessary to grab correct txn_depth + $_[0]->_get_dbh; + local $_[0]->{_in_do_block} = 1; + shift->next::method(@_); } =head2 disconnect @@ -879,7 +825,8 @@ sub disconnect { $self->_do_connection_actions(disconnect_call_ => $_) for @actions; - $self->_dbh_rollback unless $self->_dbh_autocommit; + # stops the "implicit rollback on disconnect" warning + $self->_exec_txn_rollback unless $self->_dbh_autocommit; %{ $self->_dbh->{CachedKids} } = (); $self->_dbh->disconnect; @@ -995,7 +942,6 @@ sub sql_maker { my ($self) = @_; unless ($self->_sql_maker) { my $sql_maker_class = $self->sql_maker_class; - $self->ensure_class_loaded ($sql_maker_class); my %opts = %{$self->_sql_maker_opts||{}}; my $dialect = @@ -1273,9 +1219,9 @@ sub _do_query { my $attrs = shift @do_args; my @bind = map { [ undef, $_ ] } @do_args; - $self->_query_start($sql, @bind); + $self->_query_start($sql, \@bind); $self->_get_dbh->do($sql, $attrs, @do_args); - $self->_query_end($sql, @bind); + $self->_query_end($sql, \@bind); } return $self; @@ -1296,10 +1242,11 @@ sub _connect { try { if(ref $info[0] eq 'CODE') { - $dbh = $info[0]->(); + $dbh = $info[0]->(); } else { - $dbh = DBI->connect(@info); + require DBI; + $dbh = DBI->connect(@info); } if (!$dbh) { @@ -1345,7 +1292,7 @@ sub _connect { else { # the handler may be invoked by something totally out of # the scope of DBIC - croak ("DBI Exception (unhandled by DBIC, ::Schema GCed): $_[0]"); + DBIx::Class::Exception->throw("DBI Exception (unhandled by DBIC, ::Schema GCed): $_[0]"); } }, '__DBIC__DBH__ERROR__HANDLER__'; }->($self, $dbh); @@ -1362,118 +1309,23 @@ sub _connect { $dbh; } -sub svp_begin { - my ($self, $name) = @_; - - $name = $self->_svp_generate_name - unless defined $name; - - $self->throw_exception ("You can't use savepoints outside a transaction") - if $self->{transaction_depth} == 0; - - $self->throw_exception ("Your Storage implementation doesn't support savepoints") - unless $self->can('_svp_begin'); - - push @{ $self->{savepoints} }, $name; - - $self->debugobj->svp_begin($name) if $self->debug; - - return $self->_svp_begin($name); -} - -sub svp_release { - my ($self, $name) = @_; - - $self->throw_exception ("You can't use savepoints outside a transaction") - if $self->{transaction_depth} == 0; - - $self->throw_exception ("Your Storage implementation doesn't support savepoints") - unless $self->can('_svp_release'); - - if (defined $name) { - $self->throw_exception ("Savepoint '$name' does not exist") - unless grep { $_ eq $name } @{ $self->{savepoints} }; - - # Dig through the stack until we find the one we are releasing. This keeps - # the stack up to date. 
- my $svp; - - do { $svp = pop @{ $self->{savepoints} } } while $svp ne $name; - } else { - $name = pop @{ $self->{savepoints} }; - } - - $self->debugobj->svp_release($name) if $self->debug; - - return $self->_svp_release($name); -} - -sub svp_rollback { - my ($self, $name) = @_; - - $self->throw_exception ("You can't use savepoints outside a transaction") - if $self->{transaction_depth} == 0; - - $self->throw_exception ("Your Storage implementation doesn't support savepoints") - unless $self->can('_svp_rollback'); - - if (defined $name) { - # If they passed us a name, verify that it exists in the stack - unless(grep({ $_ eq $name } @{ $self->{savepoints} })) { - $self->throw_exception("Savepoint '$name' does not exist!"); - } - - # Dig through the stack until we find the one we are releasing. This keeps - # the stack up to date. - while(my $s = pop(@{ $self->{savepoints} })) { - last if($s eq $name); - } - # Add the savepoint back to the stack, as a rollback doesn't remove the - # named savepoint, only everything after it. - push(@{ $self->{savepoints} }, $name); - } else { - # We'll assume they want to rollback to the last savepoint - $name = $self->{savepoints}->[-1]; - } - - $self->debugobj->svp_rollback($name) if $self->debug; - - return $self->_svp_rollback($name); -} - -sub _svp_generate_name { - my ($self) = @_; - return 'savepoint_'.scalar(@{ $self->{'savepoints'} }); -} - sub txn_begin { my $self = shift; # this means we have not yet connected and do not know the AC status - # (e.g. coderef $dbh) + # (e.g. coderef $dbh), need a full-fledged connection check if (! defined $self->_dbh_autocommit) { $self->ensure_connected; } - # otherwise re-connect on pid changes, so - # that the txn_depth is adjusted properly - # the lightweight _get_dbh is good enoug here - # (only superficial handle check, no pings) + # Otherwise simply connect or re-connect on pid changes else { $self->_get_dbh; } - if($self->transaction_depth == 0) { - $self->debugobj->txn_begin() - if $self->debug; - $self->_dbh_begin_work; - } - elsif ($self->auto_savepoint) { - $self->svp_begin; - } - $self->{transaction_depth}++; + $self->next::method(@_); } -sub _dbh_begin_work { +sub _exec_txn_begin { my $self = shift; # if the user is utilizing txn_do - good for him, otherwise we need to @@ -1481,7 +1333,7 @@ sub _dbh_begin_work { # We do this via ->dbh_do instead of ->dbh, so that the ->dbh "ping" # will be replaced by a failure of begin_work itself (which will be # then retried on reconnect) - if ($self->{_in_dbh_do}) { + if ($self->{_in_do_block}) { $self->_dbh->begin_work; } else { $self->dbh_do(sub { $_[1]->begin_work }); @@ -1490,176 +1342,259 @@ sub _dbh_begin_work { sub txn_commit { my $self = shift; - if (! $self->_dbh) { - $self->throw_exception('cannot COMMIT on a disconnected handle'); - } - elsif ($self->{transaction_depth} == 1) { - $self->debugobj->txn_commit() - if ($self->debug); - $self->_dbh_commit; - $self->{transaction_depth} = 0 - if $self->_dbh_autocommit; - } - elsif($self->{transaction_depth} > 1) { - $self->{transaction_depth}--; - $self->svp_release - if $self->auto_savepoint; - } - elsif (! 
$self->_dbh->FETCH('AutoCommit') ) { - carp "Storage transaction_depth $self->{transaction_depth} does not match " - ."false AutoCommit of $self->{_dbh}, attempting COMMIT anyway"; + $self->_verify_pid if $self->_dbh; + $self->throw_exception("Unable to txn_commit() on a disconnected storage") + unless $self->_dbh; - $self->debugobj->txn_commit() - if ($self->debug); - $self->_dbh_commit; - $self->{transaction_depth} = 0 - if $self->_dbh_autocommit; - } - else { - $self->throw_exception( 'Refusing to commit without a started transaction' ); + # esoteric case for folks using external $dbh handles + if (! $self->transaction_depth and ! $self->_dbh->FETCH('AutoCommit') ) { + carp "Storage transaction_depth 0 does not match " + ."false AutoCommit of $self->{_dbh}, attempting COMMIT anyway"; + $self->transaction_depth(1); } + + $self->next::method(@_); + + # if AutoCommit is disabled txn_depth never goes to 0 + # as a new txn is started immediately on commit + $self->transaction_depth(1) if ( + !$self->transaction_depth + and + defined $self->_dbh_autocommit + and + ! $self->_dbh_autocommit + ); } -sub _dbh_commit { - my $self = shift; - my $dbh = $self->_dbh - or $self->throw_exception('cannot COMMIT on a disconnected handle'); - $dbh->commit; +sub _exec_txn_commit { + shift->_dbh->commit; } sub txn_rollback { my $self = shift; - my $dbh = $self->_dbh; - try { - if ($self->{transaction_depth} == 1) { - $self->debugobj->txn_rollback() - if ($self->debug); - $self->{transaction_depth} = 0 - if $self->_dbh_autocommit; - $self->_dbh_rollback; - } - elsif($self->{transaction_depth} > 1) { - $self->{transaction_depth}--; - if ($self->auto_savepoint) { - $self->svp_rollback; - $self->svp_release; - } - } - else { - die DBIx::Class::Storage::NESTED_ROLLBACK_EXCEPTION->new; - } + + $self->_verify_pid if $self->_dbh; + $self->throw_exception("Unable to txn_rollback() on a disconnected storage") + unless $self->_dbh; + + # esoteric case for folks using external $dbh handles + if (! $self->transaction_depth and ! $self->_dbh->FETCH('AutoCommit') ) { + carp "Storage transaction_depth 0 does not match " + ."false AutoCommit of $self->{_dbh}, attempting ROLLBACK anyway"; + $self->transaction_depth(1); } - catch { - my $exception_class = "DBIx::Class::Storage::NESTED_ROLLBACK_EXCEPTION"; - if ($_ !~ /$exception_class/) { - # ensure that a failed rollback resets the transaction depth - $self->{transaction_depth} = $self->_dbh_autocommit ? 0 : 1; - } + $self->next::method(@_); - $self->throw_exception($_) - }; + # if AutoCommit is disabled txn_depth never goes to 0 + # as a new txn is started immediately on commit + $self->transaction_depth(1) if ( + !$self->transaction_depth + and + defined $self->_dbh_autocommit + and + ! $self->_dbh_autocommit + ); } -sub _dbh_rollback { - my $self = shift; - my $dbh = $self->_dbh - or $self->throw_exception('cannot ROLLBACK on a disconnected handle'); - $dbh->rollback; +sub _exec_txn_rollback { + shift->_dbh->rollback; +} + +# generate some identical methods +for my $meth (qw/svp_begin svp_release svp_rollback/) { + no strict qw/refs/; + *{__PACKAGE__ ."::$meth"} = subname $meth => sub { + my $self = shift; + $self->_verify_pid if $self->_dbh; + $self->throw_exception("Unable to $meth() on a disconnected storage") + unless $self->_dbh; + $self->next::method(@_); + }; } # This used to be the top-half of _execute. It was split out to make it # easier to override in NoBindVars without duping the rest. It takes up # all of _execute's args, and emits $sql, @bind. 
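# Editor's aside (not part of the patch): a hedged sketch of the public
# transaction API served by the txn_*/svp_* plumbing above. $schema is
# hypothetical; txn_do and the auto_savepoint connection option are
# documented DBIx::Class features:
#
#   my $schema = MyApp::Schema->connect(@info, { auto_savepoint => 1 });
#   $schema->txn_do(sub {
#     $schema->resultset('Artist')->create({ name => 'Outer' });
#     # with auto_savepoint, a nested txn_do maps onto svp_begin/svp_release
#     # instead of a second BEGIN, rolling back only to the savepoint on error
#     $schema->txn_do(sub {
#       $schema->resultset('CD')->create({ title => 'Inner' });
#     });
#   });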
sub _prep_for_execute { - my ($self, $op, $extra_bind, $ident, $args) = @_; + #my ($self, $op, $ident, $args) = @_; + return shift->_gen_sql_bind(@_) +} - if( blessed $ident && $ident->isa("DBIx::Class::ResultSource") ) { - $ident = $ident->from(); +sub _gen_sql_bind { + my ($self, $op, $ident, $args) = @_; + + my ($sql, @bind) = $self->sql_maker->$op( + blessed($ident) ? $ident->from : $ident, + @$args, + ); + + my (@final_bind, $colinfos); + my $resolve_bindinfo = sub { + $colinfos ||= $self->_resolve_column_info($ident); + if (my $col = $_[1]->{dbic_colname}) { + $_[1]->{sqlt_datatype} ||= $colinfos->{$col}{data_type} + if $colinfos->{$col}{data_type}; + $_[1]->{sqlt_size} ||= $colinfos->{$col}{size} + if $colinfos->{$col}{size}; + } + $_[1]; + }; + + for my $e (@{$args->[2]{bind}||[]}, @bind) { + push @final_bind, [ do { + if (ref $e ne 'ARRAY') { + ({}, $e) + } + elsif (! defined $e->[0]) { + ({}, $e->[1]) + } + elsif (ref $e->[0] eq 'HASH') { + ( + (first { $e->[0]{$_} } qw/dbd_attrs sqlt_datatype/) ? $e->[0] : $self->$resolve_bindinfo($e->[0]), + $e->[1] + ) + } + elsif (ref $e->[0] eq 'SCALAR') { + ( { sqlt_datatype => ${$e->[0]} }, $e->[1] ) + } + else { + ( $self->$resolve_bindinfo({ dbic_colname => $e->[0] }), $e->[1] ) + } + }]; } - my ($sql, @bind) = $self->sql_maker->$op($ident, @$args); + if ($op eq 'select' + && first { blessed($_->[1]) && $_->[1]->isa('DateTime') } @final_bind) { - unshift(@bind, - map { ref $_ eq 'ARRAY' ? $_ : [ '!!dummy', $_ ] } @$extra_bind) - if $extra_bind; - return ($sql, \@bind); + carp_unique 'DateTime objects passed to search() are not supported ' + . 'properly (InflateColumn::DateTime formats and settings are not ' + . 'respected.) See "Formatting DateTime objects in queries" in ' + . 'DBIx::Class::Manual::Cookbook'; + } + + ($sql, \@final_bind); } +sub _format_for_trace { + #my ($self, $bind) = @_; -sub _fix_bind_params { - my ($self, @bind) = @_; + ### Turn @bind from something like this: + ### ( [ "artist", 1 ], [ \%attrs, 3 ] ) + ### to this: + ### ( "'1'", "'3'" ) - ### Turn @bind from something like this: - ### ( [ "artist", 1 ], [ "cdid", 1, 3 ] ) - ### to this: - ### ( "'1'", "'1'", "'3'" ) - return - map { - if ( defined( $_ && $_->[1] ) ) { - map { qq{'$_'}; } @{$_}[ 1 .. $#$_ ]; - } - else { q{NULL}; } - } @bind; + map { + defined( $_ && $_->[1] ) + ? qq{'$_->[1]'} + : q{NULL} + } @{$_[1] || []}; } sub _query_start { - my ( $self, $sql, @bind ) = @_; - - if ( $self->debug ) { - @bind = $self->_fix_bind_params(@bind); + my ( $self, $sql, $bind ) = @_; - $self->debugobj->query_start( $sql, @bind ); - } + $self->debugobj->query_start( $sql, $self->_format_for_trace($bind) ) + if $self->debug; } sub _query_end { - my ( $self, $sql, @bind ) = @_; + my ( $self, $sql, $bind ) = @_; - if ( $self->debug ) { - @bind = $self->_fix_bind_params(@bind); - $self->debugobj->query_end( $sql, @bind ); - } + $self->debugobj->query_end( $sql, $self->_format_for_trace($bind) ) + if $self->debug; } -sub _dbh_execute { - my ($self, $dbh, $op, $extra_bind, $ident, $bind_attributes, @args) = @_; - - my ($sql, $bind) = $self->_prep_for_execute($op, $extra_bind, $ident, \@args); +my $sba_compat; +sub _dbi_attrs_for_bind { + my ($self, $ident, $bind) = @_; - $self->_query_start( $sql, @$bind ); + if (! defined $sba_compat) { + $self->_determine_driver; + $sba_compat = $self->can('source_bind_attributes') == \&source_bind_attributes + ? 
0 + : 1 + ; + } - my $sth = $self->sth($sql,$op); + my $sba_attrs; + if ($sba_compat) { + my $class = ref $self; + carp_unique ( + "The source_bind_attributes() override in $class relies on a deprecated codepath. " + .'You are strongly advised to switch your code to override bind_attribute_by_datatype() ' + .'instead. This legacy compat shim will also disappear some time before DBIC 0.09' + ); - my $placeholder_index = 1; + my $sba_attrs = $self->source_bind_attributes + } - foreach my $bound (@$bind) { - my $attributes = {}; - my($column_name, @data) = @$bound; + my @attrs; - if ($bind_attributes) { - $attributes = $bind_attributes->{$column_name} - if defined $bind_attributes->{$column_name}; + for (map { $_->[0] } @$bind) { + push @attrs, do { + if (exists $_->{dbd_attrs}) { + $_->{dbd_attrs} + } + elsif($_->{sqlt_datatype}) { + # cache the result in the dbh_details hash, as it can not change unless + # we connect to something else + my $cache = $self->_dbh_details->{_datatype_map_cache} ||= {}; + if (not exists $cache->{$_->{sqlt_datatype}}) { + $cache->{$_->{sqlt_datatype}} = $self->bind_attribute_by_data_type($_->{sqlt_datatype}) || undef; + } + $cache->{$_->{sqlt_datatype}}; + } + elsif ($sba_attrs and $_->{dbic_colname}) { + $sba_attrs->{$_->{dbic_colname}} || undef; + } + else { + undef; # always push something at this position + } } + } - foreach my $data (@data) { - my $ref = ref $data; + return \@attrs; +} - if ($ref and overload::Method($data, '""') ) { - $data = "$data"; - } - elsif ($ref eq 'SCALAR') { # any scalarrefs are assumed to be bind_inouts - $sth->bind_param_inout( - $placeholder_index++, - $data, - $self->_max_column_bytesize($ident, $column_name), - $attributes - ); - next; - } +sub _execute { + my ($self, $op, $ident, @args) = @_; + + my ($sql, $bind) = $self->_prep_for_execute($op, $ident, \@args); + + shift->dbh_do( # retry over disconnects + '_dbh_execute', + $sql, + $bind, + $self->_dbi_attrs_for_bind($ident, $bind) + ); +} - $sth->bind_param($placeholder_index++, $data, $attributes); +sub _dbh_execute { + my ($self, undef, $sql, $bind, $bind_attrs) = @_; + + $self->_query_start( $sql, $bind ); + my $sth = $self->_sth($sql); + + for my $i (0 .. $#$bind) { + if (ref $bind->[$i][1] eq 'SCALAR') { # any scalarrefs are assumed to be bind_inouts + $sth->bind_param_inout( + $i + 1, # bind params counts are 1-based + $bind->[$i][1], + $bind->[$i][0]{dbd_size} || $self->_max_column_bytesize($bind->[$i][0]), # size + $bind_attrs->[$i], + ); + } + else { + $sth->bind_param( + $i + 1, + (ref $bind->[$i][1] and overload::Method($bind->[$i][1], '""')) + ? "$bind->[$i][1]" + : $bind->[$i][1] + , + $bind_attrs->[$i], + ); } } @@ -1669,16 +1604,11 @@ sub _dbh_execute { $sth->errstr || $sth->err || 'Unknown error: execute() returned false, but error flags were not set...' ) if !$rv; - $self->_query_end( $sql, @$bind ); + $self->_query_end( $sql, $bind ); return (wantarray ? ($rv, $sth, @$bind) : $rv); } -sub _execute { - my $self = shift; - $self->dbh_do('_dbh_execute', @_); # retry over disconnects -} - sub _prefetch_autovalues { my ($self, $source, $to_insert) = @_; @@ -1693,6 +1623,8 @@ sub _prefetch_autovalues { ! 
exists $to_insert->{$col} or ref $to_insert->{$col} eq 'SCALAR' + or + (ref $to_insert->{$col} eq 'REF' and ref ${$to_insert->{$col}} eq 'ARRAY') ) ) { $values{$col} = $self->_sequence_fetch( @@ -1712,35 +1644,43 @@ sub insert { my $prefetched_values = $self->_prefetch_autovalues($source, $to_insert); - # fuse the values + # fuse the values, but keep a separate list of prefetched_values so that + # they can be fused once again with the final return $to_insert = { %$to_insert, %$prefetched_values }; - # list of primary keys we try to fetch from the database - # both not-exsists and scalarrefs are considered - my %fetch_pks; - for ($source->primary_columns) { - $fetch_pks{$_} = scalar keys %fetch_pks # so we can preserve order for prettyness - if ! exists $to_insert->{$_} or ref $to_insert->{$_} eq 'SCALAR'; - } + my $col_infos = $source->columns_info; + my %pcols = map { $_ => 1 } $source->primary_columns; + my %retrieve_cols; + for my $col ($source->columns) { + # nothing to retrieve when explicit values are supplied + next if (defined $to_insert->{$col} and ! ( + ref $to_insert->{$col} eq 'SCALAR' + or + (ref $to_insert->{$col} eq 'REF' and ref ${$to_insert->{$col}} eq 'ARRAY') + )); + + # the 'scalar keys' is a trick to preserve the ->columns declaration order + $retrieve_cols{$col} = scalar keys %retrieve_cols if ( + $pcols{$col} + or + $col_infos->{$col}{retrieve_on_insert} + ); + }; my ($sqla_opts, @ir_container); - if ($self->_use_insert_returning) { + if (%retrieve_cols and $self->_use_insert_returning) { + $sqla_opts->{returning_container} = \@ir_container + if $self->_use_insert_returning_bound; - # retain order as declared in the resultsource - for (sort { $fetch_pks{$a} <=> $fetch_pks{$b} } keys %fetch_pks ) { - push @{$sqla_opts->{returning}}, $_; - $sqla_opts->{returning_container} = \@ir_container - if $self->_use_insert_returning_bound; - } + $sqla_opts->{returning} = [ + sort { $retrieve_cols{$a} <=> $retrieve_cols{$b} } keys %retrieve_cols + ]; } - my $bind_attributes = $self->source_bind_attributes($source); - - my ($rv, $sth) = $self->_execute('insert' => [], $source, $bind_attributes, $to_insert, $sqla_opts); + my ($rv, $sth) = $self->_execute('insert', $source, $to_insert, $sqla_opts); - my %returned_cols; - - if (my $retlist = $sqla_opts->{returning}) { + my %returned_cols = %$to_insert; + if (my $retlist = $sqla_opts->{returning}) { # if IR is supported - we will get everything in one set @ir_container = try { local $SIG{__WARN__} = sub {}; my @r = $sth->fetchrow_array; @@ -1750,88 +1690,166 @@ sub insert { @returned_cols{@$retlist} = @ir_container if @ir_container; } + else { + # pull in PK if needed and then everything else + if (my @missing_pri = grep { $pcols{$_} } keys %retrieve_cols) { - return { %$prefetched_values, %returned_cols }; -} + $self->throw_exception( "Missing primary key but Storage doesn't support last_insert_id" ) + unless $self->can('last_insert_id'); + my @pri_values = $self->last_insert_id($source, @missing_pri); -## Currently it is assumed that all values passed will be "normal", i.e. not -## scalar refs, or at least, all the same type as the first set, the statement is -## only prepped once. 
-sub insert_bulk { - my ($self, $source, $cols, $data) = @_; + $self->throw_exception( "Can't get last insert id" ) + unless (@pri_values == @missing_pri); + + @returned_cols{@missing_pri} = @pri_values; + delete $retrieve_cols{$_} for @missing_pri; + } - my %colvalues; - @colvalues{@$cols} = (0..$#$cols); + # if there is more left to pull + if (%retrieve_cols) { + $self->throw_exception( + 'Unable to retrieve additional columns without a Primary Key on ' . $source->source_name + ) unless %pcols; + + my @left_to_fetch = sort { $retrieve_cols{$a} <=> $retrieve_cols{$b} } keys %retrieve_cols; - for my $i (0..$#$cols) { - my $first_val = $data->[0][$i]; - next unless ref $first_val eq 'SCALAR'; + my $cur = DBIx::Class::ResultSet->new($source, { + where => { map { $_ => $returned_cols{$_} } (keys %pcols) }, + select => \@left_to_fetch, + })->cursor; - $colvalues{ $cols->[$i] } = $first_val; + @returned_cols{@left_to_fetch} = $cur->next; + + $self->throw_exception('Duplicate row returned for PK-search after fresh insert') + if scalar $cur->next; + } } - # check for bad data and stringify stringifiable objects - my $bad_slice = sub { - my ($msg, $col_idx, $slice_idx) = @_; - $self->throw_exception(sprintf "%s for column '%s' in populate slice:\n%s", - $msg, - $cols->[$col_idx], - do { - local $Data::Dumper::Maxdepth = 1; # don't dump objects, if any - Dumper { - map { $cols->[$_] => $data->[$slice_idx][$_] } (0 .. $#$cols) - }, - } - ); - }; + return { %$prefetched_values, %returned_cols }; +} + +sub insert_bulk { + my ($self, $source, $cols, $data) = @_; + + # FIXME - perhaps this is not even needed? does DBI stringify? + # + # forcibly stringify whatever is stringifiable + for my $r (0 .. $#$data) { + for my $c (0 .. $#{$data->[$r]}) { + $data->[$r][$c] = "$data->[$r][$c]" + if ( ref $data->[$r][$c] and overload::Method($data->[$r][$c], '""') ); + } + } - for my $datum_idx (0..$#$data) { - my $datum = $data->[$datum_idx]; + # check the data for consistency + # report a sensible error on bad data + # + # also create a list of dynamic binds (ones that will be changing + # for each row) + my $dyn_bind_idx; + for my $col_idx (0..$#$cols) { + + # the first "row" is used as a point of reference + my $reference_val = $data->[0][$col_idx]; + my $is_literal = ref $reference_val eq 'SCALAR'; + my $is_literal_bind = ( !$is_literal and ( + ref $reference_val eq 'REF' + and + ref $$reference_val eq 'ARRAY' + ) ); + + $dyn_bind_idx->{$col_idx} = 1 + if (!$is_literal and !$is_literal_bind); + + # use a closure for convenience (less to pass) + my $bad_slice = sub { + my ($msg, $slice_idx) = @_; + $self->throw_exception(sprintf "%s for column '%s' in populate slice:\n%s", + $msg, + $cols->[$col_idx], + do { + require Data::Dumper::Concise; + local $Data::Dumper::Maxdepth = 2; + Data::Dumper::Concise::Dumper ({ + map { $cols->[$_] => + $data->[$slice_idx][$_] + } (0 .. 
$#$cols) + }), + } + ); + }; - for my $col_idx (0..$#$cols) { - my $val = $datum->[$col_idx]; - my $sqla_bind = $colvalues{ $cols->[$col_idx] }; - my $is_literal_sql = (ref $sqla_bind) eq 'SCALAR'; + for my $row_idx (1..$#$data) { # we are comparing against what we got from [0] above, hence start from 1 + my $val = $data->[$row_idx][$col_idx]; - if ($is_literal_sql) { - if (not ref $val) { - $bad_slice->('bind found where literal SQL expected', $col_idx, $datum_idx); + if ($is_literal) { + if (ref $val ne 'SCALAR') { + $bad_slice->( + "Incorrect value (expecting SCALAR-ref \\'$$reference_val')", + $row_idx + ); + } + elsif ($$val ne $$reference_val) { + $bad_slice->( + "Inconsistent literal SQL value (expecting \\'$$reference_val')", + $row_idx + ); } - elsif ((my $reftype = ref $val) ne 'SCALAR') { - $bad_slice->("$reftype reference found where literal SQL expected", - $col_idx, $datum_idx); + } + elsif ($is_literal_bind) { + if (ref $val ne 'REF' or ref $$val ne 'ARRAY') { + $bad_slice->( + "Incorrect value (expecting ARRAYREF-ref \\['${$reference_val}->[0]', ... ])", + $row_idx + ); } - elsif ($$val ne $$sqla_bind){ - $bad_slice->("inconsistent literal SQL value, expecting: '$$sqla_bind'", - $col_idx, $datum_idx); + elsif (${$val}->[0] ne ${$reference_val}->[0]) { + $bad_slice->( + "Inconsistent literal SQL-bind value (expecting \\['${$reference_val}->[0]', ... ])", + $row_idx + ); } } - elsif (my $reftype = ref $val) { - require overload; - if (overload::Method($val, '""')) { - $datum->[$col_idx] = "".$val; + elsif (ref $val) { + if (ref $val eq 'SCALAR' or (ref $val eq 'REF' and ref $$val eq 'ARRAY') ) { + $bad_slice->("Literal SQL found where a plain bind value is expected", $row_idx); } else { - $bad_slice->("$reftype reference found where bind expected", - $col_idx, $datum_idx); + $bad_slice->("$val reference found where bind expected", $row_idx); } } } } - my ($sql, $bind) = $self->_prep_for_execute ( - 'insert', undef, $source, [\%colvalues] + # Get the sql with bind values interpolated where necessary. For dynamic + # binds convert the values of the first row into a literal+bind combo, with + # extra positional info in the bind attr hashref. This will allow us to match + # the order properly, and is so contrived because a user-supplied literal + # bind (or something else specific to a resultsource and/or storage driver) + # can inject extra binds along the way, so one can't rely on "shift + # positions" ordering at all. Also we can't just hand SQLA a set of some + # known "values" (e.g. hashrefs that can be later matched up by address), + # because we want to supply a real value on which perhaps e.g. datatype + # checks will be performed + my ($sql, $proto_bind) = $self->_prep_for_execute ( + 'insert', + $source, + [ { map { $cols->[$_] => $dyn_bind_idx->{$_} + ? \[ '?', [ + { dbic_colname => $cols->[$_], _bind_data_slice_idx => $_ } + => + $data->[0][$_] + ] ] + : $data->[0][$_] + } (0..$#$cols) } ], ); - if (! @$bind) { - # if the bindlist is empty - make sure all "values" are in fact - # literal scalarrefs. If not the case this means the storage ate - # them away (e.g. the NoBindVars component) and interpolated them - # directly into the SQL. This obviosly can't be good for multi-inserts - - $self->throw_exception('Cannot insert_bulk without support for placeholders') - if first { ref $_ ne 'SCALAR' } values %colvalues; + if (! @$proto_bind and keys %$dyn_bind_idx) { + # if the bindlist is empty and we had some dynamic binds, this means the + # storage ate them away (e.g. 
the NoBindVars component) and interpolated + # them directly into the SQL. This obviosly can't be good for multi-inserts + $self->throw_exception('Cannot insert_bulk without support for placeholders'); } # neither _execute_array, nor _execute_inserts_with_no_binds are @@ -1839,12 +1857,13 @@ sub insert_bulk { # scope guard my $guard = $self->txn_scope_guard; - $self->_query_start( $sql, @$bind ? [ dummy => '__BULK_INSERT__' ] : () ); - my $sth = $self->sth($sql); + $self->_query_start( $sql, @$proto_bind ? [[undef => '__BULK_INSERT__' ]] : () ); + my $sth = $self->_sth($sql); my $rv = do { - if (@$bind) { - #@bind = map { ref $_ ? ''.$_ : $_ } @bind; # stringify args - $self->_execute_array( $source, $sth, $bind, $cols, $data ); + if (@$proto_bind) { + # proto bind contains the information on which pieces of $data to pull + # $cols is passed in only for prettier error-reporting + $self->_execute_array( $source, $sth, $proto_bind, $cols, $data ); } else { # bind_param_array doesn't work if there are no binds @@ -1852,43 +1871,39 @@ sub insert_bulk { } }; - $self->_query_end( $sql, @$bind ? [ dummy => '__BULK_INSERT__' ] : () ); + $self->_query_end( $sql, @$proto_bind ? [[ undef => '__BULK_INSERT__' ]] : () ); $guard->commit; - return (wantarray ? ($rv, $sth, @$bind) : $rv); + return (wantarray ? ($rv, $sth, @$proto_bind) : $rv); } sub _execute_array { - my ($self, $source, $sth, $bind, $cols, $data, @extra) = @_; + my ($self, $source, $sth, $proto_bind, $cols, $data, @extra) = @_; ## This must be an arrayref, else nothing works! my $tuple_status = []; - ## Get the bind_attributes, if any exist - my $bind_attributes = $self->source_bind_attributes($source); - - ## Bind the values and execute - my $placeholder_index = 1; + my $bind_attrs = $self->_dbi_attrs_for_bind($source, $proto_bind); - foreach my $bound (@$bind) { - - my $attributes = {}; - my ($column_name, $data_index) = @$bound; - - if( $bind_attributes ) { - $attributes = $bind_attributes->{$column_name} - if defined $bind_attributes->{$column_name}; - } - - my @data = map { $_->[$data_index] } @$data; + # Bind the values by column slices + for my $i (0 .. $#$proto_bind) { + my $data_slice_idx = ( + ref $proto_bind->[$i][0] eq 'HASH' + and + exists $proto_bind->[$i][0]{_bind_data_slice_idx} + ) ? $proto_bind->[$i][0]{_bind_data_slice_idx} : undef; $sth->bind_param_array( - $placeholder_index, - [@data], - (%$attributes ? $attributes : ()), + $i+1, # DBI bind indexes are 1-based + defined $data_slice_idx + # either get a "column" of dynamic values, or just repeat the same + # bind over and over + ? [ map { $_->[$data_slice_idx] } @$data ] + : [ ($proto_bind->[$i][1]) x @$data ] + , + defined $bind_attrs->[$i] ? $bind_attrs->[$i] : (), # some DBDs throw up when given an undef ); - $placeholder_index++; } my ($rv, $err); @@ -1922,9 +1937,10 @@ sub _execute_array { $self->throw_exception("Unexpected populate error: $err") if ($i > $#$tuple_status); - $self->throw_exception(sprintf "%s for populate slice:\n%s", + require Data::Dumper::Concise; + $self->throw_exception(sprintf "execute_array() aborted with '%s' at populate slice:\n%s", ($tuple_status->[$i][1] || $err), - Dumper { map { $cols->[$_] => $data->[$i][$_] } (0 .. $#$cols) }, + Data::Dumper::Concise::Dumper( { map { $cols->[$_] => $data->[$i][$_] } (0 .. 
$#$cols) } ), ); } @@ -1932,9 +1948,8 @@ sub _execute_array { } sub _dbh_execute_array { - my ($self, $sth, $tuple_status, @extra) = @_; - - return $sth->execute_array({ArrayTupleStatus => $tuple_status}); + #my ($self, $sth, $tuple_status, @extra) = @_; + return $_[1]->execute_array({ArrayTupleStatus => $_[2]}); } sub _dbh_execute_inserts_with_no_binds { @@ -1966,20 +1981,14 @@ sub _dbh_execute_inserts_with_no_binds { } sub update { - my ($self, $source, @args) = @_; - - my $bind_attrs = $self->source_bind_attributes($source); - - return $self->_execute('update' => [], $source, $bind_attrs, @args); + #my ($self, $source, @args) = @_; + shift->_execute('update', @_); } sub delete { - my ($self, $source, @args) = @_; - - my $bind_attrs = $self->source_bind_attributes($source); - - return $self->_execute('delete' => [], $source, $bind_attrs, @args); + #my ($self, $source, @args) = @_; + shift->_execute('delete', @_); } # We were sent here because the $rs contains a complex search @@ -2087,17 +2096,17 @@ sub _select { sub _select_args_to_query { my $self = shift; - # my ($op, $bind, $ident, $bind_attrs, $select, $cond, $rs_attrs, $rows, $offset) + # my ($op, $ident, $select, $cond, $rs_attrs, $rows, $offset) # = $self->_select_args($ident, $select, $cond, $attrs); - my ($op, $bind, $ident, $bind_attrs, @args) = + my ($op, $ident, @args) = $self->_select_args(@_); - # my ($sql, $prepared_bind) = $self->_prep_for_execute($op, $bind, $ident, [ $select, $cond, $rs_attrs, $rows, $offset ]); - my ($sql, $prepared_bind) = $self->_prep_for_execute($op, $bind, $ident, \@args); + # my ($sql, $prepared_bind) = $self->_gen_sql_bind($op, $ident, [ $select, $cond, $rs_attrs, $rows, $offset ]); + my ($sql, $prepared_bind) = $self->_gen_sql_bind($op, $ident, \@args); $prepared_bind ||= []; return wantarray - ? ($sql, $prepared_bind, $bind_attrs) + ? ($sql, $prepared_bind) : \[ "($sql)", @$prepared_bind ] ; } @@ -2119,40 +2128,12 @@ sub _select_args { , }; - # calculate bind_attrs before possible $ident mangling - my $bind_attrs = {}; - for my $alias (keys %$alias2source) { - my $bindtypes = $self->source_bind_attributes ($alias2source->{$alias}) || {}; - for my $col (keys %$bindtypes) { - - my $fqcn = join ('.', $alias, $col); - $bind_attrs->{$fqcn} = $bindtypes->{$col} if $bindtypes->{$col}; - - # Unqialified column names are nice, but at the same time can be - # rather ambiguous. What we do here is basically go along with - # the loop, adding an unqualified column slot to $bind_attrs, - # alongside the fully qualified name. As soon as we encounter - # another column by that name (which would imply another table) - # we unset the unqualified slot and never add any info to it - # to avoid erroneous type binding. If this happens the users - # only choice will be to fully qualify his column name - - if (exists $bind_attrs->{$col}) { - $bind_attrs->{$col} = {}; - } - else { - $bind_attrs->{$col} = $bind_attrs->{$fqcn}; - } - } - } - # Sanity check the attributes (SQLMaker does it too, but # in case of a software_limit we'll never reach there) if (defined $attrs->{offset}) { $self->throw_exception('A supplied offset attribute must be a non-negative integer') if ( $attrs->{offset} =~ /\D/ or $attrs->{offset} < 0 ); } - $attrs->{offset} ||= 0; if (defined $attrs->{rows}) { $self->throw_exception("The rows attribute must be a positive integer if present") @@ -2183,7 +2164,10 @@ sub _select_args { = $self->_adjust_select_args_for_complex_prefetch ($ident, $select, $where, $attrs); } elsif (! 
$attrs->{software_limit} ) { - push @limit, $attrs->{rows}, $attrs->{offset}; + push @limit, ( + $attrs->{rows} || (), + $attrs->{offset} || (), + ); } # try to simplify the joinmap further (prune unreferenced type-single joins) @@ -2199,7 +2183,7 @@ sub _select_args { # invoked, and that's just bad... ### - return ('select', $attrs->{bind}, $ident, $bind_attrs, $select, $where, $attrs, @limit); + return ('select', $ident, $select, $where, $attrs, @limit); } # Returns a counting SELECT for a simple count @@ -2211,21 +2195,13 @@ sub _count_select { return { count => '*' }; } - sub source_bind_attributes { - my ($self, $source) = @_; - - my $bind_attributes; - - my $colinfo = $source->columns_info; - - for my $col (keys %$colinfo) { - if (my $dt = $colinfo->{$col}{data_type} ) { - $bind_attributes->{$col} = $self->bind_attribute_by_data_type($dt) - } - } - - return $bind_attributes; + shift->throw_exception( + 'source_bind_attributes() was never meant to be a callable public method - ' + .'please contact the DBIC dev-team and describe your use case so that a reasonable ' + .'solution can be provided' + ."\nhttp://search.cpan.org/dist/DBIx-Class/lib/DBIx/Class.pm#GETTING_HELP/SUPPORT" + ); } =head2 select @@ -2262,20 +2238,10 @@ sub select_single { =head2 sql_limit_dialect This is an accessor for the default SQL limit dialect used by a particular -storage driver. Can be overriden by supplying an explicit L +storage driver. Can be overridden by supplying an explicit L to L. For a list of available limit dialects see L. -=head2 sth - -=over 4 - -=item Arguments: $sql - -=back - -Returns a L sth (statement handle) for the supplied SQL. - =cut sub _dbh_sth { @@ -2305,6 +2271,11 @@ sub _dbh_sth { } sub sth { + carp_unique 'sth was mistakenly marked/documented as public, stop calling it (will be removed before DBIC v0.09)'; + shift->_sth(@_); +} + +sub _sth { my ($self, $sql) = @_; $self->dbh_do('_dbh_sth', $sql); # retry over disconnects } @@ -2498,11 +2469,11 @@ be performed instead of the usual C. =cut sub is_datatype_numeric { - my ($self, $dt) = @_; + #my ($self, $dt) = @_; - return 0 unless $dt; + return 0 unless $_[1]; - return $dt =~ /^ (?: + $_[1] =~ /^ (?: numeric | int(?:eger)? | (?:tiny|small|medium|big)int | dec(?:imal)? | real | float | double (?: \s+ precision)? | (?:big)?serial ) $/ix; } @@ -2574,10 +2545,10 @@ sub create_ddl_dir { } else { -d $dir or - make_path ("$dir") # make_path does not like objects (i.e. Path::Class::Dir) + (require File::Path and File::Path::make_path ("$dir")) # make_path does not like objects (i.e. Path::Class::Dir) or $self->throw_exception( - "Failed to create '$dir': " . ($! || $@ || 'error unknow') + "Failed to create '$dir': " . ($! || $@ || 'error unknown') ); } @@ -2822,12 +2793,7 @@ sub datetime_parser { =head2 datetime_parser_type -Defines (returns) the datetime parser class - currently hardwired to -L - -=cut - -sub datetime_parser_type { "DateTime::Format::MySQL"; } +Defines the datetime parser class - currently defaults to L =head2 build_datetime_parser @@ -2838,7 +2804,6 @@ See L sub build_datetime_parser { my $self = shift; my $type = $self->datetime_parser_type(@_); - $self->ensure_class_loaded ($type); return $type; } @@ -2900,45 +2865,50 @@ sub relname_to_table_alias { # version and it may be necessary to amend or override it for a specific storage # if such binds are necessary. 
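# Editor's aside (not part of the patch): with source_bind_attributes() now
# deprecated above, per-datatype DBD bind attributes are meant to come from
# a bind_attribute_by_data_type() override. A hedged sketch modeled on the
# shipped Pg storage; the subclass name is hypothetical:
#
#   package MyApp::Storage::Pg;
#   use base 'DBIx::Class::Storage::DBI::Pg';
#
#   sub bind_attribute_by_data_type {
#     my ($self, $data_type) = @_;
#     return { pg_type => DBD::Pg::PG_BYTEA() }
#       if $data_type =~ /^bytea$/i;
#     return $self->next::method($data_type);
#   }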
sub _max_column_bytesize { - my ($self, $source, $col) = @_; + my ($self, $attr) = @_; - my $inf = $source->column_info($col); - return $inf->{_max_bytesize} ||= do { + my $max_size; - my $max_size; + if ($attr->{sqlt_datatype}) { + my $data_type = lc($attr->{sqlt_datatype}); - if (my $data_type = $inf->{data_type}) { - $data_type = lc($data_type); + if ($attr->{sqlt_size}) { # String/sized-binary types - if ($data_type =~ /^(?:l?(?:var)?char(?:acter)?(?:\s*varying)? - |(?:var)?binary(?:\s*varying)?|raw)\b/x + if ($data_type =~ /^(?: + l? (?:var)? char(?:acter)? (?:\s*varying)? + | + (?:var)? binary (?:\s*varying)? + | + raw + )\b/x ) { - $max_size = $inf->{size}; + $max_size = $attr->{sqlt_size}; } # Other charset/unicode types, assume scale of 4 - elsif ($data_type =~ /^(?:national\s*character(?:\s*varying)?|nchar - |univarchar - |nvarchar)\b/x + elsif ($data_type =~ /^(?: + national \s* character (?:\s*varying)? + | + nchar + | + univarchar + | + nvarchar + )\b/x ) { - $max_size = $inf->{size} * 4 if $inf->{size}; - } - # Blob types - elsif ($self->_is_lob_type($data_type)) { - # default to longreadlen - } - else { - $max_size = 100; # for all other (numeric?) datatypes + $max_size = $attr->{sqlt_size} * 4; } } - $max_size ||= $self->_get_dbh->{LongReadLen} || 8000; - }; + if (!$max_size and !$self->_is_lob_type($data_type)) { + $max_size = 100 # for all other (numeric?) datatypes + } + } + + $max_size || $self->_dbic_connect_attributes->{LongReadLen} || $self->_get_dbh->{LongReadLen} || 8000; } # Determine if a data_type is some type of BLOB -# FIXME: these regexes are expensive, result of these checks should be cached in -# the column_info . sub _is_lob_type { my ($self, $data_type) = @_; $data_type && ($data_type =~ /lob|bfile|text|image|bytea|memo/i @@ -2968,7 +2938,8 @@ sub _is_text_lob_type { DBIx::Class can do some wonderful magic with handling exceptions, disconnections, and transactions when you use C<< AutoCommit => 1 >> -(the default) combined with C for transaction support. +(the default) combined with L for +transaction support. If you set C<< AutoCommit => 0 >> in your connect info, then you are always in an assumed transaction between commits, and you're telling us you'd