use Scalar::Util qw/refaddr weaken reftype blessed/;
use List::Util qw/first/;
use Sub::Name 'subname';
+use Context::Preserve 'preserve_context';
use Try::Tiny;
use overload ();
use Data::Compare (); # no imports!!! guard against insane architecture
+use DBI::Const::GetInfoType (); # deliberately no import of the global %GetInfoType hash
use namespace::clean;
# default cursor class, overridable in connect_info attributes
__PACKAGE__->mk_group_accessors('simple' => qw/
_connect_info _dbi_connect_info _dbic_connect_attributes _driver_determined
_dbh _dbh_details _conn_pid _sql_maker _sql_maker_opts _dbh_autocommit
+ _perform_autoinc_retrieval _autoinc_supplied_for_op
/);
# the values for these accessors are picked out (and deleted) from
}
sub _get_server_version {
  # Report the backend version string by asking DBI for the standard
  # SQL_DBMS_VER info value (symbolic name resolved by _dbh_get_info
  # through DBI::Const::GetInfoType)
  my $self = shift;
  return $self->_dbh_get_info('SQL_DBMS_VER');
}
sub _dbh_get_info {
  my ($self, $info) = @_;

  # Accept either a raw numeric DBI info code or a symbolic name
  # (e.g. 'SQL_DBMS_VER'). Anything containing a non-digit is treated
  # as a name and translated via DBI::Const::GetInfoType.
  if ($info =~ /[^0-9]/) {
    my $resolved = $DBI::Const::GetInfoType::GetInfoType{$info};

    defined $resolved
      or $self->throw_exception("Info type '$_[1]' not provided by DBI::Const::GetInfoType");

    $info = $resolved;
  }

  # get_info() may blow up on some drivers - swallow the error and
  # normalize any false result to undef
  return try { $self->_get_dbh->get_info($info) } || undef;
}
my $attrs = shift @do_args;
my @bind = map { [ undef, $_ ] } @do_args;
- $self->_query_start($sql, \@bind);
- $self->_get_dbh->do($sql, $attrs, @do_args);
- $self->_query_end($sql, \@bind);
+ $self->dbh_do(sub {
+ $_[0]->_query_start($sql, \@bind);
+ $_[1]->do($sql, $attrs, @do_args);
+ $_[0]->_query_end($sql, \@bind);
+ });
}
return $self;
my ($old_connect_via, $dbh);
- if ($INC{'Apache/DBI.pm'} && $ENV{MOD_PERL}) {
- $old_connect_via = $DBI::connect_via;
- $DBI::connect_via = 'connect';
- }
+ local $DBI::connect_via = 'connect' if $INC{'Apache/DBI.pm'} && $ENV{MOD_PERL};
try {
if(ref $info[0] eq 'CODE') {
}
catch {
$self->throw_exception("DBI Connection failed: $_")
- }
- finally {
- $DBI::connect_via = $old_connect_via if $old_connect_via;
};
$self->_dbh_autocommit($dbh->{AutoCommit});
# they can be fused once again with the final return
$to_insert = { %$to_insert, %$prefetched_values };
+ # FIXME - we seem to assume undef values as non-supplied. This is wrong.
+# Investigate what it would take to s/defined/exists/
my $col_infos = $source->columns_info;
my %pcols = map { $_ => 1 } $source->primary_columns;
- my %retrieve_cols;
+ my (%retrieve_cols, $autoinc_supplied, $retrieve_autoinc_col);
for my $col ($source->columns) {
+ if ($col_infos->{$col}{is_auto_increment}) {
+ $autoinc_supplied ||= 1 if defined $to_insert->{$col};
+ $retrieve_autoinc_col ||= $col unless $autoinc_supplied;
+ }
+
# nothing to retrieve when explicit values are supplied
next if (defined $to_insert->{$col} and ! (
ref $to_insert->{$col} eq 'SCALAR'
);
};
+ local $self->{_autoinc_supplied_for_op} = $autoinc_supplied;
+ local $self->{_perform_autoinc_retrieval} = $retrieve_autoinc_col;
+
my ($sqla_opts, @ir_container);
if (%retrieve_cols and $self->_use_insert_returning) {
$sqla_opts->{returning_container} = \@ir_container
}
}
- my $colinfo_cache = {}; # since we will run _resolve_bindattrs on the same $source a lot
+ my $colinfos = $source->columns_info($cols);
+
+ local $self->{_autoinc_supplied_for_op} =
+ (first { $_->{is_auto_increment} } values %$colinfos)
+ ? 1
+ : 0
+ ;
# get a slice type index based on first row of data
# a "column" in this context may refer to more than one bind value
# normalization of user supplied stuff
my $resolved_bind = $self->_resolve_bindattrs(
- $source, \@bind, $colinfo_cache,
+ $source, \@bind, $colinfos,
);
# store value-less (attrs only) bind info - we will be comparing all
map
{ $_->[0] }
@{$self->_resolve_bindattrs(
- $source, [ @{$$val}[1 .. $#$$val] ], $colinfo_cache,
+ $source, [ @{$$val}[1 .. $#$$val] ], $colinfos,
)}
],
)) {
$guard->commit;
- return (wantarray ? ($rv, $sth, @$proto_bind) : $rv);
+ return wantarray ? ($rv, $sth, @$proto_bind) : $rv;
}
# execute_for_fetch is capable of returning data just fine (it means it
sub _select_args_to_query {
my $self = shift;
+ $self->throw_exception(
+ "Unable to generate limited query representation with 'software_limit' enabled"
+ ) if ($_[3]->{software_limit} and ($_[3]->{offset} or $_[3]->{rows}) );
+
# my ($op, $ident, $select, $cond, $rs_attrs, $rows, $offset)
# = $self->_select_args($ident, $select, $cond, $attrs);
my ($op, $ident, @args) =
# see if we need to tear the prefetch apart otherwise delegate the limiting to the
# storage, unless software limit was requested
if (
- #limited has_many
- ( $attrs->{rows} && keys %{$attrs->{collapse}} )
+ # limited collapsing has_many
+ ( $attrs->{rows} && $attrs->{collapse} )
||
# grouped prefetch (to satisfy group_by == select)
( $attrs->{group_by}
data => $schema,
);
- my @ret;
- if (wantarray) {
- @ret = $tr->translate;
- }
- else {
- $ret[0] = $tr->translate;
- }
-
- $self->throw_exception( 'Unable to produce deployment statements: ' . $tr->error)
- unless (@ret && defined $ret[0]);
-
- return wantarray ? @ret : $ret[0];
+ return preserve_context {
+ $tr->translate
+ } after => sub {
+ $self->throw_exception( 'Unable to produce deployment statements: ' . $tr->error)
+ unless defined $_[0];
+ };
}
# FIXME deploy() currently does not accurately report sql errors