__PACKAGE__->mk_group_accessors('simple' => qw/
_connect_info _dbi_connect_info _dbic_connect_attributes _driver_determined
_dbh _dbh_details _conn_pid _sql_maker _sql_maker_opts _dbh_autocommit
+ _perform_autoinc_retrieval _autoinc_supplied_for_op
/);
# the values for these accessors are picked out (and deleted) from
# they can be fused once again with the final return
$to_insert = { %$to_insert, %$prefetched_values };
+ # FIXME - we seem to assume undef values as non-supplied. This is wrong.
+ # Investigate what does it take to s/defined/exists/
my $col_infos = $source->columns_info;
my %pcols = map { $_ => 1 } $source->primary_columns;
- my %retrieve_cols;
+ my (%retrieve_cols, $autoinc_supplied, $retrieve_autoinc_col);
for my $col ($source->columns) {
+ if ($col_infos->{$col}{is_auto_increment}) {
+ $autoinc_supplied ||= 1 if defined $to_insert->{$col};
+ $retrieve_autoinc_col ||= $col unless $autoinc_supplied;
+ }
+
# nothing to retrieve when explicit values are supplied
next if (defined $to_insert->{$col} and ! (
ref $to_insert->{$col} eq 'SCALAR'
);
};
+ local $self->{_autoinc_supplied_for_op} = $autoinc_supplied;
+ local $self->{_perform_autoinc_retrieval} = $retrieve_autoinc_col;
+
my ($sqla_opts, @ir_container);
if (%retrieve_cols and $self->_use_insert_returning) {
$sqla_opts->{returning_container} = \@ir_container
}
}
- my $colinfo_cache = {}; # since we will run _resolve_bindattrs on the same $source a lot
+ my $colinfos = $source->columns_info($cols);
+
+ local $self->{_autoinc_supplied_for_op} =
+ (first { $_->{is_auto_increment} } values %$colinfos)
+ ? 1
+ : 0
+ ;
# get a slice type index based on first row of data
# a "column" in this context may refer to more than one bind value
# normalization of user supplied stuff
my $resolved_bind = $self->_resolve_bindattrs(
- $source, \@bind, $colinfo_cache,
+ $source, \@bind, $colinfos,
);
# store value-less (attrs only) bind info - we will be comparing all
map
{ $_->[0] }
@{$self->_resolve_bindattrs(
- $source, [ @{$$val}[1 .. $#$$val] ], $colinfo_cache,
+ $source, [ @{$$val}[1 .. $#$$val] ], $colinfos,
)}
],
)) {
$guard->commit;
- return (wantarray ? ($rv, $sth, @$proto_bind) : $rv);
+ return wantarray ? ($rv, $sth, @$proto_bind) : $rv;
}
# execute_for_fetch is capable of returning data just fine (it means it
use strict;
use warnings;
-use base qw/DBIx::Class::Storage::DBI::UniqueIdentifier/;
+use base qw/
+ DBIx::Class::Storage::DBI::UniqueIdentifier
+ DBIx::Class::Storage::DBI::IdentityInsert
+/;
use mro 'c3';
+
use Try::Tiny;
use List::Util 'first';
use namespace::clean;
__PACKAGE__->mk_group_accessors(simple => qw/
- _identity _identity_method _pre_insert_sql _post_insert_sql
+ _identity _identity_method
/);
__PACKAGE__->sql_maker_class('DBIx::Class::SQLMaker::MSSQL');
'DBIx::Class::Storage::DBI::MSSQL::DateTime::Format'
);
-
__PACKAGE__->new_guid('NEWID()');
-sub _set_identity_insert {
- my ($self, $table) = @_;
-
- my $stmt = 'SET IDENTITY_INSERT %s %s';
- $table = $self->sql_maker->_quote($table);
-
- $self->_pre_insert_sql (sprintf $stmt, $table, 'ON');
- $self->_post_insert_sql(sprintf $stmt, $table, 'OFF');
-}
-
-sub insert_bulk {
- my $self = shift;
- my ($source, $cols, $data) = @_;
-
- my $is_identity_insert =
- (first { $_->{is_auto_increment} } values %{ $source->columns_info($cols) } )
- ? 1
- : 0
- ;
-
- if ($is_identity_insert) {
- $self->_set_identity_insert ($source->name);
- }
-
- $self->next::method(@_);
-}
-
-sub insert {
- my $self = shift;
- my ($source, $to_insert) = @_;
-
- my $supplied_col_info = $self->_resolve_column_info($source, [keys %$to_insert] );
-
- my $is_identity_insert =
- (first { $_->{is_auto_increment} } values %$supplied_col_info) ? 1 : 0;
-
- if ($is_identity_insert) {
- $self->_set_identity_insert ($source->name);
- }
-
- my $updated_cols = $self->next::method(@_);
-
- return $updated_cols;
-}
-
sub _prep_for_execute {
my $self = shift;
my ($op, $ident, $args) = @_;
my ($sql, $bind) = $self->next::method (@_);
- if ($op eq 'insert') {
- if (my $prepend = $self->_pre_insert_sql) {
- $sql = "${prepend}\n${sql}";
- $self->_pre_insert_sql(undef);
- }
- if (my $append = $self->_post_insert_sql) {
- $sql = "${sql}\n${append}";
- $self->_post_insert_sql(undef);
- }
+ # SELECT SCOPE_IDENTITY only works within a statement scope. We
+ # must try to always use this particular idiom first, as it is the
+ # only one that guarantees retrieving the correct id under high
+ # concurrency. When this fails we will fall back to whatever secondary
+ # retrieval method is specified in _identity_method, but at this
+ # point we don't have many guarantees we will get what we expected.
+ # http://msdn.microsoft.com/en-us/library/ms190315.aspx
+ # http://davidhayden.com/blog/dave/archive/2006/01/17/2736.aspx
+ if ($self->_perform_autoinc_retrieval) {
$sql .= "\nSELECT SCOPE_IDENTITY()";
}
my $self = shift;
my ($op) = @_;
+ # always list ctx - we need the $sth
my ($rv, $sth, @bind) = $self->next::method(@_);
- if ($op eq 'insert') {
+ if ($self->_perform_autoinc_retrieval) {
# this should bring back the result of SELECT SCOPE_IDENTITY() we tacked
# on in _prep_for_execute above
use warnings;
use base qw/
- DBIx::Class::Storage::DBI::Sybase
- DBIx::Class::Storage::DBI::AutoCast
+ DBIx::Class::Storage::DBI::Sybase
+ DBIx::Class::Storage::DBI::AutoCast
+ DBIx::Class::Storage::DBI::IdentityInsert
/;
use mro 'c3';
use DBIx::Class::Carp;
-use Scalar::Util 'blessed';
+use Scalar::Util qw/blessed weaken/;
use List::Util 'first';
use Sub::Name();
use Data::Dumper::Concise 'Dumper';
use Try::Tiny;
+use Context::Preserve 'preserve_context';
use namespace::clean;
__PACKAGE__->sql_limit_dialect ('RowCountOrGenericSubQ');
);
__PACKAGE__->mk_group_accessors('simple' =>
- qw/_identity _blob_log_on_update _writer_storage _is_extra_storage
+ qw/_identity _identity_method _blob_log_on_update _parent_storage
+ _writer_storage _is_writer_storage
_bulk_storage _is_bulk_storage _began_bulk_work
- _identity_method/
+ /
);
# create storage for insert/(update blob) transactions,
# unless this is that storage
- return if $self->_is_extra_storage;
+ return if $self->_parent_storage;
my $writer_storage = (ref $self)->new;
- $writer_storage->_is_extra_storage(1);
+ $writer_storage->_is_writer_storage(1); # just info
$writer_storage->connect_info($self->connect_info);
$writer_storage->auto_cast($self->auto_cast);
+ weaken ($writer_storage->{_parent_storage} = $self);
$self->_writer_storage($writer_storage);
# create a bulk storage unless connect_info is a coderef
my $bulk_storage = (ref $self)->new;
- $bulk_storage->_is_extra_storage(1);
$bulk_storage->_is_bulk_storage(1); # for special ->disconnect acrobatics
$bulk_storage->connect_info($self->connect_info);
# this is why
$bulk_storage->_dbi_connect_info->[0] .= ';bulkLogin=1';
+ weaken ($bulk_storage->{_parent_storage} = $self);
$self->_bulk_storage($bulk_storage);
}
sub _prep_for_execute {
my $self = shift;
- my ($op, $ident, $args) = @_;
+ my ($op, $ident) = @_;
+
+ #
+### This is commented out because all tests pass. However I am leaving it
+### here as it may prove necessary (can't think through all combinations)
+### BTW it doesn't currently work exactly - need better sensitivity to
+ # currently set value
+ #
+ # inherit these from the parent for the duration of _prep_for_execute
+ # Don't know how to make a localizing loop with if's, otherwise I would
+ #local $self->{_autoinc_supplied_for_op}
+ # = $self->_parent_storage->_autoinc_supplied_for_op
+ #if ($op eq 'insert' or $op eq 'update') and $self->_parent_storage;
+ #local $self->{_perform_autoinc_retrieval}
+ # = $self->_parent_storage->_perform_autoinc_retrieval
+ #if ($op eq 'insert' or $op eq 'update') and $self->_parent_storage;
my ($sql, $bind) = $self->next::method (@_);
- my $table = blessed $ident ? $ident->from : $ident;
-
- my $bind_info = $self->_resolve_column_info(
- $ident, [map { $_->[0]{dbic_colname} || () } @{$bind}]
- );
- my $bound_identity_col =
- first { $bind_info->{$_}{is_auto_increment} }
- keys %$bind_info
- ;
-
- my $columns_info = blessed $ident && $ident->columns_info;
-
- my $identity_col =
- $columns_info &&
- first { $columns_info->{$_}{is_auto_increment} }
- keys %$columns_info
- ;
-
- if (
- ($bound_identity_col and $op eq 'insert')
- or
- (
- $op eq 'update'
- and
- defined $identity_col
- and
- exists $args->[0]{$identity_col}
- )
- ) {
- $sql = join ("\n",
- $self->_set_table_identity_sql($op => $table, 'on'),
- $sql,
- $self->_set_table_identity_sql($op => $table, 'off'),
- );
- }
-
- if (
- (not $bound_identity_col)
- and
- $identity_col
- and
- (not $self->{insert_bulk})
- and
- $op eq 'insert'
- ) {
- $sql =
- "$sql\n" .
- $self->_fetch_identity_sql($ident, $identity_col);
+ if (my $identity_col = $self->_perform_autoinc_retrieval) {
+ $sql .= "\n" . $self->_fetch_identity_sql($ident, $identity_col)
}
return ($sql, $bind);
}
-sub _set_table_identity_sql {
- my ($self, $op, $table, $on_off) = @_;
+sub _fetch_identity_sql {
+ my ($self, $source, $col) = @_;
- return sprintf 'SET IDENTITY_%s %s %s',
- uc($op), $self->sql_maker->_quote($table), uc($on_off);
+ return sprintf ("SELECT MAX(%s) FROM %s",
+ map { $self->sql_maker->_quote ($_) } ($col, $source->from)
+ );
}
# Stolen from SQLT, with some modifications. This is a makeshift
return uc($TYPE_MAPPING{$type} || $type);
}
-sub _fetch_identity_sql {
- my ($self, $source, $col) = @_;
-
- return sprintf ("SELECT MAX(%s) FROM %s",
- map { $self->sql_maker->_quote ($_) } ($col, $source->from)
- );
-}
sub _execute {
my $self = shift;
my ($rv, $sth, @bind) = $self->next::method(@_);
- if ($op eq 'insert') {
- $self->_identity($sth->fetchrow_array);
- $sth->finish;
- }
+ $self->_identity( ($sth->fetchall_arrayref)[0][0] )
+ if $self->_perform_autoinc_retrieval;
return wantarray ? ($rv, $sth, @bind) : $rv;
}
keys %$columns_info )
|| '';
+ # FIXME - this is duplication from DBI.pm. When refactored towards
+ # the LobWriter this can be folded back where it belongs.
+ local $self->{_autoinc_supplied_for_op} = exists $to_insert->{$identity_col}
+ ? 1
+ : 0
+ ;
+ local $self->{_perform_autoinc_retrieval} =
+ ($identity_col and ! exists $to_insert->{$identity_col})
+ ? $identity_col
+ : undef
+ ;
+
# check for empty insert
# INSERT INTO foo DEFAULT VALUES -- does not work with Sybase
# try to insert explicit 'DEFAULT's instead (except for identity, timestamp
my $blob_cols = $self->_remove_blob_cols($source, $to_insert);
# do we need the horrific SELECT MAX(COL) hack?
- my $dumb_last_insert_id =
- $identity_col
- && (not exists $to_insert->{$identity_col})
- && ($self->_identity_method||'') ne '@@IDENTITY';
+ my $need_dumb_last_insert_id = (
+ $self->_perform_autoinc_retrieval
+ &&
+ ($self->_identity_method||'') ne '@@IDENTITY'
+ );
my $next = $self->next::can;
# we are already in a transaction, or there are no blobs
# and we don't need the PK - just (try to) do it
if ($self->{transaction_depth}
- || (!$blob_cols && !$dumb_last_insert_id)
+ || (!$blob_cols && !$need_dumb_last_insert_id)
) {
return $self->_insert (
$next, $source, $to_insert, $blob_cols, $identity_col
my $self = shift;
my ($source, $fields, $where, @rest) = @_;
- my $blob_cols = $self->_remove_blob_cols($source, $fields);
+ #
+ # When *updating* identities, ASE requires SET IDENTITY_UPDATE called
+ #
+ if (my $blob_cols = $self->_remove_blob_cols($source, $fields)) {
- my $table = $source->name;
+ # If there are any blobs in $where, Sybase will return a descriptive error
+ # message.
+ # XXX blobs can still be used with a LIKE query, and this should be handled.
- my $columns_info = $source->columns_info;
+ # update+blob update(s) done atomically on separate connection
+ $self = $self->_writer_storage;
- my $identity_col =
- first { $columns_info->{$_}{is_auto_increment} }
- keys %$columns_info;
-
- my $is_identity_update = $identity_col && defined $fields->{$identity_col};
+ my $guard = $self->txn_scope_guard;
- return $self->next::method(@_) unless $blob_cols;
+ # First update the blob columns to be updated to '' (taken from $fields, where
+ # it is originally put by _remove_blob_cols .)
+ my %blobs_to_empty = map { ($_ => delete $fields->{$_}) } keys %$blob_cols;
-# If there are any blobs in $where, Sybase will return a descriptive error
-# message.
-# XXX blobs can still be used with a LIKE query, and this should be handled.
+ # We can't only update NULL blobs, because blobs cannot be in the WHERE clause.
+ $self->next::method($source, \%blobs_to_empty, $where, @rest);
-# update+blob update(s) done atomically on separate connection
- $self = $self->_writer_storage;
+ # Now update the blobs before the other columns in case the update of other
+ # columns makes the search condition invalid.
+ my $rv = $self->_update_blobs($source, $blob_cols, $where);
- my $guard = $self->txn_scope_guard;
+ if (keys %$fields) {
-# First update the blob columns to be updated to '' (taken from $fields, where
-# it is originally put by _remove_blob_cols .)
- my %blobs_to_empty = map { ($_ => delete $fields->{$_}) } keys %$blob_cols;
+ # Now set the identity update flags for the actual update
+ local $self->{_autoinc_supplied_for_op} = (first
+ { $_->{is_auto_increment} }
+ values %{ $source->columns_info([ keys %$fields ]) }
+ ) ? 1 : 0;
-# We can't only update NULL blobs, because blobs cannot be in the WHERE clause.
- $self->next::method($source, \%blobs_to_empty, $where, @rest);
-
-# Now update the blobs before the other columns in case the update of other
-# columns makes the search condition invalid.
- $self->_update_blobs($source, $blob_cols, $where);
-
- my @res;
- if (%$fields) {
- if (wantarray) {
- @res = $self->next::method(@_);
- }
- elsif (defined wantarray) {
- $res[0] = $self->next::method(@_);
+ my $next = $self->next::can;
+ my $args = \@_;
+ return preserve_context {
+ $self->$next(@$args);
+ } after => sub { $guard->commit };
}
else {
- $self->next::method(@_);
+ $guard->commit;
+ return $rv;
}
}
+ else {
+ # Set the identity update flags for the actual update
+ local $self->{_autoinc_supplied_for_op} = (first
+ { $_->{is_auto_increment} }
+ values %{ $source->columns_info([ keys %$fields ]) }
+ ) ? 1 : 0;
- $guard->commit;
-
- return wantarray ? @res : $res[0];
+ return $self->next::method(@_);
+ }
}
sub insert_bulk {
first { $columns_info->{$_}{is_auto_increment} }
keys %$columns_info;
- my $is_identity_insert = (first { $_ eq $identity_col } @{$cols}) ? 1 : 0;
+ # FIXME - this is duplication from DBI.pm. When refactored towards
+ # the LobWriter this can be folded back where it belongs.
+ local $self->{_autoinc_supplied_for_op} =
+ (first { $_ eq $identity_col } @$cols)
+ ? 1
+ : 0
+ ;
my $use_bulk_api =
$self->_bulk_storage &&
:
($self, undef);
- local $self->{insert_bulk} = 1;
-
$self->next::method(@_);
if ($blob_cols) {
- if ($is_identity_insert) {
+ if ($self->_autoinc_supplied_for_op) {
$self->_insert_blobs_array ($source, $blob_cols, $cols, $data);
}
else {
my @new_data;
for my $slice_idx (0..$#$data) {
push @new_data, [map {
- # identity data will be 'undef' if not $is_identity_insert
+ # identity data will be 'undef' if not _autoinc_supplied_for_op()
# columns with defaults will also be 'undef'
exists $orig_order{$_}
? $data->[$slice_idx][$orig_order{$_}]
## will require SQLA or *Hacks changes for ordered columns
# $bulk->next::method($source, \@source_columns, \@new_data, {
# syb_bcp_attribs => {
-# identity_flag => $is_identity_insert,
+# identity_flag => $self->_autoinc_supplied_for_op ? 1 : 0,
# identity_column => $identity_idx,
# }
# });
# 'insert', # op
{
syb_bcp_attribs => {
- identity_flag => $is_identity_insert,
+ identity_flag => $self->_autoinc_supplied_for_op ? 1 : 0,
identity_column => $identity_idx,
}
}