use strict;
use warnings;
-use base 'DBIx::Class::Storage';
+use base qw/DBIx::Class::Storage::DBIHacks DBIx::Class::Storage/;
use mro 'c3';
use Carp::Clan qw/^DBIx::Class/;
use DBIx::Class::Storage::Statistics;
use Scalar::Util();
use List::Util();
+use Data::Dumper::Concise();
+use Sub::Name ();
+
+# what version of sqlt we require if deploy() is invoked without a ddl_dir
+# (when changing this, also adjust the corresponding author_require in Makefile.PL)
+my $minimum_sqlt_version = '0.11002';
+
__PACKAGE__->mk_group_accessors('simple' =>
qw/_connect_info _dbi_connect_info _dbh _sql_maker _sql_maker_opts _conn_pid
__PACKAGE__->sql_maker_class('DBIx::Class::SQLAHacks');
+# Each of these methods needs _determine_driver called before it
+# in order to function reliably. This is a purely DRY optimization
+my @rdbms_specific_methods = qw/
+ sqlt_type
+ build_datetime_parser
+ datetime_parser_type
+
+ insert
+ insert_bulk
+ update
+ delete
+ select
+ select_single
+/;
+
+for my $meth (@rdbms_specific_methods) {
+
+ my $orig = __PACKAGE__->can ($meth)
+ or next;
+
+ no strict qw/refs/;
+ no warnings qw/redefine/;
+ *{__PACKAGE__ ."::$meth"} = Sub::Name::subname $meth => sub {
+ if (not $_[0]->_driver_determined) {
+ $_[0]->_determine_driver;
+ goto $_[0]->can($meth);
+ }
+ $orig->(@_);
+ };
+}
+
+
=head1 NAME
DBIx::Class::Storage::DBI - DBI storage handler
my $schema = MySchema->connect('dbi:SQLite:my.db');
$schema->storage->debug(1);
- $schema->dbh_do("DROP TABLE authors");
+
+ my @stuff = $schema->storage->dbh_do(
+ sub {
+ my ($storage, $dbh, @args) = @_;
+ $dbh->do("DROP TABLE authors");
+ },
+ @column_list
+ );
$schema->resultset('Book')->search({
written_on => $schema->storage->datetime_parser(DateTime->now)
%extra_attributes,
}];
+ $connect_info_args = [{
+ dbh_maker => sub { DBI->connect (...) },
+ %dbi_attributes,
+ %extra_attributes,
+ }];
+
This is particularly useful for L<Catalyst> based applications, allowing the
following config (L<Config::General> style):
</connect_info>
</Model::DB>
+The C<dsn>/C<user>/C<password> combination can be replaced by the
+C<dbh_maker> key, whose value is a coderef that returns a connected
+L<DBI database handle|DBI/connect>.
+
=back
Please note that the L<DBI> docs recommend that you always explicitly
# Connect via subref
->connect_info([ sub { DBI->connect(...) } ]);
+ # Connect via subref in hashref
+ ->connect_info([{
+ dbh_maker => sub { DBI->connect(...) },
+ on_connect_do => 'alter session ...',
+ }]);
+
# A bit more complicated
->connect_info(
[
=cut
sub connect_info {
- my ($self, $info_arg) = @_;
+ my ($self, $info) = @_;
- return $self->_connect_info if !$info_arg;
+ return $self->_connect_info if !$info;
- my @args = @$info_arg; # take a shallow copy for further mutilation
- $self->_connect_info([@args]); # copy for _connect_info
+ $self->_connect_info($info); # copy for _connect_info
+
+ $info = $self->_normalize_connect_info($info)
+ if ref $info eq 'ARRAY';
+
+ for my $storage_opt (keys %{ $info->{storage_options} }) {
+ my $value = $info->{storage_options}{$storage_opt};
+
+ $self->$storage_opt($value);
+ }
+
+ # Kill sql_maker/_sql_maker_opts, so we get a fresh one with only
+ # the new set of options
+ $self->_sql_maker(undef);
+ $self->_sql_maker_opts({});
+ for my $sql_maker_opt (keys %{ $info->{sql_maker_options} }) {
+ my $value = $info->{sql_maker_options}{$sql_maker_opt};
+
+ $self->_sql_maker_opts->{$sql_maker_opt} = $value;
+ }
+
+ my %attrs = (
+ %{ $self->_default_dbi_connect_attributes || {} },
+ %{ $info->{attributes} || {} },
+ );
+
+ my @args = @{ $info->{arguments} };
+
+ $self->_dbi_connect_info([@args,
+ %attrs && !(ref $args[0] eq 'CODE') ? \%attrs : ()]);
+
+ return $self->_connect_info;
+}
+
+sub _normalize_connect_info {
+ my ($self, $info_arg) = @_;
+ my %info;
+
+ my @args = @$info_arg; # take a shallow copy for further mutilation
# combine/pre-parse arguments depending on invocation style
elsif (ref $args[0] eq 'HASH') { # single hashref (i.e. Catalyst config)
%attrs = %{$args[0]};
@args = ();
- for (qw/password user dsn/) {
- unshift @args, delete $attrs{$_};
+ if (my $code = delete $attrs{dbh_maker}) {
+ @args = $code;
+
+ my @ignored = grep { delete $attrs{$_} } (qw/dsn user password/);
+ if (@ignored) {
+ carp sprintf (
+ 'Attribute(s) %s in connect_info were ignored, as they can not be applied '
+ . "to the result of 'dbh_maker'",
+
+ join (', ', map { "'$_'" } (@ignored) ),
+ );
+ }
+ }
+ else {
+ @args = delete @attrs{qw/dsn user password/};
}
}
else { # otherwise assume dsn/user/password + \%attrs + \%extra_attrs
@args = @args[0,1,2];
}
- # Kill sql_maker/_sql_maker_opts, so we get a fresh one with only
- # the new set of options
- $self->_sql_maker(undef);
- $self->_sql_maker_opts({});
+ $info{arguments} = \@args;
- if(keys %attrs) {
- for my $storage_opt (@storage_options, 'cursor_class') { # @storage_options is declared at the top of the module
- if(my $value = delete $attrs{$storage_opt}) {
- $self->$storage_opt($value);
- }
- }
- for my $sql_maker_opt (qw/limit_dialect quote_char name_sep/) {
- if(my $opt_val = delete $attrs{$sql_maker_opt}) {
- $self->_sql_maker_opts->{$sql_maker_opt} = $opt_val;
- }
- }
- }
+ my @storage_opts = grep exists $attrs{$_},
+ @storage_options, 'cursor_class';
+
+ @{ $info{storage_options} }{@storage_opts} =
+ delete @attrs{@storage_opts} if @storage_opts;
+
+ my @sql_maker_opts = grep exists $attrs{$_},
+ qw/limit_dialect quote_char name_sep/;
+
+ @{ $info{sql_maker_options} }{@sql_maker_opts} =
+ delete @attrs{@sql_maker_opts} if @sql_maker_opts;
- %attrs = () if (ref $args[0] eq 'CODE'); # _connect() never looks past $args[0] in this case
+ $info{attributes} = \%attrs if %attrs;
- $self->_dbi_connect_info([@args, keys %attrs ? \%attrs : ()]);
- $self->_connect_info;
+ return \%info;
+}
+
+sub _default_dbi_connect_attributes {
+ return {
+ AutoCommit => 1,
+ RaiseError => 1,
+ PrintError => 0,
+ };
}
=head2 on_connect_do
my $self = shift;
my $code = shift;
- my $dbh = $self->_dbh;
+ my $dbh = $self->_get_dbh;
return $self->$code($dbh, @_) if $self->{_in_dbh_do}
|| $self->{transaction_depth};
my $want_array = wantarray;
eval {
- $self->_verify_pid if $dbh;
- if(!$self->_dbh) {
- $self->_populate_dbh;
- $dbh = $self->_dbh;
- }
if($want_array) {
@result = $self->$code($dbh, @_);
}
};
+ # ->connected might unset $@ - copy
my $exception = $@;
if(!$exception) { return $want_array ? @result : $result[0] }
# We were not connected - reconnect and retry, but let any
# exception fall right through this time
+ carp "Retrying $code after catching disconnected exception: $exception"
+ if $ENV{DBIC_DBIRETRY_DEBUG};
$self->_populate_dbh;
$self->$code($self->_dbh, @_);
}
my $tried = 0;
while(1) {
eval {
- $self->_verify_pid if $self->_dbh;
- $self->_populate_dbh if !$self->_dbh;
+ $self->_get_dbh;
$self->txn_begin;
if($want_array) {
$self->txn_commit;
};
+ # ->connected might unset $@ - copy
my $exception = $@;
if(!$exception) { return $want_array ? @result : $result[0] }
- if($tried++ > 0 || $self->connected) {
+ if($tried++ || $self->connected) {
eval { $self->txn_rollback };
my $rollback_exception = $@;
if($rollback_exception) {
# We were not connected, and was first try - reconnect and retry
# via the while loop
+ carp "Retrying $coderef after catching disconnected exception: $exception"
+ if $ENV{DBIC_DBIRETRY_DEBUG};
$self->_populate_dbh;
}
}
sub disconnect {
my ($self) = @_;
- if( $self->connected ) {
+ if( $self->_dbh ) {
my @actions;
push @actions, ( $self->on_disconnect_call || () );
$self->_do_connection_actions(disconnect_call_ => $_) for @actions;
- $self->_dbh->rollback unless $self->_dbh_autocommit;
+ $self->_dbh_rollback unless $self->_dbh_autocommit;
+
$self->_dbh->disconnect;
$self->_dbh(undef);
$self->{_dbh_gen}++;
# Storage subclasses should override this
sub with_deferred_fk_checks {
my ($self, $sub) = @_;
-
$sub->();
}
+=head2 connected
+
+=over
+
+=item Arguments: none
+
+=item Return Value: 1|0
+
+=back
+
+Verifies that the current database handle is active and ready to execute
+an SQL statement (i.e. the connection did not get stale, the server is still
+answering, etc.). This method is used internally by L</dbh>.
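+
+For example (an illustrative sketch; C<$schema> is assumed to be an
+already-constructed L<DBIx::Class::Schema> instance):
+
+  $schema->storage->ensure_connected
+    unless $schema->storage->connected;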
+
+=cut
+
sub connected {
- my ($self) = @_;
+ my $self = shift;
+ return 0 unless $self->_seems_connected;
- if(my $dbh = $self->_dbh) {
- if(defined $self->_conn_tid && $self->_conn_tid != threads->tid) {
- $self->_dbh(undef);
- $self->{_dbh_gen}++;
- return;
- }
- else {
- $self->_verify_pid;
- return 0 if !$self->_dbh;
- }
- return ($dbh->FETCH('Active') && $self->_ping);
+  # be on the safe side
+ local $self->_dbh->{RaiseError} = 1;
+
+ return $self->_ping;
+}
+
+sub _seems_connected {
+ my $self = shift;
+
+ my $dbh = $self->_dbh
+ or return 0;
+
+ if(defined $self->_conn_tid && $self->_conn_tid != threads->tid) {
+ $self->_dbh(undef);
+ $self->{_dbh_gen}++;
+ return 0;
+ }
+ else {
+ $self->_verify_pid;
+ return 0 if !$self->_dbh;
}
- return 0;
+ return $dbh->FETCH('Active');
}
sub _ping {
=head2 dbh
-Returns the dbh - a data base handle of class L<DBI>.
+Returns a C<$dbh> - a database handle of class L<DBI>. The returned handle
+is guaranteed to be healthy by implicitly calling L</connected>, and if
+necessary performing a reconnection before returning. Keep in mind that this
+is very B<expensive> on some database engines. Consider using L</dbh_do>
+instead.
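+
+For example, a one-off statement could be issued either way (an illustrative
+sketch - the C<artist> table and its C<rank> column are made up):
+
+  # pings (and possibly reconnects) before returning the handle
+  $schema->storage->dbh->do('UPDATE artist SET rank = rank + 1');
+
+  # lets the storage layer handle connectivity and retry on disconnects
+  $schema->storage->dbh_do(sub {
+    my ($storage, $dbh) = @_;
+    $dbh->do('UPDATE artist SET rank = rank + 1');
+  });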
=cut
sub dbh {
my ($self) = @_;
- $self->ensure_connected;
+ if (not $self->_dbh) {
+ $self->_populate_dbh;
+ } else {
+ $self->ensure_connected;
+ }
+ return $self->_dbh;
+}
+
+# this is the internal "get dbh or connect (don't check)" method
+sub _get_dbh {
+ my $self = shift;
+ $self->_verify_pid if $self->_dbh;
+ $self->_populate_dbh unless $self->_dbh;
return $self->_dbh;
}
sub _sql_maker_args {
my ($self) = @_;
- return ( bindtype=>'columns', array_datatypes => 1, limit_dialect => $self->dbh, %{$self->_sql_maker_opts} );
+ return (
+    bindtype => 'columns',
+ array_datatypes => 1,
+ limit_dialect => $self->_get_dbh,
+ %{$self->_sql_maker_opts}
+ );
}
sub sql_maker {
return $self->_sql_maker;
}
+# nothing to do by default
sub _rebless {}
+sub _init {}
sub _populate_dbh {
my ($self) = @_;
+
my @info = @{$self->_dbi_connect_info || []};
+ $self->_dbh(undef); # in case ->connected failed we might get sent here
$self->_dbh($self->_connect(@info));
$self->_conn_pid($$);
# there is no transaction in progress by definition
$self->{transaction_depth} = $self->_dbh_autocommit ? 0 : 1;
+ $self->_run_connection_actions unless $self->{_in_determine_driver};
+}
+
+sub _run_connection_actions {
+ my $self = shift;
my @actions;
push @actions, ( $self->on_connect_call || () );
sub _determine_driver {
my ($self) = @_;
- if (not $self->_driver_determined) {
+ if ((not $self->_driver_determined) && (not $self->{_in_determine_driver})) {
+ my $started_connected = 0;
+ local $self->{_in_determine_driver} = 1;
+
if (ref($self) eq __PACKAGE__) {
my $driver;
-
if ($self->_dbh) { # we are connected
$driver = $self->_dbh->{Driver}{Name};
+ $started_connected = 1;
} else {
- # try to use dsn to not require being connected, the driver may still
- # force a connection in _rebless to determine version
- ($driver) = $self->_dbi_connect_info->[0] =~ /dbi:([^:]+):/i;
+ # if connect_info is a CODEREF, we have no choice but to connect
+ if (ref $self->_dbi_connect_info->[0] &&
+ Scalar::Util::reftype($self->_dbi_connect_info->[0]) eq 'CODE') {
+ $self->_populate_dbh;
+ $driver = $self->_dbh->{Driver}{Name};
+ }
+ else {
+ # try to use dsn to not require being connected, the driver may still
+ # force a connection in _rebless to determine version
+ ($driver) = $self->_dbi_connect_info->[0] =~ /dbi:([^:]+):/i;
+ }
}
my $storage_class = "DBIx::Class::Storage::DBI::${driver}";
}
$self->_driver_determined(1);
+
+ $self->_init; # run driver-specific initializations
+
+ $self->_run_connection_actions
+ if !$started_connected && defined $self->_dbh;
}
}
my @bind = map { [ undef, $_ ] } @do_args;
$self->_query_start($sql, @bind);
- $self->_dbh->do($sql, $attrs, @do_args);
+ $self->_get_dbh->do($sql, $attrs, @do_args);
$self->_query_end($sql, @bind);
}
$weak_self->throw_exception("DBI Exception: $_[0]");
}
else {
+ # the handler may be invoked by something totally out of
+ # the scope of DBIC
croak ("DBI Exception: $_[0]");
}
};
sub txn_begin {
my $self = shift;
- $self->ensure_connected();
if($self->{transaction_depth} == 0) {
$self->debugobj->txn_begin()
if $self->debug;
- # this isn't ->_dbh-> because
- # we should reconnect on begin_work
- # for AutoCommit users
- $self->dbh->begin_work;
- } elsif ($self->auto_savepoint) {
+ $self->_dbh_begin_work;
+ }
+ elsif ($self->auto_savepoint) {
$self->svp_begin;
}
$self->{transaction_depth}++;
}
+sub _dbh_begin_work {
+ my $self = shift;
+
+ # if the user is utilizing txn_do - good for him, otherwise we need to
+ # ensure that the $dbh is healthy on BEGIN.
+ # We do this via ->dbh_do instead of ->dbh, so that the ->dbh "ping"
+  # will be replaced by a failure of begin_work itself (which will
+  # then be retried on reconnect)
+ if ($self->{_in_dbh_do}) {
+ $self->_dbh->begin_work;
+ } else {
+ $self->dbh_do(sub { $_[1]->begin_work });
+ }
+}
+
sub txn_commit {
my $self = shift;
if ($self->{transaction_depth} == 1) {
- my $dbh = $self->_dbh;
$self->debugobj->txn_commit()
if ($self->debug);
- $dbh->commit;
+ $self->_dbh_commit;
$self->{transaction_depth} = 0
if $self->_dbh_autocommit;
}
}
}
+sub _dbh_commit {
+ my $self = shift;
+ my $dbh = $self->_dbh
+ or $self->throw_exception('cannot COMMIT on a disconnected handle');
+ $dbh->commit;
+}
+
sub txn_rollback {
my $self = shift;
my $dbh = $self->_dbh;
if ($self->debug);
$self->{transaction_depth} = 0
if $self->_dbh_autocommit;
- $dbh->rollback;
+ $self->_dbh_rollback;
}
elsif($self->{transaction_depth} > 1) {
$self->{transaction_depth}--;
}
}
+sub _dbh_rollback {
+ my $self = shift;
+ my $dbh = $self->_dbh
+ or $self->throw_exception('cannot ROLLBACK on a disconnected handle');
+ $dbh->rollback;
+}
+
# This used to be the top-half of _execute. It was split out to make it
# easier to override in NoBindVars without duping the rest. It takes up
# all of _execute's args, and emits $sql, @bind.
sub _execute {
my $self = shift;
- $self->dbh_do('_dbh_execute', @_)
+ $self->dbh_do('_dbh_execute', @_); # retry over disconnects
}
sub insert {
my ($self, $source, $to_insert) = @_;
-# redispatch to insert method of storage we reblessed into, if necessary
- if (not $self->_driver_determined) {
- $self->_determine_driver;
- goto $self->can('insert');
- }
-
my $ident = $source->from;
my $bind_attributes = $self->source_bind_attributes($source);
my $col_info = $source->column_info($col);
if ( $col_info->{auto_nextval} ) {
- $updated_cols->{$col} = $to_insert->{$col} = $self->_sequence_fetch( 'nextval', $col_info->{sequence} || $self->_dbh_get_autoinc_seq($self->dbh, $source) );
+ $updated_cols->{$col} = $to_insert->{$col} = $self->_sequence_fetch(
+ 'nextval',
+ $col_info->{sequence} ||
+ $self->_dbh_get_autoinc_seq($self->_get_dbh, $source)
+ );
}
}
}
return $updated_cols;
}
-## Still not quite perfect, and EXPERIMENTAL
## Currently it is assumed that all values passed will be "normal", i.e. not
## scalar refs, or at least, all the same type as the first set, the statement is
## only prepped once.
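+## An illustrative sketch of the arguments this method expects (the column
+## names and rows are made up):
+##   $cols = [ qw/name year/ ];
+##   $data = [ [ 'First Book', 1999 ], [ 'Second Book', 2005 ] ];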
sub insert_bulk {
my ($self, $source, $cols, $data) = @_;
+
my %colvalues;
- my $table = $source->from;
@colvalues{@$cols} = (0..$#$cols);
- my ($sql, @bind) = $self->sql_maker->insert($table, \%colvalues);
- $self->_query_start( $sql, @bind );
+ for my $i (0..$#$cols) {
+ my $first_val = $data->[0][$i];
+ next unless ref $first_val eq 'SCALAR';
+
+ $colvalues{ $cols->[$i] } = $first_val;
+ }
+
+ # check for bad data and stringify stringifiable objects
+ my $bad_slice = sub {
+ my ($msg, $col_idx, $slice_idx) = @_;
+ $self->throw_exception(sprintf "%s for column '%s' in populate slice:\n%s",
+ $msg,
+ $cols->[$col_idx],
+ do {
+ local $Data::Dumper::Maxdepth = 1; # don't dump objects, if any
+ Data::Dumper::Concise::Dumper({
+ map { $cols->[$_] => $data->[$slice_idx][$_] } (0 .. $#$cols)
+ }),
+ }
+ );
+ };
+
+ for my $datum_idx (0..$#$data) {
+ my $datum = $data->[$datum_idx];
+
+ for my $col_idx (0..$#$cols) {
+ my $val = $datum->[$col_idx];
+ my $sqla_bind = $colvalues{ $cols->[$col_idx] };
+ my $is_literal_sql = (ref $sqla_bind) eq 'SCALAR';
+
+ if ($is_literal_sql) {
+ if (not ref $val) {
+ $bad_slice->('bind found where literal SQL expected', $col_idx, $datum_idx);
+ }
+ elsif ((my $reftype = ref $val) ne 'SCALAR') {
+ $bad_slice->("$reftype reference found where literal SQL expected",
+ $col_idx, $datum_idx);
+ }
+ elsif ($$val ne $$sqla_bind){
+ $bad_slice->("inconsistent literal SQL value, expecting: '$$sqla_bind'",
+ $col_idx, $datum_idx);
+ }
+ }
+ elsif (my $reftype = ref $val) {
+ require overload;
+ if (overload::Method($val, '""')) {
+ $datum->[$col_idx] = "".$val;
+ }
+ else {
+ $bad_slice->("$reftype reference found where bind expected",
+ $col_idx, $datum_idx);
+ }
+ }
+ }
+ }
+
+ my ($sql, $bind) = $self->_prep_for_execute (
+ 'insert', undef, $source, [\%colvalues]
+ );
+ my @bind = @$bind;
+
+  # avoid 'my $x = ... if ...' - its behaviour is not well defined
+  my $empty_bind = ( (not @bind)
+    && (grep { ref $_ eq 'SCALAR' } values %colvalues) == @$cols ) ? 1 : 0;
+
+ if ((not @bind) && (not $empty_bind)) {
+ $self->throw_exception(
+ 'Cannot insert_bulk without support for placeholders'
+ );
+ }
+
+ $self->_query_start( $sql, ['__BULK__'] );
my $sth = $self->sth($sql);
-# @bind = map { ref $_ ? ''.$_ : $_ } @bind; # stringify args
+ my $rv = do {
+ if ($empty_bind) {
+ # bind_param_array doesn't work if there are no binds
+ $self->_dbh_execute_inserts_with_no_binds( $sth, scalar @$data );
+ }
+ else {
+# @bind = map { ref $_ ? ''.$_ : $_ } @bind; # stringify args
+ $self->_execute_array( $source, $sth, \@bind, $cols, $data );
+ }
+ };
+
+ $self->_query_end( $sql, ['__BULK__'] );
+
+ return (wantarray ? ($rv, $sth, @bind) : $rv);
+}
+
+sub _execute_array {
+ my ($self, $source, $sth, $bind, $cols, $data, @extra) = @_;
+
+  my $guard = $self->{transaction_depth} == 0 ? $self->txn_scope_guard : undef;
## This must be an arrayref, else nothing works!
my $tuple_status = [];
## Bind the values and execute
my $placeholder_index = 1;
- foreach my $bound (@bind) {
+ foreach my $bound (@$bind) {
my $attributes = {};
my ($column_name, $data_index) = @$bound;
$sth->bind_param_array( $placeholder_index, [@data], $attributes );
$placeholder_index++;
}
- my $rv = eval { $sth->execute_array({ArrayTupleStatus => $tuple_status}) };
- if (my $err = $@) {
+
+ my $rv = eval {
+ $self->_dbh_execute_array($sth, $tuple_status, @extra);
+ };
+ my $err = $@ || $sth->errstr;
+
+# Statement must finish even if there was an exception.
+ eval { $sth->finish };
+ $err = $@ unless $err;
+
+ if ($err) {
my $i = 0;
++$i while $i <= $#$tuple_status && !ref $tuple_status->[$i];
- $self->throw_exception($sth->errstr || "Unexpected populate error: $err")
+ $self->throw_exception("Unexpected populate error: $err")
if ($i > $#$tuple_status);
- require Data::Dumper;
- local $Data::Dumper::Terse = 1;
- local $Data::Dumper::Indent = 1;
- local $Data::Dumper::Useqq = 1;
- local $Data::Dumper::Quotekeys = 0;
-
$self->throw_exception(sprintf "%s for populate slice:\n%s",
- $tuple_status->[$i][1],
- Data::Dumper::Dumper(
- { map { $cols->[$_] => $data->[$i][$_] } (0 .. $#$cols) }
- ),
+ ($tuple_status->[$i][1] || $err),
+ Data::Dumper::Concise::Dumper({
+ map { $cols->[$_] => $data->[$i][$_] } (0 .. $#$cols)
+ }),
);
}
- $self->throw_exception($sth->errstr) if !$rv;
- $self->_query_end( $sql, @bind );
- return (wantarray ? ($rv, $sth, @bind) : $rv);
+ $guard->commit if $guard;
+
+ return $rv;
+}
+
+sub _dbh_execute_array {
+ my ($self, $sth, $tuple_status, @extra) = @_;
+
+ return $sth->execute_array({ArrayTupleStatus => $tuple_status});
+}
+
+sub _dbh_execute_inserts_with_no_binds {
+ my ($self, $sth, $count) = @_;
+
+  my $guard = $self->{transaction_depth} == 0 ? $self->txn_scope_guard : undef;
+
+ eval {
+ my $dbh = $self->_get_dbh;
+ local $dbh->{RaiseError} = 1;
+ local $dbh->{PrintError} = 0;
+
+ $sth->execute foreach 1..$count;
+ };
+ my $exception = $@;
+
+# Make sure statement is finished even if there was an exception.
+ eval { $sth->finish };
+ $exception = $@ unless $exception;
+
+ $self->throw_exception($exception) if $exception;
+
+ $guard->commit if $guard;
+
+ return $count;
}
sub update {
- my $self = shift @_;
- my $source = shift @_;
- my $bind_attributes = $self->source_bind_attributes($source);
+ my ($self, $source, @args) = @_;
+
+ my $bind_attrs = $self->source_bind_attributes($source);
- return $self->_execute('update' => [], $source, $bind_attributes, @_);
+ return $self->_execute('update' => [], $source, $bind_attrs, @args);
}
sub delete {
- my $self = shift @_;
- my $source = shift @_;
+ my ($self, $source, @args) = @_;
my $bind_attrs = $self->source_bind_attributes($source);
- return $self->_execute('delete' => [], $source, $bind_attrs, @_);
+ return $self->_execute('delete' => [], $source, $bind_attrs, @args);
}
# We were sent here because the $rs contains a complex search
# which will require a subquery to select the correct rows
-# (i.e. joined or limited resultsets)
+# (i.e. joined or limited resultsets, or non-introspectable conditions)
#
-# Genarating a single PK column subquery is trivial and supported
+# Generating a single PK column subquery is trivial and supported
# by all RDBMS. However if we have a multicolumn PK, things get ugly.
# Look at _multipk_update_delete()
sub _subq_update_delete {
my $rsrc = $rs->result_source;
- # we already check this, but double check naively just in case. Should be removed soon
+ # quick check if we got a sane rs on our hands
+ my @pcols = $rsrc->primary_columns;
+ unless (@pcols) {
+ $self->throw_exception (
+ sprintf (
+ "You must declare primary key(s) on source '%s' (via set_primary_key) in order to update or delete complex resultsets",
+ $rsrc->source_name || $rsrc->from
+ )
+ );
+ }
+
my $sel = $rs->_resolved_attrs->{select};
$sel = [ $sel ] unless ref $sel eq 'ARRAY';
- my @pcols = $rsrc->primary_columns;
- if (@$sel != @pcols) {
+
+ if (
+ join ("\x00", map { join '.', $rs->{attrs}{alias}, $_ } sort @pcols)
+ ne
+ join ("\x00", sort @$sel )
+ ) {
$self->throw_exception (
- 'Subquery update/delete can not be called on resultsets selecting a'
- .' number of columns different than the number of primary keys'
+ '_subq_update_delete can not be called on resultsets selecting columns other than the primary keys'
);
}
my $fqcn = join ('.', $alias, $col);
$bind_attrs->{$fqcn} = $bindtypes->{$col} if $bindtypes->{$col};
- # so that unqualified searches can be bound too
- $bind_attrs->{$col} = $bind_attrs->{$fqcn} if $alias eq $rs_alias;
+  # Unqualified column names are nice, but at the same time can be
+ # rather ambiguous. What we do here is basically go along with
+ # the loop, adding an unqualified column slot to $bind_attrs,
+ # alongside the fully qualified name. As soon as we encounter
+ # another column by that name (which would imply another table)
+ # we unset the unqualified slot and never add any info to it
+  # to avoid erroneous type binding. If this happens, the user's
+  # only choice will be to fully qualify the column name
+
+ if (exists $bind_attrs->{$col}) {
+ $bind_attrs->{$col} = {};
+ }
+ else {
+ $bind_attrs->{$col} = $bind_attrs->{$fqcn};
+ }
}
}
my @limit;
- # see if we need to tear the prefetch apart (either limited has_many or grouped prefetch)
- # otherwise delegate the limiting to the storage, unless software limit was requested
+  # see if we need to tear the prefetch apart, otherwise delegate the limiting
+  # to the storage, unless a software limit was requested
if (
+ #limited has_many
( $attrs->{rows} && keys %{$attrs->{collapse}} )
||
- ( $attrs->{group_by} && @{$attrs->{group_by}} &&
- $attrs->{prefetch_select} && @{$attrs->{prefetch_select}} )
+ # limited prefetch with RNO subqueries
+ (
+ $attrs->{rows}
+ &&
+ $sql_maker->limit_dialect eq 'RowNumberOver'
+ &&
+ $attrs->{_prefetch_select}
+ &&
+ @{$attrs->{_prefetch_select}}
+ )
+ ||
+ # grouped prefetch
+ ( $attrs->{group_by}
+ &&
+ @{$attrs->{group_by}}
+ &&
+ $attrs->{_prefetch_select}
+ &&
+ @{$attrs->{_prefetch_select}}
+ )
) {
($ident, $select, $where, $attrs)
= $self->_adjust_select_args_for_complex_prefetch ($ident, $select, $where, $attrs);
}
+
+ elsif (
+ ($attrs->{rows} || $attrs->{offset})
+ &&
+ $sql_maker->limit_dialect eq 'RowNumberOver'
+ &&
+ (ref $ident eq 'ARRAY' && @$ident > 1) # indicates a join
+ &&
+ scalar $sql_maker->_order_by_chunks ($attrs->{order_by})
+ ) {
+ # the RNO limit dialect above mangles the SQL such that the join gets lost
+ # wrap a subquery here
+
+ push @limit, delete @{$attrs}{qw/rows offset/};
+
+ my $subq = $self->_select_args_to_query (
+ $ident,
+ $select,
+ $where,
+ $attrs,
+ );
+
+ $ident = {
+ -alias => $attrs->{alias},
+ -source_handle => $ident->[0]{-source_handle},
+ $attrs->{alias} => $subq,
+ };
+
+ # all part of the subquery now
+ delete @{$attrs}{qw/order_by group_by having/};
+ $where = undef;
+ }
+
elsif (! $attrs->{software_limit} ) {
push @limit, $attrs->{rows}, $attrs->{offset};
}
return ('select', $attrs->{bind}, $ident, $bind_attrs, $select, $where, $order, @limit);
}
-#
-# This is the code producing joined subqueries like:
-# SELECT me.*, other.* FROM ( SELECT me.* FROM ... ) JOIN other ON ...
-#
-sub _adjust_select_args_for_complex_prefetch {
- my ($self, $from, $select, $where, $attrs) = @_;
-
- $self->throw_exception ('Complex prefetches are not supported on resultsets with a custom from attribute')
- if (ref $from ne 'ARRAY');
-
- # copies for mangling
- $from = [ @$from ];
- $select = [ @$select ];
- $attrs = { %$attrs };
-
- # separate attributes
- my $sub_attrs = { %$attrs };
- delete $attrs->{$_} for qw/where bind rows offset group_by having/;
- delete $sub_attrs->{$_} for qw/for collapse prefetch_select _collapse_order_by select as/;
-
- my $alias = $attrs->{alias};
- my $sql_maker = $self->sql_maker;
-
- # create subquery select list - consider only stuff *not* brought in by the prefetch
- my $sub_select = [];
- for my $i (0 .. @{$attrs->{select}} - @{$attrs->{prefetch_select}} - 1) {
- my $sel = $attrs->{select}[$i];
-
- # alias any functions to the dbic-side 'as' label
- # adjust the outer select accordingly
- if (ref $sel eq 'HASH' && !$sel->{-select}) {
- $sel = { -select => $sel, -as => $attrs->{as}[$i] };
- $select->[$i] = join ('.', $attrs->{alias}, ($attrs->{as}[$i] || "select_$i") );
- }
-
- push @$sub_select, $sel;
- }
-
- # bring over all non-collapse-induced order_by into the inner query (if any)
- # the outer one will have to keep them all
- delete $sub_attrs->{order_by};
- if (my $ord_cnt = @{$attrs->{order_by}} - @{$attrs->{_collapse_order_by}} ) {
- $sub_attrs->{order_by} = [
- @{$attrs->{order_by}}[ 0 .. $ord_cnt - 1]
- ];
- }
-
- # mangle {from}
- my $join_root = shift @$from;
- my @outer_from = @$from;
-
- my %inner_joins;
- my %join_info = map { $_->[0]{-alias} => $_->[0] } (@$from);
-
- # in complex search_related chains $alias may *not* be 'me'
- # so always include it in the inner join, and also shift away
- # from the outer stack, so that the two datasets actually do
- # meet
- if ($join_root->{-alias} ne $alias) {
- $inner_joins{$alias} = 1;
-
- while (@outer_from && $outer_from[0][0]{-alias} ne $alias) {
- shift @outer_from;
- }
- if (! @outer_from) {
- $self->throw_exception ("Unable to find '$alias' in the {from} stack, something is wrong");
- }
-
- shift @outer_from; # the new subquery will represent this alias, so get rid of it
- }
-
-
- # decide which parts of the join will remain on the inside
- #
- # this is not a very viable optimisation, but it was written
- # before I realised this, so might as well remain. We can throw
- # away _any_ branches of the join tree that are:
- # 1) not mentioned in the condition/order
- # 2) left-join leaves (or left-join leaf chains)
- # Most of the join conditions will not satisfy this, but for real
- # complex queries some might, and we might make some RDBMS happy.
- #
- #
- # since we do not have introspectable SQLA, we fall back to ugly
- # scanning of raw SQL for WHERE, and for pieces of ORDER BY
- # in order to determine what goes into %inner_joins
- # It may not be very efficient, but it's a reasonable stop-gap
- {
- # produce stuff unquoted, so it can be scanned
- local $sql_maker->{quote_char};
- my $sep = $self->_sql_maker_opts->{name_sep} || '.';
- $sep = "\Q$sep\E";
-
- my @order_by = (map
- { ref $_ ? $_->[0] : $_ }
- $sql_maker->_order_by_chunks ($sub_attrs->{order_by})
- );
-
- my $where_sql = $sql_maker->where ($where);
- my $select_sql = $sql_maker->_recurse_fields ($sub_select);
-
- # sort needed joins
- for my $alias (keys %join_info) {
-
- # any table alias found on a column name in where or order_by
- # gets included in %inner_joins
- # Also any parent joins that are needed to reach this particular alias
- for my $piece ($select_sql, $where_sql, @order_by ) {
- if ($piece =~ /\b $alias $sep/x) {
- $inner_joins{$alias} = 1;
- }
- }
- }
- }
-
- # scan for non-leaf/non-left joins and mark as needed
- # also mark all ancestor joins that are needed to reach this particular alias
- # (e.g. join => { cds => 'tracks' } - tracks will bring cds too )
- #
- # traverse by the size of the -join_path i.e. reverse depth first
- for my $alias (sort { @{$join_info{$b}{-join_path}} <=> @{$join_info{$a}{-join_path}} } (keys %join_info) ) {
-
- my $j = $join_info{$alias};
- $inner_joins{$alias} = 1 if (! $j->{-join_type} || ($j->{-join_type} !~ /^left$/i) );
-
- if ($inner_joins{$alias}) {
- $inner_joins{$_} = 1 for (@{$j->{-join_path}});
- }
- }
-
- # construct the inner $from for the subquery
- my $inner_from = [ $join_root ];
- for my $j (@$from) {
- push @$inner_from, $j if $inner_joins{$j->[0]{-alias}};
- }
-
- # if a multi-type join was needed in the subquery ("multi" is indicated by
- # presence in {collapse}) - add a group_by to simulate the collapse in the subq
- for my $alias (keys %inner_joins) {
-
- # the dot comes from some weirdness in collapse
- # remove after the rewrite
- if ($attrs->{collapse}{".$alias"}) {
- $sub_attrs->{group_by} ||= $sub_select;
- last;
- }
- }
-
- # generate the subquery
- my $subq = $self->_select_args_to_query (
- $inner_from,
- $sub_select,
- $where,
- $sub_attrs
- );
-
- # put it in the new {from}
- unshift @outer_from, {
- -alias => $alias,
- -source_handle => $join_root->{-source_handle},
- $alias => $subq,
- };
-
- # This is totally horrific - the $where ends up in both the inner and outer query
- # Unfortunately not much can be done until SQLA2 introspection arrives, and even
- # then if where conditions apply to the *right* side of the prefetch, you may have
- # to both filter the inner select (e.g. to apply a limit) and then have to re-filter
- # the outer select to exclude joins you didin't want in the first place
- #
- # OTOH it can be seen as a plus: <ash> (notes that this query would make a DBA cry ;)
- return (\@outer_from, $select, $where, $attrs);
-}
-
-sub _resolve_ident_sources {
- my ($self, $ident) = @_;
-
- my $alias2source = {};
- my $rs_alias;
-
- # the reason this is so contrived is that $ident may be a {from}
- # structure, specifying multiple tables to join
- if ( Scalar::Util::blessed($ident) && $ident->isa("DBIx::Class::ResultSource") ) {
- # this is compat mode for insert/update/delete which do not deal with aliases
- $alias2source->{me} = $ident;
- $rs_alias = 'me';
- }
- elsif (ref $ident eq 'ARRAY') {
-
- for (@$ident) {
- my $tabinfo;
- if (ref $_ eq 'HASH') {
- $tabinfo = $_;
- $rs_alias = $tabinfo->{-alias};
- }
- if (ref $_ eq 'ARRAY' and ref $_->[0] eq 'HASH') {
- $tabinfo = $_->[0];
- }
-
- $alias2source->{$tabinfo->{-alias}} = $tabinfo->{-source_handle}->resolve
- if ($tabinfo->{-source_handle});
- }
- }
-
- return ($alias2source, $rs_alias);
-}
-
-# Takes $ident, \@column_names
-#
-# returns { $column_name => \%column_info, ... }
-# also note: this adds -result_source => $rsrc to the column info
-#
-# usage:
-# my $col_sources = $self->_resolve_column_info($ident, [map $_->[0], @{$bind}]);
-sub _resolve_column_info {
- my ($self, $ident, $colnames) = @_;
- my ($alias2src, $root_alias) = $self->_resolve_ident_sources($ident);
-
- my $sep = $self->_sql_maker_opts->{name_sep} || '.';
- $sep = "\Q$sep\E";
-
- my (%return, %converted);
-
- if (not $colnames) {
- $colnames = [ map {
- my $alias = $_;
- my $source = $alias2src->{$alias};
- map "${alias}${sep}$_", $source->columns
- } keys %$alias2src ];
-
-# also add unqualified columns for 'me' table
- push @$colnames, $alias2src->{$root_alias}->columns;
- }
-
- foreach my $col (@$colnames) {
- my ($alias, $colname) = $col =~ m/^ (?: ([^$sep]+) $sep)? (.+) $/x;
-
- # deal with unqualified cols - we assume the main alias for all
- # unqualified ones, ugly but can't think of anything better right now
- $alias ||= $root_alias;
-
- my $rsrc = $alias2src->{$alias};
- $return{$col} = $rsrc && { %{$rsrc->column_info($colname)}, -result_source => $rsrc };
- }
- return \%return;
-}
-
# Returns a counting SELECT for a simple count
# query. Abstracted so that a storage could override
# this to { count => 'firstcol' } or whatever makes
return @pcols ? \@pcols : [ 1 ];
}
-
sub source_bind_attributes {
my ($self, $source) = @_;
sub sth {
my ($self, $sql) = @_;
- $self->dbh_do('_dbh_sth', $sql);
+ $self->dbh_do('_dbh_sth', $sql); # retry over disconnects
}
sub _dbh_columns_info_for {
sub columns_info_for {
my ($self, $table) = @_;
- $self->dbh_do('_dbh_columns_info_for', $table);
+ $self->_dbh_columns_info_for ($self->_get_dbh, $table);
}
=head2 last_insert_id
sub last_insert_id {
my $self = shift;
- $self->dbh_do('_dbh_last_insert_id', @_);
+ $self->_dbh_last_insert_id ($self->_dbh, @_);
+}
+
+=head2 _native_data_type
+
+=over 4
+
+=item Arguments: $type_name
+
+=back
+
+This API is B<EXPERIMENTAL>, will almost definitely change in the future, and
+is currently only used by L<::AutoCast|DBIx::Class::Storage::DBI::AutoCast> and
+L<::Sybase::ASE|DBIx::Class::Storage::DBI::Sybase::ASE>.
+
+The default implementation returns C<undef>; implement it in your Storage driver
+if you need this functionality.
+
+Should map types from other databases to the native RDBMS type, for example
+C<VARCHAR2> to C<VARCHAR>.
+
+Types with modifiers should map to the underlying data type. For example,
+C<INTEGER AUTO_INCREMENT> should become C<INTEGER>.
+
+Composite types should map to the container type, for example
+C<ENUM(foo,bar,baz)> becomes C<ENUM>.
+
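+A driver subclass might override it along these lines (a hypothetical sketch,
+not an actual DBIx::Class driver):
+
+  sub _native_data_type {
+    my ($self, $type) = @_;
+
+    # strip modifiers/composite arguments, then map to the native name
+    $type = lc $type;
+    $type =~ s/\s* \( .*//x;           # ENUM(foo,bar) => ENUM
+    $type =~ s/\s+ auto_increment//x;  # INTEGER AUTO_INCREMENT => INTEGER
+
+    my %map = ( varchar2 => 'varchar' );
+    return $map{$type} || $type;
+  }
+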
+=cut
+
+sub _native_data_type {
+ #my ($self, $data_type) = @_;
+ return undef
+}
+
+# Check if placeholders are supported at all
+sub _placeholders_supported {
+ my $self = shift;
+ my $dbh = $self->_get_dbh;
+
+ # some drivers provide a $dbh attribute (e.g. Sybase and $dbh->{syb_dynamic_supported})
+ # but it is inaccurate more often than not
+ eval {
+ local $dbh->{PrintError} = 0;
+ local $dbh->{RaiseError} = 1;
+ $dbh->do('select ?', {}, 1);
+ };
+ return $@ ? 0 : 1;
+}
+
+# Check if placeholders bound to non-string types throw exceptions
+#
+sub _typeless_placeholders_supported {
+ my $self = shift;
+ my $dbh = $self->_get_dbh;
+
+ eval {
+ local $dbh->{PrintError} = 0;
+ local $dbh->{RaiseError} = 1;
+ # this specifically tests a bind that is NOT a string
+ $dbh->do('select 1 where 1 = ?', {}, 1);
+ };
+ return $@ ? 0 : 1;
}
=head2 sqlt_type
=cut
-sub sqlt_type { shift->dbh->{Driver}->{Name} }
+sub sqlt_type {
+ shift->_get_dbh->{Driver}->{Name};
+}
=head2 bind_attribute_by_data_type
}
-=head2 create_ddl_dir (EXPERIMENTAL)
+=head2 create_ddl_dir
=over 4
{ ignore_constraint_names => 0, # ... other options }
-Note that this feature is currently EXPERIMENTAL and may not work correctly
-across all databases, or fully handle complex relationships.
-
-WARNING: Please check all SQL files created, before applying them.
+WARNING: You are strongly advised to check all SQL files created before applying
+them.
=cut
%{$sqltargs || {}}
};
- $self->throw_exception(q{Can't create a ddl file without SQL::Translator 0.09003: '}
- . $self->_check_sqlt_message . q{'})
- if !$self->_check_sqlt_version;
+ $self->throw_exception("Can't create a ddl file without SQL::Translator: " . $self->_sqlt_version_error)
+ if !$self->_sqlt_version_ok;
my $sqlt = SQL::Translator->new( $sqltargs );
sub deployment_statements {
my ($self, $schema, $type, $version, $dir, $sqltargs) = @_;
- # Need to be connected to get the correct sqlt_type
- $self->ensure_connected() unless $type;
$type ||= $self->sqlt_type;
$version ||= $schema->schema_version || '1.x';
$dir ||= './';
return join('', @rows);
}
- $self->throw_exception(q{Can't deploy without SQL::Translator 0.09003: '}
- . $self->_check_sqlt_message . q{'})
- if !$self->_check_sqlt_version;
-
- require SQL::Translator::Parser::DBIx::Class;
- eval qq{use SQL::Translator::Producer::${type}};
- $self->throw_exception($@) if $@;
+ $self->throw_exception("Can't deploy without either SQL::Translator or a ddl_dir: " . $self->_sqlt_version_error )
+ if !$self->_sqlt_version_ok;
# sources needs to be a parser arg, but for simplicity allow at top level
# coming in
$sqltargs->{parser_args}{sources} = delete $sqltargs->{sources}
if exists $sqltargs->{sources};
- my $tr = SQL::Translator->new(%$sqltargs);
- SQL::Translator::Parser::DBIx::Class::parse( $tr, $schema );
- return "SQL::Translator::Producer::${type}"->can('produce')->($tr);
+ my $tr = SQL::Translator->new(
+ producer => "SQL::Translator::Producer::${type}",
+ %$sqltargs,
+ parser => 'SQL::Translator::Parser::DBIx::Class',
+ data => $schema,
+ );
+
+ my @ret;
+ my $wa = wantarray;
+ if ($wa) {
+ @ret = $tr->translate;
+ }
+ else {
+ $ret[0] = $tr->translate;
+ }
+
+ $self->throw_exception( 'Unable to produce deployment statements: ' . $tr->error)
+ unless (@ret && defined $ret[0]);
+
+ return $wa ? @ret : $ret[0];
}
sub deploy {
return if $line =~ /^\s+$/; # skip whitespace only
$self->_query_start($line);
eval {
- $self->dbh->do($line); # shouldn't be using ->dbh ?
+ # do a dbh_do cycle here, as we need some error checking in
+ # place (even though we will ignore errors)
+ $self->dbh_do (sub { $_[1]->do($line) });
};
if ($@) {
carp qq{$@ (running "${line}")};
sub datetime_parser {
my $self = shift;
return $self->{datetime_parser} ||= do {
- $self->ensure_connected;
$self->build_datetime_parser(@_);
};
}
sub build_datetime_parser {
my $self = shift;
my $type = $self->datetime_parser_type(@_);
- eval "use ${type}";
- $self->throw_exception("Couldn't load ${type}: $@") if $@;
+ $self->ensure_class_loaded ($type);
return $type;
}
-{
- my $_check_sqlt_version; # private
- my $_check_sqlt_message; # private
- sub _check_sqlt_version {
- return $_check_sqlt_version if defined $_check_sqlt_version;
- eval 'use SQL::Translator "0.09003"';
- $_check_sqlt_message = $@ || '';
- $_check_sqlt_version = !$@;
- }
-
- sub _check_sqlt_message {
- _check_sqlt_version if !defined $_check_sqlt_message;
- $_check_sqlt_message;
- }
-}
=head2 is_replicating
return;
}
+# SQLT version handling
+{
+ my $_sqlt_version_ok; # private
+ my $_sqlt_version_error; # private
+
+ sub _sqlt_version_ok {
+ if (!defined $_sqlt_version_ok) {
+ eval "use SQL::Translator $minimum_sqlt_version";
+ if ($@) {
+ $_sqlt_version_ok = 0;
+ $_sqlt_version_error = $@;
+ }
+ else {
+ $_sqlt_version_ok = 1;
+ }
+ }
+ return $_sqlt_version_ok;
+ }
+
+ sub _sqlt_version_error {
+ shift->_sqlt_version_ok unless defined $_sqlt_version_ok;
+ return $_sqlt_version_error;
+ }
+
+ sub _sqlt_minimum_version { $minimum_sqlt_version };
+}
+
+=head2 relname_to_table_alias
+
+=over 4
+
+=item Arguments: $relname, $join_count
+
+=back
+
+L<DBIx::Class> uses L<DBIx::Class::Relationship> names as table aliases in
+queries.
+
+This hook is to allow specific L<DBIx::Class::Storage> drivers to change the
+way these aliases are named.
+
+The default behavior is C<< "$relname_$join_count" if $join_count > 1 >>,
+otherwise C<"$relname">.
+
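+For example, with the default behavior a hypothetical relationship named
+C<books> would be aliased as follows:
+
+  $storage->relname_to_table_alias('books', 1);  # 'books'
+  $storage->relname_to_table_alias('books', 3);  # 'books_3'
+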
+=cut
+
+sub relname_to_table_alias {
+ my ($self, $relname, $join_count) = @_;
+
+ my $alias = ($join_count && $join_count > 1 ?
+ join('_', $relname, $join_count) : $relname);
+
+ return $alias;
+}
+
sub DESTROY {
my $self = shift;
- return if !$self->_dbh;
- $self->_verify_pid;
+
+ $self->_verify_pid if $self->_dbh;
+
+ # some databases need this to stop spewing warnings
+ if (my $dbh = $self->_dbh) {
+ local $@;
+ eval { $dbh->disconnect };
+ }
+
$self->_dbh(undef);
}
DBIx::Class can do some wonderful magic with handling exceptions,
disconnections, and transactions when you use C<< AutoCommit => 1 >>
-combined with C<txn_do> for transaction support.
+(the default) combined with C<txn_do> for transaction support.
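+
+For example (an illustrative sketch; the C<Artist> and C<CD> sources are made
+up):
+
+  $schema->txn_do(sub {
+    # if either create fails or the connection is lost mid-way,
+    # both inserts are rolled back together
+    $schema->resultset('Artist')->create({ name => 'Shiny New Band' });
+    $schema->resultset('CD')->create({ title => 'First Album' });
+  });
+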
If you set C<< AutoCommit => 0 >> in your connect info, then you are always
in an assumed transaction between commits, and you're telling us you'd
be with raw DBI.
-
=head1 AUTHORS
Matt S. Trout <mst@shadowcatsystems.co.uk>