use warnings;
use Carp::Clan qw/^DBIx::Class/;
use DBI;
-use SQL::Abstract::Limit;
+use DBIx::Class::SQLAHacks;
use DBIx::Class::Storage::DBI::Cursor;
use DBIx::Class::Storage::Statistics;
use Scalar::Util qw/blessed weaken/;
__PACKAGE__->mk_group_accessors('simple' =>
qw/_connect_info _dbi_connect_info _dbh _sql_maker _sql_maker_opts
- _conn_pid _conn_tid disable_sth_caching on_connect_do
- on_disconnect_do transaction_depth unsafe _dbh_autocommit
- auto_savepoint savepoints/
+ _conn_pid _conn_tid transaction_depth _dbh_autocommit savepoints/
);
-__PACKAGE__->cursor_class('DBIx::Class::Storage::DBI::Cursor');
-
-__PACKAGE__->mk_group_accessors('inherited' => qw/sql_maker_class/);
-__PACKAGE__->sql_maker_class('DBIC::SQL::Abstract');
-
-BEGIN {
-
-package # Hide from PAUSE
- DBIC::SQL::Abstract; # Would merge upstream, but nate doesn't reply :(
-
-use base qw/SQL::Abstract::Limit/;
-
-# This prevents the caching of $dbh in S::A::L, I believe
-sub new {
- my $self = shift->SUPER::new(@_);
-
- # If limit_dialect is a ref (like a $dbh), go ahead and replace
- # it with what it resolves to:
- $self->{limit_dialect} = $self->_find_syntax($self->{limit_dialect})
- if ref $self->{limit_dialect};
-
- $self;
-}
+# the values for these accessors are picked out (and deleted) from
+# the attribute hashref passed to connect_info
+my @storage_options = qw/
+ on_connect_do on_disconnect_do disable_sth_caching unsafe auto_savepoint
+/;
+__PACKAGE__->mk_group_accessors('simple' => @storage_options);
-sub _RowNumberOver {
- my ($self, $sql, $order, $rows, $offset ) = @_;
- $offset += 1;
- my $last = $rows + $offset;
- my ( $order_by ) = $self->_order_by( $order );
-
- $sql = <<"";
-SELECT * FROM
-(
- SELECT Q1.*, ROW_NUMBER() OVER( ) AS ROW_NUM FROM (
- $sql
- $order_by
- ) Q1
-) Q2
-WHERE ROW_NUM BETWEEN $offset AND $last
-
- return $sql;
-}
+# default cursor class, overridable in connect_info attributes
+__PACKAGE__->cursor_class('DBIx::Class::Storage::DBI::Cursor');
+__PACKAGE__->mk_group_accessors('inherited' => qw/sql_maker_class/);
+__PACKAGE__->sql_maker_class('DBIx::Class::SQLAHacks');
-# While we're at it, this should make LIMIT queries more efficient,
-# without digging into things too deeply
-use Scalar::Util 'blessed';
-sub _find_syntax {
- my ($self, $syntax) = @_;
- my $dbhname = blessed($syntax) ? $syntax->{Driver}{Name} : $syntax;
- if(ref($self) && $dbhname && $dbhname eq 'DB2') {
- return 'RowNumberOver';
- }
- $self->{_cached_syntax} ||= $self->SUPER::_find_syntax($syntax);
-}
+=head1 NAME
-sub select {
- my ($self, $table, $fields, $where, $order, @rest) = @_;
- $table = $self->_quote($table) unless ref($table);
- local $self->{rownum_hack_count} = 1
- if (defined $rest[0] && $self->{limit_dialect} eq 'RowNum');
- @rest = (-1) unless defined $rest[0];
- die "LIMIT 0 Does Not Compute" if $rest[0] == 0;
- # and anyway, SQL::Abstract::Limit will cause a barf if we don't first
- local $self->{having_bind} = [];
- my ($sql, @ret) = $self->SUPER::select(
- $table, $self->_recurse_fields($fields), $where, $order, @rest
- );
- $sql .=
- $self->{for} ?
- (
- $self->{for} eq 'update' ? ' FOR UPDATE' :
- $self->{for} eq 'shared' ? ' FOR SHARE' :
- ''
- ) :
- ''
- ;
- return wantarray ? ($sql, @ret, @{$self->{having_bind}}) : $sql;
-}
+DBIx::Class::Storage::DBI - DBI storage handler
-sub insert {
- my $self = shift;
- my $table = shift;
- $table = $self->_quote($table) unless ref($table);
- $self->SUPER::insert($table, @_);
-}
+=head1 SYNOPSIS
-sub update {
- my $self = shift;
- my $table = shift;
- $table = $self->_quote($table) unless ref($table);
- $self->SUPER::update($table, @_);
-}
+ my $schema = MySchema->connect('dbi:SQLite:my.db');
-sub delete {
- my $self = shift;
- my $table = shift;
- $table = $self->_quote($table) unless ref($table);
- $self->SUPER::delete($table, @_);
-}
+ $schema->storage->debug(1);
+ $schema->dbh_do("DROP TABLE authors");
-sub _emulate_limit {
- my $self = shift;
- if ($_[3] == -1) {
- return $_[1].$self->_order_by($_[2]);
- } else {
- return $self->SUPER::_emulate_limit(@_);
- }
-}
+ $schema->resultset('Book')->search({
+    written_on => $schema->storage->datetime_parser->format_datetime(DateTime->now)
+ });
-sub _recurse_fields {
- my ($self, $fields, $params) = @_;
- my $ref = ref $fields;
- return $self->_quote($fields) unless $ref;
- return $$fields if $ref eq 'SCALAR';
-
- if ($ref eq 'ARRAY') {
- return join(', ', map {
- $self->_recurse_fields($_)
- .(exists $self->{rownum_hack_count} && !($params && $params->{no_rownum_hack})
- ? ' AS col'.$self->{rownum_hack_count}++
- : '')
- } @$fields);
- } elsif ($ref eq 'HASH') {
- foreach my $func (keys %$fields) {
- return $self->_sqlcase($func)
- .'( '.$self->_recurse_fields($fields->{$func}).' )';
- }
- }
-}
+=head1 DESCRIPTION
-sub _order_by {
- my $self = shift;
- my $ret = '';
- my @extra;
- if (ref $_[0] eq 'HASH') {
- if (defined $_[0]->{group_by}) {
- $ret = $self->_sqlcase(' group by ')
- .$self->_recurse_fields($_[0]->{group_by}, { no_rownum_hack => 1 });
- }
- if (defined $_[0]->{having}) {
- my $frag;
- ($frag, @extra) = $self->_recurse_where($_[0]->{having});
- push(@{$self->{having_bind}}, @extra);
- $ret .= $self->_sqlcase(' having ').$frag;
- }
- if (defined $_[0]->{order_by}) {
- $ret .= $self->_order_by($_[0]->{order_by});
- }
- } elsif (ref $_[0] eq 'SCALAR') {
- $ret = $self->_sqlcase(' order by ').${ $_[0] };
- } elsif (ref $_[0] eq 'ARRAY' && @{$_[0]}) {
- my @order = @{+shift};
- $ret = $self->_sqlcase(' order by ')
- .join(', ', map {
- my $r = $self->_order_by($_, @_);
- $r =~ s/^ ?ORDER BY //i;
- $r;
- } @order);
- } else {
- $ret = $self->SUPER::_order_by(@_);
- }
- return $ret;
-}
+This class represents the connection to an RDBMS via L<DBI>. See
+L<DBIx::Class::Storage> for general information. This pod only
+documents DBI-specific methods and behaviors.
-sub _order_directions {
- my ($self, $order) = @_;
- $order = $order->{order_by} if ref $order eq 'HASH';
- return $self->SUPER::_order_directions($order);
-}
+=head1 METHODS
-sub _table {
- my ($self, $from) = @_;
- if (ref $from eq 'ARRAY') {
- return $self->_recurse_from(@$from);
- } elsif (ref $from eq 'HASH') {
- return $self->_make_as($from);
- } else {
- return $from; # would love to quote here but _table ends up getting called
- # twice during an ->select without a limit clause due to
- # the way S::A::Limit->select works. should maybe consider
- # bypassing this and doing S::A::select($self, ...) in
- # our select method above. meantime, quoting shims have
- # been added to select/insert/update/delete here
- }
-}
+=cut
-sub _recurse_from {
- my ($self, $from, @join) = @_;
- my @sqlf;
- push(@sqlf, $self->_make_as($from));
- foreach my $j (@join) {
- my ($to, $on) = @$j;
-
- # check whether a join type exists
- my $join_clause = '';
- my $to_jt = ref($to) eq 'ARRAY' ? $to->[0] : $to;
- if (ref($to_jt) eq 'HASH' and exists($to_jt->{-join_type})) {
- $join_clause = ' '.uc($to_jt->{-join_type}).' JOIN ';
- } else {
- $join_clause = ' JOIN ';
- }
- push(@sqlf, $join_clause);
+sub new {
+ my $new = shift->next::method(@_);
- if (ref $to eq 'ARRAY') {
- push(@sqlf, '(', $self->_recurse_from(@$to), ')');
- } else {
- push(@sqlf, $self->_make_as($to));
- }
- push(@sqlf, ' ON ', $self->_join_condition($on));
- }
- return join('', @sqlf);
-}
+ $new->transaction_depth(0);
+ $new->_sql_maker_opts({});
+ $new->{savepoints} = [];
+ $new->{_in_dbh_do} = 0;
+ $new->{_dbh_gen} = 0;
-sub _make_as {
- my ($self, $from) = @_;
- return join(' ', map { (ref $_ eq 'SCALAR' ? $$_ : $self->_quote($_)) }
- reverse each %{$self->_skip_options($from)});
+ $new;
}
-sub _skip_options {
- my ($self, $hash) = @_;
- my $clean_hash = {};
- $clean_hash->{$_} = $hash->{$_}
- for grep {!/^-/} keys %$hash;
- return $clean_hash;
-}
+=head2 connect_info
-sub _join_condition {
- my ($self, $cond) = @_;
- if (ref $cond eq 'HASH') {
- my %j;
- for (keys %$cond) {
- my $v = $cond->{$_};
- if (ref $v) {
- # XXX no throw_exception() in this package and croak() fails with strange results
- Carp::croak(ref($v) . qq{ reference arguments are not supported in JOINS - try using \"..." instead'})
- if ref($v) ne 'SCALAR';
- $j{$_} = $v;
- }
- else {
- my $x = '= '.$self->_quote($v); $j{$_} = \$x;
- }
- };
- return scalar($self->_recurse_where(\%j));
- } elsif (ref $cond eq 'ARRAY') {
- return join(' OR ', map { $self->_join_condition($_) } @$cond);
- } else {
- die "Can't handle this yet!";
- }
-}
+This method is normally called by L<DBIx::Class::Schema/connection>, which
+encapsulates its argument list in an arrayref before passing it here.
-sub _quote {
- my ($self, $label) = @_;
- return '' unless defined $label;
- return "*" if $label eq '*';
- return $label unless $self->{quote_char};
- if(ref $self->{quote_char} eq "ARRAY"){
- return $self->{quote_char}->[0] . $label . $self->{quote_char}->[1]
- if !defined $self->{name_sep};
- my $sep = $self->{name_sep};
- return join($self->{name_sep},
- map { $self->{quote_char}->[0] . $_ . $self->{quote_char}->[1] }
- split(/\Q$sep\E/,$label));
- }
- return $self->SUPER::_quote($label);
-}
+The argument list may contain:
-sub limit_dialect {
- my $self = shift;
- $self->{limit_dialect} = shift if @_;
- return $self->{limit_dialect};
-}
+=over
-sub quote_char {
- my $self = shift;
- $self->{quote_char} = shift if @_;
- return $self->{quote_char};
-}
+=item *
-sub name_sep {
- my $self = shift;
- $self->{name_sep} = shift if @_;
- return $self->{name_sep};
-}
+The same 4-element argument set one would normally pass to
+L<DBI/connect>, optionally followed by
+L<extra attributes|/DBIx::Class specific connection attributes>
+recognized by DBIx::Class:
-} # End of BEGIN block
+ $connect_info_args = [ $dsn, $user, $password, \%dbi_attributes?, \%extra_attributes? ];
-=head1 NAME
+=item *
-DBIx::Class::Storage::DBI - DBI storage handler
+A single code reference which returns a connected
+L<DBI database handle|DBI/connect>, optionally followed by
+L<extra attributes|/DBIx::Class specific connection attributes> recognized
+by DBIx::Class:
-=head1 SYNOPSIS
+ $connect_info_args = [ sub { DBI->connect (...) }, \%extra_attributes? ];
-=head1 DESCRIPTION
+=item *
-This class represents the connection to an RDBMS via L<DBI>. See
-L<DBIx::Class::Storage> for general information. This pod only
-documents DBI-specific methods and behaviors.
+A single hashref with all the attributes and the dsn/user/password
+mixed together:
-=head1 METHODS
+ $connect_info_args = [{
+ dsn => $dsn,
+ user => $user,
+ password => $pass,
+ %dbi_attributes,
+ %extra_attributes,
+ }];
-=cut
+This is particularly useful for L<Catalyst>-based applications, allowing the
+following config (L<Config::General> style):
-sub new {
- my $new = shift->next::method(@_);
+ <Model::DB>
+ schema_class App::DB
+ <connect_info>
+ dsn dbi:mysql:database=test
+ user testuser
+ password TestPass
+ AutoCommit 1
+ </connect_info>
+ </Model::DB>
- $new->transaction_depth(0);
- $new->_sql_maker_opts({});
- $new->{savepoints} = [];
- $new->{_in_dbh_do} = 0;
- $new->{_dbh_gen} = 0;
-
- $new;
-}
+=back
-=head2 connect_info
+Please note that the L<DBI> docs recommend that you always explicitly
+set C<AutoCommit> to either I<0> or I<1>. L<DBIx::Class> further
+recommends that it be set to I<1>, and that you perform transactions
+via our L<DBIx::Class::Schema/txn_do> method. L<DBIx::Class> will set it
+to I<1> if you do not explicitly set it to zero. This is the default
+for most DBDs. See L</DBIx::Class and AutoCommit> for details.
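+
+For example (a sketch; the DSN and credentials are hypothetical):
+
+  ->connect_info([ 'dbi:Pg:dbname=foo', 'user', 'pass', { AutoCommit => 1 } ]);
+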
-The arguments of C<connect_info> are always a single array reference.
+=head3 DBIx::Class specific connection attributes
-This is normally accessed via L<DBIx::Class::Schema/connection>, which
-encapsulates its argument list in an arrayref before calling
-C<connect_info> here.
+In addition to the standard L<DBI|DBI/ATTRIBUTES_COMMON_TO_ALL_HANDLES>
+L<connection|DBI/Database_Handle_Attributes> attributes, DBIx::Class recognizes
+the following connection options. These options can be mixed in with your other
+L<DBI> connection attributes, or placed in a separate hashref
+(C<\%extra_attributes>) as shown above.
-The arrayref can either contain the same set of arguments one would
-normally pass to L<DBI/connect>, or a lone code reference which returns
-a connected database handle. Please note that the L<DBI> docs
-recommend that you always explicitly set C<AutoCommit> to either
-C<0> or C<1>. L<DBIx::Class> further recommends that it be set
-to C<1>, and that you perform transactions via our L</txn_do>
-method. L<DBIx::Class> will set it to C<1> if you do not do explicitly
-set it to zero. This is the default for most DBDs. See below for more
-details.
+Every time C<connect_info> is invoked, any previous settings for
+these options will be cleared before setting the new ones, regardless of
+whether any options are specified in the new C<connect_info>.
-In either case, if the final argument in your connect_info happens
-to be a hashref, C<connect_info> will look there for several
-connection-specific options:
-=over 4
+=over
=item on_connect_do
=over
+=item a scalar
+
+This contains one SQL statement to execute.
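+
+For example, a sketch for MySQL:
+
+  on_connect_do => 'SET NAMES utf8',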
+
=item an array reference
This contains SQL statements to execute in order. Each element contains
=item on_disconnect_do
-Takes arguments in the same form as L<on_connect_do> and executes them
+Takes arguments in the same form as L</on_connect_do> and executes them
immediately before disconnecting from the database.
-Note, this only runs if you explicitly call L<disconnect> on the
+Note, this only runs if you explicitly call L</disconnect> on the
storage object.
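+
+For example (a sketch; the statement is an arbitrary bit of cleanup SQL):
+
+  on_disconnect_do => 'DROP TABLE IF EXISTS scratch',
+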
=item disable_sth_caching
Sets the limit dialect. This is useful for JDBC-bridge among others
where the remote SQL-dialect cannot be determined by the name of the
-driver alone.
+driver alone. See also L<SQL::Abstract::Limit>.
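+
+For example, a sketch using one of the L<SQL::Abstract::Limit> syntax names:
+
+  limit_dialect => 'LimitOffset',
+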
=item quote_char
Specifies what characters to use to quote table and column names. If
-you use this you will want to specify L<name_sep> as well.
+you use this you will want to specify L</name_sep> as well.
-quote_char expects either a single character, in which case is it is placed
-on either side of the table/column, or an arrayref of length 2 in which case the
-table/column name is placed between the elements.
+C<quote_char> expects either a single character, in which case it is
+placed on either side of the table/column name, or an arrayref of length
+2, in which case the table/column name is placed between the elements.
-For example under MySQL you'd use C<quote_char =E<gt> '`'>, and user SQL Server you'd
-use C<quote_char =E<gt> [qw/[ ]/]>.
+For example, under MySQL you should use C<< quote_char => '`' >>, and for
+SQL Server you should use C<< quote_char => [qw/[ ]/] >>.
=item name_sep
-This only needs to be used in conjunction with L<quote_char>, and is used to
+This only needs to be used in conjunction with C<quote_char>, and is used to
specify the character that separates elements (schemas, tables, columns) from
each other. In most cases this is simply a C<.>.
+The consequence of not supplying this value is that L<SQL::Abstract>
+will assume DBIx::Class's aliases are complete column
+names. The output will look like I<"me.name"> when it should actually
+be I<"me"."name">.
+
=item unsafe
This Storage driver normally installs its own C<HandleError>, sets
transactions, making it possible to recover from failure in the inner
transaction without having to abort all outer transactions.
-=back
-
-These options can be mixed in with your other L<DBI> connection attributes,
-or placed in a seperate hashref after all other normal L<DBI> connection
-arguments.
+=item cursor_class
-Every time C<connect_info> is invoked, any previous settings for
-these options will be cleared before setting the new ones, regardless of
-whether any options are specified in the new C<connect_info>.
+Use this argument to supply a cursor class other than the default
+L<DBIx::Class::Storage::DBI::Cursor>.
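+
+For example (a sketch; C<MyApp::Cursor> is a hypothetical subclass):
+
+  cursor_class => 'MyApp::Cursor',
+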
-Another Important Note:
-
-DBIC can do some wonderful magic with handling exceptions,
-disconnections, and transactions when you use C<< AutoCommit => 1 >>
-combined with C<txn_do> for transaction support.
-
-If you set C<< AutoCommit => 0 >> in your connect info, then you are always
-in an assumed transaction between commits, and you're telling us you'd
-like to manage that manually. A lot of DBIC's magic protections
-go away. We can't protect you from exceptions due to database
-disconnects because we don't know anything about how to restart your
-transactions. You're on your own for handling all sorts of exceptional
-cases if you choose the C<< AutoCommit => 0 >> path, just as you would
-be with raw DBI.
+=back
-Examples:
+Some real-life examples of arguments to L</connect_info> and
+L<DBIx::Class::Schema/connect>:
# Simple SQLite connection
->connect_info([ 'dbi:SQLite:./foo.db' ]);
]
);
- # Subref + DBIC-specific connection options
+ # Same, but with hashref as argument
+  # See connect_info above for an explanation
+ ->connect_info(
+ [{
+ dsn => 'dbi:Pg:dbname=foo',
+ user => 'postgres',
+ password => 'my_pg_password',
+ AutoCommit => 1,
+ quote_char => q{"},
+ name_sep => q{.},
+ }]
+ );
+
+ # Subref + DBIx::Class-specific connection options
->connect_info(
[
sub { DBI->connect(...) },
]
);
+
=cut
sub connect_info {
return $self->_connect_info if !$info_arg;
+ my @args = @$info_arg; # take a shallow copy for further mutilation
+ $self->_connect_info([@args]); # copy for _connect_info
+
+ # combine/pre-parse arguments depending on invocation style
+
+ my %attrs;
+ if (ref $args[0] eq 'CODE') { # coderef with optional \%extra_attributes
+ %attrs = %{ $args[1] || {} };
+ @args = $args[0];
+ }
+ elsif (ref $args[0] eq 'HASH') { # single hashref (i.e. Catalyst config)
+ %attrs = %{$args[0]};
+ @args = ();
+ for (qw/password user dsn/) {
+ unshift @args, delete $attrs{$_};
+ }
+ }
+ else { # otherwise assume dsn/user/password + \%attrs + \%extra_attrs
+ %attrs = (
+      %{ $args[3] || {} },
+      %{ $args[4] || {} },
+ );
+ @args = @args[0,1,2];
+ }
+
# Kill sql_maker/_sql_maker_opts, so we get a fresh one with only
# the new set of options
$self->_sql_maker(undef);
$self->_sql_maker_opts({});
- $self->_connect_info([@$info_arg]); # copy for _connect_info
- my $dbi_info = [@$info_arg]; # copy for _dbi_connect_info
-
- my $last_info = $dbi_info->[-1];
- if(ref $last_info eq 'HASH') {
- $last_info = { %$last_info }; # so delete is non-destructive
- my @storage_option = qw(
- on_connect_do on_disconnect_do disable_sth_caching unsafe cursor_class
- auto_savepoint
- );
- for my $storage_opt (@storage_option) {
- if(my $value = delete $last_info->{$storage_opt}) {
+ if(keys %attrs) {
+ for my $storage_opt (@storage_options, 'cursor_class') { # @storage_options is declared at the top of the module
+ if(my $value = delete $attrs{$storage_opt}) {
$self->$storage_opt($value);
}
}
for my $sql_maker_opt (qw/limit_dialect quote_char name_sep/) {
- if(my $opt_val = delete $last_info->{$sql_maker_opt}) {
+ if(my $opt_val = delete $attrs{$sql_maker_opt}) {
$self->_sql_maker_opts->{$sql_maker_opt} = $opt_val;
}
}
- # re-insert modified hashref
- $dbi_info->[-1] = $last_info;
-
- # Get rid of any trailing empty hashref
- pop(@$dbi_info) if !keys %$last_info;
}
- $self->_dbi_connect_info($dbi_info);
+ %attrs = () if (ref $args[0] eq 'CODE'); # _connect() never looks past $args[0] in this case
+
+ $self->_dbi_connect_info([@args, keys %attrs ? \%attrs : ()]);
$self->_connect_info;
}
=head2 on_connect_do
-This method is deprecated in favor of setting via L</connect_info>.
+This method is deprecated in favour of setting via L</connect_info>.
+
=head2 dbh_do
eval {
$self->_verify_pid if $dbh;
- if( !$dbh ) {
+ if(!$self->_dbh) {
$self->_populate_dbh;
$dbh = $self->_dbh;
}
}
}
+=head2 with_deferred_fk_checks
+
+=over 4
+
+=item Arguments: C<$coderef>
+
+=item Return Value: The return value of $coderef
+
+=back
+
+Storage-specific method to run the code ref with FK checks deferred, or
+in MySQL's case disabled entirely.
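+
+A usage sketch (the result sources and columns here are hypothetical):
+
+  $schema->storage->with_deferred_fk_checks(sub {
+    # rows referencing each other can be inserted in either order
+    $schema->resultset('Artist')->create({ artistid => 1, name => 'Foo' });
+    $schema->resultset('CD')->create({ cdid => 1, artist => 1, title => 'Bar' });
+  });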
+
+=cut
+
+# Storage subclasses should override this
+sub with_deferred_fk_checks {
+ my ($self, $sub) = @_;
+
+ $sub->();
+}
+
sub connected {
my ($self) = @_;
sub _sql_maker_args {
my ($self) = @_;
- return ( bindtype=>'columns', limit_dialect => $self->dbh, %{$self->_sql_maker_opts} );
+ return ( bindtype=>'columns', array_datatypes => 1, limit_dialect => $self->dbh, %{$self->_sql_maker_opts} );
}
sub sql_maker {
my @info = @{$self->_dbi_connect_info || []};
$self->_dbh($self->_connect(@info));
+ $self->_conn_pid($$);
+ $self->_conn_tid(threads->tid) if $INC{'threads.pm'};
+
+ $self->_determine_driver;
+
# Always set the transaction depth on connect, since
# there is no transaction in progress by definition
$self->{transaction_depth} = $self->_dbh_autocommit ? 0 : 1;
- if(ref $self eq 'DBIx::Class::Storage::DBI') {
- my $driver = $self->_dbh->{Driver}->{Name};
+ my $connection_do = $self->on_connect_do;
+ $self->_do_connection_actions($connection_do) if $connection_do;
+}
+
+sub _determine_driver {
+ my ($self) = @_;
+
+ if (ref $self eq 'DBIx::Class::Storage::DBI') {
+ my $driver;
+
+ if ($self->_dbh) { # we are connected
+ $driver = $self->_dbh->{Driver}{Name};
+ } else {
+      # try to use the dsn to avoid requiring a connection; the driver may
+      # still force a connection in _rebless to determine the version
+ ($driver) = $self->_dbi_connect_info->[0] =~ /dbi:([^:]+):/i;
+ }
+
if ($self->load_optional_class("DBIx::Class::Storage::DBI::${driver}")) {
bless $self, "DBIx::Class::Storage::DBI::${driver}";
$self->_rebless();
}
}
-
- my $connection_do = $self->on_connect_do;
- $self->_do_connection_actions($connection_do) if ref($connection_do);
-
- $self->_conn_pid($$);
- $self->_conn_tid(threads->tid) if $INC{'threads.pm'};
}
sub _do_connection_actions {
my $self = shift;
my $connection_do = shift;
- if (ref $connection_do eq 'ARRAY') {
+ if (!ref $connection_do) {
+ $self->_do_query($connection_do);
+ }
+ elsif (ref $connection_do eq 'ARRAY') {
$self->_do_query($_) foreach @$connection_do;
}
elsif (ref $connection_do eq 'CODE') {
- $connection_do->();
+ $connection_do->($self);
+ }
+ else {
+ $self->throw_exception (sprintf ("Don't know how to process conection actions of type '%s'", ref $connection_do) );
}
return $self;
$self->_do_query($_) foreach @$action;
}
else {
- my @to_run = (ref $action eq 'ARRAY') ? (@$action) : ($action);
- $self->_query_start(@to_run);
- $self->_dbh->do(@to_run);
- $self->_query_end(@to_run);
+ # Most debuggers expect ($sql, @bind), so we need to exclude
+ # the attribute hash which is the second argument to $dbh->do
+ # furthermore the bind values are usually to be presented
+ # as named arrayref pairs, so wrap those here too
+ my @do_args = (ref $action eq 'ARRAY') ? (@$action) : ($action);
+ my $sql = shift @do_args;
+ my $attrs = shift @do_args;
+ my @bind = map { [ undef, $_ ] } @do_args;
+
+ $self->_query_start($sql, @bind);
+ $self->_dbh->do($sql, $attrs, @do_args);
+ $self->_query_end($sql, @bind);
}
return $self;
my $weak_self = $self;
weaken($weak_self);
$dbh->{HandleError} = sub {
- $weak_self->throw_exception("DBI Exception: $_[0]")
+ if ($weak_self) {
+ $weak_self->throw_exception("DBI Exception: $_[0]");
+ }
+ else {
+ croak ("DBI Exception: $_[0]");
+ }
};
$dbh->{ShowErrorStatement} = 1;
$dbh->{RaiseError} = 1;
sub _prep_for_execute {
my ($self, $op, $extra_bind, $ident, $args) = @_;
+ if( blessed($ident) && $ident->isa("DBIx::Class::ResultSource") ) {
+ $ident = $ident->from();
+ }
+
my ($sql, @bind) = $self->sql_maker->$op($ident, @$args);
+
unshift(@bind,
map { ref $_ eq 'ARRAY' ? $_ : [ '!!dummy', $_ ] } @$extra_bind)
if $extra_bind;
-
return ($sql, \@bind);
}
sub _dbh_execute {
my ($self, $dbh, $op, $extra_bind, $ident, $bind_attributes, @args) = @_;
-
- if( blessed($ident) && $ident->isa("DBIx::Class::ResultSource") ) {
- $ident = $ident->from();
- }
my ($sql, $bind) = $self->_prep_for_execute($op, $extra_bind, $ident, \@args);
}
foreach my $data (@data) {
- $data = ref $data ? ''.$data : $data; # stringify args
+ my $ref = ref $data;
+ $data = $ref && $ref ne 'ARRAY' ? ''.$data : $data; # stringify args (except arrayrefs)
$sth->bind_param($placeholder_index, $data, $attributes);
$placeholder_index++;
my $ident = $source->from;
my $bind_attributes = $self->source_bind_attributes($source);
+ my $updated_cols = {};
+
+ $self->ensure_connected;
foreach my $col ( $source->columns ) {
if ( !defined $to_insert->{$col} ) {
my $col_info = $source->column_info($col);
if ( $col_info->{auto_nextval} ) {
- $self->ensure_connected;
- $to_insert->{$col} = $self->_sequence_fetch( 'nextval', $col_info->{sequence} || $self->_dbh_get_autoinc_seq($self->dbh, $source) );
+ $updated_cols->{$col} = $to_insert->{$col} = $self->_sequence_fetch( 'nextval', $col_info->{sequence} || $self->_dbh_get_autoinc_seq($self->dbh, $source) );
}
}
}
$self->_execute('insert' => [], $source, $bind_attributes, $to_insert);
- return $to_insert;
+ return $updated_cols;
}
## Still not quite perfect, and EXPERIMENTAL
# @bind = map { ref $_ ? ''.$_ : $_ } @bind; # stringify args
## This must be an arrayref, else nothing works!
-
my $tuple_status = [];
-
- ##use Data::Dumper;
- ##print STDERR Dumper( $data, $sql, [@bind] );
-
- my $time = time();
## Get the bind_attributes, if any exist
my $bind_attributes = $self->source_bind_attributes($source);
return $self->_execute('delete' => [], $source, $bind_attrs, @_);
}
+# We were sent here because the $rs contains a complex search
+# which will require a subquery to select the correct rows
+# (i.e. joined or limited resultsets)
+#
+# Generating a single PK column subquery is trivial and supported
+# by all RDBMS. However if we have a multicolumn PK, things get ugly.
+# Look at _multipk_update_delete()
+sub subq_update_delete {
+ my $self = shift;
+ my ($rs, $op, $values) = @_;
+
+ my $rsrc = $rs->result_source;
+
+  # we already check this, but double-check naively just in case; should be removed soon
+ my $sel = $rs->_resolved_attrs->{select};
+ $sel = [ $sel ] unless ref $sel eq 'ARRAY';
+ my @pcols = $rsrc->primary_columns;
+ if (@$sel != @pcols) {
+ $self->throw_exception (
+      'Subquery update/delete cannot be called on resultsets selecting a'
+      .' number of columns different from the number of primary keys'
+ );
+ }
+
+ if (@pcols == 1) {
+ return $self->$op (
+ $rsrc,
+ $op eq 'update' ? $values : (),
+ { $pcols[0] => { -in => $rs->as_query } },
+ );
+ }
+
+ else {
+ return $self->_multipk_update_delete (@_);
+ }
+}
+
+# ANSI SQL does not provide a reliable way to perform a multicol-PK
+# resultset update/delete involving subqueries. So by default resort
+# to simple (and inefficient) delete_all-style per-row operations,
+# while allowing specific storages to override this with a faster
+# implementation.
+#
+sub _multipk_update_delete {
+ return shift->_per_row_update_delete (@_);
+}
+
+# This is the default loop used to delete/update rows for multi PK
+# resultsets, and used by mysql exclusively (because it can't do anything
+# else).
+#
+# We do not use $row->$op style queries, because resultset update/delete
+# is not expected to cascade (this is what delete_all/update_all is for).
+#
+# There should be no race conditions, as the entire operation is wrapped
+# in a transaction.
+#
+sub _per_row_update_delete {
+ my $self = shift;
+ my ($rs, $op, $values) = @_;
+
+ my $rsrc = $rs->result_source;
+ my @pcols = $rsrc->primary_columns;
+
+ my $guard = $self->txn_scope_guard;
+
+ my $subrs_cur = $rs->cursor;
+ while (my @pks = $subrs_cur->next) {
+
+ my $cond;
+ for my $i (0.. $#pcols) {
+ $cond->{$pcols[$i]} = $pks[$i];
+ }
+
+ $self->$op (
+ $rsrc,
+ $op eq 'update' ? $values : (),
+ $cond,
+ );
+ }
+
+ $guard->commit;
+
+ return 1;
+}
+
sub _select {
+ my $self = shift;
+ my $sql_maker = $self->sql_maker;
+ local $sql_maker->{for};
+ return $self->_execute($self->_select_args(@_));
+}
+
+sub _select_args {
my ($self, $ident, $select, $condition, $attrs) = @_;
my $order = $attrs->{order_by};
- if (ref $condition eq 'SCALAR') {
- $order = $1 if $$condition =~ s/ORDER BY (.*)$//i;
- }
-
my $for = delete $attrs->{for};
my $sql_maker = $self->sql_maker;
- local $sql_maker->{for} = $for;
+ $sql_maker->{for} = $for;
if (exists $attrs->{group_by} || $attrs->{having}) {
$order = {
$attrs->{rows} = 2**48 if not defined $attrs->{rows} and defined $attrs->{offset};
push @args, $attrs->{rows}, $attrs->{offset};
}
-
- return $self->_execute(@args);
+ return @args;
}
sub source_bind_attributes {
my $self = shift;
my ($rv, $sth, @bind) = $self->_select(@_);
my @row = $sth->fetchrow_array;
- if(@row && $sth->fetchrow_array) {
+  my @nextrow = @row ? $sth->fetchrow_array : ();
+ if(@row && @nextrow) {
carp "Query returned more than one row. SQL that returns multiple rows is DEPRECATED for ->find and ->single";
}
# Need to call finish() to work round broken DBDs
return @row;
}
-sub reload_row {
- my ($self, $row) = @_;
- delete $row->{_dirty_columns};
- return unless $row->in_storage; # Don't reload if we aren't real!
-
- my $reload = $row->result_source->resultset->find(
- map { $row->$_ } $row->primary_columns
- );
- unless ($reload) { # If we got deleted in the mean-time
- $row->in_storage(0);
- return $row;
- }
-
- $row = %$reload;
-
- # Avoid a possible infinite loop with
- # sub DESTROY { $_[0]->discard_changes }
- bless $reload, 'Do::Not::Exist';
-
- return $row;
-}
-
=head2 sth
=over 4
=cut
sub _dbh_last_insert_id {
- my ($self, $dbh, $source, $col) = @_;
- # XXX This is a SQLite-ism as a default... is there a DBI-generic way?
- $dbh->func('last_insert_rowid');
+  # All Storages need to register their own _dbh_last_insert_id;
+ # the old SQLite-based method was highly inappropriate
+
+ my $self = shift;
+ my $class = ref $self;
+ $self->throw_exception (<<EOE);
+
+No _dbh_last_insert_id() method found in $class.
+Since the method of obtaining the autoincrement id of the last insert
+operation varies greatly between different databases, this method must be
+individually implemented for every storage class.
+EOE
}
sub last_insert_id {
=head2 bind_attribute_by_data_type
-Given a datatype from column info, returns a database specific bind attribute for
-$dbh->bind_param($val,$attribute) or nothing if we will let the database planner
-just handle it.
+Given a datatype from column info, returns a database specific bind
+attribute for C<< $dbh->bind_param($val,$attribute) >> or nothing if we will
+let the database planner just handle it.
Generally only needed for special case column types, like bytea in postgres.
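+
+A sketch of an override in a storage subclass, modeled on the C<bytea>
+case (C<PG_BYTEA> is a constant provided by L<DBD::Pg>):
+
+  sub bind_attribute_by_data_type {
+    my ($self, $data_type) = @_;
+    if ($data_type eq 'bytea') {
+      return { pg_type => DBD::Pg::PG_BYTEA };
+    }
+    return;  # anything else is left to the database
+  }
+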
=over 4
-=item Arguments: $schema \@databases, $version, $directory, $preversion, $sqlt_args
+=item Arguments: $schema, \@databases, $version, $directory, $preversion, \%sqlt_args
=back
Creates a SQL file based on the Schema, for each of the specified
database types, in the given directory.
+By default, C<\%sqlt_args> will have
+
+ { add_drop_table => 1, ignore_constraint_names => 1, ignore_index_names => 1 }
+
+merged with the hash passed in. To disable any of those features, pass in a
+hashref like the following:
+
+  { ignore_constraint_names => 0 }  # ... plus any other options
+
=cut
-sub create_ddl_dir
-{
+sub create_ddl_dir {
my ($self, $schema, $databases, $version, $dir, $preversion, $sqltargs) = @_;
- if(!$dir || !-d $dir)
- {
- warn "No directory given, using ./\n";
+ if(!$dir || !-d $dir) {
+ carp "No directory given, using ./\n";
$dir = "./";
}
$databases ||= ['MySQL', 'SQLite', 'PostgreSQL'];
$databases = [ $databases ] if(ref($databases) ne 'ARRAY');
- $version ||= $schema->VERSION || '1.x';
- $sqltargs = { ( add_drop_table => 1 ), %{$sqltargs || {}} };
- $self->throw_exception(q{Can't create a ddl file without SQL::Translator 0.09: '}
+ my $schema_version = $schema->schema_version || '1.x';
+ $version ||= $schema_version;
+
+ $sqltargs = {
+ add_drop_table => 1,
+ ignore_constraint_names => 1,
+ ignore_index_names => 1,
+ %{$sqltargs || {}}
+ };
+
+ $self->throw_exception(q{Can't create a ddl file without SQL::Translator 0.09003: '}
. $self->_check_sqlt_message . q{'})
if !$self->_check_sqlt_version;
my $sqlt = SQL::Translator->new( $sqltargs );
$sqlt->parser('SQL::Translator::Parser::DBIx::Class');
- my $sqlt_schema = $sqlt->translate({ data => $schema }) or die $sqlt->error;
+ my $sqlt_schema = $sqlt->translate({ data => $schema })
+ or $self->throw_exception ($sqlt->error);
- foreach my $db (@$databases)
- {
+ foreach my $db (@$databases) {
$sqlt->reset();
- $sqlt = $self->configure_sqlt($sqlt, $db);
$sqlt->{schema} = $sqlt_schema;
$sqlt->producer($db);
my $file;
- my $filename = $schema->ddl_filename($db, $dir, $version);
- if(-e $filename)
- {
- warn("$filename already exists, skipping $db");
- next unless ($preversion);
- } else {
- my $output = $sqlt->translate;
- if(!$output)
- {
- warn("Failed to translate to $db, skipping. (" . $sqlt->error . ")");
- next;
- }
- if(!open($file, ">$filename"))
- {
- $self->throw_exception("Can't open $filename for writing ($!)");
- next;
- }
- print $file $output;
- close($file);
- }
- if($preversion)
- {
- require SQL::Translator::Diff;
+ my $filename = $schema->ddl_filename($db, $version, $dir);
+    if (-e $filename && ($version eq $schema_version)) {
+ # if we are dumping the current version, overwrite the DDL
+ carp "Overwriting existing DDL file - $filename";
+ unlink($filename);
+ }
- my $prefilename = $schema->ddl_filename($db, $dir, $preversion);
-# print "Previous version $prefilename\n";
- if(!-e $prefilename)
- {
- warn("No previous schema file found ($prefilename)");
- next;
- }
+ my $output = $sqlt->translate;
+ if(!$output) {
+ carp("Failed to translate to $db, skipping. (" . $sqlt->error . ")");
+ next;
+ }
+ if(!open($file, ">$filename")) {
+ $self->throw_exception("Can't open $filename for writing ($!)");
+ next;
+ }
+ print $file $output;
+ close($file);
+
+ next unless ($preversion);
- my $difffile = $schema->ddl_filename($db, $dir, $version, $preversion);
- print STDERR "Diff: $difffile: $db, $dir, $version, $preversion \n";
- if(-e $difffile)
- {
- warn("$difffile already exists, skipping");
- next;
- }
+ require SQL::Translator::Diff;
- my $source_schema;
- {
- my $t = SQL::Translator->new($sqltargs);
- $t->debug( 0 );
- $t->trace( 0 );
- $t->parser( $db ) or die $t->error;
- $t = $self->configure_sqlt($t, $db);
- my $out = $t->translate( $prefilename ) or die $t->error;
- $source_schema = $t->schema;
- unless ( $source_schema->name ) {
- $source_schema->name( $prefilename );
- }
- }
+ my $prefilename = $schema->ddl_filename($db, $preversion, $dir);
+ if(!-e $prefilename) {
+ carp("No previous schema file found ($prefilename)");
+ next;
+ }
- # The "new" style of producers have sane normalization and can support
- # diffing a SQL file against a DBIC->SQLT schema. Old style ones don't
- # And we have to diff parsed SQL against parsed SQL.
- my $dest_schema = $sqlt_schema;
-
- unless ( "SQL::Translator::Producer::$db"->can('preprocess_schema') ) {
- my $t = SQL::Translator->new($sqltargs);
- $t->debug( 0 );
- $t->trace( 0 );
- $t->parser( $db ) or die $t->error;
- $t = $self->configure_sqlt($t, $db);
- my $out = $t->translate( $filename ) or die $t->error;
- $dest_schema = $t->schema;
- $dest_schema->name( $filename )
- unless $dest_schema->name;
- }
+ my $difffile = $schema->ddl_filename($db, $version, $dir, $preversion);
+ if(-e $difffile) {
+ carp("Overwriting existing diff file - $difffile");
+ unlink($difffile);
+ }
+
+ my $source_schema;
+ {
+ my $t = SQL::Translator->new($sqltargs);
+ $t->debug( 0 );
+ $t->trace( 0 );
- $DB::single = 1;
- my $diff = SQL::Translator::Diff::schema_diff($source_schema, $db,
- $dest_schema, $db,
- $sqltargs
- );
- if(!open $file, ">$difffile")
- {
- $self->throw_exception("Can't write to $difffile ($!)");
- next;
- }
- print $file $diff;
- close($file);
+ $t->parser( $db )
+ or $self->throw_exception ($t->error);
+
+ my $out = $t->translate( $prefilename )
+ or $self->throw_exception ($t->error);
+
+ $source_schema = $t->schema;
+
+ $source_schema->name( $prefilename )
+ unless ( $source_schema->name );
}
- }
-}
-sub configure_sqlt() {
- my $self = shift;
- my $tr = shift;
- my $db = shift || $self->sqlt_type;
- if ($db eq 'PostgreSQL') {
- $tr->quote_table_names(0);
- $tr->quote_field_names(0);
+ # The "new" style of producers have sane normalization and can support
+ # diffing a SQL file against a DBIC->SQLT schema. Old style ones don't
+ # And we have to diff parsed SQL against parsed SQL.
+ my $dest_schema = $sqlt_schema;
+
+ unless ( "SQL::Translator::Producer::$db"->can('preprocess_schema') ) {
+ my $t = SQL::Translator->new($sqltargs);
+ $t->debug( 0 );
+ $t->trace( 0 );
+
+ $t->parser( $db )
+ or $self->throw_exception ($t->error);
+
+ my $out = $t->translate( $filename )
+ or $self->throw_exception ($t->error);
+
+ $dest_schema = $t->schema;
+
+ $dest_schema->name( $filename )
+ unless $dest_schema->name;
+ }
+
+ my $diff = SQL::Translator::Diff::schema_diff($source_schema, $db,
+ $dest_schema, $db,
+ $sqltargs
+ );
+ if(!open $file, ">$difffile") {
+ $self->throw_exception("Can't write to $difffile ($!)");
+ next;
+ }
+ print $file $diff;
+ close($file);
}
- return $tr;
}
=head2 deployment_statements
# Need to be connected to get the correct sqlt_type
$self->ensure_connected() unless $type;
$type ||= $self->sqlt_type;
- $version ||= $schema->VERSION || '1.x';
+ $version ||= $schema->schema_version || '1.x';
$dir ||= './';
- my $filename = $schema->ddl_filename($type, $dir, $version);
+ my $filename = $schema->ddl_filename($type, $version, $dir);
if(-f $filename)
{
my $file;
return join('', @rows);
}
- $self->throw_exception(q{Can't deploy without SQL::Translator 0.09: '}
+ $self->throw_exception(q{Can't deploy without SQL::Translator 0.09003: '}
. $self->_check_sqlt_message . q{'})
if !$self->_check_sqlt_version;
my $tr = SQL::Translator->new(%$sqltargs);
SQL::Translator::Parser::DBIx::Class::parse( $tr, $schema );
return "SQL::Translator::Producer::${type}"->can('produce')->($tr);
-
- return;
-
}
sub deploy {
my ($self, $schema, $type, $sqltargs, $dir) = @_;
- foreach my $statement ( $self->deployment_statements($schema, $type, undef, $dir, { no_comments => 1, %{ $sqltargs || {} } } ) ) {
- foreach my $line ( split(";\n", $statement)) {
- next if($line =~ /^--/);
- next if(!$line);
-# next if($line =~ /^DROP/m);
- next if($line =~ /^BEGIN TRANSACTION/m);
- next if($line =~ /^COMMIT/m);
- next if $line =~ /^\s+$/; # skip whitespace only
- $self->_query_start($line);
- eval {
- $self->dbh->do($line); # shouldn't be using ->dbh ?
- };
- if ($@) {
- warn qq{$@ (running "${line}")};
- }
- $self->_query_end($line);
+ my $deploy = sub {
+ my $line = shift;
+ return if($line =~ /^--/);
+ return if(!$line);
+ # next if($line =~ /^DROP/m);
+ return if($line =~ /^BEGIN TRANSACTION/m);
+ return if($line =~ /^COMMIT/m);
+ return if $line =~ /^\s+$/; # skip whitespace only
+ $self->_query_start($line);
+ eval {
+ $self->dbh->do($line); # shouldn't be using ->dbh ?
+ };
+ if ($@) {
+ carp qq{$@ (running "${line}")};
+ }
+ $self->_query_end($line);
+ };
+ my @statements = $self->deployment_statements($schema, $type, undef, $dir, { no_comments => 1, %{ $sqltargs || {} } } );
+ if (@statements > 1) {
+ foreach my $statement (@statements) {
+ $deploy->( $statement );
+ }
+ }
+ elsif (@statements == 1) {
+ foreach my $line ( split(";\n", $statements[0])) {
+ $deploy->( $line );
}
}
}
my $_check_sqlt_message; # private
sub _check_sqlt_version {
return $_check_sqlt_version if defined $_check_sqlt_version;
- eval 'use SQL::Translator "0.09"';
+ eval 'use SQL::Translator "0.09003"';
$_check_sqlt_message = $@ || '';
$_check_sqlt_version = !$@;
}
1;
-=head1 SQL METHODS
-
-The module defines a set of methods within the DBIC::SQL::Abstract
-namespace. These build on L<SQL::Abstract::Limit> to provide the
-SQL query functions.
+=head1 USAGE NOTES
-The following methods are extended:-
+=head2 DBIx::Class and AutoCommit
-=over 4
-
-=item delete
-
-=item insert
-
-=item select
-
-=item update
-
-=item limit_dialect
-
-See L</connect_info> for details.
-For setting, this method is deprecated in favor of L</connect_info>.
-
-=item quote_char
-
-See L</connect_info> for details.
-For setting, this method is deprecated in favor of L</connect_info>.
+DBIx::Class can do some wonderful magic with handling exceptions,
+disconnections, and transactions when you use C<< AutoCommit => 1 >>
+combined with C<txn_do> for transaction support.
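+
+For example, a sketch of the recommended pattern:
+
+  $schema->txn_do(sub {
+    # either both inserts commit, or neither does
+    $schema->resultset('Artist')->create({ name => 'Someone' });
+    $schema->resultset('CD')->create({ title => 'Something', artist => 1 });
+  });
+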
-=item name_sep
+If you set C<< AutoCommit => 0 >> in your connect info, then you are always
+in an assumed transaction between commits, and you're telling us you'd
+like to manage that manually. A lot of the magic protections offered by
+this module will go away. We can't protect you from exceptions due to database
+disconnects because we don't know anything about how to restart your
+transactions. You're on your own for handling all sorts of exceptional
+cases if you choose the C<< AutoCommit => 0 >> path, just as you would
+be with raw DBI.
-See L</connect_info> for details.
-For setting, this method is deprecated in favor of L</connect_info>.
-=back
=head1 AUTHORS