1 package DBIx::Class::Storage::DBI;
2 # -*- mode: cperl; cperl-indent-level: 2 -*-
7 use base qw/DBIx::Class::Storage::DBIHacks DBIx::Class::Storage/;
10 use Carp::Clan qw/^DBIx::Class|^Try::Tiny/;
12 use DBIx::Class::Storage::DBI::Cursor;
13 use Scalar::Util qw/refaddr weaken reftype blessed/;
14 use List::Util qw/first/;
15 use Data::Dumper::Concise 'Dumper';
16 use Sub::Name 'subname';
18 use File::Path 'make_path';
23 # default cursor class, overridable in connect_info attributes
24 __PACKAGE__->cursor_class('DBIx::Class::Storage::DBI::Cursor');
26 __PACKAGE__->mk_group_accessors('inherited' => qw/sql_maker_class sql_limit_dialect/);
27 __PACKAGE__->sql_maker_class('DBIx::Class::SQLMaker');
29 __PACKAGE__->mk_group_accessors('simple' => qw/
30 _connect_info _dbi_connect_info _dbic_connect_attributes _driver_determined
31 _dbh _dbh_details _conn_pid _sql_maker _sql_maker_opts
32 transaction_depth _dbh_autocommit savepoints
35 # the values for these accessors are picked out (and deleted) from
36 # the attribute hashref passed to connect_info
37 my @storage_options = qw/
38 on_connect_call on_disconnect_call on_connect_do on_disconnect_do
39 disable_sth_caching unsafe auto_savepoint
41 __PACKAGE__->mk_group_accessors('simple' => @storage_options);
44 # capability definitions, using a 2-tiered accessor system
47 # A driver/user may define _use_X, which, without any checks, blindly says:
48 # "(do not) use this capability", (use_dbms_capability is an "inherited"
51 # If _use_X is undef, _supports_X is then queried. This is a "simple" style
52 # accessor, which in turn calls _determine_supports_X, and stores the return
53 # in a special slot on the storage object, which is wiped every time a $dbh
54 # reconnection takes place (it is not guaranteed that upon reconnection we
55 # will get the same rdbms version). _determine_supports_X does not need to
56 # exist on a driver, as we ->can for it before calling.
58 my @capabilities = (qw/
60 insert_returning_bound
65 __PACKAGE__->mk_group_accessors( dbms_capability => map { "_supports_$_" } @capabilities );
66 __PACKAGE__->mk_group_accessors( use_dbms_capability => map { "_use_$_" } (@capabilities ) );
68 # on by default, not strictly a capability (pending rewrite)
69 __PACKAGE__->_use_join_optimizer (1);
70 sub _determine_supports_join_optimizer { 1 };
72 # Each of these methods needs _determine_driver called before itself
73 # in order to function reliably. This is a purely DRY optimization
75 # get_(use)_dbms_capability need to be called on the correct Storage
76 # class, as _use_X may be hardcoded class-wide, and _supports_X calls
77 # _determine_supports_X which obv. needs a correct driver as well
78 my @rdbms_specific_methods = qw/
92 get_use_dbms_capability
99 for my $meth (@rdbms_specific_methods) {
101 my $orig = __PACKAGE__->can ($meth)
102 or die "$meth is not a ::Storage::DBI method!";
105 no warnings qw/redefine/;
106 *{__PACKAGE__ ."::$meth"} = subname $meth => sub {
107 if (not $_[0]->_driver_determined and not $_[0]->{_in_determine_driver}) {
108 $_[0]->_determine_driver;
110 # This for some reason crashes and burns on perl 5.8.1
111 # IFF the method ends up throwing an exception
112 #goto $_[0]->can ($meth);
114 my $cref = $_[0]->can ($meth);
124 DBIx::Class::Storage::DBI - DBI storage handler
128 my $schema = MySchema->connect('dbi:SQLite:my.db');
130 $schema->storage->debug(1);
132 my @stuff = $schema->storage->dbh_do(
134 my ($storage, $dbh, @args) = @_;
135 $dbh->do("DROP TABLE authors");
140 $schema->resultset('Book')->search({
141 written_on => $schema->storage->datetime_parser->format_datetime(DateTime->now)
146 This class represents the connection to an RDBMS via L<DBI>. See
147 L<DBIx::Class::Storage> for general information. This pod only
148 documents DBI-specific methods and behaviors.
155 my $new = shift->next::method(@_);
157 $new->transaction_depth(0);
158 $new->_sql_maker_opts({});
159 $new->_dbh_details({});
160 $new->{savepoints} = [];
161 $new->{_in_dbh_do} = 0;
162 $new->{_dbh_gen} = 0;
164 # read below to see what this does
165 $new->_arm_global_destructor;
170 # This is a hack to work around perl shooting stuff in random
171 # order on exit(). If we do not walk the remaining storage
172 # objects in an END block, there is a *small but real* chance
173 # of a fork()ed child killing the parent's shared DBI handle,
174 # *before perl reaches the DESTROY in this package*
175 # Yes, it is ugly and effective.
176 # Additionally this registry is used by the CLONE method to
177 # make sure no handles are shared between threads
179 my %seek_and_destroy;
181 sub _arm_global_destructor {
183 my $key = refaddr ($self);
184 $seek_and_destroy{$key} = $self;
185 weaken ($seek_and_destroy{$key});
189 local $?; # just in case the DBI destructor changes it somehow
191 # destroy just the object if not native to this process/thread
192 $_->_verify_pid for (grep
194 values %seek_and_destroy
199 # As per DBI's recommendation, DBIC disconnects all handles as
200 # soon as possible (DBIC will reconnect only on demand from within
202 for (values %seek_and_destroy) {
204 $_->{_dbh_gen}++; # so that existing cursors will drop as well
213 # some databases spew warnings on implicit disconnect
214 local $SIG{__WARN__} = sub {};
217 # this op is necessary, since the very last perl runtime statement
218 # triggers a global destruction shootout, and the $SIG localization
219 # may very well be destroyed before perl actually gets to do the
224 # handle pid changes correctly - do not destroy parent's connection
228 my $pid = $self->_conn_pid;
229 if( defined $pid and $pid != $$ and my $dbh = $self->_dbh ) {
230 $dbh->{InactiveDestroy} = 1;
240 This method is normally called by L<DBIx::Class::Schema/connection>, which
241 encapsulates its argument list in an arrayref before passing it here.
243 The argument list may contain:
249 The same 4-element argument set one would normally pass to
250 L<DBI/connect>, optionally followed by
251 L<extra attributes|/DBIx::Class specific connection attributes>
252 recognized by DBIx::Class:
254 $connect_info_args = [ $dsn, $user, $password, \%dbi_attributes?, \%extra_attributes? ];
258 A single code reference which returns a connected
259 L<DBI database handle|DBI/connect> optionally followed by
260 L<extra attributes|/DBIx::Class specific connection attributes> recognized
263 $connect_info_args = [ sub { DBI->connect (...) }, \%extra_attributes? ];
267 A single hashref with all the attributes and the dsn/user/password
270 $connect_info_args = [{
278 $connect_info_args = [{
279 dbh_maker => sub { DBI->connect (...) },
284 This is particularly useful for L<Catalyst> based applications, allowing the
285 following config (L<Config::General> style):
290 dsn dbi:mysql:database=test
297 The C<dsn>/C<user>/C<password> combination can be substituted by the
298 C<dbh_maker> key whose value is a coderef that returns a connected
299 L<DBI database handle|DBI/connect>
303 Please note that the L<DBI> docs recommend that you always explicitly
304 set C<AutoCommit> to either I<0> or I<1>. L<DBIx::Class> further
305 recommends that it be set to I<1>, and that you perform transactions
306 via our L<DBIx::Class::Schema/txn_do> method. L<DBIx::Class> will set it
307 to I<1> if you do not explicitly set it to zero. This is the default
308 for most DBDs. See L</DBIx::Class and AutoCommit> for details.
310 =head3 DBIx::Class specific connection attributes
312 In addition to the standard L<DBI|DBI/ATTRIBUTES_COMMON_TO_ALL_HANDLES>
313 L<connection|DBI/Database_Handle_Attributes> attributes, DBIx::Class recognizes
314 the following connection options. These options can be mixed in with your other
315 L<DBI> connection attributes, or placed in a separate hashref
316 (C<\%extra_attributes>) as shown above.
318 Every time C<connect_info> is invoked, any previous settings for
319 these options will be cleared before setting the new ones, regardless of
320 whether any options are specified in the new C<connect_info>.
327 Specifies things to do immediately after connecting or re-connecting to
328 the database. Its value may contain:
334 This contains one SQL statement to execute.
336 =item an array reference
338 This contains SQL statements to execute in order. Each element contains
339 a string or a code reference that returns a string.
341 =item a code reference
343 This contains some code to execute. Unlike code references within an
344 array reference, its return value is ignored.
348 =item on_disconnect_do
350 Takes arguments in the same form as L</on_connect_do> and executes them
351 immediately before disconnecting from the database.
353 Note, this only runs if you explicitly call L</disconnect> on the
356 =item on_connect_call
358 A more generalized form of L</on_connect_do> that calls the specified
359 C<connect_call_METHOD> methods in your storage driver.
361 on_connect_do => 'select 1'
365 on_connect_call => [ [ do_sql => 'select 1' ] ]
367 Its values may contain:
373 Will call the C<connect_call_METHOD> method.
375 =item a code reference
377 Will execute C<< $code->($storage) >>
379 =item an array reference
381 Each value can be a method name or code reference.
383 =item an array of arrays
385 For each array, the first item is taken to be the C<connect_call_> method name
386 or code reference, and the rest are parameters to it.
390 Some predefined storage methods you may use:
396 Executes a SQL string or a code reference that returns a SQL string. This is
397 what L</on_connect_do> and L</on_disconnect_do> use.
405 Will execute the scalar as SQL.
409 Taken to be arguments to L<DBI/do>, the SQL string optionally followed by the
410 attributes hashref and bind values.
412 =item a code reference
414 Will execute C<< $code->($storage) >> and execute the return array refs as
421 Execute any statements necessary to initialize the database session to return
422 and accept datetime/timestamp values used with
423 L<DBIx::Class::InflateColumn::DateTime>.
425 Only necessary for some databases, see your specific storage driver for
426 implementation details.
430 =item on_disconnect_call
432 Takes arguments in the same form as L</on_connect_call> and executes them
433 immediately before disconnecting from the database.
435 Calls the C<disconnect_call_METHOD> methods as opposed to the
436 C<connect_call_METHOD> methods called by L</on_connect_call>.
438 Note, this only runs if you explicitly call L</disconnect> on the
441 =item disable_sth_caching
443 If set to a true value, this option will disable the caching of
444 statement handles via L<DBI/prepare_cached>.
448 Sets a specific SQL::Abstract::Limit-style limit dialect, overriding the
449 default L</sql_limit_dialect> setting of the storage (if any). For a list
450 of available limit dialects see L<DBIx::Class::SQLMaker::LimitDialects>.
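For example, to request the plain C<LIMIT x OFFSET x> syntax (dialect names
are those listed in L<DBIx::Class::SQLMaker::LimitDialects> - adjust to suit
your RDBMS):

  ->connect_info([ $dsn, $user, $password, { limit_dialect => 'LimitOffset' } ]);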
454 Specifies what characters to use to quote table and column names.
456 C<quote_char> expects either a single character, in which case it
457 is placed on either side of the table/column name, or an arrayref of length
458 2 in which case the table/column name is placed between the elements.
460 For example under MySQL you should use C<< quote_char => '`' >>, and for
461 SQL Server you should use C<< quote_char => [qw/[ ]/] >>.
465 This parameter is only useful in conjunction with C<quote_char>, and is used to
466 specify the character that separates elements (schemas, tables, columns) from
467 each other. If unspecified it defaults to the most commonly used C<.>.
471 This Storage driver normally installs its own C<HandleError>, sets
472 C<RaiseError> and C<ShowErrorStatement> on, and sets C<PrintError> off on
473 all database handles, including those supplied by a coderef. It does this
474 so that it can have consistent and useful error behavior.
476 If you set this option to a true value, Storage will not do its usual
477 modifications to the database handle's attributes, and instead relies on
478 the settings in your connect_info DBI options (or the values you set in
479 your connection coderef, in the case that you are connecting via coderef).
481 Note that your custom settings can cause Storage to malfunction,
482 especially if you set a C<HandleError> handler that suppresses exceptions
483 and/or disables C<RaiseError>.
487 If this option is true, L<DBIx::Class> will use savepoints when nesting
488 transactions, making it possible to recover from failure in the inner
489 transaction without having to abort all outer transactions.
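For instance, with C<< auto_savepoint => 1 >> a failure inside a nested
L<DBIx::Class::Schema/txn_do> can be caught without invalidating the work
already done in the outer transaction. A sketch (the C<Artist> resultset is
purely illustrative):

  $schema->txn_do(sub {
    $schema->resultset('Artist')->create({ name => 'Outer' });

    eval {
      $schema->txn_do(sub {
        # this runs inside a savepoint - a die() here rolls back
        # only to the savepoint, not the whole transaction
        die "inner failure\n";
      });
    };

    # the outer transaction is still live at this point
  });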
493 Use this argument to supply a cursor class other than the default
494 L<DBIx::Class::Storage::DBI::Cursor>.
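For example (C<My::App::Cursor> is a placeholder name for your own
L<DBIx::Class::Storage::DBI::Cursor> subclass):

  ->connect_info([ $dsn, $user, $password, { cursor_class => 'My::App::Cursor' } ]);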
498 Some real-life examples of arguments to L</connect_info> and
499 L<DBIx::Class::Schema/connect>
501 # Simple SQLite connection
502 ->connect_info([ 'dbi:SQLite:./foo.db' ]);
505 ->connect_info([ sub { DBI->connect(...) } ]);
507 # Connect via subref in hashref
509 dbh_maker => sub { DBI->connect(...) },
510 on_connect_do => 'alter session ...',
513 # A bit more complicated
520 { quote_char => q{"} },
524 # Equivalent to the previous example
530 { AutoCommit => 1, quote_char => q{"}, name_sep => q{.} },
534 # Same, but with hashref as argument
535 # See parse_connect_info for explanation
538 dsn => 'dbi:Pg:dbname=foo',
540 password => 'my_pg_password',
547 # Subref + DBIx::Class-specific connection options
550 sub { DBI->connect(...) },
554 on_connect_do => ['SET search_path TO myschema,otherschema,public'],
555 disable_sth_caching => 1,
565 my ($self, $info) = @_;
567 return $self->_connect_info if !$info;
569 $self->_connect_info($info); # copy for _connect_info
571 $info = $self->_normalize_connect_info($info)
572 if ref $info eq 'ARRAY';
574 for my $storage_opt (keys %{ $info->{storage_options} }) {
575 my $value = $info->{storage_options}{$storage_opt};
577 $self->$storage_opt($value);
580 # Kill sql_maker/_sql_maker_opts, so we get a fresh one with only
581 # the new set of options
582 $self->_sql_maker(undef);
583 $self->_sql_maker_opts({});
585 for my $sql_maker_opt (keys %{ $info->{sql_maker_options} }) {
586 my $value = $info->{sql_maker_options}{$sql_maker_opt};
588 $self->_sql_maker_opts->{$sql_maker_opt} = $value;
592 %{ $self->_default_dbi_connect_attributes || {} },
593 %{ $info->{attributes} || {} },
596 my @args = @{ $info->{arguments} };
598 if (keys %attrs and ref $args[0] ne 'CODE') {
600 'You provided explicit AutoCommit => 0 in your connection_info. '
601 . 'This is almost universally a bad idea (see the footnotes of '
602 . 'DBIx::Class::Storage::DBI for more info). If you still want to '
603 . 'do this you can set $ENV{DBIC_UNSAFE_AUTOCOMMIT_OK} to disable '
605 if ! $attrs{AutoCommit} and ! $ENV{DBIC_UNSAFE_AUTOCOMMIT_OK};
607 push @args, \%attrs if keys %attrs;
609 $self->_dbi_connect_info(\@args);
612 # save the attributes in a separate accessor so they are always
613 # introspectable, even in case of a CODE $dbhmaker
614 $self->_dbic_connect_attributes (\%attrs);
616 return $self->_connect_info;
619 sub _normalize_connect_info {
620 my ($self, $info_arg) = @_;
623 my @args = @$info_arg; # take a shallow copy for further mutilation
625 # combine/pre-parse arguments depending on invocation style
628 if (ref $args[0] eq 'CODE') { # coderef with optional \%extra_attributes
629 %attrs = %{ $args[1] || {} };
632 elsif (ref $args[0] eq 'HASH') { # single hashref (i.e. Catalyst config)
633 %attrs = %{$args[0]};
635 if (my $code = delete $attrs{dbh_maker}) {
638 my @ignored = grep { delete $attrs{$_} } (qw/dsn user password/);
641 'Attribute(s) %s in connect_info were ignored, as they can not be applied '
642 . "to the result of 'dbh_maker'",
644 join (', ', map { "'$_'" } (@ignored) ),
649 @args = delete @attrs{qw/dsn user password/};
652 else { # otherwise assume dsn/user/password + \%attrs + \%extra_attrs
654 % { $args[3] || {} },
655 % { $args[4] || {} },
657 @args = @args[0,1,2];
660 $info{arguments} = \@args;
662 my @storage_opts = grep exists $attrs{$_},
663 @storage_options, 'cursor_class';
665 @{ $info{storage_options} }{@storage_opts} =
666 delete @attrs{@storage_opts} if @storage_opts;
668 my @sql_maker_opts = grep exists $attrs{$_},
669 qw/limit_dialect quote_char name_sep/;
671 @{ $info{sql_maker_options} }{@sql_maker_opts} =
672 delete @attrs{@sql_maker_opts} if @sql_maker_opts;
674 $info{attributes} = \%attrs if %attrs;
679 sub _default_dbi_connect_attributes () {
684 ShowErrorStatement => 1,
690 This method is deprecated in favour of setting via L</connect_info>.
694 =head2 on_disconnect_do
696 This method is deprecated in favour of setting via L</connect_info>.
700 sub _parse_connect_do {
701 my ($self, $type) = @_;
703 my $val = $self->$type;
704 return () if not defined $val;
709 push @res, [ 'do_sql', $val ];
710 } elsif (ref($val) eq 'CODE') {
712 } elsif (ref($val) eq 'ARRAY') {
713 push @res, map { [ 'do_sql', $_ ] } @$val;
715 $self->throw_exception("Invalid type for $type: ".ref($val));
723 Arguments: ($subref | $method_name), @extra_coderef_args?
725 Execute the given $subref or $method_name using the new exception-based
726 connection management.
728 The first two arguments will be the storage object that C<dbh_do> was called
729 on and a database handle to use. Any additional arguments will be passed
730 verbatim to the called subref as arguments 2 and onwards.
732 Using this (instead of $self->_dbh or $self->dbh) ensures correct
733 exception handling and reconnection (or failover in future subclasses).
735 Your subref should have no side-effects outside of the database, as
736 there is the potential for your subref to be partially double-executed
737 if the database connection was stale/dysfunctional.
741 my @stuff = $schema->storage->dbh_do(
743 my ($storage, $dbh, @cols) = @_;
744 my $cols = join(q{, }, @cols);
745 $dbh->selectrow_array("SELECT $cols FROM foo");
756 my $dbh = $self->_get_dbh;
758 return $self->$code($dbh, @_)
759 if ( $self->{_in_dbh_do} || $self->{transaction_depth} );
761 local $self->{_in_dbh_do} = 1;
763 # take a ref instead of a copy, to preserve coderef @_ aliasing semantics
766 $self->$code ($dbh, @$args);
768 $self->throw_exception($_) if $self->connected;
770 # We were not connected - reconnect and retry, but let any
771 # exception fall right through this time
772 carp "Retrying $code after catching disconnected exception: $_"
773 if $ENV{DBIC_DBIRETRY_DEBUG};
775 $self->_populate_dbh;
776 $self->$code($self->_dbh, @$args);
780 # This is basically a blend of dbh_do above and DBIx::Class::Storage::txn_do.
781 # It also informs dbh_do to bypass itself while under the direction of txn_do,
782 # via $self->{_in_dbh_do} (this saves some redundant eval and errorcheck, etc)
787 ref $coderef eq 'CODE' or $self->throw_exception
788 ('$coderef must be a CODE reference');
790 local $self->{_in_dbh_do} = 1;
793 my $want = wantarray;
799 # take a ref instead of a copy, to preserve coderef @_ aliasing semantics
804 my $txn_start_depth = $self->transaction_depth;
806 @result = $coderef->(@$args);
808 elsif(defined $want) {
809 $result[0] = $coderef->(@$args);
815 my $delta_txn = $txn_start_depth - $self->transaction_depth;
816 if ($delta_txn == 0) {
819 elsif ($delta_txn != 1) {
820 # an off-by-one would mean we fired a rollback
821 carp "Unexpected reduction of transaction depth by $delta_txn after execution of $coderef";
827 if(! defined $exception) { return wantarray ? @result : $result[0] }
829 if($self->transaction_depth > 1 || $tried++ || $self->connected) {
830 my $rollback_exception;
831 try { $self->txn_rollback } catch { $rollback_exception = shift };
832 if(defined $rollback_exception) {
833 my $exception_class = "DBIx::Class::Storage::NESTED_ROLLBACK_EXCEPTION";
834 $self->throw_exception($exception) # propagate nested rollback
835 if $rollback_exception =~ /$exception_class/;
837 $self->throw_exception(
838 "Transaction aborted: ${exception}. "
839 . "Rollback failed: ${rollback_exception}"
842 $self->throw_exception($exception)
845 # We were not connected, and this was the first try - reconnect and retry
847 carp "Retrying $coderef after catching disconnected exception: $exception"
848 if $ENV{DBIC_TXNRETRY_DEBUG};
849 $self->_populate_dbh;
855 Our C<disconnect> method also performs a rollback first if the
856 database is not in C<AutoCommit> mode.
866 push @actions, ( $self->on_disconnect_call || () );
867 push @actions, $self->_parse_connect_do ('on_disconnect_do');
869 $self->_do_connection_actions(disconnect_call_ => $_) for @actions;
871 $self->_dbh_rollback unless $self->_dbh_autocommit;
873 %{ $self->_dbh->{CachedKids} } = ();
874 $self->_dbh->disconnect;
880 =head2 with_deferred_fk_checks
884 =item Arguments: C<$coderef>
886 =item Return Value: The return value of $coderef
890 Storage specific method to run the code ref with FK checks deferred or
891 in MySQL's case disabled entirely.
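A usage sketch - the resultset and column names below are illustrative only:

  $schema->storage->with_deferred_fk_checks(sub {
    # rows may be created in an order that would otherwise violate
    # foreign key constraints
    $schema->resultset('Track')->create({ cd => 10, title => 'Not yet' });
    $schema->resultset('CD')->create({ cdid => 10, artist => 1, title => 'Later' });
  });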
895 # Storage subclasses should override this
896 sub with_deferred_fk_checks {
897 my ($self, $sub) = @_;
905 =item Arguments: none
907 =item Return Value: 1|0
911 Verifies that the current database handle is active and ready to execute
912 an SQL statement (e.g. the connection did not get stale, server is still
913 answering, etc.) This method is used internally by L</dbh>.
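An illustrative check from application code:

  if (! $schema->storage->connected) {
    # the handle went stale - re-establish it explicitly here,
    # or simply let the next query reconnect on demand
    $schema->storage->ensure_connected;
  }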
919 return 0 unless $self->_seems_connected;
922 local $self->_dbh->{RaiseError} = 1;
927 sub _seems_connected {
932 my $dbh = $self->_dbh
935 return $dbh->FETCH('Active');
941 my $dbh = $self->_dbh or return 0;
946 sub ensure_connected {
949 unless ($self->connected) {
950 $self->_populate_dbh;
956 Returns a C<$dbh> - a database handle of class L<DBI>. The returned handle
957 is guaranteed to be healthy by implicitly calling L</connected>, and if
958 necessary performing a reconnection before returning. Keep in mind that this
959 is very B<expensive> on some database engines. Consider using L</dbh_do>
967 if (not $self->_dbh) {
968 $self->_populate_dbh;
970 $self->ensure_connected;
975 # this is the internal "get dbh or connect (don't check)" method
979 $self->_populate_dbh unless $self->_dbh;
985 unless ($self->_sql_maker) {
986 my $sql_maker_class = $self->sql_maker_class;
987 $self->ensure_class_loaded ($sql_maker_class);
989 my %opts = %{$self->_sql_maker_opts||{}};
993 $self->sql_limit_dialect
996 my $s_class = (ref $self) || $self;
998 "Your storage class ($s_class) does not set sql_limit_dialect and you "
999 . 'have not supplied an explicit limit_dialect in your connection_info. '
1000 . 'DBIC will attempt to use the GenericSubQ dialect, which works on most '
1001 . 'databases but can be (and often is) painfully slow.'
1008 $self->_sql_maker($sql_maker_class->new(
1009 bindtype=>'columns',
1010 array_datatypes => 1,
1011 limit_dialect => $dialect,
1016 return $self->_sql_maker;
1019 # nothing to do by default
1026 my @info = @{$self->_dbi_connect_info || []};
1027 $self->_dbh(undef); # in case ->connected failed we might get sent here
1028 $self->_dbh_details({}); # reset everything we know
1030 $self->_dbh($self->_connect(@info));
1032 $self->_conn_pid($$) if $^O ne 'MSWin32'; # on win32 these are in fact threads
1034 $self->_determine_driver;
1036 # Always set the transaction depth on connect, since
1037 # there is no transaction in progress by definition
1038 $self->{transaction_depth} = $self->_dbh_autocommit ? 0 : 1;
1040 $self->_run_connection_actions unless $self->{_in_determine_driver};
1043 sub _run_connection_actions {
1047 push @actions, ( $self->on_connect_call || () );
1048 push @actions, $self->_parse_connect_do ('on_connect_do');
1050 $self->_do_connection_actions(connect_call_ => $_) for @actions;
1055 sub set_use_dbms_capability {
1056 $_[0]->set_inherited ($_[1], $_[2]);
1059 sub get_use_dbms_capability {
1060 my ($self, $capname) = @_;
1062 my $use = $self->get_inherited ($capname);
1065 : do { $capname =~ s/^_use_/_supports_/; $self->get_dbms_capability ($capname) }
1069 sub set_dbms_capability {
1070 $_[0]->_dbh_details->{capability}{$_[1]} = $_[2];
1073 sub get_dbms_capability {
1074 my ($self, $capname) = @_;
1076 my $cap = $self->_dbh_details->{capability}{$capname};
1078 unless (defined $cap) {
1079 if (my $meth = $self->can ("_determine$capname")) {
1080 $cap = $self->$meth ? 1 : 0;
1086 $self->set_dbms_capability ($capname, $cap);
1096 unless ($info = $self->_dbh_details->{info}) {
1100 my $server_version = try { $self->_get_server_version };
1102 if (defined $server_version) {
1103 $info->{dbms_version} = $server_version;
1105 my ($numeric_version) = $server_version =~ /^([\d\.]+)/;
1106 my @verparts = split (/\./, $numeric_version);
1112 # consider only up to 3 version parts, iff not more than 3 digits
1114 while (@verparts && @use_parts < 3) {
1115 my $p = shift @verparts;
1117 push @use_parts, $p;
1119 push @use_parts, 0 while @use_parts < 3;
1121 $info->{normalized_dbms_version} = sprintf "%d.%03d%03d", @use_parts;
1125 $self->_dbh_details->{info} = $info;
1131 sub _get_server_version {
1132 shift->_get_dbh->get_info(18);
1135 sub _determine_driver {
1138 if ((not $self->_driver_determined) && (not $self->{_in_determine_driver})) {
1139 my $started_connected = 0;
1140 local $self->{_in_determine_driver} = 1;
1142 if (ref($self) eq __PACKAGE__) {
1144 if ($self->_dbh) { # we are connected
1145 $driver = $self->_dbh->{Driver}{Name};
1146 $started_connected = 1;
1148 # if connect_info is a CODEREF, we have no choice but to connect
1149 if (ref $self->_dbi_connect_info->[0] &&
1150 reftype $self->_dbi_connect_info->[0] eq 'CODE') {
1151 $self->_populate_dbh;
1152 $driver = $self->_dbh->{Driver}{Name};
1155 # try to use dsn to not require being connected, the driver may still
1156 # force a connection in _rebless to determine version
1157 # (dsn may not be supplied at all if all we do is make a mock-schema)
1158 my $dsn = $self->_dbi_connect_info->[0] || $ENV{DBI_DSN} || '';
1159 ($driver) = $dsn =~ /dbi:([^:]+):/i;
1160 $driver ||= $ENV{DBI_DRIVER};
1165 my $storage_class = "DBIx::Class::Storage::DBI::${driver}";
1166 if ($self->load_optional_class($storage_class)) {
1167 mro::set_mro($storage_class, 'c3');
1168 bless $self, $storage_class;
1174 $self->_driver_determined(1);
1176 $self->_init; # run driver-specific initializations
1178 $self->_run_connection_actions
1179 if !$started_connected && defined $self->_dbh;
1183 sub _do_connection_actions {
1185 my $method_prefix = shift;
1188 if (not ref($call)) {
1189 my $method = $method_prefix . $call;
1191 } elsif (ref($call) eq 'CODE') {
1193 } elsif (ref($call) eq 'ARRAY') {
1194 if (ref($call->[0]) ne 'ARRAY') {
1195 $self->_do_connection_actions($method_prefix, $_) for @$call;
1197 $self->_do_connection_actions($method_prefix, @$_) for @$call;
1200 $self->throw_exception (sprintf ("Don't know how to process connection actions of type '%s'", ref($call)) );
1206 sub connect_call_do_sql {
1208 $self->_do_query(@_);
1211 sub disconnect_call_do_sql {
1213 $self->_do_query(@_);
1216 # override in db-specific backend when necessary
1217 sub connect_call_datetime_setup { 1 }
1220 my ($self, $action) = @_;
1222 if (ref $action eq 'CODE') {
1223 $action = $action->($self);
1224 $self->_do_query($_) foreach @$action;
1227 # Most debuggers expect ($sql, @bind), so we need to exclude
1228 # the attribute hash which is the second argument to $dbh->do
1229 # furthermore the bind values are usually to be presented
1230 # as named arrayref pairs, so wrap those here too
1231 my @do_args = (ref $action eq 'ARRAY') ? (@$action) : ($action);
1232 my $sql = shift @do_args;
1233 my $attrs = shift @do_args;
1234 my @bind = map { [ undef, $_ ] } @do_args;
1236 $self->_query_start($sql, @bind);
1237 $self->_get_dbh->do($sql, $attrs, @do_args);
1238 $self->_query_end($sql, @bind);
1245 my ($self, @info) = @_;
1247 $self->throw_exception("You failed to provide any connection info")
1250 my ($old_connect_via, $dbh);
1252 if ($INC{'Apache/DBI.pm'} && $ENV{MOD_PERL}) {
1253 $old_connect_via = $DBI::connect_via;
1254 $DBI::connect_via = 'connect';
1258 if(ref $info[0] eq 'CODE') {
1259 $dbh = $info[0]->();
1262 $dbh = DBI->connect(@info);
1269 unless ($self->unsafe) {
1271 $self->throw_exception(
1272 'Refusing clobbering of {HandleError} installed on externally supplied '
1273 ."DBI handle $dbh. Either remove the handler or use the 'unsafe' attribute."
1274 ) if $dbh->{HandleError} and ref $dbh->{HandleError} ne '__DBIC__DBH__ERROR__HANDLER__';
1276 # Default via _default_dbi_connect_attributes is 1, hence it was an explicit
1277 # request, or an external handle. Complain and set anyway
1278 unless ($dbh->{RaiseError}) {
1279 carp( ref $info[0] eq 'CODE'
1281 ? "The 'RaiseError' of the externally supplied DBI handle is set to false. "
1282 ."DBIx::Class will toggle it back to true, unless the 'unsafe' connect "
1283 .'attribute has been supplied'
1285 : 'RaiseError => 0 supplied in your connection_info, without an explicit '
1286 .'unsafe => 1. Toggling RaiseError back to true'
1289 $dbh->{RaiseError} = 1;
1292 # this odd anonymous coderef dereference is in fact really
1293 # necessary to avoid the unwanted effect described in perl5
1296 my $weak_self = $_[0];
1299 # the coderef is blessed so we can distinguish it from externally
1300 # supplied handles (which must be preserved)
1301 $_[1]->{HandleError} = bless sub {
1303 $weak_self->throw_exception("DBI Exception: $_[0]");
1306 # the handler may be invoked by something totally out of
1308 croak ("DBI Exception (unhandled by DBIC, ::Schema GCed): $_[0]");
1310 }, '__DBIC__DBH__ERROR__HANDLER__';
1315 $self->throw_exception("DBI Connection failed: $_")
1318 $DBI::connect_via = $old_connect_via if $old_connect_via;
1321 $self->_dbh_autocommit($dbh->{AutoCommit});
1326 my ($self, $name) = @_;
1328 $name = $self->_svp_generate_name
1329 unless defined $name;
1331 $self->throw_exception ("You can't use savepoints outside a transaction")
1332 if $self->{transaction_depth} == 0;
1334 $self->throw_exception ("Your Storage implementation doesn't support savepoints")
1335 unless $self->can('_svp_begin');
1337 push @{ $self->{savepoints} }, $name;
1339 $self->debugobj->svp_begin($name) if $self->debug;
1341 return $self->_svp_begin($name);
1345 my ($self, $name) = @_;
1347 $self->throw_exception ("You can't use savepoints outside a transaction")
1348 if $self->{transaction_depth} == 0;
1350 $self->throw_exception ("Your Storage implementation doesn't support savepoints")
1351 unless $self->can('_svp_release');
1353 if (defined $name) {
1354 $self->throw_exception ("Savepoint '$name' does not exist")
1355 unless grep { $_ eq $name } @{ $self->{savepoints} };
1357 # Dig through the stack until we find the one we are releasing. This keeps
1358 # the stack up to date.
1361 do { $svp = pop @{ $self->{savepoints} } } while $svp ne $name;
1363 $name = pop @{ $self->{savepoints} };
1366 $self->debugobj->svp_release($name) if $self->debug;
1368 return $self->_svp_release($name);
1372 my ($self, $name) = @_;
1374 $self->throw_exception ("You can't use savepoints outside a transaction")
1375 if $self->{transaction_depth} == 0;
1377 $self->throw_exception ("Your Storage implementation doesn't support savepoints")
1378 unless $self->can('_svp_rollback');
1380 if (defined $name) {
1381 # If they passed us a name, verify that it exists in the stack
1382 unless(grep({ $_ eq $name } @{ $self->{savepoints} })) {
1383 $self->throw_exception("Savepoint '$name' does not exist!");
1386 # Dig through the stack until we find the one we are rolling back to. This keeps
1387 # the stack up to date.
1388 while(my $s = pop(@{ $self->{savepoints} })) {
1389 last if($s eq $name);
1391 # Add the savepoint back to the stack, as a rollback doesn't remove the
1392 # named savepoint, only everything after it.
1393 push(@{ $self->{savepoints} }, $name);
1395 # We'll assume they want to rollback to the last savepoint
1396 $name = $self->{savepoints}->[-1];
1399 $self->debugobj->svp_rollback($name) if $self->debug;
1401 return $self->_svp_rollback($name);
1404 sub _svp_generate_name {
1406 return 'savepoint_'.scalar(@{ $self->{'savepoints'} });
1412 # this means we have not yet connected and do not know the AC status
1413 # (e.g. coderef $dbh)
1414 if (! defined $self->_dbh_autocommit) {
1415 $self->ensure_connected;
1417 # otherwise re-connect on pid changes, so
1418 # that the txn_depth is adjusted properly
1419 # the lightweight _get_dbh is good enough here
1420 # (only superficial handle check, no pings)
1425 if($self->transaction_depth == 0) {
1426 $self->debugobj->txn_begin()
1428 $self->_dbh_begin_work;
1430 elsif ($self->auto_savepoint) {
1433 $self->{transaction_depth}++;
1436 sub _dbh_begin_work {
1439 # if the user is utilizing txn_do - good for him, otherwise we need to
1440 # ensure that the $dbh is healthy on BEGIN.
1441 # We do this via ->dbh_do instead of ->dbh, so that the ->dbh "ping"
1442 # will be replaced by a failure of begin_work itself (which will be
1443 # then retried on reconnect)
1444 if ($self->{_in_dbh_do}) {
1445 $self->_dbh->begin_work;
1447 $self->dbh_do(sub { $_[1]->begin_work });
1453 if (! $self->_dbh) {
1454 $self->throw_exception('cannot COMMIT on a disconnected handle');
1456 elsif ($self->{transaction_depth} == 1) {
1457 $self->debugobj->txn_commit()
1460 $self->{transaction_depth} = 0
1461 if $self->_dbh_autocommit;
1463 elsif($self->{transaction_depth} > 1) {
1464 $self->{transaction_depth}--;
1466 if $self->auto_savepoint;
1468 elsif (! $self->_dbh->FETCH('AutoCommit') ) {
1470 carp "Storage transaction_depth $self->{transaction_depth} does not match "
1471 ."false AutoCommit of $self->{_dbh}, attempting COMMIT anyway";
1473 $self->debugobj->txn_commit()
1476 $self->{transaction_depth} = 0
1477 if $self->_dbh_autocommit;
1480 $self->throw_exception( 'Refusing to commit without a started transaction' );
1486 my $dbh = $self->_dbh
1487 or $self->throw_exception('cannot COMMIT on a disconnected handle');
1493 my $dbh = $self->_dbh;
1495 if ($self->{transaction_depth} == 1) {
1496 $self->debugobj->txn_rollback()
1498 $self->{transaction_depth} = 0
1499 if $self->_dbh_autocommit;
1500 $self->_dbh_rollback;
1502 elsif($self->{transaction_depth} > 1) {
1503 $self->{transaction_depth}--;
1504 if ($self->auto_savepoint) {
1505 $self->svp_rollback;
1510 die DBIx::Class::Storage::NESTED_ROLLBACK_EXCEPTION->new;
1514 my $exception_class = "DBIx::Class::Storage::NESTED_ROLLBACK_EXCEPTION";
1516 if ($_ !~ /$exception_class/) {
1517 # ensure that a failed rollback resets the transaction depth
1518 $self->{transaction_depth} = $self->_dbh_autocommit ? 0 : 1;
1521 $self->throw_exception($_)
1527 my $dbh = $self->_dbh
1528 or $self->throw_exception('cannot ROLLBACK on a disconnected handle');
1532 # This used to be the top-half of _execute. It was split out to make it
1533 # easier to override in NoBindVars without duping the rest. It takes up
1534 # all of _execute's args, and emits $sql, @bind.
1535 sub _prep_for_execute {
1536 my ($self, $op, $extra_bind, $ident, $args) = @_;
1538 if( blessed $ident && $ident->isa("DBIx::Class::ResultSource") ) {
1539 $ident = $ident->from();
1542 my ($sql, @bind) = $self->sql_maker->$op($ident, @$args);
1545 map { ref $_ eq 'ARRAY' ? $_ : [ '!!dummy', $_ ] } @$extra_bind)
1547 return ($sql, \@bind);
1551 sub _fix_bind_params {
1552 my ($self, @bind) = @_;
1554 ### Turn @bind from something like this:
1555 ### ( [ "artist", 1 ], [ "cdid", 1, 3 ] )
1557 ### ( "'1'", "'1'", "'3'" )
1560 if ( defined( $_ && $_->[1] ) ) {
1561 map { qq{'$_'}; } @{$_}[ 1 .. $#$_ ];
1568 my ( $self, $sql, @bind ) = @_;
1570 if ( $self->debug ) {
1571 @bind = $self->_fix_bind_params(@bind);
1573 $self->debugobj->query_start( $sql, @bind );
1578 my ( $self, $sql, @bind ) = @_;
1580 if ( $self->debug ) {
1581 @bind = $self->_fix_bind_params(@bind);
1582 $self->debugobj->query_end( $sql, @bind );
1587 my ($self, $dbh, $op, $extra_bind, $ident, $bind_attributes, @args) = @_;
1589 my ($sql, $bind) = $self->_prep_for_execute($op, $extra_bind, $ident, \@args);
1591 $self->_query_start( $sql, @$bind );
1593 my $sth = $self->sth($sql,$op);
1595 my $placeholder_index = 1;
1597 foreach my $bound (@$bind) {
1598 my $attributes = {};
1599 my($column_name, @data) = @$bound;
1601 if ($bind_attributes) {
1602 $attributes = $bind_attributes->{$column_name}
1603 if defined $bind_attributes->{$column_name};
1606 foreach my $data (@data) {
1607 my $ref = ref $data;
1609 if ($ref and overload::Method($data, '""') ) {
1612 elsif ($ref eq 'SCALAR') { # any scalarrefs are assumed to be bind_inouts
1613 $sth->bind_param_inout(
1614 $placeholder_index++,
1616 $self->_max_column_bytesize($ident, $column_name),
1622 $sth->bind_param($placeholder_index++, $data, $attributes);
1626 # Can this fail without throwing an exception anyways???
1627 my $rv = $sth->execute();
1628 $self->throw_exception(
1629 $sth->errstr || $sth->err || 'Unknown error: execute() returned false, but error flags were not set...'
1632 $self->_query_end( $sql, @$bind );
1634 return (wantarray ? ($rv, $sth, @$bind) : $rv);
1639 $self->dbh_do('_dbh_execute', @_); # retry over disconnects
1642 sub _prefetch_autovalues {
1643 my ($self, $source, $to_insert) = @_;
1645 my $colinfo = $source->columns_info;
1648 for my $col (keys %$colinfo) {
1650 $colinfo->{$col}{auto_nextval}
1653 ! exists $to_insert->{$col}
1655 ref $to_insert->{$col} eq 'SCALAR'
1658 $values{$col} = $self->_sequence_fetch(
1660 ( $colinfo->{$col}{sequence} ||=
1661 $self->_dbh_get_autoinc_seq($self->_get_dbh, $source, $col)
1671 my ($self, $source, $to_insert) = @_;
1673 my $prefetched_values = $self->_prefetch_autovalues($source, $to_insert);
1676 $to_insert = { %$to_insert, %$prefetched_values };
1678 # list of primary keys we try to fetch from the database
1679 # both not-exists and scalarrefs are considered
1681 for ($source->primary_columns) {
1682 $fetch_pks{$_} = scalar keys %fetch_pks # so we can preserve order for prettiness
1683 if ! exists $to_insert->{$_} or ref $to_insert->{$_} eq 'SCALAR';
1686 my ($sqla_opts, @ir_container);
1687 if ($self->_use_insert_returning) {
1689 # retain order as declared in the resultsource
1690 for (sort { $fetch_pks{$a} <=> $fetch_pks{$b} } keys %fetch_pks ) {
1691 push @{$sqla_opts->{returning}}, $_;
1692 $sqla_opts->{returning_container} = \@ir_container
1693 if $self->_use_insert_returning_bound;
1697 my $bind_attributes = $self->source_bind_attributes($source);
1699 my ($rv, $sth) = $self->_execute('insert' => [], $source, $bind_attributes, $to_insert, $sqla_opts);
1703 if (my $retlist = $sqla_opts->{returning}) {
1704 @ir_container = try {
1705 local $SIG{__WARN__} = sub {};
1706 my @r = $sth->fetchrow_array;
1709 } unless @ir_container;
1711 @returned_cols{@$retlist} = @ir_container if @ir_container;
1714 return { %$prefetched_values, %returned_cols };
1718 ## Currently it is assumed that all values passed will be "normal", i.e. not
1719 ## scalar refs, or at least, all the same type as the first set, since the statement is
1720 ## only prepped once.
1722 my ($self, $source, $cols, $data) = @_;
1725 @colvalues{@$cols} = (0..$#$cols);
1727 for my $i (0..$#$cols) {
1728 my $first_val = $data->[0][$i];
1729 next unless ref $first_val eq 'SCALAR';
1731 $colvalues{ $cols->[$i] } = $first_val;
1734 # check for bad data and stringify stringifiable objects
1735 my $bad_slice = sub {
1736 my ($msg, $col_idx, $slice_idx) = @_;
1737 $self->throw_exception(sprintf "%s for column '%s' in populate slice:\n%s",
1741 local $Data::Dumper::Maxdepth = 1; # don't dump objects, if any
1743 map { $cols->[$_] => $data->[$slice_idx][$_] } (0 .. $#$cols)
1749 for my $datum_idx (0..$#$data) {
1750 my $datum = $data->[$datum_idx];
1752 for my $col_idx (0..$#$cols) {
1753 my $val = $datum->[$col_idx];
1754 my $sqla_bind = $colvalues{ $cols->[$col_idx] };
1755 my $is_literal_sql = (ref $sqla_bind) eq 'SCALAR';
1757 if ($is_literal_sql) {
1759 $bad_slice->('bind found where literal SQL expected', $col_idx, $datum_idx);
1761 elsif ((my $reftype = ref $val) ne 'SCALAR') {
1762 $bad_slice->("$reftype reference found where literal SQL expected",
1763 $col_idx, $datum_idx);
1765 elsif ($$val ne $$sqla_bind){
1766 $bad_slice->("inconsistent literal SQL value, expecting: '$$sqla_bind'",
1767 $col_idx, $datum_idx);
1770 elsif (my $reftype = ref $val) {
1772 if (overload::Method($val, '""')) {
1773 $datum->[$col_idx] = "".$val;
1776 $bad_slice->("$reftype reference found where bind expected",
1777 $col_idx, $datum_idx);
1783 my ($sql, $bind) = $self->_prep_for_execute (
1784 'insert', undef, $source, [\%colvalues]
1788 # if the bindlist is empty - make sure all "values" are in fact
1789 # literal scalarrefs. If not the case this means the storage ate
1790 # them away (e.g. the NoBindVars component) and interpolated them
1791 # directly into the SQL. This obviously can't be good for multi-inserts
1793 $self->throw_exception('Cannot insert_bulk without support for placeholders')
1794 if first { ref $_ ne 'SCALAR' } values %colvalues;
1797 # neither _execute_array, nor _execute_inserts_with_no_binds are
1798 # atomic (even if _execute_array is a single call). Thus a safety
1800 my $guard = $self->txn_scope_guard;
1802 $self->_query_start( $sql, @$bind ? [ dummy => '__BULK_INSERT__' ] : () );
1803 my $sth = $self->sth($sql);
1806 #@bind = map { ref $_ ? ''.$_ : $_ } @bind; # stringify args
1807 $self->_execute_array( $source, $sth, $bind, $cols, $data );
1810 # bind_param_array doesn't work if there are no binds
1811 $self->_dbh_execute_inserts_with_no_binds( $sth, scalar @$data );
1815 $self->_query_end( $sql, @$bind ? [ dummy => '__BULK_INSERT__' ] : () );
1819 return (wantarray ? ($rv, $sth, @$bind) : $rv);
1822 sub _execute_array {
1823 my ($self, $source, $sth, $bind, $cols, $data, @extra) = @_;
1825 ## This must be an arrayref, else nothing works!
1826 my $tuple_status = [];
1828 ## Get the bind_attributes, if any exist
1829 my $bind_attributes = $self->source_bind_attributes($source);
1831 ## Bind the values and execute
1832 my $placeholder_index = 1;
1834 foreach my $bound (@$bind) {
1836 my $attributes = {};
1837 my ($column_name, $data_index) = @$bound;
1839 if( $bind_attributes ) {
1840 $attributes = $bind_attributes->{$column_name}
1841 if defined $bind_attributes->{$column_name};
1844 my @data = map { $_->[$data_index] } @$data;
1846 $sth->bind_param_array(
1849 (%$attributes ? $attributes : ()),
1851 $placeholder_index++;
1856 $rv = $self->_dbh_execute_array($sth, $tuple_status, @extra);
1862 # Not all DBDs are created equal. Some throw on error, some return
1863 # an undef $rv, and some set $sth->err - try whatever we can
1864 $err = ($sth->errstr || 'UNKNOWN ERROR ($sth->errstr is unset)') if (
1867 ( !defined $rv or $sth->err )
1870 # Statement must finish even if there was an exception.
1875 $err = shift unless defined $err
1880 ++$i while $i <= $#$tuple_status && !ref $tuple_status->[$i];
1882 $self->throw_exception("Unexpected populate error: $err")
1883 if ($i > $#$tuple_status);
1885 $self->throw_exception(sprintf "%s for populate slice:\n%s",
1886 ($tuple_status->[$i][1] || $err),
1887 Dumper { map { $cols->[$_] => $data->[$i][$_] } (0 .. $#$cols) },
1894 sub _dbh_execute_array {
1895 my ($self, $sth, $tuple_status, @extra) = @_;
1897 return $sth->execute_array({ArrayTupleStatus => $tuple_status});
1900 sub _dbh_execute_inserts_with_no_binds {
1901 my ($self, $sth, $count) = @_;
1905 my $dbh = $self->_get_dbh;
1906 local $dbh->{RaiseError} = 1;
1907 local $dbh->{PrintError} = 0;
1909 $sth->execute foreach 1..$count;
1915 # Make sure statement is finished even if there was an exception.
1920 $err = shift unless defined $err;
1923 $self->throw_exception($err) if defined $err;
1929 my ($self, $source, @args) = @_;
1931 my $bind_attrs = $self->source_bind_attributes($source);
1933 return $self->_execute('update' => [], $source, $bind_attrs, @args);
1938 my ($self, $source, @args) = @_;
1940 my $bind_attrs = $self->source_bind_attributes($source);
1942 return $self->_execute('delete' => [], $source, $bind_attrs, @args);
1945 # We were sent here because the $rs contains a complex search
1946 # which will require a subquery to select the correct rows
1947 # (i.e. joined or limited resultsets, or non-introspectable conditions)
1949 # Generating a single PK column subquery is trivial and supported
1950 # by all RDBMS. However if we have a multicolumn PK, things get ugly.
1951 # Look at _multipk_update_delete()
1952 sub _subq_update_delete {
1954 my ($rs, $op, $values) = @_;
1956 my $rsrc = $rs->result_source;
1958 # quick check if we got a sane rs on our hands
1959 my @pcols = $rsrc->_pri_cols;
1961 my $sel = $rs->_resolved_attrs->{select};
1962 $sel = [ $sel ] unless ref $sel eq 'ARRAY';
1965 join ("\x00", map { join '.', $rs->{attrs}{alias}, $_ } sort @pcols)
1967 join ("\x00", sort @$sel )
1969 $self->throw_exception (
1970 '_subq_update_delete can not be called on resultsets selecting columns other than the primary keys'
1977 $op eq 'update' ? $values : (),
1978 { $pcols[0] => { -in => $rs->as_query } },
1983 return $self->_multipk_update_delete (@_);
1987 # ANSI SQL does not provide a reliable way to perform a multicol-PK
1988 # resultset update/delete involving subqueries. So by default resort
1989 # to simple (and inefficient) delete_all style per-row operations,
1990 # while allowing specific storages to override this with a faster
1993 sub _multipk_update_delete {
1994 return shift->_per_row_update_delete (@_);
1997 # This is the default loop used to delete/update rows for multi PK
1998 # resultsets, and used by mysql exclusively (because it can't do anything
2001 # We do not use $row->$op style queries, because resultset update/delete
2002 # is not expected to cascade (this is what delete_all/update_all is for).
2004 # There should be no race conditions as the entire operation is rolled
2007 sub _per_row_update_delete {
2009 my ($rs, $op, $values) = @_;
2011 my $rsrc = $rs->result_source;
2012 my @pcols = $rsrc->_pri_cols;
2014 my $guard = $self->txn_scope_guard;
2016 # emulate the return value of $sth->execute for non-selects
2017 my $row_cnt = '0E0';
2019 my $subrs_cur = $rs->cursor;
2020 my @all_pk = $subrs_cur->all;
2021 for my $pks ( @all_pk) {
2024 for my $i (0.. $#pcols) {
2025 $cond->{$pcols[$i]} = $pks->[$i];
2030 $op eq 'update' ? $values : (),
2044 $self->_execute($self->_select_args(@_));
2047 sub _select_args_to_query {
2050 # my ($op, $bind, $ident, $bind_attrs, $select, $cond, $rs_attrs, $rows, $offset)
2051 # = $self->_select_args($ident, $select, $cond, $attrs);
2052 my ($op, $bind, $ident, $bind_attrs, @args) =
2053 $self->_select_args(@_);
2055 # my ($sql, $prepared_bind) = $self->_prep_for_execute($op, $bind, $ident, [ $select, $cond, $rs_attrs, $rows, $offset ]);
2056 my ($sql, $prepared_bind) = $self->_prep_for_execute($op, $bind, $ident, \@args);
2057 $prepared_bind ||= [];
2060 ? ($sql, $prepared_bind, $bind_attrs)
2061 : \[ "($sql)", @$prepared_bind ]
2066 my ($self, $ident, $select, $where, $attrs) = @_;
2068 my $sql_maker = $self->sql_maker;
2069 my ($alias2source, $rs_alias) = $self->_resolve_ident_sources ($ident);
2076 $rs_alias && $alias2source->{$rs_alias}
2077 ? ( _rsroot_rsrc => $alias2source->{$rs_alias} )
2082 # calculate bind_attrs before possible $ident mangling
2083 my $bind_attrs = {};
2084 for my $alias (keys %$alias2source) {
2085 my $bindtypes = $self->source_bind_attributes ($alias2source->{$alias}) || {};
2086 for my $col (keys %$bindtypes) {
2088 my $fqcn = join ('.', $alias, $col);
2089 $bind_attrs->{$fqcn} = $bindtypes->{$col} if $bindtypes->{$col};
2091 # Unqualified column names are nice, but at the same time can be
2092 # rather ambiguous. What we do here is basically go along with
2093 # the loop, adding an unqualified column slot to $bind_attrs,
2094 # alongside the fully qualified name. As soon as we encounter
2095 # another column by that name (which would imply another table)
2096 # we unset the unqualified slot and never add any info to it
2097 # to avoid erroneous type binding. If this happens the user's
2098 # only choice will be to fully qualify the column name
2100 if (exists $bind_attrs->{$col}) {
2101 $bind_attrs->{$col} = {};
2104 $bind_attrs->{$col} = $bind_attrs->{$fqcn};
2109 # Sanity check the attributes (SQLMaker does it too, but
2110 # in case of a software_limit we'll never reach there)
2111 if (defined $attrs->{offset}) {
2112 $self->throw_exception('A supplied offset attribute must be a non-negative integer')
2113 if ( $attrs->{offset} =~ /\D/ or $attrs->{offset} < 0 );
2115 $attrs->{offset} ||= 0;
2117 if (defined $attrs->{rows}) {
2118 $self->throw_exception("The rows attribute must be a positive integer if present")
2119 if ( $attrs->{rows} =~ /\D/ or $attrs->{rows} <= 0 );
2121 elsif ($attrs->{offset}) {
2122 # MySQL actually recommends this approach. I cringe.
2123 $attrs->{rows} = $sql_maker->__max_int;
2128 # see if we need to tear the prefetch apart otherwise delegate the limiting to the
2129 # storage, unless software limit was requested
2132 ( $attrs->{rows} && keys %{$attrs->{collapse}} )
2134 # grouped prefetch (to satisfy group_by == select)
2135 ( $attrs->{group_by}
2137 @{$attrs->{group_by}}
2139 $attrs->{_prefetch_selector_range}
2142 ($ident, $select, $where, $attrs)
2143 = $self->_adjust_select_args_for_complex_prefetch ($ident, $select, $where, $attrs);
2145 elsif (! $attrs->{software_limit} ) {
2146 push @limit, $attrs->{rows}, $attrs->{offset};
2149 # try to simplify the joinmap further (prune unreferenced type-single joins)
2150 $ident = $self->_prune_unused_joins ($ident, $select, $where, $attrs);
2153 # This would be the point to deflate anything found in $where
2154 # (and leave $attrs->{bind} intact). Problem is - inflators historically
2155 # expect a row object. And all we have is a resultsource (it is trivial
2156 # to extract deflator coderefs via $alias2source above).
2158 # I don't see a way forward other than changing the way deflators are
2159 # invoked, and that's just bad...
2162 return ('select', $attrs->{bind}, $ident, $bind_attrs, $select, $where, $attrs, @limit);
2165 # Returns a counting SELECT for a simple count
2166 # query. Abstracted so that a storage could override
2167 # this to { count => 'firstcol' } or whatever makes
2168 # sense as a performance optimization
2170 #my ($self, $source, $rs_attrs) = @_;
2171 return { count => '*' };
2175 sub source_bind_attributes {
2176 my ($self, $source) = @_;
2178 my $bind_attributes;
2180 my $colinfo = $source->columns_info;
2182 for my $col (keys %$colinfo) {
2183 if (my $dt = $colinfo->{$col}{data_type} ) {
2184 $bind_attributes->{$col} = $self->bind_attribute_by_data_type($dt)
2188 return $bind_attributes;
2195 =item Arguments: $ident, $select, $condition, $attrs
2199 Handle a SQL select statement.
2205 my ($ident, $select, $condition, $attrs) = @_;
2206 return $self->cursor_class->new($self, \@_, $attrs);
2211 my ($rv, $sth, @bind) = $self->_select(@_);
2212 my @row = $sth->fetchrow_array;
2213 my @nextrow = $sth->fetchrow_array if @row;
2214 if(@row && @nextrow) {
2215 carp "Query returned more than one row. SQL that returns multiple rows is DEPRECATED for ->find and ->single";
2217 # Need to call finish() to work around broken DBDs
2222 =head2 sql_limit_dialect
2224 This is an accessor for the default SQL limit dialect used by a particular
2225 storage driver. Can be overridden by supplying an explicit L</limit_dialect>
2226 to L<DBIx::Class::Schema/connect>. For a list of available limit dialects
2227 see L<DBIx::Class::SQLMaker::LimitDialects>.
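A storage subclass would normally declare its dialect at the class level,
along these lines (a sketch with a hypothetical driver name):

  package DBIx::Class::Storage::DBI::SomeDB;
  use base 'DBIx::Class::Storage::DBI';

  __PACKAGE__->sql_limit_dialect ('LimitOffset');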
2233 =item Arguments: $sql
2237 Returns a L<DBI> sth (statement handle) for the supplied SQL.
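For example (the statement itself is arbitrary):

  my $sth = $schema->storage->sth('SELECT 1');
  $sth->execute;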
2242 my ($self, $dbh, $sql) = @_;
2244 # 3 is the if_active parameter which avoids active sth re-use
2245 my $sth = $self->disable_sth_caching
2246 ? $dbh->prepare($sql)
2247 : $dbh->prepare_cached($sql, {}, 3);
2249 # XXX You would think RaiseError would make this impossible,
2250 # but apparently that's not true :(
2251 $self->throw_exception($dbh->errstr) if !$sth;
2257 my ($self, $sql) = @_;
2258 $self->dbh_do('_dbh_sth', $sql); # retry over disconnects
2261 sub _dbh_columns_info_for {
2262 my ($self, $dbh, $table) = @_;
2264 if ($dbh->can('column_info')) {
2268 my ($schema,$tab) = $table =~ /^(.+?)\.(.+)$/ ? ($1,$2) : (undef,$table);
2269 my $sth = $dbh->column_info( undef,$schema, $tab, '%' );
2271 while ( my $info = $sth->fetchrow_hashref() ){
2273 $column_info{data_type} = $info->{TYPE_NAME};
2274 $column_info{size} = $info->{COLUMN_SIZE};
2275 $column_info{is_nullable} = $info->{NULLABLE} ? 1 : 0;
2276 $column_info{default_value} = $info->{COLUMN_DEF};
2277 my $col_name = $info->{COLUMN_NAME};
2278 $col_name =~ s/^\"(.*)\"$/$1/;
2280 $result{$col_name} = \%column_info;
2285 return \%result if !$caught && scalar keys %result;
2289 my $sth = $dbh->prepare($self->sql_maker->select($table, undef, \'1 = 0'));
2291 my @columns = @{$sth->{NAME_lc}};
2292 for my $i ( 0 .. $#columns ){
2294 $column_info{data_type} = $sth->{TYPE}->[$i];
2295 $column_info{size} = $sth->{PRECISION}->[$i];
2296 $column_info{is_nullable} = $sth->{NULLABLE}->[$i] ? 1 : 0;
2298 if ($column_info{data_type} =~ m/^(.*?)\((.*?)\)$/) {
2299 $column_info{data_type} = $1;
2300 $column_info{size} = $2;
2303 $result{$columns[$i]} = \%column_info;
2307 foreach my $col (keys %result) {
2308 my $colinfo = $result{$col};
2309 my $type_num = $colinfo->{data_type};
2311 if(defined $type_num && $dbh->can('type_info')) {
2312 my $type_info = $dbh->type_info($type_num);
2313 $type_name = $type_info->{TYPE_NAME} if $type_info;
2314 $colinfo->{data_type} = $type_name if $type_name;
2321 sub columns_info_for {
2322 my ($self, $table) = @_;
2323 $self->_dbh_columns_info_for ($self->_get_dbh, $table);
2326 =head2 last_insert_id
2328 Returns the row id of the last insert.
2332 sub _dbh_last_insert_id {
2333 my ($self, $dbh, $source, $col) = @_;
2335 my $id = try { $dbh->last_insert_id (undef, undef, $source->name, $col) };
2337 return $id if defined $id;
2339 my $class = ref $self;
2340 $self->throw_exception ("No storage specific _dbh_last_insert_id() method implemented in $class, and the generic DBI::last_insert_id() failed");
2343 sub last_insert_id {
2345 $self->_dbh_last_insert_id ($self->_dbh, @_);
2348 =head2 _native_data_type
2352 =item Arguments: $type_name
2356 This API is B<EXPERIMENTAL>, will almost definitely change in the future, and
2357 currently only used by L<::AutoCast|DBIx::Class::Storage::DBI::AutoCast> and
2358 L<::Sybase::ASE|DBIx::Class::Storage::DBI::Sybase::ASE>.
2360 The default implementation returns C<undef>, implement in your Storage driver if
2361 you need this functionality.
2363 Should map types from other databases to the native RDBMS type, for example
2364 C<VARCHAR2> to C<VARCHAR>.
2366 Types with modifiers should map to the underlying data type. For example,
2367 C<INTEGER AUTO_INCREMENT> should become C<INTEGER>.
2369 Composite types should map to the container type, for example
2370 C<ENUM(foo,bar,baz)> becomes C<ENUM>.
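A minimal sketch of an override in a storage subclass (the mappings shown are
illustrative, not a complete list):

  sub _native_data_type {
    my ($self, $type) = @_;

    # map a few foreign/decorated type names to what this hypothetical
    # RDBMS understands natively; anything unknown falls through as undef
    my %map = (
      'varchar2'               => 'varchar',
      'integer auto_increment' => 'integer',
    );
    return $map{ lc $type };
  }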
2374 sub _native_data_type {
2375 #my ($self, $data_type) = @_;
2379 # Check if placeholders are supported at all
2380 sub _determine_supports_placeholders {
2382 my $dbh = $self->_get_dbh;
2384 # some drivers provide a $dbh attribute (e.g. Sybase and $dbh->{syb_dynamic_supported})
2385 # but it is inaccurate more often than not
2387 local $dbh->{PrintError} = 0;
2388 local $dbh->{RaiseError} = 1;
2389 $dbh->do('select ?', {}, 1);
2397 # Check if placeholders bound to non-string types throw exceptions
2399 sub _determine_supports_typeless_placeholders {
2401 my $dbh = $self->_get_dbh;
2404 local $dbh->{PrintError} = 0;
2405 local $dbh->{RaiseError} = 1;
2406 # this specifically tests a bind that is NOT a string
2407 $dbh->do('select 1 where 1 = ?', {}, 1);
2417 Returns the database driver name.
2422 shift->_get_dbh->{Driver}->{Name};
2425 =head2 bind_attribute_by_data_type
2427 Given a datatype from column info, returns a database specific bind
2428 attribute for C<< $dbh->bind_param($val,$attribute) >> or nothing if we will
2429 let the database planner just handle it.
2431 Generally only needed for special case column types, like bytea in postgres.
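As a sketch, a PostgreSQL-flavoured override might look roughly like this
(assumes L<DBD::Pg> is loaded so the C<PG_BYTEA> constant is available):

  sub bind_attribute_by_data_type {
    my ($self, $data_type) = @_;

    # hand bytea columns to the driver with an explicit bind type,
    # let every other type fall through to DBI's default handling
    return $data_type =~ /^bytea$/i
      ? { pg_type => DBD::Pg::PG_BYTEA() }
      : undef;
  }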
2435 sub bind_attribute_by_data_type {
2439 =head2 is_datatype_numeric
2441 Given a datatype from column_info, returns a boolean value indicating if
2442 the current RDBMS considers it a numeric value. This controls how
2443 L<DBIx::Class::Row/set_column> decides whether to mark the column as
2444 dirty - when the datatype is deemed numeric a C<< != >> comparison will
2445 be performed instead of the usual C<eq>.
2449 sub is_datatype_numeric {
2450 my ($self, $dt) = @_;
2452 return 0 unless $dt;
2454 return $dt =~ /^ (?:
2455 numeric | int(?:eger)? | (?:tiny|small|medium|big)int | dec(?:imal)? | real | float | double (?: \s+ precision)? | (?:big)?serial
2460 =head2 create_ddl_dir
2464 =item Arguments: $schema \@databases, $version, $directory, $preversion, \%sqlt_args
2468 Creates a SQL file based on the Schema, for each of the specified
2469 database engines in C<\@databases> in the given directory.
2470 (note: specify L<SQL::Translator> names, not L<DBI> driver names).
2472 Given a previous version number, this will also create a file containing
2473 the ALTER TABLE statements to transform the previous schema into the
2474 current one. Note that these statements may contain C<DROP TABLE> or
2475 C<DROP COLUMN> statements that can potentially destroy data.
2477 The file names are created using the C<ddl_filename> method below; please
2478 override this method in your schema if you would like a different file
2479 name format. For the ALTER file, the same format is used, replacing
2480 $version in the name with "$preversion-$version".
2482 See L<SQL::Translator/METHODS> for a list of values for C<\%sqlt_args>.
2483 The most common value for this would be C<< { add_drop_table => 1 } >>
2484 to have the SQL produced include a C<DROP TABLE> statement for each table
2485 created. For quoting purposes supply C<quote_table_names> and
2486 C<quote_field_names>.
2488 If no arguments are passed, then the following default values are assumed:
2492 =item databases - ['MySQL', 'SQLite', 'PostgreSQL']
2494 =item version - $schema->schema_version
2496 =item directory - './'
2498 =item preversion - <none>
2502 By default, C<\%sqlt_args> will have
2504 { add_drop_table => 1, ignore_constraint_names => 1, ignore_index_names => 1 }
2506 merged with the hash passed in. To disable any of those features, pass in a
2507 hashref like the following
2509 { ignore_constraint_names => 0, # ... other options }
2512 WARNING: You are strongly advised to check all SQL files created, before applying them.
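For example, to produce DDL for two database types together with upgrade diffs
from version 1.0 (the versions, paths and options shown are illustrative only;
this method is normally invoked via L<DBIx::Class::Schema/create_ddl_dir>):

  $schema->storage->create_ddl_dir(
    $schema,
    ['PostgreSQL', 'SQLite'],
    '2.0',
    './sql/',
    '1.0',
    { add_drop_table => 0 },
  );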
2517 sub create_ddl_dir {
2518 my ($self, $schema, $databases, $version, $dir, $preversion, $sqltargs) = @_;
2521 carp "No directory given, using ./\n";
2526 make_path ("$dir") # make_path does not like objects (i.e. Path::Class::Dir)
2528 $self->throw_exception(
2529 "Failed to create '$dir': " . ($! || $@ || 'error unknow')
2533 $self->throw_exception ("Directory '$dir' does not exist\n") unless(-d $dir);
2535 $databases ||= ['MySQL', 'SQLite', 'PostgreSQL'];
2536 $databases = [ $databases ] if(ref($databases) ne 'ARRAY');
2538 my $schema_version = $schema->schema_version || '1.x';
2539 $version ||= $schema_version;
2542 add_drop_table => 1,
2543 ignore_constraint_names => 1,
2544 ignore_index_names => 1,
2548 unless (DBIx::Class::Optional::Dependencies->req_ok_for ('deploy')) {
2549 $self->throw_exception("Can't create a ddl file without " . DBIx::Class::Optional::Dependencies->req_missing_for ('deploy') );
2552 my $sqlt = SQL::Translator->new( $sqltargs );
2554 $sqlt->parser('SQL::Translator::Parser::DBIx::Class');
2555 my $sqlt_schema = $sqlt->translate({ data => $schema })
2556 or $self->throw_exception ($sqlt->error);
2558 foreach my $db (@$databases) {
2560 $sqlt->{schema} = $sqlt_schema;
2561 $sqlt->producer($db);
2564 my $filename = $schema->ddl_filename($db, $version, $dir);
2565 if (-e $filename && ($version eq $schema_version )) {
2566 # if we are dumping the current version, overwrite the DDL
2567 carp "Overwriting existing DDL file - $filename";
2571 my $output = $sqlt->translate;
2573 carp("Failed to translate to $db, skipping. (" . $sqlt->error . ")");
2576 if(!open($file, ">$filename")) {
2577 $self->throw_exception("Can't open $filename for writing ($!)");
2580 print $file $output;
2583 next unless ($preversion);
2585 require SQL::Translator::Diff;
2587 my $prefilename = $schema->ddl_filename($db, $preversion, $dir);
2588 if(!-e $prefilename) {
2589 carp("No previous schema file found ($prefilename)");
2593 my $difffile = $schema->ddl_filename($db, $version, $dir, $preversion);
2595 carp("Overwriting existing diff file - $difffile");
2601 my $t = SQL::Translator->new($sqltargs);
2606 or $self->throw_exception ($t->error);
2608 my $out = $t->translate( $prefilename )
2609 or $self->throw_exception ($t->error);
2611 $source_schema = $t->schema;
2613 $source_schema->name( $prefilename )
2614 unless ( $source_schema->name );
2617 # The "new" style of producers has sane normalization and can support
2618 # diffing a SQL file against a DBIC->SQLT schema. Old style ones don't,
2619 # and we have to diff parsed SQL against parsed SQL.
2620 my $dest_schema = $sqlt_schema;
2622 unless ( "SQL::Translator::Producer::$db"->can('preprocess_schema') ) {
2623 my $t = SQL::Translator->new($sqltargs);
2628 or $self->throw_exception ($t->error);
2630 my $out = $t->translate( $filename )
2631 or $self->throw_exception ($t->error);
2633 $dest_schema = $t->schema;
2635 $dest_schema->name( $filename )
2636 unless $dest_schema->name;
2639 my $diff = SQL::Translator::Diff::schema_diff($source_schema, $db,
2643 if(!open $file, ">$difffile") {
2644 $self->throw_exception("Can't write to $difffile ($!)");
2652 =head2 deployment_statements
2656 =item Arguments: $schema, $type, $version, $directory, $sqlt_args
2660 Returns the statements used by L</deploy> and L<DBIx::Class::Schema/deploy>.
2662 The L<SQL::Translator> (not L<DBI>) database driver name can be explicitly
2663 provided in C<$type>, otherwise the result of L</sqlt_type> is used as default.
2665 C<$directory> is used to return statements from files in a previously created
2666 L</create_ddl_dir> directory and is optional. The filenames are constructed
2667 from L<DBIx::Class::Schema/ddl_filename>, the schema name and the C<$version>.
2669 If no C<$directory> is specified then the statements are constructed on the
2670 fly using L<SQL::Translator> and C<$version> is ignored.
2672 See L<SQL::Translator/METHODS> for a list of values for C<$sqlt_args>.
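For example, to obtain the CREATE statements for the current schema version
without touching the database or the filesystem (the producer name and options
shown are illustrative only):

  my @statements = $schema->storage->deployment_statements(
    $schema, 'SQLite', undef, undef, { no_comments => 1 },
  );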
2676 sub deployment_statements {
2677 my ($self, $schema, $type, $version, $dir, $sqltargs) = @_;
2678 $type ||= $self->sqlt_type;
2679 $version ||= $schema->schema_version || '1.x';
2681 my $filename = $schema->ddl_filename($type, $version, $dir);
2685 open($file, "<$filename")
2686 or $self->throw_exception("Can't open $filename ($!)");
2689 return join('', @rows);
2692 unless (DBIx::Class::Optional::Dependencies->req_ok_for ('deploy') ) {
2693 $self->throw_exception("Can't deploy without a ddl_dir or " . DBIx::Class::Optional::Dependencies->req_missing_for ('deploy') );
2696 # sources needs to be a parser arg, but for simplicity we also allow it at the top level
2698 $sqltargs->{parser_args}{sources} = delete $sqltargs->{sources}
2699 if exists $sqltargs->{sources};
2701 my $tr = SQL::Translator->new(
2702 producer => "SQL::Translator::Producer::${type}",
2704 parser => 'SQL::Translator::Parser::DBIx::Class',
2710 @ret = $tr->translate;
2713 $ret[0] = $tr->translate;
2716 $self->throw_exception( 'Unable to produce deployment statements: ' . $tr->error)
2717 unless (@ret && defined $ret[0]);
2719 return wantarray ? @ret : $ret[0];
2723 my ($self, $schema, $type, $sqltargs, $dir) = @_;
2726 return if($line =~ /^--/);
2728 # next if($line =~ /^DROP/m);
2729 return if($line =~ /^BEGIN TRANSACTION/m);
2730 return if($line =~ /^COMMIT/m);
2731 return if $line =~ /^\s+$/; # skip whitespace only
2732 $self->_query_start($line);
2734 # do a dbh_do cycle here, as we need some error checking in
2735 # place (even though we will ignore errors)
2736 $self->dbh_do (sub { $_[1]->do($line) });
2738 carp qq{$_ (running "${line}")};
2740 $self->_query_end($line);
2742 my @statements = $schema->deployment_statements($type, undef, $dir, { %{ $sqltargs || {} }, no_comments => 1 } );
2743 if (@statements > 1) {
2744 foreach my $statement (@statements) {
2745 $deploy->( $statement );
2748 elsif (@statements == 1) {
2749 foreach my $line ( split(";\n", $statements[0])) {
2755 =head2 datetime_parser
2757 Returns the datetime parser class
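For example, assuming the default parser (see L</datetime_parser_type>), you
could format a L<DateTime> object for use in a query:

  my $dt_string = $schema->storage->datetime_parser->format_datetime(
    DateTime->now
  );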
2761 sub datetime_parser {
2763 return $self->{datetime_parser} ||= do {
2764 $self->build_datetime_parser(@_);
2768 =head2 datetime_parser_type
2770 Defines (returns) the datetime parser class - currently hardwired to
2771 L<DateTime::Format::MySQL>
2775 sub datetime_parser_type { "DateTime::Format::MySQL"; }
2777 =head2 build_datetime_parser
2779 See L</datetime_parser>
2783 sub build_datetime_parser {
2785 my $type = $self->datetime_parser_type(@_);
2786 $self->ensure_class_loaded ($type);
2791 =head2 is_replicating
2793 A boolean that reports if a particular L<DBIx::Class::Storage::DBI> is set to
2794 replicate from a master database. Default is undef, which is the result
2795 returned by databases that don't support replication.
2799 sub is_replicating {
2804 =head2 lag_behind_master
2806 Returns a number that represents a certain amount of lag behind a master db
2807 when a given storage is replicating. The number is database dependent, but
2808 starts at zero and increases with the amount of lag. The default is C<undef>.
2812 sub lag_behind_master {
2816 =head2 relname_to_table_alias
2820 =item Arguments: $relname, $join_count
2824 L<DBIx::Class> uses L<DBIx::Class::Relationship> names as table aliases in queries.
2827 This hook is to allow specific L<DBIx::Class::Storage> drivers to change the
2828 way these aliases are named.
2830 The default behavior is C<< "${relname}_${join_count}" if $join_count > 1 >>,
2831 otherwise C<"$relname">.
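A minimal sketch of an override that truncates long aliases, for instance for
an RDBMS with a short identifier length limit (the limit of 30 below is only an
example):

  sub relname_to_table_alias {
    my $self = shift;

    my $alias = $self->next::method(@_);

    # keep aliases within an (assumed) 30-character identifier limit
    return substr($alias, 0, 30);
  }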
2835 sub relname_to_table_alias {
2836 my ($self, $relname, $join_count) = @_;
2838 my $alias = ($join_count && $join_count > 1 ?
2839 join('_', $relname, $join_count) : $relname);
2844 # The size in bytes to use for DBI's ->bind_param_inout; this is the generic
2845 # version, and it may be necessary to amend or override it for a specific storage
2846 # if such binds are necessary.
2847 sub _max_column_bytesize {
2848 my ($self, $source, $col) = @_;
2850 my $inf = $source->column_info($col);
2851 return $inf->{_max_bytesize} ||= do {
2855 if (my $data_type = $inf->{data_type}) {
2856 $data_type = lc($data_type);
2858 # String/sized-binary types
2859 if ($data_type =~ /^(?:l?(?:var)?char(?:acter)?(?:\s*varying)?
2860 |(?:var)?binary(?:\s*varying)?|raw)\b/x
2862 $max_size = $inf->{size};
2864 # Other charset/unicode types, assume scale of 4
2865 elsif ($data_type =~ /^(?:national\s*character(?:\s*varying)?|nchar
2869 $max_size = $inf->{size} * 4 if $inf->{size};
2872 elsif ($self->_is_lob_type($data_type)) {
2873 # default to longreadlen
2876 $max_size = 100; # for all other (numeric?) datatypes
2880 $max_size ||= $self->_get_dbh->{LongReadLen} || 8000;
2884 # Determine if a data_type is some type of BLOB
2886 my ($self, $data_type) = @_;
2887 $data_type && ($data_type =~ /(?:lob|bfile|text|image|bytea|memo)/i
2888 || $data_type =~ /^long(?:\s*(?:raw|bit\s*varying|varbit|binary
2889 |varchar|character\s*varying|nvarchar
2890 |national\s*character\s*varying))?$/xi);
2897 =head2 DBIx::Class and AutoCommit
2899 DBIx::Class can do some wonderful magic with handling exceptions,
2900 disconnections, and transactions when you use C<< AutoCommit => 1 >>
2901 (the default) combined with C<txn_do> for transaction support.
2903 If you set C<< AutoCommit => 0 >> in your connect info, then you are always
2904 in an assumed transaction between commits, and you're telling us you'd
2905 like to manage that manually. A lot of the magic protections offered by
2906 this module will go away. We can't protect you from exceptions due to database
2907 disconnects because we don't know anything about how to restart your
2908 transactions. You're on your own for handling all sorts of exceptional
2909 cases if you choose the C<< AutoCommit => 0 >> path, just as you would be with raw DBI.
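As a quick illustration (the connection arguments and resultset name are only
examples), the recommended pattern is to keep C<AutoCommit> enabled and scope
your transactions with C<txn_do>:

  my $schema = MySchema->connect($dsn, $user, $pass, { AutoCommit => 1 });

  $schema->txn_do(sub {
    # everything in this coderef runs in a single transaction and is
    # committed or rolled back for you
    $schema->resultset('Artist')->create({ name => 'Example Artist' });
  });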
2915 Matt S. Trout <mst@shadowcatsystems.co.uk>
2917 Andy Grundman <andy@hybridized.org>
2921 You may distribute this code under the same terms as Perl itself.