1 package DBIx::Class::Storage::DBI;
2 # -*- mode: cperl; cperl-indent-level: 2 -*-
7 use base qw/DBIx::Class::Storage::DBIHacks DBIx::Class::Storage/;
10 use Carp::Clan qw/^DBIx::Class|^Try::Tiny/;
12 use DBIx::Class::Storage::DBI::Cursor;
13 use Scalar::Util qw/refaddr weaken reftype blessed/;
14 use List::Util qw/first/;
15 use Data::Dumper::Concise 'Dumper';
16 use Sub::Name 'subname';
18 use File::Path 'make_path';
22 # default cursor class, overridable in connect_info attributes
23 __PACKAGE__->cursor_class('DBIx::Class::Storage::DBI::Cursor');
25 __PACKAGE__->mk_group_accessors('inherited' => qw/sql_maker_class sql_limit_dialect/);
26 __PACKAGE__->sql_maker_class('DBIx::Class::SQLMaker');
28 __PACKAGE__->mk_group_accessors('simple' => qw/
29 _connect_info _dbi_connect_info _dbic_connect_attributes _driver_determined
30 _dbh _dbh_details _conn_pid _sql_maker _sql_maker_opts
31 transaction_depth _dbh_autocommit savepoints
34 # the values for these accessors are picked out (and deleted) from
35 # the attribute hashref passed to connect_info
36 my @storage_options = qw/
37 on_connect_call on_disconnect_call on_connect_do on_disconnect_do
38 disable_sth_caching unsafe auto_savepoint
40 __PACKAGE__->mk_group_accessors('simple' => @storage_options);
43 # capability definitions, using a 2-tiered accessor system
46 # A driver/user may define _use_X, which blindly without any checks says:
47 # "(do not) use this capability", (use_dbms_capability is an "inherited"
50 # If _use_X is undef, _supports_X is then queried. This is a "simple" style
51 # accessor, which in turn calls _determine_supports_X, and stores the return
52 # in a special slot on the storage object, which is wiped every time a $dbh
53 # reconnection takes place (it is not guaranteed that upon reconnection we
54 # will get the same rdbms version). _determine_supports_X does not need to
55 # exist on a driver, as we ->can for it before calling.
57 my @capabilities = (qw/insert_returning placeholders typeless_placeholders join_optimizer/);
58 __PACKAGE__->mk_group_accessors( dbms_capability => map { "_supports_$_" } @capabilities );
59 __PACKAGE__->mk_group_accessors( use_dbms_capability => map { "_use_$_" } (@capabilities ) );
61 # on by default, not strictly a capability (pending rewrite)
62 __PACKAGE__->_use_join_optimizer (1);
63 sub _determine_supports_join_optimizer { 1 };
65 # Each of these methods needs _determine_driver called before itself
66 # in order to function reliably. This is a purely DRY optimization
68 # get_(use)_dbms_capability need to be called on the correct Storage
69 # class, as _use_X may be hardcoded class-wide, and _supports_X calls
70 # _determine_supports_X which obv. needs a correct driver as well
71 my @rdbms_specific_methods = qw/
85 get_use_dbms_capability
92 for my $meth (@rdbms_specific_methods) {
94 my $orig = __PACKAGE__->can ($meth)
95 or die "$meth is not a ::Storage::DBI method!";
98 no warnings qw/redefine/;
99 *{__PACKAGE__ ."::$meth"} = subname $meth => sub {
100 if (not $_[0]->_driver_determined and not $_[0]->{_in_determine_driver}) {
101 $_[0]->_determine_driver;
103 # This for some reason crashes and burns on perl 5.8.1
104 # IFF the method ends up throwing an exception
105 #goto $_[0]->can ($meth);
107 my $cref = $_[0]->can ($meth);
117 DBIx::Class::Storage::DBI - DBI storage handler
121 my $schema = MySchema->connect('dbi:SQLite:my.db');
123 $schema->storage->debug(1);
125 my @stuff = $schema->storage->dbh_do(
127 my ($storage, $dbh, @args) = @_;
128 $dbh->do("DROP TABLE authors");
133 $schema->resultset('Book')->search({
134 written_on => $schema->storage->datetime_parser->format_datetime(DateTime->now)
139 This class represents the connection to an RDBMS via L<DBI>. See
140 L<DBIx::Class::Storage> for general information. This pod only
141 documents DBI-specific methods and behaviors.
148 my $new = shift->next::method(@_);
150 $new->transaction_depth(0);
151 $new->_sql_maker_opts({});
152 $new->_dbh_details({});
153 $new->{savepoints} = [];
154 $new->{_in_dbh_do} = 0;
155 $new->{_dbh_gen} = 0;
157 # read below to see what this does
158 $new->_arm_global_destructor;
163 # This is a hack to work around perl shooting stuff in random
164 # order on exit(). If we do not walk the remaining storage
165 # objects in an END block, there is a *small but real* chance
166 # of a fork()ed child to kill the parent's shared DBI handle,
167 # *before perl reaches the DESTROY in this package*
168 # Yes, it is ugly and effective.
169 # Additionally this registry is used by the CLONE method to
170 # make sure no handles are shared between threads
172 my %seek_and_destroy;
174 sub _arm_global_destructor {
176 my $key = refaddr ($self);
177 $seek_and_destroy{$key} = $self;
178 weaken ($seek_and_destroy{$key});
182 local $?; # just in case the DBI destructor changes it somehow
184 # destroy just the object if not native to this process/thread
185 $_->_verify_pid for (grep
187 values %seek_and_destroy
192 # As per DBI's recommendation, DBIC disconnects all handles as
193 # soon as possible (DBIC will reconnect only on demand from within
195 for (values %seek_and_destroy) {
197 $_->{_dbh_gen}++; # so that existing cursors will drop as well
206 # some databases spew warnings on implicit disconnect
207 local $SIG{__WARN__} = sub {};
210 # this op is necessary, since the very last perl runtime statement
211 # triggers a global destruction shootout, and the $SIG localization
212 # may very well be destroyed before perl actually gets to do the
217 # handle pid changes correctly - do not destroy parent's connection
221 my $pid = $self->_conn_pid;
222 if( defined $pid and $pid != $$ and my $dbh = $self->_dbh ) {
223 $dbh->{InactiveDestroy} = 1;
233 This method is normally called by L<DBIx::Class::Schema/connection>, which
234 encapsulates its argument list in an arrayref before passing it here.
236 The argument list may contain:
242 The same 4-element argument set one would normally pass to
243 L<DBI/connect>, optionally followed by
244 L<extra attributes|/DBIx::Class specific connection attributes>
245 recognized by DBIx::Class:
247 $connect_info_args = [ $dsn, $user, $password, \%dbi_attributes?, \%extra_attributes? ];
251 A single code reference which returns a connected
252 L<DBI database handle|DBI/connect> optionally followed by
253 L<extra attributes|/DBIx::Class specific connection attributes> recognized
256 $connect_info_args = [ sub { DBI->connect (...) }, \%extra_attributes? ];
260 A single hashref with all the attributes and the dsn/user/password
263 $connect_info_args = [{
271 $connect_info_args = [{
272 dbh_maker => sub { DBI->connect (...) },
277 This is particularly useful for L<Catalyst> based applications, allowing the
278 following config (L<Config::General> style):
283 dsn dbi:mysql:database=test
290 The C<dsn>/C<user>/C<password> combination can be substituted by the
291 C<dbh_maker> key whose value is a coderef that returns a connected
292 L<DBI database handle|DBI/connect>
296 Please note that the L<DBI> docs recommend that you always explicitly
297 set C<AutoCommit> to either I<0> or I<1>. L<DBIx::Class> further
298 recommends that it be set to I<1>, and that you perform transactions
299 via our L<DBIx::Class::Schema/txn_do> method. L<DBIx::Class> will set it
300 to I<1> if you do not explicitly set it to zero. This is the default
301 for most DBDs. See L</DBIx::Class and AutoCommit> for details.
303 =head3 DBIx::Class specific connection attributes
305 In addition to the standard L<DBI|DBI/ATTRIBUTES_COMMON_TO_ALL_HANDLES>
306 L<connection|DBI/Database_Handle_Attributes> attributes, DBIx::Class recognizes
307 the following connection options. These options can be mixed in with your other
308 L<DBI> connection attributes, or placed in a separate hashref
309 (C<\%extra_attributes>) as shown above.
311 Every time C<connect_info> is invoked, any previous settings for
312 these options will be cleared before setting the new ones, regardless of
313 whether any options are specified in the new C<connect_info>.
320 Specifies things to do immediately after connecting or re-connecting to
321 the database. Its value may contain:
327 This contains one SQL statement to execute.
329 =item an array reference
331 This contains SQL statements to execute in order. Each element contains
332 a string or a code reference that returns a string.
334 =item a code reference
336 This contains some code to execute. Unlike code references within an
337 array reference, its return value is ignored.
341 =item on_disconnect_do
343 Takes arguments in the same form as L</on_connect_do> and executes them
344 immediately before disconnecting from the database.
346 Note, this only runs if you explicitly call L</disconnect> on the
349 =item on_connect_call
351 A more generalized form of L</on_connect_do> that calls the specified
352 C<connect_call_METHOD> methods in your storage driver.
354 on_connect_do => 'select 1'
358 on_connect_call => [ [ do_sql => 'select 1' ] ]
360 Its values may contain:
366 Will call the C<connect_call_METHOD> method.
368 =item a code reference
370 Will execute C<< $code->($storage) >>
372 =item an array reference
374 Each value can be a method name or code reference.
376 =item an array of arrays
378 For each array, the first item is taken to be the C<connect_call_> method name
379 or code reference, and the rest are parameters to it.
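For example, combining the forms documented above (a minimal sketch - the actual SQL and the use of the predefined C<datetime_setup> call are purely illustrative):

  on_connect_call => [
    'datetime_setup',                # calls connect_call_datetime_setup
    [ do_sql => 'SET NAMES utf8' ],  # calls connect_call_do_sql('SET NAMES utf8')
  ],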
383 Some predefined storage methods you may use:
389 Executes a SQL string or a code reference that returns a SQL string. This is
390 what L</on_connect_do> and L</on_disconnect_do> use.
398 Will execute the scalar as SQL.
402 Taken to be arguments to L<DBI/do>, the SQL string optionally followed by the
403 attributes hashref and bind values.
405 =item a code reference
407 Will execute C<< $code->($storage) >> and execute the return array refs as
414 Execute any statements necessary to initialize the database session to return
415 and accept datetime/timestamp values used with
416 L<DBIx::Class::InflateColumn::DateTime>.
418 Only necessary for some databases, see your specific storage driver for
419 implementation details.
423 =item on_disconnect_call
425 Takes arguments in the same form as L</on_connect_call> and executes them
426 immediately before disconnecting from the database.
428 Calls the C<disconnect_call_METHOD> methods as opposed to the
429 C<connect_call_METHOD> methods called by L</on_connect_call>.
431 Note, this only runs if you explicitly call L</disconnect> on the
434 =item disable_sth_caching
436 If set to a true value, this option will disable the caching of
437 statement handles via L<DBI/prepare_cached>.
441 Sets a specific SQL::Abstract::Limit-style limit dialect, overriding the
442 default L</sql_limit_dialect> setting of the storage (if any). For a list
443 of available limit dialects see L<DBIx::Class::SQLMaker::LimitDialects>.
447 Specifies what characters to use to quote table and column names.
449 C<quote_char> expects either a single character, in which case it
450 is placed on either side of the table/column name, or an arrayref of length
451 2 in which case the table/column name is placed between the elements.
453 For example under MySQL you should use C<< quote_char => '`' >>, and for
454 SQL Server you should use C<< quote_char => [qw/[ ]/] >>.
458 This parameter is only useful in conjunction with C<quote_char>, and is used to
459 specify the character that separates elements (schemas, tables, columns) from
460 each other. If unspecified it defaults to the most commonly used C<.>.
464 This Storage driver normally installs its own C<HandleError>, sets
465 C<RaiseError> and C<ShowErrorStatement> on, and sets C<PrintError> off on
466 all database handles, including those supplied by a coderef. It does this
467 so that it can have consistent and useful error behavior.
469 If you set this option to a true value, Storage will not do its usual
470 modifications to the database handle's attributes, and instead relies on
471 the settings in your connect_info DBI options (or the values you set in
472 your connection coderef, in the case that you are connecting via coderef).
474 Note that your custom settings can cause Storage to malfunction,
475 especially if you set a C<HandleError> handler that suppresses exceptions
476 and/or disable C<RaiseError>.
480 If this option is true, L<DBIx::Class> will use savepoints when nesting
481 transactions, making it possible to recover from failure in the inner
482 transaction without having to abort all outer transactions.
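For example (a sketch - the connection details and resultset are illustrative, and the underlying driver must implement savepoints), a failed inner L<DBIx::Class::Schema/txn_do> is rolled back to its savepoint without aborting the outer transaction:

  my $schema = MySchema->connect($dsn, $user, $pass, { auto_savepoint => 1 });

  $schema->txn_do(sub {
    $schema->resultset('Artist')->create({ name => 'Outer Artist' });

    eval {
      $schema->txn_do(sub {
        $schema->resultset('Artist')->create({ name => 'Inner Artist' });
        die "inner failure\n";   # only the inner work is rolled back
      });
    };

    # 'Outer Artist' is still part of the enclosing transaction here
  });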
486 Use this argument to supply a cursor class other than the default
487 L<DBIx::Class::Storage::DBI::Cursor>.
491 Some real-life examples of arguments to L</connect_info> and
492 L<DBIx::Class::Schema/connect>
494 # Simple SQLite connection
495 ->connect_info([ 'dbi:SQLite:./foo.db' ]);
498 ->connect_info([ sub { DBI->connect(...) } ]);
500 # Connect via subref in hashref
502 dbh_maker => sub { DBI->connect(...) },
503 on_connect_do => 'alter session ...',
506 # A bit more complicated
513 { quote_char => q{"} },
517 # Equivalent to the previous example
523 { AutoCommit => 1, quote_char => q{"}, name_sep => q{.} },
527 # Same, but with hashref as argument
528 # See _normalize_connect_info for explanation
531 dsn => 'dbi:Pg:dbname=foo',
533 password => 'my_pg_password',
540 # Subref + DBIx::Class-specific connection options
543 sub { DBI->connect(...) },
547 on_connect_do => ['SET search_path TO myschema,otherschema,public'],
548 disable_sth_caching => 1,
558 my ($self, $info) = @_;
560 return $self->_connect_info if !$info;
562 $self->_connect_info($info); # copy for _connect_info
564 $info = $self->_normalize_connect_info($info)
565 if ref $info eq 'ARRAY';
567 for my $storage_opt (keys %{ $info->{storage_options} }) {
568 my $value = $info->{storage_options}{$storage_opt};
570 $self->$storage_opt($value);
573 # Kill sql_maker/_sql_maker_opts, so we get a fresh one with only
574 # the new set of options
575 $self->_sql_maker(undef);
576 $self->_sql_maker_opts({});
578 for my $sql_maker_opt (keys %{ $info->{sql_maker_options} }) {
579 my $value = $info->{sql_maker_options}{$sql_maker_opt};
581 $self->_sql_maker_opts->{$sql_maker_opt} = $value;
585 %{ $self->_default_dbi_connect_attributes || {} },
586 %{ $info->{attributes} || {} },
589 my @args = @{ $info->{arguments} };
591 $self->_dbi_connect_info([@args,
592 %attrs && !(ref $args[0] eq 'CODE') ? \%attrs : ()]);
595 # save the attributes in a separate accessor so they are always
596 # introspectable, even in case of a CODE $dbhmaker
597 $self->_dbic_connect_attributes (\%attrs);
599 return $self->_connect_info;
602 sub _normalize_connect_info {
603 my ($self, $info_arg) = @_;
606 my @args = @$info_arg; # take a shallow copy for further mutilation
608 # combine/pre-parse arguments depending on invocation style
611 if (ref $args[0] eq 'CODE') { # coderef with optional \%extra_attributes
612 %attrs = %{ $args[1] || {} };
615 elsif (ref $args[0] eq 'HASH') { # single hashref (i.e. Catalyst config)
616 %attrs = %{$args[0]};
618 if (my $code = delete $attrs{dbh_maker}) {
621 my @ignored = grep { delete $attrs{$_} } (qw/dsn user password/);
624 'Attribute(s) %s in connect_info were ignored, as they can not be applied '
625 . "to the result of 'dbh_maker'",
627 join (', ', map { "'$_'" } (@ignored) ),
632 @args = delete @attrs{qw/dsn user password/};
635 else { # otherwise assume dsn/user/password + \%attrs + \%extra_attrs
637 % { $args[3] || {} },
638 % { $args[4] || {} },
640 @args = @args[0,1,2];
643 $info{arguments} = \@args;
645 my @storage_opts = grep exists $attrs{$_},
646 @storage_options, 'cursor_class';
648 @{ $info{storage_options} }{@storage_opts} =
649 delete @attrs{@storage_opts} if @storage_opts;
651 my @sql_maker_opts = grep exists $attrs{$_},
652 qw/limit_dialect quote_char name_sep/;
654 @{ $info{sql_maker_options} }{@sql_maker_opts} =
655 delete @attrs{@sql_maker_opts} if @sql_maker_opts;
657 $info{attributes} = \%attrs if %attrs;
662 sub _default_dbi_connect_attributes {
672 This method is deprecated in favour of setting via L</connect_info>.
676 =head2 on_disconnect_do
678 This method is deprecated in favour of setting via L</connect_info>.
682 sub _parse_connect_do {
683 my ($self, $type) = @_;
685 my $val = $self->$type;
686 return () if not defined $val;
691 push @res, [ 'do_sql', $val ];
692 } elsif (ref($val) eq 'CODE') {
694 } elsif (ref($val) eq 'ARRAY') {
695 push @res, map { [ 'do_sql', $_ ] } @$val;
697 $self->throw_exception("Invalid type for $type: ".ref($val));
705 Arguments: ($subref | $method_name), @extra_coderef_args?
707 Execute the given $subref or $method_name using the new exception-based
708 connection management.
710 The first two arguments will be the storage object that C<dbh_do> was called
711 on and a database handle to use. Any additional arguments will be passed
712 verbatim to the called subref as arguments 2 and onwards.
714 Using this (instead of $self->_dbh or $self->dbh) ensures correct
715 exception handling and reconnection (or failover in future subclasses).
717 Your subref should have no side-effects outside of the database, as
718 there is the potential for your subref to be partially double-executed
719 if the database connection was stale/dysfunctional.
723 my @stuff = $schema->storage->dbh_do(
725 my ($storage, $dbh, @cols) = @_;
726 my $cols = join(q{, }, @cols);
727 $dbh->selectrow_array("SELECT $cols FROM foo");
738 my $dbh = $self->_get_dbh;
740 return $self->$code($dbh, @_)
741 if ( $self->{_in_dbh_do} || $self->{transaction_depth} );
743 local $self->{_in_dbh_do} = 1;
745 # take a ref instead of a copy, to preserve coderef @_ aliasing semantics
748 $self->$code ($dbh, @$args);
750 $self->throw_exception($_) if $self->connected;
752 # We were not connected - reconnect and retry, but let any
753 # exception fall right through this time
754 carp "Retrying $code after catching disconnected exception: $_"
755 if $ENV{DBIC_DBIRETRY_DEBUG};
757 $self->_populate_dbh;
758 $self->$code($self->_dbh, @$args);
762 # This is basically a blend of dbh_do above and DBIx::Class::Storage::txn_do.
763 # It also informs dbh_do to bypass itself while under the direction of txn_do,
764 # via $self->{_in_dbh_do} (this saves some redundant eval and errorcheck, etc)
769 ref $coderef eq 'CODE' or $self->throw_exception
770 ('$coderef must be a CODE reference');
772 local $self->{_in_dbh_do} = 1;
775 my $want_array = wantarray;
781 # take a ref instead of a copy, to preserve coderef @_ aliasing semantics
786 my $txn_start_depth = $self->transaction_depth;
788 @result = $coderef->(@$args);
790 elsif(defined $want_array) {
791 $result[0] = $coderef->(@$args);
797 my $delta_txn = $txn_start_depth - $self->transaction_depth;
798 if ($delta_txn == 0) {
801 elsif ($delta_txn != 1) {
802 # an off-by-one would mean we fired a rollback
803 carp "Unexpected reduction of transaction depth by $delta_txn after execution of $coderef";
809 if(! defined $exception) { return $want_array ? @result : $result[0] }
811 if($self->transaction_depth > 1 || $tried++ || $self->connected) {
812 my $rollback_exception;
813 try { $self->txn_rollback } catch { $rollback_exception = shift };
814 if(defined $rollback_exception) {
815 my $exception_class = "DBIx::Class::Storage::NESTED_ROLLBACK_EXCEPTION";
816 $self->throw_exception($exception) # propagate nested rollback
817 if $rollback_exception =~ /$exception_class/;
819 $self->throw_exception(
820 "Transaction aborted: ${exception}. "
821 . "Rollback failed: ${rollback_exception}"
824 $self->throw_exception($exception)
827 # We were not connected, and this was the first try - reconnect and retry
829 carp "Retrying $coderef after catching disconnected exception: $exception"
830 if $ENV{DBIC_TXNRETRY_DEBUG};
831 $self->_populate_dbh;
837 Our C<disconnect> method also performs a rollback first if the
838 database is not in C<AutoCommit> mode.
848 push @actions, ( $self->on_disconnect_call || () );
849 push @actions, $self->_parse_connect_do ('on_disconnect_do');
851 $self->_do_connection_actions(disconnect_call_ => $_) for @actions;
853 $self->_dbh_rollback unless $self->_dbh_autocommit;
855 %{ $self->_dbh->{CachedKids} } = ();
856 $self->_dbh->disconnect;
862 =head2 with_deferred_fk_checks
866 =item Arguments: C<$coderef>
868 =item Return Value: The return value of $coderef
872 Storage specific method to run the code ref with FK checks deferred or,
873 in MySQL's case, disabled entirely.
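A usage sketch (the resultsets, columns and values are illustrative):

  $schema->storage->with_deferred_fk_checks(sub {
    # statements here may temporarily violate FK constraints,
    # e.g. a child row inserted before its parent row
    $schema->resultset('Child')->create({ id => 1, parent_id => 2 });
    $schema->resultset('Parent')->create({ id => 2 });
  });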
877 # Storage subclasses should override this
878 sub with_deferred_fk_checks {
879 my ($self, $sub) = @_;
887 =item Arguments: none
889 =item Return Value: 1|0
893 Verifies that the current database handle is active and ready to execute
894 an SQL statement (e.g. the connection did not get stale, server is still
895 answering, etc.) This method is used internally by L</dbh>.
901 return 0 unless $self->_seems_connected;
904 local $self->_dbh->{RaiseError} = 1;
909 sub _seems_connected {
914 my $dbh = $self->_dbh
917 return $dbh->FETCH('Active');
923 my $dbh = $self->_dbh or return 0;
928 sub ensure_connected {
931 unless ($self->connected) {
932 $self->_populate_dbh;
938 Returns a C<$dbh> - a database handle of class L<DBI>. The returned handle
939 is guaranteed to be healthy by implicitly calling L</connected>, and if
940 necessary performing a reconnection before returning. Keep in mind that this
941 is very B<expensive> on some database engines. Consider using L</dbh_do>
949 if (not $self->_dbh) {
950 $self->_populate_dbh;
952 $self->ensure_connected;
957 # this is the internal "get dbh or connect (don't check)" method
961 $self->_populate_dbh unless $self->_dbh;
967 unless ($self->_sql_maker) {
968 my $sql_maker_class = $self->sql_maker_class;
969 $self->ensure_class_loaded ($sql_maker_class);
971 my %opts = %{$self->_sql_maker_opts||{}};
975 $self->sql_limit_dialect
978 my $s_class = (ref $self) || $self;
980 "Your storage class ($s_class) does not set sql_limit_dialect and you "
981 . 'have not supplied an explicit limit_dialect in your connection_info. '
982 . 'DBIC will attempt to use the GenericSubQ dialect, which works on most '
983 . 'databases but can be (and often is) painfully slow.'
990 $self->_sql_maker($sql_maker_class->new(
992 array_datatypes => 1,
993 limit_dialect => $dialect,
998 return $self->_sql_maker;
1001 # nothing to do by default
1008 my @info = @{$self->_dbi_connect_info || []};
1009 $self->_dbh(undef); # in case ->connected failed we might get sent here
1010 $self->_dbh_details({}); # reset everything we know
1012 $self->_dbh($self->_connect(@info));
1014 $self->_conn_pid($$) if $^O ne 'MSWin32'; # on win32 these are in fact threads
1016 $self->_determine_driver;
1018 # Always set the transaction depth on connect, since
1019 # there is no transaction in progress by definition
1020 $self->{transaction_depth} = $self->_dbh_autocommit ? 0 : 1;
1022 $self->_run_connection_actions unless $self->{_in_determine_driver};
1025 sub _run_connection_actions {
1029 push @actions, ( $self->on_connect_call || () );
1030 push @actions, $self->_parse_connect_do ('on_connect_do');
1032 $self->_do_connection_actions(connect_call_ => $_) for @actions;
1037 sub set_use_dbms_capability {
1038 $_[0]->set_inherited ($_[1], $_[2]);
1041 sub get_use_dbms_capability {
1042 my ($self, $capname) = @_;
1044 my $use = $self->get_inherited ($capname);
1047 : do { $capname =~ s/^_use_/_supports_/; $self->get_dbms_capability ($capname) }
1051 sub set_dbms_capability {
1052 $_[0]->_dbh_details->{capability}{$_[1]} = $_[2];
1055 sub get_dbms_capability {
1056 my ($self, $capname) = @_;
1058 my $cap = $self->_dbh_details->{capability}{$capname};
1060 unless (defined $cap) {
1061 if (my $meth = $self->can ("_determine$capname")) {
1062 $cap = $self->$meth ? 1 : 0;
1068 $self->set_dbms_capability ($capname, $cap);
1078 unless ($info = $self->_dbh_details->{info}) {
1082 my $server_version = try { $self->_get_server_version };
1084 if (defined $server_version) {
1085 $info->{dbms_version} = $server_version;
1087 my ($numeric_version) = $server_version =~ /^([\d\.]+)/;
1088 my @verparts = split (/\./, $numeric_version);
1094 # consider only up to 3 version parts, iff not more than 3 digits
1096 while (@verparts && @use_parts < 3) {
1097 my $p = shift @verparts;
1099 push @use_parts, $p;
1101 push @use_parts, 0 while @use_parts < 3;
1103 $info->{normalized_dbms_version} = sprintf "%d.%03d%03d", @use_parts;
1107 $self->_dbh_details->{info} = $info;
1113 sub _get_server_version {
1114 shift->_get_dbh->get_info(18);
1117 sub _determine_driver {
1120 if ((not $self->_driver_determined) && (not $self->{_in_determine_driver})) {
1121 my $started_connected = 0;
1122 local $self->{_in_determine_driver} = 1;
1124 if (ref($self) eq __PACKAGE__) {
1126 if ($self->_dbh) { # we are connected
1127 $driver = $self->_dbh->{Driver}{Name};
1128 $started_connected = 1;
1130 # if connect_info is a CODEREF, we have no choice but to connect
1131 if (ref $self->_dbi_connect_info->[0] &&
1132 reftype $self->_dbi_connect_info->[0] eq 'CODE') {
1133 $self->_populate_dbh;
1134 $driver = $self->_dbh->{Driver}{Name};
1137 # try to use dsn to not require being connected, the driver may still
1138 # force a connection in _rebless to determine version
1139 # (dsn may not be supplied at all if all we do is make a mock-schema)
1140 my $dsn = $self->_dbi_connect_info->[0] || $ENV{DBI_DSN} || '';
1141 ($driver) = $dsn =~ /dbi:([^:]+):/i;
1142 $driver ||= $ENV{DBI_DRIVER};
1147 my $storage_class = "DBIx::Class::Storage::DBI::${driver}";
1148 if ($self->load_optional_class($storage_class)) {
1149 mro::set_mro($storage_class, 'c3');
1150 bless $self, $storage_class;
1156 $self->_driver_determined(1);
1158 $self->_init; # run driver-specific initializations
1160 $self->_run_connection_actions
1161 if !$started_connected && defined $self->_dbh;
1165 sub _do_connection_actions {
1167 my $method_prefix = shift;
1170 if (not ref($call)) {
1171 my $method = $method_prefix . $call;
1173 } elsif (ref($call) eq 'CODE') {
1175 } elsif (ref($call) eq 'ARRAY') {
1176 if (ref($call->[0]) ne 'ARRAY') {
1177 $self->_do_connection_actions($method_prefix, $_) for @$call;
1179 $self->_do_connection_actions($method_prefix, @$_) for @$call;
1182 $self->throw_exception (sprintf ("Don't know how to process connection actions of type '%s'", ref($call)) );
1188 sub connect_call_do_sql {
1190 $self->_do_query(@_);
1193 sub disconnect_call_do_sql {
1195 $self->_do_query(@_);
1198 # override in db-specific backend when necessary
1199 sub connect_call_datetime_setup { 1 }
1202 my ($self, $action) = @_;
1204 if (ref $action eq 'CODE') {
1205 $action = $action->($self);
1206 $self->_do_query($_) foreach @$action;
1209 # Most debuggers expect ($sql, @bind), so we need to exclude
1210 # the attribute hash which is the second argument to $dbh->do;
1211 # furthermore the bind values are usually to be presented
1212 # as named arrayref pairs, so wrap those here too
1213 my @do_args = (ref $action eq 'ARRAY') ? (@$action) : ($action);
1214 my $sql = shift @do_args;
1215 my $attrs = shift @do_args;
1216 my @bind = map { [ undef, $_ ] } @do_args;
1218 $self->_query_start($sql, @bind);
1219 $self->_get_dbh->do($sql, $attrs, @do_args);
1220 $self->_query_end($sql, @bind);
1227 my ($self, @info) = @_;
1229 $self->throw_exception("You failed to provide any connection info")
1232 my ($old_connect_via, $dbh);
1234 if ($INC{'Apache/DBI.pm'} && $ENV{MOD_PERL}) {
1235 $old_connect_via = $DBI::connect_via;
1236 $DBI::connect_via = 'connect';
1240 if(ref $info[0] eq 'CODE') {
1241 $dbh = $info[0]->();
1244 $dbh = DBI->connect(@info);
1251 unless ($self->unsafe) {
1253 # this odd anonymous coderef dereference is in fact really
1254 # necessary to avoid the unwanted effect described in perl5
1257 my $weak_self = $_[0];
1260 $_[1]->{HandleError} = sub {
1262 $weak_self->throw_exception("DBI Exception: $_[0]");
1265 # the handler may be invoked by something totally out of
1267 croak ("DBI Exception (unhandled by DBIC, ::Schema GCed): $_[0]");
1272 $dbh->{ShowErrorStatement} = 1;
1273 $dbh->{RaiseError} = 1;
1274 $dbh->{PrintError} = 0;
1278 $self->throw_exception("DBI Connection failed: $_")
1281 $DBI::connect_via = $old_connect_via if $old_connect_via;
1284 $self->_dbh_autocommit($dbh->{AutoCommit});
1289 my ($self, $name) = @_;
1291 $name = $self->_svp_generate_name
1292 unless defined $name;
1294 $self->throw_exception ("You can't use savepoints outside a transaction")
1295 if $self->{transaction_depth} == 0;
1297 $self->throw_exception ("Your Storage implementation doesn't support savepoints")
1298 unless $self->can('_svp_begin');
1300 push @{ $self->{savepoints} }, $name;
1302 $self->debugobj->svp_begin($name) if $self->debug;
1304 return $self->_svp_begin($name);
1308 my ($self, $name) = @_;
1310 $self->throw_exception ("You can't use savepoints outside a transaction")
1311 if $self->{transaction_depth} == 0;
1313 $self->throw_exception ("Your Storage implementation doesn't support savepoints")
1314 unless $self->can('_svp_release');
1316 if (defined $name) {
1317 $self->throw_exception ("Savepoint '$name' does not exist")
1318 unless grep { $_ eq $name } @{ $self->{savepoints} };
1320 # Dig through the stack until we find the one we are releasing. This keeps
1321 # the stack up to date.
1324 do { $svp = pop @{ $self->{savepoints} } } while $svp ne $name;
1326 $name = pop @{ $self->{savepoints} };
1329 $self->debugobj->svp_release($name) if $self->debug;
1331 return $self->_svp_release($name);
1335 my ($self, $name) = @_;
1337 $self->throw_exception ("You can't use savepoints outside a transaction")
1338 if $self->{transaction_depth} == 0;
1340 $self->throw_exception ("Your Storage implementation doesn't support savepoints")
1341 unless $self->can('_svp_rollback');
1343 if (defined $name) {
1344 # If they passed us a name, verify that it exists in the stack
1345 unless(grep({ $_ eq $name } @{ $self->{savepoints} })) {
1346 $self->throw_exception("Savepoint '$name' does not exist!");
1349 # Dig through the stack until we find the one we are rolling back to. This keeps
1350 # the stack up to date.
1351 while(my $s = pop(@{ $self->{savepoints} })) {
1352 last if($s eq $name);
1354 # Add the savepoint back to the stack, as a rollback doesn't remove the
1355 # named savepoint, only everything after it.
1356 push(@{ $self->{savepoints} }, $name);
1358 # We'll assume they want to rollback to the last savepoint
1359 $name = $self->{savepoints}->[-1];
1362 $self->debugobj->svp_rollback($name) if $self->debug;
1364 return $self->_svp_rollback($name);
1367 sub _svp_generate_name {
1369 return 'savepoint_'.scalar(@{ $self->{'savepoints'} });
1375 # this means we have not yet connected and do not know the AC status
1376 # (e.g. coderef $dbh)
1377 if (! defined $self->_dbh_autocommit) {
1378 $self->ensure_connected;
1380 # otherwise re-connect on pid changes, so
1381 # that the txn_depth is adjusted properly
1382 # the lightweight _get_dbh is good enough here
1383 # (only superficial handle check, no pings)
1388 if($self->transaction_depth == 0) {
1389 $self->debugobj->txn_begin()
1391 $self->_dbh_begin_work;
1393 elsif ($self->auto_savepoint) {
1396 $self->{transaction_depth}++;
1399 sub _dbh_begin_work {
1402 # if the user is utilizing txn_do - good for him, otherwise we need to
1403 # ensure that the $dbh is healthy on BEGIN.
1404 # We do this via ->dbh_do instead of ->dbh, so that the ->dbh "ping"
1405 # will be replaced by a failure of begin_work itself (which will be
1406 # then retried on reconnect)
1407 if ($self->{_in_dbh_do}) {
1408 $self->_dbh->begin_work;
1410 $self->dbh_do(sub { $_[1]->begin_work });
1416 if ($self->{transaction_depth} == 1) {
1417 $self->debugobj->txn_commit()
1420 $self->{transaction_depth} = 0
1421 if $self->_dbh_autocommit;
1423 elsif($self->{transaction_depth} > 1) {
1424 $self->{transaction_depth}--;
1426 if $self->auto_savepoint;
1429 $self->throw_exception( 'Refusing to commit without a started transaction' );
1435 my $dbh = $self->_dbh
1436 or $self->throw_exception('cannot COMMIT on a disconnected handle');
1442 my $dbh = $self->_dbh;
1444 if ($self->{transaction_depth} == 1) {
1445 $self->debugobj->txn_rollback()
1447 $self->{transaction_depth} = 0
1448 if $self->_dbh_autocommit;
1449 $self->_dbh_rollback;
1451 elsif($self->{transaction_depth} > 1) {
1452 $self->{transaction_depth}--;
1453 if ($self->auto_savepoint) {
1454 $self->svp_rollback;
1459 die DBIx::Class::Storage::NESTED_ROLLBACK_EXCEPTION->new;
1463 my $exception_class = "DBIx::Class::Storage::NESTED_ROLLBACK_EXCEPTION";
1465 if ($_ !~ /$exception_class/) {
1466 # ensure that a failed rollback resets the transaction depth
1467 $self->{transaction_depth} = $self->_dbh_autocommit ? 0 : 1;
1470 $self->throw_exception($_)
1476 my $dbh = $self->_dbh
1477 or $self->throw_exception('cannot ROLLBACK on a disconnected handle');
1481 # This used to be the top-half of _execute. It was split out to make it
1482 # easier to override in NoBindVars without duping the rest. It takes up
1483 # all of _execute's args, and emits $sql, @bind.
1484 sub _prep_for_execute {
1485 my ($self, $op, $extra_bind, $ident, $args) = @_;
1487 if( blessed $ident && $ident->isa("DBIx::Class::ResultSource") ) {
1488 $ident = $ident->from();
1491 my ($sql, @bind) = $self->sql_maker->$op($ident, @$args);
1494 map { ref $_ eq 'ARRAY' ? $_ : [ '!!dummy', $_ ] } @$extra_bind)
1496 return ($sql, \@bind);
1500 sub _fix_bind_params {
1501 my ($self, @bind) = @_;
1503 ### Turn @bind from something like this:
1504 ### ( [ "artist", 1 ], [ "cdid", 1, 3 ] )
1506 ### ( "'1'", "'1'", "'3'" )
1509 if ( defined( $_ && $_->[1] ) ) {
1510 map { qq{'$_'}; } @{$_}[ 1 .. $#$_ ];
1517 my ( $self, $sql, @bind ) = @_;
1519 if ( $self->debug ) {
1520 @bind = $self->_fix_bind_params(@bind);
1522 $self->debugobj->query_start( $sql, @bind );
1527 my ( $self, $sql, @bind ) = @_;
1529 if ( $self->debug ) {
1530 @bind = $self->_fix_bind_params(@bind);
1531 $self->debugobj->query_end( $sql, @bind );
1536 my ($self, $dbh, $op, $extra_bind, $ident, $bind_attributes, @args) = @_;
1538 my ($sql, $bind) = $self->_prep_for_execute($op, $extra_bind, $ident, \@args);
1540 $self->_query_start( $sql, @$bind );
1542 my $sth = $self->sth($sql,$op);
1544 my $placeholder_index = 1;
1546 foreach my $bound (@$bind) {
1547 my $attributes = {};
1548 my($column_name, @data) = @$bound;
1550 if ($bind_attributes) {
1551 $attributes = $bind_attributes->{$column_name}
1552 if defined $bind_attributes->{$column_name};
1555 foreach my $data (@data) {
1556 my $ref = ref $data;
1557 $data = $ref && $ref ne 'ARRAY' ? ''.$data : $data; # stringify args (except arrayrefs)
1559 $sth->bind_param($placeholder_index, $data, $attributes);
1560 $placeholder_index++;
1564 # Can this fail without throwing an exception anyways???
1565 my $rv = $sth->execute();
1566 $self->throw_exception(
1567 $sth->errstr || $sth->err || 'Unknown error: execute() returned false, but error flags were not set...'
1570 $self->_query_end( $sql, @$bind );
1572 return (wantarray ? ($rv, $sth, @$bind) : $rv);
1577 $self->dbh_do('_dbh_execute', @_); # retry over disconnects
1580 sub _prefetch_autovalues {
1581 my ($self, $source, $to_insert) = @_;
1583 my $colinfo = $source->columns_info;
1586 for my $col (keys %$colinfo) {
1588 $colinfo->{$col}{auto_nextval}
1591 ! exists $to_insert->{$col}
1593 ref $to_insert->{$col} eq 'SCALAR'
1596 $values{$col} = $self->_sequence_fetch(
1598 ( $colinfo->{$col}{sequence} ||=
1599 $self->_dbh_get_autoinc_seq($self->_get_dbh, $source, $col)
1609 my ($self, $source, $to_insert) = @_;
1611 my $prefetched_values = $self->_prefetch_autovalues($source, $to_insert);
1614 $to_insert = { %$to_insert, %$prefetched_values };
1616 # list of primary keys we try to fetch from the database
1617 # both non-existent values and scalarrefs are considered
1620 { $_ => scalar keys %fetch_pks } # so we can preserve order for prettiness
1622 { ! exists $to_insert->{$_} or ref $to_insert->{$_} eq 'SCALAR' }
1623 $source->primary_columns
1627 if ($self->_use_insert_returning) {
1629 # retain order as declared in the resultsource
1630 for (sort { $fetch_pks{$a} <=> $fetch_pks{$b} } keys %fetch_pks ) {
1631 push @{$sqla_opts->{returning}}, $_;
1635 my $bind_attributes = $self->source_bind_attributes($source);
1637 my ($rv, $sth) = $self->_execute('insert' => [], $source, $bind_attributes, $to_insert, $sqla_opts);
1641 if (my $retlist = $sqla_opts->{returning}) {
1642 my @ret_vals = try {
1643 local $SIG{__WARN__} = sub {};
1644 my @r = $sth->fetchrow_array;
1649 @returned_cols{@$retlist} = @ret_vals if @ret_vals;
1652 return { %$prefetched_values, %returned_cols };
1656 ## Currently it is assumed that all values passed will be "normal", i.e. not
1657 ## scalar refs, or at least all of the same type as the first set; the statement
1658 ## is only prepped once.
1660 my ($self, $source, $cols, $data) = @_;
1663 @colvalues{@$cols} = (0..$#$cols);
1665 for my $i (0..$#$cols) {
1666 my $first_val = $data->[0][$i];
1667 next unless ref $first_val eq 'SCALAR';
1669 $colvalues{ $cols->[$i] } = $first_val;
1672 # check for bad data and stringify stringifiable objects
1673 my $bad_slice = sub {
1674 my ($msg, $col_idx, $slice_idx) = @_;
1675 $self->throw_exception(sprintf "%s for column '%s' in populate slice:\n%s",
1679 local $Data::Dumper::Maxdepth = 1; # don't dump objects, if any
1681 map { $cols->[$_] => $data->[$slice_idx][$_] } (0 .. $#$cols)
1687 for my $datum_idx (0..$#$data) {
1688 my $datum = $data->[$datum_idx];
1690 for my $col_idx (0..$#$cols) {
1691 my $val = $datum->[$col_idx];
1692 my $sqla_bind = $colvalues{ $cols->[$col_idx] };
1693 my $is_literal_sql = (ref $sqla_bind) eq 'SCALAR';
1695 if ($is_literal_sql) {
1697 $bad_slice->('bind found where literal SQL expected', $col_idx, $datum_idx);
1699 elsif ((my $reftype = ref $val) ne 'SCALAR') {
1700 $bad_slice->("$reftype reference found where literal SQL expected",
1701 $col_idx, $datum_idx);
1703 elsif ($$val ne $$sqla_bind){
1704 $bad_slice->("inconsistent literal SQL value, expecting: '$$sqla_bind'",
1705 $col_idx, $datum_idx);
1708 elsif (my $reftype = ref $val) {
1710 if (overload::Method($val, '""')) {
1711 $datum->[$col_idx] = "".$val;
1714 $bad_slice->("$reftype reference found where bind expected",
1715 $col_idx, $datum_idx);
1721 my ($sql, $bind) = $self->_prep_for_execute (
1722 'insert', undef, $source, [\%colvalues]
1726 # if the bindlist is empty - make sure all "values" are in fact
1727 # literal scalarrefs. If that is not the case it means the storage ate
1728 # them away (e.g. the NoBindVars component) and interpolated them
1729 # directly into the SQL. This obviously can't be good for multi-inserts
1731 $self->throw_exception('Cannot insert_bulk without support for placeholders')
1732 if first { ref $_ ne 'SCALAR' } values %colvalues;
1735 # neither _execute_array, nor _execute_inserts_with_no_binds are
1736 # atomic (even if _execute_array is a single call). Thus a safety
1738 my $guard = $self->txn_scope_guard;
1740 $self->_query_start( $sql, @$bind ? [ dummy => '__BULK_INSERT__' ] : () );
1741 my $sth = $self->sth($sql);
1744 #@bind = map { ref $_ ? ''.$_ : $_ } @bind; # stringify args
1745 $self->_execute_array( $source, $sth, $bind, $cols, $data );
1748 # bind_param_array doesn't work if there are no binds
1749 $self->_dbh_execute_inserts_with_no_binds( $sth, scalar @$data );
1753 $self->_query_end( $sql, @$bind ? [ dummy => '__BULK_INSERT__' ] : () );
1757 return (wantarray ? ($rv, $sth, @$bind) : $rv);
1760 sub _execute_array {
1761 my ($self, $source, $sth, $bind, $cols, $data, @extra) = @_;
1763 ## This must be an arrayref, else nothing works!
1764 my $tuple_status = [];
1766 ## Get the bind_attributes, if any exist
1767 my $bind_attributes = $self->source_bind_attributes($source);
1769 ## Bind the values and execute
1770 my $placeholder_index = 1;
1772 foreach my $bound (@$bind) {
1774 my $attributes = {};
1775 my ($column_name, $data_index) = @$bound;
1777 if( $bind_attributes ) {
1778 $attributes = $bind_attributes->{$column_name}
1779 if defined $bind_attributes->{$column_name};
1782 my @data = map { $_->[$data_index] } @$data;
1784 $sth->bind_param_array(
1787 (%$attributes ? $attributes : ()),
1789 $placeholder_index++;
1794 $rv = $self->_dbh_execute_array($sth, $tuple_status, @extra);
1800 # Statement must finish even if there was an exception.
1805 $err = shift unless defined $err
1809 if (! defined $err and $sth->err);
1813 ++$i while $i <= $#$tuple_status && !ref $tuple_status->[$i];
1815 $self->throw_exception("Unexpected populate error: $err")
1816 if ($i > $#$tuple_status);
1818 $self->throw_exception(sprintf "%s for populate slice:\n%s",
1819 ($tuple_status->[$i][1] || $err),
1820 Dumper { map { $cols->[$_] => $data->[$i][$_] } (0 .. $#$cols) },
1827 sub _dbh_execute_array {
1828 my ($self, $sth, $tuple_status, @extra) = @_;
1830 return $sth->execute_array({ArrayTupleStatus => $tuple_status});
1833 sub _dbh_execute_inserts_with_no_binds {
1834 my ($self, $sth, $count) = @_;
1838 my $dbh = $self->_get_dbh;
1839 local $dbh->{RaiseError} = 1;
1840 local $dbh->{PrintError} = 0;
1842 $sth->execute foreach 1..$count;
1848 # Make sure statement is finished even if there was an exception.
1853 $err = shift unless defined $err;
1856 $self->throw_exception($err) if defined $err;
1862 my ($self, $source, @args) = @_;
1864 my $bind_attrs = $self->source_bind_attributes($source);
1866 return $self->_execute('update' => [], $source, $bind_attrs, @args);
1871 my ($self, $source, @args) = @_;
1873 my $bind_attrs = $self->source_bind_attributes($source);
1875 return $self->_execute('delete' => [], $source, $bind_attrs, @args);
1878 # We were sent here because the $rs contains a complex search
1879 # which will require a subquery to select the correct rows
1880 # (i.e. joined or limited resultsets, or non-introspectable conditions)
1882 # Generating a single PK column subquery is trivial and supported
1883 # by all RDBMS. However if we have a multicolumn PK, things get ugly.
1884 # Look at _multipk_update_delete()
1885 sub _subq_update_delete {
1887 my ($rs, $op, $values) = @_;
1889 my $rsrc = $rs->result_source;
1891 # quick check if we got a sane rs on our hands
1892 my @pcols = $rsrc->_pri_cols;
1894 my $sel = $rs->_resolved_attrs->{select};
1895 $sel = [ $sel ] unless ref $sel eq 'ARRAY';
1898 join ("\x00", map { join '.', $rs->{attrs}{alias}, $_ } sort @pcols)
1900 join ("\x00", sort @$sel )
1902 $self->throw_exception (
1903 '_subq_update_delete can not be called on resultsets selecting columns other than the primary keys'
1910 $op eq 'update' ? $values : (),
1911 { $pcols[0] => { -in => $rs->as_query } },
1916 return $self->_multipk_update_delete (@_);
1920 # ANSI SQL does not provide a reliable way to perform a multicol-PK
1921 # resultset update/delete involving subqueries. So by default resort
1922 # to simple (and inefficient) delete_all style per-row operations,
1923 # while allowing specific storages to override this with a faster
1926 sub _multipk_update_delete {
1927 return shift->_per_row_update_delete (@_);
1930 # This is the default loop used to delete/update rows for multi PK
1931 # resultsets, and used by mysql exclusively (because it can't do anything
1934 # We do not use $row->$op style queries, because resultset update/delete
1935 # is not expected to cascade (this is what delete_all/update_all is for).
1937 # There should be no race conditions as the entire operation is rolled
1940 sub _per_row_update_delete {
1942 my ($rs, $op, $values) = @_;
1944 my $rsrc = $rs->result_source;
1945 my @pcols = $rsrc->_pri_cols;
1947 my $guard = $self->txn_scope_guard;
1949 # emulate the return value of $sth->execute for non-selects
1950 my $row_cnt = '0E0';
1952 my $subrs_cur = $rs->cursor;
1953 my @all_pk = $subrs_cur->all;
1954 for my $pks ( @all_pk) {
1957 for my $i (0.. $#pcols) {
1958 $cond->{$pcols[$i]} = $pks->[$i];
1963 $op eq 'update' ? $values : (),
1977 $self->_execute($self->_select_args(@_));
1980 sub _select_args_to_query {
1983 # my ($op, $bind, $ident, $bind_attrs, $select, $cond, $rs_attrs, $rows, $offset)
1984 # = $self->_select_args($ident, $select, $cond, $attrs);
1985 my ($op, $bind, $ident, $bind_attrs, @args) =
1986 $self->_select_args(@_);
1988 # my ($sql, $prepared_bind) = $self->_prep_for_execute($op, $bind, $ident, [ $select, $cond, $rs_attrs, $rows, $offset ]);
1989 my ($sql, $prepared_bind) = $self->_prep_for_execute($op, $bind, $ident, \@args);
1990 $prepared_bind ||= [];
1993 ? ($sql, $prepared_bind, $bind_attrs)
1994 : \[ "($sql)", @$prepared_bind ]
1999 my ($self, $ident, $select, $where, $attrs) = @_;
2001 my $sql_maker = $self->sql_maker;
2002 my ($alias2source, $rs_alias) = $self->_resolve_ident_sources ($ident);
2009 $rs_alias && $alias2source->{$rs_alias}
2010 ? ( _rsroot_source_handle => $alias2source->{$rs_alias}->handle )
2015 # calculate bind_attrs before possible $ident mangling
2016 my $bind_attrs = {};
2017 for my $alias (keys %$alias2source) {
2018 my $bindtypes = $self->source_bind_attributes ($alias2source->{$alias}) || {};
2019 for my $col (keys %$bindtypes) {
2021 my $fqcn = join ('.', $alias, $col);
2022 $bind_attrs->{$fqcn} = $bindtypes->{$col} if $bindtypes->{$col};
2024 # Unqualified column names are nice, but at the same time can be
2025 # rather ambiguous. What we do here is basically go along with
2026 # the loop, adding an unqualified column slot to $bind_attrs,
2027 # alongside the fully qualified name. As soon as we encounter
2028 # another column by that name (which would imply another table)
2029 # we unset the unqualified slot and never add any info to it
2030 # to avoid erroneous type binding. If this happens the user's
2031 # only choice will be to fully qualify the column name
2033 if (exists $bind_attrs->{$col}) {
2034 $bind_attrs->{$col} = {};
2037 $bind_attrs->{$col} = $bind_attrs->{$fqcn};
2042 # Sanity check the attributes (SQLMaker does it too, but
2043 # in case of a software_limit we'll never reach there)
2044 if (defined $attrs->{offset}) {
2045 $self->throw_exception('A supplied offset attribute must be a non-negative integer')
2046 if ( $attrs->{offset} =~ /\D/ or $attrs->{offset} < 0 );
2048 $attrs->{offset} ||= 0;
2050 if (defined $attrs->{rows}) {
2051 $self->throw_exception("The rows attribute must be a positive integer if present")
2052 if ( $attrs->{rows} =~ /\D/ or $attrs->{rows} <= 0 );
2054 elsif ($attrs->{offset}) {
2055 # MySQL actually recommends this approach. I cringe.
2056 $attrs->{rows} = $sql_maker->__max_int;
2061 # see if we need to tear the prefetch apart otherwise delegate the limiting to the
2062 # storage, unless software limit was requested
2065 ( $attrs->{rows} && keys %{$attrs->{collapse}} )
2067 # grouped prefetch (to satisfy group_by == select)
2068 ( $attrs->{group_by}
2070 @{$attrs->{group_by}}
2072 $attrs->{_prefetch_select}
2074 @{$attrs->{_prefetch_select}}
2077 ($ident, $select, $where, $attrs)
2078 = $self->_adjust_select_args_for_complex_prefetch ($ident, $select, $where, $attrs);
2080 elsif (! $attrs->{software_limit} ) {
2081 push @limit, $attrs->{rows}, $attrs->{offset};
2084 # try to simplify the joinmap further (prune unreferenced type-single joins)
2085 $ident = $self->_prune_unused_joins ($ident, $select, $where, $attrs);
2088 # This would be the point to deflate anything found in $where
2089 # (and leave $attrs->{bind} intact). Problem is - inflators historically
2090 # expect a row object. And all we have is a resultsource (it is trivial
2091 # to extract deflator coderefs via $alias2source above).
2093 # I don't see a way forward other than changing the way deflators are
2094 # invoked, and that's just bad...
2097 return ('select', $attrs->{bind}, $ident, $bind_attrs, $select, $where, $attrs, @limit);
2100 # Returns a counting SELECT for a simple count
2101 # query. Abstracted so that a storage could override
2102 # this to { count => 'firstcol' } or whatever makes
2103 # sense as a performance optimization
2105 #my ($self, $source, $rs_attrs) = @_;
2106 return { count => '*' };
2110 sub source_bind_attributes {
2111 my ($self, $source) = @_;
2113 my $bind_attributes;
2115 my $colinfo = $source->columns_info;
2117 for my $col (keys %$colinfo) {
2118 if (my $dt = $colinfo->{$col}{data_type} ) {
2119 $bind_attributes->{$col} = $self->bind_attribute_by_data_type($dt)
2123 return $bind_attributes;
2130 =item Arguments: $ident, $select, $condition, $attrs
2134 Handle a SQL select statement.
2140 my ($ident, $select, $condition, $attrs) = @_;
2141 return $self->cursor_class->new($self, \@_, $attrs);
2146 my ($rv, $sth, @bind) = $self->_select(@_);
2147 my @row = $sth->fetchrow_array;
2148 my @nextrow = $sth->fetchrow_array if @row;
2149 if(@row && @nextrow) {
2150 carp "Query returned more than one row. SQL that returns multiple rows is DEPRECATED for ->find and ->single";
2152 # Need to call finish() to work round broken DBDs
2157 =head2 sql_limit_dialect
2159 This is an accessor for the default SQL limit dialect used by a particular
2160 storage driver. Can be overridden by supplying an explicit L</limit_dialect>
2161 to L<DBIx::Class::Schema/connect>. For a list of available limit dialects
2162 see L<DBIx::Class::SQLMaker::LimitDialects>.
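A driver subclass would normally set this at class level, e.g. (a sketch - the driver name is hypothetical, and the dialect must be one of those listed in L<DBIx::Class::SQLMaker::LimitDialects>):

  package DBIx::Class::Storage::DBI::MyDriver;
  use base 'DBIx::Class::Storage::DBI';

  __PACKAGE__->sql_limit_dialect('GenericSubQ');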
2168 =item Arguments: $sql
2172 Returns a L<DBI> sth (statement handle) for the supplied SQL.
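For example (the SQL is illustrative):

  my $sth = $schema->storage->sth(
    'SELECT name FROM artist WHERE artistid = ?'
  );
  $sth->execute(1);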
2177 my ($self, $dbh, $sql) = @_;
2179 # 3 is the if_active parameter which avoids active sth re-use
2180 my $sth = $self->disable_sth_caching
2181 ? $dbh->prepare($sql)
2182 : $dbh->prepare_cached($sql, {}, 3);
2184 # XXX You would think RaiseError would make this impossible,
2185 # but apparently that's not true :(
2186 $self->throw_exception($dbh->errstr) if !$sth;
2192 my ($self, $sql) = @_;
2193 $self->dbh_do('_dbh_sth', $sql); # retry over disconnects
2196 sub _dbh_columns_info_for {
2197 my ($self, $dbh, $table) = @_;
2199 if ($dbh->can('column_info')) {
2203 my ($schema,$tab) = $table =~ /^(.+?)\.(.+)$/ ? ($1,$2) : (undef,$table);
2204 my $sth = $dbh->column_info( undef,$schema, $tab, '%' );
2206 while ( my $info = $sth->fetchrow_hashref() ){
2208 $column_info{data_type} = $info->{TYPE_NAME};
2209 $column_info{size} = $info->{COLUMN_SIZE};
2210 $column_info{is_nullable} = $info->{NULLABLE} ? 1 : 0;
2211 $column_info{default_value} = $info->{COLUMN_DEF};
2212 my $col_name = $info->{COLUMN_NAME};
2213 $col_name =~ s/^\"(.*)\"$/$1/;
2215 $result{$col_name} = \%column_info;
2220 return \%result if !$caught && scalar keys %result;
2224 my $sth = $dbh->prepare($self->sql_maker->select($table, undef, \'1 = 0'));
2226 my @columns = @{$sth->{NAME_lc}};
2227 for my $i ( 0 .. $#columns ){
2229 $column_info{data_type} = $sth->{TYPE}->[$i];
2230 $column_info{size} = $sth->{PRECISION}->[$i];
2231 $column_info{is_nullable} = $sth->{NULLABLE}->[$i] ? 1 : 0;
2233 if ($column_info{data_type} =~ m/^(.*?)\((.*?)\)$/) {
2234 $column_info{data_type} = $1;
2235 $column_info{size} = $2;
2238 $result{$columns[$i]} = \%column_info;
2242 foreach my $col (keys %result) {
2243 my $colinfo = $result{$col};
2244 my $type_num = $colinfo->{data_type};
2246 if(defined $type_num && $dbh->can('type_info')) {
2247 my $type_info = $dbh->type_info($type_num);
2248 $type_name = $type_info->{TYPE_NAME} if $type_info;
2249 $colinfo->{data_type} = $type_name if $type_name;
2256 sub columns_info_for {
2257 my ($self, $table) = @_;
2258 $self->_dbh_columns_info_for ($self->_get_dbh, $table);
2261 =head2 last_insert_id
2263 Return the row id of the last insert.
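Typically only used internally when fetching autoincrement values after an insert; a sketch of a direct call (the source and column names are illustrative):

  my $source = $schema->source('Artist');
  my $id     = $schema->storage->last_insert_id($source, 'artistid');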
2267 sub _dbh_last_insert_id {
2268 my ($self, $dbh, $source, $col) = @_;
2270 my $id = try { $dbh->last_insert_id (undef, undef, $source->name, $col) };
2272 return $id if defined $id;
2274 my $class = ref $self;
2275 $self->throw_exception ("No storage specific _dbh_last_insert_id() method implemented in $class, and the generic DBI::last_insert_id() failed");
2278 sub last_insert_id {
2280 $self->_dbh_last_insert_id ($self->_dbh, @_);
2283 =head2 _native_data_type
2287 =item Arguments: $type_name
2291 This API is B<EXPERIMENTAL>, will almost definitely change in the future, and is
2292 currently only used by L<::AutoCast|DBIx::Class::Storage::DBI::AutoCast> and
2293 L<::Sybase::ASE|DBIx::Class::Storage::DBI::Sybase::ASE>.
2295 The default implementation returns C<undef>; implement it in your Storage driver if
2296 you need this functionality.
2298 Should map types from other databases to the native RDBMS type, for example
2299 C<VARCHAR2> to C<VARCHAR>.
2301 Types with modifiers should map to the underlying data type. For example,
2302 C<INTEGER AUTO_INCREMENT> should become C<INTEGER>.
2304 Composite types should map to the container type, for example
2305 C<ENUM(foo,bar,baz)> becomes C<ENUM>.
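A minimal sketch of what a driver-level override might look like, covering only the C<VARCHAR2> example above (hypothetical driver code, not an actual implementation):

  sub _native_data_type {
    my ($self, $type) = @_;
    return 'VARCHAR' if uc($type) eq 'VARCHAR2';
    return undef;   # everything else: no mapping known
  }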
2309 sub _native_data_type {
2310 #my ($self, $data_type) = @_;
2314 # Check if placeholders are supported at all
2315 sub _determine_supports_placeholders {
2317 my $dbh = $self->_get_dbh;
2319 # some drivers provide a $dbh attribute (e.g. Sybase and $dbh->{syb_dynamic_supported})
2320 # but it is inaccurate more often than not
2322 local $dbh->{PrintError} = 0;
2323 local $dbh->{RaiseError} = 1;
2324 $dbh->do('select ?', {}, 1);
2332 # Check if placeholders bound to non-string types throw exceptions
2334 sub _determine_supports_typeless_placeholders {
2336 my $dbh = $self->_get_dbh;
2339 local $dbh->{PrintError} = 0;
2340 local $dbh->{RaiseError} = 1;
2341 # this specifically tests a bind that is NOT a string
2342 $dbh->do('select 1 where 1 = ?', {}, 1);
2352 Returns the database driver name.
2357 shift->_get_dbh->{Driver}->{Name};
2360 =head2 bind_attribute_by_data_type
2362 Given a datatype from column info, returns a database specific bind
2363 attribute for C<< $dbh->bind_param($val,$attribute) >> or nothing if we will
2364 let the database planner just handle it.
2366 Generally only needed for special case column types, like bytea in postgres.
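A sketch of a driver-level override (assumes L<DBD::Pg>, which exports the C<PG_BYTEA> bind type via the C<:pg_types> tag):

  use DBD::Pg qw(:pg_types);

  sub bind_attribute_by_data_type {
    my ($self, $data_type) = @_;
    return { pg_type => DBD::Pg::PG_BYTEA } if $data_type =~ /^bytea$/i;
    return;   # let the database handle everything else
  }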
2370 sub bind_attribute_by_data_type {
2374 =head2 is_datatype_numeric
2376 Given a datatype from column_info, returns a boolean value indicating if
2377 the current RDBMS considers it a numeric value. This controls how
2378 L<DBIx::Class::Row/set_column> decides whether to mark the column as
2379 dirty - when the datatype is deemed numeric a C<< != >> comparison will
2380 be performed instead of the usual C<eq>.
2384 sub is_datatype_numeric {
2385 my ($self, $dt) = @_;
2387 return 0 unless $dt;
2389 return $dt =~ /^ (?:
2390 numeric | int(?:eger)? | (?:tiny|small|medium|big)int | dec(?:imal)? | real | float | double (?: \s+ precision)? | (?:big)?serial
2395 =head2 create_ddl_dir
2399 =item Arguments: $schema \@databases, $version, $directory, $preversion, \%sqlt_args
2403 Creates a SQL file based on the Schema, for each of the specified
2404 database engines in C<\@databases> in the given directory.
2405 (note: specify L<SQL::Translator> names, not L<DBI> driver names).
2407 Given a previous version number, this will also create a file containing
2408 the ALTER TABLE statements to transform the previous schema into the
2409 current one. Note that these statements may contain C<DROP TABLE> or
2410 C<DROP COLUMN> statements that can potentially destroy data.
2412 The file names are created using the C<ddl_filename> method below, please
2413 override this method in your schema if you would like a different file
2414 name format. For the ALTER file, the same format is used, replacing
2415 $version in the name with "$preversion-$version".
2417 See L<SQL::Translator/METHODS> for a list of values for C<\%sqlt_args>.
2418 The most common value for this would be C<< { add_drop_table => 1 } >>
2419 to have the SQL produced include a C<DROP TABLE> statement for each table
2420 created. For quoting purposes supply C<quote_table_names> and
2421 C<quote_field_names>.
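A typical invocation through the schema-level wrapper L<DBIx::Class::Schema/create_ddl_dir> might look like this (a sketch - the versions and directory are illustrative):

  $schema->create_ddl_dir(
    ['PostgreSQL', 'SQLite'],   # SQL::Translator producer names
    '0.02',                     # version being dumped
    './sql/',                   # output directory
    '0.01',                     # previous version, to also produce ALTER files
    { add_drop_table => 0 },    # extra SQL::Translator args
  );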
2423 If no arguments are passed, then the following default values are assumed:
2427 =item databases - ['MySQL', 'SQLite', 'PostgreSQL']
2429 =item version - $schema->schema_version
2431 =item directory - './'
2433 =item preversion - <none>
2437 By default, C<\%sqlt_args> will have
2439 { add_drop_table => 1, ignore_constraint_names => 1, ignore_index_names => 1 }
2441 merged with the hash passed in. To disable any of those features, pass in a
2442 hashref like the following
2444 { ignore_constraint_names => 0, # ... other options }
2447 WARNING: You are strongly advised to check all SQL files created, before applying
2452 sub create_ddl_dir {
2453 my ($self, $schema, $databases, $version, $dir, $preversion, $sqltargs) = @_;
2456 carp "No directory given, using ./\n";
2461 make_path ("$dir") # make_path does not like objects (i.e. Path::Class::Dir)
2463 $self->throw_exception(
2464 "Failed to create '$dir': " . ($! || $@ || 'error unknow')
2468 $self->throw_exception ("Directory '$dir' does not exist\n") unless(-d $dir);
2470 $databases ||= ['MySQL', 'SQLite', 'PostgreSQL'];
2471 $databases = [ $databases ] if(ref($databases) ne 'ARRAY');
2473 my $schema_version = $schema->schema_version || '1.x';
2474 $version ||= $schema_version;
2477 add_drop_table => 1,
2478 ignore_constraint_names => 1,
2479 ignore_index_names => 1,
2483 unless (DBIx::Class::Optional::Dependencies->req_ok_for ('deploy')) {
2484 $self->throw_exception("Can't create a ddl file without " . DBIx::Class::Optional::Dependencies->req_missing_for ('deploy') );
2487 my $sqlt = SQL::Translator->new( $sqltargs );
2489 $sqlt->parser('SQL::Translator::Parser::DBIx::Class');
2490 my $sqlt_schema = $sqlt->translate({ data => $schema })
2491 or $self->throw_exception ($sqlt->error);
2493 foreach my $db (@$databases) {
2495 $sqlt->{schema} = $sqlt_schema;
2496 $sqlt->producer($db);
2499 my $filename = $schema->ddl_filename($db, $version, $dir);
    if (-e $filename && ($version eq $schema_version )) {
      # if we are dumping the current version, overwrite the DDL
      carp "Overwriting existing DDL file - $filename";
      unlink $filename;
    }
    my $output = $sqlt->translate;
    unless ($output) {
      carp("Failed to translate to $db, skipping. (" . $sqlt->error . ")");
      next;
    }
    open (my $file, '>', $filename)
      or $self->throw_exception("Can't open $filename for writing ($!)");
    print $file $output;
    close $file;
2518 next unless ($preversion);
2520 require SQL::Translator::Diff;
2522 my $prefilename = $schema->ddl_filename($db, $preversion, $dir);
    if(!-e $prefilename) {
      carp("No previous schema file found ($prefilename)");
      next;
    }
    my $difffile = $schema->ddl_filename($db, $version, $dir, $preversion);
    if (-e $difffile) {
      carp("Overwriting existing diff file - $difffile");
    }
      my $t = SQL::Translator->new($sqltargs);
      $t->parser( $db )
        or $self->throw_exception ($t->error);
2543 my $out = $t->translate( $prefilename )
2544 or $self->throw_exception ($t->error);
2546 $source_schema = $t->schema;
2548 $source_schema->name( $prefilename )
2549 unless ( $source_schema->name );
2552 # The "new" style of producers have sane normalization and can support
2553 # diffing a SQL file against a DBIC->SQLT schema. Old style ones don't
2554 # And we have to diff parsed SQL against parsed SQL.
2555 my $dest_schema = $sqlt_schema;
2557 unless ( "SQL::Translator::Producer::$db"->can('preprocess_schema') ) {
      my $t = SQL::Translator->new($sqltargs);
      $t->parser( $db )
        or $self->throw_exception ($t->error);
2565 my $out = $t->translate( $filename )
2566 or $self->throw_exception ($t->error);
2568 $dest_schema = $t->schema;
2570 $dest_schema->name( $filename )
2571 unless $dest_schema->name;
    my $diff = SQL::Translator::Diff::schema_diff(
      $source_schema, $db, $dest_schema, $db, $sqltargs );
    open (my $file, '>', $difffile)
      or $self->throw_exception("Can't write to $difffile ($!)");
    print $file $diff;
    close $file;
2587 =head2 deployment_statements
2591 =item Arguments: $schema, $type, $version, $directory, $sqlt_args
2595 Returns the statements used by L</deploy> and L<DBIx::Class::Schema/deploy>.
2597 The L<SQL::Translator> (not L<DBI>) database driver name can be explicitly
2598 provided in C<$type>, otherwise the result of L</sqlt_type> is used as default.
C<$directory> is optional. When given, statements are returned from the files
in a directory previously created by L</create_ddl_dir>. The filenames are
constructed from L<DBIx::Class::Schema/ddl_filename>, the schema name and
C<$version>.
2604 If no C<$directory> is specified then the statements are constructed on the
2605 fly using L<SQL::Translator> and C<$version> is ignored.
2607 See L<SQL::Translator/METHODS> for a list of values for C<$sqlt_args>.
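For example, to look at the statements that would be generated on the fly for
a particular engine (a sketch; without a C<$directory> the C<$version>
argument is irrelevant):

  my @statements = $schema->storage->deployment_statements(
    $schema,
    'PostgreSQL',            # SQL::Translator producer name
    undef,                   # version - ignored without a $directory
    undef,                   # no $directory, generate via SQL::Translator
    { add_drop_table => 0 },
  );
  print "$_;\n" for @statements;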
2611 sub deployment_statements {
2612 my ($self, $schema, $type, $version, $dir, $sqltargs) = @_;
2613 $type ||= $self->sqlt_type;
2614 $version ||= $schema->schema_version || '1.x';
  my $filename = $schema->ddl_filename($type, $version, $dir);
  if (-f $filename) {
    open (my $file, '<', $filename)
      or $self->throw_exception("Can't open $filename ($!)");
    my @rows = <$file>;
    close $file;
    return join('', @rows);
  }
2627 unless (DBIx::Class::Optional::Dependencies->req_ok_for ('deploy') ) {
2628 $self->throw_exception("Can't deploy without a ddl_dir or " . DBIx::Class::Optional::Dependencies->req_missing_for ('deploy') );
  # 'sources' needs to be a parser arg, but for simplicity allow it at top level
2633 $sqltargs->{parser_args}{sources} = delete $sqltargs->{sources}
2634 if exists $sqltargs->{sources};
  my $tr = SQL::Translator->new(
    producer => "SQL::Translator::Producer::${type}",
    %{ $sqltargs || {} },
    parser => 'SQL::Translator::Parser::DBIx::Class',
    data => $schema,
  );
  # honour the caller's calling context (list of statements vs one blob)
  my @ret;
  my $wa = wantarray;
  if ($wa) { @ret = $tr->translate }
  else     { $ret[0] = $tr->translate }
  $self->throw_exception( 'Unable to produce deployment statements: ' . $tr->error)
    unless (@ret && defined $ret[0]);
  return $wa ? @ret : $ret[0];
}
sub deploy {
  my ($self, $schema, $type, $sqltargs, $dir) = @_;

  my $deploy = sub {
    my $line = shift;
    return if($line =~ /^--/);
    # next if($line =~ /^DROP/m);
    return if($line =~ /^BEGIN TRANSACTION/m);
    return if($line =~ /^COMMIT/m);
    return if $line =~ /^\s+$/; # skip whitespace only
    $self->_query_start($line);
    try {
      # do a dbh_do cycle here, as we need some error checking in
      # place (even though we will ignore errors)
      $self->dbh_do (sub { $_[1]->do($line) });
    }
    catch {
      carp qq{$_ (running "${line}")};
    };
    $self->_query_end($line);
  };

  my @statements = $schema->deployment_statements($type, undef, $dir, { %{ $sqltargs || {} }, no_comments => 1 } );
  if (@statements > 1) {
    foreach my $statement (@statements) {
      $deploy->( $statement );
    }
  }
  elsif (@statements == 1) {
    foreach my $line ( split(";\n", $statements[0])) {
      $deploy->( $line );
    }
  }
}
2691 =head2 datetime_parser
Returns the datetime parser class.
sub datetime_parser {
  my $self = shift;
  return $self->{datetime_parser} ||= do {
    $self->build_datetime_parser(@_);
  };
}
2704 =head2 datetime_parser_type
Defines (returns) the datetime parser class - currently hardwired to
L<DateTime::Format::MySQL>.
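The parser returned by L</datetime_parser> is handy for deflating and
inflating datetime values by hand, for example (a sketch; it assumes a driver
whose parser follows the usual C<DateTime::Format::*> interface):

  my $dt = DateTime->now;    # any DateTime object will do
  my $parser = $schema->storage->datetime_parser;

  # deflate for the database, then inflate the stored string again
  my $stored   = $parser->format_datetime($dt);
  my $restored = $parser->parse_datetime($stored);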
2711 sub datetime_parser_type { "DateTime::Format::MySQL"; }
2713 =head2 build_datetime_parser
See L</datetime_parser>.
sub build_datetime_parser {
  my $self = shift;
  my $type = $self->datetime_parser_type(@_);
  $self->ensure_class_loaded ($type);
  return $type;
}
2727 =head2 is_replicating
2729 A boolean that reports if a particular L<DBIx::Class::Storage::DBI> is set to
2730 replicate from a master database. Default is undef, which is the result
2731 returned by databases that don't support replication.
sub is_replicating {
  return;
}
2740 =head2 lag_behind_master
Returns a number that represents a certain amount of lag behind a master db
when a given storage is replicating. The number is database dependent, but
starts at zero and increases with the amount of lag. Default is undef.
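A replication-aware storage subclass would normally override both this method
and L</is_replicating>. For example (a sketch only - the status query below is
hypothetical, as every RDBMS reports lag differently):

  package MyApp::Storage::ReadSlave;
  use base 'DBIx::Class::Storage::DBI';

  sub is_replicating { 1 }

  sub lag_behind_master {
    my $self = shift;
    # hypothetical one-column status query returning seconds of lag
    my ($lag) = $self->_get_dbh->selectrow_array(
      'SELECT seconds_behind_master FROM replication_status'
    );
    return $lag;
  }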
sub lag_behind_master {
  return;
}
2752 =head2 relname_to_table_alias
2756 =item Arguments: $relname, $join_count
L<DBIx::Class> uses L<DBIx::Class::Relationship> names as table aliases in
queries.
2763 This hook is to allow specific L<DBIx::Class::Storage> drivers to change the
2764 way these aliases are named.
The default behavior is to return C<< "${relname}_${join_count}" >> if
C<$join_count> is greater than 1, and C<$relname> otherwise.
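For example, a subclass targeting an RDBMS with a short identifier limit
might truncate the generated aliases. This is only a sketch (the 30-character
limit is an assumption, and no shipped driver does exactly this):

  package MyApp::Storage::ShortAliases;
  use base 'DBIx::Class::Storage::DBI';
  use mro 'c3';

  sub relname_to_table_alias {
    my $self = shift;
    my $alias = $self->next::method(@_);
    # keep aliases within an assumed 30-character identifier limit
    return length($alias) > 30 ? substr($alias, 0, 30) : $alias;
  }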
sub relname_to_table_alias {
  my ($self, $relname, $join_count) = @_;

  my $alias = ($join_count && $join_count > 1 ?
    join('_', $relname, $join_count) : $relname);

  return $alias;
}
2784 =head2 DBIx::Class and AutoCommit
2786 DBIx::Class can do some wonderful magic with handling exceptions,
2787 disconnections, and transactions when you use C<< AutoCommit => 1 >>
2788 (the default) combined with C<txn_do> for transaction support.
2790 If you set C<< AutoCommit => 0 >> in your connect info, then you are always
2791 in an assumed transaction between commits, and you're telling us you'd
2792 like to manage that manually. A lot of the magic protections offered by
2793 this module will go away. We can't protect you from exceptions due to database
2794 disconnects because we don't know anything about how to restart your
transactions. You're on your own for handling all sorts of exceptional
cases if you choose the C<< AutoCommit => 0 >> path, just as you would
be with raw DBI.
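For reference, the recommended pattern looks like the following sketch (the
schema class, DSN and result sources are placeholders):

  my $schema = MyApp::Schema->connect(
    'dbi:SQLite:myapp.db',
    undef, undef,
    { AutoCommit => 1 },    # the default, shown here for emphasis
  );

  # txn_do starts a transaction, runs the closure, commits on success
  # and rolls back (then rethrows) if an exception is raised
  $schema->txn_do(sub {
    $schema->resultset('Order')->create({ status => 'new' });
    $schema->resultset('AuditLog')->create({ event => 'order created' });
  });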
2802 Matt S. Trout <mst@shadowcatsystems.co.uk>
2804 Andy Grundman <andy@hybridized.org>
2808 You may distribute this code under the same terms as Perl itself.