1 package DBIx::Class::Storage::DBI;
2 # -*- mode: cperl; cperl-indent-level: 2 -*-
7 use base qw/DBIx::Class::Storage::DBIHacks DBIx::Class::Storage/;
10 use Carp::Clan qw/^DBIx::Class|^Try::Tiny/;
12 use DBIx::Class::Storage::DBI::Cursor;
13 use DBIx::Class::Storage::Statistics;
14 use Scalar::Util qw/refaddr weaken reftype blessed/;
15 use Data::Dumper::Concise 'Dumper';
16 use Sub::Name 'subname';
18 use File::Path 'make_path';
# -- Class-level configuration ------------------------------------------------
22 # default cursor class, overridable in connect_info attributes
23 __PACKAGE__->cursor_class('DBIx::Class::Storage::DBI::Cursor');
# 'inherited' accessors fall back to the class-wide value when the object has
# none of its own - used here for per-driver SQL generation defaults.
25 __PACKAGE__->mk_group_accessors('inherited' => qw/sql_maker_class sql_limit_dialect/);
26 __PACKAGE__->sql_maker_class('DBIx::Class::SQLAHacks');
28 __PACKAGE__->mk_group_accessors('simple' => qw/
29 _connect_info _dbi_connect_info _dbic_connect_attributes _driver_determined
30 _dbh _dbh_details _conn_pid _conn_tid _sql_maker _sql_maker_opts
31 transaction_depth _dbh_autocommit savepoints
34 # the values for these accessors are picked out (and deleted) from
35 # the attribute hashref passed to connect_info
36 my @storage_options = qw/
37 on_connect_call on_disconnect_call on_connect_do on_disconnect_do
38 disable_sth_caching unsafe auto_savepoint
40 __PACKAGE__->mk_group_accessors('simple' => @storage_options);
43 # capability definitions, using a 2-tiered accessor system
46 # A driver/user may define _use_X, which blindly without any checks says:
47 # "(do not) use this capability", (use_dbms_capability is an "inherited"
50 # If _use_X is undef, _supports_X is then queried. This is a "simple" style
51 # accessor, which in turn calls _determine_supports_X, and stores the return
52 # in a special slot on the storage object, which is wiped every time a $dbh
53 # reconnection takes place (it is not guaranteed that upon reconnection we
54 # will get the same rdbms version). _determine_supports_X does not need to
55 # exist on a driver, as we ->can for it before calling.
# Generates the _supports_X ("what the RDBMS can do") and _use_X ("what the
# user/driver wants") accessor pairs for each capability listed below.
57 my @capabilities = (qw/insert_returning placeholders typeless_placeholders/);
58 __PACKAGE__->mk_group_accessors( dbms_capability => map { "_supports_$_" } @capabilities );
59 __PACKAGE__->mk_group_accessors( use_dbms_capability => map { "_use_$_" } @capabilities );
62 # Each of these methods need _determine_driver called before itself
63 # in order to function reliably. This is a purely DRY optimization
65 # get_(use)_dbms_capability need to be called on the correct Storage
66 # class, as _use_X may be hardcoded class-wide, and _supports_X calls
67 # _determine_supports_X which obv. needs a correct driver as well
68 my @rdbms_specific_methods = qw/
83 get_use_dbms_capability
87 for my $meth (@rdbms_specific_methods) {
89 my $orig = __PACKAGE__->can ($meth)
90 or die "$meth is not a ::Storage::DBI method!";
# Wrap the original method in a shim that lazily determines the concrete
# driver subclass (reblessing $self) before the first real dispatch.
93 no warnings qw/redefine/;
94 *{__PACKAGE__ ."::$meth"} = subname $meth => sub {
95 if (not $_[0]->_driver_determined and not $_[0]->{_in_determine_driver}) {
96 $_[0]->_determine_driver;
# goto &sub replaces the current stack frame, so the re-dispatch into the
# (possibly reblessed) class sees the original @_ and caller() unchanged.
97 goto $_[0]->can($meth);
106 DBIx::Class::Storage::DBI - DBI storage handler
110 my $schema = MySchema->connect('dbi:SQLite:my.db');
112 $schema->storage->debug(1);
114 my @stuff = $schema->storage->dbh_do(
116 my ($storage, $dbh, @args) = @_;
117 $dbh->do("DROP TABLE authors");
122 $schema->resultset('Book')->search({
123 written_on => $schema->storage->datetime_parser->format_datetime(DateTime->now)
128 This class represents the connection to an RDBMS via L<DBI>. See
129 L<DBIx::Class::Storage> for general information. This pod only
130 documents DBI-specific methods and behaviors.
# Constructor body: initialize per-object bookkeeping after the parent
# class has built the object.
137 my $new = shift->next::method(@_);
139 $new->transaction_depth(0);
140 $new->_sql_maker_opts({});
141 $new->_dbh_details({});
# Savepoint name stack, dbh_do reentrancy flag, and a generation counter
# (bumped when a handle is invalidated, e.g. across fork).
142 $new->{savepoints} = [];
143 $new->{_in_dbh_do} = 0;
144 $new->{_dbh_gen} = 0;
146 # read below to see what this does
147 $new->_arm_global_destructor;
152 # This is a hack to work around perl shooting stuff in random
153 # order on exit(). If we do not walk the remaining storage
154 # objects in an END block, there is a *small but real* chance
155 # of a fork()ed child to kill the parent's shared DBI handle,
156 # *before perl reaches the DESTROY in this package*
157 # Yes, it is ugly and effective.
# Process-wide registry of live storage objects; entries are weakened so the
# registry itself never keeps a storage alive.
159 my %seek_and_destroy;
161 sub _arm_global_destructor {
163 my $key = Scalar::Util::refaddr ($self);
164 $seek_and_destroy{$key} = $self;
165 Scalar::Util::weaken ($seek_and_destroy{$key});
# NOTE(review): the following appears to run inside an END block - confirm
# the exact trigger point against the unabridged source.
169 local $?; # just in case the DBI destructor changes it somehow
171 # destroy just the object if not native to this process/thread
172 $_->_preserve_foreign_dbh for (grep
174 values %seek_and_destroy
182 # destroy just the object if not native to this process/thread
183 $self->_preserve_foreign_dbh;
185 # some databases need this to stop spewing warnings
186 if (my $dbh = $self->_dbh) {
188 %{ $dbh->{CachedKids} } = ();
196 sub _preserve_foreign_dbh {
# Guard: nothing to preserve without a live handle.
199 return unless $self->_dbh;
203 return unless $self->_dbh;
209 # handle pid changes correctly - do not destroy parent's connection
# NOTE(review): the lines below appear to belong to separate pid/tid
# verification helpers in the unabridged source - confirm boundaries there.
213 return if ( defined $self->_conn_pid and $self->_conn_pid == $$ );
# Mark the handle inherited across fork() so the child's destructor cannot
# close the parent's connection (DBI's InactiveDestroy attribute).
215 $self->_dbh->{InactiveDestroy} = 1;
222 # very similar to above, but seems to FAIL if I set InactiveDestroy
226 if ( ! defined $self->_conn_tid ) {
229 elsif ( $self->_conn_tid == threads->tid ) {
230 return; # same thread
233 #$self->_dbh->{InactiveDestroy} = 1; # why does t/51threads.t fail...?
243 This method is normally called by L<DBIx::Class::Schema/connection>, which
244 encapsulates its argument list in an arrayref before passing them here.
246 The argument list may contain:
252 The same 4-element argument set one would normally pass to
253 L<DBI/connect>, optionally followed by
254 L<extra attributes|/DBIx::Class specific connection attributes>
255 recognized by DBIx::Class:
257 $connect_info_args = [ $dsn, $user, $password, \%dbi_attributes?, \%extra_attributes? ];
261 A single code reference which returns a connected
262 L<DBI database handle|DBI/connect> optionally followed by
263 L<extra attributes|/DBIx::Class specific connection attributes> recognized
266 $connect_info_args = [ sub { DBI->connect (...) }, \%extra_attributes? ];
270 A single hashref with all the attributes and the dsn/user/password
273 $connect_info_args = [{
281 $connect_info_args = [{
282 dbh_maker => sub { DBI->connect (...) },
287 This is particularly useful for L<Catalyst> based applications, allowing the
288 following config (L<Config::General> style):
293 dsn dbi:mysql:database=test
300 The C<dsn>/C<user>/C<password> combination can be substituted by the
301 C<dbh_maker> key whose value is a coderef that returns a connected
302 L<DBI database handle|DBI/connect>
306 Please note that the L<DBI> docs recommend that you always explicitly
307 set C<AutoCommit> to either I<0> or I<1>. L<DBIx::Class> further
308 recommends that it be set to I<1>, and that you perform transactions
309 via our L<DBIx::Class::Schema/txn_do> method. L<DBIx::Class> will set it
310 to I<1> if you do not explicitly set it to zero. This is the default
311 for most DBDs. See L</DBIx::Class and AutoCommit> for details.
313 =head3 DBIx::Class specific connection attributes
315 In addition to the standard L<DBI|DBI/ATTRIBUTES_COMMON_TO_ALL_HANDLES>
316 L<connection|DBI/Database_Handle_Attributes> attributes, DBIx::Class recognizes
317 the following connection options. These options can be mixed in with your other
318 L<DBI> connection attributes, or placed in a separate hashref
319 (C<\%extra_attributes>) as shown above.
321 Every time C<connect_info> is invoked, any previous settings for
322 these options will be cleared before setting the new ones, regardless of
323 whether any options are specified in the new C<connect_info>.
330 Specifies things to do immediately after connecting or re-connecting to
331 the database. Its value may contain:
337 This contains one SQL statement to execute.
339 =item an array reference
341 This contains SQL statements to execute in order. Each element contains
342 a string or a code reference that returns a string.
344 =item a code reference
346 This contains some code to execute. Unlike code references within an
347 array reference, its return value is ignored.
351 =item on_disconnect_do
353 Takes arguments in the same form as L</on_connect_do> and executes them
354 immediately before disconnecting from the database.
356 Note, this only runs if you explicitly call L</disconnect> on the
359 =item on_connect_call
361 A more generalized form of L</on_connect_do> that calls the specified
362 C<connect_call_METHOD> methods in your storage driver.
364 on_connect_do => 'select 1'
368 on_connect_call => [ [ do_sql => 'select 1' ] ]
370 Its values may contain:
376 Will call the C<connect_call_METHOD> method.
378 =item a code reference
380 Will execute C<< $code->($storage) >>
382 =item an array reference
384 Each value can be a method name or code reference.
386 =item an array of arrays
388 For each array, the first item is taken to be the C<connect_call_> method name
389 or code reference, and the rest are parameters to it.
393 Some predefined storage methods you may use:
399 Executes a SQL string or a code reference that returns a SQL string. This is
400 what L</on_connect_do> and L</on_disconnect_do> use.
408 Will execute the scalar as SQL.
412 Taken to be arguments to L<DBI/do>, the SQL string optionally followed by the
413 attributes hashref and bind values.
415 =item a code reference
417 Will execute C<< $code->($storage) >> and execute the return array refs as
424 Execute any statements necessary to initialize the database session to return
425 and accept datetime/timestamp values used with
426 L<DBIx::Class::InflateColumn::DateTime>.
428 Only necessary for some databases, see your specific storage driver for
429 implementation details.
433 =item on_disconnect_call
435 Takes arguments in the same form as L</on_connect_call> and executes them
436 immediately before disconnecting from the database.
438 Calls the C<disconnect_call_METHOD> methods as opposed to the
439 C<connect_call_METHOD> methods called by L</on_connect_call>.
441 Note, this only runs if you explicitly call L</disconnect> on the
444 =item disable_sth_caching
446 If set to a true value, this option will disable the caching of
447 statement handles via L<DBI/prepare_cached>.
451 Sets the limit dialect. This is useful for JDBC-bridge among others
452 where the remote SQL-dialect cannot be determined by the name of the
453 driver alone. See also L<SQL::Abstract::Limit>.
457 Specifies what characters to use to quote table and column names. If
458 you use this you will want to specify L</name_sep> as well.
460 C<quote_char> expects either a single character, in which case it
461 is placed on either side of the table/column name, or an arrayref of length
462 2 in which case the table/column name is placed between the elements.
464 For example under MySQL you should use C<< quote_char => '`' >>, and for
465 SQL Server you should use C<< quote_char => [qw/[ ]/] >>.
469 This only needs to be used in conjunction with C<quote_char>, and is used to
470 specify the character that separates elements (schemas, tables, columns) from
471 each other. In most cases this is simply a C<.>.
473 The consequences of not supplying this value are that L<SQL::Abstract>
474 will assume DBIx::Class' uses of aliases to be complete column
475 names. The output will look like I<"me.name"> when it should actually
480 This Storage driver normally installs its own C<HandleError>, sets
481 C<RaiseError> and C<ShowErrorStatement> on, and sets C<PrintError> off on
482 all database handles, including those supplied by a coderef. It does this
483 so that it can have consistent and useful error behavior.
485 If you set this option to a true value, Storage will not do its usual
486 modifications to the database handle's attributes, and instead relies on
487 the settings in your connect_info DBI options (or the values you set in
488 your connection coderef, in the case that you are connecting via coderef).
490 Note that your custom settings can cause Storage to malfunction,
491 especially if you set a C<HandleError> handler that suppresses exceptions
492 and/or disable C<RaiseError>.
496 If this option is true, L<DBIx::Class> will use savepoints when nesting
497 transactions, making it possible to recover from failure in the inner
498 transaction without having to abort all outer transactions.
502 Use this argument to supply a cursor class other than the default
503 L<DBIx::Class::Storage::DBI::Cursor>.
507 Some real-life examples of arguments to L</connect_info> and
508 L<DBIx::Class::Schema/connect>
510 # Simple SQLite connection
511 ->connect_info([ 'dbi:SQLite:./foo.db' ]);
514 ->connect_info([ sub { DBI->connect(...) } ]);
516 # Connect via subref in hashref
518 dbh_maker => sub { DBI->connect(...) },
519 on_connect_do => 'alter session ...',
522 # A bit more complicated
529 { quote_char => q{"}, name_sep => q{.} },
533 # Equivalent to the previous example
539 { AutoCommit => 1, quote_char => q{"}, name_sep => q{.} },
543 # Same, but with hashref as argument
544 # See parse_connect_info for explanation
547 dsn => 'dbi:Pg:dbname=foo',
549 password => 'my_pg_password',
556 # Subref + DBIx::Class-specific connection options
559 sub { DBI->connect(...) },
563 on_connect_do => ['SET search_path TO myschema,otherschema,public'],
564 disable_sth_caching => 1,
# connect_info body: stores the raw connection spec, normalizes it, routes
# DBIC-specific options to their accessors and keeps DBI attrs introspectable.
574 my ($self, $info) = @_;
# No argument means "read accessor" - return the previously stored spec.
576 return $self->_connect_info if !$info;
578 $self->_connect_info($info); # copy for _connect_info
580 $info = $self->_normalize_connect_info($info)
581 if ref $info eq 'ARRAY';
# Apply DBIC-level storage options (on_connect_do, unsafe, ...) via their
# generated accessors.
583 for my $storage_opt (keys %{ $info->{storage_options} }) {
584 my $value = $info->{storage_options}{$storage_opt};
586 $self->$storage_opt($value);
589 # Kill sql_maker/_sql_maker_opts, so we get a fresh one with only
590 # the new set of options
591 $self->_sql_maker(undef);
592 $self->_sql_maker_opts({});
594 for my $sql_maker_opt (keys %{ $info->{sql_maker_options} }) {
595 my $value = $info->{sql_maker_options}{$sql_maker_opt};
597 $self->_sql_maker_opts->{$sql_maker_opt} = $value;
# Merge class-level default DBI attributes with user-supplied ones
# (user-supplied values win).
601 %{ $self->_default_dbi_connect_attributes || {} },
602 %{ $info->{attributes} || {} },
605 my @args = @{ $info->{arguments} };
# A coderef first argument makes its own connection, so the merged attrs
# must not be appended in that case.
607 $self->_dbi_connect_info([@args,
608 %attrs && !(ref $args[0] eq 'CODE') ? \%attrs : ()]);
611 # save the attributes in a separate accessor so they are always
612 # introspectable, even in case of a CODE $dbhmaker
613 $self->_dbic_connect_attributes (\%attrs);
615 return $self->_connect_info;
# Normalizes the three supported connect_info invocation styles (coderef,
# single hashref, dsn/user/password list) into a hash with 'arguments',
# 'storage_options', 'sql_maker_options' and 'attributes' slots.
618 sub _normalize_connect_info {
619 my ($self, $info_arg) = @_;
622 my @args = @$info_arg; # take a shallow copy for further mutilation
624 # combine/pre-parse arguments depending on invocation style
627 if (ref $args[0] eq 'CODE') { # coderef with optional \%extra_attributes
628 %attrs = %{ $args[1] || {} };
631 elsif (ref $args[0] eq 'HASH') { # single hashref (i.e. Catalyst config)
632 %attrs = %{$args[0]};
# dbh_maker takes precedence; any dsn/user/password alongside it is dropped.
634 if (my $code = delete $attrs{dbh_maker}) {
# NOTE(review): grep { delete ... } reports only keys whose *values* were
# true - a dsn of '' or 0 is deleted but not warned about; confirm intended.
637 my @ignored = grep { delete $attrs{$_} } (qw/dsn user password/);
640 'Attribute(s) %s in connect_info were ignored, as they can not be applied '
641 . "to the result of 'dbh_maker'",
643 join (', ', map { "'$_'" } (@ignored) ),
# Hash-slice delete returns the removed values in list context.
648 @args = delete @attrs{qw/dsn user password/};
651 else { # otherwise assume dsn/user/password + \%attrs + \%extra_attrs
653 % { $args[3] || {} },
654 % { $args[4] || {} },
656 @args = @args[0,1,2];
659 $info{arguments} = \@args;
# Split DBIC-specific keys out of the remaining DBI attribute hash.
661 my @storage_opts = grep exists $attrs{$_},
662 @storage_options, 'cursor_class';
664 @{ $info{storage_options} }{@storage_opts} =
665 delete @attrs{@storage_opts} if @storage_opts;
667 my @sql_maker_opts = grep exists $attrs{$_},
668 qw/limit_dialect quote_char name_sep/;
670 @{ $info{sql_maker_options} }{@sql_maker_opts} =
671 delete @attrs{@sql_maker_opts} if @sql_maker_opts;
673 $info{attributes} = \%attrs if %attrs;
678 sub _default_dbi_connect_attributes {
688 This method is deprecated in favour of setting via L</connect_info>.
692 =head2 on_disconnect_do
694 This method is deprecated in favour of setting via L</connect_info>.
# Converts an on_connect_do/on_disconnect_do value (string, coderef or
# arrayref thereof) into the action list understood by
# _do_connection_actions. Returns an empty list when the option is unset.
698 sub _parse_connect_do {
699 my ($self, $type) = @_;
701 my $val = $self->$type;
702 return () if not defined $val;
# A plain string is a single SQL statement.
707 push @res, [ 'do_sql', $val ];
708 } elsif (ref($val) eq 'CODE') {
710 } elsif (ref($val) eq 'ARRAY') {
# Each array element becomes its own do_sql action.
711 push @res, map { [ 'do_sql', $_ ] } @$val;
713 $self->throw_exception("Invalid type for $type: ".ref($val));
721 Arguments: ($subref | $method_name), @extra_coderef_args?
723 Execute the given $subref or $method_name using the new exception-based
724 connection management.
726 The first two arguments will be the storage object that C<dbh_do> was called
727 on and a database handle to use. Any additional arguments will be passed
728 verbatim to the called subref as arguments 2 and onwards.
730 Using this (instead of $self->_dbh or $self->dbh) ensures correct
731 exception handling and reconnection (or failover in future subclasses).
733 Your subref should have no side-effects outside of the database, as
734 there is the potential for your subref to be partially double-executed
735 if the database connection was stale/dysfunctional.
739 my @stuff = $schema->storage->dbh_do(
741 my ($storage, $dbh, @cols) = @_;
742 my $cols = join(q{, }, @cols);
743 $dbh->selectrow_array("SELECT $cols FROM foo");
# dbh_do body: run $code with a guaranteed-fresh $dbh, retrying once after
# a reconnect if the first attempt failed while disconnected.
754 my $dbh = $self->_get_dbh;
# Bypass the retry machinery when already inside dbh_do or a transaction -
# a silent retry there would break transactional integrity.
756 return $self->$code($dbh, @_)
757 if ( $self->{_in_dbh_do} || $self->{transaction_depth} );
759 local $self->{_in_dbh_do} = 1;
761 # take a ref instead of a copy, to preserve coderef @_ aliasing semantics
# NOTE(review): the first attempt below appears to run inside a try/catch
# in the unabridged source; the subsequent lines are the catch path.
764 $self->$code ($dbh, @$args);
# Still connected means the failure was not a stale handle - re-raise.
766 $self->throw_exception($_) if $self->connected;
768 # We were not connected - reconnect and retry, but let any
769 # exception fall right through this time
770 carp "Retrying $code after catching disconnected exception: $_"
771 if $ENV{DBIC_DBIRETRY_DEBUG};
773 $self->_populate_dbh;
774 $self->$code($self->_dbh, @$args);
778 # This is basically a blend of dbh_do above and DBIx::Class::Storage::txn_do.
779 # It also informs dbh_do to bypass itself while under the direction of txn_do,
780 # via $self->{_in_dbh_do} (this saves some redundant eval and errorcheck, etc)
785 ref $coderef eq 'CODE' or $self->throw_exception
786 ('$coderef must be a CODE reference');
788 return $coderef->(@_) if $self->{transaction_depth} && ! $self->auto_savepoint;
790 local $self->{_in_dbh_do} = 1;
793 my $want_array = wantarray;
799 # take a ref instead of a copy, to preserve coderef @_ aliasing semantics
807 @result = $coderef->(@$args);
809 elsif(defined $want_array) {
810 $result[0] = $coderef->(@$args);
820 if(! defined $exception) { return $want_array ? @result : $result[0] }
822 if($tried++ || $self->connected) {
823 my $rollback_exception;
824 try { $self->txn_rollback } catch { $rollback_exception = shift };
825 if(defined $rollback_exception) {
826 my $exception_class = "DBIx::Class::Storage::NESTED_ROLLBACK_EXCEPTION";
827 $self->throw_exception($exception) # propagate nested rollback
828 if $rollback_exception =~ /$exception_class/;
830 $self->throw_exception(
831 "Transaction aborted: ${exception}. "
832 . "Rollback failed: ${rollback_exception}"
835 $self->throw_exception($exception)
838 # We were not connected, and was first try - reconnect and retry
840 carp "Retrying $coderef after catching disconnected exception: $exception"
841 if $ENV{DBIC_DBIRETRY_DEBUG};
842 $self->_populate_dbh;
848 Our C<disconnect> method also performs a rollback first if the
849 database is not in C<AutoCommit> mode.
# disconnect body: run user-registered disconnect actions, roll back any
# open (non-autocommit) transaction, then drop the handle.
859 push @actions, ( $self->on_disconnect_call || () );
860 push @actions, $self->_parse_connect_do ('on_disconnect_do');
862 $self->_do_connection_actions(disconnect_call_ => $_) for @actions;
864 $self->_dbh_rollback unless $self->_dbh_autocommit;
# Clear cached kids first - some drivers warn when disconnecting with live
# cached statement handles.
866 %{ $self->_dbh->{CachedKids} } = ();
867 $self->_dbh->disconnect;
873 =head2 with_deferred_fk_checks
877 =item Arguments: C<$coderef>
879 =item Return Value: The return value of $coderef
883 Storage specific method to run the code ref with FK checks deferred or
884 in MySQL's case disabled entirely.
888 # Storage subclasses should override this
889 sub with_deferred_fk_checks {
890 my ($self, $sub) = @_;
898 =item Arguments: none
900 =item Return Value: 1|0
904 Verifies that the current database handle is active and ready to execute
905 an SQL statement (e.g. the connection did not get stale, server is still
906 answering, etc.) This method is used internally by L</dbh>.
912 return 0 unless $self->_seems_connected;
915 local $self->_dbh->{RaiseError} = 1;
920 sub _seems_connected {
923 $self->_preserve_foreign_dbh;
925 my $dbh = $self->_dbh
928 return $dbh->FETCH('Active');
934 my $dbh = $self->_dbh or return 0;
939 sub ensure_connected {
942 unless ($self->connected) {
943 $self->_populate_dbh;
949 Returns a C<$dbh> - a data base handle of class L<DBI>. The returned handle
950 is guaranteed to be healthy by implicitly calling L</connected>, and if
951 necessary performing a reconnection before returning. Keep in mind that this
952 is very B<expensive> on some database engines. Consider using L</dbh_do>
960 if (not $self->_dbh) {
961 $self->_populate_dbh;
963 $self->ensure_connected;
968 # this is the internal "get dbh or connect (don't check)" method
971 $self->_preserve_foreign_dbh;
972 $self->_populate_dbh unless $self->_dbh;
978 unless ($self->_sql_maker) {
979 my $sql_maker_class = $self->sql_maker_class;
980 $self->ensure_class_loaded ($sql_maker_class);
982 my %opts = %{$self->_sql_maker_opts||{}};
986 $self->sql_limit_dialect
989 my $s_class = (ref $self) || $self;
991 "Your storage class ($s_class) does not set sql_limit_dialect and you "
992 . 'have not supplied an explicit limit_dialect in your connection_info. '
993 . 'DBIC will attempt to use the GenericSubQ dialect, which works on most '
994 . 'databases but can be (and often is) painfully slow.'
1001 $self->_sql_maker($sql_maker_class->new(
1002 bindtype=>'columns',
1003 array_datatypes => 1,
1004 limit_dialect => $dialect,
1008 return $self->_sql_maker;
1011 # nothing to do by default
1018 my @info = @{$self->_dbi_connect_info || []};
1019 $self->_dbh(undef); # in case ->connected failed we might get sent here
1020 $self->_dbh_details({}); # reset everything we know
1022 $self->_dbh($self->_connect(@info));
1024 $self->_conn_pid($$);
1025 $self->_conn_tid(threads->tid) if $INC{'threads.pm'};
1027 $self->_determine_driver;
1029 # Always set the transaction depth on connect, since
1030 # there is no transaction in progress by definition
1031 $self->{transaction_depth} = $self->_dbh_autocommit ? 0 : 1;
1033 $self->_run_connection_actions unless $self->{_in_determine_driver};
1036 sub _run_connection_actions {
1040 push @actions, ( $self->on_connect_call || () );
1041 push @actions, $self->_parse_connect_do ('on_connect_do');
1043 $self->_do_connection_actions(connect_call_ => $_) for @actions;
# Capability accessors: _use_X values live in the class-inherited slot,
# while detected _supports_X values live per-connection in _dbh_details.
1048 sub set_use_dbms_capability {
1049 $_[0]->set_inherited ($_[1], $_[2]);
1052 sub get_use_dbms_capability {
1053 my ($self, $capname) = @_;
1055 my $use = $self->get_inherited ($capname);
# An undef _use_X defers to the detected _supports_X capability.
1058 : do { $capname =~ s/^_use_/_supports_/; $self->get_dbms_capability ($capname) }
1062 sub set_dbms_capability {
1063 $_[0]->_dbh_details->{capability}{$_[1]} = $_[2];
1066 sub get_dbms_capability {
1067 my ($self, $capname) = @_;
1069 my $cap = $self->_dbh_details->{capability}{$capname};
# Lazily probe the driver: "_supports_X" maps to "_determine_supports_X",
# which is optional per driver (hence the ->can check).
1071 unless (defined $cap) {
1072 if (my $meth = $self->can ("_determine$capname")) {
1073 $cap = $self->$meth ? 1 : 0;
# Cache the result until the next (re)connect wipes _dbh_details.
1079 $self->set_dbms_capability ($capname, $cap);
1089 unless ($info = $self->_dbh_details->{info}) {
# Server version probing is best-effort: some drivers/servers cannot report
# it, in which case only a partial info hash gets cached.
1093 my $server_version = try { $self->_get_server_version };
1095 if (defined $server_version) {
1096 $info->{dbms_version} = $server_version;
# Strip any vendor suffix (e.g. "5.1.37-log") down to digits-and-dots.
1098 my ($numeric_version) = $server_version =~ /^([\d\.]+)/;
1099 my @verparts = split (/\./, $numeric_version);
1105 # consider only up to 3 version parts, iff not more than 3 digits
1107 while (@verparts && @use_parts < 3) {
1108 my $p = shift @verparts;
1110 push @use_parts, $p;
# Zero-pad so e.g. "5.1" and "5.1.0" normalize identically.
1112 push @use_parts, 0 while @use_parts < 3;
# The x.yyyzzz fixed-width form makes versions numerically comparable.
1114 $info->{normalized_dbms_version} = sprintf "%d.%03d%03d", @use_parts;
1118 $self->_dbh_details->{info} = $info;
1124 sub _get_server_version {
# 18 == SQL_DBMS_VER (see DBI get_info / DBI::Const::GetInfoType).
1125 shift->_get_dbh->get_info(18);
# Determines the concrete driver subclass (e.g. ::Storage::DBI::mysql),
# reblesses $self into it, and runs driver-specific init - exactly once.
1128 sub _determine_driver {
1131 if ((not $self->_driver_determined) && (not $self->{_in_determine_driver})) {
1132 my $started_connected = 0;
# Re-entrancy guard: driver probing may itself call wrapped methods.
1133 local $self->{_in_determine_driver} = 1;
1135 if (ref($self) eq __PACKAGE__) {
1137 if ($self->_dbh) { # we are connected
1138 $driver = $self->_dbh->{Driver}{Name};
1139 $started_connected = 1;
1141 # if connect_info is a CODEREF, we have no choice but to connect
1142 if (ref $self->_dbi_connect_info->[0] &&
1143 reftype $self->_dbi_connect_info->[0] eq 'CODE') {
1144 $self->_populate_dbh;
1145 $driver = $self->_dbh->{Driver}{Name};
1148 # try to use dsn to not require being connected, the driver may still
1149 # force a connection in _rebless to determine version
1150 # (dsn may not be supplied at all if all we do is make a mock-schema)
1151 my $dsn = $self->_dbi_connect_info->[0] || $ENV{DBI_DSN} || '';
1152 ($driver) = $dsn =~ /dbi:([^:]+):/i;
1153 $driver ||= $ENV{DBI_DRIVER};
# Loading the subclass is optional-by-design: an unknown driver simply
# stays on the generic ::Storage::DBI implementation.
1158 my $storage_class = "DBIx::Class::Storage::DBI::${driver}";
1159 if ($self->load_optional_class($storage_class)) {
1160 mro::set_mro($storage_class, 'c3');
1161 bless $self, $storage_class;
1167 $self->_driver_determined(1);
1169 $self->_init; # run driver-specific initializations
# Connection actions were deferred during an in-flight connect; run them
# now that the correct (sub)class is in place.
1171 $self->_run_connection_actions
1172 if !$started_connected && defined $self->_dbh;
sub _do_connection_actions {
  my $self          = shift;
  my $method_prefix = shift;
  my $call          = shift;

  # Dispatch a single connect/disconnect "action" depending on its shape:
  #  - plain string: method-name suffix (e.g. 'do_sql' with prefix
  #    'connect_call_' invokes connect_call_do_sql), passed remaining args
  #  - coderef: invoked as a method on $self
  #  - arrayref of strings/coderefs: each element dispatched recursively
  #  - arrayref of arrayrefs: each inner array is (action, @args)
  if (not ref($call)) {
    my $method = $method_prefix . $call;
    $self->$method(@_);
  } elsif (ref($call) eq 'CODE') {
    $self->$call(@_);
  } elsif (ref($call) eq 'ARRAY') {
    if (ref($call->[0]) ne 'ARRAY') {
      $self->_do_connection_actions($method_prefix, $_) for @$call;
    } else {
      $self->_do_connection_actions($method_prefix, @$_) for @$call;
    }
  } else {
    # Fixed typo in user-visible message: 'conection' -> 'connection'
    $self->throw_exception (sprintf ("Don't know how to process connection actions of type '%s'", ref($call)) );
  }

  return $self;
}
# Predefined on_connect_call/on_disconnect_call action: executes the given
# SQL (plus optional attrs/bind values) via _do_query.
1199 sub connect_call_do_sql {
1201 $self->_do_query(@_);
1204 sub disconnect_call_do_sql {
1206 $self->_do_query(@_);
# No-op stub: storage subclasses that need session-level datetime/timestamp
# configuration (see the on_connect_call POD) override this method.
sub connect_call_datetime_setup {
  return 1;
}
1213 my ($self, $action) = @_;
1215 if (ref $action eq 'CODE') {
1216 $action = $action->($self);
1217 $self->_do_query($_) foreach @$action;
1220 # Most debuggers expect ($sql, @bind), so we need to exclude
1221 # the attribute hash which is the second argument to $dbh->do
1222 # furthermore the bind values are usually to be presented
1223 # as named arrayref pairs, so wrap those here too
1224 my @do_args = (ref $action eq 'ARRAY') ? (@$action) : ($action);
1225 my $sql = shift @do_args;
1226 my $attrs = shift @do_args;
1227 my @bind = map { [ undef, $_ ] } @do_args;
1229 $self->_query_start($sql, @bind);
1230 $self->_get_dbh->do($sql, $attrs, @do_args);
1231 $self->_query_end($sql, @bind);
1238 my ($self, @info) = @_;
1240 $self->throw_exception("You failed to provide any connection info")
1243 my ($old_connect_via, $dbh);
1245 if ($INC{'Apache/DBI.pm'} && $ENV{MOD_PERL}) {
1246 $old_connect_via = $DBI::connect_via;
1247 $DBI::connect_via = 'connect';
1251 if(ref $info[0] eq 'CODE') {
1252 $dbh = $info[0]->();
1255 $dbh = DBI->connect(@info);
1262 unless ($self->unsafe) {
1264 # this odd anonymous coderef dereference is in fact really
1265 # necessary to avoid the unwanted effect described in perl5
1268 my $weak_self = $_[0];
1271 $_[1]->{HandleError} = sub {
1273 $weak_self->throw_exception("DBI Exception: $_[0]");
1276 # the handler may be invoked by something totally out of
1278 croak ("DBI Exception (unhandled by DBIC, ::Schema GCed): $_[0]");
1283 $dbh->{ShowErrorStatement} = 1;
1284 $dbh->{RaiseError} = 1;
1285 $dbh->{PrintError} = 0;
1289 $self->throw_exception("DBI Connection failed: $_")
1292 $DBI::connect_via = $old_connect_via if $old_connect_via;
1295 $self->_dbh_autocommit($dbh->{AutoCommit});
1300 my ($self, $name) = @_;
1302 $name = $self->_svp_generate_name
1303 unless defined $name;
1305 $self->throw_exception ("You can't use savepoints outside a transaction")
1306 if $self->{transaction_depth} == 0;
1308 $self->throw_exception ("Your Storage implementation doesn't support savepoints")
1309 unless $self->can('_svp_begin');
1311 push @{ $self->{savepoints} }, $name;
1313 $self->debugobj->svp_begin($name) if $self->debug;
1315 return $self->_svp_begin($name);
1319 my ($self, $name) = @_;
1321 $self->throw_exception ("You can't use savepoints outside a transaction")
1322 if $self->{transaction_depth} == 0;
1324 $self->throw_exception ("Your Storage implementation doesn't support savepoints")
1325 unless $self->can('_svp_release');
1327 if (defined $name) {
1328 $self->throw_exception ("Savepoint '$name' does not exist")
1329 unless grep { $_ eq $name } @{ $self->{savepoints} };
1331 # Dig through the stack until we find the one we are releasing. This keeps
1332 # the stack up to date.
1335 do { $svp = pop @{ $self->{savepoints} } } while $svp ne $name;
1337 $name = pop @{ $self->{savepoints} };
1340 $self->debugobj->svp_release($name) if $self->debug;
1342 return $self->_svp_release($name);
1346 my ($self, $name) = @_;
1348 $self->throw_exception ("You can't use savepoints outside a transaction")
1349 if $self->{transaction_depth} == 0;
1351 $self->throw_exception ("Your Storage implementation doesn't support savepoints")
1352 unless $self->can('_svp_rollback');
1354 if (defined $name) {
1355 # If they passed us a name, verify that it exists in the stack
1356 unless(grep({ $_ eq $name } @{ $self->{savepoints} })) {
1357 $self->throw_exception("Savepoint '$name' does not exist!");
1360 # Dig through the stack until we find the one we are releasing. This keeps
1361 # the stack up to date.
1362 while(my $s = pop(@{ $self->{savepoints} })) {
1363 last if($s eq $name);
1365 # Add the savepoint back to the stack, as a rollback doesn't remove the
1366 # named savepoint, only everything after it.
1367 push(@{ $self->{savepoints} }, $name);
1369 # We'll assume they want to rollback to the last savepoint
1370 $name = $self->{savepoints}->[-1];
1373 $self->debugobj->svp_rollback($name) if $self->debug;
1375 return $self->_svp_rollback($name);
# Produces an automatic savepoint name when the caller did not supply one.
# Names are simply numbered by the current depth of the savepoint stack,
# e.g. 'savepoint_0' for the first auto-named savepoint.
sub _svp_generate_name {
  my $self = shift;

  my $depth = scalar @{ $self->{savepoints} };
  return "savepoint_$depth";
}
1387 # this means we have not yet connected and do not know the AC status
1388 # (e.g. coderef $dbh)
1389 $self->ensure_connected if (! defined $self->_dbh_autocommit);
1391 if($self->{transaction_depth} == 0) {
1392 $self->debugobj->txn_begin()
1394 $self->_dbh_begin_work;
1396 elsif ($self->auto_savepoint) {
1399 $self->{transaction_depth}++;
1402 sub _dbh_begin_work {
1405 # if the user is utilizing txn_do - good for him, otherwise we need to
1406 # ensure that the $dbh is healthy on BEGIN.
1407 # We do this via ->dbh_do instead of ->dbh, so that the ->dbh "ping"
1408 # will be replaced by a failure of begin_work itself (which will be
1409 # then retried on reconnect)
1410 if ($self->{_in_dbh_do}) {
1411 $self->_dbh->begin_work;
1413 $self->dbh_do(sub { $_[1]->begin_work });
1419 if ($self->{transaction_depth} == 1) {
1420 $self->debugobj->txn_commit()
1423 $self->{transaction_depth} = 0
1424 if $self->_dbh_autocommit;
1426 elsif($self->{transaction_depth} > 1) {
1427 $self->{transaction_depth}--;
1429 if $self->auto_savepoint;
1435 my $dbh = $self->_dbh
1436 or $self->throw_exception('cannot COMMIT on a disconnected handle');
# txn_rollback / _dbh_rollback (fragments — interior lines elided):
# outermost rollback goes to the database; a nested rollback either
# rolls back to a savepoint (auto_savepoint) or throws the sentinel
# NESTED_ROLLBACK_EXCEPTION so the outer txn_do can unwind.
1442 my $dbh = $self->_dbh;
1444 if ($self->{transaction_depth} == 1) {
1445 $self->debugobj->txn_rollback()
1447 $self->{transaction_depth} = 0
1448 if $self->_dbh_autocommit;
1449 $self->_dbh_rollback;
1451 elsif($self->{transaction_depth} > 1) {
1452 $self->{transaction_depth}--;
1453 if ($self->auto_savepoint) {
1454 $self->svp_rollback;
# no savepoint support: signal the nested rollback to the outer handler
1459 die DBIx::Class::Storage::NESTED_ROLLBACK_EXCEPTION->new;
# error path: a genuine (non-sentinel) failure resets depth so the
# storage object does not get stuck thinking a txn is still open
1463 my $exception_class = "DBIx::Class::Storage::NESTED_ROLLBACK_EXCEPTION";
1465 if ($_ !~ /$exception_class/) {
1466 # ensure that a failed rollback resets the transaction depth
1467 $self->{transaction_depth} = $self->_dbh_autocommit ? 0 : 1;
1470 $self->throw_exception($_)
# _dbh_rollback: refuse to ROLLBACK on a disconnected handle
1476 my $dbh = $self->_dbh
1477 or $self->throw_exception('cannot ROLLBACK on a disconnected handle');
1481 # This used to be the top-half of _execute. It was split out to make it
1482 # easier to override in NoBindVars without duping the rest. It takes up
1483 # all of _execute's args, and emits $sql, @bind.
# Returns ($sql, \@bind) where each bind element is a [ $colname => @values ]
# arrayref; plain scalars in $extra_bind get a '!!dummy' column name.
1484 sub _prep_for_execute {
1485 my ($self, $op, $extra_bind, $ident, $args) = @_;
# a ResultSource object is reduced to its "from" specification
1487 if( blessed $ident && $ident->isa("DBIx::Class::ResultSource") ) {
1488 $ident = $ident->from();
# delegate actual SQL generation to the sql_maker method named by $op
1491 my ($sql, @bind) = $self->sql_maker->$op($ident, @$args);
1494 map { ref $_ eq 'ARRAY' ? $_ : [ '!!dummy', $_ ] } @$extra_bind)
1496 return ($sql, \@bind);
# Flatten [ $col => @values ] bind tuples into quoted scalar strings,
# used only for debug/trace output (see _query_start/_query_end).
1500 sub _fix_bind_params {
1501 my ($self, @bind) = @_;
1503 ### Turn @bind from something like this:
1504 ### ( [ "artist", 1 ], [ "cdid", 1, 3 ] )
1506 ### ( "'1'", "'1'", "'3'" )
# NOTE(review): defined( $_ && $_->[1] ) tests definedness of the
# boolean-AND result, not of $_->[1] itself — likely intended to be
# defined $_ && defined $_->[1]; confirm before changing (trace-only path).
1509 if ( defined( $_ && $_->[1] ) ) {
1510 map { qq{'$_'}; } @{$_}[ 1 .. $#$_ ];
# _query_start / _query_end (fragments): forward the SQL and a
# debug-friendly rendering of the binds to the debugobj tracer,
# but only when debugging is enabled (debug is checked first so the
# bind-flattening work is skipped in the common case).
1517 my ( $self, $sql, @bind ) = @_;
1519 if ( $self->debug ) {
1520 @bind = $self->_fix_bind_params(@bind);
1522 $self->debugobj->query_start( $sql, @bind );
# _query_end: identical shape, fired after execution
1527 my ( $self, $sql, @bind ) = @_;
1529 if ( $self->debug ) {
1530 @bind = $self->_fix_bind_params(@bind);
1531 $self->debugobj->query_end( $sql, @bind );
# _dbh_execute (fragment — interior lines elided): the workhorse that
# prepares SQL via _prep_for_execute, binds each value with any
# per-column DBI bind attributes, executes, and returns
# ($rv, $sth, @bind) in list context or $rv in scalar context.
1536 my ($self, $dbh, $op, $extra_bind, $ident, $bind_attributes, @args) = @_;
1538 my ($sql, $bind) = $self->_prep_for_execute($op, $extra_bind, $ident, \@args);
1540 $self->_query_start( $sql, @$bind );
1542 my $sth = $self->sth($sql,$op);
# DBI placeholders are 1-based
1544 my $placeholder_index = 1;
1546 foreach my $bound (@$bind) {
1547 my $attributes = {};
1548 my($column_name, @data) = @$bound;
# look up driver-specific bind attributes (e.g. blob types) by column
1550 if ($bind_attributes) {
1551 $attributes = $bind_attributes->{$column_name}
1552 if defined $bind_attributes->{$column_name};
1555 foreach my $data (@data) {
1556 my $ref = ref $data;
1557 $data = $ref && $ref ne 'ARRAY' ? ''.$data : $data; # stringify args (except arrayrefs)
1559 $sth->bind_param($placeholder_index, $data, $attributes);
1560 $placeholder_index++;
1564 # Can this fail without throwing an exception anyways???
1565 my $rv = $sth->execute();
1566 $self->throw_exception(
1567 $sth->errstr || $sth->err || 'Unknown error: execute() returned false, but error flags were not set...'
1570 $self->_query_end( $sql, @$bind );
1572 return (wantarray ? ($rv, $sth, @$bind) : $rv);
# _execute: public wrapper that retries _dbh_execute over disconnects
1577 $self->dbh_do('_dbh_execute', @_); # retry over disconnects
# Pre-fetch sequence values for columns marked auto_nextval that the
# caller did not supply, populating both $to_insert and the returned
# update hash. (Fragment — lines including $upd initialization and the
# return are elided in this excerpt.)
1580 sub _prefetch_insert_auto_nextvals {
1581 my ($self, $source, $to_insert) = @_;
1585 foreach my $col ( $source->columns ) {
1586 if ( !defined $to_insert->{$col} ) {
1587 my $col_info = $source->column_info($col);
1589 if ( $col_info->{auto_nextval} ) {
1590 $upd->{$col} = $to_insert->{$col} = $self->_sequence_fetch(
# cache the discovered sequence name on the column info for reuse
1592 $col_info->{sequence} ||=
1593 $self->_dbh_get_autoinc_seq($self->_get_dbh, $source, $col)
# insert (fragment — the sub header and $self shift are elided):
# executes a single-row INSERT; when $opts->{returning} is set, reads
# the RETURNING values from the executed statement handle and merges
# them into the returned hash of updated columns.
1604 my ($source, $to_insert, $opts) = @_;
1606 my $updated_cols = $self->_prefetch_insert_auto_nextvals (@_);
1608 my $bind_attributes = $self->source_bind_attributes($source);
1610 my ($rv, $sth) = $self->_execute('insert' => [], $source, $bind_attributes, $to_insert, $opts);
1612 if ($opts->{returning}) {
1613 my @ret_cols = @{$opts->{returning}};
# some DBDs warn on fetching from a non-SELECT handle; suppress that
1615 my @ret_vals = try {
1616 local $SIG{__WARN__} = sub {};
1617 my @r = $sth->fetchrow_array;
1623 @ret{@ret_cols} = @ret_vals if (@ret_vals);
1631 return $updated_cols;
1634 ## Currently it is assumed that all values passed will be "normal", i.e. not
1635 ## scalar refs, or at least, all the same type as the first set, the statement is
1636 ## only prepped once.
# insert_bulk (fragment — interior lines elided): multi-row INSERT.
# Validates every slice against the shape inferred from the first row
# (literal SQL via scalar refs must be identical in every row), then
# executes either via DBI execute_array or, for an all-literal insert
# with no placeholders, via repeated plain executes — all inside a
# txn_scope_guard since neither path is atomic.
1638 my ($self, $source, $cols, $data) = @_;
# %colvalues maps column name => bind index, or => literal-SQL scalar ref
1641 @colvalues{@$cols} = (0..$#$cols);
1643 for my $i (0..$#$cols) {
1644 my $first_val = $data->[0][$i];
1645 next unless ref $first_val eq 'SCALAR';
1647 $colvalues{ $cols->[$i] } = $first_val;
1650 # check for bad data and stringify stringifiable objects
1651 my $bad_slice = sub {
1652 my ($msg, $col_idx, $slice_idx) = @_;
1653 $self->throw_exception(sprintf "%s for column '%s' in populate slice:\n%s",
1657 local $Data::Dumper::Maxdepth = 1; # don't dump objects, if any
1659 map { $cols->[$_] => $data->[$slice_idx][$_] } (0 .. $#$cols)
1665 for my $datum_idx (0..$#$data) {
1666 my $datum = $data->[$datum_idx];
1668 for my $col_idx (0..$#$cols) {
1669 my $val = $datum->[$col_idx];
1670 my $sqla_bind = $colvalues{ $cols->[$col_idx] };
1671 my $is_literal_sql = (ref $sqla_bind) eq 'SCALAR';
# literal-SQL columns must be literal (and identical) in every slice
1673 if ($is_literal_sql) {
1675 $bad_slice->('bind found where literal SQL expected', $col_idx, $datum_idx);
1677 elsif ((my $reftype = ref $val) ne 'SCALAR') {
1678 $bad_slice->("$reftype reference found where literal SQL expected",
1679 $col_idx, $datum_idx);
1681 elsif ($$val ne $$sqla_bind){
1682 $bad_slice->("inconsistent literal SQL value, expecting: '$$sqla_bind'",
1683 $col_idx, $datum_idx);
# bind columns: refs are only acceptable if they stringify via overload
1686 elsif (my $reftype = ref $val) {
1688 if (overload::Method($val, '""')) {
1689 $datum->[$col_idx] = "".$val;
1692 $bad_slice->("$reftype reference found where bind expected",
1693 $col_idx, $datum_idx);
1699 my ($sql, $bind) = $self->_prep_for_execute (
1700 'insert', undef, $source, [\%colvalues]
# NOTE(review): "my $empty_bind = 1 if ..." — conditional my-declaration
# is an undefined-behavior idiom in Perl; works here by accident, should
# be "my $empty_bind = (...) ? 1 : 0;" when this sub can be safely edited.
1704 my $empty_bind = 1 if (not @bind) &&
1705 (grep { ref $_ eq 'SCALAR' } values %colvalues) == @$cols;
# no binds but not all-literal => the driver ate our placeholders
1707 if ((not @bind) && (not $empty_bind)) {
1708 $self->throw_exception(
1709 'Cannot insert_bulk without support for placeholders'
1713 # neither _execute_array, nor _execute_inserts_with_no_binds are
1714 # atomic (even if _execute _array is a single call). Thus a safety
1716 my $guard = $self->txn_scope_guard;
1718 $self->_query_start( $sql, [ dummy => '__BULK_INSERT__' ] );
1719 my $sth = $self->sth($sql);
1722 # bind_param_array doesn't work if there are no binds
1723 $self->_dbh_execute_inserts_with_no_binds( $sth, scalar @$data );
1726 # @bind = map { ref $_ ? ''.$_ : $_ } @bind; # stringify args
1727 $self->_execute_array( $source, $sth, \@bind, $cols, $data );
1731 $self->_query_end( $sql, [ dummy => '__BULK_INSERT__' ] );
1735 return (wantarray ? ($rv, $sth, @bind) : $rv);
# Bind column value arrays and run DBI's execute_array for a bulk
# insert; on failure, locate the first failed tuple in $tuple_status
# and report it together with a dump of the offending row.
1738 sub _execute_array {
1739 my ($self, $source, $sth, $bind, $cols, $data, @extra) = @_;
1741 ## This must be an arrayref, else nothing works!
1742 my $tuple_status = [];
1744 ## Get the bind_attributes, if any exist
1745 my $bind_attributes = $self->source_bind_attributes($source);
1747 ## Bind the values and execute
1748 my $placeholder_index = 1;
1750 foreach my $bound (@$bind) {
1752 my $attributes = {};
1753 my ($column_name, $data_index) = @$bound;
1755 if( $bind_attributes ) {
1756 $attributes = $bind_attributes->{$column_name}
1757 if defined $bind_attributes->{$column_name};
# column-major slice: all rows' values for this placeholder
1760 my @data = map { $_->[$data_index] } @$data;
1762 $sth->bind_param_array(
1765 (%$attributes ? $attributes : ()),
1767 $placeholder_index++;
1772 $rv = $self->_dbh_execute_array($sth, $tuple_status, @extra);
1778 # Statement must finish even if there was an exception.
1783 $err = shift unless defined $err
1788 if (! defined $err and $sth->err);
# scan for the first tuple whose status is a ref (i.e. a failure record)
1792 ++$i while $i <= $#$tuple_status && !ref $tuple_status->[$i];
1794 $self->throw_exception("Unexpected populate error: $err")
1795 if ($i > $#$tuple_status);
1797 $self->throw_exception(sprintf "%s for populate slice:\n%s",
1798 ($tuple_status->[$i][1] || $err),
1799 Dumper { map { $cols->[$_] => $data->[$i][$_] } (0 .. $#$cols) },
# Thin hook over DBI's execute_array, overridable by drivers.
1806 sub _dbh_execute_array {
1807 my ($self, $sth, $tuple_status, @extra) = @_;
1809 return $sth->execute_array({ArrayTupleStatus => $tuple_status});
# Run a fully-literal (no placeholders) INSERT statement $count times,
# with RaiseError forced on so failures surface as exceptions.
# (Fragment — the eval/try wrapper and $err plumbing are elided.)
1812 sub _dbh_execute_inserts_with_no_binds {
1813 my ($self, $sth, $count) = @_;
1817 my $dbh = $self->_get_dbh;
1818 local $dbh->{RaiseError} = 1;
1819 local $dbh->{PrintError} = 0;
1821 $sth->execute foreach 1..$count;
1827 # Make sure statement is finished even if there was an exception.
1832 $err = shift unless defined $err;
1836 $self->throw_exception($err) if defined $err;
# update / delete (fragments — sub headers elided): both simply look up
# the source's bind attributes and delegate to _execute with the
# matching sql_maker operation name.
1842 my ($self, $source, @args) = @_;
1844 my $bind_attrs = $self->source_bind_attributes($source);
1846 return $self->_execute('update' => [], $source, $bind_attrs, @args);
# delete: identical shape to update
1851 my ($self, $source, @args) = @_;
1853 my $bind_attrs = $self->source_bind_attributes($source);
1855 return $self->_execute('delete' => [], $source, $bind_attrs, @args);
1858 # We were sent here because the $rs contains a complex search
1859 # which will require a subquery to select the correct rows
1860 # (i.e. joined or limited resultsets, or non-introspectable conditions)
1862 # Generating a single PK column subquery is trivial and supported
1863 # by all RDBMS. However if we have a multicolumn PK, things get ugly.
1864 # Look at _multipk_update_delete()
1865 sub _subq_update_delete {
1867 my ($rs, $op, $values) = @_;
1869 my $rsrc = $rs->result_source;
1871 # quick check if we got a sane rs on our hands
1872 my @pcols = $rsrc->_pri_cols;
1874 my $sel = $rs->_resolved_attrs->{select};
1875 $sel = [ $sel ] unless ref $sel eq 'ARRAY';
# compare the selected columns against the alias-qualified PK list;
# "\x00" is used as an unambiguous join separator for the comparison
1878 join ("\x00", map { join '.', $rs->{attrs}{alias}, $_ } sort @pcols)
1880 join ("\x00", sort @$sel )
1882 $self->throw_exception (
1883 '_subq_update_delete can not be called on resultsets selecting columns other than the primary keys'
# single-column PK: a plain "PK IN (subquery)" works everywhere
1890 $op eq 'update' ? $values : (),
1891 { $pcols[0] => { -in => $rs->as_query } },
# multi-column PK: fall through to the generic (slow) strategy
1896 return $self->_multipk_update_delete (@_);
1900 # ANSI SQL does not provide a reliable way to perform a multicol-PK
1901 # resultset update/delete involving subqueries. So by default resort
1902 # to simple (and inefficient) delete_all style per-row operations,
1903 # while allowing specific storages to override this with a faster
1906 sub _multipk_update_delete {
1907 return shift->_per_row_update_delete (@_);
1910 # This is the default loop used to delete/update rows for multi PK
1911 # resultsets, and used by mysql exclusively (because it can't do anything
1914 # We do not use $row->$op style queries, because resultset update/delete
1915 # is not expected to cascade (this is what delete_all/update_all is for).
1917 # There should be no race conditions as the entire operation is rolled
1920 sub _per_row_update_delete {
1922 my ($rs, $op, $values) = @_;
1924 my $rsrc = $rs->result_source;
1925 my @pcols = $rsrc->_pri_cols;
# wrap the whole per-row loop in one transaction
1927 my $guard = $self->txn_scope_guard;
1929 # emulate the return value of $sth->execute for non-selects
1930 my $row_cnt = '0E0';
# fetch all PK tuples up front, then issue one update/delete per row
1932 my $subrs_cur = $rs->cursor;
1933 my @all_pk = $subrs_cur->all;
1934 for my $pks ( @all_pk) {
1937 for my $i (0.. $#pcols) {
1938 $cond->{$pcols[$i]} = $pks->[$i];
1943 $op eq 'update' ? $values : (),
# _select (fragment): compute the full _select_args list and execute it.
1957 $self->_execute($self->_select_args(@_));
# Build the query for a resultset without executing it; returns the
# \[ "(sql)", @bind ] literal form (or the expanded triple — see the
# elided wantarray-style branch at the tail).
1960 sub _select_args_to_query {
1963 # my ($op, $bind, $ident, $bind_attrs, $select, $cond, $rs_attrs, $rows, $offset)
1964 # = $self->_select_args($ident, $select, $cond, $attrs);
1965 my ($op, $bind, $ident, $bind_attrs, @args) =
1966 $self->_select_args(@_);
1968 # my ($sql, $prepared_bind) = $self->_prep_for_execute($op, $bind, $ident, [ $select, $cond, $rs_attrs, $rows, $offset ]);
1969 my ($sql, $prepared_bind) = $self->_prep_for_execute($op, $bind, $ident, \@args);
1970 $prepared_bind ||= [];
1973 ? ($sql, $prepared_bind, $bind_attrs)
1974 : \[ "($sql)", @$prepared_bind ]
# _select_args (fragment — sub header and several interior lines elided):
# normalizes a ($ident, $select, $where, $attrs) quad into the full
# argument list consumed by _prep_for_execute/_execute for a SELECT:
# resolves sources, collects per-column bind attributes, sanity-checks
# rows/offset, handles complex-prefetch rewriting and join pruning.
1979 my ($self, $ident, $select, $where, $attrs) = @_;
1981 my $sql_maker = $self->sql_maker;
1982 my ($alias2source, $rs_alias) = $self->_resolve_ident_sources ($ident);
# record the root source handle when the root alias resolves
1989 $rs_alias && $alias2source->{$rs_alias}
1990 ? ( _rsroot_source_handle => $alias2source->{$rs_alias}->handle )
1995 # calculate bind_attrs before possible $ident mangling
1996 my $bind_attrs = {};
1997 for my $alias (keys %$alias2source) {
1998 my $bindtypes = $self->source_bind_attributes ($alias2source->{$alias}) || {};
1999 for my $col (keys %$bindtypes) {
2001 my $fqcn = join ('.', $alias, $col);
2002 $bind_attrs->{$fqcn} = $bindtypes->{$col} if $bindtypes->{$col};
2004 # Unqualified column names are nice, but at the same time can be
2005 # rather ambiguous. What we do here is basically go along with
2006 # the loop, adding an unqualified column slot to $bind_attrs,
2007 # alongside the fully qualified name. As soon as we encounter
2008 # another column by that name (which would imply another table)
2009 # we unset the unqualified slot and never add any info to it
2010 # to avoid erroneous type binding. If this happens the user's
2011 # only choice will be to fully qualify the column name
2013 if (exists $bind_attrs->{$col}) {
2014 $bind_attrs->{$col} = {};
2017 $bind_attrs->{$col} = $bind_attrs->{$fqcn};
2022 # Sanity check the attributes (SQLAHacks does it too, but
2023 # in case of a software_limit we'll never reach there)
2024 if (defined $attrs->{offset}) {
2025 $self->throw_exception('A supplied offset attribute must be a non-negative integer')
2026 if ( $attrs->{offset} =~ /\D/ or $attrs->{offset} < 0 );
2028 $attrs->{offset} ||= 0;
2030 if (defined $attrs->{rows}) {
2031 $self->throw_exception("The rows attribute must be a positive integer if present")
2032 if ( $attrs->{rows} =~ /\D/ or $attrs->{rows} <= 0 );
# offset without rows: force an effectively-unlimited LIMIT
2034 elsif ($attrs->{offset}) {
2035 # MySQL actually recommends this approach. I cringe.
2036 $attrs->{rows} = $sql_maker->__max_int;
2041 # see if we need to tear the prefetch apart otherwise delegate the limiting to the
2042 # storage, unless software limit was requested
2045 ( $attrs->{rows} && keys %{$attrs->{collapse}} )
2047 # grouped prefetch (to satisfy group_by == select)
2048 ( $attrs->{group_by}
2050 @{$attrs->{group_by}}
2052 $attrs->{_prefetch_select}
2054 @{$attrs->{_prefetch_select}}
# complex prefetch: rewrite the whole quad around a limited subquery
2057 ($ident, $select, $where, $attrs)
2058 = $self->_adjust_select_args_for_complex_prefetch ($ident, $select, $where, $attrs);
2060 elsif (! $attrs->{software_limit} ) {
2061 push @limit, $attrs->{rows}, $attrs->{offset};
2064 # try to simplify the joinmap further (prune unreferenced type-single joins)
2065 $ident = $self->_prune_unused_joins ($ident, $select, $where, $attrs);
2068 # This would be the point to deflate anything found in $where
2069 # (and leave $attrs->{bind} intact). Problem is - inflators historically
2070 # expect a row object. And all we have is a resultsource (it is trivial
2071 # to extract deflator coderefs via $alias2source above).
2073 # I don't see a way forward other than changing the way deflators are
2074 # invoked, and that's just bad...
2077 return ('select', $attrs->{bind}, $ident, $bind_attrs, $select, $where, $attrs, @limit);
2080 # Returns a counting SELECT for a simple count
2081 # query. Abstracted so that a storage could override
2082 # this to { count => 'firstcol' } or whatever makes
2083 # sense as a performance optimization
2085 #my ($self, $source, $rs_attrs) = @_;
2086 return { count => '*' };
# Collect per-column DBI bind attributes for a result source by asking
# bind_attribute_by_data_type about each column's declared data_type.
# Returns a hashref keyed by column name (undef entries elided — the
# condition line is missing from this excerpt).
2090 sub source_bind_attributes {
2091 my ($self, $source) = @_;
2093 my $bind_attributes;
2094 foreach my $column ($source->columns) {
2096 my $data_type = $source->column_info($column)->{data_type} || '';
2097 $bind_attributes->{$column} = $self->bind_attribute_by_data_type($data_type)
2101 return $bind_attributes;
2108 =item Arguments: $ident, $select, $condition, $attrs
2112 Handle a SQL select statement.
# select (fragment): hand the raw argument list to the configured
# cursor class, which drives the actual fetch lazily.
2118 my ($ident, $select, $condition, $attrs) = @_;
2119 return $self->cursor_class->new($self, \@_, $attrs);
# select_single (fragment): run the select, take the first row, and
# warn (deprecation) when the query yields more than one row.
2124 my ($rv, $sth, @bind) = $self->_select(@_);
2125 my @row = $sth->fetchrow_array;
# NOTE(review): "my @nextrow = ... if @row" — conditional my-declaration
# is an undefined-behavior idiom; should be rewritten once the full sub
# is editable.
2126 my @nextrow = $sth->fetchrow_array if @row;
2127 if(@row && @nextrow) {
2128 carp "Query returned more than one row. SQL that returns multiple rows is DEPRECATED for ->find and ->single";
2130 # Need to call finish() to work round broken DBDs
2139 =item Arguments: $sql
2143 Returns a L<DBI> sth (statement handle) for the supplied SQL.
# _dbh_sth (fragment): prepare (optionally cached) a statement handle
# on the raw $dbh; caching uses prepare_cached's if_active=3 mode so an
# active cached handle is never handed out twice.
2148 my ($self, $dbh, $sql) = @_;
2150 # 3 is the if_active parameter which avoids active sth re-use
2151 my $sth = $self->disable_sth_caching
2152 ? $dbh->prepare($sql)
2153 : $dbh->prepare_cached($sql, {}, 3);
2155 # XXX You would think RaiseError would make this impossible,
2156 # but apparently that's not true :(
2157 $self->throw_exception($dbh->errstr) if !$sth;
# sth: public wrapper that retries _dbh_sth over disconnects
2163 my ($self, $sql) = @_;
2164 $self->dbh_do('_dbh_sth', $sql); # retry over disconnects
# Introspect column metadata for $table. Primary strategy: the DBD's
# column_info() catalog call; fallback: prepare a "SELECT ... WHERE 1=0"
# and read metadata off the statement handle attributes, mapping
# numeric TYPE codes back to names via type_info() when possible.
# (Fragment — try/catch plumbing, %result init and return are elided.)
2167 sub _dbh_columns_info_for {
2168 my ($self, $dbh, $table) = @_;
2170 if ($dbh->can('column_info')) {
# split a possibly schema-qualified "schema.table" name
2174 my ($schema,$tab) = $table =~ /^(.+?)\.(.+)$/ ? ($1,$2) : (undef,$table);
2175 my $sth = $dbh->column_info( undef,$schema, $tab, '%' );
2177 while ( my $info = $sth->fetchrow_hashref() ){
2179 $column_info{data_type} = $info->{TYPE_NAME};
2180 $column_info{size} = $info->{COLUMN_SIZE};
2181 $column_info{is_nullable} = $info->{NULLABLE} ? 1 : 0;
2182 $column_info{default_value} = $info->{COLUMN_DEF};
2183 my $col_name = $info->{COLUMN_NAME};
# strip driver-added double quotes around the column name
2184 $col_name =~ s/^\"(.*)\"$/$1/;
2186 $result{$col_name} = \%column_info;
# use the catalog data only if the call succeeded AND produced rows
2191 return \%result if !$caught && scalar keys %result;
# fallback: zero-row select, metadata from the sth attributes
2195 my $sth = $dbh->prepare($self->sql_maker->select($table, undef, \'1 = 0'));
2197 my @columns = @{$sth->{NAME_lc}};
2198 for my $i ( 0 .. $#columns ){
2200 $column_info{data_type} = $sth->{TYPE}->[$i];
2201 $column_info{size} = $sth->{PRECISION}->[$i];
2202 $column_info{is_nullable} = $sth->{NULLABLE}->[$i] ? 1 : 0;
# split "type(size)" composites reported by some drivers
2204 if ($column_info{data_type} =~ m/^(.*?)\((.*?)\)$/) {
2205 $column_info{data_type} = $1;
2206 $column_info{size} = $2;
2209 $result{$columns[$i]} = \%column_info;
# translate numeric DBI type codes into type names where supported
2213 foreach my $col (keys %result) {
2214 my $colinfo = $result{$col};
2215 my $type_num = $colinfo->{data_type};
2217 if(defined $type_num && $dbh->can('type_info')) {
2218 my $type_info = $dbh->type_info($type_num);
2219 $type_name = $type_info->{TYPE_NAME} if $type_info;
2220 $colinfo->{data_type} = $type_name if $type_name;
# Public entry point: same as above but fetches the live $dbh itself.
2227 sub columns_info_for {
2228 my ($self, $table) = @_;
2229 $self->_dbh_columns_info_for ($self->_get_dbh, $table);
2232 =head2 last_insert_id
2234 Return the row id of the last insert.
# Best-effort generic last_insert_id via DBI; storage subclasses are
# expected to override this when the generic call does not work for
# their driver — hence the explicit exception on failure.
2238 sub _dbh_last_insert_id {
2239 my ($self, $dbh, $source, $col) = @_;
# try{} swallows drivers that die on unsupported last_insert_id
2241 my $id = try { $dbh->last_insert_id (undef, undef, $source->name, $col) };
2243 return $id if defined $id;
2245 my $class = ref $self;
2246 $self->throw_exception ("No storage specific _dbh_last_insert_id() method implemented in $class, and the generic DBI::last_insert_id() failed");
# Public wrapper over the raw-handle implementation.
2249 sub last_insert_id {
2251 $self->_dbh_last_insert_id ($self->_dbh, @_);
2254 =head2 _native_data_type
2258 =item Arguments: $type_name
2262 This API is B<EXPERIMENTAL>, will almost definitely change in the future, and
2263 currently only used by L<::AutoCast|DBIx::Class::Storage::DBI::AutoCast> and
2264 L<::Sybase::ASE|DBIx::Class::Storage::DBI::Sybase::ASE>.
2266 The default implementation returns C<undef>, implement in your Storage driver if
2267 you need this functionality.
2269 Should map types from other databases to the native RDBMS type, for example
2270 C<VARCHAR2> to C<VARCHAR>.
2272 Types with modifiers should map to the underlying data type. For example,
2273 C<INTEGER AUTO_INCREMENT> should become C<INTEGER>.
2275 Composite types should map to the container type, for example
2276 C<ENUM(foo,bar,baz)> becomes C<ENUM>.
# Default no-op: drivers override to map foreign type names to the
# native RDBMS type (see the POD above). Returns undef here.
2280 sub _native_data_type {
2281 #my ($self, $data_type) = @_;
2285 # Check if placeholders are supported at all
# Probe by executing a trivial parameterized statement with RaiseError
# on; an exception (caught in elided lines) means "no placeholders".
2286 sub _determine_supports_placeholders {
2288 my $dbh = $self->_get_dbh;
2290 # some drivers provide a $dbh attribute (e.g. Sybase and $dbh->{syb_dynamic_supported})
2291 # but it is inaccurate more often than not
2293 local $dbh->{PrintError} = 0;
2294 local $dbh->{RaiseError} = 1;
2295 $dbh->do('select ?', {}, 1);
2303 # Check if placeholders bound to non-string types throw exceptions
2305 sub _determine_supports_typeless_placeholders {
2307 my $dbh = $self->_get_dbh;
2310 local $dbh->{PrintError} = 0;
2311 local $dbh->{RaiseError} = 1;
2312 # this specifically tests a bind that is NOT a string
2313 $dbh->do('select 1 where 1 = ?', {}, 1);
2323 Returns the database driver name.
2328 shift->_get_dbh->{Driver}->{Name};
2331 =head2 bind_attribute_by_data_type
2333 Given a datatype from column info, returns a database specific bind
2334 attribute for C<< $dbh->bind_param($val,$attribute) >> or nothing if we will
2335 let the database planner just handle it.
2337 Generally only needed for special case column types, like bytea in postgres.
2341 sub bind_attribute_by_data_type {
2345 =head2 is_datatype_numeric
2347 Given a datatype from column_info, returns a boolean value indicating if
2348 the current RDBMS considers it a numeric value. This controls how
2349 L<DBIx::Class::Row/set_column> decides whether to mark the column as
2350 dirty - when the datatype is deemed numeric a C<< != >> comparison will
2351 be performed instead of the usual C<eq>.
# True when the given data_type string names a numeric SQL type
# (pattern match; the /x-style closing modifiers are elided here).
2355 sub is_datatype_numeric {
2356 my ($self, $dt) = @_;
# undef/empty data_type is never numeric
2358 return 0 unless $dt;
2360 return $dt =~ /^ (?:
2361 numeric | int(?:eger)? | (?:tiny|small|medium|big)int | dec(?:imal)? | real | float | double (?: \s+ precision)? | (?:big)?serial
2366 =head2 create_ddl_dir
2370 =item Arguments: $schema \@databases, $version, $directory, $preversion, \%sqlt_args
2374 Creates a SQL file based on the Schema, for each of the specified
2375 database engines in C<\@databases> in the given directory.
2376 (note: specify L<SQL::Translator> names, not L<DBI> driver names).
2378 Given a previous version number, this will also create a file containing
2379 the ALTER TABLE statements to transform the previous schema into the
2380 current one. Note that these statements may contain C<DROP TABLE> or
2381 C<DROP COLUMN> statements that can potentially destroy data.
2383 The file names are created using the C<ddl_filename> method below, please
2384 override this method in your schema if you would like a different file
2385 name format. For the ALTER file, the same format is used, replacing
2386 $version in the name with "$preversion-$version".
2388 See L<SQL::Translator/METHODS> for a list of values for C<\%sqlt_args>.
2389 The most common value for this would be C<< { add_drop_table => 1 } >>
2390 to have the SQL produced include a C<DROP TABLE> statement for each table
2391 created. For quoting purposes supply C<quote_table_names> and
2392 C<quote_field_names>.
2394 If no arguments are passed, then the following default values are assumed:
2398 =item databases - ['MySQL', 'SQLite', 'PostgreSQL']
2400 =item version - $schema->schema_version
2402 =item directory - './'
2404 =item preversion - <none>
2408 By default, C<\%sqlt_args> will have
2410 { add_drop_table => 1, ignore_constraint_names => 1, ignore_index_names => 1 }
2412 merged with the hash passed in. To disable any of those features, pass in a
2413 hashref like the following
2415 { ignore_constraint_names => 0, # ... other options }
2418 WARNING: You are strongly advised to check all SQL files created, before applying
# Generate DDL files (and optionally upgrade diffs against a previous
# version) for each requested database producer, via SQL::Translator.
# See the POD above for argument semantics.
# (Fragment — many interior lines elided in this excerpt.)
2423 sub create_ddl_dir {
2424 my ($self, $schema, $databases, $version, $dir, $preversion, $sqltargs) = @_;
2427 carp "No directory given, using ./\n";
2432 make_path ("$dir") # make_path does not like objects (i.e. Path::Class::Dir)
2434 $self->throw_exception(
# NOTE(review): 'error unknow' is a typo in a runtime message — should
# read 'unknown error'; fix when this sub can be edited as a whole.
2435 "Failed to create '$dir': " . ($! || $@ || 'error unknow')
2439 $self->throw_exception ("Directory '$dir' does not exist\n") unless(-d $dir);
2441 $databases ||= ['MySQL', 'SQLite', 'PostgreSQL'];
2442 $databases = [ $databases ] if(ref($databases) ne 'ARRAY');
2444 my $schema_version = $schema->schema_version || '1.x';
2445 $version ||= $schema_version;
# default sqltargs (merged with caller-supplied ones in elided lines)
2448 add_drop_table => 1,
2449 ignore_constraint_names => 1,
2450 ignore_index_names => 1,
2454 unless (DBIx::Class::Optional::Dependencies->req_ok_for ('deploy')) {
2455 $self->throw_exception("Can't create a ddl file without " . DBIx::Class::Optional::Dependencies->req_missing_for ('deploy') );
2458 my $sqlt = SQL::Translator->new( $sqltargs );
2460 $sqlt->parser('SQL::Translator::Parser::DBIx::Class');
2461 my $sqlt_schema = $sqlt->translate({ data => $schema })
2462 or $self->throw_exception ($sqlt->error);
2464 foreach my $db (@$databases) {
# reuse the already-parsed schema for every producer
2466 $sqlt->{schema} = $sqlt_schema;
2467 $sqlt->producer($db);
2470 my $filename = $schema->ddl_filename($db, $version, $dir);
2471 if (-e $filename && ($version eq $schema_version )) {
2472 # if we are dumping the current version, overwrite the DDL
2473 carp "Overwriting existing DDL file - $filename";
2477 my $output = $sqlt->translate;
2479 carp("Failed to translate to $db, skipping. (" . $sqlt->error . ")");
# NOTE(review): 2-arg open with interpolated ">$filename" — prefer
# 3-arg open(my $fh, '>', $filename) when this sub is next edited.
2482 if(!open($file, ">$filename")) {
2483 $self->throw_exception("Can't open $filename for writing ($!)");
2486 print $file $output;
# diff generation only happens when a previous version was supplied
2489 next unless ($preversion);
2491 require SQL::Translator::Diff;
2493 my $prefilename = $schema->ddl_filename($db, $preversion, $dir);
2494 if(!-e $prefilename) {
2495 carp("No previous schema file found ($prefilename)");
2499 my $difffile = $schema->ddl_filename($db, $version, $dir, $preversion);
2501 carp("Overwriting existing diff file - $difffile");
# parse the previous DDL file back into an SQLT schema
2507 my $t = SQL::Translator->new($sqltargs);
2512 or $self->throw_exception ($t->error);
2514 my $out = $t->translate( $prefilename )
2515 or $self->throw_exception ($t->error);
2517 $source_schema = $t->schema;
2519 $source_schema->name( $prefilename )
2520 unless ( $source_schema->name );
2523 # The "new" style of producers have sane normalization and can support
2524 # diffing a SQL file against a DBIC->SQLT schema. Old style ones don't
2525 # And we have to diff parsed SQL against parsed SQL.
2526 my $dest_schema = $sqlt_schema;
2528 unless ( "SQL::Translator::Producer::$db"->can('preprocess_schema') ) {
2529 my $t = SQL::Translator->new($sqltargs);
2534 or $self->throw_exception ($t->error);
2536 my $out = $t->translate( $filename )
2537 or $self->throw_exception ($t->error);
2539 $dest_schema = $t->schema;
2541 $dest_schema->name( $filename )
2542 unless $dest_schema->name;
2545 my $diff = SQL::Translator::Diff::schema_diff($source_schema, $db,
# NOTE(review): another 2-arg open — same 3-arg recommendation as above
2549 if(!open $file, ">$difffile") {
2550 $self->throw_exception("Can't write to $difffile ($!)");
2558 =head2 deployment_statements
2562 =item Arguments: $schema, $type, $version, $directory, $sqlt_args
2566 Returns the statements used by L</deploy> and L<DBIx::Class::Schema/deploy>.
2568 The L<SQL::Translator> (not L<DBI>) database driver name can be explicitly
2569 provided in C<$type>, otherwise the result of L</sqlt_type> is used as default.
2571 C<$directory> is used to return statements from files in a previously created
2572 L</create_ddl_dir> directory and is optional. The filenames are constructed
2573 from L<DBIx::Class::Schema/ddl_filename>, the schema name and the C<$version>.
2575 If no C<$directory> is specified then the statements are constructed on the
2576 fly using L<SQL::Translator> and C<$version> is ignored.
2578 See L<SQL::Translator/METHODS> for a list of values for C<$sqlt_args>.
# Produce the deployment SQL: read a pre-generated DDL file when one
# exists for ($type, $version, $dir), otherwise generate statements on
# the fly with SQL::Translator. List/scalar context controls whether a
# statement list or a single blob is returned (via $wa, set in elided
# lines). See POD above.
2582 sub deployment_statements {
2583 my ($self, $schema, $type, $version, $dir, $sqltargs) = @_;
2584 $type ||= $self->sqlt_type;
2585 $version ||= $schema->schema_version || '1.x';
2587 my $filename = $schema->ddl_filename($type, $version, $dir);
# NOTE(review): 2-arg open with interpolated "<$filename" — prefer the
# 3-arg form when this sub can be safely rewritten.
2591 open($file, "<$filename")
2592 or $self->throw_exception("Can't open $filename ($!)");
2595 return join('', @rows);
2598 unless (DBIx::Class::Optional::Dependencies->req_ok_for ('deploy') ) {
2599 $self->throw_exception("Can't deploy without a ddl_dir or " . DBIx::Class::Optional::Dependencies->req_missing_for ('deploy') );
2602 # sources needs to be a parser arg, but for simplicity allow at top level
2604 $sqltargs->{parser_args}{sources} = delete $sqltargs->{sources}
2605 if exists $sqltargs->{sources};
2607 my $tr = SQL::Translator->new(
2608 producer => "SQL::Translator::Producer::${type}",
2610 parser => 'SQL::Translator::Parser::DBIx::Class',
# list context => statement list; scalar context => single SQL string
2617 @ret = $tr->translate;
2620 $ret[0] = $tr->translate;
2623 $self->throw_exception( 'Unable to produce deployment statements: ' . $tr->error)
2624 unless (@ret && defined $ret[0]);
2626 return $wa ? @ret : $ret[0];
# deploy (fragment — sub header and the $deploy coderef header elided):
# run each deployment statement against the database, skipping comment
# lines, transaction wrappers, and whitespace-only lines; errors are
# warned about rather than fatal (best-effort deployment).
2630 my ($self, $schema, $type, $sqltargs, $dir) = @_;
# per-statement filter inside the $deploy closure:
2633 return if($line =~ /^--/);
2635 # next if($line =~ /^DROP/m);
2636 return if($line =~ /^BEGIN TRANSACTION/m);
2637 return if($line =~ /^COMMIT/m);
2638 return if $line =~ /^\s+$/; # skip whitespace only
2639 $self->_query_start($line);
2641 # do a dbh_do cycle here, as we need some error checking in
2642 # place (even though we will ignore errors)
2643 $self->dbh_do (sub { $_[1]->do($line) });
# deliberately best-effort: failures are carped, not thrown
2645 carp qq{$_ (running "${line}")};
2647 $self->_query_end($line);
2649 my @statements = $schema->deployment_statements($type, undef, $dir, { %{ $sqltargs || {} }, no_comments => 1 } );
2650 if (@statements > 1) {
2651 foreach my $statement (@statements) {
2652 $deploy->( $statement );
# a single blob is split into statements on ";\n"
2655 elsif (@statements == 1) {
2656 foreach my $line ( split(";\n", $statements[0])) {
2662 =head2 datetime_parser
2664 Returns the datetime parser class
# Lazily build and memoize the datetime parser class on the storage.
2668 sub datetime_parser {
2670 return $self->{datetime_parser} ||= do {
2671 $self->build_datetime_parser(@_);
2675 =head2 datetime_parser_type
2677 Defines (returns) the datetime parser class - currently hardwired to
2678 L<DateTime::Format::MySQL>
2682 sub datetime_parser_type { "DateTime::Format::MySQL"; }
2684 =head2 build_datetime_parser
2686 See L</datetime_parser>
# Load the class named by datetime_parser_type and (in elided lines)
# return it for use as the parser.
2690 sub build_datetime_parser {
2692 my $type = $self->datetime_parser_type(@_);
2693 $self->ensure_class_loaded ($type);
2698 =head2 is_replicating
2700 A boolean that reports if a particular L<DBIx::Class::Storage::DBI> is set to
2701 replicate from a master database. Default is undef, which is the result
2702 returned by databases that don't support replication.
2706 sub is_replicating {
2711 =head2 lag_behind_master
2713 Returns a number that represents a certain amount of lag behind a master db
2714 when a given storage is replicating. The number is database dependent, but
2715 starts at zero and increases with the amount of lag. Default is undef.
2719 sub lag_behind_master {
2723 =head2 relname_to_table_alias
2727 =item Arguments: $relname, $join_count
2731 L<DBIx::Class> uses L<DBIx::Class::Relationship> names as table aliases in
2734 This hook is to allow specific L<DBIx::Class::Storage> drivers to change the
2735 way these aliases are named.
2737 The default behavior is C<< "$relname_$join_count" if $join_count > 1 >>,
2738 otherwise C<"$relname">.
# Build the table alias for a relationship: plain relname for the first
# join, "relname_N" for repeated joins of the same relationship.
2742 sub relname_to_table_alias {
2743 my ($self, $relname, $join_count) = @_;
2745 my $alias = ($join_count && $join_count > 1 ?
2746 join('_', $relname, $join_count) : $relname);
2755 =head2 DBIx::Class and AutoCommit
2757 DBIx::Class can do some wonderful magic with handling exceptions,
2758 disconnections, and transactions when you use C<< AutoCommit => 1 >>
2759 (the default) combined with C<txn_do> for transaction support.
2761 If you set C<< AutoCommit => 0 >> in your connect info, then you are always
2762 in an assumed transaction between commits, and you're telling us you'd
2763 like to manage that manually. A lot of the magic protections offered by
2764 this module will go away. We can't protect you from exceptions due to database
2765 disconnects because we don't know anything about how to restart your
2766 transactions. You're on your own for handling all sorts of exceptional
2767 cases if you choose the C<< AutoCommit => 0 >> path, just as you would
2773 Matt S. Trout <mst@shadowcatsystems.co.uk>
2775 Andy Grundman <andy@hybridized.org>
2779 You may distribute this code under the same terms as Perl itself.