1 package DBIx::Class::Storage::DBI;
2 # -*- mode: cperl; cperl-indent-level: 2 -*-
7 use base qw/DBIx::Class::Storage::DBIHacks DBIx::Class::Storage/;
10 use Carp::Clan qw/^DBIx::Class|^Try::Tiny/;
12 use DBIx::Class::Storage::DBI::Cursor;
13 use Scalar::Util qw/refaddr weaken reftype blessed/;
14 use List::Util qw/first/;
15 use Data::Dumper::Concise 'Dumper';
16 use Sub::Name 'subname';
18 use File::Path 'make_path';
22 # default cursor class, overridable in connect_info attributes
23 __PACKAGE__->cursor_class('DBIx::Class::Storage::DBI::Cursor');
25 __PACKAGE__->mk_group_accessors('inherited' => qw/sql_maker_class sql_limit_dialect/);
26 __PACKAGE__->sql_maker_class('DBIx::Class::SQLMaker');
28 __PACKAGE__->mk_group_accessors('simple' => qw/
29 _connect_info _dbi_connect_info _dbic_connect_attributes _driver_determined
30 _dbh _dbh_details _conn_pid _sql_maker _sql_maker_opts
31 transaction_depth _dbh_autocommit savepoints
34 # the values for these accessors are picked out (and deleted) from
35 # the attribute hashref passed to connect_info
36 my @storage_options = qw/
37 on_connect_call on_disconnect_call on_connect_do on_disconnect_do
38 disable_sth_caching unsafe auto_savepoint
40 __PACKAGE__->mk_group_accessors('simple' => @storage_options);
43 # capability definitions, using a 2-tiered accessor system
46 # A driver/user may define _use_X, which blindly and without any checks says:
47 # "(do not) use this capability", (use_dbms_capability is an "inherited"
50 # If _use_X is undef, _supports_X is then queried. This is a "simple" style
51 # accessor, which in turn calls _determine_supports_X, and stores the return
52 # in a special slot on the storage object, which is wiped every time a $dbh
53 # reconnection takes place (it is not guaranteed that upon reconnection we
54 # will get the same rdbms version). _determine_supports_X does not need to
55 # exist on a driver, as we ->can for it before calling.
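# As a purely illustrative sketch (not part of the original source), a
# driver subclass would typically provide something along these lines:
#
#   sub _determine_supports_insert_returning {
#     my $self = shift;
#     # hypothetical version cutoff, made up for illustration
#     return $self->_server_info->{normalized_dbms_version} >= 8.002 ? 1 : 0;
#   }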
57 my @capabilities = (qw/insert_returning placeholders typeless_placeholders join_optimizer/);
58 __PACKAGE__->mk_group_accessors( dbms_capability => map { "_supports_$_" } @capabilities );
59 __PACKAGE__->mk_group_accessors( use_dbms_capability => map { "_use_$_" } (@capabilities ) );
61 # on by default, not strictly a capability (pending rewrite)
62 __PACKAGE__->_use_join_optimizer (1);
63 sub _determine_supports_join_optimizer { 1 };
65 # Each of these methods needs _determine_driver called before it runs
66 # in order to function reliably. This is purely a DRY optimization
68 # get_(use)_dbms_capability need to be called on the correct Storage
69 # class, as _use_X may be hardcoded class-wide, and _supports_X calls
70 # _determine_supports_X which obv. needs a correct driver as well
71 my @rdbms_specific_methods = qw/
85 get_use_dbms_capability
92 for my $meth (@rdbms_specific_methods) {
94 my $orig = __PACKAGE__->can ($meth)
95 or die "$meth is not a ::Storage::DBI method!";
98 no warnings qw/redefine/;
99 *{__PACKAGE__ ."::$meth"} = subname $meth => sub {
100 if (not $_[0]->_driver_determined and not $_[0]->{_in_determine_driver}) {
101 $_[0]->_determine_driver;
103 # This for some reason crashes and burns on perl 5.8.1
104 # IFF the method ends up throwing an exception
105 #goto $_[0]->can ($meth);
107 my $cref = $_[0]->can ($meth);
117 DBIx::Class::Storage::DBI - DBI storage handler
121 my $schema = MySchema->connect('dbi:SQLite:my.db');
123 $schema->storage->debug(1);
125 my @stuff = $schema->storage->dbh_do(
127 my ($storage, $dbh, @args) = @_;
128 $dbh->do("DROP TABLE authors");
133 $schema->resultset('Book')->search({
134 written_on => $schema->storage->datetime_parser->format_datetime(DateTime->now)
139 This class represents the connection to an RDBMS via L<DBI>. See
140 L<DBIx::Class::Storage> for general information. This pod only
141 documents DBI-specific methods and behaviors.
148 my $new = shift->next::method(@_);
150 $new->transaction_depth(0);
151 $new->_sql_maker_opts({});
152 $new->_dbh_details({});
153 $new->{savepoints} = [];
154 $new->{_in_dbh_do} = 0;
155 $new->{_dbh_gen} = 0;
157 # read below to see what this does
158 $new->_arm_global_destructor;
163 # This is a hack to work around perl shooting stuff in random
164 # order on exit(). If we do not walk the remaining storage
165 # objects in an END block, there is a *small but real* chance
166 # of a fork()ed child killing the parent's shared DBI handle,
167 # *before perl reaches the DESTROY in this package*
168 # Yes, it is ugly and effective.
169 # Additionally this registry is used by the CLONE method to
170 # make sure no handles are shared between threads
172 my %seek_and_destroy;
174 sub _arm_global_destructor {
176 my $key = refaddr ($self);
177 $seek_and_destroy{$key} = $self;
178 weaken ($seek_and_destroy{$key});
182 local $?; # just in case the DBI destructor changes it somehow
184 # destroy just the object if not native to this process/thread
185 $_->_verify_pid for (grep
187 values %seek_and_destroy
192 # As per DBI's recommendation, DBIC disconnects all handles as
193 # soon as possible (DBIC will reconnect only on demand from within
195 for (values %seek_and_destroy) {
197 $_->{_dbh_gen}++; # so that existing cursors will drop as well
206 # some databases spew warnings on implicit disconnect
207 local $SIG{__WARN__} = sub {};
211 # handle pid changes correctly - do not destroy parent's connection
215 my $pid = $self->_conn_pid;
216 if( defined $pid and $pid != $$ and my $dbh = $self->_dbh ) {
217 $dbh->{InactiveDestroy} = 1;
227 This method is normally called by L<DBIx::Class::Schema/connection>, which
228 encapsulates its argument list in an arrayref before passing it here.
230 The argument list may contain:
236 The same 4-element argument set one would normally pass to
237 L<DBI/connect>, optionally followed by
238 L<extra attributes|/DBIx::Class specific connection attributes>
239 recognized by DBIx::Class:
241 $connect_info_args = [ $dsn, $user, $password, \%dbi_attributes?, \%extra_attributes? ];
245 A single code reference which returns a connected
246 L<DBI database handle|DBI/connect> optionally followed by
247 L<extra attributes|/DBIx::Class specific connection attributes> recognized
250 $connect_info_args = [ sub { DBI->connect (...) }, \%extra_attributes? ];
254 A single hashref with all the attributes and the dsn/user/password
257 $connect_info_args = [{
265 $connect_info_args = [{
266 dbh_maker => sub { DBI->connect (...) },
271 This is particularly useful for L<Catalyst> based applications, allowing the
272 following config (L<Config::General> style):
277 dsn dbi:mysql:database=test
284 The C<dsn>/C<user>/C<password> combination can be substituted by the
285 C<dbh_maker> key whose value is a coderef that returns a connected
286 L<DBI database handle|DBI/connect>
290 Please note that the L<DBI> docs recommend that you always explicitly
291 set C<AutoCommit> to either I<0> or I<1>. L<DBIx::Class> further
292 recommends that it be set to I<1>, and that you perform transactions
293 via our L<DBIx::Class::Schema/txn_do> method. L<DBIx::Class> will set it
294 to I<1> if you do not explicitly set it to zero. This is the default
295 for most DBDs. See L</DBIx::Class and AutoCommit> for details.
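For example, a minimal setup along these lines (the DSN and credentials are
placeholders) keeps C<AutoCommit> enabled and scopes transactions explicitly:

  my $schema = MySchema->connect(
    'dbi:Pg:dbname=mydb', 'someuser', 'somepass',
    { AutoCommit => 1 },
  );

  $schema->txn_do(sub {
    # everything in here runs inside a single transaction
  });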
297 =head3 DBIx::Class specific connection attributes
299 In addition to the standard L<DBI|DBI/ATTRIBUTES_COMMON_TO_ALL_HANDLES>
300 L<connection|DBI/Database_Handle_Attributes> attributes, DBIx::Class recognizes
301 the following connection options. These options can be mixed in with your other
302 L<DBI> connection attributes, or placed in a separate hashref
303 (C<\%extra_attributes>) as shown above.
305 Every time C<connect_info> is invoked, any previous settings for
306 these options will be cleared before setting the new ones, regardless of
307 whether any options are specified in the new C<connect_info>.
314 Specifies things to do immediately after connecting or re-connecting to
315 the database. Its value may contain:
321 This contains one SQL statement to execute.
323 =item an array reference
325 This contains SQL statements to execute in order. Each element contains
326 a string or a code reference that returns a string.
328 =item a code reference
330 This contains some code to execute. Unlike code references within an
331 array reference, its return value is ignored.
335 =item on_disconnect_do
337 Takes arguments in the same form as L</on_connect_do> and executes them
338 immediately before disconnecting from the database.
340 Note, this only runs if you explicitly call L</disconnect> on the
343 =item on_connect_call
345 A more generalized form of L</on_connect_do> that calls the specified
346 C<connect_call_METHOD> methods in your storage driver.
348 on_connect_do => 'select 1'
352 on_connect_call => [ [ do_sql => 'select 1' ] ]
354 Its values may contain:
360 Will call the C<connect_call_METHOD> method.
362 =item a code reference
364 Will execute C<< $code->($storage) >>
366 =item an array reference
368 Each value can be a method name or code reference.
370 =item an array of arrays
372 For each array, the first item is taken to be the C<connect_call_> method name
373 or code reference, and the rest are parameters to it.
377 Some predefined storage methods you may use:
383 Executes a SQL string or a code reference that returns a SQL string. This is
384 what L</on_connect_do> and L</on_disconnect_do> use.
392 Will execute the scalar as SQL.
396 Taken to be arguments to L<DBI/do>, the SQL string optionally followed by the
397 attributes hashref and bind values.
399 =item a code reference
401 Will execute C<< $code->($storage) >> and execute the return array refs as
408 Execute any statements necessary to initialize the database session to return
409 and accept datetime/timestamp values used with
410 L<DBIx::Class::InflateColumn::DateTime>.
412 Only necessary for some databases, see your specific storage driver for
413 implementation details.
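It is typically requested through L</connect_info>, for example:

  on_connect_call => 'datetime_setup'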
417 =item on_disconnect_call
419 Takes arguments in the same form as L</on_connect_call> and executes them
420 immediately before disconnecting from the database.
422 Calls the C<disconnect_call_METHOD> methods as opposed to the
423 C<connect_call_METHOD> methods called by L</on_connect_call>.
425 Note, this only runs if you explicitly call L</disconnect> on the
428 =item disable_sth_caching
430 If set to a true value, this option will disable the caching of
431 statement handles via L<DBI/prepare_cached>.
435 Sets a specific SQL::Abstract::Limit-style limit dialect, overriding the
436 default L</sql_limit_dialect> setting of the storage (if any). For a list
437 of available limit dialects see L<DBIx::Class::SQLMaker::LimitDialects>.
441 Specifies what characters to use to quote table and column names.
443 C<quote_char> expects either a single character, in which case it is
444 placed on either side of the table/column name, or an arrayref of length
445 2, in which case the table/column name is placed between the elements.
447 For example under MySQL you should use C<< quote_char => '`' >>, and for
448 SQL Server you should use C<< quote_char => [qw/[ ]/] >>.
452 This parameter is only useful in conjunction with C<quote_char>, and is used to
453 specify the character that separates elements (schemas, tables, columns) from
454 each other. If unspecified it defaults to the most commonly used C<.>.
458 This Storage driver normally installs its own C<HandleError>, sets
459 C<RaiseError> and C<ShowErrorStatement> on, and sets C<PrintError> off on
460 all database handles, including those supplied by a coderef. It does this
461 so that it can have consistent and useful error behavior.
463 If you set this option to a true value, Storage will not do its usual
464 modifications to the database handle's attributes, and instead relies on
465 the settings in your connect_info DBI options (or the values you set in
466 your connection coderef, in the case that you are connecting via coderef).
468 Note that your custom settings can cause Storage to malfunction,
469 especially if you set a C<HandleError> handler that suppresses exceptions
470 and/or disable C<RaiseError>.
474 If this option is true, L<DBIx::Class> will use savepoints when nesting
475 transactions, making it possible to recover from failure in the inner
476 transaction without having to abort all outer transactions.
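A minimal sketch (assuming an C<Artist> result source exists in your schema,
and C<< auto_savepoint => 1 >> was passed in L</connect_info>):

  $schema->txn_do(sub {
    $schema->resultset('Artist')->create({ name => 'Outer' });

    eval {
      $schema->txn_do(sub {
        # this nested block is wrapped in a savepoint
        die "inner failure\n";
      });
    };

    # the outer transaction (and the 'Outer' row) is still intact
  });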
480 Use this argument to supply a cursor class other than the default
481 L<DBIx::Class::Storage::DBI::Cursor>.
485 Some real-life examples of arguments to L</connect_info> and
486 L<DBIx::Class::Schema/connect>
488 # Simple SQLite connection
489 ->connect_info([ 'dbi:SQLite:./foo.db' ]);
492 ->connect_info([ sub { DBI->connect(...) } ]);
494 # Connect via subref in hashref
496 dbh_maker => sub { DBI->connect(...) },
497 on_connect_do => 'alter session ...',
500 # A bit more complicated
507 { quote_char => q{"} },
511 # Equivalent to the previous example
517 { AutoCommit => 1, quote_char => q{"}, name_sep => q{.} },
521 # Same, but with hashref as argument
522 # See _normalize_connect_info for explanation
525 dsn => 'dbi:Pg:dbname=foo',
527 password => 'my_pg_password',
534 # Subref + DBIx::Class-specific connection options
537 sub { DBI->connect(...) },
541 on_connect_do => ['SET search_path TO myschema,otherschema,public'],
542 disable_sth_caching => 1,
552 my ($self, $info) = @_;
554 return $self->_connect_info if !$info;
556 $self->_connect_info($info); # copy for _connect_info
558 $info = $self->_normalize_connect_info($info)
559 if ref $info eq 'ARRAY';
561 for my $storage_opt (keys %{ $info->{storage_options} }) {
562 my $value = $info->{storage_options}{$storage_opt};
564 $self->$storage_opt($value);
567 # Kill sql_maker/_sql_maker_opts, so we get a fresh one with only
568 # the new set of options
569 $self->_sql_maker(undef);
570 $self->_sql_maker_opts({});
572 for my $sql_maker_opt (keys %{ $info->{sql_maker_options} }) {
573 my $value = $info->{sql_maker_options}{$sql_maker_opt};
575 $self->_sql_maker_opts->{$sql_maker_opt} = $value;
579 %{ $self->_default_dbi_connect_attributes || {} },
580 %{ $info->{attributes} || {} },
583 my @args = @{ $info->{arguments} };
585 $self->_dbi_connect_info([@args,
586 %attrs && !(ref $args[0] eq 'CODE') ? \%attrs : ()]);
589 # save attributes in a separate accessor so they are always
590 # introspectable, even in case of a CODE $dbhmaker
591 $self->_dbic_connect_attributes (\%attrs);
593 return $self->_connect_info;
596 sub _normalize_connect_info {
597 my ($self, $info_arg) = @_;
600 my @args = @$info_arg; # take a shallow copy for further mutilation
602 # combine/pre-parse arguments depending on invocation style
605 if (ref $args[0] eq 'CODE') { # coderef with optional \%extra_attributes
606 %attrs = %{ $args[1] || {} };
609 elsif (ref $args[0] eq 'HASH') { # single hashref (i.e. Catalyst config)
610 %attrs = %{$args[0]};
612 if (my $code = delete $attrs{dbh_maker}) {
615 my @ignored = grep { delete $attrs{$_} } (qw/dsn user password/);
618 'Attribute(s) %s in connect_info were ignored, as they can not be applied '
619 . "to the result of 'dbh_maker'",
621 join (', ', map { "'$_'" } (@ignored) ),
626 @args = delete @attrs{qw/dsn user password/};
629 else { # otherwise assume dsn/user/password + \%attrs + \%extra_attrs
631 % { $args[3] || {} },
632 % { $args[4] || {} },
634 @args = @args[0,1,2];
637 $info{arguments} = \@args;
639 my @storage_opts = grep exists $attrs{$_},
640 @storage_options, 'cursor_class';
642 @{ $info{storage_options} }{@storage_opts} =
643 delete @attrs{@storage_opts} if @storage_opts;
645 my @sql_maker_opts = grep exists $attrs{$_},
646 qw/limit_dialect quote_char name_sep/;
648 @{ $info{sql_maker_options} }{@sql_maker_opts} =
649 delete @attrs{@sql_maker_opts} if @sql_maker_opts;
651 $info{attributes} = \%attrs if %attrs;
656 sub _default_dbi_connect_attributes {
666 This method is deprecated in favour of setting via L</connect_info>.
670 =head2 on_disconnect_do
672 This method is deprecated in favour of setting via L</connect_info>.
676 sub _parse_connect_do {
677 my ($self, $type) = @_;
679 my $val = $self->$type;
680 return () if not defined $val;
685 push @res, [ 'do_sql', $val ];
686 } elsif (ref($val) eq 'CODE') {
688 } elsif (ref($val) eq 'ARRAY') {
689 push @res, map { [ 'do_sql', $_ ] } @$val;
691 $self->throw_exception("Invalid type for $type: ".ref($val));
699 Arguments: ($subref | $method_name), @extra_coderef_args?
701 Execute the given $subref or $method_name using the new exception-based
702 connection management.
704 The first two arguments will be the storage object that C<dbh_do> was called
705 on and a database handle to use. Any additional arguments will be passed
706 verbatim to the called subref as arguments 2 and onwards.
708 Using this (instead of $self->_dbh or $self->dbh) ensures correct
709 exception handling and reconnection (or failover in future subclasses).
711 Your subref should have no side-effects outside of the database, as
712 there is the potential for your subref to be partially double-executed
713 if the database connection was stale/dysfunctional.
717 my @stuff = $schema->storage->dbh_do(
719 my ($storage, $dbh, @cols) = @_;
720 my $cols = join(q{, }, @cols);
721 $dbh->selectrow_array("SELECT $cols FROM foo");
732 my $dbh = $self->_get_dbh;
734 return $self->$code($dbh, @_)
735 if ( $self->{_in_dbh_do} || $self->{transaction_depth} );
737 local $self->{_in_dbh_do} = 1;
739 # take a ref instead of a copy, to preserve coderef @_ aliasing semantics
742 $self->$code ($dbh, @$args);
744 $self->throw_exception($_) if $self->connected;
746 # We were not connected - reconnect and retry, but let any
747 # exception fall right through this time
748 carp "Retrying $code after catching disconnected exception: $_"
749 if $ENV{DBIC_DBIRETRY_DEBUG};
751 $self->_populate_dbh;
752 $self->$code($self->_dbh, @$args);
756 # This is basically a blend of dbh_do above and DBIx::Class::Storage::txn_do.
757 # It also informs dbh_do to bypass itself while under the direction of txn_do,
758 # via $self->{_in_dbh_do} (this saves some redundant eval and error checking, etc.)
763 ref $coderef eq 'CODE' or $self->throw_exception
764 ('$coderef must be a CODE reference');
766 local $self->{_in_dbh_do} = 1;
769 my $want_array = wantarray;
775 # take a ref instead of a copy, to preserve coderef @_ aliasing semantics
780 my $txn_start_depth = $self->transaction_depth;
782 @result = $coderef->(@$args);
784 elsif(defined $want_array) {
785 $result[0] = $coderef->(@$args);
791 my $delta_txn = $txn_start_depth - $self->transaction_depth;
792 if ($delta_txn == 0) {
795 elsif ($delta_txn != 1) {
796 # an off-by-one would mean we fired a rollback
797 carp "Unexpected reduction of transaction depth by $delta_txn after execution of $coderef";
803 if(! defined $exception) { return $want_array ? @result : $result[0] }
805 if($self->transaction_depth > 1 || $tried++ || $self->connected) {
806 my $rollback_exception;
807 try { $self->txn_rollback } catch { $rollback_exception = shift };
808 if(defined $rollback_exception) {
809 my $exception_class = "DBIx::Class::Storage::NESTED_ROLLBACK_EXCEPTION";
810 $self->throw_exception($exception) # propagate nested rollback
811 if $rollback_exception =~ /$exception_class/;
813 $self->throw_exception(
814 "Transaction aborted: ${exception}. "
815 . "Rollback failed: ${rollback_exception}"
818 $self->throw_exception($exception)
821 # We were not connected, and this was the first try - reconnect and retry
823 carp "Retrying $coderef after catching disconnected exception: $exception"
824 if $ENV{DBIC_TXNRETRY_DEBUG};
825 $self->_populate_dbh;
831 Our C<disconnect> method also performs a rollback first if the
832 database is not in C<AutoCommit> mode.
842 push @actions, ( $self->on_disconnect_call || () );
843 push @actions, $self->_parse_connect_do ('on_disconnect_do');
845 $self->_do_connection_actions(disconnect_call_ => $_) for @actions;
847 $self->_dbh_rollback unless $self->_dbh_autocommit;
849 %{ $self->_dbh->{CachedKids} } = ();
850 $self->_dbh->disconnect;
856 =head2 with_deferred_fk_checks
860 =item Arguments: C<$coderef>
862 =item Return Value: The return value of $coderef
866 Storage specific method to run the code ref with FK checks deferred or
867 in MySQL's case disabled entirely.
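A usage sketch (the result sources and columns below are made up for
illustration; the actual behavior depends on the storage driver):

  $schema->storage->with_deferred_fk_checks(sub {
    # rows may be created here in an order that would otherwise
    # violate foreign key constraints
    $schema->resultset('Child')->create({ id => 1, parent_id => 42 });
    $schema->resultset('Parent')->create({ id => 42 });
  });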
871 # Storage subclasses should override this
872 sub with_deferred_fk_checks {
873 my ($self, $sub) = @_;
881 =item Arguments: none
883 =item Return Value: 1|0
887 Verifies that the current database handle is active and ready to execute
888 an SQL statement (e.g. the connection did not get stale, server is still
889 answering, etc.) This method is used internally by L</dbh>.
895 return 0 unless $self->_seems_connected;
898 local $self->_dbh->{RaiseError} = 1;
903 sub _seems_connected {
908 my $dbh = $self->_dbh
911 return $dbh->FETCH('Active');
917 my $dbh = $self->_dbh or return 0;
922 sub ensure_connected {
925 unless ($self->connected) {
926 $self->_populate_dbh;
932 Returns a C<$dbh> - a database handle of class L<DBI>. The returned handle
933 is guaranteed to be healthy by implicitly calling L</connected>, and if
934 necessary performing a reconnection before returning. Keep in mind that this
935 is very B<expensive> on some database engines. Consider using L</dbh_do>
943 if (not $self->_dbh) {
944 $self->_populate_dbh;
946 $self->ensure_connected;
951 # this is the internal "get dbh or connect (don't check)" method
955 $self->_populate_dbh unless $self->_dbh;
961 unless ($self->_sql_maker) {
962 my $sql_maker_class = $self->sql_maker_class;
963 $self->ensure_class_loaded ($sql_maker_class);
965 my %opts = %{$self->_sql_maker_opts||{}};
969 $self->sql_limit_dialect
972 my $s_class = (ref $self) || $self;
974 "Your storage class ($s_class) does not set sql_limit_dialect and you "
975 . 'have not supplied an explicit limit_dialect in your connection_info. '
976 . 'DBIC will attempt to use the GenericSubQ dialect, which works on most '
977 . 'databases but can be (and often is) painfully slow.'
984 $self->_sql_maker($sql_maker_class->new(
986 array_datatypes => 1,
987 limit_dialect => $dialect,
992 return $self->_sql_maker;
995 # nothing to do by default
1002 my @info = @{$self->_dbi_connect_info || []};
1003 $self->_dbh(undef); # in case ->connected failed we might get sent here
1004 $self->_dbh_details({}); # reset everything we know
1006 $self->_dbh($self->_connect(@info));
1008 $self->_conn_pid($$) if $^O ne 'MSWin32'; # on win32 these are in fact threads
1010 $self->_determine_driver;
1012 # Always set the transaction depth on connect, since
1013 # there is no transaction in progress by definition
1014 $self->{transaction_depth} = $self->_dbh_autocommit ? 0 : 1;
1016 $self->_run_connection_actions unless $self->{_in_determine_driver};
1019 sub _run_connection_actions {
1023 push @actions, ( $self->on_connect_call || () );
1024 push @actions, $self->_parse_connect_do ('on_connect_do');
1026 $self->_do_connection_actions(connect_call_ => $_) for @actions;
1031 sub set_use_dbms_capability {
1032 $_[0]->set_inherited ($_[1], $_[2]);
1035 sub get_use_dbms_capability {
1036 my ($self, $capname) = @_;
1038 my $use = $self->get_inherited ($capname);
1041 : do { $capname =~ s/^_use_/_supports_/; $self->get_dbms_capability ($capname) }
1045 sub set_dbms_capability {
1046 $_[0]->_dbh_details->{capability}{$_[1]} = $_[2];
1049 sub get_dbms_capability {
1050 my ($self, $capname) = @_;
1052 my $cap = $self->_dbh_details->{capability}{$capname};
1054 unless (defined $cap) {
1055 if (my $meth = $self->can ("_determine$capname")) {
1056 $cap = $self->$meth ? 1 : 0;
1062 $self->set_dbms_capability ($capname, $cap);
1072 unless ($info = $self->_dbh_details->{info}) {
1076 my $server_version = try { $self->_get_server_version };
1078 if (defined $server_version) {
1079 $info->{dbms_version} = $server_version;
1081 my ($numeric_version) = $server_version =~ /^([\d\.]+)/;
1082 my @verparts = split (/\./, $numeric_version);
1088 # consider only up to 3 version parts, iff not more than 3 digits
1090 while (@verparts && @use_parts < 3) {
1091 my $p = shift @verparts;
1093 push @use_parts, $p;
1095 push @use_parts, 0 while @use_parts < 3;
1097 $info->{normalized_dbms_version} = sprintf "%d.%03d%03d", @use_parts;
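# illustrative examples of the normalization (not from the original source):
#   '9.6.3' => 9.006003
#   '10.4'  => 10.004000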
1101 $self->_dbh_details->{info} = $info;
1107 sub _get_server_version {
1108 shift->_get_dbh->get_info(18);
1111 sub _determine_driver {
1114 if ((not $self->_driver_determined) && (not $self->{_in_determine_driver})) {
1115 my $started_connected = 0;
1116 local $self->{_in_determine_driver} = 1;
1118 if (ref($self) eq __PACKAGE__) {
1120 if ($self->_dbh) { # we are connected
1121 $driver = $self->_dbh->{Driver}{Name};
1122 $started_connected = 1;
1124 # if connect_info is a CODEREF, we have no choice but to connect
1125 if (ref $self->_dbi_connect_info->[0] &&
1126 reftype $self->_dbi_connect_info->[0] eq 'CODE') {
1127 $self->_populate_dbh;
1128 $driver = $self->_dbh->{Driver}{Name};
1131 # try to use dsn to not require being connected, the driver may still
1132 # force a connection in _rebless to determine version
1133 # (dsn may not be supplied at all if all we do is make a mock-schema)
1134 my $dsn = $self->_dbi_connect_info->[0] || $ENV{DBI_DSN} || '';
1135 ($driver) = $dsn =~ /dbi:([^:]+):/i;
1136 $driver ||= $ENV{DBI_DRIVER};
1141 my $storage_class = "DBIx::Class::Storage::DBI::${driver}";
1142 if ($self->load_optional_class($storage_class)) {
1143 mro::set_mro($storage_class, 'c3');
1144 bless $self, $storage_class;
1150 $self->_driver_determined(1);
1152 $self->_init; # run driver-specific initializations
1154 $self->_run_connection_actions
1155 if !$started_connected && defined $self->_dbh;
1159 sub _do_connection_actions {
1161 my $method_prefix = shift;
1164 if (not ref($call)) {
1165 my $method = $method_prefix . $call;
1167 } elsif (ref($call) eq 'CODE') {
1169 } elsif (ref($call) eq 'ARRAY') {
1170 if (ref($call->[0]) ne 'ARRAY') {
1171 $self->_do_connection_actions($method_prefix, $_) for @$call;
1173 $self->_do_connection_actions($method_prefix, @$_) for @$call;
1176 $self->throw_exception (sprintf ("Don't know how to process connection actions of type '%s'", ref($call)) );
1182 sub connect_call_do_sql {
1184 $self->_do_query(@_);
1187 sub disconnect_call_do_sql {
1189 $self->_do_query(@_);
1192 # override in db-specific backend when necessary
1193 sub connect_call_datetime_setup { 1 }
1196 my ($self, $action) = @_;
1198 if (ref $action eq 'CODE') {
1199 $action = $action->($self);
1200 $self->_do_query($_) foreach @$action;
1203 # Most debuggers expect ($sql, @bind), so we need to exclude
1204 # the attribute hash which is the second argument to $dbh->do
1205 # furthermore the bind values are usually to be presented
1206 # as named arrayref pairs, so wrap those here too
1207 my @do_args = (ref $action eq 'ARRAY') ? (@$action) : ($action);
1208 my $sql = shift @do_args;
1209 my $attrs = shift @do_args;
1210 my @bind = map { [ undef, $_ ] } @do_args;
1212 $self->_query_start($sql, @bind);
1213 $self->_get_dbh->do($sql, $attrs, @do_args);
1214 $self->_query_end($sql, @bind);
1221 my ($self, @info) = @_;
1223 $self->throw_exception("You failed to provide any connection info")
1226 my ($old_connect_via, $dbh);
1228 if ($INC{'Apache/DBI.pm'} && $ENV{MOD_PERL}) {
1229 $old_connect_via = $DBI::connect_via;
1230 $DBI::connect_via = 'connect';
1234 if(ref $info[0] eq 'CODE') {
1235 $dbh = $info[0]->();
1238 $dbh = DBI->connect(@info);
1245 unless ($self->unsafe) {
1247 # this odd anonymous coderef dereference is in fact really
1248 # necessary to avoid the unwanted effect described in perl5
1251 my $weak_self = $_[0];
1254 $_[1]->{HandleError} = sub {
1256 $weak_self->throw_exception("DBI Exception: $_[0]");
1259 # the handler may be invoked by something totally out of
1261 croak ("DBI Exception (unhandled by DBIC, ::Schema GCed): $_[0]");
1266 $dbh->{ShowErrorStatement} = 1;
1267 $dbh->{RaiseError} = 1;
1268 $dbh->{PrintError} = 0;
1272 $self->throw_exception("DBI Connection failed: $_")
1275 $DBI::connect_via = $old_connect_via if $old_connect_via;
1278 $self->_dbh_autocommit($dbh->{AutoCommit});
1283 my ($self, $name) = @_;
1285 $name = $self->_svp_generate_name
1286 unless defined $name;
1288 $self->throw_exception ("You can't use savepoints outside a transaction")
1289 if $self->{transaction_depth} == 0;
1291 $self->throw_exception ("Your Storage implementation doesn't support savepoints")
1292 unless $self->can('_svp_begin');
1294 push @{ $self->{savepoints} }, $name;
1296 $self->debugobj->svp_begin($name) if $self->debug;
1298 return $self->_svp_begin($name);
1302 my ($self, $name) = @_;
1304 $self->throw_exception ("You can't use savepoints outside a transaction")
1305 if $self->{transaction_depth} == 0;
1307 $self->throw_exception ("Your Storage implementation doesn't support savepoints")
1308 unless $self->can('_svp_release');
1310 if (defined $name) {
1311 $self->throw_exception ("Savepoint '$name' does not exist")
1312 unless grep { $_ eq $name } @{ $self->{savepoints} };
1314 # Dig through the stack until we find the one we are releasing. This keeps
1315 # the stack up to date.
1318 do { $svp = pop @{ $self->{savepoints} } } while $svp ne $name;
1320 $name = pop @{ $self->{savepoints} };
1323 $self->debugobj->svp_release($name) if $self->debug;
1325 return $self->_svp_release($name);
1329 my ($self, $name) = @_;
1331 $self->throw_exception ("You can't use savepoints outside a transaction")
1332 if $self->{transaction_depth} == 0;
1334 $self->throw_exception ("Your Storage implementation doesn't support savepoints")
1335 unless $self->can('_svp_rollback');
1337 if (defined $name) {
1338 # If they passed us a name, verify that it exists in the stack
1339 unless(grep({ $_ eq $name } @{ $self->{savepoints} })) {
1340 $self->throw_exception("Savepoint '$name' does not exist!");
1343 # Dig through the stack until we find the one we are rolling back to. This keeps
1344 # the stack up to date.
1345 while(my $s = pop(@{ $self->{savepoints} })) {
1346 last if($s eq $name);
1348 # Add the savepoint back to the stack, as a rollback doesn't remove the
1349 # named savepoint, only everything after it.
1350 push(@{ $self->{savepoints} }, $name);
1352 # We'll assume they want to rollback to the last savepoint
1353 $name = $self->{savepoints}->[-1];
1356 $self->debugobj->svp_rollback($name) if $self->debug;
1358 return $self->_svp_rollback($name);
1361 sub _svp_generate_name {
1363 return 'savepoint_'.scalar(@{ $self->{'savepoints'} });
1369 # this means we have not yet connected and do not know the AC status
1370 # (e.g. coderef $dbh)
1371 if (! defined $self->_dbh_autocommit) {
1372 $self->ensure_connected;
1374 # otherwise re-connect on pid changes, so
1375 # that the txn_depth is adjusted properly
1376 # the lightweight _get_dbh is good enough here
1377 # (only superficial handle check, no pings)
1382 if($self->transaction_depth == 0) {
1383 $self->debugobj->txn_begin()
1385 $self->_dbh_begin_work;
1387 elsif ($self->auto_savepoint) {
1390 $self->{transaction_depth}++;
1393 sub _dbh_begin_work {
1396 # if the user is utilizing txn_do - good for him, otherwise we need to
1397 # ensure that the $dbh is healthy on BEGIN.
1398 # We do this via ->dbh_do instead of ->dbh, so that the ->dbh "ping"
1399 # will be replaced by a failure of begin_work itself (which will be
1400 # then retried on reconnect)
1401 if ($self->{_in_dbh_do}) {
1402 $self->_dbh->begin_work;
1404 $self->dbh_do(sub { $_[1]->begin_work });
1410 if ($self->{transaction_depth} == 1) {
1411 $self->debugobj->txn_commit()
1414 $self->{transaction_depth} = 0
1415 if $self->_dbh_autocommit;
1417 elsif($self->{transaction_depth} > 1) {
1418 $self->{transaction_depth}--;
1420 if $self->auto_savepoint;
1423 $self->throw_exception( 'Refusing to commit without a started transaction' );
1429 my $dbh = $self->_dbh
1430 or $self->throw_exception('cannot COMMIT on a disconnected handle');
1436 my $dbh = $self->_dbh;
1438 if ($self->{transaction_depth} == 1) {
1439 $self->debugobj->txn_rollback()
1441 $self->{transaction_depth} = 0
1442 if $self->_dbh_autocommit;
1443 $self->_dbh_rollback;
1445 elsif($self->{transaction_depth} > 1) {
1446 $self->{transaction_depth}--;
1447 if ($self->auto_savepoint) {
1448 $self->svp_rollback;
1453 die DBIx::Class::Storage::NESTED_ROLLBACK_EXCEPTION->new;
1457 my $exception_class = "DBIx::Class::Storage::NESTED_ROLLBACK_EXCEPTION";
1459 if ($_ !~ /$exception_class/) {
1460 # ensure that a failed rollback resets the transaction depth
1461 $self->{transaction_depth} = $self->_dbh_autocommit ? 0 : 1;
1464 $self->throw_exception($_)
1470 my $dbh = $self->_dbh
1471 or $self->throw_exception('cannot ROLLBACK on a disconnected handle');
1475 # This used to be the top-half of _execute. It was split out to make it
1476 # easier to override in NoBindVars without duping the rest. It takes up
1477 # all of _execute's args, and emits $sql, @bind.
1478 sub _prep_for_execute {
1479 my ($self, $op, $extra_bind, $ident, $args) = @_;
1481 if( blessed $ident && $ident->isa("DBIx::Class::ResultSource") ) {
1482 $ident = $ident->from();
1485 my ($sql, @bind) = $self->sql_maker->$op($ident, @$args);
1488 map { ref $_ eq 'ARRAY' ? $_ : [ '!!dummy', $_ ] } @$extra_bind)
1490 return ($sql, \@bind);
1494 sub _fix_bind_params {
1495 my ($self, @bind) = @_;
1497 ### Turn @bind from something like this:
1498 ### ( [ "artist", 1 ], [ "cdid", 1, 3 ] )
1500 ### ( "'1'", "'1'", "'3'" )
1503 if ( defined( $_ && $_->[1] ) ) {
1504 map { qq{'$_'}; } @{$_}[ 1 .. $#$_ ];
1511 my ( $self, $sql, @bind ) = @_;
1513 if ( $self->debug ) {
1514 @bind = $self->_fix_bind_params(@bind);
1516 $self->debugobj->query_start( $sql, @bind );
1521 my ( $self, $sql, @bind ) = @_;
1523 if ( $self->debug ) {
1524 @bind = $self->_fix_bind_params(@bind);
1525 $self->debugobj->query_end( $sql, @bind );
1530 my ($self, $dbh, $op, $extra_bind, $ident, $bind_attributes, @args) = @_;
1532 my ($sql, $bind) = $self->_prep_for_execute($op, $extra_bind, $ident, \@args);
1534 $self->_query_start( $sql, @$bind );
1536 my $sth = $self->sth($sql,$op);
1538 my $placeholder_index = 1;
1540 foreach my $bound (@$bind) {
1541 my $attributes = {};
1542 my($column_name, @data) = @$bound;
1544 if ($bind_attributes) {
1545 $attributes = $bind_attributes->{$column_name}
1546 if defined $bind_attributes->{$column_name};
1549 foreach my $data (@data) {
1550 my $ref = ref $data;
1551 $data = $ref && $ref ne 'ARRAY' ? ''.$data : $data; # stringify args (except arrayrefs)
1553 $sth->bind_param($placeholder_index, $data, $attributes);
1554 $placeholder_index++;
1558 # Can this fail without throwing an exception anyways???
1559 my $rv = $sth->execute();
1560 $self->throw_exception(
1561 $sth->errstr || $sth->err || 'Unknown error: execute() returned false, but error flags were not set...'
1564 $self->_query_end( $sql, @$bind );
1566 return (wantarray ? ($rv, $sth, @$bind) : $rv);
1571 $self->dbh_do('_dbh_execute', @_); # retry over disconnects
1574 sub _prefetch_autovalues {
1575 my ($self, $source, $to_insert) = @_;
1577 my $colinfo = $source->columns_info;
1580 for my $col (keys %$colinfo) {
1582 $colinfo->{$col}{auto_nextval}
1585 ! exists $to_insert->{$col}
1587 ref $to_insert->{$col} eq 'SCALAR'
1590 $values{$col} = $self->_sequence_fetch(
1592 ( $colinfo->{$col}{sequence} ||=
1593 $self->_dbh_get_autoinc_seq($self->_get_dbh, $source, $col)
1603 my ($self, $source, $to_insert) = @_;
1605 my $prefetched_values = $self->_prefetch_autovalues($source, $to_insert);
1608 $to_insert = { %$to_insert, %$prefetched_values };
1610 # list of primary keys we try to fetch from the database
1611 # both not-exists and scalarrefs are considered
1614 { $_ => scalar keys %fetch_pks } # so we can preserve order for prettiness
1616 { ! exists $to_insert->{$_} or ref $to_insert->{$_} eq 'SCALAR' }
1617 $source->primary_columns
1621 if ($self->_use_insert_returning) {
1623 # retain order as declared in the resultsource
1624 for (sort { $fetch_pks{$a} <=> $fetch_pks{$b} } keys %fetch_pks ) {
1625 push @{$sqla_opts->{returning}}, $_;
1629 my $bind_attributes = $self->source_bind_attributes($source);
1631 my ($rv, $sth) = $self->_execute('insert' => [], $source, $bind_attributes, $to_insert, $sqla_opts);
1635 if (my $retlist = $sqla_opts->{returning}) {
1636 my @ret_vals = try {
1637 local $SIG{__WARN__} = sub {};
1638 my @r = $sth->fetchrow_array;
1643 @returned_cols{@$retlist} = @ret_vals if @ret_vals;
1646 return { %$prefetched_values, %returned_cols };
1650 ## Currently it is assumed that all values passed will be "normal", i.e. not
1651 ## scalar refs, or at least all of the same type as the first row, since the
1652 ## statement is only prepped once.
1654 my ($self, $source, $cols, $data) = @_;
1657 @colvalues{@$cols} = (0..$#$cols);
1659 for my $i (0..$#$cols) {
1660 my $first_val = $data->[0][$i];
1661 next unless ref $first_val eq 'SCALAR';
1663 $colvalues{ $cols->[$i] } = $first_val;
1666 # check for bad data and stringify stringifiable objects
1667 my $bad_slice = sub {
1668 my ($msg, $col_idx, $slice_idx) = @_;
1669 $self->throw_exception(sprintf "%s for column '%s' in populate slice:\n%s",
1673 local $Data::Dumper::Maxdepth = 1; # don't dump objects, if any
1675 map { $cols->[$_] => $data->[$slice_idx][$_] } (0 .. $#$cols)
1681 for my $datum_idx (0..$#$data) {
1682 my $datum = $data->[$datum_idx];
1684 for my $col_idx (0..$#$cols) {
1685 my $val = $datum->[$col_idx];
1686 my $sqla_bind = $colvalues{ $cols->[$col_idx] };
1687 my $is_literal_sql = (ref $sqla_bind) eq 'SCALAR';
1689 if ($is_literal_sql) {
1691 $bad_slice->('bind found where literal SQL expected', $col_idx, $datum_idx);
1693 elsif ((my $reftype = ref $val) ne 'SCALAR') {
1694 $bad_slice->("$reftype reference found where literal SQL expected",
1695 $col_idx, $datum_idx);
1697 elsif ($$val ne $$sqla_bind){
1698 $bad_slice->("inconsistent literal SQL value, expecting: '$$sqla_bind'",
1699 $col_idx, $datum_idx);
1702 elsif (my $reftype = ref $val) {
1704 if (overload::Method($val, '""')) {
1705 $datum->[$col_idx] = "".$val;
1708 $bad_slice->("$reftype reference found where bind expected",
1709 $col_idx, $datum_idx);
1715 my ($sql, $bind) = $self->_prep_for_execute (
1716 'insert', undef, $source, [\%colvalues]
1720 # if the bindlist is empty - make sure all "values" are in fact
1721 # literal scalarrefs. If that is not the case it means the storage ate
1722 # them away (e.g. the NoBindVars component) and interpolated them
1723 # directly into the SQL. This obviously can't be good for multi-inserts
1725 $self->throw_exception('Cannot insert_bulk without support for placeholders')
1726 if first { ref $_ ne 'SCALAR' } values %colvalues;
1729 # neither _execute_array, nor _execute_inserts_with_no_binds are
1730 # atomic (even if _execute_array is a single call). Thus a safety
1732 my $guard = $self->txn_scope_guard;
1734 $self->_query_start( $sql, @$bind ? [ dummy => '__BULK_INSERT__' ] : () );
1735 my $sth = $self->sth($sql);
1738 #@bind = map { ref $_ ? ''.$_ : $_ } @bind; # stringify args
1739 $self->_execute_array( $source, $sth, $bind, $cols, $data );
1742 # bind_param_array doesn't work if there are no binds
1743 $self->_dbh_execute_inserts_with_no_binds( $sth, scalar @$data );
1747 $self->_query_end( $sql, @$bind ? [ dummy => '__BULK_INSERT__' ] : () );
1751 return (wantarray ? ($rv, $sth, @$bind) : $rv);
1754 sub _execute_array {
1755 my ($self, $source, $sth, $bind, $cols, $data, @extra) = @_;
1757 ## This must be an arrayref, else nothing works!
1758 my $tuple_status = [];
1760 ## Get the bind_attributes, if any exist
1761 my $bind_attributes = $self->source_bind_attributes($source);
1763 ## Bind the values and execute
1764 my $placeholder_index = 1;
1766 foreach my $bound (@$bind) {
1768 my $attributes = {};
1769 my ($column_name, $data_index) = @$bound;
1771 if( $bind_attributes ) {
1772 $attributes = $bind_attributes->{$column_name}
1773 if defined $bind_attributes->{$column_name};
1776 my @data = map { $_->[$data_index] } @$data;
1778 $sth->bind_param_array(
1781 (%$attributes ? $attributes : ()),
1783 $placeholder_index++;
1788 $rv = $self->_dbh_execute_array($sth, $tuple_status, @extra);
1794 # Statement must finish even if there was an exception.
1799 $err = shift unless defined $err
1803 if (! defined $err and $sth->err);
1807 ++$i while $i <= $#$tuple_status && !ref $tuple_status->[$i];
1809 $self->throw_exception("Unexpected populate error: $err")
1810 if ($i > $#$tuple_status);
1812 $self->throw_exception(sprintf "%s for populate slice:\n%s",
1813 ($tuple_status->[$i][1] || $err),
1814 Dumper { map { $cols->[$_] => $data->[$i][$_] } (0 .. $#$cols) },
1821 sub _dbh_execute_array {
1822 my ($self, $sth, $tuple_status, @extra) = @_;
1824 return $sth->execute_array({ArrayTupleStatus => $tuple_status});
1827 sub _dbh_execute_inserts_with_no_binds {
1828 my ($self, $sth, $count) = @_;
1832 my $dbh = $self->_get_dbh;
1833 local $dbh->{RaiseError} = 1;
1834 local $dbh->{PrintError} = 0;
1836 $sth->execute foreach 1..$count;
1842 # Make sure statement is finished even if there was an exception.
1847 $err = shift unless defined $err;
1850 $self->throw_exception($err) if defined $err;
1856 my ($self, $source, @args) = @_;
1858 my $bind_attrs = $self->source_bind_attributes($source);
1860 return $self->_execute('update' => [], $source, $bind_attrs, @args);
1865 my ($self, $source, @args) = @_;
1867 my $bind_attrs = $self->source_bind_attributes($source);
1869 return $self->_execute('delete' => [], $source, $bind_attrs, @args);
1872 # We were sent here because the $rs contains a complex search
1873 # which will require a subquery to select the correct rows
1874 # (i.e. joined or limited resultsets, or non-introspectable conditions)
1876 # Generating a single PK column subquery is trivial and supported
1877 # by all RDBMS. However if we have a multicolumn PK, things get ugly.
1878 # Look at _multipk_update_delete()
1879 sub _subq_update_delete {
1881 my ($rs, $op, $values) = @_;
1883 my $rsrc = $rs->result_source;
1885 # quick check if we got a sane rs on our hands
1886 my @pcols = $rsrc->_pri_cols;
1888 my $sel = $rs->_resolved_attrs->{select};
1889 $sel = [ $sel ] unless ref $sel eq 'ARRAY';
1892 join ("\x00", map { join '.', $rs->{attrs}{alias}, $_ } sort @pcols)
1894 join ("\x00", sort @$sel )
1896 $self->throw_exception (
1897 '_subq_update_delete can not be called on resultsets selecting columns other than the primary keys'
1904 $op eq 'update' ? $values : (),
1905 { $pcols[0] => { -in => $rs->as_query } },
1910 return $self->_multipk_update_delete (@_);
1914 # ANSI SQL does not provide a reliable way to perform a multicol-PK
1915 # resultset update/delete involving subqueries. So by default resort
1916 # to simple (and inefficient) delete_all style per-row operations,
1917 # while allowing specific storages to override this with a faster
1920 sub _multipk_update_delete {
1921 return shift->_per_row_update_delete (@_);
1924 # This is the default loop used to delete/update rows for multi PK
1925 # resultsets, and used by mysql exclusively (because it can't do anything
1928 # We do not use $row->$op style queries, because resultset update/delete
1929 # is not expected to cascade (this is what delete_all/update_all is for).
1931 # There should be no race conditions as the entire operation is rolled
1934 sub _per_row_update_delete {
1936 my ($rs, $op, $values) = @_;
1938 my $rsrc = $rs->result_source;
1939 my @pcols = $rsrc->_pri_cols;
1941 my $guard = $self->txn_scope_guard;
1943 # emulate the return value of $sth->execute for non-selects
1944 my $row_cnt = '0E0';
1946 my $subrs_cur = $rs->cursor;
1947 my @all_pk = $subrs_cur->all;
1948 for my $pks ( @all_pk) {
1951 for my $i (0.. $#pcols) {
1952 $cond->{$pcols[$i]} = $pks->[$i];
1957 $op eq 'update' ? $values : (),
1971 $self->_execute($self->_select_args(@_));
1974 sub _select_args_to_query {
1977 # my ($op, $bind, $ident, $bind_attrs, $select, $cond, $rs_attrs, $rows, $offset)
1978 # = $self->_select_args($ident, $select, $cond, $attrs);
1979 my ($op, $bind, $ident, $bind_attrs, @args) =
1980 $self->_select_args(@_);
1982 # my ($sql, $prepared_bind) = $self->_prep_for_execute($op, $bind, $ident, [ $select, $cond, $rs_attrs, $rows, $offset ]);
1983 my ($sql, $prepared_bind) = $self->_prep_for_execute($op, $bind, $ident, \@args);
1984 $prepared_bind ||= [];
1987 ? ($sql, $prepared_bind, $bind_attrs)
1988 : \[ "($sql)", @$prepared_bind ]
1993 my ($self, $ident, $select, $where, $attrs) = @_;
1995 my $sql_maker = $self->sql_maker;
1996 my ($alias2source, $rs_alias) = $self->_resolve_ident_sources ($ident);
2003 $rs_alias && $alias2source->{$rs_alias}
2004 ? ( _rsroot_source_handle => $alias2source->{$rs_alias}->handle )
2009 # calculate bind_attrs before possible $ident mangling
2010 my $bind_attrs = {};
2011 for my $alias (keys %$alias2source) {
2012 my $bindtypes = $self->source_bind_attributes ($alias2source->{$alias}) || {};
2013 for my $col (keys %$bindtypes) {
2015 my $fqcn = join ('.', $alias, $col);
2016 $bind_attrs->{$fqcn} = $bindtypes->{$col} if $bindtypes->{$col};
2018 # Unqualified column names are nice, but at the same time can be
2019 # rather ambiguous. What we do here is basically go along with
2020 # the loop, adding an unqualified column slot to $bind_attrs,
2021 # alongside the fully qualified name. As soon as we encounter
2022 # another column by that name (which would imply another table)
2023 # we unset the unqualified slot and never add any info to it
2024 # to avoid erroneous type binding. If this happens the user's
2025 # only choice will be to fully qualify the column name
2027 if (exists $bind_attrs->{$col}) {
2028 $bind_attrs->{$col} = {};
2031 $bind_attrs->{$col} = $bind_attrs->{$fqcn};
2036 # Sanity check the attributes (SQLMaker does it too, but
2037 # in case of a software_limit we'll never reach there)
2038 if (defined $attrs->{offset}) {
2039 $self->throw_exception('A supplied offset attribute must be a non-negative integer')
2040 if ( $attrs->{offset} =~ /\D/ or $attrs->{offset} < 0 );
2042 $attrs->{offset} ||= 0;
2044 if (defined $attrs->{rows}) {
2045 $self->throw_exception("The rows attribute must be a positive integer if present")
2046 if ( $attrs->{rows} =~ /\D/ or $attrs->{rows} <= 0 );
2048 elsif ($attrs->{offset}) {
2049 # MySQL actually recommends this approach. I cringe.
2050 $attrs->{rows} = $sql_maker->__max_int;
2055 # see if we need to tear the prefetch apart otherwise delegate the limiting to the
2056 # storage, unless software limit was requested
2059 ( $attrs->{rows} && keys %{$attrs->{collapse}} )
2061 # grouped prefetch (to satisfy group_by == select)
2062 ( $attrs->{group_by}
2064 @{$attrs->{group_by}}
2066 $attrs->{_prefetch_select}
2068 @{$attrs->{_prefetch_select}}
2071 ($ident, $select, $where, $attrs)
2072 = $self->_adjust_select_args_for_complex_prefetch ($ident, $select, $where, $attrs);
2074 elsif (! $attrs->{software_limit} ) {
2075 push @limit, $attrs->{rows}, $attrs->{offset};
2078 # try to simplify the joinmap further (prune unreferenced type-single joins)
2079 $ident = $self->_prune_unused_joins ($ident, $select, $where, $attrs);
2082 # This would be the point to deflate anything found in $where
2083 # (and leave $attrs->{bind} intact). Problem is - inflators historically
2084 # expect a row object. And all we have is a resultsource (it is trivial
2085 # to extract deflator coderefs via $alias2source above).
2087 # I don't see a way forward other than changing the way deflators are
2088 # invoked, and that's just bad...
2091 return ('select', $attrs->{bind}, $ident, $bind_attrs, $select, $where, $attrs, @limit);
2094 # Returns a counting SELECT for a simple count
2095 # query. Abstracted so that a storage could override
2096 # this to { count => 'firstcol' } or whatever makes
2097 # sense as a performance optimization
2099 #my ($self, $source, $rs_attrs) = @_;
2100 return { count => '*' };
2104 sub source_bind_attributes {
2105 my ($self, $source) = @_;
2107 my $bind_attributes;
2109 my $colinfo = $source->columns_info;
2111 for my $col (keys %$colinfo) {
2112 if (my $dt = $colinfo->{$col}{data_type} ) {
2113 $bind_attributes->{$col} = $self->bind_attribute_by_data_type($dt)
2117 return $bind_attributes;
2124 =item Arguments: $ident, $select, $condition, $attrs
2128 Handle a SQL select statement.
2134 my ($ident, $select, $condition, $attrs) = @_;
2135 return $self->cursor_class->new($self, \@_, $attrs);
2140 my ($rv, $sth, @bind) = $self->_select(@_);
2141 my @row = $sth->fetchrow_array;
2142 my @nextrow = $sth->fetchrow_array if @row;
2143 if(@row && @nextrow) {
2144 carp "Query returned more than one row. SQL that returns multiple rows is DEPRECATED for ->find and ->single";
2146 # Need to call finish() to work round broken DBDs
2151 =head2 sql_limit_dialect
2153 This is an accessor for the default SQL limit dialect used by a particular
2154 storage driver. Can be overridden by supplying an explicit L</limit_dialect>
2155 to L<DBIx::Class::Schema/connect>. For a list of available limit dialects
2156 see L<DBIx::Class::SQLMaker::LimitDialects>.
2162 =item Arguments: $sql
2166 Returns a L<DBI> sth (statement handle) for the supplied SQL.
2171 my ($self, $dbh, $sql) = @_;
2173 # 3 is the if_active parameter which avoids active sth re-use
2174 my $sth = $self->disable_sth_caching
2175 ? $dbh->prepare($sql)
2176 : $dbh->prepare_cached($sql, {}, 3);
2178 # XXX You would think RaiseError would make this impossible,
2179 # but apparently that's not true :(
2180 $self->throw_exception($dbh->errstr) if !$sth;
2186 my ($self, $sql) = @_;
2187 $self->dbh_do('_dbh_sth', $sql); # retry over disconnects
2190 sub _dbh_columns_info_for {
2191 my ($self, $dbh, $table) = @_;
2193 if ($dbh->can('column_info')) {
2197 my ($schema,$tab) = $table =~ /^(.+?)\.(.+)$/ ? ($1,$2) : (undef,$table);
2198 my $sth = $dbh->column_info( undef,$schema, $tab, '%' );
2200 while ( my $info = $sth->fetchrow_hashref() ){
2202 $column_info{data_type} = $info->{TYPE_NAME};
2203 $column_info{size} = $info->{COLUMN_SIZE};
2204 $column_info{is_nullable} = $info->{NULLABLE} ? 1 : 0;
2205 $column_info{default_value} = $info->{COLUMN_DEF};
2206 my $col_name = $info->{COLUMN_NAME};
2207 $col_name =~ s/^\"(.*)\"$/$1/;
2209 $result{$col_name} = \%column_info;
2214 return \%result if !$caught && scalar keys %result;
2218 my $sth = $dbh->prepare($self->sql_maker->select($table, undef, \'1 = 0'));
2220 my @columns = @{$sth->{NAME_lc}};
2221 for my $i ( 0 .. $#columns ){
2223 $column_info{data_type} = $sth->{TYPE}->[$i];
2224 $column_info{size} = $sth->{PRECISION}->[$i];
2225 $column_info{is_nullable} = $sth->{NULLABLE}->[$i] ? 1 : 0;
2227 if ($column_info{data_type} =~ m/^(.*?)\((.*?)\)$/) {
2228 $column_info{data_type} = $1;
2229 $column_info{size} = $2;
2232 $result{$columns[$i]} = \%column_info;
2236 foreach my $col (keys %result) {
2237 my $colinfo = $result{$col};
2238 my $type_num = $colinfo->{data_type};
2240 if(defined $type_num && $dbh->can('type_info')) {
2241 my $type_info = $dbh->type_info($type_num);
2242 $type_name = $type_info->{TYPE_NAME} if $type_info;
2243 $colinfo->{data_type} = $type_name if $type_name;
2250 sub columns_info_for {
2251 my ($self, $table) = @_;
2252 $self->_dbh_columns_info_for ($self->_get_dbh, $table);
2255 =head2 last_insert_id
2257 Return the row id of the last insert.
2261 sub _dbh_last_insert_id {
2262 my ($self, $dbh, $source, $col) = @_;
2264 my $id = try { $dbh->last_insert_id (undef, undef, $source->name, $col) };
2266 return $id if defined $id;
2268 my $class = ref $self;
2269 $self->throw_exception ("No storage specific _dbh_last_insert_id() method implemented in $class, and the generic DBI::last_insert_id() failed");
2272 sub last_insert_id {
2274 $self->_dbh_last_insert_id ($self->_dbh, @_);
2277 =head2 _native_data_type
2281 =item Arguments: $type_name
2285 This API is B<EXPERIMENTAL>, will almost definitely change in the future, and
2286 currently only used by L<::AutoCast|DBIx::Class::Storage::DBI::AutoCast> and
2287 L<::Sybase::ASE|DBIx::Class::Storage::DBI::Sybase::ASE>.
2289 The default implementation returns C<undef>; implement it in your Storage driver if
2290 you need this functionality.
2292 Should map types from other databases to the native RDBMS type, for example
2293 C<VARCHAR2> to C<VARCHAR>.
2295 Types with modifiers should map to the underlying data type. For example,
2296 C<INTEGER AUTO_INCREMENT> should become C<INTEGER>.
2298 Composite types should map to the container type, for example
2299 C<ENUM(foo,bar,baz)> becomes C<ENUM>.
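A hypothetical override in a driver subclass might look roughly like this
(the mapping is illustrative only and not taken from an actual DBIC driver):

  sub _native_data_type {
    my ($self, $type) = @_;

    my %map = (
      'varchar2'               => 'varchar',
      'integer auto_increment' => 'integer',
    );

    return $map{ lc $type };
  }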
2303 sub _native_data_type {
2304 #my ($self, $data_type) = @_;
2308 # Check if placeholders are supported at all
2309 sub _determine_supports_placeholders {
2311 my $dbh = $self->_get_dbh;
2313 # some drivers provide a $dbh attribute (e.g. Sybase and $dbh->{syb_dynamic_supported})
2314 # but it is inaccurate more often than not
2316 local $dbh->{PrintError} = 0;
2317 local $dbh->{RaiseError} = 1;
2318 $dbh->do('select ?', {}, 1);
2326 # Check if placeholders bound to non-string types throw exceptions
2328 sub _determine_supports_typeless_placeholders {
2330 my $dbh = $self->_get_dbh;
2333 local $dbh->{PrintError} = 0;
2334 local $dbh->{RaiseError} = 1;
2335 # this specifically tests a bind that is NOT a string
2336 $dbh->do('select 1 where 1 = ?', {}, 1);
2346 Returns the database driver name.
2351 shift->_get_dbh->{Driver}->{Name};
2354 =head2 bind_attribute_by_data_type
2356 Given a datatype from column info, returns a database specific bind
2357 attribute for C<< $dbh->bind_param($val,$attribute) >> or nothing if we will
2358 let the database planner just handle it.
2360 Generally only needed for special case column types, like bytea in postgres.
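As an illustration (loosely modelled on what a PostgreSQL-specific storage
could do, simplified here):

  use DBD::Pg qw(:pg_types);

  sub bind_attribute_by_data_type {
    my ($self, $data_type) = @_;

    # hand DBD::Pg the proper type hint for bytea columns, and let the
    # database handle everything else
    return $data_type eq 'bytea'
      ? { pg_type => PG_BYTEA }
      : undef;
  }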
2364 sub bind_attribute_by_data_type {
2368 =head2 is_datatype_numeric
2370 Given a datatype from column_info, returns a boolean value indicating if
2371 the current RDBMS considers it a numeric value. This controls how
2372 L<DBIx::Class::Row/set_column> decides whether to mark the column as
2373 dirty - when the datatype is deemed numeric a C<< != >> comparison will
2374 be performed instead of the usual C<eq>.
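For instance:

  $schema->storage->is_datatype_numeric('integer');      # true
  $schema->storage->is_datatype_numeric('varchar(32)');  # false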
2378 sub is_datatype_numeric {
2379 my ($self, $dt) = @_;
2381 return 0 unless $dt;
2383 return $dt =~ /^ (?:
2384 numeric | int(?:eger)? | (?:tiny|small|medium|big)int | dec(?:imal)? | real | float | double (?: \s+ precision)? | (?:big)?serial
2389 =head2 create_ddl_dir
2393 =item Arguments: $schema \@databases, $version, $directory, $preversion, \%sqlt_args
2397 Creates a SQL file based on the Schema, for each of the specified
2398 database engines in C<\@databases> in the given directory.
2399 (note: specify L<SQL::Translator> names, not L<DBI> driver names).
2401 Given a previous version number, this will also create a file containing
2402 the ALTER TABLE statements to transform the previous schema into the
2403 current one. Note that these statements may contain C<DROP TABLE> or
2404 C<DROP COLUMN> statements that can potentially destroy data.
2406 The file names are created using the C<ddl_filename> method below; please
2407 override this method in your schema if you would like a different file
2408 name format. For the ALTER file, the same format is used, replacing
2409 $version in the name with "$preversion-$version".
2411 See L<SQL::Translator/METHODS> for a list of values for C<\%sqlt_args>.
2412 The most common value for this would be C<< { add_drop_table => 1 } >>
2413 to have the SQL produced include a C<DROP TABLE> statement for each table
2414 created. For quoting purposes supply C<quote_table_names> and
2415 C<quote_field_names>.
2417 If no arguments are passed, then the following default values are assumed:
2421 =item databases - ['MySQL', 'SQLite', 'PostgreSQL']
2423 =item version - $schema->schema_version
2425 =item directory - './'
2427 =item preversion - <none>
2431 By default, C<\%sqlt_args> will have
2433 { add_drop_table => 1, ignore_constraint_names => 1, ignore_index_names => 1 }
2435 merged with the hash passed in. To disable any of those features, pass in a
2436 hashref like the following
2438 { ignore_constraint_names => 0, # ... other options }
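A typical invocation, usually made through L<DBIx::Class::Schema/create_ddl_dir>
(the version numbers and directory below are placeholders):

  $schema->create_ddl_dir(
    [qw/PostgreSQL SQLite/],
    '2.0',
    './sql',
    '1.0',                      # previous version, also produces ALTER files
    { add_drop_table => 0 },
  );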
2441 WARNING: You are strongly advised to check all SQL files created, before applying
2446 sub create_ddl_dir {
2447 my ($self, $schema, $databases, $version, $dir, $preversion, $sqltargs) = @_;
2450 carp "No directory given, using ./\n";
2455 make_path ("$dir") # make_path does not like objects (i.e. Path::Class::Dir)
2457 $self->throw_exception(
2458 "Failed to create '$dir': " . ($! || $@ || 'unknown error')
2462 $self->throw_exception ("Directory '$dir' does not exist\n") unless(-d $dir);
2464 $databases ||= ['MySQL', 'SQLite', 'PostgreSQL'];
2465 $databases = [ $databases ] if(ref($databases) ne 'ARRAY');
2467 my $schema_version = $schema->schema_version || '1.x';
2468 $version ||= $schema_version;
2471 add_drop_table => 1,
2472 ignore_constraint_names => 1,
2473 ignore_index_names => 1,
2477 unless (DBIx::Class::Optional::Dependencies->req_ok_for ('deploy')) {
2478 $self->throw_exception("Can't create a ddl file without " . DBIx::Class::Optional::Dependencies->req_missing_for ('deploy') );
2481 my $sqlt = SQL::Translator->new( $sqltargs );
2483 $sqlt->parser('SQL::Translator::Parser::DBIx::Class');
2484 my $sqlt_schema = $sqlt->translate({ data => $schema })
2485 or $self->throw_exception ($sqlt->error);
2487 foreach my $db (@$databases) {
2489 $sqlt->{schema} = $sqlt_schema;
2490 $sqlt->producer($db);
2493 my $filename = $schema->ddl_filename($db, $version, $dir);
2494 if (-e $filename && ($version eq $schema_version )) {
2495 # if we are dumping the current version, overwrite the DDL
2496 carp "Overwriting existing DDL file - $filename";
2500 my $output = $sqlt->translate;
2502 carp("Failed to translate to $db, skipping. (" . $sqlt->error . ")");
  if (!open($file, '>', $filename)) {
2506 $self->throw_exception("Can't open $filename for writing ($!)");
2509 print $file $output;
2512 next unless ($preversion);
2514 require SQL::Translator::Diff;
2516 my $prefilename = $schema->ddl_filename($db, $preversion, $dir);
2517 if(!-e $prefilename) {
2518 carp("No previous schema file found ($prefilename)");
2522 my $difffile = $schema->ddl_filename($db, $version, $dir, $preversion);
2524 carp("Overwriting existing diff file - $difffile");
2530 my $t = SQL::Translator->new($sqltargs);
2535 or $self->throw_exception ($t->error);
2537 my $out = $t->translate( $prefilename )
2538 or $self->throw_exception ($t->error);
2540 $source_schema = $t->schema;
2542 $source_schema->name( $prefilename )
2543 unless ( $source_schema->name );
    # The "new" style of producers have sane normalization and can support
    # diffing a SQL file against a DBIC->SQLT schema. Old style ones don't,
    # and we have to diff parsed SQL against parsed SQL.
2549 my $dest_schema = $sqlt_schema;
2551 unless ( "SQL::Translator::Producer::$db"->can('preprocess_schema') ) {
2552 my $t = SQL::Translator->new($sqltargs);
2557 or $self->throw_exception ($t->error);
2559 my $out = $t->translate( $filename )
2560 or $self->throw_exception ($t->error);
2562 $dest_schema = $t->schema;
2564 $dest_schema->name( $filename )
2565 unless $dest_schema->name;
2568 my $diff = SQL::Translator::Diff::schema_diff($source_schema, $db,
  if (!open($file, '>', $difffile)) {
2573 $self->throw_exception("Can't write to $difffile ($!)");
2581 =head2 deployment_statements
2585 =item Arguments: $schema, $type, $version, $directory, $sqlt_args
2589 Returns the statements used by L</deploy> and L<DBIx::Class::Schema/deploy>.
2591 The L<SQL::Translator> (not L<DBI>) database driver name can be explicitly
2592 provided in C<$type>, otherwise the result of L</sqlt_type> is used as default.
2594 C<$directory> is used to return statements from files in a previously created
2595 L</create_ddl_dir> directory and is optional. The filenames are constructed
2596 from L<DBIx::Class::Schema/ddl_filename>, the schema name and the C<$version>.
2598 If no C<$directory> is specified then the statements are constructed on the
2599 fly using L<SQL::Translator> and C<$version> is ignored.
2601 See L<SQL::Translator/METHODS> for a list of values for C<$sqlt_args>.
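For example, to build the CREATE statements for the current schema version
on the fly (a sketch; the producer name is a placeholder):

  my $sql = $schema->storage->deployment_statements(
    $schema,
    'PostgreSQL',
  );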
2605 sub deployment_statements {
2606 my ($self, $schema, $type, $version, $dir, $sqltargs) = @_;
2607 $type ||= $self->sqlt_type;
2608 $version ||= $schema->schema_version || '1.x';
2610 my $filename = $schema->ddl_filename($type, $version, $dir);
  open($file, '<', $filename)
2615 or $self->throw_exception("Can't open $filename ($!)");
2618 return join('', @rows);
2621 unless (DBIx::Class::Optional::Dependencies->req_ok_for ('deploy') ) {
2622 $self->throw_exception("Can't deploy without a ddl_dir or " . DBIx::Class::Optional::Dependencies->req_missing_for ('deploy') );
  # sources needs to be a parser arg, but for simplicity allow it at top level
2627 $sqltargs->{parser_args}{sources} = delete $sqltargs->{sources}
2628 if exists $sqltargs->{sources};
2630 my $tr = SQL::Translator->new(
2631 producer => "SQL::Translator::Producer::${type}",
2633 parser => 'SQL::Translator::Parser::DBIx::Class',
2640 @ret = $tr->translate;
2643 $ret[0] = $tr->translate;
2646 $self->throw_exception( 'Unable to produce deployment statements: ' . $tr->error)
2647 unless (@ret && defined $ret[0]);
2649 return $wa ? @ret : $ret[0];
2653 my ($self, $schema, $type, $sqltargs, $dir) = @_;
2656 return if($line =~ /^--/);
2658 # next if($line =~ /^DROP/m);
2659 return if($line =~ /^BEGIN TRANSACTION/m);
2660 return if($line =~ /^COMMIT/m);
2661 return if $line =~ /^\s+$/; # skip whitespace only
2662 $self->_query_start($line);
2664 # do a dbh_do cycle here, as we need some error checking in
2665 # place (even though we will ignore errors)
2666 $self->dbh_do (sub { $_[1]->do($line) });
2668 carp qq{$_ (running "${line}")};
2670 $self->_query_end($line);
2672 my @statements = $schema->deployment_statements($type, undef, $dir, { %{ $sqltargs || {} }, no_comments => 1 } );
2673 if (@statements > 1) {
2674 foreach my $statement (@statements) {
2675 $deploy->( $statement );
2678 elsif (@statements == 1) {
2679 foreach my $line ( split(";\n", $statements[0])) {
2685 =head2 datetime_parser
2687 Returns the datetime parser class
2691 sub datetime_parser {
2693 return $self->{datetime_parser} ||= do {
2694 $self->build_datetime_parser(@_);
2698 =head2 datetime_parser_type
2700 Defines (returns) the datetime parser class - currently hardwired to
2701 L<DateTime::Format::MySQL>
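In practice the parser is obtained through the storage and used to format or
inflate datetime values; a storage subclass can switch parser classes by
overriding L</datetime_parser_type>. A usage sketch (assumes L<DateTime> is
loaded):

  my $dtf        = $schema->storage->datetime_parser;
  my $now_string = $dtf->format_datetime( DateTime->now );
  my $dt         = $dtf->parse_datetime( $now_string );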
2705 sub datetime_parser_type { "DateTime::Format::MySQL"; }
2707 =head2 build_datetime_parser
2709 See L</datetime_parser>
2713 sub build_datetime_parser {
2715 my $type = $self->datetime_parser_type(@_);
2716 $self->ensure_class_loaded ($type);
2721 =head2 is_replicating
2723 A boolean that reports if a particular L<DBIx::Class::Storage::DBI> is set to
2724 replicate from a master database. Default is undef, which is the result
2725 returned by databases that don't support replication.
2729 sub is_replicating {
2734 =head2 lag_behind_master
Returns a number that represents a certain amount of lag behind a master db
when a given storage is replicating. The number is database dependent, but
starts at zero and increases with the amount of lag. Default is undef.
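As an illustration, a MySQL-flavoured subclass could implement both
L</is_replicating> and L</lag_behind_master> on top of C<SHOW SLAVE STATUS>.
This is only a sketch (the package name is hypothetical and the query is
MySQL-specific):

  package My::Schema::Storage::ReplicatedMySQL;   # hypothetical
  use base 'DBIx::Class::Storage::DBI::mysql';

  sub is_replicating {
    my $status = shift->_get_dbh->selectrow_hashref('SHOW SLAVE STATUS');
    return unless $status;   # not configured as a replica
    return $status->{Slave_IO_Running}  eq 'Yes'
        && $status->{Slave_SQL_Running} eq 'Yes';
  }

  sub lag_behind_master {
    my $status = shift->_get_dbh->selectrow_hashref('SHOW SLAVE STATUS');
    return $status ? $status->{Seconds_Behind_Master} : undef;
  }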
2742 sub lag_behind_master {
2746 =head2 relname_to_table_alias
2750 =item Arguments: $relname, $join_count
L<DBIx::Class> uses L<DBIx::Class::Relationship> names as table aliases in queries.
2757 This hook is to allow specific L<DBIx::Class::Storage> drivers to change the
2758 way these aliases are named.
The default behavior is C<< "${relname}_${join_count}" >> if C<$join_count>
is greater than 1, otherwise C<"$relname">.
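For example, a driver with tight identifier length limits might trim the
generated aliases. This is only a sketch (the package name and the
30-character limit are illustrative):

  package My::Schema::Storage::ShortAliases;   # hypothetical
  use base 'DBIx::Class::Storage::DBI';

  sub relname_to_table_alias {
    my $self  = shift;
    my $alias = $self->SUPER::relname_to_table_alias(@_);
    # keep aliases within a 30-character identifier limit
    return length($alias) > 30 ? substr($alias, 0, 30) : $alias;
  }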
2765 sub relname_to_table_alias {
2766 my ($self, $relname, $join_count) = @_;
2768 my $alias = ($join_count && $join_count > 1 ?
2769 join('_', $relname, $join_count) : $relname);
2778 =head2 DBIx::Class and AutoCommit
2780 DBIx::Class can do some wonderful magic with handling exceptions,
2781 disconnections, and transactions when you use C<< AutoCommit => 1 >>
2782 (the default) combined with C<txn_do> for transaction support.
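For example (a sketch; the resultset, relationship and column names are
hypothetical):

  # runs inside a transaction; rolled back automatically if the block dies
  $schema->txn_do(sub {
    my $author = $schema->resultset('Author')->create({ name => 'A. Writer' });
    $author->create_related(books => { title => 'First Draft' });
  });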
2784 If you set C<< AutoCommit => 0 >> in your connect info, then you are always
2785 in an assumed transaction between commits, and you're telling us you'd
2786 like to manage that manually. A lot of the magic protections offered by
2787 this module will go away. We can't protect you from exceptions due to database
2788 disconnects because we don't know anything about how to restart your
2789 transactions. You're on your own for handling all sorts of exceptional
cases if you choose the C<< AutoCommit => 0 >> path, just as you would be
with raw DBI.
2796 Matt S. Trout <mst@shadowcatsystems.co.uk>
2798 Andy Grundman <andy@hybridized.org>
2802 You may distribute this code under the same terms as Perl itself.