1 package DBIx::Class::Storage::DBI;
2 # -*- mode: cperl; cperl-indent-level: 2 -*-
7 use base qw/DBIx::Class::Storage::DBIHacks DBIx::Class::Storage/;
10 use DBIx::Class::Carp;
11 use DBIx::Class::Exception;
12 use Scalar::Util qw/refaddr weaken reftype blessed/;
13 use List::Util qw/first/;
14 use Sub::Name 'subname';
19 # default cursor class, overridable in connect_info attributes
20 __PACKAGE__->cursor_class('DBIx::Class::Storage::DBI::Cursor');
22 __PACKAGE__->mk_group_accessors('inherited' => qw/
23 sql_limit_dialect sql_quote_char sql_name_sep
26 __PACKAGE__->mk_group_accessors('component_class' => qw/sql_maker_class datetime_parser_type/);
28 __PACKAGE__->sql_maker_class('DBIx::Class::SQLMaker');
29 __PACKAGE__->datetime_parser_type('DateTime::Format::MySQL'); # historic default
31 __PACKAGE__->sql_name_sep('.');
33 __PACKAGE__->mk_group_accessors('simple' => qw/
34 _connect_info _dbi_connect_info _dbic_connect_attributes _driver_determined
35 _dbh _dbh_details _conn_pid _sql_maker _sql_maker_opts
36 transaction_depth _dbh_autocommit savepoints
39 # the values for these accessors are picked out (and deleted) from
40 # the attribute hashref passed to connect_info
41 my @storage_options = qw/
42 on_connect_call on_disconnect_call on_connect_do on_disconnect_do
43 disable_sth_caching unsafe auto_savepoint
45 __PACKAGE__->mk_group_accessors('simple' => @storage_options);
48 # capability definitions, using a 2-tiered accessor system
51 # A driver/user may define _use_X, which blindly without any checks says:
52 # "(do not) use this capability", (use_dbms_capability is an "inherited"
55 # If _use_X is undef, _supports_X is then queried. This is a "simple" style
56 # accessor, which in turn calls _determine_supports_X, and stores the return
57 # in a special slot on the storage object, which is wiped every time a $dbh
58 # reconnection takes place (it is not guaranteed that upon reconnection we
59 # will get the same rdbms version). _determine_supports_X does not need to
60 # exist on a driver, as we ->can for it before calling.
62 my @capabilities = (qw/
64 insert_returning_bound
69 __PACKAGE__->mk_group_accessors( dbms_capability => map { "_supports_$_" } @capabilities );
70 __PACKAGE__->mk_group_accessors( use_dbms_capability => map { "_use_$_" } (@capabilities ) );
72 # on by default, not strictly a capability (pending rewrite)
73 __PACKAGE__->_use_join_optimizer (1);
# Overridable hook backing the _use_join_optimizer capability; the base
# storage unconditionally reports the join optimizer as supported.
sub _determine_supports_join_optimizer {
  return 1;
}
76 # Each of these methods need _determine_driver called before itself
77 # in order to function reliably. This is a purely DRY optimization
79 # get_(use)_dbms_capability need to be called on the correct Storage
80 # class, as _use_X may be hardcoded class-wide, and _supports_X calls
81 # _determine_supports_X which obv. needs a correct driver as well
82 my @rdbms_specific_methods = qw/
96 get_use_dbms_capability
103 for my $meth (@rdbms_specific_methods) {
105 my $orig = __PACKAGE__->can ($meth)
106 or die "$meth is not a ::Storage::DBI method!";
109 no warnings qw/redefine/;
110 *{__PACKAGE__ ."::$meth"} = subname $meth => sub {
112 # only fire when invoked on an instance, a valid class-based invocation
113 # would e.g. be setting a default for an inherited accessor
116 ! $_[0]->_driver_determined
118 ! $_[0]->{_in_determine_driver}
120 $_[0]->_determine_driver;
122 # This for some reason crashes and burns on perl 5.8.1
123 # IFF the method ends up throwing an exception
124 #goto $_[0]->can ($meth);
126 my $cref = $_[0]->can ($meth);
137 DBIx::Class::Storage::DBI - DBI storage handler
141 my $schema = MySchema->connect('dbi:SQLite:my.db');
143 $schema->storage->debug(1);
145 my @stuff = $schema->storage->dbh_do(
147 my ($storage, $dbh, @args) = @_;
148 $dbh->do("DROP TABLE authors");
153 $schema->resultset('Book')->search({
154 written_on => $schema->storage->datetime_parser->format_datetime(DateTime->now)
159 This class represents the connection to an RDBMS via L<DBI>. See
160 L<DBIx::Class::Storage> for general information. This pod only
161 documents DBI-specific methods and behaviors.
168 my $new = shift->next::method(@_);
170 $new->transaction_depth(0);
171 $new->_sql_maker_opts({});
172 $new->_dbh_details({});
173 $new->{savepoints} = [];
174 $new->{_in_dbh_do} = 0;
175 $new->{_dbh_gen} = 0;
177 # read below to see what this does
178 $new->_arm_global_destructor;
183 # This is a hack to work around perl shooting stuff in random
184 # order on exit(). If we do not walk the remaining storage
185 # objects in an END block, there is a *small but real* chance
186 # of a fork()ed child to kill the parent's shared DBI handle,
187 # *before perl reaches the DESTROY in this package*
188 # Yes, it is ugly and effective.
189 # Additionally this registry is used by the CLONE method to
190 # make sure no handles are shared between threads
192 my %seek_and_destroy;
# Register this storage instance in the process-global %seek_and_destroy
# registry so the END block can disconnect handles in a deterministic
# order at interpreter shutdown, and CLONE can unshare them across
# threads (see the comment block above the registry declaration).
# NOTE(review): this excerpt omits the `my $self = shift;` line assumed
# to precede the refaddr() call — confirm against the full source.
194 sub _arm_global_destructor {
# Key by refaddr: unique per live object, safe as a hash key.
196 my $key = refaddr ($self);
197 $seek_and_destroy{$key} = $self;
# Weaken the slot so registration does not keep the object alive and
# DESTROY still fires normally.
198 weaken ($seek_and_destroy{$key});
202 local $?; # just in case the DBI destructor changes it somehow
204 # destroy just the object if not native to this process/thread
205 $_->_verify_pid for (grep
207 values %seek_and_destroy
212 # As per DBI's recommendation, DBIC disconnects all handles as
213 # soon as possible (DBIC will reconnect only on demand from within
215 for (values %seek_and_destroy) {
217 $_->{_dbh_gen}++; # so that existing cursors will drop as well
226 # some databases spew warnings on implicit disconnect
227 local $SIG{__WARN__} = sub {};
230 # this op is necessary, since the very last perl runtime statement
231 # triggers a global destruction shootout, and the $SIG localization
232 # may very well be destroyed before perl actually gets to do the
237 # handle pid changes correctly - do not destroy parent's connection
241 my $pid = $self->_conn_pid;
242 if( defined $pid and $pid != $$ and my $dbh = $self->_dbh ) {
243 $dbh->{InactiveDestroy} = 1;
253 This method is normally called by L<DBIx::Class::Schema/connection>, which
254 encapsulates its argument list in an arrayref before passing them here.
256 The argument list may contain:
262 The same 4-element argument set one would normally pass to
263 L<DBI/connect>, optionally followed by
264 L<extra attributes|/DBIx::Class specific connection attributes>
265 recognized by DBIx::Class:
267 $connect_info_args = [ $dsn, $user, $password, \%dbi_attributes?, \%extra_attributes? ];
271 A single code reference which returns a connected
272 L<DBI database handle|DBI/connect> optionally followed by
273 L<extra attributes|/DBIx::Class specific connection attributes> recognized
276 $connect_info_args = [ sub { DBI->connect (...) }, \%extra_attributes? ];
280 A single hashref with all the attributes and the dsn/user/password
283 $connect_info_args = [{
291 $connect_info_args = [{
292 dbh_maker => sub { DBI->connect (...) },
297 This is particularly useful for L<Catalyst> based applications, allowing the
298 following config (L<Config::General> style):
303 dsn dbi:mysql:database=test
310 The C<dsn>/C<user>/C<password> combination can be substituted by the
311 C<dbh_maker> key whose value is a coderef that returns a connected
312 L<DBI database handle|DBI/connect>
316 Please note that the L<DBI> docs recommend that you always explicitly
317 set C<AutoCommit> to either I<0> or I<1>. L<DBIx::Class> further
318 recommends that it be set to I<1>, and that you perform transactions
319 via our L<DBIx::Class::Schema/txn_do> method. L<DBIx::Class> will set it
320 to I<1> if you do not explicitly set it to zero. This is the default
321 for most DBDs. See L</DBIx::Class and AutoCommit> for details.
323 =head3 DBIx::Class specific connection attributes
325 In addition to the standard L<DBI|DBI/ATTRIBUTES_COMMON_TO_ALL_HANDLES>
326 L<connection|DBI/Database_Handle_Attributes> attributes, DBIx::Class recognizes
327 the following connection options. These options can be mixed in with your other
328 L<DBI> connection attributes, or placed in a separate hashref
329 (C<\%extra_attributes>) as shown above.
331 Every time C<connect_info> is invoked, any previous settings for
332 these options will be cleared before setting the new ones, regardless of
333 whether any options are specified in the new C<connect_info>.
340 Specifies things to do immediately after connecting or re-connecting to
341 the database. Its value may contain:
347 This contains one SQL statement to execute.
349 =item an array reference
351 This contains SQL statements to execute in order. Each element contains
352 a string or a code reference that returns a string.
354 =item a code reference
356 This contains some code to execute. Unlike code references within an
357 array reference, its return value is ignored.
361 =item on_disconnect_do
363 Takes arguments in the same form as L</on_connect_do> and executes them
364 immediately before disconnecting from the database.
366 Note, this only runs if you explicitly call L</disconnect> on the
369 =item on_connect_call
371 A more generalized form of L</on_connect_do> that calls the specified
372 C<connect_call_METHOD> methods in your storage driver.
374 on_connect_do => 'select 1'
378 on_connect_call => [ [ do_sql => 'select 1' ] ]
380 Its values may contain:
386 Will call the C<connect_call_METHOD> method.
388 =item a code reference
390 Will execute C<< $code->($storage) >>
392 =item an array reference
394 Each value can be a method name or code reference.
396 =item an array of arrays
398 For each array, the first item is taken to be the C<connect_call_> method name
399 or code reference, and the rest are parameters to it.
403 Some predefined storage methods you may use:
409 Executes a SQL string or a code reference that returns a SQL string. This is
410 what L</on_connect_do> and L</on_disconnect_do> use.
418 Will execute the scalar as SQL.
422 Taken to be arguments to L<DBI/do>, the SQL string optionally followed by the
423 attributes hashref and bind values.
425 =item a code reference
427 Will execute C<< $code->($storage) >> and execute the return array refs as
434 Execute any statements necessary to initialize the database session to return
435 and accept datetime/timestamp values used with
436 L<DBIx::Class::InflateColumn::DateTime>.
438 Only necessary for some databases, see your specific storage driver for
439 implementation details.
443 =item on_disconnect_call
445 Takes arguments in the same form as L</on_connect_call> and executes them
446 immediately before disconnecting from the database.
448 Calls the C<disconnect_call_METHOD> methods as opposed to the
449 C<connect_call_METHOD> methods called by L</on_connect_call>.
451 Note, this only runs if you explicitly call L</disconnect> on the
454 =item disable_sth_caching
456 If set to a true value, this option will disable the caching of
457 statement handles via L<DBI/prepare_cached>.
461 Sets a specific SQL::Abstract::Limit-style limit dialect, overriding the
462 default L</sql_limit_dialect> setting of the storage (if any). For a list
463 of available limit dialects see L<DBIx::Class::SQLMaker::LimitDialects>.
467 When true automatically sets L</quote_char> and L</name_sep> to the characters
468 appropriate for your particular RDBMS. This option is preferred over specifying
469 L</quote_char> directly.
473 Specifies what characters to use to quote table and column names.
475 C<quote_char> expects either a single character, in which case it
476 is placed on either side of the table/column name, or an arrayref of length
477 2 in which case the table/column name is placed between the elements.
479 For example under MySQL you should use C<< quote_char => '`' >>, and for
480 SQL Server you should use C<< quote_char => [qw/[ ]/] >>.
484 This parameter is only useful in conjunction with C<quote_char>, and is used to
485 specify the character that separates elements (schemas, tables, columns) from
486 each other. If unspecified it defaults to the most commonly used C<.>.
490 This Storage driver normally installs its own C<HandleError>, sets
491 C<RaiseError> and C<ShowErrorStatement> on, and sets C<PrintError> off on
492 all database handles, including those supplied by a coderef. It does this
493 so that it can have consistent and useful error behavior.
495 If you set this option to a true value, Storage will not do its usual
496 modifications to the database handle's attributes, and instead relies on
497 the settings in your connect_info DBI options (or the values you set in
498 your connection coderef, in the case that you are connecting via coderef).
500 Note that your custom settings can cause Storage to malfunction,
501 especially if you set a C<HandleError> handler that suppresses exceptions
502 and/or disable C<RaiseError>.
506 If this option is true, L<DBIx::Class> will use savepoints when nesting
507 transactions, making it possible to recover from failure in the inner
508 transaction without having to abort all outer transactions.
512 Use this argument to supply a cursor class other than the default
513 L<DBIx::Class::Storage::DBI::Cursor>.
517 Some real-life examples of arguments to L</connect_info> and
518 L<DBIx::Class::Schema/connect>
520 # Simple SQLite connection
521 ->connect_info([ 'dbi:SQLite:./foo.db' ]);
524 ->connect_info([ sub { DBI->connect(...) } ]);
526 # Connect via subref in hashref
528 dbh_maker => sub { DBI->connect(...) },
529 on_connect_do => 'alter session ...',
532 # A bit more complicated
539 { quote_char => q{"} },
543 # Equivalent to the previous example
549 { AutoCommit => 1, quote_char => q{"}, name_sep => q{.} },
553 # Same, but with hashref as argument
554 # See parse_connect_info for explanation
557 dsn => 'dbi:Pg:dbname=foo',
559 password => 'my_pg_password',
566 # Subref + DBIx::Class-specific connection options
569 sub { DBI->connect(...) },
573 on_connect_do => ['SET search_path TO myschema,otherschema,public'],
574 disable_sth_caching => 1,
584 my ($self, $info) = @_;
586 return $self->_connect_info if !$info;
588 $self->_connect_info($info); # copy for _connect_info
590 $info = $self->_normalize_connect_info($info)
591 if ref $info eq 'ARRAY';
593 for my $storage_opt (keys %{ $info->{storage_options} }) {
594 my $value = $info->{storage_options}{$storage_opt};
596 $self->$storage_opt($value);
599 # Kill sql_maker/_sql_maker_opts, so we get a fresh one with only
600 # the new set of options
601 $self->_sql_maker(undef);
602 $self->_sql_maker_opts({});
604 for my $sql_maker_opt (keys %{ $info->{sql_maker_options} }) {
605 my $value = $info->{sql_maker_options}{$sql_maker_opt};
607 $self->_sql_maker_opts->{$sql_maker_opt} = $value;
611 %{ $self->_default_dbi_connect_attributes || {} },
612 %{ $info->{attributes} || {} },
615 my @args = @{ $info->{arguments} };
617 if (keys %attrs and ref $args[0] ne 'CODE') {
619 'You provided explicit AutoCommit => 0 in your connection_info. '
620 . 'This is almost universally a bad idea (see the footnotes of '
621 . 'DBIx::Class::Storage::DBI for more info). If you still want to '
622 . 'do this you can set $ENV{DBIC_UNSAFE_AUTOCOMMIT_OK} to disable '
624 if ! $attrs{AutoCommit} and ! $ENV{DBIC_UNSAFE_AUTOCOMMIT_OK};
626 push @args, \%attrs if keys %attrs;
628 $self->_dbi_connect_info(\@args);
631 # save attributes in a separate accessor so they are always
632 # introspectable, even in case of a CODE $dbhmaker
633 $self->_dbic_connect_attributes (\%attrs);
635 return $self->_connect_info;
638 sub _normalize_connect_info {
639 my ($self, $info_arg) = @_;
642 my @args = @$info_arg; # take a shallow copy for further mutilation
644 # combine/pre-parse arguments depending on invocation style
647 if (ref $args[0] eq 'CODE') { # coderef with optional \%extra_attributes
648 %attrs = %{ $args[1] || {} };
651 elsif (ref $args[0] eq 'HASH') { # single hashref (i.e. Catalyst config)
652 %attrs = %{$args[0]};
654 if (my $code = delete $attrs{dbh_maker}) {
657 my @ignored = grep { delete $attrs{$_} } (qw/dsn user password/);
660 'Attribute(s) %s in connect_info were ignored, as they can not be applied '
661 . "to the result of 'dbh_maker'",
663 join (', ', map { "'$_'" } (@ignored) ),
668 @args = delete @attrs{qw/dsn user password/};
671 else { # otherwise assume dsn/user/password + \%attrs + \%extra_attrs
673 % { $args[3] || {} },
674 % { $args[4] || {} },
676 @args = @args[0,1,2];
679 $info{arguments} = \@args;
681 my @storage_opts = grep exists $attrs{$_},
682 @storage_options, 'cursor_class';
684 @{ $info{storage_options} }{@storage_opts} =
685 delete @attrs{@storage_opts} if @storage_opts;
687 my @sql_maker_opts = grep exists $attrs{$_},
688 qw/limit_dialect quote_char name_sep quote_names/;
690 @{ $info{sql_maker_options} }{@sql_maker_opts} =
691 delete @attrs{@sql_maker_opts} if @sql_maker_opts;
693 $info{attributes} = \%attrs if %attrs;
698 sub _default_dbi_connect_attributes () {
703 ShowErrorStatement => 1,
709 This method is deprecated in favour of setting via L</connect_info>.
713 =head2 on_disconnect_do
715 This method is deprecated in favour of setting via L</connect_info>.
# Normalize the value of an on_connect_do / on_disconnect_do accessor
# ($type is the accessor name) into a list of connection "actions"
# consumable by _do_connection_actions: a plain string becomes a single
# [ do_sql => $string ] tuple, an arrayref is expanded element-wise,
# a coderef passes through, and anything else is a fatal error.
# NOTE(review): excerpt is missing interior lines (the @res declaration,
# the plain-string/CODE branch bodies, the return) — verify full source.
719 sub _parse_connect_do {
720 my ($self, $type) = @_;
722 my $val = $self->$type;
# An unset accessor yields no actions at all.
723 return () if not defined $val;
728 push @res, [ 'do_sql', $val ];
729 } elsif (ref($val) eq 'CODE') {
731 } elsif (ref($val) eq 'ARRAY') {
# Each array element becomes its own do_sql action, run in order.
732 push @res, map { [ 'do_sql', $_ ] } @$val;
734 $self->throw_exception("Invalid type for $type: ".ref($val));
742 Arguments: ($subref | $method_name), @extra_coderef_args?
744 Execute the given $subref or $method_name using the new exception-based
745 connection management.
747 The first two arguments will be the storage object that C<dbh_do> was called
748 on and a database handle to use. Any additional arguments will be passed
749 verbatim to the called subref as arguments 2 and onwards.
751 Using this (instead of $self->_dbh or $self->dbh) ensures correct
752 exception handling and reconnection (or failover in future subclasses).
754 Your subref should have no side-effects outside of the database, as
755 there is the potential for your subref to be partially double-executed
756 if the database connection was stale/dysfunctional.
760 my @stuff = $schema->storage->dbh_do(
762 my ($storage, $dbh, @cols) = @_;
763 my $cols = join(q{, }, @cols);
764 $dbh->selectrow_array("SELECT $cols FROM foo");
775 my $dbh = $self->_get_dbh;
777 return $self->$code($dbh, @_)
778 if ( $self->{_in_dbh_do} || $self->{transaction_depth} );
780 local $self->{_in_dbh_do} = 1;
782 # take a ref instead of a copy, to preserve coderef @_ aliasing semantics
785 $self->$code ($dbh, @$args);
787 $self->throw_exception($_) if $self->connected;
789 # We were not connected - reconnect and retry, but let any
790 # exception fall right through this time
791 carp "Retrying $code after catching disconnected exception: $_"
792 if $ENV{DBIC_DBIRETRY_DEBUG};
794 $self->_populate_dbh;
795 $self->$code($self->_dbh, @$args);
799 # This is basically a blend of dbh_do above and DBIx::Class::Storage::txn_do.
800 # It also informs dbh_do to bypass itself while under the direction of txn_do,
801 # via $self->{_in_dbh_do} (this saves some redundant eval and errorcheck, etc)
806 ref $coderef eq 'CODE' or $self->throw_exception
807 ('$coderef must be a CODE reference');
809 local $self->{_in_dbh_do} = 1;
812 my $want = wantarray;
818 # take a ref instead of a copy, to preserve coderef @_ aliasing semantics
823 my $txn_start_depth = $self->transaction_depth;
825 @result = $coderef->(@$args);
827 elsif(defined $want) {
828 $result[0] = $coderef->(@$args);
834 my $delta_txn = $txn_start_depth - $self->transaction_depth;
835 if ($delta_txn == 0) {
838 elsif ($delta_txn != 1) {
839 # an off-by-one would mean we fired a rollback
840 carp "Unexpected reduction of transaction depth by $delta_txn after execution of $coderef";
846 if(! defined $exception) { return wantarray ? @result : $result[0] }
848 if($self->transaction_depth > 1 || $tried++ || $self->connected) {
849 my $rollback_exception;
850 try { $self->txn_rollback } catch { $rollback_exception = shift };
851 if(defined $rollback_exception) {
852 my $exception_class = "DBIx::Class::Storage::NESTED_ROLLBACK_EXCEPTION";
853 $self->throw_exception($exception) # propagate nested rollback
854 if $rollback_exception =~ /$exception_class/;
856 $self->throw_exception(
857 "Transaction aborted: ${exception}. "
858 . "Rollback failed: ${rollback_exception}"
861 $self->throw_exception($exception)
864 # We were not connected, and was first try - reconnect and retry
866 carp "Retrying $coderef after catching disconnected exception: $exception"
867 if $ENV{DBIC_TXNRETRY_DEBUG};
868 $self->_populate_dbh;
874 Our C<disconnect> method also performs a rollback first if the
875 database is not in C<AutoCommit> mode.
885 push @actions, ( $self->on_disconnect_call || () );
886 push @actions, $self->_parse_connect_do ('on_disconnect_do');
888 $self->_do_connection_actions(disconnect_call_ => $_) for @actions;
890 $self->_dbh_rollback unless $self->_dbh_autocommit;
892 %{ $self->_dbh->{CachedKids} } = ();
893 $self->_dbh->disconnect;
899 =head2 with_deferred_fk_checks
903 =item Arguments: C<$coderef>
905 =item Return Value: The return value of $coderef
909 Storage specific method to run the code ref with FK checks deferred or
910 in MySQL's case disabled entirely.
914 # Storage subclasses should override this
915 sub with_deferred_fk_checks {
916 my ($self, $sub) = @_;
924 =item Arguments: none
926 =item Return Value: 1|0
930 Verifies that the current database handle is active and ready to execute
931 an SQL statement (e.g. the connection did not get stale, server is still
932 answering, etc.) This method is used internally by L</dbh>.
938 return 0 unless $self->_seems_connected;
941 local $self->_dbh->{RaiseError} = 1;
946 sub _seems_connected {
951 my $dbh = $self->_dbh
954 return $dbh->FETCH('Active');
960 my $dbh = $self->_dbh or return 0;
965 sub ensure_connected {
968 unless ($self->connected) {
969 $self->_populate_dbh;
975 Returns a C<$dbh> - a data base handle of class L<DBI>. The returned handle
976 is guaranteed to be healthy by implicitly calling L</connected>, and if
977 necessary performing a reconnection before returning. Keep in mind that this
978 is very B<expensive> on some database engines. Consider using L</dbh_do>
986 if (not $self->_dbh) {
987 $self->_populate_dbh;
989 $self->ensure_connected;
994 # this is the internal "get dbh or connect (don't check)" method
998 $self->_populate_dbh unless $self->_dbh;
1004 unless ($self->_sql_maker) {
1005 my $sql_maker_class = $self->sql_maker_class;
1007 my %opts = %{$self->_sql_maker_opts||{}};
1009 $opts{limit_dialect}
1011 $self->sql_limit_dialect
1014 my $s_class = (ref $self) || $self;
1016 "Your storage class ($s_class) does not set sql_limit_dialect and you "
1017 . 'have not supplied an explicit limit_dialect in your connection_info. '
1018 . 'DBIC will attempt to use the GenericSubQ dialect, which works on most '
1019 . 'databases but can be (and often is) painfully slow. '
1020 . "Please file an RT ticket against '$s_class' ."
1027 my ($quote_char, $name_sep);
1029 if ($opts{quote_names}) {
1030 $quote_char = (delete $opts{quote_char}) || $self->sql_quote_char || do {
1031 my $s_class = (ref $self) || $self;
1033 "You requested 'quote_names' but your storage class ($s_class) does "
1034 . 'not explicitly define a default sql_quote_char and you have not '
1035 . 'supplied a quote_char as part of your connection_info. DBIC will '
1036 .q{default to the ANSI SQL standard quote '"', which works most of }
1037 . "the time. Please file an RT ticket against '$s_class'."
1043 $name_sep = (delete $opts{name_sep}) || $self->sql_name_sep;
1046 $self->_sql_maker($sql_maker_class->new(
1047 bindtype=>'columns',
1048 array_datatypes => 1,
1049 limit_dialect => $dialect,
1050 ($quote_char ? (quote_char => $quote_char) : ()),
1051 name_sep => ($name_sep || '.'),
1055 return $self->_sql_maker;
1058 # nothing to do by default
1065 my @info = @{$self->_dbi_connect_info || []};
1066 $self->_dbh(undef); # in case ->connected failed we might get sent here
1067 $self->_dbh_details({}); # reset everything we know
1069 $self->_dbh($self->_connect(@info));
1071 $self->_conn_pid($$) if $^O ne 'MSWin32'; # on win32 these are in fact threads
1073 $self->_determine_driver;
1075 # Always set the transaction depth on connect, since
1076 # there is no transaction in progress by definition
1077 $self->{transaction_depth} = $self->_dbh_autocommit ? 0 : 1;
1079 $self->_run_connection_actions unless $self->{_in_determine_driver};
1082 sub _run_connection_actions {
1086 push @actions, ( $self->on_connect_call || () );
1087 push @actions, $self->_parse_connect_do ('on_connect_do');
1089 $self->_do_connection_actions(connect_call_ => $_) for @actions;
# Setter half of the "use_dbms_capability" accessor group: stores the
# user/driver override via the 'inherited' accessor machinery so it can
# be hardcoded class-wide (see the capability notes near the file top).
1094 sub set_use_dbms_capability {
1095 $_[0]->set_inherited ($_[1], $_[2]);
# Getter half: a defined _use_X override wins outright; when it is undef
# we fall through to the detected capability by rewriting the accessor
# name from _use_X to _supports_X and querying get_dbms_capability.
# NOTE(review): the ternary's defined-branch is not visible here.
1098 sub get_use_dbms_capability {
1099 my ($self, $capname) = @_;
1101 my $use = $self->get_inherited ($capname);
1104 : do { $capname =~ s/^_use_/_supports_/; $self->get_dbms_capability ($capname) }
# Cache a detected capability on the per-connection _dbh_details hash;
# this slot is wiped on reconnect, since a different server version may
# change the answer (see capability notes near the file top).
1108 sub set_dbms_capability {
1109 $_[0]->_dbh_details->{capability}{$_[1]} = $_[2];
# Lazily determine and memoize a capability: on first query, call the
# driver's _determine_supports_X hook (if any), normalize the result to
# 1/0, then store it via set_dbms_capability for subsequent lookups.
1112 sub get_dbms_capability {
1113 my ($self, $capname) = @_;
1115 my $cap = $self->_dbh_details->{capability}{$capname};
1117 unless (defined $cap) {
# The probe name is built as "_determine$capname" where $capname already
# starts with "_supports_…", yielding _determine_supports_X.
1118 if (my $meth = $self->can ("_determine$capname")) {
1119 $cap = $self->$meth ? 1 : 0;
1125 $self->set_dbms_capability ($capname, $cap);
1135 unless ($info = $self->_dbh_details->{info}) {
1139 my $server_version = try { $self->_get_server_version };
1141 if (defined $server_version) {
1142 $info->{dbms_version} = $server_version;
1144 my ($numeric_version) = $server_version =~ /^([\d\.]+)/;
1145 my @verparts = split (/\./, $numeric_version);
1151 # consider only up to 3 version parts, iff not more than 3 digits
1153 while (@verparts && @use_parts < 3) {
1154 my $p = shift @verparts;
1156 push @use_parts, $p;
1158 push @use_parts, 0 while @use_parts < 3;
1160 $info->{normalized_dbms_version} = sprintf "%d.%03d%03d", @use_parts;
1164 $self->_dbh_details->{info} = $info;
# Ask the server for its version string via DBI's get_info(18);
# 18 is presumably SQL_DBMS_VER per DBI::Const::GetInfoType — confirm.
1170 sub _get_server_version {
1171 shift->_dbh_get_info(18);
1175 my ($self, $info) = @_;
1177 return try { $self->_get_dbh->get_info($info) } || undef;
1180 sub _determine_driver {
1183 if ((not $self->_driver_determined) && (not $self->{_in_determine_driver})) {
1184 my $started_connected = 0;
1185 local $self->{_in_determine_driver} = 1;
1187 if (ref($self) eq __PACKAGE__) {
1189 if ($self->_dbh) { # we are connected
1190 $driver = $self->_dbh->{Driver}{Name};
1191 $started_connected = 1;
1193 # if connect_info is a CODEREF, we have no choice but to connect
1194 if (ref $self->_dbi_connect_info->[0] &&
1195 reftype $self->_dbi_connect_info->[0] eq 'CODE') {
1196 $self->_populate_dbh;
1197 $driver = $self->_dbh->{Driver}{Name};
1200 # try to use dsn to not require being connected, the driver may still
1201 # force a connection in _rebless to determine version
1202 # (dsn may not be supplied at all if all we do is make a mock-schema)
1203 my $dsn = $self->_dbi_connect_info->[0] || $ENV{DBI_DSN} || '';
1204 ($driver) = $dsn =~ /dbi:([^:]+):/i;
1205 $driver ||= $ENV{DBI_DRIVER};
1210 my $storage_class = "DBIx::Class::Storage::DBI::${driver}";
1211 if ($self->load_optional_class($storage_class)) {
1212 mro::set_mro($storage_class, 'c3');
1213 bless $self, $storage_class;
1219 $self->_driver_determined(1);
1221 Class::C3->reinitialize() if DBIx::Class::_ENV_::OLD_MRO;
1223 $self->_init; # run driver-specific initializations
1225 $self->_run_connection_actions
1226 if !$started_connected && defined $self->_dbh;
# Dispatch one connection "action" (from on_connect_call and friends)
# by type:
#   plain string -> resolve to the method $method_prefix . $string
#   coderef      -> invoke it
#   arrayref     -> recurse; a flat list recurses per element, while an
#                   array-of-arrays treats each inner array as
#                   (action-name, @args)
# Anything else is a fatal error.
# NOTE(review): excerpt omits `my $self = shift;` and the actual
# method/coderef invocations — confirm against the full source.
1230 sub _do_connection_actions {
1232 my $method_prefix = shift;
1235 if (not ref($call)) {
1236 my $method = $method_prefix . $call;
1238 } elsif (ref($call) eq 'CODE') {
1240 } elsif (ref($call) eq 'ARRAY') {
1241 if (ref($call->[0]) ne 'ARRAY') {
1242 $self->_do_connection_actions($method_prefix, $_) for @$call;
1244 $self->_do_connection_actions($method_prefix, @$_) for @$call;
# FIXME: "conection" typo in the error message below — left byte-for-byte
# since the string is runtime behavior; fix separately if desired.
1247 $self->throw_exception (sprintf ("Don't know how to process conection actions of type '%s'", ref($call)) );
# Thin adapter exposing _do_query as the predefined 'do_sql' action for
# on_connect_call (see the POD above).
# NOTE(review): the `my $self = shift;` line is omitted in this excerpt.
1253 sub connect_call_do_sql {
1255 $self->_do_query(@_);
# Thin adapter exposing _do_query as the predefined 'do_sql' action for
# on_disconnect_call (see the POD above).
# NOTE(review): the `my $self = shift;` line is omitted in this excerpt.
1258 sub disconnect_call_do_sql {
1260 $self->_do_query(@_);
1263 # override in db-specific backend when necessary
# Default no-op for the predefined 'datetime_setup' connection action;
# DB-specific storage subclasses override this when the session needs
# explicit datetime/timestamp configuration.
sub connect_call_datetime_setup {
  return 1;
}
1267 my ($self, $action) = @_;
1269 if (ref $action eq 'CODE') {
1270 $action = $action->($self);
1271 $self->_do_query($_) foreach @$action;
1274 # Most debuggers expect ($sql, @bind), so we need to exclude
1275 # the attribute hash which is the second argument to $dbh->do
1276 # furthermore the bind values are usually to be presented
1277 # as named arrayref pairs, so wrap those here too
1278 my @do_args = (ref $action eq 'ARRAY') ? (@$action) : ($action);
1279 my $sql = shift @do_args;
1280 my $attrs = shift @do_args;
1281 my @bind = map { [ undef, $_ ] } @do_args;
1283 $self->_query_start($sql, @bind);
1284 $self->_get_dbh->do($sql, $attrs, @do_args);
1285 $self->_query_end($sql, @bind);
1292 my ($self, @info) = @_;
1294 $self->throw_exception("You failed to provide any connection info")
1297 my ($old_connect_via, $dbh);
1299 if ($INC{'Apache/DBI.pm'} && $ENV{MOD_PERL}) {
1300 $old_connect_via = $DBI::connect_via;
1301 $DBI::connect_via = 'connect';
1305 if(ref $info[0] eq 'CODE') {
1306 $dbh = $info[0]->();
1310 $dbh = DBI->connect(@info);
1317 unless ($self->unsafe) {
1319 $self->throw_exception(
1320 'Refusing clobbering of {HandleError} installed on externally supplied '
1321 ."DBI handle $dbh. Either remove the handler or use the 'unsafe' attribute."
1322 ) if $dbh->{HandleError} and ref $dbh->{HandleError} ne '__DBIC__DBH__ERROR__HANDLER__';
1324 # Default via _default_dbi_connect_attributes is 1, hence it was an explicit
1325 # request, or an external handle. Complain and set anyway
1326 unless ($dbh->{RaiseError}) {
1327 carp( ref $info[0] eq 'CODE'
1329 ? "The 'RaiseError' of the externally supplied DBI handle is set to false. "
1330 ."DBIx::Class will toggle it back to true, unless the 'unsafe' connect "
1331 .'attribute has been supplied'
1333 : 'RaiseError => 0 supplied in your connection_info, without an explicit '
1334 .'unsafe => 1. Toggling RaiseError back to true'
1337 $dbh->{RaiseError} = 1;
1340 # this odd anonymous coderef dereference is in fact really
1341 # necessary to avoid the unwanted effect described in perl5
1344 my $weak_self = $_[0];
1347 # the coderef is blessed so we can distinguish it from externally
1348 # supplied handles (which must be preserved)
1349 $_[1]->{HandleError} = bless sub {
1351 $weak_self->throw_exception("DBI Exception: $_[0]");
1354 # the handler may be invoked by something totally out of
1356 DBIx::Class::Exception->throw("DBI Exception (unhandled by DBIC, ::Schema GCed): $_[0]");
1358 }, '__DBIC__DBH__ERROR__HANDLER__';
1363 $self->throw_exception("DBI Connection failed: $_")
1366 $DBI::connect_via = $old_connect_via if $old_connect_via;
1369 $self->_dbh_autocommit($dbh->{AutoCommit});
1374 my ($self, $name) = @_;
1376 $name = $self->_svp_generate_name
1377 unless defined $name;
1379 $self->throw_exception ("You can't use savepoints outside a transaction")
1380 if $self->{transaction_depth} == 0;
1382 $self->throw_exception ("Your Storage implementation doesn't support savepoints")
1383 unless $self->can('_svp_begin');
1385 push @{ $self->{savepoints} }, $name;
1387 $self->debugobj->svp_begin($name) if $self->debug;
1389 return $self->_svp_begin($name);
1393 my ($self, $name) = @_;
1395 $self->throw_exception ("You can't use savepoints outside a transaction")
1396 if $self->{transaction_depth} == 0;
1398 $self->throw_exception ("Your Storage implementation doesn't support savepoints")
1399 unless $self->can('_svp_release');
1401 if (defined $name) {
1402 $self->throw_exception ("Savepoint '$name' does not exist")
1403 unless grep { $_ eq $name } @{ $self->{savepoints} };
1405 # Dig through the stack until we find the one we are releasing. This keeps
1406 # the stack up to date.
1409 do { $svp = pop @{ $self->{savepoints} } } while $svp ne $name;
1411 $name = pop @{ $self->{savepoints} };
1414 $self->debugobj->svp_release($name) if $self->debug;
1416 return $self->_svp_release($name);
1420 my ($self, $name) = @_;
1422 $self->throw_exception ("You can't use savepoints outside a transaction")
1423 if $self->{transaction_depth} == 0;
1425 $self->throw_exception ("Your Storage implementation doesn't support savepoints")
1426 unless $self->can('_svp_rollback');
1428 if (defined $name) {
1429 # If they passed us a name, verify that it exists in the stack
1430 unless(grep({ $_ eq $name } @{ $self->{savepoints} })) {
1431 $self->throw_exception("Savepoint '$name' does not exist!");
1434 # Dig through the stack until we find the one we are releasing. This keeps
1435 # the stack up to date.
1436 while(my $s = pop(@{ $self->{savepoints} })) {
1437 last if($s eq $name);
1439 # Add the savepoint back to the stack, as a rollback doesn't remove the
1440 # named savepoint, only everything after it.
1441 push(@{ $self->{savepoints} }, $name);
1443 # We'll assume they want to rollback to the last savepoint
1444 $name = $self->{savepoints}->[-1];
1447 $self->debugobj->svp_rollback($name) if $self->debug;
1449 return $self->_svp_rollback($name);
1452 sub _svp_generate_name {
1454 return 'savepoint_'.scalar(@{ $self->{'savepoints'} });
1460 # this means we have not yet connected and do not know the AC status
1461 # (e.g. coderef $dbh)
1462 if (! defined $self->_dbh_autocommit) {
1463 $self->ensure_connected;
1465 # otherwise re-connect on pid changes, so
1466 # that the txn_depth is adjusted properly
1467 # the lightweight _get_dbh is good enough here
1468 # (only superficial handle check, no pings)
1473 if($self->transaction_depth == 0) {
1474 $self->debugobj->txn_begin()
1476 $self->_dbh_begin_work;
1478 elsif ($self->auto_savepoint) {
1481 $self->{transaction_depth}++;
1484 sub _dbh_begin_work {
1487 # if the user is utilizing txn_do - good for him, otherwise we need to
1488 # ensure that the $dbh is healthy on BEGIN.
1489 # We do this via ->dbh_do instead of ->dbh, so that the ->dbh "ping"
1490 # will be replaced by a failure of begin_work itself (which will be
1491 # then retried on reconnect)
1492 if ($self->{_in_dbh_do}) {
1493 $self->_dbh->begin_work;
1495 $self->dbh_do(sub { $_[1]->begin_work });
1501 if (! $self->_dbh) {
1502 $self->throw_exception('cannot COMMIT on a disconnected handle');
1504 elsif ($self->{transaction_depth} == 1) {
1505 $self->debugobj->txn_commit()
1508 $self->{transaction_depth} = 0
1509 if $self->_dbh_autocommit;
1511 elsif($self->{transaction_depth} > 1) {
1512 $self->{transaction_depth}--;
1514 if $self->auto_savepoint;
1516 elsif (! $self->_dbh->FETCH('AutoCommit') ) {
1518 carp "Storage transaction_depth $self->{transaction_depth} does not match "
1519 ."false AutoCommit of $self->{_dbh}, attempting COMMIT anyway";
1521 $self->debugobj->txn_commit()
1524 $self->{transaction_depth} = 0
1525 if $self->_dbh_autocommit;
1528 $self->throw_exception( 'Refusing to commit without a started transaction' );
1534 my $dbh = $self->_dbh
1535 or $self->throw_exception('cannot COMMIT on a disconnected handle');
1541 my $dbh = $self->_dbh;
1543 if ($self->{transaction_depth} == 1) {
1544 $self->debugobj->txn_rollback()
1546 $self->{transaction_depth} = 0
1547 if $self->_dbh_autocommit;
1548 $self->_dbh_rollback;
1550 elsif($self->{transaction_depth} > 1) {
1551 $self->{transaction_depth}--;
1552 if ($self->auto_savepoint) {
1553 $self->svp_rollback;
1558 die DBIx::Class::Storage::NESTED_ROLLBACK_EXCEPTION->new;
1562 my $exception_class = "DBIx::Class::Storage::NESTED_ROLLBACK_EXCEPTION";
1564 if ($_ !~ /$exception_class/) {
1565 # ensure that a failed rollback resets the transaction depth
1566 $self->{transaction_depth} = $self->_dbh_autocommit ? 0 : 1;
1569 $self->throw_exception($_)
1575 my $dbh = $self->_dbh
1576 or $self->throw_exception('cannot ROLLBACK on a disconnected handle');
1580 # This used to be the top-half of _execute. It was split out to make it
1581 # easier to override in NoBindVars without duping the rest. It takes up
1582 # all of _execute's args, and emits $sql, @bind.
1583 sub _prep_for_execute {
1584 my ($self, $op, $extra_bind, $ident, $args) = @_;
1586 if( blessed $ident && $ident->isa("DBIx::Class::ResultSource") ) {
1587 $ident = $ident->from();
1590 my ($sql, @bind) = $self->sql_maker->$op($ident, @$args);
1593 map { ref $_ eq 'ARRAY' ? $_ : [ '!!dummy', $_ ] } @$extra_bind)
1595 return ($sql, \@bind);
1599 sub _fix_bind_params {
1600 my ($self, @bind) = @_;
1602 ### Turn @bind from something like this:
1603 ### ( [ "artist", 1 ], [ "cdid", 1, 3 ] )
1605 ### ( "'1'", "'1'", "'3'" )
1608 if ( defined( $_ && $_->[1] ) ) {
1609 map { qq{'$_'}; } @{$_}[ 1 .. $#$_ ];
1616 my ( $self, $sql, @bind ) = @_;
1618 if ( $self->debug ) {
1619 @bind = $self->_fix_bind_params(@bind);
1621 $self->debugobj->query_start( $sql, @bind );
1626 my ( $self, $sql, @bind ) = @_;
1628 if ( $self->debug ) {
1629 @bind = $self->_fix_bind_params(@bind);
1630 $self->debugobj->query_end( $sql, @bind );
1635 my ($self, $dbh, $op, $extra_bind, $ident, $bind_attributes, @args) = @_;
1637 my ($sql, $bind) = $self->_prep_for_execute($op, $extra_bind, $ident, \@args);
1639 $self->_query_start( $sql, @$bind );
1641 my $sth = $self->_sth($sql,$op);
1643 my $placeholder_index = 1;
1645 foreach my $bound (@$bind) {
1646 my $attributes = {};
1647 my($column_name, @data) = @$bound;
1649 if ($bind_attributes) {
1650 $attributes = $bind_attributes->{$column_name}
1651 if defined $bind_attributes->{$column_name};
1654 foreach my $data (@data) {
1655 my $ref = ref $data;
1657 if ($ref and overload::Method($data, '""') ) {
1660 elsif ($ref eq 'SCALAR') { # any scalarrefs are assumed to be bind_inouts
1661 $sth->bind_param_inout(
1662 $placeholder_index++,
1664 $self->_max_column_bytesize($ident, $column_name),
1670 $sth->bind_param($placeholder_index++, $data, $attributes);
1674 # Can this fail without throwing an exception anyways???
1675 my $rv = $sth->execute();
1676 $self->throw_exception(
1677 $sth->errstr || $sth->err || 'Unknown error: execute() returned false, but error flags were not set...'
1680 $self->_query_end( $sql, @$bind );
1682 return (wantarray ? ($rv, $sth, @$bind) : $rv);
1687 $self->dbh_do('_dbh_execute', @_); # retry over disconnects
1690 sub _prefetch_autovalues {
1691 my ($self, $source, $to_insert) = @_;
1693 my $colinfo = $source->columns_info;
1696 for my $col (keys %$colinfo) {
1698 $colinfo->{$col}{auto_nextval}
1701 ! exists $to_insert->{$col}
1703 ref $to_insert->{$col} eq 'SCALAR'
1706 $values{$col} = $self->_sequence_fetch(
1708 ( $colinfo->{$col}{sequence} ||=
1709 $self->_dbh_get_autoinc_seq($self->_get_dbh, $source, $col)
1719 my ($self, $source, $to_insert) = @_;
1721 my $prefetched_values = $self->_prefetch_autovalues($source, $to_insert);
1724 $to_insert = { %$to_insert, %$prefetched_values };
1726 # list of primary keys we try to fetch from the database
1727 # both not-exists and scalarrefs are considered
1729 for ($source->primary_columns) {
1730 $fetch_pks{$_} = scalar keys %fetch_pks # so we can preserve order for prettyness
1731 if ! exists $to_insert->{$_} or ref $to_insert->{$_} eq 'SCALAR';
1734 my ($sqla_opts, @ir_container);
1735 if ($self->_use_insert_returning) {
1737 # retain order as declared in the resultsource
1738 for (sort { $fetch_pks{$a} <=> $fetch_pks{$b} } keys %fetch_pks ) {
1739 push @{$sqla_opts->{returning}}, $_;
1740 $sqla_opts->{returning_container} = \@ir_container
1741 if $self->_use_insert_returning_bound;
1745 my $bind_attributes = $self->source_bind_attributes($source);
1747 my ($rv, $sth) = $self->_execute('insert' => [], $source, $bind_attributes, $to_insert, $sqla_opts);
1751 if (my $retlist = $sqla_opts->{returning}) {
1752 @ir_container = try {
1753 local $SIG{__WARN__} = sub {};
1754 my @r = $sth->fetchrow_array;
1757 } unless @ir_container;
1759 @returned_cols{@$retlist} = @ir_container if @ir_container;
1762 return { %$prefetched_values, %returned_cols };
1766 ## Currently it is assumed that all values passed will be "normal", i.e. not
1767 ## scalar refs, or at least, all the same type as the first set, the statement is
1768 ## only prepped once.
1770 my ($self, $source, $cols, $data) = @_;
1773 @colvalues{@$cols} = (0..$#$cols);
1775 for my $i (0..$#$cols) {
1776 my $first_val = $data->[0][$i];
1777 next unless ref $first_val eq 'SCALAR';
1779 $colvalues{ $cols->[$i] } = $first_val;
1782 # check for bad data and stringify stringifiable objects
1783 my $bad_slice = sub {
1784 my ($msg, $col_idx, $slice_idx) = @_;
1785 $self->throw_exception(sprintf "%s for column '%s' in populate slice:\n%s",
1789 require Data::Dumper::Concise;
1790 local $Data::Dumper::Maxdepth = 1; # don't dump objects, if any
1791 Data::Dumper::Concise::Dumper ({
1792 map { $cols->[$_] => $data->[$slice_idx][$_] } (0 .. $#$cols)
1798 for my $datum_idx (0..$#$data) {
1799 my $datum = $data->[$datum_idx];
1801 for my $col_idx (0..$#$cols) {
1802 my $val = $datum->[$col_idx];
1803 my $sqla_bind = $colvalues{ $cols->[$col_idx] };
1804 my $is_literal_sql = (ref $sqla_bind) eq 'SCALAR';
1806 if ($is_literal_sql) {
1808 $bad_slice->('bind found where literal SQL expected', $col_idx, $datum_idx);
1810 elsif ((my $reftype = ref $val) ne 'SCALAR') {
1811 $bad_slice->("$reftype reference found where literal SQL expected",
1812 $col_idx, $datum_idx);
1814 elsif ($$val ne $$sqla_bind){
1815 $bad_slice->("inconsistent literal SQL value, expecting: '$$sqla_bind'",
1816 $col_idx, $datum_idx);
1819 elsif (my $reftype = ref $val) {
1821 if (overload::Method($val, '""')) {
1822 $datum->[$col_idx] = "".$val;
1825 $bad_slice->("$reftype reference found where bind expected",
1826 $col_idx, $datum_idx);
1832 my ($sql, $bind) = $self->_prep_for_execute (
1833 'insert', undef, $source, [\%colvalues]
1837 # if the bindlist is empty - make sure all "values" are in fact
1838 # literal scalarrefs. If not the case this means the storage ate
1839 # them away (e.g. the NoBindVars component) and interpolated them
1840 # directly into the SQL. This obviously can't be good for multi-inserts
1842 $self->throw_exception('Cannot insert_bulk without support for placeholders')
1843 if first { ref $_ ne 'SCALAR' } values %colvalues;
1846 # neither _execute_array, nor _execute_inserts_with_no_binds are
1847 # atomic (even if _execute_array is a single call). Thus a safety
1849 my $guard = $self->txn_scope_guard;
1851 $self->_query_start( $sql, @$bind ? [ dummy => '__BULK_INSERT__' ] : () );
1852 my $sth = $self->_sth($sql);
1855 #@bind = map { ref $_ ? ''.$_ : $_ } @bind; # stringify args
1856 $self->_execute_array( $source, $sth, $bind, $cols, $data );
1859 # bind_param_array doesn't work if there are no binds
1860 $self->_dbh_execute_inserts_with_no_binds( $sth, scalar @$data );
1864 $self->_query_end( $sql, @$bind ? [ dummy => '__BULK_INSERT__' ] : () );
1868 return (wantarray ? ($rv, $sth, @$bind) : $rv);
1871 sub _execute_array {
1872 my ($self, $source, $sth, $bind, $cols, $data, @extra) = @_;
1874 ## This must be an arrayref, else nothing works!
1875 my $tuple_status = [];
1877 ## Get the bind_attributes, if any exist
1878 my $bind_attributes = $self->source_bind_attributes($source);
1880 ## Bind the values and execute
1881 my $placeholder_index = 1;
1883 foreach my $bound (@$bind) {
1885 my $attributes = {};
1886 my ($column_name, $data_index) = @$bound;
1888 if( $bind_attributes ) {
1889 $attributes = $bind_attributes->{$column_name}
1890 if defined $bind_attributes->{$column_name};
1893 my @data = map { $_->[$data_index] } @$data;
1895 $sth->bind_param_array(
1898 (%$attributes ? $attributes : ()),
1900 $placeholder_index++;
1905 $rv = $self->_dbh_execute_array($sth, $tuple_status, @extra);
1911 # Not all DBDs are created equal. Some throw on error, some return
1912 # an undef $rv, and some set $sth->err - try whatever we can
1913 $err = ($sth->errstr || 'UNKNOWN ERROR ($sth->errstr is unset)') if (
1916 ( !defined $rv or $sth->err )
1919 # Statement must finish even if there was an exception.
1924 $err = shift unless defined $err
1929 ++$i while $i <= $#$tuple_status && !ref $tuple_status->[$i];
1931 $self->throw_exception("Unexpected populate error: $err")
1932 if ($i > $#$tuple_status);
1934 require Data::Dumper::Concise;
1935 $self->throw_exception(sprintf "%s for populate slice:\n%s",
1936 ($tuple_status->[$i][1] || $err),
1937 Data::Dumper::Concise::Dumper( { map { $cols->[$_] => $data->[$i][$_] } (0 .. $#$cols) } ),
1944 sub _dbh_execute_array {
1945 my ($self, $sth, $tuple_status, @extra) = @_;
1947 return $sth->execute_array({ArrayTupleStatus => $tuple_status});
1950 sub _dbh_execute_inserts_with_no_binds {
1951 my ($self, $sth, $count) = @_;
1955 my $dbh = $self->_get_dbh;
1956 local $dbh->{RaiseError} = 1;
1957 local $dbh->{PrintError} = 0;
1959 $sth->execute foreach 1..$count;
1965 # Make sure statement is finished even if there was an exception.
1970 $err = shift unless defined $err;
1973 $self->throw_exception($err) if defined $err;
1979 my ($self, $source, @args) = @_;
1981 my $bind_attrs = $self->source_bind_attributes($source);
1983 return $self->_execute('update' => [], $source, $bind_attrs, @args);
1988 my ($self, $source, @args) = @_;
1990 my $bind_attrs = $self->source_bind_attributes($source);
1992 return $self->_execute('delete' => [], $source, $bind_attrs, @args);
1995 # We were sent here because the $rs contains a complex search
1996 # which will require a subquery to select the correct rows
1997 # (i.e. joined or limited resultsets, or non-introspectable conditions)
1999 # Generating a single PK column subquery is trivial and supported
2000 # by all RDBMS. However if we have a multicolumn PK, things get ugly.
2001 # Look at _multipk_update_delete()
2002 sub _subq_update_delete {
2004 my ($rs, $op, $values) = @_;
2006 my $rsrc = $rs->result_source;
2008 # quick check if we got a sane rs on our hands
2009 my @pcols = $rsrc->_pri_cols;
2011 my $sel = $rs->_resolved_attrs->{select};
2012 $sel = [ $sel ] unless ref $sel eq 'ARRAY';
2015 join ("\x00", map { join '.', $rs->{attrs}{alias}, $_ } sort @pcols)
2017 join ("\x00", sort @$sel )
2019 $self->throw_exception (
2020 '_subq_update_delete can not be called on resultsets selecting columns other than the primary keys'
2027 $op eq 'update' ? $values : (),
2028 { $pcols[0] => { -in => $rs->as_query } },
2033 return $self->_multipk_update_delete (@_);
2037 # ANSI SQL does not provide a reliable way to perform a multicol-PK
2038 # resultset update/delete involving subqueries. So by default resort
2039 # to simple (and inefficient) delete_all style per-row operations,
2040 # while allowing specific storages to override this with a faster
2043 sub _multipk_update_delete {
2044 return shift->_per_row_update_delete (@_);
2047 # This is the default loop used to delete/update rows for multi PK
2048 # resultsets, and used by mysql exclusively (because it can't do anything
2051 # We do not use $row->$op style queries, because resultset update/delete
2052 # is not expected to cascade (this is what delete_all/update_all is for).
2054 # There should be no race conditions as the entire operation is rolled
2057 sub _per_row_update_delete {
2059 my ($rs, $op, $values) = @_;
2061 my $rsrc = $rs->result_source;
2062 my @pcols = $rsrc->_pri_cols;
2064 my $guard = $self->txn_scope_guard;
2066 # emulate the return value of $sth->execute for non-selects
2067 my $row_cnt = '0E0';
2069 my $subrs_cur = $rs->cursor;
2070 my @all_pk = $subrs_cur->all;
2071 for my $pks ( @all_pk) {
2074 for my $i (0.. $#pcols) {
2075 $cond->{$pcols[$i]} = $pks->[$i];
2080 $op eq 'update' ? $values : (),
2094 $self->_execute($self->_select_args(@_));
2097 sub _select_args_to_query {
2100 # my ($op, $bind, $ident, $bind_attrs, $select, $cond, $rs_attrs, $rows, $offset)
2101 # = $self->_select_args($ident, $select, $cond, $attrs);
2102 my ($op, $bind, $ident, $bind_attrs, @args) =
2103 $self->_select_args(@_);
2105 # my ($sql, $prepared_bind) = $self->_prep_for_execute($op, $bind, $ident, [ $select, $cond, $rs_attrs, $rows, $offset ]);
2106 my ($sql, $prepared_bind) = $self->_prep_for_execute($op, $bind, $ident, \@args);
2107 $prepared_bind ||= [];
2110 ? ($sql, $prepared_bind, $bind_attrs)
2111 : \[ "($sql)", @$prepared_bind ]
2116 my ($self, $ident, $select, $where, $attrs) = @_;
2118 my $sql_maker = $self->sql_maker;
2119 my ($alias2source, $rs_alias) = $self->_resolve_ident_sources ($ident);
2126 $rs_alias && $alias2source->{$rs_alias}
2127 ? ( _rsroot_rsrc => $alias2source->{$rs_alias} )
2132 # calculate bind_attrs before possible $ident mangling
2133 my $bind_attrs = {};
2134 for my $alias (keys %$alias2source) {
2135 my $bindtypes = $self->source_bind_attributes ($alias2source->{$alias}) || {};
2136 for my $col (keys %$bindtypes) {
2138 my $fqcn = join ('.', $alias, $col);
2139 $bind_attrs->{$fqcn} = $bindtypes->{$col} if $bindtypes->{$col};
2141 # Unqualified column names are nice, but at the same time can be
2142 # rather ambiguous. What we do here is basically go along with
2143 # the loop, adding an unqualified column slot to $bind_attrs,
2144 # alongside the fully qualified name. As soon as we encounter
2145 # another column by that name (which would imply another table)
2146 # we unset the unqualified slot and never add any info to it
2147 # to avoid erroneous type binding. If this happens the users
2148 # only choice will be to fully qualify his column name
2150 if (exists $bind_attrs->{$col}) {
2151 $bind_attrs->{$col} = {};
2154 $bind_attrs->{$col} = $bind_attrs->{$fqcn};
2159 # Sanity check the attributes (SQLMaker does it too, but
2160 # in case of a software_limit we'll never reach there)
2161 if (defined $attrs->{offset}) {
2162 $self->throw_exception('A supplied offset attribute must be a non-negative integer')
2163 if ( $attrs->{offset} =~ /\D/ or $attrs->{offset} < 0 );
2165 $attrs->{offset} ||= 0;
2167 if (defined $attrs->{rows}) {
2168 $self->throw_exception("The rows attribute must be a positive integer if present")
2169 if ( $attrs->{rows} =~ /\D/ or $attrs->{rows} <= 0 );
2171 elsif ($attrs->{offset}) {
2172 # MySQL actually recommends this approach. I cringe.
2173 $attrs->{rows} = $sql_maker->__max_int;
2178 # see if we need to tear the prefetch apart otherwise delegate the limiting to the
2179 # storage, unless software limit was requested
2182 ( $attrs->{rows} && keys %{$attrs->{collapse}} )
2184 # grouped prefetch (to satisfy group_by == select)
2185 ( $attrs->{group_by}
2187 @{$attrs->{group_by}}
2189 $attrs->{_prefetch_selector_range}
2192 ($ident, $select, $where, $attrs)
2193 = $self->_adjust_select_args_for_complex_prefetch ($ident, $select, $where, $attrs);
2195 elsif (! $attrs->{software_limit} ) {
2196 push @limit, $attrs->{rows}, $attrs->{offset};
2199 # try to simplify the joinmap further (prune unreferenced type-single joins)
2200 $ident = $self->_prune_unused_joins ($ident, $select, $where, $attrs);
2203 # This would be the point to deflate anything found in $where
2204 # (and leave $attrs->{bind} intact). Problem is - inflators historically
2205 # expect a row object. And all we have is a resultsource (it is trivial
2206 # to extract deflator coderefs via $alias2source above).
2208 # I don't see a way forward other than changing the way deflators are
2209 # invoked, and that's just bad...
2212 return ('select', $attrs->{bind}, $ident, $bind_attrs, $select, $where, $attrs, @limit);
2215 # Returns a counting SELECT for a simple count
2216 # query. Abstracted so that a storage could override
2217 # this to { count => 'firstcol' } or whatever makes
2218 # sense as a performance optimization
2220 #my ($self, $source, $rs_attrs) = @_;
2221 return { count => '*' };
2225 sub source_bind_attributes {
2226 my ($self, $source) = @_;
2228 my $bind_attributes;
2230 my $colinfo = $source->columns_info;
2232 for my $col (keys %$colinfo) {
2233 if (my $dt = $colinfo->{$col}{data_type} ) {
2234 $bind_attributes->{$col} = $self->bind_attribute_by_data_type($dt)
2238 return $bind_attributes;
2245 =item Arguments: $ident, $select, $condition, $attrs
2249 Handle a SQL select statement.
2255 my ($ident, $select, $condition, $attrs) = @_;
2256 return $self->cursor_class->new($self, \@_, $attrs);
2261 my ($rv, $sth, @bind) = $self->_select(@_);
2262 my @row = $sth->fetchrow_array;
2263 my @nextrow = $sth->fetchrow_array if @row;
2264 if(@row && @nextrow) {
2265 carp "Query returned more than one row. SQL that returns multiple rows is DEPRECATED for ->find and ->single";
2267 # Need to call finish() to work round broken DBDs
2272 =head2 sql_limit_dialect
2274 This is an accessor for the default SQL limit dialect used by a particular
2275 storage driver. Can be overridden by supplying an explicit L</limit_dialect>
2276 to L<DBIx::Class::Schema/connect>. For a list of available limit dialects
2277 see L<DBIx::Class::SQLMaker::LimitDialects>.
2283 =item Arguments: $sql
2287 Returns a L<DBI> sth (statement handle) for the supplied SQL.
2292 my ($self, $dbh, $sql) = @_;
2294 # 3 is the if_active parameter which avoids active sth re-use
2295 my $sth = $self->disable_sth_caching
2296 ? $dbh->prepare($sql)
2297 : $dbh->prepare_cached($sql, {}, 3);
2299 # XXX You would think RaiseError would make this impossible,
2300 # but apparently that's not true :(
2301 $self->throw_exception(
2304 sprintf( "\$dbh->prepare() of '%s' through %s failed *silently* without "
2305 .'an exception and/or setting $dbh->errstr',
2307 ? substr($sql, 0, 20) . '...'
2310 'DBD::' . $dbh->{Driver}{Name},
2318 carp_unique 'sth was mistakenly marked/documented as public, stop calling it (will be removed before DBIC v0.09)';
2323 my ($self, $sql) = @_;
2324 $self->dbh_do('_dbh_sth', $sql); # retry over disconnects
2327 sub _dbh_columns_info_for {
2328 my ($self, $dbh, $table) = @_;
2330 if ($dbh->can('column_info')) {
2334 my ($schema,$tab) = $table =~ /^(.+?)\.(.+)$/ ? ($1,$2) : (undef,$table);
2335 my $sth = $dbh->column_info( undef,$schema, $tab, '%' );
2337 while ( my $info = $sth->fetchrow_hashref() ){
2339 $column_info{data_type} = $info->{TYPE_NAME};
2340 $column_info{size} = $info->{COLUMN_SIZE};
2341 $column_info{is_nullable} = $info->{NULLABLE} ? 1 : 0;
2342 $column_info{default_value} = $info->{COLUMN_DEF};
2343 my $col_name = $info->{COLUMN_NAME};
2344 $col_name =~ s/^\"(.*)\"$/$1/;
2346 $result{$col_name} = \%column_info;
2351 return \%result if !$caught && scalar keys %result;
2355 my $sth = $dbh->prepare($self->sql_maker->select($table, undef, \'1 = 0'));
2357 my @columns = @{$sth->{NAME_lc}};
2358 for my $i ( 0 .. $#columns ){
2360 $column_info{data_type} = $sth->{TYPE}->[$i];
2361 $column_info{size} = $sth->{PRECISION}->[$i];
2362 $column_info{is_nullable} = $sth->{NULLABLE}->[$i] ? 1 : 0;
2364 if ($column_info{data_type} =~ m/^(.*?)\((.*?)\)$/) {
2365 $column_info{data_type} = $1;
2366 $column_info{size} = $2;
2369 $result{$columns[$i]} = \%column_info;
2373 foreach my $col (keys %result) {
2374 my $colinfo = $result{$col};
2375 my $type_num = $colinfo->{data_type};
2377 if(defined $type_num && $dbh->can('type_info')) {
2378 my $type_info = $dbh->type_info($type_num);
2379 $type_name = $type_info->{TYPE_NAME} if $type_info;
2380 $colinfo->{data_type} = $type_name if $type_name;
2387 sub columns_info_for {
2388 my ($self, $table) = @_;
2389 $self->_dbh_columns_info_for ($self->_get_dbh, $table);
2392 =head2 last_insert_id
2394 Return the row id of the last insert.
2398 sub _dbh_last_insert_id {
2399 my ($self, $dbh, $source, $col) = @_;
2401 my $id = try { $dbh->last_insert_id (undef, undef, $source->name, $col) };
2403 return $id if defined $id;
2405 my $class = ref $self;
2406 $self->throw_exception ("No storage specific _dbh_last_insert_id() method implemented in $class, and the generic DBI::last_insert_id() failed");
2409 sub last_insert_id {
2411 $self->_dbh_last_insert_id ($self->_dbh, @_);
2414 =head2 _native_data_type
2418 =item Arguments: $type_name
2422 This API is B<EXPERIMENTAL>, will almost definitely change in the future, and
2423 currently only used by L<::AutoCast|DBIx::Class::Storage::DBI::AutoCast> and
2424 L<::Sybase::ASE|DBIx::Class::Storage::DBI::Sybase::ASE>.
2426 The default implementation returns C<undef>, implement in your Storage driver if
2427 you need this functionality.
2429 Should map types from other databases to the native RDBMS type, for example
2430 C<VARCHAR2> to C<VARCHAR>.
2432 Types with modifiers should map to the underlying data type. For example,
2433 C<INTEGER AUTO_INCREMENT> should become C<INTEGER>.
2435 Composite types should map to the container type, for example
2436 C<ENUM(foo,bar,baz)> becomes C<ENUM>.
2440 sub _native_data_type {
2441 #my ($self, $data_type) = @_;
2445 # Check if placeholders are supported at all
2446 sub _determine_supports_placeholders {
2448 my $dbh = $self->_get_dbh;
2450 # some drivers provide a $dbh attribute (e.g. Sybase and $dbh->{syb_dynamic_supported})
2451 # but it is inaccurate more often than not
2453 local $dbh->{PrintError} = 0;
2454 local $dbh->{RaiseError} = 1;
2455 $dbh->do('select ?', {}, 1);
2463 # Check if placeholders bound to non-string types throw exceptions
2465 sub _determine_supports_typeless_placeholders {
2467 my $dbh = $self->_get_dbh;
2470 local $dbh->{PrintError} = 0;
2471 local $dbh->{RaiseError} = 1;
2472 # this specifically tests a bind that is NOT a string
2473 $dbh->do('select 1 where 1 = ?', {}, 1);
2483 Returns the database driver name.
2488 shift->_get_dbh->{Driver}->{Name};
2491 =head2 bind_attribute_by_data_type
2493 Given a datatype from column info, returns a database specific bind
2494 attribute for C<< $dbh->bind_param($val,$attribute) >> or nothing if we will
2495 let the database planner just handle it.
2497 Generally only needed for special case column types, like bytea in postgres.
2501 sub bind_attribute_by_data_type {
2505 =head2 is_datatype_numeric
2507 Given a datatype from column_info, returns a boolean value indicating if
2508 the current RDBMS considers it a numeric value. This controls how
2509 L<DBIx::Class::Row/set_column> decides whether to mark the column as
2510 dirty - when the datatype is deemed numeric a C<< != >> comparison will
2511 be performed instead of the usual C<eq>.
2515 sub is_datatype_numeric {
2516 my ($self, $dt) = @_;
2518 return 0 unless $dt;
2520 return $dt =~ /^ (?:
2521 numeric | int(?:eger)? | (?:tiny|small|medium|big)int | dec(?:imal)? | real | float | double (?: \s+ precision)? | (?:big)?serial
2526 =head2 create_ddl_dir
2530 =item Arguments: $schema \@databases, $version, $directory, $preversion, \%sqlt_args
2534 Creates a SQL file based on the Schema, for each of the specified
2535 database engines in C<\@databases> in the given directory.
2536 (note: specify L<SQL::Translator> names, not L<DBI> driver names).
2538 Given a previous version number, this will also create a file containing
2539 the ALTER TABLE statements to transform the previous schema into the
2540 current one. Note that these statements may contain C<DROP TABLE> or
2541 C<DROP COLUMN> statements that can potentially destroy data.
2543 The file names are created using the C<ddl_filename> method below, please
2544 override this method in your schema if you would like a different file
2545 name format. For the ALTER file, the same format is used, replacing
2546 $version in the name with "$preversion-$version".
2548 See L<SQL::Translator/METHODS> for a list of values for C<\%sqlt_args>.
2549 The most common value for this would be C<< { add_drop_table => 1 } >>
2550 to have the SQL produced include a C<DROP TABLE> statement for each table
2551 created. For quoting purposes supply C<quote_table_names> and
2552 C<quote_field_names>.
2554 If no arguments are passed, then the following default values are assumed:
2558 =item databases - ['MySQL', 'SQLite', 'PostgreSQL']
2560 =item version - $schema->schema_version
2562 =item directory - './'
2564 =item preversion - <none>
2568 By default, C<\%sqlt_args> will have
2570 { add_drop_table => 1, ignore_constraint_names => 1, ignore_index_names => 1 }
2572 merged with the hash passed in. To disable any of those features, pass in a
2573 hashref like the following
2575 { ignore_constraint_names => 0, # ... other options }
2578 WARNING: You are strongly advised to check all SQL files created, before applying
2583 sub create_ddl_dir {
2584 my ($self, $schema, $databases, $version, $dir, $preversion, $sqltargs) = @_;
2587 carp "No directory given, using ./\n";
2592 (require File::Path and File::Path::make_path ("$dir")) # make_path does not like objects (i.e. Path::Class::Dir)
2594 $self->throw_exception(
2595 "Failed to create '$dir': " . ($! || $@ || 'error unknown')
2599 $self->throw_exception ("Directory '$dir' does not exist\n") unless(-d $dir);
2601 $databases ||= ['MySQL', 'SQLite', 'PostgreSQL'];
2602 $databases = [ $databases ] if(ref($databases) ne 'ARRAY');
2604 my $schema_version = $schema->schema_version || '1.x';
2605 $version ||= $schema_version;
2608 add_drop_table => 1,
2609 ignore_constraint_names => 1,
2610 ignore_index_names => 1,
2614 unless (DBIx::Class::Optional::Dependencies->req_ok_for ('deploy')) {
2615 $self->throw_exception("Can't create a ddl file without " . DBIx::Class::Optional::Dependencies->req_missing_for ('deploy') );
2618 my $sqlt = SQL::Translator->new( $sqltargs );
2620 $sqlt->parser('SQL::Translator::Parser::DBIx::Class');
2621 my $sqlt_schema = $sqlt->translate({ data => $schema })
2622 or $self->throw_exception ($sqlt->error);
2624 foreach my $db (@$databases) {
2626 $sqlt->{schema} = $sqlt_schema;
2627 $sqlt->producer($db);
2630 my $filename = $schema->ddl_filename($db, $version, $dir);
2631 if (-e $filename && ($version eq $schema_version )) {
2632 # if we are dumping the current version, overwrite the DDL
2633 carp "Overwriting existing DDL file - $filename";
2637 my $output = $sqlt->translate;
2639 carp("Failed to translate to $db, skipping. (" . $sqlt->error . ")");
2642 if(!open($file, ">$filename")) {
2643 $self->throw_exception("Can't open $filename for writing ($!)");
2646 print $file $output;
2649 next unless ($preversion);
2651 require SQL::Translator::Diff;
2653 my $prefilename = $schema->ddl_filename($db, $preversion, $dir);
2654 if(!-e $prefilename) {
2655 carp("No previous schema file found ($prefilename)");
2659 my $difffile = $schema->ddl_filename($db, $version, $dir, $preversion);
2661 carp("Overwriting existing diff file - $difffile");
2667 my $t = SQL::Translator->new($sqltargs);
2672 or $self->throw_exception ($t->error);
2674 my $out = $t->translate( $prefilename )
2675 or $self->throw_exception ($t->error);
2677 $source_schema = $t->schema;
2679 $source_schema->name( $prefilename )
2680 unless ( $source_schema->name );
2683 # The "new" style of producers have sane normalization and can support
2684 # diffing a SQL file against a DBIC->SQLT schema. Old style ones don't
2685 # And we have to diff parsed SQL against parsed SQL.
2686 my $dest_schema = $sqlt_schema;
2688 unless ( "SQL::Translator::Producer::$db"->can('preprocess_schema') ) {
2689 my $t = SQL::Translator->new($sqltargs);
2694 or $self->throw_exception ($t->error);
2696 my $out = $t->translate( $filename )
2697 or $self->throw_exception ($t->error);
2699 $dest_schema = $t->schema;
2701 $dest_schema->name( $filename )
2702 unless $dest_schema->name;
2705 my $diff = SQL::Translator::Diff::schema_diff($source_schema, $db,
2709 if(!open $file, ">$difffile") {
2710 $self->throw_exception("Can't write to $difffile ($!)");
2718 =head2 deployment_statements
2722 =item Arguments: $schema, $type, $version, $directory, $sqlt_args
2726 Returns the statements used by L</deploy> and L<DBIx::Class::Schema/deploy>.
2728 The L<SQL::Translator> (not L<DBI>) database driver name can be explicitly
2729 provided in C<$type>, otherwise the result of L</sqlt_type> is used as default.
2731 C<$directory> is used to return statements from files in a previously created
2732 L</create_ddl_dir> directory and is optional. The filenames are constructed
2733 from L<DBIx::Class::Schema/ddl_filename>, the schema name and the C<$version>.
2735 If no C<$directory> is specified then the statements are constructed on the
2736 fly using L<SQL::Translator> and C<$version> is ignored.
2738 See L<SQL::Translator/METHODS> for a list of values for C<$sqlt_args>.
# Return the DDL statements used by deploy().  Either slurps a
# previously-generated DDL file from $dir, or generates statements on the
# fly via SQL::Translator.  NOTE(review): several original source lines are
# elided in this view, so branch boundaries below are partially missing.
2742 sub deployment_statements {
2743 my ($self, $schema, $type, $version, $dir, $sqltargs) = @_;
# $type defaults to this storage's SQL::Translator type name;
# $version defaults to the schema's declared version (or '1.x').
2744 $type ||= $self->sqlt_type;
2745 $version ||= $schema->schema_version || '1.x';
2747 my $filename = $schema->ddl_filename($type, $version, $dir);
2750 # FIXME replace this block when a proper sane sql parser is available
# Pre-generated-file branch: return the file contents verbatim.
# NOTE(review): 2-arg open with interpolated filename -- three-arg
# open(my $fh, '<', $filename) would be safer; left unchanged here.
2752 open($file, "<$filename")
2753 or $self->throw_exception("Can't open $filename ($!)");
2756 return join('', @rows);
# On-the-fly branch requires the optional 'deploy' dependency group.
2759 unless (DBIx::Class::Optional::Dependencies->req_ok_for ('deploy') ) {
2760 $self->throw_exception("Can't deploy without a ddl_dir or " . DBIx::Class::Optional::Dependencies->req_missing_for ('deploy') );
2763 # sources needs to be a parser arg, but for simplicity allow it at top level
2765 $sqltargs->{parser_args}{sources} = delete $sqltargs->{sources}
2766 if exists $sqltargs->{sources};
2768 my $tr = SQL::Translator->new(
2769 producer => "SQL::Translator::Producer::${type}",
2771 parser => 'SQL::Translator::Parser::DBIx::Class',
# translate() is called in list or scalar context to mirror the caller's
# context (see wantarray return below).
2777 @ret = $tr->translate;
2780 $ret[0] = $tr->translate;
2783 $self->throw_exception( 'Unable to produce deployment statements: ' . $tr->error)
2784 unless (@ret && defined $ret[0]);
2786 return wantarray ? @ret : $ret[0];
2789 # FIXME deploy() currently does not accurately report sql errors
2790 # Will always return true while errors are warned
# NOTE(review): the 'sub deploy {' line itself is missing from this view;
# what follows is the interior of deploy().
2792 my ($self, $schema, $type, $sqltargs, $dir) = @_;
# Per-statement closure: skip SQL comments, transaction wrappers and
# whitespace-only lines, then execute via dbh_do, downgrading any error
# to a warning (see FIXME above).
2796 return if($line =~ /^--/);
2797 # next if($line =~ /^DROP/m);
2798 return if($line =~ /^BEGIN TRANSACTION/m);
2799 return if($line =~ /^COMMIT/m);
2800 return if $line =~ /^\s+$/; # skip whitespace only
2801 $self->_query_start($line);
2803 # do a dbh_do cycle here, as we need some error checking in
2804 # place (even though we will ignore errors)
2805 $self->dbh_do (sub { $_[1]->do($line) });
# Errors from the statement are warned, not rethrown.
2807 carp qq{$_ (running "${line}")};
2809 $self->_query_end($line);
# Fetch the statements with comments suppressed, then dispatch:
# multiple elements are assumed pre-split, a single element is split on
# line comments / statement terminators.
2811 my @statements = $schema->deployment_statements($type, undef, $dir, { %{ $sqltargs || {} }, no_comments => 1 } );
2812 if (@statements > 1) {
2813 foreach my $statement (@statements) {
2814 $deploy->( $statement );
2817 elsif (@statements == 1) {
2818 # split on single line comments and end of statements
2819 foreach my $line ( split(/\s*--.*\n|;\n/, $statements[0])) {
2825 =head2 datetime_parser
2827 Returns the datetime parser class
# Return the datetime parser class, memoized on the storage object.
# The parser is built lazily on first access via build_datetime_parser().
2831 sub datetime_parser {
2833 return $self->{datetime_parser} ||= do {
2834 $self->build_datetime_parser(@_);
2838 =head2 datetime_parser_type
2840 Defines the datetime parser class - currently defaults to L<DateTime::Format::MySQL>
2842 =head2 build_datetime_parser
2844 See L</datetime_parser>
# Resolve the configured datetime_parser_type class (remainder of the
# body -- presumably loading/returning $type -- is elided in this view).
2848 sub build_datetime_parser {
2850 my $type = $self->datetime_parser_type(@_);
2855 =head2 is_replicating
2857 A boolean that reports if a particular L<DBIx::Class::Storage::DBI> is set to
2858 replicate from a master database. Default is undef, which is the result
2859 returned by databases that don't support replication.
# Body elided in this view; per the POD above, the default returns undef
# (i.e. this storage is not replicating).
2863 sub is_replicating {
2868 =head2 lag_behind_master
2870 Returns a number that represents a certain amount of lag behind a master db
2871 when a given storage is replicating. The number is database dependent, but
2872 starts at zero and increases with the amount of lag. Default is undef
# Body elided in this view; per the POD above, the default returns undef
# (no lag information available for a non-replicating storage).
2876 sub lag_behind_master {
2880 =head2 relname_to_table_alias
2884 =item Arguments: $relname, $join_count
2888 L<DBIx::Class> uses L<DBIx::Class::Relationship> names as table aliases in
2891 This hook is to allow specific L<DBIx::Class::Storage> drivers to change the
2892 way these aliases are named.
2894 The default behavior is C<< "${relname}_${join_count}" if $join_count > 1 >>,
2895 otherwise C<"$relname">.
# Map a relationship name (plus join count) to a table alias.
# Disambiguates repeated joins by appending the join count when the same
# relationship is joined more than once.
2899 sub relname_to_table_alias {
2900 my ($self, $relname, $join_count) = @_;
# "rel" for the first join, "rel_2", "rel_3", ... for subsequent ones.
2902 my $alias = ($join_count && $join_count > 1 ?
2903 join('_', $relname, $join_count) : $relname);
2908 # The size in bytes to use for DBI's ->bind_param_inout, this is the generic
2909 # version and it may be necessary to amend or override it for a specific storage
2910 # if such binds are necessary.
2911 sub _max_column_bytesize {
2912 my ($self, $source, $col) = @_;
# Result is cached in the column-info hash under _max_bytesize.
2914 my $inf = $source->column_info($col);
2915 return $inf->{_max_bytesize} ||= do {
2919 if (my $data_type = $inf->{data_type}) {
2920 $data_type = lc($data_type);
2922 # String/sized-binary types
2923 if ($data_type =~ /^(?:l?(?:var)?char(?:acter)?(?:\s*varying)?
2924 |(?:var)?binary(?:\s*varying)?|raw)\b/x
# Declared column size is taken as the byte size directly.
2926 $max_size = $inf->{size};
2928 # Other charset/unicode types, assume scale of 4
2929 elsif ($data_type =~ /^(?:national\s*character(?:\s*varying)?|nchar
# Worst-case 4 bytes per character for national/unicode types.
2933 $max_size = $inf->{size} * 4 if $inf->{size};
2936 elsif ($self->_is_lob_type($data_type)) {
2937 # default to longreadlen
2940 $max_size = 100; # for all other (numeric?) datatypes
# Final fallback: the handle's LongReadLen, or 8000 bytes.
2944 $max_size ||= $self->_get_dbh->{LongReadLen} || 8000;
2948 # Determine if a data_type is some type of BLOB
2949 # FIXME: these regexes are expensive, result of these checks should be cached in
# NOTE(review): the 'sub _is_lob_type {' line is elided in this view.
# Heuristic: true when $data_type names any large-object-ish type
# (binary or textual); boolean via the regex match result.
2952 my ($self, $data_type) = @_;
2953 $data_type && ($data_type =~ /lob|bfile|text|image|bytea|memo/i
2954 || $data_type =~ /^long(?:\s+(?:raw|bit\s*varying|varbit|binary
2955 |varchar|character\s*varying|nvarchar
2956 |national\s*character\s*varying))?\z/xi);
# True when $data_type names a binary large-object type
# (blob, bfile, image, bytea, "long", "long raw", etc.).
2959 sub _is_binary_lob_type {
2960 my ($self, $data_type) = @_;
2961 $data_type && ($data_type =~ /blob|bfile|image|bytea/i
2962 || $data_type =~ /^long(?:\s+(?:raw|bit\s*varying|varbit|binary))?\z/xi);
# True when $data_type names a textual large-object type
# (clob, memo, "long varchar", "long nvarchar", etc.).
2965 sub _is_text_lob_type {
2966 my ($self, $data_type) = @_;
2967 $data_type && ($data_type =~ /^(?:clob|memo)\z/i
2968 || $data_type =~ /^long(?:\s+(?:varchar|character\s*varying|nvarchar
2969 |national\s*character\s*varying))\z/xi);
2976 =head2 DBIx::Class and AutoCommit
2978 DBIx::Class can do some wonderful magic with handling exceptions,
2979 disconnections, and transactions when you use C<< AutoCommit => 1 >>
2980 (the default) combined with C<txn_do> for transaction support.
2982 If you set C<< AutoCommit => 0 >> in your connect info, then you are always
2983 in an assumed transaction between commits, and you're telling us you'd
2984 like to manage that manually. A lot of the magic protections offered by
2985 this module will go away. We can't protect you from exceptions due to database
2986 disconnects because we don't know anything about how to restart your
2987 transactions. You're on your own for handling all sorts of exceptional
2988 cases if you choose the C<< AutoCommit => 0 >> path, just as you would
2994 Matt S. Trout <mst@shadowcatsystems.co.uk>
2996 Andy Grundman <andy@hybridized.org>
3000 You may distribute this code under the same terms as Perl itself.