use DBIx::Class::Carp;
use Scalar::Util qw/refaddr weaken reftype blessed/;
-use List::Util qw/first/;
use Context::Preserve 'preserve_context';
-use Try::Tiny;
-use Data::Compare (); # no imports!!! guard against insane architecture
use SQL::Abstract qw(is_plain_value is_literal_value);
-use DBIx::Class::_Util qw(quote_sub perlstring);
+use DBIx::Class::_Util qw(
+ quote_sub perlstring serialize dump_value
+ dbic_internal_try dbic_internal_catch
+ detected_reinvoked_destructor scope_guard
+ mkdir_p
+);
use namespace::clean;
# default cursor class, overridable in connect_info attributes
txn_begin
insert
- insert_bulk
update
delete
select
select_single
+ _insert_bulk
+
with_deferred_fk_checks
get_use_dbms_capability
my $orig = __PACKAGE__->can ($meth)
or die "$meth is not a ::Storage::DBI method!";
- my $is_getter = $storage_accessor_idx->{$meth} ? 0 : 1;
+ my $possibly_a_setter = $storage_accessor_idx->{$meth} ? 1 : 0;
quote_sub
- __PACKAGE__ ."::$meth", sprintf( <<'EOC', $is_getter, perlstring $meth ), { '$orig' => \$orig };
+ __PACKAGE__ ."::$meth", sprintf( <<'EOC', $possibly_a_setter, perlstring $meth ), { '$orig' => \$orig };
if (
+ # if this is an actual *setter* - just set it, no need to connect
+ # and determine the driver
+ !( %1$s and @_ > 1 )
+ and
# only fire when invoked on an instance, a valid class-based invocation
# would e.g. be setting a default for an inherited accessor
ref $_[0]
and
! $_[0]->{_in_determine_driver}
and
- # if this is a known *setter* - just set it, no need to connect
- # and determine the driver
- ( %1$s or @_ <= 1 )
- and
# Only try to determine stuff if we have *something* that either is or can
# provide a DSN. Allows for bare $schema's generated with a plain ->connect()
# to still be marginally useful
weaken (
$seek_and_destroy{ refaddr($_[0]) } = $_[0]
);
+
+ # Dummy NEXTSTATE ensuring that all temporaries on the stack are garbage
+ # collected before leaving this scope. Depending on the code above, this
+ # may very well be just a preventive measure guarding future modifications
+ undef;
}
END {
- local $?; # just in case the DBI destructor changes it somehow
- # destroy just the object if not native to this process
- $_->_verify_pid for (grep
- { defined $_ }
- values %seek_and_destroy
- );
+ if(
+ ! DBIx::Class::_ENV_::BROKEN_FORK
+ and
+ my @instances = grep { defined $_ } values %seek_and_destroy
+ ) {
+ local $?; # just in case the DBI destructor changes it somehow
+
+ # disarm the handle if not native to this process (see comment on top)
+ $_->_verify_pid for @instances;
+ }
+
+ # Dummy NEXTSTATE ensuring that all temporaries on the stack are garbage
+ # collected before leaving this scope. Depending on the code above, this
+ # may very well be just a preventive measure guarding future modifications
+ undef;
}
- sub CLONE {
+ sub DBIx::Class::__DBI_Storage_iThreads_handler__::CLONE {
# As per DBI's recommendation, DBIC disconnects all handles as
# soon as possible (DBIC will reconnect only on demand from within
# the thread)
for (@instances) {
$_->_dbh(undef);
-
- $_->transaction_depth(0);
- $_->savepoints([]);
+ $_->disconnect;
# properly renumber existing refs
$_->_arm_global_destructor
}
+
+ # Dummy NEXTSTATE ensuring that all temporaries on the stack are garbage
+ # collected before leaving this scope. Depending on the code above, this
+ # may very well be just a preventive measure guarding future modifications
+ undef;
}
}
sub DESTROY {
+ # Destructor: disarm the underlying DBI handle as quietly as possible.
+ # NOTE: the &-form call below deliberately reuses the current @_ (passes
+ # $_[0] through without copying) - do not "fix" it to a parenthesized call
- my $self = shift;
+ return if &detected_reinvoked_destructor;
+
+ # in a forked child - never touch the parent's physical connection
+ $_[0]->_verify_pid unless DBIx::Class::_ENV_::BROKEN_FORK;
# some databases spew warnings on implicit disconnect
- $self->_verify_pid unless DBIx::Class::_ENV_::BROKEN_FORK;
+ return unless defined $_[0]->_dbh;
+
local $SIG{__WARN__} = sub {};
- $self->_dbh(undef);
+ $_[0]->_dbh(undef);
+ # not calling ->disconnect here - we are being destroyed - nothing to reset
- # this op is necessary, since the very last perl runtime statement
- # triggers a global destruction shootout, and the $SIG localization
- # may very well be destroyed before perl actually gets to do the
- # $dbh undef
- 1;
+ # Dummy NEXTSTATE ensuring that all temporaries on the stack are garbage
+ # collected before leaving this scope. Depending on the code above, this
+ # may very well be just a preventive measure guarding future modifications
+ undef;
}
# handle pid changes correctly - do not destroy parent's connection
sub _verify_pid {
+ # Fork guard: when invoked from a process other than the one that opened
+ # the connection, flag the inherited $dbh InactiveDestroy (so its DESTROY
+ # will not close the parent's socket) and reset our own connection state
- my $self = shift;
- my $pid = $self->_conn_pid;
- if( defined $pid and $pid != $$ and my $dbh = $self->_dbh ) {
+ my $pid = $_[0]->_conn_pid;
+
+ if( defined $pid and $pid != $$ and my $dbh = $_[0]->_dbh ) {
$dbh->{InactiveDestroy} = 1;
- $self->_dbh(undef);
- $self->transaction_depth(0);
- $self->savepoints([]);
+ $_[0]->_dbh(undef);
+ $_[0]->disconnect;
}
- return;
+ # Dummy NEXTSTATE ensuring that all temporaries on the stack are garbage
+ # collected before leaving this scope. Depending on the code above, this
+ # may very well be just a preventive measure guarding future modifications
+ undef;
}
=head2 connect_info
=cut
sub disconnect {
+ # Tear down the physical connection: run registered disconnect actions,
+ # roll back any implicit transaction, then drop the handle and reset all
+ # connection-derived state
- my ($self) = @_;
+ my $self = shift;
- if( $self->_dbh ) {
- my @actions;
+ # the physical disconnect below might very well throw - in order to
+ # unambiguously reset the state, do the cleanup within a scope guard
+
+ my $g = scope_guard {
- push @actions, ( $self->on_disconnect_call || () );
- push @actions, $self->_parse_connect_do ('on_disconnect_do');
+ defined( $self->_dbh )
+ and dbic_internal_try { $self->_dbh->disconnect };
- $self->_do_connection_actions(disconnect_call_ => $_) for @actions;
+ $self->_dbh(undef);
+ $self->_dbh_details({});
+ $self->transaction_depth(undef);
+ $self->_dbh_autocommit(undef);
+ $self->savepoints([]);
+
+ # FIXME - this needs reenabling with the proper "no reset on same DSN" check
+ #$self->_sql_maker(undef); # this may also end up being different
+ };
+
+ if( $self->_dbh ) {
+
+ $self->_do_connection_actions(disconnect_call_ => $_) for (
+ ( $self->on_disconnect_call || () ),
+ $self->_parse_connect_do ('on_disconnect_do')
+ );
# stops the "implicit rollback on disconnect" warning
$self->_exec_txn_rollback unless $self->_dbh_autocommit;
-
- %{ $self->_dbh->{CachedKids} } = ();
- $self->_dbh->disconnect;
- $self->_dbh(undef);
}
+
+ # Dummy NEXTSTATE ensuring that all temporaries on the stack are garbage
+ # collected before leaving this scope. Depending on the code above, this
+ # may very well be just a preventive measure guarding future modifications
+ undef;
}
=head2 with_deferred_fk_checks
# Storage subclasses should override this
sub with_deferred_fk_checks {
+ # Default no-op pass-through: simply invokes the supplied coderef.
+ # Storages that can actually defer FK enforcement override this.
- my ($self, $sub) = @_;
- $sub->();
+ #my ($self, $sub) = @_;
+ $_[1]->();
}
=head2 connected
=cut
sub connected {
+ # Full liveness check: cheap state test first (_seems_connected), then an
+ # actual server round-trip via _ping. Returns a boolean.
- my $self = shift;
- return 0 unless $self->_seems_connected;
+ return 0 unless $_[0]->_seems_connected;
#be on the safe side
- local $self->_dbh->{RaiseError} = 1;
+ local $_[0]->_dbh->{RaiseError} = 1;
- return $self->_ping;
+ return $_[0]->_ping;
}
sub _seems_connected {
+ # Cheap check (no server round-trip): handle present and DBI reports it
+ # Active. On failure the storage state is reset via disconnect() so that
+ # callers observing false start from a clean slate.
- my $self = shift;
-
- $self->_verify_pid unless DBIx::Class::_ENV_::BROKEN_FORK;
+ $_[0]->_verify_pid unless DBIx::Class::_ENV_::BROKEN_FORK;
- my $dbh = $self->_dbh
- or return 0;
+ $_[0]->_dbh
+ and
+ $_[0]->_dbh->FETCH('Active')
+ and
+ return 1;
- return $dbh->FETCH('Active');
+ # explicitly reset all state
+ $_[0]->disconnect;
+ return 0;
}
sub _ping {
+ # Server round-trip probe: delegates to DBI's ping(); yields 0 when there
+ # is no low-level handle to ping in the first place
- my $self = shift;
-
- my $dbh = $self->_dbh or return 0;
-
- return $dbh->ping;
+ ($_[0]->_dbh || return 0)->ping;
}
sub ensure_connected {
+ # (Re)establish the connection when the full ->connected check fails;
+ # always evaluates to true on success
- my ($self) = @_;
-
- unless ($self->connected) {
- $self->_populate_dbh;
- }
+ $_[0]->connected || ( $_[0]->_populate_dbh && 1 );
}
=head2 dbh
=cut
sub dbh {
+ # Public accessor: returns a verified-live $dbh, connecting on demand.
+ # When no handle exists yet, _populate_dbh connects (skipping the ping);
+ # otherwise ensure_connected performs the full liveness check first.
- my ($self) = @_;
-
- if (not $self->_dbh) {
- $self->_populate_dbh;
- } else {
- $self->ensure_connected;
- }
- return $self->_dbh;
+ # maybe save a ping call
+ $_[0]->_dbh
+ ? ( $_[0]->ensure_connected and $_[0]->_dbh )
+ : $_[0]->_populate_dbh
+ ;
}
# this is the internal "get dbh or connect (don't check)" method
sub _get_dbh {
+ # Unlike dbh() this skips the connectivity check entirely - just the fork
+ # guard plus connect-if-missing
- my $self = shift;
- $self->_verify_pid unless DBIx::Class::_ENV_::BROKEN_FORK;
- $self->_populate_dbh unless $self->_dbh;
- return $self->_dbh;
+ $_[0]->_verify_pid unless DBIx::Class::_ENV_::BROKEN_FORK;
+ $_[0]->_dbh || $_[0]->_populate_dbh;
}
# *DELIBERATELY* not a setter (for the time being)
sub _init {}
sub _populate_dbh {
+ # Establish a fresh connection: reset all prior state, connect, record the
+ # owning pid, detect the driver, seed transaction_depth from the handle's
+ # AutoCommit mode, then run any on_connect actions. Returns the new $dbh.
- my ($self) = @_;
- $self->_dbh(undef); # in case ->connected failed we might get sent here
- $self->_dbh_details({}); # reset everything we know
- $self->_sql_maker(undef); # this may also end up being different
+ # reset internal states
+ # also in case ->connected failed we might get sent here
+ $_[0]->disconnect;
- $self->_dbh($self->_connect);
+ $_[0]->_dbh($_[0]->_connect);
- $self->_conn_pid($$) unless DBIx::Class::_ENV_::BROKEN_FORK; # on win32 these are in fact threads
+ $_[0]->_conn_pid($$) unless DBIx::Class::_ENV_::BROKEN_FORK; # on win32 these are in fact threads
- $self->_determine_driver;
+ $_[0]->_determine_driver;
# Always set the transaction depth on connect, since
# there is no transaction in progress by definition
- $self->{transaction_depth} = $self->_dbh_autocommit ? 0 : 1;
+ $_[0]->transaction_depth( $_[0]->_dbh_autocommit ? 0 : 1 );
- $self->_run_connection_actions unless $self->{_in_determine_driver};
+ $_[0]->_run_connection_actions unless $_[0]->{_in_determine_driver};
+
+ $_[0]->_dbh;
}
sub _run_connection_actions {
+ # Fire every on_connect_call / on_connect_do action, in declaration order
- my $self = shift;
- my @actions;
-
- push @actions, ( $self->on_connect_call || () );
- push @actions, $self->_parse_connect_do ('on_connect_do');
- $self->_do_connection_actions(connect_call_ => $_) for @actions;
+ $_[0]->_do_connection_actions(connect_call_ => $_) for (
+ ( $_[0]->on_connect_call || () ),
+ $_[0]->_parse_connect_do ('on_connect_do'),
+ );
}
sub _server_info {
my $self = shift;
- my $info;
- unless ($info = $self->_dbh_details->{info}) {
+ # FIXME - ideally this needs to be an ||= assignment, and the final
+ # assignment at the end of this do{} should be gone entirely. However
+ # this confuses CXSA: https://rt.cpan.org/Ticket/Display.html?id=103296
+ $self->_dbh_details->{info} || do {
+
+ # this guarantees that problematic conninfo won't be hidden
+ # by the try{} below
+ $self->ensure_connected;
- $info = {};
+ my $info = {};
- my $server_version = try {
+ my $server_version = dbic_internal_try {
$self->_get_server_version
- } catch {
+ } dbic_internal_catch {
# driver determination *may* use this codepath
# in which case we must rethrow
$self->throw_exception($_) if $self->{_in_determine_driver};
}
$self->_dbh_details->{info} = $info;
- }
-
- return $info;
+ };
}
sub _get_server_version {
my $self = shift;
my $drv;
- try {
+ dbic_internal_try {
$drv = $self->_extract_driver_from_connect_info;
$self->ensure_connected;
};
DBIC_DRIVER => ref $self,
$drv ? (
DBD => $drv,
- DBD_VER => try { $drv->VERSION },
+ DBD_VER => dbic_internal_try { $drv->VERSION },
) : (),
};
) {
# some drivers barf on things they do not know about instead
# of returning undef
- my $v = try { $self->_dbh_get_info($inf) };
+ my $v = dbic_internal_try { $self->_dbh_get_info($inf) };
next unless defined $v;
#my $key = sprintf( '%s(%s)', $inf, $DBI::Const::GetInfoType::GetInfoType{$inf} );
if ((not $self->_driver_determined) && (not $self->{_in_determine_driver})) {
my $started_connected = 0;
- local $self->{_in_determine_driver} = 1;
+
+ local $self->{_in_determine_driver} = 1
+ unless $self->{_in_determine_driver};
if (ref($self) eq __PACKAGE__) {
my $driver;
if ($driver) {
my $storage_class = "DBIx::Class::Storage::DBI::${driver}";
if ($self->load_optional_class($storage_class)) {
- mro::set_mro($storage_class, 'c3');
+
+ no strict 'refs';
+ mro::set_mro($storage_class, 'c3') if
+ (
+ ${"${storage_class}::__INITIAL_MRO_UPON_DBIC_LOAD__"}
+ ||= mro::get_mro($storage_class)
+ )
+ ne
+ 'c3'
+ ;
+
bless $self, $storage_class;
$self->_rebless();
}
# try to use dsn to not require being connected, the driver may still
# force a connection later in _rebless to determine version
# (dsn may not be supplied at all if all we do is make a mock-schema)
- ($drv) = ($self->_dbi_connect_info->[0] || '') =~ /^dbi:([^:]+):/i;
+ #
+ # Use the same regex as the one used by DBI itself (even if the use of
+ # \w is odd given unicode):
+ # https://metacpan.org/source/TIMB/DBI-1.634/DBI.pm#L621
+ #
+ # DO NOT use https://metacpan.org/source/TIMB/DBI-1.634/DBI.pm#L559-566
+ # as there is a long-standing precedent of not loading DBI.pm until the
+ # very moment we are actually connecting
+ #
+ ($drv) = ($self->_dbi_connect_info->[0] || '') =~ /^dbi:(\w*)/i;
$drv ||= $ENV{DBI_DRIVER};
}
sub _determine_connector_driver {
my ($self, $conn) = @_;
- my $dbtype = $self->_dbh_get_info('SQL_DBMS_NAME');
+ my $dbtype = $self->_get_rdbms_name;
if (not $dbtype) {
$self->_warn_undetermined_driver(
}
}
+# Vendor identity string as reported by the driver (SQL_DBMS_NAME GetInfo);
+# subclasses may override when the driver misreports it
+sub _get_rdbms_name { shift->_dbh_get_info('SQL_DBMS_NAME') }
+
sub _warn_undetermined_driver {
+ # Warn (once per callsite) that driver auto-detection failed, attaching a
+ # dump of the connection description to aid bug reports
my ($self, $msg) = @_;
- require Data::Dumper::Concise;
-
carp_once ($msg . ' While we will attempt to continue anyway, the results '
. 'are likely to be underwhelming. Please upgrade DBIC, and if this message '
. "does not go away, file a bugreport including the following info:\n"
- . Data::Dumper::Concise::Dumper($self->_describe_connection)
+ . dump_value $self->_describe_connection
);
}
sub _do_connection_actions {
- my $self = shift;
- my $method_prefix = shift;
- my $call = shift;
-
- if (not ref($call)) {
- my $method = $method_prefix . $call;
- $self->$method(@_);
- } elsif (ref($call) eq 'CODE') {
- $self->$call(@_);
- } elsif (ref($call) eq 'ARRAY') {
- if (ref($call->[0]) ne 'ARRAY') {
- $self->_do_connection_actions($method_prefix, $_) for @$call;
- } else {
- $self->_do_connection_actions($method_prefix, @$_) for @$call;
+ my ($self, $method_prefix, $call, @args) = @_;
+
+ dbic_internal_try {
+ if (not ref($call)) {
+ my $method = $method_prefix . $call;
+ $self->$method(@args);
+ }
+ elsif (ref($call) eq 'CODE') {
+ $self->$call(@args);
+ }
+ elsif (ref($call) eq 'ARRAY') {
+ if (ref($call->[0]) ne 'ARRAY') {
+ $self->_do_connection_actions($method_prefix, $_) for @$call;
+ }
+ else {
+ $self->_do_connection_actions($method_prefix, @$_) for @$call;
+ }
+ }
+ else {
+ $self->throw_exception (sprintf ("Don't know how to process conection actions of type '%s'", ref($call)) );
}
- } else {
- $self->throw_exception (sprintf ("Don't know how to process conection actions of type '%s'", ref($call)) );
}
+ dbic_internal_catch {
+ if ( $method_prefix =~ /^connect/ ) {
+ # this is an on_connect cycle - we can't just throw while leaving
+ # a handle in an undefined state in our storage object
+ # kill it with fire and rethrow
+ $self->_dbh(undef);
+ $self->disconnect; # the $dbh is gone, but we still need to reset the rest
+ $self->throw_exception( $_[0] );
+ }
+ else {
+ carp "Disconnect action failed: $_[0]";
+ }
+ };
return $self;
}
$self->_do_query(@_);
}
-# override in db-specific backend when necessary
+=head2 connect_call_datetime_setup
+
+A no-op stub method, provided so that one can always safely supply the
+L<connection option|/DBIx::Class specific connection attributes>
+
+ on_connect_call => 'datetime_setup'
+
+This way one does not need to know in advance whether the underlying
+storage requires any sort of hand-holding when dealing with calendar
+data.
+
+=cut
+
sub connect_call_datetime_setup { 1 }
sub _do_query {
}, '__DBIC__DBH__ERROR__HANDLER__';
};
- try {
+ dbic_internal_try {
if(ref $info->[0] eq 'CODE') {
$dbh = $info->[0]->();
}
$dbh_error_handler_installer->($self, $dbh);
}
}
- catch {
+ dbic_internal_catch {
$self->throw_exception("DBI Connection failed: $_")
};
}
sub txn_begin {
- my $self = shift;
-
# this means we have not yet connected and do not know the AC status
# (e.g. coderef $dbh), need a full-fledged connection check
- if (! defined $self->_dbh_autocommit) {
- $self->ensure_connected;
+ if (! defined $_[0]->_dbh_autocommit) {
+ $_[0]->ensure_connected;
}
# Otherwise simply connect or re-connect on pid changes
else {
- $self->_get_dbh;
+ $_[0]->_get_dbh;
}
- $self->next::method(@_);
+ shift->next::method(@_);
}
sub _exec_txn_begin {
sub txn_commit {
my $self = shift;
- $self->_verify_pid unless DBIx::Class::_ENV_::BROKEN_FORK;
$self->throw_exception("Unable to txn_commit() on a disconnected storage")
- unless $self->_dbh;
+ unless $self->_seems_connected;
# esoteric case for folks using external $dbh handles
if (! $self->transaction_depth and ! $self->_dbh->FETCH('AutoCommit') ) {
sub txn_rollback {
my $self = shift;
- $self->_verify_pid unless DBIx::Class::_ENV_::BROKEN_FORK;
- $self->throw_exception("Unable to txn_rollback() on a disconnected storage")
- unless $self->_dbh;
+ # do a minimal connectivity check due to weird shit like
+ # https://rt.cpan.org/Public/Bug/Display.html?id=62370
+ $self->throw_exception("lost connection to storage")
+ unless $self->_seems_connected;
# esoteric case for folks using external $dbh handles
if (! $self->transaction_depth and ! $self->_dbh->FETCH('AutoCommit') ) {
# generate the DBI-specific stubs, which then fallback to ::Storage proper
quote_sub __PACKAGE__ . "::$_" => sprintf (<<'EOS', $_) for qw(svp_begin svp_release svp_rollback);
- $_[0]->_verify_pid unless DBIx::Class::_ENV_::BROKEN_FORK;
$_[0]->throw_exception('Unable to %s() on a disconnected storage')
- unless $_[0]->_dbh;
+ unless $_[0]->_seems_connected;
shift->next::method(@_);
EOS
and
$op eq 'select'
and
- first {
- length ref $_->[1]
- and
- blessed($_->[1])
+ grep {
+ defined blessed($_->[1])
and
$_->[1]->isa('DateTime')
} @$bind
) {
carp_unique 'DateTime objects passed to search() are not supported '
. 'properly (InflateColumn::DateTime formats and settings are not '
- . 'respected.) See "Formatting DateTime objects in queries" in '
- . 'DBIx::Class::Manual::Cookbook. To disable this warning for good '
+ . 'respected.) See ".. format a DateTime object for searching?" in '
+ . 'DBIx::Class::Manual::FAQ. To disable this warning for good '
. 'set $ENV{DBIC_DT_SEARCH_OK} to true'
}
};
return [ map {
- my $resolved =
( ref $_ ne 'ARRAY' or @$_ != 2 ) ? [ {}, $_ ]
: ( ! defined $_->[0] ) ? [ {}, $_->[1] ]
: (ref $_->[0] eq 'HASH') ? [(
: [ $resolve_bindinfo->(
{ dbic_colname => $_->[0] }
), $_->[1] ]
- ;
-
- if (
- ! exists $resolved->[0]{dbd_attrs}
- and
- ! $resolved->[0]{sqlt_datatype}
- and
- length ref $resolved->[1]
- and
- ! is_plain_value $resolved->[1]
- ) {
- require Data::Dumper;
- local $Data::Dumper::Maxdepth = 1;
- local $Data::Dumper::Terse = 1;
- local $Data::Dumper::Useqq = 1;
- local $Data::Dumper::Indent = 0;
- local $Data::Dumper::Pad = ' ';
- $self->throw_exception(
- 'You must supply a datatype/bindtype (see DBIx::Class::ResultSet/DBIC BIND VALUES) '
- . 'for non-scalar value '. Data::Dumper::Dumper ($resolved->[1])
- );
- }
-
- $resolved;
-
} @$bind ];
}
map {
defined( $_ && $_->[1] )
- ? qq{'$_->[1]'}
+ ? sprintf( "'%s'", "$_->[1]" ) # because overload
: q{NULL}
} @{$_[1] || []};
}
}
sub _dbi_attrs_for_bind {
- my ($self, $ident, $bind) = @_;
+ #my ($self, $ident, $bind) = @_;
- my @attrs;
+ return [ map {
- for (map { $_->[0] } @$bind) {
- push @attrs, do {
- if (exists $_->{dbd_attrs}) {
- $_->{dbd_attrs}
- }
- elsif($_->{sqlt_datatype}) {
- # cache the result in the dbh_details hash, as it can not change unless
- # we connect to something else
- my $cache = $self->_dbh_details->{_datatype_map_cache} ||= {};
- if (not exists $cache->{$_->{sqlt_datatype}}) {
- $cache->{$_->{sqlt_datatype}} = $self->bind_attribute_by_data_type($_->{sqlt_datatype}) || undef;
- }
- $cache->{$_->{sqlt_datatype}};
- }
- else {
- undef; # always push something at this position
- }
- }
- }
+ exists $_->{dbd_attrs} ? $_->{dbd_attrs}
+
+ : ! $_->{sqlt_datatype} ? undef
+
+ : do {
+
+ # cache the result in the dbh_details hash, as it (usually) can not change
+ # unless we connect to something else
+ # FIXME: for the time being Oracle is an exception, pending a rewrite of
+ # the LOB storage
+ my $cache = $_[0]->_dbh_details->{_datatype_map_cache} ||= {};
+
+ $cache->{$_->{sqlt_datatype}} = $_[0]->bind_attribute_by_data_type($_->{sqlt_datatype})
+ if ! exists $cache->{$_->{sqlt_datatype}};
+
+ $cache->{$_->{sqlt_datatype}};
- return \@attrs;
+ } } map { $_->[0] } @{$_[2]} ];
}
sub _execute {
# they can be fused once again with the final return
$to_insert = { %$to_insert, %$prefetched_values };
- # FIXME - we seem to assume undef values as non-supplied. This is wrong.
- # Investigate what does it take to s/defined/exists/
my %pcols = map { $_ => 1 } $source->primary_columns;
+
my (%retrieve_cols, $autoinc_supplied, $retrieve_autoinc_col);
+
for my $col ($source->columns) {
+
+ # first autoinc wins - this is why ->columns() in-order iteration is important
+ #
+ # FIXME - there ought to be a sanity-check for multiple is_auto_increment settings
+ # or something...
+ #
if ($col_infos->{$col}{is_auto_increment}) {
+
+ # FIXME - we seem to assume undef values as non-supplied.
+ # This is wrong.
+ # Investigate what does it take to s/defined/exists/
+ # ( fails t/cdbi/copy.t among other things )
$autoinc_supplied ||= 1 if defined $to_insert->{$col};
+
$retrieve_autoinc_col ||= $col unless $autoinc_supplied;
}
# nothing to retrieve when explicit values are supplied
next if (
- defined $to_insert->{$col} and ! is_literal_value($to_insert->{$col})
+ # FIXME - we seem to assume undef values as non-supplied.
+ # This is wrong.
+ # Investigate what does it take to s/defined/exists/
+ # ( fails t/cdbi/copy.t among other things )
+ defined $to_insert->{$col}
+ and
+ (
+ # not a ref - cheaper to check before a call to is_literal_value()
+ ! length ref $to_insert->{$col}
+ or
+ # not a literal we *MAY* need to pull out ( see check below )
+ ! is_literal_value( $to_insert->{$col} )
+ )
);
# the 'scalar keys' is a trick to preserve the ->columns declaration order
);
};
+ # corner case of a non-supplied PK which is *not* declared as autoinc
+ if (
+ ! $autoinc_supplied
+ and
+ ! defined $retrieve_autoinc_col
+ and
+ # FIXME - first come-first serve, suboptimal...
+ ($retrieve_autoinc_col) = ( grep
+ {
+ $pcols{$_}
+ and
+ ! $col_infos->{$_}{retrieve_on_insert}
+ and
+ ! defined $col_infos->{$_}{is_auto_increment}
+ }
+ sort
+ { $retrieve_cols{$a} <=> $retrieve_cols{$b} }
+ keys %retrieve_cols
+ )
+ ) {
+ carp_unique(
+ "Missing value for primary key column '$retrieve_autoinc_col' on "
+ . "@{[ $source->source_name ]} - perhaps you forgot to set its "
+ . "'is_auto_increment' attribute during add_columns()? Treating "
+ . "'$retrieve_autoinc_col' implicitly as an autoinc, and attempting "
+ . 'value retrieval'
+ );
+ }
+
local $self->{_autoinc_supplied_for_op} = $autoinc_supplied;
local $self->{_perform_autoinc_retrieval} = $retrieve_autoinc_col;
my %returned_cols = %$to_insert;
if (my $retlist = $sqla_opts->{returning}) { # if IR is supported - we will get everything in one set
- @ir_container = try {
- local $SIG{__WARN__} = sub {};
- my @r = $sth->fetchrow_array;
- $sth->finish;
- @r;
- } unless @ir_container;
+
+ unless( @ir_container ) {
+ dbic_internal_try {
+
+ # FIXME - need to investigate why Caelum silenced this in 4d4dc518
+ local $SIG{__WARN__} = sub {};
+
+ @ir_container = $sth->fetchrow_array;
+ $sth->finish;
+
+ } dbic_internal_catch {
+ # Evict the $sth from the cache in case we got here, since the finish()
+ # is crucial, at least on older Firebirds, possibly on other engines too
+ #
+ # It would be too complex to make this a proper subclass override,
+ # and besides we already take the try{} penalty, adding a catch that
+ # triggers infrequently is a no-brainer
+ #
+ if( my $kids = $self->_dbh->{CachedKids} ) {
+ $kids->{$_} == $sth and delete $kids->{$_}
+ for keys %$kids
+ }
+ };
+ }
@returned_cols{@$retlist} = @ir_container if @ir_container;
}
}
sub insert_bulk {
- my ($self, $source, $cols, $data) = @_;
+ carp_unique(
+ 'insert_bulk() should have never been exposed as a public method and '
+ . 'calling it is depecated as of Aug 2014. If you believe having a genuine '
+ . 'use for this method please contact the development team via '
+ . DBIx::Class::_ENV_::HELP_URL
+ );
- my @col_range = (0..$#$cols);
+ return '0E0' unless @{$_[3]||[]};
- # FIXME SUBOPTIMAL - DBI needs fixing to always stringify regardless of DBD
- # For the time being forcibly stringify whatever is stringifiable
- # ResultSet::populate() hands us a copy - safe to mangle
- for my $r (0 .. $#$data) {
- for my $c (0 .. $#{$data->[$r]}) {
- $data->[$r][$c] = "$data->[$r][$c]"
- if ( length ref $data->[$r][$c] and is_plain_value $data->[$r][$c] );
- }
- }
+ shift->_insert_bulk(@_);
+}
+
+sub _insert_bulk {
+ my ($self, $source, $cols, $data) = @_;
+
+ $self->throw_exception('Calling _insert_bulk without a dataset to process makes no sense')
+ unless @{$data||[]};
my $colinfos = $source->columns_info($cols);
local $self->{_autoinc_supplied_for_op} =
- (first { $_->{is_auto_increment} } values %$colinfos)
+ (grep { $_->{is_auto_increment} } values %$colinfos)
? 1
: 0
;
# can't just hand SQLA a set of some known "values" (e.g. hashrefs that
# can be later matched up by address), because we want to supply a real
# value on which perhaps e.g. datatype checks will be performed
- my ($proto_data, $value_type_by_col_idx);
- for my $i (@col_range) {
- my $colname = $cols->[$i];
- if (ref $data->[0][$i] eq 'SCALAR') {
+ my ($proto_data, $serialized_bind_type_by_col_idx);
+ for my $col_idx (0..$#$cols) {
+ my $colname = $cols->[$col_idx];
+ if (ref $data->[0][$col_idx] eq 'SCALAR') {
# no bind value at all - no type
- $proto_data->{$colname} = $data->[0][$i];
+ $proto_data->{$colname} = $data->[0][$col_idx];
}
- elsif (ref $data->[0][$i] eq 'REF' and ref ${$data->[0][$i]} eq 'ARRAY' ) {
+ elsif (ref $data->[0][$col_idx] eq 'REF' and ref ${$data->[0][$col_idx]} eq 'ARRAY' ) {
# repack, so we don't end up mangling the original \[]
- my ($sql, @bind) = @${$data->[0][$i]};
+ my ($sql, @bind) = @${$data->[0][$col_idx]};
# normalization of user supplied stuff
my $resolved_bind = $self->_resolve_bindattrs(
# store value-less (attrs only) bind info - we will be comparing all
# supplied binds against this for sanity
- $value_type_by_col_idx->{$i} = [ map { $_->[0] } @$resolved_bind ];
+ $serialized_bind_type_by_col_idx->{$col_idx} = serialize [ map { $_->[0] } @$resolved_bind ];
$proto_data->{$colname} = \[ $sql, map { [
# inject slice order to use for $proto_bind construction
- { %{$resolved_bind->[$_][0]}, _bind_data_slice_idx => $i, _literal_bind_subindex => $_+1 }
+ { %{$resolved_bind->[$_][0]}, _bind_data_slice_idx => $col_idx, _literal_bind_subindex => $_+1 }
=>
$resolved_bind->[$_][1]
] } (0 .. $#bind)
];
}
else {
- $value_type_by_col_idx->{$i} = undef;
+ $serialized_bind_type_by_col_idx->{$col_idx} = undef;
$proto_data->{$colname} = \[ '?', [
- { dbic_colname => $colname, _bind_data_slice_idx => $i }
+ { dbic_colname => $colname, _bind_data_slice_idx => $col_idx }
=>
- $data->[0][$i]
+ $data->[0][$col_idx]
] ];
}
}
[ $proto_data ],
);
- if (! @$proto_bind and keys %$value_type_by_col_idx) {
+ if (! @$proto_bind and keys %$serialized_bind_type_by_col_idx) {
# if the bindlist is empty and we had some dynamic binds, this means the
# storage ate them away (e.g. the NoBindVars component) and interpolated
# them directly into the SQL. This obviously can't be good for multi-inserts
- $self->throw_exception('Cannot insert_bulk without support for placeholders');
+ $self->throw_exception('Unable to invoke fast-path insert without storage placeholder support');
}
# sanity checks
$msg,
$cols->[$c_idx],
do {
- require Data::Dumper::Concise;
local $Data::Dumper::Maxdepth = 5;
- Data::Dumper::Concise::Dumper ({
+ dump_value {
map { $cols->[$_] =>
$data->[$r_idx][$_]
- } @col_range
- }),
+ } 0..$#$cols
+ };
}
);
};
- for my $col_idx (@col_range) {
+ for my $col_idx (0..$#$cols) {
my $reference_val = $data->[0][$col_idx];
for my $row_idx (1..$#$data) { # we are comparing against what we got from [0] above, hence start from 1
my $val = $data->[$row_idx][$col_idx];
- if (! exists $value_type_by_col_idx->{$col_idx}) { # literal no binds
+ if (! exists $serialized_bind_type_by_col_idx->{$col_idx}) { # literal no binds
if (ref $val ne 'SCALAR') {
$bad_slice_report_cref->(
"Incorrect value (expecting SCALAR-ref \\'$$reference_val')",
);
}
}
- elsif (! defined $value_type_by_col_idx->{$col_idx} ) { # regular non-literal value
+ elsif (! defined $serialized_bind_type_by_col_idx->{$col_idx} ) { # regular non-literal value
if (is_literal_value($val)) {
$bad_slice_report_cref->("Literal SQL found where a plain bind value is expected", $row_idx, $col_idx);
}
}
# need to check the bind attrs - a bind will happen only once for
# the entire dataset, so any changes further down will be ignored.
- elsif (! Data::Compare::Compare(
- $value_type_by_col_idx->{$col_idx},
- [
+ elsif (
+ $serialized_bind_type_by_col_idx->{$col_idx}
+ ne
+ serialize [
map
{ $_->[0] }
@{$self->_resolve_bindattrs(
$source, [ @{$$val}[1 .. $#$$val] ], $colinfos,
)}
- ],
- )) {
+ ]
+ ) {
$bad_slice_report_cref->(
'Differing bind attributes on literal/bind values not supported',
$row_idx,
# scope guard
my $guard = $self->txn_scope_guard;
- $self->_query_start( $sql, @$proto_bind ? [[undef => '__BULK_INSERT__' ]] : () );
+ $self->_query_start( $sql, @$proto_bind ? [[ {} => '__BULK_INSERT__' ]] : () );
my $sth = $self->_prepare_sth($self->_dbh, $sql);
my $rv = do {
if (@$proto_bind) {
}
};
- $self->_query_end( $sql, @$proto_bind ? [[ undef => '__BULK_INSERT__' ]] : () );
+ $self->_query_end( $sql, @$proto_bind ? [[ {} => '__BULK_INSERT__' ]] : () );
$guard->commit;
sub _dbh_execute_for_fetch {
my ($self, $source, $sth, $proto_bind, $cols, $data) = @_;
- my @idx_range = ( 0 .. $#$proto_bind );
-
# If we have any bind attributes to take care of, we will bind the
# proto-bind data (which will never be used by execute_for_fetch)
# However since column bindtypes are "sticky", this is sufficient
# to get the DBD to apply the bindtype to all values later on
-
my $bind_attrs = $self->_dbi_attrs_for_bind($source, $proto_bind);
- for my $i (@idx_range) {
+ for my $i (0 .. $#$proto_bind) {
$sth->bind_param (
$i+1, # DBI bind indexes are 1-based
$proto_bind->[$i][1],
return undef if ++$fetch_row_idx > $#$data;
return [ map {
- ! defined $_->{_literal_bind_subindex}
+ my $v = ! defined $_->{_literal_bind_subindex}
? $data->[ $fetch_row_idx ]->[ $_->{_bind_data_slice_idx} ]
[ ${ $data->[ $fetch_row_idx ]->[ $_->{_bind_data_slice_idx} ]}->[ $_->{_literal_bind_subindex} ] ],
{}, # a fake column_info bag
)->[0][1]
+ ;
+
+ # FIXME SUBOPTIMAL - DBI needs fixing to always stringify regardless of DBD
+ # For the time being forcibly stringify whatever is stringifiable
+ my $vref;
+ ( !length ref $v or ! ($vref = is_plain_value $v) ) ? $v
+ : defined blessed( $$vref ) ? "$$vref"
+ : $$vref
+ ;
} map { $_->[0] } @$proto_bind ];
};
my $tuple_status = [];
my ($rv, $err);
- try {
+ dbic_internal_try {
$rv = $sth->execute_for_fetch(
$fetch_tuple,
$tuple_status,
);
}
- catch {
+ dbic_internal_catch {
$err = shift;
};
);
# Statement must finish even if there was an exception.
- try {
+ dbic_internal_try {
$sth->finish
}
- catch {
+ dbic_internal_catch {
$err = shift unless defined $err
};
$self->throw_exception("Unexpected populate error: $err")
if ($i > $#$tuple_status);
- require Data::Dumper::Concise;
$self->throw_exception(sprintf "execute_for_fetch() aborted with '%s' at populate slice:\n%s",
($tuple_status->[$i][1] || $err),
- Data::Dumper::Concise::Dumper( { map { $cols->[$_] => $data->[$i][$_] } (0 .. $#$cols) } ),
+ dump_value { map { $cols->[$_] => $data->[$i][$_] } (0 .. $#$cols) },
);
}
my ($self, $sth, $count) = @_;
my $err;
- try {
+ dbic_internal_try {
my $dbh = $self->_get_dbh;
local $dbh->{RaiseError} = 1;
local $dbh->{PrintError} = 0;
$sth->execute foreach 1..$count;
}
- catch {
+ dbic_internal_catch {
$err = shift;
};
# Make sure statement is finished even if there was an exception.
- try {
+ dbic_internal_try {
$sth->finish
}
- catch {
+ dbic_internal_catch {
$err = shift unless defined $err;
};
where => $where,
};
- # Sanity check the attributes (SQLMaker does it too, but
- # in case of a software_limit we'll never reach there)
- if (defined $attrs->{offset}) {
- $self->throw_exception('A supplied offset attribute must be a non-negative integer')
- if ( $attrs->{offset} =~ /\D/ or $attrs->{offset} < 0 );
- }
-
- if (defined $attrs->{rows}) {
- $self->throw_exception("The rows attribute must be a positive integer if present")
- if ( $attrs->{rows} =~ /\D/ or $attrs->{rows} <= 0 );
- }
- elsif ($attrs->{offset}) {
- # MySQL actually recommends this approach. I cringe.
- $attrs->{rows} = $sql_maker->__max_int;
- }
+ # MySQL actually recommends this approach. I cringe.
+ $attrs->{rows} ||= $sql_maker->__max_int
+ if $attrs->{offset};
# see if we will need to tear the prefetch apart to satisfy group_by == select
# this is *extremely tricky* to get right, I am still not sure I did
# are happy (this includes MySQL in strict_mode)
# If any of the other joined tables are referenced in the group_by
# however - the user is on their own
- ( $prefetch_needs_subquery or $attrs->{_related_results_construction} )
+ ( $prefetch_needs_subquery or ! $attrs->{_simple_passthrough_construction} )
and
$attrs->{group_by}
and
@{$attrs->{group_by}}
and
- my $grp_aliases = try { # try{} because $attrs->{from} may be unreadable
+ my $grp_aliases = dbic_internal_try { # internal_try{} because $attrs->{from} may be unreadable
$self->_resolve_aliastypes_from_select_args({ from => $attrs->{from}, group_by => $attrs->{group_by} })
}
) {
sub _dbh_columns_info_for {
my ($self, $dbh, $table) = @_;
- if ($dbh->can('column_info')) {
- my %result;
- my $caught;
- try {
+ my %result;
+
+ if (! DBIx::Class::_ENV_::STRESSTEST_COLUMN_INFO_UNAWARE_STORAGE and $dbh->can('column_info')) {
+ dbic_internal_try {
my ($schema,$tab) = $table =~ /^(.+?)\.(.+)$/ ? ($1,$2) : (undef,$table);
my $sth = $dbh->column_info( undef,$schema, $tab, '%' );
$sth->execute();
$result{$col_name} = \%column_info;
}
- } catch {
- $caught = 1;
+ } dbic_internal_catch {
+ %result = ();
};
- return \%result if !$caught && scalar keys %result;
+
+ return \%result if keys %result;
}
- my %result;
my $sth = $dbh->prepare($self->sql_maker->select($table, undef, \'1 = 0'));
$sth->execute;
- my @columns = @{$sth->{NAME_lc}};
- for my $i ( 0 .. $#columns ){
- my %column_info;
- $column_info{data_type} = $sth->{TYPE}->[$i];
- $column_info{size} = $sth->{PRECISION}->[$i];
- $column_info{is_nullable} = $sth->{NULLABLE}->[$i] ? 1 : 0;
-
- if ($column_info{data_type} =~ m/^(.*?)\((.*?)\)$/) {
- $column_info{data_type} = $1;
- $column_info{size} = $2;
+
+### The acrobatics with lc names is necessary to support both the legacy
+### API that used NAME_lc exclusively, *AND* at the same time work properly
+### with column names differing in case only (thanks pg!)
+
+ my ($columns, $seen_lcs);
+
+ ++$seen_lcs->{lc($_)} and $columns->{$_} = {
+ idx => scalar keys %$columns,
+ name => $_,
+ lc_name => lc($_),
+ } for @{$sth->{NAME}};
+
+ $seen_lcs->{$_->{lc_name}} == 1
+ and
+ $_->{name} = $_->{lc_name}
+ for values %$columns;
+
+ for ( values %$columns ) {
+ my $inf = {
+ data_type => $sth->{TYPE}->[$_->{idx}],
+ size => $sth->{PRECISION}->[$_->{idx}],
+ is_nullable => $sth->{NULLABLE}->[$_->{idx}] ? 1 : 0,
+ };
+
+ if ($inf->{data_type} =~ m/^(.*?)\((.*?)\)$/) {
+ @{$inf}{qw( data_type size)} = ($1, $2);
}
- $result{$columns[$i]} = \%column_info;
+ $result{$_->{name}} = $inf;
}
+
$sth->finish;
- foreach my $col (keys %result) {
- my $colinfo = $result{$col};
- my $type_num = $colinfo->{data_type};
- my $type_name;
- if(defined $type_num && $dbh->can('type_info')) {
- my $type_info = $dbh->type_info($type_num);
- $type_name = $type_info->{TYPE_NAME} if $type_info;
- $colinfo->{data_type} = $type_name if $type_name;
+ if ($dbh->can('type_info')) {
+ for my $inf (values %result) {
+ next if ! defined $inf->{data_type};
+
+ $inf->{data_type} = (
+ (
+ (
+ $dbh->type_info( $inf->{data_type} )
+ ||
+ next
+ )
+ ||
+ next
+ )->{TYPE_NAME}
+ ||
+ next
+ );
+
+      # FIXME - this may be an artifact of the DBD::Pg implementation alone
+ # needs more testing in the future...
+ $inf->{size} -= 4 if (
+ ( $inf->{size}||0 > 4 )
+ and
+ $inf->{data_type} =~ qr/^text$/i
+ );
}
+
}
return \%result;
sub _dbh_last_insert_id {
my ($self, $dbh, $source, $col) = @_;
- my $id = try { $dbh->last_insert_id (undef, undef, $source->name, $col) };
+ my $id = dbic_internal_try { $dbh->last_insert_id (undef, undef, $source->name, $col) };
return $id if defined $id;
# some drivers provide a $dbh attribute (e.g. Sybase and $dbh->{syb_dynamic_supported})
# but it is inaccurate more often than not
- return try {
+ ( dbic_internal_try {
local $dbh->{PrintError} = 0;
local $dbh->{RaiseError} = 1;
$dbh->do('select ?', {}, 1);
1;
- }
- catch {
- 0;
- };
+ } )
+ ? 1
+ : 0
+ ;
}
# Check if placeholders bound to non-string types throw exceptions
my $self = shift;
my $dbh = $self->_get_dbh;
- return try {
+ ( dbic_internal_try {
local $dbh->{PrintError} = 0;
local $dbh->{RaiseError} = 1;
# this specifically tests a bind that is NOT a string
$dbh->do('select 1 where 1 = ?', {}, 1);
1;
- }
- catch {
- 0;
- };
+ } )
+ ? 1
+ : 0
+ ;
}
=head2 sqlt_type
sub create_ddl_dir {
my ($self, $schema, $databases, $version, $dir, $preversion, $sqltargs) = @_;
- unless ($dir) {
+ require DBIx::Class::Optional::Dependencies;
+ if (my $missing = DBIx::Class::Optional::Dependencies->req_missing_for ('deploy')) {
+ $self->throw_exception("Can't create a ddl file without $missing");
+ }
+
+ if (!$dir) {
carp "No directory given, using ./\n";
$dir = './';
- } else {
- -d $dir
- or
- (require File::Path and File::Path::mkpath (["$dir"])) # mkpath does not like objects (i.e. Path::Class::Dir)
- or
- $self->throw_exception(
- "Failed to create '$dir': " . ($! || $@ || 'error unknown')
- );
}
-
- $self->throw_exception ("Directory '$dir' does not exist\n") unless(-d $dir);
+ else {
+ mkdir_p( $dir ) unless -d $dir;
+ }
$databases ||= ['MySQL', 'SQLite', 'PostgreSQL'];
$databases = [ $databases ] if(ref($databases) ne 'ARRAY');
add_drop_table => 1,
ignore_constraint_names => 1,
ignore_index_names => 1,
+ quote_identifiers => $self->sql_maker->_quoting_enabled,
%{$sqltargs || {}}
};
- unless (DBIx::Class::Optional::Dependencies->req_ok_for ('deploy')) {
- $self->throw_exception("Can't create a ddl file without " . DBIx::Class::Optional::Dependencies->req_missing_for ('deploy') );
- }
-
my $sqlt = SQL::Translator->new( $sqltargs );
$sqlt->parser('SQL::Translator::Parser::DBIx::Class');
unless $dest_schema->name;
}
- my $diff = SQL::Translator::Diff::schema_diff($source_schema, $db,
- $dest_schema, $db,
- $sqltargs
- );
+ my $diff = do {
+ # FIXME - this is a terrible workaround for
+ # https://github.com/dbsrgits/sql-translator/commit/2d23c1e
+      # Fixing it in this sloppy manner so that we don't have to
+ # lockstep an SQLT release as well. Needs to be removed at
+ # some point, and SQLT dep bumped
+ local $SQL::Translator::Producer::SQLite::NO_QUOTES
+ if $SQL::Translator::Producer::SQLite::NO_QUOTES;
+
+ SQL::Translator::Diff::schema_diff($source_schema, $db,
+ $dest_schema, $db,
+ $sqltargs
+ );
+ };
+
if(!open $file, ">$difffile") {
$self->throw_exception("Can't write to $difffile ($!)");
next;
=back
-Returns the statements used by L</deploy> and L<DBIx::Class::Schema/deploy>.
+Returns the statements used by L<DBIx::Class::Storage/deploy>
+and L<DBIx::Class::Schema/deploy>.
The L<SQL::Translator> (not L<DBI>) database driver name can be explicitly
provided in C<$type>, otherwise the result of L</sqlt_type> is used as default.
sub deployment_statements {
my ($self, $schema, $type, $version, $dir, $sqltargs) = @_;
+
+ $self->throw_exception(
+ 'Calling deployment_statements() in void context makes no sense'
+ ) unless defined wantarray;
+
$type ||= $self->sqlt_type;
$version ||= $schema->schema_version || '1.x';
$dir ||= './';
return join('', @rows);
}
- unless (DBIx::Class::Optional::Dependencies->req_ok_for ('deploy') ) {
- $self->throw_exception("Can't deploy without a ddl_dir or " . DBIx::Class::Optional::Dependencies->req_missing_for ('deploy') );
+ require DBIx::Class::Optional::Dependencies;
+ if (my $missing = DBIx::Class::Optional::Dependencies->req_missing_for ('deploy') ) {
+ $self->throw_exception("Can't deploy without a pregenerated 'ddl_dir' directory or $missing");
}
# sources needs to be a parser arg, but for simplicity allow at top level
$sqltargs->{parser_args}{sources} = delete $sqltargs->{sources}
if exists $sqltargs->{sources};
+ $sqltargs->{quote_identifiers} = $self->sql_maker->_quoting_enabled
+ unless exists $sqltargs->{quote_identifiers};
+
my $tr = SQL::Translator->new(
producer => "SQL::Translator::Producer::${type}",
%$sqltargs,
return if($line =~ /^COMMIT/m);
return if $line =~ /^\s+$/; # skip whitespace only
$self->_query_start($line);
- try {
+ dbic_internal_try {
# do a dbh_do cycle here, as we need some error checking in
# place (even though we will ignore errors)
$self->dbh_do (sub { $_[1]->do($line) });
- } catch {
+ } dbic_internal_catch {
carp qq{$_ (running "${line}")};
};
$self->_query_end($line);
cases if you choose the C<< AutoCommit => 0 >> path, just as you would
be with raw DBI.
+=head1 FURTHER QUESTIONS?
-=head1 AUTHOR AND CONTRIBUTORS
+Check the list of L<additional DBIC resources|DBIx::Class/GETTING HELP/SUPPORT>.
-See L<AUTHOR|DBIx::Class/AUTHOR> and L<CONTRIBUTORS|DBIx::Class/CONTRIBUTORS> in DBIx::Class
+=head1 COPYRIGHT AND LICENSE
-=head1 LICENSE
-
-You may distribute this code under the same terms as Perl itself.
-
-=cut
+This module is free software L<copyright|DBIx::Class/COPYRIGHT AND LICENSE>
+by the L<DBIx::Class (DBIC) authors|DBIx::Class/AUTHORS>. You can
+redistribute it and/or modify it under the same terms as the
+L<DBIx::Class library|DBIx::Class/COPYRIGHT AND LICENSE>.