use strict;
use warnings;
+use Carp::Clan qw/^DBIx::Class/;
use DBI;
use SQL::Abstract::Limit;
use DBIx::Class::Storage::DBI::Cursor;
__PACKAGE__->mk_group_accessors('simple' =>
qw/_connect_info _dbi_connect_info _dbh _sql_maker _sql_maker_opts
_conn_pid _conn_tid disable_sth_caching on_connect_do
- on_disconnect_do transaction_depth unsafe _dbh_autocommit/
+ on_disconnect_do transaction_depth unsafe _dbh_autocommit
+ auto_savepoint savepoints/
);
__PACKAGE__->cursor_class('DBIx::Class::Storage::DBI::Cursor');
BEGIN {
-package DBIC::SQL::Abstract; # Would merge upstream, but nate doesn't reply :(
+package # Hide from PAUSE
+ DBIC::SQL::Abstract; # Would merge upstream, but nate doesn't reply :(
use base qw/SQL::Abstract::Limit/;
$new->transaction_depth(0);
$new->_sql_maker_opts({});
+ $new->{savepoints} = [];
$new->{_in_dbh_do} = 0;
$new->{_dbh_gen} = 0;
especially if you set a C<HandleError> handler that suppresses exceptions
and/or disable C<RaiseError>.
+=item auto_savepoint
+
+If this option is true, L<DBIx::Class> will use savepoints when nesting
+transactions, making it possible to recover from failure in the inner
+transaction without having to abort all outer transactions.
+
=back
These options can be mixed in with your other L<DBI> connection attributes,
$last_info = { %$last_info }; # so delete is non-destructive
my @storage_option = qw(
on_connect_do on_disconnect_do disable_sth_caching unsafe cursor_class
+ auto_savepoint
);
for my $storage_opt (@storage_option) {
if(my $value = delete $last_info->{$storage_opt}) {
eval {
$self->_verify_pid if $dbh;
- if( !$dbh ) {
+ if(!$self->_dbh) {
$self->_populate_dbh;
$dbh = $self->_dbh;
}
ref $coderef eq 'CODE' or $self->throw_exception
('$coderef must be a CODE reference');
- return $coderef->(@_) if $self->{transaction_depth};
+ return $coderef->(@_) if $self->{transaction_depth} && ! $self->auto_savepoint;
local $self->{_in_dbh_do} = 1;
$dbh;
}
+# svp_begin($name?) - create a savepoint inside the current transaction.
+#
+# $name is optional; when omitted a name is generated from the current
+# savepoint stack depth (see _svp_generate_name). Throws if called outside
+# a transaction or if the Storage subclass does not implement the
+# driver-specific _svp_begin hook, which issues the actual SQL.
+sub svp_begin {
+ my ($self, $name) = @_;
+
+ $name = $self->_svp_generate_name
+ unless defined $name;
+
+ $self->throw_exception ("You can't use savepoints outside a transaction")
+ if $self->{transaction_depth} == 0;
+
+ $self->throw_exception ("Your Storage implementation doesn't support savepoints")
+ unless $self->can('_svp_begin');
+
+ # Record the name so svp_release/svp_rollback can validate it later and
+ # keep the in-memory stack in sync with the database's savepoint stack.
+ push @{ $self->{savepoints} }, $name;
+
+ $self->debugobj->svp_begin($name) if $self->debug;
+
+ return $self->_svp_begin($name);
+}
+
+# svp_release($name?) - release (commit) a savepoint.
+#
+# With a $name, verifies it exists on the stack and pops every entry up to
+# and including it (releasing a savepoint implicitly discards any savepoints
+# created after it). Without a $name, releases the most recent savepoint.
+# Delegates the SQL to the driver-specific _svp_release hook.
+sub svp_release {
+ my ($self, $name) = @_;
+
+ $self->throw_exception ("You can't use savepoints outside a transaction")
+ if $self->{transaction_depth} == 0;
+
+ $self->throw_exception ("Your Storage implementation doesn't support savepoints")
+ unless $self->can('_svp_release');
+
+ if (defined $name) {
+ $self->throw_exception ("Savepoint '$name' does not exist")
+ unless grep { $_ eq $name } @{ $self->{savepoints} };
+
+ # Dig through the stack until we find the one we are releasing. This keeps
+ # the stack up to date. Safe from looping forever: the grep above already
+ # guaranteed $name is present somewhere on the stack.
+ my $svp;
+
+ do { $svp = pop @{ $self->{savepoints} } } while $svp ne $name;
+ } else {
+ # No name given: release the innermost (most recently pushed) savepoint.
+ $name = pop @{ $self->{savepoints} };
+ }
+
+ $self->debugobj->svp_release($name) if $self->debug;
+
+ return $self->_svp_release($name);
+}
+
+# svp_rollback($name?) - roll back to a savepoint.
+#
+# With a $name, verifies it exists, pops everything created after it, and
+# pushes the name back (rolling back to a savepoint destroys later
+# savepoints but keeps the named one active). Without a $name, rolls back
+# to the most recent savepoint, which stays on the stack. Delegates the
+# SQL to the driver-specific _svp_rollback hook.
+sub svp_rollback {
+ my ($self, $name) = @_;
+
+ $self->throw_exception ("You can't use savepoints outside a transaction")
+ if $self->{transaction_depth} == 0;
+
+ $self->throw_exception ("Your Storage implementation doesn't support savepoints")
+ unless $self->can('_svp_rollback');
+
+ if (defined $name) {
+ # If they passed us a name, verify that it exists in the stack
+ unless(grep({ $_ eq $name } @{ $self->{savepoints} })) {
+ $self->throw_exception("Savepoint '$name' does not exist!");
+ }
+
+ # Dig through the stack until we find the one we are rolling back to.
+ # This keeps the stack up to date.
+ while(my $s = pop(@{ $self->{savepoints} })) {
+ last if($s eq $name);
+ }
+ # Add the savepoint back to the stack, as a rollback doesn't remove the
+ # named savepoint, only everything after it.
+ push(@{ $self->{savepoints} }, $name);
+ } else {
+ # We'll assume they want to rollback to the last savepoint
+ # (note: peeked with [-1], not popped - it remains on the stack).
+ $name = $self->{savepoints}->[-1];
+ }
+
+ $self->debugobj->svp_rollback($name) if $self->debug;
+
+ return $self->_svp_rollback($name);
+}
+
+# _svp_generate_name() - build a default savepoint name ("savepoint_N")
+# from the current savepoint stack depth. Called by svp_begin when the
+# caller does not supply an explicit name. Names are unique within one
+# transaction because each svp_begin pushes onto the stack before the
+# next name is generated.
+sub _svp_generate_name {
+ my ($self) = @_;
+
+ return 'savepoint_'.scalar(@{ $self->{'savepoints'} });
+}
sub txn_begin {
my $self = shift;
# we should reconnect on begin_work
# for AutoCommit users
$self->dbh->begin_work;
+ } elsif ($self->auto_savepoint) {
+ $self->svp_begin;
}
$self->{transaction_depth}++;
}
if $self->_dbh_autocommit;
}
elsif($self->{transaction_depth} > 1) {
- $self->{transaction_depth}--
+ $self->{transaction_depth}--;
+ $self->svp_release
+ if $self->auto_savepoint;
}
}
}
elsif($self->{transaction_depth} > 1) {
$self->{transaction_depth}--;
+ if ($self->auto_savepoint) {
+ $self->svp_rollback;
+ $self->svp_release;
+ }
}
else {
die DBIx::Class::Storage::NESTED_ROLLBACK_EXCEPTION->new;
my $self = shift;
my ($rv, $sth, @bind) = $self->_select(@_);
my @row = $sth->fetchrow_array;
+ if(@row && $sth->fetchrow_array) {
+ carp "Query returned more than one row. SQL that returns multiple rows is DEPRECATED for ->find and ->single";
+ }
# Need to call finish() to work round broken DBDs
$sth->finish();
return @row;
=over 4
-=item Arguments: $schema \@databases, $version, $directory, $preversion, $sqlt_args
+=item Arguments: $schema \@databases, $version, $directory, $preversion, \%sqlt_args
=back
Creates a SQL file based on the Schema, for each of the specified
database types, in the given directory.
+By default, C<\%sqlt_args> will have
+
+ { add_drop_table => 1, ignore_constraint_names => 1, ignore_index_names => 1 }
+
+merged with the hash passed in. To disable any of those features, pass in a
+hashref like the following
+
+ { ignore_constraint_names => 0, # ... other options }
+
=cut
-sub create_ddl_dir
-{
+sub create_ddl_dir {
my ($self, $schema, $databases, $version, $dir, $preversion, $sqltargs) = @_;
- if(!$dir || !-d $dir)
- {
+ if(!$dir || !-d $dir) {
warn "No directory given, using ./\n";
$dir = "./";
}
$databases ||= ['MySQL', 'SQLite', 'PostgreSQL'];
$databases = [ $databases ] if(ref($databases) ne 'ARRAY');
$version ||= $schema->VERSION || '1.x';
- $sqltargs = { ( add_drop_table => 1 ), %{$sqltargs || {}} };
+ $sqltargs = {
+ add_drop_table => 1,
+ ignore_constraint_names => 1,
+ ignore_index_names => 1,
+ %{$sqltargs || {}}
+ };
$self->throw_exception(q{Can't create a ddl file without SQL::Translator 0.09: '}
. $self->_check_sqlt_message . q{'})
$sqlt->parser('SQL::Translator::Parser::DBIx::Class');
my $sqlt_schema = $sqlt->translate({ data => $schema }) or die $sqlt->error;
- foreach my $db (@$databases)
- {
+ foreach my $db (@$databases) {
$sqlt->reset();
$sqlt = $self->configure_sqlt($sqlt, $db);
$sqlt->{schema} = $sqlt_schema;
$sqlt->producer($db);
my $file;
- my $filename = $schema->ddl_filename($db, $dir, $version);
- if(-e $filename)
- {
- warn("$filename already exists, skipping $db");
- next unless ($preversion);
- } else {
- my $output = $sqlt->translate;
- if(!$output)
- {
- warn("Failed to translate to $db, skipping. (" . $sqlt->error . ")");
- next;
- }
- if(!open($file, ">$filename"))
- {
- $self->throw_exception("Can't open $filename for writing ($!)");
- next;
- }
- print $file $output;
- close($file);
- }
- if($preversion)
- {
- require SQL::Translator::Diff;
+ my $filename = $schema->ddl_filename($db, $version, $dir);
+ if (-e $filename && (!$version || ($version == $schema->schema_version()))) {
+ # if we are dumping the current version, overwrite the DDL
+ warn "Overwriting existing DDL file - $filename";
+ unlink($filename);
+ }
- my $prefilename = $schema->ddl_filename($db, $dir, $preversion);
-# print "Previous version $prefilename\n";
- if(!-e $prefilename)
- {
- warn("No previous schema file found ($prefilename)");
- next;
- }
+ my $output = $sqlt->translate;
+ if(!$output) {
+ warn("Failed to translate to $db, skipping. (" . $sqlt->error . ")");
+ next;
+ }
+ if(!open($file, ">$filename")) {
+ $self->throw_exception("Can't open $filename for writing ($!)");
+ next;
+ }
+ print $file $output;
+ close($file);
+
+ next unless ($preversion);
- my $difffile = $schema->ddl_filename($db, $dir, $version, $preversion);
- print STDERR "Diff: $difffile: $db, $dir, $version, $preversion \n";
- if(-e $difffile)
- {
- warn("$difffile already exists, skipping");
- next;
- }
+ require SQL::Translator::Diff;
- my $source_schema;
- {
- my $t = SQL::Translator->new($sqltargs);
- $t->debug( 0 );
- $t->trace( 0 );
- $t->parser( $db ) or die $t->error;
- $t = $self->configure_sqlt($t, $db);
- my $out = $t->translate( $prefilename ) or die $t->error;
- $source_schema = $t->schema;
- unless ( $source_schema->name ) {
- $source_schema->name( $prefilename );
- }
- }
+ my $prefilename = $schema->ddl_filename($db, $preversion, $dir);
+ if(!-e $prefilename) {
+ warn("No previous schema file found ($prefilename)");
+ next;
+ }
- # The "new" style of producers have sane normalization and can support
- # diffing a SQL file against a DBIC->SQLT schema. Old style ones don't
- # And we have to diff parsed SQL against parsed SQL.
- my $dest_schema = $sqlt_schema;
-
- unless ( "SQL::Translator::Producer::$db"->can('preprocess_schema') ) {
- my $t = SQL::Translator->new($sqltargs);
- $t->debug( 0 );
- $t->trace( 0 );
- $t->parser( $db ) or die $t->error;
- $t = $self->configure_sqlt($t, $db);
- my $out = $t->translate( $filename ) or die $t->error;
- $dest_schema = $t->schema;
- $dest_schema->name( $filename )
- unless $dest_schema->name;
+ my $difffile = $schema->ddl_filename($db, $version, $dir, $preversion);
+ if(-e $difffile) {
+ warn("Overwriting existing diff file - $difffile");
+ unlink($difffile);
+ }
+
+ my $source_schema;
+ {
+ my $t = SQL::Translator->new($sqltargs);
+ $t->debug( 0 );
+ $t->trace( 0 );
+ $t->parser( $db ) or die $t->error;
+ $t = $self->configure_sqlt($t, $db);
+ my $out = $t->translate( $prefilename ) or die $t->error;
+ $source_schema = $t->schema;
+ unless ( $source_schema->name ) {
+ $source_schema->name( $prefilename );
}
+ }
- my $diff = SQL::Translator::Diff::schema_diff($source_schema, $db,
- $dest_schema, $db,
- $sqltargs
- );
- if(!open $file, ">$difffile")
- {
- $self->throw_exception("Can't write to $difffile ($!)");
- next;
- }
- print $file $diff;
- close($file);
+ # The "new" style of producers have sane normalization and can support
+ # diffing a SQL file against a DBIC->SQLT schema. Old style ones don't
+ # And we have to diff parsed SQL against parsed SQL.
+ my $dest_schema = $sqlt_schema;
+
+ unless ( "SQL::Translator::Producer::$db"->can('preprocess_schema') ) {
+ my $t = SQL::Translator->new($sqltargs);
+ $t->debug( 0 );
+ $t->trace( 0 );
+ $t->parser( $db ) or die $t->error;
+ $t = $self->configure_sqlt($t, $db);
+ my $out = $t->translate( $filename ) or die $t->error;
+ $dest_schema = $t->schema;
+ $dest_schema->name( $filename )
+ unless $dest_schema->name;
}
+
+ my $diff = SQL::Translator::Diff::schema_diff($source_schema, $db,
+ $dest_schema, $db,
+ $sqltargs
+ );
+ if(!open $file, ">$difffile") {
+ $self->throw_exception("Can't write to $difffile ($!)");
+ next;
+ }
+ print $file $diff;
+ close($file);
}
}
my $tr = SQL::Translator->new(%$sqltargs);
SQL::Translator::Parser::DBIx::Class::parse( $tr, $schema );
return "SQL::Translator::Producer::${type}"->can('produce')->($tr);
-
- return;
-
}
sub deploy {