use autodie;
use Carp qw( carp croak );
-use Log::Contextual::WarnLogger;
-use Log::Contextual qw(:log :dlog), -default_logger => Log::Contextual::WarnLogger->new({
- env_prefix => 'DBICDH'
-});
-use Data::Dumper::Concise;
+use DBIx::Class::DeploymentHandler::Logger;
+use Log::Contextual qw(:log :dlog), -default_logger =>
+ DBIx::Class::DeploymentHandler::Logger->new({
+ env_prefix => 'DBICDH'
+ });
use Method::Signatures::Simple;
use Try::Tiny;
}
opendir my($dh), $dir;
- my %files = map { $_ => "$dir/$_" } grep { /\.(?:sql|pl)$/ && -f "$dir/$_" } readdir $dh;
+ my %files = map { $_ => "$dir/$_" } grep { /\.(?:sql|pl|sql-\w+)$/ && -f "$dir/$_" } readdir $dh;
closedir $dh;
if (-d $common) {
return catfile( $dirname, '001-auto.sql');
}
-method _run_sql($filename) {
+method _run_sql_array($sql) {
my $storage = $self->storage;
- log_debug { "[DBICDH] Running SQL from $filename" };
- my @sql = @{$self->_read_sql_file($filename)};
- my $sql .= join "\n", @sql;
- log_trace { "[DBICDH] Running SQL $sql" };
- foreach my $line (@sql) {
+ $sql = [grep {
+ $_ && # remove blank lines
+ !/^(BEGIN|BEGIN TRANSACTION|COMMIT)/ # strip txn's
+ } map {
+ s/^\s+//; s/\s+$//; # trim whitespace
+ join '', grep { !/^--/ } split /\n/ # remove comments
+ } @$sql];
+
+ Dlog_trace { "Running SQL $_" } $sql;
+ foreach my $line (@{$sql}) {
$storage->_query_start($line);
+ # the whole reason we do this is so that we can see the line that was run
try {
- # do a dbh_do cycle here, as we need some error checking in
- # place (even though we will ignore errors)
$storage->dbh_do (sub { $_[1]->do($line) });
}
catch {
- carp "$_ (running '${line}')"
+ die "$_ (running line '$line')"
}
$storage->_query_end($line);
}
- return $sql
+ return join "\n", @$sql
+}
+
+method _run_sql($filename) {
+ log_debug { "Running SQL from $filename" };
+ return $self->_run_sql_array($self->_read_sql_file($filename));
}
method _run_perl($filename) {
- log_debug { "[DBICDH] Running Perl from $filename" };
+ log_debug { "Running Perl from $filename" };
my $filedata = do { local( @ARGV, $/ ) = $filename; <> };
no warnings 'redefine';
my $fn = eval "$filedata";
use warnings;
- log_trace { '[DBICDH] Running Perl ' . Dumper($fn) };
+ Dlog_trace { "Running Perl $_" } $fn;
if ($@) {
carp "$filename failed to compile: $@";
carp "$filename should define an anonymouse sub that takes a schema but it didn't!";
}
}
-
-method _run_serialized_sql($filename, $type) {
+{
+ my $json;
+
+ method _run_serialized_sql($filename, $type) {
+ if ($type eq 'json') {
+ require JSON;
+ $json ||= JSON->new->pretty;
+ my @sql = @{$json->decode($filename)};
+ } else {
+ croak "A file ($filename) got to deploy that wasn't sql or perl!";
+ }
+ }
}
method _run_sql_and_perl($filenames) {
- my $storage = $self->storage;
my @files = @{$filenames};
my $guard = $self->schema->txn_scope_guard if $self->txn_wrap;
sub deploy {
my $self = shift;
my $version = (shift @_ || {})->{version} || $self->schema_version;
- log_info { "[DBICDH] deploying version $version" };
+ log_info { "deploying version $version" };
return $self->_run_sql_and_perl($self->_ddl_schema_consume_filenames(
$self->storage->sqlt_type,
my $self = shift;
my $args = shift;
my $version = $args->{version} || $self->schema_version;
- log_info { "[DBICDH] preinstalling version $version" };
+ log_info { "preinstalling version $version" };
my $storage_type = $args->{storage_type} || $self->storage->sqlt_type;
my @files = @{$self->_ddl_preinstall_consume_filenames(
my ($self, $args) = @_;
my $source = $args->{result_source};
my $version = $args->{version};
- log_info { '[DBICDH] installing_resultsource ' . $source->source_name . ", version $version" };
+ log_info { 'installing_resultsource ' . $source->source_name . ", version $version" };
my $rs_install_file =
$self->_resultsource_install_filename($source->source_name);
sub prepare_resultsource_install {
my $self = shift;
my $source = (shift @_)->{result_source};
- log_info { '[DBICDH] preparing install for resultsource ' . $source->source_name };
+ log_info { 'preparing install for resultsource ' . $source->source_name };
my $filename = $self->_resultsource_install_filename($source->source_name);
$self->_prepare_install({
}
sub prepare_deploy {
- log_info { '[DBICDH] preparing deploy' };
+ log_info { 'preparing deploy' };
my $self = shift;
$self->_prepare_install({}, '_ddl_schema_produce_filename');
}
sub prepare_upgrade {
my ($self, $args) = @_;
log_info {
- '[DBICDH] preparing upgrade ' .
- "from $args->{from_version} to $args->{to_version}"
+ "preparing upgrade from $args->{from_version} to $args->{to_version}"
};
$self->_prepare_changegrade(
$args->{from_version}, $args->{to_version}, $args->{version_set}, 'up'
sub prepare_downgrade {
my ($self, $args) = @_;
log_info {
- '[DBICDH] preparing downgrade ' .
- "from $args->{from_version} to $args->{to_version}"
+ "preparing downgrade from $args->{from_version} to $args->{to_version}"
};
$self->_prepare_changegrade(
$args->{from_version}, $args->{to_version}, $args->{version_set}, 'down'
sub downgrade_single_step {
my $self = shift;
my $version_set = (shift @_)->{version_set};
- log_info { qq([DBICDH] downgrade_single_step'ing ) . Dumper($version_set) };
+ Dlog_info { "downgrade_single_step'ing $_" } $version_set;
my $sql = $self->_run_sql_and_perl($self->_ddl_schema_down_consume_filenames(
$self->storage->sqlt_type,
sub upgrade_single_step {
my $self = shift;
my $version_set = (shift @_)->{version_set};
- log_info { qq([DBICDH] upgrade_single_step'ing ) . Dumper($version_set) };
+ Dlog_info { "upgrade_single_step'ing $_" } $version_set;
my $sql = $self->_run_sql_and_perl($self->_ddl_schema_up_consume_filenames(
$self->storage->sqlt_type,
=head1 DESCRIPTION
-This class is the meat of L<DBIx::Class::DeploymentHandler>. It takes care of
-generating sql files representing schemata as well as sql files to move from
-one version of a schema to the rest. One of the hallmark features of this
-class is that it allows for multiple sql files for deploy and upgrade, allowing
-developers to fine tune deployment. In addition it also allows for perl files
-to be run at any stage of the process.
+This class is the meat of L<DBIx::Class::DeploymentHandler>. It takes
+care of generating serialized sql files representing schemata as well
+as serialized sql files to move from one version of a schema to the next.
+One of the hallmark features of this class is that it allows for multiple sql
+files for deploy and upgrade, allowing developers to fine tune deployment.
+In addition it also allows for perl files to be run
+at any stage of the process.
For basic usage see L<DBIx::Class::DeploymentHandler::HandlesDeploy>. What's
documented here is extra fun stuff or private methods.
|- SQLite
| |- down
| | `- 2-1
- | | `- 001-auto.sql
+ | | `- 001-auto.sql-json
| |- schema
| | `- 1
- | | `- 001-auto.sql
+ | | `- 001-auto.sql-json
| `- up
| |- 1-2
- | | `- 001-auto.sql
+ | | `- 001-auto.sql-json
| `- 2-3
- | `- 001-auto.sql
+ | `- 001-auto.sql-json
|- _common
| |- down
| | `- 2-1
|- _generic
| |- down
| | `- 2-1
- | | `- 001-auto.sql
+ | | `- 001-auto.sql-json
| |- schema
| | `- 1
- | | `- 001-auto.sql
+ | | `- 001-auto.sql-json
| `- up
| `- 1-2
- | |- 001-auto.sql
+ | |- 001-auto.sql-json
| `- 002-create-stored-procedures.sql
`- MySQL
|- down
| `- 2-1
- | `- 001-auto.sql
+ | `- 001-auto.sql-json
|- preinstall
| `- 1
| |- 001-create_database.pl
| `- 002-create_users_and_permissions.pl
|- schema
| `- 1
- | `- 001-auto.sql
+ | `- 001-auto.sql-json
`- up
`- 1-2
- `- 001-auto.sql
+ `- 001-auto.sql-json
So basically, the code
$dm->deploy(1)
on an C<SQLite> database that would simply run
-C<$sql_migration_dir/SQLite/schema/1/001-auto.sql>. Next,
+C<$sql_migration_dir/SQLite/schema/1/001-auto.sql-json>. Next,
$dm->upgrade_single_step([1,2])
-would run C<$sql_migration_dir/SQLite/up/1-2/001-auto.sql> followed by
+would run C<$sql_migration_dir/SQLite/up/1-2/001-auto.sql-json> followed by
C<$sql_migration_dir/_common/up/1-2/002-generate-customers.pl>.
-Now, a C<.pl> file doesn't have to be in the C<_common> directory, but most of
-the time it probably should be, since perl scripts will mostly be database
-independent.
+C<.pl> files don't have to be in the C<_common> directory, but most of the time
+they should be, because perl scripts are generally database independent.
C<_generic> exists for when you for some reason are sure that your SQL is
generic enough to run on all databases. Good luck with that one.
of preinstall is to have it prompt for username and password, and then call the
appropriate C<< CREATE DATABASE >> commands etc.
+=head1 SERIALIZED SQL
+
+The SQL that this module generates and uses is serialized into an array of
+SQL statements. The reason being that some databases handle multiple
+statements in a single execution differently. Generally you do not need to
+worry about this as these scripts are generated for you. If you find that
+you are editing them on a regular basis something is wrong and you either need
+to submit a bug or consider writing extra serialized SQL or Perl scripts to run
+before or after the automatically generated script.
+
+B<NOTE:> Currently the SQL is serialized into JSON. I am willing to merge in
+patches that will allow more serialization formats if you want that feature,
+but if you do send me a patch for that realize that I do not want to add YAML
+support or whatever, I would rather add a generic method of adding any
+serialization format.
+
=head1 PERL SCRIPTS
A perl script for this tool is very simple. It merely needs to contain an