- create an SQL file, via SQLT, for the current schema
- passing prev. version + version will create an sqlt-diff'ed upgrade file, such as
- $preversion->$currentversion-$dbtype.sql, which contains ALTER foo statements.
+DONE
2) Make deploy/deploy_statements able to load from the appropriate file, for the current DB, or on the fly? - Compare against current schema version..
3) Add an on_connect_cb (callback) thingy to storage.
+DONE
4) create a component to deploy version/updates:
+ - is injected into the Schemas base classes
- it hooks itself into on_connect_cb ?
- when run it:
- Attempts or prompts a backup of the database. (commands for these per-rdbms can be stored in storage::dbi::<dbtype> ?)
push(@pre_objects, $pre_source->result_class->inflate_result(
$pre_source, @{$pre_rec}));
}
- $new->related_resultset($pre)->set_cache(\@pre_objects);
+ $new->related_resultset($pre)->set_cache('all', \@pre_objects);
} elsif (defined $pre_val->[0]) {
my $fetched;
unless ($pre_source->primary_columns == grep { exists $pre_val->[0]{$_}
use Carp::Clan qw/^DBIx::Class/;
use Scalar::Util qw/weaken/;
+use File::Spec;
use base qw/DBIx::Class/;
my $storage = $storage_class->new;
$storage->connect_info(\@info);
$self->storage($storage);
+ $self->on_connect() if($self->can('on_connect'));
return $self;
}
=over 4
-=item Arguments: \@databases, $version, $directory, $sqlt_args
+=item Arguments: \@databases, $version, $directory, $preversion, $sqlt_args
=back
Creates an SQL file based on the Schema, for each of the specified
-database types, in the given directory.
+database types, in the given directory. Given a previous version number,
+this will also create a file containing the ALTER TABLE statements to
+transform the previous schema into the current one. Note that these
+statements may contain DROP TABLE or DROP COLUMN statements that can
+potentially destroy data.
+The file names are created using the C<ddl_filename> method below, please
+override this method in your schema if you would like a different file
+name format. For the ALTER file, the same format is used, replacing
+$version in the name with "$preversion-$version".
+
+If no arguments are passed, then the following default values are used:
+
+=over 4
+
+=item databases - ['MySQL', 'SQLite', 'PostgreSQL']
+
+=item version - $schema->VERSION
+
+=item directory - './'
+
+=item preversion - <none>
+
+=back
Note that this feature is currently EXPERIMENTAL and may not work correctly
across all databases, or fully handle complex relationships.
+WARNING: Please check all SQL files created, before applying them.
+
=cut
sub create_ddl_dir
$self->storage->create_ddl_dir($self, @_);
}
+=head2 ddl_filename
+
+=over 4
+
+=item Arguments: $directory, $database-type, $version
+
+=back
+
+This method is called by C<create_ddl_dir> to compose a file name out of
+the supplied directory, database type and version number. The default file
+name format is: "$filename-$version-$type.sql".
+
+You may override this method in your schema if you wish to use a different
+format.
+
+=cut
+
sub ddl_filename
{
- my ($self, $type, $dir, $version) = @_;
+ my ($self, $dir, $type, $version) = @_;
my $filename = ref($self);
$filename =~ s/^.*:://;
- $filename = "$dir$filename-$version-$type.sql";
+  $filename = File::Spec->catfile($dir, "$filename-$version-$type.sql");
return $filename;
}
use SQL::Abstract::Limit;
use DBIx::Class::Storage::DBI::Cursor;
use IO::File;
+use Storable 'dclone';
use Carp::Clan qw/DBIx::Class/;
BEGIN {
sub create_ddl_dir
{
- my ($self, $schema, $databases, $version, $dir, $sqltargs) = @_;
+ my ($self, $schema, $databases, $version, $dir, $preversion, $sqltargs) = @_;
if(!$dir || !-d $dir)
{
$sqlt->producer($db);
my $file;
- my $filename = $schema->ddl_filename($db, $dir, $version);
+ my $filename = $schema->ddl_filename($dir, $db, $version);
if(-e $filename)
{
- $self->throw_exception("$filename already exists, skipping $db");
+ warn("$filename already exists, skipping $db");
next;
}
open($file, ">$filename")
- or $self->throw_exception("Can't open $filename for writing ($!)");
+ or warn("Can't open $filename for writing ($!)"), next;
my $output = $sqlt->translate;
-#use Data::Dumper;
-# print join(":", keys %{$schema->source_registrations});
-# print Dumper($sqlt->schema);
if(!$output)
{
- $self->throw_exception("Failed to translate to $db. (" . $sqlt->error . ")");
+ warn("Failed to translate to $db, skipping. (" . $sqlt->error . ")");
next;
}
print $file $output;
close($file);
+
+ if($preversion)
+ {
+ eval "use SQL::Translator::Diff";
+ warn("Can't diff versions without SQL::Translator::Diff: $@"), next if $@;
+
+ my $prefilename = $schema->ddl_filename($dir, $db, $preversion);
+ print "Previous version $prefilename\n";
+ if(!-e $prefilename)
+ {
+ warn("No previous schema file found ($prefilename)");
+ next;
+ }
+ #### We need to reparse the SQLite file we just wrote, so that
+      ## Diff doesn't get all confoosed, and Diff is *very* confused.
+ ## FIXME: rip Diff to pieces!
+# my $target_schema = $sqlt->schema;
+# unless ( $target_schema->name ) {
+# $target_schema->name( $filename );
+# }
+ my $sqlt = SQL::Translator->new();
+ $sqlt->parser("SQL::Translator::Parser::$db");
+ $sqlt->filename($filename);
+ $sqlt->translate() or warn("Failed to parse $filename as $db, (" .
+ $sqlt->error . ")"), next;
+ my $target_schema = $sqlt->schema;
+ unless ( $target_schema->name ) {
+ $target_schema->name( $filename );
+ }
+ ## end FIXME
+
+ my $psqlt = SQL::Translator->new();
+ $psqlt->parser("SQL::Translator::Parser::$db");
+ $psqlt->filename($prefilename);
+      $psqlt->translate() or warn("Failed to parse $prefilename as $db, (" .
+                                 $psqlt->error . ")"), next ;
+ my $source_schema = $psqlt->schema;
+ unless ( $source_schema->name ) {
+ $source_schema->name( $prefilename );
+ }
+
+ my $diff = SQL::Translator::Diff::schema_diff($source_schema, $db,
+ $target_schema, $db,
+ {}
+ );
+ my $difffile = $filename;
+ $difffile =~ s/$version/${preversion}-${version}/;
+ if(-e $difffile)
+ {
+ warn("$difffile already exists, skipping");
+ next;
+ }
+ open $file, ">$difffile" or
+ warn("Can't write to $difffile ($!)"), next;
+ print $file $diff;
+ close($file);
+ }
}
}
return "SQL::Translator::Producer::${type}"->can('produce')->($tr);
}
- my $filename = $schema->ddl_filename($type, $dir, $version);
+ my $filename = $schema->ddl_filename($dir, $type, $version);
if(!-f $filename)
{
# $schema->create_ddl_dir([ $type ], $version, $dir, $sqltargs);
Andy Grundman <andy@hybridized.org>
+Jess Robinson <castaway@desert-island.demon.co.uk>
+
=head1 LICENSE
You may distribute this code under the same terms as Perl itself.
if ($colinfo{is_nullable}) {
$colinfo{default} = '' unless exists $colinfo{default};
}
+ warn "No length set for char field in " . $colinfo{name}
+ if(!$colinfo{size} && $colinfo{data_type} =~ /char/);
my $f = $table->add_field(%colinfo) || die $table->error;
}
$table->primary_key($source->primary_columns);
+$|=1;
sub run_tests {
my $schema = shift;
eval "use DBD::SQLite";
plan skip_all => 'needs DBD::SQLite for testing' if $@;
-plan tests => 23;
+plan tests => 25;
my $rs = $schema->resultset("Artist")->search(
{ artistid => 1 }
my $artist = $rs->first;
-is( scalar @{ $rs->get_cache }, 0, 'cache is not populated without cache attribute' );
+is( scalar @{ $rs->get_cache('all') }, 0, 'cache is not populated without cache attribute' );
$rs = $schema->resultset('Artist')->search( undef, { cache => 1 } );
my $artists = [ $rs->all ];
-is( scalar @{$rs->get_cache}, 3, 'all() populates cache for search with cache attribute' );
+is( scalar @{$rs->get_cache('all')}, 3, 'all() populates cache for search with cache attribute' );
$rs->clear_cache;
-is( scalar @{$rs->get_cache}, 0, 'clear_cache is functional' );
+is( scalar @{$rs->get_cache('all')}, 0, 'clear_cache is functional' );
$rs->next;
-is( scalar @{$rs->get_cache}, 3, 'next() populates cache for search with cache attribute' );
+is( scalar @{$rs->get_cache('all')}, 3, 'next() populates cache for search with cache attribute' );
pop( @$artists );
-$rs->set_cache( $artists );
+$rs->set_cache('all', $artists );
-is( scalar @{$rs->get_cache}, 2, 'set_cache() is functional' );
+is( scalar @{$rs->get_cache('all')}, 2, 'set_cache() is functional' );
$cd = $schema->resultset('CD')->find(1);
$rs->clear_cache;
eval {
- $rs->set_cache( [ $cd ] );
+ $rs->set_cache('all', [ $cd ] );
};
-is( scalar @{$rs->get_cache}, 0, 'set_cache() only accepts objects of correct type for the resultset' );
+is( scalar @{$rs->get_cache('all')}, 0, 'set_cache() only accepts objects of correct type for the resultset' );
$queries = 0;
$schema->storage->debug(1);
is( ref $artist->{related_resultsets}->{cds}, 'DBIx::Class::ResultSet', 'artist has a related_resultset for cds' );
# check if $artist->cds->get_cache is populated
-is( scalar @{$artist->cds->get_cache}, 3, 'cache for artist->cds contains correct number of records');
+is( scalar @{$artist->cds->get_cache('all')}, 3, 'cache for artist->cds contains correct number of records');
# ensure that $artist->cds returns correct number of objects
is( scalar ($artist->cds), 3, 'artist->cds returns correct number of objects' );
$schema->storage->debug(0);
+
+# start test for prefetch SELECT count
+unlink 't/var/dbic.trace' if -e 't/var/dbic.trace';
+DBI->trace(1, 't/var/dbic.trace');
+
+$rs = $schema->resultset("Artist");
+
+$rs->clear_cache;
+
+$rs->find(1, { cache => 1 });
+
+is (scalar keys %{$rs->get_cache('find')}, 1, 'find created one cached value');
+
+$rs->find(1, {cache => 1});
+
+# count the SELECTs
+DBI->trace(0, undef);
+$selects = 0;
+$trace = IO::File->new('t/var/dbic.trace', '<')
+ or die "Unable to read trace file";
+while (<$trace>) {
+ $selects++ if /SELECT/;
}
+$trace->close;
+# unlink 't/var/dbic.trace';
+is( $selects, 1, 'only one select statement on find' );
+
+}
1;