my $rdbms_ss_dbm = {
'DBD::DBM' => '0',
'MLDBM' => '0',
+ 'BerkeleyDB' => '0',
'SQL::Statement' => '1.33',
};
my $rdbms_ss_po = {
return $sql;
}
-=head2 LimitXY_NoBinds
-
- SELECT ... LIMIT $offset $limit
-
-Supported by any L<SQL::Statement> based DBD. (Implemented without
-bindvals, since L<SQL::Statement> doesn't like them in C<LIMIT>.)
-
-=cut
-sub _LimitXY_NoBinds {
- my ( $self, $sql, $rs_attrs, $rows, $offset ) = @_;
- $sql .= $self->_parse_rs_attrs( $rs_attrs ) . " LIMIT ";
- $sql .= "$offset, " if +$offset;
- $sql .= $rows;
- return $sql;
-}
-
=head2 RowNumberOver
SELECT * FROM (
# Disable it here
sub _lock_select () { '' };
-1;
+# SQL::Statement hates LIMIT ?, ?
+# Change it to a non-bind version
+sub _LimitXY {
+ my ( $self, $sql, $rs_attrs, $rows, $offset ) = @_;
+ $sql .= $self->_parse_rs_attrs( $rs_attrs ) . " LIMIT ";
+ $sql .= "$offset, " if +$offset;
+ $sql .= $rows;
+ return $sql;
+}
# SQL::Statement can't handle more than
# one ANSI join, so just convert them all
package DBIx::Class::Storage::DBI::AnyData;
-use base 'DBIx::Class::Storage::DBI::SQL::Statement';
+use base 'DBIx::Class::Storage::DBI::DBDFile';
use mro 'c3';
use namespace::clean;
=head1 DESCRIPTION
This subclass is essentially just a stub that uses the super class
-L<DBIx::Class::Storage::DBI::SQL::Statement>. Patches welcome if
+L<DBIx::Class::Storage::DBI::DBDFile>. Patches welcome if
anything specific to this driver is required.
=head1 AUTHOR
package DBIx::Class::Storage::DBI::CSV;
-use base 'DBIx::Class::Storage::DBI::SQL::Statement';
+use base 'DBIx::Class::Storage::DBI::DBDFile';
use mro 'c3';
use namespace::clean;
=head1 NAME
-DBIx::Class::Storage::DBI::SNMP - Support for CSV files via DBD::CSV
+DBIx::Class::Storage::DBI::CSV - Support for CSV files via DBD::CSV
=head1 SYNOPSIS
=head1 DESCRIPTION
This subclass is essentially just a stub that uses the super class
-L<DBIx::Class::Storage::DBI::SQL::Statement>. Patches welcome if
+L<DBIx::Class::Storage::DBI::DBDFile>. Patches welcome if
anything specific to this driver is required.
=head1 AUTHOR
-package DBIx::Class::Storage::DBI::SQL::Statement;
+package DBIx::Class::Storage::DBI::DBDFile;
use strict;
use base 'DBIx::Class::Storage::DBI';
use mro 'c3';
+use DBIx::Class::Carp;
use namespace::clean;
__PACKAGE__->sql_maker_class('DBIx::Class::SQLMaker::SQLStatement');
__PACKAGE__->sql_quote_char('"');
-__PACKAGE__->sql_limit_dialect('LimitXY_NoBinds');
+__PACKAGE__->sql_limit_dialect('LimitXY');
# Unsupported options
sub _determine_supports_insert_returning { 0 };
$self->disable_sth_caching(1);
}
-# No support for transactions; sorry...
+# No support for transactions; warn and continue
sub txn_begin {
- my $self = shift;
-
- # Only certain internal calls are allowed through, and even then, we are merely
- # ignoring the txn part
- my $callers = join "\n", map { (caller($_))[3] } (1 .. 4);
- return $self->_get_dbh
- if ($callers =~ /
- DBIx::Class::Storage::DBI::insert_bulk|
- DBIx::Class::Relationship::CascadeActions::update
- /x);
+ carp_once <<'EOF' unless $ENV{DBIC_DBDFILE_TXN_NOWARN};
+SQL::Statement-based drivers do not support transactions - proceeding at your own risk!
- $self->throw_exception('SQL::Statement-based drivers do not support transactions!');
+To turn off this warning, set the DBIC_DBDFILE_TXN_NOWARN environment variable.
+EOF
}
-sub svp_begin { shift->throw_exception('SQL::Statement-based drivers do not support savepoints!'); }
+sub txn_commit { 1; }
+sub txn_rollback { shift->throw_exception('Transaction protection was ignored and unable to rollback - your data is likely inconsistent!'); }
# Nor is there any last_insert_id support (unless the driver supports it directly)
sub _dbh_last_insert_id { shift->throw_exception('SQL::Statement-based drivers do not support AUTOINCREMENT keys! You will need to specify the PKs directly.'); }
-# leftovers to support txn_begin exceptions
-sub txn_commit { 1; }
-
1;
=head1 NAME
-DBIx::Class::Storage::DBI::SQL::Statement - Base Class for SQL::Statement- / DBI::DBD::SqlEngine-based
+DBIx::Class::Storage::DBI::DBDFile - Base Class for SQL::Statement- / DBI::DBD::SqlEngine-based
DBD support in DBIx::Class
=head1 SYNOPSIS
This is the base class for DBDs that use L<SQL::Statement> and/or
-L<DBI::DBD::SqlEngine|DBI::DBD::SqlEngine::Developers>. This class is
-used for:
+L<DBI::DBD::SqlEngine|DBI::DBD::SqlEngine::Developers>, ie: based off of
+L<DBD::File>. This class is used for:
=over
-=item L<DBD::Sys>
=item L<DBD::AnyData>
=item L<DBD::TreeData>
-=item L<DBD::SNMP>
-=item L<DBD::PO>
=item L<DBD::CSV>
=item L<DBD::DBM>
=back
=head2 Transactions
These drivers do not support transactions (and in fact, even the SQL syntax for
-them). Therefore, any attempts to use txn_* or svp_* methods will throw an
-exception.
-
-In a future release, they may be replaced with emulated functionality. (Then
-again, it would probably be added into L<SQL::Statement> instead.)
+them). Therefore, any attempts to use txn_* or svp_* methods will warn you once
+and silently ignore the transaction protection.
=head2 SELECT ... FOR UPDATE/SHARE
package DBIx::Class::Storage::DBI::DBM;
-use base 'DBIx::Class::Storage::DBI::SQL::Statement';
+use base 'DBIx::Class::Storage::DBI::DBDFile';
use mro 'c3';
use namespace::clean;
my ($self, $source, $to_insert) = @_;
my $col_infos = $source->columns_info;
-
+
foreach my $col (keys %$col_infos) {
# this will naturally fall into undef/NULL if default_value doesn't exist
$to_insert->{$col} = $col_infos->{$col}{default_value}
unless (exists $to_insert->{$col});
}
-
+
$self->next::method($source, $to_insert);
}
sub insert_bulk {
my ($self, $source, $cols, $data) = @_;
-
+
my $col_infos = $source->columns_info;
foreach my $col (keys %$col_infos) {
}
}
}
-
+
$self->next::method($source, $cols, $data);
}
-
+
1;
=head1 NAME
-DBIx::Class::Storage::DBI::SNMP - Support for DBM & MLDBM files via DBD::DBM
+DBIx::Class::Storage::DBI::DBM - Support for DBM & MLDBM files via DBD::DBM
=head1 SYNOPSIS
=head1 DESCRIPTION
This subclass is essentially just a stub that uses the super class
-L<DBIx::Class::Storage::DBI::SQL::Statement>.
+L<DBIx::Class::Storage::DBI::DBDFile>.
=head1 IMPLEMENTATION NOTES
+++ /dev/null
-package DBIx::Class::Storage::DBI::PO;
-
-use base 'DBIx::Class::Storage::DBI::SQL::Statement';
-use mro 'c3';
-use namespace::clean;
-
-1;
-
-=head1 NAME
-
-DBIx::Class::Storage::DBI::SNMP - Support for GNU gettext PO files via DBD::PO
-
-=head1 SYNOPSIS
-
-This subclass supports GNU gettext PO files via L<DBD::PO>.
-
-=head1 DESCRIPTION
-
-This subclass is essentially just a stub that uses the super class
-L<DBIx::Class::Storage::DBI::SQL::Statement>. Patches welcome if
-anything specific to this driver is required.
-
-=head1 AUTHOR
-
-See L<DBIx::Class/AUTHOR> and L<DBIx::Class/CONTRIBUTORS>.
-
-=head1 LICENSE
-
-You may distribute this code under the same terms as Perl itself.
-
-=cut
\ No newline at end of file
+++ /dev/null
-package DBIx::Class::Storage::DBI::SNMP;
-
-use base 'DBIx::Class::Storage::DBI::SQL::Statement';
-use mro 'c3';
-use namespace::clean;
-
-1;
-
-=head1 NAME
-
-DBIx::Class::Storage::DBI::SNMP - Support for SNMP data via DBD::SNMP
-
-=head1 SYNOPSIS
-
-This subclass supports SNMP data via L<DBD::SNMP>.
-
-=head1 DESCRIPTION
-
-This subclass is essentially just a stub that uses the super class
-L<DBIx::Class::Storage::DBI::SQL::Statement>. Patches welcome if
-anything specific to this driver is required.
-
-=head1 AUTHOR
-
-See L<DBIx::Class/AUTHOR> and L<DBIx::Class/CONTRIBUTORS>.
-
-=head1 LICENSE
-
-You may distribute this code under the same terms as Perl itself.
-
-=cut
\ No newline at end of file
package DBIx::Class::Storage::DBI::TreeData;
-use base 'DBIx::Class::Storage::DBI::SQL::Statement';
+use base 'DBIx::Class::Storage::DBI::DBDFile';
use mro 'c3';
use namespace::clean;
=head1 DESCRIPTION
This subclass is essentially just a stub that uses the super class
-L<DBIx::Class::Storage::DBI::SQL::Statement>. Patches welcome if
+L<DBIx::Class::Storage::DBI::DBDFile>. Patches welcome if
anything specific to this driver is required.
=head1 AUTHOR
require File::Spec;
-my $ddl_fn = File::Spec->catfile(qw(t lib sqlite.sql));
+my $ddl_fn = {
+ sqlite => File::Spec->catfile(qw(t lib sqlite.sql)),
+ dbdfile => File::Spec->catfile(qw(t lib dbdfile.sql)),
+};
# If the author doesn't have the prereqs, we will end up obliterating
# the ddl file, and all tests will fail, therefore don't do anything
# errors will not be trapped
require DBIx::Class::Optional::Dependencies;
if ( DBIx::Class::Optional::Dependencies->req_ok_for ('deploy') ) {
- print "Regenerating t/lib/sqlite.sql\n";
- if (my $out = ` "$^X" -Ilib maint/gen_schema `) {
- open (my $fh, '>:unix', $ddl_fn) or die "Unable to open $ddl_fn: $!";
- print $fh $out;
- close $fh;
+ foreach my $type (qw(sqlite dbdfile)) {
+ print "Regenerating t/lib/$type.sql\n";
+ if (my $out = ` "$^X" -Ilib maint/gen_schema_$type `) {
+ open (my $fh, '>:unix', $ddl_fn->{$type}) or die "Unable to open $ddl_fn->{$type}: $!";
+ print $fh $out;
+ close $fh;
- # if we don't do it some git tools (e.g. gitk) get confused that the
- # ddl file is modified, when it clearly isn't
- system('git status --porcelain >' . File::Spec->devnull);
+ # if we don't do it some git tools (e.g. gitk) get confused that the
+ # ddl file is modified, when it clearly isn't
+ system('git status --porcelain >' . File::Spec->devnull);
+ }
}
}
-postamble <<"EOP";
+my $postamble = <<"EOP";
clonedir_generate_files : dbic_clonedir_regen_test_ddl
dbic_clonedir_regen_test_ddl :
-\t\$(ABSPERLRUN) -Ilib -- maint/gen_schema > @{[ $mm_proto->quote_literal($ddl_fn) ]}
-@{[ $crlf_fixup->($ddl_fn) ]}
EOP
+foreach my $type (qw(sqlite dbdfile)) {
+ $postamble .= <<"EOP";
+\t\$(ABSPERLRUN) -Ilib -- maint/gen_schema_$type > @{[ $mm_proto->quote_literal($ddl_fn->{$type}) ]}
+@{[ $crlf_fixup->($ddl_fn->{$type}) ]}
+EOP
+}
+
+postamble $postamble;
# keep the Makefile.PL eval happy
1;
--- /dev/null
+#!/usr/bin/env perl
+
+use strict;
+use warnings;
+use lib qw(lib t/lib);
+
+use DBICTest::Schema;
+use SQL::Translator;
+
+my $schema = DBICTest::Schema->connect;
+my $sql = scalar ($schema->storage->deployment_statements(
+ $schema,
+ 'SQLite', # close enough to SQL::Statement's format
+ undef,
+ undef,
+ {
+ parser_args => {
+ add_fk_index => 0, # doesn't use index statements, anyway
+ sources => [
+ grep { !/^(?:
+ # unsupported data type tests
+ BindType|Money|
+ # DBD::DBM doesn't support single-column tables
+ Artwork|TimestampPrimaryKey|
+ # No support for views
+ Year2000CDs
+ )$/x } ($schema->sources)
+ ],
+ },
+ producer_args => { no_transaction => 1 },
+ quote_identifiers => 0,
+ no_comments => 1,
+ },
+));
+
+# Clean up to remove non-supporting elements
+my $field_type_lookahead = qr/(?=(?: NOT NULL)?(?:,|\n))/;
+
+$sql =~ s/^\s+FOREIGN KEY.+//gm; # FKs
+$sql =~ s/^CREATE(?: UNIQUE)? INDEX.+\n\n//gm; # indexes
+$sql =~ s/ DEFAULT .+?(?=,?\n)//g; # default values
+$sql =~ s/^(\s+)text/$1texta/gm; # 'text' is a SQL reserved word
+no warnings 'uninitialized';
+$sql =~ s/(?:date(?:time)?|time(?:stamp)?)$field_type_lookahead/varchar(20)/g; # date/timestamp fields (only support in some S:S DBDs)
+$sql =~ s/character(\(\d+\))?$field_type_lookahead/char$1/g; # character --> char
+
+$sql =~ s/,(?=\n\);)//g; # dangling comma cleanup
+
+print $sql;
--- /dev/null
+use strict;
+use warnings;
+
+use Test::More;
+use Test::Exception;
+
+use lib qw(t/lib);
+use DBICTest;
+use DBIC::SqlMakerTest;
+use DBIx::Class::Optional::Dependencies ();
+
+use Path::Class;
+
+plan skip_all => 'Set $ENV{DBICTEST_DBD_ANYDATA} = 1 to run this test'
+ unless ($ENV{DBICTEST_DBD_ANYDATA});
+
+plan skip_all =>
+ 'Test needs ' . DBIx::Class::Optional::Dependencies->req_missing_for ('test_rdbms_ss_anydata')
+ unless DBIx::Class::Optional::Dependencies->req_ok_for ('test_rdbms_ss_anydata');
+
+# just in case they try to use these conflicting versions
+require DBD::AnyData;
+plan skip_all =>
+ 'Incompatible versions of DBD::AnyData and DBI'
+ if ($DBD::AnyData::VERSION <= 0.110 && $DBI::VERSION >= 1.623);
+
+my ($dsn, $opts) = ('dbi:AnyData:', {});
+
+my $schema = DBICTest::Schema->connect($dsn, '', '', $opts);
+is ($schema->storage->sqlt_type, 'AnyData', 'sqlt_type correct pre-connection');
+isa_ok($schema->storage->sql_maker, 'DBIx::Class::SQLMaker::SQLStatement');
+
+# Custom deployment
+my $dbh = $schema->storage->dbh;
+my @cmds = split /\s*\;\s*/, scalar file(qw/t lib dbdfile.sql/)->slurp;
+$dbh->do($_) for @cmds;
+
+### S:S doesn't have any sort of AUTOINCREMENT support, so IDs will have to be generated by hand ###
+
+# test primary key handling
+my $new = $schema->resultset('Artist')->create({
+ artistid => 1,
+ name => 'foo'
+});
+ok($new->artistid, "Create worked");
+
+# test LIMIT support
+for (1..6) {
+ $schema->resultset('Artist')->create({
+ artistid => $_+1,
+ name => 'Artist '.$_,
+ });
+}
+my $it = $schema->resultset('Artist')->search( {}, {
+ rows => 3,
+ offset => 2,
+ order_by => 'artistid'
+});
+is( $it->count, 3, "LIMIT count ok" ); # ask for 3 rows out of 7 artists
+is( $it->next->name, "Artist 2", "iterator->next ok" );
+$it->next;
+$it->next;
+is( $it->next, undef, "next past end of resultset ok" );
+
+# Limit with select-lock (which is silently thrown away)
+lives_ok {
+ isa_ok (
+ $schema->resultset('Artist')->find({artistid => 1}, {for => 'update', rows => 1}),
+ 'DBICTest::Schema::Artist',
+ );
+} 'Limited FOR UPDATE select works';
+
+# shared-lock (which is silently thrown away)
+lives_ok {
+ isa_ok (
+ $schema->resultset('Artist')->find({artistid => 1}, {for => 'shared'}),
+ 'DBICTest::Schema::Artist',
+ );
+} 'LOCK IN SHARE MODE select works';
+
+# (No sizes with DBD::AnyData and all is_nullable)
+my $test_type_info = {
+ 'artistid' => {
+ 'data_type' => 'VARCHAR',
+ 'is_nullable' => 1,
+ 'size' => 0,
+ },
+ 'name' => {
+ 'data_type' => 'VARCHAR',
+ 'is_nullable' => 1,
+ 'size' => 0,
+ },
+ 'rank' => {
+ 'data_type' => 'VARCHAR',
+ 'is_nullable' => 1,
+ 'size' => 0,
+ },
+ 'charfield' => {
+ 'data_type' => 'VARCHAR',
+ 'is_nullable' => 1,
+ 'size' => 0,
+ },
+};
+
+$ENV{DBIC_DBDFILE_TXN_NOWARN} = 1;
+
+$schema->populate ('Owners', [
+ [qw/id name /],
+ [qw/1 wiggle/],
+ [qw/2 woggle/],
+ [qw/3 boggle/],
+]);
+
+$schema->populate ('BooksInLibrary', [
+ [qw/id source owner title /],
+ [qw/1 Library 1 secrets1/],
+ [qw/2 Eatery 1 secrets2/],
+ [qw/3 Library 2 secrets3/],
+]);
+
+{
+ # try a ->has_many direction (due to a 'multi' accessor the select/group_by group is collapsed)
+ my $owners = $schema->resultset('Owners')->search(
+ { 'books.id' => { '!=', undef }},
+ { prefetch => 'books', cache => 1 }
+ );
+ is($owners->all, 2, 'Prefetched grouped search returns correct number of rows');
+
+ # only works here because of the full cache
+ # S:S would croak on a subselect otherwise
+ is($owners->count, 2, 'Prefetched grouped search returns correct count');
+
+ # try a ->belongs_to direction (no select collapse)
+ my $books = $schema->resultset('BooksInLibrary')->search (
+ { 'owner.name' => 'wiggle' },
+ { prefetch => 'owner', distinct => 1 }
+ );
+
+ {
+    local $TODO = 'populate does not subtract the non-Library INSERTs here...';
+    is($books->all, 1, 'Prefetched grouped search returns correct number of rows');
+    is($books->count, 1, 'Prefetched grouped search returns correct count');
+ }
+}
+
+my $type_info = $schema->storage->columns_info_for('artist');
+is_deeply($type_info, $test_type_info, 'columns_info_for - column data types');
+
+my $cd = $schema->resultset('CD')->create({ cdid => 1 });
+my $producer = $schema->resultset('Producer')->create({ producerid => 1 });
+lives_ok { $cd->set_producers ([ $producer ]) } 'set_relationship doesnt die';
+
+{
+ my $artist = $schema->resultset('Artist')->next;
+ my $cd = $schema->resultset('CD')->next;
+ $cd->set_from_related('artist', $artist);
+ $cd->update;
+
+ my $rs = $schema->resultset('CD')->search ({}, { prefetch => 'artist' });
+
+ lives_ok sub {
+ my $cd = $rs->next;
+ is ($cd->artist->name, $artist->name, 'Prefetched artist');
+ }, 'join does not throw';
+
+ local $schema->storage->sql_maker->{_default_jointype} = 'inner';
+ is_same_sql_bind (
+ $rs->as_query,
+ '(
+ SELECT
+ me.cdid, me.artist, me.title, me.year, me.genreid, me.single_track,
+ artist.artistid, artist.name, artist.rank, artist.charfield
+ FROM cd me
+ INNER JOIN artist artist ON artist.artistid = me.artist
+ )',
+ [],
+    'overridden default join type works',
+ );
+}
+
+{
+ # Test support for straight joins
+ my $cdsrc = $schema->source('CD');
+ my $artrel_info = $cdsrc->relationship_info ('artist');
+ $cdsrc->add_relationship(
+ 'straight_artist',
+ $artrel_info->{class},
+ $artrel_info->{cond},
+ { %{$artrel_info->{attrs}}, join_type => 'straight' },
+ );
+ is_same_sql_bind (
+ $cdsrc->resultset->search({}, { prefetch => 'straight_artist' })->as_query,
+ '(
+ SELECT
+ me.cdid, me.artist, me.title, me.year, me.genreid, me.single_track,
+ straight_artist.artistid, straight_artist.name, straight_artist.rank, straight_artist.charfield
+ FROM cd me
+ STRAIGHT JOIN artist straight_artist ON straight_artist.artistid = me.artist
+ )',
+ [],
+ 'straight joins correctly supported'
+ );
+}
+
+# Can we properly deal with the null search problem?
+{
+ $schema->resultset('Artist')->create({ artistid => 2222, name => 'last created artist' });
+
+ ok my $artist1_rs = $schema->resultset('Artist')->search({artistid=>6666})
+ => 'Created an artist resultset of 6666';
+
+ is $artist1_rs->count, 0
+ => 'Got no returned rows';
+
+ ok my $artist2_rs = $schema->resultset('Artist')->search({artistid=>undef})
+ => 'Created an artist resultset of undef';
+
+ is $artist2_rs->count, 0
+ => 'got no rows';
+
+ my $artist = $artist2_rs->single;
+
+ is $artist => undef
+ => 'Nothing Found!';
+}
+
+{
+ my $cds_per_year = {
+ 2001 => 2,
+ 2002 => 1,
+ 2005 => 3,
+ };
+
+ # kill the scalar ref here
+ $schema->source('CD')->name('cd');
+
+ my $rs = $schema->resultset('CD');
+ $rs->delete;
+ my $cdid = 1;
+ foreach my $y (keys %$cds_per_year) {
+ foreach my $c (1 .. $cds_per_year->{$y} ) {
+ $rs->create({ cdid => $cdid++, title => "CD $y-$c", artist => 1, year => "$y-01-01" });
+ }
+ }
+
+ is ($rs->count, 6, 'CDs created successfully');
+}
+
+done_testing;
my ($dsn, $opts) = ('dbi:CSV:', {
f_schema => undef,
f_dir => "$db_dir",
- f_ext => ".csv/r",
+ f_ext => ".csv/r", # /r is a flag (see https://metacpan.org/module/DBD::File#f_ext)
f_lock => 0,
f_encoding => "utf8",
# Custom deployment
my $dbh = $schema->storage->dbh;
-my @cmds = split /\s*\;\s*/, scalar file(qw/t lib test_deploy DBICTest-Schema-1.x-SQL-Statement.sql/)->slurp;
+my @cmds = split /\s*\;\s*/, scalar file(qw/t lib dbdfile.sql/)->slurp;
$dbh->do($_) for @cmds;
### S:S doesn't have any sort of AUTOINCREMENT support, so IDs will have to be generated by hand ###
},
};
+$ENV{DBIC_DBDFILE_TXN_NOWARN} = 1;
+
$schema->populate ('Owners', [
[qw/id name /],
[qw/1 wiggle/],
# Custom deployment
my $dbh = $schema->storage->dbh;
-my @cmds = split /\s*\;\s*/, scalar file(qw/t lib test_deploy DBICTest-Schema-1.x-SQL-Statement.sql/)->slurp;
+my @cmds = split /\s*\;\s*/, scalar file(qw/t lib dbdfile.sql/)->slurp;
$dbh->do($_) for @cmds;
### S:S doesn't have any sort of AUTOINCREMENT support, so IDs will have to be generated by hand ###
},
};
+$ENV{DBIC_DBDFILE_TXN_NOWARN} = 1;
+
$schema->populate ('Owners', [
[qw/id name /],
[qw/1 wiggle/],
hello integer NOT NULL,
goodbye integer NOT NULL,
sensors char(10) NOT NULL,
- read_count integer,
+ read_count int,
PRIMARY KEY (foo, bar, hello, goodbye)
);
name varchar(100) NOT NULL
);
+CREATE TABLE images (
+ id INTEGER PRIMARY KEY NOT NULL,
+ artwork_id integer NOT NULL,
+ name varchar(100) NOT NULL,
+ data blob
+);
+
CREATE TABLE link (
id INTEGER PRIMARY KEY NOT NULL,
url varchar(100),
CREATE TABLE treelike (
id INTEGER PRIMARY KEY NOT NULL,
parent integer,
- name varchar(100) NOT NULL
+ name varchar(100) NOT NULL,
+
);
CREATE TABLE twokeytreelike (
parent1 integer NOT NULL,
parent2 integer NOT NULL,
name varchar(100) NOT NULL,
- PRIMARY KEY (id1, id2)
+ PRIMARY KEY (id1, id2),
+
);
CREATE TABLE typed_object (
CREATE TABLE artist_undirected_map (
id1 integer NOT NULL,
id2 integer NOT NULL,
- PRIMARY KEY (id1, id2)
+ PRIMARY KEY (id1, id2),
+
+
+);
+
+CREATE TABLE artwork_to_artist (
+ artwork_cd_id integer NOT NULL,
+ artist_id integer NOT NULL,
+ PRIMARY KEY (artwork_cd_id, artist_id),
+
);
CREATE TABLE bookmark (
id INTEGER PRIMARY KEY NOT NULL,
- link integer
+ link integer,
+
);
CREATE TABLE books (
source varchar(100) NOT NULL,
owner integer NOT NULL,
title varchar(100) NOT NULL,
- price integer
+ price integer,
+
);
CREATE TABLE employee (
group_id_2 integer,
group_id_3 integer,
name varchar(100),
- encoded integer
+ encoded integer,
+
);
CREATE TABLE forceforeign (
artist INTEGER PRIMARY KEY NOT NULL,
- cd integer NOT NULL
+ cd integer NOT NULL,
+
);
CREATE TABLE self_ref_alias (
self_ref integer NOT NULL,
alias integer NOT NULL,
- PRIMARY KEY (self_ref, alias)
+ PRIMARY KEY (self_ref, alias),
+
+
);
CREATE TABLE track (
position int NOT NULL,
title varchar(100) NOT NULL,
last_updated_on varchar(20),
- last_updated_at varchar(20)
+ last_updated_at varchar(20),
+
);
CREATE TABLE cd (
title varchar(100) NOT NULL,
year varchar(100) NOT NULL,
genreid integer,
- single_track integer
+ single_track integer,
+
+
+
);
CREATE TABLE collection_object (
collection integer NOT NULL,
object integer NOT NULL,
- PRIMARY KEY (collection, object)
+ PRIMARY KEY (collection, object),
+
+
);
CREATE TABLE lyrics (
lyric_id INTEGER PRIMARY KEY NOT NULL,
- track_id integer NOT NULL
+ track_id integer NOT NULL,
+
);
CREATE TABLE liner_notes (
liner_id INTEGER PRIMARY KEY NOT NULL,
- notes varchar(100) NOT NULL
+ notes varchar(100) NOT NULL,
+
);
CREATE TABLE lyric_versions (
id INTEGER PRIMARY KEY NOT NULL,
lyric_id integer NOT NULL,
- ltext varchar(100) NOT NULL
+ texta varchar(100) NOT NULL,
+
);
CREATE TABLE tags (
tagid INTEGER PRIMARY KEY NOT NULL,
cd integer NOT NULL,
- tag varchar(100) NOT NULL
+ tag varchar(100) NOT NULL,
+
);
CREATE TABLE cd_to_producer (
cd integer NOT NULL,
producer integer NOT NULL,
attribute integer,
- PRIMARY KEY (cd, producer)
-);
+ PRIMARY KEY (cd, producer),
+
-CREATE TABLE images (
- id INTEGER PRIMARY KEY NOT NULL,
- artwork_id integer NOT NULL,
- name varchar(100) NOT NULL,
- data blob
);
CREATE TABLE twokeys (
artist integer NOT NULL,
cd integer NOT NULL,
- PRIMARY KEY (artist, cd)
-);
+ PRIMARY KEY (artist, cd),
+
-CREATE TABLE artwork_to_artist (
- artwork_cd_id integer NOT NULL,
- artist_id integer NOT NULL,
- PRIMARY KEY (artwork_cd_id, artist_id)
);
CREATE TABLE fourkeys_to_twokeys (
t_cd integer NOT NULL,
autopilot char NOT NULL,
pilot_sequence integer,
- PRIMARY KEY (f_foo, f_bar, f_hello, f_goodbye, t_artist, t_cd)
+ PRIMARY KEY (f_foo, f_bar, f_hello, f_goodbye, t_artist, t_cd),
+
+
);