my $rdbms_firebird_odbc = {
'DBD::ODBC' => '0',
};
+# Optional-dependency groups for the SQL::Statement-based DBDs.
+# Each hashref maps "module => minimum version" ('0' means any version);
+# SQL::Statement 1.33 is the floor shared by all of these drivers.
+my $rdbms_ss_csv = {
+ 'DBD::CSV' => '0',
+ 'SQL::Statement' => '1.33',
+};
+my $rdbms_ss_dbm = {
+ 'DBD::DBM' => '0',
+ 'MLDBM' => '0',
+ 'SQL::Statement' => '1.33',
+};
+my $rdbms_ss_po = {
+ 'DBD::PO' => '0',
+ 'SQL::Statement' => '1.33',
+};
+my $rdbms_ss_sys = {
+ 'DBD::Sys' => '0',
+ 'SQL::Statement' => '1.33',
+};
+my $rdbms_ss_anydata = {
+ ### XXX: DBD::AnyData 0.110 and DBI 1.623 conflict! ###
+ 'DBD::AnyData' => '0',
+ 'SQL::Statement' => '1.33',
+};
+my $rdbms_ss_treedata = {
+ ### XXX: DBD::AnyData 0.110 and DBI 1.623 conflict! ###
+ # NOTE(review): the AnyData warning above looks copied from the group
+ # right above -- presumably DBD::TreeData drives DBD::AnyData underneath,
+ # which would make it apply here too; TODO confirm.
+ 'DBD::TreeData' => '0',
+ 'SQL::Statement' => '1.33',
+};
my $reqs = {
replicated => {
},
},
+ rdbms_ss_csv => {
+ req => {
+ %$rdbms_ss_csv,
+ },
+ pod => {
+ title => 'CSV support via DBD::CSV',
+ desc => 'Modules required to connect to CSV files via DBD::CSV',
+ },
+ },
+
+ rdbms_ss_dbm => {
+ req => {
+ %$rdbms_ss_dbm,
+ },
+ pod => {
+ title => 'ML/DBM support via DBD::DBM',
+ desc => 'Modules required to connect to DBM & MLDBM files via DBD::DBM',
+ },
+ },
+
+ rdbms_ss_po => {
+ req => {
+ %$rdbms_ss_po,
+ },
+ pod => {
+ title => 'PO support via DBD::PO',
+ desc => 'Modules required to connect to PO files via DBD::PO',
+ },
+ },
+
+ rdbms_ss_sys => {
+ req => {
+ %$rdbms_ss_sys,
+ },
+ pod => {
+ title => 'System tables interface support via DBD::Sys',
+ desc => 'Modules required to connect to system tables via DBD::Sys',
+ },
+ },
+
+ rdbms_ss_anydata => {
+ req => {
+ %$rdbms_ss_anydata,
+ },
+ pod => {
+ title => 'Abstract flat data support via DBD::AnyData',
+ desc => 'Modules required to connect to abstract flat data via DBD::AnyData',
+ },
+ },
+
+ rdbms_ss_treedata => {
+ req => {
+ %$rdbms_ss_treedata,
+ },
+ pod => {
+ title => 'Abstract tree data support via DBD::TreeData',
+ desc => 'Modules required to connect to abstract tree data via DBD::TreeData',
+ },
+ },
+
# the order does matter because the rdbms support group might require
# a different version that the test group
test_rdbms_pg => {
},
},
+ test_rdbms_ss_csv => {
+ req => {
+ %$rdbms_ss_csv,
+ },
+ },
+
+ test_rdbms_ss_dbm => {
+ req => {
+ %$rdbms_ss_dbm,
+ },
+ },
+
+ test_rdbms_ss_po => {
+ req => {
+ $ENV{DBICTEST_DBD_PO}
+ ? (
+ %$rdbms_ss_po,
+ ) : ()
+ },
+ },
+
+ test_rdbms_ss_sys => {
+ req => {
+ $ENV{DBICTEST_DBD_SYS}
+ ? (
+ %$rdbms_ss_sys,
+ ) : ()
+ },
+ },
+
+ test_rdbms_ss_anydata => {
+ req => {
+ $ENV{DBICTEST_DBD_ANYDATA}
+ ? (
+ %$rdbms_ss_anydata,
+ ) : ()
+ },
+ },
+
+ test_rdbms_ss_treedata => {
+ req => {
+ $ENV{DBICTEST_DBD_TREEDATA}
+ ? (
+ %$rdbms_ss_treedata,
+ ) : ()
+ },
+ },
+
test_memcached => {
req => {
$ENV{DBICTEST_MEMCACHED}
SELECT ... LIMIT $offset $limit
-Supported by B<MySQL> and any L<SQL::Statement> based DBD
+Supported by B<MySQL>
=cut
sub _LimitXY {
return $sql;
}
+=head2 LimitXY_NoBinds
+
+ SELECT ... LIMIT $offset $limit
+
+Supported by any L<SQL::Statement> based DBD. (Implemented without
+bindvals, since L<SQL::Statement> doesn't like them in C<LIMIT>.)
+
+=cut
+# Render a "LIMIT [offset,] rows" clause with the values inlined into the
+# SQL text rather than passed as bind parameters, since SQL::Statement
+# rejects placeholders inside LIMIT.
+sub _LimitXY_NoBinds {
+ my ($self, $sql, $rs_attrs, $rows, $offset) = @_;
+
+ # Unary plus numifies $offset, so undef/''/'0' all skip the offset part
+ # (same truth test as the original "if +$offset" form).
+ my $limit_clause = +$offset ? "$offset, $rows" : $rows;
+ return $sql . $self->_parse_rs_attrs($rs_attrs) . " LIMIT $limit_clause";
+}
+
=head2 RowNumberOver
SELECT * FROM (
--- /dev/null
+package # Hide from PAUSE
+  DBIx::Class::SQLMaker::SQLStatement;
+
+# Strictures were missing from this package entirely -- enable them so
+# typos, undeclared variables and symbolic refs are caught at compile time.
+use strict;
+use warnings;
+
+use parent 'DBIx::Class::SQLMaker';
+
+# SQL::Statement does not understand
+# INSERT INTO $table DEFAULT VALUES
+# Adjust SQL here instead
+sub insert { # basically just a copy of the MySQL version...
+ my $self = shift;
+
+ # An empty (or missing) fields hashref means "insert a row of defaults",
+ # which S:S cannot express as "INSERT INTO t DEFAULT VALUES".
+ if (! $_[1] or (ref $_[1] eq 'HASH' and !keys %{$_[1]} ) ) {
+ my $table = $self->_quote($_[0]);
+ # NOTE(review): presumably "(1) VALUES (1)" is the stand-in
+ # column/value pair S:S accepts in lieu of DEFAULT VALUES -- confirm
+ # against the MySQL variant this was copied from.
+ return "INSERT INTO ${table} (1) VALUES (1)"
+ }
+
+ # Everything else goes through the normal SQLMaker machinery.
+ return $self->next::method (@_);
+}
+
+# SQL::Statement does not understand
+# SELECT ... FOR UPDATE
+# Disable it here
+# (Returning '' removes the lock clause from generated SELECTs entirely;
+# the empty prototype is ignored on method calls.)
+sub _lock_select () { '' };
+
+1;
+
+# SQL::Statement can't handle more than
+# one ANSI join, so just convert them all
+# to Oracle 8i-style WHERE-clause joins
+
+# (As such, we are stealing globs of code from OracleJoins.pm...)
+
+# SQL::Statement can only cope with a single ANSI JOIN, so for queries
+# touching 3+ tables the extra join conditions are rewritten into the
+# WHERE clause (Oracle 8i style) before delegating to the normal select().
+sub select {
+ my ($self, $table, $fields, $where, $rs_attrs, @rest) = @_;
+
+ if (ref $table eq 'ARRAY') {
+ # count tables accurately
+ # (only nodes carrying a -rsrc entry are real result sources)
+ my ($cnt, @node) = (0, @$table);
+ while (my $tbl = shift @node) {
+ my $r = ref $tbl;
+ if ($r eq 'ARRAY') { push(@node, @$tbl); }
+ elsif ($r eq 'HASH') { $cnt++ if ($tbl->{'-rsrc'}); }
+ }
+
+ # pull out all join conds as regular WHEREs from all extra tables
+ # (but only if we're joining more than 2 tables)
+ if ($cnt > 2) {
+ $where = $self->_where_joins($where, @{ $table }[ 1 .. $#$table ]);
+ }
+ }
+
+ return $self->next::method($table, $fields, $where, $rs_attrs, @rest);
+}
+
+# Render the FROM list. With a single join, defer to the parent and just
+# strip the parens S:S chokes on; with multiple joins, flatten everything
+# into a comma-separated table list (the ON conditions have already been
+# moved into WHERE by select() above).
+sub _recurse_from {
+ my ($self, $from, @join) = @_;
+
+ # check for a single JOIN
+ unless (@join > 1) {
+ my $sql = $self->next::method($from, @join);
+
+ # S:S still doesn't like the JOIN X ON ( Y ) syntax with the parens
+ # NOTE(review): greedy .+ and a single substitution -- assumes exactly
+ # one JOIN/ON pair and no nested parens in the condition; TODO confirm.
+ $sql =~ s/JOIN (.+) ON \( (.+) \)/JOIN $1 ON $2/;
+ return $sql;
+ }
+
+ my @sqlf = $self->_from_chunk_to_sql($from);
+
+ # Emit each joined table as a plain FROM entry, recursing into nested
+ # join structures.
+ for (@join) {
+ my ($to, $on) = @$_;
+
+ push (@sqlf, (ref $to eq 'ARRAY') ?
+ $self->_recurse_from(@$to) :
+ $self->_from_chunk_to_sql($to)
+ );
+ }
+
+ return join q{, }, @sqlf;
+}
+
+# Merge the join conditions extracted from the FROM tree into the
+# caller-supplied WHERE condition.
+sub _where_joins {
+ my ($self, $cond, @joins) = @_;
+
+ my $join_cond = $self->_recurse_where_joins(@joins);
+
+ # Nothing to merge -- hand back the original condition untouched.
+ return $cond unless keys %$join_cond;
+
+ # No user WHERE at all: the join conditions become the WHERE.
+ return $join_cond unless defined $cond;
+
+ # An arrayref WHERE is an implicit OR-list; wrap it so the join
+ # conditions are ANDed against the whole disjunction.
+ $cond = { -or => $cond } if ref $cond eq 'ARRAY';
+ return { -and => [ $join_cond, $cond ] };
+}
+
+# Walk the join tree and turn every ON condition into a literal
+# "<lhs> = <rhs>" WHERE chunk, recursing into nested join structures.
+# Returns an -and group of all collected chunks.
+sub _recurse_where_joins {
+ my $self = shift;
+
+ my @where;
+ foreach my $j (@_) {
+ my ($to, $on) = @$j;
+
+ push @where, $self->_recurse_where_joins(@$to) if (ref $to eq 'ARRAY');
+
+ my $join_opts = ref $to eq 'ARRAY' ? $to->[0] : $to;
+ if (ref $join_opts eq 'HASH' and my $jt = $join_opts->{-join_type}) {
+ # TODO: Figure out a weird way to support ANSI joins and WHERE joins at the same time.
+ # (Though, time would be better spent just fixing SQL::Parser to not require this stuff.)
+
+ # Only plain inner joins can be expressed as WHERE conditions.
+ $self->throw_exception("Can't handle non-inner, non-ANSI joins in SQL::Statement SQL yet!\n")
+ if $jt =~ /NATURAL|LEFT|RIGHT|FULL|CROSS|UNION/i;
+ }
+
+ # sadly SQLA treats where($scalar) as literal, so we need to jump some hoops
+ push @where, map { \sprintf ('%s = %s',
+ ref $_ ? $self->_recurse_where($_) : $self->_quote($_),
+ ref $on->{$_} ? $self->_recurse_where($on->{$_}) : $self->_quote($on->{$_}),
+ ) } keys %$on;
+ }
+
+ return { -and => \@where };
+}
+
+1;
--- /dev/null
+package DBIx::Class::Storage::DBI::AnyData;
+
+# Strictures were missing -- enable them for compile-time safety.
+use strict;
+use warnings;
+use base 'DBIx::Class::Storage::DBI::SQL::Statement';
+use mro 'c3';
+use namespace::clean;
+
+1;
+
+=head1 NAME
+
+DBIx::Class::Storage::DBI::AnyData - Support for freeform data via DBD::AnyData
+
+=head1 SYNOPSIS
+
+This subclass supports freeform data tables via L<DBD::AnyData>.
+
+=head1 DESCRIPTION
+
+This subclass is essentially just a stub that uses the super class
+L<DBIx::Class::Storage::DBI::SQL::Statement>. Patches welcome if
+anything specific to this driver is required.
+
+=head1 AUTHOR
+
+See L<DBIx::Class/AUTHOR> and L<DBIx::Class/CONTRIBUTORS>.
+
+=head1 LICENSE
+
+You may distribute this code under the same terms as Perl itself.
+
+=cut
\ No newline at end of file
--- /dev/null
+package DBIx::Class::Storage::DBI::CSV;
+
+# Strictures were missing -- enable them for compile-time safety.
+use strict;
+use warnings;
+use base 'DBIx::Class::Storage::DBI::SQL::Statement';
+use mro 'c3';
+use namespace::clean;
+
+1;
+
+=head1 NAME
+
+DBIx::Class::Storage::DBI::CSV - Support for CSV files via DBD::CSV
+
+=head1 SYNOPSIS
+
+This subclass supports CSV files via L<DBD::CSV>.
+
+=head1 DESCRIPTION
+
+This subclass is essentially just a stub that uses the super class
+L<DBIx::Class::Storage::DBI::SQL::Statement>. Patches welcome if
+anything specific to this driver is required.
+
+=head1 AUTHOR
+
+See L<DBIx::Class/AUTHOR> and L<DBIx::Class/CONTRIBUTORS>.
+
+=head1 LICENSE
+
+You may distribute this code under the same terms as Perl itself.
+
+=cut
\ No newline at end of file
--- /dev/null
+package DBIx::Class::Storage::DBI::DBM;
+
+# Strictures were missing -- enable them for compile-time safety.
+use strict;
+use warnings;
+use base 'DBIx::Class::Storage::DBI::SQL::Statement';
+use mro 'c3';
+use namespace::clean;
+
+# DBD::DBM balks at INSERTs that omit columns, so fill in every column the
+# source knows about -- using its default_value, or undef/NULL otherwise.
+sub insert {
+ my ($self, $source, $to_insert) = @_;
+
+ my $col_infos = $source->columns_info;
+
+ foreach my $col (keys %$col_infos) {
+ # this will naturally fall into undef/NULL if default_value doesn't exist
+ $to_insert->{$col} = $col_infos->{$col}{default_value}
+ unless (exists $to_insert->{$col});
+ }
+
+ $self->next::method($source, $to_insert);
+}
+
+# Same column-completion as insert(), applied to the bulk form: any source
+# column absent from @$cols is appended, and every data row receives that
+# column's default_value (or undef) in the newly added slot.
+sub insert_bulk {
+ my ($self, $source, $cols, $data) = @_;
+
+ my $col_infos = $source->columns_info;
+
+ foreach my $col (keys %$col_infos) {
+ unless (grep { $_ eq $col } @$cols) {
+ push @$cols, $col;
+ for my $r (0 .. $#$data) {
+ # this will naturally fall into undef/NULL if default_value doesn't exist
+ # ($#$cols is the index of the column just pushed above)
+ $data->[$r][$#$cols] = $col_infos->{$col}{default_value};
+ }
+ }
+ }
+
+ $self->next::method($source, $cols, $data);
+}
+
+1;
+
+=head1 NAME
+
+DBIx::Class::Storage::DBI::DBM - Support for DBM & MLDBM files via DBD::DBM
+
+=head1 SYNOPSIS
+
+This subclass supports DBM & MLDBM files via L<DBD::DBM>.
+
+=head1 DESCRIPTION
+
+This subclass is essentially just a stub that uses the super class
+L<DBIx::Class::Storage::DBI::SQL::Statement>.
+
+=head1 IMPLEMENTATION NOTES
+
+=head2 Missing fields on INSERTs
+
+L<DBD::DBM> will balk at missing columns on INSERTs. This storage engine will
+add them in with either the default_value attribute or NULL.
+
+=head1 AUTHOR
+
+See L<DBIx::Class/AUTHOR> and L<DBIx::Class/CONTRIBUTORS>.
+
+=head1 LICENSE
+
+You may distribute this code under the same terms as Perl itself.
+
+=cut
\ No newline at end of file
--- /dev/null
+package DBIx::Class::Storage::DBI::PO;
+
+# Strictures were missing -- enable them for compile-time safety.
+use strict;
+use warnings;
+use base 'DBIx::Class::Storage::DBI::SQL::Statement';
+use mro 'c3';
+use namespace::clean;
+
+1;
+
+=head1 NAME
+
+DBIx::Class::Storage::DBI::PO - Support for GNU gettext PO files via DBD::PO
+
+=head1 SYNOPSIS
+
+This subclass supports GNU gettext PO files via L<DBD::PO>.
+
+=head1 DESCRIPTION
+
+This subclass is essentially just a stub that uses the super class
+L<DBIx::Class::Storage::DBI::SQL::Statement>. Patches welcome if
+anything specific to this driver is required.
+
+=head1 AUTHOR
+
+See L<DBIx::Class/AUTHOR> and L<DBIx::Class/CONTRIBUTORS>.
+
+=head1 LICENSE
+
+You may distribute this code under the same terms as Perl itself.
+
+=cut
\ No newline at end of file
--- /dev/null
+package DBIx::Class::Storage::DBI::SNMP;
+
+# Strictures were missing -- enable them for compile-time safety.
+use strict;
+use warnings;
+use base 'DBIx::Class::Storage::DBI::SQL::Statement';
+use mro 'c3';
+use namespace::clean;
+
+1;
+
+=head1 NAME
+
+DBIx::Class::Storage::DBI::SNMP - Support for SNMP data via DBD::SNMP
+
+=head1 SYNOPSIS
+
+This subclass supports SNMP data via L<DBD::SNMP>.
+
+=head1 DESCRIPTION
+
+This subclass is essentially just a stub that uses the super class
+L<DBIx::Class::Storage::DBI::SQL::Statement>. Patches welcome if
+anything specific to this driver is required.
+
+=head1 AUTHOR
+
+See L<DBIx::Class/AUTHOR> and L<DBIx::Class/CONTRIBUTORS>.
+
+=head1 LICENSE
+
+You may distribute this code under the same terms as Perl itself.
+
+=cut
\ No newline at end of file
--- /dev/null
+package DBIx::Class::Storage::DBI::SQL::Statement;
+
+use strict;
+use warnings;
+use base 'DBIx::Class::Storage::DBI';
+use mro 'c3';
+use namespace::clean;
+
+# Route SQL generation through the S:S-aware maker, quote identifiers with
+# ANSI double quotes, and use the bind-free LIMIT dialect (S:S rejects
+# bind parameters inside LIMIT clauses).
+__PACKAGE__->sql_maker_class('DBIx::Class::SQLMaker::SQLStatement');
+__PACKAGE__->sql_quote_char('"');
+__PACKAGE__->sql_limit_dialect('LimitXY_NoBinds');
+
+# Unsupported options
+sub _determine_supports_insert_returning { 0 };
+
+# Statement caching currently buggy with either S:S or DBD::AnyData (and/or possibly others)
+# Disable it here and look into fixing it later on
+sub _init {
+ my $self = shift;
+ $self->next::method(@_);
+ # Statement caching misbehaves with S:S-based DBDs -- keep it off here
+ # (see the comment above this sub).
+ $self->disable_sth_caching(1);
+}
+
+# No support for transactions; sorry...
+sub txn_begin {
+ my $self = shift;
+
+ # Only certain internal calls are allowed through, and even then, we are merely
+ # ignoring the txn part
+ # NOTE(review): caller($_) returns an empty list for frames past the top
+ # of the stack, which would put undef entries (and warnings) into this
+ # join -- TODO confirm whether 4 frames always exist here.
+ my $callers = join "\n", map { (caller($_))[3] } (1 .. 4);
+ return $self->_get_dbh
+ if ($callers =~ /
+ DBIx::Class::Storage::DBI::insert_bulk|
+ DBIx::Class::Relationship::CascadeActions::update
+ /x);
+
+ $self->throw_exception('SQL::Statement-based drivers do not support transactions!');
+}
+# Savepoints are transaction machinery too -- refuse them outright.
+sub svp_begin { shift->throw_exception('SQL::Statement-based drivers do not support savepoints!'); }
+
+# Nor is there any last_insert_id support (unless the driver supports it directly)
+sub _dbh_last_insert_id { shift->throw_exception('SQL::Statement-based drivers do not support AUTOINCREMENT keys! You will need to specify the PKs directly.'); }
+
+# leftovers to support txn_begin exceptions
+# (txn_begin lets some internal callers through, so the matching commit
+# must succeed as a no-op)
+sub txn_commit { 1; }
+
+1;
+
+=head1 NAME
+
+DBIx::Class::Storage::DBI::SQL::Statement - Base Class for SQL::Statement- / DBI::DBD::SqlEngine-based
+DBD support in DBIx::Class
+
+=head1 SYNOPSIS
+
+This is the base class for DBDs that use L<SQL::Statement> and/or
+L<DBI::DBD::SqlEngine|DBI::DBD::SqlEngine::Developers>. This class is
+used for:
+
+=over
+
+=item L<DBD::Sys>
+
+=item L<DBD::AnyData>
+
+=item L<DBD::TreeData>
+
+=item L<DBD::SNMP>
+
+=item L<DBD::PO>
+
+=item L<DBD::CSV>
+
+=item L<DBD::DBM>
+
+=back
+
+=head1 IMPLEMENTATION NOTES
+
+=head2 Transactions
+
+These drivers do not support transactions (and in fact, even the SQL syntax for
+them). Therefore, any attempts to use txn_* or svp_* methods will throw an
+exception.
+
+In a future release, they may be replaced with emulated functionality. (Then
+again, it would probably be added into L<SQL::Statement> instead.)
+
+=head2 SELECT ... FOR UPDATE/SHARE
+
+This also is not supported, but it will silently ignore these.
+
+=head1 AUTHOR
+
+See L<DBIx::Class/AUTHOR> and L<DBIx::Class/CONTRIBUTORS>.
+
+=head1 LICENSE
+
+You may distribute this code under the same terms as Perl itself.
+
+=cut
\ No newline at end of file
--- /dev/null
+package DBIx::Class::Storage::DBI::Sys;
+
+# Strictures were missing -- enable them for compile-time safety.
+use strict;
+use warnings;
+use base 'DBIx::Class::Storage::DBI::SQL::Statement';
+use mro 'c3';
+use namespace::clean;
+
+1;
+
+=head1 NAME
+
+DBIx::Class::Storage::DBI::Sys - Support for system data via DBD::Sys
+
+=head1 SYNOPSIS
+
+This subclass supports system data information via L<DBD::Sys>.
+
+=head1 DESCRIPTION
+
+This subclass is essentially just a stub that uses the super class
+L<DBIx::Class::Storage::DBI::SQL::Statement>. Patches welcome if
+anything specific to this driver is required.
+
+=head1 AUTHOR
+
+See L<DBIx::Class/AUTHOR> and L<DBIx::Class/CONTRIBUTORS>.
+
+=head1 LICENSE
+
+You may distribute this code under the same terms as Perl itself.
+
+=cut
\ No newline at end of file
--- /dev/null
+package DBIx::Class::Storage::DBI::TreeData;
+
+# Strictures were missing -- enable them for compile-time safety.
+use strict;
+use warnings;
+use base 'DBIx::Class::Storage::DBI::SQL::Statement';
+use mro 'c3';
+use namespace::clean;
+
+1;
+
+=head1 NAME
+
+DBIx::Class::Storage::DBI::TreeData - Support for JSON-like tree data via DBD::TreeData
+
+=head1 SYNOPSIS
+
+This subclass supports JSON-like tree tables via L<DBD::TreeData>.
+
+=head1 DESCRIPTION
+
+This subclass is essentially just a stub that uses the super class
+L<DBIx::Class::Storage::DBI::SQL::Statement>. Patches welcome if
+anything specific to this driver is required.
+
+=head1 AUTHOR
+
+See L<DBIx::Class/AUTHOR> and L<DBIx::Class/CONTRIBUTORS>.
+
+=head1 LICENSE
+
+You may distribute this code under the same terms as Perl itself.
+
+=cut
\ No newline at end of file
--- /dev/null
+use strict;
+use warnings;
+
+use Test::More;
+use Test::Exception;
+
+use lib qw(t/lib);
+use DBICTest;
+use DBIC::SqlMakerTest;
+use DBIx::Class::Optional::Dependencies ();
+
+use Path::Class;
+
+plan skip_all =>
+ 'Test needs ' . DBIx::Class::Optional::Dependencies->req_missing_for ('test_rdbms_ss_csv')
+ unless DBIx::Class::Optional::Dependencies->req_ok_for ('test_rdbms_ss_csv');
+
+my $db_dir = dir(qw/t var/, "ss_csv-$$");
+$db_dir->mkpath unless -d $db_dir;
+
+my ($dsn, $opts) = ('dbi:CSV:', {
+ f_schema => undef,
+ f_dir => "$db_dir",
+ f_ext => ".csv/r",
+ f_lock => 0,
+ f_encoding => "utf8",
+
+ csv_null => 1,
+ csv_eol => "\n",
+});
+
+my $schema = DBICTest::Schema->connect($dsn, '', '', $opts);
+is ($schema->storage->sqlt_type, 'CSV', 'sqlt_type correct pre-connection');
+isa_ok($schema->storage->sql_maker, 'DBIx::Class::SQLMaker::SQLStatement');
+
+# Custom deployment
+my $dbh = $schema->storage->dbh;
+my @cmds = split /\s*\;\s*/, scalar file(qw/t lib test_deploy DBICTest-Schema-1.x-SQL-Statement.sql/)->slurp;
+$dbh->do($_) for @cmds;
+
+### S:S doesn't have any sort of AUTOINCREMENT support, so IDs will have to be generated by hand ###
+
+# test primary key handling
+my $new = $schema->resultset('Artist')->create({
+ artistid => 1,
+ name => 'foo'
+});
+ok($new->artistid, "Create worked");
+
+# test LIMIT support
+for (1..6) {
+ $schema->resultset('Artist')->create({
+ artistid => $_+1,
+ name => 'Artist '.$_,
+ });
+}
+my $it = $schema->resultset('Artist')->search( {}, {
+ rows => 3,
+ offset => 2,
+ order_by => 'artistid'
+});
+is( $it->count, 3, "LIMIT count ok" ); # ask for 3 rows out of 7 artists
+is( $it->next->name, "Artist 2", "iterator->next ok" );
+$it->next;
+$it->next;
+is( $it->next, undef, "next past end of resultset ok" );
+
+# Limit with select-lock (which is silently thrown away)
+lives_ok {
+ isa_ok (
+ $schema->resultset('Artist')->find({artistid => 1}, {for => 'update', rows => 1}),
+ 'DBICTest::Schema::Artist',
+ );
+} 'Limited FOR UPDATE select works';
+
+# shared-lock (which is silently thrown away)
+lives_ok {
+ isa_ok (
+ $schema->resultset('Artist')->find({artistid => 1}, {for => 'shared'}),
+ 'DBICTest::Schema::Artist',
+ );
+} 'LOCK IN SHARE MODE select works';
+
+# (everything seems to be a VARCHAR with S:S)
+my $test_type_info = {
+ 'artistid' => {
+ 'data_type' => 'VARCHAR',
+ 'is_nullable' => 0,
+ 'size' => 0,
+ },
+ 'name' => {
+ 'data_type' => 'VARCHAR',
+ 'is_nullable' => 1,
+ 'size' => 100,
+ },
+ 'rank' => {
+ 'data_type' => 'VARCHAR',
+ 'is_nullable' => 0,
+ 'size' => 0,
+ },
+ 'charfield' => {
+ 'data_type' => 'VARCHAR',
+ 'is_nullable' => 1,
+ 'size' => 10,
+ },
+};
+
+$schema->populate ('Owners', [
+ [qw/id name /],
+ [qw/1 wiggle/],
+ [qw/2 woggle/],
+ [qw/3 boggle/],
+]);
+
+$schema->populate ('BooksInLibrary', [
+ [qw/id source owner title /],
+ [qw/1 Library 1 secrets1/],
+ [qw/2 Eatery 1 secrets2/],
+ [qw/3 Library 2 secrets3/],
+]);
+
+{
+ # try a ->has_many direction (due to a 'multi' accessor the select/group_by group is collapsed)
+ my $owners = $schema->resultset('Owners')->search(
+ { 'books.id' => { '!=', undef }},
+ { prefetch => 'books', cache => 1 }
+ );
+ is($owners->all, 2, 'Prefetched grouped search returns correct number of rows');
+
+ # only works here because of the full cache
+ # S:S would croak on a subselect otherwise
+ is($owners->count, 2, 'Prefetched grouped search returns correct count');
+
+ # try a ->belongs_to direction (no select collapse)
+ my $books = $schema->resultset('BooksInLibrary')->search (
+ { 'owner.name' => 'wiggle' },
+ { prefetch => 'owner', distinct => 1 }
+ );
+
+ {
+ local $TODO = 'populate does not subtract the non-Library INSERTs here...';
+ # These TODO checks are about the restricted BooksInLibrary resultset
+ # ($books) built just above -- $owners was a copy-paste slip: its 2-row
+ # checks already passed earlier, so expecting 1 could never succeed.
+ is($books->all, 1, 'Prefetched grouped search returns correct number of rows');
+ is($books->count, 1, 'Prefetched grouped search returns correct count');
+ }
+}
+
+my $type_info = $schema->storage->columns_info_for('artist');
+is_deeply($type_info, $test_type_info, 'columns_info_for - column data types');
+
+my $cd = $schema->resultset('CD')->create({ cdid => 1 });
+my $producer = $schema->resultset('Producer')->create({ producerid => 1 });
+lives_ok { $cd->set_producers ([ $producer ]) } 'set_relationship doesnt die';
+
+{
+ my $artist = $schema->resultset('Artist')->next;
+ my $cd = $schema->resultset('CD')->next;
+ $cd->set_from_related('artist', $artist);
+ $cd->update;
+
+ my $rs = $schema->resultset('CD')->search ({}, { prefetch => 'artist' });
+
+ lives_ok sub {
+ my $cd = $rs->next;
+ is ($cd->artist->name, $artist->name, 'Prefetched artist');
+ }, 'join does not throw';
+
+ local $schema->storage->sql_maker->{_default_jointype} = 'inner';
+ is_same_sql_bind (
+ $rs->as_query,
+ '(
+ SELECT
+ me.cdid, me.artist, me.title, me.year, me.genreid, me.single_track,
+ artist.artistid, artist.name, artist.rank, artist.charfield
+ FROM cd me
+ INNER JOIN artist artist ON artist.artistid = me.artist
+ )',
+ [],
+ 'overriden default join type works',
+ );
+}
+
+{
+ # Test support for straight joins
+ my $cdsrc = $schema->source('CD');
+ my $artrel_info = $cdsrc->relationship_info ('artist');
+ $cdsrc->add_relationship(
+ 'straight_artist',
+ $artrel_info->{class},
+ $artrel_info->{cond},
+ { %{$artrel_info->{attrs}}, join_type => 'straight' },
+ );
+ is_same_sql_bind (
+ $cdsrc->resultset->search({}, { prefetch => 'straight_artist' })->as_query,
+ '(
+ SELECT
+ me.cdid, me.artist, me.title, me.year, me.genreid, me.single_track,
+ straight_artist.artistid, straight_artist.name, straight_artist.rank, straight_artist.charfield
+ FROM cd me
+ STRAIGHT JOIN artist straight_artist ON straight_artist.artistid = me.artist
+ )',
+ [],
+ 'straight joins correctly supported'
+ );
+}
+
+# Can we properly deal with the null search problem?
+{
+ $schema->resultset('Artist')->create({ artistid => 2222, name => 'last created artist' });
+
+ ok my $artist1_rs = $schema->resultset('Artist')->search({artistid=>6666})
+ => 'Created an artist resultset of 6666';
+
+ is $artist1_rs->count, 0
+ => 'Got no returned rows';
+
+ ok my $artist2_rs = $schema->resultset('Artist')->search({artistid=>undef})
+ => 'Created an artist resultset of undef';
+
+ is $artist2_rs->count, 0
+ => 'got no rows';
+
+ my $artist = $artist2_rs->single;
+
+ is $artist => undef
+ => 'Nothing Found!';
+}
+
+{
+ my $cds_per_year = {
+ 2001 => 2,
+ 2002 => 1,
+ 2005 => 3,
+ };
+
+ # kill the scalar ref here
+ $schema->source('CD')->name('cd');
+
+ my $rs = $schema->resultset('CD');
+ $rs->delete;
+ my $cdid = 1;
+ foreach my $y (keys %$cds_per_year) {
+ foreach my $c (1 .. $cds_per_year->{$y} ) {
+ $rs->create({ cdid => $cdid++, title => "CD $y-$c", artist => 1, year => "$y-01-01" });
+ }
+ }
+
+ is ($rs->count, 6, 'CDs created successfully');
+}
+
+done_testing;
--- /dev/null
+use strict;
+use warnings;
+
+use Test::More;
+use Test::Exception;
+
+use lib qw(t/lib);
+use DBICTest;
+use DBIC::SqlMakerTest;
+use DBIx::Class::Optional::Dependencies ();
+
+use Path::Class;
+
+plan skip_all =>
+ 'Test needs ' . DBIx::Class::Optional::Dependencies->req_missing_for ('test_rdbms_ss_dbm')
+ unless DBIx::Class::Optional::Dependencies->req_ok_for ('test_rdbms_ss_dbm');
+
+my $db_dir = dir(qw/t var/, "ss_dbm-$$");
+$db_dir->mkpath unless -d $db_dir;
+
+my ($dsn, $opts) = ('dbi:DBM:', {
+ f_dir => "$db_dir",
+ f_lockfile => '.lock',
+
+ dbm_type => 'BerkeleyDB',
+ dbm_mldbm => 'Storable',
+ dbm_store_metadata => 1,
+});
+
+my $schema = DBICTest::Schema->connect($dsn, '', '', $opts);
+is ($schema->storage->sqlt_type, 'DBM', 'sqlt_type correct pre-connection');
+isa_ok($schema->storage->sql_maker, 'DBIx::Class::SQLMaker::SQLStatement');
+
+# Custom deployment
+my $dbh = $schema->storage->dbh;
+my @cmds = split /\s*\;\s*/, scalar file(qw/t lib test_deploy DBICTest-Schema-1.x-SQL-Statement.sql/)->slurp;
+$dbh->do($_) for @cmds;
+
+### S:S doesn't have any sort of AUTOINCREMENT support, so IDs will have to be generated by hand ###
+
+# test primary key handling
+my $new = $schema->resultset('Artist')->create({
+ artistid => 1,
+ name => 'foo'
+});
+ok($new->artistid, "Create worked");
+
+# test LIMIT support
+for (1..6) {
+ $schema->resultset('Artist')->create({
+ artistid => $_+1,
+ name => 'Artist '.$_,
+ });
+}
+my $it = $schema->resultset('Artist')->search( {}, {
+ rows => 3,
+ offset => 2,
+ order_by => 'artistid'
+});
+is( $it->count, 3, "LIMIT count ok" ); # ask for 3 rows out of 7 artists
+is( $it->next->name, "Artist 2", "iterator->next ok" );
+$it->next;
+$it->next;
+is( $it->next, undef, "next past end of resultset ok" );
+
+# Limit with select-lock (which is silently thrown away)
+lives_ok {
+ isa_ok (
+ $schema->resultset('Artist')->find({artistid => 1}, {for => 'update', rows => 1}),
+ 'DBICTest::Schema::Artist',
+ );
+} 'Limited FOR UPDATE select works';
+
+# shared-lock (which is silently thrown away)
+lives_ok {
+ isa_ok (
+ $schema->resultset('Artist')->find({artistid => 1}, {for => 'shared'}),
+ 'DBICTest::Schema::Artist',
+ );
+} 'LOCK IN SHARE MODE select works';
+
+# (No nullables with DBD::DBM)
+my $test_type_info = {
+ 'artistid' => {
+ 'data_type' => 'VARCHAR',
+ 'is_nullable' => 0,
+ 'size' => 0,
+ },
+ 'name' => {
+ 'data_type' => 'VARCHAR',
+ 'is_nullable' => 0,
+ 'size' => 100,
+ },
+ 'rank' => {
+ 'data_type' => 'VARCHAR',
+ 'is_nullable' => 0,
+ 'size' => 0,
+ },
+ 'charfield' => {
+ 'data_type' => 'VARCHAR',
+ 'is_nullable' => 0,
+ 'size' => 10,
+ },
+};
+
+$schema->populate ('Owners', [
+ [qw/id name /],
+ [qw/1 wiggle/],
+ [qw/2 woggle/],
+ [qw/3 boggle/],
+]);
+
+$schema->populate ('BooksInLibrary', [
+ [qw/id source owner title /],
+ [qw/1 Library 1 secrets1/],
+ [qw/2 Eatery 1 secrets2/],
+ [qw/3 Library 2 secrets3/],
+]);
+
+{
+ # try a ->has_many direction (due to a 'multi' accessor the select/group_by group is collapsed)
+ my $owners = $schema->resultset('Owners')->search(
+ { 'books.id' => { '!=', undef }},
+ { prefetch => 'books', cache => 1 }
+ );
+ is($owners->all, 2, 'Prefetched grouped search returns correct number of rows');
+
+ # only works here because of the full cache
+ # S:S would croak on a subselect otherwise
+ is($owners->count, 2, 'Prefetched grouped search returns correct count');
+
+ # try a ->belongs_to direction (no select collapse)
+ my $books = $schema->resultset('BooksInLibrary')->search (
+ { 'owner.name' => 'wiggle' },
+ { prefetch => 'owner', distinct => 1 }
+ );
+
+ {
+ local $TODO = 'populate does not subtract the non-Library INSERTs here...';
+ # These TODO checks are about the restricted BooksInLibrary resultset
+ # ($books) built just above -- $owners was a copy-paste slip: its 2-row
+ # checks already passed earlier, so expecting 1 could never succeed.
+ is($books->all, 1, 'Prefetched grouped search returns correct number of rows');
+ is($books->count, 1, 'Prefetched grouped search returns correct count');
+ }
+}
+
+my $type_info = $schema->storage->columns_info_for('artist');
+is_deeply($type_info, $test_type_info, 'columns_info_for - column data types');
+
+my $cd = $schema->resultset('CD')->create({ cdid => 1 });
+my $producer = $schema->resultset('Producer')->create({ producerid => 1 });
+lives_ok { $cd->set_producers ([ $producer ]) } 'set_relationship doesnt die';
+
+{
+ my $artist = $schema->resultset('Artist')->next;
+ my $cd = $schema->resultset('CD')->next;
+ $cd->set_from_related('artist', $artist);
+ $cd->update;
+
+ my $rs = $schema->resultset('CD')->search ({}, { prefetch => 'artist' });
+
+ lives_ok sub {
+ my $cd = $rs->next;
+ is ($cd->artist->name, $artist->name, 'Prefetched artist');
+ }, 'join does not throw';
+
+ local $schema->storage->sql_maker->{_default_jointype} = 'inner';
+ is_same_sql_bind (
+ $rs->as_query,
+ '(
+ SELECT
+ me.cdid, me.artist, me.title, me.year, me.genreid, me.single_track,
+ artist.artistid, artist.name, artist.rank, artist.charfield
+ FROM cd me
+ INNER JOIN artist artist ON artist.artistid = me.artist
+ )',
+ [],
+ 'overriden default join type works',
+ );
+}
+
+{
+ # Test support for straight joins
+ my $cdsrc = $schema->source('CD');
+ my $artrel_info = $cdsrc->relationship_info ('artist');
+ $cdsrc->add_relationship(
+ 'straight_artist',
+ $artrel_info->{class},
+ $artrel_info->{cond},
+ { %{$artrel_info->{attrs}}, join_type => 'straight' },
+ );
+ is_same_sql_bind (
+ $cdsrc->resultset->search({}, { prefetch => 'straight_artist' })->as_query,
+ '(
+ SELECT
+ me.cdid, me.artist, me.title, me.year, me.genreid, me.single_track,
+ straight_artist.artistid, straight_artist.name, straight_artist.rank, straight_artist.charfield
+ FROM cd me
+ STRAIGHT JOIN artist straight_artist ON straight_artist.artistid = me.artist
+ )',
+ [],
+ 'straight joins correctly supported'
+ );
+}
+
+# Can we properly deal with the null search problem?
+{
+ $schema->resultset('Artist')->create({ artistid => 2222, name => 'last created artist' });
+
+ ok my $artist1_rs = $schema->resultset('Artist')->search({artistid=>6666})
+ => 'Created an artist resultset of 6666';
+
+ is $artist1_rs->count, 0
+ => 'Got no returned rows';
+
+ ok my $artist2_rs = $schema->resultset('Artist')->search({artistid=>undef})
+ => 'Created an artist resultset of undef';
+
+ is $artist2_rs->count, 0
+ => 'got no rows';
+
+ my $artist = $artist2_rs->single;
+
+ is $artist => undef
+ => 'Nothing Found!';
+}
+
+{
+ my $cds_per_year = {
+ 2001 => 2,
+ 2002 => 1,
+ 2005 => 3,
+ };
+
+ # kill the scalar ref here
+ $schema->source('CD')->name('cd');
+
+ my $rs = $schema->resultset('CD');
+ $rs->delete;
+ my $cdid = 1;
+ foreach my $y (keys %$cds_per_year) {
+ foreach my $c (1 .. $cds_per_year->{$y} ) {
+ $rs->create({ cdid => $cdid++, title => "CD $y-$c", artist => 1, year => "$y-01-01" });
+ }
+ }
+
+ is ($rs->count, 6, 'CDs created successfully');
+}
+
+done_testing;
--- /dev/null
+CREATE TABLE artist (
+ artistid INTEGER PRIMARY KEY NOT NULL,
+ name varchar(100),
+ rank integer NOT NULL,
+ charfield char(10)
+);
+
+CREATE TABLE collection (
+ collectionid INTEGER PRIMARY KEY NOT NULL,
+ name varchar(100) NOT NULL
+);
+
+CREATE TABLE encoded (
+ id INTEGER PRIMARY KEY NOT NULL,
+ encoded varchar(100)
+);
+
+CREATE TABLE event (
+ id INTEGER PRIMARY KEY NOT NULL,
+ starts_at varchar(20) NOT NULL,
+ created_on varchar(20) NOT NULL,
+ varchar_date varchar(20),
+ varchar_datetime varchar(20),
+ skip_inflation varchar(20),
+ ts_without_tz varchar(20)
+);
+
+CREATE TABLE fourkeys (
+ foo integer NOT NULL,
+ bar integer NOT NULL,
+ hello integer NOT NULL,
+ goodbye integer NOT NULL,
+ sensors char(10) NOT NULL,
+ read_count integer,
+ PRIMARY KEY (foo, bar, hello, goodbye)
+);
+
+CREATE TABLE genre (
+ genreid INTEGER PRIMARY KEY NOT NULL,
+ name varchar(100) NOT NULL
+);
+
+CREATE TABLE link (
+ id INTEGER PRIMARY KEY NOT NULL,
+ url varchar(100),
+ title varchar(100)
+);
+
+CREATE TABLE noprimarykey (
+ foo integer NOT NULL,
+ bar integer NOT NULL,
+ baz integer NOT NULL
+);
+
+CREATE TABLE onekey (
+ id INTEGER PRIMARY KEY NOT NULL,
+ artist integer NOT NULL,
+ cd integer NOT NULL
+);
+
+CREATE TABLE owners (
+ id INTEGER PRIMARY KEY NOT NULL,
+ name varchar(100) NOT NULL
+);
+
+CREATE TABLE producer (
+ producerid INTEGER PRIMARY KEY NOT NULL,
+ name varchar(100) NOT NULL
+);
+
+CREATE TABLE self_ref (
+ id INTEGER PRIMARY KEY NOT NULL,
+ name varchar(100) NOT NULL
+);
+
+CREATE TABLE sequence_test (
+ pkid1 integer NOT NULL,
+ pkid2 integer NOT NULL,
+ nonpkid integer NOT NULL,
+ name varchar(100),
+ PRIMARY KEY (pkid1, pkid2)
+);
+
+CREATE TABLE serialized (
+ id INTEGER PRIMARY KEY NOT NULL,
+ serialized text NOT NULL
+);
+
+CREATE TABLE treelike (
+ id INTEGER PRIMARY KEY NOT NULL,
+ parent integer,
+ name varchar(100) NOT NULL
+);
+
+CREATE TABLE twokeytreelike (
+ id1 integer NOT NULL,
+ id2 integer NOT NULL,
+ parent1 integer NOT NULL,
+ parent2 integer NOT NULL,
+ name varchar(100) NOT NULL,
+ PRIMARY KEY (id1, id2)
+);
+
+CREATE TABLE typed_object (
+ objectid INTEGER PRIMARY KEY NOT NULL,
+ type varchar(100) NOT NULL,
+ value varchar(100) NOT NULL
+);
+
+CREATE TABLE artist_undirected_map (
+ id1 integer NOT NULL,
+ id2 integer NOT NULL,
+ PRIMARY KEY (id1, id2)
+);
+
+CREATE TABLE bookmark (
+ id INTEGER PRIMARY KEY NOT NULL,
+ link integer
+);
+
+CREATE TABLE books (
+ id INTEGER PRIMARY KEY NOT NULL,
+ source varchar(100) NOT NULL,
+ owner integer NOT NULL,
+ title varchar(100) NOT NULL,
+ price integer
+);
+
+CREATE TABLE employee (
+ employee_id INTEGER PRIMARY KEY NOT NULL,
+ position integer NOT NULL,
+ group_id integer,
+ group_id_2 integer,
+ group_id_3 integer,
+ name varchar(100),
+ encoded integer
+);
+
+CREATE TABLE forceforeign (
+ artist INTEGER PRIMARY KEY NOT NULL,
+ cd integer NOT NULL
+);
+
+CREATE TABLE self_ref_alias (
+ self_ref integer NOT NULL,
+ alias integer NOT NULL,
+ PRIMARY KEY (self_ref, alias)
+);
+
+CREATE TABLE track (
+ trackid INTEGER PRIMARY KEY NOT NULL,
+ cd integer NOT NULL,
+ position int NOT NULL,
+ title varchar(100) NOT NULL,
+ last_updated_on varchar(20),
+ last_updated_at varchar(20)
+);
+
+CREATE TABLE cd (
+ cdid INTEGER PRIMARY KEY NOT NULL,
+ artist integer NOT NULL,
+ title varchar(100) NOT NULL,
+ year varchar(100) NOT NULL,
+ genreid integer,
+ single_track integer
+);
+
+CREATE TABLE collection_object (
+ collection integer NOT NULL,
+ object integer NOT NULL,
+ PRIMARY KEY (collection, object)
+);
+
+CREATE TABLE lyrics (
+ lyric_id INTEGER PRIMARY KEY NOT NULL,
+ track_id integer NOT NULL
+);
+
+CREATE TABLE liner_notes (
+ liner_id INTEGER PRIMARY KEY NOT NULL,
+ notes varchar(100) NOT NULL
+);
+
+CREATE TABLE lyric_versions (
+ id INTEGER PRIMARY KEY NOT NULL,
+ lyric_id integer NOT NULL,
+ ltext varchar(100) NOT NULL
+);
+
+CREATE TABLE tags (
+ tagid INTEGER PRIMARY KEY NOT NULL,
+ cd integer NOT NULL,
+ tag varchar(100) NOT NULL
+);
+
+CREATE TABLE cd_to_producer (
+ cd integer NOT NULL,
+ producer integer NOT NULL,
+ attribute integer,
+ PRIMARY KEY (cd, producer)
+);
+
+CREATE TABLE images (
+ id INTEGER PRIMARY KEY NOT NULL,
+ artwork_id integer NOT NULL,
+ name varchar(100) NOT NULL,
+ data blob
+);
+
+CREATE TABLE twokeys (
+ artist integer NOT NULL,
+ cd integer NOT NULL,
+ PRIMARY KEY (artist, cd)
+);
+
+CREATE TABLE artwork_to_artist (
+ artwork_cd_id integer NOT NULL,
+ artist_id integer NOT NULL,
+ PRIMARY KEY (artwork_cd_id, artist_id)
+);
+
+CREATE TABLE fourkeys_to_twokeys (
+ f_foo integer NOT NULL,
+ f_bar integer NOT NULL,
+ f_hello integer NOT NULL,
+ f_goodbye integer NOT NULL,
+ t_artist integer NOT NULL,
+ t_cd integer NOT NULL,
+ autopilot char NOT NULL,
+ pilot_sequence integer,
+ PRIMARY KEY (f_foo, f_bar, f_hello, f_goodbye, t_artist, t_cd)
+);