Build
Build.bat
+MANIFEST
META.yml
Makefile
Makefile.old
Revision history for DBIx::Class
+ - Possible to set locale in IC::DateTime extra => {} config
+ - Calling the accessor of a belongs_to when the foreign_key
+ was NULL and the row was not stored would unexpectedly fail (groditi)
+ - Split sql statements for deploy only if SQLT::Producer returned a scalar
+ containing all statements to be executed
+
+0.08099_06 2009-01-23 07:30:00 (UTC)
- Allow a scalarref to be supplied to the 'from' resultset attribute
- Classes submitted as result_class for a resultsource are now
automatically loaded via ensure_loaded()
- new order_by => { -desc => 'colname' } syntax supported with
SQLA >= 1.50
- PG array datatype supported with SQLA >= 1.50
+ - insert should use store_column, not set_column to avoid marking
+ clean just-stored values as dirty. New test for this (groditi)
0.08099_05 2008-10-30 21:30:00 (UTC)
- Rewritte of Storage::DBI::connect_info(), extended with an
--- /dev/null
+(Potential) Features for 0.09
+=============================
+
+Subselects - everyone wants these
+ - Will require new/improved SQL::Abstract
+ - The API will involve passing ResultSets to searches as conditions/values/whatever.
+
+Row/find caching - would be pretty useful
+ - Need to have good definitions of when the cache should be queried and when invalidated
+ - Be able to supply own expiry?
+ - Be able to invalidate manually? Single item / entire cache / single table
+
+Deprecate compose_connection / DB.pm
+ - Everyone has probably forgotten what this is anyway..
+
+Syntax improvements?
+ - "as" to "alias" ?
+ - "belongs_to" to "contains/refers/something"
+ - order_by to take a arrayref/hashref so it also just works with quoting
+ - ??
+
+Using inflated objects/references as values in searches
+ - Goes together with subselects above
+ - should deflate then run search
+
+FilterColumn - like Inflate, only for changing scalar values
+ - This seems to be vaporware atm..
+
+SQL/API feature complete?
+ - UNION
+ - proper join conditions!
+ - function calls on the LHS of conditions..
+
+Moosification - ouch
+
+Prefetch improvements
+ - slow on mysql, speedup?
+ - multi has_many prefetch
+ - paging working with prefetch
+
+Magically "discover" needed joins/prefetches and add them
+ - eg $books->search({ 'author.name' => 'Fred'}), autoadds: join => 'author'
+ - also guess aliases when supplying column names that are on joined/related tables
+
+Metamodel stuff - introspection
+
+Storage API/restructure
+ - call update/insert etc on the ResultSource, which then calls to storage
+ - handle different storages/db-specific code better
+ - better cross-db code .. eg LIKE/ILIKE
+
+Relationships
+ - single vs filter, discrepancies.. remove one of them and make behave the same?
+
+SQL::Abstract et al
+ - be able to extract/view/dump what the SQL is *going to* be before running it
+ - bind params sanity? (order of bind params broken when using a resultsource based on a select statement)
+ - etc
+
+Joining/searching weird attribute tables?
+ - Support legacy/badly designed junk better..
+
+Documentation - improvements
+ - better indexing for finding of stuff in general
+ - more cross-referencing of docs
+
# for developers only :)
^TODO$
+^Features_09$
# Avoid Module::Build generated and utility files.
\bBuild$
# Skip maint stuff
^maint/
-# Avoid copies to .orig
+# Avoid patch remnants
\.orig$
+\.rej$
# Dont use Module::Build anymore
^Build.PL$
use inc::Module::Install 0.67;
use strict;
use warnings;
+use POSIX ();
use 5.006001; # delete this line if you want to send patches for earlier.
requires 'Data::Page' => 2.00;
requires 'Scalar::Util' => 0;
-requires 'SQL::Abstract' => 1.20;
-requires 'SQL::Abstract::Limit' => 0.101;
-requires 'Class::C3' => 0.13;
+requires 'SQL::Abstract' => 1.24;
+requires 'SQL::Abstract::Limit' => 0.13;
+requires 'Class::C3' => 0.20;
requires 'Class::C3::Componentised' => 0;
requires 'Storable' => 0;
requires 'Carp::Clan' => 0;
requires 'DBI' => 1.40;
requires 'Module::Find' => 0;
requires 'Class::Inspector' => 0;
-requires 'Class::Accessor::Grouped' => 0.05002;
-requires 'JSON::Any' => 1.00;
+requires 'Class::Accessor::Grouped' => 0.08002;
+requires 'JSON::Any' => 1.17;
requires 'Scope::Guard' => 0.03;
requires 'Path::Class' => 0;
requires 'List::Util' => 1.19;
# Perl 5.8.0 doesn't have utf8::is_utf8()
requires 'Encode' => 0 if ($] <= 5.008000);
-test_requires 'DBD::SQLite' => 1.14;
+configure_requires 'DBD::SQLite' => 1.14;
+
test_requires 'Test::Builder' => 0.33;
test_requires 'Test::Warn' => 0.11;
test_requires 'Test::Exception' => 0;
install_script 'script/dbicadmin';
-tests "t/*.t t/*/*.t";
+tests_recursive 't';
# re-build README and require CDBI modules for testing if we're in a checkout
auto_install;
# Have all prerequisites, check DBD::SQLite sanity
-{
+if (! $ENV{DBICTEST_NO_SQLITE_CHECK} ) {
+
my $pid = fork();
if (not defined $pid) {
die "Unable to fork(): $!";
}
elsif (! $pid) {
+
+ # Win32 does not have real fork()s so a segfault will bring
+ # everything down. Warn about it.
+ if ($^O eq 'MSWin32') {
+ print <<'EOW';
+
+######################################################################
+# #
+# A short stress-testing of DBD::SQLite will follow. If you have a #
+# buggy library this might very well be the last text you will see #
+# before the installation silently terminates. If this happens it #
+# would mean that you are running a buggy version of DBD::SQLite #
+# known to randomly segfault on errors. Even if you have the latest #
+# CPAN module version, the system sqlite3 dynamic library might have #
+# been compiled against an older buggy sqlite3 dev library (oddly #
+# DBD::SQLite will prefer the system library against the one bundled #
+# with it). You are strongly advised to resolve this issue before #
+# proceeding. #
+# #
+# If this happens to you (this text is the last thing you see), and #
+# you just want to install this module without worrying about the #
+# tests (which will almost certainly fail) - set the environment #
+# variable DBICTEST_NO_SQLITE_CHECK to a true value and try again. #
+# #
+######################################################################
+
+EOW
+ }
+
require DBI;
- for (1 .. 10) {
+ for (1 .. 100) {
my $dbh;
$dbh = DBI->connect ('dbi:SQLite::memory:', undef, undef, {
AutoCommit => 1,
exit 0;
}
else {
- wait();
+ eval {
+ local $SIG{ALRM} = sub { die "timeout\n" };
+ alarm 5;
+ wait();
+ alarm 0;
+ };
my $sig = $? & 127;
- if ($sig == 11) {
+ if ($@ || $sig == POSIX::SIGSEGV) {
warn (<<EOE);
-############################### WARNING ###################################
-# #
-# You are running a buggy version of DBD::SQLite known to randomly #
-# segfault on errors. Even if you have the latest CPAN module version, #
-# the actual sqlite3.so might have been compiled against an older buggy #
-# sqlite3 dev library. You are strongly advised to update DBD::SQLite. #
-# #
-###########################################################################
+############################### WARNING #################################
+# #
+# You are running a buggy version of DBD::SQLite known to randomly #
+# segfault on errors. Even if you have the latest CPAN module version, #
+# the sqlite3 dynamic library on this system might have been compiled #
+# against an older buggy sqlite3 dev library (oddly DBD::SQLite will #
+# prefer the system library against the one bundled with it). You are #
+# strongly advised to resolve this issue before proceeding. #
+# #
+#########################################################################
EOE
my $ans = prompt (
}
-WriteAll;
+WriteAll();
if ($Module::Install::AUTHOR) {
];
Meta->write;
}
-
-
-
# i.e. first release of 0.XX *must* be 0.XX000. This avoids fBSD ports
# brain damage and presumably various other packaging systems too
-$VERSION = '0.08099_05';
+$VERSION = '0.08099_06';
$VERSION = eval $VERSION; # numify for warning-free dev releases
rjbs: Ricardo Signes <rjbs@cpan.org>
+robkinyon: Rob Kinyon <rkinyon@cpan.org>
+
sc_: Just Another Perl Hacker
scotty: Scotty Allen <scotty@scottyallen.com>
# successfully, and false if the class is not installed
sub load_optional_class {
  my ($class, $f_class) = @_;
-  if ($class->ensure_class_found($f_class)) {
-    $class->ensure_class_loaded($f_class);
+  # Attempt the load and trap any exception, so we can distinguish
+  # "module not installed" from "module installed but broken".
+  eval { $class->ensure_class_loaded($f_class) };
+  my $err = $@; # so we don't lose it
+  if (! $err) {
    return 1;
-  } else {
-    return 0;
+  }
+  else {
+    # Convert Foo::Bar into Foo/Bar.pm - the path perl itself names in
+    # its "Can't locate ..." error message.
+    my $fn = (join ('/', split ('::', $f_class) ) ) . '.pm';
+    if ($err =~ /Can't locate ${fn} in \@INC/ ) {
+      # Module simply is not installed - report "not loadable".
+      return 0;
+    }
+    else {
+      # Module exists but failed to compile - rethrow the real error
+      # instead of silently pretending it is absent.
+      die $err;
+    }
  }
}
print "This event starts the month of ".
$event->starts_when->month_name();
-If you want to set a specific timezone for that field, use:
+If you want to set a specific timezone and locale for that field, use:
__PACKAGE__->add_columns(
- starts_when => { data_type => 'datetime', extra => { timezone => "America/Chicago" } }
+ starts_when => { data_type => 'datetime', extra => { timezone => "America/Chicago", locale => "de_DE" } }
);
If you want to inflate no matter what data_type your column is,
}
my $timezone;
- if ( exists $info->{extra} and exists $info->{extra}{timezone} and defined $info->{extra}{timezone} ) {
+ if ( defined $info->{extra}{timezone} ) {
$timezone = $info->{extra}{timezone};
}
+ my $locale;
+ if ( defined $info->{extra}{locale} ) {
+ $locale = $info->{extra}{locale};
+ }
+
my $undef_if_invalid = $info->{datetime_undef_if_invalid};
if ($type eq 'datetime' || $type eq 'date') {
die "Error while inflating ${value} for ${column} on ${self}: $@"
if $@ and not $undef_if_invalid;
$dt->set_time_zone($timezone) if $timezone;
+ $dt->set_locale($locale) if $locale;
return $dt;
},
deflate => sub {
and not $floating_tz_ok
and not $ENV{DBIC_FLOATING_TZ_OK};
$value->set_time_zone($timezone);
+ $value->set_locale($locale) if $locale;
}
$obj->_datetime_parser->$format($value);
},
are in any way unsure about the use of the attributes above (C< join
>, C< select >, C< as > and C< group_by >).
+=head2 Subqueries
+
+You can write subqueries relatively easily in DBIC.
+
+ my $inside_rs = $schema->resultset('Artist')->search({
+ name => [ 'Billy Joel', 'Brittany Spears' ],
+ });
+
+ my $rs = $schema->resultset('CD')->search({
+ artist_id => { 'IN' => $inside_rs->get_column('id')->as_query },
+ });
+
+The usual operators ( =, !=, IN, NOT IN, etc) are supported.
+
+B<NOTE>: You have to explicitly use '=' when doing an equality comparison.
+The following will B<not> work:
+
+ my $rs = $schema->resultset('CD')->search({
+ artist_id => $inside_rs->get_column('id')->as_query,
+ });
+
+=head3 Correlated subqueries
+
+ my $cdrs = $schema->resultset('CD');
+ my $rs = $cdrs->search({
+ year => {
+ '=' => $cdrs->search(
+ { artistid => { '=' => \'me.artistid' } },
+ { alias => 'inner' }
+ )->get_column('year')->max_rs->as_query,
+ },
+ });
+
+That creates the following SQL:
+
+ SELECT me.cdid, me.artist, me.title, me.year, me.genreid, me.single_track
+ FROM cd me
+ WHERE year = (
+ SELECT MAX(inner.year)
+ FROM cd inner
+ WHERE artistid = me.artistid
+ )
+
=head2 Predefined searches
You can write your own L<DBIx::Class::ResultSet> class by inheriting from it
my $hash_ref = $rs->find(1);
Wasn't that easy?
-
+
=head2 Get raw data for blindingly fast results
If the L<HashRefInflator|DBIx::Class::ResultClass::HashRefInflator> solution
$resultset->search(
{
- numbers => \[ '= ?', [1, 2, 3] ]
+ numbers => \[ '= ?', [numbers => [1, 2, 3]] ]
}
);
See L<SQL::Abstract/array_datatypes> and L<SQL::Abstract/Literal SQL with
-placeholders and bind values (subqueries)> for more explanation.
+placeholders and bind values (subqueries)> for more explanation. Note that
+L<DBIx::Class> sets L<SQL::Abstract/bindtype> to C<columns>, so you must pass
+the bind values (the C<[1, 2, 3]> arrayref in the above example) wrapped in
+arrayrefs together with the column name, like this: C<< [column_name => value]
+>>.
=head1 BOOTSTRAPPING/MIGRATING
statement and dig down to see if certain parameters cause aberrant behavior.
You might want to check out L<DBIx::Class::QueryLog> as well.
+=head1 STARTUP SPEED
+
+L<DBIx::Class|DBIx::Class> programs can have a significant startup delay
+as the ORM loads all the relevant classes. This section examines
+techniques for reducing the startup delay.
+
+These tips are listed in order of decreasing effectiveness - so the
+first tip, if applicable, should have the greatest effect on your
+application.
+
+=head2 Statically Define Your Schema
+
+If you are using
+L<DBIx::Class::Schema::Loader|DBIx::Class::Schema::Loader> to build the
+classes dynamically based on the database schema then there will be a
+significant startup delay.
+
+For production use a statically defined schema (which can be generated
+using L<DBIx::Class::Schema::Loader|DBIx::Class::Schema::Loader> to dump
+the database schema once - see
+L<make_schema_at|DBIx::Class::Schema::Loader/make_schema_at> and
+L<dump_directory|DBIx::Class::Schema::Loader/dump_directory> for more
+details on creating static schemas from a database).
+
+=head2 Move Common Startup into a Base Class
+
+Typically L<DBIx::Class> result classes start off with
+
+ use base qw/DBIx::Class/;
+ __PACKAGE__->load_components(qw/InflateColumn::DateTime Core/);
+
+If this preamble is moved into a common base class:-
+
+ package MyDBICbase;
+
+ use base qw/DBIx::Class/;
+ __PACKAGE__->load_components(qw/InflateColumn::DateTime Core/);
+ 1;
+
+and each result class then uses this as a base:-
+
+ use base qw/MyDBICbase/;
+
+then the load_components is only performed once, which can result in a
+considerable startup speedup for schemas with many classes.
+
+=head2 Explicitly List Schema Result Classes
+
+The schema class will normally contain
+
+ __PACKAGE__->load_classes();
+
+to load the result classes. This will use L<Module::Find|Module::Find>
+to find and load the appropriate modules. Explicitly defining the
+classes you wish to load will remove the overhead of
+L<Module::Find|Module::Find> and the related directory operations:-
+
+ __PACKAGE__->load_classes(qw/ CD Artist Track /);
+
+If you are instead using the L<load_namespaces|DBIx::Class::Schema/load_namespaces>
+syntax to load the appropriate classes there is not a direct alternative
+avoiding L<Module::Find|Module::Find>.
=cut
be alarmed if the error from incorrect connection details happens a
lot later.
+=item .. use DBIx::Class across multiple databases?
+
+If your database server allows you to run queries across multiple
+databases at once, then so can DBIx::Class. All you need to do is make
+sure you write the database name as part of the
+L<DBIx::Class::ResultSource/table> call. Eg:
+
+ __PACKAGE__->table('mydb.mytablename');
+
+And load all the Result classes for both / all databases using one
+L<DBIx::Class::Schema/load_namespaces> call.
+
+=item .. use DBIx::Class across PostgreSQL/DB2/Oracle schemas?
+
+Add the name of the schema to the L<DBIx::Class::ResultSource/table>
+as part of the name, and make sure you give the one user you are going
+to connect with rights to read/write all the schemas/tables as
+necessary.
=back
You can add your own data accessors to your classes.
+One method is to use the built in mk_group_accessors (via L<Class::Accessor::Grouped>)
+
+ package MyTable;
+
+ use parent 'DBIx::Class';
+
+ __PACKAGE__->table('foo'); #etc
+ __PACKAGE__->mk_group_accessors('simple' => qw/non_column_data/); # must use simple group
+
+Another method is to use L<Moose> with your L<DBIx::Class> package.
+
+ package MyTable;
+
+ use Moose; # import Moose
+  use Moose::Util::TypeConstraints; # import Moose accessor type constraints
+
+ extends 'DBIx::Class'; # Moose changes the way we define our parent (base) package
+
+ has 'non_column_data' => ( is => 'rw', isa => 'Str' ); # define a simple attribute
+
+ __PACKAGE__->table('foo'); # etc
+
+With either of these methods the resulting use of the accessor would be
+
+ my $row;
+
+ # assume that some where in here $row will get assigned to a MyTable row
+
+ $row->non_column_data('some string'); # would set the non_column_data accessor
+
+ # some other stuff happens here
+
+ $row->update(); # would not inline the non_column_data accessor into the update
+
+
=item How do I use DBIx::Class objects in my TT templates?
Like normal objects, mostly. However you need to watch out for TT
$resultset->set_primary_key(@column);
+=item How do I make my program start faster?
+
+Look at the tips in L<DBIx::Class::Manual::Cookbook/"STARTUP SPEED">
+
+=item How do I reduce the overhead of database queries?
+
+You can reduce the overhead of object creation within L<DBIx::Class>
+using the tips in L<DBIx::Class::Manual::Cookbook/"Skip row object creation for faster results">
+and L<DBIx::Class::Manual::Cookbook/"Get raw data for blindingly fast results">
+
=back
=head2 Notes for CDBI users
$rel_info->{cond}, $rel, $self
);
if ($rel_info->{attrs}->{undef_on_null_fk}){
+ return unless ref($cond) eq 'HASH';
return if grep { not defined } values %$cond;
}
my $val = $self->find_related($rel, {}, {});
(ref $cond ? ref $cond : 'plain scalar')
) unless ref $cond eq 'HASH';
if (defined $f_obj) {
- my $f_class = $self->result_source->schema->class($rel_obj->{class});
+ my $f_class = $rel_obj->{class};
$self->throw_exception( "Object $f_obj isn't a ".$f_class )
unless Scalar::Util::blessed($f_obj) and $f_obj->isa($f_class);
}
=head1 NAME
-DBIx::Class::ResultSet - Responsible for fetching and creating resultset.
+DBIx::Class::ResultSet - Represents a query used for fetching a set of results.
=head1 SYNOPSIS
- my $rs = $schema->resultset('User')->search({ registered => 1 });
- my @rows = $schema->resultset('CD')->search({ year => 2005 })->all();
+ my $users_rs = $schema->resultset('User');
+ my $registered_users_rs = $schema->resultset('User')->search({ registered => 1 });
+ my @cds_in_2005 = $schema->resultset('CD')->search({ year => 2005 })->all();
=head1 DESCRIPTION
-The resultset is also known as an iterator. It is responsible for handling
-queries that may return an arbitrary number of rows, e.g. via L</search>
-or a C<has_many> relationship.
+A ResultSet is an object which stores a set of conditions representing
+a query. It is the backbone of DBIx::Class (i.e. the really
+important/useful bit).
-In the examples below, the following table classes are used:
+No SQL is executed on the database when a ResultSet is created, it
+just stores all the conditions needed to create the query.
- package MyApp::Schema::Artist;
- use base qw/DBIx::Class/;
- __PACKAGE__->load_components(qw/Core/);
- __PACKAGE__->table('artist');
- __PACKAGE__->add_columns(qw/artistid name/);
- __PACKAGE__->set_primary_key('artistid');
- __PACKAGE__->has_many(cds => 'MyApp::Schema::CD');
- 1;
+A basic ResultSet representing the data of an entire table is returned
+by calling C<resultset> on a L<DBIx::Class::Schema> and passing in a
+L<Source|DBIx::Class::Manual::Glossary/Source> name.
- package MyApp::Schema::CD;
- use base qw/DBIx::Class/;
- __PACKAGE__->load_components(qw/Core/);
- __PACKAGE__->table('cd');
- __PACKAGE__->add_columns(qw/cdid artist title year/);
- __PACKAGE__->set_primary_key('cdid');
- __PACKAGE__->belongs_to(artist => 'MyApp::Schema::Artist');
- 1;
+ my $users_rs = $schema->resultset('User');
+
+A new ResultSet is returned from calling L</search> on an existing
+ResultSet. The new one will contain all the conditions of the
+original, plus any new conditions added in the C<search> call.
+
+A ResultSet is also an iterator. L</next> is used to return all the
+L<DBIx::Class::Row>s the ResultSet represents.
+
+The query that the ResultSet represents is B<only> executed against
+the database when these methods are called:
+
+=over
+
+=item L</find>
+
+=item L</next>
+
+=item L</all>
+
+=item L</count>
+
+=item L</single>
+
+=item L</first>
+
+=back
+
+=head1 EXAMPLES
+
+=head2 Chaining resultsets
+
+Let's say you've got a query that needs to be run to return some data
+to the user. But, you have an authorization system in place that
+prevents certain users from seeing certain information. So, you want
+to construct the basic query in one method, but add constraints to it in
+another.
+
+ sub get_data {
+ my $self = shift;
+ my $request = $self->get_request; # Get a request object somehow.
+ my $schema = $self->get_schema; # Get the DBIC schema object somehow.
+
+ my $cd_rs = $schema->resultset('CD')->search({
+ title => $request->param('title'),
+ year => $request->param('year'),
+ });
+
+ $self->apply_security_policy( $cd_rs );
+
+ return $cd_rs->all();
+ }
+
+ sub apply_security_policy {
+ my $self = shift;
+ my ($rs) = @_;
+
+ return $rs->search({
+ subversive => 0,
+ });
+ }
+
+=head2 Multiple queries
+
+Since a resultset just defines a query, you can do all sorts of
+things with it with the same object.
+
+ # Don't hit the DB yet.
+ my $cd_rs = $schema->resultset('CD')->search({
+ title => 'something',
+ year => 2009,
+ });
+
+ # Each of these hits the DB individually.
+ my $count = $cd_rs->count;
+ my $most_recent = $cd_rs->get_column('date_released')->max();
+ my @records = $cd_rs->all;
+
+And it's not just limited to SELECT statements.
+
+ $cd_rs->delete();
+
+This is even cooler:
+
+ $cd_rs->create({ artist => 'Fred' });
+
+Which is the same as:
+
+ $schema->resultset('CD')->create({
+ title => 'something',
+ year => 2009,
+ artist => 'Fred'
+ });
+
+See: L</search>, L</count>, L</get_column>, L</all>, L</create>.
=head1 OVERLOADING
sub single {
my ($self, $where) = @_;
+ if(@_ > 2) {
+ $self->throw_exception('single() only takes search conditions, no attributes. You want ->search( $cond, $attrs )->single()');
+ }
+
my $attrs = { %{$self->_resolved_attrs} };
if ($where) {
if (defined $attrs->{where}) {
=cut
sub all {
- my ($self) = @_;
+ my $self = shift;
+ if(@_) {
+ $self->throw_exception("all() doesn't take any arguments, you probably wanted ->search(...)->all()");
+ }
+
return @{ $self->get_cache } if $self->get_cache;
my @obj;
$self->throw_exception("Values for update must be a hash")
unless ref $values eq 'HASH';
+ carp( 'WARNING! Currently $rs->update() does not generate proper SQL'
+ . ' on joined resultsets, and may affect rows well outside of the'
+ . ' contents of $rs. Use at your own risk' )
+ if ( $self->{attrs}{seen_join} );
+
my $cond = $self->_cond_for_update_delete;
return $self->result_source->storage->update(
=head2 _normalize_populate_args ($args)
-Private method used by L</populate> to normalize it's incoming arguments. Factored
+Private method used by L</populate> to normalize its incoming arguments. Factored
out in case you want to subclass and accept new argument structures to the
L</populate> method.
return \%unaliased;
}
+=head2 as_query
+
+=over 4
+
+=item Arguments: none
+
+=item Return Value: \[ $sql, @bind ]
+
+=back
+
+Returns the SQL query and bind vars associated with the invocant.
+
+This is generally used as the RHS for a subquery.
+
+=cut
+
+sub as_query { return shift->cursor->as_query(@_) }
+
=head2 find_or_new
=over 4
$cd->cd_to_producer->find_or_new({ producer => $producer },
{ key => 'primary });
-Find an existing record from this resultset, based on it's primary
+Find an existing record from this resultset, based on its primary
key, or a unique constraint. If none exists, instantiate a new result
object and return it. The object will not be saved into your storage
until you call L<DBIx::Class::Row/insert> on it.
pairs representing the columns of the table and the values you wish to
store. If the appropriate relationships are set up, foreign key fields
can also be passed an object representing the foreign row, and the
-value will be set to it's primary key.
+value will be set to its primary key.
To create related objects, pass a hashref for the value if the related
item is a foreign key relationship (L<DBIx::Class::Relationship/belongs_to>),
$person_rs->create({
name=>"Some Person",
- email=>"somebody@someplace.com"
+ email=>"somebody@someplace.com"
});
Example of creating a new row and also creating rows in a related C<has_many>
$cd_rs->create({
title=>"Music for Silly Walks",
- year=>2000,
- artist => {
- name=>"Silly Musician",
- }
+ year=>2000,
+ artist => {
+ name=>"Silly Musician",
+ }
});
=cut
push(@{$attrs->{as}}, @$adds);
}
- $attrs->{from} ||= [ { 'me' => $source->from } ];
+ $attrs->{from} ||= [ { $self->{attrs}{alias} => $source->from } ];
if (exists $attrs->{join} || exists $attrs->{prefetch}) {
my $join = delete $attrs->{join} || {};
if (my $prefetch = delete $attrs->{prefetch}) {
$prefetch = $self->_merge_attr({}, $prefetch);
my @pre_order;
- my $seen = $attrs->{seen_join} || {};
+ my $seen = { %{ $attrs->{seen_join} || {} } };
foreach my $p (ref $prefetch eq 'ARRAY' ? @$prefetch : ($prefetch)) {
# bring joins back to level of current class
my @prefetch = $source->resolve_prefetch(
=head1 ATTRIBUTES
-The resultset takes various attributes that modify its behavior. Here's an
-overview of them:
+Attributes are used to refine a ResultSet in various ways when
+searching for data. They can be passed to any method which takes an
+C<\%attrs> argument. See L</search>, L</search_rs>, L</find>,
+L</count>.
+
+These are in no particular order:
=head2 order_by
=over 4
-Indicates additional column names for those added via L</+select>.
+Indicates additional column names for those added via L</+select>. See L</as>.
=back
return $new;
}
+=head2 as_query
+
+=over 4
+
+=item Arguments: none
+
+=item Return Value: \[ $sql, @bind ]
+
+=back
+
+Returns the SQL query and bind vars associated with the invocant.
+
+This is generally used as the RHS for a subquery.
+
+=cut
+
+sub as_query { return shift->_resultset->as_query }
+
=head2 next
=over 4
return shift->func('MIN');
}
+=head2 min_rs
+
+=over 4
+
+=item Arguments: none
+
+=item Return Value: $resultset
+
+=back
+
+ my $rs = $year_col->min_rs();
+
+Wrapper for ->func_rs for function MIN().
+
+=cut
+
+sub min_rs { return shift->func_rs('MIN') }
+
=head2 max
=over 4
return shift->func('MAX');
}
+=head2 max_rs
+
+=over 4
+
+=item Arguments: none
+
+=item Return Value: $resultset
+
+=back
+
+ my $rs = $year_col->max_rs();
+
+Wrapper for ->func_rs for function MAX().
+
+=cut
+
+sub max_rs { return shift->func_rs('MAX') }
+
=head2 sum
=over 4
return shift->func('SUM');
}
+=head2 sum_rs
+
+=over 4
+
+=item Arguments: none
+
+=item Return Value: $resultset
+
+=back
+
+ my $rs = $year_col->sum_rs();
+
+Wrapper for ->func_rs for function SUM().
+
+=cut
+
+sub sum_rs { return shift->func_rs('SUM') }
+
=head2 func
=over 4
sub func {
my ($self,$function) = @_;
- my $cursor = $self->{_parent_resultset}->search(undef, {select => {$function => $self->{_select}}, as => [$self->{_as}]})->cursor;
+ my $cursor = $self->func_rs($function)->cursor;
if( wantarray ) {
return map { $_->[ 0 ] } $cursor->all;
return ( $cursor->next )[ 0 ];
}
+=head2 func_rs
+
+=over 4
+
+=item Arguments: $function
+
+=item Return Value: $resultset
+
+=back
+
+Creates the resultset that C<func()> uses to run its query.
+
+=cut
+
+sub func_rs {
+  my ($self,$function) = @_;
+  # Build (but do not execute) a resultset over the parent resultset
+  # that selects $function applied to this column's select expression.
+  # func() runs this query via its cursor; *_rs wrappers return it as-is.
+  return $self->{_parent_resultset}->search(
+    undef, {
+      select => {$function => $self->{_select}},
+      as => [$self->{_as}],
+    },
+  );
+}
+
=head2 throw_exception
See L<DBIx::Class::Schema/throw_exception> for details.
);
}
-
1;
=head1 AUTHORS
method will add more columns, not replace them.
The column names given will be created as accessor methods on your
-L<DBIx::Class::Row> objects, you can change the name of the accessor
+L<DBIx::Class::Row> objects. You can change the name of the accessor
by supplying an L</accessor> in the column_info hash.
The contents of the column_info are not set in stone. The following
=item auto_nextval
Set this to a true value for a column whose value is retrieved
-automatically from an oracle sequence. If you do not use an oracle
+automatically from an oracle sequence. If you do not use an Oracle
trigger to get the nextval, you have to set sequence as well.
=item extra
return @{ $unique_constraints{$constraint_name} };
}
+=head2 sqlt_deploy_callback
+
+=over
+
+=item Arguments: $callback
+
+=back
+
+ __PACKAGE__->sqlt_deploy_callback('mycallbackmethod');
+
+An accessor to set a callback to be called during deployment of
+the schema via L<DBIx::Class::Schema/create_ddl_dir> or
+L<DBIx::Class::Schema/deploy>.
+
+The callback can be set as either a code reference or the name of a
+method in the current result class.
+
+If not set, the L</default_sqlt_deploy_hook> is called.
+
+Your callback will be passed the $source object representing the
+ResultSource instance being deployed, and the
+L<SQL::Translator::Schema::Table> object being created from it. The
+callback can be used to manipulate the table object or add your own
+customised indexes. If you need to manipulate a non-table object, use
+the L<DBIx::Class::Schema/sqlt_deploy_hook>.
+
+See L<DBIx::Class::Manual::Cookbook/Adding Indexes And Functions To
+Your SQL> for examples.
+
+This sqlt deployment callback can only be used to manipulate
+SQL::Translator objects as they get turned into SQL. To execute
+post-deploy statements which SQL::Translator does not currently
+handle, override L<DBIx::Class::Schema/deploy> in your Schema class
+and call L<dbh_do|DBIx::Class::Storage::DBI/dbh_do>.
+
+=head2 default_sqlt_deploy_hook
+
+=over
+
+=item Arguments: $source, $sqlt_table
+
+=item Return value: undefined
+
+=back
+
+This is the sensible default for L</sqlt_deploy_callback>.
+
+If a method named C<sqlt_deploy_hook> exists in your Result class, it
+will be called and passed the current C<$source> and the
+C<$sqlt_table> being deployed.
+
+=cut
+
+sub default_sqlt_deploy_hook {
+  my $self = shift;
+
+  my $class = $self->result_class;
+
+  # Delegate to the result class's sqlt_deploy_hook when one is defined;
+  # a source without a result class (or without the method) is a no-op.
+  if ($class and $class->can('sqlt_deploy_hook')) {
+    $class->sqlt_deploy_hook(@_);
+  }
+}
+
+sub _invoke_sqlt_deploy_hook {
+  my $self = shift;
+  # Dispatch to whatever sqlt_deploy_callback is set to - either a
+  # method name or a coderef; silently does nothing when unset.
+  if ( my $hook = $self->sqlt_deploy_callback) {
+    $self->$hook(@_);
+  }
+}
+
=head2 resultset
=over 4
$source->resultset_class('My::ResultSet::Class');
-Set the class of the resultset, this is useful if you want to create your
+Set the class of the resultset. This is useful if you want to create your
own resultset methods. Create your own class derived from
L<DBIx::Class::ResultSet>, and set it here. If called with no arguments,
this method returns the name of the existing resultset class, if one
Creates a new ResultSource object. Not normally called directly by end users.
-=cut
-
=head2 column_info_from_storage
=over
=back
+ __PACKAGE__->column_info_from_storage(1);
+
Enables the on-demand automatic loading of the above column
metadata from storage as neccesary. This is *deprecated*, and
should not be used. It will be removed before 1.0.
- __PACKAGE__->column_info_from_storage(1);
-
-=head2 sqlt_deploy_callback
-
-An attribute which contains the callback to trigger on L</sqlt_deploy_hook>.
-Defaults to L</default_sqlt_deploy_hook>. Can be a code reference or the name
-of a method in the current result class. You would change the default value
-in case you want to share a hook between several result sources, or if you
-want to use a result source without a declared result class.
-
-=head2 default_sqlt_deploy_hook
-
-=over
-
-=item Arguments: $source, $sqlt_table
-
-=item Return value: undefined
-
-=back
-
-Proxies its arguments to a C<sqlt_deploy_hook> method on the C<result_class>
-if such a method exists. This is useful to make L<SQL::Translator> create
-non-unique indexes, or set table options such as C<Engine=INNODB>. For
-examples of what you can do with this, see
-L<DBIx::Class::Manual::Cookbook/Adding Indexes And Functions To Your SQL>.
-
-=cut
-
-sub default_sqlt_deploy_hook {
- my $self = shift;
-
- my $class = $self->result_class;
-
- if ($class and $class->can('sqlt_deploy_hook')) {
- $class->sqlt_deploy_hook(@_);
- }
-}
-
-
-=head2 sqlt_deploy_hook
-
-=over 4
-
-=item Arguments: $source, $sqlt_table
-
-=item Return value: undefined
-
-=back
-
-This is the entry point invoked by L<SQL::Translator::Parser::DBIx::Class>
-during the execution of L<DBIx::Class::Storage::DBI/deployment_statements>.
-Delegates to the method name or code reference specified in
-L</sqlt_deploy_callback>.
-
-Note that the code is called by
-L<DBIx::Class::Storage::DBI/deployment_statements>, which in turn is called
-before L<DBIx::Class::Schema/deploy>. Therefore the hook can be used only
-to manipulate the L<SQL::Translator::Table> object before it is turned into
-SQL fed to the database. If you want to execute post-deploy statements which
-currently can can not be generated by L<SQL::Translator>, the suggested
-method is to overload L<DBIx::Class::Storage/deploy> and use
-L<dbh_do|DBIx::Class::Storage::DBI/dbh_do>.
-
-=cut
-
-sub sqlt_deploy_hook {
- my $self = shift;
- if ( my $hook = $self->sqlt_deploy_callback) {
- $self->$hook(@_);
- }
-}
=head1 AUTHORS
}
my $updated_cols = $source->storage->insert($source, { $self->get_columns });
- $self->set_columns($updated_cols);
+ foreach my $col (keys %$updated_cols) {
+ $self->store_column($col, $updated_cols->{$col});
+ }
## PK::Auto
my @auto_pri = grep {
matching is done by assuming the package name of the ResultSet class
is the same as that of the Result class.
-You will be warned if ResulSet classes are discovered for which there
+You will be warned if ResultSet classes are discovered for which there
are no matching Result classes like this:
load_namespaces found ResultSet class $classname with no corresponding Result class
return;
}
- eval 'require SQL::Translator "0.09"';
+ eval 'require SQL::Translator "0.09003"';
if ($@) {
- $self->throw_exception("SQL::Translator 0.09 required");
+ $self->throw_exception("SQL::Translator 0.09003 required");
}
my $db_tr = SQL::Translator->new({
$self;
}
+# DB2 is the only remaining DB using this. Even though we are not sure if
+# RowNumberOver is still needed here (should be part of SQLA) leave the
+# code in place
+sub _RowNumberOver {
+  my ($self, $sql, $order, $rows, $offset ) = @_;
+
+  # ROW_NUMBER() numbers rows starting at 1, so convert the zero-based
+  # $offset into the first row number we want returned.
+  $offset += 1;
+
+  # BETWEEN is inclusive on both ends: to fetch exactly $rows rows the
+  # upper bound must be first-row + $rows - 1. The previous expression
+  # ($rows + $offset) was off by one and returned an extra row.
+  my $last = $rows + $offset - 1;
+  my ( $order_by ) = $self->_order_by( $order );
+
+  # Wrap the original statement so its rows can be numbered and sliced.
+  $sql = <<"SQL";
+SELECT * FROM
+(
+   SELECT Q1.*, ROW_NUMBER() OVER( ) AS ROW_NUM FROM (
+      $sql
+      $order_by
+   ) Q1
+) Q2
+WHERE ROW_NUM BETWEEN $offset AND $last
+
+SQL
+
+  return $sql;
+}
+
+
# While we're at it, this should make LIMIT queries more efficient,
# without digging into things too deeply
use Scalar::Util 'blessed';
sub _find_syntax {
my ($self, $syntax) = @_;
- my $dbhname = blessed($syntax) ? $syntax->{Driver}{Name} : $syntax;
+
+ # DB2 is the only remaining DB using this. Even though we are not sure if
+ # RowNumberOver is still needed here (should be part of SQLA) leave the
+ # code in place
+ my $dbhname = blessed($syntax) ? $syntax->{Driver}{Name} : $syntax;
if(ref($self) && $dbhname && $dbhname eq 'DB2') {
return 'RowNumberOver';
}
-
+
$self->{_cached_syntax} ||= $self->SUPER::_find_syntax($syntax);
}
$self->_do_query($_) foreach @$action;
}
else {
- my @to_run = (ref $action eq 'ARRAY') ? (@$action) : ($action);
- $self->_query_start(@to_run);
- $self->_dbh->do(@to_run);
- $self->_query_end(@to_run);
+    # Most debuggers expect ($sql, @bind), so we need to exclude
+    # the attribute hash, which is the second argument to $dbh->do.
+    # Furthermore, the bind values are usually to be presented
+    # as named arrayref pairs, so wrap those here too.
+ my @do_args = (ref $action eq 'ARRAY') ? (@$action) : ($action);
+ my $sql = shift @do_args;
+ my $attrs = shift @do_args;
+ my @bind = map { [ undef, $_ ] } @do_args;
+
+ $self->_query_start($sql, @bind);
+ $self->_dbh->do($sql, $attrs, @do_args);
+ $self->_query_end($sql, @bind);
}
return $self;
sub _prep_for_execute {
my ($self, $op, $extra_bind, $ident, $args) = @_;
+ if( blessed($ident) && $ident->isa("DBIx::Class::ResultSource") ) {
+ $ident = $ident->from();
+ }
+
my ($sql, @bind) = $self->sql_maker->$op($ident, @$args);
+
unshift(@bind,
map { ref $_ eq 'ARRAY' ? $_ : [ '!!dummy', $_ ] } @$extra_bind)
if $extra_bind;
-
return ($sql, \@bind);
}
sub _dbh_execute {
my ($self, $dbh, $op, $extra_bind, $ident, $bind_attributes, @args) = @_;
- if( blessed($ident) && $ident->isa("DBIx::Class::ResultSource") ) {
- $ident = $ident->from();
- }
-
my ($sql, $bind) = $self->_prep_for_execute($op, $extra_bind, $ident, \@args);
$self->_query_start( $sql, @$bind );
}
sub _select {
+ my $self = shift;
+ my $sql_maker = $self->sql_maker;
+ local $sql_maker->{for};
+ return $self->_execute($self->_select_args(@_));
+}
+
+sub _select_args {
my ($self, $ident, $select, $condition, $attrs) = @_;
my $order = $attrs->{order_by};
my $for = delete $attrs->{for};
my $sql_maker = $self->sql_maker;
- local $sql_maker->{for} = $for;
+ $sql_maker->{for} = $for;
if (exists $attrs->{group_by} || $attrs->{having}) {
$order = {
push @args, $attrs->{rows}, $attrs->{offset};
}
- return $self->_execute(@args);
+ return @args;
}
sub source_bind_attributes {
%{$sqltargs || {}}
};
- $self->throw_exception(q{Can't create a ddl file without SQL::Translator 0.09: '}
+ $self->throw_exception(q{Can't create a ddl file without SQL::Translator 0.09003: '}
. $self->_check_sqlt_message . q{'})
if !$self->_check_sqlt_version;
return join('', @rows);
}
- $self->throw_exception(q{Can't deploy without SQL::Translator 0.09: '}
+ $self->throw_exception(q{Can't deploy without SQL::Translator 0.09003: '}
. $self->_check_sqlt_message . q{'})
if !$self->_check_sqlt_version;
sub deploy {
my ($self, $schema, $type, $sqltargs, $dir) = @_;
- foreach my $statement ( $self->deployment_statements($schema, $type, undef, $dir, { no_comments => 1, %{ $sqltargs || {} } } ) ) {
- foreach my $line ( split(";\n", $statement)) {
- next if($line =~ /^--/);
- next if(!$line);
-# next if($line =~ /^DROP/m);
- next if($line =~ /^BEGIN TRANSACTION/m);
- next if($line =~ /^COMMIT/m);
- next if $line =~ /^\s+$/; # skip whitespace only
- $self->_query_start($line);
- eval {
- $self->dbh->do($line); # shouldn't be using ->dbh ?
- };
- if ($@) {
- warn qq{$@ (running "${line}")};
- }
- $self->_query_end($line);
+ my $deploy = sub {
+ my $line = shift;
+ return if($line =~ /^--/);
+ return if(!$line);
+ # next if($line =~ /^DROP/m);
+ return if($line =~ /^BEGIN TRANSACTION/m);
+ return if($line =~ /^COMMIT/m);
+ return if $line =~ /^\s+$/; # skip whitespace only
+ $self->_query_start($line);
+ eval {
+ $self->dbh->do($line); # shouldn't be using ->dbh ?
+ };
+ if ($@) {
+ warn qq{$@ (running "${line}")};
+ }
+ $self->_query_end($line);
+ };
+ my @statements = $self->deployment_statements($schema, $type, undef, $dir, { no_comments => 1, %{ $sqltargs || {} } } );
+ if (@statements > 1) {
+ foreach my $statement (@statements) {
+ $deploy->( $statement );
+ }
+ }
+ elsif (@statements == 1) {
+ foreach my $line ( split(";\n", $statements[0])) {
+ $deploy->( $line );
}
}
}
my $_check_sqlt_message; # private
sub _check_sqlt_version {
return $_check_sqlt_version if defined $_check_sqlt_version;
- eval 'use SQL::Translator "0.09"';
+ eval 'use SQL::Translator "0.09003"';
$_check_sqlt_message = $@ || '';
$_check_sqlt_version = !$@;
}
return bless ($new, $class);
}
+=head2 as_query
+
+=over 4
+
+=item Arguments: none
+
+=item Return Value: \[ $sql, @bind ]
+
+=back
+
+Returns the SQL statement and bind vars associated with the invocant.
+
+=cut
+
+sub as_query {
+ my $self = shift;
+
+ my $storage = $self->{storage};
+ my $sql_maker = $storage->sql_maker;
+ local $sql_maker->{for};
+
+ my @args = $storage->_select_args(@{$self->{args}});
+ my ($sql, $bind) = $storage->_prep_for_execute(@args[0 .. 2], [@args[4 .. $#args]]);
+ return \[ "($sql)", @$bind ];
+}
+
=head2 next
=over 4
# trigger_body is a LONG
$dbh->{LongReadLen} = 64 * 1024 if ($dbh->{LongReadLen} < 64 * 1024);
- my $sth = $dbh->prepare($sql);
- $sth->execute( uc($source->name) );
+ my $sth;
+
+ # check for fully-qualified name (eg. SCHEMA.TABLENAME)
+ if ( my ( $schema, $table ) = $source->name =~ /(\w+)\.(\w+)/ ) {
+ $sql = q{
+ SELECT trigger_body FROM ALL_TRIGGERS t
+ WHERE t.owner = ? AND t.table_name = ?
+ AND t.triggering_event = 'INSERT'
+ AND t.status = 'ENABLED'
+ };
+ $sth = $dbh->prepare($sql);
+ $sth->execute( uc($schema), uc($table) );
+ }
+ else {
+ $sth = $dbh->prepare($sql);
+ $sth->execute( uc( $source->name ) );
+ }
while (my ($insert_trigger) = $sth->fetchrow_array) {
return uc($1) if $insert_trigger =~ m!(\w+)\.nextval!i; # col name goes here???
}
return $id;
}
+=head2 connected
+
+Returns true if we have an open and working database connection, false if the
+connection is not (yet) open or does not work. Executes a simple SELECT to make sure it works.
+
+The reason this is needed is that L<DBD::Oracle>'s ping() does not do a real
+OCIPing but just gets the server version, which doesn't help if someone killed
+your session.
+
+=cut
+
+sub connected {
+ my $self = shift;
+
+ if (not $self->SUPER::connected(@_)) {
+ return 0;
+ }
+ else {
+ my $dbh = $self->_dbh;
+
+ local $dbh->{RaiseError} = 1;
+
+ eval {
+ my $ping_sth = $dbh->prepare_cached("select 1 from dual");
+ $ping_sth->execute;
+ $ping_sth->finish;
+ };
+
+ return $@ ? 0 : 1;
+ }
+}
+
+sub _dbh_execute {
+ my $self = shift;
+ my ($dbh, $op, $extra_bind, $ident, $bind_attributes, @args) = @_;
+
+ my $wantarray = wantarray;
+
+ my (@res, $exception, $retried);
+
+ RETRY: {
+ do {
+ eval {
+ if ($wantarray) {
+ @res = $self->SUPER::_dbh_execute(@_);
+ } else {
+ $res[0] = $self->SUPER::_dbh_execute(@_);
+ }
+ };
+ $exception = $@;
+ if ($exception =~ /ORA-01003/) {
+ # ORA-01003: no statement parsed (someone changed the table somehow,
+ # invalidating your cursor.)
+ my ($sql, $bind) = $self->_prep_for_execute($op, $extra_bind, $ident, \@args);
+ delete $dbh->{CachedKids}{$sql};
+ } else {
+ last RETRY;
+ }
+ } while (not $retried++);
+ }
+
+ $self->throw_exception($exception) if $exception;
+
+ wantarray ? @res : $res[0]
+}
+
=head2 get_autoinc_seq
Returns the sequence name for an autoincrement column
sub _safely_ensure_connected {
my ($self, $replicant, @args) = @_;
- my $return; eval {
- $return = $replicant->ensure_connected(@args);
- }; if ($@) {
+ eval {
+ $replicant->ensure_connected(@args);
+ };
+ if ($@) {
$replicant
- ->debugobj
- ->print(
- sprintf( "Exception trying to ->ensure_connected for replicant %s, error is %s",
- $self->_dbi_connect_info->[0], $@)
+ ->debugobj
+ ->print(
+ sprintf( "Exception trying to ->ensure_connected for replicant %s, error is %s",
+ $replicant->_dbi_connect_info->[0], $@)
);
+ return;
}
- return $return;
+ return 1;
}
=head2 connected_replicants
use strict;
use warnings;
-use vars qw($DEBUG @EXPORT_OK);
+use vars qw($DEBUG $VERSION @EXPORT_OK);
+$VERSION = '1.10';
$DEBUG = 0 unless defined $DEBUG;
use Exporter;
}
}
- $source->sqlt_deploy_hook($table)
+ $source->_invoke_sqlt_deploy_hook($table);
}
if ($dbicschema->can('sqlt_deploy_hook')) {
=head1 SYNOPSIS
+ ## Via DBIx::Class
+ use MyApp::Schema;
+ my $schema = MyApp::Schema->connect("dbi:SQLite:something.db");
+ $schema->create_ddl_dir();
+ ## or
+ $schema->deploy();
+
+ ## Standalone
use MyApp::Schema;
use SQL::Translator;
=head1 DESCRIPTION
+This class requires L<SQL::Translator> installed to work.
+
C<SQL::Translator::Parser::DBIx::Class> reads a DBIx::Class schema,
interrogates the columns, and stuffs it all in an $sqlt_schema object.
+Its primary use is in deploying database layouts described as a set
+of L<DBIx::Class> classes, to a database. To do this, see the
+L<DBIx::Class::Schema/deploy> method.
+
+This can also be achieved by having DBIx::Class export the schema as a
+set of SQL files ready for import into your database, or passed to
+other machines that need to have your application installed but don't
+have SQL::Translator installed. To do this see the
+L<DBIx::Class::Schema/create_ddl_dir> method.
+
=head1 SEE ALSO
-SQL::Translator.
+L<SQL::Translator>, L<DBIx::Class::Schema>
=head1 AUTHORS
plan ( ($] >= 5.009000 and $] < 5.010001)
? (skip_all => 'warnings::register broken under 5.10: http://rt.perl.org/rt3/Public/Bug/Display.html?id=62522')
- : (tests => 2)
+ : (tests => 4)
);
{
my @w;
local $SIG{__WARN__} = sub { push @w, @_ };
-
my $code = gen_code ( suffix => 1 );
eval "$code";
+ ok (! $@, 'Eval code without warnings suppression')
+ || diag $@;
ok ( (grep { $_ =~ /The many-to-many relationship bars is trying to create/ } @w), "Warning triggered without relevant 'no warnings'");
}
my $code = gen_code ( suffix => 2, no_warn => 1 );
eval "$code";
+ ok (! $@, 'Eval code with warnings suppression')
+ || diag $@;
ok ( (not grep { $_ =~ /The many-to-many relationship bars is trying to create/ } @w), "No warning triggered with relevant 'no warnings'");
}
package #
DBICTest::Schema::Foo${suffix};
use base 'DBIx::Class::Core';
+
__PACKAGE__->table('foo');
__PACKAGE__->add_columns(
'fooid' => {
DBICTest::Schema::Bar${suffix};
use base 'DBIx::Class::Core';
+
__PACKAGE__->table('bar');
__PACKAGE__->add_columns(
'barid' => {
my $schema = DBICTest->init_schema();
-plan tests => 86;
+plan tests => 88;
eval { require DateTime::Format::MySQL };
my $NO_DTFM = $@ ? 1 : 0;
my $table = $class->table($class->table);
is($table, $class->table, '->table($table) returns $table');
}
+
+#make sure insert doesn't use set_column
+{
+ my $en_row = $schema->resultset('Encoded')->new_result({encoded => 'wilma'});
+ is($en_row->encoded, 'amliw', 'new encodes');
+ $en_row->insert;
+ is($en_row->encoded, 'amliw', 'insert does not encode again');
+}
my $schema = DBICTest->init_schema();
-plan tests => 68;
+plan tests => 75;
# has_a test
my $cd = $schema->resultset("CD")->find(4);
$undef_artist_cd->related_resultset('artist')->new({name => 'foo'});
};
is( $@, '', "Object created on a resultset related to not yet inserted object");
-
+lives_ok{
+ $schema->resultset('Artwork')->new_result({})->cd;
+} 'undef_on_null_fk does not choke on empty conds';
+
my $def_artist_cd = $schema->resultset("CD")->new_result({ 'title' => 'badgers', 'year' => 2007, artist => undef });
is($def_artist_cd->has_column_loaded('artist'), 1, 'FK loaded');
is($def_artist_cd->search_related('artist')->count, 0, 'closed search on null FK');
my ($dsn, $user, $pass) = @ENV{map { "DBICTEST_PG_${_}" } qw/DSN USER PASS/};
-#warn "$dsn $user $pass";
-
plan skip_all => 'Set $ENV{DBICTEST_PG_DSN}, _USER and _PASS to run this test '.
'(note: This test drops and creates tables called \'artist\', \'casecheck\', \'array_test\' and \'sequence_test\''.
' as well as following sequences: \'pkid1_seq\', \'pkid2_seq\' and \'nonpkid_seq\''.
' as well as following schemas: \'testschema\'!)'
- unless ($dsn && $user && $pass);
+ unless ($dsn && $user);
plan tests => 37;
my $count;
lives_ok {
$count = $schema->resultset('ArrayTest')->search({
- arrayfield => \[ '= ?' => [3, 4] ], #TODO anything less ugly than this?
+ arrayfield => \[ '= ?' => [arrayfield => [3, 4]] ], #TODO anything less ugly than this?
})->count;
} 'comparing arrayref to pg array data does not blow up';
is($count, 1, 'comparing arrayref to pg array data gives correct result');
+{
+ package # hide from PAUSE
+ DBICTest::Schema::ArtistFQN;
+
+ use base 'DBIx::Class::Core';
+
+ __PACKAGE__->table(
+ defined $ENV{DBICTEST_ORA_USER}
+ ? $ENV{DBICTEST_ORA_USER} . '.artist'
+ : 'artist'
+ );
+ __PACKAGE__->add_columns(
+ 'artistid' => {
+ data_type => 'integer',
+ is_auto_increment => 1,
+ },
+ 'name' => {
+ data_type => 'varchar',
+ size => 100,
+ is_nullable => 1,
+ },
+ );
+ __PACKAGE__->set_primary_key('artistid');
+
+ 1;
+}
+
use strict;
use warnings;
' as well as following sequences: \'pkid1_seq\', \'pkid2_seq\' and \'nonpkid_seq\''
unless ($dsn && $user && $pass);
-plan tests => 23;
+plan tests => 24;
+DBICTest::Schema->load_classes('ArtistFQN');
my $schema = DBICTest::Schema->connect($dsn, $user, $pass);
my $dbh = $schema->storage->dbh;
my $new = $schema->resultset('Artist')->create({ name => 'foo' });
is($new->artistid, 1, "Oracle Auto-PK worked");
+# test again with fully-qualified table name
+$new = $schema->resultset('ArtistFQN')->create( { name => 'bar' } );
+is( $new->artistid, 2, "Oracle Auto-PK worked with fully-qualified tablename" );
+
# test join with row count ambiguity
my $cd = $schema->resultset('CD')->create({ cdid => 1, artist => 1, title => 'EP C', year => '2003' });
my $track = $schema->resultset('Track')->create({ trackid => 1, cd => 1, position => 1, title => 'Track1' });
}
my $it = $schema->resultset('Artist')->search( {},
{ rows => 3,
- offset => 2,
+ offset => 3,
order_by => 'artistid' }
);
is( $it->count, 3, "LIMIT count ok" );
# clean up our mess
END {
- if($dbh) {
+ if($schema && ($dbh = $schema->storage->dbh)) {
$dbh->do("DROP SEQUENCE artist_seq");
$dbh->do("DROP SEQUENCE pkid1_seq");
$dbh->do("DROP SEQUENCE pkid2_seq");
eval { $dbh->do("DROP TABLE artist") };
-$dbh->do("CREATE TABLE artist (artistid INTEGER GENERATED BY DEFAULT AS IDENTITY (START WITH 1, INCREMENT BY 1), name VARCHAR(255), charfield CHAR(10));");
+$dbh->do("CREATE TABLE artist (artistid INTEGER GENERATED BY DEFAULT AS IDENTITY (START WITH 1, INCREMENT BY 1), name VARCHAR(255), charfield CHAR(10), rank INTEGER DEFAULT 13);");
# This is in core, just testing that it still loads ok
$schema->class('Artist')->load_components('PK::Auto');
'is_nullable' => 1,
'size' => 10
},
+ 'rank' => {
+ 'data_type' => 'INTEGER',
+ 'is_nullable' => 1,
+ 'size' => 10
+ },
};
eval { $dbh->do("DROP TABLE artist") };
-$dbh->do("CREATE TABLE artist (artistid INTEGER GENERATED BY DEFAULT AS IDENTITY (START WITH 1, INCREMENT BY 1), name VARCHAR(255), charfield CHAR(10))");
+$dbh->do(<<'');
+CREATE TABLE artist (
+ artistid INTEGER GENERATED BY DEFAULT AS IDENTITY (START WITH 1, INCREMENT BY 1),
+ name VARCHAR(255),
+ rank INTEGER default 13 not null,
+ charfield CHAR(10)
+)
# Just to test loading, already in Core
$schema->class('Artist')->load_components('PK::Auto');
'is_nullable' => 1,
'size' => 255
},
+ 'rank' => {
+ 'data_type' => 'INTEGER',
+ 'is_nullable' => 0,
+ 'size' => 10,
+ },
'charfield' => {
'data_type' => 'CHAR',
'is_nullable' => 1,
$schema->storage->ensure_connected;
isa_ok( $schema->storage, 'DBIx::Class::Storage::DBI::ODBC::Microsoft_SQL_Server' );
-my $dbh = $schema->storage->_dbh;
+$schema->storage->dbh_do (sub {
+ my ($storage, $dbh) = @_;
+ eval { $dbh->do("DROP TABLE artist") };
+ $dbh->do(<<'SQL');
-eval { $dbh->do("DROP TABLE artist") };
-
- $dbh->do(<<'');
CREATE TABLE artist (
artistid INT IDENTITY NOT NULL,
name VARCHAR(100),
primary key(artistid)
)
+SQL
+
+});
+
my %seen_id;
# fresh $schema so we start unconnected
# clean up our mess
END {
- $dbh = eval { $schema->storage->_dbh };
+ my $dbh = eval { $schema->storage->_dbh };
$dbh->do('DROP TABLE artist') if $dbh;
}
eval "use DBD::SQLite";
plan $@
? ( skip_all => 'needs DBD::SQLite for testing' )
- : ( tests => 16 );
+ : ( tests => 18 );
}
# figure out if we've got a version of sqlite that is older than 3.2.6, in
is($rs->first->name, 'We Are Goth', 'Correct record returned');
+# test for warnings on delete of joined resultset
+$rs = $schema->resultset("CD")->search(
+ { 'artist.name' => 'Caterwauler McCrae' },
+ { join => [qw/artist/]}
+);
+my $tst_delete_warning;
+eval {
+ local $SIG{__WARN__} = sub { $tst_delete_warning = shift };
+ $rs->delete();
+};
+
+ok( ($@ || $tst_delete_warning), 'fail/warning on attempt to delete a join-ed resultset');
+
+# test for warnings on update of joined resultset
+$rs = $schema->resultset("CD")->search(
+ { 'artist.name' => 'Random Boy Band' },
+ { join => [qw/artist/]}
+);
+my $tst_update_warning;
+eval {
+ local $SIG{__WARN__} = sub { $tst_update_warning = shift };
+ $rs->update({ 'artist' => 1 });
+};
+
+ok( ($@ || $tst_update_warning), 'fail/warning on attempt to update a join-ed resultset');
use warnings;
use Test::More;
+use Test::Exception;
use lib qw(t/lib);
use DBICTest;
use Data::Dumper;
eval "use DBD::SQLite";
plan $@
? ( skip_all => 'needs DBD::SQLite for testing' )
- : ( tests => 58 );
+ : ( tests => 68 );
}
# figure out if we've got a version of sqlite that is older than 3.2.6, in
is(Dumper($attr), $attr_str, 'Attribute hash untouched after search()');
cmp_ok($rs + 0, '==', 3, 'Correct number of records returned');
+# A search() with prefetch seems to pollute an already joined resultset
+# in a way that offsets future joins (adapted from a test case by Debolaz)
+{
+ my ($cd_rs, $attrs);
+
+ # test a real-life case - rs is obtained by an implicit m2m join
+ $cd_rs = $schema->resultset ('Producer')->first->cds;
+ $attrs = Dumper $cd_rs->{attrs};
+
+ $cd_rs->search ({})->all;
+ is (Dumper ($cd_rs->{attrs}), $attrs, 'Resultset attributes preserved after a simple search');
+
+ lives_ok (sub {
+ $cd_rs->search ({'artist.artistid' => 1}, { prefetch => 'artist' })->all;
+ is (Dumper ($cd_rs->{attrs}), $attrs, 'Resultset attributes preserved after search with prefetch');
+ }, 'first prefetching search ok');
+
+ lives_ok (sub {
+ $cd_rs->search ({'artist.artistid' => 1}, { prefetch => 'artist' })->all;
+ is (Dumper ($cd_rs->{attrs}), $attrs, 'Resultset attributes preserved after another search with prefetch')
+ }, 'second prefetching search ok');
+
+
+ # test a regular rs with an empty seen_join injected - it should still work!
+ $cd_rs = $schema->resultset ('CD');
+ $cd_rs->{attrs}{seen_join} = {};
+ $attrs = Dumper $cd_rs->{attrs};
+
+ $cd_rs->search ({})->all;
+ is (Dumper ($cd_rs->{attrs}), $attrs, 'Resultset attributes preserved after a simple search');
+
+ lives_ok (sub {
+ $cd_rs->search ({'artist.artistid' => 1}, { prefetch => 'artist' })->all;
+ is (Dumper ($cd_rs->{attrs}), $attrs, 'Resultset attributes preserved after search with prefetch');
+ }, 'first prefetching search ok');
+
+ lives_ok (sub {
+ $cd_rs->search ({'artist.artistid' => 1}, { prefetch => 'artist' })->all;
+ is (Dumper ($cd_rs->{attrs}), $attrs, 'Resultset attributes preserved after another search with prefetch')
+ }, 'second prefetching search ok');
+}
+
+
my $queries = 0;
$schema->storage->debugcb(sub { $queries++; });
$schema->storage->debug(1);
});
$guard->commit;
- } qr/No such column made_up_column .*? at .*?81transactions.t line \d+/, "Error propogated okay";
+ } qr/No such column made_up_column .*? at .*?81transactions.t line \d+/s, "Error propogated okay";
ok(!$artist_rs->find({name => 'Death Cab for Cutie'}), "Artist not created");
my $schema = DBICTest->init_schema;
-plan tests => 132;
+plan tests => 133;
my $translator = SQL::Translator->new(
parser_args => {
$schema->source('Track')->sqlt_deploy_callback(sub {
my ($self, $sqlt_table) = @_;
- if ($sqlt_table->schema->translator->producer_type =~ /SQLite$/ ) {
+ if ($schema->storage->sqlt_type eq 'SQLite' ) {
$sqlt_table->add_index( name => 'track_title', fields => ['title'] )
or die $sqlt_table->error;
}
# the 'dummy' table
ok( !defined($tschema->get_table('dummy')), "Dummy table was removed by hook");
+# Test that the Artist resultsource sqlt_deploy_hook was called okay and added
+# an index
+SKIP: {
+ skip ('Artist sqlt_deploy_hook is only called with an SQLite backend', 1)
+ if $schema->storage->sqlt_type ne 'SQLite';
+
+ ok( ( grep
+ { $_->name eq 'artist_name_hookidx' }
+ $tschema->get_table('artist')->get_indices
+ ), 'sqlt_deploy_hook fired within a resultsource');
+}
+
# Test that nonexistent constraints are not found
my $constraint = get_constraint('FOREIGN KEY', 'cd', ['title'], 'cd', ['year']);
ok( !defined($constraint), 'nonexistent FOREIGN KEY constraint not found' );
use lib qw(t/lib);
use DBICTest;
-my $schema = DBICTest->init_schema( sqlite_use_file => 1 );
eval 'require JSON::Any';
plan skip_all => 'Install JSON::Any to run this test' if ($@);
plan skip_all => 'Install Text::CSV_XS or Text::CSV_PP to run this test' if ($@);
}
-plan tests => 5;
+my @json_backends = qw/XS JSON DWIW Syck/;
+my $tests_per_run = 5;
-# the script supports double quotes round the arguments and single-quote within
-# to make sure it runs on windows as well, but only if JSON::Any picks the right module
+plan tests => $tests_per_run * @json_backends;
+use JSON::Any;
+for my $js (@json_backends) {
+ eval {JSON::Any->import ($js) };
+ SKIP: {
+ skip ("Json backend $js is not available, skip testing", $tests_per_run) if $@;
-my $employees = $schema->resultset('Employee');
-my @cmd = ($^X, qw|script/dbicadmin --quiet --schema=DBICTest::Schema --class=Employee --tlibs|, q|--connect=["dbi:SQLite:dbname=t/var/DBIxClass.db","","",{"AutoCommit":1}]|, qw|--force --tlibs|);
+ $ENV{JSON_ANY_ORDER} = $js;
+ eval { test_dbicadmin () };
+ diag $@ if $@;
+ }
+}
+
+sub test_dbicadmin {
+ my $schema = DBICTest->init_schema( sqlite_use_file => 1 ); # reinit a fresh db for every run
+
+ my $employees = $schema->resultset('Employee');
-system(@cmd, qw|--op=insert --set={"name":"Matt"}|);
-ok( ($employees->count()==1), 'insert count' );
+ system( _prepare_system_args( qw|--op=insert --set={"name":"Matt"}| ) );
+ ok( ($employees->count()==1), "$ENV{JSON_ANY_ORDER}: insert count" );
-my $employee = $employees->find(1);
-ok( ($employee->name() eq 'Matt'), 'insert valid' );
+ my $employee = $employees->find(1);
+ ok( ($employee->name() eq 'Matt'), "$ENV{JSON_ANY_ORDER}: insert valid" );
-system(@cmd, qw|--op=update --set={"name":"Trout"}|);
-$employee = $employees->find(1);
-ok( ($employee->name() eq 'Trout'), 'update' );
+ system( _prepare_system_args( qw|--op=update --set={"name":"Trout"}| ) );
+ $employee = $employees->find(1);
+ ok( ($employee->name() eq 'Trout'), "$ENV{JSON_ANY_ORDER}: update" );
-system(@cmd, qw|--op=insert --set={"name":"Aran"}|);
+ system( _prepare_system_args( qw|--op=insert --set={"name":"Aran"}| ) );
-open(my $fh, "-|", @cmd, qw|--op=select --attrs={"order_by":"name"}|) or die $!;
-my $data = do { local $/; <$fh> };
-close($fh);
-ok( ($data=~/Aran.*Trout/s), 'select with attrs' );
+ SKIP: {
+ skip ("MSWin32 doesn't support -| either", 1) if $^O eq 'MSWin32';
-system(@cmd, qw|--op=delete --where={"name":"Trout"}|);
-ok( ($employees->count()==1), 'delete' );
+ open(my $fh, "-|", _prepare_system_args( qw|--op=select --attrs={"order_by":"name"}| ) ) or die $!;
+ my $data = do { local $/; <$fh> };
+ close($fh);
+ ok( ($data=~/Aran.*Trout/s), "$ENV{JSON_ANY_ORDER}: select with attrs" );
+ }
+ system( _prepare_system_args( qw|--op=delete --where={"name":"Trout"}| ) );
+ ok( ($employees->count()==1), "$ENV{JSON_ANY_ORDER}: delete" );
+}
+
+# Why do we need this crap? Apparently MSWin32 can not pass through quotes properly
+# (sometimes it will and sometimes not, depending on what compiler was used to build
+# perl). So we go the extra mile to escape all the quotes. We can't also use ' instead
+# of ", because JSON::XS (proudly) does not support "malformed JSON" as the author
+# calls it. Bleh.
+#
+sub _prepare_system_args {
+ my $perl = $^X;
+ my @args = (
+ qw|script/dbicadmin --quiet --schema=DBICTest::Schema --class=Employee --tlibs|,
+ q|--connect=["dbi:SQLite:dbname=t/var/DBIxClass.db","","",{"AutoCommit":1}]|,
+ qw|--force --tlibs|,
+ @_,
+ );
+
+ if ( $^O eq 'MSWin32' ) {
+ $perl = qq|"$perl"|; # execution will fail if $^X contains paths
+ for (@args) {
+ $_ =~ s/"/\\"/g;
+ }
+ }
+
+ return ($perl, @args);
+}
eval { require DateTime::Format::MySQL };
plan skip_all => "Need DateTime::Format::MySQL for inflation tests" if $@;
-plan tests => 28;
+plan tests => 32;
# inflation test
my $event = $schema->resultset("Event")->find(1);
hour => 13, minute => 34, second => 56, time_zone => "America/New_York" ),
});
+is ($event_tz->starts_at->day_name, "Montag", 'Locale de_DE loaded: day_name');
+is ($event_tz->starts_at->month_name, "Dezember", 'Locale de_DE loaded: month_name');
+is ($event_tz->created_on->day_name, "Tuesday", 'Default locale loaded: day_name');
+is ($event_tz->created_on->month_name, "January", 'Default locale loaded: month_name');
+
my $starts_at = $event_tz->starts_at;
is("$starts_at", '2007-12-31T00:00:00', 'Correct date/time using timezone');
my $schema = DBICTest->init_schema();
-plan tests => 20;
+plan tests => 28;
# Test ensure_class_found
ok( $schema->ensure_class_found('DBIx::Class::Schema'),
like( $@, qr/did not return a true value/,
'DBICTest::ErrorComponent threw ok' );
+# Simulate a PAR environment
+{
+ my @code;
+ local @INC = @INC;
+ unshift @INC, sub {
+ if ($_[1] eq 'VIRTUAL/PAR/PACKAGE.pm') {
+ return (sub { return 0 unless @code; $_ = shift @code; 1; } );
+ }
+ else {
+ return ();
+ }
+ };
+
+ $retval = eval { $schema->load_optional_class('FAKE::PAR::PACKAGE') };
+ ok( !$@, 'load_optional_class on a nonexistent PAR class did not throw' );
+ ok( !$retval, 'nonexistent PAR package not loaded' );
+
+
+ # simulate a class which does load but does not return true
+ @code = (
+ q/package VIRTUAL::PAR::PACKAGE;/,
+ q/0;/,
+ );
+
+ $retval = eval { $schema->load_optional_class('VIRTUAL::PAR::PACKAGE') };
+ ok( $@, 'load_optional_class of a no-true-returning PAR module did throw' );
+ ok( !$retval, 'no-true-returning PAR package not loaded' );
+
+  # simulate a normal class (no one adjusted %INC, so it will be tried again)
+ @code = (
+ q/package VIRTUAL::PAR::PACKAGE;/,
+ q/1;/,
+ );
+
+ $retval = eval { $schema->load_optional_class('VIRTUAL::PAR::PACKAGE') };
+ ok( !$@, 'load_optional_class of a PAR module did not throw' );
+ ok( $retval, 'PAR package "loaded"' );
+
+ # see if we can still load stuff with the coderef present
+ $retval = eval { $schema->load_optional_class('DBIx::Class::ResultClass::HashRefInflator') };
+ ok( !$@, 'load_optional_class did not throw' ) || diag $@;
+ ok( $retval, 'DBIx::Class::ResultClass::HashRefInflator loaded' );
+}
+
# Test ensure_class_loaded
ok( Class::Inspector->loaded('TestPackage::A'), 'anonymous package exists' );
eval { $schema->ensure_class_loaded('TestPackage::A'); };
unless ($dsn);
- eval "use DBD::mysql; use SQL::Translator 0.09;";
+ eval "use DBD::mysql; use SQL::Translator 0.09003;";
plan $@
- ? ( skip_all => 'needs DBD::mysql and SQL::Translator 0.09 for testing' )
+ ? ( skip_all => 'needs DBD::mysql and SQL::Translator 0.09003 for testing' )
: ( tests => 22 );
}
BEGIN {
- eval "use DBD::mysql; use SQL::Translator 0.09;";
+ eval "use DBD::mysql; use SQL::Translator 0.09003;";
if ($@) {
- plan skip_all => 'needs DBD::mysql and SQL::Translator 0.09 for testing';
+ plan skip_all => 'needs DBD::mysql and SQL::Translator 0.09003 for testing';
}
}
my $schema = DBICTest->init_schema();
-plan tests => ($schema->sources * 3);
+my @sources = grep { $_ ne 'Dummy' } ($schema->sources); # Dummy was yanked out by the sqlt hook test
+plan tests => ( @sources * 3);
{
my $sqlt_schema = create_schema({ schema => $schema, args => { parser_args => { } } });
- foreach my $source ($schema->sources) {
+ foreach my $source (@sources) {
my $table = $sqlt_schema->get_table($schema->source($source)->from);
my $fk_count = scalar(grep { $_->type eq 'FOREIGN KEY' } $table->get_constraints);
{
my $sqlt_schema = create_schema({ schema => $schema, args => { parser_args => { add_fk_index => 1 } } });
- foreach my $source ($schema->sources) {
+ foreach my $source (@sources) {
my $table = $sqlt_schema->get_table($schema->source($source)->from);
my $fk_count = scalar(grep { $_->type eq 'FOREIGN KEY' } $table->get_constraints);
{
my $sqlt_schema = create_schema({ schema => $schema, args => { parser_args => { add_fk_index => 0 } } });
- foreach my $source ($schema->sources) {
+ foreach my $source (@sources) {
my $table = $sqlt_schema->get_table($schema->source($source)->from);
my @indices = $table->get_indices;
ok( ( $ratio < 2 ), 'Overload/bless performance acceptable' )
|| diag(
+ "\n",
"This perl has a substantial slow down when handling large numbers\n",
"of blessed/overloaded objects. This can severely adversely affect\n",
"the performance of DBIx::Class programs. Please read the section\n",
"in the Troubleshooting POD documentation entitled\n",
"'Perl Performance Issues on Red Hat Systems'\n",
+ "As this is an extremely serious condition, the only way to skip\n",
+ "over this test is to --force the installation, or to edit the test\n",
+ "file " . __FILE__ . "\n",
);
# We will only check for the difference in bless handling (whether the
ok( !_possibly_has_bad_overload_performance(),
'Checking whether bless applies to reference not object' )
|| diag(
+ "\n",
"This perl is probably derived from a buggy Red Hat perl build\n",
"Please read the section in the Troubleshooting POD documentation\n",
"entitled 'Perl Performance Issues on Red Hat Systems'\n",
+ "As this is an extremely serious condition, the only way to skip\n",
+ "over this test is to --force the installation, or to edit the test\n",
+ "file " . __FILE__ . "\n",
);
}
plan skip_all => 'Set $ENV{DBICTEST_PG_DSN}, _USER and _PASS to run this test'
unless ($dsn && $dbuser);
-plan tests => 3;
+plan tests => 6;
my $schema = DBICTest::Schema->connection($dsn, $dbuser, $dbpass, { AutoCommit => 1 });
{
local $SIG{__WARN__} = sub {};
- $dbh->do('DROP TABLE IF EXISTS artist');
+ $dbh->do('DROP TABLE IF EXISTS bindtype_test');
# the blob/clob are for reference only, will be useful when we switch to SQLT and can test Oracle along the way
$dbh->do(qq[
],{ RaiseError => 1, PrintError => 1 });
}
-# test primary key handling
-my $big_long_string = 'abcd' x 250000;
+my $big_long_string = "\x00\x01\x02 abcd" x 125000;
-my $new = $schema->resultset('BindType')->create({ bytea => $big_long_string });
+my $new;
+# test inserting a row
+{
+ $new = $schema->resultset('BindType')->create({ bytea => $big_long_string });
-ok($new->id, "Created a bytea row");
-is($new->bytea, $big_long_string, "Set the blob correctly.");
+ ok($new->id, "Created a bytea row");
+ is($new->bytea, $big_long_string, "Set the blob correctly.");
+}
-my $rs = $schema->resultset('BindType')->find({ id => $new->id });
+# test retrieval of the bytea column
+{
+ my $row = $schema->resultset('BindType')->find({ id => $new->id });
+ is($row->get_column('bytea'), $big_long_string, "Created the blob correctly.");
+}
-is($rs->get_column('bytea'), $big_long_string, "Created the blob correctly.");
+TODO: {
+ local $TODO =
+ 'Passing bind attributes to $sth->bind_param() should be implemented (it only works in $storage->insert ATM)';
+
+ my $rs = $schema->resultset('BindType')->search({ bytea => $big_long_string });
+
+ # search on the bytea column (select)
+ {
+ my $row = $rs->first;
+ is($row ? $row->id : undef, $new->id, "Found the row searching on the bytea column.");
+ }
+
+ # search on the bytea column (update)
+ {
+ my $new_big_long_string = $big_long_string . "2";
+ $schema->txn_do(sub {
+ $rs->update({ bytea => $new_big_long_string });
+ my $row = $schema->resultset('BindType')->find({ id => $new->id });
+ is($row ? $row->get_column('bytea') : undef, $new_big_long_string,
+ "Updated the row correctly (searching on the bytea column)."
+ );
+ $schema->txn_rollback;
+ });
+ }
+
+ # search on the bytea column (delete)
+ {
+ $schema->txn_do(sub {
+ $rs->delete;
+ my $row = $schema->resultset('BindType')->find({ id => $new->id });
+ is($row, undef, "Deleted the row correctly (searching on the bytea column).");
+ $schema->txn_rollback;
+ });
+ }
+}
$dbh->do("DROP TABLE bindtype_test");
-
-
-
}
INIT {
- use lib 't/testlib';
+ use lib 't/cdbi/testlib';
use Film;
}
plan $@ ? (skip_all => 'needs DBD::SQLite for testing') : (tests => 6);
}
-use lib 't/testlib';
+use lib 't/cdbi/testlib';
use Film;
INIT { @Film::Threat::ISA = qw/Film/; }
}
INIT {
- use lib 't/testlib';
+ use lib 't/cdbi/testlib';
use Lazy;
}
#local $SIG{__WARN__} = sub { };
INIT {
- use lib 't/testlib';
+ use lib 't/cdbi/testlib';
use Film;
use Director;
}
-#!/usr/bin/perl -w
-
use strict;
use Test::More;
}
-use lib 't/testlib';
+use lib 't/cdbi/testlib';
use Film;
use Actor;
Actor->has_a(Film => 'Film');
plan $@ ? (skip_all => 'needs DBD::SQLite for testing') : (tests => 13);
}
-use lib 't/testlib';
+use lib 't/cdbi/testlib';
use Film;
sub create_trigger2 { ::ok(1, "Running create trigger 2"); }
plan $@ ? (skip_all => 'needs DBD::SQLite for testing') : (tests => 50);
}
-use lib 't/testlib';
+use lib 't/cdbi/testlib';
use Actor;
use Film;
Film->has_many(actors => 'Actor');
plan $@ ? (skip_all => 'needs DBD::SQLite for testing') : (tests => 23);
}
-use lib 't/testlib';
+use lib 't/cdbi/testlib';
use Film;
sub valid_rating {
plan $@ ? (skip_all => 'needs DBD::SQLite for testing') : (tests => 22);
}
-use lib 't/testlib';
+use lib 't/cdbi/testlib';
use Film;
use Blurb;
INIT {
#local $SIG{__WARN__} =
#sub { like $_[0], qr/clashes with built-in method/, $_[0] };
- use lib 't/testlib';
+ use lib 't/cdbi/testlib';
require Film;
require Actor;
require Director;
plan $@ ? (skip_all => 'needs DBD::SQLite for testing') : (tests => 5);
}
-use lib 't/testlib';
+use lib 't/cdbi/testlib';
require Film;
require Order;
plan $@ ? (skip_all => 'needs DBD::SQLite for testing') : (tests => 41);
}
-use lib 't/testlib';
+use lib 't/cdbi/testlib';
use Film;
use Director;
@YA::Film::ISA = 'Film';
plan $@ ? (skip_all => 'needs DBD::SQLite for testing') : (tests => 20);
}
-use lib 't/testlib';
+use lib 't/cdbi/testlib';
use Film;
use Actor;
plan $@ ? (skip_all => 'needs DBD::SQLite for testing') : (tests => 37);
}
-use lib 't/testlib';
+use lib 't/cdbi/testlib';
use Film;
my $it_class = "DBIx::Class::ResultSet";
eval { require Time::Piece::MySQL };
plan skip_all => "Need Time::Piece::MySQL for this test" if $@;
-eval { require 't/testlib/Log.pm' };
+eval { require 't/cdbi/testlib/Log.pm' };
plan skip_all => "Need MySQL for this test" if $@;
plan tests => 2;
-use Test::More;\r
-\r
-BEGIN {\r
- eval "use DBIx::Class::CDBICompat;";\r
- plan $@ ? (skip_all => 'Class::Trigger and DBIx::ContextualFetch required') : (tests=> 2);\r
-}\r
-\r
-use strict;\r
-\r
-use lib 't/testlib';\r
-use Actor;\r
-use ActorAlias;\r
-Actor->has_many( aliases => [ 'ActorAlias' => 'alias' ] );\r
-\r
-my $first = Actor->create( { Name => 'First' } );\r
-my $second = Actor->create( { Name => 'Second' } );\r
-\r
-ActorAlias->create( { actor => $first, alias => $second } );\r
-\r
-my @aliases = $first->aliases;\r
-\r
-is( scalar @aliases, 1, 'proper number of aliases' );\r
-is( $aliases[ 0 ]->name, 'Second', 'proper alias' );\r
-\r
-\r
+use Test::More;
+
+BEGIN {
+ eval "use DBIx::Class::CDBICompat;";
+ plan $@ ? (skip_all => 'Class::Trigger and DBIx::ContextualFetch required') : (tests=> 2);
+}
+
+use strict;
+
+use lib 't/cdbi/testlib';
+use Actor;
+use ActorAlias;
+Actor->has_many( aliases => [ 'ActorAlias' => 'alias' ] );
+
+my $first = Actor->create( { Name => 'First' } );
+my $second = Actor->create( { Name => 'Second' } );
+
+ActorAlias->create( { actor => $first, alias => $second } );
+
+my @aliases = $first->aliases;
+
+is( scalar @aliases, 1, 'proper number of aliases' );
+is( $aliases[ 0 ]->name, 'Second', 'proper alias' );
+
+
}
INIT {
- use lib 't/testlib';
+ use lib 't/cdbi/testlib';
use Film;
use Director;
}
-#!/usr/bin/perl -w
-
use strict;
use Test::More;
: (tests => 6);
}
-use lib 't/testlib';
+use lib 't/cdbi/testlib';
require Film;
sub Film::accessor_name_for {
-use strict;\r
-use Test::More;\r
-\r
-BEGIN {\r
- eval "use DBIx::Class::CDBICompat;";\r
- if ($@) {\r
- plan (skip_all => 'Class::Trigger and DBIx::ContextualFetch required');\r
- next;\r
- }\r
- eval "use DBD::SQLite";\r
- plan $@ ? (skip_all => 'needs DBD::SQLite for testing') : (tests => 6);\r
-}\r
-\r
-use lib 't/testlib';\r
-use Film;\r
-\r
-my @film = (\r
- Film->create({ Title => 'Film 1' }),\r
- Film->create({ Title => 'Film 2' }),\r
- Film->create({ Title => 'Film 3' }),\r
- Film->create({ Title => 'Film 4' }),\r
- Film->create({ Title => 'Film 5' }),\r
-);\r
-\r
-# first page\r
-my ( $pager, $it ) = Film->page(\r
- {},\r
- { rows => 3,\r
- page => 1 }\r
-);\r
-\r
-is( $pager->entries_on_this_page, 3, "entries_on_this_page ok" );\r
-\r
-is( $pager->next_page, 2, "next_page ok" );\r
-\r
-is( $it->next->title, "Film 1", "iterator->next ok" );\r
-\r
-$it->next;\r
-$it->next;\r
-\r
-is( $it->next, undef, "next past end of page ok" );\r
-\r
-# second page\r
-( $pager, $it ) = Film->page( \r
- {},\r
- { rows => 3,\r
- page => 2 }\r
-);\r
-\r
-is( $pager->entries_on_this_page, 2, "entries on second page ok" );\r
-\r
-is( $it->next->title, "Film 4", "second page first title ok" );\r
+use strict;
+use Test::More;
+
+BEGIN {
+ eval "use DBIx::Class::CDBICompat;";
+ if ($@) {
+ plan (skip_all => 'Class::Trigger and DBIx::ContextualFetch required');
+ next;
+ }
+ eval "use DBD::SQLite";
+ plan $@ ? (skip_all => 'needs DBD::SQLite for testing') : (tests => 6);
+}
+
+use lib 't/cdbi/testlib';
+use Film;
+
+my @film = (
+ Film->create({ Title => 'Film 1' }),
+ Film->create({ Title => 'Film 2' }),
+ Film->create({ Title => 'Film 3' }),
+ Film->create({ Title => 'Film 4' }),
+ Film->create({ Title => 'Film 5' }),
+);
+
+# first page
+my ( $pager, $it ) = Film->page(
+ {},
+ { rows => 3,
+ page => 1 }
+);
+
+is( $pager->entries_on_this_page, 3, "entries_on_this_page ok" );
+
+is( $pager->next_page, 2, "next_page ok" );
+
+is( $it->next->title, "Film 1", "iterator->next ok" );
+
+$it->next;
+$it->next;
+
+is( $it->next, undef, "next past end of page ok" );
+
+# second page
+( $pager, $it ) = Film->page(
+ {},
+ { rows => 3,
+ page => 2 }
+);
+
+is( $pager->entries_on_this_page, 2, "entries on second page ok" );
+
+is( $it->next->title, "Film 4", "second page first title ok" );
plan $@ ? (skip_all => 'needs DBD::SQLite for testing') : (tests => 7);
}
-use lib 't/testlib';
+use lib 't/cdbi/testlib';
use Film;
Film->create_test_film;
plan tests => 19;
}
-my $DB = "t/testdb";
+my $DB = "t/var/cdbi_testdb";
unlink $DB if -e $DB;
my @DSN = ("dbi:SQLite:dbname=$DB", '', '', { AutoCommit => 0 });
-#!/usr/bin/perl -w
-
use Test::More;
use strict;
}
INIT {
- use lib 't/testlib';
+ use lib 't/cdbi/testlib';
use Film;
}
-#!/usr/bin/perl -w
-
use strict;
use Test::More;
use Test::Warn;
: ('no_plan');
}
-use lib 't/testlib';
+use lib 't/cdbi/testlib';
use Film;
my $waves = Film->insert({
-#!/usr/bin/perl -w
-
use strict;
use Test::More;
-#!/usr/bin/perl -w
-
use strict;
use Test::More;
}
INIT {
- use lib 't/testlib';
+ use lib 't/cdbi/testlib';
use Film;
}
-#!/usr/bin/perl -w
-
use strict;
use Test::More;
}
INIT {
- use lib 't/testlib';
+ use lib 't/cdbi/testlib';
}
{
}
-use lib 't/testlib';
+use lib 't/cdbi/testlib';
use Director;
# Test that has_many() will load the foreign class.
-#!/usr/bin/perl -w
-
use strict;
use Test::More;
-#!/usr/bin/perl -w
-
use strict;
use Test::More;
plan $@ ? (skip_all => 'needs DBD::SQLite for testing') : (tests => 2);
}
-use lib 't/testlib';
+use lib 't/cdbi/testlib';
use Film;
Film->create({
-#!/usr/bin/perl -w
-
use strict;
use Test::More;
}
INIT {
- use lib 't/testlib';
+ use lib 't/cdbi/testlib';
require Film;
}
-#!/usr/bin/perl -w
-
use strict;
use Test::More;
}
INIT {
- use lib 't/testlib';
+ use lib 't/cdbi/testlib';
use Film;
}
-#!/usr/bin/perl -w
-
use strict;
use Test::More;
}
INIT {
- use lib 't/testlib';
+ use lib 't/cdbi/testlib';
use Film;
}
-#!/usr/bin/perl -w
-
use strict;
use Test::More;
-#!/usr/bin/perl -w
-
use strict;
use Test::More;
use Test::Exception;
package # hide from PAUSE
Actor;
-BEGIN { unshift @INC, './t/testlib'; }
-
use strict;
use warnings;
-package # hide from PAUSE \r
- ActorAlias;\r
-\r
-BEGIN { unshift @INC, './t/testlib'; }\r
-\r
-use strict;\r
-use warnings;\r
-\r
-use base 'DBIx::Class::Test::SQLite';\r
-\r
-__PACKAGE__->set_table( 'ActorAlias' );\r
-\r
-__PACKAGE__->columns( Primary => 'id' );\r
-__PACKAGE__->columns( All => qw/ actor alias / );\r
-__PACKAGE__->has_a( actor => 'Actor' );\r
-__PACKAGE__->has_a( alias => 'Actor' );\r
-\r
-sub create_sql {\r
- return qq{\r
- id INTEGER PRIMARY KEY,\r
- actor INTEGER,\r
- alias INTEGER\r
- }\r
-}\r
-\r
-1;\r
-\r
+package # hide from PAUSE
+ ActorAlias;
+
+use strict;
+use warnings;
+
+use base 'DBIx::Class::Test::SQLite';
+
+__PACKAGE__->set_table( 'ActorAlias' );
+
+__PACKAGE__->columns( Primary => 'id' );
+__PACKAGE__->columns( All => qw/ actor alias / );
+__PACKAGE__->has_a( actor => 'Actor' );
+__PACKAGE__->has_a( alias => 'Actor' );
+
+sub create_sql {
+ return qq{
+ id INTEGER PRIMARY KEY,
+ actor INTEGER,
+ alias INTEGER
+ }
+}
+
+1;
+
package # hide from PAUSE
Binary;
-BEGIN { unshift @INC, './t/testlib'; }
-
use strict;
use base 'PgBase';
package # hide from PAUSE
Blurb;
-BEGIN { unshift @INC, './t/testlib'; }
-
use strict;
use base 'DBIx::Class::Test::SQLite';
package # hide from PAUSE
Director;
-BEGIN { unshift @INC, './t/testlib'; }
-
use strict;
use base 'DBIx::Class::Test::SQLite';
package # hide from PAUSE
Film;
-BEGIN { unshift @INC, './t/testlib'; }
use base 'DBIx::Class::Test::SQLite';
use strict;
package # hide from PAUSE
Lazy;
-BEGIN { unshift @INC, './t/testlib'; }
use base 'DBIx::Class::Test::SQLite';
use strict;
package # hide from PAUSE
Log;
-BEGIN { unshift @INC, './t/testlib'; }
use base 'MyBase';
use strict;
package # hide from PAUSE
MyFilm;
-BEGIN { unshift @INC, './t/testlib'; }
use base 'MyBase';
use MyStarLink;
package # hide from PAUSE
MyFoo;
-BEGIN { unshift @INC, './t/testlib'; }
use base 'MyBase';
use strict;
package # hide from PAUSE
MyStar;
-BEGIN { unshift @INC, './t/testlib'; }
use base 'MyBase';
use strict;
package # hide from PAUSE
MyStarLink;
-BEGIN { unshift @INC, './t/testlib'; }
use base 'MyBase';
use strict;
package # hide from PAUSE
MyStarLinkMCPK;
-BEGIN { unshift @INC, './t/testlib'; }
use base 'MyBase';
use MyStar;
package # hide from PAUSE
Order;
-BEGIN { unshift @INC, './t/testlib'; }
-
use strict;
use base 'DBIx::Class::Test::SQLite';
$self->debugfh(undef);
$self->dbictest_sql_ref($sql_ref);
- $self->dbictest_bind_ref($bind_ref);
+ $self->dbictest_bind_ref($bind_ref || []);
return $self;
}
our @EXPORT = qw/
&is_same_sql_bind
+ &is_same_sql
+ &is_same_bind
&eq_sql
&eq_bind
+ &eq_sql_bind
/;
$tb->ok($same_sql && $same_bind, $msg);
if (!$same_sql) {
- $tb->diag("SQL expressions differ\n"
- . " got: $sql1\n"
- . "expected: $sql2\n"
- );
+ _sql_differ_diag($sql1, $sql2);
}
if (!$same_bind) {
- $tb->diag("BIND values differ\n"
- . " got: " . Dumper($bind_ref1)
- . "expected: " . Dumper($bind_ref2)
- );
+ _bind_differ_diag($bind_ref1, $bind_ref2);
}
}
+ sub is_same_sql
+ {
+ my ($sql1, $sql2, $msg) = @_;
+
+ my $same_sql = eq_sql($sql1, $sql2);
+
+ $tb->ok($same_sql, $msg);
+
+ if (!$same_sql) {
+ _sql_differ_diag($sql1, $sql2);
+ }
+ }
+
+ sub is_same_bind
+ {
+ my ($bind_ref1, $bind_ref2, $msg) = @_;
+
+ my $same_bind = eq_bind($bind_ref1, $bind_ref2);
+
+ $tb->ok($same_bind, $msg);
+
+ if (!$same_bind) {
+ _bind_differ_diag($bind_ref1, $bind_ref2);
+ }
+ }
+
+ sub _sql_differ_diag
+ {
+ my ($sql1, $sql2) = @_;
+
+ $tb->diag("SQL expressions differ\n"
+ . " got: $sql1\n"
+ . "expected: $sql2\n"
+ );
+ }
+
+ sub _bind_differ_diag
+ {
+ my ($bind_ref1, $bind_ref2) = @_;
+
+ $tb->diag("BIND values differ\n"
+ . " got: " . Dumper($bind_ref1)
+ . "expected: " . Dumper($bind_ref2)
+ );
+ }
+
sub eq_sql
{
my ($left, $right) = @_;
return eq_deeply($bind_ref1, $bind_ref2);
}
+
+ sub eq_sql_bind
+ {
+ my ($sql1, $bind_ref1, $sql2, $bind_ref2) = @_;
+
+ return eq_sql($sql1, $sql2) && eq_bind($bind_ref1, $bind_ref2);
+ }
}
eval "use SQL::Abstract::Test;";
# SQL::Abstract::Test available
*is_same_sql_bind = \&SQL::Abstract::Test::is_same_sql_bind;
+ *is_same_sql = \&SQL::Abstract::Test::is_same_sql;
+ *is_same_bind = \&SQL::Abstract::Test::is_same_bind;
*eq_sql = \&SQL::Abstract::Test::eq_sql;
*eq_bind = \&SQL::Abstract::Test::eq_bind;
+ *eq_sql_bind = \&SQL::Abstract::Test::eq_sql_bind;
} else {
# old SQL::Abstract
*is_same_sql_bind = \&DBIC::SqlMakerTest::SQLATest::is_same_sql_bind;
+ *is_same_sql = \&DBIC::SqlMakerTest::SQLATest::is_same_sql;
+ *is_same_bind = \&DBIC::SqlMakerTest::SQLATest::is_same_bind;
*eq_sql = \&DBIC::SqlMakerTest::SQLATest::eq_sql;
*eq_bind = \&DBIC::SqlMakerTest::SQLATest::eq_bind;
+ *eq_sql_bind = \&DBIC::SqlMakerTest::SQLATest::eq_sql_bind;
}
Compares given and expected pairs of C<($sql, \@bind)>, and calls
L<Test::Builder/ok> on the result, with C<$test_msg> as message.
+=head2 is_same_sql
+
+ is_same_sql(
+ $given_sql,
+ $expected_sql,
+ $test_msg
+ );
+
+Compares given and expected SQL statement, and calls L<Test::Builder/ok> on the
+result, with C<$test_msg> as message.
+
+=head2 is_same_bind
+
+ is_same_bind(
+ \@given_bind,
+ \@expected_bind,
+ $test_msg
+ );
+
+Compares given and expected bind value lists, and calls L<Test::Builder/ok> on
+the result, with C<$test_msg> as message.
+
=head2 eq_sql
my $is_same = eq_sql($given_sql, $expected_sql);
Compares two lists of bind values. Returns true IFF their values are the same.
+=head2 eq_sql_bind
+
+ my $is_same = eq_sql_bind(
+ $given_sql, \@given_bind,
+ $expected_sql, \@expected_bind
+ );
+
+Compares the two SQL statements and the two lists of bind values. Returns true
+IFF they are equivalent and the bind values are the same.
+
=head1 SEE ALSO
'ArtistSubclass',
'Producer',
'CD_to_Producer',
+ 'Dummy', # this is a real result class we remove in the hook below
),
qw/SelfRefAlias TreeLike TwoKeyTreeLike Event EventTZ NoPrimaryKey/,
qw/Collection CollectionObject TypedObject Owners BooksInLibrary/,
- qw/ForceForeign/,
+ qw/ForceForeign Encoded/,
);
sub sqlt_deploy_hook {
sub sqlt_deploy_hook {
my ($self, $sqlt_table) = @_;
-
if ($sqlt_table->schema->translator->producer_type =~ /SQLite$/ ) {
- $sqlt_table->add_index( name => 'artist_name', fields => ['name'] )
+ $sqlt_table->add_index( name => 'artist_name_hookidx', fields => ['name'] )
or die $sqlt_table->error;
}
}
--- /dev/null
+package # hide from PAUSE
+ DBICTest::Schema::Encoded;
+
+use base 'DBIx::Class::Core';
+
+use strict;
+use warnings;
+
+__PACKAGE__->table('encoded');
+__PACKAGE__->add_columns(
+ 'id' => {
+ data_type => 'integer',
+ is_auto_increment => 1
+ },
+ 'encoded' => {
+ data_type => 'varchar',
+ size => 100,
+ is_nullable => 1,
+ },
+);
+
+__PACKAGE__->set_primary_key('id');
+
+sub set_column {
+ my ($self, $col, $value) = @_;
+ if( $col eq 'encoded' ){
+ $value = reverse split '', $value;
+ }
+ $self->next::method($col, $value);
+}
+
+sub new {
+ my($self, $attr, @rest) = @_;
+ $attr->{encoded} = reverse split '', $attr->{encoded}
+ if defined $attr->{encoded};
+ return $self->next::method($attr, @rest);
+}
+
+1;
__PACKAGE__->add_columns(
id => { data_type => 'integer', is_auto_increment => 1 },
- starts_at => { data_type => 'datetime', extra => { timezone => "America/Chicago" } },
+ starts_at => { data_type => 'datetime', extra => { timezone => "America/Chicago", locale => 'de_DE' } },
created_on => { data_type => 'timestamp', extra => { timezone => "America/Chicago", floating_tz_ok => 1 } },
);
value varchar(100) NOT NULL
);
+--
+-- Table: encoded
+--
+CREATE TABLE encoded (
+ id INTEGER PRIMARY KEY NOT NULL,
+ encoded varchar(100) NOT NULL
+);
COMMIT;
--- /dev/null
+#!/usr/bin/perl
+
+use strict;
+use warnings FATAL => 'all';
+
+use Data::Dumper;
+
+use Test::More;
+use lib qw(t/lib);
+use DBICTest;
+use DBIC::SqlMakerTest;
+
+plan tests => 4;
+
+my $schema = DBICTest->init_schema();
+my $art_rs = $schema->resultset('Artist');
+my $cdrs = $schema->resultset('CD');
+
+{
+ my $arr = $art_rs->as_query;
+ my ($query, @bind) = @{$$arr};
+
+ is_same_sql_bind(
+ $query, \@bind,
+ "(SELECT me.artistid, me.name, me.rank, me.charfield FROM artist me)", [],
+ );
+}
+
+$art_rs = $art_rs->search({ name => 'Billy Joel' });
+
+{
+ my $arr = $art_rs->as_query;
+ my ($query, @bind) = @{$$arr};
+
+ is_same_sql_bind(
+ $query, \@bind,
+ "(SELECT me.artistid, me.name, me.rank, me.charfield FROM artist me WHERE ( name = ? ))",
+ [ [ name => 'Billy Joel' ] ],
+ );
+}
+
+$art_rs = $art_rs->search({ rank => 2 });
+
+{
+ my $arr = $art_rs->as_query;
+ my ($query, @bind) = @{$$arr};
+
+ is_same_sql_bind(
+ $query, \@bind,
+ "(SELECT me.artistid, me.name, me.rank, me.charfield FROM artist me WHERE ( ( ( rank = ? ) AND ( name = ? ) ) ) )",
+ [ [ rank => 2 ], [ name => 'Billy Joel' ] ],
+ );
+}
+
+my $rscol = $art_rs->get_column( 'charfield' );
+
+{
+ my $arr = $rscol->as_query;
+ my ($query, @bind) = @{$$arr};
+
+ is_same_sql_bind(
+ $query, \@bind,
+ "(SELECT me.charfield FROM artist me WHERE ( ( ( rank = ? ) AND ( name = ? ) ) ) )",
+ [ [ rank => 2 ], [ name => 'Billy Joel' ] ],
+ );
+}
+
+__END__
--- /dev/null
+#!/usr/bin/perl
+
+use strict;
+use warnings FATAL => 'all';
+
+use Data::Dumper;
+
+use Test::More;
+use lib qw(t/lib);
+use DBICTest;
+use DBIC::SqlMakerTest;
+
+plan tests => 5;
+
+my $schema = DBICTest->init_schema();
+my $art_rs = $schema->resultset('Artist');
+my $cdrs = $schema->resultset('CD');
+
+{
+ my $cdrs2 = $cdrs->search({
+ artist_id => { 'in' => $art_rs->search({}, { rows => 1 })->get_column( 'id' )->as_query },
+ });
+
+ my $arr = $cdrs2->as_query;
+ my ($query, @bind) = @{$$arr};
+ is_same_sql_bind(
+ $query, \@bind,
+ "SELECT me.cdid,me.artist,me.title,me.year,me.genreid,me.single_track FROM cd me WHERE artist_id IN ( SELECT id FROM artist me LIMIT 1 )",
+ [],
+ );
+}
+
+TODO: {
+ local $TODO = "'+select' doesn't work with as_query yet.";
+ my $rs = $art_rs->search(
+ {},
+ {
+ '+select' => [
+ $cdrs->search({}, { rows => 1 })->get_column('id')->as_query,
+ ],
+ '+as' => [
+ 'cdid',
+ ],
+ },
+ );
+
+ my $arr = $rs->as_query;
+ my ($query, @bind) = @{$$arr};
+ is_same_sql_bind(
+ $query, \@bind,
+ "SELECT me.artistid, me.name, me.rank, me.charfield, (SELECT id FROM cds LIMIT 1) AS cdid FROM artist me",
+ [],
+ );
+}
+
+TODO: {
+ local $TODO = "'from' doesn't work with as_query yet.";
+ my $rs = $cdrs->search(
+ {},
+ {
+ alias => 'cd2',
+ from => [
+ { cd2 => $cdrs->search({ id => { '>' => 20 } })->as_query },
+ ],
+ },
+ );
+
+ my $arr = $rs->as_query;
+ my ($query, @bind) = @{$$arr};
+ is_same_sql_bind(
+ $query, \@bind,
+ "SELECT cd2.cdid, cd2.artist, cd2.title, cd2.year, cd2.genreid, cd2.single_track FROM (SELECT me.artistid, me.name, me.rank, me.charfield FROM cds me WHERE id > 20) cd2",
+ [],
+ );
+}
+
+# nested from
+TODO: {
+ local $TODO = "'from' doesn't work with as_query yet.";
+ my $art_rs2 = $schema->resultset('Artist')->search({},
+ {
+ from => [ { 'me' => 'artist' },
+ [ { 'cds' => $cdrs->search({},{ 'select' => [\'me.artist as cds_artist' ]})->as_query },
+ { 'me.artistid' => 'cds_artist' } ] ]
+ });
+
+ my $arr = $art_rs2->as_query;
+ my ($query, @bind) = @{$$arr};
+ is_same_sql_bind(
+ $query, \@bind,
+ "SELECT me.artistid, me.name, me.rank, me.charfield FROM artist me JOIN (SELECT me.artist as cds_artist FROM cd me) cds ON me.artistid = cds_artist", []
+ );
+
+
+}
+
+{
+ my $rs = $cdrs->search({
+ year => {
+ '=' => $cdrs->search(
+ { artistid => { '=' => \'me.artistid' } },
+ { alias => 'inner' }
+ )->get_column('year')->max_rs->as_query,
+ },
+ });
+ my $arr = $rs->as_query;
+ my ($query, @bind) = @{$$arr};
+ is_same_sql_bind(
+ $query, \@bind,
+ "SELECT me.cdid, me.artist, me.title, me.year, me.genreid, me.single_track FROM cd me WHERE year = (SELECT MAX(inner.year) FROM cd inner WHERE artistid = me.artistid)",
+ [],
+ );
+}
+
+__END__